author     cvs2svn <cvs2svn@FreeBSD.org>  1999-03-30 07:36:37 +0000
committer  cvs2svn <cvs2svn@FreeBSD.org>  1999-03-30 07:36:37 +0000
commit     b822db150531f5b7067d4d970080b7358e07ae89 (patch)
tree       dc2ea54d183c59ba5288679737fa9d835d3f3c64 /contrib
parent     4182f559065a2fcfe3f7daad75904882f26c14c9 (diff)
download   FreeBSD-src-b822db150531f5b7067d4d970080b7358e07ae89.zip
           FreeBSD-src-b822db150531f5b7067d4d970080b7358e07ae89.tar.gz
This commit was manufactured by cvs2svn to create branch 'VENDOR-gcc'.
Diffstat (limited to 'contrib')
-rw-r--r--  contrib/gcc/config/alpha/alpha.c        1700
-rw-r--r--  contrib/gcc/config/alpha/alpha.h        2088
-rw-r--r--  contrib/gcc/config/alpha/alpha.md       3770
-rw-r--r--  contrib/gcc/config/alpha/elf.h           524
-rw-r--r--  contrib/gcc/config/alpha/freebsd.h       103
-rw-r--r--  contrib/gcc/config/alpha/gdb-osf12.h      26
-rw-r--r--  contrib/gcc/config/alpha/gdb-osf2.h       26
-rw-r--r--  contrib/gcc/config/alpha/gdb.h            26
-rw-r--r--  contrib/gcc/config/alpha/osf12.h          31
-rw-r--r--  contrib/gcc/config/alpha/osf2.h           32
-rw-r--r--  contrib/gcc/config/alpha/x-alpha           1
-rw-r--r--  contrib/gcc/config/alpha/xm-alpha.h       78
-rw-r--r--  contrib/gcc/config/freebsd.h             120
-rw-r--r--  contrib/gcc/config/i386/freebsd-elf.h    199
14 files changed, 8724 insertions, 0 deletions
diff --git a/contrib/gcc/config/alpha/alpha.c b/contrib/gcc/config/alpha/alpha.c
new file mode 100644
index 0000000..f7428c3
--- /dev/null
+++ b/contrib/gcc/config/alpha/alpha.c
@@ -0,0 +1,1700 @@
+/* Subroutines used for code generation on the DEC Alpha.
+ Copyright (C) 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
+ Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+#include <stdio.h>
+#include "config.h"
+#include "rtl.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "real.h"
+#include "insn-config.h"
+#include "conditions.h"
+#include "insn-flags.h"
+#include "output.h"
+#include "insn-attr.h"
+#include "flags.h"
+#include "recog.h"
+#include "reload.h"
+#include "expr.h"
+#include "obstack.h"
+#include "tree.h"
+
+/* Save information from a "cmpxx" operation until the branch or scc is
+ emitted. */
+
+rtx alpha_compare_op0, alpha_compare_op1;
+int alpha_compare_fp_p;
+
+/* Save the name of the current function as used by the assembler. This
+ is used by the epilogue. */
+
+char *alpha_function_name;
+
+/* Non-zero if inside of a function, because the Alpha asm can't
+ handle .files inside of functions. */
+
+static int inside_function = FALSE;
+
+/* Nonzero if the current function needs gp. */
+
+int alpha_function_needs_gp;
+
+extern char *version_string;
+extern int rtx_equal_function_value_matters;
+
+/* Declarations of static functions. */
+static void alpha_set_memflags_1 PROTO((rtx, int, int, int));
+static void add_long_const PROTO((FILE *, HOST_WIDE_INT, int, int, int));
+
+/* Returns 1 if VALUE is a mask that contains full bytes of zeros or ones. */
+
+int
+zap_mask (value)
+ HOST_WIDE_INT value;
+{
+ int i;
+
+ for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
+ i++, value >>= 8)
+ if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
+ return 0;
+
+ return 1;
+}
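
For illustration (a standalone sketch, not part of the imported file), the same byte-at-a-time test as zap_mask, restated against a plain 64-bit integer; it shows which constants qualify as zap/zapnot byte masks:

    #include <stdio.h>

    /* Same byte-wise check as zap_mask above, for illustration only.  */
    static int is_zap_mask (unsigned long long value)
    {
      int i;

      for (i = 0; i < 8; i++, value >>= 8)
        if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
          return 0;
      return 1;
    }

    int main (void)
    {
      printf ("%d\n", is_zap_mask (0x00ff00ff00000000ULL)); /* 1: every byte 0x00 or 0xff */
      printf ("%d\n", is_zap_mask (0x000000000000007fULL)); /* 0: low byte is 0x7f */
      return 0;
    }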
+
+/* Returns 1 if OP is either the constant zero or a register. If a
+ register, it must be in the proper mode unless MODE is VOIDmode. */
+
+int
+reg_or_0_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ return op == const0_rtx || register_operand (op, mode);
+}
+
+/* Return 1 if OP is a constant in the range of 0-63 (for a shift) or
+ any register. */
+
+int
+reg_or_6bit_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ return ((GET_CODE (op) == CONST_INT
+ && (unsigned HOST_WIDE_INT) INTVAL (op) < 64)
+ || register_operand (op, mode));
+}
+
+
+/* Return 1 if OP is an 8-bit constant or any register. */
+
+int
+reg_or_8bit_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ return ((GET_CODE (op) == CONST_INT
+ && (unsigned HOST_WIDE_INT) INTVAL (op) < 0x100)
+ || register_operand (op, mode));
+}
+
+/* Return 1 if OP is an 8-bit constant. */
+
+int
+cint8_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ return (GET_CODE (op) == CONST_INT
+ && (unsigned HOST_WIDE_INT) INTVAL (op) < 0x100);
+}
+
+/* Return 1 if the operand is a valid second operand to an add insn. */
+
+int
+add_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ if (GET_CODE (op) == CONST_INT)
+ return (CONST_OK_FOR_LETTER_P (INTVAL (op), 'K')
+ || CONST_OK_FOR_LETTER_P (INTVAL (op), 'L')
+ || CONST_OK_FOR_LETTER_P (INTVAL (op), 'O'));
+
+ return register_operand (op, mode);
+}
+
+/* Return 1 if the operand is a valid second operand to a sign-extending
+ add insn. */
+
+int
+sext_add_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ if (GET_CODE (op) == CONST_INT)
+ return ((unsigned HOST_WIDE_INT) INTVAL (op) < 255
+ || (unsigned HOST_WIDE_INT) (- INTVAL (op)) < 255);
+
+ return register_operand (op, mode);
+}
+
+/* Return 1 if OP is the constant 4 or 8. */
+
+int
+const48_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ return (GET_CODE (op) == CONST_INT
+ && (INTVAL (op) == 4 || INTVAL (op) == 8));
+}
+
+/* Return 1 if OP is a valid first operand to an AND insn. */
+
+int
+and_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == VOIDmode)
+ return (zap_mask (CONST_DOUBLE_LOW (op))
+ && zap_mask (CONST_DOUBLE_HIGH (op)));
+
+ if (GET_CODE (op) == CONST_INT)
+ return ((unsigned HOST_WIDE_INT) INTVAL (op) < 0x100
+ || (unsigned HOST_WIDE_INT) ~ INTVAL (op) < 0x100
+ || zap_mask (INTVAL (op)));
+
+ return register_operand (op, mode);
+}
+
+/* Return 1 if OP is a valid first operand to an IOR or XOR insn. */
+
+int
+or_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ if (GET_CODE (op) == CONST_INT)
+ return ((unsigned HOST_WIDE_INT) INTVAL (op) < 0x100
+ || (unsigned HOST_WIDE_INT) ~ INTVAL (op) < 0x100);
+
+ return register_operand (op, mode);
+}
+
+/* Return 1 if OP is a constant that is the width, in bits, of an integral
+ mode smaller than DImode. */
+
+int
+mode_width_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ return (GET_CODE (op) == CONST_INT
+ && (INTVAL (op) == 8 || INTVAL (op) == 16 || INTVAL (op) == 32));
+}
+
+/* Return 1 if OP is a constant that is the byte mask (0xff, 0xffff or
+ 0xffffffff) of an integral machine mode smaller than DImode. */
+
+int
+mode_mask_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+#if HOST_BITS_PER_WIDE_INT == 32
+ if (GET_CODE (op) == CONST_DOUBLE)
+ return CONST_DOUBLE_HIGH (op) == 0 && CONST_DOUBLE_LOW (op) == -1;
+#endif
+
+ return (GET_CODE (op) == CONST_INT
+ && (INTVAL (op) == 0xff
+ || INTVAL (op) == 0xffff
+#if HOST_BITS_PER_WIDE_INT == 64
+ || INTVAL (op) == 0xffffffff
+#endif
+ ));
+}
+
+/* Return 1 if OP is a multiple of 8 less than 64. */
+
+int
+mul8_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ return (GET_CODE (op) == CONST_INT
+ && (unsigned HOST_WIDE_INT) INTVAL (op) < 64
+ && (INTVAL (op) & 7) == 0);
+}
+
+/* Return 1 if OP is the constant zero in floating-point. */
+
+int
+fp0_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ return (GET_MODE (op) == mode
+ && GET_MODE_CLASS (mode) == MODE_FLOAT && op == CONST0_RTX (mode));
+}
+
+/* Return 1 if OP is the floating-point constant zero or a register. */
+
+int
+reg_or_fp0_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ return fp0_operand (op, mode) || register_operand (op, mode);
+}
+
+/* Return 1 if OP is a register or a constant integer. */
+
+
+int
+reg_or_cint_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ return GET_CODE (op) == CONST_INT || register_operand (op, mode);
+}
+
+/* Return 1 if OP is something that can be reloaded into a register;
+ if it is a MEM, it need not be valid. */
+
+int
+some_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ if (mode != VOIDmode && GET_MODE (op) != VOIDmode && mode != GET_MODE (op))
+ return 0;
+
+ switch (GET_CODE (op))
+ {
+ case REG: case MEM: case CONST_DOUBLE:
+ case CONST_INT: case LABEL_REF: case SYMBOL_REF: case CONST:
+ return 1;
+
+ case SUBREG:
+ return some_operand (SUBREG_REG (op), VOIDmode);
+ }
+
+ return 0;
+}
+
+/* Return 1 if OP is a valid operand for the source of a move insn. */
+
+int
+input_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ if (mode != VOIDmode && GET_MODE (op) != VOIDmode && mode != GET_MODE (op))
+ return 0;
+
+ if (GET_MODE_CLASS (mode) == MODE_FLOAT && GET_MODE (op) != mode)
+ return 0;
+
+ switch (GET_CODE (op))
+ {
+ case LABEL_REF:
+ case SYMBOL_REF:
+ case CONST:
+ /* This handles both the Windows/NT and OSF cases. */
+ return mode == ptr_mode || mode == DImode;
+
+ case REG:
+ return 1;
+
+ case SUBREG:
+ if (register_operand (op, mode))
+ return 1;
+ /* ... fall through ... */
+ case MEM:
+ return mode != HImode && mode != QImode && general_operand (op, mode);
+
+ case CONST_DOUBLE:
+ return GET_MODE_CLASS (mode) == MODE_FLOAT && op == CONST0_RTX (mode);
+
+ case CONST_INT:
+ return mode == QImode || mode == HImode || add_operand (op, mode);
+ }
+
+ return 0;
+}
+
+/* Return 1 if OP is a SYMBOL_REF for a function known to be in this
+ file. */
+
+int
+current_file_function_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return (GET_CODE (op) == SYMBOL_REF
+ && ! profile_flag && ! profile_block_flag
+ && (SYMBOL_REF_FLAG (op)
+ || op == XEXP (DECL_RTL (current_function_decl), 0)));
+}
+
+/* Return 1 if OP is a valid operand for the MEM of a CALL insn. */
+
+int
+call_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (mode != Pmode)
+ return 0;
+
+ return (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == REG);
+}
+
+/* Return 1 if OP is a valid Alpha comparison operator. Here we know which
+ comparisons are valid in which insn. */
+
+int
+alpha_comparison_operator (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ enum rtx_code code = GET_CODE (op);
+
+ if (mode != GET_MODE (op) || GET_RTX_CLASS (code) != '<')
+ return 0;
+
+ return (code == EQ || code == LE || code == LT
+ || (mode == DImode && (code == LEU || code == LTU)));
+}
+
+/* Return 1 if OP is a signed comparison operation. */
+
+int
+signed_comparison_operator (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ switch (GET_CODE (op))
+ {
+ case EQ: case NE: case LE: case LT: case GE: case GT:
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Return 1 if this is a divide or modulus operator. */
+
+int
+divmod_operator (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ switch (GET_CODE (op))
+ {
+ case DIV: case MOD: case UDIV: case UMOD:
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Return 1 if this memory address is a known aligned register plus
+ a constant. It must be a valid address. This means that we can do
+ this as an aligned reference plus some offset.
+
+ Take into account what reload will do.
+
+ We could say that out-of-range stack slots are alignable, but that would
+ complicate get_aligned_mem and it isn't worth the trouble since few
+ functions have large stack space. */
+
+int
+aligned_memory_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ if (GET_CODE (op) == SUBREG)
+ {
+ if (GET_MODE (op) != mode)
+ return 0;
+ op = SUBREG_REG (op);
+ mode = GET_MODE (op);
+ }
+
+ if (reload_in_progress && GET_CODE (op) == REG
+ && REGNO (op) >= FIRST_PSEUDO_REGISTER)
+ op = reg_equiv_mem[REGNO (op)];
+
+ if (GET_CODE (op) != MEM || GET_MODE (op) != mode
+ || ! memory_address_p (mode, XEXP (op, 0)))
+ return 0;
+
+ op = XEXP (op, 0);
+
+ if (GET_CODE (op) == PLUS)
+ op = XEXP (op, 0);
+
+ return (GET_CODE (op) == REG
+ && (REGNO (op) == STACK_POINTER_REGNUM
+ || op == hard_frame_pointer_rtx
+ || (REGNO (op) >= FIRST_VIRTUAL_REGISTER
+ && REGNO (op) <= LAST_VIRTUAL_REGISTER)));
+}
+
+/* Similar, but return 1 if OP is a MEM which is not alignable. */
+
+int
+unaligned_memory_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ if (GET_CODE (op) == SUBREG)
+ {
+ if (GET_MODE (op) != mode)
+ return 0;
+ op = SUBREG_REG (op);
+ mode = GET_MODE (op);
+ }
+
+ if (reload_in_progress && GET_CODE (op) == REG
+ && REGNO (op) >= FIRST_PSEUDO_REGISTER)
+ op = reg_equiv_mem[REGNO (op)];
+
+ if (GET_CODE (op) != MEM || GET_MODE (op) != mode)
+ return 0;
+
+ op = XEXP (op, 0);
+
+ if (! memory_address_p (mode, op))
+ return 1;
+
+ if (GET_CODE (op) == PLUS)
+ op = XEXP (op, 0);
+
+ return (GET_CODE (op) != REG
+ || (REGNO (op) != STACK_POINTER_REGNUM
+ && op != hard_frame_pointer_rtx
+ && (REGNO (op) < FIRST_VIRTUAL_REGISTER
+ || REGNO (op) > LAST_VIRTUAL_REGISTER)));
+}
+
+/* Return 1 if OP is any memory location. During reload a pseudo matches. */
+
+int
+any_memory_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ return (GET_CODE (op) == MEM
+ || (GET_CODE (op) == SUBREG && GET_CODE (SUBREG_REG (op)) == REG)
+ || (reload_in_progress && GET_CODE (op) == REG
+ && REGNO (op) >= FIRST_PSEUDO_REGISTER)
+ || (reload_in_progress && GET_CODE (op) == SUBREG
+ && GET_CODE (SUBREG_REG (op)) == REG
+ && REGNO (SUBREG_REG (op)) >= FIRST_PSEUDO_REGISTER));
+}
+
+/* REF is an alignable memory location. Place an aligned SImode
+ reference into *PALIGNED_MEM and the number of bits to shift into
+ *PBITNUM. */
+
+void
+get_aligned_mem (ref, paligned_mem, pbitnum)
+ rtx ref;
+ rtx *paligned_mem, *pbitnum;
+{
+ rtx base;
+ HOST_WIDE_INT offset = 0;
+
+ if (GET_CODE (ref) == SUBREG)
+ {
+ offset = SUBREG_WORD (ref) * UNITS_PER_WORD;
+ if (BYTES_BIG_ENDIAN)
+ offset -= (MIN (UNITS_PER_WORD, GET_MODE_SIZE (GET_MODE (ref)))
+ - MIN (UNITS_PER_WORD,
+ GET_MODE_SIZE (GET_MODE (SUBREG_REG (ref)))));
+ ref = SUBREG_REG (ref);
+ }
+
+ if (GET_CODE (ref) == REG)
+ ref = reg_equiv_mem[REGNO (ref)];
+
+ if (reload_in_progress)
+ base = find_replacement (&XEXP (ref, 0));
+ else
+ base = XEXP (ref, 0);
+
+ if (GET_CODE (base) == PLUS)
+ offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
+
+ *paligned_mem = gen_rtx (MEM, SImode,
+ plus_constant (base, offset & ~3));
+ MEM_IN_STRUCT_P (*paligned_mem) = MEM_IN_STRUCT_P (ref);
+ MEM_VOLATILE_P (*paligned_mem) = MEM_VOLATILE_P (ref);
+ RTX_UNCHANGING_P (*paligned_mem) = RTX_UNCHANGING_P (ref);
+
+ *pbitnum = GEN_INT ((offset & 3) * 8);
+}
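
A quick worked instance of the split performed above (an illustration, not code from the import): for a byte reference at BASE+6, the aligned SImode reference lands at BASE+4 and the field starts at bit 16.

    #include <stdio.h>

    int main (void)
    {
      long offset  = 6;                 /* hypothetical byte offset from BASE */
      long aligned = offset & ~3;       /* offset of the aligned longword: 4 */
      long bitnum  = (offset & 3) * 8;  /* bit position within it: 16 */

      printf ("aligned offset %ld, bit number %ld\n", aligned, bitnum);
      return 0;
    }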
+
+/* Similar, but just get the address. Handle the two reload cases. */
+
+rtx
+get_unaligned_address (ref)
+ rtx ref;
+{
+ rtx base;
+ HOST_WIDE_INT offset = 0;
+
+ if (GET_CODE (ref) == SUBREG)
+ {
+ offset = SUBREG_WORD (ref) * UNITS_PER_WORD;
+ if (BYTES_BIG_ENDIAN)
+ offset -= (MIN (UNITS_PER_WORD, GET_MODE_SIZE (GET_MODE (ref)))
+ - MIN (UNITS_PER_WORD,
+ GET_MODE_SIZE (GET_MODE (SUBREG_REG (ref)))));
+ ref = SUBREG_REG (ref);
+ }
+
+ if (GET_CODE (ref) == REG)
+ ref = reg_equiv_mem[REGNO (ref)];
+
+ if (reload_in_progress)
+ base = find_replacement (&XEXP (ref, 0));
+ else
+ base = XEXP (ref, 0);
+
+ if (GET_CODE (base) == PLUS)
+ offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
+
+ return plus_constant (base, offset);
+}
+
+/* Subfunction of the following function. Update the flags of any MEM
+ found in part of X. */
+
+static void
+alpha_set_memflags_1 (x, in_struct_p, volatile_p, unchanging_p)
+ rtx x;
+ int in_struct_p, volatile_p, unchanging_p;
+{
+ int i;
+
+ switch (GET_CODE (x))
+ {
+ case SEQUENCE:
+ case PARALLEL:
+ for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
+ alpha_set_memflags_1 (XVECEXP (x, 0, i), in_struct_p, volatile_p,
+ unchanging_p);
+ break;
+
+ case INSN:
+ alpha_set_memflags_1 (PATTERN (x), in_struct_p, volatile_p,
+ unchanging_p);
+ break;
+
+ case SET:
+ alpha_set_memflags_1 (SET_DEST (x), in_struct_p, volatile_p,
+ unchanging_p);
+ alpha_set_memflags_1 (SET_SRC (x), in_struct_p, volatile_p,
+ unchanging_p);
+ break;
+
+ case MEM:
+ MEM_IN_STRUCT_P (x) = in_struct_p;
+ MEM_VOLATILE_P (x) = volatile_p;
+ RTX_UNCHANGING_P (x) = unchanging_p;
+ break;
+ }
+}
+
+/* Given INSN, which is either an INSN or a SEQUENCE generated to
+ perform a memory operation, look for any MEMs in either a SET_DEST or
+ a SET_SRC and copy the in-struct, unchanging, and volatile flags from
+ REF into each of the MEMs found. If REF is not a MEM, don't do
+ anything. */
+
+void
+alpha_set_memflags (insn, ref)
+ rtx insn;
+ rtx ref;
+{
+ /* Note that it is always safe to get these flags, though they won't
+ be what we think if REF is not a MEM. */
+ int in_struct_p = MEM_IN_STRUCT_P (ref);
+ int volatile_p = MEM_VOLATILE_P (ref);
+ int unchanging_p = RTX_UNCHANGING_P (ref);
+
+ if (GET_CODE (ref) != MEM
+ || (! in_struct_p && ! volatile_p && ! unchanging_p))
+ return;
+
+ alpha_set_memflags_1 (insn, in_struct_p, volatile_p, unchanging_p);
+}
+
+/* Try to output insns to set TARGET equal to the constant C if it can be
+ done in less than N insns. Do all computations in MODE. Returns the place
+ where the output has been placed if it can be done and the insns have been
+ emitted. If it would take more than N insns, zero is returned and no
+ insns are emitted. */
+
+rtx
+alpha_emit_set_const (target, mode, c, n)
+ rtx target;
+ enum machine_mode mode;
+ HOST_WIDE_INT c;
+ int n;
+{
+ HOST_WIDE_INT new = c;
+ int i, bits;
+ /* Use a pseudo if highly optimizing and still generating RTL. */
+ rtx subtarget
+ = (flag_expensive_optimizations && rtx_equal_function_value_matters
+ ? 0 : target);
+ rtx temp;
+
+#if HOST_BITS_PER_WIDE_INT == 64
+ /* We are only called for SImode and DImode. If this is SImode, ensure that
+ we are sign extended to a full word. This does not make any sense when
+ cross-compiling on a narrow machine. */
+
+ if (mode == SImode)
+ c = (c & 0xffffffff) - 2 * (c & 0x80000000);
+#endif
+
+ /* If this is a sign-extended 32-bit constant, we can do this in at most
+ three insns, so do it if we have enough insns left. We always have
+ a sign-extended 32-bit constant when compiling on a narrow machine.
+ Note that we cannot handle the constant 0x80000000. */
+
+ if ((HOST_BITS_PER_WIDE_INT != 64
+ || c >> 31 == -1 || c >> 31 == 0)
+ && c != 0x80000000U)
+ {
+ HOST_WIDE_INT low = (c & 0xffff) - 2 * (c & 0x8000);
+ HOST_WIDE_INT tmp1 = c - low;
+ HOST_WIDE_INT high
+ = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
+ HOST_WIDE_INT extra = 0;
+
+ /* If HIGH will be interpreted as negative but the constant is
+ positive, we must adjust it to do two ldah insns. */
+
+ if ((high & 0x8000) != 0 && c >= 0)
+ {
+ extra = 0x4000;
+ tmp1 -= 0x40000000;
+ high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
+ }
+
+ if (c == low || (low == 0 && extra == 0))
+ return copy_to_suggested_reg (GEN_INT (c), target, mode);
+ else if (n >= 2 + (extra != 0)
+ /* We can't do this in SImode if HIGH required adjustment.
+ This is because the code relies on an implicit overflow
+ which is invisible to the RTL. We can thus get incorrect
+ code if the two ldah instructions are combined. */
+ && ! (mode == SImode && extra != 0))
+ {
+ temp = copy_to_suggested_reg (GEN_INT (low), subtarget, mode);
+
+ if (extra != 0)
+ temp = expand_binop (mode, add_optab, temp, GEN_INT (extra << 16),
+ subtarget, 0, OPTAB_WIDEN);
+
+ return expand_binop (mode, add_optab, temp, GEN_INT (high << 16),
+ target, 0, OPTAB_WIDEN);
+ }
+ }
+
+ /* If we couldn't do it that way, try some other methods. But if we have
+ no instructions left, don't bother. Likewise, if this is SImode and
+ we can't make pseudos, we can't do anything since the expand_binop
+ and expand_unop calls will widen and try to make pseudos. */
+
+ if (n == 1
+ || (mode == SImode && ! rtx_equal_function_value_matters))
+ return 0;
+
+#if HOST_BITS_PER_WIDE_INT == 64
+ /* First, see if we can load a value into the target that is the same as the
+ constant except that all bytes that are 0 are changed to be 0xff. If we
+ can, then we can do a ZAPNOT to obtain the desired constant. */
+
+ for (i = 0; i < 64; i += 8)
+ if ((new & ((HOST_WIDE_INT) 0xff << i)) == 0)
+ new |= (HOST_WIDE_INT) 0xff << i;
+
+ /* We are only called for SImode and DImode. If this is SImode, ensure that
+ we are sign extended to a full word. */
+
+ if (mode == SImode)
+ new = (new & 0xffffffff) - 2 * (new & 0x80000000);
+
+ if (new != c
+ && (temp = alpha_emit_set_const (subtarget, mode, new, n - 1)) != 0)
+ return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new),
+ target, 0, OPTAB_WIDEN);
+#endif
+
+ /* Next, see if we can load a related constant and then shift and possibly
+ negate it to get the constant we want. Try this once for each increasing
+ number of insns. */
+
+ for (i = 1; i < n; i++)
+ {
+ /* First try complementing. */
+ if ((temp = alpha_emit_set_const (subtarget, mode, ~ c, i)) != 0)
+ return expand_unop (mode, one_cmpl_optab, temp, target, 0);
+
+ /* Next try to form a constant and do a left shift. We can do this
+ if some low-order bits are zero; the exact_log2 call below tells
+ us that information. The bits we are shifting out could be any
+ value, but here we'll just try the 0- and sign-extended forms of
+ the constant. To try to increase the chance of having the same
+ constant in more than one insn, start at the highest number of
+ bits to shift, but try all possibilities in case a ZAPNOT will
+ be useful. */
+
+ if ((bits = exact_log2 (c & - c)) > 0)
+ for (; bits > 0; bits--)
+ if ((temp = (alpha_emit_set_const
+ (subtarget, mode,
+ (unsigned HOST_WIDE_INT) (c >> bits), i))) != 0
+ || ((temp = (alpha_emit_set_const
+ (subtarget, mode,
+ ((unsigned HOST_WIDE_INT) c) >> bits, i)))
+ != 0))
+ return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
+ target, 0, OPTAB_WIDEN);
+
+ /* Now try high-order zero bits. Here we try the shifted-in bits as
+ all zero and all ones. Be careful to avoid shifting outside the
+ mode and to avoid shifting outside the host wide int size. */
+
+ if ((bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
+ - floor_log2 (c) - 1)) > 0)
+ for (; bits > 0; bits--)
+ if ((temp = alpha_emit_set_const (subtarget, mode,
+ c << bits, i)) != 0
+ || ((temp = (alpha_emit_set_const
+ (subtarget, mode,
+ ((c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1)),
+ i)))
+ != 0))
+ return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
+ target, 1, OPTAB_WIDEN);
+
+ /* Now try high-order 1 bits. We get that with a sign-extension.
+ But one bit isn't enough here. Be careful to avoid shifting outside
+ the mode and to avoid shifting outside the host wide int size. */
+
+ if ((bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
+ - floor_log2 (~ c) - 2)) > 0)
+ for (; bits > 0; bits--)
+ if ((temp = alpha_emit_set_const (subtarget, mode,
+ c << bits, i)) != 0
+ || ((temp = (alpha_emit_set_const
+ (subtarget, mode,
+ ((c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1)),
+ i)))
+ != 0))
+ return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
+ target, 0, OPTAB_WIDEN);
+ }
+
+ return 0;
+}
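
To make the 16-bit decomposition above concrete, here is a standalone sketch (an illustration with an arbitrarily chosen constant that exercises the HIGH adjustment); it reproduces only the arithmetic, not the RTL emission:

    #include <stdio.h>

    int main (void)
    {
      long long c = 0x7fff8000;   /* sample sign-extended 32-bit constant */
      long long low  = (c & 0xffff) - 2 * (c & 0x8000);
      long long tmp1 = c - low;
      long long high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
      long long extra = 0;

      /* HIGH would be seen as negative although C is positive, so fall back
         to two ldah-sized pieces, as in the code above.  */
      if ((high & 0x8000) != 0 && c >= 0)
        {
          extra = 0x4000;
          tmp1 -= 0x40000000;
          high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
        }

      printf ("low=%lld extra=%lld high=%lld  reassembled=%#llx\n",
              low, extra, high, low + (extra << 16) + (high << 16));
      /* Prints low=-32768 extra=16384 high=16384  reassembled=0x7fff8000:
         one lda plus two ldah additions rebuild the constant.  */
      return 0;
    }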
+
+/* Adjust the cost of a scheduling dependency. Return the new cost of
+ a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
+
+int
+alpha_adjust_cost (insn, link, dep_insn, cost)
+ rtx insn;
+ rtx link;
+ rtx dep_insn;
+ int cost;
+{
+ rtx set;
+
+ /* If the dependence is an anti-dependence, there is no cost. For an
+ output dependence, there is sometimes a cost, but it doesn't seem
+ worth handling those few cases. */
+
+ if (REG_NOTE_KIND (link) != 0)
+ return 0;
+
+ /* If INSN is a store insn and DEP_INSN is setting the data being stored,
+ we can sometimes lower the cost. */
+
+ if (recog_memoized (insn) >= 0 && get_attr_type (insn) == TYPE_ST
+ && (set = single_set (dep_insn)) != 0
+ && GET_CODE (PATTERN (insn)) == SET
+ && rtx_equal_p (SET_DEST (set), SET_SRC (PATTERN (insn))))
+ switch (get_attr_type (dep_insn))
+ {
+ case TYPE_LD:
+ /* No savings here. */
+ return cost;
+
+ case TYPE_IMULL:
+ case TYPE_IMULQ:
+ /* In these cases, we save one cycle. */
+ return cost - 2;
+
+ default:
+ /* In all other cases, we save two cycles. */
+ return MAX (0, cost - 4);
+ }
+
+ /* Another case that needs adjustment is an arithmetic or logical
+ operation. Its cost is usually one cycle, but we default it to
+ two in the MD file. The only case where it is actually two is
+ for the address in loads and stores. */
+
+ if (recog_memoized (dep_insn) >= 0
+ && get_attr_type (dep_insn) == TYPE_IADDLOG)
+ switch (get_attr_type (insn))
+ {
+ case TYPE_LD:
+ case TYPE_ST:
+ return cost;
+
+ default:
+ return 2;
+ }
+
+ /* The final case is when a compare feeds into an integer branch. The cost
+ is only one cycle in that case. */
+
+ if (recog_memoized (dep_insn) >= 0
+ && get_attr_type (dep_insn) == TYPE_ICMP
+ && recog_memoized (insn) >= 0
+ && get_attr_type (insn) == TYPE_IBR)
+ return 2;
+
+ /* Otherwise, return the default cost. */
+
+ return cost;
+}
+
+/* Print an operand. Recognize special options, documented below. */
+
+void
+print_operand (file, x, code)
+ FILE *file;
+ rtx x;
+ char code;
+{
+ int i;
+
+ switch (code)
+ {
+ case 'r':
+ /* If this operand is the constant zero, write it as "$31". */
+ if (GET_CODE (x) == REG)
+ fprintf (file, "%s", reg_names[REGNO (x)]);
+ else if (x == CONST0_RTX (GET_MODE (x)))
+ fprintf (file, "$31");
+ else
+ output_operand_lossage ("invalid %%r value");
+
+ break;
+
+ case 'R':
+ /* Similar, but for floating-point. */
+ if (GET_CODE (x) == REG)
+ fprintf (file, "%s", reg_names[REGNO (x)]);
+ else if (x == CONST0_RTX (GET_MODE (x)))
+ fprintf (file, "$f31");
+ else
+ output_operand_lossage ("invalid %%R value");
+
+ break;
+
+ case 'N':
+ /* Write the 1's complement of a constant. */
+ if (GET_CODE (x) != CONST_INT)
+ output_operand_lossage ("invalid %%N value");
+
+ fprintf (file, "%ld", ~ INTVAL (x));
+ break;
+
+ case 'P':
+ /* Write 1 << C, for a constant C. */
+ if (GET_CODE (x) != CONST_INT)
+ output_operand_lossage ("invalid %%P value");
+
+ fprintf (file, "%ld", (HOST_WIDE_INT) 1 << INTVAL (x));
+ break;
+
+ case 'h':
+ /* Write the high-order 16 bits of a constant, sign-extended. */
+ if (GET_CODE (x) != CONST_INT)
+ output_operand_lossage ("invalid %%h value");
+
+ fprintf (file, "%ld", INTVAL (x) >> 16);
+ break;
+
+ case 'L':
+ /* Write the low-order 16 bits of a constant, sign-extended. */
+ if (GET_CODE (x) != CONST_INT)
+ output_operand_lossage ("invalid %%L value");
+
+ fprintf (file, "%ld", (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
+ break;
+
+ case 'm':
+ /* Write mask for ZAP insn. */
+ if (GET_CODE (x) == CONST_DOUBLE)
+ {
+ HOST_WIDE_INT mask = 0;
+ HOST_WIDE_INT value;
+
+ value = CONST_DOUBLE_LOW (x);
+ for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
+ i++, value >>= 8)
+ if (value & 0xff)
+ mask |= (1 << i);
+
+ value = CONST_DOUBLE_HIGH (x);
+ for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
+ i++, value >>= 8)
+ if (value & 0xff)
+ mask |= (1 << (i + sizeof (int)));
+
+ fprintf (file, "%ld", mask & 0xff);
+ }
+
+ else if (GET_CODE (x) == CONST_INT)
+ {
+ HOST_WIDE_INT mask = 0, value = INTVAL (x);
+
+ for (i = 0; i < 8; i++, value >>= 8)
+ if (value & 0xff)
+ mask |= (1 << i);
+
+ fprintf (file, "%ld", mask);
+ }
+ else
+ output_operand_lossage ("invalid %%m value");
+ break;
+
+ case 'M':
+ /* 'b', 'w', or 'l' as the value of the constant. */
+ if (GET_CODE (x) != CONST_INT
+ || (INTVAL (x) != 8 && INTVAL (x) != 16 && INTVAL (x) != 32))
+ output_operand_lossage ("invalid %%M value");
+
+ fprintf (file, "%s",
+ INTVAL (x) == 8 ? "b" : INTVAL (x) == 16 ? "w" : "l");
+ break;
+
+ case 'U':
+ /* Similar, except do it from the mask. */
+ if (GET_CODE (x) == CONST_INT && INTVAL (x) == 0xff)
+ fprintf (file, "b");
+ else if (GET_CODE (x) == CONST_INT && INTVAL (x) == 0xffff)
+ fprintf (file, "w");
+#if HOST_BITS_PER_WIDE_INT == 32
+ else if (GET_CODE (x) == CONST_DOUBLE
+ && CONST_DOUBLE_HIGH (x) == 0
+ && CONST_DOUBLE_LOW (x) == -1)
+ fprintf (file, "l");
+#else
+ else if (GET_CODE (x) == CONST_INT && INTVAL (x) == 0xffffffff)
+ fprintf (file, "l");
+#endif
+ else
+ output_operand_lossage ("invalid %%U value");
+ break;
+
+ case 's':
+ /* Write the constant value divided by 8. */
+ if (GET_CODE (x) != CONST_INT
+ || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
+ || (INTVAL (x) & 7) != 0)
+ output_operand_lossage ("invalid %%s value");
+
+ fprintf (file, "%ld", INTVAL (x) / 8);
+ break;
+
+ case 'S':
+ /* Same, except compute (64 - c) / 8 */
+
+ if (GET_CODE (x) != CONST_INT
+ || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
+ || (INTVAL (x) & 7) != 0)
+ output_operand_lossage ("invalid %%S value");
+
+ fprintf (file, "%ld", (64 - INTVAL (x)) / 8);
+ break;
+
+ case 'C':
+ /* Write out comparison name. */
+ if (GET_RTX_CLASS (GET_CODE (x)) != '<')
+ output_operand_lossage ("invalid %%C value");
+
+ if (GET_CODE (x) == LEU)
+ fprintf (file, "ule");
+ else if (GET_CODE (x) == LTU)
+ fprintf (file, "ult");
+ else
+ fprintf (file, "%s", GET_RTX_NAME (GET_CODE (x)));
+ break;
+
+ case 'D':
+ /* Similar, but write reversed code. We can't get an unsigned code
+ here. */
+ if (GET_RTX_CLASS (GET_CODE (x)) != '<')
+ output_operand_lossage ("invalid %%D value");
+
+ fprintf (file, "%s", GET_RTX_NAME (reverse_condition (GET_CODE (x))));
+ break;
+
+ case 'c':
+ /* Similar to `C', but swap. We can't get unsigned here either. */
+ if (GET_RTX_CLASS (GET_CODE (x)) != '<')
+ output_operand_lossage ("invalid %%c value");
+
+ fprintf (file, "%s", GET_RTX_NAME (swap_condition (GET_CODE (x))));
+ break;
+
+ case 'd':
+ /* Similar, but reverse and swap. We can't get unsigned here either. */
+ if (GET_RTX_CLASS (GET_CODE (x)) != '<')
+ output_operand_lossage ("invalid %%d value");
+
+ fprintf (file, "%s",
+ GET_RTX_NAME (swap_condition (reverse_condition ((GET_CODE (x))))));
+ break;
+
+ case 'E':
+ /* Write the divide or modulus operator. */
+ switch (GET_CODE (x))
+ {
+ case DIV:
+ fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
+ break;
+ case UDIV:
+ fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
+ break;
+ case MOD:
+ fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
+ break;
+ case UMOD:
+ fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
+ break;
+ default:
+ output_operand_lossage ("invalid %%E value");
+ break;
+ }
+ break;
+
+ case 'A':
+ /* Write "_u" for unaligned access. */
+ if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
+ fprintf (file, "_u");
+ break;
+
+ case 0:
+ if (GET_CODE (x) == REG)
+ fprintf (file, "%s", reg_names[REGNO (x)]);
+ else if (GET_CODE (x) == MEM)
+ output_address (XEXP (x, 0));
+ else
+ output_addr_const (file, x);
+ break;
+
+ default:
+ output_operand_lossage ("invalid %%xn code");
+ }
+}
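
As an illustration of the '%m' case above (a sketch assuming a 64-bit HOST_WIDE_INT, not part of the import): the 64-bit constant is condensed into the 8-bit byte mask that zap/zapnot take, one mask bit per non-zero byte.

    #include <stdio.h>

    int main (void)
    {
      unsigned long long value = 0x00ff00000000ffffULL;  /* sample ZAP operand */
      unsigned int mask = 0;
      int i;

      for (i = 0; i < 8; i++, value >>= 8)
        if (value & 0xff)          /* byte i is not all-zero */
          mask |= 1u << i;

      printf ("0x%02x\n", mask);   /* bytes 0, 1 and 6 are set -> 0x43 */
      return 0;
    }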
+
+/* Do what is necessary for `va_start'. The argument is ignored;
+ we look at the current function to determine if stdarg or varargs
+ is used and fill in an initial va_list. A pointer to this constructor
+ is returned. */
+
+struct rtx_def *
+alpha_builtin_saveregs (arglist)
+ tree arglist;
+{
+ rtx block, addr, argsize;
+ tree fntype = TREE_TYPE (current_function_decl);
+ int stdarg = (TYPE_ARG_TYPES (fntype) != 0
+ && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
+ != void_type_node));
+
+ /* Compute the current position into the args, taking into account
+ both registers and memory. Both of these are already included in
+ current_function_args_info. */
+
+ argsize = GEN_INT (current_function_args_info * UNITS_PER_WORD);
+
+ /* SETUP_INCOMING_VARARGS moves the starting address base up by 48,
+ storing fp arg registers in the first 48 bytes, and the integer arg
+ registers in the next 48 bytes. This is only done, however, if any
+ integer registers need to be stored.
+
+ If no integer registers need be stored, then we must subtract 48 in
+ order to account for the integer arg registers which are counted in
+ argsize above, but which are not actually stored on the stack. */
+
+ addr = (current_function_args_info <= 5 + stdarg
+ ? plus_constant (virtual_incoming_args_rtx, 6 * UNITS_PER_WORD)
+ : plus_constant (virtual_incoming_args_rtx, - (6 * UNITS_PER_WORD)));
+
+ addr = force_operand (addr, NULL_RTX);
+
+ /* Allocate the va_list constructor */
+ block = assign_stack_local (BLKmode, 2 * UNITS_PER_WORD, BITS_PER_WORD);
+ RTX_UNCHANGING_P (block) = 1;
+ RTX_UNCHANGING_P (XEXP (block, 0)) = 1;
+
+ /* Store the address of the first integer register in the __base member. */
+
+#ifdef POINTERS_EXTEND_UNSIGNED
+ addr = convert_memory_address (ptr_mode, addr);
+#endif
+
+ emit_move_insn (change_address (block, ptr_mode, XEXP (block, 0)), addr);
+
+ /* Store the argsize as the __va_offset member. */
+ emit_move_insn (change_address (block, TYPE_MODE (integer_type_node),
+ plus_constant (XEXP (block, 0),
+ POINTER_SIZE/BITS_PER_UNIT)),
+ argsize);
+
+ /* Return the address of the va_list constructor, but don't put it in a
+ register. Doing so would fail when not optimizing and produce worse
+ code when optimizing. */
+ return XEXP (block, 0);
+}
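
For orientation, the two-word block built above amounts to a base pointer plus a byte offset; a hypothetical C rendering of that layout (field names echo the comments above, not the system headers):

    /* Sketch of the va_list constructor laid out by alpha_builtin_saveregs;
       not the definition from the system's <varargs.h>.  */
    struct alpha_va_list_sketch
    {
      char *base;        /* __base: address of the register save area */
      int   va_offset;   /* __va_offset: argument bytes consumed so far */
    };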
+
+/* This page contains routines that are used to determine what the function
+ prologue and epilogue code will do and write them out. */
+
+/* Compute the size of the save area in the stack. */
+
+int
+alpha_sa_size ()
+{
+ int size = 0;
+ int i;
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (! fixed_regs[i] && ! call_used_regs[i] && regs_ever_live[i])
+ size++;
+
+ /* If some registers were saved but not reg 26, reg 26 must also
+ be saved, so leave space for it. */
+ if (size != 0 && ! regs_ever_live[26])
+ size++;
+
+ /* Our size must be even (multiple of 16 bytes). */
+ if (size & 1)
+ size ++;
+
+ return size * 8;
+}
+
+/* Return 1 if this function can directly return via $26. */
+
+int
+direct_return ()
+{
+ return (reload_completed && alpha_sa_size () == 0
+ && get_frame_size () == 0
+ && current_function_outgoing_args_size == 0
+ && current_function_pretend_args_size == 0);
+}
+
+/* Write a version stamp. Don't write anything if we are running as a
+ cross-compiler. Otherwise, use the versions in /usr/include/stamp.h. */
+
+#if !defined(CROSS_COMPILE) && !defined(_WIN32) && \
+ !defined(__NetBSD__) && !defined(__FreeBSD__)
+#include <stamp.h>
+#endif
+
+void
+alpha_write_verstamp (file)
+ FILE *file;
+{
+#ifdef MS_STAMP
+ fprintf (file, "\t.verstamp %d %d\n", MS_STAMP, LS_STAMP);
+#endif
+}
+
+/* Write code to add constant C to register number IN_REG (possibly 31)
+ and put the result into OUT_REG. Use TEMP_REG as a scratch register;
+ usually this will be OUT_REG, but should not be if OUT_REG is
+ STACK_POINTER_REGNUM, since it must be updated in a single instruction.
+ Write the code to FILE. */
+
+static void
+add_long_const (file, c, in_reg, out_reg, temp_reg)
+ FILE *file;
+ HOST_WIDE_INT c;
+ int in_reg, out_reg, temp_reg;
+{
+ HOST_WIDE_INT low = (c & 0xffff) - 2 * (c & 0x8000);
+ HOST_WIDE_INT tmp1 = c - low;
+ HOST_WIDE_INT high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
+ HOST_WIDE_INT extra = 0;
+
+ /* We don't have code to write out constants larger than 32 bits. */
+#if HOST_BITS_PER_LONG_INT == 64
+ if ((unsigned HOST_WIDE_INT) c >> 32 != 0)
+ abort ();
+#endif
+
+ /* If HIGH will be interpreted as negative, we must adjust it to do two
+ ldah insns. Note that we will never be building a negative constant
+ here. */
+
+ if (high & 0x8000)
+ {
+ extra = 0x4000;
+ tmp1 -= 0x40000000;
+ high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
+ }
+
+ if (low != 0)
+ {
+ int result_reg = (extra == 0 && high == 0) ? out_reg : temp_reg;
+
+ if (low >= 0 && low < 255)
+ fprintf (file, "\taddq $%d,%d,$%d\n", in_reg, low, result_reg);
+ else
+ fprintf (file, "\tlda $%d,%d($%d)\n", result_reg, low, in_reg);
+
+ in_reg = result_reg;
+ }
+
+ if (extra)
+ {
+ int result_reg = (high == 0) ? out_reg : temp_reg;
+
+ fprintf (file, "\tldah $%d,%d($%d)\n", result_reg, extra, in_reg);
+ in_reg = result_reg;
+ }
+
+ if (high)
+ fprintf (file, "\tldah $%d,%d($%d)\n", out_reg, high, in_reg);
+}
+
+/* Write function prologue. */
+
+void
+output_prolog (file, size)
+ FILE *file;
+ int size;
+{
+ HOST_WIDE_INT out_args_size
+ = ALPHA_ROUND (current_function_outgoing_args_size);
+ HOST_WIDE_INT sa_size = alpha_sa_size ();
+ HOST_WIDE_INT frame_size
+ = (out_args_size + sa_size
+ + ALPHA_ROUND (size + current_function_pretend_args_size));
+ HOST_WIDE_INT reg_offset = out_args_size;
+ HOST_WIDE_INT start_reg_offset = reg_offset;
+ HOST_WIDE_INT actual_start_reg_offset = start_reg_offset;
+ int int_reg_save_area_size = 0;
+ rtx insn;
+ unsigned reg_mask = 0;
+ int i;
+
+ /* Ecoff can handle multiple .file directives, so put out file and lineno.
+ We have to do that before the .ent directive as we cannot switch
+ files within procedures with native ecoff because line numbers are
+ linked to procedure descriptors.
+ Outputting the lineno helps debugging of one line functions as they
+ would otherwise get no line number at all. Please note that we would
+ like to put out last_linenum from final.c, but it is not accessible. */
+
+ if (write_symbols == SDB_DEBUG)
+ {
+ ASM_OUTPUT_SOURCE_FILENAME (file,
+ DECL_SOURCE_FILE (current_function_decl));
+ if (debug_info_level != DINFO_LEVEL_TERSE)
+ ASM_OUTPUT_SOURCE_LINE (file,
+ DECL_SOURCE_LINE (current_function_decl));
+ }
+
+ /* The assembly language programmer's guide states that the second argument
+ to the .ent directive, the lex_level, is ignored by the assembler,
+ so we might as well omit it. */
+
+ fprintf (file, "\t.ent ");
+ assemble_name (file, alpha_function_name);
+ fprintf (file, "\n");
+ ASM_OUTPUT_LABEL (file, alpha_function_name);
+ inside_function = TRUE;
+
+ /* Set up offsets to alpha virtual arg/local debugging pointer. */
+
+ alpha_auto_offset = -frame_size + current_function_pretend_args_size;
+ alpha_arg_offset = -frame_size + 48;
+
+ /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
+ Even if we are a static function, we still need to do this in case
+ our address is taken and passed to something like qsort.
+
+ We never need a GP for Windows/NT. */
+
+ alpha_function_needs_gp = 0;
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ if ((GET_CODE (insn) == CALL_INSN)
+ || (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
+ && GET_CODE (PATTERN (insn)) != USE
+ && GET_CODE (PATTERN (insn)) != CLOBBER
+ && (get_attr_type (insn) == TYPE_LDSYM
+ || get_attr_type (insn) == TYPE_ISUBR)))
+ {
+ alpha_function_needs_gp = 1;
+ break;
+ }
+
+ if (WINDOWS_NT == 0)
+ {
+ if (alpha_function_needs_gp)
+ fprintf (file, "\tldgp $29,0($27)\n");
+
+ /* Put a label after the GP load so we can enter the function at it. */
+ assemble_name (file, alpha_function_name);
+ fprintf (file, "..ng:\n");
+ }
+
+ /* Adjust the stack by the frame size. If the frame size is > 4096
+ bytes, we need to be sure we probe somewhere in the first and last
+ 4096 bytes (we can probably get away without the latter test) and
+ every 8192 bytes in between. If the frame size is > 32768, we
+ do this in a loop. Otherwise, we generate the explicit probe
+ instructions.
+
+ Note that we are only allowed to adjust sp once in the prologue. */
+
+ if (frame_size < 32768)
+ {
+ if (frame_size > 4096)
+ {
+ int probed = 4096;
+
+ fprintf (file, "\tstq $31,-%d($30)\n", probed);
+
+ while (probed + 8192 < frame_size)
+ fprintf (file, "\tstq $31,-%d($30)\n", probed += 8192);
+
+ /* We only have to do this probe if we aren't saving registers. */
+ if (sa_size == 0 && probed + 4096 < frame_size)
+ fprintf (file, "\tstq $31,-%d($30)\n", frame_size);
+ }
+
+ if (frame_size != 0)
+ fprintf (file, "\tlda $30,-%d($30)\n", frame_size);
+ }
+ else
+ {
+ /* Here we generate code to set R4 to SP + 4096 and set R5 to the
+ number of 8192 byte blocks to probe. We then probe each block
+ in the loop and then set SP to the proper location. If the
+ amount remaining is > 4096, we have to do one more probe if we
+ are not saving any registers. */
+
+ HOST_WIDE_INT blocks = (frame_size + 4096) / 8192;
+ HOST_WIDE_INT leftover = frame_size + 4096 - blocks * 8192;
+
+ add_long_const (file, blocks, 31, 5, 5);
+
+ fprintf (file, "\tlda $4,4096($30)\n");
+
+ assemble_name (file, alpha_function_name);
+ fprintf (file, "..sc:\n");
+
+ fprintf (file, "\tstq $31,-8192($4)\n");
+ fprintf (file, "\tsubq $5,1,$5\n");
+ fprintf (file, "\tlda $4,-8192($4)\n");
+
+ fprintf (file, "\tbne $5,");
+ assemble_name (file, alpha_function_name);
+ fprintf (file, "..sc\n");
+
+ if (leftover > 4096 && sa_size == 0)
+ fprintf (file, "\tstq $31,-%d($4)\n", leftover);
+
+ fprintf (file, "\tlda $30,-%d($4)\n", leftover);
+ }
+
+ /* Describe our frame. */
+ fprintf (file, "\t.frame $%d,%d,$26,%d\n",
+ (frame_pointer_needed
+ ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
+ frame_size, current_function_pretend_args_size);
+
+ /* Save register 26 if any other register needs to be saved. */
+ if (sa_size != 0)
+ {
+ reg_mask |= 1 << 26;
+ fprintf (file, "\tstq $26,%d($30)\n", reg_offset);
+ reg_offset += 8;
+ int_reg_save_area_size += 8;
+ }
+
+ /* Now save any other used integer registers required to be saved. */
+ for (i = 0; i < 32; i++)
+ if (! fixed_regs[i] && ! call_used_regs[i] && regs_ever_live[i] && i != 26)
+ {
+ reg_mask |= 1 << i;
+ fprintf (file, "\tstq $%d,%d($30)\n", i, reg_offset);
+ reg_offset += 8;
+ int_reg_save_area_size += 8;
+ }
+
+ /* Print the register mask and do floating-point saves. */
+ if (reg_mask)
+ fprintf (file, "\t.mask 0x%x,%d\n", reg_mask,
+ actual_start_reg_offset - frame_size);
+
+ start_reg_offset = reg_offset;
+ reg_mask = 0;
+
+ for (i = 0; i < 32; i++)
+ if (! fixed_regs[i + 32] && ! call_used_regs[i + 32]
+ && regs_ever_live[i + 32])
+ {
+ reg_mask |= 1 << i;
+ fprintf (file, "\tstt $f%d,%d($30)\n", i, reg_offset);
+ reg_offset += 8;
+ }
+
+ /* Print the floating-point mask, if we've saved any fp register. */
+ if (reg_mask)
+ fprintf (file, "\t.fmask 0x%x,%d\n", reg_mask,
+ actual_start_reg_offset - frame_size + int_reg_save_area_size);
+
+ /* If we need a frame pointer, set it from the stack pointer. Note that
+ this must always be the last instruction in the prologue. */
+ if (frame_pointer_needed)
+ fprintf (file, "\tbis $30,$30,$15\n");
+
+ /* End the prologue and say if we used gp. */
+ fprintf (file, "\t.prologue %d\n", alpha_function_needs_gp);
+}
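
A worked instance of the stack-probe arithmetic in the large-frame branch above (illustrative; the frame size is arbitrary): with frame_size = 50000, the loop probes six 8192-byte blocks and the closing lda moves $30 the remaining 4944 bytes.

    #include <stdio.h>

    int main (void)
    {
      long long frame_size = 50000;                           /* hypothetical frame */
      long long blocks   = (frame_size + 4096) / 8192;        /* probe-loop count: 6 */
      long long leftover = frame_size + 4096 - blocks * 8192; /* 4944 */

      printf ("blocks=%lld leftover=%lld\n", blocks, leftover);
      /* $4 starts at SP+4096, drops 8192 per iteration, and the final
         "lda $30,-4944($4)" leaves SP exactly 50000 bytes lower.  */
      return 0;
    }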
+
+/* Write function epilogue. */
+
+void
+output_epilog (file, size)
+ FILE *file;
+ int size;
+{
+ rtx insn = get_last_insn ();
+ HOST_WIDE_INT out_args_size
+ = ALPHA_ROUND (current_function_outgoing_args_size);
+ HOST_WIDE_INT sa_size = alpha_sa_size ();
+ HOST_WIDE_INT frame_size
+ = (out_args_size + sa_size
+ + ALPHA_ROUND (size + current_function_pretend_args_size));
+ HOST_WIDE_INT reg_offset = out_args_size;
+ HOST_WIDE_INT frame_size_from_reg_save = frame_size - reg_offset;
+ int restore_fp
+ = frame_pointer_needed && regs_ever_live[HARD_FRAME_POINTER_REGNUM];
+ int i;
+
+ /* If the last insn was a BARRIER, we don't have to write anything except
+ the .end pseudo-op. */
+ if (GET_CODE (insn) == NOTE)
+ insn = prev_nonnote_insn (insn);
+ if (insn == 0 || GET_CODE (insn) != BARRIER)
+ {
+ int fp_offset = 0;
+
+ /* If we have a frame pointer, restore SP from it. */
+ if (frame_pointer_needed)
+ fprintf (file, "\tbis $15,$15,$30\n");
+
+ /* Restore all the registers, starting with the return address
+ register. */
+ if (sa_size != 0)
+ {
+ fprintf (file, "\tldq $26,%d($30)\n", reg_offset);
+ reg_offset += 8;
+ }
+
+ /* Now restore any other used integer registers that we saved,
+ except for FP if it is being used as FP, since it must be
+ restored last. */
+
+ for (i = 0; i < 32; i++)
+ if (! fixed_regs[i] && ! call_used_regs[i] && regs_ever_live[i]
+ && i != 26)
+ {
+ if (i == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
+ fp_offset = reg_offset;
+ else
+ fprintf (file, "\tldq $%d,%d($30)\n", i, reg_offset);
+ reg_offset += 8;
+ }
+
+ for (i = 0; i < 32; i++)
+ if (! fixed_regs[i + 32] && ! call_used_regs[i + 32]
+ && regs_ever_live[i + 32])
+ {
+ fprintf (file, "\tldt $f%d,%d($30)\n", i, reg_offset);
+ reg_offset += 8;
+ }
+
+ /* If the stack size is large and we have a frame pointer, compute the
+ size of the stack into a register because the old FP restore, stack
+ pointer adjust, and return are required to be consecutive
+ instructions. */
+ if (frame_size > 32767 && restore_fp)
+ add_long_const (file, frame_size, 31, 1, 1);
+
+ /* If we needed a frame pointer and we have to restore it, do it
+ now. This must be done in one instruction immediately
+ before the SP update. */
+ if (restore_fp && fp_offset)
+ fprintf (file, "\tldq $15,%d($30)\n", fp_offset);
+
+ /* Now update the stack pointer, if needed. Only one instruction must
+ modify the stack pointer. It must be the last instruction in the
+ sequence and must be an ADDQ or LDA instruction. If the frame
+ pointer was loaded above, we may only put one instruction here. */
+
+ if (frame_size > 32768 && restore_fp)
+ fprintf (file, "\taddq $1,$30,$30\n");
+ else
+ add_long_const (file, frame_size, 30, 30, 1);
+
+ /* Finally return to the caller. */
+ fprintf (file, "\tret $31,($26),1\n");
+ }
+
+ /* End the function. */
+ fprintf (file, "\t.end ");
+ assemble_name (file, alpha_function_name);
+ fprintf (file, "\n");
+ inside_function = FALSE;
+
+ /* Show that we know this function if it is called again. */
+ SYMBOL_REF_FLAG (XEXP (DECL_RTL (current_function_decl), 0)) = 1;
+}
+
+/* Debugging support. */
+
+#include "gstab.h"
+
+/* Count the number of sdb related labels that are generated (to find block
+ start and end boundaries). */
+
+int sdb_label_count = 0;
+
+/* Next label # for each statement. */
+
+static int sym_lineno = 0;
+
+/* Count the number of .file directives, so that .loc is up to date. */
+
+static int num_source_filenames = 0;
+
+/* Name of the file containing the current function. */
+
+static char *current_function_file = "";
+
+/* Offsets to alpha virtual arg/local debugging pointers. */
+
+long alpha_arg_offset;
+long alpha_auto_offset;
+
+/* Emit a new filename to a stream. */
+
+void
+alpha_output_filename (stream, name)
+ FILE *stream;
+ char *name;
+{
+ static int first_time = TRUE;
+ char ltext_label_name[100];
+
+ if (first_time)
+ {
+ first_time = FALSE;
+ ++num_source_filenames;
+ current_function_file = name;
+ fprintf (stream, "\t.file\t%d ", num_source_filenames);
+ output_quoted_string (stream, name);
+ fprintf (stream, "\n");
+ if (!TARGET_GAS && write_symbols == DBX_DEBUG)
+ fprintf (stream, "\t#@stabs\n");
+ }
+
+ else if (!TARGET_GAS && write_symbols == DBX_DEBUG)
+ {
+ ASM_GENERATE_INTERNAL_LABEL (ltext_label_name, "Ltext", 0);
+ fprintf (stream, "%s ", ASM_STABS_OP);
+ output_quoted_string (stream, name);
+ fprintf (stream, ",%d,0,0,%s\n", N_SOL, &ltext_label_name[1]);
+ }
+
+ else if (name != current_function_file
+ && strcmp (name, current_function_file) != 0)
+ {
+ if (inside_function && ! TARGET_GAS)
+ fprintf (stream, "\t#.file\t%d ", num_source_filenames);
+ else
+ {
+ ++num_source_filenames;
+ current_function_file = name;
+ fprintf (stream, "\t.file\t%d ", num_source_filenames);
+ }
+
+ output_quoted_string (stream, name);
+ fprintf (stream, "\n");
+ }
+}
+
+/* Emit a linenumber to a stream. */
+
+void
+alpha_output_lineno (stream, line)
+ FILE *stream;
+ int line;
+{
+ if (! TARGET_GAS && write_symbols == DBX_DEBUG)
+ {
+ /* mips-tfile doesn't understand .stabd directives. */
+ ++sym_lineno;
+ fprintf (stream, "$LM%d:\n\t%s %d,0,%d,$LM%d\n",
+ sym_lineno, ASM_STABN_OP, N_SLINE, line, sym_lineno);
+ }
+ else
+ fprintf (stream, "\n\t.loc\t%d %d\n", num_source_filenames, line);
+}
diff --git a/contrib/gcc/config/alpha/alpha.h b/contrib/gcc/config/alpha/alpha.h
new file mode 100644
index 0000000..9504c09
--- /dev/null
+++ b/contrib/gcc/config/alpha/alpha.h
@@ -0,0 +1,2088 @@
+/* Definitions of target machine for GNU compiler, for DEC Alpha.
+ Copyright (C) 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
+ Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+/* Names to predefine in the preprocessor for this target machine. */
+
+#define CPP_PREDEFINES "\
+-Dunix -D__osf__ -D__alpha -D__alpha__ -D_LONGLONG -DSYSTYPE_BSD \
+-D_SYSTYPE_BSD -Asystem(unix) -Asystem(xpg4) -Acpu(alpha) -Amachine(alpha)"
+
+/* Write out the correct language type definition for the header files.
+ Unless we have assembler language, write out the symbols for C. */
+#define CPP_SPEC "\
+%{!.S: -D__LANGUAGE_C__ -D__LANGUAGE_C %{!ansi:-DLANGUAGE_C}} \
+%{.S: -D__LANGUAGE_ASSEMBLY__ -D__LANGUAGE_ASSEMBLY %{!ansi:-DLANGUAGE_ASSEMBLY}} \
+%{.cc: -D__LANGUAGE_C_PLUS_PLUS__ -D__LANGUAGE_C_PLUS_PLUS -D__cplusplus} \
+%{.cxx: -D__LANGUAGE_C_PLUS_PLUS__ -D__LANGUAGE_C_PLUS_PLUS -D__cplusplus} \
+%{.C: -D__LANGUAGE_C_PLUS_PLUS__ -D__LANGUAGE_C_PLUS_PLUS -D__cplusplus} \
+%{.m: -D__LANGUAGE_OBJECTIVE_C__ -D__LANGUAGE_OBJECTIVE_C}"
+
+/* Set the spec to use for signed char. The default tests the above macro
+ but DEC's compiler can't handle the conditional in a "constant"
+ operand. */
+
+#define SIGNED_CHAR_SPEC "%{funsigned-char:-D__CHAR_UNSIGNED__}"
+
+/* Under OSF/1, -p and -pg require -lprof1. */
+
+#define LIB_SPEC "%{p:-lprof1} %{pg:-lprof1} %{a:-lprof2} -lc"
+
+/* Pass "-G 8" to ld because Alpha's CC does. Pass -O3 if we are
+ optimizing, -O1 if we are not. Pass -shared, -non_shared or
+ -call_shared as appropriate. Also pass -pg. */
+#define LINK_SPEC \
+ "-G 8 %{O*:-O3} %{!O*:-O1} %{static:-non_shared} \
+ %{!static:%{shared:-shared} %{!shared:-call_shared}} %{pg} %{taso} \
+ %{rpath*}"
+
+#define WORD_SWITCH_TAKES_ARG(STR) \
+ (!strcmp (STR, "rpath") || !strcmp (STR, "include") \
+ || !strcmp (STR, "imacros") || !strcmp (STR, "aux-info") \
+ || !strcmp (STR, "idirafter") || !strcmp (STR, "iprefix") \
+ || !strcmp (STR, "iwithprefix") || !strcmp (STR, "iwithprefixbefore") \
+ || !strcmp (STR, "isystem"))
+
+#define STARTFILE_SPEC \
+ "%{!shared:%{pg:gcrt0.o%s}%{!pg:%{p:mcrt0.o%s}%{!p:crt0.o%s}}}"
+
+/* Print subsidiary information on the compiler version in use. */
+#define TARGET_VERSION
+
+/* Default this to not be compiling for Windows/NT. */
+#ifndef WINDOWS_NT
+#define WINDOWS_NT 0
+#endif
+
+/* Define the location for the startup file on OSF/1 for Alpha. */
+
+#define MD_STARTFILE_PREFIX "/usr/lib/cmplrs/cc/"
+
+/* Run-time compilation parameters selecting different hardware subsets. */
+
+extern int target_flags;
+
+/* This means that floating-point support exists in the target implementation
+ of the Alpha architecture. This is usually the default. */
+
+#define TARGET_FP (target_flags & 1)
+
+/* This means that floating-point registers are allowed to be used. Note
+ that Alpha implementations without FP operations are required to
+ provide the FP registers. */
+
+#define TARGET_FPREGS (target_flags & 2)
+
+/* This means that gas is used to process the assembler file. */
+
+#define MASK_GAS 4
+#define TARGET_GAS (target_flags & MASK_GAS)
+
+/* Macro to define tables used to set the flags.
+ This is a list in braces of pairs in braces,
+ each pair being { "NAME", VALUE }
+ where VALUE is the bits to set or minus the bits to clear.
+ An empty string NAME is used to identify the default VALUE. */
+
+#define TARGET_SWITCHES \
+ { {"no-soft-float", 1}, \
+ {"soft-float", -1}, \
+ {"fp-regs", 2}, \
+ {"no-fp-regs", -3}, \
+ {"alpha-as", -MASK_GAS}, \
+ {"gas", MASK_GAS}, \
+ {"", TARGET_DEFAULT | TARGET_CPU_DEFAULT} }
+
+#define TARGET_DEFAULT 3
+
+#ifndef TARGET_CPU_DEFAULT
+#define TARGET_CPU_DEFAULT 0
+#endif
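
The VALUE column in TARGET_SWITCHES is interpreted by the option-handling code as a bitmask over target_flags: a positive entry sets bits, a negative entry clears them. A small sketch of that convention (illustrative only; the actual driver loop in toplev.c is not reproduced here):

    #include <stdio.h>

    int main (void)
    {
      int target_flags = 3;      /* TARGET_DEFAULT: FP (bit 0) and FPREGS (bit 1) on */
      int value = -3;            /* the {"no-fp-regs", -3} table entry */

      if (value < 0)
        target_flags &= ~(-value);   /* negative: clear the named bits */
      else
        target_flags |= value;       /* positive: set them */

      printf ("%d\n", target_flags); /* 0: -mno-fp-regs turns off FP as well */
      return 0;
    }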
+
+/* Define this macro to change register usage conditional on target flags.
+
+ On the Alpha, we use this to disable the floating-point registers when
+ they don't exist. */
+
+#define CONDITIONAL_REGISTER_USAGE \
+ if (! TARGET_FPREGS) \
+ for (i = 32; i < 63; i++) \
+ fixed_regs[i] = call_used_regs[i] = 1;
+
+/* Show we can debug even without a frame pointer. */
+#define CAN_DEBUG_WITHOUT_FP
+
+/* target machine storage layout */
+
+/* Define to enable software floating point emulation. */
+#define REAL_ARITHMETIC
+
+/* Define the size of `int'. The default is the same as the word size. */
+#define INT_TYPE_SIZE 32
+
+/* Define the size of `long long'. The default is twice the word size. */
+#define LONG_LONG_TYPE_SIZE 64
+
+/* The two floating-point formats we support are S-floating, which is
+ 4 bytes, and T-floating, which is 8 bytes. `float' is S and `double'
+ and `long double' are T. */
+
+#define FLOAT_TYPE_SIZE 32
+#define DOUBLE_TYPE_SIZE 64
+#define LONG_DOUBLE_TYPE_SIZE 64
+
+#define WCHAR_TYPE "short unsigned int"
+#define WCHAR_TYPE_SIZE 16
+
+/* Define this macro if it is advisable to hold scalars in registers
+ in a wider mode than that declared by the program. In such cases,
+ the value is constrained to be within the bounds of the declared
+ type, but kept valid in the wider mode. The signedness of the
+ extension may differ from that of the type.
+
+ For Alpha, we always store objects in a full register. 32-bit objects
+ are always sign-extended, but smaller objects retain their signedness. */
+
+#define PROMOTE_MODE(MODE,UNSIGNEDP,TYPE) \
+ if (GET_MODE_CLASS (MODE) == MODE_INT \
+ && GET_MODE_SIZE (MODE) < UNITS_PER_WORD) \
+ { \
+ if ((MODE) == SImode) \
+ (UNSIGNEDP) = 0; \
+ (MODE) = DImode; \
+ }
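
One consequence of the rule above (an illustrative example, not from the import): even an unsigned 32-bit value is kept sign-extended once it is widened to the 64-bit register, because UNSIGNEDP is forced to 0 for SImode.

    #include <stdio.h>

    int main (void)
    {
      unsigned int u = 0x80000000u;             /* bit 31 set */
      long long widened = (long long) (int) u;  /* what the SImode rule yields */

      printf ("%llx\n", (unsigned long long) widened);  /* ffffffff80000000 */
      return 0;
    }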
+
+/* Define this if function arguments should also be promoted using the above
+ procedure. */
+
+#define PROMOTE_FUNCTION_ARGS
+
+/* Likewise, if the function return value is promoted. */
+
+#define PROMOTE_FUNCTION_RETURN
+
+/* Define this if most significant bit is lowest numbered
+ in instructions that operate on numbered bit-fields.
+
+ There are no such instructions on the Alpha, but the documentation
+ is little endian. */
+#define BITS_BIG_ENDIAN 0
+
+/* Define this if most significant byte of a word is the lowest numbered.
+ This is false on the Alpha. */
+#define BYTES_BIG_ENDIAN 0
+
+/* Define this if most significant word of a multiword number is lowest
+ numbered.
+
+ For Alpha we can decide arbitrarily since there are no machine instructions
+ for them. Might as well be consistent with bytes. */
+#define WORDS_BIG_ENDIAN 0
+
+/* number of bits in an addressable storage unit */
+#define BITS_PER_UNIT 8
+
+/* Width in bits of a "word", which is the contents of a machine register.
+ Note that this is not necessarily the width of data type `int';
+ if using 16-bit ints on a 68000, this would still be 32.
+ But on a machine with 16-bit registers, this would be 16. */
+#define BITS_PER_WORD 64
+
+/* Width of a word, in units (bytes). */
+#define UNITS_PER_WORD 8
+
+/* Width in bits of a pointer.
+ See also the macro `Pmode' defined below. */
+#define POINTER_SIZE 64
+
+/* Allocation boundary (in *bits*) for storing arguments in argument list. */
+#define PARM_BOUNDARY 64
+
+/* Boundary (in *bits*) on which stack pointer should be aligned. */
+#define STACK_BOUNDARY 64
+
+/* Allocation boundary (in *bits*) for the code of a function. */
+#define FUNCTION_BOUNDARY 64
+
+/* Alignment of field after `int : 0' in a structure. */
+#define EMPTY_FIELD_BOUNDARY 64
+
+/* Every structure's size must be a multiple of this. */
+#define STRUCTURE_SIZE_BOUNDARY 8
+
+/* A bitfield declared as `int' forces `int' alignment for the struct. */
+#define PCC_BITFIELD_TYPE_MATTERS 1
+
+/* Align loop starts for optimal branching.
+
+ ??? Kludge this and the next macro for the moment by not doing anything if
+ we don't optimize and also if we are writing ECOFF symbols to work around
+ a bug in DEC's assembler. */
+
+#define ASM_OUTPUT_LOOP_ALIGN(FILE) \
+ if (optimize > 0 && write_symbols != SDB_DEBUG) \
+ ASM_OUTPUT_ALIGN (FILE, 5)
+
+/* This is how to align an instruction for optimal branching.
+ On Alpha we'll get better performance by aligning on a quadword
+ boundary. */
+
+#define ASM_OUTPUT_ALIGN_CODE(FILE) \
+ if (optimize > 0 && write_symbols != SDB_DEBUG) \
+ ASM_OUTPUT_ALIGN ((FILE), 4)
+
+/* No data type wants to be aligned rounder than this. */
+#define BIGGEST_ALIGNMENT 64
+
+/* Make strings word-aligned so strcpy from constants will be faster. */
+#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
+ (TREE_CODE (EXP) == STRING_CST \
+ && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN))
+
+/* Make arrays of chars word-aligned for the same reasons. */
+#define DATA_ALIGNMENT(TYPE, ALIGN) \
+ (TREE_CODE (TYPE) == ARRAY_TYPE \
+ && TYPE_MODE (TREE_TYPE (TYPE)) == QImode \
+ && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN))
+
+/* Set this non-zero if move instructions will actually fail to work
+ when given unaligned data.
+
+ Since we get an error message when we do one, call them invalid. */
+
+#define STRICT_ALIGNMENT 1
+
+/* Set this non-zero if unaligned move instructions are extremely slow.
+
+ On the Alpha, they trap. */
+
+#define SLOW_UNALIGNED_ACCESS 1
+
+/* Standard register usage. */
+
+/* Number of actual hardware registers.
+ The hardware registers are assigned numbers for the compiler
+ from 0 to just below FIRST_PSEUDO_REGISTER.
+ All registers that the compiler knows about must be given numbers,
+ even those that are not normally considered general registers.
+
+ We define all 32 integer registers, even though $31 is always zero,
+ and all 32 floating-point registers, even though $f31 is also
+ always zero. We do not bother defining the FP status register and
+ there are no other registers.
+
+ Since $31 is always zero, we will use register number 31 as the
+ argument pointer. It will never appear in the generated code
+ because we will always be eliminating it in favor of the stack
+ pointer or hardware frame pointer.
+
+ Likewise, we use $f31 for the frame pointer, which will always
+ be eliminated in favor of the hardware frame pointer or the
+ stack pointer. */
+
+#define FIRST_PSEUDO_REGISTER 64
+
+/* 1 for registers that have pervasive standard uses
+ and are not available for the register allocator. */
+
+#define FIXED_REGISTERS \
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 }
+
+/* 1 for registers not available across function calls.
+ These must include the FIXED_REGISTERS and also any
+ registers that can be used without being saved.
+ The latter must include the registers where values are returned
+ and the register where structure-value addresses are passed.
+ Aside from that, you can include as many other registers as you like. */
+#define CALL_USED_REGISTERS \
+ {1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, \
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, \
+ 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, \
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }
+
+/* List the order in which to allocate registers. Each register must be
+ listed once, even those in FIXED_REGISTERS.
+
+ We allocate in the following order:
+ $f1 (nonsaved floating-point register)
+ $f10-$f15 (likewise)
+ $f22-$f30 (likewise)
+ $f21-$f16 (likewise, but input args)
+ $f0 (nonsaved, but return value)
+ $f2-$f9 (saved floating-point registers)
+ $1-$8 (nonsaved integer registers)
+ $22-$25 (likewise)
+ $28 (likewise)
+ $0 (likewise, but return value)
+ $21-$16 (likewise, but input args)
+ $27 (procedure value in OSF, nonsaved in NT)
+ $9-$14 (saved integer registers)
+ $26 (return PC)
+ $15 (frame pointer)
+ $29 (global pointer)
+ $30, $31, $f31 (stack pointer and always zero/ap & fp) */
+
+#define REG_ALLOC_ORDER \
+ {33, \
+ 42, 43, 44, 45, 46, 47, \
+ 54, 55, 56, 57, 58, 59, 60, 61, 62, \
+ 53, 52, 51, 50, 49, 48, \
+ 32, \
+ 34, 35, 36, 37, 38, 39, 40, 41, \
+ 1, 2, 3, 4, 5, 6, 7, 8, \
+ 22, 23, 24, 25, \
+ 28, \
+ 0, \
+ 21, 20, 19, 18, 17, 16, \
+ 27, \
+ 9, 10, 11, 12, 13, 14, \
+ 26, \
+ 15, \
+ 29, \
+ 30, 31, 63 }
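+
+/* Illustrative sketch (editorial addition, not GCC code): how the numbers in
+   the allocation order above map onto register names.  Hard registers 0-31
+   are the integer registers and 32-63 the floating-point registers, so the
+   first entry, 33, is $f1 and 42 is $f10; 31 and 63 are the always-zero
+   registers that REGISTER_NAMES below prints as AP and FP.  The helper name
+   is invented for this example.  */
+#include <stdio.h>
+
+static void reg_name_sketch (int regno, char *buf)
+{
+  if (regno == 31)
+    sprintf (buf, "AP");                /* $31, used as the argument pointer */
+  else if (regno == 63)
+    sprintf (buf, "FP");                /* $f31, used as the frame pointer */
+  else if (regno < 32)
+    sprintf (buf, "$%d", regno);        /* integer register */
+  else
+    sprintf (buf, "$f%d", regno - 32);  /* floating-point register */
+}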
+
+/* Return number of consecutive hard regs needed starting at reg REGNO
+ to hold something of mode MODE.
+ This is ordinarily the length in words of a value of mode MODE
+ but can be less for certain modes in special long registers. */
+
+#define HARD_REGNO_NREGS(REGNO, MODE) \
+ ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
+
+/* Value is 1 if hard register REGNO can hold a value of machine-mode MODE.
+ On Alpha, the integer registers can hold any mode. The floating-point
+ registers can hold 32-bit and 64-bit integers as well, but not 16-bit
+ or 8-bit values. If we only allowed the larger integers into FP registers,
+   we'd have to say that QImode and SImode aren't tieable, which is a
+ pain. So say all registers can hold everything and see how that works. */
+
+#define HARD_REGNO_MODE_OK(REGNO, MODE) 1
+
+/* Value is 1 if it is a good idea to tie two pseudo registers
+ when one has mode MODE1 and one has mode MODE2.
+ If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
+ for any hard reg, then this must be 0 for correct output. */
+
+#define MODES_TIEABLE_P(MODE1, MODE2) 1
+
+/* Specify the registers used for certain standard purposes.
+ The values of these macros are register numbers. */
+
+/* Alpha pc isn't overloaded on a register that the compiler knows about. */
+/* #define PC_REGNUM */
+
+/* Register to use for pushing function arguments. */
+#define STACK_POINTER_REGNUM 30
+
+/* Base register for access to local variables of the function. */
+#define HARD_FRAME_POINTER_REGNUM 15
+
+/* Value should be nonzero if functions must have frame pointers.
+ Zero means the frame pointer need not be set up (and parms
+ may be accessed via the stack pointer) in functions that seem suitable.
+ This is computed in `reload', in reload1.c. */
+#define FRAME_POINTER_REQUIRED 0
+
+/* Base register for access to arguments of the function. */
+#define ARG_POINTER_REGNUM 31
+
+/* Base register for access to local variables of function. */
+#define FRAME_POINTER_REGNUM 63
+
+/* Register in which static-chain is passed to a function.
+
+ For the Alpha, this is based on an example; the calling sequence
+ doesn't seem to specify this. */
+#define STATIC_CHAIN_REGNUM 1
+
+/* Register in which address to store a structure value
+ arrives in the function. On the Alpha, the address is passed
+ as a hidden argument. */
+#define STRUCT_VALUE 0
+
+/* Define the classes of registers for register constraints in the
+ machine description. Also define ranges of constants.
+
+ One of the classes must always be named ALL_REGS and include all hard regs.
+ If there is more than one class, another class must be named NO_REGS
+ and contain no registers.
+
+ The name GENERAL_REGS must be the name of a class (or an alias for
+ another name such as ALL_REGS). This is the class of registers
+ that is allowed by "g" or "r" in a register constraint.
+ Also, registers outside this class are allocated only when
+ instructions express preferences for them.
+
+ The classes must be numbered in nondecreasing order; that is,
+ a larger-numbered class must never be contained completely
+ in a smaller-numbered class.
+
+ For any two classes, it is very desirable that there be another
+ class that represents their union. */
+
+enum reg_class { NO_REGS, GENERAL_REGS, FLOAT_REGS, ALL_REGS,
+ LIM_REG_CLASSES };
+
+#define N_REG_CLASSES (int) LIM_REG_CLASSES
+
+/* Give names of register classes as strings for dump file. */
+
+#define REG_CLASS_NAMES \
+ {"NO_REGS", "GENERAL_REGS", "FLOAT_REGS", "ALL_REGS" }
+
+/* Define which registers fit in which classes.
+ This is an initializer for a vector of HARD_REG_SET
+ of length N_REG_CLASSES. */
+
+#define REG_CLASS_CONTENTS \
+ { {0, 0}, {~0, 0x80000000}, {0, 0x7fffffff}, {~0, ~0} }
+
+/* The same information, inverted:
+ Return the class number of the smallest class containing
+ reg number REGNO. This could be a conditional expression
+ or could index an array. */
+
+#define REGNO_REG_CLASS(REGNO) \
+ ((REGNO) >= 32 && (REGNO) <= 62 ? FLOAT_REGS : GENERAL_REGS)
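+
+/* A small standalone example (editorial, not GCC code): how to read the
+   two-word bit vectors in REG_CLASS_CONTENTS.  Bit REGNO%32 of word REGNO/32
+   says whether register REGNO is in the class, so {~0, 0x80000000} puts
+   registers 0-31 and 63 in GENERAL_REGS and {0, 0x7fffffff} puts 32-62 in
+   FLOAT_REGS.  The helper names are invented for this example.  */
+#include <assert.h>
+
+static int class_has_reg_sketch (const unsigned int contents[2], int regno)
+{
+  return (contents[regno / 32] >> (regno % 32)) & 1;
+}
+
+static void class_contents_check (void)
+{
+  static const unsigned int general[2] = { ~0U, 0x80000000U };
+  static const unsigned int fltregs[2] = { 0U, 0x7fffffffU };
+
+  assert (class_has_reg_sketch (general, 0) && class_has_reg_sketch (general, 63));
+  assert (! class_has_reg_sketch (general, 32));
+  assert (class_has_reg_sketch (fltregs, 32) && class_has_reg_sketch (fltregs, 62));
+  assert (! class_has_reg_sketch (fltregs, 63));
+}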
+
+/* The class value for index registers, and the one for base regs. */
+#define INDEX_REG_CLASS NO_REGS
+#define BASE_REG_CLASS GENERAL_REGS
+
+/* Get reg_class from a letter such as appears in the machine description. */
+
+#define REG_CLASS_FROM_LETTER(C) \
+ ((C) == 'f' ? FLOAT_REGS : NO_REGS)
+
+/* Define this macro to change register usage conditional on target flags. */
+/* #define CONDITIONAL_REGISTER_USAGE */
+
+/* The letters I, J, K, L, M, N, O, and P in a register constraint string
+ can be used to stand for particular ranges of immediate operands.
+ This macro defines what the ranges are.
+ C is the letter, and VALUE is a constant value.
+ Return 1 if VALUE is in the range specified by C.
+
+ For Alpha:
+ `I' is used for the range of constants most insns can contain.
+ `J' is the constant zero.
+ `K' is used for the constant in an LDA insn.
+ `L' is used for the constant in a LDAH insn.
+ `M' is used for the constants that can be AND'ed with using a ZAP insn.
+ `N' is used for complemented 8-bit constants.
+ `O' is used for negated 8-bit constants.
+ `P' is used for the constants 1, 2 and 3. */
+
+#define CONST_OK_FOR_LETTER_P(VALUE, C) \
+ ((C) == 'I' ? (unsigned HOST_WIDE_INT) (VALUE) < 0x100 \
+ : (C) == 'J' ? (VALUE) == 0 \
+ : (C) == 'K' ? (unsigned HOST_WIDE_INT) ((VALUE) + 0x8000) < 0x10000 \
+ : (C) == 'L' ? (((VALUE) & 0xffff) == 0 \
+ && (((VALUE)) >> 31 == -1 || (VALUE) >> 31 == 0) \
+ && ((HOST_BITS_PER_WIDE_INT == 64 \
+ || (unsigned) (VALUE) != 0x80000000U))) \
+ : (C) == 'M' ? zap_mask (VALUE) \
+ : (C) == 'N' ? (unsigned HOST_WIDE_INT) (~ (VALUE)) < 0x100 \
+ : (C) == 'O' ? (unsigned HOST_WIDE_INT) (- (VALUE)) < 0x100 \
+ : (C) == 'P' ? (VALUE) == 1 || (VALUE) == 2 || (VALUE) == 3 \
+ : 0)
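+
+/* Worked example (editorial addition, not GCC code): the numeric ranges
+   behind a few of the constraint letters above, written as plain C on
+   64-bit values.  The helper names are invented; only the arithmetic
+   mirrors the macro.  */
+#include <assert.h>
+
+static int ok_for_I_sketch (long v) { return (unsigned long) v < 0x100; }
+static int ok_for_K_sketch (long v) { return (unsigned long) (v + 0x8000) < 0x10000; }
+static int ok_for_N_sketch (long v) { return (unsigned long) ~v < 0x100; }
+static int ok_for_O_sketch (long v) { return (unsigned long) -v < 0x100; }
+
+static void constraint_letter_check (void)
+{
+  assert (ok_for_I_sketch (255) && ! ok_for_I_sketch (256) && ! ok_for_I_sketch (-1));
+  assert (ok_for_K_sketch (-32768) && ok_for_K_sketch (32767) && ! ok_for_K_sketch (32768));
+  assert (ok_for_N_sketch (~7L));       /* its complement, 7, is an 8-bit value */
+  assert (ok_for_O_sketch (-200) && ! ok_for_O_sketch (200));
+}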
+
+/* Similar, but for floating or large integer constants, and defining letters
+ G and H. Here VALUE is the CONST_DOUBLE rtx itself.
+
+ For Alpha, `G' is the floating-point constant zero. `H' is a CONST_DOUBLE
+ that is the operand of a ZAP insn. */
+
+#define CONST_DOUBLE_OK_FOR_LETTER_P(VALUE, C) \
+ ((C) == 'G' ? (GET_MODE_CLASS (GET_MODE (VALUE)) == MODE_FLOAT \
+ && (VALUE) == CONST0_RTX (GET_MODE (VALUE))) \
+ : (C) == 'H' ? (GET_MODE (VALUE) == VOIDmode \
+ && zap_mask (CONST_DOUBLE_LOW (VALUE)) \
+ && zap_mask (CONST_DOUBLE_HIGH (VALUE))) \
+ : 0)
+
+/* Optional extra constraints for this machine.
+
+ For the Alpha, `Q' means that this is a memory operand but not a
+ reference to an unaligned location.
+ `R' is a SYMBOL_REF that has SYMBOL_REF_FLAG set or is the current
+ function. */
+
+#define EXTRA_CONSTRAINT(OP, C) \
+ ((C) == 'Q' ? GET_CODE (OP) == MEM && GET_CODE (XEXP (OP, 0)) != AND \
+ : (C) == 'R' ? current_file_function_operand (OP, Pmode) \
+ : 0)
+
+/* Given an rtx X being reloaded into a reg required to be
+ in class CLASS, return the class of reg to actually use.
+ In general this is just CLASS; but on some machines
+ in some cases it is preferable to use a more restrictive class.
+
+ On the Alpha, all constants except zero go into a floating-point
+ register via memory. */
+
+#define PREFERRED_RELOAD_CLASS(X, CLASS) \
+ (CONSTANT_P (X) && (X) != const0_rtx && (X) != CONST0_RTX (GET_MODE (X)) \
+ ? ((CLASS) == FLOAT_REGS ? NO_REGS : GENERAL_REGS) \
+ : (CLASS))
+
+/* Loading and storing HImode or QImode values to and from memory
+ usually requires a scratch register. The exceptions are loading
+ QImode and HImode from an aligned address to a general register.
+ We also cannot load an unaligned address or a paradoxical SUBREG into an
+ FP register. */
+
+#define SECONDARY_INPUT_RELOAD_CLASS(CLASS,MODE,IN) \
+(((GET_CODE (IN) == MEM \
+ || (GET_CODE (IN) == REG && REGNO (IN) >= FIRST_PSEUDO_REGISTER) \
+ || (GET_CODE (IN) == SUBREG \
+ && (GET_CODE (SUBREG_REG (IN)) == MEM \
+ || (GET_CODE (SUBREG_REG (IN)) == REG \
+ && REGNO (SUBREG_REG (IN)) >= FIRST_PSEUDO_REGISTER)))) \
+ && (((CLASS) == FLOAT_REGS \
+ && ((MODE) == SImode || (MODE) == HImode || (MODE) == QImode)) \
+ || (((MODE) == QImode || (MODE) == HImode) \
+ && unaligned_memory_operand (IN, MODE)))) \
+ ? GENERAL_REGS \
+ : ((CLASS) == FLOAT_REGS && GET_CODE (IN) == MEM \
+ && GET_CODE (XEXP (IN, 0)) == AND) ? GENERAL_REGS \
+ : ((CLASS) == FLOAT_REGS && GET_CODE (IN) == SUBREG \
+ && (GET_MODE_SIZE (GET_MODE (IN)) \
+ > GET_MODE_SIZE (GET_MODE (SUBREG_REG (IN))))) ? GENERAL_REGS \
+ : NO_REGS)
+
+#define SECONDARY_OUTPUT_RELOAD_CLASS(CLASS,MODE,OUT) \
+(((GET_CODE (OUT) == MEM \
+ || (GET_CODE (OUT) == REG && REGNO (OUT) >= FIRST_PSEUDO_REGISTER) \
+ || (GET_CODE (OUT) == SUBREG \
+ && (GET_CODE (SUBREG_REG (OUT)) == MEM \
+ || (GET_CODE (SUBREG_REG (OUT)) == REG \
+ && REGNO (SUBREG_REG (OUT)) >= FIRST_PSEUDO_REGISTER)))) \
+ && (((MODE) == HImode || (MODE) == QImode \
+ || ((MODE) == SImode && (CLASS) == FLOAT_REGS)))) \
+ ? GENERAL_REGS \
+ : ((CLASS) == FLOAT_REGS && GET_CODE (OUT) == MEM \
+ && GET_CODE (XEXP (OUT, 0)) == AND) ? GENERAL_REGS \
+ : ((CLASS) == FLOAT_REGS && GET_CODE (OUT) == SUBREG \
+ && (GET_MODE_SIZE (GET_MODE (OUT)) \
+ > GET_MODE_SIZE (GET_MODE (SUBREG_REG (OUT))))) ? GENERAL_REGS \
+ : NO_REGS)
+
+/* If we are copying between general and FP registers, we need a memory
+ location. */
+
+#define SECONDARY_MEMORY_NEEDED(CLASS1,CLASS2,MODE) ((CLASS1) != (CLASS2))
+
+/* Specify the mode to be used for memory when a secondary memory
+ location is needed. If MODE is floating-point, use it. Otherwise,
+ widen to a word like the default. This is needed because we always
+ store integers in FP registers in quadword format. This whole
+ area is very tricky! */
+#define SECONDARY_MEMORY_NEEDED_MODE(MODE) \
+ (GET_MODE_CLASS (MODE) == MODE_FLOAT ? (MODE) \
+ : GET_MODE_SIZE (MODE) >= 4 ? (MODE) \
+ : mode_for_size (BITS_PER_WORD, GET_MODE_CLASS (MODE), 0))
+
+/* Return the maximum number of consecutive registers
+ needed to represent mode MODE in a register of class CLASS. */
+
+#define CLASS_MAX_NREGS(CLASS, MODE) \
+ ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
+
+/* If defined, gives a class of registers that cannot be used as the
+ operand of a SUBREG that changes the size of the object. */
+
+#define CLASS_CANNOT_CHANGE_SIZE FLOAT_REGS
+
+/* Define the cost of moving between registers of various classes. Moving
+ between FLOAT_REGS and anything else except float regs is expensive.
+ In fact, we make it quite expensive because we really don't want to
+ do these moves unless it is clearly worth it. Optimizations may
+ reduce the impact of not being able to allocate a pseudo to a
+ hard register. */
+
+#define REGISTER_MOVE_COST(CLASS1, CLASS2) \
+ (((CLASS1) == FLOAT_REGS) == ((CLASS2) == FLOAT_REGS) ? 2 : 20)
+
+/* A C expression returning the cost of moving data of mode MODE between a
+   register and memory.
+
+ On the Alpha, bump this up a bit. */
+
+#define MEMORY_MOVE_COST(MODE) 6
+
+/* Provide the cost of a branch. Exact meaning under development. */
+#define BRANCH_COST 5
+
+/* Adjust the cost of dependencies. */
+
+#define ADJUST_COST(INSN,LINK,DEP,COST) \
+ (COST) = alpha_adjust_cost (INSN, LINK, DEP, COST)
+
+/* Stack layout; function entry, exit and calling. */
+
+/* Define this if pushing a word on the stack
+ makes the stack pointer a smaller address. */
+#define STACK_GROWS_DOWNWARD
+
+/* Define this if the nominal address of the stack frame
+ is at the high-address end of the local variables;
+ that is, each additional local variable allocated
+ goes at a more negative offset in the frame. */
+/* #define FRAME_GROWS_DOWNWARD */
+
+/* Offset within stack frame to start allocating local variables at.
+ If FRAME_GROWS_DOWNWARD, this is the offset to the END of the
+ first local allocated. Otherwise, it is the offset to the BEGINNING
+ of the first local allocated. */
+
+#define STARTING_FRAME_OFFSET 0
+
+/* If we generate an insn to push BYTES bytes,
+ this says how many the stack pointer really advances by.
+ On Alpha, don't define this because there are no push insns. */
+/* #define PUSH_ROUNDING(BYTES) */
+
+/* Define this if the maximum size of all the outgoing args is to be
+ accumulated and pushed during the prologue. The amount can be
+ found in the variable current_function_outgoing_args_size. */
+#define ACCUMULATE_OUTGOING_ARGS
+
+/* Offset of first parameter from the argument pointer register value. */
+
+#define FIRST_PARM_OFFSET(FNDECL) 0
+
+/* Definitions for register eliminations.
+
+ We have two registers that can be eliminated on the Alpha. First, the
+ frame pointer register can often be eliminated in favor of the stack
+ pointer register. Secondly, the argument pointer register can always be
+ eliminated; it is replaced with either the stack or frame pointer. */
+
+/* This is an array of structures. Each structure initializes one pair
+ of eliminable registers. The "from" register number is given first,
+ followed by "to". Eliminations of the same "from" register are listed
+ in order of preference. */
+
+#define ELIMINABLE_REGS \
+{{ ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ { ARG_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}, \
+ { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ { FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}}
+
+/* Given FROM and TO register numbers, say whether this elimination is allowed.
+ Frame pointer elimination is automatically handled.
+
+ All eliminations are valid since the cases where FP can't be
+ eliminated are already handled. */
+
+#define CAN_ELIMINATE(FROM, TO) 1
+
+/* Round up to a multiple of 16 bytes. */
+#define ALPHA_ROUND(X) (((X) + 15) & ~ 15)
+
+/* Define the offset between two registers, one to be eliminated, and the other
+ its replacement, at the start of a routine. */
+#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
+{ if ((FROM) == FRAME_POINTER_REGNUM) \
+ (OFFSET) = (ALPHA_ROUND (current_function_outgoing_args_size) \
+ + alpha_sa_size ()); \
+ else if ((FROM) == ARG_POINTER_REGNUM) \
+ (OFFSET) = (ALPHA_ROUND (current_function_outgoing_args_size) \
+ + alpha_sa_size () \
+ + (ALPHA_ROUND (get_frame_size () \
+ + current_function_pretend_args_size) \
+ - current_function_pretend_args_size)); \
+}
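+
+/* Illustrative sketch (editorial addition, not GCC code): the offsets
+   computed above for a hypothetical frame with 40 bytes of outgoing
+   arguments, 16 bytes of saved registers, 100 bytes of locals and no
+   pretend args.  ALPHA_ROUND keeps each region 16-byte aligned.  */
+#include <assert.h>
+
+#define ROUND16_SKETCH(X) (((X) + 15) & ~15)    /* same arithmetic as ALPHA_ROUND */
+
+static void elimination_offset_check (void)
+{
+  long outgoing = 40, sa_size = 16, frame = 100, pretend = 0;
+
+  long fp_to_sp = ROUND16_SKETCH (outgoing) + sa_size;
+  long ap_to_sp = ROUND16_SKETCH (outgoing) + sa_size
+                  + (ROUND16_SKETCH (frame + pretend) - pretend);
+
+  assert (ROUND16_SKETCH (outgoing) == 48 && ROUND16_SKETCH (frame) == 112);
+  assert (fp_to_sp == 64 && ap_to_sp == 176);
+}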
+
+/* Define this if stack space is still allocated for a parameter passed
+ in a register. */
+/* #define REG_PARM_STACK_SPACE */
+
+/* Value is the number of bytes of arguments automatically
+ popped when returning from a subroutine call.
+ FUNDECL is the declaration node of the function (as a tree),
+ FUNTYPE is the data type of the function (as a tree),
+ or for a library call it is an identifier node for the subroutine name.
+ SIZE is the number of bytes of arguments passed on the stack. */
+
+#define RETURN_POPS_ARGS(FUNDECL,FUNTYPE,SIZE) 0
+
+/* Define how to find the value returned by a function.
+ VALTYPE is the data type of the value (as a tree).
+ If the precise function being called is known, FUNC is its FUNCTION_DECL;
+ otherwise, FUNC is 0.
+
+ On Alpha the value is found in $0 for integer functions and
+ $f0 for floating-point functions. */
+
+#define FUNCTION_VALUE(VALTYPE, FUNC) \
+ gen_rtx (REG, \
+ (INTEGRAL_MODE_P (TYPE_MODE (VALTYPE)) \
+ && TYPE_PRECISION (VALTYPE) < BITS_PER_WORD) \
+ ? word_mode : TYPE_MODE (VALTYPE), \
+ TARGET_FPREGS && TREE_CODE (VALTYPE) == REAL_TYPE ? 32 : 0)
+
+/* Define how to find the value returned by a library function
+ assuming the value has mode MODE. */
+
+#define LIBCALL_VALUE(MODE) \
+ gen_rtx (REG, MODE, \
+ TARGET_FPREGS && GET_MODE_CLASS (MODE) == MODE_FLOAT ? 32 : 0)
+
+/* The definition of this macro implies that there are cases where
+ a scalar value cannot be returned in registers.
+
+ For the Alpha, any structure or union type is returned in memory, as
+ are integers whose size is larger than 64 bits. */
+
+#define RETURN_IN_MEMORY(TYPE) \
+ (TYPE_MODE (TYPE) == BLKmode \
+ || (TREE_CODE (TYPE) == INTEGER_TYPE && TYPE_PRECISION (TYPE) > 64))
+
+/* 1 if N is a possible register number for a function value
+ as seen by the caller. */
+
+#define FUNCTION_VALUE_REGNO_P(N) ((N) == 0 || (N) == 32)
+
+/* 1 if N is a possible register number for function argument passing.
+ On Alpha, these are $16-$21 and $f16-$f21. */
+
+#define FUNCTION_ARG_REGNO_P(N) \
+ (((N) >= 16 && (N) <= 21) || ((N) >= 16 + 32 && (N) <= 21 + 32))
+
+/* Define a data type for recording info about an argument list
+ during the scan of that argument list. This data type should
+ hold all necessary information about the function itself
+ and about the args processed so far, enough to enable macros
+ such as FUNCTION_ARG to determine where the next arg should go.
+
+ On Alpha, this is a single integer, which is a number of words
+ of arguments scanned so far.
+ Thus 6 or more means all following args should go on the stack. */
+
+#define CUMULATIVE_ARGS int
+
+/* Initialize a variable CUM of type CUMULATIVE_ARGS
+ for a call to a function whose data type is FNTYPE.
+ For a library call, FNTYPE is 0. */
+
+#define INIT_CUMULATIVE_ARGS(CUM,FNTYPE,LIBNAME) (CUM) = 0
+
+/* Define an intermediate macro to compute the size (in registers) of an argument
+ for the Alpha. */
+
+#define ALPHA_ARG_SIZE(MODE, TYPE, NAMED) \
+((MODE) != BLKmode \
+ ? (GET_MODE_SIZE (MODE) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD \
+ : (int_size_in_bytes (TYPE) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
+
+/* Update the data in CUM to advance over an argument
+ of mode MODE and data type TYPE.
+ (TYPE is null for libcalls where that information may not be available.) */
+
+#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \
+ if (MUST_PASS_IN_STACK (MODE, TYPE)) \
+ (CUM) = 6; \
+ else \
+ (CUM) += ALPHA_ARG_SIZE (MODE, TYPE, NAMED)
+
+/* Determine where to put an argument to a function.
+ Value is zero to push the argument on the stack,
+ or a hard register in which to store the argument.
+
+ MODE is the argument's machine mode.
+ TYPE is the data type of the argument (as a tree).
+ This is null for libcalls where that information may
+ not be available.
+ CUM is a variable of type CUMULATIVE_ARGS which gives info about
+ the preceding args and about the function being called.
+ NAMED is nonzero if this argument is a named parameter
+ (otherwise it is an extra parameter matching an ellipsis).
+
+ On Alpha the first 6 words of args are normally in registers
+ and the rest are pushed. */
+
+#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \
+((CUM) < 6 && ! MUST_PASS_IN_STACK (MODE, TYPE) \
+ ? gen_rtx(REG, (MODE), \
+ (CUM) + 16 + ((TARGET_FPREGS \
+ && (GET_MODE_CLASS (MODE) == MODE_COMPLEX_FLOAT \
+ || GET_MODE_CLASS (MODE) == MODE_FLOAT)) \
+ * 32)) \
+ : 0)
+
+/* Specify the padding direction of arguments.
+
+ On the Alpha, we must pad upwards in order to be able to pass args in
+ registers. */
+
+#define FUNCTION_ARG_PADDING(MODE, TYPE) upward
+
+/* For an arg passed partly in registers and partly in memory,
+ this is the number of registers used.
+ For args passed entirely in registers or entirely in memory, zero. */
+
+#define FUNCTION_ARG_PARTIAL_NREGS(CUM, MODE, TYPE, NAMED) \
+((CUM) < 6 && 6 < (CUM) + ALPHA_ARG_SIZE (MODE, TYPE, NAMED) \
+ ? 6 - (CUM) : 0)
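+
+/* Illustrative sketch (editorial addition, not GCC code): how the single
+   cumulative word count drives the argument-passing macros above.  With
+   five words of arguments already scanned, a two-word argument is split:
+   one word goes in the last argument register and the rest on the stack,
+   which is exactly what FUNCTION_ARG_PARTIAL_NREGS reports.  */
+#include <assert.h>
+
+static void arg_split_check (void)
+{
+  int cum = 5;                  /* words of arguments already scanned */
+  int size = 2;                 /* a 16-byte (two-word) argument */
+
+  int starts_in_regs = (cum < 6);
+  int partial_regs = (cum < 6 && 6 < cum + size) ? 6 - cum : 0;
+
+  assert (starts_in_regs && partial_regs == 1);
+}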
+
+/* Perform any actions needed for a function that is receiving a
+ variable number of arguments.
+
+ CUM is as above.
+
+ MODE and TYPE are the mode and type of the current parameter.
+
+ PRETEND_SIZE is a variable that should be set to the amount of stack
+ that must be pushed by the prolog to pretend that our caller pushed
+ it.
+
+ Normally, this macro will push all remaining incoming registers on the
+ stack and set PRETEND_SIZE to the length of the registers pushed.
+
+ On the Alpha, we allocate space for all 12 arg registers, but only
+ push those that are remaining.
+
+ However, if NO registers need to be saved, don't allocate any space.
+ This is not only because we won't need the space, but because AP includes
+ the current_pretend_args_size and we don't want to mess up any
+ ap-relative addresses already made.
+
+ If we are not to use the floating-point registers, save the integer
+ registers where we would put the floating-point registers. This is
+ not the most efficient way to implement varargs with just one register
+ class, but it isn't worth doing anything more efficient in this rare
+ case. */
+
+
+#define SETUP_INCOMING_VARARGS(CUM,MODE,TYPE,PRETEND_SIZE,NO_RTL) \
+{ if ((CUM) < 6) \
+ { \
+ if (! (NO_RTL)) \
+ { \
+ move_block_from_reg \
+ (16 + CUM, \
+ gen_rtx (MEM, BLKmode, \
+ plus_constant (virtual_incoming_args_rtx, \
+ ((CUM) + 6)* UNITS_PER_WORD)), \
+ 6 - (CUM), (6 - (CUM)) * UNITS_PER_WORD); \
+ move_block_from_reg \
+ (16 + (TARGET_FPREGS ? 32 : 0) + CUM, \
+ gen_rtx (MEM, BLKmode, \
+ plus_constant (virtual_incoming_args_rtx, \
+ (CUM) * UNITS_PER_WORD)), \
+ 6 - (CUM), (6 - (CUM)) * UNITS_PER_WORD); \
+ } \
+ PRETEND_SIZE = 12 * UNITS_PER_WORD; \
+ } \
+}
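+
+/* Worked example (editorial addition, not GCC code): the save-area offsets
+   used above for a function whose named arguments occupy CUM words.  The
+   integer copies of the remaining argument registers are stored at
+   (CUM + 6) * 8 bytes from the incoming-args pointer, the floating-point
+   copies at CUM * 8, and PRETEND_SIZE covers the full 12-register block.  */
+#include <assert.h>
+
+static void varargs_layout_check (void)
+{
+  int cum = 2;                          /* e.g. two named words before "..." */
+  int regs_saved = 6 - cum;             /* registers $18-$21 and $f18-$f21 */
+  int int_block_offset = (cum + 6) * 8; /* where the integer copies go */
+  int fp_block_offset = cum * 8;        /* where the FP copies go */
+  int pretend_size = 12 * 8;
+
+  assert (regs_saved == 4);
+  assert (int_block_offset == 64 && fp_block_offset == 16);
+  assert (pretend_size == 96);
+}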
+
+/* Try to output insns to set TARGET equal to the constant C if it can be
+ done in less than N insns. Do all computations in MODE. Returns the place
+ where the output has been placed if it can be done and the insns have been
+ emitted. If it would take more than N insns, zero is returned and no
+   insns are emitted.  */
+extern struct rtx_def *alpha_emit_set_const ();
+
+/* Generate necessary RTL for __builtin_saveregs().
+ ARGLIST is the argument list; see expr.c. */
+extern struct rtx_def *alpha_builtin_saveregs ();
+#define EXPAND_BUILTIN_SAVEREGS(ARGLIST) alpha_builtin_saveregs (ARGLIST)
+
+/* Define the information needed to generate branch and scc insns. This is
+ stored from the compare operation. Note that we can't use "rtx" here
+ since it hasn't been defined! */
+
+extern struct rtx_def *alpha_compare_op0, *alpha_compare_op1;
+extern int alpha_compare_fp_p;
+
+/* This macro produces the initial definition of a function name. On the
+ Alpha, we need to save the function name for the prologue and epilogue. */
+
+extern char *alpha_function_name;
+
+#define ASM_DECLARE_FUNCTION_NAME(FILE,NAME,DECL) \
+{ \
+ alpha_function_name = NAME; \
+}
+
+/* This macro generates the assembly code for function entry.
+ FILE is a stdio stream to output the code to.
+ SIZE is an int: how many units of temporary storage to allocate.
+ Refer to the array `regs_ever_live' to determine which registers
+ to save; `regs_ever_live[I]' is nonzero if register number I
+ is ever used in the function. This macro is responsible for
+ knowing which registers should not be saved even if used. */
+
+#define FUNCTION_PROLOGUE(FILE, SIZE) output_prolog (FILE, SIZE)
+
+/* Output assembler code to FILE to increment profiler label # LABELNO
+ for profiling a function entry. Under OSF/1, profiling is enabled
+ by simply passing -pg to the assembler and linker. */
+
+#define FUNCTION_PROFILER(FILE, LABELNO)
+
+/* Output assembler code to FILE to initialize this source file's
+ basic block profiling info, if that has not already been done.
+ This assumes that __bb_init_func doesn't garble a1-a5. */
+
+#define FUNCTION_BLOCK_PROFILER(FILE, LABELNO) \
+ do { \
+ ASM_OUTPUT_REG_PUSH (FILE, 16); \
+ fputs ("\tlda $16,$PBX32\n", (FILE)); \
+ fputs ("\tldq $26,0($16)\n", (FILE)); \
+ fputs ("\tbne $26,1f\n", (FILE)); \
+ fputs ("\tlda $27,__bb_init_func\n", (FILE)); \
+ fputs ("\tjsr $26,($27),__bb_init_func\n", (FILE)); \
+ fputs ("\tldgp $29,0($26)\n", (FILE)); \
+ fputs ("1:\n", (FILE)); \
+ ASM_OUTPUT_REG_POP (FILE, 16); \
+ } while (0);
+
+/* Output assembler code to FILE to increment the entry-count for
+ the BLOCKNO'th basic block in this source file. */
+
+#define BLOCK_PROFILER(FILE, BLOCKNO) \
+ do { \
+ int blockn = (BLOCKNO); \
+ fputs ("\tsubq $30,16,$30\n", (FILE)); \
+ fputs ("\tstq $26,0($30)\n", (FILE)); \
+ fputs ("\tstq $27,8($30)\n", (FILE)); \
+ fputs ("\tlda $26,$PBX34\n", (FILE)); \
+ fprintf ((FILE), "\tldq $27,%d($26)\n", 8*blockn); \
+ fputs ("\taddq $27,1,$27\n", (FILE)); \
+ fprintf ((FILE), "\tstq $27,%d($26)\n", 8*blockn); \
+ fputs ("\tldq $26,0($30)\n", (FILE)); \
+ fputs ("\tldq $27,8($30)\n", (FILE)); \
+ fputs ("\taddq $30,16,$30\n", (FILE)); \
+ } while (0)
+
+
+/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
+ the stack pointer does not matter. The value is tested only in
+ functions that have frame pointers.
+ No definition is equivalent to always zero. */
+
+#define EXIT_IGNORE_STACK 1
+
+/* This macro generates the assembly code for function exit,
+ on machines that need it. If FUNCTION_EPILOGUE is not defined
+ then individual return instructions are generated for each
+ return statement. Args are same as for FUNCTION_PROLOGUE.
+
+ The function epilogue should not depend on the current stack pointer!
+ It should use the frame pointer only. This is mandatory because
+ of alloca; we also take advantage of it to omit stack adjustments
+ before returning. */
+
+#define FUNCTION_EPILOGUE(FILE, SIZE) output_epilog (FILE, SIZE)
+
+
+/* Output assembler code for a block containing the constant parts
+ of a trampoline, leaving space for the variable parts.
+
+ The trampoline should set the static chain pointer to value placed
+ into the trampoline and should branch to the specified routine.
+ Note that $27 has been set to the address of the trampoline, so we can
+ use it for addressability of the two data items. Trampolines are always
+ aligned to FUNCTION_BOUNDARY, which is 64 bits. */
+
+#define TRAMPOLINE_TEMPLATE(FILE) \
+{ \
+ fprintf (FILE, "\tldq $1,24($27)\n"); \
+ fprintf (FILE, "\tldq $27,16($27)\n"); \
+ fprintf (FILE, "\tjmp $31,($27),0\n"); \
+ fprintf (FILE, "\tnop\n"); \
+ fprintf (FILE, "\t.quad 0,0\n"); \
+}
+
+/* Section in which to place the trampoline. On Alpha, instructions
+ may only be placed in a text segment. */
+
+#define TRAMPOLINE_SECTION text_section
+
+/* Length in units of the trampoline for entering a nested function. */
+
+#define TRAMPOLINE_SIZE 32
+
+/* Emit RTL insns to initialize the variable parts of a trampoline.
+ FNADDR is an RTX for the address of the function's pure code.
+ CXT is an RTX for the static chain value for the function. We assume
+ here that a function will be called many more times than its address
+ is taken (e.g., it might be passed to qsort), so we take the trouble
+ to initialize the "hint" field in the JMP insn. Note that the hint
+ field is PC (new) + 4 * bits 13:0. */
+
+#define INITIALIZE_TRAMPOLINE(TRAMP, FNADDR, CXT) \
+{ \
+ rtx _temp, _temp1, _addr; \
+ \
+ _addr = memory_address (Pmode, plus_constant ((TRAMP), 16)); \
+ emit_move_insn (gen_rtx (MEM, Pmode, _addr), (FNADDR)); \
+ _addr = memory_address (Pmode, plus_constant ((TRAMP), 24)); \
+ emit_move_insn (gen_rtx (MEM, Pmode, _addr), (CXT)); \
+ \
+ _temp = force_operand (plus_constant ((TRAMP), 12), NULL_RTX); \
+ _temp = expand_binop (DImode, sub_optab, (FNADDR), _temp, _temp, 1, \
+ OPTAB_WIDEN); \
+ _temp = expand_shift (RSHIFT_EXPR, Pmode, _temp, \
+ build_int_2 (2, 0), NULL_RTX, 1); \
+ _temp = expand_and (gen_lowpart (SImode, _temp), \
+ GEN_INT (0x3fff), 0); \
+ \
+ _addr = memory_address (SImode, plus_constant ((TRAMP), 8)); \
+ _temp1 = force_reg (SImode, gen_rtx (MEM, SImode, _addr)); \
+ _temp1 = expand_and (_temp1, GEN_INT (0xffffc000), NULL_RTX); \
+ _temp1 = expand_binop (SImode, ior_optab, _temp1, _temp, _temp1, 1, \
+ OPTAB_WIDEN); \
+ \
+ emit_move_insn (gen_rtx (MEM, SImode, _addr), _temp1); \
+ \
+ emit_library_call (gen_rtx (SYMBOL_REF, Pmode, \
+ "__enable_execute_stack"), \
+ 0, VOIDmode, 1,_addr, Pmode); \
+ \
+ emit_insn (gen_rtx (UNSPEC_VOLATILE, VOIDmode, \
+ gen_rtvec (1, const0_rtx), 0)); \
+}
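+
+/* Illustrative sketch (editorial addition, not GCC code): the hint
+   computation above.  The JMP is the third instruction, at TRAMP + 8, so
+   the "new PC" is TRAMP + 12 and the 14-bit hint is the word (4-byte)
+   offset from there to the function address.  The helper name is invented.  */
+#include <assert.h>
+
+static unsigned int jmp_hint_sketch (unsigned long tramp, unsigned long fnaddr)
+{
+  return (unsigned int) (((fnaddr - (tramp + 12)) >> 2) & 0x3fff);
+}
+
+static void jmp_hint_check (void)
+{
+  /* A function 0x1000 bytes above a trampoline at 0x1000.  */
+  assert (jmp_hint_sketch (0x1000, 0x2000) == 0x3fd);
+}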
+
+/* Attempt to turn on access permissions for the stack. */
+
+#define TRANSFER_FROM_TRAMPOLINE \
+ \
+void \
+__enable_execute_stack (addr) \
+ void *addr; \
+{ \
+ long size = getpagesize (); \
+ long mask = ~(size-1); \
+ char *page = (char *) (((long) addr) & mask); \
+ char *end = (char *) ((((long) (addr + TRAMPOLINE_SIZE)) & mask) + size); \
+ \
+ /* 7 is PROT_READ | PROT_WRITE | PROT_EXEC */ \
+ if (mprotect (page, end - page, 7) < 0) \
+ perror ("mprotect of trampoline code"); \
+}
+
+/* A C expression whose value is RTL representing the value of the return
+ address for the frame COUNT steps up from the current frame.
+ FRAMEADDR is the frame pointer of the COUNT frame, or the frame pointer of
+   the COUNT-1 frame if RETURN_ADDR_IN_PREVIOUS_FRAME is defined.
+
+ This definition for Alpha is broken, but is put in at the request of
+ Mike Stump. */
+
+#define RETURN_ADDR_RTX(COUNT, FRAME) \
+((COUNT == 0 && alpha_sa_size () == 0 && 0 /* not right. */) \
+ ? gen_rtx (REG, Pmode, 26) \
+ : gen_rtx (MEM, Pmode, \
+ memory_address (Pmode, FRAME)))
+
+/* Addressing modes, and classification of registers for them. */
+
+/* #define HAVE_POST_INCREMENT */
+/* #define HAVE_POST_DECREMENT */
+
+/* #define HAVE_PRE_DECREMENT */
+/* #define HAVE_PRE_INCREMENT */
+
+/* Macros to check register numbers against specific register classes. */
+
+/* These assume that REGNO is a hard or pseudo reg number.
+ They give nonzero only if REGNO is a hard reg of the suitable class
+ or a pseudo reg currently allocated to a suitable hard reg.
+ Since they use reg_renumber, they are safe only once reg_renumber
+ has been allocated, which happens in local-alloc.c. */
+
+#define REGNO_OK_FOR_INDEX_P(REGNO) 0
+#define REGNO_OK_FOR_BASE_P(REGNO) \
+((REGNO) < 32 || (unsigned) reg_renumber[REGNO] < 32 \
+ || (REGNO) == 63 || reg_renumber[REGNO] == 63)
+
+/* Maximum number of registers that can appear in a valid memory address. */
+#define MAX_REGS_PER_ADDRESS 1
+
+/* Recognize any constant value that is a valid address. For the Alpha,
+   only small integer constants are valid addresses, since we want to use LDA
+   to load any symbolic addresses into registers.  */
+
+#define CONSTANT_ADDRESS_P(X) \
+ (GET_CODE (X) == CONST_INT \
+ && (unsigned HOST_WIDE_INT) (INTVAL (X) + 0x8000) < 0x10000)
+
+/* Include all constant integers and constant doubles, but not
+ floating-point, except for floating-point zero. */
+
+#define LEGITIMATE_CONSTANT_P(X) \
+ (GET_MODE_CLASS (GET_MODE (X)) != MODE_FLOAT \
+ || (X) == CONST0_RTX (GET_MODE (X)))
+
+/* The macros REG_OK_FOR..._P assume that the arg is a REG rtx
+ and check its validity for a certain class.
+ We have two alternate definitions for each of them.
+ The usual definition accepts all pseudo regs; the other rejects
+ them unless they have been allocated suitable hard regs.
+ The symbol REG_OK_STRICT causes the latter definition to be used.
+
+ Most source files want to accept pseudo regs in the hope that
+ they will get allocated to the class that the insn wants them to be in.
+ Source files for reload pass need to be strict.
+ After reload, it makes no difference, since pseudo regs have
+ been eliminated by then. */
+
+#ifndef REG_OK_STRICT
+
+/* Nonzero if X is a hard reg that can be used as an index
+ or if it is a pseudo reg. */
+#define REG_OK_FOR_INDEX_P(X) 0
+/* Nonzero if X is a hard reg that can be used as a base reg
+ or if it is a pseudo reg. */
+#define REG_OK_FOR_BASE_P(X) \
+ (REGNO (X) < 32 || REGNO (X) == 63 || REGNO (X) >= FIRST_PSEUDO_REGISTER)
+
+#else
+
+/* Nonzero if X is a hard reg that can be used as an index. */
+#define REG_OK_FOR_INDEX_P(X) REGNO_OK_FOR_INDEX_P (REGNO (X))
+/* Nonzero if X is a hard reg that can be used as a base reg. */
+#define REG_OK_FOR_BASE_P(X) REGNO_OK_FOR_BASE_P (REGNO (X))
+
+#endif
+
+/* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression
+ that is a valid memory address for an instruction.
+ The MODE argument is the machine mode for the MEM expression
+ that wants to use this address.
+
+ For Alpha, we have either a constant address or the sum of a register
+ and a constant address, or just a register. For DImode, any of those
+   forms can be surrounded with an AND that clears the low-order three bits;
+ this is an "unaligned" access.
+
+ First define the basic valid address. */
+
+#define GO_IF_LEGITIMATE_SIMPLE_ADDRESS(MODE, X, ADDR) \
+{ if (REG_P (X) && REG_OK_FOR_BASE_P (X)) \
+ goto ADDR; \
+ if (CONSTANT_ADDRESS_P (X)) \
+ goto ADDR; \
+ if (GET_CODE (X) == PLUS \
+ && REG_P (XEXP (X, 0)) \
+ && REG_OK_FOR_BASE_P (XEXP (X, 0)) \
+ && CONSTANT_ADDRESS_P (XEXP (X, 1))) \
+ goto ADDR; \
+}
+
+/* Now accept the simple address, or, for DImode only, an AND of a simple
+ address that turns off the low three bits. */
+
+#define GO_IF_LEGITIMATE_ADDRESS(MODE, X, ADDR) \
+{ GO_IF_LEGITIMATE_SIMPLE_ADDRESS (MODE, X, ADDR); \
+ if ((MODE) == DImode \
+ && GET_CODE (X) == AND \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT \
+ && INTVAL (XEXP (X, 1)) == -8) \
+ GO_IF_LEGITIMATE_SIMPLE_ADDRESS (MODE, XEXP (X, 0), ADDR); \
+}
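+
+/* Illustrative sketch (editorial addition, not GCC code): the (and X -8)
+   form accepted above models the unaligned-access instructions, which
+   operate on the aligned quadword containing the addressed byte; clearing
+   the low three bits rounds the address down to a multiple of 8.  */
+#include <assert.h>
+
+static void unaligned_base_check (void)
+{
+  unsigned long addr = 0x1003;
+
+  assert ((addr & ~7UL) == 0x1000);     /* enclosing aligned quadword */
+  assert ((addr & ~7UL) % 8 == 0);
+}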
+
+/* Try machine-dependent ways of modifying an illegitimate address
+ to be legitimate. If we find one, return the new, valid address.
+ This macro is used in only one place: `memory_address' in explow.c.
+
+ OLDX is the address as it was before break_out_memory_refs was called.
+ In some cases it is useful to look at this to decide what needs to be done.
+
+ MODE and WIN are passed so that this macro can use
+ GO_IF_LEGITIMATE_ADDRESS.
+
+ It is always safe for this macro to do nothing. It exists to recognize
+ opportunities to optimize the output.
+
+ For the Alpha, there are three cases we handle:
+
+ (1) If the address is (plus reg const_int) and the CONST_INT is not a
+ valid offset, compute the high part of the constant and add it to the
+ register. Then our address is (plus temp low-part-const).
+ (2) If the address is (const (plus FOO const_int)), find the low-order
+ part of the CONST_INT. Then load FOO plus any high-order part of the
+ CONST_INT into a register. Our address is (plus reg low-part-const).
+ This is done to reduce the number of GOT entries.
+ (3) If we have a (plus reg const), emit the load as in (2), then add
+ the two registers, and finally generate (plus reg low-part-const) as
+ our address. */
+
+#define LEGITIMIZE_ADDRESS(X,OLDX,MODE,WIN) \
+{ if (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 0)) == REG \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT \
+ && ! CONSTANT_ADDRESS_P (XEXP (X, 1))) \
+ { \
+ HOST_WIDE_INT val = INTVAL (XEXP (X, 1)); \
+ HOST_WIDE_INT lowpart = (val & 0xffff) - 2 * (val & 0x8000); \
+ HOST_WIDE_INT highpart = val - lowpart; \
+ rtx high = GEN_INT (highpart); \
+      rtx temp = expand_binop (Pmode, add_optab, XEXP (X, 0),		\
+ high, NULL_RTX, 1, OPTAB_LIB_WIDEN); \
+ \
+ (X) = plus_constant (temp, lowpart); \
+ goto WIN; \
+ } \
+ else if (GET_CODE (X) == CONST \
+ && GET_CODE (XEXP (X, 0)) == PLUS \
+ && GET_CODE (XEXP (XEXP (X, 0), 1)) == CONST_INT) \
+ { \
+ HOST_WIDE_INT val = INTVAL (XEXP (XEXP (X, 0), 1)); \
+ HOST_WIDE_INT lowpart = (val & 0xffff) - 2 * (val & 0x8000); \
+ HOST_WIDE_INT highpart = val - lowpart; \
+ rtx high = XEXP (XEXP (X, 0), 0); \
+ \
+ if (highpart) \
+ high = plus_constant (high, highpart); \
+ \
+ (X) = plus_constant (force_reg (Pmode, high), lowpart); \
+ goto WIN; \
+ } \
+ else if (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 0)) == REG \
+ && GET_CODE (XEXP (X, 1)) == CONST \
+ && GET_CODE (XEXP (XEXP (X, 1), 0)) == PLUS \
+ && GET_CODE (XEXP (XEXP (XEXP (X, 1), 0), 1)) == CONST_INT) \
+ { \
+ HOST_WIDE_INT val = INTVAL (XEXP (XEXP (XEXP (X, 1), 0), 1)); \
+ HOST_WIDE_INT lowpart = (val & 0xffff) - 2 * (val & 0x8000); \
+ HOST_WIDE_INT highpart = val - lowpart; \
+ rtx high = XEXP (XEXP (XEXP (X, 1), 0), 0); \
+ \
+ if (highpart) \
+ high = plus_constant (high, highpart); \
+ \
+ high = expand_binop (Pmode, add_optab, XEXP (X, 0), \
+ force_reg (Pmode, high), \
+ high, 1, OPTAB_LIB_WIDEN); \
+ (X) = plus_constant (high, lowpart); \
+ goto WIN; \
+ } \
+}
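+
+/* Worked example (editorial addition, not GCC code): the high/low split
+   used in all three cases above.  The low part is the sign-extended low
+   16 bits, so it always fits an LDA displacement, and the high part is
+   then a multiple of 0x10000 suitable for LDAH.  */
+#include <assert.h>
+
+static void highlow_split_check (void)
+{
+  long val = 0x1234abcd;
+  long lowpart = (val & 0xffff) - 2 * (val & 0x8000);  /* sign-extend low 16 bits */
+  long highpart = val - lowpart;
+
+  assert (lowpart == -0x5433);          /* 0xabcd taken as a signed 16-bit value */
+  assert ((highpart & 0xffff) == 0);    /* low 16 bits of the high part are zero */
+  assert (highpart == 0x12350000);
+  assert (highpart + lowpart == val);
+}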
+
+/* Go to LABEL if ADDR (a legitimate address expression)
+ has an effect that depends on the machine mode it is used for.
+ On the Alpha this is true only for the unaligned modes. We can
+ simplify this test since we know that the address must be valid. */
+
+#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR,LABEL) \
+{ if (GET_CODE (ADDR) == AND) goto LABEL; }
+
+/* Compute the cost of an address. For the Alpha, all valid addresses are
+ the same cost. */
+
+#define ADDRESS_COST(X) 0
+
+/* Define this if some processing needs to be done immediately before
+ emitting code for an insn. */
+
+/* #define FINAL_PRESCAN_INSN(INSN,OPERANDS,NOPERANDS) */
+
+/* Specify the machine mode that this machine uses
+ for the index in the tablejump instruction. */
+#define CASE_VECTOR_MODE SImode
+
+/* Define this if the tablejump instruction expects the table
+ to contain offsets from the address of the table.
+ Do not define this if the table should contain absolute addresses.
+ On the Alpha, the table is really GP-relative, not relative to the PC
+ of the table, but we pretend that it is PC-relative; this should be OK,
+ but we should try to find some better way sometime. */
+#define CASE_VECTOR_PC_RELATIVE
+
+/* Specify the tree operation to be used to convert reals to integers. */
+#define IMPLICIT_FIX_EXPR FIX_ROUND_EXPR
+
+/* This is the kind of divide that is easiest to do in the general case. */
+#define EASY_DIV_EXPR TRUNC_DIV_EXPR
+
+/* Define this as 1 if `char' should by default be signed; else as 0. */
+#define DEFAULT_SIGNED_CHAR 1
+
+/* This flag, if defined, says the same insns that convert to a signed fixnum
+ also convert validly to an unsigned one.
+
+ We actually lie a bit here as overflow conditions are different. But
+ they aren't being checked anyway. */
+
+#define FIXUNS_TRUNC_LIKE_FIX_TRUNC
+
+/* Max number of bytes we can move to or from memory
+ in one reasonably fast instruction. */
+
+#define MOVE_MAX 8
+
+/* Largest number of bytes of an object that can be placed in a register.
+ On the Alpha we have plenty of registers, so use TImode. */
+#define MAX_FIXED_MODE_SIZE GET_MODE_BITSIZE (TImode)
+
+/* Nonzero if access to memory by bytes is no faster than for words.
+ Also non-zero if doing byte operations (specifically shifts) in registers
+ is undesirable.
+
+   On the Alpha, we want to avoid the byte operations and instead use
+ masking operations to access fields; these will save instructions. */
+
+#define SLOW_BYTE_ACCESS 1
+
+/* Define if operations between registers always perform the operation
+ on the full register even if a narrower mode is specified. */
+#define WORD_REGISTER_OPERATIONS
+
+/* Define if loading in MODE, an integral mode narrower than BITS_PER_WORD
+ will either zero-extend or sign-extend. The value of this macro should
+ be the code that says which one of the two operations is implicitly
+ done, NIL if none. */
+#define LOAD_EXTEND_OP(MODE) SIGN_EXTEND
+
+/* Define if loading short immediate values into registers sign extends. */
+#define SHORT_IMMEDIATES_SIGN_EXTEND
+
+/* Value is 1 if truncating an integer of INPREC bits to OUTPREC bits
+ is done just by pretending it is already truncated. */
+#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1
+
+/* We assume that the store-condition-codes instructions store 0 for false
+ and some other value for true. This is the value stored for true. */
+
+#define STORE_FLAG_VALUE 1
+
+/* Define the value returned by a floating-point comparison instruction. */
+
+#define FLOAT_STORE_FLAG_VALUE 0.5
+
+/* Canonicalize a comparison from one we don't have to one we do have. */
+
+#define CANONICALIZE_COMPARISON(CODE,OP0,OP1) \
+ do { \
+ if (((CODE) == GE || (CODE) == GT || (CODE) == GEU || (CODE) == GTU) \
+ && (GET_CODE (OP1) == REG || (OP1) == const0_rtx)) \
+ { \
+ rtx tem = (OP0); \
+ (OP0) = (OP1); \
+ (OP1) = tem; \
+ (CODE) = swap_condition (CODE); \
+ } \
+ if (((CODE) == LT || (CODE) == LTU) \
+ && GET_CODE (OP1) == CONST_INT && INTVAL (OP1) == 256) \
+ { \
+ (CODE) = (CODE) == LT ? LE : LEU; \
+ (OP1) = GEN_INT (255); \
+ } \
+ } while (0)
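+
+/* Illustrative sketch (editorial addition, not GCC code): the second
+   rewrite above is safe because for integers "x < 256" and "x <= 255" are
+   the same test, and 255 is an 8-bit literal the compare instructions can
+   encode directly while 256 is not.  */
+#include <assert.h>
+
+static void canonicalize_check (void)
+{
+  long x;
+
+  for (x = -4; x <= 300; x++)
+    assert ((x < 256) == (x <= 255));
+}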
+
+/* Specify the machine mode that pointers have.
+ After generation of rtl, the compiler makes no further distinction
+ between pointers and any other objects of this machine mode. */
+#define Pmode DImode
+
+/* Mode of a function address in a call instruction (for indexing purposes). */
+
+#define FUNCTION_MODE Pmode
+
+/* Define this if addresses of constant functions
+ shouldn't be put through pseudo regs where they can be cse'd.
+ Desirable on machines where ordinary constants are expensive
+ but a CALL with constant address is cheap.
+
+ We define this on the Alpha so that gen_call and gen_call_value
+ get to see the SYMBOL_REF (for the hint field of the jsr). It will
+ then copy it into a register, thus actually letting the address be
+ cse'ed. */
+
+#define NO_FUNCTION_CSE
+
+/* Define this to be nonzero if shift instructions ignore all but the low-order
+ few bits. */
+#define SHIFT_COUNT_TRUNCATED 1
+
+/* Use atexit for static constructors/destructors, instead of defining
+ our own exit function. */
+#define HAVE_ATEXIT
+
+/* Compute the cost of computing a constant rtl expression RTX
+ whose rtx-code is CODE. The body of this macro is a portion
+ of a switch statement. If the code is computed here,
+ return it with a return statement. Otherwise, break from the switch.
+
+ If this is an 8-bit constant, return zero since it can be used
+ nearly anywhere with no cost. If it is a valid operand for an
+ ADD or AND, likewise return 0 if we know it will be used in that
+ context. Otherwise, return 2 since it might be used there later.
+ All other constants take at least two insns. */
+
+#define CONST_COSTS(RTX,CODE,OUTER_CODE) \
+ case CONST_INT: \
+ if (INTVAL (RTX) >= 0 && INTVAL (RTX) < 256) \
+ return 0; \
+ case CONST_DOUBLE: \
+ if (((OUTER_CODE) == PLUS && add_operand (RTX, VOIDmode)) \
+ || ((OUTER_CODE) == AND && and_operand (RTX, VOIDmode))) \
+ return 0; \
+ else if (add_operand (RTX, VOIDmode) || and_operand (RTX, VOIDmode)) \
+ return 2; \
+ else \
+ return COSTS_N_INSNS (2); \
+ case CONST: \
+ case SYMBOL_REF: \
+ case LABEL_REF: \
+ return COSTS_N_INSNS (3);
+
+/* Provide the costs of a rtl expression. This is in the body of a
+ switch on CODE. */
+
+#define RTX_COSTS(X,CODE,OUTER_CODE) \
+ case PLUS: case MINUS: \
+ if (FLOAT_MODE_P (GET_MODE (X))) \
+ return COSTS_N_INSNS (6); \
+ else if (GET_CODE (XEXP (X, 0)) == MULT \
+ && const48_operand (XEXP (XEXP (X, 0), 1), VOIDmode)) \
+ return (2 + rtx_cost (XEXP (XEXP (X, 0), 0), OUTER_CODE) \
+ + rtx_cost (XEXP (X, 1), OUTER_CODE)); \
+ break; \
+ case MULT: \
+ if (FLOAT_MODE_P (GET_MODE (X))) \
+ return COSTS_N_INSNS (6); \
+ return COSTS_N_INSNS (23); \
+ case ASHIFT: \
+ if (GET_CODE (XEXP (X, 1)) == CONST_INT \
+ && INTVAL (XEXP (X, 1)) <= 3) \
+ break; \
+ /* ... fall through ... */ \
+ case ASHIFTRT: case LSHIFTRT: case IF_THEN_ELSE: \
+ return COSTS_N_INSNS (2); \
+ case DIV: case UDIV: case MOD: case UMOD: \
+ if (GET_MODE (X) == SFmode) \
+ return COSTS_N_INSNS (34); \
+ else if (GET_MODE (X) == DFmode) \
+ return COSTS_N_INSNS (63); \
+ else \
+ return COSTS_N_INSNS (70); \
+ case MEM: \
+ return COSTS_N_INSNS (3); \
+ case FLOAT: case UNSIGNED_FLOAT: case FIX: case UNSIGNED_FIX: \
+ case FLOAT_EXTEND: case FLOAT_TRUNCATE: \
+ return COSTS_N_INSNS (6); \
+ case NEG: case ABS: \
+ if (FLOAT_MODE_P (GET_MODE (X))) \
+ return COSTS_N_INSNS (6); \
+ break;
+
+/* Control the assembler format that we output. */
+
+/* Output at beginning of assembler file. */
+
+#define ASM_FILE_START(FILE) \
+{ \
+ alpha_write_verstamp (FILE); \
+ fprintf (FILE, "\t.set noreorder\n"); \
+ fprintf (FILE, "\t.set volatile\n"); \
+ fprintf (FILE, "\t.set noat\n"); \
+ ASM_OUTPUT_SOURCE_FILENAME (FILE, main_input_filename); \
+}
+
+/* Output to assembler file text saying following lines
+ may contain character constants, extra white space, comments, etc. */
+
+#define ASM_APP_ON ""
+
+/* Output to assembler file text saying following lines
+ no longer contain unusual constructs. */
+
+#define ASM_APP_OFF ""
+
+#define TEXT_SECTION_ASM_OP ".text"
+
+/* Output before read-only data. */
+
+#define READONLY_DATA_SECTION_ASM_OP ".rdata"
+
+/* Output before writable data. */
+
+#define DATA_SECTION_ASM_OP ".data"
+
+/* Define an extra section for read-only data, a routine to enter it, and
+ indicate that it is for read-only data.
+
+ The first time we enter the readonly data section for a file, we write
+ eight bytes of zero. This works around a bug in DEC's assembler in
+ some versions of OSF/1 V3.x. */
+
+#define EXTRA_SECTIONS readonly_data
+
+#define EXTRA_SECTION_FUNCTIONS \
+void \
+literal_section () \
+{ \
+ if (in_section != readonly_data) \
+ { \
+ static int firsttime = 1; \
+ \
+ fprintf (asm_out_file, "%s\n", READONLY_DATA_SECTION_ASM_OP); \
+ if (firsttime) \
+ { \
+ firsttime = 0; \
+ ASM_OUTPUT_DOUBLE_INT (asm_out_file, const0_rtx); \
+ } \
+ \
+ in_section = readonly_data; \
+ } \
+} \
+
+#define READONLY_DATA_SECTION literal_section
+
+/* If we are referencing a function that is static, make the SYMBOL_REF
+   special.  We use this to indicate that we can branch to this function
+ without setting PV or restoring GP. */
+
+#define ENCODE_SECTION_INFO(DECL) \
+ if (TREE_CODE (DECL) == FUNCTION_DECL && ! TREE_PUBLIC (DECL)) \
+ SYMBOL_REF_FLAG (XEXP (DECL_RTL (DECL), 0)) = 1;
+
+/* How to refer to registers in assembler output.
+ This sequence is indexed by compiler's hard-register-number (see above). */
+
+#define REGISTER_NAMES \
+{"$0", "$1", "$2", "$3", "$4", "$5", "$6", "$7", "$8", \
+ "$9", "$10", "$11", "$12", "$13", "$14", "$15", \
+ "$16", "$17", "$18", "$19", "$20", "$21", "$22", "$23", \
+ "$24", "$25", "$26", "$27", "$28", "$29", "$30", "AP", \
+ "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7", "$f8", \
+ "$f9", "$f10", "$f11", "$f12", "$f13", "$f14", "$f15", \
+ "$f16", "$f17", "$f18", "$f19", "$f20", "$f21", "$f22", "$f23",\
+ "$f24", "$f25", "$f26", "$f27", "$f28", "$f29", "$f30", "FP"}
+
+/* How to renumber registers for dbx and gdb. */
+
+#define DBX_REGISTER_NUMBER(REGNO) (REGNO)
+
+/* This is how to output the definition of a user-level label named NAME,
+ such as the label on a static function or variable NAME. */
+
+#define ASM_OUTPUT_LABEL(FILE,NAME) \
+ do { assemble_name (FILE, NAME); fputs (":\n", FILE); } while (0)
+
+/* This is how to output a command to make the user-level label named NAME
+ defined for reference from other files. */
+
+#define ASM_GLOBALIZE_LABEL(FILE,NAME) \
+ do { fputs ("\t.globl ", FILE); assemble_name (FILE, NAME); fputs ("\n", FILE);} while (0)
+
+/* This is how to output a reference to a user-level label named NAME.
+ `assemble_name' uses this. */
+
+#define ASM_OUTPUT_LABELREF(FILE,NAME) \
+ fprintf (FILE, "%s", NAME)
+
+/* This is how to output an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class. */
+
+#define ASM_OUTPUT_INTERNAL_LABEL(FILE,PREFIX,NUM) \
+ if ((PREFIX)[0] == 'L') \
+ fprintf (FILE, "$%s%d:\n", & (PREFIX)[1], NUM + 32); \
+ else \
+ fprintf (FILE, "%s%d:\n", PREFIX, NUM);
+
+/* This is how to output a label for a jump table. Arguments are the same as
+ for ASM_OUTPUT_INTERNAL_LABEL, except the insn for the jump table is
+ passed. */
+
+#define ASM_OUTPUT_CASE_LABEL(FILE,PREFIX,NUM,TABLEINSN) \
+{ ASM_OUTPUT_ALIGN (FILE, 2); ASM_OUTPUT_INTERNAL_LABEL (FILE, PREFIX, NUM); }
+
+/* This is how to store into the string LABEL
+ the symbol_ref name of an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class.
+ This is suitable for output with `assemble_name'. */
+
+#define ASM_GENERATE_INTERNAL_LABEL(LABEL,PREFIX,NUM) \
+ if ((PREFIX)[0] == 'L') \
+ sprintf (LABEL, "*$%s%d", & (PREFIX)[1], NUM + 32); \
+ else \
+ sprintf (LABEL, "*%s%d", PREFIX, NUM)
+
+/* This is how to output an assembler line defining a `double' constant. */
+
+#define ASM_OUTPUT_DOUBLE(FILE,VALUE) \
+ { \
+ if (REAL_VALUE_ISINF (VALUE) \
+ || REAL_VALUE_ISNAN (VALUE) \
+ || REAL_VALUE_MINUS_ZERO (VALUE)) \
+ { \
+ long t[2]; \
+ REAL_VALUE_TO_TARGET_DOUBLE ((VALUE), t); \
+ fprintf (FILE, "\t.quad 0x%lx%08lx\n", \
+ t[1] & 0xffffffff, t[0] & 0xffffffff); \
+ } \
+ else \
+ { \
+ char str[30]; \
+ REAL_VALUE_TO_DECIMAL (VALUE, "%.20e", str); \
+ fprintf (FILE, "\t.t_floating %s\n", str); \
+ } \
+ }
+
+/* This is how to output an assembler line defining a `float' constant. */
+
+#define ASM_OUTPUT_FLOAT(FILE,VALUE) \
+ { \
+ if (REAL_VALUE_ISINF (VALUE) \
+ || REAL_VALUE_ISNAN (VALUE) \
+ || REAL_VALUE_MINUS_ZERO (VALUE)) \
+ { \
+ long t; \
+ REAL_VALUE_TO_TARGET_SINGLE ((VALUE), t); \
+ fprintf (FILE, "\t.long 0x%lx\n", t & 0xffffffff); \
+ } \
+ else \
+ { \
+ char str[30]; \
+ REAL_VALUE_TO_DECIMAL ((VALUE), "%.20e", str); \
+ fprintf (FILE, "\t.s_floating %s\n", str); \
+ } \
+ }
+
+/* This is how to output an assembler line defining an `int' constant. */
+
+#define ASM_OUTPUT_INT(FILE,VALUE) \
+( fprintf (FILE, "\t.long "), \
+ output_addr_const (FILE, (VALUE)), \
+ fprintf (FILE, "\n"))
+
+/* This is how to output an assembler line defining a `long' constant. */
+
+#define ASM_OUTPUT_DOUBLE_INT(FILE,VALUE) \
+( fprintf (FILE, "\t.quad "), \
+ output_addr_const (FILE, (VALUE)), \
+ fprintf (FILE, "\n"))
+
+/* Likewise for `char' and `short' constants. */
+
+#define ASM_OUTPUT_SHORT(FILE,VALUE) \
+ fprintf (FILE, "\t.word %d\n", \
+ (GET_CODE (VALUE) == CONST_INT \
+ ? INTVAL (VALUE) & 0xffff : (abort (), 0)))
+
+#define ASM_OUTPUT_CHAR(FILE,VALUE) \
+ fprintf (FILE, "\t.byte %d\n", \
+ (GET_CODE (VALUE) == CONST_INT \
+ ? INTVAL (VALUE) & 0xff : (abort (), 0)))
+
+/* We use the default ASCII-output routine, except that we don't write more
+ than 50 characters since the assembler doesn't support very long lines. */
+
+#define ASM_OUTPUT_ASCII(MYFILE, MYSTRING, MYLENGTH) \
+ do { \
+ FILE *_hide_asm_out_file = (MYFILE); \
+ unsigned char *_hide_p = (unsigned char *) (MYSTRING); \
+ int _hide_thissize = (MYLENGTH); \
+ int _size_so_far = 0; \
+ { \
+ FILE *asm_out_file = _hide_asm_out_file; \
+ unsigned char *p = _hide_p; \
+ int thissize = _hide_thissize; \
+ int i; \
+ fprintf (asm_out_file, "\t.ascii \""); \
+ \
+ for (i = 0; i < thissize; i++) \
+ { \
+ register int c = p[i]; \
+ \
+ if (_size_so_far ++ > 50 && i < thissize - 4) \
+ _size_so_far = 0, fprintf (asm_out_file, "\"\n\t.ascii \""); \
+ \
+ if (c == '\"' || c == '\\') \
+ putc ('\\', asm_out_file); \
+ if (c >= ' ' && c < 0177) \
+ putc (c, asm_out_file); \
+ else \
+ { \
+ fprintf (asm_out_file, "\\%o", c); \
+ /* After an octal-escape, if a digit follows, \
+ terminate one string constant and start another. \
+ The Vax assembler fails to stop reading the escape \
+ after three digits, so this is the only way we \
+ can get it to parse the data properly. */ \
+ if (i < thissize - 1 \
+ && p[i + 1] >= '0' && p[i + 1] <= '9') \
+ _size_so_far = 0, fprintf (asm_out_file, "\"\n\t.ascii \""); \
+ } \
+ } \
+ fprintf (asm_out_file, "\"\n"); \
+ } \
+ } \
+ while (0)
+
+/* This is how to output an insn to push a register on the stack.
+ It need not be very fast code. */
+
+#define ASM_OUTPUT_REG_PUSH(FILE,REGNO) \
+ fprintf (FILE, "\tsubq $30,8,$30\n\tst%s $%s%d,0($30)\n", \
+ (REGNO) > 32 ? "t" : "q", (REGNO) > 32 ? "f" : "", \
+ (REGNO) & 31);
+
+/* This is how to output an insn to pop a register from the stack.
+ It need not be very fast code. */
+
+#define ASM_OUTPUT_REG_POP(FILE,REGNO) \
+ fprintf (FILE, "\tld%s $%s%d,0($30)\n\taddq $30,8,$30\n", \
+ (REGNO) > 32 ? "t" : "q", (REGNO) > 32 ? "f" : "", \
+ (REGNO) & 31);
+
+/* This is how to output an assembler line for a numeric constant byte. */
+
+#define ASM_OUTPUT_BYTE(FILE,VALUE) \
+ fprintf (FILE, "\t.byte 0x%x\n", (VALUE) & 0xff)
+
+/* This is how to output an element of a case-vector that is absolute.
+ (Alpha does not use such vectors, but we must define this macro anyway.) */
+
+#define ASM_OUTPUT_ADDR_VEC_ELT(FILE, VALUE) abort ()
+
+/* This is how to output an element of a case-vector that is relative. */
+
+#if WINDOWS_NT
+#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, VALUE, REL) \
+ fprintf (FILE, "\t.long $%d\n", (VALUE) + 32)
+#else
+#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, VALUE, REL) \
+ fprintf (FILE, "\t.gprel32 $%d\n", (VALUE) + 32)
+#endif
+
+/* This is how to output an assembler line
+ that says to advance the location counter
+ to a multiple of 2**LOG bytes. */
+
+#define ASM_OUTPUT_ALIGN(FILE,LOG) \
+ if ((LOG) != 0) \
+ fprintf (FILE, "\t.align %d\n", LOG);
+
+/* This is how to advance the location counter by SIZE bytes. */
+
+#define ASM_OUTPUT_SKIP(FILE,SIZE) \
+ fprintf (FILE, "\t.space %d\n", (SIZE))
+
+/* This says how to output an assembler line
+ to define a global common symbol. */
+
+#define ASM_OUTPUT_COMMON(FILE, NAME, SIZE, ROUNDED) \
+( fputs ("\t.comm ", (FILE)), \
+ assemble_name ((FILE), (NAME)), \
+ fprintf ((FILE), ",%d\n", (SIZE)))
+
+/* This says how to output an assembler line
+ to define a local common symbol. */
+
+#define ASM_OUTPUT_LOCAL(FILE, NAME, SIZE,ROUNDED) \
+( fputs ("\t.lcomm ", (FILE)), \
+ assemble_name ((FILE), (NAME)), \
+ fprintf ((FILE), ",%d\n", (SIZE)))
+
+/* Store in OUTPUT a string (made with alloca) containing
+ an assembler-name for a local static variable named NAME.
+ LABELNO is an integer which is different for each call. */
+
+#define ASM_FORMAT_PRIVATE_NAME(OUTPUT, NAME, LABELNO) \
+( (OUTPUT) = (char *) alloca (strlen ((NAME)) + 10), \
+ sprintf ((OUTPUT), "%s.%d", (NAME), (LABELNO)))
+
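+/* For example (illustrative values), a local static named "counter" with
+   LABELNO 3 is given the assembler name "counter.3".  */
+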
+/* Define the parentheses used to group arithmetic operations
+ in assembler code. */
+
+#define ASM_OPEN_PAREN "("
+#define ASM_CLOSE_PAREN ")"
+
+/* Define results of standard character escape sequences. */
+#define TARGET_BELL 007
+#define TARGET_BS 010
+#define TARGET_TAB 011
+#define TARGET_NEWLINE 012
+#define TARGET_VT 013
+#define TARGET_FF 014
+#define TARGET_CR 015
+
+/* Print operand X (an rtx) in assembler syntax to file FILE.
+ CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
+ For `%' followed by punctuation, CODE is the punctuation and X is null. */
+
+#define PRINT_OPERAND(FILE, X, CODE) print_operand (FILE, X, CODE)
+
+/* Determine which codes are valid without a following integer. These must
+ not be alphabetic. */
+
+#define PRINT_OPERAND_PUNCT_VALID_P(CODE) 0
+
+/* Print a memory address as an operand to reference that memory location. */
+
+#define PRINT_OPERAND_ADDRESS(FILE, ADDR) \
+{ rtx addr = (ADDR); \
+ int basereg = 31; \
+ HOST_WIDE_INT offset = 0; \
+ \
+ if (GET_CODE (addr) == AND) \
+ addr = XEXP (addr, 0); \
+ \
+ if (GET_CODE (addr) == REG) \
+ basereg = REGNO (addr); \
+ else if (GET_CODE (addr) == CONST_INT) \
+ offset = INTVAL (addr); \
+ else if (GET_CODE (addr) == PLUS \
+ && GET_CODE (XEXP (addr, 0)) == REG \
+ && GET_CODE (XEXP (addr, 1)) == CONST_INT) \
+ basereg = REGNO (XEXP (addr, 0)), offset = INTVAL (XEXP (addr, 1)); \
+ else \
+ abort (); \
+ \
+ fprintf (FILE, "%d($%d)", offset, basereg); \
+}
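+
+/* For illustration (not from the original sources): a bare register such as
+   (reg $16) prints as "0($16)", (plus (reg $30) (const_int 8)) prints as
+   "8($30)", and a lone (const_int 64) prints as "64($31)", an offset from
+   the always-zero register.  A top-level AND, as generated for unaligned
+   accesses, is stripped before the address is decomposed.  */
+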
+/* Define the codes that are matched by predicates in alpha.c. */
+
+#define PREDICATE_CODES \
+ {"reg_or_0_operand", {SUBREG, REG, CONST_INT}}, \
+ {"reg_or_6bit_operand", {SUBREG, REG, CONST_INT}}, \
+ {"reg_or_8bit_operand", {SUBREG, REG, CONST_INT}}, \
+ {"cint8_operand", {CONST_INT}}, \
+ {"reg_or_cint_operand", {SUBREG, REG, CONST_INT}}, \
+ {"add_operand", {SUBREG, REG, CONST_INT}}, \
+ {"sext_add_operand", {SUBREG, REG, CONST_INT}}, \
+ {"const48_operand", {CONST_INT}}, \
+ {"and_operand", {SUBREG, REG, CONST_INT}}, \
+ {"or_operand", {SUBREG, REG, CONST_INT}}, \
+ {"mode_mask_operand", {CONST_INT}}, \
+ {"mul8_operand", {CONST_INT}}, \
+ {"mode_width_operand", {CONST_INT}}, \
+ {"reg_or_fp0_operand", {SUBREG, REG, CONST_DOUBLE}}, \
+ {"alpha_comparison_operator", {EQ, LE, LT, LEU, LTU}}, \
+ {"signed_comparison_operator", {EQ, NE, LE, LT, GE, GT}}, \
+ {"divmod_operator", {DIV, MOD, UDIV, UMOD}}, \
+ {"fp0_operand", {CONST_DOUBLE}}, \
+ {"current_file_function_operand", {SYMBOL_REF}}, \
+ {"call_operand", {REG, SYMBOL_REF}}, \
+ {"input_operand", {SUBREG, REG, MEM, CONST_INT, CONST_DOUBLE, \
+ SYMBOL_REF, CONST, LABEL_REF}}, \
+ {"some_operand", {SUBREG, REG, MEM, CONST_INT, CONST_DOUBLE, \
+ SYMBOL_REF, CONST, LABEL_REF}}, \
+ {"aligned_memory_operand", {MEM}}, \
+ {"unaligned_memory_operand", {MEM}}, \
+ {"any_memory_operand", {MEM}},
+
+/* Tell collect that the object format is ECOFF. */
+#define OBJECT_FORMAT_COFF
+#define EXTENDED_COFF
+
+/* If we use NM, pass -g to it so it only lists globals. */
+#define NM_FLAGS "-pg"
+
+/* Definitions for debugging. */
+
+#define SDB_DEBUGGING_INFO /* generate info for mips-tfile */
+#define DBX_DEBUGGING_INFO /* generate embedded stabs */
+#define MIPS_DEBUGGING_INFO /* MIPS specific debugging info */
+
+#ifndef PREFERRED_DEBUGGING_TYPE /* assume SDB_DEBUGGING_INFO */
+#define PREFERRED_DEBUGGING_TYPE \
+ ((len > 1 && !strncmp (str, "ggdb", len)) ? DBX_DEBUG : SDB_DEBUG)
+#endif
+
+
+/* Correct the offset of automatic variables and arguments. Note that
+ the Alpha debug format wants all automatic variables and arguments
+ to be in terms of two different offsets from the virtual frame pointer,
+ which is the stack pointer before any adjustment in the function.
+ The offset for the argument pointer is fixed for the native compiler;
+ it is either zero (for the no arguments case) or large enough to hold
+ all argument registers.
+ The offset for the auto pointer is the fourth argument to the .frame
+ directive (local_offset).
+ To stay compatible with the native tools we use the same offsets
+ from the virtual frame pointer and adjust the debugger arg/auto offsets
+ accordingly. These debugger offsets are set up in output_prolog. */
+
+extern long alpha_arg_offset;
+extern long alpha_auto_offset;
+#define DEBUGGER_AUTO_OFFSET(X) \
+ ((GET_CODE (X) == PLUS ? INTVAL (XEXP (X, 1)) : 0) + alpha_auto_offset)
+#define DEBUGGER_ARG_OFFSET(OFFSET, X) (OFFSET + alpha_arg_offset)
+
+
+#define ASM_OUTPUT_SOURCE_LINE(STREAM, LINE) \
+ alpha_output_lineno (STREAM, LINE)
+extern void alpha_output_lineno ();
+
+#define ASM_OUTPUT_SOURCE_FILENAME(STREAM, NAME) \
+ alpha_output_filename (STREAM, NAME)
+extern void alpha_output_filename ();
+
+
+/* mips-tfile.c limits us to strings of one page. */
+#define DBX_CONTIN_LENGTH 4000
+
+/* By default, turn on GDB extensions. */
+#define DEFAULT_GDB_EXTENSIONS 1
+
+/* If we are smuggling stabs through the ALPHA ECOFF object
+ format, put a comment in front of the .stab<x> operation so
+ that the ALPHA assembler does not choke. The mips-tfile program
+ will correctly put the stab into the object file. */
+
+#define ASM_STABS_OP ((TARGET_GAS) ? ".stabs" : " #.stabs")
+#define ASM_STABN_OP ((TARGET_GAS) ? ".stabn" : " #.stabn")
+#define ASM_STABD_OP ((TARGET_GAS) ? ".stabd" : " #.stabd")
+
+/* Forward references to tags are allowed. */
+#define SDB_ALLOW_FORWARD_REFERENCES
+
+/* Unknown tags are also allowed. */
+#define SDB_ALLOW_UNKNOWN_REFERENCES
+
+#define PUT_SDB_DEF(a) \
+do { \
+ fprintf (asm_out_file, "\t%s.def\t", \
+ (TARGET_GAS) ? "" : "#"); \
+ ASM_OUTPUT_LABELREF (asm_out_file, a); \
+ fputc (';', asm_out_file); \
+} while (0)
+
+#define PUT_SDB_PLAIN_DEF(a) \
+do { \
+ fprintf (asm_out_file, "\t%s.def\t.%s;", \
+ (TARGET_GAS) ? "" : "#", (a)); \
+} while (0)
+
+#define PUT_SDB_TYPE(a) \
+do { \
+ fprintf (asm_out_file, "\t.type\t0x%x;", (a)); \
+} while (0)
+
+/* For block start and end, we create labels, so that
+ later we can figure out where the correct offset is.
+ The normal .ent/.end serve well enough for functions,
+ so those are just commented out. */
+
+extern int sdb_label_count; /* block start/end next label # */
+
+#define PUT_SDB_BLOCK_START(LINE) \
+do { \
+ fprintf (asm_out_file, \
+ "$Lb%d:\n\t%s.begin\t$Lb%d\t%d\n", \
+ sdb_label_count, \
+ (TARGET_GAS) ? "" : "#", \
+ sdb_label_count, \
+ (LINE)); \
+ sdb_label_count++; \
+} while (0)
+
+#define PUT_SDB_BLOCK_END(LINE) \
+do { \
+ fprintf (asm_out_file, \
+ "$Le%d:\n\t%s.bend\t$Le%d\t%d\n", \
+ sdb_label_count, \
+ (TARGET_GAS) ? "" : "#", \
+ sdb_label_count, \
+ (LINE)); \
+ sdb_label_count++; \
+} while (0)
+
+#define PUT_SDB_FUNCTION_START(LINE)
+
+#define PUT_SDB_FUNCTION_END(LINE)
+
+#define PUT_SDB_EPILOGUE_END(NAME)
+
+/* No point in running CPP on our assembler output. */
+#if ((TARGET_DEFAULT | TARGET_CPU_DEFAULT) & MASK_GAS) != 0
+/* Don't pass -g to GNU as, because some versions don't accept this option. */
+#define ASM_SPEC "%{malpha-as:-g} -nocpp %{pg}"
+#else
+/* In OSF/1 v3.2c, the assembler by default does not output file names, which
+ causes mips-tfile to fail. Passing -g to the assembler fixes this problem.
+ ??? Strictly speaking, we only need -g if the user specifies -g. Passing
+ it always means that we get slightly larger than necessary object files
+ if the user does not specify -g. If we don't pass -g, then mips-tfile
+ will need to be fixed to work in this case. */
+#define ASM_SPEC "%{!mgas:-g} -nocpp %{pg}"
+#endif
+
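+/* Illustrative example of the specs above: when the native assembler is the
+   default (the second ASM_SPEC), compiling with -pg passes "-g -nocpp -pg"
+   to the assembler, while giving -mgas drops the -g.  */
+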
+/* Specify to run a post-processor, mips-tfile, after the assembler
+ has run to stuff the ECOFF debug information into the object file.
+ This is needed because the Alpha assembler provides no way
+ of specifying such information in the assembly file. */
+
+#if ((TARGET_DEFAULT | TARGET_CPU_DEFAULT) & MASK_GAS) != 0
+
+#define ASM_FINAL_SPEC "\
+%{malpha-as: %{!mno-mips-tfile: \
+ \n mips-tfile %{v*: -v} \
+ %{K: -I %b.o~} \
+ %{!K: %{save-temps: -I %b.o~}} \
+ %{c:%W{o*}%{!o*:-o %b.o}}%{!c:-o %U.o} \
+ %{.s:%i} %{!.s:%g.s}}}"
+
+#else
+#define ASM_FINAL_SPEC "\
+%{!mgas: %{!mno-mips-tfile: \
+ \n mips-tfile %{v*: -v} \
+ %{K: -I %b.o~} \
+ %{!K: %{save-temps: -I %b.o~}} \
+ %{c:%W{o*}%{!o*:-o %b.o}}%{!c:-o %U.o} \
+ %{.s:%i} %{!.s:%g.s}}}"
+
+#endif
+
+/* Macros for mips-tfile.c to encapsulate stabs in ECOFF, and for
+ mips-tdump.c to print them out.
+
+ These must match the corresponding definitions in gdb/mipsread.c.
+ Unfortunately, gcc and gdb do not currently share any directories. */
+
+#define CODE_MASK 0x8F300
+#define MIPS_IS_STAB(sym) (((sym)->index & 0xFFF00) == CODE_MASK)
+#define MIPS_MARK_STAB(code) ((code)+CODE_MASK)
+#define MIPS_UNMARK_STAB(code) ((code)-CODE_MASK)
+
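+/* A small standalone sketch (not part of the port) showing, with the macros
+   just above, how a stabs code is folded into an ECOFF symbol index and
+   recovered; the struct and the 0x80 (N_LSYM) code are only illustrative.  */
+#if 0
+#include <assert.h>
+#include <stdio.h>
+
+struct sym { unsigned index; };
+
+int
+main (void)
+{
+  unsigned code = 0x80;             /* a stabs type code, e.g. N_LSYM */
+  struct sym s;
+
+  s.index = MIPS_MARK_STAB (code);  /* 0x80 + 0x8F300 = 0x8F380 */
+  assert (MIPS_IS_STAB (&s));       /* high bits match CODE_MASK */
+  assert (MIPS_UNMARK_STAB (s.index) == code);
+  printf ("0x%X\n", s.index);       /* prints 0x8F380 */
+  return 0;
+}
+#endif
+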
+/* Override some mips-tfile definitions. */
+
+#define SHASH_SIZE 511
+#define THASH_SIZE 55
+
+/* Align ecoff symbol tables to avoid OSF1/1.3 nm complaints. */
+
+#define ALIGN_SYMTABLE_OFFSET(OFFSET) (((OFFSET) + 7) & ~7)
+
+/* The system headers under OSF/1 are C++-aware. */
+#define NO_IMPLICIT_EXTERN_C
+
+/* Also define __LANGUAGE_C__ when running fix-header. */
+#define FIXPROTO_INIT(CPPFILE) cpp_define (CPPFILE, "__LANGUAGE_C__")
+
+/* The linker will stick __main into the .init section. */
+#define HAS_INIT_SECTION
+#define LD_INIT_SWITCH "-init"
+#define LD_FINI_SWITCH "-fini"
+
+/* We do want to link in libgcc when building shared libraries under OSF/1. */
+#define LIBGCC_SPEC "-lgcc"
diff --git a/contrib/gcc/config/alpha/alpha.md b/contrib/gcc/config/alpha/alpha.md
new file mode 100644
index 0000000..72c54ee
--- /dev/null
+++ b/contrib/gcc/config/alpha/alpha.md
@@ -0,0 +1,3770 @@
+;; Machine description for DEC Alpha for GNU C compiler
+;; Copyright (C) 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
+;; Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+;; This file is part of GNU CC.
+
+;; GNU CC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+
+;; GNU CC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GNU CC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 59 Temple Place - Suite 330,
+;; Boston, MA 02111-1307, USA.
+
+;;- See file "rtl.def" for documentation on define_insn, match_*, et al.
+
+;; Define an insn type attribute. This is used in function unit delay
+;; computations, among other purposes. For the most part, we use the names
+;; defined in the EV4 documentation, but add a few that we have to know about
+;; separately.
+
+(define_attr "type"
+ "ld,st,ibr,fbr,jsr,iaddlog,shiftcm,icmp,imull,imulq,fpop,fdivs,fdivt,ldsym,isubr"
+ (const_string "shiftcm"))
+
+;; We include four function units: ABOX, which computes the address,
+;; BBOX, used for branches, EBOX, used for integer operations, and FBOX,
+;; used for FP operations.
+;;
+;; We assume that we have been successful in getting double issues and
+;; hence multiply all costs by two insns per cycle. The minimum time in
+;; a function unit is 2 cycles, which will tend to produce the double
+;; issues.
+
+;; Memory delivers its result in three cycles.
+(define_function_unit "abox" 1 0 (eq_attr "type" "ld,ldsym,st") 6 2)
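+;; (With the doubling described above, that 3-cycle latency becomes the
+;; ready delay of 6 in the unit definition, and the issue delay of 2
+;; corresponds to one cycle.)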
+
+;; Branches have no delay cost, but do tie up the unit for two cycles.
+(define_function_unit "bbox" 1 1 (eq_attr "type" "ibr,fbr,jsr") 4 4)
+
+;; Arithmetic insns normally have their results available after two
+;; cycles. There are a number of exceptions. They are encoded in
+;; ADJUST_COST. Some of the other insns have similar exceptions.
+
+(define_function_unit "ebox" 1 0 (eq_attr "type" "iaddlog,shiftcm,icmp") 4 2)
+
+;; These really don't take up the integer pipeline, but they do occupy
+;; IBOX1; we approximate here.
+
+(define_function_unit "ebox" 1 0 (eq_attr "type" "imull") 42 2)
+(define_function_unit "ebox" 1 0 (eq_attr "type" "imulq") 46 2)
+
+(define_function_unit "imult" 1 0 (eq_attr "type" "imull") 42 38)
+(define_function_unit "imult" 1 0 (eq_attr "type" "imulq") 46 42)
+
+(define_function_unit "fbox" 1 0 (eq_attr "type" "fpop") 12 2)
+
+(define_function_unit "fbox" 1 0 (eq_attr "type" "fdivs") 68 0)
+(define_function_unit "fbox" 1 0 (eq_attr "type" "fdivt") 126 0)
+
+(define_function_unit "divider" 1 0 (eq_attr "type" "fdivs") 68 60)
+(define_function_unit "divider" 1 0 (eq_attr "type" "fdivt") 126 118)
+
+;; First define the arithmetic insns. Note that the 32-bit forms also
+;; sign-extend.
+
+;; Note that we can do sign extensions in both FP and integer registers.
+;; However, the result must be in the same type of register as the input.
+;; The register preferencing code can't handle this case very well, so, for
+;; now, don't let the FP case show up here for preferencing. Also,
+;; sign-extends in FP registers take two instructions.
+(define_insn "extendsidi2"
+ [(set (match_operand:DI 0 "register_operand" "=r,r,*f")
+ (sign_extend:DI (match_operand:SI 1 "nonimmediate_operand" "r,m,*f")))]
+ ""
+ "@
+ addl %1,$31,%0
+ ldl %0,%1
+ cvtql %1,%0\;cvtlq %0,%0"
+ [(set_attr "type" "iaddlog,ld,fpop")])
+
+;; Do addsi3 the way expand_binop would do it if we didn't have one. This
+;; generates better code. We have the anonymous addsi3 pattern below in
+;; case combine wants to make it.
+(define_expand "addsi3"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (plus:SI (match_operand:SI 1 "reg_or_0_operand" "")
+ (match_operand:SI 2 "add_operand" "")))]
+ ""
+ "
+{ emit_insn (gen_rtx (SET, VOIDmode, gen_lowpart (DImode, operands[0]),
+ gen_rtx (PLUS, DImode,
+ gen_lowpart (DImode, operands[1]),
+ gen_lowpart (DImode, operands[2]))));
+ DONE;
+} ")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
+ (plus:SI (match_operand:SI 1 "reg_or_0_operand" "%rJ,rJ,rJ,rJ")
+ (match_operand:SI 2 "add_operand" "rI,O,K,L")))]
+ ""
+ "@
+ addl %r1,%2,%0
+ subl %r1,%n2,%0
+ lda %0,%2(%r1)
+ ldah %0,%h2(%r1)"
+ [(set_attr "type" "iaddlog")])
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (plus:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")))]
+ "! add_operand (operands[2], SImode)"
+ [(set (match_dup 0) (plus:SI (match_dup 1) (match_dup 3)))
+ (set (match_dup 0) (plus:SI (match_dup 0) (match_dup 4)))]
+ "
+{
+ HOST_WIDE_INT val = INTVAL (operands[2]);
+ HOST_WIDE_INT low = (val & 0xffff) - 2 * (val & 0x8000);
+ HOST_WIDE_INT rest = val - low;
+
+ operands[3] = GEN_INT (rest);
+ operands[4] = GEN_INT (low);
+}")
+
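+;; As a worked example (illustrative constant only): for an addition of
+;; 0x123A8765 the split above produces low = 0x8765 - 2*0x8000 = -0x789B,
+;; a sign-extended 16-bit value, and rest = 0x123A8765 - (-0x789B)
+;; = 0x123B0000, whose low 16 bits are clear; the two pieces are then added
+;; back by the pattern above (presumably via its lda and ldah alternatives).
+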
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (sign_extend:DI
+ (plus:SI (match_operand:SI 1 "reg_or_0_operand" "%rJ,rJ")
+ (match_operand:SI 2 "sext_add_operand" "rI,O"))))]
+ ""
+ "@
+ addl %r1,%2,%0
+ subl %r1,%n2,%0"
+ [(set_attr "type" "iaddlog")])
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (sign_extend:DI
+ (plus:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "const_int_operand" ""))))
+ (clobber (match_operand:SI 3 "register_operand" ""))]
+ "! sext_add_operand (operands[2], SImode) && INTVAL (operands[2]) > 0
+ && INTVAL (operands[2]) % 4 == 0"
+ [(set (match_dup 3) (match_dup 4))
+ (set (match_dup 0) (sign_extend:DI (plus:SI (mult:SI (match_dup 3)
+ (match_dup 5))
+ (match_dup 1))))]
+ "
+{
+ HOST_WIDE_INT val = INTVAL (operands[2]) / 4;
+ int mult = 4;
+
+ if (val % 2 == 0)
+ val /= 2, mult = 8;
+
+ operands[4] = GEN_INT (val);
+ operands[5] = GEN_INT (mult);
+}")
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (sign_extend:DI
+ (plus:SI (match_operator:SI 1 "comparison_operator"
+ [(match_operand 2 "" "")
+ (match_operand 3 "" "")])
+ (match_operand:SI 4 "add_operand" ""))))
+ (clobber (match_operand:DI 5 "register_operand" ""))]
+ ""
+ [(set (match_dup 5) (match_dup 6))
+ (set (match_dup 0) (sign_extend:DI (plus:SI (match_dup 7) (match_dup 4))))]
+ "
+{
+ operands[6] = gen_rtx (GET_CODE (operands[1]), DImode,
+ operands[2], operands[3]);
+ operands[7] = gen_lowpart (SImode, operands[5]);
+}")
+
+(define_insn "adddi3"
+ [(set (match_operand:DI 0 "register_operand" "=r,r,r,r")
+ (plus:DI (match_operand:DI 1 "reg_or_0_operand" "%rJ,rJ,rJ,rJ")
+ (match_operand:DI 2 "add_operand" "rI,O,K,L")))]
+ ""
+ "@
+ addq %r1,%2,%0
+ subq %r1,%n2,%0
+ lda %0,%2(%r1)
+ ldah %0,%h2(%r1)"
+ [(set_attr "type" "iaddlog")])
+
+;; Don't do this if we are adjusting SP since we don't want to do
+;; it in two steps.
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (plus:DI (match_operand:DI 1 "register_operand" "")
+ (match_operand:DI 2 "const_int_operand" "")))]
+ "! add_operand (operands[2], DImode)
+ && REGNO (operands[0]) != STACK_POINTER_REGNUM"
+ [(set (match_dup 0) (plus:DI (match_dup 1) (match_dup 3)))
+ (set (match_dup 0) (plus:DI (match_dup 0) (match_dup 4)))]
+ "
+{
+ HOST_WIDE_INT val = INTVAL (operands[2]);
+ HOST_WIDE_INT low = (val & 0xffff) - 2 * (val & 0x8000);
+ HOST_WIDE_INT rest = val - low;
+
+ operands[3] = GEN_INT (rest);
+ operands[4] = GEN_INT (low);
+}")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (plus:SI (mult:SI (match_operand:SI 1 "reg_or_0_operand" "rJ,rJ")
+ (match_operand:SI 2 "const48_operand" "I,I"))
+ (match_operand:SI 3 "sext_add_operand" "rI,O")))]
+ ""
+ "@
+ s%2addl %r1,%3,%0
+ s%2subl %r1,%n3,%0"
+ [(set_attr "type" "iaddlog")])
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (sign_extend:DI
+ (plus:SI (mult:SI (match_operand:SI 1 "reg_or_0_operand" "rJ,rJ")
+ (match_operand:SI 2 "const48_operand" "I,I"))
+ (match_operand:SI 3 "sext_add_operand" "rI,O"))))]
+ ""
+ "@
+ s%2addl %r1,%3,%0
+ s%2subl %r1,%n3,%0"
+ [(set_attr "type" "iaddlog")])
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (sign_extend:DI
+ (plus:SI (mult:SI (match_operator:SI 1 "comparison_operator"
+ [(match_operand 2 "" "")
+ (match_operand 3 "" "")])
+ (match_operand:SI 4 "const48_operand" ""))
+ (match_operand:SI 5 "add_operand" ""))))
+ (clobber (match_operand:DI 6 "register_operand" ""))]
+ ""
+ [(set (match_dup 6) (match_dup 7))
+ (set (match_dup 0)
+ (sign_extend:DI (plus:SI (mult:SI (match_dup 8) (match_dup 4))
+ (match_dup 5))))]
+ "
+{
+ operands[7] = gen_rtx (GET_CODE (operands[1]), DImode,
+ operands[2], operands[3]);
+ operands[8] = gen_lowpart (SImode, operands[6]);
+}")
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (plus:DI (mult:DI (match_operand:DI 1 "reg_or_0_operand" "rJ,rJ")
+ (match_operand:DI 2 "const48_operand" "I,I"))
+ (match_operand:DI 3 "reg_or_8bit_operand" "rI,O")))]
+ ""
+ "@
+ s%2addq %r1,%3,%0
+ s%2subq %1,%n3,%0"
+ [(set_attr "type" "iaddlog")])
+
+;; These variants of the above insns can occur if the third operand
+;; is the frame pointer. This is a kludge, but there doesn't
+;; seem to be a way around it. Only recognize them while reloading.
+
+(define_insn ""
+ [(set (match_operand:DI 0 "some_operand" "=&r")
+ (plus:DI (plus:DI (match_operand:DI 1 "some_operand" "r")
+ (match_operand:DI 2 "some_operand" "r"))
+ (match_operand:DI 3 "some_operand" "rIOKL")))]
+ "reload_in_progress"
+ "#"
+ [(set_attr "type" "iaddlog")])
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (plus:DI (plus:DI (match_operand:DI 1 "register_operand" "")
+ (match_operand:DI 2 "register_operand" ""))
+ (match_operand:DI 3 "add_operand" "")))]
+ "reload_completed"
+ [(set (match_dup 0) (plus:DI (match_dup 1) (match_dup 2)))
+ (set (match_dup 0) (plus:DI (match_dup 0) (match_dup 3)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "some_operand" "=&r")
+ (plus:SI (plus:SI (mult:SI (match_operand:SI 1 "some_operand" "rJ")
+ (match_operand:SI 2 "const48_operand" "I"))
+ (match_operand:SI 3 "some_operand" "r"))
+ (match_operand:SI 4 "some_operand" "rIOKL")))]
+ "reload_in_progress"
+ "#"
+ [(set_attr "type" "iaddlog")])
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "r")
+ (plus:SI (plus:SI (mult:SI (match_operand:SI 1 "reg_or_0_operand" "")
+ (match_operand:SI 2 "const48_operand" ""))
+ (match_operand:SI 3 "register_operand" ""))
+ (match_operand:SI 4 "add_operand" "rIOKL")))]
+ "reload_completed"
+ [(set (match_dup 0)
+ (plus:SI (mult:SI (match_dup 1) (match_dup 2)) (match_dup 3)))
+ (set (match_dup 0) (plus:SI (match_dup 0) (match_dup 4)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:DI 0 "some_operand" "=&r")
+ (sign_extend:DI
+ (plus:SI (plus:SI
+ (mult:SI (match_operand:SI 1 "some_operand" "rJ")
+ (match_operand:SI 2 "const48_operand" "I"))
+ (match_operand:SI 3 "some_operand" "r"))
+ (match_operand:SI 4 "some_operand" "rIOKL"))))]
+ "reload_in_progress"
+ "#"
+ [(set_attr "type" "iaddlog")])
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (sign_extend:DI
+ (plus:SI (plus:SI
+ (mult:SI (match_operand:SI 1 "reg_or_0_operand" "")
+ (match_operand:SI 2 "const48_operand" ""))
+ (match_operand:SI 3 "register_operand" ""))
+ (match_operand:SI 4 "add_operand" ""))))]
+ "reload_completed"
+ [(set (match_dup 5)
+ (plus:SI (mult:SI (match_dup 1) (match_dup 2)) (match_dup 3)))
+ (set (match_dup 0) (sign_extend:DI (plus:SI (match_dup 5) (match_dup 4))))]
+ "
+{ operands[5] = gen_lowpart (SImode, operands[0]);
+}")
+
+(define_insn ""
+ [(set (match_operand:DI 0 "some_operand" "=&r")
+ (plus:DI (plus:DI (mult:DI (match_operand:DI 1 "some_operand" "rJ")
+ (match_operand:DI 2 "const48_operand" "I"))
+ (match_operand:DI 3 "some_operand" "r"))
+ (match_operand:DI 4 "some_operand" "rIOKL")))]
+ "reload_in_progress"
+ "#"
+ [(set_attr "type" "iaddlog")])
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "=")
+ (plus:DI (plus:DI (mult:DI (match_operand:DI 1 "reg_or_0_operand" "")
+ (match_operand:DI 2 "const48_operand" ""))
+ (match_operand:DI 3 "register_operand" ""))
+ (match_operand:DI 4 "add_operand" "")))]
+ "reload_completed"
+ [(set (match_dup 0)
+ (plus:DI (mult:DI (match_dup 1) (match_dup 2)) (match_dup 3)))
+ (set (match_dup 0) (plus:DI (match_dup 0) (match_dup 4)))]
+ "")
+
+(define_insn "negsi2"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (neg:SI (match_operand:SI 1 "reg_or_8bit_operand" "rI")))]
+ ""
+ "subl $31,%1,%0"
+ [(set_attr "type" "iaddlog")])
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (sign_extend:DI (neg:SI
+ (match_operand:SI 1 "reg_or_8bit_operand" "rI"))))]
+ ""
+ "subl $31,%1,%0"
+ [(set_attr "type" "iaddlog")])
+
+(define_insn "negdi2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (neg:DI (match_operand:DI 1 "reg_or_8bit_operand" "rI")))]
+ ""
+ "subq $31,%1,%0"
+ [(set_attr "type" "iaddlog")])
+
+(define_expand "subsi3"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (minus:SI (match_operand:SI 1 "reg_or_0_operand" "")
+ (match_operand:SI 2 "reg_or_8bit_operand" "")))]
+ ""
+ "
+{ emit_insn (gen_rtx (SET, VOIDmode, gen_lowpart (DImode, operands[0]),
+ gen_rtx (MINUS, DImode,
+ gen_lowpart (DImode, operands[1]),
+ gen_lowpart (DImode, operands[2]))));
+ DONE;
+
+} ")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (minus:SI (match_operand:SI 1 "reg_or_0_operand" "rJ")
+ (match_operand:SI 2 "reg_or_8bit_operand" "rI")))]
+ ""
+ "subl %r1,%2,%0"
+ [(set_attr "type" "iaddlog")])
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (sign_extend:DI (minus:SI (match_operand:SI 1 "reg_or_0_operand" "rJ")
+ (match_operand:SI 2 "reg_or_8bit_operand" "rI"))))]
+ ""
+ "subl %r1,%2,%0"
+ [(set_attr "type" "iaddlog")])
+
+(define_insn "subdi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (minus:DI (match_operand:DI 1 "reg_or_0_operand" "rJ")
+ (match_operand:DI 2 "reg_or_8bit_operand" "rI")))]
+ ""
+ "subq %r1,%2,%0"
+ [(set_attr "type" "iaddlog")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (minus:SI (mult:SI (match_operand:SI 1 "reg_or_0_operand" "rJ")
+ (match_operand:SI 2 "const48_operand" "I"))
+ (match_operand:SI 3 "reg_or_8bit_operand" "rI")))]
+ ""
+ "s%2subl %r1,%3,%0"
+ [(set_attr "type" "iaddlog")])
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (sign_extend:DI
+ (minus:SI (mult:SI (match_operand:SI 1 "reg_or_0_operand" "rJ")
+ (match_operand:SI 2 "const48_operand" "I"))
+ (match_operand:SI 3 "reg_or_8bit_operand" "rI"))))]
+ ""
+ "s%2subl %r1,%3,%0"
+ [(set_attr "type" "iaddlog")])
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (minus:DI (mult:DI (match_operand:DI 1 "reg_or_0_operand" "rJ")
+ (match_operand:DI 2 "const48_operand" "I"))
+ (match_operand:DI 3 "reg_or_8bit_operand" "rI")))]
+ ""
+ "s%2subq %r1,%3,%0"
+ [(set_attr "type" "iaddlog")])
+
+(define_insn "mulsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (mult:SI (match_operand:SI 1 "reg_or_0_operand" "%rJ")
+ (match_operand:SI 2 "reg_or_0_operand" "rJ")))]
+ ""
+ "mull %r1,%r2,%0"
+ [(set_attr "type" "imull")])
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (sign_extend:DI (mult:SI (match_operand:SI 1 "reg_or_0_operand" "%rJ")
+ (match_operand:SI 2 "reg_or_0_operand" "rJ"))))]
+ ""
+ "mull %r1,%r2,%0"
+ [(set_attr "type" "imull")])
+
+(define_insn "muldi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (mult:DI (match_operand:DI 1 "reg_or_0_operand" "%rJ")
+ (match_operand:DI 2 "reg_or_0_operand" "rJ")))]
+ ""
+ "mulq %r1,%r2,%0"
+ [(set_attr "type" "imulq")])
+
+(define_insn "umuldi3_highpart"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (truncate:DI
+ (lshiftrt:TI
+ (mult:TI (zero_extend:TI (match_operand:DI 1 "register_operand" "r"))
+ (zero_extend:TI (match_operand:DI 2 "register_operand" "r")))
+ (const_int 64))))]
+ ""
+ "umulh %1,%2,%0"
+ [(set_attr "type" "imulq")])
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (truncate:DI
+ (lshiftrt:TI
+ (mult:TI (zero_extend:TI (match_operand:DI 1 "register_operand" "r"))
+ (match_operand:TI 2 "cint8_operand" "I"))
+ (const_int 64))))]
+ ""
+ "umulh %1,%2,%0"
+ [(set_attr "type" "imulq")])
+
+;; The divide and remainder operations always take their inputs from
+;; r24 and r25, put their output in r27, and clobber r23 and r28.
+
+(define_expand "divsi3"
+ [(set (reg:SI 24) (match_operand:SI 1 "input_operand" ""))
+ (set (reg:SI 25) (match_operand:SI 2 "input_operand" ""))
+ (parallel [(set (reg:SI 27)
+ (div:SI (reg:SI 24)
+ (reg:SI 25)))
+ (clobber (reg:DI 23))
+ (clobber (reg:DI 28))])
+ (set (match_operand:SI 0 "general_operand" "")
+ (reg:SI 27))]
+ ""
+ "")
+
+(define_expand "udivsi3"
+ [(set (reg:SI 24) (match_operand:SI 1 "input_operand" ""))
+ (set (reg:SI 25) (match_operand:SI 2 "input_operand" ""))
+ (parallel [(set (reg:SI 27)
+ (udiv:SI (reg:SI 24)
+ (reg:SI 25)))
+ (clobber (reg:DI 23))
+ (clobber (reg:DI 28))])
+ (set (match_operand:SI 0 "general_operand" "")
+ (reg:SI 27))]
+ ""
+ "")
+
+(define_expand "modsi3"
+ [(set (reg:SI 24) (match_operand:SI 1 "input_operand" ""))
+ (set (reg:SI 25) (match_operand:SI 2 "input_operand" ""))
+ (parallel [(set (reg:SI 27)
+ (mod:SI (reg:SI 24)
+ (reg:SI 25)))
+ (clobber (reg:DI 23))
+ (clobber (reg:DI 28))])
+ (set (match_operand:SI 0 "general_operand" "")
+ (reg:SI 27))]
+ ""
+ "")
+
+(define_expand "umodsi3"
+ [(set (reg:SI 24) (match_operand:SI 1 "input_operand" ""))
+ (set (reg:SI 25) (match_operand:SI 2 "input_operand" ""))
+ (parallel [(set (reg:SI 27)
+ (umod:SI (reg:SI 24)
+ (reg:SI 25)))
+ (clobber (reg:DI 23))
+ (clobber (reg:DI 28))])
+ (set (match_operand:SI 0 "general_operand" "")
+ (reg:SI 27))]
+ ""
+ "")
+
+(define_expand "divdi3"
+ [(set (reg:DI 24) (match_operand:DI 1 "input_operand" ""))
+ (set (reg:DI 25) (match_operand:DI 2 "input_operand" ""))
+ (parallel [(set (reg:DI 27)
+ (div:DI (reg:DI 24)
+ (reg:DI 25)))
+ (clobber (reg:DI 23))
+ (clobber (reg:DI 28))])
+ (set (match_operand:DI 0 "general_operand" "")
+ (reg:DI 27))]
+ ""
+ "")
+
+(define_expand "udivdi3"
+ [(set (reg:DI 24) (match_operand:DI 1 "input_operand" ""))
+ (set (reg:DI 25) (match_operand:DI 2 "input_operand" ""))
+ (parallel [(set (reg:DI 27)
+ (udiv:DI (reg:DI 24)
+ (reg:DI 25)))
+ (clobber (reg:DI 23))
+ (clobber (reg:DI 28))])
+ (set (match_operand:DI 0 "general_operand" "")
+ (reg:DI 27))]
+ ""
+ "")
+
+(define_expand "moddi3"
+ [(set (reg:DI 24) (match_operand:DI 1 "input_operand" ""))
+ (set (reg:DI 25) (match_operand:DI 2 "input_operand" ""))
+ (parallel [(set (reg:DI 27)
+ (mod:DI (reg:DI 24)
+ (reg:DI 25)))
+ (clobber (reg:DI 23))
+ (clobber (reg:DI 28))])
+ (set (match_operand:DI 0 "general_operand" "")
+ (reg:DI 27))]
+ ""
+ "")
+
+(define_expand "umoddi3"
+ [(set (reg:DI 24) (match_operand:DI 1 "input_operand" ""))
+ (set (reg:DI 25) (match_operand:DI 2 "input_operand" ""))
+ (parallel [(set (reg:DI 27)
+ (umod:DI (reg:DI 24)
+ (reg:DI 25)))
+ (clobber (reg:DI 23))
+ (clobber (reg:DI 28))])
+ (set (match_operand:DI 0 "general_operand" "")
+ (reg:DI 27))]
+ ""
+ "")
+
+(define_insn ""
+ [(set (reg:SI 27)
+ (match_operator:SI 1 "divmod_operator"
+ [(reg:SI 24) (reg:SI 25)]))
+ (clobber (reg:DI 23))
+ (clobber (reg:DI 28))]
+ ""
+ "%E1 $24,$25,$27"
+ [(set_attr "type" "isubr")])
+
+(define_insn ""
+ [(set (reg:DI 27)
+ (match_operator:DI 1 "divmod_operator"
+ [(reg:DI 24) (reg:DI 25)]))
+ (clobber (reg:DI 23))
+ (clobber (reg:DI 28))]
+ ""
+ "%E1 $24,$25,$27"
+ [(set_attr "type" "isubr")])
+
+;; Next are the basic logical operations. These only exist in DImode.
+
+(define_insn "anddi3"
+ [(set (match_operand:DI 0 "register_operand" "=r,r,r")
+ (and:DI (match_operand:DI 1 "reg_or_0_operand" "%rJ,rJ,rJ")
+ (match_operand:DI 2 "and_operand" "rI,N,MH")))]
+ ""
+ "@
+ and %r1,%2,%0
+ bic %r1,%N2,%0
+ zapnot %r1,%m2,%0"
+ [(set_attr "type" "iaddlog,iaddlog,shiftcm")])
+
+;; There are times when we can split an AND into two AND insns. This occurs
+;; when we can first clear any bytes and then clear anything else. For
+;; example "I & 0xffff07" is "(I & 0xffffff) & 0xffffffffffffff07".
+;; Only do this when running on a 64-bit host since the computations are
+;; too messy otherwise.
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (and:DI (match_operand:DI 1 "register_operand" "")
+ (match_operand:DI 2 "const_int_operand" "")))]
+ "HOST_BITS_PER_WIDE_INT == 64 && ! and_operand (operands[2], DImode)"
+ [(set (match_dup 0) (and:DI (match_dup 1) (match_dup 3)))
+ (set (match_dup 0) (and:DI (match_dup 0) (match_dup 4)))]
+ "
+{
+ unsigned HOST_WIDE_INT mask1 = INTVAL (operands[2]);
+ unsigned HOST_WIDE_INT mask2 = mask1;
+ int i;
+
+ /* For each byte that isn't all zeros, make it all ones. */
+ for (i = 0; i < 64; i += 8)
+ if ((mask1 & ((HOST_WIDE_INT) 0xff << i)) != 0)
+ mask1 |= (HOST_WIDE_INT) 0xff << i;
+
+ /* Now turn on any bits we've just turned off. */
+ mask2 |= ~ mask1;
+
+ operands[3] = GEN_INT (mask1);
+ operands[4] = GEN_INT (mask2);
+}")
+
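+;; Tracing the example from the comment above through the split: for
+;; 0xffff07 every nonzero byte is widened to all ones, so mask1 = 0xffffff
+;; (a byte-wise mask usable by zapnot), and mask2 = 0xffff07 | ~0xffffff
+;; = 0xffffffffffffff07 then clears the leftover bits (0xf8) in the low
+;; byte.
+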
+(define_insn "zero_extendqihi2"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (zero_extend:HI (match_operand:QI 1 "register_operand" "r")))]
+ ""
+ "zapnot %1,1,%0"
+ [(set_attr "type" "iaddlog")])
+
+(define_insn "zero_extendqisi2"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (zero_extend:SI (match_operand:QI 1 "register_operand" "r")))]
+ ""
+ "zapnot %1,1,%0"
+ [(set_attr "type" "iaddlog")])
+
+(define_insn "zero_extendqidi2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI (match_operand:QI 1 "register_operand" "r")))]
+ ""
+ "zapnot %1,1,%0"
+ [(set_attr "type" "iaddlog")])
+
+(define_insn "zero_extendhisi2"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (zero_extend:SI (match_operand:HI 1 "register_operand" "r")))]
+ ""
+ "zapnot %1,3,%0"
+ [(set_attr "type" "iaddlog")])
+
+(define_insn "zero_extendhidi2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI (match_operand:HI 1 "register_operand" "r")))]
+ ""
+ "zapnot %1,3,%0"
+ [(set_attr "type" "iaddlog")])
+
+(define_insn "zero_extendsidi2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI (match_operand:SI 1 "register_operand" "r")))]
+ ""
+ "zapnot %1,15,%0"
+ [(set_attr "type" "iaddlog")])
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (and:DI (not:DI (match_operand:DI 1 "reg_or_8bit_operand" "rI"))
+ (match_operand:DI 2 "reg_or_0_operand" "rJ")))]
+ ""
+ "bic %r2,%1,%0"
+ [(set_attr "type" "iaddlog")])
+
+(define_insn "iordi3"
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (ior:DI (match_operand:DI 1 "reg_or_0_operand" "%rJ,rJ")
+ (match_operand:DI 2 "or_operand" "rI,N")))]
+ ""
+ "@
+ bis %r1,%2,%0
+ ornot %r1,%N2,%0"
+ [(set_attr "type" "iaddlog")])
+
+(define_insn "one_cmpldi2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (not:DI (match_operand:DI 1 "reg_or_8bit_operand" "rI")))]
+ ""
+ "ornot $31,%1,%0"
+ [(set_attr "type" "iaddlog")])
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ior:DI (not:DI (match_operand:DI 1 "reg_or_8bit_operand" "rI"))
+ (match_operand:DI 2 "reg_or_0_operand" "rJ")))]
+ ""
+ "ornot %r2,%1,%0"
+ [(set_attr "type" "iaddlog")])
+
+(define_insn "xordi3"
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (xor:DI (match_operand:DI 1 "reg_or_0_operand" "%rJ,rJ")
+ (match_operand:DI 2 "or_operand" "rI,N")))]
+ ""
+ "@
+ xor %r1,%2,%0
+ eqv %r1,%N2,%0"
+ [(set_attr "type" "iaddlog")])
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (not:DI (xor:DI (match_operand:DI 1 "register_operand" "%rJ")
+ (match_operand:DI 2 "register_operand" "rI"))))]
+ ""
+ "eqv %r1,%2,%0"
+ [(set_attr "type" "iaddlog")])
+
+;; Next come the shifts and the various extract and insert operations.
+
+(define_insn "ashldi3"
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (ashift:DI (match_operand:DI 1 "reg_or_0_operand" "rJ,rJ")
+ (match_operand:DI 2 "reg_or_6bit_operand" "P,rI")))]
+ ""
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0:
+ if (operands[2] == const1_rtx)
+ return \"addq %r1,%r1,%0\";
+ else
+ return \"s%P2addq %r1,0,%0\";
+ case 1:
+ return \"sll %r1,%2,%0\";
+ }
+}"
+ [(set_attr "type" "iaddlog,shiftcm")])
+
+;; ??? The following pattern is made by combine, but earlier phases
+;; (specifically flow) can't handle it. This occurs in jump.c. Deal
+;; with this in a better way at some point.
+;;(define_insn ""
+;; [(set (match_operand:DI 0 "register_operand" "=r")
+;; (sign_extend:DI
+;; (subreg:SI (ashift:DI (match_operand:DI 1 "reg_or_0_operand" "rJ")
+;; (match_operand:DI 2 "const_int_operand" "P"))
+;; 0)))]
+;; "INTVAL (operands[2]) >= 1 && INTVAL (operands[2]) <= 3"
+;; "*
+;;{
+;; if (operands[2] == const1_rtx)
+;; return \"addl %r1,%r1,%0\";
+;; else
+;; return \"s%P2addl %r1,0,%0\";
+;; }"
+;; [(set_attr "type" "iaddlog")])
+
+(define_insn "lshrdi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (lshiftrt:DI (match_operand:DI 1 "reg_or_0_operand" "rJ")
+ (match_operand:DI 2 "reg_or_6bit_operand" "rI")))]
+ ""
+ "srl %r1,%2,%0")
+
+(define_insn "ashrdi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ashiftrt:DI (match_operand:DI 1 "reg_or_0_operand" "rJ")
+ (match_operand:DI 2 "reg_or_6bit_operand" "rI")))]
+ ""
+ "sra %r1,%2,%0")
+
+(define_expand "extendqihi2"
+ [(set (match_dup 2)
+ (ashift:DI (match_operand:QI 1 "register_operand" "")
+ (const_int 56)))
+ (set (match_operand:HI 0 "register_operand" "")
+ (ashiftrt:DI (match_dup 2)
+ (const_int 56)))]
+ ""
+ "
+{ operands[0] = gen_lowpart (DImode, operands[0]);
+ operands[1] = gen_lowpart (DImode, operands[1]);
+ operands[2] = gen_reg_rtx (DImode);
+}")
+
+(define_expand "extendqisi2"
+ [(set (match_dup 2)
+ (ashift:DI (match_operand:QI 1 "register_operand" "")
+ (const_int 56)))
+ (set (match_operand:SI 0 "register_operand" "")
+ (ashiftrt:DI (match_dup 2)
+ (const_int 56)))]
+ ""
+ "
+{ operands[0] = gen_lowpart (DImode, operands[0]);
+ operands[1] = gen_lowpart (DImode, operands[1]);
+ operands[2] = gen_reg_rtx (DImode);
+}")
+
+(define_expand "extendqidi2"
+ [(set (match_dup 2)
+ (ashift:DI (match_operand:QI 1 "register_operand" "")
+ (const_int 56)))
+ (set (match_operand:DI 0 "register_operand" "")
+ (ashiftrt:DI (match_dup 2)
+ (const_int 56)))]
+ ""
+ "
+{ operands[1] = gen_lowpart (DImode, operands[1]);
+ operands[2] = gen_reg_rtx (DImode);
+}")
+
+(define_expand "extendhisi2"
+ [(set (match_dup 2)
+ (ashift:DI (match_operand:HI 1 "register_operand" "")
+ (const_int 48)))
+ (set (match_operand:SI 0 "register_operand" "")
+ (ashiftrt:DI (match_dup 2)
+ (const_int 48)))]
+ ""
+ "
+{ operands[0] = gen_lowpart (DImode, operands[0]);
+ operands[1] = gen_lowpart (DImode, operands[1]);
+ operands[2] = gen_reg_rtx (DImode);
+}")
+
+(define_expand "extendhidi2"
+ [(set (match_dup 2)
+ (ashift:DI (match_operand:HI 1 "register_operand" "")
+ (const_int 48)))
+ (set (match_operand:DI 0 "register_operand" "")
+ (ashiftrt:DI (match_dup 2)
+ (const_int 48)))]
+ ""
+ "
+{ operands[1] = gen_lowpart (DImode, operands[1]);
+ operands[2] = gen_reg_rtx (DImode);
+}")
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extract:DI (match_operand:DI 1 "reg_or_0_operand" "rJ")
+ (match_operand:DI 2 "mode_width_operand" "n")
+ (match_operand:DI 3 "mul8_operand" "I")))]
+ ""
+ "ext%M2l %r1,%s3,%0")
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extract:DI (match_operand:DI 1 "reg_or_0_operand" "rJ")
+ (match_operand:DI 2 "mode_width_operand" "n")
+ (ashift:DI (match_operand:DI 3 "reg_or_8bit_operand" "rI")
+ (const_int 3))))]
+ ""
+ "ext%M2l %r1,%3,%0")
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ashift:DI
+ (zero_extract:DI (match_operand:DI 1 "reg_or_0_operand" "rJ")
+ (const_int 8)
+ (ashift:DI
+ (plus:DI
+ (match_operand:DI 2 "reg_or_8bit_operand" "rI")
+ (const_int -1))
+ (const_int 3)))
+ (const_int 56)))]
+ ""
+ "extqh %r1,%2,%0")
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ashift:DI
+ (zero_extract:DI (match_operand:DI 1 "reg_or_0_operand" "rJ")
+ (const_int 16)
+ (ashift:DI
+ (plus:DI
+ (match_operand:DI 2 "reg_or_8bit_operand" "rI")
+ (const_int -2))
+ (const_int 3)))
+ (const_int 48)))]
+ ""
+ "extwh %r1,%2,%0")
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ashift:DI
+ (zero_extract:DI (match_operand:DI 1 "reg_or_0_operand" "rJ")
+ (const_int 32)
+ (ashift:DI
+ (plus:DI
+ (match_operand:DI 2 "reg_or_8bit_operand" "rI")
+ (const_int -4))
+ (const_int 3)))
+ (const_int 32)))]
+ ""
+ "extlh %r1,%2,%0")
+
+;; This converts an extXl into an extXh with an appropriate adjustment
+;; to the address calculation.
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (ashift:DI (zero_extract:DI (match_operand:DI 1 "register_operand" "")
+ (match_operand:DI 2 "mode_width_operand" "")
+ (ashift:DI (match_operand:DI 3 "" "")
+ (const_int 3)))
+ (match_operand:DI 4 "const_int_operand" "")))
+ (clobber (match_operand:DI 5 "register_operand" ""))]
+ "INTVAL (operands[4]) == 64 - INTVAL (operands[2])"
+ [(set (match_dup 5) (match_dup 6))
+ (set (match_dup 0)
+ (ashift:DI (zero_extract:DI (match_dup 1) (match_dup 2)
+ (ashift:DI (plus:DI (match_dup 5)
+ (match_dup 7))
+ (const_int 3)))
+ (match_dup 4)))]
+ "
+{
+ operands[6] = plus_constant (operands[3],
+ INTVAL (operands[2]) / BITS_PER_UNIT);
+ operands[7] = GEN_INT (- INTVAL (operands[2]) / BITS_PER_UNIT);
+}")
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ashift:DI (zero_extend:DI (match_operand:QI 1 "register_operand" "r"))
+ (match_operand:DI 2 "mul8_operand" "I")))]
+ ""
+ "insbl %1,%s2,%0")
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ashift:DI (zero_extend:DI (match_operand:HI 1 "register_operand" "r"))
+ (match_operand:DI 2 "mul8_operand" "I")))]
+ ""
+ "inswl %1,%s2,%0")
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ashift:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r"))
+ (match_operand:DI 2 "mul8_operand" "I")))]
+ ""
+ "insll %1,%s2,%0")
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ashift:DI (zero_extend:DI (match_operand:QI 1 "register_operand" "r"))
+ (ashift:DI (match_operand:DI 2 "reg_or_8bit_operand" "rI")
+ (const_int 3))))]
+ ""
+ "insbl %1,%2,%0")
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ashift:DI (zero_extend:DI (match_operand:HI 1 "register_operand" "r"))
+ (ashift:DI (match_operand:DI 2 "reg_or_8bit_operand" "rI")
+ (const_int 3))))]
+ ""
+ "inswl %1,%2,%0")
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ashift:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r"))
+ (ashift:DI (match_operand:DI 2 "reg_or_8bit_operand" "rI")
+ (const_int 3))))]
+ ""
+ "insll %1,%2,%0")
+
+;; We do not include the insXh insns because they are complex to express
+;; and it does not appear that we would ever want to generate them.
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (and:DI (not:DI (ashift:DI
+ (match_operand:DI 2 "mode_mask_operand" "n")
+ (ashift:DI
+ (match_operand:DI 3 "reg_or_8bit_operand" "rI")
+ (const_int 3))))
+ (match_operand:DI 1 "reg_or_0_operand" "rJ")))]
+ ""
+ "msk%U2l %r1,%3,%0")
+
+;; We do not include the mskXh insns because it does not appear we would ever
+;; generate one.
+
+;; Floating-point operations. All the double-precision insns can extend
+;; from single, so indicate that. The exceptions are the ones that simply
+;; play with the sign bits; it's not clear what to do there.
+
+(define_insn "abssf2"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (abs:SF (match_operand:SF 1 "reg_or_fp0_operand" "fG")))]
+ "TARGET_FP"
+ "cpys $f31,%R1,%0"
+ [(set_attr "type" "fpop")])
+
+(define_insn "absdf2"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (abs:DF (match_operand:DF 1 "reg_or_fp0_operand" "fG")))]
+ "TARGET_FP"
+ "cpys $f31,%R1,%0"
+ [(set_attr "type" "fpop")])
+
+(define_insn "negsf2"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (neg:SF (match_operand:SF 1 "reg_or_fp0_operand" "fG")))]
+ "TARGET_FP"
+ "cpysn %R1,%R1,%0"
+ [(set_attr "type" "fpop")])
+
+(define_insn "negdf2"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (neg:DF (match_operand:DF 1 "reg_or_fp0_operand" "fG")))]
+ "TARGET_FP"
+ "cpysn %R1,%R1,%0"
+ [(set_attr "type" "fpop")])
+
+(define_insn "addsf3"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (plus:SF (match_operand:SF 1 "reg_or_fp0_operand" "%fG")
+ (match_operand:SF 2 "reg_or_fp0_operand" "fG")))]
+ "TARGET_FP"
+ "adds %R1,%R2,%0"
+ [(set_attr "type" "fpop")])
+
+(define_insn "adddf3"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (plus:DF (match_operand:DF 1 "reg_or_fp0_operand" "%fG")
+ (match_operand:DF 2 "reg_or_fp0_operand" "fG")))]
+ "TARGET_FP"
+ "addt %R1,%R2,%0"
+ [(set_attr "type" "fpop")])
+
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (plus:DF (float_extend:DF
+ (match_operand:SF 1 "reg_or_fp0_operand" "fG"))
+ (match_operand:DF 2 "reg_or_fp0_operand" "fG")))]
+ "TARGET_FP"
+ "addt %R1,%R2,%0"
+ [(set_attr "type" "fpop")])
+
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (plus:DF (float_extend:DF
+ (match_operand:SF 1 "reg_or_fp0_operand" "%fG"))
+ (float_extend:DF
+ (match_operand:SF 2 "reg_or_fp0_operand" "fG"))))]
+ "TARGET_FP"
+ "addt %R1,%R2,%0"
+ [(set_attr "type" "fpop")])
+
+(define_insn "fix_truncdfdi2"
+ [(set (match_operand:DI 0 "register_operand" "=f")
+ (fix:DI (match_operand:DF 1 "reg_or_fp0_operand" "fG")))]
+ "TARGET_FP"
+ "cvttqc %R1,%0"
+ [(set_attr "type" "fpop")])
+
+(define_insn "fix_truncsfdi2"
+ [(set (match_operand:DI 0 "register_operand" "=f")
+ (fix:DI (float_extend:DF
+ (match_operand:SF 1 "reg_or_fp0_operand" "fG"))))]
+ "TARGET_FP"
+ "cvttqc %R1,%0"
+ [(set_attr "type" "fpop")])
+
+(define_insn "floatdisf2"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (float:SF (match_operand:DI 1 "register_operand" "f")))]
+ "TARGET_FP"
+ "cvtqs %1,%0"
+ [(set_attr "type" "fpop")])
+
+(define_insn "floatdidf2"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (float:DF (match_operand:DI 1 "register_operand" "f")))]
+ "TARGET_FP"
+ "cvtqt %1,%0"
+ [(set_attr "type" "fpop")])
+
+(define_insn "extendsfdf2"
+ [(set (match_operand:DF 0 "register_operand" "=f,f")
+ (float_extend:DF (match_operand:SF 1 "nonimmediate_operand" "f,m")))]
+ "TARGET_FP"
+ "@
+ addt $f31,%1,%0
+ lds %0,%1"
+ [(set_attr "type" "fpop,ld")])
+
+(define_insn "truncdfsf2"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (float_truncate:SF (match_operand:DF 1 "reg_or_fp0_operand" "fG")))]
+ "TARGET_FP"
+ "cvtts %R1,%0"
+ [(set_attr "type" "fpop")])
+
+(define_insn "divsf3"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (div:SF (match_operand:SF 1 "reg_or_fp0_operand" "fG")
+ (match_operand:SF 2 "reg_or_fp0_operand" "fG")))]
+ "TARGET_FP"
+ "divs %R1,%R2,%0"
+ [(set_attr "type" "fdivs")])
+
+(define_insn "divdf3"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (div:DF (match_operand:DF 1 "reg_or_fp0_operand" "fG")
+ (match_operand:DF 2 "reg_or_fp0_operand" "fG")))]
+ "TARGET_FP"
+ "divt %R1,%R2,%0"
+ [(set_attr "type" "fdivt")])
+
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (div:DF (float_extend:DF (match_operand:SF 1 "reg_or_fp0_operand" "fG"))
+ (match_operand:DF 2 "reg_or_fp0_operand" "fG")))]
+ "TARGET_FP"
+ "divt %R1,%R2,%0"
+ [(set_attr "type" "fdivt")])
+
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (div:DF (match_operand:DF 1 "reg_or_fp0_operand" "fG")
+ (float_extend:DF
+ (match_operand:SF 2 "reg_or_fp0_operand" "fG"))))]
+ "TARGET_FP"
+ "divt %R1,%R2,%0"
+ [(set_attr "type" "fdivt")])
+
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (div:DF (float_extend:DF (match_operand:SF 1 "reg_or_fp0_operand" "fG"))
+ (float_extend:DF (match_operand:SF 2 "reg_or_fp0_operand" "fG"))))]
+ "TARGET_FP"
+ "divt %R1,%R2,%0"
+ [(set_attr "type" "fdivt")])
+
+(define_insn "mulsf3"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (mult:SF (match_operand:SF 1 "reg_or_fp0_operand" "%fG")
+ (match_operand:SF 2 "reg_or_fp0_operand" "fG")))]
+ "TARGET_FP"
+ "muls %R1,%R2,%0"
+ [(set_attr "type" "fpop")])
+
+(define_insn "muldf3"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (mult:DF (match_operand:DF 1 "reg_or_fp0_operand" "%fG")
+ (match_operand:DF 2 "reg_or_fp0_operand" "fG")))]
+ "TARGET_FP"
+ "mult %R1,%R2,%0"
+ [(set_attr "type" "fpop")])
+
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (mult:DF (float_extend:DF
+ (match_operand:SF 1 "reg_or_fp0_operand" "fG"))
+ (match_operand:DF 2 "reg_or_fp0_operand" "fG")))]
+ "TARGET_FP"
+ "mult %R1,%R2,%0"
+ [(set_attr "type" "fpop")])
+
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (mult:DF (float_extend:DF
+ (match_operand:SF 1 "reg_or_fp0_operand" "%fG"))
+ (float_extend:DF
+ (match_operand:SF 2 "reg_or_fp0_operand" "fG"))))]
+ "TARGET_FP"
+ "mult %R1,%R2,%0"
+ [(set_attr "type" "fpop")])
+
+(define_insn "subsf3"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (minus:SF (match_operand:SF 1 "reg_or_fp0_operand" "fG")
+ (match_operand:SF 2 "reg_or_fp0_operand" "fG")))]
+ "TARGET_FP"
+ "subs %R1,%R2,%0"
+ [(set_attr "type" "fpop")])
+
+(define_insn "subdf3"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (minus:DF (match_operand:DF 1 "reg_or_fp0_operand" "fG")
+ (match_operand:DF 2 "reg_or_fp0_operand" "fG")))]
+ "TARGET_FP"
+ "subt %R1,%R2,%0"
+ [(set_attr "type" "fpop")])
+
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (minus:DF (float_extend:DF
+ (match_operand:SF 1 "reg_or_fp0_operand" "fG"))
+ (match_operand:DF 2 "reg_or_fp0_operand" "fG")))]
+ "TARGET_FP"
+ "subt %R1,%R2,%0"
+ [(set_attr "type" "fpop")])
+
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (minus:DF (match_operand:DF 1 "reg_or_fp0_operand" "fG")
+ (float_extend:DF
+ (match_operand:SF 2 "reg_or_fp0_operand" "fG"))))]
+ "TARGET_FP"
+ "subt %R1,%R2,%0"
+ [(set_attr "type" "fpop")])
+
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (minus:DF (float_extend:DF
+ (match_operand:SF 1 "reg_or_fp0_operand" "fG"))
+ (float_extend:DF
+ (match_operand:SF 2 "reg_or_fp0_operand" "fG"))))]
+ "TARGET_FP"
+ "subt %R1,%R2,%0"
+ [(set_attr "type" "fpop")])
+
+;; Next are all the integer comparisons, and conditional moves and branches
+;; and some of the related define_expand's and define_split's.
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (match_operator:DI 1 "alpha_comparison_operator"
+ [(match_operand:DI 2 "reg_or_0_operand" "rJ")
+ (match_operand:DI 3 "reg_or_8bit_operand" "rI")]))]
+ ""
+ "cmp%C1 %r2,%3,%0"
+ [(set_attr "type" "icmp")])
+
+;; There are three important special cases that don't fit the above pattern
+;; but which we want to handle here.
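+;; (Since register $31 always reads as zero, "cmpult $31,x" computes the
+;; unsigned test 0 < x, which is 1 exactly when x is nonzero; cmplt and
+;; cmple likewise give x > 0 and x >= 0.)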
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ne:DI (match_operand:DI 1 "register_operand" "r")
+ (const_int 0)))]
+ ""
+ "cmpult $31,%1,%0"
+ [(set_attr "type" "icmp")])
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (gt:DI (match_operand:DI 1 "register_operand" "r")
+ (const_int 0)))]
+ ""
+ "cmplt $31,%1,%0"
+ [(set_attr "type" "icmp")])
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ge:DI (match_operand:DI 1 "register_operand" "r")
+ (const_int 0)))]
+ ""
+ "cmple $31,%1,%0"
+ [(set_attr "type" "icmp")])
+
+;; This pattern exists so conditional moves of SImode values are handled.
+;; Comparisons are still done in DImode though.
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
+ (if_then_else:DI
+ (match_operator 2 "signed_comparison_operator"
+ [(match_operand:DI 3 "reg_or_0_operand" "rJ,rJ,J,J")
+ (match_operand:DI 4 "reg_or_0_operand" "J,J,rJ,rJ")])
+ (match_operand:SI 1 "reg_or_8bit_operand" "rI,0,rI,0")
+ (match_operand:SI 5 "reg_or_8bit_operand" "0,rI,0,rI")))]
+ "operands[3] == const0_rtx || operands[4] == const0_rtx"
+ "@
+ cmov%C2 %r3,%1,%0
+ cmov%D2 %r3,%5,%0
+ cmov%c2 %r4,%1,%0
+ cmov%d2 %r4,%5,%0")
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r,r,r,r")
+ (if_then_else:DI
+ (match_operator 2 "signed_comparison_operator"
+ [(match_operand:DI 3 "reg_or_0_operand" "rJ,rJ,J,J")
+ (match_operand:DI 4 "reg_or_0_operand" "J,J,rJ,rJ")])
+ (match_operand:DI 1 "reg_or_8bit_operand" "rI,0,rI,0")
+ (match_operand:DI 5 "reg_or_8bit_operand" "0,rI,0,rI")))]
+ "operands[3] == const0_rtx || operands[4] == const0_rtx"
+ "@
+ cmov%C2 %r3,%1,%0
+ cmov%D2 %r3,%5,%0
+ cmov%c2 %r4,%1,%0
+ cmov%d2 %r4,%5,%0")
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (if_then_else:DI
+ (eq (zero_extract:DI (match_operand:DI 2 "reg_or_0_operand" "rJ,rJ")
+ (const_int 1)
+ (const_int 0))
+ (const_int 0))
+ (match_operand:DI 1 "reg_or_8bit_operand" "rI,0")
+ (match_operand:DI 3 "reg_or_8bit_operand" "0,rI")))]
+ ""
+ "@
+ cmovlbc %r2,%1,%0
+ cmovlbs %r2,%3,%0")
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (if_then_else:DI
+ (ne (zero_extract:DI (match_operand:DI 2 "reg_or_0_operand" "rJ,rJ")
+ (const_int 1)
+ (const_int 0))
+ (const_int 0))
+ (match_operand:DI 1 "reg_or_8bit_operand" "rI,0")
+ (match_operand:DI 3 "reg_or_8bit_operand" "0,rI")))]
+ ""
+ "@
+ cmovlbs %r2,%1,%0
+ cmovlbc %r2,%3,%0")
+
+;; This form is added since combine thinks that an IF_THEN_ELSE with both
+;; arms constant is a single insn, so it won't try to form it if combine
+;; knows they are really two insns. This occurs in divides by powers
+;; of two.
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (if_then_else:DI
+ (match_operator 2 "signed_comparison_operator"
+ [(match_operand:DI 3 "reg_or_0_operand" "rJ")
+ (const_int 0)])
+ (plus:DI (match_dup 0)
+ (match_operand:DI 1 "reg_or_8bit_operand" "rI"))
+ (match_dup 0)))
+ (clobber (match_scratch:DI 4 "=&r"))]
+ ""
+ "addq %0,%1,%4\;cmov%C2 %r3,%4,%0")
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (if_then_else:DI
+ (match_operator 2 "signed_comparison_operator"
+ [(match_operand:DI 3 "reg_or_0_operand" "")
+ (const_int 0)])
+ (plus:DI (match_dup 0)
+ (match_operand:DI 1 "reg_or_8bit_operand" ""))
+ (match_dup 0)))
+ (clobber (match_operand:DI 4 "register_operand" ""))]
+ ""
+ [(set (match_dup 4) (plus:DI (match_dup 0) (match_dup 1)))
+ (set (match_dup 0) (if_then_else:DI (match_op_dup 2
+ [(match_dup 3)
+ (const_int 0)])
+ (match_dup 4) (match_dup 0)))]
+ "")
+
+(define_split
+ [(parallel
+ [(set (match_operand:DI 0 "register_operand" "")
+ (if_then_else:DI
+ (match_operator 1 "comparison_operator"
+ [(zero_extract:DI (match_operand:DI 2 "register_operand" "")
+ (const_int 1)
+ (match_operand:DI 3 "const_int_operand" ""))
+ (const_int 0)])
+ (match_operand:DI 4 "reg_or_8bit_operand" "")
+ (match_operand:DI 5 "reg_or_8bit_operand" "")))
+ (clobber (match_operand:DI 6 "register_operand" ""))])]
+ "INTVAL (operands[3]) != 0"
+ [(set (match_dup 6)
+ (lshiftrt:DI (match_dup 2) (match_dup 3)))
+ (set (match_dup 0)
+ (if_then_else:DI (match_op_dup 1
+ [(zero_extract:DI (match_dup 6)
+ (const_int 1)
+ (const_int 0))
+ (const_int 0)])
+ (match_dup 4)
+ (match_dup 5)))]
+ "")
+
+;; For ABS, we have two choices, depending on whether the input and output
+;; registers are the same or not.
+(define_expand "absdi2"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (abs:DI (match_operand:DI 1 "register_operand" "")))]
+ ""
+ "
+{ if (rtx_equal_p (operands[0], operands[1]))
+ emit_insn (gen_absdi2_same (operands[0], gen_reg_rtx (DImode)));
+ else
+ emit_insn (gen_absdi2_diff (operands[0], operands[1]));
+
+ DONE;
+}")
+
+(define_expand "absdi2_same"
+ [(set (match_operand:DI 1 "register_operand" "")
+ (neg:DI (match_operand:DI 0 "register_operand" "")))
+ (set (match_dup 0)
+ (if_then_else:DI (ge (match_dup 0) (const_int 0))
+ (match_dup 0)
+ (match_dup 1)))]
+ ""
+ "")
+
+(define_expand "absdi2_diff"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (neg:DI (match_operand:DI 1 "register_operand" "")))
+ (set (match_dup 0)
+ (if_then_else:DI (lt (match_dup 1) (const_int 0))
+ (match_dup 0)
+ (match_dup 1)))]
+ ""
+ "")
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (abs:DI (match_dup 0)))
+ (clobber (match_operand:DI 2 "register_operand" ""))]
+ ""
+ [(set (match_dup 1) (neg:DI (match_dup 0)))
+ (set (match_dup 0) (if_then_else:DI (ge (match_dup 0) (const_int 0))
+ (match_dup 0) (match_dup 1)))]
+ "")
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (abs:DI (match_operand:DI 1 "register_operand" "")))]
+ "! rtx_equal_p (operands[0], operands[1])"
+ [(set (match_dup 0) (neg:DI (match_dup 1)))
+ (set (match_dup 0) (if_then_else:DI (lt (match_dup 1) (const_int 0))
+ (match_dup 0) (match_dup 1)))]
+ "")
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (neg:DI (abs:DI (match_dup 0))))
+ (clobber (match_operand:DI 2 "register_operand" ""))]
+ ""
+ [(set (match_dup 1) (neg:DI (match_dup 0)))
+ (set (match_dup 0) (if_then_else:DI (le (match_dup 0) (const_int 0))
+ (match_dup 0) (match_dup 1)))]
+ "")
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (neg:DI (abs:DI (match_operand:DI 1 "register_operand" ""))))]
+ "! rtx_equal_p (operands[0], operands[1])"
+ [(set (match_dup 0) (neg:DI (match_dup 1)))
+ (set (match_dup 0) (if_then_else:DI (gt (match_dup 1) (const_int 0))
+ (match_dup 0) (match_dup 1)))]
+ "")
+
+(define_expand "smaxdi3"
+ [(set (match_dup 3)
+ (le:DI (match_operand:DI 1 "reg_or_0_operand" "")
+ (match_operand:DI 2 "reg_or_8bit_operand" "")))
+ (set (match_operand:DI 0 "register_operand" "")
+ (if_then_else:DI (eq (match_dup 3) (const_int 0))
+ (match_dup 1) (match_dup 2)))]
+ ""
+ "
+{ operands[3] = gen_reg_rtx (DImode);
+}")
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (smax:DI (match_operand:DI 1 "reg_or_0_operand" "")
+ (match_operand:DI 2 "reg_or_8bit_operand" "")))
+ (clobber (match_operand:DI 3 "register_operand" ""))]
+ "operands[2] != const0_rtx"
+ [(set (match_dup 3) (le:DI (match_dup 1) (match_dup 2)))
+ (set (match_dup 0) (if_then_else:DI (eq (match_dup 3) (const_int 0))
+ (match_dup 1) (match_dup 2)))]
+ "")
+
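+;; When the second operand is zero, no compare is needed at all: the special
+;; cases below implement smax/smin against zero with a single conditional
+;; move that replaces a negative (resp. positive) value with zero.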
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (smax:DI (match_operand:DI 1 "register_operand" "0")
+ (const_int 0)))]
+ ""
+ "cmovlt %0,0,%0")
+
+(define_expand "smindi3"
+ [(set (match_dup 3)
+ (lt:DI (match_operand:DI 1 "reg_or_0_operand" "")
+ (match_operand:DI 2 "reg_or_8bit_operand" "")))
+ (set (match_operand:DI 0 "register_operand" "")
+ (if_then_else:DI (ne (match_dup 3) (const_int 0))
+ (match_dup 1) (match_dup 2)))]
+ ""
+ "
+{ operands[3] = gen_reg_rtx (DImode);
+}")
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (smin:DI (match_operand:DI 1 "reg_or_0_operand" "")
+ (match_operand:DI 2 "reg_or_8bit_operand" "")))
+ (clobber (match_operand:DI 3 "register_operand" ""))]
+ "operands[2] != const0_rtx"
+ [(set (match_dup 3) (lt:DI (match_dup 1) (match_dup 2)))
+ (set (match_dup 0) (if_then_else:DI (ne (match_dup 3) (const_int 0))
+ (match_dup 1) (match_dup 2)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (smin:DI (match_operand:DI 1 "register_operand" "0")
+ (const_int 0)))]
+ ""
+ "cmovgt %0,0,%0")
+
+(define_expand "umaxdi3"
+ [(set (match_dup 3)
+ (leu:DI (match_operand:DI 1 "reg_or_0_operand" "")
+ (match_operand:DI 2 "reg_or_8bit_operand" "")))
+ (set (match_operand:DI 0 "register_operand" "")
+ (if_then_else:DI (eq (match_dup 3) (const_int 0))
+ (match_dup 1) (match_dup 2)))]
+ ""
+ "
+{ operands[3] = gen_reg_rtx (DImode);
+}")
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (umax:DI (match_operand:DI 1 "reg_or_0_operand" "")
+ (match_operand:DI 2 "reg_or_8bit_operand" "")))
+ (clobber (match_operand:DI 3 "register_operand" ""))]
+ "operands[2] != const0_rtx"
+ [(set (match_dup 3) (leu:DI (match_dup 1) (match_dup 2)))
+ (set (match_dup 0) (if_then_else:DI (eq (match_dup 3) (const_int 0))
+ (match_dup 1) (match_dup 2)))]
+ "")
+
+(define_expand "umindi3"
+ [(set (match_dup 3)
+ (ltu:DI (match_operand:DI 1 "reg_or_0_operand" "")
+ (match_operand:DI 2 "reg_or_8bit_operand" "")))
+ (set (match_operand:DI 0 "register_operand" "")
+ (if_then_else:DI (ne (match_dup 3) (const_int 0))
+ (match_dup 1) (match_dup 2)))]
+ ""
+ "
+{ operands[3] = gen_reg_rtx (DImode);
+}")
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (umin:DI (match_operand:DI 1 "reg_or_0_operand" "")
+ (match_operand:DI 2 "reg_or_8bit_operand" "")))
+ (clobber (match_operand:DI 3 "register_operand" ""))]
+ "operands[2] != const0_rtx"
+ [(set (match_dup 3) (ltu:DI (match_dup 1) (match_dup 2)))
+ (set (match_dup 0) (if_then_else:DI (ne (match_dup 3) (const_int 0))
+ (match_dup 1) (match_dup 2)))]
+ "")
+
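+;; Conditional branches. A signed comparison of a register against zero
+;; maps directly onto the b<cond> instructions; testing just the low bit
+;; of a register uses blbs/blbc.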
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (match_operator 1 "signed_comparison_operator"
+ [(match_operand:DI 2 "reg_or_0_operand" "rJ")
+ (const_int 0)])
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "b%C1 %r2,%0"
+ [(set_attr "type" "ibr")])
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (ne (zero_extract:DI (match_operand:DI 1 "reg_or_0_operand" "rJ")
+ (const_int 1)
+ (const_int 0))
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "blbs %r1,%0"
+ [(set_attr "type" "ibr")])
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (eq (zero_extract:DI (match_operand:DI 1 "reg_or_0_operand" "rJ")
+ (const_int 1)
+ (const_int 0))
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "blbc %r1,%0"
+ [(set_attr "type" "ibr")])
+
+(define_split
+ [(parallel
+ [(set (pc)
+ (if_then_else
+ (match_operator 1 "comparison_operator"
+ [(zero_extract:DI (match_operand:DI 2 "register_operand" "")
+ (const_int 1)
+ (match_operand:DI 3 "const_int_operand" ""))
+ (const_int 0)])
+ (label_ref (match_operand 0 "" ""))
+ (pc)))
+ (clobber (match_operand:DI 4 "register_operand" ""))])]
+ "INTVAL (operands[3]) != 0"
+ [(set (match_dup 4)
+ (lshiftrt:DI (match_dup 2) (match_dup 3)))
+ (set (pc)
+ (if_then_else (match_op_dup 1
+ [(zero_extract:DI (match_dup 4)
+ (const_int 1)
+ (const_int 0))
+ (const_int 0)])
+ (label_ref (match_dup 0))
+ (pc)))]
+ "")
+
+;; The following are the corresponding floating-point insns. Recall
+;; that we need variants that extend the arguments from SFmode
+;; to DFmode.
+
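+;; All four operand combinations below emit the same cmpt instruction,
+;; since SFmode values are kept in the registers in T-floating format and
+;; the float_extend is a no-op as far as the comparison is concerned.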
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (match_operator:DF 1 "alpha_comparison_operator"
+ [(match_operand:DF 2 "reg_or_fp0_operand" "fG")
+ (match_operand:DF 3 "reg_or_fp0_operand" "fG")]))]
+ "TARGET_FP"
+ "cmpt%C1 %R2,%R3,%0"
+ [(set_attr "type" "fpop")])
+
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (match_operator:DF 1 "alpha_comparison_operator"
+ [(float_extend:DF
+ (match_operand:SF 2 "reg_or_fp0_operand" "fG"))
+ (match_operand:DF 3 "reg_or_fp0_operand" "fG")]))]
+ "TARGET_FP"
+ "cmpt%C1 %R2,%R3,%0"
+ [(set_attr "type" "fpop")])
+
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (match_operator:DF 1 "alpha_comparison_operator"
+ [(match_operand:DF 2 "reg_or_fp0_operand" "fG")
+ (float_extend:DF
+ (match_operand:SF 3 "reg_or_fp0_operand" "fG"))]))]
+ "TARGET_FP"
+ "cmpt%C1 %R2,%R3,%0"
+ [(set_attr "type" "fpop")])
+
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (match_operator:DF 1 "alpha_comparison_operator"
+ [(float_extend:DF
+ (match_operand:SF 2 "reg_or_fp0_operand" "fG"))
+ (float_extend:DF
+ (match_operand:SF 3 "reg_or_fp0_operand" "fG"))]))]
+ "TARGET_FP"
+ "cmpt%C1 %R2,%R3,%0"
+ [(set_attr "type" "fpop")])
+
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=f,f")
+ (if_then_else:DF
+ (match_operator 3 "signed_comparison_operator"
+ [(match_operand:DF 4 "reg_or_fp0_operand" "fG,fG")
+ (match_operand:DF 2 "fp0_operand" "G,G")])
+ (match_operand:DF 1 "reg_or_fp0_operand" "fG,0")
+ (match_operand:DF 5 "reg_or_fp0_operand" "0,fG")))]
+ "TARGET_FP"
+ "@
+ fcmov%C3 %R4,%R1,%0
+ fcmov%D3 %R4,%R5,%0"
+ [(set_attr "type" "fpop")])
+
+(define_insn ""
+ [(set (match_operand:SF 0 "register_operand" "=f,f")
+ (if_then_else:SF
+ (match_operator 3 "signed_comparison_operator"
+ [(match_operand:DF 4 "reg_or_fp0_operand" "fG,fG")
+ (match_operand:DF 2 "fp0_operand" "G,G")])
+ (match_operand:SF 1 "reg_or_fp0_operand" "fG,0")
+ (match_operand:SF 5 "reg_or_fp0_operand" "0,fG")))]
+ "TARGET_FP"
+ "@
+ fcmov%C3 %R4,%R1,%0
+ fcmov%D3 %R4,%R5,%0"
+ [(set_attr "type" "fpop")])
+
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=f,f")
+ (if_then_else:DF
+ (match_operator 3 "signed_comparison_operator"
+ [(match_operand:DF 1 "reg_or_fp0_operand" "fG,fG")
+ (match_operand:DF 2 "fp0_operand" "G,G")])
+ (float_extend:DF (match_operand:SF 4 "reg_or_fp0_operand" "fG,0"))
+ (match_operand:DF 5 "reg_or_fp0_operand" "0,fG")))]
+ "TARGET_FP"
+ "@
+ fcmov%C3 %R4,%R1,%0
+ fcmov%D3 %R4,%R5,%0"
+ [(set_attr "type" "fpop")])
+
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=f,f")
+ (if_then_else:DF
+ (match_operator 3 "signed_comparison_operator"
+ [(float_extend:DF
+ (match_operand:SF 4 "reg_or_fp0_operand" "fG,fG"))
+ (match_operand:DF 2 "fp0_operand" "G,G")])
+ (match_operand:DF 1 "reg_or_fp0_operand" "fG,0")
+ (match_operand:DF 5 "reg_or_fp0_operand" "0,fG")))]
+ "TARGET_FP"
+ "@
+ fcmov%C3 %R4,%R1,%0
+ fcmov%D3 %R4,%R5,%0"
+ [(set_attr "type" "fpop")])
+
+(define_insn ""
+ [(set (match_operand:SF 0 "register_operand" "=f,f")
+ (if_then_else:SF
+ (match_operator 3 "signed_comparison_operator"
+ [(float_extend:DF
+ (match_operand:SF 4 "reg_or_fp0_operand" "fG,fG"))
+ (match_operand:DF 2 "fp0_operand" "G,G")])
+ (match_operand:SF 1 "reg_or_fp0_operand" "fG,0")
+ (match_operand:SF 5 "reg_or_fp0_operand" "0,fG")))]
+ "TARGET_FP"
+ "@
+ fcmov%C3 %R4,%R1,%0
+ fcmov%D3 %R4,%R5,%0"
+ [(set_attr "type" "fpop")])
+
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=f,f")
+ (if_then_else:DF
+ (match_operator 3 "signed_comparison_operator"
+ [(float_extend:DF
+ (match_operand:SF 4 "reg_or_fp0_operand" "fG,fG"))
+ (match_operand:DF 2 "fp0_operand" "G,G")])
+ (float_extend:DF (match_operand:SF 1 "reg_or_fp0_operand" "fG,0"))
+ (match_operand:DF 5 "reg_or_fp0_operand" "0,fG")))]
+ "TARGET_FP"
+ "@
+ fcmov%C3 %R4,%R1,%0
+ fcmov%D3 %R4,%R5,%0"
+ [(set_attr "type" "fpop")])
+
+(define_expand "maxdf3"
+ [(set (match_dup 3)
+ (le:DF (match_operand:DF 1 "reg_or_fp0_operand" "")
+ (match_operand:DF 2 "reg_or_fp0_operand" "")))
+ (set (match_operand:DF 0 "register_operand" "")
+ (if_then_else:DF (eq (match_dup 3) (match_dup 4))
+ (match_dup 1) (match_dup 2)))]
+ "TARGET_FP"
+ "
+{ operands[3] = gen_reg_rtx (DFmode);
+ operands[4] = CONST0_RTX (DFmode);
+}")
+
+(define_expand "mindf3"
+ [(set (match_dup 3)
+ (lt:DF (match_operand:DF 1 "reg_or_fp0_operand" "")
+ (match_operand:DF 2 "reg_or_fp0_operand" "")))
+ (set (match_operand:DF 0 "register_operand" "")
+ (if_then_else:DF (ne (match_dup 3) (match_dup 4))
+ (match_dup 1) (match_dup 2)))]
+ "TARGET_FP"
+ "
+{ operands[3] = gen_reg_rtx (DFmode);
+ operands[4] = CONST0_RTX (DFmode);
+}")
+
+(define_expand "maxsf3"
+ [(set (match_dup 3)
+ (le:DF (float_extend:DF (match_operand:SF 1 "reg_or_fp0_operand" ""))
+ (float_extend:DF (match_operand:SF 2 "reg_or_fp0_operand" ""))))
+ (set (match_operand:SF 0 "register_operand" "")
+ (if_then_else:SF (eq (match_dup 3) (match_dup 4))
+ (match_dup 1) (match_dup 2)))]
+ "TARGET_FP"
+ "
+{ operands[3] = gen_reg_rtx (DFmode);
+ operands[4] = CONST0_RTX (DFmode);
+}")
+
+(define_expand "minsf3"
+ [(set (match_dup 3)
+ (lt:DF (float_extend:DF (match_operand:SF 1 "reg_or_fp0_operand" ""))
+ (float_extend:DF (match_operand:SF 2 "reg_or_fp0_operand" ""))))
+ (set (match_operand:SF 0 "register_operand" "")
+ (if_then_else:SF (ne (match_dup 3) (match_dup 4))
+ (match_dup 1) (match_dup 2)))]
+ "TARGET_FP"
+ "
+{ operands[3] = gen_reg_rtx (DFmode);
+ operands[4] = CONST0_RTX (DFmode);
+}")
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (match_operator 1 "signed_comparison_operator"
+ [(match_operand:DF 2 "reg_or_fp0_operand" "fG")
+ (match_operand:DF 3 "fp0_operand" "G")])
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ "TARGET_FP"
+ "fb%C1 %R2,%0"
+ [(set_attr "type" "fbr")])
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (match_operator 1 "signed_comparison_operator"
+ [(float_extend:DF
+ (match_operand:SF 2 "reg_or_fp0_operand" "fG"))
+ (match_operand:DF 3 "fp0_operand" "G")])
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ "TARGET_FP"
+ "fb%C1 %R2,%0"
+ [(set_attr "type" "fbr")])
+
+;; These are the main define_expand's used to make conditional branches
+;; and compares.
+
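+;; cmpdf/cmpdi only record their operands; the compare instruction itself
+;; is emitted by the branch or set expander that follows. For example, an
+;; integer "a > b" is generated as "t = (a <= b)" followed by a branch on
+;; t == 0, since the hardware provides cmple/cmplt but no cmpgt.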
+(define_expand "cmpdf"
+ [(set (cc0) (compare (match_operand:DF 0 "reg_or_fp0_operand" "")
+ (match_operand:DF 1 "reg_or_fp0_operand" "")))]
+ "TARGET_FP"
+ "
+{
+ alpha_compare_op0 = operands[0];
+ alpha_compare_op1 = operands[1];
+ alpha_compare_fp_p = 1;
+ DONE;
+}")
+
+(define_expand "cmpdi"
+ [(set (cc0) (compare (match_operand:DI 0 "reg_or_0_operand" "")
+ (match_operand:DI 1 "reg_or_8bit_operand" "")))]
+ ""
+ "
+{
+ alpha_compare_op0 = operands[0];
+ alpha_compare_op1 = operands[1];
+ alpha_compare_fp_p = 0;
+ DONE;
+}")
+
+(define_expand "beq"
+ [(set (match_dup 1) (match_dup 2))
+ (set (pc)
+ (if_then_else (match_dup 3)
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ enum machine_mode mode;
+ enum rtx_code compare_code, branch_code;
+
+ if (alpha_compare_fp_p)
+ mode = DFmode, compare_code = EQ, branch_code = NE;
+ else
+ {
+ mode = DImode, compare_code = MINUS, branch_code = EQ;
+ if (GET_CODE (alpha_compare_op1) == CONST_INT)
+ {
+ compare_code = PLUS;
+ alpha_compare_op1 = GEN_INT (- INTVAL (alpha_compare_op1));
+ }
+ }
+
+ operands[1] = gen_reg_rtx (mode);
+ operands[2] = gen_rtx (compare_code, mode,
+ alpha_compare_op0, alpha_compare_op1);
+ operands[3] = gen_rtx (branch_code, VOIDmode,
+ operands[1], CONST0_RTX (mode));
+}")
+
+(define_expand "bne"
+ [(set (match_dup 1) (match_dup 2))
+ (set (pc)
+ (if_then_else (match_dup 3)
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ enum machine_mode mode;
+ enum rtx_code compare_code, branch_code;
+
+ if (alpha_compare_fp_p)
+ mode = DFmode, compare_code = EQ, branch_code = EQ;
+ else
+ {
+ mode = DImode, compare_code = MINUS, branch_code = NE;
+ if (GET_CODE (alpha_compare_op1) == CONST_INT)
+ {
+ compare_code = PLUS;
+ alpha_compare_op1 = GEN_INT (- INTVAL (alpha_compare_op1));
+ }
+ }
+
+ operands[1] = gen_reg_rtx (mode);
+ operands[2] = gen_rtx (compare_code, mode,
+ alpha_compare_op0, alpha_compare_op1);
+ operands[3] = gen_rtx (branch_code, VOIDmode,
+ operands[1], CONST0_RTX (mode));
+}")
+
+(define_expand "blt"
+ [(set (match_dup 1) (match_dup 2))
+ (set (pc)
+ (if_then_else (match_dup 3)
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ enum machine_mode mode = alpha_compare_fp_p ? DFmode : DImode;
+ operands[1] = gen_reg_rtx (mode);
+ operands[2] = gen_rtx (LT, mode, alpha_compare_op0, alpha_compare_op1);
+ operands[3] = gen_rtx (NE, VOIDmode, operands[1], CONST0_RTX (mode));
+}")
+
+(define_expand "ble"
+ [(set (match_dup 1) (match_dup 2))
+ (set (pc)
+ (if_then_else (match_dup 3)
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ enum machine_mode mode = alpha_compare_fp_p ? DFmode : DImode;
+ operands[1] = gen_reg_rtx (mode);
+ operands[2] = gen_rtx (LE, mode, alpha_compare_op0, alpha_compare_op1);
+ operands[3] = gen_rtx (NE, VOIDmode, operands[1], CONST0_RTX (mode));
+}")
+
+(define_expand "bgt"
+ [(set (match_dup 1) (match_dup 2))
+ (set (pc)
+ (if_then_else (match_dup 3)
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ if (alpha_compare_fp_p)
+ {
+ operands[1] = gen_reg_rtx (DFmode);
+ operands[2] = gen_rtx (LT, DFmode, alpha_compare_op1, alpha_compare_op0);
+ operands[3] = gen_rtx (NE, VOIDmode, operands[1], CONST0_RTX (DFmode));
+ }
+ else
+ {
+ operands[1] = gen_reg_rtx (DImode);
+ operands[2] = gen_rtx (LE, DImode, alpha_compare_op0, alpha_compare_op1);
+ operands[3] = gen_rtx (EQ, VOIDmode, operands[1], const0_rtx);
+ }
+}")
+
+(define_expand "bge"
+ [(set (match_dup 1) (match_dup 2))
+ (set (pc)
+ (if_then_else (match_dup 3)
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ if (alpha_compare_fp_p)
+ {
+ operands[1] = gen_reg_rtx (DFmode);
+ operands[2] = gen_rtx (LE, DFmode, alpha_compare_op1, alpha_compare_op0);
+ operands[3] = gen_rtx (NE, VOIDmode, operands[1], CONST0_RTX (DFmode));
+ }
+ else
+ {
+ operands[1] = gen_reg_rtx (DImode);
+ operands[2] = gen_rtx (LT, DImode, alpha_compare_op0, alpha_compare_op1);
+ operands[3] = gen_rtx (EQ, VOIDmode, operands[1], const0_rtx);
+ }
+}")
+
+(define_expand "bltu"
+ [(set (match_dup 1) (match_dup 2))
+ (set (pc)
+ (if_then_else (match_dup 3)
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_reg_rtx (DImode);
+ operands[2] = gen_rtx (LTU, DImode, alpha_compare_op0, alpha_compare_op1);
+ operands[3] = gen_rtx (NE, VOIDmode, operands[1], const0_rtx);
+}")
+
+(define_expand "bleu"
+ [(set (match_dup 1) (match_dup 2))
+ (set (pc)
+ (if_then_else (match_dup 3)
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_reg_rtx (DImode);
+ operands[2] = gen_rtx (LEU, DImode, alpha_compare_op0, alpha_compare_op1);
+ operands[3] = gen_rtx (NE, VOIDmode, operands[1], const0_rtx);
+}")
+
+(define_expand "bgtu"
+ [(set (match_dup 1) (match_dup 2))
+ (set (pc)
+ (if_then_else (match_dup 3)
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_reg_rtx (DImode);
+ operands[2] = gen_rtx (LEU, DImode, alpha_compare_op0, alpha_compare_op1);
+ operands[3] = gen_rtx (EQ, VOIDmode, operands[1], const0_rtx);
+}")
+
+(define_expand "bgeu"
+ [(set (match_dup 1) (match_dup 2))
+ (set (pc)
+ (if_then_else (match_dup 3)
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_reg_rtx (DImode);
+ operands[2] = gen_rtx (LTU, DImode, alpha_compare_op0, alpha_compare_op1);
+ operands[3] = gen_rtx (EQ, VOIDmode, operands[1], const0_rtx);
+}")
+
+(define_expand "seq"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (match_dup 1))]
+ ""
+ "
+{
+ if (alpha_compare_fp_p)
+ FAIL;
+
+ operands[1] = gen_rtx (EQ, DImode, alpha_compare_op0, alpha_compare_op1);
+}")
+
+(define_expand "sne"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (match_dup 1))
+ (set (match_dup 0) (xor:DI (match_dup 0) (const_int 1)))]
+ ""
+ "
+{
+ if (alpha_compare_fp_p)
+ FAIL;
+
+ operands[1] = gen_rtx (EQ, DImode, alpha_compare_op0, alpha_compare_op1);
+}")
+
+(define_expand "slt"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (match_dup 1))]
+ ""
+ "
+{
+ if (alpha_compare_fp_p)
+ FAIL;
+
+ operands[1] = gen_rtx (LT, DImode, alpha_compare_op0, alpha_compare_op1);
+}")
+
+(define_expand "sle"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (match_dup 1))]
+ ""
+ "
+{
+ if (alpha_compare_fp_p)
+ FAIL;
+
+ operands[1] = gen_rtx (LE, DImode, alpha_compare_op0, alpha_compare_op1);
+}")
+
+(define_expand "sgt"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (match_dup 1))]
+ ""
+ "
+{
+ if (alpha_compare_fp_p)
+ FAIL;
+
+ operands[1] = gen_rtx (LT, DImode, force_reg (DImode, alpha_compare_op1),
+ alpha_compare_op0);
+}")
+
+(define_expand "sge"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (match_dup 1))]
+ ""
+ "
+{
+ if (alpha_compare_fp_p)
+ FAIL;
+
+ operands[1] = gen_rtx (LE, DImode, force_reg (DImode, alpha_compare_op1),
+ alpha_compare_op0);
+}")
+
+(define_expand "sltu"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (match_dup 1))]
+ ""
+ "
+{
+ if (alpha_compare_fp_p)
+ FAIL;
+
+ operands[1] = gen_rtx (LTU, DImode, alpha_compare_op0, alpha_compare_op1);
+}")
+
+(define_expand "sleu"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (match_dup 1))]
+ ""
+ "
+{
+ if (alpha_compare_fp_p)
+ FAIL;
+
+ operands[1] = gen_rtx (LEU, DImode, alpha_compare_op0, alpha_compare_op1);
+}")
+
+(define_expand "sgtu"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (match_dup 1))]
+ ""
+ "
+{
+ if (alpha_compare_fp_p)
+ FAIL;
+
+ operands[1] = gen_rtx (LTU, DImode, force_reg (DImode, alpha_compare_op1),
+ alpha_compare_op0);
+}")
+
+(define_expand "sgeu"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (match_dup 1))]
+ ""
+ "
+{
+ if (alpha_compare_fp_p)
+ FAIL;
+
+ operands[1] = gen_rtx (LEU, DImode, force_reg (DImode, alpha_compare_op1),
+ alpha_compare_op0);
+}")
+
+;; These are the main define_expand's used to make conditional moves.
+
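+;; As with the branches, the recorded comparison is first materialized as
+;; an scc value in a DImode temporary and the move itself is done with a
+;; conditional move keyed on that temporary. GE and GT are rewritten as LE
+;; and LT with the operands swapped, and NE as EQ with the sense of the
+;; conditional move inverted, since there is no cmpne instruction.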
+(define_expand "movsicc"
+ [(set (match_dup 4) (match_operand 1 "comparison_operator" ""))
+ (set (match_operand:SI 0 "register_operand" "")
+ (if_then_else:DI (match_dup 5)
+ (match_operand:SI 2 "reg_or_8bit_operand" "")
+ (match_operand:SI 3 "reg_or_8bit_operand" "")))]
+ ""
+ "
+{
+ rtx op0,op1;
+ enum rtx_code code = GET_CODE (operands[1]), code2 = NE;
+
+ if (alpha_compare_fp_p)
+ FAIL;
+ switch (code)
+ {
+ case EQ: case LE: case LT:
+ op0 = alpha_compare_op0;
+ op1 = alpha_compare_op1;
+ break;
+ case NE:
+ code = code2 = EQ;
+ op0 = alpha_compare_op0;
+ op1 = alpha_compare_op1;
+ break;
+ case GE:
+ code = LE;
+ op0 = force_reg (DImode, alpha_compare_op1);
+ op1 = alpha_compare_op0;
+ break;
+ case GT:
+ code = LT;
+ op0 = force_reg (DImode, alpha_compare_op1);
+ op1 = alpha_compare_op0;
+ break;
+ default:
+ FAIL;
+ }
+ operands[1] = gen_rtx (code, DImode, op0, op1);
+ operands[4] = gen_reg_rtx (DImode);
+ operands[5] = gen_rtx (code2, VOIDmode, operands[4], CONST0_RTX (DImode));
+}")
+
+(define_expand "movdicc"
+ [(set (match_dup 4) (match_operand 1 "comparison_operator" ""))
+ (set (match_operand:DI 0 "register_operand" "")
+ (if_then_else:DI (match_dup 5)
+ (match_operand:DI 2 "reg_or_8bit_operand" "")
+ (match_operand:DI 3 "reg_or_8bit_operand" "")))]
+ ""
+ "
+{
+ rtx op0,op1;
+ enum rtx_code code = GET_CODE (operands[1]), code2 = NE;
+
+ if (alpha_compare_fp_p)
+ FAIL;
+ switch (code)
+ {
+ case EQ: case LE: case LT:
+ op0 = alpha_compare_op0;
+ op1 = alpha_compare_op1;
+ break;
+ case NE:
+ code = code2 = EQ;
+ op0 = alpha_compare_op0;
+ op1 = alpha_compare_op1;
+ break;
+ case GE:
+ code = LE;
+ op0 = force_reg (DImode, alpha_compare_op1);
+ op1 = alpha_compare_op0;
+ break;
+ case GT:
+ code = LT;
+ op0 = force_reg (DImode, alpha_compare_op1);
+ op1 = alpha_compare_op0;
+ break;
+ default:
+ FAIL;
+ }
+ operands[1] = gen_rtx (code, DImode, op0, op1);
+ operands[4] = gen_reg_rtx (DImode);
+ operands[5] = gen_rtx (code2, VOIDmode, operands[4], CONST0_RTX (DImode));
+}")
+
+(define_expand "movsfcc"
+ [(set (match_dup 4) (match_operand 1 "comparison_operator" ""))
+ (set (match_operand:SF 0 "register_operand" "")
+ (if_then_else:SF (match_dup 5)
+ (match_operand:SF 2 "reg_or_fp0_operand" "")
+ (match_operand:SF 3 "reg_or_fp0_operand" "")))]
+ ""
+ "
+{
+ rtx op0,op1;
+ enum rtx_code code = GET_CODE (operands[1]), code2 = NE;
+
+ if (!alpha_compare_fp_p)
+ FAIL;
+ switch (code)
+ {
+ case EQ: case LE: case LT:
+ op0 = alpha_compare_op0;
+ op1 = alpha_compare_op1;
+ break;
+ case NE:
+ /* There isn't a cmptne insn. */
+ code = code2 = EQ;
+ op0 = alpha_compare_op0;
+ op1 = alpha_compare_op1;
+ break;
+ case GE:
+ code = LE;
+ op0 = force_reg (DFmode, alpha_compare_op1);
+ op1 = alpha_compare_op0;
+ break;
+ case GT:
+ code = LT;
+ op0 = force_reg (DFmode, alpha_compare_op1);
+ op1 = alpha_compare_op0;
+ break;
+ default:
+ FAIL;
+ }
+ operands[1] = gen_rtx (code, DFmode, op0, op1);
+ operands[4] = gen_reg_rtx (DFmode);
+ operands[5] = gen_rtx (code2, VOIDmode, operands[4], CONST0_RTX (DFmode));
+}")
+
+(define_expand "movdfcc"
+ [(set (match_dup 4) (match_operand 1 "comparison_operator" ""))
+ (set (match_operand:DF 0 "register_operand" "")
+ (if_then_else:DF (match_dup 5)
+ (match_operand:DF 2 "reg_or_fp0_operand" "")
+ (match_operand:DF 3 "reg_or_fp0_operand" "")))]
+ ""
+ "
+{
+ rtx op0,op1;
+ enum rtx_code code = GET_CODE (operands[1]), code2 = NE;
+
+ if (!alpha_compare_fp_p)
+ FAIL;
+ switch (code)
+ {
+ case EQ: case LE: case LT:
+ op0 = alpha_compare_op0;
+ op1 = alpha_compare_op1;
+ break;
+ case NE:
+ /* There isn't a cmptne insn. */
+ code = code2 = EQ;
+ op0 = alpha_compare_op0;
+ op1 = alpha_compare_op1;
+ break;
+ case GE:
+ code = LE;
+ op0 = force_reg (DFmode, alpha_compare_op1);
+ op1 = alpha_compare_op0;
+ break;
+ case GT:
+ code = LT;
+ op0 = force_reg (DFmode, alpha_compare_op1);
+ op1 = alpha_compare_op0;
+ break;
+ default:
+ FAIL;
+ }
+ operands[1] = gen_rtx (code, DFmode, op0, op1);
+ operands[4] = gen_reg_rtx (DFmode);
+ operands[5] = gen_rtx (code2, VOIDmode, operands[4], CONST0_RTX (DFmode));
+}")
+
+;; These define_split definitions are used in cases when comparisons have
+;; not been stated in the correct way and we need to reverse the second
+;; comparison. For example, x >= 7 has to be done as x < 7 with the
+;; comparison that tests the result being reversed. We have one define_split
+;; for each use of a comparison. They do not match valid insns and need
+;; not generate valid insns.
+;;
+;; We can also handle equality comparisons (and inequality comparisons in
+;; cases where the resulting add cannot overflow) by doing an add followed by
+;; a comparison with zero. This is faster since the addition takes one
+;; less cycle than a compare when feeding into a conditional move.
+;; For this case, we also have an SImode pattern since we can merge the add
+;; and sign extend and the order doesn't matter.
+;;
+;; We do not do this for floating-point, since it isn't clear how the "wrong"
+;; operation could have been generated.
+
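+;; For example, "r = (x >= y ? a : b)" is rewritten here as
+;; "t = (x < y); r = (t == 0 ? a : b)", and an equality test against a
+;; constant such as "x == 10" becomes "t = x + (-10); r = (t == 0 ? a : b)".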
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (if_then_else:DI
+ (match_operator 1 "comparison_operator"
+ [(match_operand:DI 2 "reg_or_0_operand" "")
+ (match_operand:DI 3 "reg_or_cint_operand" "")])
+ (match_operand:DI 4 "reg_or_cint_operand" "")
+ (match_operand:DI 5 "reg_or_cint_operand" "")))
+ (clobber (match_operand:DI 6 "register_operand" ""))]
+ "operands[3] != const0_rtx"
+ [(set (match_dup 6) (match_dup 7))
+ (set (match_dup 0)
+ (if_then_else:DI (match_dup 8) (match_dup 4) (match_dup 5)))]
+ "
+{ enum rtx_code code = GET_CODE (operands[1]);
+ int unsignedp = (code == GEU || code == LEU || code == GTU || code == LTU);
+
+ /* If we are comparing for equality with a constant and that constant
+ appears in the arm when the register equals the constant, use the
+ register since that is more likely to match (and to produce better code
+ if both would). */
+
+ if (code == EQ && GET_CODE (operands[3]) == CONST_INT
+ && rtx_equal_p (operands[4], operands[3]))
+ operands[4] = operands[2];
+
+ else if (code == NE && GET_CODE (operands[3]) == CONST_INT
+ && rtx_equal_p (operands[5], operands[3]))
+ operands[5] = operands[2];
+
+ if (code == NE || code == EQ
+ || (extended_count (operands[2], DImode, unsignedp) >= 1
+ && extended_count (operands[3], DImode, unsignedp) >= 1))
+ {
+ if (GET_CODE (operands[3]) == CONST_INT)
+ operands[7] = gen_rtx (PLUS, DImode, operands[2],
+ GEN_INT (- INTVAL (operands[3])));
+ else
+ operands[7] = gen_rtx (MINUS, DImode, operands[2], operands[3]);
+
+ operands[8] = gen_rtx (code, VOIDmode, operands[6], const0_rtx);
+ }
+
+ else if (code == EQ || code == LE || code == LT
+ || code == LEU || code == LTU)
+ {
+ operands[7] = gen_rtx (code, DImode, operands[2], operands[3]);
+ operands[8] = gen_rtx (NE, VOIDmode, operands[6], const0_rtx);
+ }
+ else
+ {
+ operands[7] = gen_rtx (reverse_condition (code), DImode, operands[2],
+ operands[3]);
+ operands[8] = gen_rtx (EQ, VOIDmode, operands[6], const0_rtx);
+ }
+}")
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (if_then_else:DI
+ (match_operator 1 "comparison_operator"
+ [(match_operand:SI 2 "reg_or_0_operand" "")
+ (match_operand:SI 3 "reg_or_cint_operand" "")])
+ (match_operand:DI 4 "reg_or_8bit_operand" "")
+ (match_operand:DI 5 "reg_or_8bit_operand" "")))
+ (clobber (match_operand:DI 6 "register_operand" ""))]
+ "operands[3] != const0_rtx
+ && (GET_CODE (operands[1]) == EQ || GET_CODE (operands[1]) == NE)"
+ [(set (match_dup 6) (match_dup 7))
+ (set (match_dup 0)
+ (if_then_else:DI (match_dup 8) (match_dup 4) (match_dup 5)))]
+ "
+{ enum rtx_code code = GET_CODE (operands[1]);
+ int unsignedp = (code == GEU || code == LEU || code == GTU || code == LTU);
+ rtx tem;
+
+ if ((code != NE && code != EQ
+ && ! (extended_count (operands[2], DImode, unsignedp) >= 1
+ && extended_count (operands[3], DImode, unsignedp) >= 1)))
+ FAIL;
+
+ if (GET_CODE (operands[3]) == CONST_INT)
+ tem = gen_rtx (PLUS, SImode, operands[2],
+ GEN_INT (- INTVAL (operands[3])));
+ else
+ tem = gen_rtx (MINUS, SImode, operands[2], operands[3]);
+
+ operands[7] = gen_rtx (SIGN_EXTEND, DImode, tem);
+ operands[8] = gen_rtx (GET_CODE (operands[1]), VOIDmode, operands[6],
+ const0_rtx);
+}")
+
+(define_split
+ [(set (pc)
+ (if_then_else
+ (match_operator 1 "comparison_operator"
+ [(match_operand:DI 2 "reg_or_0_operand" "")
+ (match_operand:DI 3 "reg_or_cint_operand" "")])
+ (label_ref (match_operand 0 "" ""))
+ (pc)))
+ (clobber (match_operand:DI 4 "register_operand" ""))]
+ "operands[3] != const0_rtx"
+ [(set (match_dup 4) (match_dup 5))
+ (set (pc) (if_then_else (match_dup 6) (label_ref (match_dup 0)) (pc)))]
+ "
+{ enum rtx_code code = GET_CODE (operands[1]);
+ int unsignedp = (code == GEU || code == LEU || code == GTU || code == LTU);
+
+ if (code == NE || code == EQ
+ || (extended_count (operands[2], DImode, unsignedp) >= 1
+ && extended_count (operands[3], DImode, unsignedp) >= 1))
+ {
+ if (GET_CODE (operands[3]) == CONST_INT)
+ operands[5] = gen_rtx (PLUS, DImode, operands[2],
+ GEN_INT (- INTVAL (operands[3])));
+ else
+ operands[5] = gen_rtx (MINUS, DImode, operands[2], operands[3]);
+
+ operands[6] = gen_rtx (code, VOIDmode, operands[4], const0_rtx);
+ }
+
+ else if (code == EQ || code == LE || code == LT
+ || code == LEU || code == LTU)
+ {
+ operands[5] = gen_rtx (code, DImode, operands[2], operands[3]);
+ operands[6] = gen_rtx (NE, VOIDmode, operands[4], const0_rtx);
+ }
+ else
+ {
+ operands[5] = gen_rtx (reverse_condition (code), DImode, operands[2],
+ operands[3]);
+ operands[6] = gen_rtx (EQ, VOIDmode, operands[4], const0_rtx);
+ }
+}")
+
+(define_split
+ [(set (pc)
+ (if_then_else
+ (match_operator 1 "comparison_operator"
+ [(match_operand:SI 2 "reg_or_0_operand" "")
+ (match_operand:SI 3 "const_int_operand" "")])
+ (label_ref (match_operand 0 "" ""))
+ (pc)))
+ (clobber (match_operand:DI 4 "register_operand" ""))]
+ "operands[3] != const0_rtx
+ && (GET_CODE (operands[1]) == EQ || GET_CODE (operands[1]) == NE)"
+ [(set (match_dup 4) (match_dup 5))
+ (set (pc) (if_then_else (match_dup 6) (label_ref (match_dup 0)) (pc)))]
+ "
+{ rtx tem;
+
+ if (GET_CODE (operands[3]) == CONST_INT)
+ tem = gen_rtx (PLUS, SImode, operands[2],
+ GEN_INT (- INTVAL (operands[3])));
+ else
+ tem = gen_rtx (MINUS, SImode, operands[2], operands[3]);
+
+ operands[5] = gen_rtx (SIGN_EXTEND, DImode, tem);
+ operands[6] = gen_rtx (GET_CODE (operands[1]), VOIDmode,
+ operands[4], const0_rtx);
+}")
+
+;; We can convert such things as "a > 0xffff" to "t = a & ~ 0xffff; t != 0".
+;; This eliminates one, and sometimes two, insns when the AND can be done
+;; with a ZAP.
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (match_operator 1 "comparison_operator"
+ [(match_operand:DI 2 "register_operand" "")
+ (match_operand:DI 3 "const_int_operand" "")]))
+ (clobber (match_operand:DI 4 "register_operand" ""))]
+ "exact_log2 (INTVAL (operands[3]) + 1) >= 0
+ && (GET_CODE (operands[1]) == GTU
+ || GET_CODE (operands[1]) == LEU
+ || ((GET_CODE (operands[1]) == GT || GET_CODE (operands[1]) == LE)
+ && extended_count (operands[2], DImode, 1) > 0))"
+ [(set (match_dup 4) (and:DI (match_dup 2) (match_dup 5)))
+ (set (match_dup 0) (match_dup 6))]
+ "
+{
+ operands[5] = GEN_INT (~ INTVAL (operands[3]));
+ operands[6] = gen_rtx (((GET_CODE (operands[1]) == GTU
+ || GET_CODE (operands[1]) == GT)
+ ? NE : EQ),
+ DImode, operands[4], const0_rtx);
+}")
+
+;; Here are the CALL and unconditional branch insns. Calls on NT and OSF
+;; work differently, so we have different patterns for each.
+
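+;; On OSF an indirect call goes through $27 and the gp ($29) is reloaded
+;; after the call, so those patterns clobber both $26 and $27; on NT there
+;; is no gp to reload and only the return-address register $26 is clobbered.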
+(define_expand "call"
+ [(use (match_operand:DI 0 "" ""))
+ (use (match_operand 1 "" ""))]
+ ""
+ "
+{ if (WINDOWS_NT)
+ emit_call_insn (gen_call_nt (operands[0], operands[1]));
+ else
+ emit_call_insn (gen_call_osf (operands[0], operands[1]));
+
+ DONE;
+}")
+
+(define_expand "call_osf"
+ [(parallel [(call (mem:DI (match_operand 0 "" ""))
+ (match_operand 1 "" ""))
+ (clobber (reg:DI 27))
+ (clobber (reg:DI 26))])]
+ ""
+ "
+{ if (GET_CODE (operands[0]) != MEM)
+ abort ();
+
+ operands[0] = XEXP (operands[0], 0);
+
+ if (GET_CODE (operands[0]) != SYMBOL_REF
+ && ! (GET_CODE (operands[0]) == REG && REGNO (operands[0]) == 27))
+ {
+ rtx tem = gen_rtx (REG, DImode, 27);
+ emit_move_insn (tem, operands[0]);
+ operands[0] = tem;
+ }
+}")
+
+(define_expand "call_nt"
+ [(parallel [(call (mem:DI (match_operand:DI 0 "" ""))
+ (match_operand 1 "" ""))
+ (clobber (reg:DI 26))])]
+ ""
+ "
+{ if (GET_CODE (operands[0]) != MEM)
+ abort ();
+ operands[0] = XEXP (operands[0], 0);
+
+  if (GET_CODE (operands[0]) != SYMBOL_REF
+      && ! (GET_CODE (operands[0]) == REG && REGNO (operands[0]) == 27))
+    {
+      rtx tem = gen_rtx (REG, DImode, 27);
+      emit_move_insn (tem, operands[0]);
+      operands[0] = tem;
+ }
+}")
+
+(define_expand "call_value"
+ [(use (match_operand 0 "" ""))
+ (use (match_operand:DI 1 "" ""))
+ (use (match_operand 2 "" ""))]
+ ""
+ "
+{ if (WINDOWS_NT)
+ emit_call_insn (gen_call_value_nt (operands[0], operands[1], operands[2]));
+ else
+ emit_call_insn (gen_call_value_osf (operands[0], operands[1],
+ operands[2]));
+ DONE;
+}")
+
+(define_expand "call_value_osf"
+ [(parallel [(set (match_operand 0 "" "")
+ (call (mem:DI (match_operand 1 "" ""))
+ (match_operand 2 "" "")))
+ (clobber (reg:DI 27))
+ (clobber (reg:DI 26))])]
+ ""
+ "
+{ if (GET_CODE (operands[1]) != MEM)
+ abort ();
+
+ operands[1] = XEXP (operands[1], 0);
+
+ if (GET_CODE (operands[1]) != SYMBOL_REF
+ && ! (GET_CODE (operands[1]) == REG && REGNO (operands[1]) == 27))
+ {
+ rtx tem = gen_rtx (REG, DImode, 27);
+ emit_move_insn (tem, operands[1]);
+ operands[1] = tem;
+ }
+}")
+
+(define_expand "call_value_nt"
+ [(parallel [(set (match_operand 0 "" "")
+ (call (mem:DI (match_operand:DI 1 "" ""))
+ (match_operand 2 "" "")))
+ (clobber (reg:DI 26))])]
+ ""
+ "
+{ if (GET_CODE (operands[1]) != MEM)
+ abort ();
+
+ operands[1] = XEXP (operands[1], 0);
+ if (GET_CODE (operands[1]) != SYMBOL_REF
+ && ! (GET_CODE (operands[1]) == REG && REGNO (operands[1]) == 27))
+ {
+ rtx tem = gen_rtx (REG, DImode, 27);
+ emit_move_insn (tem, operands[1]);
+ operands[1] = tem;
+ }
+}")
+
+(define_insn ""
+ [(call (mem:DI (match_operand:DI 0 "call_operand" "r,R,i"))
+ (match_operand 1 "" ""))
+ (clobber (reg:DI 27))
+ (clobber (reg:DI 26))]
+ "! WINDOWS_NT"
+ "@
+ jsr $26,($27),0\;ldgp $29,0($26)
+ bsr $26,%0..ng
+ jsr $26,%0\;ldgp $29,0($26)"
+ [(set_attr "type" "jsr,jsr,ibr")])
+
+(define_insn ""
+ [(call (mem:DI (match_operand:DI 0 "call_operand" "r,i"))
+ (match_operand 1 "" ""))
+ (clobber (reg:DI 26))]
+ "WINDOWS_NT"
+ "@
+ jsr $26,(%0)
+ bsr $26,%0"
+ [(set_attr "type" "jsr")])
+
+(define_insn ""
+ [(set (match_operand 0 "register_operand" "=rf,rf,rf")
+ (call (mem:DI (match_operand:DI 1 "call_operand" "r,R,i"))
+ (match_operand 2 "" "")))
+ (clobber (reg:DI 27))
+ (clobber (reg:DI 26))]
+ "! WINDOWS_NT"
+ "@
+ jsr $26,($27),0\;ldgp $29,0($26)
+ bsr $26,%1..ng
+ jsr $26,%1\;ldgp $29,0($26)"
+ [(set_attr "type" "jsr,jsr,ibr")])
+
+(define_insn ""
+ [(set (match_operand 0 "register_operand" "=rf,rf")
+ (call (mem:DI (match_operand:DI 1 "call_operand" "r,i"))
+ (match_operand 2 "" "")))
+ (clobber (reg:DI 26))]
+ "WINDOWS_NT"
+ "@
+ jsr $26,(%1)
+ bsr $26,%1"
+ [(set_attr "type" "jsr")])
+
+;; Call subroutine returning any type.
+
+(define_expand "untyped_call"
+ [(parallel [(call (match_operand 0 "" "")
+ (const_int 0))
+ (match_operand 1 "" "")
+ (match_operand 2 "" "")])]
+ ""
+ "
+{
+ int i;
+
+ emit_call_insn (gen_call (operands[0], const0_rtx, NULL, const0_rtx));
+
+ for (i = 0; i < XVECLEN (operands[2], 0); i++)
+ {
+ rtx set = XVECEXP (operands[2], 0, i);
+ emit_move_insn (SET_DEST (set), SET_SRC (set));
+ }
+
+ /* The optimizer does not know that the call sets the function value
+ registers we stored in the result block. We avoid problems by
+ claiming that all hard registers are used and clobbered at this
+ point. */
+ emit_insn (gen_blockage ());
+
+ DONE;
+}")
+
+;; UNSPEC_VOLATILE is considered to use and clobber all hard registers and
+;; all of memory. This blocks insns from being moved across this point.
+
+(define_insn "blockage"
+ [(unspec_volatile [(const_int 0)] 1)]
+ ""
+ "")
+
+(define_insn "jump"
+ [(set (pc)
+ (label_ref (match_operand 0 "" "")))]
+ ""
+ "br $31,%l0"
+ [(set_attr "type" "ibr")])
+
+(define_insn "return"
+ [(return)]
+ "direct_return ()"
+ "ret $31,($26),1"
+ [(set_attr "type" "ibr")])
+
+(define_insn "indirect_jump"
+ [(set (pc) (match_operand:DI 0 "register_operand" "r"))]
+ ""
+ "jmp $31,(%0),0"
+ [(set_attr "type" "ibr")])
+
+(define_insn "nop"
+ [(const_int 0)]
+ ""
+ "bis $31,$31,$31"
+ [(set_attr "type" "iaddlog")])
+
+(define_expand "tablejump"
+ [(use (match_operand:SI 0 "register_operand" ""))
+ (use (match_operand:SI 1 "" ""))]
+ ""
+ "
+{
+ if (WINDOWS_NT)
+ emit_jump_insn (gen_tablejump_nt (operands[0], operands[1]));
+ else
+ emit_jump_insn (gen_tablejump_osf (operands[0], operands[1]));
+
+ DONE;
+}")
+
+(define_expand "tablejump_osf"
+ [(set (match_dup 3)
+ (sign_extend:DI (match_operand:SI 0 "register_operand" "")))
+ (parallel [(set (pc)
+ (plus:DI (match_dup 3)
+ (label_ref:DI (match_operand 1 "" ""))))
+ (clobber (match_scratch:DI 2 "=r"))])]
+ ""
+ "
+{ operands[3] = gen_reg_rtx (DImode); }")
+
+(define_expand "tablejump_nt"
+ [(set (match_dup 3)
+ (sign_extend:DI (match_operand:SI 0 "register_operand" "")))
+ (parallel [(set (pc)
+ (match_dup 3))
+ (use (label_ref (match_operand 1 "" "")))])]
+ ""
+ "
+{ operands[3] = gen_reg_rtx (DImode); }")
+
+(define_insn ""
+ [(set (pc)
+ (plus:DI (match_operand:DI 0 "register_operand" "r")
+ (label_ref:DI (match_operand 1 "" ""))))
+ (clobber (match_scratch:DI 2 "=r"))]
+ "! WINDOWS_NT && next_active_insn (insn) != 0
+ && GET_CODE (PATTERN (next_active_insn (insn))) == ADDR_DIFF_VEC
+ && PREV_INSN (next_active_insn (insn)) == operands[1]"
+ "*
+{ rtx best_label = 0;
+ rtx jump_table_insn = next_active_insn (operands[1]);
+
+ if (GET_CODE (jump_table_insn) == JUMP_INSN
+ && GET_CODE (PATTERN (jump_table_insn)) == ADDR_DIFF_VEC)
+ {
+ rtx jump_table = PATTERN (jump_table_insn);
+ int n_labels = XVECLEN (jump_table, 1);
+ int best_count = -1;
+ int i, j;
+
+ for (i = 0; i < n_labels; i++)
+ {
+ int count = 1;
+
+ for (j = i + 1; j < n_labels; j++)
+ if (XEXP (XVECEXP (jump_table, 1, i), 0)
+ == XEXP (XVECEXP (jump_table, 1, j), 0))
+ count++;
+
+ if (count > best_count)
+ best_count = count, best_label = XVECEXP (jump_table, 1, i);
+ }
+ }
+
+ if (best_label)
+ {
+ operands[3] = best_label;
+ return \"addq %0,$29,%2\;jmp $31,(%2),%3\";
+ }
+ else
+ return \"addq %0,$29,%2\;jmp $31,(%2),0\";
+}"
+ [(set_attr "type" "ibr")])
+
+(define_insn ""
+ [(set (pc)
+ (match_operand:DI 0 "register_operand" "r"))
+ (use (label_ref (match_operand 1 "" "")))]
+ "WINDOWS_NT && next_active_insn (insn) != 0
+ && GET_CODE (PATTERN (next_active_insn (insn))) == ADDR_DIFF_VEC
+ && PREV_INSN (next_active_insn (insn)) == operands[1]"
+ "*
+{ rtx best_label = 0;
+ rtx jump_table_insn = next_active_insn (operands[1]);
+
+ if (GET_CODE (jump_table_insn) == JUMP_INSN
+ && GET_CODE (PATTERN (jump_table_insn)) == ADDR_DIFF_VEC)
+ {
+ rtx jump_table = PATTERN (jump_table_insn);
+ int n_labels = XVECLEN (jump_table, 1);
+ int best_count = -1;
+ int i, j;
+
+ for (i = 0; i < n_labels; i++)
+ {
+ int count = 1;
+
+ for (j = i + 1; j < n_labels; j++)
+ if (XEXP (XVECEXP (jump_table, 1, i), 0)
+ == XEXP (XVECEXP (jump_table, 1, j), 0))
+ count++;
+
+ if (count > best_count)
+ best_count = count, best_label = XVECEXP (jump_table, 1, i);
+ }
+ }
+
+ if (best_label)
+ {
+ operands[2] = best_label;
+ return \"jmp $31,(%0),%2\";
+ }
+ else
+ return \"jmp $31,(%0),0\";
+}"
+ [(set_attr "type" "ibr")])
+
+;; Cache flush. Used by INITIALIZE_TRAMPOLINE. 0x86 is PAL_imb, but we don't
+;; want to have to include pal.h in our .s file.
+(define_insn ""
+ [(unspec_volatile [(const_int 0)] 0)]
+ ""
+ "call_pal 0x86")
+
+;; Finally, we have the basic data motion insns. The byte and word insns
+;; are done via define_expand. Start with the floating-point insns, since
+;; they are simpler.
+
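+;; Register-to-register moves are done with bis (logical OR) on the integer
+;; side and cpys (copy sign) on the floating side; since $31 and $f31
+;; always read as zero, the same instructions also load the constant zero.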
+(define_insn ""
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=r,r,m,f,f,f,m")
+ (match_operand:SF 1 "input_operand" "rG,m,rG,f,G,m,fG"))]
+ "register_operand (operands[0], SFmode)
+ || reg_or_fp0_operand (operands[1], SFmode)"
+ "@
+ bis %r1,%r1,%0
+ ldl %0,%1
+ stl %r1,%0
+ cpys %1,%1,%0
+ cpys $f31,$f31,%0
+ lds %0,%1
+ sts %R1,%0"
+ [(set_attr "type" "iaddlog,ld,st,fpop,fpop,ld,st")])
+
+(define_insn ""
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=r,r,m,f,f,f,m")
+ (match_operand:DF 1 "input_operand" "rG,m,rG,f,G,m,fG"))]
+ "register_operand (operands[0], DFmode)
+ || reg_or_fp0_operand (operands[1], DFmode)"
+ "@
+ bis %r1,%r1,%0
+ ldq %0,%1
+ stq %r1,%0
+ cpys %1,%1,%0
+ cpys $f31,$f31,%0
+ ldt %0,%1
+ stt %R1,%0"
+ [(set_attr "type" "iaddlog,ld,st,fpop,fpop,ld,st")])
+
+(define_expand "movsf"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "")
+ (match_operand:SF 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (GET_CODE (operands[0]) == MEM
+ && ! reg_or_fp0_operand (operands[1], SFmode))
+ operands[1] = force_reg (SFmode, operands[1]);
+}")
+
+(define_expand "movdf"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "")
+ (match_operand:DF 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (GET_CODE (operands[0]) == MEM
+ && ! reg_or_fp0_operand (operands[1], DFmode))
+ operands[1] = force_reg (DFmode, operands[1]);
+}")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r,r,r,r,m,f,f,f,m")
+ (match_operand:SI 1 "input_operand" "r,J,I,K,L,m,rJ,f,J,m,fG"))]
+ "! WINDOWS_NT && (register_operand (operands[0], SImode)
+ || reg_or_0_operand (operands[1], SImode))"
+ "@
+ bis %1,%1,%0
+ bis $31,$31,%0
+ bis $31,%1,%0
+ lda %0,%1
+ ldah %0,%h1
+ ldl %0,%1
+ stl %r1,%0
+ cpys %1,%1,%0
+ cpys $f31,$f31,%0
+ lds %0,%1
+ sts %R1,%0"
+ [(set_attr "type" "iaddlog,iaddlog,iaddlog,iaddlog,iaddlog,ld,st,fpop,fpop,ld,st")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r,r,r,r,r,m,f,f,f,m")
+ (match_operand:SI 1 "input_operand" "r,J,I,K,L,s,m,rJ,f,J,m,fG"))]
+ "WINDOWS_NT && (register_operand (operands[0], SImode)
+ || reg_or_0_operand (operands[1], SImode))"
+ "@
+ bis %1,%1,%0
+ bis $31,$31,%0
+ bis $31,%1,%0
+ lda %0,%1
+ ldah %0,%h1
+ lda %0,%1
+ ldl %0,%1
+ stl %r1,%0
+ cpys %1,%1,%0
+ cpys $f31,$f31,%0
+ lds %0,%1
+ sts %R1,%0"
+ [(set_attr "type" "iaddlog,iaddlog,iaddlog,iaddlog,iaddlog,ldsym,ld,st,fpop,fpop,ld,st")])
+
+(define_insn ""
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,r,r,f,f")
+ (match_operand:HI 1 "input_operand" "r,J,I,n,f,J"))]
+ "register_operand (operands[0], HImode)
+ || register_operand (operands[1], HImode)"
+ "@
+ bis %1,%1,%0
+ bis $31,$31,%0
+ bis $31,%1,%0
+ lda %0,%L1
+ cpys %1,%1,%0
+ cpys $f31,$f31,%0"
+ [(set_attr "type" "iaddlog,iaddlog,iaddlog,iaddlog,fpop,fpop")])
+
+(define_insn ""
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=r,r,r,r,f,f")
+ (match_operand:QI 1 "input_operand" "r,J,I,n,f,J"))]
+ "register_operand (operands[0], QImode)
+ || register_operand (operands[1], QImode)"
+ "@
+ bis %1,%1,%0
+ bis $31,$31,%0
+ bis $31,%1,%0
+ lda %0,%L1
+ cpys %1,%1,%0
+ cpys $f31,$f31,%0"
+ [(set_attr "type" "iaddlog,iaddlog,iaddlog,iaddlog,fpop,fpop")])
+
+;; We do two major things here: handle mem->mem and construct long
+;; constants.
+
+(define_expand "movsi"
+ [(set (match_operand:SI 0 "general_operand" "")
+ (match_operand:SI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (GET_CODE (operands[0]) == MEM
+ && ! reg_or_0_operand (operands[1], SImode))
+ operands[1] = force_reg (SImode, operands[1]);
+
+ if (! CONSTANT_P (operands[1]) || input_operand (operands[1], SImode))
+ ;
+ else if (GET_CODE (operands[1]) == CONST_INT)
+ {
+ operands[1]
+ = alpha_emit_set_const (operands[0], SImode, INTVAL (operands[1]), 3);
+ if (rtx_equal_p (operands[0], operands[1]))
+ DONE;
+ }
+}")
+
+;; Split a load of a large constant into the appropriate two-insn
+;; sequence.
+
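+;; A 32-bit constant that is not a valid operand for a single lda or ldah
+;; is typically built as an ldah that supplies the upper 16 bits followed
+;; by an lda that adds in the sign-extended low 16 bits; alpha_emit_set_const
+;; chooses the actual decomposition.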
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "const_int_operand" ""))]
+ "! add_operand (operands[1], SImode)"
+ [(set (match_dup 0) (match_dup 2))
+ (set (match_dup 0) (plus:SI (match_dup 0) (match_dup 3)))]
+ "
+{ rtx tem
+ = alpha_emit_set_const (operands[0], SImode, INTVAL (operands[1]), 2);
+
+ if (tem == operands[0])
+ DONE;
+ else
+ FAIL;
+}")
+
+(define_insn ""
+ [(set (match_operand:DI 0 "general_operand" "=r,r,r,r,r,r,r,m,f,f,f,Q")
+ (match_operand:DI 1 "input_operand" "r,J,I,K,L,s,m,rJ,f,J,Q,fG"))]
+ "register_operand (operands[0], DImode)
+ || reg_or_0_operand (operands[1], DImode)"
+ "@
+ bis %1,%1,%0
+ bis $31,$31,%0
+ bis $31,%1,%0
+ lda %0,%1
+ ldah %0,%h1
+ lda %0,%1
+ ldq%A1 %0,%1
+ stq%A0 %r1,%0
+ cpys %1,%1,%0
+ cpys $f31,$f31,%0
+ ldt %0,%1
+ stt %R1,%0"
+ [(set_attr "type" "iaddlog,iaddlog,iaddlog,iaddlog,iaddlog,ldsym,ld,st,fpop,fpop,ld,st")])
+
+;; We do three major things here: handle mem->mem, put 64-bit constants in
+;; memory, and construct long 32-bit constants.
+
+(define_expand "movdi"
+ [(set (match_operand:DI 0 "general_operand" "")
+ (match_operand:DI 1 "general_operand" ""))]
+ ""
+ "
+{
+ rtx tem;
+
+ if (GET_CODE (operands[0]) == MEM
+ && ! reg_or_0_operand (operands[1], DImode))
+ operands[1] = force_reg (DImode, operands[1]);
+
+ if (! CONSTANT_P (operands[1]) || input_operand (operands[1], DImode))
+ ;
+ else if (GET_CODE (operands[1]) == CONST_INT
+ && (tem = alpha_emit_set_const (operands[0], DImode,
+ INTVAL (operands[1]), 3)) != 0)
+ {
+ if (rtx_equal_p (tem, operands[0]))
+ DONE;
+ else
+ operands[1] = tem;
+ }
+ else if (CONSTANT_P (operands[1]))
+ {
+ operands[1] = force_const_mem (DImode, operands[1]);
+ if (reload_in_progress)
+ {
+ emit_move_insn (operands[0], XEXP (operands[1], 0));
+ operands[1] = copy_rtx (operands[1]);
+ XEXP (operands[1], 0) = operands[0];
+ }
+ else
+ operands[1] = validize_mem (operands[1]);
+ }
+ else
+ abort ();
+}")
+
+;; Split a load of a large constant into the appropriate two-insn
+;; sequence.
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (match_operand:DI 1 "const_int_operand" ""))]
+ "! add_operand (operands[1], DImode)"
+ [(set (match_dup 0) (match_dup 2))
+ (set (match_dup 0) (plus:DI (match_dup 0) (match_dup 3)))]
+ "
+{ rtx tem
+ = alpha_emit_set_const (operands[0], DImode, INTVAL (operands[1]), 2);
+
+ if (tem == operands[0])
+ DONE;
+ else
+ FAIL;
+}")
+
+;; These are the partial-word cases.
+;;
+;; First we have the code to load an aligned word. Operand 0 is the register
+;; in which to place the result. Its mode is QImode or HImode. Operand 1 is
+;; an SImode MEM at the low-order byte of the proper word. Operand 2 is the
+;; bit position within the word at which the value is located. Operand 3 is
+;; an SImode scratch register. If operand 0 is a hard register, operand 3
+;; may be the same register. It is allowed to conflict with operand 1 as well.
+
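+;; In other words, the enclosing aligned longword is loaded with an ldl and
+;; the byte or halfword is then pulled out of it at the given bit position,
+;; typically with an extbl or extwl.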
+(define_expand "aligned_loadqi"
+ [(set (match_operand:SI 3 "register_operand" "")
+ (match_operand:SI 1 "memory_operand" ""))
+ (set (subreg:DI (match_operand:QI 0 "register_operand" "") 0)
+ (zero_extract:DI (subreg:DI (match_dup 3) 0)
+ (const_int 8)
+ (match_operand:DI 2 "const_int_operand" "")))]
+
+ ""
+ "")
+
+(define_expand "aligned_loadhi"
+ [(set (match_operand:SI 3 "register_operand" "")
+ (match_operand:SI 1 "memory_operand" ""))
+ (set (subreg:DI (match_operand:HI 0 "register_operand" "") 0)
+ (zero_extract:DI (subreg:DI (match_dup 3) 0)
+ (const_int 16)
+ (match_operand:DI 2 "const_int_operand" "")))]
+
+ ""
+ "")
+
+;; Similar for unaligned loads. For QImode, we use the sequence from the
+;; Alpha Architecture manual. However, for HImode, we do not. HImode pointers
+;; are normally aligned on a two-byte boundary, so an HImode object cannot
+;; cross a longword boundary. We could use a sequence similar to that for
+;; QImode, but that would fail if the pointer was, in fact, not aligned.
+;; Instead, we clear bit 1 in the address and do an ldl. If the address was
+;; not, in fact, aligned, this will trap and the trap handler will do what is
+;; needed.
+;;
+;; Here operand 1 is the address. Operands 2 and 3 are temporaries, where
+;; operand 3 can overlap the input and output registers.
+
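+;; The QImode sequence below is the ldq_u/extbl sequence from the manual:
+;; load the quadword that contains the byte, ignoring the low address bits,
+;; and then extract the addressed byte from it.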
+(define_expand "unaligned_loadqi"
+ [(set (match_operand:DI 2 "register_operand" "")
+ (mem:DI (and:DI (match_operand:DI 1 "address_operand" "")
+ (const_int -8))))
+ (set (match_operand:DI 3 "register_operand" "")
+ (match_dup 1))
+ (set (subreg:DI (match_operand:QI 0 "register_operand" "") 0)
+ (zero_extract:DI (match_dup 2)
+ (const_int 8)
+ (ashift:DI (match_dup 3) (const_int 3))))]
+ ""
+ "")
+
+;; For this, the address must already be in a register. We also need two
+;; DImode temporaries, neither of which may overlap the input (and hence the
+;; output, since they might be the same register), but both of which may
+;; be the same.
+
+(define_expand "unaligned_loadhi"
+ [(set (match_operand:DI 2 "register_operand" "")
+ (and:DI (match_operand:DI 1 "register_operand" "")
+ (const_int -7)))
+ (set (match_operand:DI 3 "register_operand" "")
+ (mem:DI (match_dup 2)))
+ (set (match_operand:DI 4 "register_operand" "")
+ (and:DI (match_dup 1) (const_int -2)))
+ (set (subreg:DI (match_operand:HI 0 "register_operand" "") 0)
+ (zero_extract:DI (match_dup 3)
+ (const_int 16)
+ (ashift:DI (match_dup 4) (const_int 3))))]
+ ""
+ "")
+
+;; Storing an aligned byte or word requires two temporaries. Operand 0 is the
+;; aligned SImode MEM. Operand 1 is the register containing the
+;; byte or word to store. Operand 2 is the bit position within the word at
+;; which the value should be placed. Operands 3 and 4 are SImode temporaries.
+
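+;; This is a read-modify-write: load the aligned word, clear the field being
+;; stored with the mask computed below, shift the new value into position,
+;; OR the two together, and store the word back.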
+(define_expand "aligned_store"
+ [(set (match_operand:SI 3 "register_operand" "")
+ (match_operand:SI 0 "memory_operand" ""))
+ (set (subreg:DI (match_dup 3) 0)
+ (and:DI (subreg:DI (match_dup 3) 0) (match_dup 5)))
+ (set (subreg:DI (match_operand:SI 4 "register_operand" "") 0)
+ (ashift:DI (zero_extend:DI (match_operand 1 "register_operand" ""))
+ (match_operand:DI 2 "const_int_operand" "")))
+ (set (subreg:DI (match_dup 4) 0)
+ (ior:DI (subreg:DI (match_dup 4) 0) (subreg:DI (match_dup 3) 0)))
+ (set (match_dup 0) (match_dup 4))]
+ ""
+ "
+{ operands[5] = GEN_INT (~ (GET_MODE_MASK (GET_MODE (operands[1]))
+ << INTVAL (operands[2])));
+}")
+
+;; For the unaligned byte case, we use code similar to that in the
+;; Architecture book, but reordered to lower the number of registers
+;; required. Operand 0 is the address. Operand 1 is the data to store.
+;; Operands 2, 3, and 4 are DImode temporaries, where operands 2 and 4 may
+;; be the same temporary, if desired. If the address is in a register,
+;; operand 2 can be that register.
+
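+;; This is in effect the ldq_u/mskbl/insbl/stq_u sequence: the and with the
+;; shifted ~255 mask clears the target byte, the shifted zero_extend inserts
+;; the new byte, and the final store writes the merged quadword back.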
+(define_expand "unaligned_storeqi"
+ [(set (match_operand:DI 3 "register_operand" "")
+ (mem:DI (and:DI (match_operand:DI 0 "address_operand" "")
+ (const_int -8))))
+ (set (match_operand:DI 2 "register_operand" "")
+ (match_dup 0))
+ (set (match_dup 3)
+ (and:DI (not:DI (ashift:DI (const_int 255)
+ (ashift:DI (match_dup 2) (const_int 3))))
+ (match_dup 3)))
+ (set (match_operand:DI 4 "register_operand" "")
+ (ashift:DI (zero_extend:DI (match_operand:QI 1 "register_operand" ""))
+ (ashift:DI (match_dup 2) (const_int 3))))
+ (set (match_dup 4) (ior:DI (match_dup 4) (match_dup 3)))
+ (set (mem:DI (and:DI (match_dup 0) (const_int -8)))
+ (match_dup 4))]
+ ""
+ "")
+
+;; This is the code for storing into an unaligned short. It uses the same
+;; trick as loading from an unaligned short. It needs lots of temporaries.
+;; However, during reload, we only have two registers available. So we
+;; repeat code so that only two temporaries are needed. During RTL
+;; generation, we can use different pseudos for each temporary and CSE
+;; will remove the redundancies. During reload, we have to settle for
+;; what we get. Luckily, unaligned accesses of this kind produced during
+;; reload are quite rare.
+;;
+;; Operand 0 is the address of the memory location. Operand 1 contains the
+;; data to store. The rest of the operands are all temporaries, with
+;; various overlap possibilities during reload. See reload_outhi for
+;; details of this use.
+
+(define_expand "unaligned_storehi"
+ [(set (match_operand:DI 2 "register_operand" "")
+ (match_operand:DI 0 "address_operand" ""))
+ (set (match_operand:DI 3 "register_operand" "")
+ (and:DI (match_dup 2) (const_int -7)))
+ (set (match_operand:DI 4 "register_operand" "")
+ (mem:DI (match_dup 3)))
+ (set (match_operand:DI 10 "register_operand" "")
+ (and:DI (match_dup 2) (const_int -2)))
+ (set (match_operand:DI 5 "register_operand" "")
+ (and:DI (not:DI (ashift:DI (const_int 65535)
+ (ashift:DI (match_dup 10) (const_int 3))))
+ (match_dup 4)))
+ (set (match_operand:DI 6 "register_operand" "")
+ (ashift:DI (zero_extend:DI (match_operand:HI 1 "register_operand" ""))
+ (ashift:DI (match_dup 10) (const_int 3))))
+ (set (match_operand:DI 7 "register_operand" "")
+ (ior:DI (match_dup 5) (match_dup 6)))
+ (set (match_operand:DI 8 "register_operand" "") (match_dup 0))
+ (set (match_operand:DI 9 "register_operand" "")
+ (and:DI (match_dup 8) (const_int -7)))
+ (set (mem:DI (match_dup 9)) (match_dup 7))]
+ ""
+ "")
+
+;; Here are the define_expand's for QI and HI moves that use the above
+;; patterns. We have the normal sets, plus the ones that need scratch
+;; registers for reload.
+
+(define_expand "movqi"
+ [(set (match_operand:QI 0 "general_operand" "")
+ (match_operand:QI 1 "general_operand" ""))]
+ ""
+ "
+{ extern rtx get_unaligned_address ();
+
+ /* If the output is not a register, the input must be. */
+ if (GET_CODE (operands[0]) == MEM)
+ operands[1] = force_reg (QImode, operands[1]);
+
+ /* Handle four memory cases, unaligned and aligned for either the input
+ or the output. The only case where we can be called during reload is
+ for aligned loads; all other cases require temporaries. */
+
+ if (GET_CODE (operands[1]) == MEM
+ || (GET_CODE (operands[1]) == SUBREG
+ && GET_CODE (SUBREG_REG (operands[1])) == MEM)
+ || (reload_in_progress && GET_CODE (operands[1]) == REG
+ && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER)
+ || (reload_in_progress && GET_CODE (operands[1]) == SUBREG
+ && GET_CODE (SUBREG_REG (operands[1])) == REG
+ && REGNO (SUBREG_REG (operands[1])) >= FIRST_PSEUDO_REGISTER))
+ {
+ if (aligned_memory_operand (operands[1], QImode))
+ {
+ rtx aligned_mem, bitnum;
+ rtx scratch = (reload_in_progress
+ ? gen_rtx (REG, SImode, REGNO (operands[0]))
+ : gen_reg_rtx (SImode));
+
+ get_aligned_mem (operands[1], &aligned_mem, &bitnum);
+
+ emit_insn (gen_aligned_loadqi (operands[0], aligned_mem, bitnum,
+ scratch));
+ }
+ else
+ {
+ /* Don't pass these as parameters since that makes the generated
+ code depend on parameter evaluation order which will cause
+ bootstrap failures. */
+
+ rtx temp1 = gen_reg_rtx (DImode);
+ rtx temp2 = gen_reg_rtx (DImode);
+ rtx seq = gen_unaligned_loadqi (operands[0],
+ get_unaligned_address (operands[1]),
+ temp1, temp2);
+
+ alpha_set_memflags (seq, operands[1]);
+ emit_insn (seq);
+ }
+
+ DONE;
+ }
+
+ else if (GET_CODE (operands[0]) == MEM
+ || (GET_CODE (operands[0]) == SUBREG
+ && GET_CODE (SUBREG_REG (operands[0])) == MEM)
+ || (reload_in_progress && GET_CODE (operands[0]) == REG
+ && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER)
+ || (reload_in_progress && GET_CODE (operands[0]) == SUBREG
+ && GET_CODE (SUBREG_REG (operands[0])) == REG
+		   && REGNO (SUBREG_REG (operands[0])) >= FIRST_PSEUDO_REGISTER))
+ {
+ if (aligned_memory_operand (operands[0], QImode))
+ {
+ rtx aligned_mem, bitnum;
+ rtx temp1 = gen_reg_rtx (SImode);
+ rtx temp2 = gen_reg_rtx (SImode);
+
+ get_aligned_mem (operands[0], &aligned_mem, &bitnum);
+
+ emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
+ temp1, temp2));
+ }
+ else
+ {
+ rtx temp1 = gen_reg_rtx (DImode);
+ rtx temp2 = gen_reg_rtx (DImode);
+ rtx temp3 = gen_reg_rtx (DImode);
+ rtx seq = gen_unaligned_storeqi (get_unaligned_address (operands[0]),
+ operands[1], temp1, temp2, temp3);
+
+ alpha_set_memflags (seq, operands[0]);
+ emit_insn (seq);
+ }
+ DONE;
+ }
+}")
+
+(define_expand "movhi"
+ [(set (match_operand:HI 0 "general_operand" "")
+ (match_operand:HI 1 "general_operand" ""))]
+ ""
+ "
+{ extern rtx get_unaligned_address ();
+
+ /* If the output is not a register, the input must be. */
+ if (GET_CODE (operands[0]) == MEM)
+ operands[1] = force_reg (HImode, operands[1]);
+
+ /* Handle four memory cases, unaligned and aligned for either the input
+ or the output. The only case where we can be called during reload is
+ for aligned loads; all other cases require temporaries. */
+
+ if (GET_CODE (operands[1]) == MEM
+ || (GET_CODE (operands[1]) == SUBREG
+ && GET_CODE (SUBREG_REG (operands[1])) == MEM)
+ || (reload_in_progress && GET_CODE (operands[1]) == REG
+ && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER)
+ || (reload_in_progress && GET_CODE (operands[1]) == SUBREG
+ && GET_CODE (SUBREG_REG (operands[1])) == REG
+ && REGNO (SUBREG_REG (operands[1])) >= FIRST_PSEUDO_REGISTER))
+ {
+ if (aligned_memory_operand (operands[1], HImode))
+ {
+ rtx aligned_mem, bitnum;
+ rtx scratch = (reload_in_progress
+ ? gen_rtx (REG, SImode, REGNO (operands[0]))
+ : gen_reg_rtx (SImode));
+
+ get_aligned_mem (operands[1], &aligned_mem, &bitnum);
+
+ emit_insn (gen_aligned_loadhi (operands[0], aligned_mem, bitnum,
+ scratch));
+ }
+ else
+ {
+ rtx addr
+ = force_reg (DImode,
+ force_operand (get_unaligned_address (operands[1]),
+ NULL_RTX));
+ rtx scratch1 = gen_reg_rtx (DImode);
+ rtx scratch2 = gen_reg_rtx (DImode);
+ rtx scratch3 = gen_reg_rtx (DImode);
+
+ rtx seq = gen_unaligned_loadhi (operands[0], addr, scratch1,
+ scratch2, scratch3);
+
+ alpha_set_memflags (seq, operands[1]);
+ emit_insn (seq);
+ }
+
+ DONE;
+ }
+
+ else if (GET_CODE (operands[0]) == MEM
+ || (GET_CODE (operands[0]) == SUBREG
+ && GET_CODE (SUBREG_REG (operands[0])) == MEM)
+ || (reload_in_progress && GET_CODE (operands[0]) == REG
+ && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER)
+ || (reload_in_progress && GET_CODE (operands[0]) == SUBREG
+ && GET_CODE (SUBREG_REG (operands[0])) == REG
+		   && REGNO (SUBREG_REG (operands[0])) >= FIRST_PSEUDO_REGISTER))
+ {
+ if (aligned_memory_operand (operands[0], HImode))
+ {
+ rtx aligned_mem, bitnum;
+ rtx temp1 = gen_reg_rtx (SImode);
+ rtx temp2 = gen_reg_rtx (SImode);
+
+ get_aligned_mem (operands[0], &aligned_mem, &bitnum);
+
+ emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
+ temp1, temp2));
+ }
+ else
+ {
+ rtx temp1 = gen_reg_rtx (DImode);
+ rtx temp2 = gen_reg_rtx (DImode);
+ rtx temp3 = gen_reg_rtx (DImode);
+ rtx temp4 = gen_reg_rtx (DImode);
+ rtx temp5 = gen_reg_rtx (DImode);
+ rtx temp6 = gen_reg_rtx (DImode);
+ rtx temp7 = gen_reg_rtx (DImode);
+ rtx temp8 = gen_reg_rtx (DImode);
+ rtx temp9 = gen_reg_rtx (DImode);
+
+ rtx seq = gen_unaligned_storehi (get_unaligned_address (operands[0]),
+					  operands[1], temp1, temp2, temp3,
+					  temp4, temp5, temp6, temp7,
+ temp8, temp9);
+
+ alpha_set_memflags (seq, operands[0]);
+ emit_insn (seq);
+ }
+
+ DONE;
+ }
+}")
+
+;; Here are the versions for reload. Note that in the unaligned cases
+;; we know that the operand must not be a pseudo-register because stack
+;; slots are always aligned references.
+
+(define_expand "reload_inqi"
+ [(parallel [(match_operand:QI 0 "register_operand" "=r")
+ (match_operand:QI 1 "unaligned_memory_operand" "m")
+ (match_operand:TI 2 "register_operand" "=&r")])]
+ ""
+ "
+{ extern rtx get_unaligned_address ();
+ rtx addr = get_unaligned_address (operands[1]);
+ /* It is possible that one of the registers we got for operands[2]
+ might coincide with that of operands[0] (which is why we made
+ it TImode). Pick the other one to use as our scratch. */
+ rtx scratch = gen_rtx (REG, DImode,
+ REGNO (operands[0]) == REGNO (operands[2])
+ ? REGNO (operands[2]) + 1 : REGNO (operands[2]));
+ rtx seq = gen_unaligned_loadqi (operands[0], addr, scratch,
+ gen_rtx (REG, DImode, REGNO (operands[0])));
+
+ alpha_set_memflags (seq, operands[1]);
+ emit_insn (seq);
+ DONE;
+}")
+
+(define_expand "reload_inhi"
+ [(parallel [(match_operand:HI 0 "register_operand" "=r")
+ (match_operand:HI 1 "unaligned_memory_operand" "m")
+ (match_operand:TI 2 "register_operand" "=&r")])]
+ ""
+ "
+{ extern rtx get_unaligned_address ();
+ rtx addr = get_unaligned_address (operands[1]);
+ rtx scratch1 = gen_rtx (REG, DImode, REGNO (operands[2]));
+ rtx scratch2 = gen_rtx (REG, DImode, REGNO (operands[2]) + 1);
+ rtx seq;
+
+ if (GET_CODE (addr) != REG)
+ {
+ emit_insn (gen_rtx (SET, VOIDmode, scratch2, addr));
+ addr = scratch2;
+ }
+
+ seq = gen_unaligned_loadhi (operands[0], addr, scratch1, scratch1, scratch2);
+ alpha_set_memflags (seq, operands[1]);
+ emit_insn (seq);
+ DONE;
+}")
+
+(define_expand "reload_outqi"
+ [(parallel [(match_operand:QI 0 "any_memory_operand" "=m")
+ (match_operand:QI 1 "register_operand" "r")
+ (match_operand:TI 2 "register_operand" "=&r")])]
+ ""
+ "
+{ extern rtx get_unaligned_address ();
+
+ if (aligned_memory_operand (operands[0], QImode))
+ {
+ rtx aligned_mem, bitnum;
+
+ get_aligned_mem (operands[0], &aligned_mem, &bitnum);
+
+ emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
+ gen_rtx (REG, SImode, REGNO (operands[2])),
+ gen_rtx (REG, SImode,
+ REGNO (operands[2]) + 1)));
+ }
+ else
+ {
+ rtx addr = get_unaligned_address (operands[0]);
+ rtx scratch1 = gen_rtx (REG, DImode, REGNO (operands[2]));
+ rtx scratch2 = gen_rtx (REG, DImode, REGNO (operands[2]) + 1);
+ rtx scratch3 = scratch1;
+ rtx seq;
+
+ if (GET_CODE (addr) == REG)
+ scratch1 = addr;
+
+ seq = gen_unaligned_storeqi (addr, operands[1], scratch1,
+ scratch2, scratch3);
+ alpha_set_memflags (seq, operands[0]);
+ emit_insn (seq);
+ }
+
+ DONE;
+}")
+
+(define_expand "reload_outhi"
+ [(parallel [(match_operand:HI 0 "any_memory_operand" "=m")
+ (match_operand:HI 1 "register_operand" "r")
+ (match_operand:TI 2 "register_operand" "=&r")])]
+ ""
+ "
+{ extern rtx get_unaligned_address ();
+
+ if (aligned_memory_operand (operands[0], HImode))
+ {
+ rtx aligned_mem, bitnum;
+
+ get_aligned_mem (operands[0], &aligned_mem, &bitnum);
+
+ emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
+ gen_rtx (REG, SImode, REGNO (operands[2])),
+ gen_rtx (REG, SImode,
+ REGNO (operands[2]) + 1)));
+ }
+ else
+ {
+ rtx addr = get_unaligned_address (operands[0]);
+ rtx scratch1 = gen_rtx (REG, DImode, REGNO (operands[2]));
+ rtx scratch2 = gen_rtx (REG, DImode, REGNO (operands[2]) + 1);
+ rtx scratch_a = GET_CODE (addr) == REG ? addr : scratch1;
+ rtx seq;
+
+ seq = gen_unaligned_storehi (addr, operands[1], scratch_a,
+ scratch2, scratch2, scratch2,
+ scratch1, scratch2, scratch_a,
+ scratch1, scratch_a);
+ alpha_set_memflags (seq, operands[0]);
+ emit_insn (seq);
+ }
+
+ DONE;
+}")
+
+;; Subroutine of stack space allocation. Perform a stack probe.
+(define_expand "probe_stack"
+ [(set (match_dup 1) (match_operand:DI 0 "const_int_operand" ""))]
+ ""
+ "
+{
+ operands[1] = gen_rtx (MEM, DImode, plus_constant (stack_pointer_rtx,
+ INTVAL (operands[0])));
+ MEM_VOLATILE_P (operands[1]) = 1;
+
+ operands[0] = const0_rtx;
+}")
+
+;; This is how we allocate stack space. If we are allocating a
+;; constant amount of space and we know it is less than 4096
+;; bytes, we need do nothing.
+;;
+;; If it is more than 4096 bytes, we need to probe the stack
+;; periodically.
+(define_expand "allocate_stack"
+ [(set (reg:DI 30)
+ (plus:DI (reg:DI 30)
+ (match_operand:DI 0 "reg_or_cint_operand" "")))]
+ ""
+ "
+{
+ if (GET_CODE (operands[0]) == CONST_INT
+ && INTVAL (operands[0]) < 32768)
+ {
+ if (INTVAL (operands[0]) >= 4096)
+ {
+ /* We do this the same way as in the prologue and generate explicit
+ probes. Then we update the stack by the constant. */
+
+ int probed = 4096;
+
+ emit_insn (gen_probe_stack (GEN_INT (- probed)));
+ while (probed + 8192 < INTVAL (operands[0]))
+ emit_insn (gen_probe_stack (GEN_INT (- (probed += 8192))));
+
+ if (probed + 4096 < INTVAL (operands[0]))
+	  emit_insn (gen_probe_stack (GEN_INT (- INTVAL (operands[0]))));
+ }
+
+ operands[0] = GEN_INT (- INTVAL (operands[0]));
+ }
+ else
+ {
+ rtx out_label = 0;
+ rtx loop_label = gen_label_rtx ();
+ rtx want = gen_reg_rtx (Pmode);
+ rtx tmp = gen_reg_rtx (Pmode);
+ rtx memref;
+
+ emit_insn (gen_subdi3 (want, stack_pointer_rtx,
+ force_reg (Pmode, operands[0])));
+ emit_insn (gen_adddi3 (tmp, stack_pointer_rtx, GEN_INT (-4096)));
+
+ if (GET_CODE (operands[0]) != CONST_INT)
+ {
+ out_label = gen_label_rtx ();
+ emit_insn (gen_cmpdi (want, tmp));
+ emit_jump_insn (gen_bgeu (out_label));
+ }
+
+ emit_label (loop_label);
+ memref = gen_rtx (MEM, DImode, tmp);
+ MEM_VOLATILE_P (memref) = 1;
+ emit_move_insn (memref, const0_rtx);
+      emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (-8192)));
+ emit_insn (gen_cmpdi (tmp, want));
+ emit_jump_insn (gen_bgtu (loop_label));
+ memref = gen_rtx (MEM, DImode, want);
+ MEM_VOLATILE_P (memref) = 1;
+ emit_move_insn (memref, const0_rtx);
+
+ if (out_label)
+ emit_label (out_label);
+
+ emit_move_insn (stack_pointer_rtx, want);
+
+ DONE;
+ }
+}")
diff --git a/contrib/gcc/config/alpha/elf.h b/contrib/gcc/config/alpha/elf.h
new file mode 100644
index 0000000..82f0410
--- /dev/null
+++ b/contrib/gcc/config/alpha/elf.h
@@ -0,0 +1,524 @@
+/* Definitions of target machine for GNU compiler, for DEC Alpha w/ELF.
+ Copyright (C) 1996 Free Software Foundation, Inc.
+ Contributed by Richard Henderson (rth@tamu.edu).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* This is used on Alpha platforms that use the ELF format.
+   Currently only Linux uses this. */
+
+#if 0
+#include "alpha/linux.h"
+#endif
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (Alpha Linux/ELF)");
+
+#undef OBJECT_FORMAT_COFF
+#undef EXTENDED_COFF
+#define OBJECT_FORMAT_ELF
+
+#define SDB_DEBUGGING_INFO
+
+#undef ASM_FINAL_SPEC
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "\
+-D__alpha -D__alpha__ -D__linux__ -D__linux -D_LONGLONG -Dlinux -Dunix \
+-Asystem(linux) -Acpu(alpha) -Amachine(alpha) -D__ELF__"
+
+#undef LINK_SPEC
+#define LINK_SPEC "-m elf64alpha -G 8 %{O*:-O3} %{!O*:-O1} \
+ %{shared:-shared} \
+ %{!shared: \
+ %{!static: \
+ %{rdynamic:-export-dynamic} \
+ %{!dynamic-linker:-dynamic-linker /lib/ld.so.1}} \
+ %{static:-static}}"
+
+/* Output at beginning of assembler file. */
+
+#undef ASM_FILE_START
+#define ASM_FILE_START(FILE) \
+{ \
+ alpha_write_verstamp (FILE); \
+ output_file_directive (FILE, main_input_filename); \
+ fprintf (FILE, "\t.version\t\"01.01\"\n"); \
+ fprintf (FILE, "\t.set noat\n"); \
+}
+
+#define ASM_OUTPUT_SOURCE_LINE(STREAM, LINE) \
+ alpha_output_lineno (STREAM, LINE)
+extern void alpha_output_lineno ();
+
+extern void output_file_directive ();
+
+/* Attach a special .ident directive to the end of the file to identify
+ the version of GCC which compiled this code. The format of the
+ .ident string is patterned after the ones produced by native svr4
+ C compilers. */
+
+#define IDENT_ASM_OP ".ident"
+
+#ifdef IDENTIFY_WITH_IDENT
+#define ASM_IDENTIFY_GCC(FILE) /* nothing */
+#define ASM_IDENTIFY_LANGUAGE(FILE) \
+ fprintf(FILE, "\t%s \"GCC (%s) %s\"\n", IDENT_ASM_OP, \
+ lang_identify(), version_string)
+#else
+#define ASM_FILE_END(FILE) \
+do { \
+ fprintf ((FILE), "\t%s\t\"GCC: (GNU) %s\"\n", \
+ IDENT_ASM_OP, version_string); \
+ } while (0)
+#endif
+
+/* Allow #sccs in preprocessor. */
+
+#define SCCS_DIRECTIVE
+
+/* Output #ident as a .ident. */
+
+#define ASM_OUTPUT_IDENT(FILE, NAME) \
+ fprintf (FILE, "\t%s\t\"%s\"\n", IDENT_ASM_OP, NAME);
+
+/* This is how to allocate empty space in some section. The .zero
+ pseudo-op is used for this on most svr4 assemblers. */
+
+#define SKIP_ASM_OP ".zero"
+
+#undef ASM_OUTPUT_SKIP
+#define ASM_OUTPUT_SKIP(FILE,SIZE) \
+ fprintf (FILE, "\t%s\t%u\n", SKIP_ASM_OP, (SIZE))
+
+/* Output the label which precedes a jumptable. Note that for all svr4
+ systems where we actually generate jumptables (which is to say every
+ svr4 target except i386, where we use casesi instead) we put the jump-
+ tables into the .rodata section and since other stuff could have been
+ put into the .rodata section prior to any given jumptable, we have to
+   make sure that the location counter for the .rodata section gets
+   properly re-aligned prior to the actual beginning of the jump table. */
+
+#define ALIGN_ASM_OP ".align"
+
+#ifndef ASM_OUTPUT_BEFORE_CASE_LABEL
+#define ASM_OUTPUT_BEFORE_CASE_LABEL(FILE,PREFIX,NUM,TABLE) \
+ ASM_OUTPUT_ALIGN ((FILE), 2);
+#endif
+
+#undef ASM_OUTPUT_CASE_LABEL
+#define ASM_OUTPUT_CASE_LABEL(FILE,PREFIX,NUM,JUMPTABLE) \
+ do { \
+ ASM_OUTPUT_BEFORE_CASE_LABEL (FILE, PREFIX, NUM, JUMPTABLE) \
+ ASM_OUTPUT_INTERNAL_LABEL (FILE, PREFIX, NUM); \
+ } while (0)
+
+/* The standard SVR4 assembler seems to require that certain builtin
+ library routines (e.g. .udiv) be explicitly declared as .globl
+ in each assembly file where they are referenced. */
+
+#define ASM_OUTPUT_EXTERNAL_LIBCALL(FILE, FUN) \
+ ASM_GLOBALIZE_LABEL (FILE, XSTR (FUN, 0))
+
+/* This says how to output assembler code to declare an
+ uninitialized external linkage data object. Under SVR4,
+ the linker seems to want the alignment of data objects
+ to depend on their types. We do exactly that here. */
+
+#define COMMON_ASM_OP ".comm"
+
+#undef ASM_OUTPUT_ALIGNED_COMMON
+#define ASM_OUTPUT_ALIGNED_COMMON(FILE, NAME, SIZE, ALIGN) \
+do { \
+ fprintf ((FILE), "\t%s\t", COMMON_ASM_OP); \
+ assemble_name ((FILE), (NAME)); \
+ fprintf ((FILE), ",%u,%u\n", (SIZE), (ALIGN) / BITS_PER_UNIT); \
+} while (0)
+
+/* This says how to output assembler code to declare an
+ uninitialized internal linkage data object. Under SVR4,
+ the linker seems to want the alignment of data objects
+ to depend on their types. We do exactly that here. */
+
+#define LOCAL_ASM_OP ".local"
+
+#undef ASM_OUTPUT_ALIGNED_LOCAL
+#define ASM_OUTPUT_ALIGNED_LOCAL(FILE, NAME, SIZE, ALIGN) \
+do { \
+ fprintf ((FILE), "\t%s\t", LOCAL_ASM_OP); \
+ assemble_name ((FILE), (NAME)); \
+ fprintf ((FILE), "\n"); \
+ ASM_OUTPUT_ALIGNED_COMMON (FILE, NAME, SIZE, ALIGN); \
+} while (0)
+
+/* This is the pseudo-op used to generate a 64-bit word of data with a
+ specific value in some section. */
+
+#define INT_ASM_OP ".quad"
+
+/* This is the pseudo-op used to generate a contiguous sequence of byte
+ values from a double-quoted string WITHOUT HAVING A TERMINATING NUL
+ AUTOMATICALLY APPENDED. This is the same for most svr4 assemblers. */
+
+#undef ASCII_DATA_ASM_OP
+#define ASCII_DATA_ASM_OP ".ascii"
+
+/* Support const sections and the ctors and dtors sections for g++.
+ Note that there appears to be two different ways to support const
+ sections at the moment. You can either #define the symbol
+ READONLY_DATA_SECTION (giving it some code which switches to the
+ readonly data section) or else you can #define the symbols
+ EXTRA_SECTIONS, EXTRA_SECTION_FUNCTIONS, SELECT_SECTION, and
+ SELECT_RTX_SECTION. We do both here just to be on the safe side. */
+
+#define USE_CONST_SECTION 1
+
+#define CONST_SECTION_ASM_OP ".section\t.rodata"
+
+/* Define the pseudo-ops used to switch to the .ctors and .dtors sections.
+
+ Note that we want to give these sections the SHF_WRITE attribute
+ because these sections will actually contain data (i.e. tables of
+ addresses of functions in the current root executable or shared library
+ file) and, in the case of a shared library, the relocatable addresses
+ will have to be properly resolved/relocated (and then written into) by
+ the dynamic linker when it actually attaches the given shared library
+ to the executing process. (Note that on SVR4, you may wish to use the
+ `-z text' option to the ELF linker, when building a shared library, as
+ an additional check that you are doing everything right. But if you do
+ use the `-z text' option when building a shared library, you will get
+ errors unless the .ctors and .dtors sections are marked as writable
+ via the SHF_WRITE attribute.) */
+
+#define CTORS_SECTION_ASM_OP ".section\t.ctors,\"aw\""
+#define DTORS_SECTION_ASM_OP ".section\t.dtors,\"aw\""
+
+/* On svr4, we *do* have support for the .init and .fini sections, and we
+ can put stuff in there to be executed before and after `main'. We let
+ crtstuff.c and other files know this by defining the following symbols.
+ The definitions say how to change sections to the .init and .fini
+ sections. This is the same for all known svr4 assemblers. */
+
+#define INIT_SECTION_ASM_OP ".section\t.init"
+#define FINI_SECTION_ASM_OP ".section\t.fini"
+
+/* Support non-common, uninitialized data in the .bss section. */
+
+#define BSS_SECTION_ASM_OP ".section\t.bss"
+
+/* A default list of other sections which we might be "in" at any given
+ time. For targets that use additional sections (e.g. .tdesc) you
+ should override this definition in the target-specific file which
+ includes this file. */
+
+#undef EXTRA_SECTIONS
+#define EXTRA_SECTIONS in_const, in_ctors, in_dtors, in_bss
+
+/* A default list of extra section function definitions. For targets
+ that use additional sections (e.g. .tdesc) you should override this
+ definition in the target-specific file which includes this file. */
+
+#undef EXTRA_SECTION_FUNCTIONS
+#define EXTRA_SECTION_FUNCTIONS \
+ CONST_SECTION_FUNCTION \
+ CTORS_SECTION_FUNCTION \
+ DTORS_SECTION_FUNCTION \
+ BSS_SECTION_FUNCTION
+
+#undef READONLY_DATA_SECTION
+#define READONLY_DATA_SECTION() const_section ()
+
+extern void text_section ();
+
+#define CONST_SECTION_FUNCTION \
+void \
+const_section () \
+{ \
+ if (!USE_CONST_SECTION) \
+ text_section(); \
+ else if (in_section != in_const) \
+ { \
+ fprintf (asm_out_file, "%s\n", CONST_SECTION_ASM_OP); \
+ in_section = in_const; \
+ } \
+}
+
+#define CTORS_SECTION_FUNCTION \
+void \
+ctors_section () \
+{ \
+ if (in_section != in_ctors) \
+ { \
+ fprintf (asm_out_file, "%s\n", CTORS_SECTION_ASM_OP); \
+ in_section = in_ctors; \
+ } \
+}
+
+#define DTORS_SECTION_FUNCTION \
+void \
+dtors_section () \
+{ \
+ if (in_section != in_dtors) \
+ { \
+ fprintf (asm_out_file, "%s\n", DTORS_SECTION_ASM_OP); \
+ in_section = in_dtors; \
+ } \
+}
+
+#define BSS_SECTION_FUNCTION \
+void \
+bss_section () \
+{ \
+ if (in_section != in_bss) \
+ { \
+ fprintf (asm_out_file, "%s\n", BSS_SECTION_ASM_OP); \
+ in_section = in_bss; \
+ } \
+}
+
+
+/* Switch into a generic section.
+ This is currently only used to support section attributes.
+
+ We make the section read-only and executable for a function decl,
+ read-only for a const data decl, and writable for a non-const data decl. */
+#define ASM_OUTPUT_SECTION_NAME(FILE, DECL, NAME) \
+ fprintf (FILE, ".section\t%s,\"%s\",@progbits\n", NAME, \
+ (DECL) && TREE_CODE (DECL) == FUNCTION_DECL ? "ax" : \
+ (DECL) && TREE_READONLY (DECL) ? "a" : "aw")
+
+
+/* A C statement (sans semicolon) to output an element in the table of
+ global constructors. */
+#define ASM_OUTPUT_CONSTRUCTOR(FILE,NAME) \
+ do { \
+ ctors_section (); \
+ fprintf (FILE, "\t%s\t ", INT_ASM_OP); \
+ assemble_name (FILE, NAME); \
+ fprintf (FILE, "\n"); \
+ } while (0)
+
+/* A C statement (sans semicolon) to output an element in the table of
+ global destructors. */
+#define ASM_OUTPUT_DESTRUCTOR(FILE,NAME) \
+ do { \
+ dtors_section (); \
+ fprintf (FILE, "\t%s\t ", INT_ASM_OP); \
+ assemble_name (FILE, NAME); \
+ fprintf (FILE, "\n"); \
+ } while (0)
+
+/* A C statement or statements to switch to the appropriate
+ section for output of DECL. DECL is either a `VAR_DECL' node
+ or a constant of some sort. RELOC indicates whether forming
+ the initial value of DECL requires link-time relocations. */
+
+#define SELECT_SECTION(DECL,RELOC) \
+{ \
+ if (TREE_CODE (DECL) == STRING_CST) \
+ { \
+ if (! flag_writable_strings) \
+ const_section (); \
+ else \
+ data_section (); \
+ } \
+ else if (TREE_CODE (DECL) == VAR_DECL) \
+ { \
+ if ((flag_pic && RELOC) \
+ || !TREE_READONLY (DECL) || TREE_SIDE_EFFECTS (DECL) \
+ || !DECL_INITIAL (DECL) \
+ || (DECL_INITIAL (DECL) != error_mark_node \
+ && !TREE_CONSTANT (DECL_INITIAL (DECL)))) \
+ { \
+ if (DECL_COMMON (DECL) \
+ && !DECL_INITIAL (DECL)) \
+ /* || DECL_INITIAL (DECL) == error_mark_node)) */ \
+ bss_section(); \
+ else \
+ data_section (); \
+ } \
+ else \
+ const_section (); \
+ } \
+ else \
+ const_section (); \
+}
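As a rough illustration of where SELECT_SECTION above places typical file-scope C objects (assuming no -fpic relocations and -fwritable-strings not given; a sketch, not compiler output):

    /* Illustrative only: expected placement under the SELECT_SECTION
       macro above.  */
    const char greeting[] = "hello";        /* readonly, constant init -> .rodata */
    int counter = 1;                        /* writable, initialized   -> .data   */
    int scratch;                            /* common, no initializer  -> .bss
                                               (or emitted as a .comm symbol)     */
    static const int primes[3] = {2, 3, 5}; /* readonly, constant init -> .rodata */
    char name[8] = "ed";                    /* writable, initialized   -> .data   */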
+
+/* A C statement or statements to switch to the appropriate
+ section for output of RTX in mode MODE. RTX is some kind
+ of constant in RTL. The argument MODE is redundant except
+ in the case of a `const_int' rtx. Currently, these always
+ go into the const section. */
+
+#undef SELECT_RTX_SECTION
+#define SELECT_RTX_SECTION(MODE,RTX) const_section()
+
+/* Define the strings used for the special svr4 .type and .size directives.
+ These strings generally do not vary from one system running svr4 to
+ another, but if a given system (e.g. m88k running svr) needs to use
+ different pseudo-op names for these, they may be overridden in the
+ file which includes this one. */
+
+#define TYPE_ASM_OP ".type"
+#define SIZE_ASM_OP ".size"
+
+/* This is how we tell the assembler that a symbol is weak. */
+
+#define ASM_WEAKEN_LABEL(FILE,NAME) \
+ do { fputs ("\t.weak\t", FILE); assemble_name (FILE, NAME); \
+ fputc ('\n', FILE); } while (0)
+
+/* This is how we tell the assembler that two symbols have the same value. */
+
+#define ASM_OUTPUT_DEF(FILE,NAME1,NAME2) \
+ do { assemble_name(FILE, NAME1); \
+ fputs(" = ", FILE); \
+ assemble_name(FILE, NAME2); \
+ fputc('\n', FILE); } while (0)
+
+/* The following macro defines the format used to output the second
+ operand of the .type assembler directive. Different svr4 assemblers
+ expect various different forms for this operand. The one given here
+ is just a default. You may need to override it in your machine-
+ specific tm.h file (depending upon the particulars of your assembler). */
+
+#define TYPE_OPERAND_FMT "@%s"
+
+/* Write the extra assembler code needed to declare a function's result.
+ Most svr4 assemblers don't require any special declaration of the
+ result value, but there are exceptions. */
+
+#ifndef ASM_DECLARE_RESULT
+#define ASM_DECLARE_RESULT(FILE, RESULT)
+#endif
+
+/* These macros generate the special .type and .size directives which
+ are used to set the corresponding fields of the linker symbol table
+ entries in an ELF object file under SVR4. These macros also output
+ the starting labels for the relevant functions/objects. */
+
+/* Write the extra assembler code needed to declare an object properly. */
+
+#define ASM_DECLARE_OBJECT_NAME(FILE, NAME, DECL) \
+ do { \
+ fprintf (FILE, "\t%s\t ", TYPE_ASM_OP); \
+ assemble_name (FILE, NAME); \
+ putc (',', FILE); \
+ fprintf (FILE, TYPE_OPERAND_FMT, "object"); \
+ putc ('\n', FILE); \
+ size_directive_output = 0; \
+ if (!flag_inhibit_size_directive && DECL_SIZE (DECL)) \
+ { \
+ size_directive_output = 1; \
+ fprintf (FILE, "\t%s\t ", SIZE_ASM_OP); \
+ assemble_name (FILE, NAME); \
+ fprintf (FILE, ",%d\n", int_size_in_bytes (TREE_TYPE (DECL))); \
+ } \
+ ASM_OUTPUT_LABEL(FILE, NAME); \
+ } while (0)
+
+/* Output the size directive for a decl in rest_of_decl_compilation
+ in the case where we did not do so before the initializer.
+ Once we find the error_mark_node, we know that the value of
+ size_directive_output was set
+ by ASM_DECLARE_OBJECT_NAME when it was run for the same decl. */
+
+#define ASM_FINISH_DECLARE_OBJECT(FILE, DECL, TOP_LEVEL, AT_END) \
+do { \
+ char *name = XSTR (XEXP (DECL_RTL (DECL), 0), 0); \
+ if (!flag_inhibit_size_directive && DECL_SIZE (DECL) \
+ && ! AT_END && TOP_LEVEL \
+ && DECL_INITIAL (DECL) == error_mark_node \
+ && !size_directive_output) \
+ { \
+ size_directive_output = 1; \
+ fprintf (FILE, "\t%s\t ", SIZE_ASM_OP); \
+ assemble_name (FILE, name); \
+ fprintf (FILE, ",%d\n", int_size_in_bytes (TREE_TYPE (DECL))); \
+ } \
+ } while (0)
+
+/* A table of byte codes used by the ASM_OUTPUT_ASCII and
+ ASM_OUTPUT_LIMITED_STRING macros. Each byte in the table
+ corresponds to a particular byte value [0..255]. For any
+ given byte value, if the value in the corresponding table
+ position is zero, the given character can be output directly.
+ If the table value is 1, the byte must be output as a \ooo
+   octal escape.  If the table value is anything else, then the
+ byte value should be output as a \ followed by the value
+ in the table. Note that we can use standard UN*X escape
+ sequences for many control characters, but we don't use
+ \a to represent BEL because some svr4 assemblers (e.g. on
+ the i386) don't know about that. Also, we don't use \v
+   since some versions of gas, such as 2.2, did not accept it. */
+
+#define ESCAPES \
+"\1\1\1\1\1\1\1\1btn\1fr\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\
+\0\0\"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\\\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\1\
+\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\
+\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\
+\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\
+\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1"
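To make the encoding concrete, here is a small sketch (not the actual ASM_OUTPUT_ASCII implementation) of how a 256-entry table like ESCAPES is typically consulted when emitting a double-quoted string:

    /* Illustrative sketch only: emit LEN bytes of S as a .ascii directive,
       using a 256-entry escape table in the ESCAPES format above.  */
    #include <stdio.h>

    static void
    output_escaped_ascii (FILE *f, const unsigned char *s, int len,
                          const char *escapes)
    {
      int i;

      fprintf (f, "\t.ascii\t\"");
      for (i = 0; i < len; i++)
        {
          int c = s[i];

          if (escapes[c] == 0)
            putc (c, f);                      /* 0: emit the byte directly       */
          else if (escapes[c] == 1)
            fprintf (f, "\\%03o", c);         /* 1: emit a three-digit octal     */
          else
            fprintf (f, "\\%c", escapes[c]);  /* else: emit \ and the table char */
        }
      fprintf (f, "\"\n");
    }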
+
+/* Some svr4 assemblers have a limit on the number of characters which
+ can appear in the operand of a .string directive. If your assembler
+ has such a limitation, you should define STRING_LIMIT to reflect that
+ limit. Note that at least some svr4 assemblers have a limit on the
+ actual number of bytes in the double-quoted string, and that they
+ count each character in an escape sequence as one byte. Thus, an
+ escape sequence like \377 would count as four bytes.
+
+ If your target assembler doesn't support the .string directive, you
+ should define this to zero.
+*/
+
+#define STRING_LIMIT ((unsigned) 256)
+
+#define STRING_ASM_OP ".string"
+
+/*
+ * We always use gas here, so we don't worry about ECOFF assembler problems.
+ */
+#undef TARGET_GAS
+#define TARGET_GAS (1)
+
+#undef PREFERRED_DEBUGGING_TYPE
+#define PREFERRED_DEBUGGING_TYPE DBX_DEBUG
+
+/* Provide a STARTFILE_SPEC appropriate for Linux. Here we add
+ the Linux magical crtbegin.o file (see crtstuff.c) which
+ provides part of the support for getting C++ file-scope static
+ object constructed before entering `main'. */
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC \
+ "%{!shared: \
+ %{pg:gcrt1.o%s} %{!pg:%{p:gcrt1.o%s} %{!p:crt1.o%s}}}\
+ crti.o%s %{!shared:crtbegin.o%s} %{shared:crtbeginS.o%s}"
+
+/* Provide a ENDFILE_SPEC appropriate for Linux. Here we tack on
+ the Linux magical crtend.o file (see crtstuff.c) which
+ provides part of the support for getting C++ file-scope static
+ object constructed before entering `main', followed by a normal
+ Linux "finalizer" file, `crtn.o'. */
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC \
+ "%{!shared:crtend.o%s} %{shared:crtendS.o%s} crtn.o%s"
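Read together with STARTFILE_SPEC above, these specs arrange the ld input roughly as follows for an ordinary dynamic executable (an illustrative sketch of the resulting object order, not literal driver output):

    crt1.o crti.o crtbegin.o  <user objects and libraries>  crtend.o crtn.o

With -shared, crt1.o is dropped and the crtbeginS.o/crtendS.o variants are used instead, so the .ctors/.dtors lists are still opened and closed in the right places.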
diff --git a/contrib/gcc/config/alpha/freebsd.h b/contrib/gcc/config/alpha/freebsd.h
new file mode 100644
index 0000000..24567a6
--- /dev/null
+++ b/contrib/gcc/config/alpha/freebsd.h
@@ -0,0 +1,103 @@
+/* XXX */
+/*
+ * This file was derived from source obtained from NetBSD/Alpha which
+ * is publicly available for ftp. The patch was developed by cgd@netbsd.org
+ * during the time he worked at CMU. He claims that CMU owns this patch
+ * to gcc and that they have not (and will not) release the patch for
+ * incorporation in FSF sources. We are supposedly able to use the patch,
+ * but we are not allowed to forward it back to FSF for inclusion in
+ * their source releases.
+ *
+ * This all has me (jb@freebsd.org) confused because (a) I see no copyright
+ * messages that tell me that use is restricted; and (b) I expected that
+ * the patch was originally developed from other files which are subject
+ * to GPL.
+ *
+ * Use of this file is restricted until its CMU ownership is tested.
+ */
+
+#include "alpha/alpha.h"
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "int"
+
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE 32
+
+/* FreeBSD-specific things: */
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "-D__FreeBSD__ -D__alpha__ -D__alpha"
+
+/* Look for the include files in the system-defined places. */
+
+#undef GPLUSPLUS_INCLUDE_DIR
+#define GPLUSPLUS_INCLUDE_DIR "/usr/include/g++"
+
+#undef GCC_INCLUDE_DIR
+#define GCC_INCLUDE_DIR "/usr/include"
+
+#undef INCLUDE_DEFAULTS
+#define INCLUDE_DEFAULTS \
+ { \
+ { GPLUSPLUS_INCLUDE_DIR, 1, 1 }, \
+ { GCC_INCLUDE_DIR, 0, 0 }, \
+ { 0, 0, 0 } \
+ }
+
+
+/* Under FreeBSD, the normal location of the `ld' and `as' programs is the
+ /usr/bin directory. */
+
+#undef MD_EXEC_PREFIX
+#define MD_EXEC_PREFIX "/usr/bin/"
+
+/* Under FreeBSD, the normal location of the various *crt*.o files is the
+ /usr/lib directory. */
+
+#undef MD_STARTFILE_PREFIX
+#define MD_STARTFILE_PREFIX "/usr/lib/"
+
+
+/* Provide a CPP_SPEC appropriate for FreeBSD. Currently we just deal with
+ the GCC option `-posix'. */
+
+#undef CPP_SPEC
+#define CPP_SPEC "%{posix:-D_POSIX_SOURCE}"
+
+/* Provide an ASM_SPEC appropriate for FreeBSD. */
+
+#undef ASM_SPEC
+#define ASM_SPEC " %|"
+
+#undef ASM_FINAL_SPEC
+
+/* Provide a LIB_SPEC appropriate for FreeBSD. Just select the appropriate
+ libc, depending on whether we're doing profiling. */
+
+#undef LIB_SPEC
+#define LIB_SPEC "%{!shared:%{!pg:%{!pthread:-lc}%{pthread:-lpthread -lc}}%{pg:%{!pthread:-lc_p}%{pthread:-lpthread_p -lc_p}}}"
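A quick reading of this spec for a few typical command lines (illustrative, not literal driver output):

    cc foo.c                 ->  -lc
    cc -pthread foo.c        ->  -lpthread -lc
    cc -pg foo.c             ->  -lc_p
    cc -pg -pthread foo.c    ->  -lpthread_p -lc_p
    cc -shared foo.c         ->  (no C library added)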
+
+/* Provide a LINK_SPEC appropriate for FreeBSD. Here we provide support
+ for the special GCC options -static, -assert, and -nostdlib. */
+
+#undef LINK_SPEC
+#define LINK_SPEC \
+ "%{!nostdlib:%{!r*:%{!e*:-e __start}}} -dc -dp %{static:-Bstatic} %{assert*}"
+
+/* Output assembler code to FILE to increment profiler label # LABELNO
+ for profiling a function entry. Under FreeBSD/Alpha, the assembler does
+ nothing special with -pg. */
+
+#undef FUNCTION_PROFILER
+#define FUNCTION_PROFILER(FILE, LABELNO) \
+	fputs ("\tjsr $28,_mcount\n", (FILE)); /* $28 is the at (assembler temporary) register */
+
+/* Show that we need a GP when profiling. */
+#define TARGET_PROFILING_NEEDS_GP
+
+#define bsd4_4
+#undef HAS_INIT_SECTION
+
+#undef PREFERRED_DEBUGGING_TYPE
+#define PREFERRED_DEBUGGING_TYPE DBX_DEBUG
diff --git a/contrib/gcc/config/alpha/gdb-osf12.h b/contrib/gcc/config/alpha/gdb-osf12.h
new file mode 100644
index 0000000..98c2897
--- /dev/null
+++ b/contrib/gcc/config/alpha/gdb-osf12.h
@@ -0,0 +1,26 @@
+/* Definitions of target machine for GNU compiler, for DEC Alpha, using
+ encapsulated stabs and OSF V1.2.
+ Copyright (C) 1994 Free Software Foundation, Inc.
+ Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "alpha/osf12.h"
+
+#undef PREFERRED_DEBUGGING_TYPE
+#define PREFERRED_DEBUGGING_TYPE DBX_DEBUG
diff --git a/contrib/gcc/config/alpha/gdb-osf2.h b/contrib/gcc/config/alpha/gdb-osf2.h
new file mode 100644
index 0000000..5ddb798
--- /dev/null
+++ b/contrib/gcc/config/alpha/gdb-osf2.h
@@ -0,0 +1,26 @@
+/* Definitions of target machine for GNU compiler, for DEC Alpha, using
+ encapsulated stabs.
+ Copyright (C) 1992, 1993 Free Software Foundation, Inc.
+ Contributed by Peter Schauer (pes@regent.e-technik.tu-muenchen.de).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "alpha/osf2.h"
+
+#undef PREFERRED_DEBUGGING_TYPE
+#define PREFERRED_DEBUGGING_TYPE DBX_DEBUG
diff --git a/contrib/gcc/config/alpha/gdb.h b/contrib/gcc/config/alpha/gdb.h
new file mode 100644
index 0000000..ecdbe40
--- /dev/null
+++ b/contrib/gcc/config/alpha/gdb.h
@@ -0,0 +1,26 @@
+/* Definitions of target machine for GNU compiler, for DEC Alpha, using
+ encapsulated stabs.
+ Copyright (C) 1992, 1993 Free Software Foundation, Inc.
+ Contributed by Peter Schauer (pes@regent.e-technik.tu-muenchen.de).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "alpha/alpha.h"
+
+#undef PREFERRED_DEBUGGING_TYPE
+#define PREFERRED_DEBUGGING_TYPE DBX_DEBUG
diff --git a/contrib/gcc/config/alpha/osf12.h b/contrib/gcc/config/alpha/osf12.h
new file mode 100644
index 0000000..fe9112c
--- /dev/null
+++ b/contrib/gcc/config/alpha/osf12.h
@@ -0,0 +1,31 @@
+/* Definitions of target machine for GNU compiler, for DEC Alpha.
+ Copyright (C) 1992, 1993, 1995 Free Software Foundation, Inc.
+ Contributed by Richard Kenner (kenner@nyu.edu)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+#include "alpha/alpha.h"
+
+/* In OSF 1.2, there is a linker bug that prevents passing -O3 to
+   the linker. */
+
+#undef LINK_SPEC
+#define LINK_SPEC \
+ "-G 8 -O1 %{static:-non_shared} %{rpath*} \
+ %{!static:%{shared:-shared} %{!shared:-call_shared}} %{taso}"
diff --git a/contrib/gcc/config/alpha/osf2.h b/contrib/gcc/config/alpha/osf2.h
new file mode 100644
index 0000000..169af5a
--- /dev/null
+++ b/contrib/gcc/config/alpha/osf2.h
@@ -0,0 +1,32 @@
+/* Definitions of target machine for GNU compiler, for DEC Alpha.
+ Copyright (C) 1992, 1993, 1994 Free Software Foundation, Inc.
+ Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+#include "alpha/alpha.h"
+
+/* In OSF 2.0, the size of wchar_t was changed from short unsigned
+ to unsigned int. */
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "unsigned int"
+
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE 32
diff --git a/contrib/gcc/config/alpha/x-alpha b/contrib/gcc/config/alpha/x-alpha
new file mode 100644
index 0000000..9919747
--- /dev/null
+++ b/contrib/gcc/config/alpha/x-alpha
@@ -0,0 +1 @@
+CLIB=-lmld
diff --git a/contrib/gcc/config/alpha/xm-alpha.h b/contrib/gcc/config/alpha/xm-alpha.h
new file mode 100644
index 0000000..642e1cf
--- /dev/null
+++ b/contrib/gcc/config/alpha/xm-alpha.h
@@ -0,0 +1,78 @@
+/* Configuration for GNU C-compiler for DEC Alpha.
+ Copyright (C) 1990, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
+ Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+/* #defines that need visibility everywhere. */
+#define FALSE 0
+#define TRUE 1
+
+/* This describes the machine the compiler is hosted on. */
+#define HOST_BITS_PER_CHAR 8
+#define HOST_BITS_PER_SHORT 16
+#define HOST_BITS_PER_INT 32
+#define HOST_BITS_PER_LONG 64
+#define HOST_BITS_PER_LONGLONG 64
+
+/* #define HOST_WORDS_BIG_ENDIAN */
+
+/* target machine dependencies.
+ tm.h is a symbolic link to the actual target specific file. */
+#include "tm.h"
+
+/* Arguments to use with `exit'. */
+#define SUCCESS_EXIT_CODE 0
+#define FATAL_EXIT_CODE 33
+
+/* If not compiled with GNU C, use the builtin alloca. */
+#if !defined(__GNUC__) && !defined(_WIN32)
+#include <alloca.h>
+#else
+extern void *alloca ();
+#endif
+
+/* The host compiler has problems with enum bitfields since it makes
+   them signed, so we can't fit all our codes in. */
+
+#ifndef __GNUC__
+#define ONLY_INT_FIELDS
+#endif
+
+/* Declare some functions needed for this machine. We don't want to
+ include these in the sources since other machines might define them
+ differently. */
+
+extern void *malloc (), *realloc (), *calloc ();
+
+#ifndef inhibit_libc
+#include "string.h"
+#endif
+
+/* OSF/1 has vprintf. */
+
+#define HAVE_VPRINTF
+
+/* OSF/1 has putenv. */
+
+#define HAVE_PUTENV
+
+/* OSF/1 is POSIX.1 compliant. */
+
+#define POSIX
diff --git a/contrib/gcc/config/freebsd.h b/contrib/gcc/config/freebsd.h
new file mode 100644
index 0000000..dc16976
--- /dev/null
+++ b/contrib/gcc/config/freebsd.h
@@ -0,0 +1,120 @@
+/* Base configuration file for all FreeBSD targets.
+ Copyright (C) 1999 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Common FreeBSD configuration.
+ All FreeBSD architectures should include this file, which will specify
+ their commonalities.
+ Adapted from /usr/src/contrib/gcc/config/i386/freebsd.h &
+   egcs/gcc/config/i386/freebsd-elf.h versions, by David O'Brien */
+
+
+/* Don't assume anything about the header files. */
+#define NO_IMPLICIT_EXTERN_C
+
+/* This defines which switch letters take arguments. On svr4, most of
+   the normal cases (defined in gcc.c) apply, and we also have -h* and
+   -z* options (for the linker). Our mix is slightly different: we have
+   -R (alias --rpath), --soname (-h), --assert etc., and -z is accepted
+   but ignored by ld. */
+
+#undef SWITCH_TAKES_ARG
+#define SWITCH_TAKES_ARG(CHAR) \
+ ( (CHAR) == 'D' \
+ || (CHAR) == 'U' \
+ || (CHAR) == 'o' \
+ || (CHAR) == 'e' \
+ || (CHAR) == 'T' \
+ || (CHAR) == 'u' \
+ || (CHAR) == 'I' \
+ || (CHAR) == 'm' \
+ || (CHAR) == 'x' \
+ || (CHAR) == 'L' \
+ || (CHAR) == 'A' \
+ || (CHAR) == 'V' \
+ || (CHAR) == 'B' \
+ || (CHAR) == 'b' \
+ || (CHAR) == 'h' \
+ || (CHAR) == 'z' /* ignored by ld */ \
+ || (CHAR) == 'R')
+
+#undef WORD_SWITCH_TAKES_ARG
+#define WORD_SWITCH_TAKES_ARG(STR) \
+ (DEFAULT_WORD_SWITCH_TAKES_ARG (STR) \
+ || !strcmp (STR, "rpath") || !strcmp (STR, "rpath-link") \
+ || !strcmp (STR, "soname") || !strcmp (STR, "defsym") \
+ || !strcmp (STR, "assert") || !strcmp (STR, "dynamic-linker"))
+
+
+#define CPP_FBSD_PREDEFINES "-Dunix -D__ELF__ -D__FreeBSD__=4 -D__FreeBSD_cc_version=400001 -Asystem(unix) -Asystem(FreeBSD)"
+
+
+/* Code generation parameters. */
+
+/* Don't default to pcc-struct-return, because gcc is the only compiler, and
+   we want to retain compatibility with older gcc versions (even though the
+   svr4 ABI for the i386 says that records and unions are returned in
+   memory). */
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+/* Ensure the configuration knows our system correctly so we can link with
+ libraries compiled with the native cc. */
+#undef NO_DOLLAR_IN_LABEL
+
+
+/* Miscellaneous parameters. */
+
+/* Tell libgcc2.c that FreeBSD targets support atexit(3). */
+#define HAVE_ATEXIT
+
+
+/* FREEBSD_NATIVE is defined when gcc is integrated into the FreeBSD
+ source tree so it can be configured appropriately without using
+ the GNU configure/build mechanism. */
+
+#ifdef FREEBSD_NATIVE
+
+/* Look for the include files in the system-defined places. */
+
+#define GPLUSPLUS_INCLUDE_DIR "/usr/include/g++"
+#define GCC_INCLUDE_DIR "/usr/include"
+
+/* Now that GCC knows what the include path applies to, put the G++ one first.
+ C++ can now have include files that override the default C ones. */
+#define INCLUDE_DEFAULTS \
+ { \
+ { GPLUSPLUS_INCLUDE_DIR, "C++", 1, 1 }, \
+ { GCC_INCLUDE_DIR, "GCC", 0, 0 }, \
+ { 0, 0, 0, 0 } \
+ }
+
+/* Under FreeBSD, the normal location of the compiler back ends is the
+ /usr/libexec directory. */
+
+#define STANDARD_EXEC_PREFIX "/usr/libexec/"
+#define TOOLDIR_BASE_PREFIX "/usr/libexec/"
+
+/* Under FreeBSD, the normal location of the various *crt*.o files is the
+ /usr/lib directory. */
+
+#define STANDARD_STARTFILE_PREFIX "/usr/lib/"
+
+/* FreeBSD is 4.4BSD derived */
+#define bsd4_4
+
+#endif /* FREEBSD_NATIVE */
diff --git a/contrib/gcc/config/i386/freebsd-elf.h b/contrib/gcc/config/i386/freebsd-elf.h
new file mode 100644
index 0000000..393ede7
--- /dev/null
+++ b/contrib/gcc/config/i386/freebsd-elf.h
@@ -0,0 +1,199 @@
+/* Definitions for Intel 386 running FreeBSD with ELF format
+ Copyright (C) 1994, 1995 Free Software Foundation, Inc.
+ Contributed by Eric Youngdale.
+ Modified for stabs-in-ELF by H.J. Lu.
+ Adapted from Linux version by John Polstra.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* A lie, I guess, but the general idea behind FreeBSD/ELF is that we are
+ supposed to be outputting something that will assemble under SVr4.
+ This gets us pretty close. */
+#include <i386/i386.h> /* Base i386 target machine definitions */
+#include <i386/att.h> /* Use the i386 AT&T assembler syntax */
+#include <linux.h> /* some common stuff */
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (i386 FreeBSD/ELF)");
+
+/* The svr4 ABI for the i386 says that records and unions are returned
+ in memory. */
+#undef DEFAULT_PCC_STRUCT_RETURN
+#define DEFAULT_PCC_STRUCT_RETURN 1
+
+/* This is how to output an element of a case-vector that is relative.
+ This is only used for PIC code. See comments by the `casesi' insn in
+ i386.md for an explanation of the expression this outputs. */
+#undef ASM_OUTPUT_ADDR_DIFF_ELT
+#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, VALUE, REL) \
+ fprintf (FILE, "\t.long _GLOBAL_OFFSET_TABLE_+[.-%s%d]\n", LPREFIX, VALUE)
+
+/* Indicate that jump tables go in the text section. This is
+ necessary when compiling PIC code. */
+#define JUMP_TABLES_IN_TEXT_SECTION
+
+/* Copy this from the svr4 specifications... */
+/* Define the register numbers to be used in Dwarf debugging information.
+ The SVR4 reference port C compiler uses the following register numbers
+ in its Dwarf output code:
+ 0 for %eax (gnu regno = 0)
+ 1 for %ecx (gnu regno = 2)
+ 2 for %edx (gnu regno = 1)
+ 3 for %ebx (gnu regno = 3)
+ 4 for %esp (gnu regno = 7)
+ 5 for %ebp (gnu regno = 6)
+ 6 for %esi (gnu regno = 4)
+ 7 for %edi (gnu regno = 5)
+ The following three DWARF register numbers are never generated by
+ the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
+ believes these numbers have these meanings.
+ 8 for %eip (no gnu equivalent)
+ 9 for %eflags (no gnu equivalent)
+ 10 for %trapno (no gnu equivalent)
+ It is not at all clear how we should number the FP stack registers
+ for the x86 architecture. If the version of SDB on x86/svr4 were
+ a bit less brain dead with respect to floating-point then we would
+ have a precedent to follow with respect to DWARF register numbers
+ for x86 FP registers, but the SDB on x86/svr4 is so completely
+ broken with respect to FP registers that it is hardly worth thinking
+ of it as something to strive for compatibility with.
+ The version of x86/svr4 SDB I have at the moment does (partially)
+ seem to believe that DWARF register number 11 is associated with
+ the x86 register %st(0), but that's about all. Higher DWARF
+ register numbers don't seem to be associated with anything in
+   particular, and even for DWARF regno 11, SDB only seems to
+   understand that it should say that a variable lives in %st(0) (when
+ asked via an `=' command) if we said it was in DWARF regno 11,
+ but SDB still prints garbage when asked for the value of the
+ variable in question (via a `/' command).
+ (Also note that the labels SDB prints for various FP stack regs
+ when doing an `x' command are all wrong.)
+ Note that these problems generally don't affect the native SVR4
+ C compiler because it doesn't allow the use of -O with -g and
+ because when it is *not* optimizing, it allocates a memory
+ location for each floating-point variable, and the memory
+ location is what gets described in the DWARF AT_location
+ attribute for the variable in question.
+ Regardless of the severe mental illness of the x86/svr4 SDB, we
+ do something sensible here and we use the following DWARF
+ register numbers. Note that these are all stack-top-relative
+ numbers.
+ 11 for %st(0) (gnu regno = 8)
+ 12 for %st(1) (gnu regno = 9)
+ 13 for %st(2) (gnu regno = 10)
+ 14 for %st(3) (gnu regno = 11)
+ 15 for %st(4) (gnu regno = 12)
+ 16 for %st(5) (gnu regno = 13)
+ 17 for %st(6) (gnu regno = 14)
+ 18 for %st(7) (gnu regno = 15)
+*/
+#undef DBX_REGISTER_NUMBER
+#define DBX_REGISTER_NUMBER(n) \
+((n) == 0 ? 0 \
+ : (n) == 1 ? 2 \
+ : (n) == 2 ? 1 \
+ : (n) == 3 ? 3 \
+ : (n) == 4 ? 6 \
+ : (n) == 5 ? 7 \
+ : (n) == 6 ? 5 \
+ : (n) == 7 ? 4 \
+ : ((n) >= FIRST_STACK_REG && (n) <= LAST_STACK_REG) ? (n)+3 \
+ : (-1))
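Written out as a GNU-regno-indexed table, the mapping this macro encodes looks as follows (an illustrative sketch; FIRST_STACK_REG and LAST_STACK_REG are assumed to be 8 and 15, as in the i386 port):

    /* DBX_REGISTER_NUMBER above, spelled out per GNU register number.  */
    static const int dwarf_regno[16] = {
      0,   /* gnu 0  %eax   */
      2,   /* gnu 1  %edx   */
      1,   /* gnu 2  %ecx   */
      3,   /* gnu 3  %ebx   */
      6,   /* gnu 4  %esi   */
      7,   /* gnu 5  %edi   */
      5,   /* gnu 6  %ebp   */
      4,   /* gnu 7  %esp   */
      11,  /* gnu 8  %st(0) */
      12,  /* gnu 9  %st(1) */
      13,  /* gnu 10 %st(2) */
      14,  /* gnu 11 %st(3) */
      15,  /* gnu 12 %st(4) */
      16,  /* gnu 13 %st(5) */
      17,  /* gnu 14 %st(6) */
      18,  /* gnu 15 %st(7) */
    };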
+
+/* Output assembler code to FILE to increment profiler label # LABELNO
+ for profiling a function entry. */
+
+#undef FUNCTION_PROFILER
+#define FUNCTION_PROFILER(FILE, LABELNO) \
+{ \
+ if (flag_pic) \
+ { \
+ fprintf (FILE, "\tleal %sP%d@GOTOFF(%%ebx),%%edx\n", \
+ LPREFIX, (LABELNO)); \
+ fprintf (FILE, "\tcall *mcount@GOT(%%ebx)\n"); \
+ } \
+ else \
+ { \
+ fprintf (FILE, "\tmovl $%sP%d,%%edx\n", LPREFIX, (LABELNO)); \
+ fprintf (FILE, "\tcall mcount\n"); \
+ } \
+}
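Concretely, for the first counter label (LABELNO 0, and assuming LPREFIX is ".L" as in the other ELF i386 configurations), the macro above writes

    movl $.LP0,%edx
    call mcount

without -fpic, and

    leal .LP0@GOTOFF(%ebx),%edx
    call *mcount@GOT(%ebx)

with -fpic, so mcount always receives the profiling label's address in %edx.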
+
+#undef SIZE_TYPE
+#define SIZE_TYPE "unsigned int"
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "int"
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "int"
+
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE BITS_PER_WORD
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Dunix -Di386 -D__ELF__ -D__FreeBSD__=2 -Asystem(unix) -Asystem(FreeBSD) -Acpu(i386) -Amachine(i386)"
+
+#undef CPP_SPEC
+#if TARGET_CPU_DEFAULT == 2
+#define CPP_SPEC "%{fPIC:-D__PIC__ -D__pic__} %{fpic:-D__PIC__ -D__pic__} %{!m386:-D__i486__} %{posix:-D_POSIX_SOURCE}"
+#else
+#define CPP_SPEC "%{fPIC:-D__PIC__ -D__pic__} %{fpic:-D__PIC__ -D__pic__} %{m486:-D__i486__} %{posix:-D_POSIX_SOURCE}"
+#endif
+
+#undef LIB_SPEC
+#if 1
+/* We no longer link with libc_p.a or libg.a by default. If you
+ * want to profile or debug the C library, please add
+ * -lc_p or -ggdb to LDFLAGS at link time, respectively.
+ */
+#define LIB_SPEC \
+ "%{!shared: %{mieee-fp:-lieee} %{p:-lgmon} %{pg:-lgmon} \
+ %{!ggdb:-lc} %{ggdb:-lg}}"
+#else
+#define LIB_SPEC \
+ "%{!shared: \
+ %{mieee-fp:-lieee} %{p:-lgmon -lc_p} %{pg:-lgmon -lc_p} \
+ %{!p:%{!pg:%{!g*:-lc} %{g*:-lg}}}}"
+#endif
+
+/* Provide a LINK_SPEC appropriate for FreeBSD. Here we provide support
+   for the special GCC options -static and -shared, which allow us to
+   link things in one of three modes (static, shared, or the default
+   dynamic executable) by applying the appropriate combinations of
+   options at link time. We would like to support as many of the other
+   GNU linker options as possible here, but I don't have the time to
+   search for those flags. I am not sure how to add support for
+   -soname shared_object_name. H.J.
+
+   I took out %{v:%{!V:-V}}. It is too much :-(. They can use
+   -Wl,-V.
+
+   When the -shared link option is used, a final link is not done. */
+
+#undef LINK_SPEC
+#define LINK_SPEC "-m elf_i386 %{shared:-shared} \
+ %{!shared: \
+ %{!ibcs: \
+ %{!static: \
+ %{rdynamic:-export-dynamic} \
+ %{!dynamic-linker:-dynamic-linker /usr/libexec/ld-elf.so.1}} \
+ %{static:-static}}}"
+
+/* Get perform_* macros to build libgcc.a. */
+#include "i386/perform.h"