summary | refs | log | tree | commit | diff | stats
path: root/contrib/gcc/config/alpha
diff options
context:
space:
mode:
Diffstat (limited to 'contrib/gcc/config/alpha')
-rw-r--r--  contrib/gcc/config/alpha/alpha-interix.h     252
-rw-r--r--  contrib/gcc/config/alpha/alpha.c             1462
-rw-r--r--  contrib/gcc/config/alpha/alpha.h             325
-rw-r--r--  contrib/gcc/config/alpha/alpha.md            1037
-rw-r--r--  contrib/gcc/config/alpha/alpha32.h           104
-rw-r--r--  contrib/gcc/config/alpha/crtbegin.asm        89
-rw-r--r--  contrib/gcc/config/alpha/crtend.asm          3
-rw-r--r--  contrib/gcc/config/alpha/elf.h               62
-rw-r--r--  contrib/gcc/config/alpha/lib1funcs.asm       325
-rw-r--r--  contrib/gcc/config/alpha/linux-ecoff.h       7
-rw-r--r--  contrib/gcc/config/alpha/linux-elf.h         3
-rw-r--r--  contrib/gcc/config/alpha/linux.h             6
-rw-r--r--  contrib/gcc/config/alpha/netbsd-elf.h        3
-rw-r--r--  contrib/gcc/config/alpha/netbsd.h            3
-rw-r--r--  contrib/gcc/config/alpha/osf.h               12
-rw-r--r--  contrib/gcc/config/alpha/t-ieee              6
-rw-r--r--  contrib/gcc/config/alpha/t-interix           16
-rw-r--r--  contrib/gcc/config/alpha/vms.h               4
-rw-r--r--  contrib/gcc/config/alpha/vxworks.h           6
-rw-r--r--  contrib/gcc/config/alpha/xm-alpha-interix.h  45
-rw-r--r--  contrib/gcc/config/alpha/xm-alpha.h          2
21 files changed, 2851 insertions, 921 deletions
diff --git a/contrib/gcc/config/alpha/alpha-interix.h b/contrib/gcc/config/alpha/alpha-interix.h
new file mode 100644
index 0000000..668fe93
--- /dev/null
+++ b/contrib/gcc/config/alpha/alpha-interix.h
@@ -0,0 +1,252 @@
+/* Definitions of target machine for GNU compiler, for DEC Alpha
+ running Windows/NT.
+ Copyright (C) 1995, 1996, 1999 Free Software Foundation, Inc.
+
+ Donn Terry, Softway Systems, Inc.
+ From code
+ Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* cpp handles __STDC__ */
+/* The three "Alpha" defines on the first such line are from the CLAXP spec */
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES " \
+ -D__INTERIX \
+ -D__OPENNT \
+ -D__Alpha_AXP -D_M_ALPHA -D_ALPHA_ \
+ -D__alpha -D__alpha__\
+ -D__stdcall= \
+ -D__cdecl= \
+ -Asystem(unix) -Asystem(interix) -Asystem(interix) -Acpu(alpha) -Amachine(alpha)"
+
+#undef CPP_SUBTARGET_SPEC
+#define CPP_SUBTARGET_SPEC "\
+-remap \
+%{posix:-D_POSIX_SOURCE} \
+-idirafter %$INTERIX_ROOT/usr/include"
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (alpha Interix)");
+
+/* alpha.h sets this, but it doesn't apply to us */
+#undef OBJECT_FORMAT_ECOFF
+#undef OBJECT_FORMAT_COFF
+
+/* LINK_SPEC */
+
+/* MD_STARTFILE_PREFIX */
+
+/* ASM_OUTPUT_LOOP_ALIGN; ASM_OUTPUT_ALIGN_CODE */
+
+/* Codegen macro overrides for NT internal conventions */
+
+/* the below are ecoff specific... we don't need them, so
+ undef them (they'll get a default later) */
+
+#undef PUT_SDB_BLOCK_START
+#undef PUT_SDB_BLOCK_END
+
+/* the following are OSF linker (not gld) specific... we don't want them */
+#undef HAS_INIT_SECTION
+#undef LD_INIT_SWITCH
+#undef LD_FINI_SWITCH
+
+
+/* The following are needed for C++, but also needed for profiling */
+
+/* Support const sections and the ctors and dtors sections for g++.
+ Note that there appears to be two different ways to support const
+ sections at the moment. You can either #define the symbol
+ READONLY_DATA_SECTION (giving it some code which switches to the
+ readonly data section) or else you can #define the symbols
+ EXTRA_SECTIONS, EXTRA_SECTION_FUNCTIONS, SELECT_SECTION, and
+ SELECT_RTX_SECTION. We do both here just to be on the safe side. */
+
+#define USE_CONST_SECTION 1
+
+#define CONST_SECTION_ASM_OP ".rdata"
+
+/* Define the pseudo-ops used to switch to the .ctors and .dtors sections.
+
+ Note that we want to give these sections the SHF_WRITE attribute
+ because these sections will actually contain data (i.e. tables of
+ addresses of functions in the current root executable or shared library
+ file) and, in the case of a shared library, the relocatable addresses
+ will have to be properly resolved/relocated (and then written into) by
+ the dynamic linker when it actually attaches the given shared library
+ to the executing process. (Note that on SVR4, you may wish to use the
+ `-z text' option to the ELF linker, when building a shared library, as
+ an additional check that you are doing everything right. But if you do
+ use the `-z text' option when building a shared library, you will get
+ errors unless the .ctors and .dtors sections are marked as writable
+ via the SHF_WRITE attribute.) */
+
+#define CTORS_SECTION_ASM_OP ".ctors"
+#define DTORS_SECTION_ASM_OP ".dtors"
+
+/* A default list of other sections which we might be "in" at any given
+ time. For targets that use additional sections (e.g. .tdesc) you
+ should override this definition in the target-specific file which
+ includes this file. */
+
+#undef EXTRA_SECTIONS
+#define EXTRA_SECTIONS in_const, in_ctors, in_dtors
+
+/* A default list of extra section function definitions. For targets
+ that use additional sections (e.g. .tdesc) you should override this
+ definition in the target-specific file which includes this file. */
+
+#undef EXTRA_SECTION_FUNCTIONS
+#define EXTRA_SECTION_FUNCTIONS \
+ CONST_SECTION_FUNCTION \
+ CTORS_SECTION_FUNCTION \
+ DTORS_SECTION_FUNCTION
+
+#undef READONLY_DATA_SECTION
+#define READONLY_DATA_SECTION() const_section ()
+
+extern void text_section ();
+
+#define CONST_SECTION_FUNCTION \
+void \
+const_section () \
+{ \
+ if (!USE_CONST_SECTION) \
+ text_section(); \
+ else if (in_section != in_const) \
+ { \
+ fprintf (asm_out_file, "%s\n", CONST_SECTION_ASM_OP); \
+ in_section = in_const; \
+ } \
+}
+
+#define CTORS_SECTION_FUNCTION \
+void \
+ctors_section () \
+{ \
+ if (in_section != in_ctors) \
+ { \
+ fprintf (asm_out_file, "%s\n", CTORS_SECTION_ASM_OP); \
+ in_section = in_ctors; \
+ } \
+}
+
+#define DTORS_SECTION_FUNCTION \
+void \
+dtors_section () \
+{ \
+ if (in_section != in_dtors) \
+ { \
+ fprintf (asm_out_file, "%s\n", DTORS_SECTION_ASM_OP); \
+ in_section = in_dtors; \
+ } \
+}
+
+#define INT_ASM_OP ".long"
+
+/* A C statement (sans semicolon) to output an element in the table of
+ global constructors. */
+#define ASM_OUTPUT_CONSTRUCTOR(FILE,NAME) \
+ do { \
+ ctors_section (); \
+ fprintf (FILE, "\t%s\t ", INT_ASM_OP); \
+ assemble_name (FILE, NAME); \
+ fprintf (FILE, "\n"); \
+ } while (0)
+
+/* A C statement (sans semicolon) to output an element in the table of
+ global destructors. */
+#define ASM_OUTPUT_DESTRUCTOR(FILE,NAME) \
+ do { \
+ dtors_section (); \
+ fprintf (FILE, "\t%s\t ", INT_ASM_OP); \
+ assemble_name (FILE, NAME); \
+ fprintf (FILE, "\n"); \
+ } while (0)
+
+/* The linker will take care of this, and having them causes problems with
+ ld -r (specifically -rU). */
+#define CTOR_LISTS_DEFINED_EXTERNALLY 1
+
+#define SET_ASM_OP ".set"
+/* Output a definition (implements alias) */
+#define ASM_OUTPUT_DEF(FILE,LABEL1,LABEL2) \
+do \
+{ \
+ fprintf ((FILE), "\t"); \
+ assemble_name (FILE, LABEL1); \
+ fprintf (FILE, "="); \
+ assemble_name (FILE, LABEL2); \
+ fprintf (FILE, "\n"); \
+ } \
+while (0)
+
+/* We use the defaults, so undef the null definitions */
+#undef PUT_SDB_FUNCTION_START
+#undef PUT_SDB_FUNCTION_END
+#undef PUT_SDB_EPILOGUE_END
+
+#define HOST_PTR_PRINTF "%p"
+#define HOST_PTR_AS_INT unsigned long
+
+#define PCC_BITFIELD_TYPE_MATTERS 1
+#define PCC_BITFIELD_TYPE_TEST TYPE_NATIVE(rec)
+#define GROUP_BITFIELDS_BY_ALIGN TYPE_NATIVE(rec)
+
+/* DWARF2 Unwinding doesn't work with exception handling yet. */
+#undef DWARF2_UNWIND_INFO
+
+/* Don't assume anything about the header files. */
+#define NO_IMPLICIT_EXTERN_C
+
+/* The definition of this macro implies that there are cases where
+ a scalar value cannot be returned in registers.
+
+ On NT (according to the spec) anything except strings/array that fits
+ in 64 bits is returned in the registers (this appears to differ from
+ the rest of the Alpha family). */
+
+#undef RETURN_IN_MEMORY
+#define RETURN_IN_MEMORY(TYPE) \
+ (TREE_CODE (TYPE) == ARRAY_TYPE || int_size_in_bytes(TYPE) > 8)
+
+#define ASM_LOAD_ADDR(loc, reg) " lda " #reg "," #loc "\n"
+
+#undef ASM_FILE_START
+#define ASM_FILE_START(FILE) \
+{ \
+ alpha_write_verstamp (FILE); \
+ fprintf (FILE, "\t.set noreorder\n"); \
+ fprintf (FILE, "\t.set volatile\n"); \
+ fprintf (FILE, "\t.set noat\n"); \
+ fprintf (FILE, "\t.globl\t__fltused\n"); \
+ ASM_OUTPUT_SOURCE_FILENAME (FILE, main_input_filename); \
+}
+
+/* The current Interix assembler (consistent with the DEC documentation)
+ uses a=b NOT .set a,b; .set is for assembler options. */
+#undef ASM_OUTPUT_DEFINE_LABEL_DIFFERENCE_SYMBOL
+#define ASM_OUTPUT_DEFINE_LABEL_DIFFERENCE_SYMBOL(FILE, SY, HI, LO) \
+ do { \
+ assemble_name (FILE, SY); \
+ fputc ('=', FILE); \
+ assemble_name (FILE, HI); \
+ fputc ('-', FILE); \
+ assemble_name (FILE, LO); \
+ } while (0)
diff --git a/contrib/gcc/config/alpha/alpha.c b/contrib/gcc/config/alpha/alpha.c
index 0b72289..2d62693 100644
--- a/contrib/gcc/config/alpha/alpha.c
+++ b/contrib/gcc/config/alpha/alpha.c
@@ -1,5 +1,5 @@
/* Subroutines used for code generation on the DEC Alpha.
- Copyright (C) 1992, 93, 94, 95, 96, 97, 1998 Free Software Foundation, Inc.
+ Copyright (C) 1992, 93-98, 1999 Free Software Foundation, Inc.
Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
This file is part of GNU CC.
@@ -48,7 +48,7 @@ extern int rtx_equal_function_value_matters;
/* Specify which cpu to schedule for. */
enum processor_type alpha_cpu;
-static char* const alpha_cpu_name[] =
+static const char * const alpha_cpu_name[] =
{
"ev4", "ev5", "ev6"
};
@@ -67,11 +67,11 @@ enum alpha_fp_trap_mode alpha_fptm;
/* Strings decoded into the above options. */
-char *alpha_cpu_string; /* -mcpu= */
-char *alpha_tp_string; /* -mtrap-precision=[p|s|i] */
-char *alpha_fprm_string; /* -mfp-rounding-mode=[n|m|c|d] */
-char *alpha_fptm_string; /* -mfp-trap-mode=[n|u|su|sui] */
-char *alpha_mlat_string; /* -mmemory-latency= */
+const char *alpha_cpu_string; /* -mcpu= */
+const char *alpha_tp_string; /* -mtrap-precision=[p|s|i] */
+const char *alpha_fprm_string; /* -mfp-rounding-mode=[n|m|c|d] */
+const char *alpha_fptm_string; /* -mfp-trap-mode=[n|u|su|sui] */
+const char *alpha_mlat_string; /* -mmemory-latency= */
/* Save information from a "cmpxx" operation until the branch or scc is
emitted. */
@@ -79,6 +79,10 @@ char *alpha_mlat_string; /* -mmemory-latency= */
rtx alpha_compare_op0, alpha_compare_op1;
int alpha_compare_fp_p;
+/* Define the information needed to modify the epilogue for EH. */
+
+rtx alpha_eh_epilogue_sp_ofs;
+
/* Non-zero if inside of a function, because the Alpha asm can't
handle .files inside of functions. */
@@ -96,6 +100,10 @@ int alpha_memory_latency = 3;
static int alpha_function_needs_gp;
+/* The alias set for prologue/epilogue register save/restore. */
+
+static int alpha_sr_alias_set;
+
/* Declarations of static functions. */
static void alpha_set_memflags_1
PROTO((rtx, int, int, int));
@@ -126,49 +134,6 @@ static int alpha_does_function_need_gp
void
override_options ()
{
- alpha_cpu
- = TARGET_CPU_DEFAULT & MASK_CPU_EV6 ? PROCESSOR_EV6
- : (TARGET_CPU_DEFAULT & MASK_CPU_EV5 ? PROCESSOR_EV5 : PROCESSOR_EV4);
-
- if (alpha_cpu_string)
- {
- if (! strcmp (alpha_cpu_string, "ev4")
- || ! strcmp (alpha_cpu_string, "21064"))
- {
- alpha_cpu = PROCESSOR_EV4;
- target_flags &= ~ (MASK_BWX | MASK_CIX | MASK_MAX);
- }
- else if (! strcmp (alpha_cpu_string, "ev5")
- || ! strcmp (alpha_cpu_string, "21164"))
- {
- alpha_cpu = PROCESSOR_EV5;
- target_flags &= ~ (MASK_BWX | MASK_CIX | MASK_MAX);
- }
- else if (! strcmp (alpha_cpu_string, "ev56")
- || ! strcmp (alpha_cpu_string, "21164a"))
- {
- alpha_cpu = PROCESSOR_EV5;
- target_flags |= MASK_BWX;
- target_flags &= ~ (MASK_CIX | MASK_MAX);
- }
- else if (! strcmp (alpha_cpu_string, "pca56")
- || ! strcmp (alpha_cpu_string, "21164PC")
- || ! strcmp (alpha_cpu_string, "21164pc"))
- {
- alpha_cpu = PROCESSOR_EV5;
- target_flags |= MASK_BWX | MASK_MAX;
- target_flags &= ~ MASK_CIX;
- }
- else if (! strcmp (alpha_cpu_string, "ev6")
- || ! strcmp (alpha_cpu_string, "21264"))
- {
- alpha_cpu = PROCESSOR_EV6;
- target_flags |= MASK_BWX | MASK_CIX | MASK_MAX;
- }
- else
- error ("bad value `%s' for -mcpu switch", alpha_cpu_string);
- }
-
alpha_tp = ALPHA_TP_PROG;
alpha_fprm = ALPHA_FPRM_NORM;
alpha_fptm = ALPHA_FPTM_N;
@@ -226,10 +191,54 @@ override_options ()
error ("bad value `%s' for -mfp-trap-mode switch", alpha_fptm_string);
}
- /* Do some sanity checks on the above option. */
+ alpha_cpu
+ = TARGET_CPU_DEFAULT & MASK_CPU_EV6 ? PROCESSOR_EV6
+ : (TARGET_CPU_DEFAULT & MASK_CPU_EV5 ? PROCESSOR_EV5 : PROCESSOR_EV4);
+
+ if (alpha_cpu_string)
+ {
+ if (! strcmp (alpha_cpu_string, "ev4")
+ || ! strcmp (alpha_cpu_string, "21064"))
+ {
+ alpha_cpu = PROCESSOR_EV4;
+ target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX);
+ }
+ else if (! strcmp (alpha_cpu_string, "ev5")
+ || ! strcmp (alpha_cpu_string, "21164"))
+ {
+ alpha_cpu = PROCESSOR_EV5;
+ target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX);
+ }
+ else if (! strcmp (alpha_cpu_string, "ev56")
+ || ! strcmp (alpha_cpu_string, "21164a"))
+ {
+ alpha_cpu = PROCESSOR_EV5;
+ target_flags |= MASK_BWX;
+ target_flags &= ~ (MASK_MAX | MASK_FIX | MASK_CIX);
+ }
+ else if (! strcmp (alpha_cpu_string, "pca56")
+ || ! strcmp (alpha_cpu_string, "21164PC")
+ || ! strcmp (alpha_cpu_string, "21164pc"))
+ {
+ alpha_cpu = PROCESSOR_EV5;
+ target_flags |= MASK_BWX | MASK_MAX;
+ target_flags &= ~ (MASK_FIX | MASK_CIX);
+ }
+ else if (! strcmp (alpha_cpu_string, "ev6")
+ || ! strcmp (alpha_cpu_string, "21264"))
+ {
+ alpha_cpu = PROCESSOR_EV6;
+ target_flags |= MASK_BWX | MASK_MAX | MASK_FIX;
+ target_flags &= ~ (MASK_CIX);
+ }
+ else
+ error ("bad value `%s' for -mcpu switch", alpha_cpu_string);
+ }
+
+ /* Do some sanity checks on the above options. */
if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
- && alpha_tp != ALPHA_TP_INSN)
+ && alpha_tp != ALPHA_TP_INSN && alpha_cpu != PROCESSOR_EV6)
{
warning ("fp software completion requires -mtrap-precision=i");
alpha_tp = ALPHA_TP_INSN;
@@ -256,11 +265,11 @@ override_options ()
if (!alpha_mlat_string)
alpha_mlat_string = "L1";
- if (isdigit (alpha_mlat_string[0])
+ if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
&& (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
;
else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
- && isdigit (alpha_mlat_string[1])
+ && ISDIGIT ((unsigned char)alpha_mlat_string[1])
&& alpha_mlat_string[2] == '\0')
{
static int const cache_latency[][4] =
@@ -298,6 +307,9 @@ override_options ()
/* Default the definition of "small data" to 8 bytes. */
if (!g_switch_set)
g_switch_value = 8;
+
+ /* Acquire a unique set number for our register saves and restores. */
+ alpha_sr_alias_set = new_alias_set ();
}
/* Returns 1 if VALUE is a mask that contains full bytes of zero or ones. */
@@ -337,7 +349,6 @@ reg_or_6bit_operand (op, mode)
{
return ((GET_CODE (op) == CONST_INT
&& (unsigned HOST_WIDE_INT) INTVAL (op) < 64)
- || GET_CODE (op) == CONSTANT_P_RTX
|| register_operand (op, mode));
}
@@ -351,7 +362,6 @@ reg_or_8bit_operand (op, mode)
{
return ((GET_CODE (op) == CONST_INT
&& (unsigned HOST_WIDE_INT) INTVAL (op) < 0x100)
- || GET_CODE (op) == CONSTANT_P_RTX
|| register_operand (op, mode));
}
@@ -363,8 +373,7 @@ cint8_operand (op, mode)
enum machine_mode mode ATTRIBUTE_UNUSED;
{
return ((GET_CODE (op) == CONST_INT
- && (unsigned HOST_WIDE_INT) INTVAL (op) < 0x100)
- || GET_CODE (op) == CONSTANT_P_RTX);
+ && (unsigned HOST_WIDE_INT) INTVAL (op) < 0x100));
}
/* Return 1 if the operand is a valid second operand to an add insn. */
@@ -375,11 +384,9 @@ add_operand (op, mode)
enum machine_mode mode;
{
if (GET_CODE (op) == CONST_INT)
+ /* Constraints I, J, O and P are covered by K. */
return (CONST_OK_FOR_LETTER_P (INTVAL (op), 'K')
- || CONST_OK_FOR_LETTER_P (INTVAL (op), 'L')
- || CONST_OK_FOR_LETTER_P (INTVAL (op), 'O'));
- else if (GET_CODE (op) == CONSTANT_P_RTX)
- return 1;
+ || CONST_OK_FOR_LETTER_P (INTVAL (op), 'L'));
return register_operand (op, mode);
}
@@ -393,10 +400,8 @@ sext_add_operand (op, mode)
enum machine_mode mode;
{
if (GET_CODE (op) == CONST_INT)
- return ((unsigned HOST_WIDE_INT) INTVAL (op) < 255
- || (unsigned HOST_WIDE_INT) (- INTVAL (op)) < 255);
- else if (GET_CODE (op) == CONSTANT_P_RTX)
- return 1;
+ return (CONST_OK_FOR_LETTER_P (INTVAL (op), 'I')
+ || CONST_OK_FOR_LETTER_P (INTVAL (op), 'O'));
return register_operand (op, mode);
}
@@ -427,8 +432,6 @@ and_operand (op, mode)
return ((unsigned HOST_WIDE_INT) INTVAL (op) < 0x100
|| (unsigned HOST_WIDE_INT) ~ INTVAL (op) < 0x100
|| zap_mask (INTVAL (op)));
- else if (GET_CODE (op) == CONSTANT_P_RTX)
- return 1;
return register_operand (op, mode);
}
@@ -443,8 +446,6 @@ or_operand (op, mode)
if (GET_CODE (op) == CONST_INT)
return ((unsigned HOST_WIDE_INT) INTVAL (op) < 0x100
|| (unsigned HOST_WIDE_INT) ~ INTVAL (op) < 0x100);
- else if (GET_CODE (op) == CONSTANT_P_RTX)
- return 1;
return register_operand (op, mode);
}
@@ -483,9 +484,9 @@ mode_mask_operand (op, mode)
return (GET_CODE (op) == CONST_INT
&& (INTVAL (op) == 0xff
|| INTVAL (op) == 0xffff
- || INTVAL (op) == 0xffffffff
+ || INTVAL (op) == (HOST_WIDE_INT)0xffffffff
#if HOST_BITS_PER_WIDE_INT == 64
- || INTVAL (op) == 0xffffffffffffffff
+ || INTVAL (op) == -1
#endif
));
}
@@ -544,7 +545,6 @@ reg_or_cint_operand (op, mode)
enum machine_mode mode;
{
return (GET_CODE (op) == CONST_INT
- || GET_CODE (op) == CONSTANT_P_RTX
|| register_operand (op, mode));
}
@@ -562,7 +562,7 @@ some_operand (op, mode)
switch (GET_CODE (op))
{
case REG: case MEM: case CONST_DOUBLE: case CONST_INT: case LABEL_REF:
- case SYMBOL_REF: case CONST: case CONSTANT_P_RTX:
+ case SYMBOL_REF: case CONST:
return 1;
case SUBREG:
@@ -611,9 +611,11 @@ input_operand (op, mode)
return GET_MODE_CLASS (mode) == MODE_FLOAT && op == CONST0_RTX (mode);
case CONST_INT:
- case CONSTANT_P_RTX:
return mode == QImode || mode == HImode || add_operand (op, mode);
+ case CONSTANT_P_RTX:
+ return 1;
+
default:
break;
}
@@ -726,40 +728,49 @@ divmod_operator (op, mode)
a constant. It must be a valid address. This means that we can do
this as an aligned reference plus some offset.
- Take into account what reload will do.
-
- We could say that out-of-range stack slots are alignable, but that would
- complicate get_aligned_mem and it isn't worth the trouble since few
- functions have large stack space. */
+ Take into account what reload will do. */
int
aligned_memory_operand (op, mode)
register rtx op;
enum machine_mode mode;
{
- if (GET_CODE (op) == SUBREG)
+ rtx base;
+
+ if (reload_in_progress)
{
- if (GET_MODE (op) != mode)
- return 0;
- op = SUBREG_REG (op);
- mode = GET_MODE (op);
+ rtx tmp = op;
+ if (GET_CODE (tmp) == SUBREG)
+ tmp = SUBREG_REG (tmp);
+ if (GET_CODE (tmp) == REG
+ && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
+ {
+ op = reg_equiv_memory_loc[REGNO (tmp)];
+ if (op == 0)
+ return 0;
+ }
}
- if (reload_in_progress && GET_CODE (op) == REG
- && REGNO (op) >= FIRST_PSEUDO_REGISTER)
- op = reg_equiv_mem[REGNO (op)];
-
- if (GET_CODE (op) != MEM || GET_MODE (op) != mode
- || ! memory_address_p (mode, XEXP (op, 0)))
+ if (GET_CODE (op) != MEM
+ || GET_MODE (op) != mode)
return 0;
-
op = XEXP (op, 0);
- if (GET_CODE (op) == PLUS)
- op = XEXP (op, 0);
+ /* LEGITIMIZE_RELOAD_ADDRESS creates (plus (plus reg const_hi) const_lo)
+ sorts of constructs. Dig for the real base register. */
+ if (reload_in_progress
+ && GET_CODE (op) == PLUS
+ && GET_CODE (XEXP (op, 0)) == PLUS)
+ base = XEXP (XEXP (op, 0), 0);
+ else
+ {
+ if (! memory_address_p (mode, op))
+ return 0;
+ base = (GET_CODE (op) == PLUS ? XEXP (op, 0) : op);
+ }
- return (GET_CODE (op) == REG
- && REGNO_POINTER_ALIGN (REGNO (op)) >= 4);
+ return (GET_CODE (base) == REG
+ && REGNO_POINTER_ALIGN (REGNO (base)) >= 4);
}
/* Similar, but return 1 if OP is a MEM which is not alignable. */
@@ -769,31 +780,42 @@ unaligned_memory_operand (op, mode)
register rtx op;
enum machine_mode mode;
{
- if (GET_CODE (op) == SUBREG)
+ rtx base;
+
+ if (reload_in_progress)
{
- if (GET_MODE (op) != mode)
- return 0;
- op = SUBREG_REG (op);
- mode = GET_MODE (op);
+ rtx tmp = op;
+ if (GET_CODE (tmp) == SUBREG)
+ tmp = SUBREG_REG (tmp);
+ if (GET_CODE (tmp) == REG
+ && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
+ {
+ op = reg_equiv_memory_loc[REGNO (tmp)];
+ if (op == 0)
+ return 0;
+ }
}
- if (reload_in_progress && GET_CODE (op) == REG
- && REGNO (op) >= FIRST_PSEUDO_REGISTER)
- op = reg_equiv_mem[REGNO (op)];
-
- if (GET_CODE (op) != MEM || GET_MODE (op) != mode)
+ if (GET_CODE (op) != MEM
+ || GET_MODE (op) != mode)
return 0;
-
op = XEXP (op, 0);
- if (! memory_address_p (mode, op))
- return 1;
-
- if (GET_CODE (op) == PLUS)
- op = XEXP (op, 0);
+ /* LEGITIMIZE_RELOAD_ADDRESS creates (plus (plus reg const_hi) const_lo)
+ sorts of constructs. Dig for the real base register. */
+ if (reload_in_progress
+ && GET_CODE (op) == PLUS
+ && GET_CODE (XEXP (op, 0)) == PLUS)
+ base = XEXP (XEXP (op, 0), 0);
+ else
+ {
+ if (! memory_address_p (mode, op))
+ return 0;
+ base = (GET_CODE (op) == PLUS ? XEXP (op, 0) : op);
+ }
- return (GET_CODE (op) != REG
- || REGNO_POINTER_ALIGN (REGNO (op)) < 4);
+ return (GET_CODE (base) == REG
+ && REGNO_POINTER_ALIGN (REGNO (base)) < 4);
}
/* Return 1 if OP is either a register or an unaligned memory location. */
@@ -822,6 +844,74 @@ any_memory_operand (op, mode)
&& REGNO (SUBREG_REG (op)) >= FIRST_PSEUDO_REGISTER));
}
+/* Returns 1 if OP is not an eliminable register.
+
+ This exists to cure a pathological abort in the s8addq (et al) patterns,
+
+ long foo () { long t; bar(); return (long) &t * 26107; }
+
+ which run afoul of a hack in reload to cure a (presumably) similar
+ problem with lea-type instructions on other targets. But there is
+ one of us and many of them, so work around the problem by selectively
+ preventing combine from making the optimization. */
+
+int
+reg_not_elim_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ rtx inner = op;
+ if (GET_CODE (op) == SUBREG)
+ inner = SUBREG_REG (op);
+ if (inner == frame_pointer_rtx || inner == arg_pointer_rtx)
+ return 0;
+
+ return register_operand (op, mode);
+}
+
+/* Return 1 is OP is a memory location that is not a reference (using
+ an AND) to an unaligned location. Take into account what reload
+ will do. */
+
+int
+normal_memory_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+ if (reload_in_progress)
+ {
+ rtx tmp = op;
+ if (GET_CODE (tmp) == SUBREG)
+ tmp = SUBREG_REG (tmp);
+ if (GET_CODE (tmp) == REG
+ && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
+ {
+ op = reg_equiv_memory_loc[REGNO (tmp)];
+
+ /* This may not have been assigned an equivalent address if it will
+ be eliminated. In that case, it doesn't matter what we do. */
+ if (op == 0)
+ return 1;
+ }
+ }
+
+ return GET_CODE (op) == MEM && GET_CODE (XEXP (op, 0)) != AND;
+}
+
+/* Accept a register, but not a subreg of any kind. This allows us to
+ avoid pathological cases in reload wrt data movement common in
+ int->fp conversion. */
+
+int
+reg_no_subreg_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ if (GET_CODE (op) == SUBREG)
+ return 0;
+ return register_operand (op, mode);
+}
+
/* Return 1 if this function can directly return via $26. */
int
@@ -835,7 +925,8 @@ direct_return ()
/* REF is an alignable memory location. Place an aligned SImode
reference into *PALIGNED_MEM and the number of bits to shift into
- *PBITNUM. */
+ *PBITNUM. SCRATCH is a free register for use in reloading out
+ of range stack slots. */
void
get_aligned_mem (ref, paligned_mem, pbitnum)
@@ -845,33 +936,33 @@ get_aligned_mem (ref, paligned_mem, pbitnum)
rtx base;
HOST_WIDE_INT offset = 0;
- if (GET_CODE (ref) == SUBREG)
- {
- offset = SUBREG_WORD (ref) * UNITS_PER_WORD;
- if (BYTES_BIG_ENDIAN)
- offset -= (MIN (UNITS_PER_WORD, GET_MODE_SIZE (GET_MODE (ref)))
- - MIN (UNITS_PER_WORD,
- GET_MODE_SIZE (GET_MODE (SUBREG_REG (ref)))));
- ref = SUBREG_REG (ref);
- }
+ if (GET_CODE (ref) != MEM)
+ abort ();
- if (GET_CODE (ref) == REG)
- ref = reg_equiv_mem[REGNO (ref)];
+ if (reload_in_progress
+ && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
+ {
+ base = find_replacement (&XEXP (ref, 0));
- if (reload_in_progress)
- base = find_replacement (&XEXP (ref, 0));
+ if (! memory_address_p (GET_MODE (ref), base))
+ abort ();
+ }
else
- base = XEXP (ref, 0);
+ {
+ base = XEXP (ref, 0);
+ }
if (GET_CODE (base) == PLUS)
offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
- *paligned_mem = gen_rtx_MEM (SImode,
- plus_constant (base, offset & ~3));
- MEM_IN_STRUCT_P (*paligned_mem) = MEM_IN_STRUCT_P (ref);
- MEM_VOLATILE_P (*paligned_mem) = MEM_VOLATILE_P (ref);
+ *paligned_mem = gen_rtx_MEM (SImode, plus_constant (base, offset & ~3));
+ MEM_COPY_ATTRIBUTES (*paligned_mem, ref);
RTX_UNCHANGING_P (*paligned_mem) = RTX_UNCHANGING_P (ref);
+ /* Sadly, we cannot use alias sets here because we may overlap other
+ data in a different alias set. */
+ /* MEM_ALIAS_SET (*paligned_mem) = MEM_ALIAS_SET (ref); */
+
*pbitnum = GEN_INT ((offset & 3) * 8);
}
@@ -886,23 +977,21 @@ get_unaligned_address (ref, extra_offset)
rtx base;
HOST_WIDE_INT offset = 0;
- if (GET_CODE (ref) == SUBREG)
- {
- offset = SUBREG_WORD (ref) * UNITS_PER_WORD;
- if (BYTES_BIG_ENDIAN)
- offset -= (MIN (UNITS_PER_WORD, GET_MODE_SIZE (GET_MODE (ref)))
- - MIN (UNITS_PER_WORD,
- GET_MODE_SIZE (GET_MODE (SUBREG_REG (ref)))));
- ref = SUBREG_REG (ref);
- }
+ if (GET_CODE (ref) != MEM)
+ abort ();
- if (GET_CODE (ref) == REG)
- ref = reg_equiv_mem[REGNO (ref)];
+ if (reload_in_progress
+ && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
+ {
+ base = find_replacement (&XEXP (ref, 0));
- if (reload_in_progress)
- base = find_replacement (&XEXP (ref, 0));
+ if (! memory_address_p (GET_MODE (ref), base))
+ abort ();
+ }
else
- base = XEXP (ref, 0);
+ {
+ base = XEXP (ref, 0);
+ }
if (GET_CODE (base) == PLUS)
offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
@@ -945,6 +1034,12 @@ alpha_set_memflags_1 (x, in_struct_p, volatile_p, unchanging_p)
MEM_IN_STRUCT_P (x) = in_struct_p;
MEM_VOLATILE_P (x) = volatile_p;
RTX_UNCHANGING_P (x) = unchanging_p;
+ /* Sadly, we cannot use alias sets because the extra aliasing
+ produced by the AND interferes. Given that two-byte quantities
+ are the only thing we would be able to differentiate anyway,
+ there does not seem to be any point in convoluting the early
+ out of the alias check. */
+ /* MEM_ALIAS_SET (x) = alias_set; */
break;
default:
@@ -963,14 +1058,19 @@ alpha_set_memflags (insn, ref)
rtx insn;
rtx ref;
{
- /* Note that it is always safe to get these flags, though they won't
- be what we think if REF is not a MEM. */
- int in_struct_p = MEM_IN_STRUCT_P (ref);
- int volatile_p = MEM_VOLATILE_P (ref);
- int unchanging_p = RTX_UNCHANGING_P (ref);
-
- if (GET_CODE (ref) != MEM
- || (! in_struct_p && ! volatile_p && ! unchanging_p))
+ int in_struct_p, volatile_p, unchanging_p;
+
+ if (GET_CODE (ref) != MEM)
+ return;
+
+ in_struct_p = MEM_IN_STRUCT_P (ref);
+ volatile_p = MEM_VOLATILE_P (ref);
+ unchanging_p = RTX_UNCHANGING_P (ref);
+
+ /* This is only called from alpha.md, after having had something
+ generated from one of the insn patterns. So if everything is
+ zero, the pattern is already up-to-date. */
+ if (! in_struct_p && ! volatile_p && ! unchanging_p)
return;
alpha_set_memflags_1 (insn, in_struct_p, volatile_p, unchanging_p);
@@ -1128,7 +1228,7 @@ alpha_emit_set_const_1 (target, mode, c, n)
for (; bits > 0; bits--)
if ((temp = (alpha_emit_set_const
(subtarget, mode,
- (unsigned HOST_WIDE_INT) c >> bits, i))) != 0
+ (unsigned HOST_WIDE_INT) (c >> bits), i))) != 0
|| ((temp = (alpha_emit_set_const
(subtarget, mode,
((unsigned HOST_WIDE_INT) c) >> bits, i)))
@@ -1176,70 +1276,66 @@ alpha_emit_set_const_1 (target, mode, c, n)
return 0;
}
-#if HOST_BITS_PER_WIDE_INT == 64
/* Having failed to find a 3 insn sequence in alpha_emit_set_const,
fall back to a straight forward decomposition. We do this to avoid
exponential run times encountered when looking for longer sequences
with alpha_emit_set_const. */
rtx
-alpha_emit_set_long_const (target, c)
+alpha_emit_set_long_const (target, c1, c2)
rtx target;
- HOST_WIDE_INT c;
+ HOST_WIDE_INT c1, c2;
{
- /* Use a pseudo if highly optimizing and still generating RTL. */
- rtx subtarget
- = (flag_expensive_optimizations && rtx_equal_function_value_matters
- ? 0 : target);
HOST_WIDE_INT d1, d2, d3, d4;
- rtx r1, r2;
/* Decompose the entire word */
- d1 = ((c & 0xffff) ^ 0x8000) - 0x8000;
- c -= d1;
- d2 = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
- c = (c - d2) >> 32;
- d3 = ((c & 0xffff) ^ 0x8000) - 0x8000;
- c -= d3;
- d4 = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
-
- if (c - d4 != 0)
- abort();
+#if HOST_BITS_PER_WIDE_INT >= 64
+ if (c2 != -(c1 < 0))
+ abort ();
+ d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
+ c1 -= d1;
+ d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
+ c1 = (c1 - d2) >> 32;
+ d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
+ c1 -= d3;
+ d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
+ if (c1 != d4)
+ abort ();
+#else
+ d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
+ c1 -= d1;
+ d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
+ if (c1 != d2)
+ abort ();
+ c2 += (d2 < 0);
+ d3 = ((c2 & 0xffff) ^ 0x8000) - 0x8000;
+ c2 -= d3;
+ d4 = ((c2 & 0xffffffff) ^ 0x80000000) - 0x80000000;
+ if (c2 != d4)
+ abort ();
+#endif
/* Construct the high word */
- if (d3 == 0)
- r1 = copy_to_suggested_reg (GEN_INT (d4), subtarget, DImode);
- else if (d4 == 0)
- r1 = copy_to_suggested_reg (GEN_INT (d3), subtarget, DImode);
+ if (d4)
+ {
+ emit_move_insn (target, GEN_INT (d4));
+ if (d3)
+ emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
+ }
else
- r1 = expand_binop (DImode, add_optab, GEN_INT (d3), GEN_INT (d4),
- subtarget, 0, OPTAB_WIDEN);
+ emit_move_insn (target, GEN_INT (d3));
/* Shift it into place */
- r2 = expand_binop (DImode, ashl_optab, r1, GEN_INT (32),
- subtarget, 0, OPTAB_WIDEN);
-
- if (subtarget == 0 && d1 == d3 && d2 == d4)
- r1 = expand_binop (DImode, add_optab, r1, r2, subtarget, 0, OPTAB_WIDEN);
- else
- {
- r1 = r2;
-
- /* Add in the low word */
- if (d2 != 0)
- r1 = expand_binop (DImode, add_optab, r1, GEN_INT (d2),
- subtarget, 0, OPTAB_WIDEN);
- if (d1 != 0)
- r1 = expand_binop (DImode, add_optab, r1, GEN_INT (d1),
- subtarget, 0, OPTAB_WIDEN);
- }
+ emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));
- if (subtarget == 0)
- r1 = copy_to_suggested_reg(r1, target, DImode);
+ /* Add in the low bits. */
+ if (d2)
+ emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
+ if (d1)
+ emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));
- return r1;
+ return target;
}
-#endif /* HOST_BITS_PER_WIDE_INT == 64 */
/* Generate the comparison for a conditional branch. */
@@ -1416,7 +1512,7 @@ alpha_emit_conditional_move (cmp, mode)
abort ();
}
- /* ??? We mark the the branch mode to be CCmode to prevent the compare
+ /* ??? We mark the branch mode to be CCmode to prevent the compare
and cmov from being combined, since the compare insn follows IEEE
rules that the cmov does not. */
if (alpha_compare_fp_p && !flag_fast_math)
@@ -1517,6 +1613,8 @@ alpha_expand_unaligned_load (tgt, mem, size, ofs, sign)
emit_insn (gen_extqh (exth, memh, addr));
mode = DImode;
break;
+ default:
+ abort();
}
addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
@@ -1790,7 +1888,8 @@ alpha_expand_block_move (operands)
{
rtx bytes_rtx = operands[2];
rtx align_rtx = operands[3];
- HOST_WIDE_INT bytes = INTVAL (bytes_rtx);
+ HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
+ HOST_WIDE_INT bytes = orig_bytes;
HOST_WIDE_INT src_align = INTVAL (align_rtx);
HOST_WIDE_INT dst_align = src_align;
rtx orig_src = operands[1];
@@ -1863,17 +1962,24 @@ alpha_expand_block_move (operands)
enum machine_mode mode;
tmp = XEXP (XEXP (orig_src, 0), 0);
- mode = mode_for_size (bytes, MODE_INT, 1);
+ /* Don't use the existing register if we're reading more than
+ is held in the register. Nor if there is not a mode that
+ handles the exact size. */
+ mode = mode_for_size (bytes * BITS_PER_UNIT, MODE_INT, 1);
if (mode != BLKmode
- && GET_MODE_SIZE (GET_MODE (tmp)) <= bytes)
+ && GET_MODE_SIZE (GET_MODE (tmp)) >= bytes)
{
- /* Whee! Optimize the load to use the existing register. */
- data_regs[nregs++] = gen_lowpart (mode, tmp);
+ if (mode == TImode)
+ {
+ data_regs[nregs] = gen_lowpart (DImode, tmp);
+ data_regs[nregs+1] = gen_highpart (DImode, tmp);
+ nregs += 2;
+ }
+ else
+ data_regs[nregs++] = gen_lowpart (mode, tmp);
goto src_done;
}
- /* ??? We could potentially be copying 3 bytes or whatnot from
- a wider reg. Probably not worth worrying about. */
/* No appropriate mode; fall back on memory. */
orig_src = change_address (orig_src, GET_MODE (orig_src),
copy_addr_to_reg (XEXP (orig_src, 0)));
@@ -1890,9 +1996,9 @@ alpha_expand_block_move (operands)
for (i = 0; i < words; ++i)
{
emit_move_insn (data_regs[nregs+i],
- change_address(orig_src, DImode,
- plus_constant (XEXP (orig_src, 0),
- ofs + i*8)));
+ change_address (orig_src, DImode,
+ plus_constant (XEXP (orig_src, 0),
+ ofs + i*8)));
}
nregs += words;
@@ -1909,9 +2015,9 @@ alpha_expand_block_move (operands)
for (i = 0; i < words; ++i)
{
emit_move_insn (data_regs[nregs+i],
- change_address(orig_src, SImode,
- plus_constant (XEXP (orig_src, 0),
- ofs + i*4)));
+ change_address (orig_src, SImode,
+ plus_constant (XEXP (orig_src, 0),
+ ofs + i*4)));
}
nregs += words;
@@ -1925,7 +2031,8 @@ alpha_expand_block_move (operands)
for (i = 0; i < words+1; ++i)
data_regs[nregs+i] = gen_reg_rtx(DImode);
- alpha_expand_unaligned_load_words(data_regs+nregs, orig_src, words, ofs);
+ alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
+ words, ofs);
nregs += words;
bytes -= words * 8;
@@ -1979,7 +2086,7 @@ alpha_expand_block_move (operands)
}
src_done:
- if (nregs > sizeof(data_regs)/sizeof(*data_regs))
+ if (nregs > (int)(sizeof(data_regs)/sizeof(*data_regs)))
abort();
/*
@@ -1993,19 +2100,51 @@ alpha_expand_block_move (operands)
enum machine_mode mode;
tmp = XEXP (XEXP (orig_dst, 0), 0);
- mode = mode_for_size (bytes, MODE_INT, 1);
- if (GET_MODE (tmp) == mode && nregs == 1)
+ mode = mode_for_size (orig_bytes * BITS_PER_UNIT, MODE_INT, 1);
+ if (GET_MODE (tmp) == mode)
{
- emit_move_insn (tmp, data_regs[0]);
- i = 1;
- goto dst_done;
+ if (nregs == 1)
+ {
+ emit_move_insn (tmp, data_regs[0]);
+ i = 1;
+ goto dst_done;
+ }
+ else if (nregs == 2 && mode == TImode)
+ {
+ /* Undo the subregging done above when copying between
+ two TImode registers. */
+ if (GET_CODE (data_regs[0]) == SUBREG
+ && GET_MODE (SUBREG_REG (data_regs[0])) == TImode)
+ {
+ emit_move_insn (tmp, SUBREG_REG (data_regs[0]));
+ }
+ else
+ {
+ rtx seq;
+
+ start_sequence ();
+ emit_move_insn (gen_lowpart (DImode, tmp), data_regs[0]);
+ emit_move_insn (gen_highpart (DImode, tmp), data_regs[1]);
+ seq = get_insns ();
+ end_sequence ();
+
+ emit_no_conflict_block (seq, tmp, data_regs[0],
+ data_regs[1], NULL_RTX);
+ }
+
+ i = 2;
+ goto dst_done;
+ }
}
/* ??? If nregs > 1, consider reconstructing the word in regs. */
/* ??? Optimize mode < dst_mode with strict_low_part. */
- /* No appropriate mode; fall back on memory. */
+
+ /* No appropriate mode; fall back on memory. We can speed things
+ up by recognizing extra alignment information. */
orig_dst = change_address (orig_dst, GET_MODE (orig_dst),
copy_addr_to_reg (XEXP (orig_dst, 0)));
+ dst_align = GET_MODE_SIZE (GET_MODE (tmp));
}
/* Write out the data in whatever chunks reading the source allowed. */
@@ -2013,9 +2152,9 @@ alpha_expand_block_move (operands)
{
while (i < nregs && GET_MODE (data_regs[i]) == DImode)
{
- emit_move_insn (change_address(orig_dst, DImode,
- plus_constant (XEXP (orig_dst, 0),
- ofs)),
+ emit_move_insn (change_address (orig_dst, DImode,
+ plus_constant (XEXP (orig_dst, 0),
+ ofs)),
data_regs[i]);
ofs += 8;
i++;
@@ -2030,13 +2169,13 @@ alpha_expand_block_move (operands)
tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
NULL_RTX, 1, OPTAB_WIDEN);
- emit_move_insn (change_address(orig_dst, SImode,
- plus_constant (XEXP (orig_dst, 0),
- ofs)),
+ emit_move_insn (change_address (orig_dst, SImode,
+ plus_constant (XEXP (orig_dst, 0),
+ ofs)),
gen_lowpart (SImode, data_regs[i]));
- emit_move_insn (change_address(orig_dst, SImode,
- plus_constant (XEXP (orig_dst, 0),
- ofs+4)),
+ emit_move_insn (change_address (orig_dst, SImode,
+ plus_constant (XEXP (orig_dst, 0),
+ ofs+4)),
gen_lowpart (SImode, tmp));
ofs += 8;
i++;
@@ -2155,6 +2294,22 @@ alpha_expand_block_clear (operands)
align = 2;
}
}
+ else if (GET_CODE (tmp) == ADDRESSOF)
+ {
+ enum machine_mode mode;
+
+ mode = mode_for_size (bytes * BITS_PER_UNIT, MODE_INT, 1);
+ if (GET_MODE (XEXP (tmp, 0)) == mode)
+ {
+ emit_move_insn (XEXP (tmp, 0), const0_rtx);
+ return 1;
+ }
+
+ /* No appropriate mode; fall back on memory. */
+ orig_dst = change_address (orig_dst, GET_MODE (orig_dst),
+ copy_addr_to_reg (tmp));
+ align = GET_MODE_SIZE (GET_MODE (XEXP (tmp, 0)));
+ }
/* Handle a block of contiguous words first. */
@@ -2179,9 +2334,9 @@ alpha_expand_block_clear (operands)
for (i = 0; i < words; ++i)
{
- emit_move_insn (change_address(orig_dst, SImode,
- plus_constant (XEXP (orig_dst, 0),
- ofs + i*4)),
+ emit_move_insn (change_address (orig_dst, SImode,
+ plus_constant (XEXP (orig_dst, 0),
+ ofs + i*4)),
const0_rtx);
}
@@ -2431,6 +2586,7 @@ void
alpha_init_expanders ()
{
alpha_return_addr_rtx = NULL_RTX;
+ alpha_eh_epilogue_sp_ofs = NULL_RTX;
/* Arrange to save and restore machine status around nested functions. */
save_machine_status = alpha_save_machine_status;
@@ -2454,7 +2610,7 @@ alpha_return_addr (count, frame)
/* No rtx yet. Invent one, and initialize it from $26 in the prologue. */
alpha_return_addr_rtx = gen_reg_rtx (Pmode);
- init = gen_rtx_SET (Pmode, alpha_return_addr_rtx,
+ init = gen_rtx_SET (VOIDmode, alpha_return_addr_rtx,
gen_rtx_REG (Pmode, REG_RA));
/* Emit the insn to the prologue with the other argument copies. */
@@ -2478,9 +2634,9 @@ alpha_ra_ever_killed ()
return regs_ever_live[REG_RA];
push_topmost_sequence ();
- top = get_insns();
+ top = get_insns ();
pop_topmost_sequence ();
-
+
return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL_RTX);
}
@@ -2729,7 +2885,7 @@ print_operand (file, x, code)
&& CONST_DOUBLE_LOW (x) == -1)
fprintf (file, "q");
#else
- else if (GET_CODE (x) == CONST_INT && INTVAL (x) == 0xffffffffffffffff)
+ else if (GET_CODE (x) == CONST_INT && INTVAL (x) == -1)
fprintf (file, "q");
else if (GET_CODE (x) == CONST_DOUBLE
&& CONST_DOUBLE_HIGH (x) == 0
@@ -2826,6 +2982,37 @@ print_operand (file, x, code)
output_operand_lossage ("invalid %%xn code");
}
}
+
+void
+print_operand_address (file, addr)
+ FILE *file;
+ rtx addr;
+{
+ int basereg = 31;
+ HOST_WIDE_INT offset = 0;
+
+ if (GET_CODE (addr) == AND)
+ addr = XEXP (addr, 0);
+
+ if (GET_CODE (addr) == PLUS
+ && GET_CODE (XEXP (addr, 1)) == CONST_INT)
+ {
+ offset = INTVAL (XEXP (addr, 1));
+ addr = XEXP (addr, 0);
+ }
+ if (GET_CODE (addr) == REG)
+ basereg = REGNO (addr);
+ else if (GET_CODE (addr) == SUBREG
+ && GET_CODE (SUBREG_REG (addr)) == REG)
+ basereg = REGNO (SUBREG_REG (addr)) + SUBREG_WORD (addr);
+ else if (GET_CODE (addr) == CONST_INT)
+ offset = INTVAL (addr);
+ else
+ abort ();
+
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
+ fprintf (file, "($%d)", basereg);
+}
/* Emit RTL insns to initialize the variable parts of a trampoline at
TRAMP. FNADDR is an RTX for the address of the function's pure
@@ -2846,10 +3033,14 @@ alpha_initialize_trampoline (tramp, fnaddr, cxt, fnofs, cxtofs, jmpofs)
int fnofs, cxtofs, jmpofs;
{
rtx temp, temp1, addr;
- /* ??? Something is wrong with VMS codegen in that we get aborts when
- using ptr_mode. Hack around it for now. */
+ /* VMS really uses DImode pointers in memory at this point. */
enum machine_mode mode = TARGET_OPEN_VMS ? Pmode : ptr_mode;
+#ifdef POINTERS_EXTEND_UNSIGNED
+ fnaddr = convert_memory_address (mode, fnaddr);
+ cxt = convert_memory_address (mode, cxt);
+#endif
+
/* Store function address and CXT. */
addr = memory_address (mode, plus_constant (tramp, fnofs));
emit_move_insn (gen_rtx (MEM, mode, addr), fnaddr);
@@ -2953,7 +3144,7 @@ alpha_builtin_saveregs (arglist)
dest = change_address (block, ptr_mode, XEXP (block, 0));
emit_move_insn (dest, addr);
- if (flag_check_memory_usage)
+ if (current_function_check_memory_usage)
emit_library_call (chkr_set_right_libfunc, 1, VOIDmode, 3,
dest, ptr_mode,
GEN_INT (GET_MODE_SIZE (ptr_mode)),
@@ -2967,7 +3158,7 @@ alpha_builtin_saveregs (arglist)
POINTER_SIZE/BITS_PER_UNIT));
emit_move_insn (dest, argsize);
- if (flag_check_memory_usage)
+ if (current_function_check_memory_usage)
emit_library_call (chkr_set_right_libfunc, 1, VOIDmode, 3,
dest, ptr_mode,
GEN_INT (GET_MODE_SIZE
@@ -3190,13 +3381,39 @@ alpha_does_function_need_gp ()
void
alpha_write_verstamp (file)
- FILE *file;
+ FILE *file ATTRIBUTE_UNUSED;
{
#ifdef MS_STAMP
fprintf (file, "\t.verstamp %d %d\n", MS_STAMP, LS_STAMP);
#endif
}
+/* Helper function to set RTX_FRAME_RELATED_P on instructions, including
+ sequences. */
+
+static rtx
+set_frame_related_p ()
+{
+ rtx seq = gen_sequence ();
+ end_sequence ();
+
+ if (GET_CODE (seq) == SEQUENCE)
+ {
+ int i = XVECLEN (seq, 0);
+ while (--i >= 0)
+ RTX_FRAME_RELATED_P (XVECEXP (seq, 0, i)) = 1;
+ return emit_insn (seq);
+ }
+ else
+ {
+ seq = emit_insn (seq);
+ RTX_FRAME_RELATED_P (seq) = 1;
+ return seq;
+ }
+}
+
+#define FRP(exp) (start_sequence (), exp, set_frame_related_p ())
+
/* Write function prologue. */
/* On vms we have two kinds of functions:
@@ -3226,7 +3443,7 @@ alpha_expand_prologue ()
HOST_WIDE_INT frame_size;
/* Offset from base reg to register save area. */
HOST_WIDE_INT reg_offset;
- rtx sa_reg;
+ rtx sa_reg, mem;
int i;
sa_size = alpha_sa_size ();
@@ -3276,8 +3493,8 @@ alpha_expand_prologue ()
if (frame_size != 0)
{
- emit_move_insn (stack_pointer_rtx,
- plus_constant (stack_pointer_rtx, -frame_size));
+ FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (-frame_size))));
}
}
else
@@ -3292,9 +3509,10 @@ alpha_expand_prologue ()
HOST_WIDE_INT leftover = frame_size + 4096 - blocks * 8192;
rtx ptr = gen_rtx_REG (DImode, 22);
rtx count = gen_rtx_REG (DImode, 23);
+ rtx seq;
emit_move_insn (count, GEN_INT (blocks));
- emit_move_insn (ptr, plus_constant (stack_pointer_rtx, 4096));
+ emit_insn (gen_adddi3 (ptr, stack_pointer_rtx, GEN_INT (4096)));
/* Because of the difficulty in emitting a new basic block this
late in the compilation, generate the loop as a single insn. */
@@ -3307,7 +3525,42 @@ alpha_expand_prologue ()
emit_move_insn (last, const0_rtx);
}
- emit_move_insn (stack_pointer_rtx, plus_constant (ptr, -leftover));
+ if (TARGET_WINDOWS_NT)
+ {
+ /* For NT stack unwind (done by 'reverse execution'), it's
+ not OK to take the result of a loop, even though the value
+ is already in ptr, so we reload it via a single operation
+ and subtract it to sp.
+
+ Yes, that's correct -- we have to reload the whole constant
+ into a temporary via ldah+lda then subtract from sp. To
+ ensure we get ldah+lda, we use a special pattern. */
+
+ HOST_WIDE_INT lo, hi;
+ lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
+ hi = frame_size - lo;
+
+ emit_move_insn (ptr, GEN_INT (hi));
+ emit_insn (gen_nt_lda (ptr, GEN_INT (lo)));
+ seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
+ ptr));
+ }
+ else
+ {
+ seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
+ GEN_INT (-leftover)));
+ }
+
+ /* This alternative is special, because the DWARF code cannot
+ possibly intuit through the loop above. So we invent this
+ note it looks at instead. */
+ RTX_FRAME_RELATED_P (seq) = 1;
+ REG_NOTES (seq)
+ = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
+ gen_rtx_SET (VOIDmode, stack_pointer_rtx,
+ gen_rtx_PLUS (Pmode, stack_pointer_rtx,
+ GEN_INT (-frame_size))),
+ REG_NOTES (seq));
}
/* Cope with very large offsets to the register save area. */
@@ -3323,21 +3576,23 @@ alpha_expand_prologue ()
bias = reg_offset, reg_offset = 0;
sa_reg = gen_rtx_REG (DImode, 24);
- emit_move_insn (sa_reg, plus_constant (stack_pointer_rtx, bias));
+ FRP (emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, GEN_INT (bias))));
}
/* Save regs in stack order. Beginning with VMS PV. */
if (TARGET_OPEN_VMS && vms_is_stack_procedure)
{
- emit_move_insn (gen_rtx_MEM (DImode, stack_pointer_rtx),
- gen_rtx_REG (DImode, REG_PV));
+ mem = gen_rtx_MEM (DImode, stack_pointer_rtx);
+ MEM_ALIAS_SET (mem) = alpha_sr_alias_set;
+ FRP (emit_move_insn (mem, gen_rtx_REG (DImode, REG_PV)));
}
/* Save register RA next. */
if (imask & (1L << REG_RA))
{
- emit_move_insn (gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset)),
- gen_rtx_REG (DImode, REG_RA));
+ mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
+ MEM_ALIAS_SET (mem) = alpha_sr_alias_set;
+ FRP (emit_move_insn (mem, gen_rtx_REG (DImode, REG_RA)));
imask &= ~(1L << REG_RA);
reg_offset += 8;
}
@@ -3346,18 +3601,18 @@ alpha_expand_prologue ()
for (i = 0; i < 32; i++)
if (imask & (1L << i))
{
- emit_move_insn (gen_rtx_MEM (DImode,
- plus_constant (sa_reg, reg_offset)),
- gen_rtx_REG (DImode, i));
+ mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
+ MEM_ALIAS_SET (mem) = alpha_sr_alias_set;
+ FRP (emit_move_insn (mem, gen_rtx_REG (DImode, i)));
reg_offset += 8;
}
for (i = 0; i < 32; i++)
if (fmask & (1L << i))
{
- emit_move_insn (gen_rtx_MEM (DFmode,
- plus_constant (sa_reg, reg_offset)),
- gen_rtx_REG (DFmode, i+32));
+ mem = gen_rtx_MEM (DFmode, plus_constant (sa_reg, reg_offset));
+ MEM_ALIAS_SET (mem) = alpha_sr_alias_set;
+ FRP (emit_move_insn (mem, gen_rtx_REG (DFmode, i+32)));
reg_offset += 8;
}
@@ -3366,25 +3621,25 @@ alpha_expand_prologue ()
if (!vms_is_stack_procedure)
{
/* Register frame procedures fave the fp. */
- emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
- hard_frame_pointer_rtx);
+ FRP (emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
+ hard_frame_pointer_rtx));
}
if (vms_base_regno != REG_PV)
- emit_move_insn (gen_rtx_REG (DImode, vms_base_regno),
- gen_rtx_REG (DImode, REG_PV));
+ FRP (emit_move_insn (gen_rtx_REG (DImode, vms_base_regno),
+ gen_rtx_REG (DImode, REG_PV)));
if (vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
{
- emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
+ FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
}
/* If we have to allocate space for outgoing args, do it now. */
if (current_function_outgoing_args_size != 0)
{
- emit_move_insn (stack_pointer_rtx,
- plus_constant (hard_frame_pointer_rtx,
- - ALPHA_ROUND (current_function_outgoing_args_size)));
+ FRP (emit_move_insn (stack_pointer_rtx,
+ plus_constant (hard_frame_pointer_rtx,
+ - ALPHA_ROUND (current_function_outgoing_args_size))));
}
}
else
@@ -3393,13 +3648,13 @@ alpha_expand_prologue ()
if (frame_pointer_needed)
{
if (TARGET_CAN_FAULT_IN_PROLOGUE)
- emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
+ FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
else
{
/* This must always be the last instruction in the
prologue, thus we emit a special move + clobber. */
- emit_insn (gen_init_fp (hard_frame_pointer_rtx,
- stack_pointer_rtx, sa_reg));
+ FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
+ stack_pointer_rtx, sa_reg)));
}
}
}
@@ -3616,6 +3871,12 @@ output_end_prologue (file)
/* Write function epilogue. */
+/* ??? At some point we will want to support full unwind, and so will
+ need to mark the epilogue as well. At the moment, we just confuse
+ dwarf2out. */
+#undef FRP
+#define FRP(exp) exp
+
void
alpha_expand_epilogue ()
{
@@ -3630,7 +3891,7 @@ alpha_expand_epilogue ()
HOST_WIDE_INT reg_offset;
int fp_is_frame_pointer, fp_offset;
rtx sa_reg, sa_reg_exp = NULL;
- rtx sp_adj1, sp_adj2;
+ rtx sp_adj1, sp_adj2, mem;
int i;
sa_size = alpha_sa_size ();
@@ -3664,7 +3925,7 @@ alpha_expand_epilogue ()
&& vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
|| (!TARGET_OPEN_VMS && frame_pointer_needed))
{
- emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
+ FRP (emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx));
}
/* Cope with very large offsets to the register save area. */
@@ -3682,13 +3943,17 @@ alpha_expand_epilogue ()
sa_reg = gen_rtx_REG (DImode, 22);
sa_reg_exp = plus_constant (stack_pointer_rtx, bias);
- emit_move_insn (sa_reg, sa_reg_exp);
+ FRP (emit_move_insn (sa_reg, sa_reg_exp));
}
/* Restore registers in order, excepting a true frame pointer. */
- emit_move_insn (gen_rtx_REG (DImode, REG_RA),
- gen_rtx_MEM (DImode, plus_constant(sa_reg, reg_offset)));
+ if (! alpha_eh_epilogue_sp_ofs)
+ {
+ mem = gen_rtx_MEM (DImode, plus_constant(sa_reg, reg_offset));
+ MEM_ALIAS_SET (mem) = alpha_sr_alias_set;
+ FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
+ }
reg_offset += 8;
imask &= ~(1L << REG_RA);
@@ -3699,10 +3964,9 @@ alpha_expand_epilogue ()
fp_offset = reg_offset;
else
{
- emit_move_insn (gen_rtx_REG (DImode, i),
- gen_rtx_MEM (DImode,
- plus_constant(sa_reg,
- reg_offset)));
+ mem = gen_rtx_MEM (DImode, plus_constant(sa_reg, reg_offset));
+ MEM_ALIAS_SET (mem) = alpha_sr_alias_set;
+ FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
}
reg_offset += 8;
}
@@ -3710,54 +3974,57 @@ alpha_expand_epilogue ()
for (i = 0; i < 32; ++i)
if (fmask & (1L << i))
{
- emit_move_insn (gen_rtx_REG (DFmode, i+32),
- gen_rtx_MEM (DFmode,
- plus_constant(sa_reg, reg_offset)));
+ mem = gen_rtx_MEM (DFmode, plus_constant(sa_reg, reg_offset));
+ MEM_ALIAS_SET (mem) = alpha_sr_alias_set;
+ FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
reg_offset += 8;
}
}
- if (frame_size)
+ if (frame_size || alpha_eh_epilogue_sp_ofs)
{
+ sp_adj1 = stack_pointer_rtx;
+
+ if (alpha_eh_epilogue_sp_ofs)
+ {
+ sp_adj1 = gen_rtx_REG (DImode, 23);
+ emit_move_insn (sp_adj1,
+ gen_rtx_PLUS (Pmode, stack_pointer_rtx,
+ alpha_eh_epilogue_sp_ofs));
+ }
+
/* If the stack size is large, begin computation into a temporary
register so as not to interfere with a potential fp restore,
which must be consecutive with an SP restore. */
if (frame_size < 32768)
- {
- sp_adj1 = stack_pointer_rtx;
- sp_adj2 = GEN_INT (frame_size);
- }
+ sp_adj2 = GEN_INT (frame_size);
else if (frame_size < 0x40007fffL)
{
int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
- sp_adj2 = plus_constant (stack_pointer_rtx, frame_size - low);
+ sp_adj2 = plus_constant (sp_adj1, frame_size - low);
if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
sp_adj1 = sa_reg;
else
{
sp_adj1 = gen_rtx_REG (DImode, 23);
- emit_move_insn (sp_adj1, sp_adj2);
+ FRP (emit_move_insn (sp_adj1, sp_adj2));
}
sp_adj2 = GEN_INT (low);
}
else
{
- sp_adj2 = gen_rtx_REG (DImode, 23);
- sp_adj1 = alpha_emit_set_const (sp_adj2, DImode, frame_size, 3);
- if (!sp_adj1)
+ rtx tmp = gen_rtx_REG (DImode, 23);
+ FRP (sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size, 3));
+ if (!sp_adj2)
{
/* We can't drop new things to memory this late, afaik,
so build it up by pieces. */
-#if HOST_BITS_PER_WIDE_INT == 64
- sp_adj1 = alpha_emit_set_long_const (sp_adj2, frame_size);
- if (!sp_adj1)
+ FRP (sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
+ -(frame_size < 0)));
+ if (!sp_adj2)
abort ();
-#else
- abort ();
-#endif
}
- sp_adj2 = stack_pointer_rtx;
}
/* From now on, things must be in order. So emit blockages. */
@@ -3766,29 +4033,29 @@ alpha_expand_epilogue ()
if (fp_is_frame_pointer)
{
emit_insn (gen_blockage ());
- emit_move_insn (hard_frame_pointer_rtx,
- gen_rtx_MEM (DImode,
- plus_constant(sa_reg, fp_offset)));
+ mem = gen_rtx_MEM (DImode, plus_constant(sa_reg, fp_offset));
+ MEM_ALIAS_SET (mem) = alpha_sr_alias_set;
+ FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
}
else if (TARGET_OPEN_VMS)
{
emit_insn (gen_blockage ());
- emit_move_insn (hard_frame_pointer_rtx,
- gen_rtx_REG (DImode, vms_save_fp_regno));
+ FRP (emit_move_insn (hard_frame_pointer_rtx,
+ gen_rtx_REG (DImode, vms_save_fp_regno)));
}
/* Restore the stack pointer. */
emit_insn (gen_blockage ());
- emit_move_insn (stack_pointer_rtx,
- gen_rtx_PLUS (DImode, sp_adj1, sp_adj2));
+ FRP (emit_move_insn (stack_pointer_rtx,
+ gen_rtx_PLUS (DImode, sp_adj1, sp_adj2)));
}
else
{
if (TARGET_OPEN_VMS && !vms_is_stack_procedure)
{
emit_insn (gen_blockage ());
- emit_move_insn (hard_frame_pointer_rtx,
- gen_rtx_REG (DImode, vms_save_fp_regno));
+ FRP (emit_move_insn (hard_frame_pointer_rtx,
+ gen_rtx_REG (DImode, vms_save_fp_regno)));
}
}
@@ -3817,10 +4084,10 @@ alpha_end_function (file, fnname, decl)
Don't do this for global functions in object files destined for a
shared library because the function may be overridden by the application
- or other libraries.
- ??? Is this just ELF? */
+ or other libraries. Similarly, don't do this for weak functions. */
- if (!flag_pic || !TREE_PUBLIC (current_function_decl))
+ if (!DECL_WEAK (current_function_decl)
+ && (!flag_pic || !TREE_PUBLIC (current_function_decl)))
SYMBOL_REF_FLAG (XEXP (DECL_RTL (current_function_decl), 0)) = 1;
}
@@ -3843,7 +4110,7 @@ static int num_source_filenames = 0;
/* Name of the file containing the current function. */
-static char *current_function_file = "";
+static const char *current_function_file = "";
/* Offsets to alpha virtual arg/local debugging pointers. */
@@ -4095,10 +4362,7 @@ alpha_handle_trap_shadows (insns)
{
struct shadow_summary shadow;
int trap_pending, exception_nesting;
- rtx i;
-
- if (alpha_tp == ALPHA_TP_PROG && !flag_exceptions)
- return;
+ rtx i, n;
trap_pending = 0;
exception_nesting = 0;
@@ -4201,7 +4465,9 @@ alpha_handle_trap_shadows (insns)
else
{
close_shadow:
- emit_insn_before (gen_trapb (), i);
+ n = emit_insn_before (gen_trapb (), i);
+ PUT_MODE (n, TImode);
+ PUT_MODE (i, TImode);
trap_pending = 0;
shadow.used.i = 0;
shadow.used.fp = 0;
@@ -4223,14 +4489,572 @@ alpha_handle_trap_shadows (insns)
}
}
}
+
+#ifdef HAIFA
+/* Alpha can only issue instruction groups simultaneously if they are
+ suitibly aligned. This is very processor-specific. */
+
+enum alphaev4_pipe {
+ EV4_STOP = 0,
+ EV4_IB0 = 1,
+ EV4_IB1 = 2,
+ EV4_IBX = 4
+};
+
+enum alphaev5_pipe {
+ EV5_STOP = 0,
+ EV5_NONE = 1,
+ EV5_E01 = 2,
+ EV5_E0 = 4,
+ EV5_E1 = 8,
+ EV5_FAM = 16,
+ EV5_FA = 32,
+ EV5_FM = 64
+};
+
+static enum alphaev4_pipe alphaev4_insn_pipe PROTO((rtx));
+static enum alphaev5_pipe alphaev5_insn_pipe PROTO((rtx));
+static rtx alphaev4_next_group PROTO((rtx, int*, int*));
+static rtx alphaev5_next_group PROTO((rtx, int*, int*));
+static rtx alphaev4_next_nop PROTO((int*));
+static rtx alphaev5_next_nop PROTO((int*));
+
+static void alpha_align_insns
+ PROTO((rtx, int, rtx (*)(rtx, int*, int*), rtx (*)(int*), int));
+
+static enum alphaev4_pipe
+alphaev4_insn_pipe (insn)
+ rtx insn;
+{
+ if (recog_memoized (insn) < 0)
+ return EV4_STOP;
+ if (get_attr_length (insn) != 4)
+ return EV4_STOP;
+
+ switch (get_attr_type (insn))
+ {
+ case TYPE_ILD:
+ case TYPE_FLD:
+ return EV4_IBX;
+
+ case TYPE_LDSYM:
+ case TYPE_IADD:
+ case TYPE_ILOG:
+ case TYPE_ICMOV:
+ case TYPE_ICMP:
+ case TYPE_IST:
+ case TYPE_FST:
+ case TYPE_SHIFT:
+ case TYPE_IMUL:
+ case TYPE_FBR:
+ return EV4_IB0;
+
+ case TYPE_MISC:
+ case TYPE_IBR:
+ case TYPE_JSR:
+ case TYPE_FCPYS:
+ case TYPE_FCMOV:
+ case TYPE_FADD:
+ case TYPE_FDIV:
+ case TYPE_FMUL:
+ return EV4_IB1;
+
+ default:
+ abort();
+ }
+}
+
+static enum alphaev5_pipe
+alphaev5_insn_pipe (insn)
+ rtx insn;
+{
+ if (recog_memoized (insn) < 0)
+ return EV5_STOP;
+ if (get_attr_length (insn) != 4)
+ return EV5_STOP;
+
+ switch (get_attr_type (insn))
+ {
+ case TYPE_ILD:
+ case TYPE_FLD:
+ case TYPE_LDSYM:
+ case TYPE_IADD:
+ case TYPE_ILOG:
+ case TYPE_ICMOV:
+ case TYPE_ICMP:
+ return EV5_E01;
+
+ case TYPE_IST:
+ case TYPE_FST:
+ case TYPE_SHIFT:
+ case TYPE_IMUL:
+ case TYPE_MISC:
+ case TYPE_MVI:
+ return EV5_E0;
+
+ case TYPE_IBR:
+ case TYPE_JSR:
+ return EV5_E1;
+
+ case TYPE_FCPYS:
+ return EV5_FAM;
+
+ case TYPE_FBR:
+ case TYPE_FCMOV:
+ case TYPE_FADD:
+ case TYPE_FDIV:
+ return EV5_FA;
+
+ case TYPE_FMUL:
+ return EV5_FM;
+
+ default:
+ abort();
+ }
+}
+
+/* IN_USE is a mask of the slots currently filled within the insn group.
+ The mask bits come from alphaev4_pipe above. If EV4_IBX is set, then
+ the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
+
+ LEN is, of course, the length of the group in bytes. */
+
+static rtx
+alphaev4_next_group (insn, pin_use, plen)
+ rtx insn;
+ int *pin_use, *plen;
+{
+ int len, in_use;
+
+ len = in_use = 0;
+
+ if (GET_RTX_CLASS (GET_CODE (insn)) != 'i'
+ || GET_CODE (PATTERN (insn)) == CLOBBER
+ || GET_CODE (PATTERN (insn)) == USE)
+ goto next_and_done;
+
+ while (1)
+ {
+ enum alphaev4_pipe pipe;
+
+ pipe = alphaev4_insn_pipe (insn);
+ switch (pipe)
+ {
+ case EV4_STOP:
+ /* Force complex instructions to start new groups. */
+ if (in_use)
+ goto done;
+
+ /* If this is a completely unrecognized insn, its an asm.
+ We don't know how long it is, so record length as -1 to
+ signal a needed realignment. */
+ if (recog_memoized (insn) < 0)
+ len = -1;
+ else
+ len = get_attr_length (insn);
+ goto next_and_done;
+
+ case EV4_IBX:
+ if (in_use & EV4_IB0)
+ {
+ if (in_use & EV4_IB1)
+ goto done;
+ in_use |= EV4_IB1;
+ }
+ else
+ in_use |= EV4_IB0 | EV4_IBX;
+ break;
+
+ case EV4_IB0:
+ if (in_use & EV4_IB0)
+ {
+ if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
+ goto done;
+ in_use |= EV4_IB1;
+ }
+ in_use |= EV4_IB0;
+ break;
+
+ case EV4_IB1:
+ if (in_use & EV4_IB1)
+ goto done;
+ in_use |= EV4_IB1;
+ break;
+
+ default:
+ abort();
+ }
+ len += 4;
+
+ /* Haifa doesn't do well scheduling branches. */
+ if (GET_CODE (insn) == JUMP_INSN)
+ goto next_and_done;
+
+ next:
+ insn = next_nonnote_insn (insn);
+
+ if (!insn || GET_RTX_CLASS (GET_CODE (insn)) != 'i')
+ goto done;
+
+ /* Let Haifa tell us where it thinks insn group boundaries are. */
+ if (GET_MODE (insn) == TImode)
+ goto done;
+
+ if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
+ goto next;
+ }
+
+ next_and_done:
+ insn = next_nonnote_insn (insn);
+
+ done:
+ *plen = len;
+ *pin_use = in_use;
+ return insn;
+}
+
+/* IN_USE is a mask of the slots currently filled within the insn group.
+ The mask bits come from alphaev5_pipe above. If EV5_E01 is set, then
+ the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
+
+ LEN is, of course, the length of the group in bytes. */
+
+static rtx
+alphaev5_next_group (insn, pin_use, plen)
+ rtx insn;
+ int *pin_use, *plen;
+{
+ int len, in_use;
+
+ len = in_use = 0;
+
+ if (GET_RTX_CLASS (GET_CODE (insn)) != 'i'
+ || GET_CODE (PATTERN (insn)) == CLOBBER
+ || GET_CODE (PATTERN (insn)) == USE)
+ goto next_and_done;
+
+ while (1)
+ {
+ enum alphaev5_pipe pipe;
+
+ pipe = alphaev5_insn_pipe (insn);
+ switch (pipe)
+ {
+ case EV5_STOP:
+ /* Force complex instructions to start new groups. */
+ if (in_use)
+ goto done;
+
+ /* If this is a completely unrecognized insn, its an asm.
+ We don't know how long it is, so record length as -1 to
+ signal a needed realignment. */
+ if (recog_memoized (insn) < 0)
+ len = -1;
+ else
+ len = get_attr_length (insn);
+ goto next_and_done;
+
+ /* ??? Most of the places below, we would like to abort, as
+ it would indicate an error either in Haifa, or in the
+ scheduling description. Unfortunately, Haifa never
+ schedules the last instruction of the BB, so we don't
+ have an accurate TI bit to go off. */
+ case EV5_E01:
+ if (in_use & EV5_E0)
+ {
+ if (in_use & EV5_E1)
+ goto done;
+ in_use |= EV5_E1;
+ }
+ else
+ in_use |= EV5_E0 | EV5_E01;
+ break;
+
+ case EV5_E0:
+ if (in_use & EV5_E0)
+ {
+ if (!(in_use & EV5_E01) || (in_use & EV5_E1))
+ goto done;
+ in_use |= EV5_E1;
+ }
+ in_use |= EV5_E0;
+ break;
+
+ case EV5_E1:
+ if (in_use & EV5_E1)
+ goto done;
+ in_use |= EV5_E1;
+ break;
+
+ case EV5_FAM:
+ if (in_use & EV5_FA)
+ {
+ if (in_use & EV5_FM)
+ goto done;
+ in_use |= EV5_FM;
+ }
+ else
+ in_use |= EV5_FA | EV5_FAM;
+ break;
+
+ case EV5_FA:
+ if (in_use & EV5_FA)
+ goto done;
+ in_use |= EV5_FA;
+ break;
+
+ case EV5_FM:
+ if (in_use & EV5_FM)
+ goto done;
+ in_use |= EV5_FM;
+ break;
+
+ case EV5_NONE:
+ break;
+
+ default:
+ abort();
+ }
+ len += 4;
+
+ /* Haifa doesn't do well scheduling branches. */
+ /* ??? If this is predicted not-taken, slotting continues, except
+ that no more IBR, FBR, or JSR insns may be slotted. */
+ if (GET_CODE (insn) == JUMP_INSN)
+ goto next_and_done;
+
+ next:
+ insn = next_nonnote_insn (insn);
+
+ if (!insn || GET_RTX_CLASS (GET_CODE (insn)) != 'i')
+ goto done;
+
+ /* Let Haifa tell us where it thinks insn group boundaries are. */
+ if (GET_MODE (insn) == TImode)
+ goto done;
+
+ if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
+ goto next;
+ }
+
+ next_and_done:
+ insn = next_nonnote_insn (insn);
+
+ done:
+ *plen = len;
+ *pin_use = in_use;
+ return insn;
+}
+
+static rtx
+alphaev4_next_nop (pin_use)
+ int *pin_use;
+{
+ int in_use = *pin_use;
+ rtx nop;
+
+ if (!(in_use & EV4_IB0))
+ {
+ in_use |= EV4_IB0;
+ nop = gen_nop ();
+ }
+ else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
+ {
+ in_use |= EV4_IB1;
+ nop = gen_nop ();
+ }
+ else if (TARGET_FP && !(in_use & EV4_IB1))
+ {
+ in_use |= EV4_IB1;
+ nop = gen_fnop ();
+ }
+ else
+ nop = gen_unop ();
+
+ *pin_use = in_use;
+ return nop;
+}
+
+static rtx
+alphaev5_next_nop (pin_use)
+ int *pin_use;
+{
+ int in_use = *pin_use;
+ rtx nop;
+ if (!(in_use & EV5_E1))
+ {
+ in_use |= EV5_E1;
+ nop = gen_nop ();
+ }
+ else if (TARGET_FP && !(in_use & EV5_FA))
+ {
+ in_use |= EV5_FA;
+ nop = gen_fnop ();
+ }
+ else if (TARGET_FP && !(in_use & EV5_FM))
+ {
+ in_use |= EV5_FM;
+ nop = gen_fnop ();
+ }
+ else
+ nop = gen_unop ();
+
+ *pin_use = in_use;
+ return nop;
+}
+
+/* The instruction group alignment main loop. */
+
+static void
+alpha_align_insns (insns, max_align, next_group, next_nop, gp_in_use)
+ rtx insns;
+ int max_align;
+ rtx (*next_group) PROTO((rtx, int*, int*));
+ rtx (*next_nop) PROTO((int*));
+ int gp_in_use;
+{
+ /* ALIGN is the known alignment for the insn group. */
+ int align;
+ /* OFS is the offset of the current insn in the insn group. */
+ int ofs;
+ int prev_in_use, in_use, len;
+ rtx i, next;
+
+ /* Let shorten branches care for assigning alignments to code labels. */
+ shorten_branches (insns);
+
+ align = (FUNCTION_BOUNDARY/BITS_PER_UNIT < max_align
+ ? FUNCTION_BOUNDARY/BITS_PER_UNIT : max_align);
+
+ /* Account for the initial GP load, which happens before the scheduled
+ prologue we emitted as RTL. */
+ ofs = prev_in_use = 0;
+ if (alpha_does_function_need_gp())
+ {
+ ofs = 8 & (align - 1);
+ prev_in_use = gp_in_use;
+ }
+
+ i = insns;
+ if (GET_CODE (i) == NOTE)
+ i = next_nonnote_insn (i);
+
+ while (i)
+ {
+ next = (*next_group)(i, &in_use, &len);
+
+ /* When we see a label, resync alignment etc. */
+ if (GET_CODE (i) == CODE_LABEL)
+ {
+ int new_align = 1 << label_to_alignment (i);
+ if (new_align >= align)
+ {
+ align = new_align < max_align ? new_align : max_align;
+ ofs = 0;
+ }
+ else if (ofs & (new_align-1))
+ ofs = (ofs | (new_align-1)) + 1;
+ if (len != 0)
+ abort();
+ }
+
+ /* Handle complex instructions special. */
+ else if (in_use == 0)
+ {
+ /* Asms will have length < 0. This is a signal that we have
+ lost alignment knowledge. Assume, however, that the asm
+ will not mis-align instructions. */
+ if (len < 0)
+ {
+ ofs = 0;
+ align = 4;
+ len = 0;
+ }
+ }
+
+ /* If the known alignment is smaller than the recognized insn group,
+ realign the output. */
+ else if (align < len)
+ {
+ int new_log_align = len > 8 ? 4 : 3;
+ rtx where;
+
+ where = prev_nonnote_insn (i);
+ if (!where || GET_CODE (where) != CODE_LABEL)
+ where = i;
+
+ emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
+ align = 1 << new_log_align;
+ ofs = 0;
+ }
+
+ /* If the group won't fit in the same INT16 as the previous,
+ we need to add padding to keep the group together. Rather
+ than simply leaving the insn filling to the assembler, we
+ can make use of the knowledge of what sorts of instructions
+ were issued in the previous group to make sure that all of
+ the added nops are really free. */
+ else if (ofs + len > align)
+ {
+ int nop_count = (align - ofs) / 4;
+ rtx where;
+
+ /* Insert nops before labels and branches to truely merge the
+ execution of the nops with the previous instruction group. */
+ where = prev_nonnote_insn (i);
+ if (where)
+ {
+ if (GET_CODE (where) == CODE_LABEL)
+ {
+ rtx where2 = prev_nonnote_insn (where);
+ if (where2 && GET_CODE (where2) == JUMP_INSN)
+ where = where2;
+ }
+ else if (GET_CODE (where) != JUMP_INSN)
+ where = i;
+ }
+ else
+ where = i;
+
+ do
+ emit_insn_before ((*next_nop)(&prev_in_use), where);
+ while (--nop_count);
+ ofs = 0;
+ }
+
+ ofs = (ofs + len) & (align - 1);
+ prev_in_use = in_use;
+ i = next;
+ }
+}
+#endif /* HAIFA */
+
/* Machine dependant reorg pass. */
void
alpha_reorg (insns)
rtx insns;
{
- alpha_handle_trap_shadows (insns);
+ if (alpha_tp != ALPHA_TP_PROG || flag_exceptions)
+ alpha_handle_trap_shadows (insns);
+
+#ifdef HAIFA
+ /* Due to the number of extra trapb insns, don't bother fixing up
+ alignment when trap precision is instruction. Moreover, we can
+ only do our job when sched2 is run and Haifa is our scheduler. */
+ if (optimize && !optimize_size
+ && alpha_tp != ALPHA_TP_INSN
+ && flag_schedule_insns_after_reload)
+ {
+ if (alpha_cpu == PROCESSOR_EV4)
+ alpha_align_insns (insns, 8, alphaev4_next_group,
+ alphaev4_next_nop, EV4_IB0);
+ else if (alpha_cpu == PROCESSOR_EV5)
+ alpha_align_insns (insns, 16, alphaev5_next_group,
+ alphaev5_next_nop, EV5_E01 | EV5_E0);
+ }
+#endif
}
diff --git a/contrib/gcc/config/alpha/alpha.h b/contrib/gcc/config/alpha/alpha.h
index 43b0dee..e9c3f6d 100644
--- a/contrib/gcc/config/alpha/alpha.h
+++ b/contrib/gcc/config/alpha/alpha.h
@@ -1,5 +1,5 @@
/* Definitions of target machine for GNU compiler, for DEC Alpha.
- Copyright (C) 1992, 93, 94, 95, 96, 97, 1998 Free Software Foundation, Inc.
+ Copyright (C) 1992, 93-98, 1999 Free Software Foundation, Inc.
Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
This file is part of GNU CC.
@@ -95,73 +95,76 @@ extern enum alpha_fp_trap_mode alpha_fptm;
/* This means that floating-point support exists in the target implementation
of the Alpha architecture. This is usually the default. */
-
-#define MASK_FP 1
+#define MASK_FP (1 << 0)
#define TARGET_FP (target_flags & MASK_FP)
/* This means that floating-point registers are allowed to be used. Note
that Alpha implementations without FP operations are required to
provide the FP registers. */
-#define MASK_FPREGS 2
+#define MASK_FPREGS (1 << 1)
#define TARGET_FPREGS (target_flags & MASK_FPREGS)
/* This means that gas is used to process the assembler file. */
-#define MASK_GAS 4
+#define MASK_GAS (1 << 2)
#define TARGET_GAS (target_flags & MASK_GAS)
/* This means that we should mark procedures as IEEE conformant. */
-#define MASK_IEEE_CONFORMANT 8
+#define MASK_IEEE_CONFORMANT (1 << 3)
#define TARGET_IEEE_CONFORMANT (target_flags & MASK_IEEE_CONFORMANT)
/* This means we should be IEEE-compliant except for inexact. */
-#define MASK_IEEE 16
+#define MASK_IEEE (1 << 4)
#define TARGET_IEEE (target_flags & MASK_IEEE)
/* This means we should be fully IEEE-compliant. */
-#define MASK_IEEE_WITH_INEXACT 32
+#define MASK_IEEE_WITH_INEXACT (1 << 5)
#define TARGET_IEEE_WITH_INEXACT (target_flags & MASK_IEEE_WITH_INEXACT)
/* This means we must construct all constants rather than emitting
them as literal data. */
-#define MASK_BUILD_CONSTANTS 128
+#define MASK_BUILD_CONSTANTS (1 << 6)
#define TARGET_BUILD_CONSTANTS (target_flags & MASK_BUILD_CONSTANTS)
/* This means we handle floating points in VAX F- (float)
or G- (double) Format. */
-#define MASK_FLOAT_VAX 512
+#define MASK_FLOAT_VAX (1 << 7)
#define TARGET_FLOAT_VAX (target_flags & MASK_FLOAT_VAX)
/* This means that the processor has byte and half word loads and stores
(the BWX extension). */
-#define MASK_BWX 1024
+#define MASK_BWX (1 << 8)
#define TARGET_BWX (target_flags & MASK_BWX)
-/* This means that the processor has the CIX extension. */
-#define MASK_CIX 2048
-#define TARGET_CIX (target_flags & MASK_CIX)
-
/* This means that the processor has the MAX extension. */
-#define MASK_MAX 4096
+#define MASK_MAX (1 << 9)
#define TARGET_MAX (target_flags & MASK_MAX)
+/* This means that the processor has the FIX extension. */
+#define MASK_FIX (1 << 10)
+#define TARGET_FIX (target_flags & MASK_FIX)
+
+/* This means that the processor has the CIX extension. */
+#define MASK_CIX (1 << 11)
+#define TARGET_CIX (target_flags & MASK_CIX)
+
/* This means that the processor is an EV5, EV56, or PCA56. This is defined
only in TARGET_CPU_DEFAULT. */
-#define MASK_CPU_EV5 8192
+#define MASK_CPU_EV5 (1 << 28)
/* Likewise for EV6. */
-#define MASK_CPU_EV6 16384
+#define MASK_CPU_EV6 (1 << 29)
/* This means we support the .arch directive in the assembler. Only
defined in TARGET_CPU_DEFAULT. */
-#define MASK_SUPPORT_ARCH 32768
+#define MASK_SUPPORT_ARCH (1 << 30)
#define TARGET_SUPPORT_ARCH (target_flags & MASK_SUPPORT_ARCH)
/* These are for target os support and cannot be changed at runtime. */
@@ -185,26 +188,32 @@ extern enum alpha_fp_trap_mode alpha_fptm;
where VALUE is the bits to set or minus the bits to clear.
An empty string NAME is used to identify the default VALUE. */
-#define TARGET_SWITCHES \
- { {"no-soft-float", MASK_FP}, \
- {"soft-float", - MASK_FP}, \
- {"fp-regs", MASK_FPREGS}, \
- {"no-fp-regs", - (MASK_FP|MASK_FPREGS)}, \
- {"alpha-as", -MASK_GAS}, \
- {"gas", MASK_GAS}, \
- {"ieee-conformant", MASK_IEEE_CONFORMANT}, \
- {"ieee", MASK_IEEE|MASK_IEEE_CONFORMANT}, \
- {"ieee-with-inexact", MASK_IEEE_WITH_INEXACT|MASK_IEEE_CONFORMANT}, \
- {"build-constants", MASK_BUILD_CONSTANTS}, \
- {"float-vax", MASK_FLOAT_VAX}, \
- {"float-ieee", -MASK_FLOAT_VAX}, \
- {"bwx", MASK_BWX}, \
- {"no-bwx", -MASK_BWX}, \
- {"cix", MASK_CIX}, \
- {"no-cix", -MASK_CIX}, \
- {"max", MASK_MAX}, \
- {"no-max", -MASK_MAX}, \
- {"", TARGET_DEFAULT | TARGET_CPU_DEFAULT} }
+#define TARGET_SWITCHES \
+ { {"no-soft-float", MASK_FP, "Use hardware fp"}, \
+ {"soft-float", - MASK_FP, "Do not use hardware fp"}, \
+ {"fp-regs", MASK_FPREGS, "Use fp registers"}, \
+ {"no-fp-regs", - (MASK_FP|MASK_FPREGS), "Do not use fp registers"}, \
+ {"alpha-as", -MASK_GAS, "Do not assume GAS"}, \
+ {"gas", MASK_GAS, "Assume GAS"}, \
+ {"ieee-conformant", MASK_IEEE_CONFORMANT, \
+ "Request IEEE-conformant math library routines (OSF/1)"}, \
+ {"ieee", MASK_IEEE|MASK_IEEE_CONFORMANT, \
+ "Emit IEEE-conformant code, without inexact exceptions"}, \
+ {"ieee-with-inexact", MASK_IEEE_WITH_INEXACT|MASK_IEEE_CONFORMANT, \
+ "Emit IEEE-conformant code, with inexact exceptions"}, \
+ {"build-constants", MASK_BUILD_CONSTANTS, \
+ "Do not emit complex integer constants to read-only memory"}, \
+ {"float-vax", MASK_FLOAT_VAX, "Use VAX fp"}, \
+ {"float-ieee", -MASK_FLOAT_VAX, "Do not use VAX fp"}, \
+ {"bwx", MASK_BWX, "Emit code for the byte/word ISA extension"}, \
+ {"no-bwx", -MASK_BWX, ""}, \
+ {"max", MASK_MAX, "Emit code for the motion video ISA extension"}, \
+ {"no-max", -MASK_MAX, ""}, \
+ {"fix", MASK_FIX, "Emit code for the fp move and sqrt ISA extension"}, \
+ {"no-fix", -MASK_FIX, ""}, \
+ {"cix", MASK_CIX, "Emit code for the counting ISA extension"}, \
+ {"no-cix", -MASK_CIX, ""}, \
+ {"", TARGET_DEFAULT | TARGET_CPU_DEFAULT, ""} }
#define TARGET_DEFAULT MASK_FP|MASK_FPREGS
@@ -229,19 +238,24 @@ extern enum alpha_fp_trap_mode alpha_fptm;
extern char *m88k_short_data;
#define TARGET_OPTIONS { { "short-data-", &m88k_short_data } } */
-extern char *alpha_cpu_string; /* For -mcpu= */
-extern char *alpha_fprm_string; /* For -mfp-rounding-mode=[n|m|c|d] */
-extern char *alpha_fptm_string; /* For -mfp-trap-mode=[n|u|su|sui] */
-extern char *alpha_tp_string; /* For -mtrap-precision=[p|f|i] */
-extern char *alpha_mlat_string; /* For -mmemory-latency= */
-
-#define TARGET_OPTIONS \
-{ \
- {"cpu=", &alpha_cpu_string}, \
- {"fp-rounding-mode=", &alpha_fprm_string}, \
- {"fp-trap-mode=", &alpha_fptm_string}, \
- {"trap-precision=", &alpha_tp_string}, \
- {"memory-latency=", &alpha_mlat_string}, \
+extern const char *alpha_cpu_string; /* For -mcpu= */
+extern const char *alpha_fprm_string; /* For -mfp-rounding-mode=[n|m|c|d] */
+extern const char *alpha_fptm_string; /* For -mfp-trap-mode=[n|u|su|sui] */
+extern const char *alpha_tp_string; /* For -mtrap-precision=[p|f|i] */
+extern const char *alpha_mlat_string; /* For -mmemory-latency= */
+
+#define TARGET_OPTIONS \
+{ \
+ {"cpu=", &alpha_cpu_string, \
+ "Generate code for a given CPU"}, \
+ {"fp-rounding-mode=", &alpha_fprm_string, \
+ "Control the generated fp rounding mode"}, \
+ {"fp-trap-mode=", &alpha_fptm_string, \
+ "Control the IEEE trap mode"}, \
+ {"trap-precision=", &alpha_tp_string, \
+ "Control the precision given to fp exceptions"}, \
+ {"memory-latency=", &alpha_mlat_string, \
+ "Tune expected memory latency"}, \
}
/* Attempt to describe CPU characteristics to the preprocessor. */
@@ -249,6 +263,7 @@ extern char *alpha_mlat_string; /* For -mmemory-latency= */
/* Corresponding to amask... */
#define CPP_AM_BWX_SPEC "-D__alpha_bwx__ -Acpu(bwx)"
#define CPP_AM_MAX_SPEC "-D__alpha_max__ -Acpu(max)"
+#define CPP_AM_FIX_SPEC "-D__alpha_fix__ -Acpu(fix)"
#define CPP_AM_CIX_SPEC "-D__alpha_cix__ -Acpu(cix)"
/* Corresponding to implver... */
@@ -261,7 +276,7 @@ extern char *alpha_mlat_string; /* For -mmemory-latency= */
#define CPP_CPU_EV5_SPEC "%(cpp_im_ev5)"
#define CPP_CPU_EV56_SPEC "%(cpp_im_ev5) %(cpp_am_bwx)"
#define CPP_CPU_PCA56_SPEC "%(cpp_im_ev5) %(cpp_am_bwx) %(cpp_am_max)"
-#define CPP_CPU_EV6_SPEC "%(cpp_im_ev6) %(cpp_am_bwx) %(cpp_am_max) %(cpp_am_cix)"
+#define CPP_CPU_EV6_SPEC "%(cpp_im_ev6) %(cpp_am_bwx) %(cpp_am_max) %(cpp_am_fix)"
#ifndef CPP_CPU_DEFAULT_SPEC
# if TARGET_CPU_DEFAULT & MASK_CPU_EV6
@@ -311,6 +326,7 @@ extern char *alpha_mlat_string; /* For -mmemory-latency= */
#define EXTRA_SPECS \
{ "cpp_am_bwx", CPP_AM_BWX_SPEC }, \
{ "cpp_am_max", CPP_AM_MAX_SPEC }, \
+ { "cpp_am_fix", CPP_AM_FIX_SPEC }, \
{ "cpp_am_cix", CPP_AM_CIX_SPEC }, \
{ "cpp_im_ev4", CPP_IM_EV4_SPEC }, \
{ "cpp_im_ev5", CPP_IM_EV5_SPEC }, \
@@ -483,7 +499,7 @@ extern void override_options ();
Alpha we'll get better performance by aligning on an octaword
boundary. */
-#define ALIGN_LABEL_AFTER_BARRIER(FILE) \
+#define LABEL_ALIGN_AFTER_BARRIER(FILE) \
(optimize > 0 && write_symbols != SDB_DEBUG ? 4 : 0)
/* No data type wants to be aligned rounder than this. */
@@ -613,17 +629,20 @@ extern void override_options ();
registers can hold 32-bit and 64-bit integers as well, but not 16-bit
or 8-bit values. */
-#define HARD_REGNO_MODE_OK(REGNO, MODE) \
- ((REGNO) < 32 || ((MODE) != QImode && (MODE) != HImode))
+#define HARD_REGNO_MODE_OK(REGNO, MODE) \
+ ((REGNO) >= 32 && (REGNO) <= 62 \
+ ? GET_MODE_UNIT_SIZE (MODE) == 8 || GET_MODE_UNIT_SIZE (MODE) == 4 \
+ : 1)
+
+/* A C expression that is nonzero if a value of mode
+ MODE1 is accessible in mode MODE2 without copying.
-/* Value is 1 if it is a good idea to tie two pseudo registers
- when one has mode MODE1 and one has mode MODE2.
- If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
- for any hard reg, then this must be 0 for correct output. */
+ This asymmetric test is true when MODE1 could be put
+ in an FP register but MODE2 could not. */
#define MODES_TIEABLE_P(MODE1, MODE2) \
- ((MODE1) == QImode || (MODE1) == HImode \
- ? (MODE2) == QImode || (MODE2) == HImode \
+ (HARD_REGNO_MODE_OK (32, (MODE1)) \
+ ? HARD_REGNO_MODE_OK (32, (MODE2)) \
: 1)
/* Specify the registers used for certain standard purposes.
@@ -771,11 +790,12 @@ enum reg_class { NO_REGS, GENERAL_REGS, FLOAT_REGS, ALL_REGS,
'S' is a 6-bit constant (valid for a shift insn). */
#define EXTRA_CONSTRAINT(OP, C) \
- ((C) == 'Q' ? GET_CODE (OP) == MEM && GET_CODE (XEXP (OP, 0)) != AND \
+ ((C) == 'Q' ? normal_memory_operand (OP, VOIDmode) \
: (C) == 'R' ? current_file_function_operand (OP, Pmode) \
: (C) == 'S' ? (GET_CODE (OP) == CONST_INT \
&& (unsigned HOST_WIDE_INT) INTVAL (OP) < 64) \
: 0)
+extern int normal_memory_operand ();
/* Given an rtx X being reloaded into a reg required to be
in class CLASS, return the class of reg to actually use.
@@ -807,7 +827,7 @@ enum reg_class { NO_REGS, GENERAL_REGS, FLOAT_REGS, ALL_REGS,
&& (((CLASS) == FLOAT_REGS \
&& ((MODE) == SImode || (MODE) == HImode || (MODE) == QImode)) \
|| (((MODE) == QImode || (MODE) == HImode) \
- && ! TARGET_BWX && unaligned_memory_operand (IN, MODE)))) \
+ && ! TARGET_BWX && ! aligned_memory_operand (IN, MODE)))) \
? GENERAL_REGS \
: ((CLASS) == FLOAT_REGS && GET_CODE (IN) == MEM \
&& GET_CODE (XEXP (IN, 0)) == AND) ? GENERAL_REGS \
@@ -835,10 +855,10 @@ enum reg_class { NO_REGS, GENERAL_REGS, FLOAT_REGS, ALL_REGS,
: NO_REGS)
/* If we are copying between general and FP registers, we need a memory
- location unless the CIX extension is available. */
+ location unless the FIX extension is available. */
#define SECONDARY_MEMORY_NEEDED(CLASS1,CLASS2,MODE) \
- (! TARGET_CIX && (CLASS1) != (CLASS2))
+ (! TARGET_FIX && (CLASS1) != (CLASS2))
/* Specify the mode to be used for memory when a secondary memory
location is needed. If MODE is floating-point, use it. Otherwise,
@@ -871,7 +891,7 @@ enum reg_class { NO_REGS, GENERAL_REGS, FLOAT_REGS, ALL_REGS,
#define REGISTER_MOVE_COST(CLASS1, CLASS2) \
(((CLASS1) == FLOAT_REGS) == ((CLASS2) == FLOAT_REGS) \
? 2 \
- : TARGET_CIX ? 3 : 4+2*alpha_memory_latency)
+ : TARGET_FIX ? 3 : 4+2*alpha_memory_latency)
/* A C expressions returning the cost of moving data of MODE from a register to
or from memory.
@@ -989,26 +1009,25 @@ extern int alpha_memory_latency;
On Alpha the value is found in $0 for integer functions and
$f0 for floating-point functions. */
-#define FUNCTION_VALUE(VALTYPE, FUNC) \
- gen_rtx (REG, \
- ((INTEGRAL_TYPE_P (VALTYPE) \
- && TYPE_PRECISION (VALTYPE) < BITS_PER_WORD) \
- || POINTER_TYPE_P (VALTYPE)) \
- ? word_mode : TYPE_MODE (VALTYPE), \
- ((TARGET_FPREGS \
- && (TREE_CODE (VALTYPE) == REAL_TYPE \
- || TREE_CODE (VALTYPE) == COMPLEX_TYPE)) \
- ? 32 : 0))
+#define FUNCTION_VALUE(VALTYPE, FUNC) \
+ gen_rtx_REG (((INTEGRAL_TYPE_P (VALTYPE) \
+ && TYPE_PRECISION (VALTYPE) < BITS_PER_WORD) \
+ || POINTER_TYPE_P (VALTYPE)) \
+ ? word_mode : TYPE_MODE (VALTYPE), \
+ ((TARGET_FPREGS \
+ && (TREE_CODE (VALTYPE) == REAL_TYPE \
+ || TREE_CODE (VALTYPE) == COMPLEX_TYPE)) \
+ ? 32 : 0))
/* Define how to find the value returned by a library function
assuming the value has mode MODE. */
-#define LIBCALL_VALUE(MODE) \
- gen_rtx (REG, MODE, \
- (TARGET_FPREGS \
- && (GET_MODE_CLASS (MODE) == MODE_FLOAT \
- || GET_MODE_CLASS (MODE) == MODE_COMPLEX_FLOAT) \
- ? 32 : 0))
+#define LIBCALL_VALUE(MODE) \
+ gen_rtx_REG (MODE, \
+ (TARGET_FPREGS \
+ && (GET_MODE_CLASS (MODE) == MODE_FLOAT \
+ || GET_MODE_CLASS (MODE) == MODE_COMPLEX_FLOAT) \
+ ? 32 : 0))
/* The definition of this macro implies that there are cases where
a scalar value cannot be returned in registers.
@@ -1182,6 +1201,10 @@ extern struct rtx_def *alpha_builtin_saveregs ();
extern struct rtx_def *alpha_compare_op0, *alpha_compare_op1;
extern int alpha_compare_fp_p;
+/* Define the information needed to modify the epilogue for EH. */
+
+extern struct rtx_def *alpha_eh_epilogue_sp_ofs;
+
/* Make (or fake) .linkage entry for function call.
IS_LOCAL is 0 if name is used in call, 1 if name is used in definition. */
extern void alpha_need_linkage ();
@@ -1293,6 +1316,7 @@ do { \
#define INITIALIZE_TRAMPOLINE(TRAMP, FNADDR, CXT) \
alpha_initialize_trampoline (TRAMP, FNADDR, CXT, 16, 24, 8)
+extern void alpha_initialize_trampoline ();
/* A C expression whose value is RTL representing the value of the return
address for the frame COUNT steps up from the current frame.
@@ -1302,6 +1326,9 @@ do { \
#define RETURN_ADDR_RTX alpha_return_addr
extern struct rtx_def *alpha_return_addr ();
+/* Before the prologue, RA lives in $26. */
+#define INCOMING_RETURN_ADDR_RTX gen_rtx_REG (Pmode, 26)
+
/* Initialize data used by insn expanders. This is called from insn_emit,
once for every function before code is generated. */
@@ -1310,11 +1337,11 @@ extern void alpha_init_expanders ();
/* Addressing modes, and classification of registers for them. */
-/* #define HAVE_POST_INCREMENT */
-/* #define HAVE_POST_DECREMENT */
+/* #define HAVE_POST_INCREMENT 0 */
+/* #define HAVE_POST_DECREMENT 0 */
-/* #define HAVE_PRE_DECREMENT */
-/* #define HAVE_PRE_INCREMENT */
+/* #define HAVE_PRE_DECREMENT 0 */
+/* #define HAVE_PRE_INCREMENT 0 */
/* Macros to check register numbers against specific register classes. */
@@ -1365,18 +1392,32 @@ extern void alpha_init_expanders ();
/* Nonzero if X is a hard reg that can be used as an index
or if it is a pseudo reg. */
#define REG_OK_FOR_INDEX_P(X) 0
+
/* Nonzero if X is a hard reg that can be used as a base reg
or if it is a pseudo reg. */
#define REG_OK_FOR_BASE_P(X) \
(REGNO (X) < 32 || REGNO (X) == 63 || REGNO (X) >= FIRST_PSEUDO_REGISTER)
+/* ??? Nonzero if X is the frame pointer, or some virtual register
+ that may eliminate to the frame pointer. These will be allowed to
+ have offsets greater than 32K. This is done because register
+ elimination offsets will change the hi/lo split, and if we split
+ before reload, we will require additional instructions. */
+#define REG_OK_FP_BASE_P(X) \
+ (REGNO (X) == 31 || REGNO (X) == 63 \
+ || (REGNO (X) >= FIRST_PSEUDO_REGISTER \
+ && REGNO (X) < LAST_VIRTUAL_REGISTER))
+
#else
/* Nonzero if X is a hard reg that can be used as an index. */
#define REG_OK_FOR_INDEX_P(X) REGNO_OK_FOR_INDEX_P (REGNO (X))
+
/* Nonzero if X is a hard reg that can be used as a base reg. */
#define REG_OK_FOR_BASE_P(X) REGNO_OK_FOR_BASE_P (REGNO (X))
+#define REG_OK_FP_BASE_P(X) 0
+
#endif
/* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression
@@ -1391,16 +1432,34 @@ extern void alpha_init_expanders ();
First define the basic valid address. */
-#define GO_IF_LEGITIMATE_SIMPLE_ADDRESS(MODE, X, ADDR) \
-{ if (REG_P (X) && REG_OK_FOR_BASE_P (X)) \
- goto ADDR; \
- if (CONSTANT_ADDRESS_P (X)) \
- goto ADDR; \
- if (GET_CODE (X) == PLUS \
- && REG_P (XEXP (X, 0)) \
- && REG_OK_FOR_BASE_P (XEXP (X, 0)) \
- && CONSTANT_ADDRESS_P (XEXP (X, 1))) \
- goto ADDR; \
+#define GO_IF_LEGITIMATE_SIMPLE_ADDRESS(MODE, X, ADDR) \
+{ \
+ rtx tmp = (X); \
+ if (GET_CODE (tmp) == SUBREG \
+ && (GET_MODE_SIZE (GET_MODE (tmp)) \
+ < GET_MODE_SIZE (GET_MODE (SUBREG_REG (tmp))))) \
+ tmp = SUBREG_REG (tmp); \
+ if (REG_P (tmp) && REG_OK_FOR_BASE_P (tmp)) \
+ goto ADDR; \
+ if (CONSTANT_ADDRESS_P (X)) \
+ goto ADDR; \
+ if (GET_CODE (X) == PLUS) \
+ { \
+ tmp = XEXP (X, 0); \
+ if (GET_CODE (tmp) == SUBREG \
+ && (GET_MODE_SIZE (GET_MODE (tmp)) \
+ < GET_MODE_SIZE (GET_MODE (SUBREG_REG (tmp))))) \
+ tmp = SUBREG_REG (tmp); \
+ if (REG_P (tmp)) \
+ { \
+ if (REG_OK_FP_BASE_P (tmp) \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT) \
+ goto ADDR; \
+ if (REG_OK_FOR_BASE_P (tmp) \
+ && CONSTANT_ADDRESS_P (XEXP (X, 1))) \
+ goto ADDR; \
+ } \
+ } \
}
/* Now accept the simple address, or, for DImode only, an AND of a simple
@@ -1596,9 +1655,11 @@ do { \
#define MOVE_MAX 8
-/* Controls how many units are moved by expr.c before resorting to movstr.
- Without byte/word accesses, we want no more than one; with, several single
- byte accesses are better. */
+/* If a memory-to-memory move would take MOVE_RATIO or more simple
+ move-instruction pairs, we will do a movstr or libcall instead.
+
+ Without byte/word accesses, we want no more than four instructions;
+ with, several single byte accesses are better. */
#define MOVE_RATIO (TARGET_BWX ? 7 : 2)
@@ -1693,6 +1754,12 @@ do { \
/* The EV4 is dual issue; EV5/EV6 are quad issue. */
#define ISSUE_RATE (alpha_cpu == PROCESSOR_EV4 ? 2 : 4)
+/* Describe the fact that MULTI instructions are multiple instructions
+ and so to assume they don't pair with anything. */
+#define MD_SCHED_VARIABLE_ISSUE(DUMP, SCHED_VERBOSE, INSN, CAN_ISSUE_MORE) \
+ if (recog_memoized (INSN) < 0 || get_attr_type (INSN) == TYPE_MULTI) \
+ (CAN_ISSUE_MORE) = 0
+
/* Compute the cost of computing a constant rtl expression RTX
whose rtx-code is CODE. The body of this macro is a portion
of a switch statement. If the code is computed here,
@@ -1970,7 +2037,7 @@ literal_section () \
This is suitable for output with `assemble_name'. */
#define ASM_GENERATE_INTERNAL_LABEL(LABEL,PREFIX,NUM) \
- sprintf (LABEL, "*$%s%d", PREFIX, NUM)
+ sprintf ((LABEL), "*$%s%ld", (PREFIX), (long)(NUM))
/* Check a floating-point value for validity for a particular machine mode. */
@@ -2078,6 +2145,11 @@ literal_section () \
} \
while (0)
+/* To get unaligned data, we have to turn off auto alignment. */
+#define UNALIGNED_SHORT_ASM_OP ".align 0\n\t.word"
+#define UNALIGNED_INT_ASM_OP ".align 0\n\t.long"
+#define UNALIGNED_DOUBLE_INT_ASM_OP ".align 0\n\t.quad"
+
/* This is how to output an insn to push a register on the stack.
It need not be very fast code. */
@@ -2237,41 +2309,22 @@ do { \
/* Print a memory address as an operand to reference that memory location. */
-#define PRINT_OPERAND_ADDRESS(FILE, ADDR) \
-{ rtx addr = (ADDR); \
- int basereg = 31; \
- HOST_WIDE_INT offset = 0; \
- \
- if (GET_CODE (addr) == AND) \
- addr = XEXP (addr, 0); \
- \
- if (GET_CODE (addr) == REG) \
- basereg = REGNO (addr); \
- else if (GET_CODE (addr) == CONST_INT) \
- offset = INTVAL (addr); \
- else if (GET_CODE (addr) == PLUS \
- && GET_CODE (XEXP (addr, 0)) == REG \
- && GET_CODE (XEXP (addr, 1)) == CONST_INT) \
- basereg = REGNO (XEXP (addr, 0)), offset = INTVAL (XEXP (addr, 1)); \
- else \
- abort (); \
- \
- fprintf (FILE, HOST_WIDE_INT_PRINT_DEC, offset); \
- fprintf (FILE, "($%d)", basereg); \
-}
+#define PRINT_OPERAND_ADDRESS(FILE, ADDR) \
+ print_operand_address((FILE), (ADDR))
+
/* Define the codes that are matched by predicates in alpha.c. */
#define PREDICATE_CODES \
{"reg_or_0_operand", {SUBREG, REG, CONST_INT}}, \
- {"reg_or_6bit_operand", {SUBREG, REG, CONST_INT, CONSTANT_P_RTX}}, \
- {"reg_or_8bit_operand", {SUBREG, REG, CONST_INT, CONSTANT_P_RTX}}, \
- {"cint8_operand", {CONST_INT, CONSTANT_P_RTX}}, \
- {"reg_or_cint_operand", {SUBREG, REG, CONST_INT, CONSTANT_P_RTX}}, \
- {"add_operand", {SUBREG, REG, CONST_INT, CONSTANT_P_RTX}}, \
- {"sext_add_operand", {SUBREG, REG, CONST_INT, CONSTANT_P_RTX}}, \
+ {"reg_or_6bit_operand", {SUBREG, REG, CONST_INT}}, \
+ {"reg_or_8bit_operand", {SUBREG, REG, CONST_INT}}, \
+ {"cint8_operand", {CONST_INT}}, \
+ {"reg_or_cint_operand", {SUBREG, REG, CONST_INT}}, \
+ {"add_operand", {SUBREG, REG, CONST_INT}}, \
+ {"sext_add_operand", {SUBREG, REG, CONST_INT}}, \
{"const48_operand", {CONST_INT}}, \
- {"and_operand", {SUBREG, REG, CONST_INT, CONSTANT_P_RTX}}, \
- {"or_operand", {SUBREG, REG, CONST_INT, CONSTANT_P_RTX}}, \
+ {"and_operand", {SUBREG, REG, CONST_INT}}, \
+ {"or_operand", {SUBREG, REG, CONST_INT}}, \
{"mode_mask_operand", {CONST_INT}}, \
{"mul8_operand", {CONST_INT}}, \
{"mode_width_operand", {CONST_INT}}, \
@@ -2284,14 +2337,16 @@ do { \
{"current_file_function_operand", {SYMBOL_REF}}, \
{"call_operand", {REG, SYMBOL_REF}}, \
{"input_operand", {SUBREG, REG, MEM, CONST_INT, CONST_DOUBLE, \
- SYMBOL_REF, CONST, LABEL_REF, CONSTANT_P_RTX}}, \
+ SYMBOL_REF, CONST, LABEL_REF}}, \
{"some_operand", {SUBREG, REG, MEM, CONST_INT, CONST_DOUBLE, \
- SYMBOL_REF, CONST, LABEL_REF, CONSTANT_P_RTX}}, \
+ SYMBOL_REF, CONST, LABEL_REF}}, \
{"aligned_memory_operand", {MEM}}, \
{"unaligned_memory_operand", {MEM}}, \
{"reg_or_unaligned_mem_operand", {SUBREG, REG, MEM}}, \
{"any_memory_operand", {MEM}}, \
- {"hard_fp_register_operand", {SUBREG, REG}},
+ {"hard_fp_register_operand", {SUBREG, REG}}, \
+ {"reg_not_elim_operand", {SUBREG, REG}}, \
+ {"reg_no_subreg_operand", {REG}},
/* Tell collect that the object format is ECOFF. */
#define OBJECT_FORMAT_COFF
@@ -2462,6 +2517,7 @@ extern int current_file_function_operand ();
extern int alpha_sa_size ();
extern int alpha_adjust_cost ();
extern void print_operand ();
+extern void print_operand_address ();
extern int reg_or_0_operand ();
extern int reg_or_8bit_operand ();
extern int mul8_operand ();
@@ -2482,6 +2538,7 @@ extern int divmod_operator ();
extern int call_operand ();
extern int reg_or_cint_operand ();
extern int hard_fp_register_operand ();
+extern int reg_not_elim_operand ();
extern void alpha_set_memflags ();
extern int aligned_memory_operand ();
extern void get_aligned_mem ();
diff --git a/contrib/gcc/config/alpha/alpha.md b/contrib/gcc/config/alpha/alpha.md
index 87ebf95..6d075e9 100644
--- a/contrib/gcc/config/alpha/alpha.md
+++ b/contrib/gcc/config/alpha/alpha.md
@@ -1,5 +1,5 @@
;; Machine description for DEC Alpha for GNU C compiler
-;; Copyright (C) 1992, 93, 94, 95, 96, 97, 1998 Free Software Foundation, Inc.
+;; Copyright (C) 1992, 93-98, 1999 Free Software Foundation, Inc.
;; Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
;; This file is part of GNU CC.
@@ -29,6 +29,7 @@
;; 3 mskxh
;; 4 cvtlq
;; 5 cvtql
+;; 6 nt_lda
;;
;; UNSPEC_VOLATILE:
;;
@@ -38,6 +39,8 @@
;; 3 builtin_longjmp
;; 4 trapb
;; 5 prologue_stack_probe_loop
+;; 6 realign
+;; 7 exception_receiver
;; Processor type -- this attribute must exactly match the processor_type
;; enumeration in alpha.h.
@@ -51,9 +54,13 @@
;; separately.
(define_attr "type"
- "ild,fld,ldsym,ist,fst,ibr,fbr,jsr,iadd,ilog,shift,icmov,fcmov,icmp,imul,fadd,fmul,fcpys,fdiv,fsqrt,misc,mvi,ftoi,itof"
+ "ild,fld,ldsym,ist,fst,ibr,fbr,jsr,iadd,ilog,shift,icmov,fcmov,icmp,imul,fadd,fmul,fcpys,fdiv,fsqrt,misc,mvi,ftoi,itof,multi"
(const_string "iadd"))
+;; Describe a user's asm statement.
+(define_asm_attributes
+ [(set_attr "type" "multi")])
+
;; Define the operand size an insn operates on. Used primarily by mul
;; and div operations that have size dependant timings.
@@ -149,13 +156,18 @@
; Memory takes at least 2 clocks. Return one from here and fix up with
; user-defined latencies in adjust_cost.
-; ??? How to: "An instruction of class LD cannot be issued in the _second_
-; cycle after an instruction of class ST is issued."
(define_function_unit "ev5_ebox" 2 0
(and (eq_attr "cpu" "ev5")
(eq_attr "type" "ild,fld,ldsym"))
1 1)
+; Loads can dual issue with one another, but loads and stores do not mix.
+(define_function_unit "ev5_e0" 1 0
+ (and (eq_attr "cpu" "ev5")
+ (eq_attr "type" "ild,fld,ldsym"))
+ 1 1
+ [(eq_attr "type" "ist,fst")])
+
; Stores, shifts, multiplies can only issue to E0
(define_function_unit "ev5_e0" 1 0
(and (eq_attr "cpu" "ev5")
@@ -420,12 +432,23 @@
(match_operand:SI 2 "add_operand" "")))]
""
"
-{ emit_insn (gen_rtx_SET (VOIDmode, gen_lowpart (DImode, operands[0]),
- gen_rtx_PLUS (DImode,
- gen_lowpart (DImode, operands[1]),
- gen_lowpart (DImode, operands[2]))));
- DONE;
-} ")
+{
+ if (optimize)
+ {
+ rtx op1 = gen_lowpart (DImode, operands[1]);
+ rtx op2 = gen_lowpart (DImode, operands[2]);
+
+ if (! cse_not_expected)
+ {
+ rtx tmp = gen_reg_rtx (DImode);
+ emit_insn (gen_adddi3 (tmp, op1, op2));
+ emit_move_insn (gen_lowpart (DImode, operands[0]), tmp);
+ }
+ else
+ emit_insn (gen_adddi3 (gen_lowpart (DImode, operands[0]), op1, op2));
+ DONE;
+ }
+}")
(define_insn ""
[(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
@@ -470,7 +493,7 @@
(sign_extend:DI
(plus:SI (match_operand:SI 1 "register_operand" "")
(match_operand:SI 2 "const_int_operand" ""))))
- (clobber (match_operand:SI 3 "register_operand" ""))]
+ (clobber (match_operand:SI 3 "reg_not_elim_operand" ""))]
"! sext_add_operand (operands[2], SImode) && INTVAL (operands[2]) > 0
&& INTVAL (operands[2]) % 4 == 0"
[(set (match_dup 3) (match_dup 4))
@@ -512,20 +535,57 @@
(plus:DI (match_operand:DI 1 "reg_or_0_operand" "%rJ,rJ,rJ,rJ")
(match_operand:DI 2 "add_operand" "rI,O,K,L")))]
""
- "@
- addq %r1,%2,%0
- subq %r1,%n2,%0
- lda %0,%2(%r1)
- ldah %0,%h2(%r1)")
+ "*
+{
+ const char * const pattern[4] = {
+ \"addq %r1,%2,%0\",
+ \"subq %r1,%n2,%0\",
+ \"lda %0,%2(%r1)\",
+ \"ldah %0,%h2(%r1)\"
+ };
+
+ /* The NT stack unwind code can't handle a subq to adjust the stack
+ (that's a bug, but not one we can do anything about). As of NT4.0 SP3,
+ the exception handling code will loop if a subq is used and an
+ exception occurs.
+
+ The 19980616 change to emit prologues as RTL also confused some
+ versions of GDB, which also interprets prologues. This has been
+ fixed as of GDB 4.18, but it does not harm to unconditionally
+ use lda here. */
+
+ int which = which_alternative;
+
+ if (operands[0] == stack_pointer_rtx
+ && GET_CODE (operands[2]) == CONST_INT
+ && CONST_OK_FOR_LETTER_P (INTVAL (operands[2]), 'K'))
+ which = 2;
+
+ return pattern[which];
+}")
+
+;; ??? Allow large constants when basing off the frame pointer or some
+;; virtual register that may eliminate to the frame pointer. This is
+;; done because register elimination offsets will change the hi/lo split,
+;; and if we split before reload, we will require additional instructions.
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (plus:DI (match_operand:DI 1 "reg_no_subreg_operand" "r")
+ (match_operand:DI 2 "const_int_operand" "n")))]
+ "REG_OK_FP_BASE_P (operands[1])"
+ "#")
-;; Don't do this if we are adjusting SP since we don't want to do
-;; it in two steps.
+;; Don't do this if we are adjusting SP since we don't want to do it
+;; in two steps. Don't split FP sources for the reason listed above.
(define_split
[(set (match_operand:DI 0 "register_operand" "")
(plus:DI (match_operand:DI 1 "register_operand" "")
(match_operand:DI 2 "const_int_operand" "")))]
"! add_operand (operands[2], DImode)
- && REGNO (operands[0]) != STACK_POINTER_REGNUM"
+ && operands[0] != stack_pointer_rtx
+ && operands[1] != frame_pointer_rtx
+ && operands[1] != arg_pointer_rtx"
[(set (match_dup 0) (plus:DI (match_dup 1) (match_dup 3)))
(set (match_dup 0) (plus:DI (match_dup 0) (match_dup 4)))]
"
@@ -540,24 +600,24 @@
(define_insn ""
[(set (match_operand:SI 0 "register_operand" "=r,r")
- (plus:SI (mult:SI (match_operand:SI 1 "reg_or_0_operand" "rJ,rJ")
+ (plus:SI (mult:SI (match_operand:SI 1 "reg_not_elim_operand" "r,r")
(match_operand:SI 2 "const48_operand" "I,I"))
(match_operand:SI 3 "sext_add_operand" "rI,O")))]
""
"@
- s%2addl %r1,%3,%0
- s%2subl %r1,%n3,%0")
+ s%2addl %1,%3,%0
+ s%2subl %1,%n3,%0")
(define_insn ""
[(set (match_operand:DI 0 "register_operand" "=r,r")
(sign_extend:DI
- (plus:SI (mult:SI (match_operand:SI 1 "reg_or_0_operand" "rJ,rJ")
+ (plus:SI (mult:SI (match_operand:SI 1 "reg_not_elim_operand" "r,r")
(match_operand:SI 2 "const48_operand" "I,I"))
(match_operand:SI 3 "sext_add_operand" "rI,O"))))]
""
"@
- s%2addl %r1,%3,%0
- s%2subl %r1,%n3,%0")
+ s%2addl %1,%3,%0
+ s%2subl %1,%n3,%0")
(define_split
[(set (match_operand:DI 0 "register_operand" "")
@@ -567,7 +627,7 @@
(match_operand 3 "" "")])
(match_operand:SI 4 "const48_operand" ""))
(match_operand:SI 5 "add_operand" ""))))
- (clobber (match_operand:DI 6 "register_operand" ""))]
+ (clobber (match_operand:DI 6 "reg_not_elim_operand" ""))]
""
[(set (match_dup 6) (match_dup 7))
(set (match_dup 0)
@@ -582,12 +642,12 @@
(define_insn ""
[(set (match_operand:DI 0 "register_operand" "=r,r")
- (plus:DI (mult:DI (match_operand:DI 1 "reg_or_0_operand" "rJ,rJ")
+ (plus:DI (mult:DI (match_operand:DI 1 "reg_not_elim_operand" "r,r")
(match_operand:DI 2 "const48_operand" "I,I"))
- (match_operand:DI 3 "reg_or_8bit_operand" "rI,O")))]
+ (match_operand:DI 3 "sext_add_operand" "rI,O")))]
""
"@
- s%2addq %r1,%3,%0
+ s%2addq %1,%3,%0
s%2subq %1,%n3,%0")
;; These variants of the above insns can occur if the third operand
@@ -656,9 +716,7 @@
[(set (match_dup 5)
(plus:SI (mult:SI (match_dup 1) (match_dup 2)) (match_dup 3)))
(set (match_dup 0) (sign_extend:DI (plus:SI (match_dup 5) (match_dup 4))))]
- "
-{ operands[5] = gen_lowpart (SImode, operands[0]);
-}")
+ "operands[5] = gen_lowpart (SImode, operands[0]);")
(define_insn ""
[(set (match_operand:DI 0 "some_operand" "=&r")
@@ -706,11 +764,22 @@
(match_operand:SI 2 "reg_or_8bit_operand" "")))]
""
"
-{ emit_insn (gen_rtx_SET (VOIDmode, gen_lowpart (DImode, operands[0]),
- gen_rtx_MINUS (DImode,
- gen_lowpart (DImode, operands[1]),
- gen_lowpart (DImode, operands[2]))));
- DONE;
+{
+ if (optimize)
+ {
+ rtx op1 = gen_lowpart (DImode, operands[1]);
+ rtx op2 = gen_lowpart (DImode, operands[2]);
+
+ if (! cse_not_expected)
+ {
+ rtx tmp = gen_reg_rtx (DImode);
+ emit_insn (gen_subdi3 (tmp, op1, op2));
+ emit_move_insn (gen_lowpart (DImode, operands[0]), tmp);
+ }
+ else
+ emit_insn (gen_subdi3 (gen_lowpart (DImode, operands[0]), op1, op2));
+ DONE;
+ }
} ")
(define_insn ""
@@ -736,64 +805,67 @@
(define_insn ""
[(set (match_operand:SI 0 "register_operand" "=r")
- (minus:SI (mult:SI (match_operand:SI 1 "reg_or_0_operand" "rJ")
+ (minus:SI (mult:SI (match_operand:SI 1 "reg_not_elim_operand" "r")
(match_operand:SI 2 "const48_operand" "I"))
(match_operand:SI 3 "reg_or_8bit_operand" "rI")))]
""
- "s%2subl %r1,%3,%0")
+ "s%2subl %1,%3,%0")
(define_insn ""
[(set (match_operand:DI 0 "register_operand" "=r")
(sign_extend:DI
- (minus:SI (mult:SI (match_operand:SI 1 "reg_or_0_operand" "rJ")
+ (minus:SI (mult:SI (match_operand:SI 1 "reg_not_elim_operand" "r")
(match_operand:SI 2 "const48_operand" "I"))
(match_operand:SI 3 "reg_or_8bit_operand" "rI"))))]
""
- "s%2subl %r1,%3,%0")
+ "s%2subl %1,%3,%0")
(define_insn ""
[(set (match_operand:DI 0 "register_operand" "=r")
- (minus:DI (mult:DI (match_operand:DI 1 "reg_or_0_operand" "rJ")
+ (minus:DI (mult:DI (match_operand:DI 1 "reg_not_elim_operand" "r")
(match_operand:DI 2 "const48_operand" "I"))
(match_operand:DI 3 "reg_or_8bit_operand" "rI")))]
""
- "s%2subq %r1,%3,%0")
+ "s%2subq %1,%3,%0")
(define_insn "mulsi3"
[(set (match_operand:SI 0 "register_operand" "=r")
(mult:SI (match_operand:SI 1 "reg_or_0_operand" "%rJ")
- (match_operand:SI 2 "reg_or_0_operand" "rJ")))]
+ (match_operand:SI 2 "reg_or_8bit_operand" "rI")))]
""
- "mull %r1,%r2,%0"
+ "mull %r1,%2,%0"
[(set_attr "type" "imul")
(set_attr "opsize" "si")])
(define_insn ""
[(set (match_operand:DI 0 "register_operand" "=r")
- (sign_extend:DI (mult:SI (match_operand:SI 1 "reg_or_0_operand" "%rJ")
- (match_operand:SI 2 "reg_or_0_operand" "rJ"))))]
+ (sign_extend:DI
+ (mult:SI (match_operand:SI 1 "reg_or_0_operand" "%rJ")
+ (match_operand:SI 2 "reg_or_8bit_operand" "rI"))))]
""
- "mull %r1,%r2,%0"
+ "mull %r1,%2,%0"
[(set_attr "type" "imul")
(set_attr "opsize" "si")])
(define_insn "muldi3"
[(set (match_operand:DI 0 "register_operand" "=r")
(mult:DI (match_operand:DI 1 "reg_or_0_operand" "%rJ")
- (match_operand:DI 2 "reg_or_0_operand" "rJ")))]
+ (match_operand:DI 2 "reg_or_8bit_operand" "rI")))]
""
- "mulq %r1,%r2,%0"
+ "mulq %r1,%2,%0"
[(set_attr "type" "imul")])
(define_insn "umuldi3_highpart"
[(set (match_operand:DI 0 "register_operand" "=r")
(truncate:DI
(lshiftrt:TI
- (mult:TI (zero_extend:TI (match_operand:DI 1 "register_operand" "r"))
- (zero_extend:TI (match_operand:DI 2 "register_operand" "r")))
+ (mult:TI (zero_extend:TI
+ (match_operand:DI 1 "reg_or_0_operand" "%rJ"))
+ (zero_extend:TI
+ (match_operand:DI 2 "reg_or_8bit_operand" "rI")))
(const_int 64))))]
""
- "umulh %1,%2,%0"
+ "umulh %r1,%2,%0"
[(set_attr "type" "imul")
(set_attr "opsize" "udi")])
@@ -816,6 +888,7 @@
;; do the right thing if the inputs are not properly sign-extended.
;; But Linux, for instance, does not have this problem. Is it worth
;; the complication here to eliminate the sign extension?
+;; Interix/NT has the same sign-extension problem.
(define_expand "divsi3"
[(set (reg:DI 24)
@@ -1146,7 +1219,15 @@
"eqv %r1,%2,%0"
[(set_attr "type" "ilog")])
-;; Handle the FFS insn if we support CIX.
+;; Handle the FFS insn iff we support CIX.
+;;
+;; These didn't make it into EV6 pass 2 as planned. Instead they
+;; cropped cttz/ctlz/ctpop from the old CIX and renamed it FIX for
+;; "Square Root and Floating Point Convert Extension".
+;;
+;; I'm assured that these insns will make it into EV67 (first pass
+;; due Summer 1999), presumably with a new AMASK bit, and presumably
+;; will still be named CIX.
(define_expand "ffsdi2"
[(set (match_dup 2)
@@ -1168,7 +1249,7 @@
(unspec [(match_operand:DI 1 "register_operand" "r")] 1))]
"TARGET_CIX"
"cttz %1,%0"
- ; ev6 calls all mvi and cttz/ctlz/popc class imisc, so just
+ ; EV6 calls all mvi and cttz/ctlz/popc class imisc, so just
; reuse the existing type name.
[(set_attr "type" "mvi")])
@@ -1495,6 +1576,29 @@
"ext%M2l %r1,%3,%0"
[(set_attr "type" "shift")])
+;; Combine has some strange notion of preserving existing undefined behaviour
+;; in shifts larger than a word size. So capture these patterns that it
+;; should have turned into zero_extracts.
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (and (lshiftrt:DI (match_operand:DI 1 "reg_or_0_operand" "rJ")
+ (ashift:DI (match_operand:DI 2 "reg_or_8bit_operand" "rI")
+ (const_int 3)))
+ (match_operand:DI 3 "mode_mask_operand" "n")))]
+ ""
+ "ext%U3l %1,%2,%0"
+ [(set_attr "type" "shift")])
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (lshiftrt:DI (match_operand:DI 1 "reg_or_0_operand" "rJ")
+ (ashift:DI (match_operand:DI 2 "reg_or_8bit_operand" "rI")
+ (const_int 3))))]
+ ""
+ "extql %1,%2,%0"
+ [(set_attr "type" "shift")])
+
(define_insn "extqh"
[(set (match_operand:DI 0 "register_operand" "=r")
(ashift:DI
@@ -1639,22 +1743,22 @@
"HOST_BITS_PER_WIDE_INT == 64
&& GET_CODE (operands[3]) == CONST_INT
&& (((unsigned HOST_WIDE_INT) 0xff << INTVAL (operands[2])
- == INTVAL (operands[3]))
+ == (unsigned HOST_WIDE_INT) INTVAL (operands[3]))
|| ((unsigned HOST_WIDE_INT) 0xffff << INTVAL (operands[2])
- == INTVAL (operands[3]))
+ == (unsigned HOST_WIDE_INT) INTVAL (operands[3]))
|| ((unsigned HOST_WIDE_INT) 0xffffffff << INTVAL (operands[2])
- == INTVAL (operands[3])))"
+ == (unsigned HOST_WIDE_INT) INTVAL (operands[3])))"
"*
{
#if HOST_BITS_PER_WIDE_INT == 64
if ((unsigned HOST_WIDE_INT) 0xff << INTVAL (operands[2])
- == INTVAL (operands[3]))
+ == (unsigned HOST_WIDE_INT) INTVAL (operands[3]))
return \"insbl %1,%s2,%0\";
if ((unsigned HOST_WIDE_INT) 0xffff << INTVAL (operands[2])
- == INTVAL (operands[3]))
+ == (unsigned HOST_WIDE_INT) INTVAL (operands[3]))
return \"inswl %1,%s2,%0\";
if ((unsigned HOST_WIDE_INT) 0xffffffff << INTVAL (operands[2])
- == INTVAL (operands[3]))
+ == (unsigned HOST_WIDE_INT) INTVAL (operands[3]))
return \"insll %1,%s2,%0\";
#endif
abort();
@@ -1794,60 +1898,62 @@
;; instruction. To allow combine et al to do useful things, we keep the
;; operation as a unit until after reload, at which point we split the
;; instructions.
+;;
+;; Note that we (attempt to) only consider this optimization when the
+;; ultimate destination is memory. If we will be doing further integer
+;; processing, it is cheaper to do the truncation in the int regs.
+
+(define_insn "*cvtql"
+ [(set (match_operand:SI 0 "register_operand" "=f")
+ (unspec:SI [(match_operand:DI 1 "reg_or_fp0_operand" "fG")] 5))]
+ "TARGET_FP"
+ "cvtql%` %R1,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")])
(define_split
- [(set (match_operand:SI 0 "register_operand" "")
- (fix:SI (match_operand:DF 1 "reg_or_fp0_operand" "")))
- (clobber (match_scratch:DI 2 ""))]
+ [(set (match_operand:SI 0 "memory_operand" "")
+ (subreg:SI (fix:DI (match_operand:DF 1 "reg_or_fp0_operand" "")) 0))
+ (clobber (match_scratch:DI 2 ""))
+ (clobber (match_scratch:SI 3 ""))]
"TARGET_FP && reload_completed"
[(set (match_dup 2) (fix:DI (match_dup 1)))
- (set (match_dup 0) (unspec:SI [(match_dup 2)] 5))]
+ (set (match_dup 3) (unspec:SI [(match_dup 2)] 5))
+ (set (match_dup 0) (match_dup 3))]
"")
-;; Due to issues with CLASS_CANNOT_CHANGE_SIZE, we cannot use a subreg here.
(define_split
- [(set (match_operand:SI 0 "register_operand" "")
- (fix:SI (match_operand:DF 1 "reg_or_fp0_operand" "")))]
+ [(set (match_operand:SI 0 "memory_operand" "")
+ (subreg:SI (fix:DI (match_operand:DF 1 "reg_or_fp0_operand" "")) 0))
+ (clobber (match_scratch:DI 2 ""))]
"TARGET_FP && reload_completed"
[(set (match_dup 2) (fix:DI (match_dup 1)))
- (set (match_dup 0) (unspec:SI [(match_dup 2)] 5))]
- "operands[2] = gen_rtx_REG (DImode, REGNO (operands[0]));")
+ (set (match_dup 3) (unspec:SI [(match_dup 2)] 5))
+ (set (match_dup 0) (match_dup 3))]
+ ;; Due to REG_CANNOT_CHANGE_SIZE issues, we cannot simply use SUBREG.
+ "operands[3] = gen_rtx_REG (SImode, REGNO (operands[2]));")
(define_insn ""
- [(set (match_operand:SI 0 "register_operand" "=f")
- (unspec:SI [(match_operand:DI 1 "reg_or_fp0_operand" "fG")] 5))]
- "TARGET_FP"
- "cvtql%` %R1,%0"
- [(set_attr "type" "fadd")
- (set_attr "trap" "yes")])
-
-(define_insn "fix_truncdfsi2_tp"
- [(set (match_operand:SI 0 "register_operand" "=&f")
- (fix:SI (match_operand:DF 1 "reg_or_fp0_operand" "fG")))
- (clobber (match_scratch:DI 2 "=&f"))]
+ [(set (match_operand:SI 0 "memory_operand" "=m")
+ (subreg:SI (fix:DI (match_operand:DF 1 "reg_or_fp0_operand" "fG")) 0))
+ (clobber (match_scratch:DI 2 "=&f"))
+ (clobber (match_scratch:SI 3 "=&f"))]
"TARGET_FP && alpha_tp == ALPHA_TP_INSN"
"#"
[(set_attr "type" "fadd")
(set_attr "trap" "yes")])
(define_insn ""
- [(set (match_operand:SI 0 "register_operand" "=f")
- (fix:SI (match_operand:DF 1 "reg_or_fp0_operand" "fG")))]
+ [(set (match_operand:SI 0 "memory_operand" "=m")
+ (subreg:SI (fix:DI (match_operand:DF 1 "reg_or_fp0_operand" "fG")) 0))
+ (clobber (match_scratch:DI 2 "=f"))]
"TARGET_FP && alpha_tp != ALPHA_TP_INSN"
"#"
[(set_attr "type" "fadd")
(set_attr "trap" "yes")])
-(define_expand "fix_truncdfsi2"
- [(set (match_operand:SI 0 "register_operand" "=f")
- (fix:SI (match_operand:DF 1 "reg_or_fp0_operand" "fG")))]
- "TARGET_FP"
- "{ if (alpha_tp == ALPHA_TP_INSN)
- { emit_insn(gen_fix_truncdfsi2_tp(operands[0], operands[1])); DONE; }
- }")
-
(define_insn ""
- [(set (match_operand:DI 0 "register_operand" "=&f")
+ [(set (match_operand:DI 0 "reg_no_subreg_operand" "=&f")
(fix:DI (match_operand:DF 1 "reg_or_fp0_operand" "fG")))]
"TARGET_FP && alpha_tp == ALPHA_TP_INSN"
"cvt%-q%(c %R1,%0"
@@ -1855,7 +1961,7 @@
(set_attr "trap" "yes")])
(define_insn "fix_truncdfdi2"
- [(set (match_operand:DI 0 "register_operand" "=f")
+ [(set (match_operand:DI 0 "reg_no_subreg_operand" "=f")
(fix:DI (match_operand:DF 1 "reg_or_fp0_operand" "fG")))]
"TARGET_FP"
"cvt%-q%(c %R1,%0"
@@ -1865,55 +1971,52 @@
;; Likewise between SFmode and SImode.
(define_split
- [(set (match_operand:SI 0 "register_operand" "")
- (fix:SI (float_extend:DF
- (match_operand:SF 1 "reg_or_fp0_operand" ""))))
- (clobber (match_scratch:DI 2 ""))]
+ [(set (match_operand:SI 0 "memory_operand" "")
+ (subreg:SI (fix:DI (float_extend:DF
+ (match_operand:SF 1 "reg_or_fp0_operand" ""))) 0))
+ (clobber (match_scratch:DI 2 ""))
+ (clobber (match_scratch:SI 3 ""))]
"TARGET_FP && reload_completed"
[(set (match_dup 2) (fix:DI (float_extend:DF (match_dup 1))))
- (set (match_dup 0) (unspec:SI [(match_dup 2)] 5))]
+ (set (match_dup 3) (unspec:SI [(match_dup 2)] 5))
+ (set (match_dup 0) (match_dup 3))]
"")
-;; Due to issues with CLASS_CANNOT_CHANGE_SIZE, we cannot use a subreg here.
(define_split
- [(set (match_operand:SI 0 "register_operand" "")
- (fix:SI (float_extend:DF
- (match_operand:SF 1 "reg_or_fp0_operand" ""))))]
+ [(set (match_operand:SI 0 "memory_operand" "")
+ (subreg:SI (fix:DI (float_extend:DF
+ (match_operand:SF 1 "reg_or_fp0_operand" ""))) 0))
+ (clobber (match_scratch:DI 2 ""))]
"TARGET_FP && reload_completed"
[(set (match_dup 2) (fix:DI (float_extend:DF (match_dup 1))))
- (set (match_dup 0) (unspec:SI [(match_dup 2)] 5))]
- "operands[2] = gen_rtx_REG (DImode, REGNO (operands[0]));")
-
-(define_insn "fix_truncsfsi2_tp"
- [(set (match_operand:SI 0 "register_operand" "=&f")
- (fix:SI (float_extend:DF
- (match_operand:SF 1 "reg_or_fp0_operand" "fG"))))
- (clobber (match_scratch:DI 2 "=&f"))]
+ (set (match_dup 3) (unspec:SI [(match_dup 2)] 5))
+ (set (match_dup 0) (match_dup 3))]
+ ;; Due to REG_CANNOT_CHANGE_SIZE issues, we cannot simply use SUBREG.
+ "operands[3] = gen_rtx_REG (SImode, REGNO (operands[2]));")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "memory_operand" "=m")
+ (subreg:SI (fix:DI (float_extend:DF
+ (match_operand:SF 1 "reg_or_fp0_operand" "fG"))) 0))
+ (clobber (match_scratch:DI 2 "=&f"))
+ (clobber (match_scratch:SI 3 "=&f"))]
"TARGET_FP && alpha_tp == ALPHA_TP_INSN"
"#"
[(set_attr "type" "fadd")
(set_attr "trap" "yes")])
(define_insn ""
- [(set (match_operand:SI 0 "register_operand" "=f")
- (fix:SI (float_extend:DF
- (match_operand:SF 1 "reg_or_fp0_operand" "fG"))))]
+ [(set (match_operand:SI 0 "memory_operand" "=m")
+ (subreg:SI (fix:DI (float_extend:DF
+ (match_operand:SF 1 "reg_or_fp0_operand" "fG"))) 0))
+ (clobber (match_scratch:DI 2 "=f"))]
"TARGET_FP && alpha_tp != ALPHA_TP_INSN"
"#"
[(set_attr "type" "fadd")
(set_attr "trap" "yes")])
-(define_expand "fix_truncsfsi2"
- [(set (match_operand:SI 0 "register_operand" "=f")
- (fix:SI (float_extend:DF
- (match_operand:SF 1 "reg_or_fp0_operand" "fG"))))]
- "TARGET_FP"
- "{ if (alpha_tp == ALPHA_TP_INSN)
- { emit_insn(gen_fix_truncsfsi2_tp(operands[0], operands[1])); DONE; }
- }")
-
(define_insn ""
- [(set (match_operand:DI 0 "register_operand" "=&f")
+ [(set (match_operand:DI 0 "reg_no_subreg_operand" "=&f")
(fix:DI (float_extend:DF
(match_operand:SF 1 "reg_or_fp0_operand" "fG"))))]
"TARGET_FP && alpha_tp == ALPHA_TP_INSN"
@@ -1922,7 +2025,7 @@
(set_attr "trap" "yes")])
(define_insn "fix_truncsfdi2"
- [(set (match_operand:DI 0 "register_operand" "=f")
+ [(set (match_operand:DI 0 "reg_no_subreg_operand" "=f")
(fix:DI (float_extend:DF
(match_operand:SF 1 "reg_or_fp0_operand" "fG"))))]
"TARGET_FP"
@@ -1932,7 +2035,7 @@
(define_insn ""
[(set (match_operand:SF 0 "register_operand" "=&f")
- (float:SF (match_operand:DI 1 "register_operand" "f")))]
+ (float:SF (match_operand:DI 1 "reg_no_subreg_operand" "f")))]
"TARGET_FP && alpha_tp == ALPHA_TP_INSN"
"cvtq%,%+%& %1,%0"
[(set_attr "type" "fadd")
@@ -1940,7 +2043,7 @@
(define_insn "floatdisf2"
[(set (match_operand:SF 0 "register_operand" "=f")
- (float:SF (match_operand:DI 1 "register_operand" "f")))]
+ (float:SF (match_operand:DI 1 "reg_no_subreg_operand" "f")))]
"TARGET_FP"
"cvtq%,%+%& %1,%0"
[(set_attr "type" "fadd")
@@ -1948,7 +2051,7 @@
(define_insn ""
[(set (match_operand:DF 0 "register_operand" "=&f")
- (float:DF (match_operand:DI 1 "register_operand" "f")))]
+ (float:DF (match_operand:DI 1 "reg_no_subreg_operand" "f")))]
"TARGET_FP && alpha_tp == ALPHA_TP_INSN"
"cvtq%-%+%& %1,%0"
[(set_attr "type" "fadd")
@@ -1956,7 +2059,7 @@
(define_insn "floatdidf2"
[(set (match_operand:DF 0 "register_operand" "=f")
- (float:DF (match_operand:DI 1 "register_operand" "f")))]
+ (float:DF (match_operand:DI 1 "reg_no_subreg_operand" "f")))]
"TARGET_FP"
"cvtq%-%+%& %1,%0"
[(set_attr "type" "fadd")
@@ -1990,7 +2093,7 @@
(float_extend:DF (match_operand:SF 1 "nonimmediate_operand" "f,m,f")))]
"TARGET_FP && alpha_tp != ALPHA_TP_INSN"
"@
- cpys %1,%1,%0
+ fmov %1,%0
ld%, %0,%1
st%- %1,%0"
[(set_attr "type" "fcpys,fld,fst")
@@ -2205,7 +2308,7 @@
(define_insn ""
[(set (match_operand:SF 0 "register_operand" "=&f")
(sqrt:SF (match_operand:SF 1 "reg_or_fp0_operand" "fG")))]
- "TARGET_FP && TARGET_CIX && alpha_tp == ALPHA_TP_INSN"
+ "TARGET_FP && TARGET_FIX && alpha_tp == ALPHA_TP_INSN"
"sqrt%,%)%& %R1,%0"
[(set_attr "type" "fsqrt")
(set_attr "opsize" "si")
@@ -2214,7 +2317,7 @@
(define_insn "sqrtsf2"
[(set (match_operand:SF 0 "register_operand" "=f")
(sqrt:SF (match_operand:SF 1 "reg_or_fp0_operand" "fG")))]
- "TARGET_FP && TARGET_CIX"
+ "TARGET_FP && TARGET_FIX"
"sqrt%,%)%& %R1,%0"
[(set_attr "type" "fsqrt")
(set_attr "opsize" "si")
@@ -2223,7 +2326,7 @@
(define_insn ""
[(set (match_operand:DF 0 "register_operand" "=&f")
(sqrt:DF (match_operand:DF 1 "reg_or_fp0_operand" "fG")))]
- "TARGET_FP && TARGET_CIX && alpha_tp == ALPHA_TP_INSN"
+ "TARGET_FP && TARGET_FIX && alpha_tp == ALPHA_TP_INSN"
"sqrt%-%)%& %R1,%0"
[(set_attr "type" "fsqrt")
(set_attr "trap" "yes")])
@@ -2231,7 +2334,7 @@
(define_insn "sqrtdf2"
[(set (match_operand:DF 0 "register_operand" "=f")
(sqrt:DF (match_operand:DF 1 "reg_or_fp0_operand" "fG")))]
- "TARGET_FP && TARGET_CIX"
+ "TARGET_FP && TARGET_FIX"
"sqrt%-%)%& %1,%0"
[(set_attr "type" "fsqrt")
(set_attr "trap" "yes")])
@@ -2262,7 +2365,7 @@
(define_insn ""
[(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
- (if_then_else:DI
+ (if_then_else:SI
(match_operator 2 "signed_comparison_operator"
[(match_operand:DI 3 "reg_or_0_operand" "rJ,rJ,J,J")
(match_operand:DI 4 "reg_or_0_operand" "J,J,rJ,rJ")])
@@ -2322,69 +2425,6 @@
cmovlbc %r2,%3,%0"
[(set_attr "type" "icmov")])
-;; This form is added since combine thinks that an IF_THEN_ELSE with both
-;; arms constant is a single insn, so it won't try to form it if combine
-;; knows they are really two insns. This occurs in divides by powers
-;; of two.
-
-(define_insn ""
- [(set (match_operand:DI 0 "register_operand" "=r")
- (if_then_else:DI
- (match_operator 2 "signed_comparison_operator"
- [(match_operand:DI 3 "reg_or_0_operand" "rJ")
- (const_int 0)])
- (plus:DI (match_dup 0)
- (match_operand:DI 1 "reg_or_8bit_operand" "rI"))
- (match_dup 0)))
- (clobber (match_scratch:DI 4 "=&r"))]
- ""
- "addq %0,%1,%4\;cmov%C2 %r3,%4,%0"
- [(set_attr "type" "icmov")
- (set_attr "length" "8")])
-
-(define_split
- [(set (match_operand:DI 0 "register_operand" "")
- (if_then_else:DI
- (match_operator 2 "signed_comparison_operator"
- [(match_operand:DI 3 "reg_or_0_operand" "")
- (const_int 0)])
- (plus:DI (match_dup 0)
- (match_operand:DI 1 "reg_or_8bit_operand" ""))
- (match_dup 0)))
- (clobber (match_operand:DI 4 "register_operand" ""))]
- ""
- [(set (match_dup 4) (plus:DI (match_dup 0) (match_dup 1)))
- (set (match_dup 0) (if_then_else:DI (match_op_dup 2
- [(match_dup 3)
- (const_int 0)])
- (match_dup 4) (match_dup 0)))]
- "")
-
-(define_split
- [(parallel
- [(set (match_operand:DI 0 "register_operand" "")
- (if_then_else:DI
- (match_operator 1 "comparison_operator"
- [(zero_extract:DI (match_operand:DI 2 "register_operand" "")
- (const_int 1)
- (match_operand:DI 3 "const_int_operand" ""))
- (const_int 0)])
- (match_operand:DI 4 "reg_or_8bit_operand" "")
- (match_operand:DI 5 "reg_or_8bit_operand" "")))
- (clobber (match_operand:DI 6 "register_operand" ""))])]
- "INTVAL (operands[3]) != 0"
- [(set (match_dup 6)
- (lshiftrt:DI (match_dup 2) (match_dup 3)))
- (set (match_dup 0)
- (if_then_else:DI (match_op_dup 1
- [(zero_extract:DI (match_dup 6)
- (const_int 1)
- (const_int 0))
- (const_int 0)])
- (match_dup 4)
- (match_dup 5)))]
- "")
-
;; For ABS, we have two choices, depending on whether the input and output
;; registers are the same or not.
(define_expand "absdi2"
@@ -2460,7 +2500,7 @@
(define_insn "sminqi3"
[(set (match_operand:QI 0 "register_operand" "=r")
- (smin:SI (match_operand:QI 1 "reg_or_0_operand" "%rJ")
+ (smin:QI (match_operand:QI 1 "reg_or_0_operand" "%rJ")
(match_operand:QI 2 "reg_or_8bit_operand" "rI")))]
"TARGET_MAX"
"minsb8 %r1,%2,%0"
@@ -2468,7 +2508,7 @@
(define_insn "uminqi3"
[(set (match_operand:QI 0 "register_operand" "=r")
- (umin:SI (match_operand:QI 1 "reg_or_0_operand" "%rJ")
+ (umin:QI (match_operand:QI 1 "reg_or_0_operand" "%rJ")
(match_operand:QI 2 "reg_or_8bit_operand" "rI")))]
"TARGET_MAX"
"minub8 %r1,%2,%0"
@@ -2476,7 +2516,7 @@
(define_insn "smaxqi3"
[(set (match_operand:QI 0 "register_operand" "=r")
- (smax:SI (match_operand:QI 1 "reg_or_0_operand" "%rJ")
+ (smax:QI (match_operand:QI 1 "reg_or_0_operand" "%rJ")
(match_operand:QI 2 "reg_or_8bit_operand" "rI")))]
"TARGET_MAX"
"maxsb8 %r1,%2,%0"
@@ -2484,7 +2524,7 @@
(define_insn "umaxqi3"
[(set (match_operand:QI 0 "register_operand" "=r")
- (umax:SI (match_operand:QI 1 "reg_or_0_operand" "%rJ")
+ (umax:QI (match_operand:QI 1 "reg_or_0_operand" "%rJ")
(match_operand:QI 2 "reg_or_8bit_operand" "rI")))]
"TARGET_MAX"
"maxub8 %r1,%2,%0"
@@ -2492,7 +2532,7 @@
(define_insn "sminhi3"
[(set (match_operand:HI 0 "register_operand" "=r")
- (smin:SI (match_operand:HI 1 "reg_or_0_operand" "%rJ")
+ (smin:HI (match_operand:HI 1 "reg_or_0_operand" "%rJ")
(match_operand:HI 2 "reg_or_8bit_operand" "rI")))]
"TARGET_MAX"
"minsw4 %r1,%2,%0"
@@ -2500,7 +2540,7 @@
(define_insn "uminhi3"
[(set (match_operand:HI 0 "register_operand" "=r")
- (umin:SI (match_operand:HI 1 "reg_or_0_operand" "%rJ")
+ (umin:HI (match_operand:HI 1 "reg_or_0_operand" "%rJ")
(match_operand:HI 2 "reg_or_8bit_operand" "rI")))]
"TARGET_MAX"
"minuw4 %r1,%2,%0"
@@ -2508,7 +2548,7 @@
(define_insn "smaxhi3"
[(set (match_operand:HI 0 "register_operand" "=r")
- (smax:SI (match_operand:HI 1 "reg_or_0_operand" "%rJ")
+ (smax:HI (match_operand:HI 1 "reg_or_0_operand" "%rJ")
(match_operand:HI 2 "reg_or_8bit_operand" "rI")))]
"TARGET_MAX"
"maxsw4 %r1,%2,%0"
@@ -2516,7 +2556,7 @@
(define_insn "umaxhi3"
[(set (match_operand:HI 0 "register_operand" "=r")
- (umax:SI (match_operand:HI 1 "reg_or_0_operand" "%rJ")
+ (umax:HI (match_operand:HI 1 "reg_or_0_operand" "%rJ")
(match_operand:HI 2 "reg_or_8bit_operand" "rI")))]
"TARGET_MAX"
"maxuw4 %r1,%2,%0"
@@ -3197,7 +3237,7 @@
(define_expand "movsicc"
[(set (match_operand:SI 0 "register_operand" "")
- (if_then_else:DI (match_operand 1 "comparison_operator" "")
+ (if_then_else:SI (match_operand 1 "comparison_operator" "")
(match_operand:SI 2 "reg_or_8bit_operand" "")
(match_operand:SI 3 "reg_or_8bit_operand" "")))]
""
@@ -3610,7 +3650,7 @@
abort ();
operands[1] = XEXP (operands[1], 0);
- if (GET_CODE (operands[0]) != SYMBOL_REF && GET_CODE (operands[0]) != REG)
+ if (GET_CODE (operands[1]) != SYMBOL_REF && GET_CODE (operands[1]) != REG)
operands[1] = force_reg (DImode, operands[1]);
}")
@@ -3675,7 +3715,7 @@
bsr $26,$%0..ng
jsr $26,%0\;ldgp $29,0($26)"
[(set_attr "type" "jsr")
- (set_attr "length" "12,*,12")])
+ (set_attr "length" "12,*,16")])
(define_insn ""
[(call (mem:DI (match_operand:DI 0 "call_operand" "r,R,i"))
@@ -3698,7 +3738,7 @@
(clobber (reg:DI 27))]
"TARGET_OPEN_VMS"
"@
- bis %2,%2,$27\;jsr $26,0\;ldq $27,0($29)
+ mov %2,$27\;jsr $26,0\;ldq $27,0($29)
ldq $27,%2\;jsr $26,%0\;ldq $27,0($29)"
[(set_attr "type" "jsr")
(set_attr "length" "12,16")])
@@ -3715,7 +3755,7 @@
bsr $26,$%1..ng
jsr $26,%1\;ldgp $29,0($26)"
[(set_attr "type" "jsr")
- (set_attr "length" "12,*,12")])
+ (set_attr "length" "12,*,16")])
(define_insn ""
[(set (match_operand 0 "register_operand" "=rf,rf,rf")
@@ -3740,7 +3780,7 @@
(clobber (reg:DI 27))]
"TARGET_OPEN_VMS"
"@
- bis %3,%3,$27\;jsr $26,0\;ldq $27,0($29)
+ mov %3,$27\;jsr $26,0\;ldq $27,0($29)
ldq $27,%3\;jsr $26,%1\;ldq $27,0($29)"
[(set_attr "type" "jsr")
(set_attr "length" "12,16")])
@@ -3811,12 +3851,6 @@
"jmp $31,(%0),0"
[(set_attr "type" "ibr")])
-(define_insn "nop"
- [(const_int 0)]
- ""
- "nop"
- [(set_attr "type" "ilog")])
-
(define_expand "tablejump"
[(use (match_operand:SI 0 "register_operand" ""))
(use (match_operand:SI 1 "" ""))]
@@ -3987,72 +4021,68 @@
;; they are simpler.
(define_insn ""
- [(set (match_operand:SF 0 "nonimmediate_operand" "=r,r,m,f,f,f,m")
- (match_operand:SF 1 "input_operand" "rG,m,rG,f,G,m,fG"))]
- "! TARGET_CIX
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=f,f,r,r,m,m")
+ (match_operand:SF 1 "input_operand" "fG,m,rG,m,fG,r"))]
+ "! TARGET_FIX
&& (register_operand (operands[0], SFmode)
|| reg_or_fp0_operand (operands[1], SFmode))"
"@
- bis %r1,%r1,%0
- ldl %0,%1
- stl %r1,%0
- cpys %1,%1,%0
- cpys $f31,$f31,%0
+ fmov %R1,%0
ld%, %0,%1
- st%, %R1,%0"
- [(set_attr "type" "ilog,ild,ist,fcpys,fcpys,fld,fst")])
+ mov %r1,%0
+ ldl %0,%1
+ st%, %R1,%0
+ stl %r1,%0"
+ [(set_attr "type" "fcpys,fld,ilog,ild,fst,ist")])
(define_insn ""
- [(set (match_operand:SF 0 "nonimmediate_operand" "=r,r,m,f,f,f,m,f,*r")
- (match_operand:SF 1 "input_operand" "rG,m,rG,f,G,m,fG,r,*f"))]
- "TARGET_CIX
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=f,f,r,r,m,m,f,*r")
+ (match_operand:SF 1 "input_operand" "fG,m,rG,m,fG,r,r,*f"))]
+ "TARGET_FIX
&& (register_operand (operands[0], SFmode)
|| reg_or_fp0_operand (operands[1], SFmode))"
"@
- bis %r1,%r1,%0
- ldl %0,%1
- stl %r1,%0
- cpys %1,%1,%0
- cpys $f31,$f31,%0
+ fmov %R1,%0
ld%, %0,%1
+ mov %r1,%0
+ ldl %0,%1
st%, %R1,%0
+ stl %r1,%0
itofs %1,%0
ftois %1,%0"
- [(set_attr "type" "ilog,ild,ist,fcpys,fcpys,fld,fst,itof,ftoi")])
+ [(set_attr "type" "fcpys,fld,ilog,ild,fst,ist,itof,ftoi")])
(define_insn ""
- [(set (match_operand:DF 0 "nonimmediate_operand" "=r,r,m,f,f,f,m")
- (match_operand:DF 1 "input_operand" "rG,m,rG,f,G,m,fG"))]
- "! TARGET_CIX
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=f,f,r,r,m,m")
+ (match_operand:DF 1 "input_operand" "fG,m,rG,m,fG,r"))]
+ "! TARGET_FIX
&& (register_operand (operands[0], DFmode)
|| reg_or_fp0_operand (operands[1], DFmode))"
"@
- bis %r1,%r1,%0
- ldq %0,%1
- stq %r1,%0
- cpys %1,%1,%0
- cpys $f31,$f31,%0
+ fmov %R1,%0
ld%- %0,%1
- st%- %R1,%0"
- [(set_attr "type" "ilog,ild,ist,fcpys,fcpys,fld,fst")])
+ mov %r1,%0
+ ldq %0,%1
+ st%- %R1,%0
+ stq %r1,%0"
+ [(set_attr "type" "fcpys,fld,ilog,ild,fst,ist")])
(define_insn ""
- [(set (match_operand:DF 0 "nonimmediate_operand" "=r,r,m,f,f,f,m,f,*r")
- (match_operand:DF 1 "input_operand" "rG,m,rG,f,G,m,fG,r,*f"))]
- "TARGET_CIX
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=f,f,r,r,m,m,f,*r")
+ (match_operand:DF 1 "input_operand" "fG,m,rG,m,fG,r,r,*f"))]
+ "TARGET_FIX
&& (register_operand (operands[0], DFmode)
|| reg_or_fp0_operand (operands[1], DFmode))"
"@
- bis %r1,%r1,%0
- ldq %0,%1
- stq %r1,%0
- cpys %1,%1,%0
- cpys $f31,$f31,%0
+ fmov %R1,%0
ld%- %0,%1
+ mov %r1,%0
+ ldq %0,%1
st%- %R1,%0
+ stq %r1,%0
itoft %1,%0
ftoit %1,%0"
- [(set_attr "type" "ilog,ild,ist,fcpys,fcpys,fld,fst,itof,ftoi")])
+ [(set_attr "type" "fcpys,fld,ilog,ild,fst,ist,itof,ftoi")])
(define_expand "movsf"
[(set (match_operand:SF 0 "nonimmediate_operand" "")
@@ -4077,131 +4107,110 @@
}")
(define_insn ""
- [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r,r,r,r,m,f,f,f,m")
- (match_operand:SI 1 "input_operand" "r,J,I,K,L,m,rJ,f,J,m,fG"))]
- "! TARGET_WINDOWS_NT && ! TARGET_OPEN_VMS && ! TARGET_CIX
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r,r,m,f,f,m")
+ (match_operand:SI 1 "input_operand" "rJ,K,L,m,rJ,fJ,m,f"))]
+ "! TARGET_WINDOWS_NT && ! TARGET_OPEN_VMS && ! TARGET_FIX
&& (register_operand (operands[0], SImode)
|| reg_or_0_operand (operands[1], SImode))"
"@
- bis %1,%1,%0
- bis $31,$31,%0
- bis $31,%1,%0
+ mov %r1,%0
lda %0,%1
ldah %0,%h1
ldl %0,%1
stl %r1,%0
- cpys %1,%1,%0
- cpys $f31,$f31,%0
+ fmov %R1,%0
ld%, %0,%1
st%, %R1,%0"
- [(set_attr "type" "ilog,ilog,ilog,iadd,iadd,ild,ist,fcpys,fcpys,fld,fst")])
+ [(set_attr "type" "ilog,iadd,iadd,ild,ist,fcpys,fld,fst")])
(define_insn ""
- [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r,r,r,r,m,f,f,f,m,r,*f")
- (match_operand:SI 1 "input_operand" "r,J,I,K,L,m,rJ,f,J,m,fG,f,*r"))]
- "! TARGET_WINDOWS_NT && ! TARGET_OPEN_VMS && TARGET_CIX
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r,r,m,f,f,m,r,*f")
+ (match_operand:SI 1 "input_operand" "rJ,K,L,m,rJ,fJ,m,f,f,*r"))]
+ "! TARGET_WINDOWS_NT && ! TARGET_OPEN_VMS && TARGET_FIX
&& (register_operand (operands[0], SImode)
|| reg_or_0_operand (operands[1], SImode))"
"@
- bis %1,%1,%0
- bis $31,$31,%0
- bis $31,%1,%0
+ mov %r1,%0
lda %0,%1
ldah %0,%h1
ldl %0,%1
stl %r1,%0
- cpys %1,%1,%0
- cpys $f31,$f31,%0
+ fmov %R1,%0
ld%, %0,%1
st%, %R1,%0
ftois %1,%0
itofs %1,%0"
- [(set_attr "type" "ilog,ilog,ilog,iadd,iadd,ild,ist,fcpys,fcpys,fld,fst,ftoi,itof")])
+ [(set_attr "type" "ilog,iadd,iadd,ild,ist,fcpys,fld,fst,ftoi,itof")])
(define_insn ""
- [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r,r,r,r,r,m,f,f,f,m")
- (match_operand:SI 1 "input_operand" "r,J,I,K,L,s,m,rJ,f,J,m,fG"))]
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r,r,r,m,f,f,m")
+ (match_operand:SI 1 "input_operand" "rJ,K,L,s,m,rJ,fJ,m,f"))]
"(TARGET_WINDOWS_NT || TARGET_OPEN_VMS)
&& (register_operand (operands[0], SImode)
|| reg_or_0_operand (operands[1], SImode))"
"@
- bis %1,%1,%0
- bis $31,$31,%0
- bis $31,%1,%0
+ mov %1,%0
lda %0,%1
ldah %0,%h1
lda %0,%1
ldl %0,%1
stl %r1,%0
- cpys %1,%1,%0
- cpys $f31,$f31,%0
+ fmov %R1,%0
ld%, %0,%1
st%, %R1,%0"
- [(set_attr "type" "ilog,ilog,ilog,iadd,iadd,ldsym,ild,ist,fcpys,fcpys,fld,fst")])
+ [(set_attr "type" "ilog,iadd,iadd,ldsym,ild,ist,fcpys,fld,fst")])
(define_insn ""
- [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,r,r,f,f")
- (match_operand:HI 1 "input_operand" "r,J,I,n,f,J"))]
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,f")
+ (match_operand:HI 1 "input_operand" "rJ,n,fJ"))]
"! TARGET_BWX
&& (register_operand (operands[0], HImode)
|| register_operand (operands[1], HImode))"
"@
- bis %1,%1,%0
- bis $31,$31,%0
- bis $31,%1,%0
+ mov %r1,%0
lda %0,%L1
- cpys %1,%1,%0
- cpys $f31,$f31,%0"
- [(set_attr "type" "ilog,ilog,ilog,iadd,fcpys,fcpys")])
+ fmov %R1,%0"
+ [(set_attr "type" "ilog,iadd,fcpys")])
(define_insn ""
- [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,r,r,r,m,f,f")
- (match_operand:HI 1 "input_operand" "r,J,I,n,m,rJ,f,J"))]
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,r,m,f")
+ (match_operand:HI 1 "input_operand" "rJ,n,m,rJ,fJ"))]
"TARGET_BWX
&& (register_operand (operands[0], HImode)
|| reg_or_0_operand (operands[1], HImode))"
"@
- bis %1,%1,%0
- bis $31,$31,%0
- bis $31,%1,%0
+ mov %r1,%0
lda %0,%L1
ldwu %0,%1
stw %r1,%0
- cpys %1,%1,%0
- cpys $f31,$f31,%0"
- [(set_attr "type" "ilog,ilog,ilog,iadd,ild,ist,fcpys,fcpys")])
+ fmov %R1,%0"
+ [(set_attr "type" "ilog,iadd,ild,ist,fcpys")])
(define_insn ""
- [(set (match_operand:QI 0 "nonimmediate_operand" "=r,r,r,r,f,f")
- (match_operand:QI 1 "input_operand" "r,J,I,n,f,J"))]
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=r,r,f")
+ (match_operand:QI 1 "input_operand" "rJ,n,fJ"))]
"! TARGET_BWX
&& (register_operand (operands[0], QImode)
|| register_operand (operands[1], QImode))"
"@
- bis %1,%1,%0
- bis $31,$31,%0
- bis $31,%1,%0
+ mov %r1,%0
lda %0,%L1
- cpys %1,%1,%0
- cpys $f31,$f31,%0"
- [(set_attr "type" "ilog,ilog,ilog,iadd,fcpys,fcpys")])
+ fmov %R1,%0"
+ [(set_attr "type" "ilog,iadd,fcpys")])
(define_insn ""
- [(set (match_operand:QI 0 "nonimmediate_operand" "=r,r,r,r,r,m,f,f")
- (match_operand:QI 1 "input_operand" "r,J,I,n,m,rJ,f,J"))]
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=r,r,r,m,f")
+ (match_operand:QI 1 "input_operand" "rJ,n,m,rJ,fJ"))]
"TARGET_BWX
&& (register_operand (operands[0], QImode)
|| reg_or_0_operand (operands[1], QImode))"
"@
- bis %1,%1,%0
- bis $31,$31,%0
- bis $31,%1,%0
+ mov %r1,%0
lda %0,%L1
ldbu %0,%1
stb %r1,%0
- cpys %1,%1,%0
- cpys $f31,$f31,%0"
- [(set_attr "type" "ilog,ilog,ilog,iadd,ild,ist,fcpys,fcpys")])
+ fmov %R1,%0"
+ [(set_attr "type" "ilog,iadd,ild,ist,fcpys")])
;; We do two major things here: handle mem->mem and construct long
;; constants.
@@ -4247,48 +4256,42 @@
}")
(define_insn ""
- [(set (match_operand:DI 0 "general_operand" "=r,r,r,r,r,r,r,m,f,f,f,Q")
- (match_operand:DI 1 "input_operand" "r,J,I,K,L,s,m,rJ,f,J,Q,fG"))]
- "! TARGET_CIX
+ [(set (match_operand:DI 0 "general_operand" "=r,r,r,r,r,m,f,f,Q")
+ (match_operand:DI 1 "input_operand" "rJ,K,L,s,m,rJ,fJ,Q,f"))]
+ "! TARGET_FIX
&& (register_operand (operands[0], DImode)
|| reg_or_0_operand (operands[1], DImode))"
"@
- bis %1,%1,%0
- bis $31,$31,%0
- bis $31,%1,%0
+ mov %r1,%0
lda %0,%1
ldah %0,%h1
lda %0,%1
ldq%A1 %0,%1
stq%A0 %r1,%0
- cpys %1,%1,%0
- cpys $f31,$f31,%0
+ fmov %R1,%0
ldt %0,%1
stt %R1,%0"
- [(set_attr "type" "ilog,ilog,ilog,iadd,iadd,ldsym,ild,ist,fcpys,fcpys,fld,fst")])
+ [(set_attr "type" "ilog,iadd,iadd,ldsym,ild,ist,fcpys,fld,fst")])
(define_insn ""
- [(set (match_operand:DI 0 "general_operand" "=r,r,r,r,r,r,r,m,f,f,f,Q,r,*f")
- (match_operand:DI 1 "input_operand" "r,J,I,K,L,s,m,rJ,f,J,Q,fG,f,*r"))]
- "TARGET_CIX
+ [(set (match_operand:DI 0 "general_operand" "=r,r,r,r,r,m,f,f,Q,r,*f")
+ (match_operand:DI 1 "input_operand" "rJ,K,L,s,m,rJ,fJ,Q,f,f,*r"))]
+ "TARGET_FIX
&& (register_operand (operands[0], DImode)
|| reg_or_0_operand (operands[1], DImode))"
"@
- bis %1,%1,%0
- bis $31,$31,%0
- bis $31,%1,%0
+ mov %r1,%0
lda %0,%1
ldah %0,%h1
lda %0,%1
ldq%A1 %0,%1
stq%A0 %r1,%0
- cpys %1,%1,%0
- cpys $f31,$f31,%0
+ fmov %R1,%0
ldt %0,%1
stt %R1,%0
ftoit %1,%0
itoft %1,%0"
- [(set_attr "type" "ilog,ilog,ilog,iadd,iadd,ldsym,ild,ist,fcpys,fcpys,fld,fst,ftoi,itof")])
+ [(set_attr "type" "ilog,iadd,iadd,ldsym,ild,ist,fcpys,fld,fst,ftoi,itof")])
;; We do three major things here: handle mem->mem, put 64-bit constants in
;; memory, and construct long 32-bit constants.
@@ -4320,24 +4323,31 @@
{
if (TARGET_BUILD_CONSTANTS)
{
-#if HOST_BITS_PER_WIDE_INT == 64
- HOST_WIDE_INT i;
+ HOST_WIDE_INT i0, i1;
if (GET_CODE (operands[1]) == CONST_INT)
- i = INTVAL (operands[1]);
+ {
+ i0 = INTVAL (operands[1]);
+ i1 = -(i0 < 0);
+ }
else if (GET_CODE (operands[1]) == CONST_DOUBLE)
- i = CONST_DOUBLE_LOW (operands[1]);
+ {
+#if HOST_BITS_PER_WIDE_INT >= 64
+ i0 = CONST_DOUBLE_LOW (operands[1]);
+ i1 = -(i0 < 0);
+#else
+ i0 = CONST_DOUBLE_LOW (operands[1]);
+ i1 = CONST_DOUBLE_HIGH (operands[1]);
+#endif
+ }
else
abort();
- tem = alpha_emit_set_long_const (operands[0], i);
+ tem = alpha_emit_set_long_const (operands[0], i0, i1);
if (rtx_equal_p (tem, operands[0]))
DONE;
else
operands[1] = tem;
-#else
- abort();
-#endif
}
else
{
@@ -4553,15 +4563,22 @@
{
if (aligned_memory_operand (operands[1], QImode))
{
- rtx aligned_mem, bitnum;
- rtx scratch = (reload_in_progress
- ? gen_rtx_REG (SImode, REGNO (operands[0]))
- : gen_reg_rtx (SImode));
+ if (reload_in_progress)
+ {
+ emit_insn (gen_reload_inqi_help
+ (operands[0], operands[1],
+ gen_rtx_REG (SImode, REGNO (operands[0]))));
+ }
+ else
+ {
+ rtx aligned_mem, bitnum;
+ rtx scratch = gen_reg_rtx (SImode);
- get_aligned_mem (operands[1], &aligned_mem, &bitnum);
+ get_aligned_mem (operands[1], &aligned_mem, &bitnum);
- emit_insn (gen_aligned_loadqi (operands[0], aligned_mem, bitnum,
- scratch));
+ emit_insn (gen_aligned_loadqi (operands[0], aligned_mem, bitnum,
+ scratch));
+ }
}
else
{
@@ -4610,7 +4627,7 @@
rtx temp3 = gen_reg_rtx (DImode);
rtx seq
= gen_unaligned_storeqi (get_unaligned_address (operands[0], 0),
- operands[1], temp1, temp2, temp3);
+ operands[1], temp1, temp2, temp3);
alpha_set_memflags (seq, operands[0]);
emit_insn (seq);
@@ -4664,15 +4681,22 @@
{
if (aligned_memory_operand (operands[1], HImode))
{
- rtx aligned_mem, bitnum;
- rtx scratch = (reload_in_progress
- ? gen_rtx_REG (SImode, REGNO (operands[0]))
- : gen_reg_rtx (SImode));
+ if (reload_in_progress)
+ {
+ emit_insn (gen_reload_inhi_help
+ (operands[0], operands[1],
+ gen_rtx_REG (SImode, REGNO (operands[0]))));
+ }
+ else
+ {
+ rtx aligned_mem, bitnum;
+ rtx scratch = gen_reg_rtx (SImode);
- get_aligned_mem (operands[1], &aligned_mem, &bitnum);
+ get_aligned_mem (operands[1], &aligned_mem, &bitnum);
- emit_insn (gen_aligned_loadhi (operands[0], aligned_mem, bitnum,
- scratch));
+ emit_insn (gen_aligned_loadhi (operands[0], aligned_mem, bitnum,
+ scratch));
+ }
}
else
{
@@ -4738,48 +4762,76 @@
(define_expand "reload_inqi"
[(parallel [(match_operand:QI 0 "register_operand" "=r")
- (match_operand:QI 1 "unaligned_memory_operand" "m")
+ (match_operand:QI 1 "any_memory_operand" "m")
(match_operand:TI 2 "register_operand" "=&r")])]
"! TARGET_BWX"
"
{
- rtx addr = get_unaligned_address (operands[1], 0);
+ rtx scratch, seq;
- /* It is possible that one of the registers we got for operands[2]
- might coincide with that of operands[0] (which is why we made
- it TImode). Pick the other one to use as our scratch. */
- rtx scratch = gen_rtx_REG (DImode,
- REGNO (operands[0]) == REGNO (operands[2])
- ? REGNO (operands[2]) + 1 : REGNO (operands[2]));
+ if (GET_CODE (operands[1]) != MEM)
+ abort ();
- rtx seq = gen_unaligned_loadqi (operands[0], addr, scratch,
- gen_rtx_REG (DImode, REGNO (operands[0])));
+ if (aligned_memory_operand (operands[1], QImode))
+ {
+ seq = gen_reload_inqi_help (operands[0], operands[1],
+ gen_rtx_REG (SImode, REGNO (operands[2])));
+ }
+ else
+ {
+ rtx addr;
+
+ /* It is possible that one of the registers we got for operands[2]
+ might coincide with that of operands[0] (which is why we made
+ it TImode). Pick the other one to use as our scratch. */
+ if (REGNO (operands[0]) == REGNO (operands[2]))
+ scratch = gen_rtx_REG (DImode, REGNO (operands[2]) + 1);
+ else
+ scratch = gen_rtx_REG (DImode, REGNO (operands[2]));
- alpha_set_memflags (seq, operands[1]);
+ addr = get_unaligned_address (operands[1], 0);
+ seq = gen_unaligned_loadqi (operands[0], addr, scratch,
+ gen_rtx_REG (DImode, REGNO (operands[0])));
+ alpha_set_memflags (seq, operands[1]);
+ }
emit_insn (seq);
DONE;
}")
(define_expand "reload_inhi"
[(parallel [(match_operand:HI 0 "register_operand" "=r")
- (match_operand:HI 1 "unaligned_memory_operand" "m")
+ (match_operand:HI 1 "any_memory_operand" "m")
(match_operand:TI 2 "register_operand" "=&r")])]
"! TARGET_BWX"
"
{
- rtx addr = get_unaligned_address (operands[1], 0);
+ rtx scratch, seq;
- /* It is possible that one of the registers we got for operands[2]
- might coincide with that of operands[0] (which is why we made
- it TImode). Pick the other one to use as our scratch. */
- rtx scratch = gen_rtx_REG (DImode,
- REGNO (operands[0]) == REGNO (operands[2])
- ? REGNO (operands[2]) + 1 : REGNO (operands[2]));
+ if (GET_CODE (operands[1]) != MEM)
+ abort ();
- rtx seq = gen_unaligned_loadhi (operands[0], addr, scratch,
- gen_rtx_REG (DImode, REGNO (operands[0])));
+ if (aligned_memory_operand (operands[1], HImode))
+ {
+ seq = gen_reload_inhi_help (operands[0], operands[1],
+ gen_rtx_REG (SImode, REGNO (operands[2])));
+ }
+ else
+ {
+ rtx addr;
+
+ /* It is possible that one of the registers we got for operands[2]
+ might coincide with that of operands[0] (which is why we made
+ it TImode). Pick the other one to use as our scratch. */
+ if (REGNO (operands[0]) == REGNO (operands[2]))
+ scratch = gen_rtx_REG (DImode, REGNO (operands[2]) + 1);
+ else
+ scratch = gen_rtx_REG (DImode, REGNO (operands[2]));
- alpha_set_memflags (seq, operands[1]);
+ addr = get_unaligned_address (operands[1], 0);
+ seq = gen_unaligned_loadhi (operands[0], addr, scratch,
+ gen_rtx_REG (DImode, REGNO (operands[0])));
+ alpha_set_memflags (seq, operands[1]);
+ }
emit_insn (seq);
DONE;
}")
@@ -4791,16 +4843,15 @@
"! TARGET_BWX"
"
{
+ if (GET_CODE (operands[0]) != MEM)
+ abort ();
+
if (aligned_memory_operand (operands[0], QImode))
{
- rtx aligned_mem, bitnum;
-
- get_aligned_mem (operands[0], &aligned_mem, &bitnum);
-
- emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
- gen_rtx_REG (SImode, REGNO (operands[2])),
- gen_rtx_REG (SImode,
- REGNO (operands[2]) + 1)));
+ emit_insn (gen_reload_outqi_help
+ (operands[0], operands[1],
+ gen_rtx_REG (SImode, REGNO (operands[2])),
+ gen_rtx_REG (SImode, REGNO (operands[2]) + 1)));
}
else
{
@@ -4818,7 +4869,6 @@
alpha_set_memflags (seq, operands[0]);
emit_insn (seq);
}
-
DONE;
}")
@@ -4829,16 +4879,15 @@
"! TARGET_BWX"
"
{
+ if (GET_CODE (operands[0]) != MEM)
+ abort ();
+
if (aligned_memory_operand (operands[0], HImode))
{
- rtx aligned_mem, bitnum;
-
- get_aligned_mem (operands[0], &aligned_mem, &bitnum);
-
- emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
- gen_rtx_REG (SImode, REGNO (operands[2])),
- gen_rtx_REG (SImode,
- REGNO (operands[2]) + 1)));
+ emit_insn (gen_reload_outhi_help
+ (operands[0], operands[1],
+ gen_rtx_REG (SImode, REGNO (operands[2])),
+ gen_rtx_REG (SImode, REGNO (operands[2]) + 1)));
}
else
{
@@ -4856,7 +4905,102 @@
alpha_set_memflags (seq, operands[0]);
emit_insn (seq);
}
+ DONE;
+}")
+
+;; Helpers for the above. The way reload is structured, we can't
+;; always get a proper address for a stack slot during reload_foo
+;; expansion, so we must delay our address manipulations until after.
+
+(define_insn "reload_inqi_help"
+ [(set (match_operand:QI 0 "register_operand" "r")
+ (match_operand:QI 1 "memory_operand" "m"))
+ (clobber (match_operand:SI 2 "register_operand" "r"))]
+ "! TARGET_BWX && (reload_in_progress || reload_completed)"
+ "#")
+
+(define_insn "reload_inhi_help"
+ [(set (match_operand:HI 0 "register_operand" "r")
+ (match_operand:HI 1 "memory_operand" "m"))
+ (clobber (match_operand:SI 2 "register_operand" "r"))]
+ "! TARGET_BWX && (reload_in_progress || reload_completed)"
+ "#")
+(define_insn "reload_outqi_help"
+ [(set (match_operand:QI 0 "memory_operand" "m")
+ (match_operand:QI 1 "register_operand" "r"))
+ (clobber (match_operand:SI 2 "register_operand" "r"))
+ (clobber (match_operand:SI 3 "register_operand" "r"))]
+ "! TARGET_BWX && (reload_in_progress || reload_completed)"
+ "#")
+
+(define_insn "reload_outhi_help"
+ [(set (match_operand:HI 0 "memory_operand" "m")
+ (match_operand:HI 1 "register_operand" "r"))
+ (clobber (match_operand:SI 2 "register_operand" "r"))
+ (clobber (match_operand:SI 3 "register_operand" "r"))]
+ "! TARGET_BWX && (reload_in_progress || reload_completed)"
+ "#")
+
+(define_split
+ [(set (match_operand:QI 0 "register_operand" "r")
+ (match_operand:QI 1 "memory_operand" "m"))
+ (clobber (match_operand:SI 2 "register_operand" "r"))]
+ "! TARGET_BWX && reload_completed"
+ [(const_int 0)]
+ "
+{
+ rtx aligned_mem, bitnum;
+ get_aligned_mem (operands[1], &aligned_mem, &bitnum);
+ emit_insn (gen_aligned_loadqi (operands[0], aligned_mem, bitnum,
+ operands[2]));
+ DONE;
+}")
+
+(define_split
+ [(set (match_operand:HI 0 "register_operand" "r")
+ (match_operand:HI 1 "memory_operand" "m"))
+ (clobber (match_operand:SI 2 "register_operand" "r"))]
+ "! TARGET_BWX && reload_completed"
+ [(const_int 0)]
+ "
+{
+ rtx aligned_mem, bitnum;
+ get_aligned_mem (operands[1], &aligned_mem, &bitnum);
+ emit_insn (gen_aligned_loadhi (operands[0], aligned_mem, bitnum,
+ operands[2]));
+ DONE;
+}")
+
+(define_split
+ [(set (match_operand:QI 0 "memory_operand" "m")
+ (match_operand:QI 1 "register_operand" "r"))
+ (clobber (match_operand:SI 2 "register_operand" "r"))
+ (clobber (match_operand:SI 3 "register_operand" "r"))]
+ "! TARGET_BWX && reload_completed"
+ [(const_int 0)]
+ "
+{
+ rtx aligned_mem, bitnum;
+ get_aligned_mem (operands[0], &aligned_mem, &bitnum);
+ emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
+ operands[2], operands[3]));
+ DONE;
+}")
+
+(define_split
+ [(set (match_operand:HI 0 "memory_operand" "m")
+ (match_operand:HI 1 "register_operand" "r"))
+ (clobber (match_operand:SI 2 "register_operand" "r"))
+ (clobber (match_operand:SI 3 "register_operand" "r"))]
+ "! TARGET_BWX && reload_completed"
+ [(const_int 0)]
+ "
+{
+ rtx aligned_mem, bitnum;
+ get_aligned_mem (operands[0], &aligned_mem, &bitnum);
+ emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
+ operands[2], operands[3]));
DONE;
}")
@@ -5080,29 +5224,14 @@
""
"*
{
- static int label_no;
- int count_regno = REGNO (operands[0]);
- int ptr_regno = REGNO (operands[1]);
- char label[64];
-
- /* Ho hum, output the hard way to get the label at the beginning of
- the line. Wish there were a magic char you could get
- asm_output_printf to do that. Then we could use %= as well and
- get rid of the label_no bits here too. */
-
- ASM_GENERATE_INTERNAL_LABEL (label, \"LSC\", label_no);
- ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, \"LSC\", label_no++);
-
- fprintf (asm_out_file, \"\\tstq $31,-8192($%d)\\n\", ptr_regno);
- fprintf (asm_out_file, \"\\tsubq $%d,1,$%d\\n\", count_regno, count_regno);
- fprintf (asm_out_file, \"\\tlda $%d,-8192($%d)\\n\", ptr_regno, ptr_regno);
- fprintf (asm_out_file, \"\\tbne $%d,\", count_regno);
- assemble_name (asm_out_file, label);
- putc ('\\n', asm_out_file);
-
- return \"\";
+ operands[2] = gen_label_rtx ();
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, \"L\",
+ CODE_LABEL_NUMBER (operands[2]));
+
+ return \"stq $31,-8192(%1)\;subq %0,1,%0\;lda %1,-8192(%1)\;bne %0,%l2\";
}"
- [(set_attr "length" "16")])
+ [(set_attr "length" "16")
+ (set_attr "type" "multi")])
(define_expand "prologue"
[(clobber (const_int 0))]
@@ -5114,13 +5243,39 @@
(match_operand:DI 1 "register_operand" "r"))
(clobber (mem:BLK (match_operand:DI 2 "register_operand" "r")))]
""
- "bis %1,%1,%0")
+ "mov %1,%0")
(define_expand "epilogue"
[(clobber (const_int 0))]
""
"alpha_expand_epilogue (); DONE;")
+(define_expand "eh_epilogue"
+ [(use (match_operand:DI 0 "register_operand" "r"))
+ (use (match_operand:DI 1 "register_operand" "r"))
+ (use (match_operand:DI 2 "register_operand" "r"))]
+ "! TARGET_OPEN_VMS"
+ "
+{
+ alpha_eh_epilogue_sp_ofs = operands[1];
+ if (GET_CODE (operands[2]) != REG || REGNO (operands[2]) != 26)
+ {
+ rtx ra = gen_rtx_REG (Pmode, 26);
+ emit_move_insn (ra, operands[2]);
+ operands[2] = ra;
+ }
+}")
+
+;; In creating a large stack frame, NT _must_ use ldah+lda to load
+;; the frame size into a register. We use this pattern to ensure
+;; we get lda instead of addq.
+(define_insn "nt_lda"
+ [(set (match_operand:DI 0 "register_operand" "r")
+ (unspec:DI [(match_dup 0)
+ (match_operand:DI 1 "const_int_operand" "n")] 6))]
+ ""
+ "lda %0,%1(%0)")
+
(define_expand "builtin_longjmp"
[(unspec_volatile [(match_operand 0 "register_operand" "r")] 3)]
"! TARGET_OPEN_VMS && ! TARGET_WINDOWS_NT"
@@ -5143,19 +5298,29 @@
where to look for it when we get back to setjmp's function for
restoring the gp. */
emit_indirect_jump (pv);
+ DONE;
}")
(define_insn "builtin_setjmp_receiver"
- [(unspec_volatile [(match_operand 0 "" "")] 2)]
+ [(unspec_volatile [(label_ref (match_operand 0 "" ""))] 2)]
"! TARGET_OPEN_VMS && ! TARGET_WINDOWS_NT && TARGET_AS_CAN_SUBTRACT_LABELS"
"\\n$LSJ%=:\;ldgp $29,$LSJ%=-%l0($27)"
- [(set_attr "length" "8")])
+ [(set_attr "length" "8")
+ (set_attr "type" "multi")])
(define_insn ""
- [(unspec_volatile [(match_operand 0 "" "")] 2)]
+ [(unspec_volatile [(label_ref (match_operand 0 "" ""))] 2)]
"! TARGET_OPEN_VMS && ! TARGET_WINDOWS_NT"
- "br $27,$LSJ%=\\n$LSJ%=:\;ldgp $29,0($27)"
- [(set_attr "length" "12")])
+ "br $29,$LSJ%=\\n$LSJ%=:\;ldgp $29,0($29)"
+ [(set_attr "length" "12")
+ (set_attr "type" "multi")])
+
+(define_insn "exception_receiver"
+ [(unspec_volatile [(const_int 0)] 7)]
+ "! TARGET_OPEN_VMS && ! TARGET_WINDOWS_NT"
+ "br $29,$LSJ%=\\n$LSJ%=:\;ldgp $29,0($29)"
+ [(set_attr "length" "12")
+ (set_attr "type" "multi")])
(define_expand "nonlocal_goto_receiver"
[(unspec_volatile [(const_int 0)] 1)
@@ -5187,7 +5352,8 @@
(clobber (reg:DI 0))]
"TARGET_OPEN_VMS"
"lda $0,OTS$HOME_ARGS\;ldq $0,8($0)\;jsr $0,OTS$HOME_ARGS"
- [(set_attr "length" "16")])
+ [(set_attr "length" "16")
+ (set_attr "type" "multi")])
;; Close the trap shadow of preceding instructions. This is generated
;; by alpha_reorg.
@@ -5197,6 +5363,31 @@
""
"trapb"
[(set_attr "type" "misc")])
+
+;; No-op instructions used by machine-dependent reorg to preserve
+;; alignment for instruction issue.
+
+(define_insn "nop"
+ [(const_int 0)]
+ ""
+ "nop"
+ [(set_attr "type" "ilog")])
+
+(define_insn "fnop"
+ [(const_int 1)]
+ "TARGET_FP"
+ "fnop"
+ [(set_attr "type" "fcpys")])
+
+(define_insn "unop"
+ [(const_int 2)]
+ ""
+ "unop")
+
+(define_insn "realign"
+ [(unspec_volatile [(match_operand 0 "immediate_operand" "i")] 6)]
+ ""
+ ".align %0 #realign")
;; Peepholes go at the end.
@@ -5220,5 +5411,5 @@
; (match_operand:SI 1 "hard_fp_register_operand" "f"))
; (set (match_operand:DI 2 "register_operand" "=r")
; (sign_extend:DI (match_dup 0)))]
-; "TARGET_CIX && dead_or_set_p (insn, operands[0])"
+; "TARGET_FIX && dead_or_set_p (insn, operands[0])"
; "ftois %1,%2")
diff --git a/contrib/gcc/config/alpha/alpha32.h b/contrib/gcc/config/alpha/alpha32.h
new file mode 100644
index 0000000..3cbcfe1
--- /dev/null
+++ b/contrib/gcc/config/alpha/alpha32.h
@@ -0,0 +1,104 @@
+/* Definitions of target machine for GNU compiler, for DEC Alpha
+ running Windows/NT.
+ Copyright (C) 1995, 1996, 1998, 1999 Free Software Foundation, Inc.
+
+ Derived from code
+ Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ Donn Terry, Softway Systems, Inc.
+
+ This file contains the code-generation stuff common to the 32-bit
+ versions of the DEC/Compaq Alpha architecture. It is shared by
+ Interix and NT/Win32 ports. It should not contain compile-time
+ or run-time dependent environment values (such as compiler options
+ or anything containing a file or pathname.)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#undef TARGET_WINDOWS_NT
+#define TARGET_WINDOWS_NT 1
+
+/* WinNT (and thus Interix) use unsigned int */
+#define SIZE_TYPE "unsigned int"
+
+/* Pointer is 32 bits but the hardware has 64-bit addresses, sign extended. */
+#undef POINTER_SIZE
+#define POINTER_SIZE 32
+#define POINTERS_EXTEND_UNSIGNED 0
+
+/* We don't change Pmode to the "obvious" SI mode... the above appears
+ to affect the in-memory size; we want the registers to stay DImode
+ to match the md file */
+
+/* "long" is 32 bits. */
+#undef LONG_TYPE_SIZE
+#define LONG_TYPE_SIZE 32
+
+
+/* Output assembler code for a block containing the constant parts
+ of a trampoline, leaving space for the variable parts.
+
+ The trampoline should set the static chain pointer to value placed
+ into the trampoline and should branch to the specified routine. */
+
+#undef TRAMPOLINE_TEMPLATE
+#define TRAMPOLINE_TEMPLATE(FILE) \
+{ \
+ fprintf (FILE, "\tbr $27,$LTRAMPP\n"); \
+ fprintf (FILE, "$LTRAMPP:\n\tldl $1,12($27)\n"); \
+ fprintf (FILE, "\tldl $27,16($27)\n"); \
+ fprintf (FILE, "\tjmp $31,($27),0\n"); \
+ fprintf (FILE, "\t.long 0,0\n"); \
+}
+
+/* Length in units of the trampoline for entering a nested function. */
+
+#undef TRAMPOLINE_SIZE
+#define TRAMPOLINE_SIZE 24
+
+/* Emit RTL insns to initialize the variable parts of a trampoline.
+ FNADDR is an RTX for the address of the function's pure code.
+ CXT is an RTX for the static chain value for the function. */
+
+#undef INITIALIZE_TRAMPOLINE
+#define INITIALIZE_TRAMPOLINE(TRAMP, FNADDR, CXT) \
+ alpha_initialize_trampoline (TRAMP, FNADDR, CXT, 20, 16, 12)
+
+/* Output code to add DELTA to the first argument, and then jump to FUNCTION.
+ Used for C++ multiple inheritance. */
+
+#undef ASM_OUTPUT_MI_THUNK
+#define ASM_OUTPUT_MI_THUNK(FILE, THUNK_FNDECL, DELTA, FUNCTION) \
+do { \
+ char *op, *fn_name = XSTR (XEXP (DECL_RTL (FUNCTION), 0), 0); \
+ int reg; \
+ \
+ /* Mark end of prologue. */ \
+ output_end_prologue (FILE); \
+ \
+ /* Rely on the assembler to macro expand a large delta. */ \
+ reg = aggregate_value_p (TREE_TYPE (TREE_TYPE (FUNCTION))) ? 17 : 16; \
+ fprintf (FILE, "\tlda $%d,%ld($%d)\n", reg, (long)(DELTA), reg); \
+ \
+ op = "jsr"; \
+ if (current_file_function_operand (XEXP (DECL_RTL (FUNCTION), 0))) \
+ op = "br"; \
+ fprintf (FILE, "\t%s $31,", op); \
+ assemble_name (FILE, fn_name); \
+ fputc ('\n', FILE); \
+} while (0)
diff --git a/contrib/gcc/config/alpha/crtbegin.asm b/contrib/gcc/config/alpha/crtbegin.asm
index c28440d..f954f1a 100644
--- a/contrib/gcc/config/alpha/crtbegin.asm
+++ b/contrib/gcc/config/alpha/crtbegin.asm
@@ -50,6 +50,8 @@ __CTOR_LIST__:
__DTOR_LIST__:
.quad -1
+.section .eh_frame,"aw"
+__EH_FRAME_BEGIN__:
#
# Fragment of the ELF _fini routine that invokes our dtor cleanup.
@@ -67,18 +69,33 @@ __DTOR_LIST__:
1: ldgp $29,0($29)
jsr $26,__do_global_dtors_aux
+ # Ideally this call would go in crtend.o, except that we can't
+ # get hold of __EH_FRAME_BEGIN__ there.
+
+ jsr $26,__do_frame_takedown
+
# Must match the alignment we got from crti.o else we get
# zero-filled holes in our _fini function and then SIGILL.
.align 3
#
+ # Fragment of the ELF _init routine that sets up the frame info.
+ #
+
+.section .init,"ax"
+ br $29,1f
+1: ldgp $29,0($29)
+ jsr $26,__do_frame_setup
+ .align 3
+
+ #
# Invoke our destructors in order.
#
.data
# Support recursive calls to exit.
-9: .quad __DTOR_LIST__
+$ptr: .quad __DTOR_LIST__
.text
@@ -86,15 +103,14 @@ __DTOR_LIST__:
.ent __do_global_dtors_aux
__do_global_dtors_aux:
- ldgp $29,0($27)
lda $30,-16($30)
.frame $30,16,$26,0
stq $9,8($30)
stq $26,0($30)
.mask 0x4000200,-16
- .prologue 1
+ .prologue 0
- lda $9,9b
+ lda $9,$ptr
br 1f
0: stq $1,0($9)
jsr $26,($27)
@@ -109,3 +125,68 @@ __do_global_dtors_aux:
ret
.end __do_global_dtors_aux
+
+ #
+ # Install our frame info.
+ #
+
+ # ??? How can we rationally keep this size correct?
+
+.section .bss
+ .type $object,@object
+ .align 3
+$object:
+ .zero 48
+ .size $object, 48
+
+.text
+
+ .align 3
+ .ent __do_frame_setup
+
+__do_frame_setup:
+ ldgp $29,0($27)
+ lda $30,-16($30)
+ .frame $30,16,$26,0
+ stq $26,0($30)
+ .mask 0x4000000,-16
+ .prologue 1
+
+ lda $1,__register_frame_info
+ beq $1,0f
+ lda $16,__EH_FRAME_BEGIN__
+ lda $17,$object
+ jsr $26,__register_frame_info
+ ldq $26,0($30)
+0: lda $30,16($30)
+ ret
+
+ .end __do_frame_setup
+
+ #
+ # Remove our frame info.
+ #
+
+ .align 3
+ .ent __do_frame_takedown
+
+__do_frame_takedown:
+ ldgp $29,0($27)
+ lda $30,-16($30)
+ .frame $30,16,$26,0
+ stq $26,0($30)
+ .mask 0x4000000,-16
+ .prologue 1
+
+ lda $1,__deregister_frame_info
+ beq $1,0f
+ lda $16,__EH_FRAME_BEGIN__
+ jsr $26,__deregister_frame_info
+ ldq $26,0($30)
+0: lda $30,16($30)
+ ret
+
+ .end __do_frame_takedown
+
+.weak __register_frame_info
+.weak __deregister_frame_info
diff --git a/contrib/gcc/config/alpha/crtend.asm b/contrib/gcc/config/alpha/crtend.asm
index 36f11b9..4a0cc5e 100644
--- a/contrib/gcc/config/alpha/crtend.asm
+++ b/contrib/gcc/config/alpha/crtend.asm
@@ -50,6 +50,9 @@ __CTOR_END__:
__DTOR_END__:
.quad 0
+.section .eh_frame,"aw"
+__FRAME_END__:
+ .quad 0
#
# Fragment of the ELF _init routine that invokes our ctor startup
diff --git a/contrib/gcc/config/alpha/elf.h b/contrib/gcc/config/alpha/elf.h
index 4f4703c..6cea3da 100644
--- a/contrib/gcc/config/alpha/elf.h
+++ b/contrib/gcc/config/alpha/elf.h
@@ -1,5 +1,5 @@
/* Definitions of target machine for GNU compiler, for DEC Alpha w/ELF.
- Copyright (C) 1996, 1997, 1998 Free Software Foundation, Inc.
+ Copyright (C) 1996, 1997, 1998, 1999 Free Software Foundation, Inc.
Contributed by Richard Henderson (rth@tamu.edu).
This file is part of GNU CC.
@@ -24,6 +24,7 @@ Boston, MA 02111-1307, USA. */
#define OBJECT_FORMAT_ELF
#define DBX_DEBUGGING_INFO
+#define DWARF2_DEBUGGING_INFO
#undef PREFERRED_DEBUGGING_TYPE
#define PREFERRED_DEBUGGING_TYPE DBX_DEBUG
@@ -34,7 +35,7 @@ Boston, MA 02111-1307, USA. */
#define CC1_SPEC "%{G*}"
#undef ASM_SPEC
-#define ASM_SPEC "%{G*} %{relax:-relax}"
+#define ASM_SPEC "%{G*} %{relax:-relax} %{gdwarf*:-no-mdebug}"
#undef LINK_SPEC
#define LINK_SPEC "-m elf64alpha %{G*} %{relax:-relax} \
@@ -49,18 +50,21 @@ Boston, MA 02111-1307, USA. */
/* Output at beginning of assembler file. */
#undef ASM_FILE_START
#define ASM_FILE_START(FILE) \
-{ \
- alpha_write_verstamp (FILE); \
- output_file_directive (FILE, main_input_filename); \
+do { \
+ if (write_symbols != DWARF2_DEBUG) \
+ { \
+ alpha_write_verstamp (FILE); \
+ output_file_directive (FILE, main_input_filename); \
+ } \
fprintf (FILE, "\t.set noat\n"); \
- fprintf (FILE, "\t.set noreorder\n"); \
- if (TARGET_BWX | TARGET_MAX | TARGET_CIX) \
+ fprintf (FILE, "\t.set noreorder\n"); \
+ if (TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX) \
{ \
fprintf (FILE, "\t.arch %s\n", \
(alpha_cpu == PROCESSOR_EV6 ? "ev6" \
: TARGET_MAX ? "pca56" : "ev56")); \
} \
-}
+} while (0)
extern void output_file_directive ();
@@ -79,8 +83,9 @@ extern void output_file_directive ();
#else
#define ASM_FILE_END(FILE) \
do { \
- fprintf ((FILE), "\t%s\t\"GCC: (GNU) %s\"\n", \
- IDENT_ASM_OP, version_string); \
+ if (!flag_no_ident) \
+ fprintf ((FILE), "\t%s\t\"GCC: (GNU) %s\"\n", \
+ IDENT_ASM_OP, version_string); \
} while (0)
#endif
@@ -434,20 +439,23 @@ void FN () \
size_directive_output was set
by ASM_DECLARE_OBJECT_NAME when it was run for the same decl. */
-#define ASM_FINISH_DECLARE_OBJECT(FILE, DECL, TOP_LEVEL, AT_END) \
-do { \
- char *name = XSTR (XEXP (DECL_RTL (DECL), 0), 0); \
- if (!flag_inhibit_size_directive && DECL_SIZE (DECL) \
- && ! AT_END && TOP_LEVEL \
- && DECL_INITIAL (DECL) == error_mark_node \
- && !size_directive_output) \
- { \
- size_directive_output = 1; \
- fprintf (FILE, "\t%s\t ", SIZE_ASM_OP); \
- assemble_name (FILE, name); \
- fprintf (FILE, ",%d\n", int_size_in_bytes (TREE_TYPE (DECL))); \
- } \
- } while (0)
+#define ASM_FINISH_DECLARE_OBJECT(FILE, DECL, TOP_LEVEL, AT_END) \
+do { \
+ char *name = XSTR (XEXP (DECL_RTL (DECL), 0), 0); \
+ if (!flag_inhibit_size_directive && DECL_SIZE (DECL) \
+ && ! AT_END && TOP_LEVEL \
+ && DECL_INITIAL (DECL) == error_mark_node \
+ && !size_directive_output) \
+ { \
+ size_directive_output = 1; \
+ fprintf (FILE, "\t%s\t ", SIZE_ASM_OP); \
+ assemble_name (FILE, name); \
+ putc (',', FILE); \
+ fprintf (FILE, HOST_WIDE_INT_PRINT_DEC, \
+ int_size_in_bytes (TREE_TYPE (DECL))); \
+ putc ('\n', FILE); \
+ } \
+} while (0)
/* A table of byte codes used by the ASM_OUTPUT_ASCII and
ASM_OUTPUT_LIMITED_STRING macros. Each byte in the table
@@ -518,3 +526,9 @@ do { \
/* We support #pragma. */
#define HANDLE_SYSV_PRAGMA
+
+/* Undo the auto-alignment stuff from alpha.h. ELF has unaligned data
+ pseudos natively. */
+#undef UNALIGNED_SHORT_ASM_OP
+#undef UNALIGNED_INT_ASM_OP
+#undef UNALIGNED_DOUBLE_INT_ASM_OP
diff --git a/contrib/gcc/config/alpha/lib1funcs.asm b/contrib/gcc/config/alpha/lib1funcs.asm
new file mode 100644
index 0000000..e63180a
--- /dev/null
+++ b/contrib/gcc/config/alpha/lib1funcs.asm
@@ -0,0 +1,325 @@
+/* DEC Alpha division and remainder support.
+ Copyright (C) 1994, 1999 Free Software Foundation, Inc.
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 2, or (at your option) any
+later version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file with other programs, and to distribute
+those programs without any restriction coming from the use of this
+file. (The General Public License restrictions do apply in other
+respects; for example, they cover modification of the file, and
+distribution when not linked into another program.)
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+/* As a special exception, if you link this library with other files,
+ some of which are compiled with GCC, to produce an executable,
+ this library does not by itself cause the resulting executable
+ to be covered by the GNU General Public License.
+ This exception does not however invalidate any other reasons why
+ the executable file might be covered by the GNU General Public License. */
+
+/* This had to be written in assembler because the division functions
+ use a non-standard calling convention.
+
+ This file provides an implementation of __divqu, __divq, __divlu,
+ __divl, __remqu, __remq, __remlu and __reml. CPP macros control
+ the exact operation.
+
+ Operation performed: $27 := $24 o $25, clobber $28, return address to
+ caller in $23, where o one of the operations.
+
+ The following macros need to be defined:
+
+ SIZE, the number of bits, 32 or 64.
+
+ TYPE, either UNSIGNED or SIGNED
+
+ OPERATION, either DIVISION or REMAINDER
+
+ SPECIAL_CALLING_CONVENTION, 0 or 1. It is useful for debugging to
+ define this to 0. That removes the `__' prefix to make the function
+ name not collide with the existing libc.a names, and uses the
+ standard Alpha procedure calling convention.
+*/
+
+#ifndef SPECIAL_CALLING_CONVENTION
+#define SPECIAL_CALLING_CONVENTION 1
+#endif
+
+#ifdef L_divl
+#if SPECIAL_CALLING_CONVENTION
+#define FUNCTION_NAME __divl
+#else
+#define FUNCTION_NAME divl
+#endif
+#define SIZE 32
+#define TYPE SIGNED
+#define OPERATION DIVISION
+#endif
+
+#ifdef L_divlu
+#if SPECIAL_CALLING_CONVENTION
+#define FUNCTION_NAME __divlu
+#else
+#define FUNCTION_NAME divlu
+#endif
+#define SIZE 32
+#define TYPE UNSIGNED
+#define OPERATION DIVISION
+#endif
+
+#ifdef L_divq
+#if SPECIAL_CALLING_CONVENTION
+#define FUNCTION_NAME __divq
+#else
+#define FUNCTION_NAME divq
+#endif
+#define SIZE 64
+#define TYPE SIGNED
+#define OPERATION DIVISION
+#endif
+
+#ifdef L_divqu
+#if SPECIAL_CALLING_CONVENTION
+#define FUNCTION_NAME __divqu
+#else
+#define FUNCTION_NAME divqu
+#endif
+#define SIZE 64
+#define TYPE UNSIGNED
+#define OPERATION DIVISION
+#endif
+
+#ifdef L_reml
+#if SPECIAL_CALLING_CONVENTION
+#define FUNCTION_NAME __reml
+#else
+#define FUNCTION_NAME reml
+#endif
+#define SIZE 32
+#define TYPE SIGNED
+#define OPERATION REMAINDER
+#endif
+
+#ifdef L_remlu
+#if SPECIAL_CALLING_CONVENTION
+#define FUNCTION_NAME __remlu
+#else
+#define FUNCTION_NAME remlu
+#endif
+#define SIZE 32
+#define TYPE UNSIGNED
+#define OPERATION REMAINDER
+#endif
+
+#ifdef L_remq
+#if SPECIAL_CALLING_CONVENTION
+#define FUNCTION_NAME __remq
+#else
+#define FUNCTION_NAME remq
+#endif
+#define SIZE 64
+#define TYPE SIGNED
+#define OPERATION REMAINDER
+#endif
+
+#ifdef L_remqu
+#if SPECIAL_CALLING_CONVENTION
+#define FUNCTION_NAME __remqu
+#else
+#define FUNCTION_NAME remqu
+#endif
+#define SIZE 64
+#define TYPE UNSIGNED
+#define OPERATION REMAINDER
+#endif
+
+#define tmp0 $3
+#define tmp1 $28
+#define cnt $1
+#define result_sign $2
+
+#if SPECIAL_CALLING_CONVENTION
+#define N $24
+#define D $25
+#define Q RETREG
+#define RETREG $27
+#else
+#define N $16
+#define D $17
+#define Q RETREG
+#define RETREG $0
+#endif
+
+/* Misc symbols to make alpha assembler easier to read. */
+#define zero $31
+#define sp $30
+
+/* Symbols to make interface nicer. */
+#define UNSIGNED 0
+#define SIGNED 1
+#define DIVISION 0
+#define REMAINDER 1
+
+ .set noreorder
+ .set noat
+.text
+ .align 3
+ .globl FUNCTION_NAME
+ .ent FUNCTION_NAME
+FUNCTION_NAME:
+
+ .frame $30,0,$26,0
+ .prologue 0
+
+/* Under the special calling convention, we have to preserve all register
+ values but $23 and $28. */
+#if SPECIAL_CALLING_CONVENTION
+ lda sp,-64(sp)
+#if OPERATION == DIVISION
+ stq N,0(sp)
+#endif
+ stq D,8(sp)
+ stq cnt,16(sp)
+ stq result_sign,24(sp)
+ stq tmp0,32(sp)
+#endif
+
+/* If we are computing the remainder, move N to the register that is used
+ for the return value, and redefine what register is used for N. */
+#if OPERATION == REMAINDER
+ bis N,N,RETREG
+#undef N
+#define N RETREG
+#endif
+
+/* Perform conversion from 32 bit types to 64 bit types. */
+#if SIZE == 32
+#if TYPE == SIGNED
+ /* If there are problems with the signed case, add these instructions.
+ The caller should already have done this.
+ addl N,0,N # sign extend N
+ addl D,0,D # sign extend D
+ */
+#else /* UNSIGNED */
+ zap N,0xf0,N # zero extend N (caller required to sign extend)
+ zap D,0xf0,D # zero extend D
+#endif
+#endif
+
+/* Check for divide by zero. */
+ bne D,$34
+ lda $16,-2(zero)
+ call_pal 0xaa
+$34:
+
+#if TYPE == SIGNED
+#if OPERATION == DIVISION
+ xor N,D,result_sign
+#else
+ bis N,N,result_sign
+#endif
+/* Get the absolute values of N and D. */
+ subq zero,N,tmp0
+ cmovlt N,tmp0,N
+ subq zero,D,tmp0
+ cmovlt D,tmp0,D
+#endif
+
+/* Compute CNT = ceil(log2(N)) - ceil(log2(D)). This is the number of
+ divide iterations we will have to perform. Should you wish to optimize
+ this, check a few bits at a time, preferably using zap/zapnot. Be
+ careful though, this code runs fast for the most common cases, when the
+ quotient is small. */
+ bge N,$35
+ bis zero,1,cnt
+ blt D,$40
+ .align 3
+$39: addq D,D,D
+ addl cnt,1,cnt
+ bge D,$39
+ br zero,$40
+$35: cmpult N,D,tmp0
+ bis zero,zero,cnt
+ bne tmp0,$42
+ .align 3
+$44: addq D,D,D
+ cmpult N,D,tmp0
+ addl cnt,1,cnt
+ beq tmp0,$44
+$42: srl D,1,D
+$40:
+ subl cnt,1,cnt
+
+
+/* Actual divide. Could be optimized with unrolling. */
+#if OPERATION == DIVISION
+ bis zero,zero,Q
+#endif
+ blt cnt,$46
+ .align 3
+$49: cmpule D,N,tmp1
+ subq N,D,tmp0
+ srl D,1,D
+ subl cnt,1,cnt
+ cmovne tmp1,tmp0,N
+#if OPERATION == DIVISION
+ addq Q,Q,Q
+ bis Q,tmp1,Q
+#endif
+ bge cnt,$49
+$46:
+
+
+/* The result is now in RETREG. NOTE! It was written to RETREG using
+ either N or Q as a synonym! */
+
+
+/* Change the sign of the result as needed. */
+#if TYPE == SIGNED
+ subq zero,RETREG,tmp0
+ cmovlt result_sign,tmp0,RETREG
+#endif
+
+
+/* Restore clobbered registers. */
+#if SPECIAL_CALLING_CONVENTION
+#if OPERATION == DIVISION
+ ldq N,0(sp)
+#endif
+ ldq D,8(sp)
+ ldq cnt,16(sp)
+ ldq result_sign,24(sp)
+ ldq tmp0,32(sp)
+
+ lda sp,64(sp)
+#endif
+
+
+/* Sign extend an *unsigned* 32 bit result, as required by the Alpha
+ conventions. */
+#if TYPE == UNSIGNED && SIZE == 32
+ /* This could be avoided by adding some CPP hair to the divide loop.
+ It is probably not worth the added complexity. */
+ addl RETREG,0,RETREG
+#endif
+
+
+#if SPECIAL_CALLING_CONVENTION
+ ret zero,($23),1
+#else
+ ret zero,($26),1
+#endif
+ .end FUNCTION_NAME
diff --git a/contrib/gcc/config/alpha/linux-ecoff.h b/contrib/gcc/config/alpha/linux-ecoff.h
index a6cd5b2..824d028 100644
--- a/contrib/gcc/config/alpha/linux-ecoff.h
+++ b/contrib/gcc/config/alpha/linux-ecoff.h
@@ -17,13 +17,14 @@ GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING. If not, write to
-the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
#undef TARGET_VERSION
#define TARGET_VERSION fprintf (stderr, " (Alpha GNU/Linux for ECOFF)");
-#undef SUB_CPP_PREDEFINES
-#define SUB_CPP_PREDEFINES "-D__ECOFF__"
+#undef CPP_SUBTARGET_SPEC
+#define CPP_SUBTARGET_SPEC "-D__ECOFF__"
#undef LINK_SPEC
#define LINK_SPEC "-G 8 %{O*:-O3} %{!O*:-O1}"
diff --git a/contrib/gcc/config/alpha/linux-elf.h b/contrib/gcc/config/alpha/linux-elf.h
index 90009f1..fc07127 100644
--- a/contrib/gcc/config/alpha/linux-elf.h
+++ b/contrib/gcc/config/alpha/linux-elf.h
@@ -17,7 +17,8 @@ GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING. If not, write to
-the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
#undef TARGET_VERSION
#define TARGET_VERSION fprintf (stderr, " (Alpha GNU/Linux for ELF)");
diff --git a/contrib/gcc/config/alpha/linux.h b/contrib/gcc/config/alpha/linux.h
index 3791c89..b8eb9e9 100644
--- a/contrib/gcc/config/alpha/linux.h
+++ b/contrib/gcc/config/alpha/linux.h
@@ -17,14 +17,16 @@ GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING. If not, write to
-the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
#undef TARGET_DEFAULT
#define TARGET_DEFAULT (MASK_FP | MASK_FPREGS | MASK_GAS)
#undef CPP_PREDEFINES
#define CPP_PREDEFINES \
-"-Dlinux -Dunix -Asystem(linux) -D_LONGLONG -D__alpha__ " SUB_CPP_PREDEFINES
+"-Dlinux -Dunix -Asystem(linux) -D_LONGLONG -D__alpha__ " \
+SUB_CPP_PREDEFINES
#undef LIB_SPEC
#define LIB_SPEC "%{pg:-lgmon} %{pg:-lc_p} %{!pg:-lc}"
diff --git a/contrib/gcc/config/alpha/netbsd-elf.h b/contrib/gcc/config/alpha/netbsd-elf.h
index 17d7bb0..6e4f4da 100644
--- a/contrib/gcc/config/alpha/netbsd-elf.h
+++ b/contrib/gcc/config/alpha/netbsd-elf.h
@@ -16,7 +16,8 @@ GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING. If not, write to
-the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
#undef TARGET_VERSION
#define TARGET_VERSION fprintf (stderr, " (Alpha NetBSD/ELF)");
diff --git a/contrib/gcc/config/alpha/netbsd.h b/contrib/gcc/config/alpha/netbsd.h
index 054e9e0..5189064 100644
--- a/contrib/gcc/config/alpha/netbsd.h
+++ b/contrib/gcc/config/alpha/netbsd.h
@@ -16,7 +16,8 @@ GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING. If not, write to
-the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
#undef TARGET_DEFAULT
#define TARGET_DEFAULT (MASK_FP | MASK_FPREGS | MASK_GAS)
diff --git a/contrib/gcc/config/alpha/osf.h b/contrib/gcc/config/alpha/osf.h
index 956961f..5054444 100644
--- a/contrib/gcc/config/alpha/osf.h
+++ b/contrib/gcc/config/alpha/osf.h
@@ -30,9 +30,19 @@ Boston, MA 02111-1307, USA. */
-Dunix -D__osf__ -D_LONGLONG -DSYSTYPE_BSD \
-D_SYSTYPE_BSD -Asystem(unix) -Asystem(xpg4)"
+/* Accept DEC C flags for multithreaded programs. We use _PTHREAD_USE_D4
+ instead of PTHREAD_USE_D4 since both have the same effect and the former
+ doesn't invade the users' namespace. */
+
+#undef CPP_SUBTARGET_SPEC
+#define CPP_SUBTARGET_SPEC \
+"%{pthread|threads:-D_REENTRANT} %{threads:-D_PTHREAD_USE_D4}"
+
/* Under OSF4, -p and -pg require -lprof1, and -lprof1 requires -lpdf. */
-#define LIB_SPEC "%{p:-lprof1 -lpdf} %{pg:-lprof1 -lpdf} %{a:-lprof2} -lc"
+#define LIB_SPEC \
+"%{p|pg:-lprof1%{pthread|threads:_r} -lpdf} %{a:-lprof2} \
+ %{threads: -lpthreads} %{pthread|threads: -lpthread -lmach -lexc} -lc"
/* Pass "-G 8" to ld because Alpha's CC does. Pass -O3 if we are
optimizing, -O1 if we are not. Pass -shared, -non_shared or
diff --git a/contrib/gcc/config/alpha/t-ieee b/contrib/gcc/config/alpha/t-ieee
new file mode 100644
index 0000000..a1f93db
--- /dev/null
+++ b/contrib/gcc/config/alpha/t-ieee
@@ -0,0 +1,6 @@
+# All alphas get an IEEE compliant set of libraries.
+MULTILIB_OPTIONS = mieee
+MULTILIB_DIRNAMES = ieee
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
diff --git a/contrib/gcc/config/alpha/t-interix b/contrib/gcc/config/alpha/t-interix
new file mode 100644
index 0000000..d6d80e9
--- /dev/null
+++ b/contrib/gcc/config/alpha/t-interix
@@ -0,0 +1,16 @@
+# t-interix
+
+# System headers will track gcc's needs.
+# Even LANG_EXTRA_HEADERS may be temporary.
+USER_H=$(LANG_EXTRA_HEADERS)
+
+# We don't want this one either.
+INSTALL_ASSERT_H=
+
+
+
+CROSS_LIBGCC1 = libgcc1-asm.a
+LIBGCC1 = libgcc1-asm.a
+
+LIB1ASMSRC = alpha/lib1funcs.asm
+LIB1ASMFUNCS = _divqu _divq _divlu _divl _remqu _remq _remlu _reml
diff --git a/contrib/gcc/config/alpha/vms.h b/contrib/gcc/config/alpha/vms.h
index 44cf5bf..44388b2 100644
--- a/contrib/gcc/config/alpha/vms.h
+++ b/contrib/gcc/config/alpha/vms.h
@@ -439,10 +439,6 @@ extern int vms_valid_decl_attribute_p ();
#define ASM_OUTPUT_ALIGN(FILE,LOG) \
fprintf (FILE, "\t.align %d\n", LOG);
-#define UNALIGNED_SHORT_ASM_OP ".word"
-#define UNALIGNED_INT_ASM_OP ".long"
-#define UNALIGNED_DOUBLE_INT_ASM_OP ".quad"
-
#define ASM_OUTPUT_SECTION(FILE,SECTION) \
(strcmp (SECTION, ".text") == 0) \
? text_section () \
diff --git a/contrib/gcc/config/alpha/vxworks.h b/contrib/gcc/config/alpha/vxworks.h
index 6dee4b3..7ef1fee 100644
--- a/contrib/gcc/config/alpha/vxworks.h
+++ b/contrib/gcc/config/alpha/vxworks.h
@@ -36,11 +36,11 @@ Boston, MA 02111-1307, USA. */
#undef LIB_SPEC
#define LIB_SPEC ""
-/* VxWorks uses object files, not loadable images. make linker just
- combine objects. */
+/* VxWorks uses object files, not loadable images. Make linker just combine
+ objects. Also show using 32 bit mode and set start of text to 0. */
#undef LINK_SPEC
-#define LINK_SPEC "-r"
+#define LINK_SPEC "-r -taso -T 0"
/* VxWorks provides the functionality of crt0.o and friends itself. */
diff --git a/contrib/gcc/config/alpha/xm-alpha-interix.h b/contrib/gcc/config/alpha/xm-alpha-interix.h
new file mode 100644
index 0000000..02c53b8
--- /dev/null
+++ b/contrib/gcc/config/alpha/xm-alpha-interix.h
@@ -0,0 +1,45 @@
+/* Configuration for GNU compiler
+   for a DEC/Compaq Alpha
+ Copyright (C) 1999 Free Software Foundation, Inc.
+ Donn Terry, Softway Systems, Inc.
+ derived from code by Douglas B. Rupp (drupp@cs.washington.edu)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include <alpha/xm-alpha.h>
+
+#undef HOST_BITS_PER_LONG
+#define HOST_BITS_PER_LONG 32
+
+#define HOST_BITS_PER_WIDE_INT 64
+#ifdef __GNUC__
+# define HOST_WIDE_INT long long
+#else
+# define HOST_WIDE_INT __int64
+#endif
+
+
+#define HOST_BITS_PER_WIDEST_INT HOST_BITS_PER_LONGLONG
+#ifdef __GNUC__
+# define HOST_WIDEST_INT long long
+#else
+# define HOST_WIDEST_INT __int64
+#endif
+#define HOST_WIDEST_INT_PRINT_DEC "%lld"
+#define HOST_WIDEST_INT_PRINT_UNSIGNED "%llu"
+#define HOST_WIDEST_INT_PRINT_HEX "0x%llx"
diff --git a/contrib/gcc/config/alpha/xm-alpha.h b/contrib/gcc/config/alpha/xm-alpha.h
index 7665127..c04844f 100644
--- a/contrib/gcc/config/alpha/xm-alpha.h
+++ b/contrib/gcc/config/alpha/xm-alpha.h
@@ -46,7 +46,7 @@ Boston, MA 02111-1307, USA. */
#if defined(__GNUC__) && !defined(USE_C_ALLOCA)
#define alloca __builtin_alloca
#else
-#if !defined(_WIN32) && !defined(USE_C_ALLOCA) && !defined(OPEN_VMS)
+#if !defined(_WIN32) && !defined(USE_C_ALLOCA) && !defined(OPEN_VMS) && !defined(__INTERIX)
#include <alloca.h>
#else
extern void *alloca ();
OpenPOWER on IntegriCloud