diff options
Diffstat (limited to 'contrib/gcc/config/s390')
-rw-r--r-- | contrib/gcc/config/s390/fixdfdi.h | 301 | ||||
-rw-r--r-- | contrib/gcc/config/s390/libgcc-glibc.ver | 21 | ||||
-rw-r--r-- | contrib/gcc/config/s390/linux.h | 308 | ||||
-rw-r--r-- | contrib/gcc/config/s390/s390-protos.h | 85 | ||||
-rw-r--r-- | contrib/gcc/config/s390/s390.c | 4346 | ||||
-rw-r--r-- | contrib/gcc/config/s390/s390.h | 1389 | ||||
-rw-r--r-- | contrib/gcc/config/s390/s390.md | 6727 | ||||
-rw-r--r-- | contrib/gcc/config/s390/s390x.h | 27 | ||||
-rw-r--r-- | contrib/gcc/config/s390/t-linux | 7 | ||||
-rw-r--r-- | contrib/gcc/config/s390/t-linux64 | 3 |
10 files changed, 13214 insertions, 0 deletions
diff --git a/contrib/gcc/config/s390/fixdfdi.h b/contrib/gcc/config/s390/fixdfdi.h new file mode 100644 index 0000000..4f1fb35 --- /dev/null +++ b/contrib/gcc/config/s390/fixdfdi.h @@ -0,0 +1,301 @@ +/* Definitions of target machine for GNU compiler, for IBM S/390 + Copyright (C) 1999, 2000, 2001 Free Software Foundation, Inc. + Contributed by Hartmut Penner (hpenner@de.ibm.com) and + Ulrich Weigand (uweigand@de.ibm.com). + +This file is part of GNU CC. + +GNU CC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2, or (at your option) +any later version. + +GNU CC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GNU CC; see the file COPYING. If not, write to +the Free Software Foundation, 59 Temple Place - Suite 330, +Boston, MA 02111-1307, USA. 
*/ + +#ifdef L_fixunsdfdi +#define EXPD(fp) (((fp.l.upper) >> 20) & 0x7FF) +#define EXCESSD 1022 +#define SIGNBIT 0x80000000 +#define SIGND(fp) ((fp.l.upper) & SIGNBIT) +#define MANTD_LL(fp) ((fp.ll & (HIDDEND_LL-1)) | HIDDEND_LL) +#define FRACD_LL(fp) (fp.ll & (HIDDEND_LL-1)) +#define HIDDEND_LL ((UDItype_x)1 << 52) + +typedef int DItype_x __attribute__ ((mode (DI))); +typedef unsigned int UDItype_x __attribute__ ((mode (DI))); +typedef int SItype_x __attribute__ ((mode (SI))); +typedef unsigned int USItype_x __attribute__ ((mode (SI))); + +union double_long { + double d; + struct { + SItype_x upper; + USItype_x lower; + } l; + UDItype_x ll; +}; + + +/* convert double to unsigned int */ +UDItype_x +__fixunsdfdi (double a1) +{ + register union double_long dl1; + register int exp; + register UDItype_x l; + + dl1.d = a1; + + /* +/- 0, denormalized, negativ */ + + if (!EXPD (dl1) || SIGND(dl1)) + return 0; + + exp = EXPD (dl1) - EXCESSD - 53; + + /* number < 1 */ + + if (exp < -53) + return 0; + + /* NaN */ + + if ((EXPD(dl1) == 0x7ff) && (FRACD_LL(dl1) != 0)) /* NaN */ + return 0x0ULL; + + /* Number big number & + inf */ + + if (exp >= 12) { + return 0xFFFFFFFFFFFFFFFFULL; + } + + l = MANTD_LL(dl1); + + /* shift down until exp < 12 or l = 0 */ + if (exp > 0) + l <<= exp; + else + l >>= -exp; + + return l; +} +#define __fixunsdfdi ___fixunsdfdi +#endif +#undef L_fixunsdfdi + +#ifdef L_fixdfdi +#define EXPD(fp) (((fp.l.upper) >> 20) & 0x7FF) +#define EXCESSD 1022 +#define SIGNBIT 0x80000000 +#define SIGND(fp) ((fp.l.upper) & SIGNBIT) +#define MANTD_LL(fp) ((fp.ll & (HIDDEND_LL-1)) | HIDDEND_LL) +#define FRACD_LL(fp) (fp.ll & (HIDDEND_LL-1)) +#define HIDDEND_LL ((UDItype_x)1 << 52) + +typedef int DItype_x __attribute__ ((mode (DI))); +typedef unsigned int UDItype_x __attribute__ ((mode (DI))); +typedef int SItype_x __attribute__ ((mode (SI))); +typedef unsigned int USItype_x __attribute__ ((mode (SI))); + +union double_long { + double d; + struct { + SItype_x upper; + 
USItype_x lower; + } l; + UDItype_x ll; +}; + +/* convert double to int */ +DItype_x +__fixdfdi (double a1) +{ + register union double_long dl1; + register int exp; + register DItype_x l; + + dl1.d = a1; + + /* +/- 0, denormalized */ + + if (!EXPD (dl1)) + return 0; + + exp = EXPD (dl1) - EXCESSD - 53; + + /* number < 1 */ + + if (exp < -53) + return 0; + + /* NaN */ + + if ((EXPD(dl1) == 0x7ff) && (FRACD_LL(dl1) != 0)) /* NaN */ + return 0x8000000000000000ULL; + + /* Number big number & +/- inf */ + + if (exp >= 11) { + l = (long long)1<<63; + if (!SIGND(dl1)) + l--; + return l; + } + + l = MANTD_LL(dl1); + + /* shift down until exp < 12 or l = 0 */ + if (exp > 0) + l <<= exp; + else + l >>= -exp; + + return (SIGND (dl1) ? -l : l); +} +#define __fixdfdi ___fixdfdi +#endif +#undef L_fixdfdi + +#ifdef L_fixunssfdi +#define EXP(fp) (((fp.l) >> 23) & 0xFF) +#define EXCESS 126 +#define SIGNBIT 0x80000000 +#define SIGN(fp) ((fp.l) & SIGNBIT) +#define HIDDEN (1 << 23) +#define MANT(fp) (((fp.l) & 0x7FFFFF) | HIDDEN) +#define FRAC(fp) ((fp.l) & 0x7FFFFF) + +typedef int DItype_x __attribute__ ((mode (DI))); +typedef unsigned int UDItype_x __attribute__ ((mode (DI))); +typedef int SItype_x __attribute__ ((mode (SI))); +typedef unsigned int USItype_x __attribute__ ((mode (SI))); + +union float_long + { + float f; + USItype_x l; + }; + +/* convert float to unsigned int */ +UDItype_x +__fixunssfdi (float a1) +{ + register union float_long fl1; + register int exp; + register UDItype_x l; + + fl1.f = a1; + + /* +/- 0, denormalized, negativ */ + + if (!EXP (fl1) || SIGN(fl1)) + return 0; + + exp = EXP (fl1) - EXCESS - 24; + + /* number < 1 */ + + if (exp < -24) + return 0; + + /* NaN */ + + if ((EXP(fl1) == 0xff) && (FRAC(fl1) != 0)) /* NaN */ + return 0x0ULL; + + /* Number big number & + inf */ + + if (exp >= 41) { + return 0xFFFFFFFFFFFFFFFFULL; + } + + l = MANT(fl1); + + if (exp > 0) + l <<= exp; + else + l >>= -exp; + + return l; +} +#define __fixunssfdi ___fixunssfdi +#endif 
+#undef L_fixunssfdi + +#ifdef L_fixsfdi +#define EXP(fp) (((fp.l) >> 23) & 0xFF) +#define EXCESS 126 +#define SIGNBIT 0x80000000 +#define SIGN(fp) ((fp.l) & SIGNBIT) +#define HIDDEN (1 << 23) +#define MANT(fp) (((fp.l) & 0x7FFFFF) | HIDDEN) +#define FRAC(fp) ((fp.l) & 0x7FFFFF) + +typedef int DItype_x __attribute__ ((mode (DI))); +typedef unsigned int UDItype_x __attribute__ ((mode (DI))); +typedef int SItype_x __attribute__ ((mode (SI))); +typedef unsigned int USItype_x __attribute__ ((mode (SI))); + +union float_long + { + float f; + USItype_x l; + }; + +/* convert double to int */ +DItype_x +__fixsfdi (float a1) +{ + register union float_long fl1; + register int exp; + register DItype_x l; + + fl1.f = a1; + + /* +/- 0, denormalized */ + + if (!EXP (fl1)) + return 0; + + exp = EXP (fl1) - EXCESS - 24; + + /* number < 1 */ + + if (exp < -24) + return 0; + + /* NaN */ + + if ((EXP(fl1) == 0xff) && (FRAC(fl1) != 0)) /* NaN */ + return 0x8000000000000000ULL; + + /* Number big number & +/- inf */ + + if (exp >= 40) { + l = (long long)1<<63; + if (!SIGN(fl1)) + l--; + return l; + } + + l = MANT(fl1); + + if (exp > 0) + l <<= exp; + else + l >>= -exp; + + return (SIGN (fl1) ? -l : l); +} +#define __fixsfdi ___fixsfdi +#endif +#undef L_fixsfdi + diff --git a/contrib/gcc/config/s390/libgcc-glibc.ver b/contrib/gcc/config/s390/libgcc-glibc.ver new file mode 100644 index 0000000..9a42151 --- /dev/null +++ b/contrib/gcc/config/s390/libgcc-glibc.ver @@ -0,0 +1,21 @@ +# In order to work around the very problems that force us to now generally +# create a libgcc.so, glibc reexported a number of routines from libgcc.a. +# By now choosing the same version tags for these specific routines, we +# maintain enough binary compatibility to allow future versions of glibc +# to defer implementation of these routines to libgcc.so via DT_AUXILIARY. 
+ +# Note that we cannot use the default libgcc-glibc.ver file on s390x, +# because GLIBC_2.0 does not exist on this architecture, as the first +# ever glibc release on the platform was GLIBC_2.2. + +%inherit GCC_3.0 GLIBC_2.2 +GLIBC_2.2 { + __register_frame + __register_frame_table + __deregister_frame + __register_frame_info + __deregister_frame_info + __frame_state_for + __register_frame_info_table +} + diff --git a/contrib/gcc/config/s390/linux.h b/contrib/gcc/config/s390/linux.h new file mode 100644 index 0000000..a0336bf --- /dev/null +++ b/contrib/gcc/config/s390/linux.h @@ -0,0 +1,308 @@ +/* Definitions for Linux for S/390. + Copyright (C) 1999, 2000, 2001, 2002 Free Software Foundation, Inc. + Contributed by Hartmut Penner (hpenner@de.ibm.com) and + Ulrich Weigand (uweigand@de.ibm.com). + +This file is part of GNU CC. + +GNU CC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2, or (at your option) +any later version. + +GNU CC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GNU CC; see the file COPYING. If not, write to +the Free Software Foundation, 59 Temple Place - Suite 330, +Boston, MA 02111-1307, USA. */ + +#ifndef _LINUX_H +#define _LINUX_H + +/* Target specific version string. */ + +#ifdef DEFAULT_TARGET_64BIT +#undef TARGET_VERSION +#define TARGET_VERSION fprintf (stderr, " (Linux for zSeries)"); +#else +#undef TARGET_VERSION +#define TARGET_VERSION fprintf (stderr, " (Linux for S/390)"); +#endif + + +/* Target specific type definitions. */ + +/* ??? Do we really want long as size_t on 31-bit? */ +#undef SIZE_TYPE +#define SIZE_TYPE (TARGET_64BIT ? 
"long unsigned int" : "long unsigned int") +#undef PTRDIFF_TYPE +#define PTRDIFF_TYPE (TARGET_64BIT ? "long int" : "int") + +#undef WCHAR_TYPE +#define WCHAR_TYPE "int" +#undef WCHAR_TYPE_SIZE +#define WCHAR_TYPE_SIZE 32 + + +/* Target specific preprocessor settings. */ + +#define NO_BUILTIN_SIZE_TYPE +#define NO_BUILTIN_PTRDIFF_TYPE + +#define CPP_PREDEFINES \ + "-Dunix -Asystem(unix) -D__gnu_linux__ -Dlinux -Asystem(linux) -D__ELF__ \ + -Acpu(s390) -Amachine(s390) -D__s390__" + +#define CPP_ARCH31_SPEC \ + "-D__SIZE_TYPE__=long\\ unsigned\\ int -D__PTRDIFF_TYPE__=int" +#define CPP_ARCH64_SPEC \ + "-D__SIZE_TYPE__=long\\ unsigned\\ int -D__PTRDIFF_TYPE__=long\\ int \ + -D__s390x__ -D__LONG_MAX__=9223372036854775807L" + +#ifdef DEFAULT_TARGET_64BIT +#undef CPP_SPEC +#define CPP_SPEC "%{m31:%(cpp_arch31)} %{!m31:%(cpp_arch64)}" +#else +#undef CPP_SPEC +#define CPP_SPEC "%{m64:%(cpp_arch64)} %{!m64:%(cpp_arch31)}" +#endif + + +/* Target specific compiler settings. */ + +/* ??? -fcaller-saves sometimes doesn't work. Fix this! */ +#undef CC1_SPEC +#define CC1_SPEC "-fno-caller-saves" +#undef CC1PLUS_SPEC +#define CC1PLUS_SPEC "-fno-caller-saves" + + +/* Target specific assembler settings. */ + +#ifdef DEFAULT_TARGET_64BIT +#undef ASM_SPEC +#define ASM_SPEC "%{m31:-m31 -Aesa}" +#else +#undef ASM_SPEC +#define ASM_SPEC "%{m64:-m64 -Aesame}" +#endif + + +/* Target specific linker settings. 
*/ + +#define LINK_ARCH31_SPEC \ + "-m elf_s390 \ + %{shared:-shared} \ + %{!shared: \ + %{static:-static} \ + %{!static: \ + %{rdynamic:-export-dynamic} \ + %{!dynamic-linker:-dynamic-linker /lib/ld.so.1}}}" + +#define LINK_ARCH64_SPEC \ + "-m elf64_s390 \ + %{shared:-shared} \ + %{!shared: \ + %{static:-static} \ + %{!static: \ + %{rdynamic:-export-dynamic} \ + %{!dynamic-linker:-dynamic-linker /lib/ld64.so.1}}}" + +#ifdef DEFAULT_TARGET_64BIT +#undef LINK_SPEC +#define LINK_SPEC "%{m31:%(link_arch31)} %{!m31:%(link_arch64)}" +#else +#undef LINK_SPEC +#define LINK_SPEC "%{m64:%(link_arch64)} %{!m64:%(link_arch31)}" +#endif + + +/* This macro defines names of additional specifications to put in the specs + that can be used in various specifications like CC1_SPEC. Its definition + is an initializer with a subgrouping for each command option. */ + +#define EXTRA_SPECS \ + { "cpp_arch31", CPP_ARCH31_SPEC }, \ + { "cpp_arch64", CPP_ARCH64_SPEC }, \ + { "link_arch31", LINK_ARCH31_SPEC }, \ + { "link_arch64", LINK_ARCH64_SPEC }, \ + + +/* Character to start a comment. */ + +#define ASM_COMMENT_START "#" + + +/* Assembler pseudos to introduce constants of various size. */ + +#define ASM_DOUBLE "\t.double" + +/* The LOCAL_LABEL_PREFIX variable is used by dbxelf.h. */ +#define LOCAL_LABEL_PREFIX "." + +/* Prefix for internally generated assembler labels. */ +#define LPREFIX ".L" + + +/* This is how to output the definition of a user-level label named NAME, + such as the label on a static function or variable NAME. */ + +#undef ASM_OUTPUT_LABEL +#define ASM_OUTPUT_LABEL(FILE, NAME) \ + (assemble_name (FILE, NAME), fputs (":\n", FILE)) + +/* Store in OUTPUT a string (made with alloca) containing + an assembler-name for a local static variable named NAME. + LABELNO is an integer which is different for each call. 
*/ + +#undef ASM_FORMAT_PRIVATE_NAME +#define ASM_FORMAT_PRIVATE_NAME(OUTPUT, NAME, LABELNO) \ +( (OUTPUT) = (char *) alloca (strlen ((NAME)) + 10), \ + sprintf ((OUTPUT), "%s.%d", (NAME), (LABELNO))) + + + /* internal macro to output long */ +#define _ASM_OUTPUT_LONG(FILE, VALUE) \ + fprintf (FILE, "\t.long\t0x%lX\n", VALUE); + + +/* This is how to output an element of a case-vector that is absolute. */ + +#define ASM_OUTPUT_ADDR_VEC_ELT(FILE, VALUE) \ + fprintf (FILE, "%s%s%d\n", integer_asm_op (UNITS_PER_WORD, TRUE), \ + LPREFIX, VALUE) + +/* This is how to output an element of a case-vector that is relative. */ + +#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL) \ + fprintf (FILE, "%s%s%d-%s%d\n", integer_asm_op (UNITS_PER_WORD, TRUE), \ + LPREFIX, VALUE, LPREFIX, REL) + + + +/* This is how to output an assembler line + that says to advance the location counter + to a multiple of 2**LOG bytes. */ + +#undef ASM_OUTPUT_ALIGN +#define ASM_OUTPUT_ALIGN(FILE, LOG) \ + if ((LOG)!=0) fprintf ((FILE), "\t.align\t%d\n", 1<<(LOG)) + +/* This is how to output an assembler line + that says to advance the location counter by SIZE bytes. */ + +#undef ASM_OUTPUT_SKIP +#define ASM_OUTPUT_SKIP(FILE, SIZE) \ + fprintf ((FILE), "\t.set\t.,.+%u\n", (SIZE)) + +/* This is how to output assembler code to declare an + uninitialized external linkage data object. */ + +#undef ASM_OUTPUT_ALIGNED_BSS +#define ASM_OUTPUT_ALIGNED_BSS(FILE, DECL, NAME, SIZE, ALIGN) \ + asm_output_aligned_bss (FILE, DECL, NAME, SIZE, ALIGN) + +/* Output before read-only data. */ + +#define TEXT_SECTION_ASM_OP ".text" + +/* Output before writable (initialized) data. */ + +#define DATA_SECTION_ASM_OP ".data" + +/* Output before writable (uninitialized) data. */ + +#define BSS_SECTION_ASM_OP ".bss" + +/* This is how to output a command to make the user-level label named NAME + defined for reference from other files. 
*/ + +#define ASM_GLOBALIZE_LABEL(FILE, NAME) \ + (fputs (".globl ", FILE), assemble_name (FILE, NAME), fputs ("\n", FILE)) + +/* Select section for constant in constant pool. + We are in the right section. + undef for 64 bit mode (linux64.h). + */ + +#undef SELECT_RTX_SECTION +#define SELECT_RTX_SECTION(MODE, X, ALIGN) + + +/* Output code to add DELTA to the first argument, and then jump to FUNCTION. + Used for C++ multiple inheritance. */ +#define ASM_OUTPUT_MI_THUNK(FILE, THUNK_FNDECL, DELTA, FUNCTION) \ +do { \ + if (TARGET_64BIT) \ + { \ + if (flag_pic) \ + { \ + fprintf (FILE, "\tlarl 1,0f\n"); \ + fprintf (FILE, "\tagf %d,0(1)\n", \ + aggregate_value_p (TREE_TYPE \ + (TREE_TYPE (FUNCTION))) ? 3 :2 ); \ + fprintf (FILE, "\tlarl 1,"); \ + assemble_name (FILE, XSTR (XEXP (DECL_RTL (FUNCTION), 0), 0)); \ + fprintf (FILE, "@GOTENT\n"); \ + fprintf (FILE, "\tlg 1,0(1)\n"); \ + fprintf (FILE, "\tbr 1\n"); \ + fprintf (FILE, "0:\t.long "); \ + fprintf (FILE, HOST_WIDE_INT_PRINT_DEC, (DELTA)); \ + fprintf (FILE, "\n"); \ + } \ + else \ + { \ + fprintf (FILE, "\tlarl 1,0f\n"); \ + fprintf (FILE, "\tagf %d,0(1)\n", \ + aggregate_value_p (TREE_TYPE \ + (TREE_TYPE (FUNCTION))) ? 3 :2 ); \ + fprintf (FILE, "\tjg "); \ + assemble_name (FILE, XSTR (XEXP (DECL_RTL (FUNCTION), 0), 0)); \ + fprintf (FILE, "\n"); \ + fprintf (FILE, "0:\t.long "); \ + fprintf (FILE, HOST_WIDE_INT_PRINT_DEC, (DELTA)); \ + fprintf (FILE, "\n"); \ + } \ + } \ + else \ + { \ + if (flag_pic) \ + { \ + fprintf (FILE, "\tbras 1,0f\n"); \ + fprintf (FILE, "\t.long _GLOBAL_OFFSET_TABLE_-.\n"); \ + fprintf (FILE, "\t.long "); \ + assemble_name (FILE, XSTR (XEXP (DECL_RTL (FUNCTION), 0), 0)); \ + fprintf (FILE, "@GOT\n"); \ + fprintf (FILE, "\t.long "); \ + fprintf (FILE, HOST_WIDE_INT_PRINT_DEC, (DELTA)); \ + fprintf (FILE, "\n"); \ + fprintf (FILE, "0:\tal %d,8(1)\n", \ + aggregate_value_p (TREE_TYPE \ + (TREE_TYPE (FUNCTION))) ? 
3 : 2 ); \ + fprintf (FILE, "\tl 0,4(1)\n"); \ + fprintf (FILE, "\tal 1,0(1)\n"); \ + fprintf (FILE, "\talr 1,0\n"); \ + fprintf (FILE, "\tl 1,0(1)\n"); \ + fprintf (FILE, "\tbr 1\n"); \ + } else { \ + fprintf (FILE, "\tbras 1,0f\n"); \ + fprintf (FILE, "\t.long "); \ + assemble_name (FILE, XSTR (XEXP (DECL_RTL (FUNCTION), 0), 0)); \ + fprintf (FILE, "-.\n"); \ + fprintf (FILE, "\t.long "); \ + fprintf (FILE, HOST_WIDE_INT_PRINT_DEC, (DELTA)); \ + fprintf (FILE, "\n"); \ + fprintf (FILE, "0:\tal %d,4(1)\n", \ + aggregate_value_p (TREE_TYPE \ + (TREE_TYPE (FUNCTION))) ? 3 : 2 ); \ + fprintf (FILE, "\tal 1,0(1)\n"); \ + fprintf (FILE, "\tbr 1\n"); \ + } \ + } \ +} while (0) + +#endif diff --git a/contrib/gcc/config/s390/s390-protos.h b/contrib/gcc/config/s390/s390-protos.h new file mode 100644 index 0000000..cd4ac10 --- /dev/null +++ b/contrib/gcc/config/s390/s390-protos.h @@ -0,0 +1,85 @@ +/* Definitions of target machine for GNU compiler, for IBM S/390. + Copyright (C) 2000 Free Software Foundation, Inc. + Contributed by Hartmut Penner (hpenner@de.ibm.com) + +This file is part of GNU CC. + +GNU CC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2, or (at your option) +any later version. + +GNU CC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GNU CC; see the file COPYING. If not, write to +the Free Software Foundation, 59 Temple Place - Suite 330, +Boston, MA 02111-1307, USA. */ + +/* Declare functions in s390.c. 
*/ + +extern void optimization_options PARAMS ((int, int)); +extern void override_options PARAMS ((void)); +extern int s390_arg_frame_offset PARAMS ((void)); +extern void s390_function_prologue PARAMS ((FILE *, HOST_WIDE_INT)); +extern void s390_function_epilogue PARAMS ((FILE *, HOST_WIDE_INT)); +extern void s390_emit_prologue PARAMS ((void)); +extern void s390_emit_epilogue PARAMS ((void)); +extern void s390_function_profiler PARAMS ((FILE *, int)); + +#ifdef RTX_CODE +extern int const0_operand PARAMS ((rtx, enum machine_mode)); +extern int consttable_operand PARAMS ((rtx, enum machine_mode)); +extern int larl_operand PARAMS ((rtx, enum machine_mode)); +extern int fp_operand PARAMS ((rtx, enum machine_mode)); +extern int s_operand PARAMS ((rtx, enum machine_mode)); +extern int s_imm_operand PARAMS ((rtx, enum machine_mode)); +extern int bras_sym_operand PARAMS ((rtx, enum machine_mode)); +extern int load_multiple_operation PARAMS ((rtx, enum machine_mode)); +extern int store_multiple_operation PARAMS ((rtx, enum machine_mode)); +extern int s390_single_hi PARAMS ((rtx, enum machine_mode, int)); +extern int s390_extract_hi PARAMS ((rtx, enum machine_mode, int)); +extern int s390_single_qi PARAMS ((rtx, enum machine_mode, int)); +extern int s390_extract_qi PARAMS ((rtx, enum machine_mode, int)); + +extern int s390_match_ccmode PARAMS ((rtx, enum machine_mode)); +extern enum machine_mode s390_select_ccmode PARAMS ((enum rtx_code, rtx, rtx)); +extern int symbolic_reference_mentioned_p PARAMS ((rtx)); +extern int legitimate_la_operand_p PARAMS ((rtx)); +extern rtx legitimize_la_operand PARAMS ((rtx)); +extern int legitimate_pic_operand_p PARAMS ((rtx)); +extern int legitimate_constant_p PARAMS ((rtx)); +extern int legitimate_reload_constant_p PARAMS ((rtx)); +extern int legitimate_address_p PARAMS ((enum machine_mode, rtx, int)); +extern rtx legitimize_pic_address PARAMS ((rtx, rtx)); +extern rtx legitimize_address PARAMS ((rtx, rtx, enum machine_mode)); +extern enum 
reg_class s390_preferred_reload_class PARAMS ((rtx, enum reg_class)); +extern enum reg_class s390_secondary_input_reload_class PARAMS ((enum reg_class, enum machine_mode, rtx)); +extern int s390_plus_operand PARAMS ((rtx, enum machine_mode)); +extern void s390_expand_plus_operand PARAMS ((rtx, rtx, rtx)); +extern void emit_pic_move PARAMS ((rtx *, enum machine_mode)); + +extern void s390_output_symbolic_const PARAMS ((FILE *, rtx)); +extern void print_operand_address PARAMS ((FILE *, rtx)); +extern void print_operand PARAMS ((FILE *, rtx, int)); +extern void s390_output_constant_pool PARAMS ((FILE *)); +extern void s390_trampoline_template PARAMS ((FILE *)); +extern void s390_initialize_trampoline PARAMS ((rtx, rtx, rtx)); +extern rtx s390_gen_rtx_const_DI PARAMS ((int, int)); +extern rtx s390_simplify_dwarf_addr PARAMS ((rtx)); +#endif /* RTX_CODE */ + +#ifdef TREE_CODE +extern int s390_function_arg_pass_by_reference PARAMS ((enum machine_mode, tree)); +extern void s390_function_arg_advance PARAMS ((CUMULATIVE_ARGS *, enum machine_mode, tree, int)); +extern tree s390_build_va_list PARAMS ((void)); +#ifdef RTX_CODE +extern rtx s390_function_arg PARAMS ((CUMULATIVE_ARGS *, enum machine_mode, tree, int)); +extern void s390_va_start PARAMS ((int, tree, rtx)); +extern rtx s390_va_arg PARAMS ((tree, tree)); +#endif /* RTX_CODE */ +#endif /* TREE_CODE */ + diff --git a/contrib/gcc/config/s390/s390.c b/contrib/gcc/config/s390/s390.c new file mode 100644 index 0000000..4c96dce --- /dev/null +++ b/contrib/gcc/config/s390/s390.c @@ -0,0 +1,4346 @@ +/* Subroutines used for code generation on IBM S/390 and zSeries + Copyright (C) 1999, 2000, 2001, 2002 Free Software Foundation, Inc. + Contributed by Hartmut Penner (hpenner@de.ibm.com) and + Ulrich Weigand (uweigand@de.ibm.com). + +This file is part of GNU CC. 
+ +GNU CC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2, or (at your option) +any later version. + +GNU CC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GNU CC; see the file COPYING. If not, write to +the Free Software Foundation, 59 Temple Place - Suite 330, +Boston, MA 02111-1307, USA. */ + +#include "config.h" +#include "system.h" +#include "rtl.h" +#include "tree.h" +#include "tm_p.h" +#include "regs.h" +#include "hard-reg-set.h" +#include "real.h" +#include "insn-config.h" +#include "conditions.h" +#include "output.h" +#include "insn-attr.h" +#include "flags.h" +#include "except.h" +#include "function.h" +#include "recog.h" +#include "expr.h" +#include "reload.h" +#include "toplev.h" +#include "basic-block.h" +#include "integrate.h" +#include "ggc.h" +#include "target.h" +#include "target-def.h" +#include "debug.h" + + +static bool s390_assemble_integer PARAMS ((rtx, unsigned int, int)); +static int s390_adjust_cost PARAMS ((rtx, rtx, rtx, int)); +static int s390_adjust_priority PARAMS ((rtx, int)); + +#undef TARGET_ASM_ALIGNED_HI_OP +#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t" +#undef TARGET_ASM_ALIGNED_DI_OP +#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t" +#undef TARGET_ASM_INTEGER +#define TARGET_ASM_INTEGER s390_assemble_integer + +#undef TARGET_ASM_FUNCTION_PROLOGUE +#define TARGET_ASM_FUNCTION_PROLOGUE s390_function_prologue + +#undef TARGET_ASM_FUNCTION_EPILOGUE +#define TARGET_ASM_FUNCTION_EPILOGUE s390_function_epilogue + +#undef TARGET_ASM_OPEN_PAREN +#define TARGET_ASM_OPEN_PAREN "" + +#undef TARGET_ASM_CLOSE_PAREN +#define TARGET_ASM_CLOSE_PAREN "" + +#undef 
TARGET_SCHED_ADJUST_COST +#define TARGET_SCHED_ADJUST_COST s390_adjust_cost + +#undef TARGET_SCHED_ADJUST_PRIORITY +#define TARGET_SCHED_ADJUST_PRIORITY s390_adjust_priority + +struct gcc_target targetm = TARGET_INITIALIZER; + +extern int reload_completed; + +/* The alias set for prologue/epilogue register save/restore. */ +static int s390_sr_alias_set = 0; + +/* Function count for creating unique internal labels in a compile unit. */ +int s390_function_count = 0; + +/* Save information from a "cmpxx" operation until the branch or scc is + emitted. */ +rtx s390_compare_op0, s390_compare_op1; + +/* Structure used to hold the components of a S/390 memory + address. A legitimate address on S/390 is of the general + form + base + index + displacement + where any of the components is optional. + + base and index are registers of the class ADDR_REGS, + displacement is an unsigned 12-bit immediate constant. */ + +struct s390_address +{ + rtx base; + rtx indx; + rtx disp; + int pointer; +}; + +/* Structure containing information for prologue and epilogue. 
*/ + +struct s390_frame +{ + int frame_pointer_p; + int return_reg_saved_p; + int save_fprs_p; + int first_save_gpr; + int first_restore_gpr; + int last_save_gpr; + int arg_frame_offset; + + HOST_WIDE_INT frame_size; +}; + +static int s390_match_ccmode_set PARAMS ((rtx, enum machine_mode)); +static int s390_branch_condition_mask PARAMS ((rtx)); +static const char *s390_branch_condition_mnemonic PARAMS ((rtx, int)); +static int check_mode PARAMS ((rtx, enum machine_mode *)); +static int general_s_operand PARAMS ((rtx, enum machine_mode, int)); +static int s390_decompose_address PARAMS ((rtx, struct s390_address *, int)); +static int reg_used_in_mem_p PARAMS ((int, rtx)); +static int addr_generation_dependency_p PARAMS ((rtx, rtx)); +static void s390_split_branches PARAMS ((void)); +static void find_constant_pool_ref PARAMS ((rtx, rtx *)); +static void replace_constant_pool_ref PARAMS ((rtx *, rtx, rtx)); +static void s390_chunkify_pool PARAMS ((void)); +static int save_fprs_p PARAMS ((void)); +static int find_unused_clobbered_reg PARAMS ((void)); +static void s390_frame_info PARAMS ((struct s390_frame *)); +static rtx save_fpr PARAMS ((rtx, int, int)); +static rtx restore_fpr PARAMS ((rtx, int, int)); +static int s390_function_arg_size PARAMS ((enum machine_mode, tree)); + + +/* Return true if SET either doesn't set the CC register, or else + the source and destination have matching CC modes and that + CC mode is at least as constrained as REQ_MODE. 
*/ + +static int +s390_match_ccmode_set (set, req_mode) + rtx set; + enum machine_mode req_mode; +{ + enum machine_mode set_mode; + + if (GET_CODE (set) != SET) + abort (); + + if (GET_CODE (SET_DEST (set)) != REG || !CC_REGNO_P (REGNO (SET_DEST (set)))) + return 1; + + set_mode = GET_MODE (SET_DEST (set)); + switch (set_mode) + { + case CCSmode: + if (req_mode != CCSmode) + return 0; + break; + case CCUmode: + if (req_mode != CCUmode) + return 0; + break; + case CCLmode: + if (req_mode != CCLmode) + return 0; + break; + case CCZmode: + if (req_mode != CCSmode && req_mode != CCUmode && req_mode != CCTmode) + return 0; + break; + + default: + abort (); + } + + return (GET_MODE (SET_SRC (set)) == set_mode); +} + +/* Return true if every SET in INSN that sets the CC register + has source and destination with matching CC modes and that + CC mode is at least as constrained as REQ_MODE. */ + +int +s390_match_ccmode (insn, req_mode) + rtx insn; + enum machine_mode req_mode; +{ + int i; + + if (GET_CODE (PATTERN (insn)) == SET) + return s390_match_ccmode_set (PATTERN (insn), req_mode); + + if (GET_CODE (PATTERN (insn)) == PARALLEL) + for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++) + { + rtx set = XVECEXP (PATTERN (insn), 0, i); + if (GET_CODE (set) == SET) + if (!s390_match_ccmode_set (set, req_mode)) + return 0; + } + + return 1; +} + +/* Given a comparison code OP (EQ, NE, etc.) and the operands + OP0 and OP1 of a COMPARE, return the mode to be used for the + comparison. 
*/ + +enum machine_mode +s390_select_ccmode (code, op0, op1) + enum rtx_code code; + rtx op0; + rtx op1; +{ + switch (code) + { + case EQ: + case NE: + if (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS + || GET_CODE (op1) == NEG) + return CCLmode; + + return CCZmode; + + case LE: + case LT: + case GE: + case GT: + case UNORDERED: + case ORDERED: + case UNEQ: + case UNLE: + case UNLT: + case UNGE: + case UNGT: + case LTGT: + return CCSmode; + + case LEU: + case LTU: + case GEU: + case GTU: + return CCUmode; + + default: + abort (); + } +} + +/* Return branch condition mask to implement a branch + specified by CODE. */ + +static int +s390_branch_condition_mask (code) + rtx code; +{ + const int CC0 = 1 << 3; + const int CC1 = 1 << 2; + const int CC2 = 1 << 1; + const int CC3 = 1 << 0; + + if (GET_CODE (XEXP (code, 0)) != REG + || REGNO (XEXP (code, 0)) != CC_REGNUM + || XEXP (code, 1) != const0_rtx) + abort (); + + switch (GET_MODE (XEXP (code, 0))) + { + case CCZmode: + switch (GET_CODE (code)) + { + case EQ: return CC0; + case NE: return CC1 | CC2 | CC3; + default: + abort (); + } + break; + + case CCLmode: + switch (GET_CODE (code)) + { + case EQ: return CC0 | CC2; + case NE: return CC1 | CC3; + case UNORDERED: return CC2 | CC3; /* carry */ + case ORDERED: return CC0 | CC1; /* no carry */ + default: + abort (); + } + break; + + case CCUmode: + switch (GET_CODE (code)) + { + case EQ: return CC0; + case NE: return CC1 | CC2 | CC3; + case LTU: return CC1; + case GTU: return CC2; + case LEU: return CC0 | CC1; + case GEU: return CC0 | CC2; + default: + abort (); + } + break; + + case CCSmode: + switch (GET_CODE (code)) + { + case EQ: return CC0; + case NE: return CC1 | CC2 | CC3; + case LT: return CC1; + case GT: return CC2; + case LE: return CC0 | CC1; + case GE: return CC0 | CC2; + case UNORDERED: return CC3; + case ORDERED: return CC0 | CC1 | CC2; + case UNEQ: return CC0 | CC3; + case UNLT: return CC1 | CC3; + case UNGT: return CC2 | CC3; + case UNLE: return CC0 
| CC1 | CC3; + case UNGE: return CC0 | CC2 | CC3; + case LTGT: return CC1 | CC2; + default: + abort (); + } + + default: + abort (); + } +} + +/* If INV is false, return assembler mnemonic string to implement + a branch specified by CODE. If INV is true, return mnemonic + for the corresponding inverted branch. */ + +static const char * +s390_branch_condition_mnemonic (code, inv) + rtx code; + int inv; +{ + static const char *mnemonic[16] = + { + NULL, "o", "h", "nle", + "l", "nhe", "lh", "ne", + "e", "nlh", "he", "nl", + "le", "nh", "no", NULL + }; + + int mask = s390_branch_condition_mask (code); + + if (inv) + mask ^= 15; + + if (mask < 1 || mask > 14) + abort (); + + return mnemonic[mask]; +} + +/* If OP is an integer constant of mode MODE with exactly one + HImode subpart unequal to DEF, return the number of that + subpart. As a special case, all HImode subparts of OP are + equal to DEF, return zero. Otherwise, return -1. */ + +int +s390_single_hi (op, mode, def) + rtx op; + enum machine_mode mode; + int def; +{ + if (GET_CODE (op) == CONST_INT) + { + unsigned HOST_WIDE_INT value; + int n_parts = GET_MODE_SIZE (mode) / 2; + int i, part = -1; + + for (i = 0; i < n_parts; i++) + { + if (i == 0) + value = (unsigned HOST_WIDE_INT) INTVAL (op); + else + value >>= 16; + + if ((value & 0xffff) != (unsigned)(def & 0xffff)) + { + if (part != -1) + return -1; + else + part = i; + } + } + + return part == -1 ? 
0 : (n_parts - 1 - part); + } + + else if (GET_CODE (op) == CONST_DOUBLE + && GET_MODE (op) == VOIDmode) + { + unsigned HOST_WIDE_INT value; + int n_parts = GET_MODE_SIZE (mode) / 2; + int i, part = -1; + + for (i = 0; i < n_parts; i++) + { + if (i == 0) + value = (unsigned HOST_WIDE_INT) CONST_DOUBLE_LOW (op); + else if (i == HOST_BITS_PER_WIDE_INT / 16) + value = (unsigned HOST_WIDE_INT) CONST_DOUBLE_HIGH (op); + else + value >>= 16; + + if ((value & 0xffff) != (unsigned)(def & 0xffff)) + { + if (part != -1) + return -1; + else + part = i; + } + } + + return part == -1 ? 0 : (n_parts - 1 - part); + } + + return -1; +} + +/* Extract the HImode part number PART from integer + constant OP of mode MODE. */ + +int +s390_extract_hi (op, mode, part) + rtx op; + enum machine_mode mode; + int part; +{ + int n_parts = GET_MODE_SIZE (mode) / 2; + if (part < 0 || part >= n_parts) + abort(); + else + part = n_parts - 1 - part; + + if (GET_CODE (op) == CONST_INT) + { + unsigned HOST_WIDE_INT value = (unsigned HOST_WIDE_INT) INTVAL (op); + return ((value >> (16 * part)) & 0xffff); + } + else if (GET_CODE (op) == CONST_DOUBLE + && GET_MODE (op) == VOIDmode) + { + unsigned HOST_WIDE_INT value; + if (part < HOST_BITS_PER_WIDE_INT / 16) + value = (unsigned HOST_WIDE_INT) CONST_DOUBLE_LOW (op); + else + value = (unsigned HOST_WIDE_INT) CONST_DOUBLE_HIGH (op), + part -= HOST_BITS_PER_WIDE_INT / 16; + + return ((value >> (16 * part)) & 0xffff); + } + + abort (); +} + +/* If OP is an integer constant of mode MODE with exactly one + QImode subpart unequal to DEF, return the number of that + subpart. As a special case, all QImode subparts of OP are + equal to DEF, return zero. Otherwise, return -1. 
 */

int
s390_single_qi (op, mode, def)
     rtx op;
     enum machine_mode mode;
     int def;
{
  if (GET_CODE (op) == CONST_INT)
    {
      unsigned HOST_WIDE_INT value;
      int n_parts = GET_MODE_SIZE (mode);
      int i, part = -1;

      /* Scan byte subparts from least to most significant;
         remember the (unique) one that differs from DEF.  */
      for (i = 0; i < n_parts; i++)
        {
          if (i == 0)
            value = (unsigned HOST_WIDE_INT) INTVAL (op);
          else
            value >>= 8;

          if ((value & 0xff) != (unsigned)(def & 0xff))
            {
              if (part != -1)
                return -1;
              else
                part = i;
            }
        }

      /* Part numbers count from the most significant subpart
         (big-endian ordering), hence the reversal.  */
      return part == -1 ? 0 : (n_parts - 1 - part);
    }

  else if (GET_CODE (op) == CONST_DOUBLE
           && GET_MODE (op) == VOIDmode)
    {
      unsigned HOST_WIDE_INT value;
      int n_parts = GET_MODE_SIZE (mode);
      int i, part = -1;

      for (i = 0; i < n_parts; i++)
        {
          if (i == 0)
            value = (unsigned HOST_WIDE_INT) CONST_DOUBLE_LOW (op);
          else if (i == HOST_BITS_PER_WIDE_INT / 8)
            /* Crossed into the high word of the CONST_DOUBLE.  */
            value = (unsigned HOST_WIDE_INT) CONST_DOUBLE_HIGH (op);
          else
            value >>= 8;

          if ((value & 0xff) != (unsigned)(def & 0xff))
            {
              if (part != -1)
                return -1;
              else
                part = i;
            }
        }

      return part == -1 ? 0 : (n_parts - 1 - part);
    }

  return -1;
}

/* Extract the QImode part number PART from integer
   constant OP of mode MODE.  */

int
s390_extract_qi (op, mode, part)
     rtx op;
     enum machine_mode mode;
     int part;
{
  int n_parts = GET_MODE_SIZE (mode);
  if (part < 0 || part >= n_parts)
    abort();
  else
    /* Convert from big-endian part numbering (as produced by
       s390_single_qi) to a shift count.  */
    part = n_parts - 1 - part;

  if (GET_CODE (op) == CONST_INT)
    {
      unsigned HOST_WIDE_INT value = (unsigned HOST_WIDE_INT) INTVAL (op);
      return ((value >> (8 * part)) & 0xff);
    }
  else if (GET_CODE (op) == CONST_DOUBLE
           && GET_MODE (op) == VOIDmode)
    {
      unsigned HOST_WIDE_INT value;
      if (part < HOST_BITS_PER_WIDE_INT / 8)
        value = (unsigned HOST_WIDE_INT) CONST_DOUBLE_LOW (op);
      else
        value = (unsigned HOST_WIDE_INT) CONST_DOUBLE_HIGH (op),
        part -= HOST_BITS_PER_WIDE_INT / 8;

      return ((value >> (8 * part)) & 0xff);
    }

  abort ();
}


/* Change optimizations to be performed, depending on the
   optimization level.

   LEVEL is the optimization level specified; 2 if `-O2' is
   specified, 1 if `-O' is specified, and 0 if neither is specified.

   SIZE is non-zero if `-Os' is specified and zero otherwise.  */

void
optimization_options (level, size)
     int level ATTRIBUTE_UNUSED;
     int size ATTRIBUTE_UNUSED;
{
#ifdef HAVE_decrement_and_branch_on_count
  /* When optimizing, enable use of BRCT instruction.  */
  if (level >= 1)
    flag_branch_on_count_reg = 1;
#endif
}

/* Process target-specific option overrides; called once after
   all command-line options have been parsed.  */

void
override_options ()
{
  /* Acquire a unique set number for our register saves and restores.  */
  s390_sr_alias_set = new_alias_set ();
}


/* Map for smallest class containing reg regno.
   Regs 0-15 are the general registers (reg 0 cannot be used as an
   address base), 16-31 the floating-point registers, followed by
   the argument pointer, condition code, and return-address slot.  */

enum reg_class regclass_map[FIRST_PSEUDO_REGISTER] =
{ GENERAL_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
  ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
  ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
  ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  ADDR_REGS, NO_REGS, ADDR_REGS
};


/* Return true if OP a (const_int 0) operand.
   OP is the current operation.
   MODE is the current operation mode.  */

int
const0_operand (op, mode)
     register rtx op;
     enum machine_mode mode;
{
  return op == CONST0_RTX (mode);
}

/* Return true if OP is constant.
   OP is the current operation.
   MODE is the current operation mode.  */

int
consttable_operand (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  return CONSTANT_P (op);
}

/* Return true if the mode of operand OP matches MODE.
   If MODE is set to VOIDmode, set it to the mode of OP.  */

static int
check_mode (op, mode)
     register rtx op;
     enum machine_mode *mode;
{
  if (*mode == VOIDmode)
    *mode = GET_MODE (op);
  else
    {
      if (GET_MODE (op) != VOIDmode && GET_MODE (op) != *mode)
        return 0;
    }
  return 1;
}

/* Return true if OP a valid operand for the LARL instruction.
   OP is the current operation.
   MODE is the current operation mode.  */

int
larl_operand (op, mode)
     register rtx op;
     enum machine_mode mode;
{
  if (! check_mode (op, &mode))
    return 0;

  /* Allow labels and local symbols.  */
  if (GET_CODE (op) == LABEL_REF)
    return 1;
  if (GET_CODE (op) == SYMBOL_REF
      && (!flag_pic || SYMBOL_REF_FLAG (op)
          || CONSTANT_POOL_ADDRESS_P (op)))
    return 1;

  /* Everything else must have a CONST, so strip it.  */
  if (GET_CODE (op) != CONST)
    return 0;
  op = XEXP (op, 0);

  /* Allow adding *even* constants.  LARL computes addresses in
     halfword steps, so odd offsets cannot be encoded.  */
  if (GET_CODE (op) == PLUS)
    {
      if (GET_CODE (XEXP (op, 1)) != CONST_INT
          || (INTVAL (XEXP (op, 1)) & 1) != 0)
        return 0;
      op = XEXP (op, 0);
    }

  /* Labels and local symbols allowed here as well.  */
  if (GET_CODE (op) == LABEL_REF)
    return 1;
  if (GET_CODE (op) == SYMBOL_REF
      && (!flag_pic || SYMBOL_REF_FLAG (op)
          || CONSTANT_POOL_ADDRESS_P (op)))
    return 1;

  /* Now we must have a @GOTENT offset or @PLT stub.
     (UNSPEC 111 is @GOTENT, 113 is @PLT; see legitimize_pic_address.)  */
  if (GET_CODE (op) == UNSPEC
      && XINT (op, 1) == 111)
    return 1;
  if (GET_CODE (op) == UNSPEC
      && XINT (op, 1) == 113)
    return 1;

  return 0;
}

/* Return true if OP is a valid FP-Register.
   OP is the current operation.
   MODE is the current operation mode.  */

int
fp_operand (op, mode)
     register rtx op;
     enum machine_mode mode;
{
  register enum rtx_code code = GET_CODE (op);
  if (! check_mode (op, &mode))
    return 0;
  if (code == REG && REGNO_OK_FOR_FP_P (REGNO (op)))
    return 1;
  else
    return 0;
}

/* Helper routine to implement s_operand and s_imm_operand.
   OP is the current operation.
   MODE is the current operation mode.
   ALLOW_IMMEDIATE specifies whether immediate operands should
   be accepted or not.  */

static int
general_s_operand (op, mode, allow_immediate)
     register rtx op;
     enum machine_mode mode;
     int allow_immediate;
{
  struct s390_address addr;

  /* Call general_operand first, so that we don't have to
     check for many special cases.  */
  if (!general_operand (op, mode))
    return 0;

  /* Just like memory_operand, allow (subreg (mem ...))
     after reload.  */
  if (reload_completed
      && GET_CODE (op) == SUBREG
      && GET_CODE (SUBREG_REG (op)) == MEM)
    op = SUBREG_REG (op);

  switch (GET_CODE (op))
    {
      /* Constants that we are sure will be forced to the
         literal pool in reload are OK as s-operand.  Note
         that we cannot call s390_preferred_reload_class here
         because it might not be known yet at this point
         whether the current function is a leaf or not.  */
      case CONST_INT:
      case CONST_DOUBLE:
        if (!allow_immediate || reload_completed)
          break;
        if (!legitimate_reload_constant_p (op))
          return 1;
        if (!TARGET_64BIT)
          return 1;
        break;

      /* Memory operands are OK unless they already use an
         index register (S-type operands have base+displacement
         addresses only).  */
      case MEM:
        if (GET_CODE (XEXP (op, 0)) == ADDRESSOF)
          return 1;
        if (s390_decompose_address (XEXP (op, 0), &addr, FALSE)
            && !addr.indx)
          return 1;
        break;

      default:
        break;
    }

  return 0;
}

/* Return true if OP is a valid S-type operand.
   OP is the current operation.
   MODE is the current operation mode.  */

int
s_operand (op, mode)
     register rtx op;
     enum machine_mode mode;
{
  return general_s_operand (op, mode, 0);
}

/* Return true if OP is a valid S-type operand or an immediate
   operand that can be addressed as S-type operand by forcing
   it into the literal pool.
   OP is the current operation.
   MODE is the current operation mode.  */

int
s_imm_operand (op, mode)
     register rtx op;
     enum machine_mode mode;
{
  return general_s_operand (op, mode, 1);
}

/* Return true if OP is a valid operand for the BRAS instruction.
   OP is the current operation.
   MODE is the current operation mode.  */

int
bras_sym_operand (op, mode)
     register rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  register enum rtx_code code = GET_CODE (op);

  /* Allow SYMBOL_REFs.  */
  if (code == SYMBOL_REF)
    return 1;

  /* Allow @PLT stubs (UNSPEC 113).  */
  if (code == CONST
      && GET_CODE (XEXP (op, 0)) == UNSPEC
      && XINT (XEXP (op, 0), 1) == 113)
    return 1;
  return 0;
}


/* Return true if OP is a load multiple operation.  It is known to be a
   PARALLEL and the first section will be tested.
   OP is the current operation.
   MODE is the current operation mode.  */

int
load_multiple_operation (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  int count = XVECLEN (op, 0);
  unsigned int dest_regno;
  rtx src_addr;
  int i, off;


  /* Perform a quick check so we don't blow up below.  */
  if (count <= 1
      || GET_CODE (XVECEXP (op, 0, 0)) != SET
      || GET_CODE (SET_DEST (XVECEXP (op, 0, 0))) != REG
      || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != MEM)
    return 0;

  dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, 0)));
  src_addr = XEXP (SET_SRC (XVECEXP (op, 0, 0)), 0);

  /* Check, is base, or base + displacement.  */

  if (GET_CODE (src_addr) == REG)
    off = 0;
  else if (GET_CODE (src_addr) == PLUS
	   && GET_CODE (XEXP (src_addr, 0)) == REG
	   && GET_CODE (XEXP (src_addr, 1)) == CONST_INT)
    {
      off = INTVAL (XEXP (src_addr, 1));
      src_addr = XEXP (src_addr, 0);
    }
  else
    return 0;

  if (src_addr == frame_pointer_rtx || src_addr == arg_pointer_rtx)
    return 0;

  /* Each remaining element must load the next consecutive register
     from the next consecutive word.  */
  for (i = 1; i < count; i++)
    {
      rtx elt = XVECEXP (op, 0, i);

      if (GET_CODE (elt) != SET
	  || GET_CODE (SET_DEST (elt)) != REG
	  || GET_MODE (SET_DEST (elt)) != Pmode
	  || REGNO (SET_DEST (elt)) != dest_regno + i
	  || GET_CODE (SET_SRC (elt)) != MEM
	  || GET_MODE (SET_SRC (elt)) != Pmode
	  || GET_CODE (XEXP (SET_SRC (elt), 0)) != PLUS
	  || ! rtx_equal_p (XEXP (XEXP (SET_SRC (elt), 0), 0), src_addr)
	  || GET_CODE (XEXP (XEXP (SET_SRC (elt), 0), 1)) != CONST_INT
	  || INTVAL (XEXP (XEXP (SET_SRC (elt), 0), 1))
	     != off + i * UNITS_PER_WORD)
	return 0;
    }

  return 1;
}

/* Return true if OP is a store multiple operation.  It is known to be a
   PARALLEL and the first section will be tested.
   OP is the current operation.
   MODE is the current operation mode.  */

int
store_multiple_operation (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  int count = XVECLEN (op, 0);
  unsigned int src_regno;
  rtx dest_addr;
  int i, off;

  /* Perform a quick check so we don't blow up below.  */
  if (count <= 1
      || GET_CODE (XVECEXP (op, 0, 0)) != SET
      || GET_CODE (SET_DEST (XVECEXP (op, 0, 0))) != MEM
      || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != REG)
    return 0;

  src_regno = REGNO (SET_SRC (XVECEXP (op, 0, 0)));
  dest_addr = XEXP (SET_DEST (XVECEXP (op, 0, 0)), 0);

  /* Check, is base, or base + displacement.  */

  if (GET_CODE (dest_addr) == REG)
    off = 0;
  else if (GET_CODE (dest_addr) == PLUS
	   && GET_CODE (XEXP (dest_addr, 0)) == REG
	   && GET_CODE (XEXP (dest_addr, 1)) == CONST_INT)
    {
      off = INTVAL (XEXP (dest_addr, 1));
      dest_addr = XEXP (dest_addr, 0);
    }
  else
    return 0;

  if (dest_addr == frame_pointer_rtx || dest_addr == arg_pointer_rtx)
    return 0;

  /* Each remaining element must store the next consecutive register
     to the next consecutive word.  */
  for (i = 1; i < count; i++)
    {
      rtx elt = XVECEXP (op, 0, i);

      if (GET_CODE (elt) != SET
	  || GET_CODE (SET_SRC (elt)) != REG
	  || GET_MODE (SET_SRC (elt)) != Pmode
	  || REGNO (SET_SRC (elt)) != src_regno + i
	  || GET_CODE (SET_DEST (elt)) != MEM
	  || GET_MODE (SET_DEST (elt)) != Pmode
	  || GET_CODE (XEXP (SET_DEST (elt), 0)) != PLUS
	  || ! rtx_equal_p (XEXP (XEXP (SET_DEST (elt), 0), 0), dest_addr)
	  || GET_CODE (XEXP (XEXP (SET_DEST (elt), 0), 1)) != CONST_INT
	  || INTVAL (XEXP (XEXP (SET_DEST (elt), 0), 1))
	     != off + i * UNITS_PER_WORD)
	return 0;
    }
  return 1;
}


/* Return true if OP contains a symbol reference.
   Walks the whole rtx recursively.  */

int
symbolic_reference_mentioned_p (op)
     rtx op;
{
  register const char *fmt;
  register int i;

  if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
    return 1;

  fmt = GET_RTX_FORMAT (GET_CODE (op));
  for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
	{
	  register int j;

	  for (j = XVECLEN (op, i) - 1; j >= 0; j--)
	    if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
	      return 1;
	}

      else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
	return 1;
    }

  return 0;
}


/* Return true if OP is a legitimate general operand when
   generating PIC code.
   It is given that flag_pic is on
   and that OP satisfies CONSTANT_P or is a CONST_DOUBLE.  */

int
legitimate_pic_operand_p (op)
     register rtx op;
{
  /* Accept all non-symbolic constants.  */
  if (!SYMBOLIC_CONST (op))
    return 1;

  /* Reject everything else; must be handled
     via emit_pic_move.  */
  return 0;
}

/* Returns true if the constant value OP is a legitimate general operand.
   It is given that OP satisfies CONSTANT_P or is a CONST_DOUBLE.  */

int
legitimate_constant_p (op)
     register rtx op;
{
  /* Accept all non-symbolic constants.  */
  if (!SYMBOLIC_CONST (op))
    return 1;

  /* In the PIC case, symbolic constants must *not* be
     forced into the literal pool.  We accept them here,
     so that they will be handled by emit_pic_move.  */
  if (flag_pic)
    return 1;

  /* Even in the non-PIC case, we can accept immediate
     LARL operands here.  */
  if (TARGET_64BIT)
    return larl_operand (op, VOIDmode);

  /* All remaining non-PIC symbolic constants are
     forced into the literal pool.  */
  return 0;
}

/* Returns true if the constant value OP is a legitimate general
   operand during and after reload.  The difference to
   legitimate_constant_p is that this function will not accept
   a constant that would need to be forced to the literal pool
   before it can be used as operand.  */

int
legitimate_reload_constant_p (op)
     register rtx op;
{
  /* Accept l(g)hi operands (16-bit signed immediates, 'K').  */
  if (GET_CODE (op) == CONST_INT
      && CONST_OK_FOR_LETTER_P (INTVAL (op), 'K'))
    return 1;

  /* Accept lliXX operands (a single nonzero HImode subpart).  */
  if (TARGET_64BIT
      && s390_single_hi (op, DImode, 0) >= 0)
    return 1;

  /* Accept larl operands.  */
  if (TARGET_64BIT
      && larl_operand (op, VOIDmode))
    return 1;

  /* If reload is completed, and we do not already have a
     literal pool, and OP must be forced to the literal
     pool, then something must have gone wrong earlier.
     We *cannot* force the constant any more, because the
     prolog generation already decided we don't need to
     set up the base register.  */
  if (reload_completed && !regs_ever_live[BASE_REGISTER])
    abort ();

  /* Everything else cannot be handled without reload.  */
  return 0;
}

/* Given an rtx OP being reloaded into a reg required to be in class CLASS,
   return the class of reg to actually use.  */

enum reg_class
s390_preferred_reload_class (op, class)
     rtx op;
     enum reg_class class;
{
  /* This can happen if a floating point constant is being
     reloaded into an integer register.  Leave well alone.  */
  if (GET_MODE_CLASS (GET_MODE (op)) == MODE_FLOAT
      && class != FP_REGS)
    return class;

  switch (GET_CODE (op))
    {
      /* Constants we cannot reload must be forced into the
	 literal pool.  For constants we *could* handle directly,
	 it might still be preferable to put them in the pool and
	 use a memory-to-memory instruction.

	 However, try to avoid needlessly allocating a literal
	 pool in a routine that wouldn't otherwise need any.
	 Heuristically, we assume that 64-bit leaf functions
	 typically don't need a literal pool, all others do.  */
      case CONST_DOUBLE:
      case CONST_INT:
	if (!legitimate_reload_constant_p (op))
	  return NO_REGS;

	if (TARGET_64BIT && current_function_is_leaf)
	  return class;

	return NO_REGS;

      /* If a symbolic constant or a PLUS is reloaded,
	 it is most likely being used as an address, so
	 prefer ADDR_REGS.  If 'class' is not a superset
	 of ADDR_REGS, e.g. FP_REGS, reject this reload.  */
      case PLUS:
      case LABEL_REF:
      case SYMBOL_REF:
      case CONST:
	if (reg_class_subset_p (ADDR_REGS, class))
	  return ADDR_REGS;
	else
	  return NO_REGS;

      default:
	break;
    }

  return class;
}

/* Return the register class of a scratch register needed to
   load IN into a register of class CLASS in MODE.

   We need a temporary when loading a PLUS expression which
   is not a legitimate operand of the LOAD ADDRESS instruction.  */

enum reg_class
s390_secondary_input_reload_class (class, mode, in)
     enum reg_class class ATTRIBUTE_UNUSED;
     enum machine_mode mode;
     rtx in;
{
  if (s390_plus_operand (in, mode))
    return ADDR_REGS;

  return NO_REGS;
}

/* Return true if OP is a PLUS that is not a legitimate
   operand for the LA instruction.
   OP is the current operation.
   MODE is the current operation mode.  */

int
s390_plus_operand (op, mode)
     register rtx op;
     enum machine_mode mode;
{
  if (!check_mode (op, &mode) || mode != Pmode)
    return FALSE;

  if (GET_CODE (op) != PLUS)
    return FALSE;

  if (legitimate_la_operand_p (op))
    return FALSE;

  return TRUE;
}

/* Generate code to load SRC, which is PLUS that is not a
   legitimate operand for the LA instruction, into TARGET.
   SCRATCH may be used as scratch register.  */

void
s390_expand_plus_operand (target, src, scratch_in)
     register rtx target;
     register rtx src;
     register rtx scratch_in;
{
  rtx sum1, sum2, scratch;

  /* ??? reload apparently does not ensure that the scratch register
     and the target do not overlap.  We absolutely require this to be
     the case, however.  Therefore the reload_in[sd]i patterns ask for
     a double-sized scratch register, and if one part happens to be
     equal to the target, we use the other one.  */
  scratch = gen_rtx_REG (Pmode, REGNO (scratch_in));
  if (rtx_equal_p (scratch, target))
    scratch = gen_rtx_REG (Pmode, REGNO (scratch_in) + 1);

  /* src must be a PLUS; get its two operands.  */
  if (GET_CODE (src) != PLUS || GET_MODE (src) != Pmode)
    abort ();

  /* Check if any of the two operands is already scheduled
     for replacement by reload.  This can happen e.g. when
     float registers occur in an address.  */
  sum1 = find_replacement (&XEXP (src, 0));
  sum2 = find_replacement (&XEXP (src, 1));

  /* If one of the two operands is equal to the target,
     make it the first one.  If one is a constant, make
     it the second one.  */
  if (rtx_equal_p (target, sum2)
      || GET_CODE (sum1) == CONST_INT)
    {
      rtx tem = sum2;
      sum2 = sum1;
      sum1 = tem;
    }

  /* If the first operand is not an address register,
     we reload it into the target.  (Regs 1-15 are the
     valid address registers; reg 0 cannot serve as base.)  */
  if (true_regnum (sum1) < 1 || true_regnum (sum1) > 15)
    {
      emit_move_insn (target, sum1);
      sum1 = target;
    }

  /* Likewise for the second operand.  However, take
     care not to clobber the target if we already used
     it for the first operand.  Use the scratch instead.
     Also, allow an immediate offset if it is in range
     (12-bit unsigned displacement).  */
  if ((true_regnum (sum2) < 1 || true_regnum (sum2) > 15)
      && !(GET_CODE (sum2) == CONST_INT
           && INTVAL (sum2) >= 0 && INTVAL (sum2) < 4096))
    {
      if (!rtx_equal_p (target, sum1))
        {
          emit_move_insn (target, sum2);
          sum2 = target;
        }
      else
        {
          emit_move_insn (scratch, sum2);
          sum2 = scratch;
        }
    }

  /* Emit the LOAD ADDRESS pattern.  Note that reload of PLUS
     is only ever performed on addresses, so we can mark the
     sum as legitimate for LA in any case.  */
  src = gen_rtx_PLUS (Pmode, sum1, sum2);
  src = legitimize_la_operand (src);
  emit_insn (gen_rtx_SET (VOIDmode, target, src));
}


/* Decompose a RTL expression ADDR for a memory address into
   its components, returned in OUT.  The boolean STRICT
   specifies whether strict register checking applies.
   Returns 0 if ADDR is not a valid memory address, nonzero
   otherwise.  If OUT is NULL, don't return the components,
   but check for validity only.

   Note: Only addresses in canonical form are recognized.
   LEGITIMIZE_ADDRESS should convert non-canonical forms to the
   canonical form so that they will be recognized.
 */

static int
s390_decompose_address (addr, out, strict)
     register rtx addr;
     struct s390_address *out;
     int strict;
{
  rtx base = NULL_RTX;
  rtx indx = NULL_RTX;
  rtx disp = NULL_RTX;
  /* POINTER tracks whether the address is provably a pointer,
     which legitimate_la_operand_p needs for 31-bit LA.  */
  int pointer = FALSE;

  /* Decompose address into base + index + displacement.  */

  if (GET_CODE (addr) == REG || GET_CODE (addr) == UNSPEC)
    base = addr;

  else if (GET_CODE (addr) == PLUS)
    {
      rtx op0 = XEXP (addr, 0);
      rtx op1 = XEXP (addr, 1);
      enum rtx_code code0 = GET_CODE (op0);
      enum rtx_code code1 = GET_CODE (op1);

      if (code0 == REG || code0 == UNSPEC)
	{
	  if (code1 == REG || code1 == UNSPEC)
	    {
	      indx = op0;	/* index + base */
	      base = op1;
	    }

	  else
	    {
	      base = op0;	/* base + displacement */
	      disp = op1;
	    }
	}

      else if (code0 == PLUS)
	{
	  indx = XEXP (op0, 0);	/* index + base + disp */
	  base = XEXP (op0, 1);
	  disp = op1;
	}

      else
	{
	  return FALSE;
	}
    }

  else
    disp = addr;		/* displacement */


  /* Validate base register.  */
  if (base)
    {
      /* UNSPEC 101 wraps a register known to hold a valid 31-bit
	 pointer (see legitimize_la_operand); unwrap it.  */
      if (GET_CODE (base) == UNSPEC)
	{
	  if (XVECLEN (base, 0) != 1 || XINT (base, 1) != 101)
	    return FALSE;
	  base = XVECEXP (base, 0, 0);
	  pointer = TRUE;
	}

      if (GET_CODE (base) != REG || GET_MODE (base) != Pmode)
	return FALSE;

      if ((strict && ! REG_OK_FOR_BASE_STRICT_P (base))
	  || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (base)))
	return FALSE;

      /* Registers that by construction hold pointers.  */
      if (REGNO (base) == BASE_REGISTER
	  || REGNO (base) == STACK_POINTER_REGNUM
	  || REGNO (base) == FRAME_POINTER_REGNUM
	  || ((reload_completed || reload_in_progress)
	      && frame_pointer_needed
	      && REGNO (base) == HARD_FRAME_POINTER_REGNUM)
	  || (flag_pic
	      && REGNO (base) == PIC_OFFSET_TABLE_REGNUM))
	pointer = TRUE;
    }

  /* Validate index register.  Same rules as for the base.  */
  if (indx)
    {
      if (GET_CODE (indx) == UNSPEC)
	{
	  if (XVECLEN (indx, 0) != 1 || XINT (indx, 1) != 101)
	    return FALSE;
	  indx = XVECEXP (indx, 0, 0);
	  pointer = TRUE;
	}

      if (GET_CODE (indx) != REG || GET_MODE (indx) != Pmode)
	return FALSE;

      if ((strict && ! REG_OK_FOR_BASE_STRICT_P (indx))
	  || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (indx)))
	return FALSE;

      if (REGNO (indx) == BASE_REGISTER
	  || REGNO (indx) == STACK_POINTER_REGNUM
	  || REGNO (indx) == FRAME_POINTER_REGNUM
	  || ((reload_completed || reload_in_progress)
	      && frame_pointer_needed
	      && REGNO (indx) == HARD_FRAME_POINTER_REGNUM)
	  || (flag_pic
	      && REGNO (indx) == PIC_OFFSET_TABLE_REGNUM))
	pointer = TRUE;
    }

  /* Validate displacement.  */
  if (disp)
    {
      /* Allow integer constant in range (12-bit unsigned).  */
      if (GET_CODE (disp) == CONST_INT)
	{
	  if (INTVAL (disp) < 0 || INTVAL (disp) >= 4096)
	    return FALSE;
	}

      /* In the small-PIC case, the linker converts @GOT12
	 offsets (UNSPEC 110) to possible displacements.  */
      else if (GET_CODE (disp) == CONST
	       && GET_CODE (XEXP (disp, 0)) == UNSPEC
	       && XINT (XEXP (disp, 0), 1) == 110)
	{
	  if (flag_pic != 1)
	    return FALSE;

	  pointer = TRUE;
	}

      /* Accept chunkfied literal pool symbol references.  */
      else if (GET_CODE (disp) == CONST
	       && GET_CODE (XEXP (disp, 0)) == MINUS
	       && GET_CODE (XEXP (XEXP (disp, 0), 0)) == LABEL_REF
	       && GET_CODE (XEXP (XEXP (disp, 0), 1)) == LABEL_REF)
	{
	  pointer = TRUE;
	}

      /* Likewise if a constant offset is present.  */
      else if (GET_CODE (disp) == CONST
	       && GET_CODE (XEXP (disp, 0)) == PLUS
	       && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT
	       && GET_CODE (XEXP (XEXP (disp, 0), 0)) == MINUS
	       && GET_CODE (XEXP (XEXP (XEXP (disp, 0), 0), 0)) == LABEL_REF
	       && GET_CODE (XEXP (XEXP (XEXP (disp, 0), 0), 1)) == LABEL_REF)
	{
	  pointer = TRUE;
	}

      /* We can convert literal pool addresses to
	 displacements by basing them off the base register.  */
      else
	{
	  /* In some cases, we can accept an additional
	     small constant offset.  Split these off here.  */

	  unsigned int offset = 0;

	  if (GET_CODE (disp) == CONST
	      && GET_CODE (XEXP (disp, 0)) == PLUS
	      && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
	    {
	      offset = INTVAL (XEXP (XEXP (disp, 0), 1));
	      disp = XEXP (XEXP (disp, 0), 0);
	    }

	  /* Now we must have a literal pool address.  */
	  if (GET_CODE (disp) != SYMBOL_REF
	      || !CONSTANT_POOL_ADDRESS_P (disp))
	    return FALSE;

	  /* In 64-bit PIC mode we cannot accept symbolic
	     constants in the constant pool.  */
	  if (TARGET_64BIT && flag_pic
	      && SYMBOLIC_CONST (get_pool_constant (disp)))
	    return FALSE;

	  /* If we have an offset, make sure it does not
	     exceed the size of the constant pool entry.  */
	  if (offset && offset >= GET_MODE_SIZE (get_pool_mode (disp)))
	    return FALSE;

	  /* Either base or index must be free to
	     hold the base register.  */
	  if (base && indx)
	    return FALSE;

	  /* Convert the address.  */
	  if (base)
	    indx = gen_rtx_REG (Pmode, BASE_REGISTER);
	  else
	    base = gen_rtx_REG (Pmode, BASE_REGISTER);

	  /* UNSPEC 100 marks a literal-pool-relative reference.  */
	  disp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, disp), 100);
	  disp = gen_rtx_CONST (Pmode, disp);

	  if (offset)
	    disp = plus_constant (disp, offset);

	  pointer = TRUE;
	}
    }

  if (!base && !indx)
    pointer = TRUE;

  if (out)
    {
      out->base = base;
      out->indx = indx;
      out->disp = disp;
      out->pointer = pointer;
    }

  return TRUE;
}

/* Return nonzero if ADDR is a valid memory address.
   STRICT specifies whether strict register checking applies.  */

int
legitimate_address_p (mode, addr, strict)
     enum machine_mode mode ATTRIBUTE_UNUSED;
     register rtx addr;
     int strict;
{
  return s390_decompose_address (addr, NULL, strict);
}

/* Return 1 if OP is a valid operand for the LA instruction.
   In 31-bit, we need to prove that the result is used as an
   address, as LA performs only a 31-bit addition.
*/ + +int +legitimate_la_operand_p (op) + register rtx op; +{ + struct s390_address addr; + if (!s390_decompose_address (op, &addr, FALSE)) + return FALSE; + + if (TARGET_64BIT || addr.pointer) + return TRUE; + + return FALSE; +} + +/* Return a modified variant of OP that is guaranteed to + be accepted by legitimate_la_operand_p. */ + +rtx +legitimize_la_operand (op) + register rtx op; +{ + struct s390_address addr; + if (!s390_decompose_address (op, &addr, FALSE)) + abort (); + + if (TARGET_64BIT || addr.pointer) + return op; + + if (!addr.base) + abort (); + + op = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr.base), 101); + if (addr.indx) + op = gen_rtx_PLUS (Pmode, op, addr.indx); + if (addr.disp) + op = gen_rtx_PLUS (Pmode, op, addr.disp); + + return op; +} + +/* Return a legitimate reference for ORIG (an address) using the + register REG. If REG is 0, a new pseudo is generated. + + There are two types of references that must be handled: + + 1. Global data references must load the address from the GOT, via + the PIC reg. An insn is emitted to do this load, and the reg is + returned. + + 2. Static data references, constant pool addresses, and code labels + compute the address as an offset from the GOT, whose base is in + the PIC reg. Static data objects have SYMBOL_REF_FLAG set to + differentiate them from global data objects. The returned + address is the PIC reg + an unspec constant. + + GO_IF_LEGITIMATE_ADDRESS rejects symbolic references unless the PIC + reg also appears in the address. */ + +rtx +legitimize_pic_address (orig, reg) + rtx orig; + rtx reg; +{ + rtx addr = orig; + rtx new = orig; + rtx base; + + if (GET_CODE (addr) == LABEL_REF + || (GET_CODE (addr) == SYMBOL_REF + && (SYMBOL_REF_FLAG (addr) + || CONSTANT_POOL_ADDRESS_P (addr)))) + { + /* This is a local symbol. */ + if (TARGET_64BIT) + { + /* Access local symbols PC-relative via LARL. + This is the same as in the non-PIC case, so it is + handled automatically ... 
*/ + } + else + { + /* Access local symbols relative to the literal pool. */ + + rtx temp = reg? reg : gen_reg_rtx (Pmode); + + addr = gen_rtx_UNSPEC (SImode, gen_rtvec (1, addr), 100); + addr = gen_rtx_CONST (SImode, addr); + addr = force_const_mem (SImode, addr); + emit_move_insn (temp, addr); + + base = gen_rtx_REG (Pmode, BASE_REGISTER); + base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base), 101); + new = gen_rtx_PLUS (Pmode, base, temp); + + if (reg != 0) + { + emit_move_insn (reg, new); + new = reg; + } + } + } + else if (GET_CODE (addr) == SYMBOL_REF) + { + if (reg == 0) + reg = gen_reg_rtx (Pmode); + + if (flag_pic == 1) + { + /* Assume GOT offset < 4k. This is handled the same way + in both 31- and 64-bit code (@GOT12). */ + + current_function_uses_pic_offset_table = 1; + + new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), 110); + new = gen_rtx_CONST (Pmode, new); + new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new); + new = gen_rtx_MEM (Pmode, new); + RTX_UNCHANGING_P (new) = 1; + emit_move_insn (reg, new); + new = reg; + } + else if (TARGET_64BIT) + { + /* If the GOT offset might be >= 4k, we determine the position + of the GOT entry via a PC-relative LARL (@GOTENT). */ + + rtx temp = gen_reg_rtx (Pmode); + + new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), 111); + new = gen_rtx_CONST (Pmode, new); + emit_move_insn (temp, new); + + new = gen_rtx_MEM (Pmode, temp); + RTX_UNCHANGING_P (new) = 1; + emit_move_insn (reg, new); + new = reg; + } + else + { + /* If the GOT offset might be >= 4k, we have to load it + from the literal pool (@GOT). 
*/ + + rtx temp = gen_reg_rtx (Pmode); + + current_function_uses_pic_offset_table = 1; + + addr = gen_rtx_UNSPEC (SImode, gen_rtvec (1, addr), 112); + addr = gen_rtx_CONST (SImode, addr); + addr = force_const_mem (SImode, addr); + emit_move_insn (temp, addr); + + new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp); + new = gen_rtx_MEM (Pmode, new); + RTX_UNCHANGING_P (new) = 1; + emit_move_insn (reg, new); + new = reg; + } + } + else + { + if (GET_CODE (addr) == CONST) + { + addr = XEXP (addr, 0); + if (GET_CODE (addr) == UNSPEC) + { + if (XVECLEN (addr, 0) != 1) + abort (); + switch (XINT (addr, 1)) + { + /* If someone moved an @GOT or lt-relative UNSPEC + out of the literal pool, force them back in. */ + case 100: + case 112: + case 114: + new = force_const_mem (SImode, orig); + break; + + /* @GOTENT is OK as is. */ + case 111: + break; + + /* @PLT is OK as is on 64-bit, must be converted to + lt-relative PLT on 31-bit. */ + case 113: + if (!TARGET_64BIT) + { + rtx temp = reg? reg : gen_reg_rtx (Pmode); + + addr = XVECEXP (addr, 0, 0); + addr = gen_rtx_UNSPEC (SImode, gen_rtvec (1, addr), 114); + addr = gen_rtx_CONST (SImode, addr); + addr = force_const_mem (SImode, addr); + emit_move_insn (temp, addr); + + base = gen_rtx_REG (Pmode, BASE_REGISTER); + base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base), 101); + new = gen_rtx_PLUS (Pmode, base, temp); + + if (reg != 0) + { + emit_move_insn (reg, new); + new = reg; + } + } + break; + + /* Everything else cannot happen. */ + default: + abort (); + } + } + else if (GET_CODE (addr) != PLUS) + abort (); + } + if (GET_CODE (addr) == PLUS) + { + rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1); + /* Check first to see if this is a constant offset + from a local symbol reference. 
*/ + if ((GET_CODE (op0) == LABEL_REF + || (GET_CODE (op0) == SYMBOL_REF + && (SYMBOL_REF_FLAG (op0) + || CONSTANT_POOL_ADDRESS_P (op0)))) + && GET_CODE (op1) == CONST_INT) + { + if (TARGET_64BIT) + { + if (INTVAL (op1) & 1) + { + /* LARL can't handle odd offsets, so emit a + pair of LARL and LA. */ + rtx temp = reg? reg : gen_reg_rtx (Pmode); + + if (INTVAL (op1) < 0 || INTVAL (op1) >= 4096) + { + int even = INTVAL (op1) - 1; + op0 = gen_rtx_PLUS (Pmode, op0, GEN_INT (even)); + op0 = gen_rtx_CONST (Pmode, op0); + op1 = GEN_INT (1); + } + + emit_move_insn (temp, op0); + new = gen_rtx_PLUS (Pmode, temp, op1); + + if (reg != 0) + { + emit_move_insn (reg, new); + new = reg; + } + } + else + { + /* If the offset is even, we can just use LARL. + This will happen automatically. */ + } + } + else + { + /* Access local symbols relative to the literal pool. */ + + rtx temp = reg? reg : gen_reg_rtx (Pmode); + + addr = gen_rtx_UNSPEC (SImode, gen_rtvec (1, op0), 100); + addr = gen_rtx_PLUS (SImode, addr, op1); + addr = gen_rtx_CONST (SImode, addr); + addr = force_const_mem (SImode, addr); + emit_move_insn (temp, addr); + + base = gen_rtx_REG (Pmode, BASE_REGISTER); + base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base), 101); + new = gen_rtx_PLUS (Pmode, base, temp); + + if (reg != 0) + { + emit_move_insn (reg, new); + new = reg; + } + } + } + + /* Now, check whether it is an LT-relative symbol plus offset + that was pulled out of the literal pool. Force it back in. */ + + else if (GET_CODE (op0) == UNSPEC + && GET_CODE (op1) == CONST_INT) + { + if (XVECLEN (op0, 0) != 1) + abort (); + if (XINT (op0, 1) != 100) + abort (); + + new = force_const_mem (SImode, orig); + } + + /* Otherwise, compute the sum. */ + else + { + base = legitimize_pic_address (XEXP (addr, 0), reg); + new = legitimize_pic_address (XEXP (addr, 1), + base == reg ? 
NULL_RTX : reg); + if (GET_CODE (new) == CONST_INT) + new = plus_constant (base, INTVAL (new)); + else + { + if (GET_CODE (new) == PLUS && CONSTANT_P (XEXP (new, 1))) + { + base = gen_rtx_PLUS (Pmode, base, XEXP (new, 0)); + new = XEXP (new, 1); + } + new = gen_rtx_PLUS (Pmode, base, new); + } + + if (GET_CODE (new) == CONST) + new = XEXP (new, 0); + new = force_operand (new, 0); + } + } + } + return new; +} + +/* Emit insns to move operands[1] into operands[0]. */ + +void +emit_pic_move (operands, mode) + rtx *operands; + enum machine_mode mode ATTRIBUTE_UNUSED; +{ + rtx temp = no_new_pseudos ? operands[0] : gen_reg_rtx (Pmode); + + if (GET_CODE (operands[0]) == MEM && SYMBOLIC_CONST (operands[1])) + operands[1] = force_reg (Pmode, operands[1]); + else + operands[1] = legitimize_pic_address (operands[1], temp); +} + +/* Try machine-dependent ways of modifying an illegitimate address X + to be legitimate. If we find one, return the new, valid address. + + OLDX is the address as it was before break_out_memory_refs was called. + In some cases it is useful to look at this to decide what needs to be done. + + MODE is the mode of the operand pointed to by X. + + When -fpic is used, special handling is needed for symbolic references. + See comments by legitimize_pic_address for details. 
*/

rtx
legitimize_address (x, oldx, mode)
     register rtx x;
     register rtx oldx ATTRIBUTE_UNUSED;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  rtx constant_term = const0_rtx;

  /* PIC addresses involving symbolic constants must go through
     legitimize_pic_address first; if the result is already valid
     we are done.  */
  if (flag_pic)
    {
      if (SYMBOLIC_CONST (x)
          || (GET_CODE (x) == PLUS
              && (SYMBOLIC_CONST (XEXP (x, 0))
                  || SYMBOLIC_CONST (XEXP (x, 1)))))
	  x = legitimize_pic_address (x, 0);

      if (legitimate_address_p (mode, x, FALSE))
	return x;
    }

  /* Strip off any constant displacement; it is re-attached below
     so only the variable part needs to be forced into a register.  */
  x = eliminate_constant_term (x, &constant_term);

  if (GET_CODE (x) == PLUS)
    {
      /* Force the non-register operand of the PLUS into a fresh
         pseudo so the sum becomes a reg+reg address.  */
      if (GET_CODE (XEXP (x, 0)) == REG)
	{
	  register rtx temp = gen_reg_rtx (Pmode);
	  register rtx val = force_operand (XEXP (x, 1), temp);
	  if (val != temp)
	    emit_move_insn (temp, val);

	  x = gen_rtx_PLUS (Pmode, XEXP (x, 0), temp);
	}

      else if (GET_CODE (XEXP (x, 1)) == REG)
	{
	  register rtx temp = gen_reg_rtx (Pmode);
	  register rtx val = force_operand (XEXP (x, 0), temp);
	  if (val != temp)
	    emit_move_insn (temp, val);

	  x = gen_rtx_PLUS (Pmode, temp, XEXP (x, 1));
	}
    }

  /* Re-attach the constant displacement stripped off above.  */
  if (constant_term != const0_rtx)
    x = gen_rtx_PLUS (Pmode, x, constant_term);

  return x;
}

/* In the name of slightly smaller debug output, and to cater to
   general assembler lossage, recognize various UNSPEC sequences
   and turn them back into a direct symbol reference.  */

rtx
s390_simplify_dwarf_addr (orig_x)
     rtx orig_x;
{
  rtx x = orig_x, y;

  /* Only MEMs wrapping GOT-style UNSPECs are simplified; anything
     else is returned unchanged.  */
  if (GET_CODE (x) != MEM)
    return orig_x;

  x = XEXP (x, 0);
  /* Case 1: (mem (plus (reg:GOT-ptr) (const (unspec 110 ...))))
     — a @GOT12 reference; strip it back to the bare symbol.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST
      && GET_CODE (XEXP (x, 0)) == REG
      && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
    {
      y = XEXP (XEXP (x, 1), 0);
      if (GET_CODE (y) == UNSPEC
	  && XINT (y, 1) == 110)
	return XVECEXP (y, 0, 0);
      return orig_x;
    }

  /* Case 2: (mem (const (unspec 111 ...))) — a @GOTENT reference.  */
  if (GET_CODE (x) == CONST)
    {
      y = XEXP (x, 0);
      if (GET_CODE (y) == UNSPEC
	  && XINT (y, 1) == 111)
	return XVECEXP (y, 0, 0);
      return orig_x;
    }

  return orig_x;
}

/* Output symbolic constant X in assembler syntax to
   stdio stream FILE.
*/

void
s390_output_symbolic_const (file, x)
     FILE *file;
     rtx x;
{
  switch (GET_CODE (x))
    {
    /* Wrappers carry no syntax of their own; recurse on the operand.  */
    case CONST:
    case ZERO_EXTEND:
    case SIGN_EXTEND:
      s390_output_symbolic_const (file, XEXP (x, 0));
      break;

    case PLUS:
      s390_output_symbolic_const (file, XEXP (x, 0));
      fprintf (file, "+");
      s390_output_symbolic_const (file, XEXP (x, 1));
      break;

    case MINUS:
      s390_output_symbolic_const (file, XEXP (x, 0));
      fprintf (file, "-");
      s390_output_symbolic_const (file, XEXP (x, 1));
      break;

    /* Plain constants and symbols use the generic printer.  */
    case CONST_INT:
    case LABEL_REF:
    case CODE_LABEL:
    case SYMBOL_REF:
      output_addr_const (file, x);
      break;

    /* UNSPECs encode the relocation variant; print the wrapped
       symbol followed by the matching assembler suffix.  */
    case UNSPEC:
      if (XVECLEN (x, 0) != 1)
        output_operand_lossage ("invalid UNSPEC as operand (1)");
      switch (XINT (x, 1))
        {
        case 100:
	  /* Literal-pool-relative reference: symbol minus pool label.  */
	  s390_output_symbolic_const (file, XVECEXP (x, 0, 0));
	  fprintf (file, "-.LT%X", s390_function_count);
	  break;
	case 110:
	  s390_output_symbolic_const (file, XVECEXP (x, 0, 0));
	  fprintf (file, "@GOT12");
	  break;
	case 111:
	  s390_output_symbolic_const (file, XVECEXP (x, 0, 0));
	  fprintf (file, "@GOTENT");
	  break;
	case 112:
	  s390_output_symbolic_const (file, XVECEXP (x, 0, 0));
	  fprintf (file, "@GOT");
	  break;
	case 113:
	  s390_output_symbolic_const (file, XVECEXP (x, 0, 0));
	  fprintf (file, "@PLT");
	  break;
	case 114:
	  /* PLT entry, addressed relative to the literal pool.  */
	  s390_output_symbolic_const (file, XVECEXP (x, 0, 0));
	  fprintf (file, "@PLT-.LT%X", s390_function_count);
	  break;
	default:
	  output_operand_lossage ("invalid UNSPEC as operand (2)");
	  break;
        }
      break;

    default:
      fatal_insn ("UNKNOWN in s390_output_symbolic_const !?", x);
      break;
    }
}

/* Output address operand ADDR in assembler syntax to
   stdio stream FILE.
*/

void
print_operand_address (file, addr)
     FILE *file;
     rtx addr;
{
  struct s390_address ad;

  /* Split ADDR into base/index/displacement; TRUE permits symbolic
     displacements here since they are printed via
     s390_output_symbolic_const below.  */
  if (!s390_decompose_address (addr, &ad, TRUE))
    output_operand_lossage ("Cannot decompose address.");

  /* Assembler syntax is "disp(index,base)"; a missing displacement
     is printed as explicit 0, missing registers are simply omitted.  */
  if (ad.disp)
    s390_output_symbolic_const (file, ad.disp);
  else
    fprintf (file, "0");

  if (ad.base && ad.indx)
    fprintf (file, "(%s,%s)", reg_names[REGNO (ad.indx)],
                              reg_names[REGNO (ad.base)]);
  else if (ad.base)
    fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
}

/* Output operand X in assembler syntax to stdio stream FILE.
   CODE specified the format flag.  The following format flags
   are recognized:

   'C': print opcode suffix for branch condition.
   'D': print opcode suffix for inverse branch condition.
   'O': print only the displacement of a memory reference.
   'R': print only the base register of a memory reference.
   'N': print the second word of a DImode operand.
   'M': print the second word of a TImode operand.

   'b': print integer X as if it's an unsigned byte.
   'x': print integer X as if it's an unsigned word.
   'h': print integer X as if it's a signed word.
*/

void
print_operand (file, x, code)
     FILE *file;
     rtx x;
     int code;
{
  switch (code)
    {
    case 'C':
      /* NOTE(review): mnemonic string is used directly as the format
         string; safe only as long as mnemonics never contain '%'.  */
      fprintf (file, s390_branch_condition_mnemonic (x, FALSE));
      return;

    case 'D':
      fprintf (file, s390_branch_condition_mnemonic (x, TRUE));
      return;

    case 'O':
      /* Displacement only; the operand must be an index-free MEM.  */
      {
        struct s390_address ad;

        if (GET_CODE (x) != MEM
            || !s390_decompose_address (XEXP (x, 0), &ad, TRUE)
            || ad.indx)
          abort ();

        if (ad.disp)
          s390_output_symbolic_const (file, ad.disp);
        else
          fprintf (file, "0");
      }
      return;

    case 'R':
      /* Base register only; the operand must be an index-free MEM.  */
      {
        struct s390_address ad;

        if (GET_CODE (x) != MEM
            || !s390_decompose_address (XEXP (x, 0), &ad, TRUE)
            || ad.indx)
          abort ();

        if (ad.base)
          fprintf (file, "%s", reg_names[REGNO (ad.base)]);
        else
          fprintf (file, "0");
      }
      return;

    case 'N':
      /* Second word of a DImode operand: next register, or MEM + 4.  */
      if (GET_CODE (x) == REG)
	x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
      else if (GET_CODE (x) == MEM)
	x = change_address (x, VOIDmode, plus_constant (XEXP (x, 0), 4));
      else
        abort ();
      break;

    case 'M':
      /* Second word of a TImode operand: next register, or MEM + 8.  */
      if (GET_CODE (x) == REG)
	x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
      else if (GET_CODE (x) == MEM)
	x = change_address (x, VOIDmode, plus_constant (XEXP (x, 0), 8));
      else
        abort ();
      break;
    }

  /* Fall through: print the (possibly adjusted) operand itself.  */
  switch (GET_CODE (x))
    {
    case REG:
      fprintf (file, "%s", reg_names[REGNO (x)]);
      break;

    case MEM:
      output_address (XEXP (x, 0));
      break;

    case CONST:
    case CODE_LABEL:
    case LABEL_REF:
    case SYMBOL_REF:
      s390_output_symbolic_const (file, x);
      break;

    case CONST_INT:
      /* 'b' masks to an unsigned byte, 'x' to an unsigned halfword;
         'h' sign-extends the low halfword via the xor/sub trick.  */
      if (code == 'b')
        fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xff);
      else if (code == 'x')
        fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xffff);
      else if (code == 'h')
        fprintf (file, HOST_WIDE_INT_PRINT_DEC, ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
      else
        fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
      break;

    case CONST_DOUBLE:
      /* Only integer CONST_DOUBLEs (VOIDmode) are supported, and only
         with an explicit size code.  */
      if (GET_MODE (x) != VOIDmode)
        abort ();
      if (code == 'b')
        fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xff);
      else if (code == 'x')
        fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xffff);
      else if (code == 'h')
        fprintf (file, HOST_WIDE_INT_PRINT_DEC, ((CONST_DOUBLE_LOW (x) & 0xffff) ^ 0x8000) - 0x8000);
      else
        abort ();
      break;

    default:
      fatal_insn ("UNKNOWN in print_operand !?", x);
      break;
    }
}

/* Target hook for assembling integer objects.  We need to define it
   here to work around a bug in some versions of GAS, which couldn't
   handle values smaller than INT_MIN when printed in decimal.  */

static bool
s390_assemble_integer (x, size, aligned_p)
     rtx x;
     unsigned int size;
     int aligned_p;
{
  /* Emit problematic 8-byte constants in hex instead of decimal.  */
  if (size == 8 && aligned_p
      && GET_CODE (x) == CONST_INT && INTVAL (x) < INT_MIN)
    {
      fputs ("\t.quad\t", asm_out_file);
      fprintf (asm_out_file, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
      putc ('\n', asm_out_file);
      return true;
    }
  return default_assemble_integer (x, size, aligned_p);
}


#define DEBUG_SCHED 0

/* Returns true if register REGNO is used for forming
   a memory address in expression X.  */

static int
reg_used_in_mem_p (regno, x)
     int regno;
     rtx x;
{
  enum rtx_code code = GET_CODE (x);
  int i, j;
  const char *fmt;

  /* A MEM's address, or the target selection of an indirect jump,
     counts as address usage.  */
  if (code == MEM)
    {
      if (refers_to_regno_p (regno, regno+1,
			     XEXP (x, 0), 0))
	return 1;
    }
  else if (code == SET
	   && GET_CODE (SET_DEST (x)) == PC)
    {
      if (refers_to_regno_p (regno, regno+1,
			     SET_SRC (x), 0))
	return 1;
    }

  /* Recurse over all sub-expressions.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e'
	  && reg_used_in_mem_p (regno, XEXP (x, i)))
	return 1;

      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  if (reg_used_in_mem_p (regno, XVECEXP (x, i, j)))
	    return 1;
    }
  return 0;
}

/* Returns true if expression DEP_RTX sets an address register
   used by instruction INSN to address memory.
*/

static int
addr_generation_dependency_p (dep_rtx, insn)
     rtx dep_rtx;
     rtx insn;
{
  rtx target, pat;

  if (GET_CODE (dep_rtx) == SET)
    {
      target = SET_DEST (dep_rtx);

      if (GET_CODE (target) == REG)
	{
	  int regno = REGNO (target);

	  /* For LA-type insns only the address computation in the SET
	     source matters; unwrap a PARALLEL (set + clobber) first.  */
	  if (get_attr_type (insn) == TYPE_LA)
	    {
	      pat = PATTERN (insn);
	      if (GET_CODE (pat) == PARALLEL)
		{
		  if (XVECLEN (pat, 0) != 2)
		    abort();
		  pat = XVECEXP (pat, 0, 0);
		}
	      if (GET_CODE (pat) == SET)
		return refers_to_regno_p (regno, regno+1, SET_SRC (pat), 0);
	      else
		abort();
	    }
	  /* For memory-type insns, check every address in the pattern.  */
	  else if (get_attr_atype (insn) == ATYPE_MEM)
	    return reg_used_in_mem_p (regno, PATTERN (insn));
	}
    }
  return 0;
}


/* Return the modified cost of the dependency of instruction INSN
   on instruction DEP_INSN through the link LINK.  COST is the
   default cost of that dependency.

   Data dependencies are all handled without delay.  However, if a
   register is modified and subsequently used as base or index
   register of a memory reference, at least 4 cycles need to pass
   between setting and using the register to avoid pipeline stalls.
   An exception is the LA instruction.  An address generated by LA can
   be used by introducing only a one cycle stall on the pipeline.  */

static int
s390_adjust_cost (insn, link, dep_insn, cost)
     rtx insn;
     rtx link;
     rtx dep_insn;
     int cost;
{
  rtx dep_rtx;
  int i;

  /* If the dependence is an anti-dependence, there is no cost.  For an
     output dependence, there is sometimes a cost, but it doesn't seem
     worth handling those few cases.  */

  if (REG_NOTE_KIND (link) != 0)
    return 0;

  /* If we can't recognize the insns, we can't really do anything.  */
  if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
    return cost;

  dep_rtx = PATTERN (dep_insn);

  if (GET_CODE (dep_rtx) == SET)
    {
      if (addr_generation_dependency_p (dep_rtx, insn))
	{
	  /* +1 cycle after LA, +4 after anything else.  */
	  cost += (get_attr_type (dep_insn) == TYPE_LA) ? 1 : 4;
	  if (DEBUG_SCHED)
	    {
	      fprintf (stderr, "\n\nAddress dependency detected: cost %d\n",
		       cost);
	      debug_rtx (dep_insn);
	      debug_rtx (insn);
	    }
	}
    }
  else if (GET_CODE (dep_rtx) == PARALLEL)
    {
      /* Check each element of the PARALLEL individually.  */
      for (i = 0; i < XVECLEN (dep_rtx, 0); i++)
	{
	  if (addr_generation_dependency_p (XVECEXP (dep_rtx, 0, i),
					    insn))
	    {
	      cost += (get_attr_type (dep_insn) == TYPE_LA) ? 1 : 4;
	      if (DEBUG_SCHED)
		{
		  fprintf (stderr, "\n\nAddress dependency detected: cost %d\n"
			   ,cost);
		  debug_rtx (dep_insn);
		  debug_rtx (insn);
		}
	    }
	}
    }

  return cost;
}


/* A C statement (sans semicolon) to update the integer scheduling priority
   INSN_PRIORITY (INSN).  Reduce the priority to execute the INSN earlier,
   increase the priority to execute INSN later.  Do not define this macro if
   you do not need to adjust the scheduling priorities of insns.

   A LA instruction may be scheduled later, since the pipeline bypasses the
   calculated value.  */

static int
s390_adjust_priority (insn, priority)
     rtx insn ATTRIBUTE_UNUSED;
     int priority;
{
  if (! INSN_P (insn))
    return priority;

  if (GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return priority;

  switch (get_attr_type (insn))
    {
    default:
      break;

    case TYPE_LA:
      /* Boost LA priority (scheduled later) unless it would overflow.  */
      if (priority >= 0 && priority < 0x01000000)
	priority <<= 3;
      break;
    case TYPE_LM:
      /* LM in epilogue should never be scheduled.  This
	 is due to literal access done in function body.
	 The usage of register 13 is not mentioned explicitly,
	 leading to scheduling 'LM' across this instructions.
      */
      priority = 0x7fffffff;
      break;
    }

  return priority;
}


/* Split all branches that exceed the maximum distance.  */

static void
s390_split_branches (void)
{
  rtx temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
  rtx insn, pat, label, target, jump, tmp;

  /* In 64-bit mode we can jump +- 4GB.  */

  if (TARGET_64BIT)
    return;

  /* Find all branches that exceed 64KB, and split them.  */

  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) != JUMP_INSN)
	continue;

      pat = PATTERN (insn);
      if (GET_CODE (pat) != SET)
	continue;

      /* Extract the branch target label from either an unconditional
         jump or either arm of a conditional one.  */
      if (GET_CODE (SET_SRC (pat)) == LABEL_REF)
	{
	  label = SET_SRC (pat);
	}
      else if (GET_CODE (SET_SRC (pat)) == IF_THEN_ELSE)
	{
	  if (GET_CODE (XEXP (SET_SRC (pat), 1)) == LABEL_REF)
	    label = XEXP (SET_SRC (pat), 1);
	  else if (GET_CODE (XEXP (SET_SRC (pat), 2)) == LABEL_REF)
	    label = XEXP (SET_SRC (pat), 2);
	  else
	    continue;
	}
      else
	continue;

      /* Length 4 means the relative branch form still reaches.  */
      if (get_attr_length (insn) == 4)
	continue;

      /* Load the target address into TEMP_REG: PIC code keeps a
         pool-relative offset and adds the base register; non-PIC
         loads the absolute address from the literal pool.  */
      if (flag_pic)
	{
	  target = gen_rtx_UNSPEC (SImode, gen_rtvec (1, label), 100);
	  target = gen_rtx_CONST (SImode, target);
	  target = force_const_mem (SImode, target);
	  jump = gen_rtx_REG (Pmode, BASE_REGISTER);
	  jump = gen_rtx_PLUS (Pmode, jump, temp_reg);
	}
      else
	{
	  target = force_const_mem (Pmode, label);
	  jump = temp_reg;
	}

      /* Preserve the original condition for conditional branches.  */
      if (GET_CODE (SET_SRC (pat)) == IF_THEN_ELSE)
	{
	  if (GET_CODE (XEXP (SET_SRC (pat), 1)) == LABEL_REF)
	    jump = gen_rtx_IF_THEN_ELSE (VOIDmode, XEXP (SET_SRC (pat), 0),
					 jump, pc_rtx);
	  else
	    jump = gen_rtx_IF_THEN_ELSE (VOIDmode, XEXP (SET_SRC (pat), 0),
					 pc_rtx, jump);
	}

      /* Replace the original branch by address load + indirect jump.  */
      tmp = emit_insn_before (gen_rtx_SET (Pmode, temp_reg, target), insn);
      INSN_ADDRESSES_NEW (tmp, -1);

      tmp = emit_jump_insn_before (gen_rtx_SET (VOIDmode, pc_rtx, jump), insn);
      INSN_ADDRESSES_NEW (tmp, -1);

      remove_insn (insn);
      insn = tmp;
    }
}


/* Find a literal pool symbol referenced in RTX X, and store
   it at REF.  Will abort if X contains references to more than
   one such pool symbol; multiple references to the same symbol
   are allowed, however.

   The rtx pointed to by REF must be initialized to NULL_RTX
   by the caller before calling this routine.
*/

static void
find_constant_pool_ref (x, ref)
     rtx x;
     rtx *ref;
{
  int i, j;
  const char *fmt;

  if (GET_CODE (x) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (x))
    {
      /* Record the first pool symbol found; any different second
         one violates the single-symbol precondition.  */
      if (*ref == NULL_RTX)
        *ref = x;
      else if (*ref != x)
        abort();
    }

  /* Recurse over all sub-expressions.  */
  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          find_constant_pool_ref (XEXP (x, i), ref);
        }
      else if (fmt[i] == 'E')
        {
          for (j = 0; j < XVECLEN (x, i); j++)
            find_constant_pool_ref (XVECEXP (x, i, j), ref);
        }
    }
}

/* Replace every reference to the literal pool symbol REF
   in X by the address ADDR.  Fix up MEMs as required.  */

static void
replace_constant_pool_ref (x, ref, addr)
     rtx *x;
     rtx ref;
     rtx addr;
{
  int i, j;
  const char *fmt;

  /* A bare top-level REF cannot be rewritten in place.  */
  if (*x == ref)
    abort ();

  /* Literal pool references can only occur inside a MEM ...  */
  if (GET_CODE (*x) == MEM)
    {
      rtx memref = XEXP (*x, 0);

      /* Direct reference: (mem ref) -> (mem addr).  */
      if (memref == ref)
	{
	  *x = replace_equiv_address (*x, addr);
	  return;
	}

      /* Reference plus constant offset: keep the offset.  */
      if (GET_CODE (memref) == CONST
	  && GET_CODE (XEXP (memref, 0)) == PLUS
	  && GET_CODE (XEXP (XEXP (memref, 0), 1)) == CONST_INT
	  && XEXP (XEXP (memref, 0), 0) == ref)
	{
	  HOST_WIDE_INT off = INTVAL (XEXP (XEXP (memref, 0), 1));
	  *x = replace_equiv_address (*x, plus_constant (addr, off));
	  return;
	}
    }

  /* ... or a load-address type pattern.  */
  if (GET_CODE (*x) == SET)
    {
      rtx addrref = SET_SRC (*x);

      if (addrref == ref)
	{
	  SET_SRC (*x) = addr;
	  return;
	}

      if (GET_CODE (addrref) == CONST
	  && GET_CODE (XEXP (addrref, 0)) == PLUS
	  && GET_CODE (XEXP (XEXP (addrref, 0), 1)) == CONST_INT
	  && XEXP (XEXP (addrref, 0), 0) == ref)
	{
	  HOST_WIDE_INT off = INTVAL (XEXP (XEXP (addrref, 0), 1));
	  SET_SRC (*x) = plus_constant (addr, off);
	  return;
	}
    }

  /* Recurse over all sub-expressions.  */
  fmt = GET_RTX_FORMAT (GET_CODE (*x));
  for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          replace_constant_pool_ref (&XEXP (*x, i), ref, addr);
        }
      else if (fmt[i] == 'E')
        {
          for (j = 0; j < XVECLEN (*x, i); j++)
            replace_constant_pool_ref (&XVECEXP (*x, i, j), ref, addr);
        }
    }
}

/* We keep a list of constants we which we have to add to internal
   constant tables in the middle of large functions.  */

/* Machine modes supported in pool chunks, in decreasing alignment
   requirement order (the dump routine relies on this ordering).  */
#define NR_C_MODES 6
enum machine_mode constant_modes[NR_C_MODES] =
{
  DFmode, DImode,
  SFmode, SImode,
  HImode,
  QImode
};

/* Per-mode generators for the .md consttable patterns; indexed in
   parallel with constant_modes.  */
rtx (*gen_consttable[NR_C_MODES])(rtx) =
{
  gen_consttable_df, gen_consttable_di,
  gen_consttable_sf, gen_consttable_si,
  gen_consttable_hi,
  gen_consttable_qi
};

/* One pool entry: constant VALUE labelled by LABEL, singly linked.  */
struct constant
{
  struct constant *next;
  rtx value;
  rtx label;
};

/* One pool chunk: the insn range it covers, its per-mode constant
   lists, its start label and its total size in bytes.  */
struct constant_pool
{
  struct constant_pool *next;
  rtx first_insn;
  rtx last_insn;

  struct constant *constants[NR_C_MODES];
  rtx label;
  int size;
};

static struct constant_pool *s390_start_pool PARAMS ((struct constant_pool **, rtx));
static void s390_end_pool PARAMS ((struct constant_pool *, rtx));
static struct constant_pool *s390_find_pool PARAMS ((struct constant_pool *, rtx));
static rtx s390_add_pool PARAMS ((struct constant_pool *, rtx, enum machine_mode));
static rtx s390_dump_pool PARAMS ((struct constant_pool *));
static void s390_free_pool PARAMS ((struct constant_pool *));

/* Create new constant pool covering instructions starting at INSN
   and chain it to the end of POOL_LIST.
*/

static struct constant_pool *
s390_start_pool (pool_list, insn)
     struct constant_pool **pool_list;
     rtx insn;
{
  struct constant_pool *pool, **prev;
  int i;

  pool = (struct constant_pool *) xmalloc (sizeof *pool);
  pool->next = NULL;
  for (i = 0; i < NR_C_MODES; i++)
    pool->constants[i] = NULL;

  pool->label = gen_label_rtx ();
  pool->first_insn = insn;
  /* last_insn stays NULL (open-ended) until s390_end_pool is called.  */
  pool->last_insn = NULL_RTX;
  pool->size = 0;

  /* Append to the end of POOL_LIST to keep pools in insn order.  */
  for (prev = pool_list; *prev; prev = &(*prev)->next)
    ;
  *prev = pool;

  return pool;
}

/* End range of instructions covered by POOL at INSN.  */

static void
s390_end_pool (pool, insn)
     struct constant_pool *pool;
     rtx insn;
{
  pool->last_insn = insn;
}

/* Return pool out of POOL_LIST that covers INSN.  */

static struct constant_pool *
s390_find_pool (pool_list, insn)
     struct constant_pool *pool_list;
     rtx insn;
{
  int addr = INSN_ADDRESSES (INSN_UID (insn));
  struct constant_pool *pool;

  /* Insns without a recorded address belong to no pool.  */
  if (addr == -1)
    return NULL;

  /* A pool covers [first_insn, last_insn); an open-ended pool
     (last_insn == NULL) covers everything from first_insn on.  */
  for (pool = pool_list; pool; pool = pool->next)
    if (INSN_ADDRESSES (INSN_UID (pool->first_insn)) <= addr
	&& (pool->last_insn == NULL_RTX
	    || INSN_ADDRESSES (INSN_UID (pool->last_insn)) > addr))
      break;

  return pool;
}

/* Add constant VAL of mode MODE to the constant pool POOL.
   Return an RTX describing the distance from the start of
   the pool to the location of the new constant.
*/

static rtx
s390_add_pool (pool, val, mode)
     struct constant_pool *pool;
     rtx val;
     enum machine_mode mode;
{
  struct constant *c;
  rtx offset;
  int i;

  /* Map MODE to its slot in the per-mode constant lists.  */
  for (i = 0; i < NR_C_MODES; i++)
    if (constant_modes[i] == mode)
      break;
  if (i == NR_C_MODES)
    abort ();

  /* Reuse an existing entry for an equal constant.  */
  for (c = pool->constants[i]; c != NULL; c = c->next)
    if (rtx_equal_p (val, c->value))
      break;

  if (c == NULL)
    {
      c = (struct constant *) xmalloc (sizeof *c);
      c->value = val;
      c->label = gen_label_rtx ();
      c->next = pool->constants[i];
      pool->constants[i] = c;
      pool->size += GET_MODE_SIZE (mode);
    }

  /* The offset is expressed symbolically as (entry label - pool
     label); the assembler resolves it once the pool is laid out.  */
  offset = gen_rtx_MINUS (Pmode, gen_rtx_LABEL_REF (Pmode, c->label),
				 gen_rtx_LABEL_REF (Pmode, pool->label));
  offset = gen_rtx_CONST (Pmode, offset);
  return offset;
}

/* Dump out the constants in POOL.  */

static rtx
s390_dump_pool (pool)
     struct constant_pool *pool;
{
  struct constant *c;
  rtx insn;
  int i;

  /* Select location to put literal pool.  */
  if (TARGET_64BIT)
    insn = get_last_insn ();
  else
    insn = pool->last_insn? pool->last_insn : get_last_insn ();

  /* Pool start insn switches to proper section
     and guarantees necessary alignment.  */
  if (TARGET_64BIT)
    insn = emit_insn_after (gen_pool_start_64 (), insn);
  else
    insn = emit_insn_after (gen_pool_start_31 (), insn);
  INSN_ADDRESSES_NEW (insn, -1);

  insn = emit_label_after (pool->label, insn);
  INSN_ADDRESSES_NEW (insn, -1);

  /* Dump constants in descending alignment requirement order,
     ensuring proper alignment for every constant.  */
  for (i = 0; i < NR_C_MODES; i++)
    for (c = pool->constants[i]; c; c = c->next)
      {
	insn = emit_label_after (c->label, insn);
	INSN_ADDRESSES_NEW (insn, -1);
	insn = emit_insn_after (gen_consttable[i] (c->value), insn);
	INSN_ADDRESSES_NEW (insn, -1);
      }

  /* Pool end insn switches back to previous section
     and guarantees necessary alignment.  */
  if (TARGET_64BIT)
    insn = emit_insn_after (gen_pool_end_64 (), insn);
  else
    insn = emit_insn_after (gen_pool_end_31 (), insn);
  INSN_ADDRESSES_NEW (insn, -1);

  insn = emit_barrier_after (insn);
  INSN_ADDRESSES_NEW (insn, -1);

  return insn;
}

/* Free all memory used by POOL.  */

static void
s390_free_pool (pool)
     struct constant_pool *pool;
{
  int i;

  for (i = 0; i < NR_C_MODES; i++)
    {
      struct constant *c = pool->constants[i];
      while (c != NULL)
	{
	  struct constant *next = c->next;
	  free (c);
	  c = next;
	}
    }

  free (pool);
}

/* Used in s390.md for branch length calculation.  */
int s390_pool_overflow = 0;

/* Chunkify the literal pool if required.  */

/* Size thresholds (in bytes) for starting/forcing a new pool chunk.  */
#define S390_POOL_CHUNK_MIN	0xc00
#define S390_POOL_CHUNK_MAX	0xe00

static void
s390_chunkify_pool (void)
{
  /* In 31-bit mode the return register doubles as literal pointer
     while chunking; 64-bit mode keeps using the base register.  */
  rtx base_reg = gen_rtx_REG (Pmode,
                              TARGET_64BIT? BASE_REGISTER : RETURN_REGNUM);

  struct constant_pool *curr_pool = NULL, *pool_list = NULL;
  int extra_size = 0;
  bitmap far_labels;
  rtx insn;

  /* Do we need to chunkify the literal pool?  */

  if (get_pool_size () < S390_POOL_CHUNK_MAX)
    return;

  /* Scan all insns and move literals to pool chunks.
     Replace all occurrences of literal pool references
     by explicit references to pool chunk entries.  */

  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == INSN)
	{
	  rtx addr, pool_ref = NULL_RTX;
	  find_constant_pool_ref (PATTERN (insn), &pool_ref);
	  if (pool_ref)
	    {
	      if (!curr_pool)
		curr_pool = s390_start_pool (&pool_list, insn);

	      addr = s390_add_pool (curr_pool, get_pool_constant (pool_ref),
				    get_pool_mode (pool_ref));

	      /* Rewrite the reference as base_reg + chunk offset.  */
	      addr = gen_rtx_PLUS (Pmode, base_reg, addr);
	      replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
	      INSN_CODE (insn) = -1;
	    }
	}

      if (!curr_pool
	  || INSN_ADDRESSES_SIZE () <= (size_t) INSN_UID (insn)
	  || INSN_ADDRESSES (INSN_UID (insn)) == -1)
	continue;

      if (TARGET_64BIT)
	{
	  /* 64-bit: close the chunk purely on accumulated pool size.  */
	  if (curr_pool->size < S390_POOL_CHUNK_MAX)
	    continue;

	  s390_end_pool (curr_pool, insn);
	  curr_pool = NULL;
	}
      else
	{
	  /* 31-bit: also track the code distance spanned, since the
	     12-bit displacement limits base-relative addressing.  */
	  int chunk_size = INSN_ADDRESSES (INSN_UID (insn))
			   - INSN_ADDRESSES (INSN_UID (curr_pool->first_insn))
			   + extra_size;

	  /* We will later have to insert base register reload insns.
	     Those will have an effect on code size, which we need to
	     consider here.  This calculation makes rather pessimistic
	     worst-case assumptions.  */
	  if (GET_CODE (insn) == CODE_LABEL
	      || GET_CODE (insn) == JUMP_INSN)
	    extra_size += 6;
	  else if (GET_CODE (insn) == CALL_INSN)
	    extra_size += 4;

	  if (chunk_size < S390_POOL_CHUNK_MIN
	      && curr_pool->size < S390_POOL_CHUNK_MIN)
	    continue;

	  /* Pool chunks can only be inserted after BARRIERs ...  */
	  if (GET_CODE (insn) == BARRIER)
	    {
	      s390_end_pool (curr_pool, insn);
	      curr_pool = NULL;
	      extra_size = 0;
	    }

	  /* ... so if we don't find one in time, create one.  */
	  else if ((chunk_size > S390_POOL_CHUNK_MAX
		    || curr_pool->size > S390_POOL_CHUNK_MAX)
		   && (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN))
	    {
	      /* Emit jump-around + barrier so the pool can be placed
	         in the fall-through path without being executed.  */
	      int addr = INSN_ADDRESSES (INSN_UID (insn));
	      rtx label, jump, barrier;

	      label = gen_label_rtx ();
	      jump = emit_jump_insn_after (gen_jump (label), insn);
	      barrier = emit_barrier_after (jump);
	      insn = emit_label_after (label, barrier);
	      JUMP_LABEL (jump) = label;
	      LABEL_NUSES (label) = 1;

	      INSN_ADDRESSES_NEW (jump, addr+1);
	      INSN_ADDRESSES_NEW (barrier, addr+1);
	      INSN_ADDRESSES_NEW (insn, -1);

	      s390_end_pool (curr_pool, barrier);
	      curr_pool = NULL;
	      extra_size = 0;
	    }
	}
    }

  /* Dump out all literal pools.  */

  for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
    s390_dump_pool (curr_pool);


  /* Find all labels that are branched into
     from an insn belonging to a different chunk.  */

  far_labels = BITMAP_XMALLOC ();

  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    {
      /* Labels marked with LABEL_PRESERVE_P can be target
	 of non-local jumps, so we have to mark them.
	 The same holds for named labels.

	 Don't do that, however, if it is the label before
	 a jump table.  */

      if (GET_CODE (insn) == CODE_LABEL
	  && (LABEL_PRESERVE_P (insn) || LABEL_NAME (insn)))
	{
	  rtx vec_insn = next_real_insn (insn);
	  rtx vec_pat = vec_insn && GET_CODE (vec_insn) == JUMP_INSN ?
			PATTERN (vec_insn) : NULL_RTX;
	  if (!vec_pat
	      || !(GET_CODE (vec_pat) == ADDR_VEC
		   || GET_CODE (vec_pat) == ADDR_DIFF_VEC))
	    bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (insn));
	}

      /* If we have a direct jump (conditional or unconditional)
	 or a casesi jump, check all potential targets.  */
      else if (GET_CODE (insn) == JUMP_INSN)
	{
	  rtx pat = PATTERN (insn);
	  if (GET_CODE (pat) == SET)
	    {
	      rtx label = 0;

	      if (GET_CODE (SET_SRC (pat)) == LABEL_REF)
		{
		  label = XEXP (SET_SRC (pat), 0);
		}
	      else if (GET_CODE (SET_SRC (pat)) == IF_THEN_ELSE)
		{
		  if (GET_CODE (XEXP (SET_SRC (pat), 1)) == LABEL_REF)
		    label = XEXP (XEXP (SET_SRC (pat), 1), 0);
		  else if (GET_CODE (XEXP (SET_SRC (pat), 2)) == LABEL_REF)
		    label = XEXP (XEXP (SET_SRC (pat), 2), 0);
		}

	      if (label)
		{
		  /* Mark targets reached from another chunk.  */
		  if (s390_find_pool (pool_list, label)
		      != s390_find_pool (pool_list, insn))
		    bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
		}
	    }
	  else if (GET_CODE (pat) == PARALLEL
		   && XVECLEN (pat, 0) == 2
		   && GET_CODE (XVECEXP (pat, 0, 0)) == SET
		   && GET_CODE (XVECEXP (pat, 0, 1)) == USE
		   && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == LABEL_REF)
	    {
	      /* Find the jump table used by this casesi jump.  */
	      rtx vec_label = XEXP (XEXP (XVECEXP (pat, 0, 1), 0), 0);
	      rtx vec_insn = next_real_insn (vec_label);
	      rtx vec_pat = vec_insn && GET_CODE (vec_insn) == JUMP_INSN ?
			    PATTERN (vec_insn) : NULL_RTX;
	      if (vec_pat
		  && (GET_CODE (vec_pat) == ADDR_VEC
		      || GET_CODE (vec_pat) == ADDR_DIFF_VEC))
		{
		  int i, diff_p = GET_CODE (vec_pat) == ADDR_DIFF_VEC;

		  for (i = 0; i < XVECLEN (vec_pat, diff_p); i++)
		    {
		      rtx label = XEXP (XVECEXP (vec_pat, diff_p, i), 0);

		      if (s390_find_pool (pool_list, label)
			  != s390_find_pool (pool_list, insn))
			bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
		    }
		}
	    }
	}
    }

  /* Insert base register reload insns before every pool.  */

  for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
    if (TARGET_64BIT)
      {
	rtx pool_ref = gen_rtx_LABEL_REF (Pmode, curr_pool->label);
	rtx new_insn = gen_rtx_SET (Pmode, base_reg, pool_ref);
	rtx insn = curr_pool->first_insn;
	INSN_ADDRESSES_NEW (emit_insn_before (new_insn, insn), -1);
      }
    else
      {
	rtx new_insn = gen_reload_base (base_reg, curr_pool->label);
	rtx insn = curr_pool->first_insn;
	INSN_ADDRESSES_NEW (emit_insn_before (new_insn, insn), -1);
      }

  /* Insert base register reload insns at every far label.  */

  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    if (GET_CODE (insn) == CODE_LABEL
	&& bitmap_bit_p (far_labels, CODE_LABEL_NUMBER (insn)))
      {
	struct constant_pool *pool = s390_find_pool (pool_list, insn);
	if (pool)
	  {
	    if (TARGET_64BIT)
	      {
		rtx pool_ref = gen_rtx_LABEL_REF (Pmode, pool->label);
		rtx new_insn = gen_rtx_SET (Pmode, base_reg, pool_ref);
		INSN_ADDRESSES_NEW (emit_insn_after (new_insn, insn), -1);
	      }
	    else
	      {
		rtx new_insn = gen_reload_base (base_reg, pool->label);
		INSN_ADDRESSES_NEW (emit_insn_after (new_insn, insn), -1);
	      }
	  }
      }

  /* Insert base register reload insns after every call if necessary.  */

  /* Only needed when the literal pointer lives in the (call-clobbered)
     return register.  */
  if (REGNO (base_reg) == RETURN_REGNUM)
    for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
      if (GET_CODE (insn) == CALL_INSN)
	{
	  struct constant_pool *pool = s390_find_pool (pool_list, insn);
	  if (pool)
	    {
	      rtx new_insn = gen_reload_base2 (base_reg, pool->label);
	      INSN_ADDRESSES_NEW (emit_insn_after (new_insn, insn), -1);
	    }
	}


  /* Recompute insn addresses.  */

  s390_pool_overflow = 1;
  init_insn_lengths ();
  shorten_branches (get_insns ());
  s390_pool_overflow = 0;

  /* Insert base register reload insns after far branches.  */

  if (!TARGET_64BIT)
    for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
      if (GET_CODE (insn) == JUMP_INSN
	  && GET_CODE (PATTERN (insn)) == SET
	  && get_attr_length (insn) >= 12)
	{
	  struct constant_pool *pool = s390_find_pool (pool_list, insn);
	  if (pool)
	    {
	      rtx new_insn = gen_reload_base (base_reg, pool->label);
	      INSN_ADDRESSES_NEW (emit_insn_after (new_insn, insn), -1);
	    }
	}


  /* Free all memory.  */

  while (pool_list)
    {
      struct constant_pool *next = pool_list->next;
      s390_free_pool (pool_list);
      pool_list = next;
    }

  BITMAP_XFREE (far_labels);
}


/* Index of constant pool chunk that is currently being processed.
   Set to -1 before function output has started.  */
int s390_pool_count = -1;

/* Number of elements of current constant pool.  */
int s390_nr_constants;

/* Output main constant pool to stdio stream FILE.  */

void
s390_output_constant_pool (file)
     FILE *file;
{
  /* Output constant pool.  */
  if (s390_nr_constants)
    {
      /* 64-bit addresses the pool with LARL; 31-bit uses BRAS to both
         branch over the pool and load its address in one insn.  */
      if (TARGET_64BIT)
	{
	  fprintf (file, "\tlarl\t%s,.LT%X\n", reg_names[BASE_REGISTER],
		   s390_function_count);
	  readonly_data_section ();
	  ASM_OUTPUT_ALIGN (file, 3);
	}
      else
	{
	  fprintf (file, "\tbras\t%s,.LTN%X\n", reg_names[BASE_REGISTER],
		   s390_function_count);
	}
      fprintf (file, ".LT%X:\n", s390_function_count);

      s390_pool_count = 0;
      output_constant_pool (current_function_name, current_function_decl);
      s390_pool_count = -1;

      if (TARGET_64BIT)
	function_section (current_function_decl);
      else
	fprintf (file, ".LTN%X:\n", s390_function_count);
    }
}


/* Return true if floating point registers need to be saved.  */

static int
save_fprs_p ()
{
  int i;
  /* fprs 8-15 (hard regs 24-31) are call-saved only in the 64-bit ABI.  */
  if (!TARGET_64BIT)
    return 0;
  for (i=24; i<=31; i++)
    {
      if (regs_ever_live[i] == 1)
	return 1;
    }
  return 0;
}

/* Find first call clobbered register unused in a function.
   This could be used as base register in a leaf function
   or for holding the return address before epilogue.
*/ + +static int +find_unused_clobbered_reg () +{ + int i; + for (i = 0; i < 6; i++) + if (!regs_ever_live[i]) + return i; + return 0; +} + +/* Fill FRAME with info about frame of current function. */ + +static void +s390_frame_info (frame) + struct s390_frame *frame; +{ + int i, j; + HOST_WIDE_INT fsize = get_frame_size (); + + if (fsize > 0x7fff0000) + fatal_error ("Total size of local variables exceeds architecture limit."); + + /* fprs 8 - 15 are caller saved for 64 Bit ABI. */ + frame->save_fprs_p = save_fprs_p (); + + frame->frame_size = fsize + frame->save_fprs_p * 64; + + /* Does function need to setup frame and save area. */ + + if (! current_function_is_leaf + || frame->frame_size > 0 + || current_function_calls_alloca + || current_function_stdarg + || current_function_varargs) + frame->frame_size += STARTING_FRAME_OFFSET; + + /* If we need to allocate a frame, the stack pointer is changed. */ + + if (frame->frame_size > 0) + regs_ever_live[STACK_POINTER_REGNUM] = 1; + + /* If the literal pool might overflow, the return register might + be used as temp literal pointer. */ + + if (!TARGET_64BIT && get_pool_size () >= S390_POOL_CHUNK_MAX / 2) + regs_ever_live[RETURN_REGNUM] = 1; + + /* If there is (possibly) any pool entry, we need to + load base register. */ + + if (get_pool_size () + || !CONST_OK_FOR_LETTER_P (frame->frame_size, 'K') + || (!TARGET_64BIT && current_function_uses_pic_offset_table)) + regs_ever_live[BASE_REGISTER] = 1; + + /* If we need the GOT pointer, remember to save/restore it. */ + + if (current_function_uses_pic_offset_table) + regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1; + + /* Frame pointer needed. */ + + frame->frame_pointer_p = frame_pointer_needed; + + /* Find first and last gpr to be saved. */ + + for (i = 6; i < 16; i++) + if (regs_ever_live[i]) + break; + + for (j = 15; j > i; j--) + if (regs_ever_live[j]) + break; + + if (i == 16) + { + /* Nothing to save / restore. 
*/ + frame->first_save_gpr = -1; + frame->first_restore_gpr = -1; + frame->last_save_gpr = -1; + frame->return_reg_saved_p = 0; + } + else + { + /* Save / Restore from gpr i to j. */ + frame->first_save_gpr = i; + frame->first_restore_gpr = i; + frame->last_save_gpr = j; + frame->return_reg_saved_p = (j >= RETURN_REGNUM && i <= RETURN_REGNUM); + } + + if (current_function_stdarg || current_function_varargs) + { + /* Varargs function need to save from gpr 2 to gpr 15. */ + frame->first_save_gpr = 2; + } +} + +/* Return offset between argument pointer and frame pointer + initially after prologue. */ + +int +s390_arg_frame_offset () +{ + struct s390_frame frame; + + /* Compute frame_info. */ + + s390_frame_info (&frame); + + return frame.frame_size + STACK_POINTER_OFFSET; +} + +/* Emit insn to save fpr REGNUM at offset OFFSET relative + to register BASE. Return generated insn. */ + +static rtx +save_fpr (base, offset, regnum) + rtx base; + int offset; + int regnum; +{ + rtx addr; + addr = gen_rtx_MEM (DFmode, plus_constant (base, offset)); + set_mem_alias_set (addr, s390_sr_alias_set); + + return emit_move_insn (addr, gen_rtx_REG (DFmode, regnum)); +} + +/* Emit insn to restore fpr REGNUM from offset OFFSET relative + to register BASE. Return generated insn. */ + +static rtx +restore_fpr (base, offset, regnum) + rtx base; + int offset; + int regnum; +{ + rtx addr; + addr = gen_rtx_MEM (DFmode, plus_constant (base, offset)); + set_mem_alias_set (addr, s390_sr_alias_set); + + return emit_move_insn (gen_rtx_REG (DFmode, regnum), addr); +} + +/* Output the function prologue assembly code to the + stdio stream FILE. The local frame size is passed + in LSIZE. */ + +void +s390_function_prologue (file, lsize) + FILE *file ATTRIBUTE_UNUSED; + HOST_WIDE_INT lsize ATTRIBUTE_UNUSED; +{ + s390_chunkify_pool (); + s390_split_branches (); +} + +/* Output the function epilogue assembly code to the + stdio stream FILE. The local frame size is passed + in LSIZE. 
*/ + +void +s390_function_epilogue (file, lsize) + FILE *file ATTRIBUTE_UNUSED; + HOST_WIDE_INT lsize ATTRIBUTE_UNUSED; +{ + current_function_uses_pic_offset_table = 0; + s390_function_count++; +} + +/* Expand the prologue into a bunch of separate insns. */ + +void +s390_emit_prologue () +{ + struct s390_frame frame; + rtx insn, addr; + rtx temp_reg; + int i; + + /* Compute frame_info. */ + + s390_frame_info (&frame); + + /* Choose best register to use for temp use within prologue. */ + + if (frame.return_reg_saved_p + && !has_hard_reg_initial_val (Pmode, RETURN_REGNUM) + && get_pool_size () < S390_POOL_CHUNK_MAX / 2) + temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM); + else + temp_reg = gen_rtx_REG (Pmode, 1); + + /* Save call saved gprs. */ + + if (frame.first_save_gpr != -1) + { + addr = plus_constant (stack_pointer_rtx, + frame.first_save_gpr * UNITS_PER_WORD); + addr = gen_rtx_MEM (Pmode, addr); + set_mem_alias_set (addr, s390_sr_alias_set); + + if (frame.first_save_gpr != frame.last_save_gpr ) + { + insn = emit_insn (gen_store_multiple (addr, + gen_rtx_REG (Pmode, frame.first_save_gpr), + GEN_INT (frame.last_save_gpr + - frame.first_save_gpr + 1))); + + /* We need to set the FRAME_RELATED flag on all SETs + inside the store-multiple pattern. + + However, we must not emit DWARF records for registers 2..5 + if they are stored for use by variable arguments ... + + ??? Unfortunately, it is not enough to simply not the the + FRAME_RELATED flags for those SETs, because the first SET + of the PARALLEL is always treated as if it had the flag + set, even if it does not. Therefore we emit a new pattern + without those registers as REG_FRAME_RELATED_EXPR note. 
*/ + + if (frame.first_save_gpr >= 6) + { + rtx pat = PATTERN (insn); + + for (i = 0; i < XVECLEN (pat, 0); i++) + if (GET_CODE (XVECEXP (pat, 0, i)) == SET) + RTX_FRAME_RELATED_P (XVECEXP (pat, 0, i)) = 1; + + RTX_FRAME_RELATED_P (insn) = 1; + } + else if (frame.last_save_gpr >= 6) + { + rtx note, naddr; + naddr = plus_constant (stack_pointer_rtx, 6 * UNITS_PER_WORD); + note = gen_store_multiple (gen_rtx_MEM (Pmode, naddr), + gen_rtx_REG (Pmode, 6), + GEN_INT (frame.last_save_gpr - 6 + 1)); + REG_NOTES (insn) = + gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, + note, REG_NOTES (insn)); + + for (i = 0; i < XVECLEN (note, 0); i++) + if (GET_CODE (XVECEXP (note, 0, i)) == SET) + RTX_FRAME_RELATED_P (XVECEXP (note, 0, i)) = 1; + + RTX_FRAME_RELATED_P (insn) = 1; + } + } + else + { + insn = emit_move_insn (addr, + gen_rtx_REG (Pmode, frame.first_save_gpr)); + RTX_FRAME_RELATED_P (insn) = 1; + } + } + + /* Dump constant pool and set constant pool register (13). */ + + insn = emit_insn (gen_lit ()); + + /* Save fprs for variable args. */ + + if (current_function_stdarg || current_function_varargs) + { + /* Save fpr 0 and 2. */ + + save_fpr (stack_pointer_rtx, STACK_POINTER_OFFSET - 32, 16); + save_fpr (stack_pointer_rtx, STACK_POINTER_OFFSET - 24, 17); + + if (TARGET_64BIT) + { + /* Save fpr 4 and 6. */ + + save_fpr (stack_pointer_rtx, STACK_POINTER_OFFSET - 16, 18); + save_fpr (stack_pointer_rtx, STACK_POINTER_OFFSET - 8, 19); + } + } + + /* Save fprs 4 and 6 if used (31 bit ABI). */ + + if (!TARGET_64BIT) + { + /* Save fpr 4 and 6. */ + if (regs_ever_live[18]) + { + insn = save_fpr (stack_pointer_rtx, STACK_POINTER_OFFSET - 16, 18); + RTX_FRAME_RELATED_P (insn) = 1; + } + if (regs_ever_live[19]) + { + insn = save_fpr (stack_pointer_rtx, STACK_POINTER_OFFSET - 8, 19); + RTX_FRAME_RELATED_P (insn) = 1; + } + } + + /* Decrement stack pointer. */ + + if (frame.frame_size > 0) + { + rtx frame_off = GEN_INT (-frame.frame_size); + + /* Save incoming stack pointer into temp reg. 
*/ + + if (TARGET_BACKCHAIN || frame.save_fprs_p) + { + insn = emit_insn (gen_move_insn (temp_reg, stack_pointer_rtx)); + } + + /* Substract frame size from stack pointer. */ + + frame_off = GEN_INT (-frame.frame_size); + if (!CONST_OK_FOR_LETTER_P (-frame.frame_size, 'K')) + frame_off = force_const_mem (Pmode, frame_off); + + insn = emit_insn (gen_add2_insn (stack_pointer_rtx, frame_off)); + RTX_FRAME_RELATED_P (insn) = 1; + REG_NOTES (insn) = + gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, + gen_rtx_SET (VOIDmode, stack_pointer_rtx, + gen_rtx_PLUS (Pmode, stack_pointer_rtx, + GEN_INT (-frame.frame_size))), + REG_NOTES (insn)); + + /* Set backchain. */ + + if (TARGET_BACKCHAIN) + { + addr = gen_rtx_MEM (Pmode, stack_pointer_rtx); + set_mem_alias_set (addr, s390_sr_alias_set); + insn = emit_insn (gen_move_insn (addr, temp_reg)); + } + } + + /* Save fprs 8 - 15 (64 bit ABI). */ + + if (frame.save_fprs_p) + { + insn = emit_insn (gen_add2_insn (temp_reg, GEN_INT(-64))); + + for (i = 24; i < 32; i++) + if (regs_ever_live[i]) + { + rtx addr = plus_constant (stack_pointer_rtx, + frame.frame_size - 64 + (i-24)*8); + + insn = save_fpr (temp_reg, (i-24)*8, i); + RTX_FRAME_RELATED_P (insn) = 1; + REG_NOTES (insn) = + gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, + gen_rtx_SET (VOIDmode, + gen_rtx_MEM (DFmode, addr), + gen_rtx_REG (DFmode, i)), + REG_NOTES (insn)); + } + } + + /* Set frame pointer, if needed. */ + + if (frame.frame_pointer_p) + { + insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx); + RTX_FRAME_RELATED_P (insn) = 1; + } + + /* Set up got pointer, if needed. */ + + if (current_function_uses_pic_offset_table) + { + rtx got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_"); + SYMBOL_REF_FLAG (got_symbol) = 1; + + if (TARGET_64BIT) + { + insn = emit_insn (gen_movdi (pic_offset_table_rtx, + got_symbol)); + + /* It can happen that the GOT pointer isn't really needed ... 
*/ + REG_NOTES(insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, NULL_RTX, + REG_NOTES (insn)); + } + else + { + got_symbol = gen_rtx_UNSPEC (VOIDmode, + gen_rtvec (1, got_symbol), 100); + got_symbol = gen_rtx_CONST (VOIDmode, got_symbol); + got_symbol = force_const_mem (Pmode, got_symbol); + insn = emit_move_insn (pic_offset_table_rtx, + got_symbol); + REG_NOTES(insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, NULL_RTX, + REG_NOTES (insn)); + + insn = emit_insn (gen_add2_insn (pic_offset_table_rtx, + gen_rtx_REG (Pmode, BASE_REGISTER))); + REG_NOTES(insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, NULL_RTX, + REG_NOTES (insn)); + } + } +} + +/* Expand the epilogue into a bunch of separate insns. */ + +void +s390_emit_epilogue () +{ + struct s390_frame frame; + rtx frame_pointer, return_reg; + int area_bottom, area_top, offset; + rtvec p; + + /* Compute frame_info. */ + + s390_frame_info (&frame); + + /* Check whether to use frame or stack pointer for restore. */ + + frame_pointer = frame.frame_pointer_p ? + hard_frame_pointer_rtx : stack_pointer_rtx; + + /* Compute which parts of the save area we need to access. */ + + if (frame.first_restore_gpr != -1) + { + area_bottom = frame.first_restore_gpr * UNITS_PER_WORD; + area_top = (frame.last_save_gpr + 1) * UNITS_PER_WORD; + } + else + { + area_bottom = INT_MAX; + area_top = INT_MIN; + } + + if (TARGET_64BIT) + { + if (frame.save_fprs_p) + { + if (area_bottom > -64) + area_bottom = -64; + if (area_top < 0) + area_top = 0; + } + } + else + { + if (regs_ever_live[18]) + { + if (area_bottom > STACK_POINTER_OFFSET - 16) + area_bottom = STACK_POINTER_OFFSET - 16; + if (area_top < STACK_POINTER_OFFSET - 8) + area_top = STACK_POINTER_OFFSET - 8; + } + if (regs_ever_live[19]) + { + if (area_bottom > STACK_POINTER_OFFSET - 8) + area_bottom = STACK_POINTER_OFFSET - 8; + if (area_top < STACK_POINTER_OFFSET) + area_top = STACK_POINTER_OFFSET; + } + } + + /* Check whether we can access the register save area. 
+ If not, increment the frame pointer as required. */ + + if (area_top <= area_bottom) + { + /* Nothing to restore. */ + } + else if (frame.frame_size + area_bottom >= 0 + && frame.frame_size + area_top <= 4096) + { + /* Area is in range. */ + offset = frame.frame_size; + } + else + { + rtx insn, frame_off; + + offset = area_bottom < 0 ? -area_bottom : 0; + frame_off = GEN_INT (frame.frame_size - offset); + + if (!CONST_OK_FOR_LETTER_P (INTVAL (frame_off), 'K')) + frame_off = force_const_mem (Pmode, frame_off); + + insn = emit_insn (gen_add2_insn (frame_pointer, frame_off)); + } + + /* Restore call saved fprs. */ + + if (TARGET_64BIT) + { + int i; + + if (frame.save_fprs_p) + for (i = 24; i < 32; i++) + if (regs_ever_live[i] && !global_regs[i]) + restore_fpr (frame_pointer, + offset - 64 + (i-24) * 8, i); + } + else + { + if (regs_ever_live[18] && !global_regs[18]) + restore_fpr (frame_pointer, offset + STACK_POINTER_OFFSET - 16, 18); + if (regs_ever_live[19] && !global_regs[19]) + restore_fpr (frame_pointer, offset + STACK_POINTER_OFFSET - 8, 19); + } + + /* Return register. */ + + return_reg = gen_rtx_REG (Pmode, RETURN_REGNUM); + + /* Restore call saved gprs. */ + + if (frame.first_restore_gpr != -1) + { + rtx addr; + int i; + + /* Check for global register and save them + to stack location from where they get restored. */ + + for (i = frame.first_restore_gpr; + i <= frame.last_save_gpr; + i++) + { + /* These registers are special and need to be + restored in any case. */ + if (i == STACK_POINTER_REGNUM + || i == RETURN_REGNUM + || i == BASE_REGISTER + || (flag_pic && i == PIC_OFFSET_TABLE_REGNUM)) + continue; + + if (global_regs[i]) + { + addr = plus_constant (frame_pointer, + offset + i * UNITS_PER_WORD); + addr = gen_rtx_MEM (Pmode, addr); + set_mem_alias_set (addr, s390_sr_alias_set); + emit_move_insn (addr, gen_rtx_REG (Pmode, i)); + } + } + + /* Fetch return address from stack before load multiple, + this will do good for scheduling. 
*/ + + if (frame.last_save_gpr >= RETURN_REGNUM + && frame.first_restore_gpr < RETURN_REGNUM) + { + int return_regnum = find_unused_clobbered_reg(); + if (!return_regnum) + return_regnum = 4; + return_reg = gen_rtx_REG (Pmode, return_regnum); + + addr = plus_constant (frame_pointer, + offset + RETURN_REGNUM * UNITS_PER_WORD); + addr = gen_rtx_MEM (Pmode, addr); + set_mem_alias_set (addr, s390_sr_alias_set); + emit_move_insn (return_reg, addr); + } + + /* ??? As references to the base register are not made + explicit in insn RTX code, we have to add a barrier here + to prevent incorrect scheduling. */ + + emit_insn (gen_blockage()); + + addr = plus_constant (frame_pointer, + offset + frame.first_restore_gpr * UNITS_PER_WORD); + addr = gen_rtx_MEM (Pmode, addr); + set_mem_alias_set (addr, s390_sr_alias_set); + + if (frame.first_restore_gpr != frame.last_save_gpr) + { + emit_insn (gen_load_multiple ( + gen_rtx_REG (Pmode, frame.first_restore_gpr), + addr, + GEN_INT (frame.last_save_gpr - frame.first_restore_gpr + 1))); + } + else + { + emit_move_insn (gen_rtx_REG (Pmode, frame.first_restore_gpr), + addr); + } + } + + /* Return to caller. */ + + p = rtvec_alloc (2); + + RTVEC_ELT (p, 0) = gen_rtx_RETURN (VOIDmode); + RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode, return_reg); + emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p)); +} + + +/* Return the size in bytes of a function argument of + type TYPE and/or mode MODE. At least one of TYPE or + MODE must be specified. */ + +static int +s390_function_arg_size (mode, type) + enum machine_mode mode; + tree type; +{ + if (type) + return int_size_in_bytes (type); + + /* No type info available for some library calls ... */ + if (mode != BLKmode) + return GET_MODE_SIZE (mode); + + /* If we have neither type nor mode, abort */ + abort (); +} + +/* Return 1 if a function argument of type TYPE and mode MODE + is to be passed by reference. 
The ABI specifies that only + structures of size 1, 2, 4, or 8 bytes are passed by value, + all other structures (and complex numbers) are passed by + reference. */ + +int +s390_function_arg_pass_by_reference (mode, type) + enum machine_mode mode; + tree type; +{ + int size = s390_function_arg_size (mode, type); + + if (type) + { + if (AGGREGATE_TYPE_P (type) && + size != 1 && size != 2 && size != 4 && size != 8) + return 1; + + if (TREE_CODE (type) == COMPLEX_TYPE) + return 1; + } + return 0; + +} + +/* Update the data in CUM to advance over an argument of mode MODE and + data type TYPE. (TYPE is null for libcalls where that information + may not be available.). The boolean NAMED specifies whether the + argument is a named argument (as opposed to an unnamed argument + matching an ellipsis). */ + +void +s390_function_arg_advance (cum, mode, type, named) + CUMULATIVE_ARGS *cum; + enum machine_mode mode; + tree type; + int named ATTRIBUTE_UNUSED; +{ + if (! TARGET_SOFT_FLOAT && (mode == DFmode || mode == SFmode)) + { + cum->fprs++; + } + else if (s390_function_arg_pass_by_reference (mode, type)) + { + cum->gprs += 1; + } + else + { + int size = s390_function_arg_size (mode, type); + cum->gprs += ((size + UNITS_PER_WORD-1) / UNITS_PER_WORD); + } +} + +/* Define where to put the arguments to a function. + Value is zero to push the argument on the stack, + or a hard register in which to store the argument. + + MODE is the argument's machine mode. + TYPE is the data type of the argument (as a tree). + This is null for libcalls where that information may + not be available. + CUM is a variable of type CUMULATIVE_ARGS which gives info about + the preceding args and about the function being called. + NAMED is nonzero if this argument is a named parameter + (otherwise it is an extra parameter matching an ellipsis). 
+ + On S/390, we use general purpose registers 2 through 6 to + pass integer, pointer, and certain structure arguments, and + floating point registers 0 and 2 (0, 2, 4, and 6 on 64-bit) + to pass floating point arguments. All remaining arguments + are pushed to the stack. */ + +rtx +s390_function_arg (cum, mode, type, named) + CUMULATIVE_ARGS *cum; + enum machine_mode mode; + tree type; + int named ATTRIBUTE_UNUSED; +{ + if (s390_function_arg_pass_by_reference (mode, type)) + return 0; + + if (! TARGET_SOFT_FLOAT && (mode == DFmode || mode == SFmode)) + { + if (cum->fprs + 1 > (TARGET_64BIT? 4 : 2)) + return 0; + else + return gen_rtx (REG, mode, cum->fprs + 16); + } + else + { + int size = s390_function_arg_size (mode, type); + int n_gprs = (size + UNITS_PER_WORD-1) / UNITS_PER_WORD; + + if (cum->gprs + n_gprs > 5) + return 0; + else + return gen_rtx (REG, mode, cum->gprs + 2); + } +} + + +/* Create and return the va_list datatype. + + On S/390, va_list is an array type equivalent to + + typedef struct __va_list_tag + { + long __gpr; + long __fpr; + void *__overflow_arg_area; + void *__reg_save_area; + + } va_list[1]; + + where __gpr and __fpr hold the number of general purpose + or floating point arguments used up to now, respectively, + __overflow_arg_area points to the stack location of the + next argument passed on the stack, and __reg_save_area + always points to the start of the register area in the + call frame of the current function. The function prologue + saves all registers used for argument passing into this + area if the function uses variable arguments. 
*/ + +tree +s390_build_va_list () +{ + tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl; + + record = make_lang_type (RECORD_TYPE); + + type_decl = + build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record); + + f_gpr = build_decl (FIELD_DECL, get_identifier ("__gpr"), + long_integer_type_node); + f_fpr = build_decl (FIELD_DECL, get_identifier ("__fpr"), + long_integer_type_node); + f_ovf = build_decl (FIELD_DECL, get_identifier ("__overflow_arg_area"), + ptr_type_node); + f_sav = build_decl (FIELD_DECL, get_identifier ("__reg_save_area"), + ptr_type_node); + + DECL_FIELD_CONTEXT (f_gpr) = record; + DECL_FIELD_CONTEXT (f_fpr) = record; + DECL_FIELD_CONTEXT (f_ovf) = record; + DECL_FIELD_CONTEXT (f_sav) = record; + + TREE_CHAIN (record) = type_decl; + TYPE_NAME (record) = type_decl; + TYPE_FIELDS (record) = f_gpr; + TREE_CHAIN (f_gpr) = f_fpr; + TREE_CHAIN (f_fpr) = f_ovf; + TREE_CHAIN (f_ovf) = f_sav; + + layout_type (record); + + /* The correct type is an array type of one element. */ + return build_array_type (record, build_index_type (size_zero_node)); +} + +/* Implement va_start by filling the va_list structure VALIST. + STDARG_P is true if implementing __builtin_stdarg_va_start, + false if implementing __builtin_varargs_va_start. NEXTARG + points to the first anonymous stack argument. + + The following global variables are used to initialize + the va_list structure: + + current_function_args_info: + holds number of gprs and fprs used for named arguments. + current_function_arg_offset_rtx: + holds the offset of the first anonymous stack argument + (relative to the virtual arg pointer). 
*/ + +void +s390_va_start (stdarg_p, valist, nextarg) + int stdarg_p; + tree valist; + rtx nextarg ATTRIBUTE_UNUSED; +{ + HOST_WIDE_INT n_gpr, n_fpr; + int off; + tree f_gpr, f_fpr, f_ovf, f_sav; + tree gpr, fpr, ovf, sav, t; + + f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node)); + f_fpr = TREE_CHAIN (f_gpr); + f_ovf = TREE_CHAIN (f_fpr); + f_sav = TREE_CHAIN (f_ovf); + + valist = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (valist)), valist); + gpr = build (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr); + fpr = build (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr); + ovf = build (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf); + sav = build (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav); + + /* Count number of gp and fp argument registers used. */ + + n_gpr = current_function_args_info.gprs; + n_fpr = current_function_args_info.fprs; + + t = build (MODIFY_EXPR, TREE_TYPE (gpr), gpr, build_int_2 (n_gpr, 0)); + TREE_SIDE_EFFECTS (t) = 1; + expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL); + + t = build (MODIFY_EXPR, TREE_TYPE (fpr), fpr, build_int_2 (n_fpr, 0)); + TREE_SIDE_EFFECTS (t) = 1; + expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL); + + /* Find the overflow area. */ + t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx); + + off = INTVAL (current_function_arg_offset_rtx); + off = off < 0 ? 0 : off; + if (! stdarg_p) + off = off > 0 ? off - UNITS_PER_WORD : off; + if (TARGET_DEBUG_ARG) + fprintf (stderr, "va_start: n_gpr = %d, n_fpr = %d off %d\n", + (int)n_gpr, (int)n_fpr, off); + + t = build (PLUS_EXPR, TREE_TYPE (ovf), t, build_int_2 (off, 0)); + + t = build (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t); + TREE_SIDE_EFFECTS (t) = 1; + expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL); + + /* Find the register save area. 
*/ + t = make_tree (TREE_TYPE (sav), virtual_incoming_args_rtx); + t = build (PLUS_EXPR, TREE_TYPE (sav), t, + build_int_2 (-STACK_POINTER_OFFSET, -1)); + t = build (MODIFY_EXPR, TREE_TYPE (sav), sav, t); + TREE_SIDE_EFFECTS (t) = 1; + expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL); +} + +/* Implement va_arg by updating the va_list structure + VALIST as required to retrieve an argument of type + TYPE, and returning that argument. + + Generates code equivalent to: + + if (integral value) { + if (size <= 4 && args.gpr < 5 || + size > 4 && args.gpr < 4 ) + ret = args.reg_save_area[args.gpr+8] + else + ret = *args.overflow_arg_area++; + } else if (float value) { + if (args.fgpr < 2) + ret = args.reg_save_area[args.fpr+64] + else + ret = *args.overflow_arg_area++; + } else if (aggregate value) { + if (args.gpr < 5) + ret = *args.reg_save_area[args.gpr] + else + ret = **args.overflow_arg_area++; + } */ + +rtx +s390_va_arg (valist, type) + tree valist; + tree type; +{ + tree f_gpr, f_fpr, f_ovf, f_sav; + tree gpr, fpr, ovf, sav, reg, t, u; + int indirect_p, size, n_reg, sav_ofs, sav_scale, max_reg; + rtx lab_false, lab_over, addr_rtx, r; + + f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node)); + f_fpr = TREE_CHAIN (f_gpr); + f_ovf = TREE_CHAIN (f_fpr); + f_sav = TREE_CHAIN (f_ovf); + + valist = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (valist)), valist); + gpr = build (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr); + fpr = build (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr); + ovf = build (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf); + sav = build (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav); + + size = int_size_in_bytes (type); + + if (s390_function_arg_pass_by_reference (TYPE_MODE (type), type)) + { + if (TARGET_DEBUG_ARG) + { + fprintf (stderr, "va_arg: aggregate type"); + debug_tree (type); + } + + /* Aggregates are passed by reference. 
*/ + indirect_p = 1; + reg = gpr; + n_reg = 1; + sav_ofs = 2 * UNITS_PER_WORD; + sav_scale = UNITS_PER_WORD; + size = UNITS_PER_WORD; + max_reg = 4; + } + else if (FLOAT_TYPE_P (type) && ! TARGET_SOFT_FLOAT) + { + if (TARGET_DEBUG_ARG) + { + fprintf (stderr, "va_arg: float type"); + debug_tree (type); + } + + /* FP args go in FP registers, if present. */ + indirect_p = 0; + reg = fpr; + n_reg = 1; + sav_ofs = 16 * UNITS_PER_WORD; + sav_scale = 8; + /* TARGET_64BIT has up to 4 parameter in fprs */ + max_reg = TARGET_64BIT ? 3 : 1; + } + else + { + if (TARGET_DEBUG_ARG) + { + fprintf (stderr, "va_arg: other type"); + debug_tree (type); + } + + /* Otherwise into GP registers. */ + indirect_p = 0; + reg = gpr; + n_reg = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD; + sav_ofs = 2 * UNITS_PER_WORD; + if (TARGET_64BIT) + sav_ofs += TYPE_MODE (type) == SImode ? 4 : + TYPE_MODE (type) == HImode ? 6 : + TYPE_MODE (type) == QImode ? 7 : 0; + else + sav_ofs += TYPE_MODE (type) == HImode ? 2 : + TYPE_MODE (type) == QImode ? 3 : 0; + + sav_scale = UNITS_PER_WORD; + if (n_reg > 1) + max_reg = 3; + else + max_reg = 4; + } + + /* Pull the value out of the saved registers ... */ + + lab_false = gen_label_rtx (); + lab_over = gen_label_rtx (); + addr_rtx = gen_reg_rtx (Pmode); + + emit_cmp_and_jump_insns (expand_expr (reg, NULL_RTX, Pmode, EXPAND_NORMAL), + GEN_INT (max_reg), + GT, const1_rtx, Pmode, 0, lab_false); + + if (sav_ofs) + t = build (PLUS_EXPR, ptr_type_node, sav, build_int_2 (sav_ofs, 0)); + else + t = sav; + + u = build (MULT_EXPR, long_integer_type_node, + reg, build_int_2 (sav_scale, 0)); + TREE_SIDE_EFFECTS (u) = 1; + + t = build (PLUS_EXPR, ptr_type_node, t, u); + TREE_SIDE_EFFECTS (t) = 1; + + r = expand_expr (t, addr_rtx, Pmode, EXPAND_NORMAL); + if (r != addr_rtx) + emit_move_insn (addr_rtx, r); + + + emit_jump_insn (gen_jump (lab_over)); + emit_barrier (); + emit_label (lab_false); + + /* ... Otherwise out of the overflow area. 
*/ + + t = save_expr (ovf); + + + /* In 64 BIT for each argument on stack, a full 64 bit slot is allocated. */ + if (size < UNITS_PER_WORD) + { + t = build (PLUS_EXPR, TREE_TYPE (t), t, build_int_2 (UNITS_PER_WORD-size, 0)); + t = build (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t); + TREE_SIDE_EFFECTS (t) = 1; + expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL); + + t = save_expr (ovf); + } + + r = expand_expr (t, addr_rtx, Pmode, EXPAND_NORMAL); + if (r != addr_rtx) + emit_move_insn (addr_rtx, r); + + t = build (PLUS_EXPR, TREE_TYPE (t), t, build_int_2 (size, 0)); + t = build (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t); + TREE_SIDE_EFFECTS (t) = 1; + expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL); + + emit_label (lab_over); + + /* If less than max_regs a registers are retrieved out + of register save area, increment. */ + + u = build (PREINCREMENT_EXPR, TREE_TYPE (reg), reg, + build_int_2 (n_reg, 0)); + TREE_SIDE_EFFECTS (u) = 1; + expand_expr (u, const0_rtx, VOIDmode, EXPAND_NORMAL); + + if (indirect_p) + { + r = gen_rtx_MEM (Pmode, addr_rtx); + set_mem_alias_set (r, get_varargs_alias_set ()); + emit_move_insn (addr_rtx, r); + } + + + return addr_rtx; +} + + +/* Output assembly code for the trampoline template to + stdio stream FILE. + + On S/390, we use gpr 1 internally in the trampoline code; + gpr 0 is used to hold the static chain. 
*/ + +void +s390_trampoline_template (file) + FILE *file; +{ + if (TARGET_64BIT) + { + fprintf (file, "larl\t%s,0f\n", reg_names[1]); + fprintf (file, "lg\t%s,0(%s)\n", reg_names[0], reg_names[1]); + fprintf (file, "lg\t%s,8(%s)\n", reg_names[1], reg_names[1]); + fprintf (file, "br\t%s\n", reg_names[1]); + fprintf (file, "0:\t.quad\t0\n"); + fprintf (file, ".quad\t0\n"); + } + else + { + fprintf (file, "basr\t%s,0\n", reg_names[1]); + fprintf (file, "l\t%s,10(%s)\n", reg_names[0], reg_names[1]); + fprintf (file, "l\t%s,14(%s)\n", reg_names[1], reg_names[1]); + fprintf (file, "br\t%s\n", reg_names[1]); + fprintf (file, ".long\t0\n"); + fprintf (file, ".long\t0\n"); + } +} + +/* Emit RTL insns to initialize the variable parts of a trampoline. + FNADDR is an RTX for the address of the function's pure code. + CXT is an RTX for the static chain value for the function. */ + +void +s390_initialize_trampoline (addr, fnaddr, cxt) + rtx addr; + rtx fnaddr; + rtx cxt; +{ + emit_move_insn (gen_rtx + (MEM, Pmode, + memory_address (Pmode, + plus_constant (addr, (TARGET_64BIT ? 20 : 12) ))), cxt); + emit_move_insn (gen_rtx + (MEM, Pmode, + memory_address (Pmode, + plus_constant (addr, (TARGET_64BIT ? 28 : 16) ))), fnaddr); +} + +/* Return rtx for 64-bit constant formed from the 32-bit subwords + LOW and HIGH, independent of the host word size. */ + +rtx +s390_gen_rtx_const_DI (high, low) + int high; + int low; +{ +#if HOST_BITS_PER_WIDE_INT >= 64 + HOST_WIDE_INT val; + val = (HOST_WIDE_INT)high; + val <<= 32; + val |= (HOST_WIDE_INT)low; + + return GEN_INT (val); +#else +#if HOST_BITS_PER_WIDE_INT >= 32 + return immed_double_const ((HOST_WIDE_INT)low, (HOST_WIDE_INT)high, DImode); +#else + abort (); +#endif +#endif +} + +/* Output assembler code to FILE to increment profiler label # LABELNO + for profiling a function entry. 
*/ + +void +s390_function_profiler (file, labelno) + FILE *file; + int labelno; +{ + rtx op[7]; + + char label[128]; + sprintf (label, "%sP%d", LPREFIX, labelno); + + fprintf (file, "# function profiler \n"); + + op[0] = gen_rtx_REG (Pmode, RETURN_REGNUM); + op[1] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM); + op[1] = gen_rtx_MEM (Pmode, plus_constant (op[1], UNITS_PER_WORD)); + + op[2] = gen_rtx_REG (Pmode, 1); + op[3] = gen_rtx_SYMBOL_REF (Pmode, label); + SYMBOL_REF_FLAG (op[3]) = 1; + + op[4] = gen_rtx_SYMBOL_REF (Pmode, "_mcount"); + if (flag_pic) + { + op[4] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[4]), 113); + op[4] = gen_rtx_CONST (Pmode, op[4]); + } + + if (TARGET_64BIT) + { + output_asm_insn ("stg\t%0,%1", op); + output_asm_insn ("larl\t%2,%3", op); + output_asm_insn ("brasl\t%0,%4", op); + output_asm_insn ("lg\t%0,%1", op); + } + else if (!flag_pic) + { + op[6] = gen_label_rtx (); + + output_asm_insn ("st\t%0,%1", op); + output_asm_insn ("bras\t%2,%l6", op); + output_asm_insn (".long\t%4", op); + output_asm_insn (".long\t%3", op); + ASM_OUTPUT_INTERNAL_LABEL (file, "L", CODE_LABEL_NUMBER (op[6])); + output_asm_insn ("l\t%0,0(%2)", op); + output_asm_insn ("l\t%2,4(%2)", op); + output_asm_insn ("basr\t%0,%0", op); + output_asm_insn ("l\t%0,%1", op); + } + else + { + op[5] = gen_label_rtx (); + op[6] = gen_label_rtx (); + + output_asm_insn ("st\t%0,%1", op); + output_asm_insn ("bras\t%2,%l6", op); + ASM_OUTPUT_INTERNAL_LABEL (file, "L", CODE_LABEL_NUMBER (op[5])); + output_asm_insn (".long\t%4-%l5", op); + output_asm_insn (".long\t%3-%l5", op); + ASM_OUTPUT_INTERNAL_LABEL (file, "L", CODE_LABEL_NUMBER (op[6])); + output_asm_insn ("lr\t%0,%2", op); + output_asm_insn ("a\t%0,0(%2)", op); + output_asm_insn ("a\t%2,4(%2)", op); + output_asm_insn ("basr\t%0,%0", op); + output_asm_insn ("l\t%0,%1", op); + } +} + diff --git a/contrib/gcc/config/s390/s390.h b/contrib/gcc/config/s390/s390.h new file mode 100644 index 0000000..f3218e9 --- /dev/null +++ 
b/contrib/gcc/config/s390/s390.h @@ -0,0 +1,1389 @@ +/* Definitions of target machine for GNU compiler, for IBM S/390 + Copyright (C) 1999, 2000, 2001, 2002 Free Software Foundation, Inc. + Contributed by Hartmut Penner (hpenner@de.ibm.com) and + Ulrich Weigand (uweigand@de.ibm.com). +This file is part of GNU CC. + +GNU CC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2, or (at your option) +any later version. + +GNU CC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GNU CC; see the file COPYING. If not, write to +the Free Software Foundation, 59 Temple Place - Suite 330, +Boston, MA 02111-1307, USA. */ + +#ifndef _S390_H +#define _S390_H + +extern int flag_pic; + +/* Run-time compilation parameters selecting different hardware subsets. */ + +extern int target_flags; + +/* Target macros checked at runtime of compiler. */ + +#define TARGET_HARD_FLOAT (target_flags & 1) +#define TARGET_SOFT_FLOAT (!(target_flags & 1)) +#define TARGET_BACKCHAIN (target_flags & 2) +#define TARGET_SMALL_EXEC (target_flags & 4) +#define TARGET_DEBUG_ARG (target_flags & 8) +#define TARGET_64BIT (target_flags & 16) +#define TARGET_MVCLE (target_flags & 32) + +#ifdef DEFAULT_TARGET_64BIT +#define TARGET_DEFAULT 0x13 +#define TARGET_VERSION fprintf (stderr, " (zSeries)"); +#else +#define TARGET_DEFAULT 0x3 +#define TARGET_VERSION fprintf (stderr, " (S/390)"); +#endif + + +/* Macro to define tables used to set the flags. This is a list in braces + of pairs in braces, each pair being { "NAME", VALUE } + where VALUE is the bits to set or minus the bits to clear. 
+ An empty string NAME is used to identify the default VALUE. */ + +#define TARGET_SWITCHES \ +{ { "hard-float", 1, N_("Use hardware fp")}, \ + { "soft-float", -1, N_("Don't use hardware fp")}, \ + { "backchain", 2, N_("Set backchain")}, \ + { "no-backchain", -2, N_("Don't set backchain (faster, but debug harder")}, \ + { "small-exec", 4, N_("Use bras for execucable < 64k")}, \ + { "no-small-exec",-4, N_("Don't use bras")}, \ + { "debug", 8, N_("Additional debug prints")}, \ + { "no-debug", -8, N_("Don't print additional debug prints")}, \ + { "64", 16, N_("64 bit mode")}, \ + { "31", -16, N_("31 bit mode")}, \ + { "mvcle", 32, N_("mvcle use")}, \ + { "no-mvcle", -32, N_("mvc&ex")}, \ + { "", TARGET_DEFAULT, 0 } } + +/* Define this to change the optimizations performed by default. */ +#define OPTIMIZATION_OPTIONS(LEVEL, SIZE) optimization_options(LEVEL, SIZE) + +/* Sometimes certain combinations of command options do not make sense + on a particular target machine. You can define a macro + `OVERRIDE_OPTIONS' to take account of this. This macro, if + defined, is executed once just after all the command options have + been parsed. */ +#define OVERRIDE_OPTIONS override_options () + + +/* Defines for REAL_ARITHMETIC. */ +#define IEEE_FLOAT 1 +#define TARGET_IBM_FLOAT 0 +#define TARGET_IEEE_FLOAT 1 + +/* The current function count for create unique internal labels. */ + +extern int s390_function_count; + +/* The amount of space used for outgoing arguments. */ + +extern int current_function_outgoing_args_size; + +/* Target machine storage layout. */ + +/* Define this if most significant bit is lowest numbered in instructions + that operate on numbered bit-fields. */ + +#define BITS_BIG_ENDIAN 1 + +/* Define this if most significant byte of a word is the lowest numbered. */ + +#define BYTES_BIG_ENDIAN 1 + +/* Define this if MS word of a multiword is the lowest numbered. */ + +#define WORDS_BIG_ENDIAN 1 + +/* Number of bits in an addressable storage unit. 
*/ + +#define BITS_PER_UNIT 8 + +/* Width in bits of a "word", which is the contents of a machine register. */ + +#define BITS_PER_WORD (TARGET_64BIT ? 64 : 32) +#define MAX_BITS_PER_WORD 64 + +/* Width of a word, in units (bytes). */ + +#define UNITS_PER_WORD (TARGET_64BIT ? 8 : 4) +#define MIN_UNITS_PER_WORD 4 + +/* Width in bits of a pointer. See also the macro `Pmode' defined below. */ + +#define POINTER_SIZE (TARGET_64BIT ? 64 : 32) + +/* A C expression for the size in bits of the type `short' on the + target machine. If you don't define this, the default is half a + word. (If this would be less than one storage unit, it is + rounded up to one unit.) */ +#define SHORT_TYPE_SIZE 16 + +/* A C expression for the size in bits of the type `int' on the + target machine. If you don't define this, the default is one + word. */ +#define INT_TYPE_SIZE 32 + +/* A C expression for the size in bits of the type `long' on the + target machine. If you don't define this, the default is one + word. */ +#define LONG_TYPE_SIZE (TARGET_64BIT ? 64 : 32) +#define MAX_LONG_TYPE_SIZE 64 + +/* A C expression for the size in bits of the type `long long' on the + target machine. If you don't define this, the default is two + words. */ +#define LONG_LONG_TYPE_SIZE 64 + +/* Right now we only support two floating point formats, the + 32 and 64 bit ieee formats. */ + +#define FLOAT_TYPE_SIZE 32 +#define DOUBLE_TYPE_SIZE 64 +#define LONG_DOUBLE_TYPE_SIZE 64 + +/* Define this macro if it is advisable to hold scalars in registers + in a wider mode than that declared by the program. In such cases, + the value is constrained to be within the bounds of the declared + type, but kept valid in the wider mode. The signedness of the + extension may differ from that of the type. 
*/ + +#define PROMOTE_MODE(MODE, UNSIGNEDP, TYPE) \ +if (INTEGRAL_MODE_P (MODE) && \ + GET_MODE_SIZE (MODE) < UNITS_PER_WORD) { \ + (MODE) = Pmode; \ + } + +/* Defining PROMOTE_FUNCTION_ARGS eliminates some unnecessary zero/sign + extensions applied to char/short functions arguments. Defining + PROMOTE_FUNCTION_RETURN does the same for function returns. */ + +#define PROMOTE_FUNCTION_ARGS +#define PROMOTE_FUNCTION_RETURN +#define PROMOTE_FOR_CALL_ONLY + +/* Allocation boundary (in *bits*) for storing pointers in memory. */ + +#define POINTER_BOUNDARY 32 + +/* Allocation boundary (in *bits*) for storing arguments in argument list. */ + +#define PARM_BOUNDARY (TARGET_64BIT ? 64 : 32) + +/* Boundary (in *bits*) on which stack pointer should be aligned. */ + +#define STACK_BOUNDARY 64 + +/* Allocation boundary (in *bits*) for the code of a function. */ + +#define FUNCTION_BOUNDARY 32 + +/* There is no point aligning anything to a rounder boundary than this. */ + +#define BIGGEST_ALIGNMENT 64 + +/* Alignment of field after `int : 0' in a structure. */ + +#define EMPTY_FIELD_BOUNDARY 32 + +/* Alignment on even addresses for LARL instruction. */ + +#define CONSTANT_ALIGNMENT(EXP, ALIGN) (ALIGN) < 16 ? 16 : (ALIGN) + +#define DATA_ALIGNMENT(TYPE, ALIGN) (ALIGN) < 16 ? 16 : (ALIGN) + +/* Define this if move instructions will actually fail to work when given + unaligned data. */ + +#define STRICT_ALIGNMENT 0 + +/* real arithmetic */ + +#define REAL_ARITHMETIC + +/* Define target floating point format. */ + +#undef TARGET_FLOAT_FORMAT +#ifdef IEEE_FLOAT +#define TARGET_FLOAT_FORMAT IEEE_FLOAT_FORMAT +#else +#define TARGET_FLOAT_FORMAT IBM_FLOAT_FORMAT +#endif + +/* Define if special allocation order desired. */ + +#define REG_ALLOC_ORDER \ +{ 1, 2, 3, 4, 5, 0, 14, 13, 12, 11, 10, 9, 8, 7, 6, \ + 16, 17, 18, 19, 20, 21, 22, 23, \ + 24, 25, 26, 27, 28, 29, 30, 31, \ + 15, 32, 33, 34 } + +/* Standard register usage. 
*/ + +#define INT_REGNO_P(N) ( (int)(N) >= 0 && (N) < 16 ) +#ifdef IEEE_FLOAT +#define FLOAT_REGNO_P(N) ( (N) >= 16 && (N) < 32 ) +#else +#define FLOAT_REGNO_P(N) ( (N) >= 16 && (N) < 20 ) +#endif +#define CC_REGNO_P(N) ( (N) == 33 ) + +/* Number of actual hardware registers. The hardware registers are + assigned numbers for the compiler from 0 to just below + FIRST_PSEUDO_REGISTER. + All registers that the compiler knows about must be given numbers, + even those that are not normally considered general registers. + For the 390, we give the data registers numbers 0-15, + and the floating point registers numbers 16-19. + G5 and following have 16 IEEE floating point register, + which get numbers 16-31. */ + +#define FIRST_PSEUDO_REGISTER 35 + +/* Number of hardware registers that go into the DWARF-2 unwind info. + If not defined, equals FIRST_PSEUDO_REGISTER. */ + +#define DWARF_FRAME_REGISTERS 34 + +/* The following register have a fix usage + GPR 12: GOT register points to the GOT, setup in prologue, + GOT contains pointer to variables in shared libraries + GPR 13: Base register setup in prologue to point to the + literal table of each function + GPR 14: Return registers holds the return address + GPR 15: Stack pointer */ + +#define PIC_OFFSET_TABLE_REGNUM (flag_pic ? 12 : INVALID_REGNUM) +#define BASE_REGISTER 13 +#define RETURN_REGNUM 14 +#define STACK_POINTER_REGNUM 15 + +#define FIXED_REGISTERS \ +{ 0, 0, 0, 0, \ + 0, 0, 0, 0, \ + 0, 0, 0, 0, \ + 0, 1, 1, 1, \ + 0, 0, 0, 0, \ + 0, 0, 0, 0, \ + 0, 0, 0, 0, \ + 0, 0, 0, 0, \ + 1, 1, 1 } + +/* 1 for registers not available across function calls. These must include + the FIXED_REGISTERS and also any registers that can be used without being + saved. + The latter must include the registers where values are returned + and the register where structure-value addresses are passed. 
*/ + +#define CALL_USED_REGISTERS \ +{ 1, 1, 1, 1, \ + 1, 1, 0, 0, \ + 0, 0, 0, 0, \ + 0, 1, 1, 1, \ + 1, 1, 1, 1, \ + 1, 1, 1, 1, \ + 1, 1, 1, 1, \ + 1, 1, 1, 1, \ + 1, 1, 1 } + +/* Like `CALL_USED_REGISTERS' except this macro doesn't require that + the entire set of `FIXED_REGISTERS' be included. + (`CALL_USED_REGISTERS' must be a superset of `FIXED_REGISTERS'). */ + +#define CALL_REALLY_USED_REGISTERS \ +{ 1, 1, 1, 1, \ + 1, 1, 0, 0, \ + 0, 0, 0, 0, \ + 0, 0, 0, 0, \ + 1, 1, 1, 1, \ + 1, 1, 1, 1, \ + 1, 1, 1, 1, \ + 1, 1, 1, 1, \ + 1, 1, 1 } + +/* Macro to conditionally modify fixed_regs/call_used_regs. */ + +#define CONDITIONAL_REGISTER_USAGE \ +do \ + { \ + int i; \ + \ + if (flag_pic) \ + { \ + fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1; \ + call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1; \ + } \ + if (TARGET_64BIT) \ + { \ + for (i = 24; i < 32; i++) \ + call_used_regs[i] = call_really_used_regs[i] = 0; \ + } \ + else \ + { \ + for (i = 18; i < 20; i++) \ + call_used_regs[i] = call_really_used_regs[i] = 0; \ + } \ + } while (0) + +/* The following register have a special usage + GPR 11: Frame pointer if needed to point to automatic variables. + GPR 32: In functions with more the 5 args this register + points to that arguments, it is always eliminated + with stack- or frame-pointer. + GPR 33: Condition code 'register' */ + +#define HARD_FRAME_POINTER_REGNUM 11 +#define FRAME_POINTER_REGNUM 34 + +#define ARG_POINTER_REGNUM 32 + +#define CC_REGNUM 33 + +/* We use the register %r0 to pass the static chain to a nested function. + + Note: It is assumed that this register is call-clobbered! + We can't use any of the function-argument registers either, + and register 1 is needed by the trampoline code, so we have + no other choice but using this one ... */ + +#define STATIC_CHAIN_REGNUM 0 + +/* Return number of consecutive hard regs needed starting at reg REGNO + to hold something of mode MODE. 
+ This is ordinarily the length in words of a value of mode MODE + but can be less for certain modes in special long registers. */ + +#define HARD_REGNO_NREGS(REGNO, MODE) \ + (FLOAT_REGNO_P(REGNO)? \ + (GET_MODE_CLASS(MODE) == MODE_COMPLEX_FLOAT ? 2 : 1) : \ + INT_REGNO_P(REGNO)? \ + ((GET_MODE_SIZE(MODE)+UNITS_PER_WORD-1) / UNITS_PER_WORD) : \ + 1) + +/* Value is 1 if hard register REGNO can hold a value of machine-mode MODE. + The gprs can hold QI, HI, SI, SF, DF, SC and DC. + Even gprs can hold DI. + The floating point registers can hold DF, SF, DC and SC. */ + +#define HARD_REGNO_MODE_OK(REGNO, MODE) \ + (FLOAT_REGNO_P(REGNO)? \ + (GET_MODE_CLASS(MODE) == MODE_FLOAT || \ + GET_MODE_CLASS(MODE) == MODE_COMPLEX_FLOAT || \ + (MODE) == SImode || (MODE) == DImode) : \ + INT_REGNO_P(REGNO)? \ + (HARD_REGNO_NREGS(REGNO, MODE) == 1 || !((REGNO) & 1)) : \ + CC_REGNO_P(REGNO)? \ + GET_MODE_CLASS (MODE) == MODE_CC : \ + 0) + +/* Value is 1 if it is a good idea to tie two pseudo registers when one has + mode MODE1 and one has mode MODE2. + If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2, + for any hard reg, then this must be 0 for correct output. */ + +#define MODES_TIEABLE_P(MODE1, MODE2) \ + (((MODE1) == SFmode || (MODE1) == DFmode) \ + == ((MODE2) == SFmode || (MODE2) == DFmode)) + +/* If defined, gives a class of registers that cannot be used as the + operand of a SUBREG that changes the mode of the object illegally. */ + +#define CLASS_CANNOT_CHANGE_MODE FP_REGS + +/* Defines illegal mode changes for CLASS_CANNOT_CHANGE_MODE. */ + +#define CLASS_CANNOT_CHANGE_MODE_P(FROM,TO) \ + (GET_MODE_SIZE (FROM) != GET_MODE_SIZE (TO)) + +/* Define this macro if references to a symbol must be treated + differently depending on something about the variable or + function named by the symbol (such as what section it is in). + + On s390, if using PIC, mark a SYMBOL_REF for a non-global symbol + so that we may access it directly in the GOT. 
*/ + +#define ENCODE_SECTION_INFO(DECL) \ +do \ + { \ + if (flag_pic) \ + { \ + rtx rtl = (TREE_CODE_CLASS (TREE_CODE (DECL)) != 'd' \ + ? TREE_CST_RTL (DECL) : DECL_RTL (DECL)); \ + \ + if (GET_CODE (rtl) == MEM) \ + { \ + SYMBOL_REF_FLAG (XEXP (rtl, 0)) \ + = (TREE_CODE_CLASS (TREE_CODE (DECL)) != 'd' \ + || ! TREE_PUBLIC (DECL)); \ + } \ + } \ + } \ +while (0) + + +/* This is an array of structures. Each structure initializes one pair + of eliminable registers. The "from" register number is given first, + followed by "to". Eliminations of the same "from" register are listed + in order of preference. */ + +#define ELIMINABLE_REGS \ +{{ FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}, \ + { FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}, \ + { ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \ + { ARG_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}} + +#define CAN_ELIMINATE(FROM, TO) (1) + +#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \ +{ if ((FROM) == FRAME_POINTER_REGNUM && (TO) == STACK_POINTER_REGNUM) \ + { (OFFSET) = 0; } \ + else if ((FROM) == FRAME_POINTER_REGNUM \ + && (TO) == HARD_FRAME_POINTER_REGNUM) \ + { (OFFSET) = 0; } \ + else if ((FROM) == ARG_POINTER_REGNUM \ + && (TO) == HARD_FRAME_POINTER_REGNUM) \ + { (OFFSET) = s390_arg_frame_offset (); } \ + else if ((FROM) == ARG_POINTER_REGNUM && (TO) == STACK_POINTER_REGNUM) \ + { (OFFSET) = s390_arg_frame_offset (); } \ + else \ + abort(); \ +} + +#define CAN_DEBUG_WITHOUT_FP + +/* Value should be nonzero if functions must have frame pointers. + Zero means the frame pointer need not be set up (and parms may be + accessed via the stack pointer) in functions that seem suitable. + This is computed in `reload', in reload1.c. */ + +#define FRAME_POINTER_REQUIRED 0 + +/* Define the classes of registers for register constraints in the + machine description. Also define ranges of constants. + + One of the classes must always be named ALL_REGS and include all hard regs. 
+ If there is more than one class, another class must be named NO_REGS + and contain no registers. + + The name GENERAL_REGS must be the name of a class (or an alias for + another name such as ALL_REGS). This is the class of registers + that is allowed by "g" or "r" in a register constraint. + Also, registers outside this class are allocated only when + instructions express preferences for them. + + The classes must be numbered in nondecreasing order; that is, + a larger-numbered class must never be contained completely + in a smaller-numbered class. + + For any two classes, it is very desirable that there be another + class that represents their union. */ + +/*#define SMALL_REGISTER_CLASSES 1*/ + +enum reg_class +{ + NO_REGS, ADDR_REGS, GENERAL_REGS, + FP_REGS, ADDR_FP_REGS, GENERAL_FP_REGS, + ALL_REGS, LIM_REG_CLASSES +}; + +#define N_REG_CLASSES (int) LIM_REG_CLASSES + +/* Give names of register classes as strings for dump file. */ + +#define REG_CLASS_NAMES \ +{ "NO_REGS", "ADDR_REGS", "GENERAL_REGS", \ + "FP_REGS", "ADDR_FP_REGS", "GENERAL_FP_REGS", "ALL_REGS" } + +/* Define which registers fit in which classes. This is an initializer for + a vector of HARD_REG_SET of length N_REG_CLASSES. + G5 and latter have 16 register and support IEEE floating point operations. */ + +#define REG_CLASS_CONTENTS \ +{ \ + { 0x00000000, 0x00000000 }, /* NO_REGS */ \ + { 0x0000fffe, 0x00000005 }, /* ADDR_REGS */ \ + { 0x0000ffff, 0x00000005 }, /* GENERAL_REGS */ \ + { 0xffff0000, 0x00000000 }, /* FP_REGS */ \ + { 0xfffffffe, 0x00000005 }, /* ADDR_FP_REGS */ \ + { 0xffffffff, 0x00000005 }, /* GENERAL_FP_REGS */ \ + { 0xffffffff, 0x00000007 }, /* ALL_REGS */ \ +} + + +/* The same information, inverted: + Return the class number of the smallest class containing + reg number REGNO. This could be a conditional expression + or could index an array. 
*/ + +#define REGNO_REG_CLASS(REGNO) (regclass_map[REGNO]) + +extern enum reg_class regclass_map[FIRST_PSEUDO_REGISTER]; /* smalled class containing REGNO */ + +/* The class value for index registers, and the one for base regs. */ + +#define INDEX_REG_CLASS ADDR_REGS +#define BASE_REG_CLASS ADDR_REGS + +/* Get reg_class from a letter such as appears in the machine description. */ + +#define REG_CLASS_FROM_LETTER(C) \ + ((C) == 'a' ? ADDR_REGS : \ + (C) == 'd' ? GENERAL_REGS : \ + (C) == 'f' ? FP_REGS : NO_REGS) + +/* The letters I, J, K, L and M in a register constraint string can be used + to stand for particular ranges of immediate operands. + This macro defines what the ranges are. + C is the letter, and VALUE is a constant value. + Return 1 if VALUE is in the range specified by C. */ + +#define CONST_OK_FOR_LETTER_P(VALUE, C) \ + ((C) == 'I' ? (unsigned long) (VALUE) < 256 : \ + (C) == 'J' ? (unsigned long) (VALUE) < 4096 : \ + (C) == 'K' ? (VALUE) >= -32768 && (VALUE) < 32768 : \ + (C) == 'L' ? (unsigned long) (VALUE) < 65536 : 0) + +/* Similar, but for floating constants, and defining letters G and H. + Here VALUE is the CONST_DOUBLE rtx itself. */ + +#define CONST_DOUBLE_OK_FOR_LETTER_P(VALUE, C) 1 + +/* 'Q' means a memory-reference for a S-type operand. */ + +#define EXTRA_CONSTRAINT(OP, C) \ + ((C) == 'Q' ? s_operand (OP, GET_MODE (OP)) : \ + (C) == 'S' ? larl_operand (OP, GET_MODE (OP)) : 0) + +/* Given an rtx X being reloaded into a reg required to be in class CLASS, + return the class of reg to actually use. In general this is just CLASS; + but on some machines in some cases it is preferable to use a more + restrictive class. */ + +#define PREFERRED_RELOAD_CLASS(X, CLASS) \ + s390_preferred_reload_class ((X), (CLASS)) + +/* Return the maximum number of consecutive registers needed to represent + mode MODE in a register of class CLASS. */ + +#define CLASS_MAX_NREGS(CLASS, MODE) \ + ((CLASS) == FP_REGS ? \ + (GET_MODE_CLASS (MODE) == MODE_COMPLEX_FLOAT ? 
2 : 1) : \ + (GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD) + +/* We need a secondary reload when loading a PLUS which is + not a valid operand for LOAD ADDRESS. */ + +#define SECONDARY_INPUT_RELOAD_CLASS(CLASS, MODE, IN) \ + s390_secondary_input_reload_class ((CLASS), (MODE), (IN)) + +/* If we are copying between FP registers and anything else, we need a memory + location. */ + +#define SECONDARY_MEMORY_NEEDED(CLASS1, CLASS2, MODE) \ + ((CLASS1) != (CLASS2) && ((CLASS1) == FP_REGS || (CLASS2) == FP_REGS)) + +/* Get_secondary_mem widens its argument to BITS_PER_WORD which loses on 64bit + because the movsi and movsf patterns don't handle r/f moves. */ + +#define SECONDARY_MEMORY_NEEDED_MODE(MODE) \ + (GET_MODE_BITSIZE (MODE) < 32 \ + ? mode_for_size (32, GET_MODE_CLASS (MODE), 0) \ + : MODE) + + +/* A C expression whose value is nonzero if pseudos that have been + assigned to registers of class CLASS would likely be spilled + because registers of CLASS are needed for spill registers. + + The default value of this macro returns 1 if CLASS has exactly one + register and zero otherwise. On most machines, this default + should be used. Only define this macro to some other expression + if pseudo allocated by `local-alloc.c' end up in memory because + their hard registers were needed for spill registers. If this + macro returns nonzero for those classes, those pseudos will only + be allocated by `global.c', which knows how to reallocate the + pseudo to another register. If there would not be another + register available for reallocation, you should not change the + definition of this macro since the only effect of such a + definition would be to slow down register allocation. */ + +/* Stack layout; function entry, exit and calling. */ + +/* The return address of the current frame is retrieved + from the initial value of register RETURN_REGNUM. + For frames farther back, we use the stack slot where + the corresponding RETURN_REGNUM register was saved. 
*/ + +#define DYNAMIC_CHAIN_ADDRESS(FRAME) \ + ((FRAME) != hard_frame_pointer_rtx ? (FRAME) : \ + plus_constant (arg_pointer_rtx, -STACK_POINTER_OFFSET)) + +#define RETURN_ADDR_RTX(COUNT, FRAME) \ + ((COUNT) == 0 ? get_hard_reg_initial_val (Pmode, RETURN_REGNUM) : \ + gen_rtx_MEM (Pmode, \ + memory_address (Pmode, \ + plus_constant (DYNAMIC_CHAIN_ADDRESS ((FRAME)), \ + RETURN_REGNUM * UNITS_PER_WORD)))) + +/* The following macros will turn on dwarf2 exception hndling + Other code location for this exception handling are + in s390.md (eh_return insn) and in linux.c in the prologue. */ + +#define INCOMING_RETURN_ADDR_RTX gen_rtx_REG (Pmode, RETURN_REGNUM) + +/* We have 31 bit mode. */ + +#define MASK_RETURN_ADDR (TARGET_64BIT ? GEN_INT (-1) : GEN_INT (0x7fffffff)) + +/* The offset from the incoming value of %sp to the top of the stack frame + for the current function. */ + +#define INCOMING_FRAME_SP_OFFSET STACK_POINTER_OFFSET + +/* Location, from where return address to load. */ + +#define DWARF_FRAME_RETURN_COLUMN 14 + +/* Describe how we implement __builtin_eh_return. */ +#define EH_RETURN_DATA_REGNO(N) ((N) < 4 ? (N) + 6 : INVALID_REGNUM) +#define EH_RETURN_STACKADJ_RTX gen_rtx_REG (Pmode, 10) +#define EH_RETURN_HANDLER_RTX \ + gen_rtx_MEM (Pmode, plus_constant (arg_pointer_rtx, \ + TARGET_64BIT? -48 : -40)) + +/* Define this if pushing a word on the stack makes the stack pointer a + smaller address. */ + +#define STACK_GROWS_DOWNWARD + +/* Define this if the nominal address of the stack frame is at the + high-address end of the local variables; that is, each additional local + variable allocated goes at a more negative offset in the frame. */ + +/* #define FRAME_GROWS_DOWNWARD */ + +/* Offset from stack-pointer to first location of outgoing args. */ + +#define STACK_POINTER_OFFSET (TARGET_64BIT ? 160 : 96) + +/* Offset within stack frame to start allocating local variables at. 
+ If FRAME_GROWS_DOWNWARD, this is the offset to the END of the + first local allocated. Otherwise, it is the offset to the BEGINNING + of the first local allocated. */ + +#define STARTING_FRAME_OFFSET \ + (STACK_POINTER_OFFSET + current_function_outgoing_args_size) + +#define INITIAL_FRAME_POINTER_OFFSET(DEPTH) (DEPTH) = 0 + +/* If we generate an insn to push BYTES bytes, this says how many the stack + pointer really advances by. On S/390, we have no push instruction. */ + +/* #define PUSH_ROUNDING(BYTES) */ + +/* Accumulate the outgoing argument count so we can request the right + DSA size and determine stack offset. */ + +#define ACCUMULATE_OUTGOING_ARGS 1 + +/* Offset from the stack pointer register to an item dynamically + allocated on the stack, e.g., by `alloca'. + + The default value for this macro is `STACK_POINTER_OFFSET' plus the + length of the outgoing arguments. The default is correct for most + machines. See `function.c' for details. */ +#define STACK_DYNAMIC_OFFSET(FUNDECL) (STARTING_FRAME_OFFSET) + +/* Offset of first parameter from the argument pointer register value. + On the S/390, we define the argument pointer to the start of the fixed + area. */ +#define FIRST_PARM_OFFSET(FNDECL) 0 + +/* Define this if stack space is still allocated for a parameter passed + in a register. The value is the number of bytes allocated to this + area. */ +/* #define REG_PARM_STACK_SPACE(FNDECL) 32 */ + +/* Define this if the above stack space is to be considered part of the + space allocated by the caller. */ +/* #define OUTGOING_REG_PARM_STACK_SPACE */ + +/* 1 if N is a possible register number for function argument passing. + On S390, general registers 2 - 6 and floating point register 0 and 2 + are used in this way. */ + +#define FUNCTION_ARG_REGNO_P(N) (((N) >=2 && (N) <7) || \ + (N) == 16 || (N) == 17) + +/* Define a data type for recording info about an argument list during + the scan of that argument list. 
This data type should hold all + necessary information about the function itself and about the args + processed so far, enough to enable macros such as FUNCTION_ARG to + determine where the next arg should go. */ + +typedef struct s390_arg_structure +{ + int gprs; /* gpr so far */ + int fprs; /* fpr so far */ +} +CUMULATIVE_ARGS; + + +/* Initialize a variable CUM of type CUMULATIVE_ARGS for a call to + a function whose data type is FNTYPE. + For a library call, FNTYPE is 0. */ + +#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, NN) \ + ((CUM).gprs=0, (CUM).fprs=0) + +/* Update the data in CUM to advance over an argument of mode MODE and + data type TYPE. (TYPE is null for libcalls where that information + may not be available.) */ + +#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \ + s390_function_arg_advance (&CUM, MODE, TYPE, NAMED) + +/* Define where to put the arguments to a function. Value is zero to push + the argument on the stack, or a hard register in which to store the + argument. */ + +#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \ + s390_function_arg (&CUM, MODE, TYPE, NAMED) + +/* Define where to expect the arguments of a function. Value is zero, if + the argument is on the stack, or a hard register in which the argument + is stored. It is the same like FUNCTION_ARG, except for unnamed args + That means, that all in case of varargs used, the arguments are expected + from the stack. + S/390 has already space on the stack for args coming in registers, + they are pushed in prologue, if needed. */ + + +/* Define the `__builtin_va_list' type. */ + +#define BUILD_VA_LIST_TYPE(VALIST) \ + (VALIST) = s390_build_va_list () + +/* Implement `va_start' for varargs and stdarg. */ + +#define EXPAND_BUILTIN_VA_START(stdarg, valist, nextarg) \ + s390_va_start (stdarg, valist, nextarg) + +/* Implement `va_arg'. 
*/ + +#define EXPAND_BUILTIN_VA_ARG(valist, type) \ + s390_va_arg (valist, type) + +/* For an arg passed partly in registers and partly in memory, this is the + number of registers used. For args passed entirely in registers or + entirely in memory, zero. */ + +#define FUNCTION_ARG_PARTIAL_NREGS(CUM, MODE, TYPE, NAMED) 0 + + +/* Define if returning from a function call automatically pops the + arguments described by the number-of-args field in the call. */ + +#define RETURN_POPS_ARGS(FUNDECL, FUNTYPE, SIZE) 0 + + +/* Define how to find the value returned by a function. VALTYPE is the + data type of the value (as a tree). + If the precise function being called is known, FUNC is its FUNCTION_DECL; + otherwise, FUNC is 15. */ + +#define RET_REG(MODE) ((GET_MODE_CLASS (MODE) == MODE_INT \ + || TARGET_SOFT_FLOAT ) ? 2 : 16) + + +/* for structs the address is passed, and the Callee makes a + copy, only if needed */ + +#define FUNCTION_ARG_PASS_BY_REFERENCE(CUM, MODE, TYPE, NAMED) \ + s390_function_arg_pass_by_reference (MODE, TYPE) + + +/* Register 2 (and 3) for integral values + or floating point register 0 (and 2) for fp values are used. */ + +#define FUNCTION_VALUE(VALTYPE, FUNC) \ + gen_rtx_REG ((INTEGRAL_TYPE_P (VALTYPE) \ + && TYPE_PRECISION (VALTYPE) < BITS_PER_WORD) \ + || POINTER_TYPE_P (VALTYPE) \ + ? word_mode : TYPE_MODE (VALTYPE), \ + TREE_CODE (VALTYPE) == REAL_TYPE && TARGET_HARD_FLOAT ? 16 : 2) + +/* Define how to find the value returned by a library function assuming + the value has mode MODE. */ + +#define LIBCALL_VALUE(MODE) gen_rtx (REG, MODE, RET_REG (MODE)) + +/* 1 if N is a possible register number for a function value. */ + +#define FUNCTION_VALUE_REGNO_P(N) ((N) == 2 || (N) == 16) + +/* The definition of this macro implies that there are cases where + a scalar value cannot be returned in registers. 
*/ + +#define RETURN_IN_MEMORY(type) \ + (TYPE_MODE (type) == BLKmode || \ + GET_MODE_CLASS (TYPE_MODE (type)) == MODE_COMPLEX_INT || \ + GET_MODE_CLASS (TYPE_MODE (type)) == MODE_COMPLEX_FLOAT) + +/* Mode of stack savearea. + FUNCTION is VOIDmode because calling convention maintains SP. + BLOCK needs Pmode for SP. + NONLOCAL needs twice Pmode to maintain both backchain and SP. */ + +#define STACK_SAVEAREA_MODE(LEVEL) \ + (LEVEL == SAVE_FUNCTION ? VOIDmode \ + : LEVEL == SAVE_NONLOCAL ? (TARGET_64BIT ? TImode : DImode) : Pmode) + +/* Structure value address is passed as invisible first argument (gpr 2). */ + +#define STRUCT_VALUE 0 + +/* This macro definition sets up a default value for `main' to return. */ + +#define DEFAULT_MAIN_RETURN c_expand_return (integer_zero_node) + +/* Length in units of the trampoline for entering a nested function. */ + +#define TRAMPOLINE_SIZE (TARGET_64BIT ? 36 : 20) + +/* Initialize the dynamic part of trampoline. */ + +#define INITIALIZE_TRAMPOLINE(ADDR, FNADDR, CXT) \ + s390_initialize_trampoline ((ADDR), (FNADDR), (CXT)) + +/* Template for constant part of trampoline. */ + +#define TRAMPOLINE_TEMPLATE(FILE) \ + s390_trampoline_template (FILE) + +/* Output assembler code to FILE to increment profiler label # LABELNO + for profiling a function entry. */ + +#define FUNCTION_PROFILER(FILE, LABELNO) \ + s390_function_profiler ((FILE), ((LABELNO))) + +#define PROFILE_BEFORE_PROLOGUE 1 + +/* Define EXIT_IGNORE_STACK if, when returning from a function, the stack + pointer does not matter (provided there is a frame pointer). */ + +#define EXIT_IGNORE_STACK 1 + +/* Addressing modes, and classification of registers for them. */ + +/* #define HAVE_POST_INCREMENT */ +/* #define HAVE_POST_DECREMENT */ + +/* #define HAVE_PRE_DECREMENT */ +/* #define HAVE_PRE_INCREMENT */ + +/* These assume that REGNO is a hard or pseudo reg number. 
They give + nonzero only if REGNO is a hard reg of the suitable class or a pseudo + reg currently allocated to a suitable hard reg. + These definitions are NOT overridden anywhere. */ + +#define REGNO_OK_FOR_INDEX_P(REGNO) \ + (((REGNO) < FIRST_PSEUDO_REGISTER \ + && REGNO_REG_CLASS ((REGNO)) == ADDR_REGS) \ + || (reg_renumber[REGNO] > 0 && reg_renumber[REGNO] < 16)) + +#define REGNO_OK_FOR_BASE_P(REGNO) REGNO_OK_FOR_INDEX_P (REGNO) + +#define REGNO_OK_FOR_DATA_P(REGNO) \ + ((REGNO) < 16 || (unsigned) reg_renumber[REGNO] < 16) + +#define REGNO_OK_FOR_FP_P(REGNO) \ + FLOAT_REGNO_P (REGNO) + +/* Now macros that check whether X is a register and also, + strictly, whether it is in a specified class. */ + +/* 1 if X is a data register. */ + +#define DATA_REG_P(X) (REG_P (X) && REGNO_OK_FOR_DATA_P (REGNO (X))) + +/* 1 if X is an fp register. */ + +#define FP_REG_P(X) (REG_P (X) && REGNO_OK_FOR_FP_P (REGNO (X))) + +/* 1 if X is an address register. */ + +#define ADDRESS_REG_P(X) (REG_P (X) && REGNO_OK_FOR_BASE_P (REGNO (X))) + +/* Maximum number of registers that can appear in a valid memory address. */ + +#define MAX_REGS_PER_ADDRESS 2 + +/* Recognize any constant value that is a valid address. */ + +#define CONSTANT_ADDRESS_P(X) 0 + +#define SYMBOLIC_CONST(X) \ +(GET_CODE (X) == SYMBOL_REF \ + || GET_CODE (X) == LABEL_REF \ + || (GET_CODE (X) == CONST && symbolic_reference_mentioned_p (X))) + +/* General operand is everything except SYMBOL_REF, CONST and CONST_DOUBLE + they have to be forced to constant pool + CONST_INT have to be forced into constant pool, if greater than + 64k. Depending on the insn they have to be force into constant pool + for smaller value; in this case we have to work with nonimmediate operand. */ + +#define LEGITIMATE_PIC_OPERAND_P(X) \ + legitimate_pic_operand_p (X) + +/* Nonzero if the constant value X is a legitimate general operand. + It is given that X satisfies CONSTANT_P or is a CONST_DOUBLE. 
*/ + +#define LEGITIMATE_CONSTANT_P(X) \ + legitimate_constant_p (X) + +/* The macros REG_OK_FOR..._P assume that the arg is a REG rtx and check + its validity for a certain class. We have two alternate definitions + for each of them. The usual definition accepts all pseudo regs; the + other rejects them all. The symbol REG_OK_STRICT causes the latter + definition to be used. + + Most source files want to accept pseudo regs in the hope that they will + get allocated to the class that the insn wants them to be in. + Some source files that are used after register allocation + need to be strict. */ + +/* + * Nonzero if X is a hard reg that can be used as an index or if it is + * a pseudo reg. + */ + +#define REG_OK_FOR_INDEX_NONSTRICT_P(X) \ +((GET_MODE (X) == Pmode) && \ + ((REGNO (X) >= FIRST_PSEUDO_REGISTER) \ + || REGNO_REG_CLASS (REGNO (X)) == ADDR_REGS)) + +/* Nonzero if X is a hard reg that can be used as a base reg or if it is + a pseudo reg. */ + +#define REG_OK_FOR_BASE_NONSTRICT_P(X) REG_OK_FOR_INDEX_NONSTRICT_P (X) + +/* Nonzero if X is a hard reg that can be used as an index. */ + +#define REG_OK_FOR_INDEX_STRICT_P(X) \ +((GET_MODE (X) == Pmode) && (REGNO_OK_FOR_INDEX_P (REGNO (X)))) + +/* Nonzero if X is a hard reg that can be used as a base reg. */ + +#define REG_OK_FOR_BASE_STRICT_P(X) \ +((GET_MODE (X) == Pmode) && (REGNO_OK_FOR_BASE_P (REGNO (X)))) + + +#ifndef REG_OK_STRICT +#define REG_OK_FOR_INDEX_P(X) REG_OK_FOR_INDEX_NONSTRICT_P(X) +#define REG_OK_FOR_BASE_P(X) REG_OK_FOR_BASE_NONSTRICT_P(X) +#else +#define REG_OK_FOR_INDEX_P(X) REG_OK_FOR_INDEX_STRICT_P(X) +#define REG_OK_FOR_BASE_P(X) REG_OK_FOR_BASE_STRICT_P(X) +#endif + + +/* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression that is a + valid memory address for an instruction. + The MODE argument is the machine mode for the MEM expression + that wants to use this address. 
+ + The other macros defined here are used only in GO_IF_LEGITIMATE_ADDRESS, + except for CONSTANT_ADDRESS_P which is actually machine-independent. */ + +#ifdef REG_OK_STRICT +#define GO_IF_LEGITIMATE_ADDRESS(MODE, X, ADDR) \ +{ \ + if (legitimate_address_p (MODE, X, 1)) \ + goto ADDR; \ +} +#else +#define GO_IF_LEGITIMATE_ADDRESS(MODE, X, ADDR) \ +{ \ + if (legitimate_address_p (MODE, X, 0)) \ + goto ADDR; \ +} +#endif + + +/* S/390 has no mode dependent addresses. */ + +#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR, LABEL) + +/* Try machine-dependent ways of modifying an illegitimate address + to be legitimate. If we find one, return the new, valid address. + This macro is used in only one place: `memory_address' in explow.c. */ + +#define LEGITIMIZE_ADDRESS(X, OLDX, MODE, WIN) \ +{ \ + (X) = legitimize_address (X, OLDX, MODE); \ + if (memory_address_p (MODE, X)) \ + goto WIN; \ +} + +/* Specify the machine mode that this machine uses for the index in the + tablejump instruction. */ + +#define CASE_VECTOR_MODE (TARGET_64BIT ? DImode : SImode) + +/* Define this if the tablejump instruction expects the table to contain + offsets from the address of the table. + Do not define this if the table should contain absolute addresses. */ + +/* #define CASE_VECTOR_PC_RELATIVE */ + +/* Load from integral MODE < SI from memory into register makes sign_extend + or zero_extend + In our case sign_extension happens for Halfwords, other no extension. */ + +#define LOAD_EXTEND_OP(MODE) \ +(TARGET_64BIT ? ((MODE) == QImode ? ZERO_EXTEND : \ + (MODE) == HImode ? SIGN_EXTEND : NIL) \ + : ((MODE) == HImode ? SIGN_EXTEND : NIL)) + +/* Define this if fixuns_trunc is the same as fix_trunc. */ + +/* #define FIXUNS_TRUNC_LIKE_FIX_TRUNC */ + +/* We use "unsigned char" as default. */ + +#define DEFAULT_SIGNED_CHAR 0 + +/* Max number of bytes we can move from memory to memory in one reasonably + fast instruction. 
*/ + +#define MOVE_MAX 256 + +/* Nonzero if access to memory by bytes is slow and undesirable. */ + +#define SLOW_BYTE_ACCESS 1 + +/* Define if shifts truncate the shift count which implies one can omit + a sign-extension or zero-extension of a shift count. */ + +/* #define SHIFT_COUNT_TRUNCATED */ + +/* Value is 1 if truncating an integer of INPREC bits to OUTPREC bits + is done just by pretending it is already truncated. */ + +#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1 + +/* We assume that the store-condition-codes instructions store 0 for false + and some other value for true. This is the value stored for true. */ + +/* #define STORE_FLAG_VALUE -1 */ + +/* Don't perform CSE on function addresses. */ + +#define NO_FUNCTION_CSE + +/* Specify the machine mode that pointers have. + After generation of rtl, the compiler makes no further distinction + between pointers and any other objects of this machine mode. */ + +#define Pmode ((enum machine_mode) (TARGET_64BIT ? DImode : SImode)) + +/* A function address in a call instruction is a byte address (for + indexing purposes) so give the MEM rtx a byte's mode. */ + +#define FUNCTION_MODE QImode + + +/* A part of a C `switch' statement that describes the relative costs + of constant RTL expressions. It must contain `case' labels for + expression codes `const_int', `const', `symbol_ref', `label_ref' + and `const_double'. Each case must ultimately reach a `return' + statement to return the relative cost of the use of that kind of + constant value in an expression. The cost may depend on the + precise value of the constant, which is available for examination + in X, and the rtx code of the expression in which it is contained, + found in OUTER_CODE. + + CODE is the expression code--redundant, since it can be obtained + with `GET_CODE (X)'. */ +/* Force_const_mem does not work out of reload, because the saveable_obstack + is set to reload_obstack, which does not live long enough. 
+ Because of this we cannot use force_const_mem in addsi3. + This leads to problems with gen_add2_insn with a constant greater + than a short. Because of that we give an addition of greater + constants a cost of 3 (reload1.c 10096). */ + + +#define CONST_COSTS(RTX, CODE, OUTER_CODE) \ + case CONST: \ + if ((GET_CODE (XEXP (RTX, 0)) == MINUS) && \ + (GET_CODE (XEXP (XEXP (RTX, 0), 1)) != CONST_INT)) \ + return 1000; \ + case CONST_INT: \ + if ((OUTER_CODE == PLUS) && \ + ((INTVAL (RTX) > 32767) || \ + (INTVAL (RTX) < -32768))) \ + return COSTS_N_INSNS (3); \ + case LABEL_REF: \ + case SYMBOL_REF: \ + case CONST_DOUBLE: \ + return 0; \ + + +/* Like `CONST_COSTS' but applies to nonconstant RTL expressions. + This can be used, for example, to indicate how costly a multiply + instruction is. In writing this macro, you can use the construct + `COSTS_N_INSNS (N)' to specify a cost equal to N fast + instructions. OUTER_CODE is the code of the expression in which X + is contained. + + This macro is optional; do not define it if the default cost + assumptions are adequate for the target machine. */ + +#define RTX_COSTS(X, CODE, OUTER_CODE) \ + case ASHIFT: \ + case ASHIFTRT: \ + case LSHIFTRT: \ + case PLUS: \ + case AND: \ + case IOR: \ + case XOR: \ + case MINUS: \ + case NEG: \ + case NOT: \ + return 1; \ + case MULT: \ + if (GET_MODE (XEXP (X, 0)) == DImode) \ + return 40; \ + else \ + return 7; \ + case DIV: \ + case UDIV: \ + case MOD: \ + case UMOD: \ + return 33; + + +/* An expression giving the cost of an addressing mode that contains + ADDRESS. If not defined, the cost is computed from the ADDRESS + expression and the `CONST_COSTS' values. + + For most CISC machines, the default cost is a good approximation + of the true cost of the addressing mode. However, on RISC + machines, all instructions normally have the same length and + execution time. Hence all addresses will have equal costs. 
+ + In cases where more than one form of an address is known, the form + with the lowest cost will be used. If multiple forms have the + same, lowest, cost, the one that is the most complex will be used. + + For example, suppose an address that is equal to the sum of a + register and a constant is used twice in the same basic block. + When this macro is not defined, the address will be computed in a + register and memory references will be indirect through that + register. On machines where the cost of the addressing mode + containing the sum is no higher than that of a simple indirect + reference, this will produce an additional instruction and + possibly require an additional register. Proper specification of + this macro eliminates this overhead for such machines. + + Similar use of this macro is made in strength reduction of loops. + + ADDRESS need not be valid as an address. In such a case, the cost + is not relevant and can be any value; invalid addresses need not be + assigned a different cost. + + On machines where an address involving more than one register is as + cheap as an address computation involving only one register, + defining `ADDRESS_COST' to reflect this can cause two registers to + be live over a region of code where only one would have been if + `ADDRESS_COST' were not defined in that manner. This effect should + be considered in the definition of this macro. Equivalent costs + should probably only be given to addresses with different numbers + of registers on machines with lots of registers. + + This macro will normally either not be defined or be defined as a + constant. + + On s390 symbols are expensive if compiled with fpic + lifetimes. */ + +#define ADDRESS_COST(RTX) \ + ((flag_pic && GET_CODE (RTX) == SYMBOL_REF) ? 2 : 1) + +/* On s390, copy between fprs and gprs is expensive. 
*/ + +#define REGISTER_MOVE_COST(MODE, CLASS1, CLASS2) \ + (( ( reg_classes_intersect_p ((CLASS1), GENERAL_REGS) \ + && reg_classes_intersect_p ((CLASS2), FP_REGS)) \ + || ( reg_classes_intersect_p ((CLASS1), FP_REGS) \ + && reg_classes_intersect_p ((CLASS2), GENERAL_REGS))) ? 10 : 1) + + +/* A C expression for the cost of moving data of mode M between a + register and memory. A value of 2 is the default; this cost is + relative to those in `REGISTER_MOVE_COST'. + + If moving between registers and memory is more expensive than + between two registers, you should define this macro to express the + relative cost. */ + +#define MEMORY_MOVE_COST(M, C, I) 1 + +/* A C expression for the cost of a branch instruction. A value of 1 + is the default; other values are interpreted relative to that. */ + +#define BRANCH_COST 1 + +/* Add any extra modes needed to represent the condition code. */ +#define EXTRA_CC_MODES \ + CC (CCZmode, "CCZ") \ + CC (CCAmode, "CCA") \ + CC (CCLmode, "CCL") \ + CC (CCUmode, "CCU") \ + CC (CCSmode, "CCS") \ + CC (CCTmode, "CCT") + +/* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE, + return the mode to be used for the comparison. */ + +#define SELECT_CC_MODE(OP, X, Y) s390_select_ccmode ((OP), (X), (Y)) + + +/* Define the information needed to generate branch and scc insns. This is + stored from the compare operation. Note that we can't use "rtx" here + since it hasn't been defined! */ + +extern struct rtx_def *s390_compare_op0, *s390_compare_op1; + + +/* How to refer to registers in assembler output. This sequence is + indexed by compiler's hard-register-number (see above). 
*/ + +#define REGISTER_NAMES \ +{ "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7", \ + "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15", \ + "%f0", "%f2", "%f4", "%f6", "%f1", "%f3", "%f5", "%f7", \ + "%f8", "%f10", "%f12", "%f14", "%f9", "%f11", "%f13", "%f15", \ + "%ap", "%cc", "%fp" \ +} + +/* implicit call of memcpy, not bcopy */ + +#define TARGET_MEM_FUNCTIONS + +/* Either simplify a location expression, or return the original. */ + +#define ASM_SIMPLIFY_DWARF_ADDR(X) \ + s390_simplify_dwarf_addr (X) + +/* Print operand X (an rtx) in assembler syntax to file FILE. + CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified. + For `%' followed by punctuation, CODE is the punctuation and X is null. */ + +#define PRINT_OPERAND(FILE, X, CODE) print_operand (FILE, X, CODE) + +#define PRINT_OPERAND_ADDRESS(FILE, ADDR) print_operand_address (FILE, ADDR) + + +/* Define the codes that are matched by predicates in aux-output.c. */ + +#define PREDICATE_CODES \ + {"s_operand", { SUBREG, MEM }}, \ + {"s_imm_operand", { CONST_INT, CONST_DOUBLE, SUBREG, MEM }}, \ + {"bras_sym_operand",{ SYMBOL_REF, CONST }}, \ + {"larl_operand", { SYMBOL_REF, CONST, CONST_INT, CONST_DOUBLE }}, \ + {"load_multiple_operation", {PARALLEL}}, \ + {"store_multiple_operation", {PARALLEL}}, \ + {"const0_operand", { CONST_INT, CONST_DOUBLE }}, \ + {"consttable_operand", { SYMBOL_REF, LABEL_REF, CONST, \ + CONST_INT, CONST_DOUBLE }}, \ + {"s390_plus_operand", { PLUS }}, + + +/* S/390 constant pool breaks the devices in crtstuff.c to control section + in where code resides. We have to write it as asm code. */ +#ifndef __s390x__ +#define CRT_CALL_STATIC_FUNCTION(SECTION_OP, FUNC) \ + asm (SECTION_OP "\n\ + bras\t%r2,1f\n\ +0: .long\t" USER_LABEL_PREFIX #FUNC " - 0b\n\ +1: l\t%r3,0(%r2)\n\ + bas\t%r14,0(%r3,%r2)\n\ + .previous"); +#endif + +/* Constant Pool for all symbols operands which are changed with + force_const_mem during insn generation (expand_insn). 
*/ + +extern int s390_pool_count; +extern int s390_nr_constants; +extern int s390_pool_overflow; + +#define ASM_OUTPUT_POOL_PROLOGUE(FILE, FUNNAME, fndecl, size) \ +{ \ + struct pool_constant *pool; \ + \ + if (s390_pool_count == -1) \ + { \ + s390_nr_constants = 0; \ + for (pool = first_pool; pool; pool = pool->next) \ + if (pool->mark) s390_nr_constants++; \ + return; \ + } \ +} + +#define ASM_OUTPUT_SPECIAL_POOL_ENTRY(FILE, EXP, MODE, ALIGN, LABELNO, WIN) \ +{ \ + fprintf (FILE, ".LC%d:\n", LABELNO); \ + \ + /* Output the value of the constant itself. */ \ + switch (GET_MODE_CLASS (MODE)) \ + { \ + case MODE_FLOAT: \ + if (GET_CODE (EXP) != CONST_DOUBLE) \ + abort (); \ + \ + memcpy ((char *) &u, (char *) &CONST_DOUBLE_LOW (EXP), sizeof u); \ + assemble_real (u.d, MODE, ALIGN); \ + break; \ + \ + case MODE_INT: \ + case MODE_PARTIAL_INT: \ + if (flag_pic \ + && (GET_CODE (EXP) == CONST \ + || GET_CODE (EXP) == SYMBOL_REF \ + || GET_CODE (EXP) == LABEL_REF )) \ + { \ + fputs (integer_asm_op (UNITS_PER_WORD, TRUE), FILE); \ + s390_output_symbolic_const (FILE, EXP); \ + fputc ('\n', (FILE)); \ + } \ + else \ + { \ + assemble_integer (EXP, GET_MODE_SIZE (MODE), ALIGN, 1); \ + if (GET_MODE_SIZE (MODE) == 1) \ + ASM_OUTPUT_SKIP ((FILE), 1); \ + } \ + break; \ + \ + default: \ + abort (); \ + } \ + goto WIN; \ +} + +#endif diff --git a/contrib/gcc/config/s390/s390.md b/contrib/gcc/config/s390/s390.md new file mode 100644 index 0000000..2742638 --- /dev/null +++ b/contrib/gcc/config/s390/s390.md @@ -0,0 +1,6727 @@ +;;- Machine description for GNU compiler -- S/390 / zSeries version. +;; Copyright (C) 1999, 2000, 2001, 2002 Free Software Foundation, Inc. +;; Contributed by Hartmut Penner (hpenner@de.ibm.com) and +;; Ulrich Weigand (uweigand@de.ibm.com). +;; This file is part of GNU CC. 
+ +;; GNU CC is free software; you can redistribute it and/or modify +;; it under the terms of the GNU General Public License as published by +;; the Free Software Foundation; either version 2, or (at your option) +;; any later version. + +;; GNU CC is distributed in the hope that it will be useful, +;; but WITHOUT ANY WARRANTY; without even the implied warranty of +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +;; GNU General Public License for more details. + +;; You should have received a copy of the GNU General Public License +;; along with GNU CC; see the file COPYING. If not, write to +;; the Free Software Foundation, 59 Temple Place - Suite 330, +;; Boston, MA 02111-1307, USA. + +;; +;; Special constraints for s/390 machine description: +;; +;; a -- Any address register from 1 to 15. +;; d -- Any register from 0 to 15. +;; I -- An 8-bit constant (0..255). +;; J -- A 12-bit constant (0..4095). +;; K -- A 16-bit constant (-32768..32767). +;; Q -- A memory reference without index-register. +;; S -- Valid operand for the LARL instruction. +;; +;; Special formats used for outputting 390 instructions. +;; +;; %b -- Print a constant byte integer. xy +;; %h -- Print a signed 16-bit. wxyz +;; %N -- Print next register (second word of a DImode reg) or next word. +;; %M -- Print next register (second word of a TImode reg) or next word. +;; %O -- Print the offset of a memory reference (PLUS (REG) (CONST_INT)). +;; %R -- Print the register of a memory reference (PLUS (REG) (CONST_INT)). +;; +;; We have a special constraint for pattern matching. +;; +;; s_operand -- Matches a valid S operand in a RS, SI or SS type instruction. +;; + + +;; Define an insn type attribute. This is used in function unit delay +;; computations. 
+ +(define_attr "type" "none,integer,load,lr,la,lm,stm,cs,vs,store,imul,lmul,fmul,idiv,ldiv,fdiv,branch,jsr,other,o2,o3" + (const_string "integer")) + +;; Insn are devide in two classes: +;; mem: Insn accessing memory +;; reg: Insn operands all in registers + +(define_attr "atype" "reg,mem" + (const_string "reg")) + +;; Generic pipeline function unit. + +(define_function_unit "integer" 1 0 + (eq_attr "type" "none") 0 0) + +(define_function_unit "integer" 1 0 + (eq_attr "type" "integer") 1 1) + +(define_function_unit "integer" 1 0 + (eq_attr "type" "load") 1 1) + +(define_function_unit "integer" 1 0 + (eq_attr "type" "la") 1 1) + +(define_function_unit "integer" 1 0 + (eq_attr "type" "lr") 1 1) + +(define_function_unit "integer" 1 0 + (eq_attr "type" "store") 1 1) + +(define_function_unit "integer" 1 0 + (eq_attr "type" "lm") 2 2) + +(define_function_unit "integer" 1 0 + (eq_attr "type" "stm") 2 2) + +(define_function_unit "integer" 1 0 + (eq_attr "type" "cs") 5 5) + +(define_function_unit "integer" 1 0 + (eq_attr "type" "vs") 30 30) + +(define_function_unit "integer" 1 0 + (eq_attr "type" "jsr") 5 5) + +(define_function_unit "integer" 1 0 + (eq_attr "type" "imul") 7 7) + +(define_function_unit "integer" 1 0 + (eq_attr "type" "fmul") 6 6) + +(define_function_unit "integer" 1 0 + (eq_attr "type" "idiv") 33 33) + +(define_function_unit "integer" 1 0 + (eq_attr "type" "fdiv") 33 33) + +(define_function_unit "integer" 1 0 + (eq_attr "type" "o2") 2 2) + +(define_function_unit "integer" 1 0 + (eq_attr "type" "o3") 3 3) + +(define_function_unit "integer" 1 0 + (eq_attr "type" "other") 5 5) + +;; Operand type. Used to default length attribute values + +(define_attr "op_type" + "NN,E,RR,RRE,RX,RS,RSI,RI,SI,S,SS,SSE,RXE,RSE,RIL,RIE" + (const_string "RX")) + +;; Length in bytes. 
+ +(define_attr "length" "" +(cond [ (eq_attr "op_type" "E") (const_int 2) + (eq_attr "op_type" "RR") (const_int 2) + (eq_attr "op_type" "RX") (const_int 4) + (eq_attr "op_type" "RI") (const_int 4) + (eq_attr "op_type" "RRE") (const_int 4) + (eq_attr "op_type" "RS") (const_int 4) + (eq_attr "op_type" "RSI") (const_int 4) + (eq_attr "op_type" "RX") (const_int 4) + (eq_attr "op_type" "S") (const_int 4) + (eq_attr "op_type" "SI") (const_int 4) + (eq_attr "op_type" "SS") (const_int 6) + (eq_attr "op_type" "SSE") (const_int 6) + (eq_attr "op_type" "RXE") (const_int 6) + (eq_attr "op_type" "RSE") (const_int 6) + (eq_attr "op_type" "RIL") (const_int 6)] + (const_int 4))) + +;; Define attributes for `asm' insns. + +(define_asm_attributes [(set_attr "type" "other") + (set_attr "op_type" "NN")]) + +;; +;; Condition Codes +;; +; +; CCL: Zero Nonzero Zero Nonzero (AL, ALR, SL, SLR, N, NC, NI, NR, O, OC, OI, OR, X, XC, XI, XR) +; CCA: Zero <Zero >Zero Overflow (A, AR, AH, AHI, S, SR, SH, SHI, LTR, LCR, LNR, LPR, SLA, SLDA, SLA, SRDA) +; CCU: Equal ULess UGreater -- (CL, CLR, CLI, CLM) +; CCS: Equal SLess SGreater -- (C, CR, CH, CHI, ICM) +; CCT: Zero Mixed Mixed Ones (TM, TMH, TML) + +; CCZ -> CCL / CCZ1 +; CCZ1 -> CCA/CCU/CCS/CCT +; CCS -> CCA + +; String: CLC, CLCL, CLCLE, CLST, CUSE, MVCL, MVCLE, MVPG, MVST, SRST +; Clobber: CKSM, CFC, CS, CDS, CUUTF, CUTFU, PLO, SPM, STCK, STCKE, TS, TRT, TRE, UPT + + +;; +;;- Compare instructions. 
+;; + +(define_expand "cmpdi" + [(set (reg:CC 33) + (compare:CC (match_operand:DI 0 "register_operand" "") + (match_operand:DI 1 "general_operand" "")))] + "TARGET_64BIT" + " +{ + s390_compare_op0 = operands[0]; + s390_compare_op1 = operands[1]; + DONE; +}") + +(define_expand "cmpsi" + [(set (reg:CC 33) + (compare:CC (match_operand:SI 0 "register_operand" "") + (match_operand:SI 1 "general_operand" "")))] + "" + " +{ + s390_compare_op0 = operands[0]; + s390_compare_op1 = operands[1]; + DONE; +}") + +;(define_expand "cmphi" +; [(set (reg:CC 33) +; (compare:CC (match_operand:HI 0 "register_operand" "") +; (match_operand:HI 1 "general_operand" "")))] +; "" +; " +;{ +; s390_compare_op0 = operands[0]; +; s390_compare_op1 = operands[1]; +; DONE; +;}") + +;(define_expand "cmpqi" +; [(set (reg:CC 33) +; (compare:CC (match_operand:QI 0 "register_operand" "") +; (match_operand:QI 1 "general_operand" "")))] +; "" +; " +;{ +; s390_compare_op0 = operands[0]; +; s390_compare_op1 = operands[1]; +; DONE; +;}") + +(define_expand "cmpdf" + [(set (reg:CC 33) + (compare:CC (match_operand:DF 0 "register_operand" "") + (match_operand:DF 1 "general_operand" "")))] + "TARGET_HARD_FLOAT" + " +{ + s390_compare_op0 = operands[0]; + s390_compare_op1 = operands[1]; + DONE; +}") + +(define_expand "cmpsf" + [(set (reg:CC 33) + (compare:CC (match_operand:SF 0 "register_operand" "") + (match_operand:SF 1 "general_operand" "")))] + "TARGET_HARD_FLOAT" + " +{ + s390_compare_op0 = operands[0]; + s390_compare_op1 = operands[1]; + DONE; +}") + + +; DI instructions + +(define_insn "*cmpdi_tm2" + [(set (reg 33) + (compare (zero_extract:DI (match_operand:DI 0 "register_operand" "d") + (match_operand:DI 1 "const_int_operand" "n") + (match_operand:DI 2 "const_int_operand" "n")) + (const_int 0)))] + "s390_match_ccmode(insn, CCTmode) && TARGET_64BIT + && INTVAL (operands[1]) >= 1 && INTVAL (operands[2]) >= 0 + && INTVAL (operands[1]) + INTVAL (operands[2]) <= 64 + && (INTVAL (operands[1]) + INTVAL 
(operands[2]) - 1) >> 4 + == INTVAL (operands[2]) >> 4" + "* +{ + int part = INTVAL (operands[2]) >> 4; + int block = (1 << INTVAL (operands[1])) - 1; + int shift = 16 - INTVAL (operands[1]) - (INTVAL (operands[2]) & 15); + + operands[2] = GEN_INT (block << shift); + + switch (part) + { + case 0: return \"tmhh\\t%0,%x2\"; + case 1: return \"tmhl\\t%0,%x2\"; + case 2: return \"tmlh\\t%0,%x2\"; + case 3: return \"tmll\\t%0,%x2\"; + default: abort (); + } +}" + [(set_attr "op_type" "RI")]) + +(define_insn "*cmpdi_tm_reg" + [(set (reg 33) + (compare (and:DI (match_operand:DI 0 "register_operand" "%d") + (match_operand:DI 1 "immediate_operand" "n")) + (const_int 0)))] + "s390_match_ccmode(insn, CCTmode) && TARGET_64BIT + && s390_single_hi (operands[1], DImode, 0) >= 0" + "* +{ + int part = s390_single_hi (operands[1], DImode, 0); + operands[1] = GEN_INT (s390_extract_hi (operands[1], DImode, part)); + + switch (part) + { + case 0: return \"tmhh\\t%0,%x1\"; + case 1: return \"tmhl\\t%0,%x1\"; + case 2: return \"tmlh\\t%0,%x1\"; + case 3: return \"tmll\\t%0,%x1\"; + default: abort (); + } +}" + [(set_attr "op_type" "RI")]) + +(define_insn "*cmpdi_tm_mem" + [(set (reg 33) + (compare (and:DI (match_operand:DI 0 "s_operand" "%Qo") + (match_operand:DI 1 "immediate_operand" "n")) + (const_int 0)))] + "s390_match_ccmode(insn, CCTmode) && TARGET_64BIT + && s390_single_qi (operands[1], DImode, 0) >= 0" + "* +{ + int part = s390_single_qi (operands[1], DImode, 0); + operands[1] = GEN_INT (s390_extract_qi (operands[1], DImode, part)); + + operands[0] = gen_rtx_MEM (QImode, + plus_constant (XEXP (operands[0], 0), part)); + return \"tm\\t%0,%b1\"; +}" + [(set_attr "op_type" "SI") + (set_attr "atype" "mem")]) + +(define_insn "*ltgr" + [(set (reg 33) + (compare (match_operand:DI 0 "register_operand" "d") + (match_operand:DI 1 "const0_operand" ""))) + (set (match_operand:DI 2 "register_operand" "=d") + (match_dup 0))] + "s390_match_ccmode(insn, CCSmode) && TARGET_64BIT" + "ltgr\\t%2,%0" 
+ [(set_attr "op_type" "RRE")]) + +(define_insn "*cmpdi_ccs_0_64" + [(set (reg 33) + (compare (match_operand:DI 0 "register_operand" "d") + (match_operand:DI 1 "const0_operand" "")))] + "s390_match_ccmode(insn, CCSmode) && TARGET_64BIT" + "ltgr\\t%0,%0" + [(set_attr "op_type" "RRE")]) + +(define_insn "*cmpdi_ccs_0_31" + [(set (reg 33) + (compare (match_operand:DI 0 "register_operand" "d") + (match_operand:DI 1 "const0_operand" "")))] + "s390_match_ccmode(insn, CCSmode)" + "srda\\t%0,0" + [(set_attr "op_type" "RS")]) + +(define_insn "*cmpdi_ccs" + [(set (reg 33) + (compare (match_operand:DI 0 "register_operand" "d,d,d") + (match_operand:DI 1 "general_operand" "d,K,m")))] + "s390_match_ccmode(insn, CCSmode) && TARGET_64BIT" + "@ + cgr\\t%0,%1 + cghi\\t%0,%c1 + cg\\t%0,%1" + [(set_attr "op_type" "RRE,RI,RXE") + (set_attr "atype" "reg,reg,mem")]) + +(define_insn "*cmpdi_ccu" + [(set (reg 33) + (compare (match_operand:DI 0 "register_operand" "d,d") + (match_operand:DI 1 "general_operand" "d,m")))] + "s390_match_ccmode(insn, CCUmode) && TARGET_64BIT" + "@ + clgr\\t%0,%1 + clg\\t%0,%1" + [(set_attr "op_type" "RRE,RXE") + (set_attr "atype" "reg,mem")]) + +(define_insn "*cmpdi_ccu_mem" + [(set (reg 33) + (compare (match_operand:DI 0 "s_operand" "oQ") + (match_operand:DI 1 "s_imm_operand" "oQ")))] + "s390_match_ccmode(insn, CCUmode)" + "clc\\t%O0(8,%R0),%1" + [(set_attr "op_type" "SS") + (set_attr "atype" "mem")]) + +; SI instructions + +(define_insn "*cmpsi_tm2" + [(set (reg 33) + (compare (zero_extract:SI (match_operand:SI 0 "register_operand" "d") + (match_operand:SI 1 "const_int_operand" "n") + (match_operand:SI 2 "const_int_operand" "n")) + (const_int 0)))] + "s390_match_ccmode(insn, CCTmode) + && INTVAL (operands[1]) >= 1 && INTVAL (operands[2]) >= 0 + && INTVAL (operands[1]) + INTVAL (operands[2]) <= 32 + && (INTVAL (operands[1]) + INTVAL (operands[2]) - 1) >> 4 + == INTVAL (operands[2]) >> 4" + "* +{ + int part = INTVAL (operands[2]) >> 4; + int block = (1 << INTVAL 
(operands[1])) - 1; + int shift = 16 - INTVAL (operands[1]) - (INTVAL (operands[2]) & 15); + + operands[2] = GEN_INT (block << shift); + + switch (part) + { + case 0: return \"tmh\\t%0,%x2\"; + case 1: return \"tml\\t%0,%x2\"; + default: abort (); + } +}" + [(set_attr "op_type" "RI")]) + +(define_insn "*cmpsi_tm_reg" + [(set (reg 33) + (compare (and:SI (match_operand:SI 0 "register_operand" "%d") + (match_operand:SI 1 "immediate_operand" "n")) + (const_int 0)))] + "s390_match_ccmode(insn, CCTmode) + && s390_single_hi (operands[1], SImode, 0) >= 0" + "* +{ + int part = s390_single_hi (operands[1], SImode, 0); + operands[1] = GEN_INT (s390_extract_hi (operands[1], SImode, part)); + + switch (part) + { + case 0: return \"tmh\\t%0,%x1\"; + case 1: return \"tml\\t%0,%x1\"; + default: abort (); + } +}" + [(set_attr "op_type" "RI")]) + +(define_insn "*cmpsi_tm_mem" + [(set (reg 33) + (compare (and:SI (match_operand:SI 0 "s_operand" "%Qo") + (match_operand:SI 1 "immediate_operand" "n")) + (const_int 0)))] + "s390_match_ccmode(insn, CCTmode) + && s390_single_qi (operands[1], SImode, 0) >= 0" + "* +{ + int part = s390_single_qi (operands[1], SImode, 0); + operands[1] = GEN_INT (s390_extract_qi (operands[1], SImode, part)); + + operands[0] = gen_rtx_MEM (QImode, + plus_constant (XEXP (operands[0], 0), part)); + return \"tm\\t%0,%b1\"; +}" + [(set_attr "op_type" "SI") + (set_attr "atype" "mem")]) + +(define_insn "*ltr" + [(set (reg 33) + (compare (match_operand:SI 0 "register_operand" "d") + (match_operand:SI 1 "const0_operand" ""))) + (set (match_operand:SI 2 "register_operand" "=d") + (match_dup 0))] + "s390_match_ccmode(insn, CCSmode)" + "ltr\\t%2,%0" + [(set_attr "op_type" "RR")]) + +(define_insn "*icm15" + [(set (reg 33) + (compare (match_operand:SI 0 "s_operand" "Qo") + (match_operand:SI 1 "const0_operand" ""))) + (set (match_operand:SI 2 "register_operand" "=d") + (match_dup 0))] + "s390_match_ccmode(insn, CCSmode)" + "icm\\t%2,15,%0" + [(set_attr "op_type" "RS") + 
(set_attr "atype" "mem")]) + +(define_insn "*icm15_cconly" + [(set (reg 33) + (compare (match_operand:SI 0 "s_operand" "Qo") + (match_operand:SI 1 "const0_operand" ""))) + (clobber (match_scratch:SI 2 "=d"))] + "s390_match_ccmode(insn, CCSmode)" + "icm\\t%2,15,%0" + [(set_attr "op_type" "RS") + (set_attr "atype" "mem")]) + +(define_insn "*cmpsi_ccs_0" + [(set (reg 33) + (compare (match_operand:SI 0 "register_operand" "d") + (match_operand:SI 1 "const0_operand" "")))] + "s390_match_ccmode(insn, CCSmode)" + "ltr\\t%0,%0" + [(set_attr "op_type" "RR")]) + +(define_insn "*cmpsidi_ccs" + [(set (reg 33) + (compare (match_operand:SI 0 "register_operand" "d") + (sign_extend:SI (match_operand:HI 1 "memory_operand" "m"))))] + "s390_match_ccmode(insn, CCSmode)" + "ch\\t%0,%1" + [(set_attr "op_type" "RR") + (set_attr "atype" "mem")]) + +(define_insn "*cmpsi_ccs" + [(set (reg 33) + (compare (match_operand:SI 0 "register_operand" "d,d,d") + (match_operand:SI 1 "general_operand" "d,K,m")))] + "s390_match_ccmode(insn, CCSmode)" + "@ + cr\\t%0,%1 + chi\\t%0,%c1 + c\\t%0,%1" + [(set_attr "op_type" "RR,RI,RX") + (set_attr "atype" "reg,reg,mem")]) + +(define_insn "*cmpsi_ccu" + [(set (reg 33) + (compare (match_operand:SI 0 "register_operand" "d,d") + (match_operand:SI 1 "general_operand" "d,m")))] + "s390_match_ccmode(insn, CCUmode)" + "@ + clr\\t%0,%1 + cl\\t%0,%1" + [(set_attr "op_type" "RR,RX") + (set_attr "atype" "reg,mem")]) + +(define_insn "*cmpsi_ccu_mem" + [(set (reg 33) + (compare (match_operand:SI 0 "s_operand" "oQ") + (match_operand:SI 1 "s_imm_operand" "oQ")))] + "s390_match_ccmode(insn, CCUmode)" + "clc\\t%O0(4,%R0),%1" + [(set_attr "op_type" "SS") + (set_attr "atype" "mem")]) + + +; HI instructions + +(define_insn "*cmphi_tm_sub" + [(set (reg 33) + (compare (and:SI (subreg:SI (match_operand:HI 0 "s_operand" "%Qo") 0) + (match_operand:SI 1 "immediate_operand" "n")) + (const_int 0)))] + "s390_match_ccmode(insn, CCTmode) + && s390_single_qi (operands[1], HImode, 0) >= 0" + 
"* +{ + int part = s390_single_qi (operands[1], HImode, 0); + operands[1] = GEN_INT (s390_extract_qi (operands[1], HImode, part)); + + operands[0] = gen_rtx_MEM (QImode, + plus_constant (XEXP (operands[0], 0), part)); + return \"tm\\t%0,%b1\"; +}" + [(set_attr "op_type" "SI") + (set_attr "atype" "mem")]) + +(define_insn "*icm3" + [(set (reg 33) + (compare (match_operand:HI 0 "s_operand" "Qo") + (match_operand:HI 1 "const0_operand" ""))) + (set (match_operand:HI 2 "register_operand" "=d") + (match_dup 0))] + "s390_match_ccmode(insn, CCSmode)" + "icm\\t%2,3,%0" + [(set_attr "op_type" "RS") + (set_attr "atype" "mem")]) + +(define_insn "*cmphi_cct_0" + [(set (reg 33) + (compare (match_operand:HI 0 "register_operand" "d") + (match_operand:HI 1 "const0_operand" "")))] + "s390_match_ccmode(insn, CCTmode)" + "tml\\t%0,65535" + [(set_attr "op_type" "RX")]) + +(define_insn "*cmphi_ccs_0" + [(set (reg 33) + (compare (match_operand:HI 0 "s_operand" "Qo") + (match_operand:HI 1 "const0_operand" ""))) + (clobber (match_scratch:HI 2 "=d"))] + "s390_match_ccmode(insn, CCSmode)" + "icm\\t%2,3,%0" + [(set_attr "op_type" "RS") + (set_attr "atype" "mem")]) + +(define_insn "*cmphi_ccu" + [(set (reg 33) + (compare (match_operand:HI 0 "register_operand" "d") + (match_operand:HI 1 "s_imm_operand" "Qo")))] + "s390_match_ccmode(insn, CCUmode)" + "clm\\t%0,3,%1" + [(set_attr "op_type" "RS") + (set_attr "atype" "mem")]) + +(define_insn "*cmphi_ccu_mem" + [(set (reg 33) + (compare (match_operand:HI 0 "s_operand" "oQ") + (match_operand:HI 1 "s_imm_operand" "oQ")))] + "s390_match_ccmode(insn, CCUmode)" + "clc\\t%O0(2,%R0),%1" + [(set_attr "op_type" "SS") + (set_attr "atype" "mem")]) + + +; QI instructions + +(define_insn "*cmpqi_tm2" + [(set (reg 33) + (compare (zero_extract:SI (match_operand:QI 0 "s_operand" "Qo") + (match_operand:SI 1 "const_int_operand" "n") + (match_operand:SI 2 "const_int_operand" "n")) + (const_int 0)))] + "s390_match_ccmode(insn, CCTmode) + && INTVAL (operands[1]) >= 1 && 
INTVAL (operands[2]) >= 0 + && INTVAL (operands[1]) + INTVAL (operands[2]) <= 8" + "* +{ + int block = (1 << INTVAL (operands[1])) - 1; + int shift = 8 - INTVAL (operands[1]) - INTVAL (operands[2]); + + operands[2] = GEN_INT (block << shift); + return \"tm\\t%0,%b2\"; +}" + [(set_attr "op_type" "SI") + (set_attr "atype" "mem")]) + +(define_insn "*cmpqi_tm" + [(set (reg 33) + (compare (and:QI (match_operand:QI 0 "nonimmediate_operand" "%d,Q") + (match_operand:QI 1 "immediate_operand" "n,n")) + (const_int 0)))] + "s390_match_ccmode(insn, CCTmode)" + "@ + tml\\t%0,%b1 + tm\\t%0,%b1" + [(set_attr "op_type" "RI,SI") + (set_attr "atype" "reg,mem")]) + +(define_insn "*cmpqi_tm_sub" + [(set (reg 33) + (compare (and:SI (subreg:SI (match_operand:QI 0 "s_operand" "%Qo") 0) + (match_operand:SI 1 "immediate_operand" "n")) + (const_int 0)))] + "s390_match_ccmode(insn, CCTmode)" + "tm\\t%0,%b1" + [(set_attr "op_type" "SI") + (set_attr "atype" "mem")]) + +(define_insn "*icm1" + [(set (reg 33) + (compare (match_operand:QI 0 "s_operand" "Qo") + (match_operand:QI 1 "const0_operand" ""))) + (set (match_operand:QI 2 "register_operand" "=d") + (match_dup 0))] + "s390_match_ccmode(insn, CCSmode)" + "icm\\t%2,1,%0" + [(set_attr "op_type" "RS") + (set_attr "atype" "mem")]) + +(define_insn "*tm_0" + [(set (reg 33) + (compare (zero_extend:SI (and:QI (match_operand:QI 0 "s_operand" "Qo") + (match_operand:QI 1 "immediate_operand" ""))) + (const_int 0)))] + "s390_match_ccmode(insn, CCTmode) && + INTVAL(operands[1]) >= 0 && INTVAL(operands[1]) < 256" + "tm\\t%0,%1" + [(set_attr "op_type" "RI") + (set_attr "atype" "mem")]) + +(define_insn "*cmpqi_cct_0" + [(set (reg 33) + (compare (match_operand:QI 0 "register_operand" "d") + (match_operand:QI 1 "const0_operand" "")))] + "s390_match_ccmode(insn, CCTmode)" + "tml\\t%0,255" + [(set_attr "op_type" "RI")]) + +(define_insn "*cmpqi_ccs_0" + [(set (reg 33) + (compare (match_operand:QI 0 "s_operand" "Qo") + (match_operand:QI 1 "const0_operand" ""))) + 
(clobber (match_scratch:QI 2 "=d"))] + "s390_match_ccmode(insn, CCSmode)" + "icm\\t%2,1,%0" + [(set_attr "op_type" "RS") + (set_attr "atype" "mem")]) + +(define_insn "*cmpqi_ccu_0" + [(set (reg 33) + (compare (match_operand:QI 0 "s_operand" "Qo") + (match_operand:QI 1 "const0_operand" "")))] + "s390_match_ccmode(insn, CCUmode)" + "cli\\t%0,0" + [(set_attr "op_type" "SI") + (set_attr "atype" "mem")]) + +(define_insn "*cmpqi_ccu" + [(set (reg 33) + (compare (match_operand:QI 0 "register_operand" "d") + (match_operand:QI 1 "s_imm_operand" "Qo")))] + "s390_match_ccmode(insn, CCUmode)" + "clm\\t%0,1,%1" + [(set_attr "op_type" "RS") + (set_attr "atype" "mem")]) + +(define_insn "*cmpqi_ccu_immed" + [(set (reg 33) + (compare (match_operand:QI 0 "s_operand" "Qo") + (match_operand:QI 1 "const_int_operand" "n")))] + "s390_match_ccmode(insn, CCUmode) && + INTVAL(operands[1]) >= 0 && INTVAL(operands[1]) < 256" + "cli\\t%0,%1" + [(set_attr "op_type" "SI") + (set_attr "atype" "mem")]) + +(define_insn "*cmpqi_ccu_mem" + [(set (reg 33) + (compare (match_operand:QI 0 "s_operand" "oQ") + (match_operand:QI 1 "s_imm_operand" "oQ")))] + "s390_match_ccmode(insn, CCUmode)" + "clc\\t%O0(1,%R0),%1" + [(set_attr "op_type" "SS") + (set_attr "atype" "mem")]) + + +; DF instructions + +(define_insn "*cmpdf_ccs_0" + [(set (reg 33) + (compare (match_operand:DF 0 "register_operand" "f") + (match_operand:DF 1 "const0_operand" "")))] + "s390_match_ccmode(insn, CCSmode) && TARGET_HARD_FLOAT && TARGET_IEEE_FLOAT" + "ltdbr\\t%0,%0" + [(set_attr "op_type" "RRE")]) + +(define_insn "*cmpdf_ccs_0_ibm" + [(set (reg 33) + (compare (match_operand:DF 0 "register_operand" "f") + (match_operand:DF 1 "const0_operand" "")))] + "s390_match_ccmode(insn, CCSmode) && TARGET_HARD_FLOAT && TARGET_IBM_FLOAT" + "ltdr\\t%0,%0" + [(set_attr "op_type" "RR")]) + +(define_insn "*cmpdf_ccs" + [(set (reg 33) + (compare (match_operand:DF 0 "register_operand" "f,f") + (match_operand:DF 1 "general_operand" "f,m")))] + 
"s390_match_ccmode(insn, CCSmode) && TARGET_HARD_FLOAT && TARGET_IEEE_FLOAT" + "@ + cdbr\\t%0,%1 + cdb\\t%0,%1" + [(set_attr "op_type" "RRE,RXE") + (set_attr "atype" "reg,mem")]) + +(define_insn "*cmpdf_ccs_ibm" + [(set (reg 33) + (compare (match_operand:DF 0 "register_operand" "f,f") + (match_operand:DF 1 "general_operand" "f,m")))] + "s390_match_ccmode(insn, CCSmode) && TARGET_HARD_FLOAT && TARGET_IBM_FLOAT" + "@ + cdr\\t%0,%1 + cd\\t%0,%1" + [(set_attr "op_type" "RR,RX") + (set_attr "atype" "reg,mem")]) + + +; SF instructions + +(define_insn "*cmpsf_ccs_0" + [(set (reg 33) + (compare (match_operand:SF 0 "register_operand" "f") + (match_operand:SF 1 "const0_operand" "")))] + "s390_match_ccmode(insn, CCSmode) && TARGET_HARD_FLOAT && TARGET_IEEE_FLOAT" + "ltebr\\t%0,%0" + [(set_attr "op_type" "RRE")]) + +(define_insn "*cmpsf_ccs_0_ibm" + [(set (reg 33) + (compare (match_operand:SF 0 "register_operand" "f") + (match_operand:SF 1 "const0_operand" "")))] + "s390_match_ccmode(insn, CCSmode) && TARGET_HARD_FLOAT && TARGET_IBM_FLOAT" + "lter\\t%0,%0" + [(set_attr "op_type" "RR")]) + +(define_insn "*cmpsf_ccs" + [(set (reg 33) + (compare (match_operand:SF 0 "register_operand" "f,f") + (match_operand:SF 1 "general_operand" "f,m")))] + "s390_match_ccmode(insn, CCSmode) && TARGET_HARD_FLOAT && TARGET_IEEE_FLOAT" + "@ + cebr\\t%0,%1 + ceb\\t%0,%1" + [(set_attr "op_type" "RRE,RXE") + (set_attr "atype" "reg,mem")]) + +(define_insn "*cmpsf_ccs" + [(set (reg 33) + (compare (match_operand:SF 0 "register_operand" "f,f") + (match_operand:SF 1 "general_operand" "f,m")))] + "s390_match_ccmode(insn, CCSmode) && TARGET_HARD_FLOAT && TARGET_IBM_FLOAT" + "@ + cer\\t%0,%1 + ce\\t%0,%1" + [(set_attr "op_type" "RR,RX") + (set_attr "atype" "reg,mem")]) + + +;; +;;- Move instructions. +;; + +; +; movti instruction pattern(s). 
+; + +(define_insn "*movti_ss" + [(set (match_operand:TI 0 "s_operand" "=Qo") + (match_operand:TI 1 "s_imm_operand" "Qo"))] + "" + "mvc\\t%O0(16,%R0),%1" + [(set_attr "op_type" "SS") + (set_attr "atype" "mem")]) + +(define_insn "movti" + [(set (match_operand:TI 0 "nonimmediate_operand" "=d,Q,d,m") + (match_operand:TI 1 "general_operand" "Q,d,dKm,d"))] + "TARGET_64BIT" + "@ + lmg\\t%0,%N0,%1 + stmg\\t%1,%N1,%0 + # + #" + [(set_attr "op_type" "RSE,RSE,NN,NN") + (set_attr "atype" "mem")]) + +(define_split + [(set (match_operand:TI 0 "nonimmediate_operand" "") + (match_operand:TI 1 "general_operand" ""))] + "TARGET_64BIT && reload_completed + && !s_operand (operands[0], VOIDmode) + && !s_operand (operands[1], VOIDmode) + && (register_operand (operands[0], VOIDmode) + || register_operand (operands[1], VOIDmode)) + && (!register_operand (operands[0], VOIDmode) + || !reg_overlap_mentioned_p (operand_subword (operands[0], 0, 0, TImode), + operands[1]) + || !reg_overlap_mentioned_p (operand_subword (operands[0], 1, 0, TImode), + operands[1]))" + [(set (match_dup 2) (match_dup 4)) + (set (match_dup 3) (match_dup 5))] + " +{ + if (!register_operand (operands[0], VOIDmode) + || !reg_overlap_mentioned_p (operand_subword (operands[0], 0, 0, TImode), + operands[1])) + { + operands[2] = operand_subword (operands[0], 0, 0, TImode); + operands[3] = operand_subword (operands[0], 1, 0, TImode); + operands[4] = operand_subword (operands[1], 0, 0, TImode); + operands[5] = operand_subword (operands[1], 1, 0, TImode); + } + else + { + operands[2] = operand_subword (operands[0], 1, 0, TImode); + operands[3] = operand_subword (operands[0], 0, 0, TImode); + operands[4] = operand_subword (operands[1], 1, 0, TImode); + operands[5] = operand_subword (operands[1], 0, 0, TImode); + } +}") + +(define_split + [(set (match_operand:TI 0 "register_operand" "") + (match_operand:TI 1 "memory_operand" ""))] + "TARGET_64BIT && reload_completed + && !s_operand (operands[1], VOIDmode)" + [(set (match_dup 2) 
(match_dup 3)) + (set (match_dup 0) (mem:TI (match_dup 2)))] + "operands[2] = operand_subword (operands[0], 1, 0, TImode); + operands[3] = legitimize_la_operand (XEXP (operands[1], 0));") + +; +; movdi instruction pattern(s). +; + +;; If generating PIC code and operands[1] is a symbolic CONST, emit a +;; move to get the address of the symbolic object from the GOT. + +(define_expand "movdi" + [(set (match_operand:DI 0 "general_operand" "") + (match_operand:DI 1 "general_operand" ""))] + "" + " +{ + /* Handle PIC symbolic constants. */ + if (TARGET_64BIT && flag_pic && SYMBOLIC_CONST (operands[1])) + emit_pic_move (operands, DImode); + + /* During and after reload, we need to force constants + to the literal pool ourselves, if necessary. */ + if ((reload_in_progress || reload_completed) + && CONSTANT_P (operands[1]) + && (!legitimate_reload_constant_p (operands[1]) + || fp_operand (operands[0], VOIDmode))) + operands[1] = force_const_mem (DImode, operands[1]); +}") + +(define_insn "*movdi_lhi" + [(set (match_operand:DI 0 "register_operand" "=d") + (match_operand:DI 1 "immediate_operand" "K"))] + "TARGET_64BIT + && GET_CODE (operands[1]) == CONST_INT + && CONST_OK_FOR_LETTER_P (INTVAL (operands[1]), 'K') + && !fp_operand (operands[0], VOIDmode)" + "lghi\\t%0,%h1" + [(set_attr "op_type" "RI") + (set_attr "atype" "reg")]) + +(define_insn "*movdi_lli" + [(set (match_operand:DI 0 "register_operand" "=d") + (match_operand:DI 1 "immediate_operand" "n"))] + "TARGET_64BIT && s390_single_hi (operands[1], DImode, 0) >= 0 + && !fp_operand (operands[0], VOIDmode)" + "* +{ + int part = s390_single_hi (operands[1], DImode, 0); + operands[1] = GEN_INT (s390_extract_hi (operands[1], DImode, part)); + + switch (part) + { + case 0: return \"llihh\\t%0,%x1\"; + case 1: return \"llihl\\t%0,%x1\"; + case 2: return \"llilh\\t%0,%x1\"; + case 3: return \"llill\\t%0,%x1\"; + default: abort (); + } +}" + [(set_attr "op_type" "RI") + (set_attr "atype" "reg")]) + +(define_insn "*movdi_larl" + 
[(set (match_operand:DI 0 "register_operand" "=d") + (match_operand:DI 1 "larl_operand" "X"))] + "TARGET_64BIT + && !fp_operand (operands[0], VOIDmode)" + "larl\\t%0,%1" + [(set_attr "op_type" "RIL") + (set_attr "atype" "reg") + (set_attr "type" "la")]) + +(define_insn "*movdi_ss" + [(set (match_operand:DI 0 "s_operand" "=Qo") + (match_operand:DI 1 "s_imm_operand" "Qo"))] + "" + "mvc\\t%O0(8,%R0),%1" + [(set_attr "op_type" "SS") + (set_attr "atype" "mem")]) + +(define_insn "*movdi_64" + [(set (match_operand:DI 0 "nonimmediate_operand" "=d,d,m,!*f,!*f,!m") + (match_operand:DI 1 "general_operand" "d,m,d,*f,m,*f"))] + "TARGET_64BIT" + "@ + lgr\\t%0,%1 + lg\\t%0,%1 + stg\\t%1,%0 + ldr\\t%0,%1 + ld\\t%0,%1 + std\\t%1,%0" + [(set_attr "op_type" "RRE,RXE,RXE,RR,RX,RX") + (set_attr "atype" "reg,mem,mem,reg,mem,mem")]) + +(define_insn "*movdi_31" + [(set (match_operand:DI 0 "nonimmediate_operand" "=d,Q,d,m,!*f,!*f,!m") + (match_operand:DI 1 "general_operand" "Q,d,dKm,d,*f,m,*f"))] + "!TARGET_64BIT" + "@ + lm\\t%0,%N0,%1 + stm\\t%1,%N1,%0 + # + # + ldr\\t%0,%1 + ld\\t%0,%1 + std\\t%1,%0" + [(set_attr "op_type" "RS,RS,NN,NN,RR,RX,RX") + (set_attr "atype" "mem,mem,*,*,reg,mem,mem")]) + +(define_split + [(set (match_operand:DI 0 "nonimmediate_operand" "") + (match_operand:DI 1 "general_operand" ""))] + "!TARGET_64BIT && reload_completed + && !fp_operand (operands[0], VOIDmode) + && !fp_operand (operands[1], VOIDmode) + && !s_operand (operands[0], VOIDmode) + && !s_operand (operands[1], VOIDmode) + && (register_operand (operands[0], VOIDmode) + || register_operand (operands[1], VOIDmode)) + && (!register_operand (operands[0], VOIDmode) + || !reg_overlap_mentioned_p (operand_subword (operands[0], 0, 0, DImode), + operands[1]) + || !reg_overlap_mentioned_p (operand_subword (operands[0], 1, 0, DImode), + operands[1]))" + [(set (match_dup 2) (match_dup 4)) + (set (match_dup 3) (match_dup 5))] + " +{ + if (!register_operand (operands[0], VOIDmode) + || !reg_overlap_mentioned_p 
(operand_subword (operands[0], 0, 0, DImode), + operands[1])) + { + operands[2] = operand_subword (operands[0], 0, 0, DImode); + operands[3] = operand_subword (operands[0], 1, 0, DImode); + operands[4] = operand_subword (operands[1], 0, 0, DImode); + operands[5] = operand_subword (operands[1], 1, 0, DImode); + } + else + { + operands[2] = operand_subword (operands[0], 1, 0, DImode); + operands[3] = operand_subword (operands[0], 0, 0, DImode); + operands[4] = operand_subword (operands[1], 1, 0, DImode); + operands[5] = operand_subword (operands[1], 0, 0, DImode); + } +}") + +(define_split + [(set (match_operand:DI 0 "register_operand" "") + (match_operand:DI 1 "memory_operand" ""))] + "!TARGET_64BIT && reload_completed + && !fp_operand (operands[0], VOIDmode) + && !fp_operand (operands[1], VOIDmode) + && !s_operand (operands[1], VOIDmode)" + [(set (match_dup 2) (match_dup 3)) + (set (match_dup 0) (mem:DI (match_dup 2)))] + "operands[2] = operand_subword (operands[0], 1, 0, DImode); + operands[3] = legitimize_la_operand (XEXP (operands[1], 0));") + +; +; movsi instruction pattern(s). +; + +;; If generating PIC code and operands[1] is a symbolic CONST, emit a +;; move to get the address of the symbolic object from the GOT. + +(define_expand "movsi" + [(set (match_operand:SI 0 "general_operand" "") + (match_operand:SI 1 "general_operand" ""))] + "" + " +{ + /* Handle PIC symbolic constants. */ + if (!TARGET_64BIT && flag_pic && SYMBOLIC_CONST (operands[1])) + emit_pic_move (operands, SImode); + + /* expr.c tries to load an effective address using + force_reg. This fails because we don't have a + generic load_address pattern. Convert the move + to a proper arithmetic operation instead, unless + it is guaranteed to be OK. 
*/ + if (GET_CODE (operands[1]) == PLUS + && !legitimate_la_operand_p (operands[1])) + { + operands[1] = force_operand (operands[1], operands[0]); + if (operands[1] == operands[0]) + DONE; + } + + /* During and after reload, we need to force constants + to the literal pool ourselves, if necessary. */ + if ((reload_in_progress || reload_completed) + && CONSTANT_P (operands[1]) + && (!legitimate_reload_constant_p (operands[1]) + || fp_operand (operands[0], VOIDmode))) + operands[1] = force_const_mem (SImode, operands[1]); +}") + +(define_insn "*movsi_lhi" + [(set (match_operand:SI 0 "register_operand" "=d") + (match_operand:SI 1 "immediate_operand" "K"))] + "GET_CODE (operands[1]) == CONST_INT + && CONST_OK_FOR_LETTER_P (INTVAL (operands[1]), 'K') + && !fp_operand (operands[0], VOIDmode)" + "lhi\\t%0,%h1" + [(set_attr "op_type" "RI")]) + +(define_insn "*movsi_lli" + [(set (match_operand:SI 0 "register_operand" "=d") + (match_operand:SI 1 "immediate_operand" "n"))] + "TARGET_64BIT && s390_single_hi (operands[1], SImode, 0) >= 0 + && !fp_operand (operands[0], VOIDmode)" + "* +{ + int part = s390_single_hi (operands[1], SImode, 0); + operands[1] = GEN_INT (s390_extract_hi (operands[1], SImode, part)); + + switch (part) + { + case 0: return \"llilh\\t%0,%x1\"; + case 1: return \"llill\\t%0,%x1\"; + default: abort (); + } +}" + [(set_attr "op_type" "RI")]) + +(define_insn "*movsi_ss" + [(set (match_operand:SI 0 "s_operand" "=Qo") + (match_operand:SI 1 "s_imm_operand" "Qo"))] + "" + "mvc\\t%O0(4,%R0),%1" + [(set_attr "op_type" "SS") + (set_attr "atype" "mem")]) + +(define_insn "*movsi" + [(set (match_operand:SI 0 "nonimmediate_operand" "=d,d,m,!*f,!*f,!m") + (match_operand:SI 1 "general_operand" "d,m,d,*f,m,*f"))] + "" + "@ + lr\\t%0,%1 + l\\t%0,%1 + st\\t%1,%0 + ler\\t%0,%1 + le\\t%0,%1 + ste\\t%1,%0" + [(set_attr "op_type" "RR,RX,RX,RR,RX,RX") + (set_attr "atype" "reg,mem,mem,reg,mem,mem")]) + + +; +; movhi instruction pattern(s). 
+; + +(define_insn "movhi" + [(set (match_operand:HI 0 "nonimmediate_operand" "=d,d,d,m") + (match_operand:HI 1 "general_operand" "d,n,m,d"))] + "" + "@ + lr\\t%0,%1 + lhi\\t%0,%h1 + lh\\t%0,%1 + sth\\t%1,%0" + [(set_attr "op_type" "RR,RI,RX,RX") + (set_attr "atype" "reg,reg,mem,mem")]) + + +; +; movqi instruction pattern(s). +; + +(define_insn "movqi_64" + [(set (match_operand:QI 0 "nonimmediate_operand" "=d,d,d,m,Q") + (match_operand:QI 1 "general_operand" "d,n,m,d,n"))] + "TARGET_64BIT" + "@ + lr\\t%0,%1 + lhi\\t%0,%b1 + llgc\\t%0,%1 + stc\\t%1,%0 + mvi\\t%0,%b1" + [(set_attr "op_type" "RR,RI,RXE,RX,SI") + (set_attr "atype" "reg,reg,mem,mem,mem")]) + + +(define_insn "movqi" + [(set (match_operand:QI 0 "nonimmediate_operand" "=d,d,d,m,Q") + (match_operand:QI 1 "general_operand" "d,n,m,d,n"))] + "" + "@ + lr\\t%0,%1 + lhi\\t%0,%b1 + ic\\t%0,%1 + stc\\t%1,%0 + mvi\\t%0,%b1" + [(set_attr "op_type" "RR,RI,RX,RX,SI") + (set_attr "atype" "reg,reg,mem,mem,mem")]) + + +; +; moveqstrictqi instruction pattern(s). +; + +(define_insn "*movstrictqi" + [(set (strict_low_part (match_operand:QI 0 "register_operand" "+d")) + (match_operand:QI 1 "memory_operand" "m"))] + "" + "ic\\t%0,%1" + [(set_attr "op_type" "RX") + (set_attr "atype" "mem")]) + +; +; movstricthi instruction pattern(s). +; + +(define_insn "*movstricthi" + [(set (strict_low_part (match_operand:HI 0 "register_operand" "+d")) + (match_operand:HI 1 "s_imm_operand" "Qo")) + (clobber (reg:CC 33))] + "" + "icm\\t%0,3,%1" + [(set_attr "op_type" "RS") + (set_attr "atype" "mem")]) + + +; +; movstrictsi instruction pattern(s). +; + +(define_insn "movestrictsi" + [(set (strict_low_part (match_operand:SI 0 "register_operand" "+d,d")) + (match_operand:SI 1 "general_operand" "d,m"))] + "TARGET_64BIT" + "@ + lr\\t%0,%1 + l\\t%0,%1" + [(set_attr "op_type" "RR,RS") + (set_attr "atype" "reg,mem")]) + + +; +; movdf instruction pattern(s). 
+; + +(define_expand "movdf" + [(set (match_operand:DF 0 "nonimmediate_operand" "") + (match_operand:DF 1 "general_operand" ""))] + "" + " +{ + /* During and after reload, we need to force constants + to the literal pool ourselves, if necessary. */ + if ((reload_in_progress || reload_completed) + && CONSTANT_P (operands[1])) + operands[1] = force_const_mem (DFmode, operands[1]); +}") + +(define_insn "*movdf_ss" + [(set (match_operand:DF 0 "s_operand" "=Qo") + (match_operand:DF 1 "s_imm_operand" "Qo"))] + "" + "mvc\\t%O0(8,%R0),%1" + [(set_attr "op_type" "SS") + (set_attr "atype" "mem")]) + +(define_insn "*movdf_64" + [(set (match_operand:DF 0 "nonimmediate_operand" "=f,f,m,d,d,m") + (match_operand:DF 1 "general_operand" "f,m,f,d,m,d"))] + "TARGET_64BIT" + "@ + ldr\\t%0,%1 + ld\\t%0,%1 + std\\t%1,%0 + lgr\\t%0,%1 + lg\\t%0,%1 + stg\\t%1,%0" + [(set_attr "op_type" "RR,RX,RX,RRE,RXE,RXE") + (set_attr "atype" "reg,mem,mem,reg,mem,mem")]) + +(define_insn "*movdf_31" + [(set (match_operand:DF 0 "nonimmediate_operand" "=f,f,m,d,Q,d,m") + (match_operand:DF 1 "general_operand" "f,m,f,Q,d,dKm,d"))] + "!TARGET_64BIT" + "@ + ldr\\t%0,%1 + ld\\t%0,%1 + std\\t%1,%0 + lm\\t%0,%N0,%1 + stm\\t%1,%N1,%0 + # + #" + [(set_attr "op_type" "RR,RX,RX,RS,RS,NN,NN") + (set_attr "atype" "reg,mem,mem,mem,mem,*,*")]) + +(define_split + [(set (match_operand:DF 0 "nonimmediate_operand" "") + (match_operand:DF 1 "general_operand" ""))] + "!TARGET_64BIT && reload_completed + && !fp_operand (operands[0], VOIDmode) + && !fp_operand (operands[1], VOIDmode) + && !s_operand (operands[0], VOIDmode) + && !s_operand (operands[1], VOIDmode) + && (register_operand (operands[0], VOIDmode) + || register_operand (operands[1], VOIDmode)) + && (!register_operand (operands[0], VOIDmode) + || !reg_overlap_mentioned_p (operand_subword (operands[0], 0, 0, DFmode), + operands[1]) + || !reg_overlap_mentioned_p (operand_subword (operands[0], 1, 0, DFmode), + operands[1]))" + [(set (match_dup 2) (match_dup 4)) + (set 
(match_dup 3) (match_dup 5))] + " +{ + if (!register_operand (operands[0], VOIDmode) + || !reg_overlap_mentioned_p (operand_subword (operands[0], 0, 0, DFmode), + operands[1])) + { + operands[2] = operand_subword (operands[0], 0, 0, DFmode); + operands[3] = operand_subword (operands[0], 1, 0, DFmode); + operands[4] = operand_subword (operands[1], 0, 0, DFmode); + operands[5] = operand_subword (operands[1], 1, 0, DFmode); + } + else + { + operands[2] = operand_subword (operands[0], 1, 0, DFmode); + operands[3] = operand_subword (operands[0], 0, 0, DFmode); + operands[4] = operand_subword (operands[1], 1, 0, DFmode); + operands[5] = operand_subword (operands[1], 0, 0, DFmode); + } +}") + +(define_split + [(set (match_operand:DF 0 "register_operand" "") + (match_operand:DF 1 "memory_operand" ""))] + "!TARGET_64BIT && reload_completed + && !fp_operand (operands[0], VOIDmode) + && !fp_operand (operands[1], VOIDmode) + && !s_operand (operands[1], VOIDmode)" + [(set (match_dup 2) (match_dup 3)) + (set (match_dup 0) (mem:DI (match_dup 2)))] + "operands[2] = operand_subword (operands[0], 1, 0, DFmode); + operands[3] = legitimize_la_operand (XEXP (operands[1], 0));") + +; +; movsf instruction pattern(s). +; + +(define_expand "movsf" + [(set (match_operand:SF 0 "nonimmediate_operand" "") + (match_operand:SF 1 "general_operand" ""))] + "" + " +{ + /* During and after reload, we need to force constants + to the literal pool ourselves, if necessary. 
*/ + if ((reload_in_progress || reload_completed) + && CONSTANT_P (operands[1])) + operands[1] = force_const_mem (SFmode, operands[1]); +}") + +(define_insn "*movsf_ss" + [(set (match_operand:SF 0 "s_operand" "=Qo") + (match_operand:SF 1 "s_imm_operand" "Qo"))] + "" + "mvc\\t%O0(4,%R0),%1" + [(set_attr "op_type" "SS") + (set_attr "atype" "mem")]) + +(define_insn "*movsf" + [(set (match_operand:SF 0 "nonimmediate_operand" "=f,f,m,d,d,m") + (match_operand:SF 1 "general_operand" "f,m,f,d,m,d"))] + "" + "@ + ler\\t%0,%1 + le\\t%0,%1 + ste\\t%1,%0 + lr\\t%0,%1 + l\\t%0,%1 + st\\t%1,%0" + [(set_attr "op_type" "RR,RX,RX,RR,RX,RX") + (set_attr "atype" "reg,mem,mem,reg,mem,mem")]) + +; +; load_multiple pattern(s). +; + +(define_expand "load_multiple" + [(match_par_dup 3 [(set (match_operand 0 "" "") + (match_operand 1 "" "")) + (use (match_operand 2 "" ""))])] + "" + " +{ + int regno; + int count; + rtx from; + int i, off; + + /* Support only loading a constant number of fixed-point registers from + memory and only bother with this if more than two */ + if (GET_CODE (operands[2]) != CONST_INT + || INTVAL (operands[2]) < 2 + || INTVAL (operands[2]) > 16 + || GET_CODE (operands[1]) != MEM + || GET_CODE (operands[0]) != REG + || REGNO (operands[0]) >= 16) + FAIL; + + count = INTVAL (operands[2]); + regno = REGNO (operands[0]); + + operands[3] = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count)); + if (no_new_pseudos) + { + if (GET_CODE (XEXP (operands[1], 0)) == REG) + { + from = XEXP (operands[1], 0); + off = 0; + } + else if (GET_CODE (XEXP (operands[1], 0)) == PLUS + && GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == REG + && GET_CODE (XEXP (XEXP (operands[1], 0), 1)) == CONST_INT) + { + from = XEXP (XEXP (operands[1], 0), 0); + off = INTVAL (XEXP (XEXP (operands[1], 0), 1)); + } + else + FAIL; + + if (from == frame_pointer_rtx || from == arg_pointer_rtx) + FAIL; + } + else + { + from = force_reg (Pmode, XEXP (operands[1], 0)); + off = 0; + } + + for (i = 0; i < count; i++) + 
XVECEXP (operands[3], 0, i) + = gen_rtx_SET (VOIDmode, gen_rtx_REG (Pmode, regno + i), + change_address (operands[1], Pmode, + plus_constant (from, + off + i * UNITS_PER_WORD))); +}") + +(define_insn "*load_multiple_di" + [(match_parallel 0 "load_multiple_operation" + [(set (match_operand:DI 1 "register_operand" "=r") + (match_operand:DI 2 "s_operand" "oQ"))])] + "" + "* +{ + int words = XVECLEN (operands[0], 0); + + if (XVECLEN (operands[0], 0) == 1) + return \"lg\\t%1,0(%2)\"; + + operands[0] = gen_rtx_REG (DImode, REGNO (operands[1]) + words - 1); + return \"lmg\\t%1,%0,%2\"; +}" + [(set_attr "op_type" "RXE") + (set_attr "atype" "mem") + (set_attr "type" "lm")]) + +(define_insn "*load_multiple_si" + [(match_parallel 0 "load_multiple_operation" + [(set (match_operand:SI 1 "register_operand" "=r") + (match_operand:SI 2 "s_operand" "oQ"))])] + "" + "* +{ + int words = XVECLEN (operands[0], 0); + + if (XVECLEN (operands[0], 0) == 1) + return \"l\\t%1,0(%2)\"; + + operands[0] = gen_rtx_REG (SImode, REGNO (operands[1]) + words - 1); + return \"lm\\t%1,%0,%2\"; +}" + [(set_attr "op_type" "RXE") + (set_attr "atype" "mem") + (set_attr "type" "lm")]) + +; +; store multiple pattern(s). +; + +(define_expand "store_multiple" + [(match_par_dup 3 [(set (match_operand 0 "" "") + (match_operand 1 "" "")) + (use (match_operand 2 "" ""))])] + "" + " +{ + int regno; + int count; + rtx to; + int i, off; + + /* Support only storing a constant number of fixed-point registers to + memory and only bother with this if more than two. 
*/ + if (GET_CODE (operands[2]) != CONST_INT + || INTVAL (operands[2]) < 2 + || INTVAL (operands[2]) > 16 + || GET_CODE (operands[0]) != MEM + || GET_CODE (operands[1]) != REG + || REGNO (operands[1]) >= 16) + FAIL; + + count = INTVAL (operands[2]); + regno = REGNO (operands[1]); + + operands[3] = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count)); + + if (no_new_pseudos) + { + if (GET_CODE (XEXP (operands[0], 0)) == REG) + { + to = XEXP (operands[0], 0); + off = 0; + } + else if (GET_CODE (XEXP (operands[0], 0)) == PLUS + && GET_CODE (XEXP (XEXP (operands[0], 0), 0)) == REG + && GET_CODE (XEXP (XEXP (operands[0], 0), 1)) == CONST_INT) + { + to = XEXP (XEXP (operands[0], 0), 0); + off = INTVAL (XEXP (XEXP (operands[0], 0), 1)); + } + else + FAIL; + + if (to == frame_pointer_rtx || to == arg_pointer_rtx) + FAIL; + } + else + { + to = force_reg (Pmode, XEXP (operands[0], 0)); + off = 0; + } + + for (i = 0; i < count; i++) + XVECEXP (operands[3], 0, i) + = gen_rtx_SET (VOIDmode, + change_address (operands[0], Pmode, + plus_constant (to, + off + i * UNITS_PER_WORD)), + gen_rtx_REG (Pmode, regno + i)); +}") + +(define_insn "*store_multiple_di" + [(match_parallel 0 "store_multiple_operation" + [(set (match_operand:DI 1 "s_operand" "=oQ") + (match_operand:DI 2 "register_operand" "r"))])] + "" + "* +{ + int words = XVECLEN (operands[0], 0); + + if (XVECLEN (operands[0], 0) == 1) + return \"stg\\t%1,0(%2)\"; + + operands[0] = gen_rtx_REG (DImode, REGNO (operands[2]) + words - 1); + return \"stmg\\t%2,%0,%1\"; +}" + [(set_attr "op_type" "RXE") + (set_attr "atype" "mem") + (set_attr "type" "stm")]) + + +(define_insn "*store_multiple_si" + [(match_parallel 0 "store_multiple_operation" + [(set (match_operand:SI 1 "s_operand" "=oQ") + (match_operand:SI 2 "register_operand" "r"))])] + "" + "* +{ + int words = XVECLEN (operands[0], 0); + + if (XVECLEN (operands[0], 0) == 1) + return \"st\\t%1,0(%2)\"; + + operands[0] = gen_rtx_REG (SImode, REGNO (operands[2]) + words - 1); + return 
\"stm\\t%2,%0,%1\"; +}" + [(set_attr "op_type" "RXE") + (set_attr "atype" "mem") + (set_attr "type" "stm")]) + +;; +;; String instructions. +;; + +; +; movstrdi instruction pattern(s). +; + +(define_expand "movstrdi" + [(set (match_operand:BLK 0 "general_operand" "") + (match_operand:BLK 1 "general_operand" "")) + (use (match_operand:DI 2 "general_operand" "")) + (match_operand 3 "" "")] + "TARGET_64BIT" + " +{ + rtx addr0, addr1; + + addr0 = force_operand (XEXP (operands[0], 0), NULL_RTX); + addr1 = force_operand (XEXP (operands[1], 0), NULL_RTX); + + if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) <= 256) + { + operands[0] = change_address (operands[0], VOIDmode, addr0); + operands[1] = change_address (operands[1], VOIDmode, addr1); + operands[2] = GEN_INT (INTVAL (operands[2]) - 1); + + emit_insn (gen_movstrdi_short (operands[0], operands[1], operands[2])); + DONE; + } + else + { + if (TARGET_MVCLE) + { + /* implementation suggested by Richard Henderson <rth@cygnus.com> */ + rtx reg0 = gen_reg_rtx (TImode); + rtx reg1 = gen_reg_rtx (TImode); + rtx len = operands[2]; + + if (! CONSTANT_P (len)) + len = force_reg (DImode, len); + + /* Load up the address+length pairs. 
*/ + + emit_move_insn (gen_highpart (DImode, reg0), addr0); + emit_move_insn (gen_lowpart (DImode, reg0), len); + + emit_move_insn (gen_highpart (DImode, reg1), addr1); + emit_move_insn (gen_lowpart (DImode, reg1), len); + + /* MOVE */ + emit_insn (gen_movstrdi_64 (reg0, reg1, reg0, reg1)); + DONE; + } + else + { + rtx label1 = gen_label_rtx (); + rtx label2 = gen_label_rtx (); + rtx reg0, reg1, len, blocks; + + reg0 = gen_reg_rtx (DImode); + reg1 = gen_reg_rtx (DImode); + len = gen_reg_rtx (DImode); + blocks = gen_reg_rtx (DImode); + + emit_move_insn (len, operands[2]); + emit_insn (gen_cmpdi (len, const0_rtx)); + emit_jump_insn (gen_beq (label1)); + emit_move_insn (reg0, addr0); + emit_move_insn (reg1, addr1); + emit_insn (gen_adddi3 (len, len, constm1_rtx)); + emit_insn (gen_ashrdi3 (blocks, len, GEN_INT (8))); + emit_insn (gen_cmpdi (blocks, const0_rtx)); + emit_jump_insn (gen_beq (label2)); + emit_insn (gen_movstrdi_long (reg0, reg1, reg0, reg1, blocks, blocks)); + emit_label (label2); + operands[0] = change_address (operands[0], VOIDmode, reg0); + operands[1] = change_address (operands[1], VOIDmode, reg1); + emit_insn (gen_movstrdi_short (operands[0], operands[1], len)); + emit_label (label1); + DONE; + } + } +}") + +; +; movstrsi instruction pattern(s). 
+; + +(define_expand "movstrsi" + [(set (match_operand:BLK 0 "general_operand" "") + (match_operand:BLK 1 "general_operand" "")) + (use (match_operand:SI 2 "general_operand" "")) + (match_operand 3 "" "")] + "!TARGET_64BIT" + " +{ + rtx addr0 = force_operand (XEXP (operands[0], 0), NULL_RTX); + rtx addr1 = force_operand (XEXP (operands[1], 0), NULL_RTX); + + if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) <= 256) + { + operands[0] = change_address (operands[0], VOIDmode, addr0); + operands[1] = change_address (operands[1], VOIDmode, addr1); + operands[2] = GEN_INT (INTVAL (operands[2]) - 1); + + emit_insn (gen_movstrsi_short (operands[0], operands[1], operands[2])); + DONE; + } + else + { + if (TARGET_MVCLE) + { + /* implementation suggested by Richard Henderson <rth@cygnus.com> */ + rtx reg0 = gen_reg_rtx (DImode); + rtx reg1 = gen_reg_rtx (DImode); + rtx len = operands[2]; + + + if (! CONSTANT_P (len)) + len = force_reg (SImode, len); + + /* Load up the address+length pairs. 
*/ + + emit_move_insn (gen_highpart (SImode, reg0), addr0); + emit_move_insn (gen_lowpart (SImode, reg0), len); + + emit_move_insn (gen_highpart (SImode, reg1), addr1); + emit_move_insn (gen_lowpart (SImode, reg1), len); + + /* MOVE */ + emit_insn (gen_movstrsi_31 (reg0, reg1, reg0, reg1)); + DONE; + } + else + { + rtx label1 = gen_label_rtx (); + rtx label2 = gen_label_rtx (); + rtx reg0, reg1, len, blocks; + + reg0 = gen_reg_rtx (SImode); + reg1 = gen_reg_rtx (SImode); + len = gen_reg_rtx (SImode); + blocks = gen_reg_rtx (SImode); + + emit_move_insn (len, operands[2]); + emit_insn (gen_cmpsi (len, const0_rtx)); + emit_jump_insn (gen_beq (label1)); + emit_move_insn (reg0, addr0); + emit_move_insn (reg1, addr1); + emit_insn (gen_addsi3 (len, len, constm1_rtx)); + emit_insn (gen_ashrsi3 (blocks, len, GEN_INT (8))); + emit_insn (gen_cmpsi (blocks, const0_rtx)); + emit_jump_insn (gen_beq (label2)); + emit_insn (gen_movstrsi_long (reg0, reg1, reg0, reg1, blocks, blocks)); + emit_label (label2); + operands[0] = change_address (operands[0], VOIDmode, reg0); + operands[1] = change_address (operands[1], VOIDmode, reg1); + emit_insn (gen_movstrsi_short (operands[0], operands[1], len)); + emit_label (label1); + DONE; + } + } +}") + +; Move a block that is up to 256 bytes in length. +; The block length is taken as (operands[2] % 256) + 1. 
+ +(define_insn "movstrdi_short" + [(set (match_operand:BLK 0 "s_operand" "=oQ,oQ") + (match_operand:BLK 1 "s_operand" "oQ,oQ")) + (use (match_operand:DI 2 "nonmemory_operand" "n,a")) + (clobber (match_scratch:DI 3 "=X,&a"))] + "TARGET_64BIT" + "* +{ + switch (which_alternative) + { + case 0: + return \"mvc\\t%O0(%b2+1,%R0),%1\"; + + case 1: + output_asm_insn (\"bras\\t%3,.+10\", operands); + output_asm_insn (\"mvc\\t%O0(1,%R0),%1\", operands); + return \"ex\\t%2,0(%3)\"; + + default: + abort (); + } +}" + [(set_attr "op_type" "SS,NN") + (set_attr "atype" "mem,mem") + (set_attr "length" "*,14")]) + +(define_insn "movstrsi_short" + [(set (match_operand:BLK 0 "s_operand" "=oQ,oQ") + (match_operand:BLK 1 "s_operand" "oQ,oQ")) + (use (match_operand:SI 2 "nonmemory_operand" "n,a")) + (clobber (match_scratch:SI 3 "=X,&a"))] + "!TARGET_64BIT" + "* +{ + switch (which_alternative) + { + case 0: + return \"mvc\\t%O0(%b2+1,%R0),%1\"; + + case 1: + output_asm_insn (\"bras\\t%3,.+10\", operands); + output_asm_insn (\"mvc\\t%O0(1,%R0),%1\", operands); + return \"ex\\t%2,0(%3)\"; + + default: + abort (); + } +}" + [(set_attr "op_type" "SS,NN") + (set_attr "atype" "mem,mem") + (set_attr "length" "*,14")]) + +; Move a block that is a multiple of 256 bytes in length + +(define_insn "movstrdi_long" + [(set (match_operand:DI 4 "register_operand" "=d") + (const_int 0)) + (set (match_operand:DI 0 "register_operand" "=a") + (plus:DI (match_operand:DI 2 "register_operand" "0") + (ashift:DI (match_operand:DI 5 "register_operand" "4") + (const_int 8)))) + (set (match_operand:DI 1 "register_operand" "=a") + (plus:DI (match_operand:DI 3 "register_operand" "1") + (ashift:DI (match_dup 5) (const_int 8)))) + (set (mem:BLK (match_dup 2)) + (mem:BLK (match_dup 3))) + (use (match_dup 5))] + "TARGET_64BIT" + "* +{ + output_asm_insn (\"mvc\\t0(256,%0),0(%1)\", operands); + output_asm_insn (\"la\\t%0,256(%0)\", operands); + output_asm_insn (\"la\\t%1,256(%1)\", operands); + return \"brct\\t%4,.-14\"; 
+}" + [(set_attr "op_type" "NN") + (set_attr "atype" "mem") + (set_attr "length" "18")]) + +(define_insn "movstrsi_long" + [(set (match_operand:SI 4 "register_operand" "=d") + (const_int 0)) + (set (match_operand:SI 0 "register_operand" "=a") + (plus:SI (match_operand:SI 2 "register_operand" "0") + (ashift:SI (match_operand:SI 5 "register_operand" "4") + (const_int 8)))) + (set (match_operand:SI 1 "register_operand" "=a") + (plus:SI (match_operand:SI 3 "register_operand" "1") + (ashift:SI (match_dup 5) (const_int 8)))) + (set (mem:BLK (match_dup 2)) + (mem:BLK (match_dup 3))) + (use (match_dup 5))] + "!TARGET_64BIT" + "* +{ + output_asm_insn (\"mvc\\t0(256,%0),0(%1)\", operands); + output_asm_insn (\"la\\t%0,256(%0)\", operands); + output_asm_insn (\"la\\t%1,256(%1)\", operands); + return \"brct\\t%4,.-14\"; +}" + [(set_attr "op_type" "NN") + (set_attr "atype" "mem") + (set_attr "length" "18")]) + +; Move a block that is larger than 255 bytes in length. + +(define_insn "movstrdi_64" + [(set (match_operand:TI 0 "register_operand" "=d") + (ashift:TI (plus:TI (match_operand:TI 2 "register_operand" "0") + (lshiftrt:TI (match_dup 2) (const_int 64))) + (const_int 64))) + (set (match_operand:TI 1 "register_operand" "=d") + (ashift:TI (plus:TI (match_operand:TI 3 "register_operand" "1") + (lshiftrt:TI (match_dup 3) (const_int 64))) + (const_int 64))) + (set (mem:BLK (subreg:DI (match_dup 2) 0)) + (mem:BLK (subreg:DI (match_dup 3) 0))) + (clobber (reg:CC 33))] + "TARGET_64BIT" + "mvcle\\t%0,%1,0\;jo\\t.-4" + [(set_attr "op_type" "NN") + (set_attr "atype" "mem") + (set_attr "length" "8")]) + +(define_insn "movstrsi_31" + [(set (match_operand:DI 0 "register_operand" "=d") + (ashift:DI (plus:DI (match_operand:DI 2 "register_operand" "0") + (lshiftrt:DI (match_dup 2) (const_int 32))) + (const_int 32))) + (set (match_operand:DI 1 "register_operand" "=d") + (ashift:DI (plus:DI (match_operand:DI 3 "register_operand" "1") + (lshiftrt:DI (match_dup 3) (const_int 32))) + (const_int 
32))) + (set (mem:BLK (subreg:SI (match_dup 2) 0)) + (mem:BLK (subreg:SI (match_dup 3) 0))) + (clobber (reg:CC 33))] + "!TARGET_64BIT" + "mvcle\\t%0,%1,0\;jo\\t.-4" + [(set_attr "op_type" "NN") + (set_attr "atype" "mem") + (set_attr "length" "8")]) + +; +; clrstrdi instruction pattern(s). +; + +(define_expand "clrstrdi" + [(set (match_operand:BLK 0 "general_operand" "") + (const_int 0)) + (use (match_operand:DI 1 "general_operand" "")) + (match_operand 2 "" "")] + "TARGET_64BIT" + " +{ + rtx addr = force_operand (XEXP (operands[0], 0), NULL_RTX); + + operands[0] = change_address (operands[0], VOIDmode, addr); + + if (GET_CODE (operands[1]) == CONST_INT && INTVAL (operands[1]) < 256) + { + emit_insn (gen_clrstrsico (operands[0], operands[1])); + DONE; + } + else + { + rtx reg0 = gen_reg_rtx (TImode); + rtx reg1 = gen_reg_rtx (TImode); + rtx len = operands[1]; + + if (! CONSTANT_P (len)) + len = force_reg (DImode, len); + + /* Load up the address+length pairs. */ + + emit_move_insn (gen_highpart (DImode, reg0), addr); + emit_move_insn (gen_lowpart (DImode, reg0), len); + + emit_move_insn (gen_lowpart (DImode, reg1), const0_rtx); + + /* Clear! */ + emit_insn (gen_clrstrsi_64 (reg0, reg1, reg0)); + DONE; + } +}") + +; +; clrstrsi instruction pattern(s). +; + +(define_expand "clrstrsi" + [(set (match_operand:BLK 0 "general_operand" "") + (const_int 0)) + (use (match_operand:SI 1 "general_operand" "")) + (match_operand 2 "" "")] + "!TARGET_64BIT" + " +{ + rtx addr = force_operand (XEXP (operands[0], 0), NULL_RTX); + + operands[0] = change_address (operands[0], VOIDmode, addr); + + if (GET_CODE (operands[1]) == CONST_INT && INTVAL (operands[1]) < 256) + { + emit_insn (gen_clrstrsico (operands[0], operands[1])); + DONE; + } + else + { + rtx reg0 = gen_reg_rtx (DImode); + rtx reg1 = gen_reg_rtx (DImode); + rtx len = operands[1]; + + if (! CONSTANT_P (len)) + len = force_reg (SImode, len); + + /* Load up the address+length pairs. 
*/ + + emit_move_insn (gen_highpart (SImode, reg0), addr); + emit_move_insn (gen_lowpart (SImode, reg0), len); + + emit_move_insn (gen_lowpart (SImode, reg1), const0_rtx); + + /* CLear! */ + emit_insn (gen_clrstrsi_31 (reg0, reg1, reg0)); + DONE; + } +}") + +; Clear memory with length less than 256 bytes + +(define_insn "clrstrsico" + [(set (match_operand:BLK 0 "s_operand" "=Qo") + (const_int 0)) + (use (match_operand 1 "immediate_operand" "I")) + (clobber (reg:CC 33))] + "" + "xc\\t%O0(%1,%R0),%0" + [(set_attr "op_type" "RS") + (set_attr "type" "cs") + (set_attr "atype" "mem")]) + +; Clear memory with length greater 256 bytes or lenght not constant + +(define_insn "clrstrsi_64" + [(set (match_operand:TI 0 "register_operand" "=d") + (ashift:TI (plus:TI (match_operand:TI 2 "register_operand" "0") + (lshiftrt:TI (match_dup 2) (const_int 64))) + (const_int 64))) + (set (mem:BLK (subreg:DI (match_dup 2) 0)) + (const_int 0)) + (use (match_operand:TI 1 "register_operand" "d")) + (clobber (reg:CC 33))] + "TARGET_64BIT" + "mvcle\\t%0,%1,0\;jo\\t.-4" + [(set_attr "op_type" "NN") + (set_attr "atype" "mem") + (set_attr "type" "vs") + (set_attr "length" "8")]) + +(define_insn "clrstrsi_31" + [(set (match_operand:DI 0 "register_operand" "=d") + (ashift:DI (plus:DI (match_operand:DI 2 "register_operand" "0") + (lshiftrt:DI (match_dup 2) (const_int 32))) + (const_int 32))) + (set (mem:BLK (subreg:SI (match_dup 2) 0)) + (const_int 0)) + (use (match_operand:DI 1 "register_operand" "d")) + (clobber (reg:CC 33))] + "!TARGET_64BIT" + "mvcle\\t%0,%1,0\;jo\\t.-4" + [(set_attr "op_type" "NN") + (set_attr "atype" "mem") + (set_attr "type" "vs") + (set_attr "length" "8")]) + +; +; cmpstrdi instruction pattern(s). 
+; + +(define_expand "cmpstrdi" + [(set (match_operand:DI 0 "register_operand" "") + (compare:DI (match_operand:BLK 1 "s_operand" "") + (match_operand:BLK 2 "s_operand" "") ) ) + (use (match_operand:DI 3 "general_operand" "")) + (use (match_operand:DI 4 "" ""))] + "TARGET_64BIT" + " +{ + rtx addr0, addr1; + + /* for pre/post increment */ + operands[1] = protect_from_queue (operands[1], 0); + operands[2] = protect_from_queue (operands[2], 0); + operands[3] = protect_from_queue (operands[3], 0); + + addr0 = force_operand (XEXP (operands[1], 0), NULL_RTX); + addr1 = force_operand (XEXP (operands[2], 0), NULL_RTX); + + if (GET_CODE (operands[3]) == CONST_INT && INTVAL (operands[3]) < 256) + { + if (INTVAL (operands[3]) == 0) { + emit_move_insn (operands[0], operands[3]); + DONE; + } + + operands[1] = change_address (operands[1], VOIDmode, addr0); + operands[2] = change_address (operands[2], VOIDmode, addr1); + + emit_insn (gen_cmpstr_const (operands[1], operands[2], operands[3])); + emit_insn (gen_cmpint_di (operands[0])); + DONE; + } + else + { + /* implementation suggested by Richard Henderson <rth@cygnus.com> */ + rtx reg0 = gen_reg_rtx (TImode); + rtx reg1 = gen_reg_rtx (TImode); + rtx len = operands[3]; + + if (! CONSTANT_P (len)) + len = force_reg (DImode, len); + + /* Load up the address+length pairs. */ + emit_move_insn (gen_highpart (DImode, reg0), addr0); + emit_move_insn (gen_lowpart (DImode, reg0), len); + + emit_move_insn (gen_highpart (DImode, reg1), addr1); + emit_move_insn (gen_lowpart (DImode, reg1), len); + + /* Compare! */ + emit_insn (gen_cmpstr_64 (reg0, reg1, reg0, reg1)); + emit_insn (gen_cmpint_di (operands[0])); + DONE; + } +}") + +; +; cmpstrsi instruction pattern(s). 
+; + +(define_expand "cmpstrsi" + [(set (match_operand:SI 0 "register_operand" "") + (compare:SI (match_operand:BLK 1 "s_operand" "") + (match_operand:BLK 2 "s_operand" "") ) ) + (use (match_operand:SI 3 "general_operand" "")) + (use (match_operand:SI 4 "" ""))] + "" + " +{ + rtx addr0, addr1; + + /* for pre/post increment */ + operands[1] = protect_from_queue (operands[1], 0); + operands[2] = protect_from_queue (operands[2], 0); + operands[3] = protect_from_queue (operands[3], 0); + + addr0 = force_operand (XEXP (operands[1], 0), NULL_RTX); + addr1 = force_operand (XEXP (operands[2], 0), NULL_RTX); + + if (GET_CODE (operands[3]) == CONST_INT && INTVAL (operands[3]) < 256) + { + if (INTVAL (operands[3]) == 0) { + emit_move_insn (operands[0], operands[3]); + DONE; + } + + operands[1] = change_address (operands[1], VOIDmode, addr0); + operands[2] = change_address (operands[2], VOIDmode, addr1); + + emit_insn (gen_cmpstr_const (operands[1], operands[2], operands[3])); + emit_insn (gen_cmpint_si (operands[0])); + DONE; + } + else + { + /* implementation suggested by Richard Henderson <rth@cygnus.com> */ + rtx reg0, reg1; + rtx len = operands[3]; + + if (TARGET_64BIT) + { + reg0 = gen_reg_rtx (TImode); + reg1 = gen_reg_rtx (TImode); + } + else + { + reg0 = gen_reg_rtx (DImode); + reg1 = gen_reg_rtx (DImode); + } + + /* Load up the address+length pairs. */ + emit_move_insn (gen_highpart (Pmode, reg0), addr0); + convert_move (gen_lowpart (Pmode, reg0), len, 1); + + emit_move_insn (gen_highpart (Pmode, reg1), addr1); + convert_move (gen_lowpart (Pmode, reg1), len, 1); + + /* Compare! */ + if (TARGET_64BIT) + emit_insn (gen_cmpstr_64 (reg0, reg1, reg0, reg1)); + else + emit_insn (gen_cmpstr_31 (reg0, reg1, reg0, reg1)); + + emit_insn (gen_cmpint_si (operands[0])); + DONE; + } +}") + +; Compare a block that is less than 256 bytes in length. 
+ +(define_insn "cmpstr_const" + [(set (reg:CCS 33) + (compare:CCS (match_operand:BLK 0 "s_operand" "oQ") + (match_operand:BLK 1 "s_operand" "oQ"))) + (use (match_operand 2 "immediate_operand" "I"))] + "(unsigned) INTVAL (operands[2]) < 256" + "clc\\t%O0(%c2,%R0),%1" + [(set_attr "op_type" "SS") + (set_attr "atype" "mem") + (set_attr "type" "cs")]) + +; Compare a block that is larger than 255 bytes in length. + +(define_insn "cmpstr_64" + [(clobber (match_operand:TI 0 "register_operand" "=d")) + (clobber (match_operand:TI 1 "register_operand" "=d")) + (set (reg:CCS 33) + (compare:CCS (mem:BLK (subreg:DI (match_operand:TI 2 "register_operand" "0") 0)) + (mem:BLK (subreg:DI (match_operand:TI 3 "register_operand" "1") 0))))] + "TARGET_64BIT" + "clcl\\t%0,%1" + [(set_attr "op_type" "RR") + (set_attr "atype" "mem") + (set_attr "type" "vs")]) + +(define_insn "cmpstr_31" + [(clobber (match_operand:DI 0 "register_operand" "=d")) + (clobber (match_operand:DI 1 "register_operand" "=d")) + (set (reg:CCS 33) + (compare:CCS (mem:BLK (subreg:SI (match_operand:DI 2 "register_operand" "0") 0)) + (mem:BLK (subreg:SI (match_operand:DI 3 "register_operand" "1") 0))))] + "!TARGET_64BIT" + "clcl\\t%0,%1" + [(set_attr "op_type" "RR") + (set_attr "atype" "mem") + (set_attr "type" "vs")]) + +; Convert condition code to integer in range (-1, 0, 1) + +(define_insn "cmpint_si" + [(set (match_operand:SI 0 "register_operand" "=d") + (compare:SI (reg:CCS 33) (const_int 0)))] + "" + "* +{ + output_asm_insn (\"lhi\\t%0,1\", operands); + output_asm_insn (\"jh\\t.+12\", operands); + output_asm_insn (\"jl\\t.+6\", operands); + output_asm_insn (\"sr\\t%0,%0\", operands); + return \"lcr\\t%0,%0\"; +}" + [(set_attr "op_type" "NN") + (set_attr "length" "16") + (set_attr "atype" "reg") + (set_attr "type" "other")]) + +(define_insn "cmpint_di" + [(set (match_operand:DI 0 "register_operand" "=d") + (compare:DI (reg:CCS 33) (const_int 0)))] + "TARGET_64BIT" + "* +{ + output_asm_insn (\"lghi\\t%0,1\", 
operands); + output_asm_insn (\"jh\\t.+12\", operands); + output_asm_insn (\"jl\\t.+6\", operands); + output_asm_insn (\"sgr\\t%0,%0\", operands); + return \"lcgr\\t%0,%0\"; +}" + [(set_attr "op_type" "NN") + (set_attr "length" "22") + (set_attr "atype" "reg") + (set_attr "type" "other")]) + + +;; +;;- Conversion instructions. +;; + +(define_insn "*sethighqisi" + [(set (match_operand:SI 0 "register_operand" "=d") + (unspec:SI [(match_operand:QI 1 "s_operand" "Qo")] 10)) + (clobber (reg:CC 33))] + "" + "icm\\t%0,8,%1" + [(set_attr "op_type" "RS") + (set_attr "atype" "mem")]) + +(define_insn "*sethighhisi" + [(set (match_operand:SI 0 "register_operand" "=d") + (unspec:SI [(match_operand:HI 1 "s_operand" "Qo")] 10)) + (clobber (reg:CC 33))] + "" + "icm\\t%0,12,%1" + [(set_attr "op_type" "RS") + (set_attr "atype" "mem")]) + +(define_insn "*sethighqidi_64" + [(set (match_operand:DI 0 "register_operand" "=d") + (unspec:DI [(match_operand:QI 1 "s_operand" "Qo")] 10)) + (clobber (reg:CC 33))] + "TARGET_64BIT" + "icmh\\t%0,8,%1" + [(set_attr "op_type" "RSE") + (set_attr "atype" "mem")]) + +(define_insn "*sethighqidi_31" + [(set (match_operand:DI 0 "register_operand" "=d") + (unspec:DI [(match_operand:QI 1 "s_operand" "Qo")] 10)) + (clobber (reg:CC 33))] + "!TARGET_64BIT" + "icm\\t%0,8,%1" + [(set_attr "op_type" "RS") + (set_attr "atype" "mem")]) + +(define_split + [(set (match_operand:SI 0 "register_operand" "") + (zero_extract:SI (match_operand:QI 1 "s_operand" "") + (match_operand 2 "const_int_operand" "") + (const_int 0)))] + "!TARGET_64BIT && !reload_completed + && INTVAL (operands[2]) > 0 && INTVAL (operands[2]) < 8" + [(parallel + [(set (match_dup 0) (unspec:SI [(match_dup 1)] 10)) + (clobber (reg:CC 33))]) + (set (match_dup 0) (lshiftrt:SI (match_dup 0) (match_dup 2)))] + " +{ + operands[2] = GEN_INT (32 - INTVAL (operands[2])); + operands[1] = change_address (operands[1], QImode, 0); +}") + +(define_split + [(set (match_operand:SI 0 "register_operand" "") + 
(zero_extract:SI (match_operand:QI 1 "s_operand" "") + (match_operand 2 "const_int_operand" "") + (const_int 0)))] + "!TARGET_64BIT && !reload_completed + && INTVAL (operands[2]) >= 8 && INTVAL (operands[2]) < 16" + [(parallel + [(set (match_dup 0) (unspec:SI [(match_dup 1)] 10)) + (clobber (reg:CC 33))]) + (set (match_dup 0) (lshiftrt:SI (match_dup 0) (match_dup 2)))] + " +{ + operands[2] = GEN_INT (32 - INTVAL (operands[2])); + operands[1] = change_address (operands[1], HImode, 0); +}") + +; +; extendsidi2 instruction pattern(s). +; + +(define_expand "extendsidi2" + [(set (match_operand:DI 0 "register_operand" "") + (sign_extend:DI (match_operand:SI 1 "nonimmediate_operand" "")))] + "" + " +{ + if (!TARGET_64BIT) + { + emit_insn (gen_rtx_CLOBBER (VOIDmode, operands[0])); + emit_move_insn (gen_highpart (SImode, operands[0]), operands[1]); + emit_move_insn (gen_lowpart (SImode, operands[0]), const0_rtx); + emit_insn (gen_ashrdi3 (operands[0], operands[0], GEN_INT (32))); + DONE; + } +} +") + +(define_insn "*extendsidi2" + [(set (match_operand:DI 0 "register_operand" "=d,d") + (sign_extend:DI (match_operand:SI 1 "nonimmediate_operand" "d,m")))] + "TARGET_64BIT" + "@ + lgfr\\t%0,%1 + lgf\\t%0,%1" + [(set_attr "op_type" "RRE,RXE") + (set_attr "atype" "reg,mem")]) + +; +; extendhidi2 instruction pattern(s). 
+; + +(define_expand "extendhidi2" + [(set (match_operand:DI 0 "register_operand" "") + (sign_extend:DI (match_operand:HI 1 "register_operand" "")))] + "" + " +{ + if (!TARGET_64BIT) + { + rtx tmp = gen_reg_rtx (SImode); + emit_insn (gen_extendhisi2 (tmp, operands[1])); + emit_insn (gen_extendsidi2 (operands[0], tmp)); + DONE; + } + else + { + operands[1] = gen_lowpart (DImode, operands[1]); + emit_insn (gen_ashldi3 (operands[0], operands[1], GEN_INT (48))); + emit_insn (gen_ashrdi3 (operands[0], operands[0], GEN_INT (48))); + DONE; + } +} +") + +(define_insn "*extendhidi2" + [(set (match_operand:DI 0 "register_operand" "=d") + (sign_extend:DI (match_operand:HI 1 "memory_operand" "m")))] + "TARGET_64BIT" + "lgh\\t%0,%1" + [(set_attr "op_type" "RXE") + (set_attr "atype" "mem")]) + +; +; extendqidi2 instruction pattern(s). +; + +(define_expand "extendqidi2" + [(set (match_operand:DI 0 "register_operand" "") + (sign_extend:DI (match_operand:QI 1 "register_operand" "")))] + "" + " +{ + if (!TARGET_64BIT) + { + rtx tmp = gen_reg_rtx (SImode); + emit_insn (gen_extendqisi2 (tmp, operands[1])); + emit_insn (gen_extendsidi2 (operands[0], tmp)); + DONE; + } + else + { + operands[1] = gen_lowpart (DImode, operands[1]); + emit_insn (gen_ashldi3 (operands[0], operands[1], GEN_INT (56))); + emit_insn (gen_ashrdi3 (operands[0], operands[0], GEN_INT (56))); + DONE; + } +} +") + +(define_split + [(set (match_operand:DI 0 "register_operand" "") + (sign_extend:DI (match_operand:QI 1 "s_operand" "")))] + "TARGET_64BIT && !reload_completed" + [(parallel + [(set (match_dup 0) (unspec:DI [(match_dup 1)] 10)) + (clobber (reg:CC 33))]) + (parallel + [(set (match_dup 0) (ashiftrt:DI (match_dup 0) (const_int 56))) + (clobber (reg:CC 33))])] + "") + +; +; extendhisi2 instruction pattern(s). 
+; + +(define_expand "extendhisi2" + [(set (match_operand:SI 0 "register_operand" "") + (sign_extend:SI (match_operand:HI 1 "register_operand" "")))] + "" + " +{ + operands[1] = gen_lowpart (SImode, operands[1]); + emit_insn (gen_ashlsi3 (operands[0], operands[1], GEN_INT (16))); + emit_insn (gen_ashrsi3 (operands[0], operands[0], GEN_INT (16))); + DONE; +} +") + +(define_insn "*extendhisi2" + [(set (match_operand:SI 0 "register_operand" "=d") + (sign_extend:SI (match_operand:HI 1 "memory_operand" "m")))] + "" + "lh\\t%0,%1" + [(set_attr "op_type" "RX") + (set_attr "atype" "mem")]) + +; +; extendqisi2 instruction pattern(s). +; + +(define_expand "extendqisi2" + [(set (match_operand:SI 0 "register_operand" "") + (sign_extend:SI (match_operand:QI 1 "register_operand" "")))] + "" + " +{ + operands[1] = gen_lowpart (SImode, operands[1]); + emit_insn (gen_ashlsi3 (operands[0], operands[1], GEN_INT (24))); + emit_insn (gen_ashrsi3 (operands[0], operands[0], GEN_INT (24))); + DONE; +} +") + +(define_split + [(set (match_operand:SI 0 "register_operand" "") + (sign_extend:SI (match_operand:QI 1 "s_operand" "")))] + "!reload_completed" + [(parallel + [(set (match_dup 0) (unspec:SI [(match_dup 1)] 10)) + (clobber (reg:CC 33))]) + (parallel + [(set (match_dup 0) (ashiftrt:SI (match_dup 0) (const_int 24))) + (clobber (reg:CC 33))])] + "") + +; +; extendqihi2 instruction pattern(s). +; + + +; +; zero_extendsidi2 instruction pattern(s). 
+; + +(define_expand "zero_extendsidi2" + [(set (match_operand:DI 0 "register_operand" "") + (zero_extend:DI (match_operand:SI 1 "nonimmediate_operand" "")))] + "" + " +{ + if (!TARGET_64BIT) + { + emit_insn (gen_rtx_CLOBBER (VOIDmode, operands[0])); + emit_move_insn (gen_lowpart (SImode, operands[0]), operands[1]); + emit_move_insn (gen_highpart (SImode, operands[0]), const0_rtx); + DONE; + } +} +") + +(define_insn "*zero_extendsidi2" + [(set (match_operand:DI 0 "register_operand" "=d,d") + (zero_extend:DI (match_operand:SI 1 "nonimmediate_operand" "d,m")))] + "TARGET_64BIT" + "@ + llgfr\\t%0,%1 + llgf\\t%0,%1" + [(set_attr "op_type" "RRE,RXE") + (set_attr "atype" "reg,mem")]) + +; +; zero_extendhidi2 instruction pattern(s). +; + +(define_expand "zero_extendhidi2" + [(set (match_operand:DI 0 "register_operand" "") + (zero_extend:DI (match_operand:HI 1 "register_operand" "")))] + "" + " +{ + if (!TARGET_64BIT) + { + rtx tmp = gen_reg_rtx (SImode); + emit_insn (gen_zero_extendhisi2 (tmp, operands[1])); + emit_insn (gen_zero_extendsidi2 (operands[0], tmp)); + DONE; + } + else + { + operands[1] = gen_lowpart (DImode, operands[1]); + emit_insn (gen_ashldi3 (operands[0], operands[1], GEN_INT (48))); + emit_insn (gen_lshrdi3 (operands[0], operands[0], GEN_INT (48))); + DONE; + } +} +") + +(define_insn "*zero_extendhidi2" + [(set (match_operand:DI 0 "register_operand" "=d") + (zero_extend:DI (match_operand:HI 1 "memory_operand" "m")))] + "TARGET_64BIT" + "llgh\\t%0,%1" + [(set_attr "op_type" "RXE") + (set_attr "atype" "mem")]) + +; +; zero_extendqidi2 instruction pattern(s) +; + +(define_expand "zero_extendqidi2" + [(set (match_operand:DI 0 "register_operand" "") + (zero_extend:DI (match_operand:QI 1 "register_operand" "")))] + "" + " +{ + if (!TARGET_64BIT) + { + rtx tmp = gen_reg_rtx (SImode); + emit_insn (gen_zero_extendqisi2 (tmp, operands[1])); + emit_insn (gen_zero_extendsidi2 (operands[0], tmp)); + DONE; + } + else + { + operands[1] = gen_lowpart (DImode, 
operands[1]); + emit_insn (gen_ashldi3 (operands[0], operands[1], GEN_INT (56))); + emit_insn (gen_lshrdi3 (operands[0], operands[0], GEN_INT (56))); + DONE; + } +} +") + +(define_insn "*zero_extendqidi2" + [(set (match_operand:DI 0 "register_operand" "=d") + (zero_extend:DI (match_operand:QI 1 "memory_operand" "m")))] + "TARGET_64BIT" + "llgc\\t%0,%1" + [(set_attr "op_type" "RXE") + (set_attr "atype" "mem")]) + +; +; zero_extendhisi2 instruction pattern(s). +; + +(define_expand "zero_extendhisi2" + [(set (match_operand:SI 0 "register_operand" "") + (zero_extend:SI (match_operand:HI 1 "register_operand" "")))] + "" + " +{ + operands[1] = gen_lowpart (SImode, operands[1]); + emit_insn (gen_andsi3 (operands[0], operands[1], GEN_INT (0xffff))); + DONE; +} +") + +(define_insn "*zero_extendhisi2_64" + [(set (match_operand:SI 0 "register_operand" "=d") + (zero_extend:SI (match_operand:HI 1 "memory_operand" "m")))] + "TARGET_64BIT" + "llgh\\t%0,%1" + [(set_attr "op_type" "RXE") + (set_attr "atype" "mem")]) + +; +; zero_extendqisi2 instruction pattern(s). +; + +(define_expand "zero_extendqisi2" + [(set (match_operand:SI 0 "register_operand" "") + (zero_extend:SI (match_operand:QI 1 "register_operand" "")))] + "" + " +{ + operands[1] = gen_lowpart (SImode, operands[1]); + emit_insn (gen_andsi3 (operands[0], operands[1], GEN_INT (0xff))); + DONE; +} +") + +(define_insn "*zero_extendqisi2_64" + [(set (match_operand:SI 0 "register_operand" "=d") + (zero_extend:SI (match_operand:QI 1 "memory_operand" "m")))] + "TARGET_64BIT" + "llgc\\t%0,%1" + [(set_attr "op_type" "RXE") + (set_attr "atype" "mem")]) + +; +; zero_extendqihi2 instruction pattern(s). 
+; + +(define_expand "zero_extendqihi2" + [(set (match_operand:HI 0 "register_operand" "") + (zero_extend:HI (match_operand:QI 1 "register_operand" "")))] + "TARGET_64BIT" + " +{ + operands[1] = gen_lowpart (HImode, operands[1]); + emit_insn (gen_andhi3 (operands[0], operands[1], GEN_INT (0xff))); + DONE; +} +") + +(define_insn "*zero_extendqihi2_64" + [(set (match_operand:HI 0 "register_operand" "=d") + (zero_extend:HI (match_operand:QI 1 "memory_operand" "m"))) + (clobber (reg:CC 33))] + "TARGET_64BIT" + "llgc\\t%0,%1" + [(set_attr "op_type" "RXE") + (set_attr "atype" "mem")]) + +; +; fixuns_truncdfdi2 and fix_truncdfsi2 instruction pattern(s). +; + +(define_expand "fixuns_truncdfdi2" + [(set (match_operand:DI 0 "register_operand" "") + (unsigned_fix:DI (match_operand:DF 1 "register_operand" "")))] + "TARGET_64BIT && TARGET_HARD_FLOAT && TARGET_IEEE_FLOAT" + " +{ + rtx label1 = gen_label_rtx (); + rtx label2 = gen_label_rtx (); + rtx temp = gen_reg_rtx (DFmode); + operands[1] = force_reg (DFmode, operands[1]); + + emit_insn (gen_cmpdf (operands[1], + CONST_DOUBLE_FROM_REAL_VALUE ( + REAL_VALUE_ATOF (\"9223372036854775808.0\", DFmode), DFmode))); + emit_jump_insn (gen_blt (label1)); + emit_insn (gen_subdf3 (temp, operands[1], + CONST_DOUBLE_FROM_REAL_VALUE ( + REAL_VALUE_ATOF (\"18446744073709551616.0\", DFmode), DFmode))); + emit_insn (gen_fix_truncdfdi2_ieee (operands[0], temp, GEN_INT(7))); + emit_jump (label2); + + emit_label (label1); + emit_insn (gen_fix_truncdfdi2_ieee (operands[0], operands[1], GEN_INT(5))); + emit_label (label2); + DONE; +}") + +(define_expand "fix_truncdfdi2" + [(set (match_operand:DI 0 "register_operand" "") + (fix:DI (match_operand:DF 1 "nonimmediate_operand" "")))] + "TARGET_64BIT && TARGET_HARD_FLOAT && TARGET_IEEE_FLOAT" + " +{ + operands[1] = force_reg (DFmode, operands[1]); + emit_insn (gen_fix_truncdfdi2_ieee (operands[0], operands[1], GEN_INT(5))); + DONE; +}") + +(define_insn "fix_truncdfdi2_ieee" + [(set (match_operand:DI 0 
"register_operand" "=d") + (fix:DI (match_operand:DF 1 "register_operand" "f"))) + (unspec:DI [(match_operand:DI 2 "immediate_operand" "K")] 1) + (clobber (reg:CC 33))] + "TARGET_64BIT && TARGET_HARD_FLOAT && TARGET_IEEE_FLOAT" + "cgdbr\\t%0,%h2,%1" + [(set_attr "op_type" "RRE") + (set_attr "type" "other")]) + +; +; fixuns_truncdfsi2 and fix_truncdfsi2 instruction pattern(s). +; + +(define_expand "fixuns_truncdfsi2" + [(set (match_operand:SI 0 "register_operand" "") + (unsigned_fix:SI (match_operand:DF 1 "register_operand" "")))] + "TARGET_HARD_FLOAT && TARGET_IEEE_FLOAT" + " +{ + rtx label1 = gen_label_rtx (); + rtx label2 = gen_label_rtx (); + rtx temp = gen_reg_rtx (DFmode); + + operands[1] = force_reg (DFmode,operands[1]); + emit_insn (gen_cmpdf (operands[1], + CONST_DOUBLE_FROM_REAL_VALUE ( + REAL_VALUE_ATOF (\"2147483648.0\", DFmode), DFmode))); + emit_jump_insn (gen_blt (label1)); + emit_insn (gen_subdf3 (temp, operands[1], + CONST_DOUBLE_FROM_REAL_VALUE ( + REAL_VALUE_ATOF (\"4294967296.0\", DFmode), DFmode))); + emit_insn (gen_fix_truncdfsi2_ieee (operands[0], temp, GEN_INT (7))); + emit_jump (label2); + + emit_label (label1); + emit_insn (gen_fix_truncdfsi2_ieee (operands[0], operands[1], GEN_INT (5))); + emit_label (label2); + DONE; +}") + +(define_expand "fix_truncdfsi2" + [(set (match_operand:SI 0 "register_operand" "") + (fix:SI (match_operand:DF 1 "nonimmediate_operand" "")))] + "TARGET_HARD_FLOAT" + " +{ + if (TARGET_IBM_FLOAT) + { + /* This is the algorithm from POP chapter A.5.7.2. 
*/ + + rtx temp = assign_stack_local (BLKmode, 2 * UNITS_PER_WORD, BITS_PER_WORD); + rtx two31r = s390_gen_rtx_const_DI (0x4f000000, 0x08000000); + rtx two32 = s390_gen_rtx_const_DI (0x4e000001, 0x00000000); + + operands[1] = force_reg (DFmode, operands[1]); + emit_insn (gen_fix_truncdfsi2_ibm (operands[0], operands[1], + two31r, two32, temp)); + } + else + { + operands[1] = force_reg (DFmode, operands[1]); + emit_insn (gen_fix_truncdfsi2_ieee (operands[0], operands[1], GEN_INT (5))); + } + + DONE; +}") + +(define_insn "fix_truncdfsi2_ieee" + [(set (match_operand:SI 0 "register_operand" "=d") + (fix:SI (match_operand:DF 1 "register_operand" "f"))) + (unspec:SI [(match_operand:SI 2 "immediate_operand" "K")] 1) + (clobber (reg:CC 33))] + "TARGET_HARD_FLOAT && TARGET_IEEE_FLOAT" + "cfdbr\\t%0,%h2,%1" + [(set_attr "op_type" "RRE") + (set_attr "type" "other" )]) + +(define_insn "fix_truncdfsi2_ibm" + [(set (match_operand:SI 0 "register_operand" "=d") + (fix:SI (match_operand:DF 1 "nonimmediate_operand" "+f"))) + (use (match_operand:DI 2 "immediate_operand" "m")) + (use (match_operand:DI 3 "immediate_operand" "m")) + (use (match_operand:BLK 4 "memory_operand" "m")) + (clobber (reg:CC 33))] + "TARGET_HARD_FLOAT && TARGET_IBM_FLOAT" + "* +{ + output_asm_insn (\"sd\\t%1,%2\", operands); + output_asm_insn (\"aw\\t%1,%3\", operands); + output_asm_insn (\"std\\t%1,%4\", operands); + output_asm_insn (\"xi\\t%N4,128\", operands); + return \"l\\t%0,%N4\"; +}" + [(set_attr "op_type" "NN") + (set_attr "type" "other") + (set_attr "length" "20")]) + +; +; fixuns_truncsfdi2 and fix_truncsfdi2 instruction pattern(s). 
+; + +(define_expand "fixuns_truncsfdi2" + [(set (match_operand:DI 0 "register_operand" "") + (unsigned_fix:DI (match_operand:SF 1 "register_operand" "")))] + "TARGET_64BIT && TARGET_HARD_FLOAT && TARGET_IEEE_FLOAT" + " +{ + rtx label1 = gen_label_rtx (); + rtx label2 = gen_label_rtx (); + rtx temp = gen_reg_rtx (SFmode); + + operands[1] = force_reg (SFmode, operands[1]); + emit_insn (gen_cmpsf (operands[1], + CONST_DOUBLE_FROM_REAL_VALUE ( + REAL_VALUE_ATOF (\"9223372036854775808.0\", SFmode), SFmode))); + emit_jump_insn (gen_blt (label1)); + + emit_insn (gen_subsf3 (temp, operands[1], + CONST_DOUBLE_FROM_REAL_VALUE ( + REAL_VALUE_ATOF (\"18446744073709551616.0\", SFmode), SFmode))); + emit_insn (gen_fix_truncsfdi2_ieee (operands[0], temp, GEN_INT(7))); + emit_jump (label2); + + emit_label (label1); + emit_insn (gen_fix_truncsfdi2_ieee (operands[0], operands[1], GEN_INT(5))); + emit_label (label2); + DONE; +}") + +(define_expand "fix_truncsfdi2" + [(set (match_operand:DI 0 "register_operand" "") + (fix:DI (match_operand:SF 1 "nonimmediate_operand" "")))] + "TARGET_64BIT && TARGET_HARD_FLOAT && TARGET_IEEE_FLOAT" + " +{ + operands[1] = force_reg (SFmode, operands[1]); + emit_insn (gen_fix_truncsfdi2_ieee (operands[0], operands[1], GEN_INT(5))); + DONE; +}") + +(define_insn "fix_truncsfdi2_ieee" + [(set (match_operand:DI 0 "register_operand" "=d") + (fix:DI (match_operand:SF 1 "register_operand" "f"))) + (unspec:DI [(match_operand:DI 2 "immediate_operand" "K")] 1) + (clobber (reg:CC 33))] + "TARGET_64BIT && TARGET_HARD_FLOAT && TARGET_IEEE_FLOAT" + "cgebr\\t%0,%h2,%1" + [(set_attr "op_type" "RRE") + (set_attr "type" "other")]) + +; +; fixuns_truncsfsi2 and fix_truncsfsi2 instruction pattern(s). 
+; + +(define_expand "fixuns_truncsfsi2" + [(set (match_operand:SI 0 "register_operand" "") + (unsigned_fix:SI (match_operand:SF 1 "register_operand" "")))] + "TARGET_HARD_FLOAT && TARGET_IEEE_FLOAT" + " +{ + rtx label1 = gen_label_rtx (); + rtx label2 = gen_label_rtx (); + rtx temp = gen_reg_rtx (SFmode); + + operands[1] = force_reg (SFmode, operands[1]); + emit_insn (gen_cmpsf (operands[1], + CONST_DOUBLE_FROM_REAL_VALUE ( + REAL_VALUE_ATOF (\"2147483648.0\", SFmode), SFmode))); + emit_jump_insn (gen_blt (label1)); + emit_insn (gen_subsf3 (temp, operands[1], + CONST_DOUBLE_FROM_REAL_VALUE ( + REAL_VALUE_ATOF (\"4294967296.0\", SFmode), SFmode))); + emit_insn (gen_fix_truncsfsi2_ieee (operands[0], temp, GEN_INT (7))); + emit_jump (label2); + + emit_label (label1); + emit_insn (gen_fix_truncsfsi2_ieee (operands[0], operands[1], GEN_INT (5))); + emit_label (label2); + DONE; +}") + +(define_expand "fix_truncsfsi2" + [(set (match_operand:SI 0 "register_operand" "") + (fix:SI (match_operand:SF 1 "nonimmediate_operand" "")))] + "TARGET_HARD_FLOAT" + " +{ + if (TARGET_IBM_FLOAT) + { + /* Convert to DFmode and then use the POP algorithm. */ + rtx temp = gen_reg_rtx (DFmode); + emit_insn (gen_extendsfdf2 (temp, operands[1])); + emit_insn (gen_fix_truncdfsi2 (operands[0], temp)); + } + else + { + operands[1] = force_reg (SFmode, operands[1]); + emit_insn (gen_fix_truncsfsi2_ieee (operands[0], operands[1], GEN_INT (5))); + } + + DONE; +}") + +(define_insn "fix_truncsfsi2_ieee" + [(set (match_operand:SI 0 "register_operand" "=d") + (fix:SI (match_operand:SF 1 "register_operand" "f"))) + (unspec:SI [(match_operand:SI 2 "immediate_operand" "K")] 1) + (clobber (reg:CC 33))] + "TARGET_HARD_FLOAT && TARGET_IEEE_FLOAT" + "cfebr\\t%0,%h2,%1" + [(set_attr "op_type" "RRE") + (set_attr "type" "other")]) + +; +; floatdidf2 instruction pattern(s). 
+; + +(define_insn "floatdidf2" + [(set (match_operand:DF 0 "register_operand" "=f") + (float:DF (match_operand:DI 1 "register_operand" "d"))) + (clobber (reg:CC 33))] + "TARGET_64BIT && TARGET_HARD_FLOAT && TARGET_IEEE_FLOAT" + "cdgbr\\t%0,%1" + [(set_attr "op_type" "RRE") + (set_attr "type" "other" )]) + +; +; floatdisf2 instruction pattern(s). +; + +(define_insn "floatdisf2" + [(set (match_operand:SF 0 "register_operand" "=f") + (float:SF (match_operand:DI 1 "register_operand" "d"))) + (clobber (reg:CC 33))] + "TARGET_64BIT && TARGET_HARD_FLOAT && TARGET_IEEE_FLOAT" + "cegbr\\t%0,%1" + [(set_attr "op_type" "RRE") + (set_attr "type" "other" )]) + +; +; floatsidf2 instruction pattern(s). +; + +(define_expand "floatsidf2" + [(parallel + [(set (match_operand:DF 0 "register_operand" "") + (float:DF (match_operand:SI 1 "register_operand" ""))) + (clobber (reg:CC 33))])] + "TARGET_HARD_FLOAT" + " +{ + if (TARGET_IBM_FLOAT) + { + /* This is the algorithm from POP chapter A.5.7.1. */ + + rtx temp = assign_stack_local (BLKmode, 2 * UNITS_PER_WORD, BITS_PER_WORD); + rtx two31 = s390_gen_rtx_const_DI (0x4e000000, 0x80000000); + + emit_insn (gen_floatsidf2_ibm (operands[0], operands[1], two31, temp)); + DONE; + } +}") + +(define_insn "floatsidf2_ieee" + [(set (match_operand:DF 0 "register_operand" "=f") + (float:DF (match_operand:SI 1 "register_operand" "d"))) + (clobber (reg:CC 33))] + "TARGET_HARD_FLOAT && TARGET_IEEE_FLOAT" + "cdfbr\\t%0,%1" + [(set_attr "op_type" "RRE") + (set_attr "type" "other" )]) + +(define_insn "floatsidf2_ibm" + [(set (match_operand:DF 0 "register_operand" "=f") + (float:DF (match_operand:SI 1 "register_operand" "d"))) + (use (match_operand:DI 2 "immediate_operand" "m")) + (use (match_operand:BLK 3 "memory_operand" "m")) + (clobber (reg:CC 33))] + "TARGET_HARD_FLOAT && TARGET_IBM_FLOAT" + "* +{ + output_asm_insn (\"st\\t%1,%N3\", operands); + output_asm_insn (\"xi\\t%N3,128\", operands); + output_asm_insn (\"mvc\\t%O3(4,%R3),%2\", operands); + 
output_asm_insn (\"ld\\t%0,%3\", operands); + return \"sd\\t%0,%2\"; +}" + [(set_attr "op_type" "NN") + (set_attr "type" "other" ) + (set_attr "length" "20")]) + +; +; floatsisf2 instruction pattern(s). +; + +(define_expand "floatsisf2" + [(parallel + [(set (match_operand:SF 0 "register_operand" "") + (float:SF (match_operand:SI 1 "register_operand" ""))) + (clobber (reg:CC 33))])] + "TARGET_HARD_FLOAT" + " +{ + if (TARGET_IBM_FLOAT) + { + /* Use the POP algorithm to convert to DFmode and then truncate. */ + rtx temp = gen_reg_rtx (DFmode); + emit_insn (gen_floatsidf2 (temp, operands[1])); + emit_insn (gen_truncdfsf2 (operands[0], temp)); + DONE; + } +}") + +(define_insn "floatsisf2_ieee" + [(set (match_operand:SF 0 "register_operand" "=f") + (float:SF (match_operand:SI 1 "register_operand" "d"))) + (clobber (reg:CC 33))] + "TARGET_HARD_FLOAT && TARGET_IEEE_FLOAT" + "cefbr\\t%0,%1" + [(set_attr "op_type" "RRE") + (set_attr "type" "other" )]) + +; +; truncdfsf2 instruction pattern(s). +; + +(define_expand "truncdfsf2" + [(set (match_operand:SF 0 "register_operand" "") + (float_truncate:SF (match_operand:DF 1 "general_operand" "")))] + "TARGET_HARD_FLOAT" + "") + +(define_insn "truncdfsf2_ieee" + [(set (match_operand:SF 0 "register_operand" "=f") + (float_truncate:SF (match_operand:DF 1 "general_operand" "f")))] + "TARGET_HARD_FLOAT && TARGET_IEEE_FLOAT" + "ledbr\\t%0,%1" + [(set_attr "op_type" "RRE")]) + +(define_insn "truncdfsf2_ibm" + [(set (match_operand:SF 0 "register_operand" "=f,f") + (float_truncate:SF (match_operand:DF 1 "general_operand" "f,m")))] + "TARGET_HARD_FLOAT && TARGET_IBM_FLOAT" + "@ + lrer\\t%0,%1 + le\\t%0,%1" + [(set_attr "op_type" "RR,RX") + (set_attr "atype" "reg,mem")]) + +; +; extendsfdf2 instruction pattern(s). 
+; + +(define_expand "extendsfdf2" + [(set (match_operand:DF 0 "register_operand" "") + (float_extend:DF (match_operand:SF 1 "nonimmediate_operand" "")))] + "TARGET_HARD_FLOAT" + " +{ + if (TARGET_IBM_FLOAT) + { + emit_insn (gen_extendsfdf2_ibm (operands[0], operands[1])); + DONE; + } +}") + +(define_insn "extendsfdf2_ieee" + [(set (match_operand:DF 0 "register_operand" "=f,f") + (float_extend:DF (match_operand:SF 1 "nonimmediate_operand" "f,m")))] + "TARGET_HARD_FLOAT && TARGET_IEEE_FLOAT" + "@ + ldebr\\t%0,%1 + ldeb\\t%0,%1" + [(set_attr "op_type" "RRE,RXE")]) + +(define_insn "extendsfdf2_ibm" + [(set (match_operand:DF 0 "register_operand" "=f,f") + (float_extend:DF (match_operand:SF 1 "nonimmediate_operand" "f,m"))) + (clobber (reg:CC 33))] + "TARGET_HARD_FLOAT && TARGET_IBM_FLOAT" + "@ + sdr\\t%0,%0\;ler\\t%0,%1 + sdr\\t%0,%0\;le\\t%0,%1" + [(set_attr "op_type" "RRE,RXE") + (set_attr "atype" "reg,mem") + (set_attr "type" "o2,o2")]) + + +;; +;; ARITHMETRIC OPERATIONS +;; +; arithmetric operations set the ConditionCode, +; because of unpredictable Bits in Register for Halfword and Byte +; the ConditionCode can be set wrong in operations for Halfword and Byte + +;; +;;- Add instructions. +;; + +; +; adddi3 instruction pattern(s). 
+; + +(define_insn "addaddr_esame" + [(set (match_operand:DI 0 "register_operand" "=a,a") + (plus:DI (match_operand:DI 1 "register_operand" "%a,a") + (match_operand:DI 2 "nonmemory_operand" "J,a")))] + "TARGET_64BIT && (((REGNO (operands[1]) == STACK_POINTER_REGNUM ) || + (REGNO (operands[1]) == BASE_REGISTER)) && + (GET_CODE (operands[2]) == REG || + CONST_OK_FOR_LETTER_P (INTVAL (operands[2]),'J')))" + "@ + la\\t%0,%c2(,%1) + la\\t%0,0(%1,%2)" + [(set_attr "op_type" "RX") + (set_attr "atype" "mem") + (set_attr "type" "la")]) + +(define_insn "adddi3_64" + [(set (match_operand:DI 0 "register_operand" "=d,d,d") + (plus:DI (match_operand:DI 1 "register_operand" "%0,0,0") + (match_operand:DI 2 "general_operand" "d,K,m") ) ) + (clobber (reg:CC 33))] + "TARGET_64BIT" + "@ + agr\\t%0,%2 + aghi\\t%0,%h2 + ag\\t%0,%2" + [(set_attr "op_type" "RRE,RI,RXE") + (set_attr "atype" "reg,reg,mem")]) + +(define_insn "adddi3_31" + [(set (match_operand:DI 0 "register_operand" "=d,d") + (plus:DI (match_operand:DI 1 "register_operand" "0,0") + (match_operand:DI 2 "general_operand" "d,m") ) ) + (clobber (reg:CC 33))] + "!TARGET_64BIT" + "* +{ + switch (which_alternative) + { + case 0: /* d <- d */ + output_asm_insn (\"ar\\t%0,%2\", operands); + output_asm_insn (\"alr\\t%N0,%N2\", operands); + break; + + case 1: /* d <- m */ + output_asm_insn (\"a\\t%0,%2\", operands); + output_asm_insn (\"al\\t%N0,%N2\", operands); + break; + + default: + abort (); + } + + output_asm_insn (\"brc\\t12,.+8\", operands); + return \"ahi\\t%0,1\"; +}" + [(set_attr "op_type" "NN,NN") + (set_attr "atype" "reg,mem") + (set_attr "type" "o2,o2") + (set_attr "length" "12,16")]) + +(define_expand "adddi3" + [(set (match_operand:DI 0 "register_operand" "") + (plus:DI (match_operand:DI 1 "register_operand" "") + (match_operand:DI 2 "general_operand" "")))] + "" + " +{ + if (TARGET_64BIT) + emit_insn(gen_adddi3_64 (operands[0],operands[1],operands[2])); + else + emit_insn(gen_adddi3_31 
(operands[0],operands[1],operands[2])); + DONE; +}") + +(define_insn "*la_64" + [(set (match_operand:DI 0 "register_operand" "=d") + (match_operand:QI 1 "address_operand" "p"))] + "TARGET_64BIT" + "la\\t%0,%a1" + [(set_attr "op_type" "RX") + (set_attr "atype" "mem") + (set_attr "type" "la")]) + +(define_expand "reload_indi" + [(parallel [(match_operand:DI 0 "register_operand" "=a") + (match_operand:DI 1 "s390_plus_operand" "") + (match_operand:TI 2 "register_operand" "=&a")])] + "TARGET_64BIT" + " +{ + s390_expand_plus_operand (operands[0], operands[1], operands[2]); + DONE; +}") + + +; +; addsi3 instruction pattern(s). +; + +(define_insn "*la_ccclobber" + [(set (match_operand:SI 0 "register_operand" "=d") + (match_operand:QI 1 "address_operand" "p")) + (clobber (reg:CC 33))] + "legitimate_la_operand_p (operands[1])" + "la\\t%0,%a1" + [(set_attr "op_type" "RX") + (set_attr "atype" "mem") + (set_attr "type" "la")]) + +(define_insn "*addsi3_cc" + [(set (reg 33) + (compare (plus:SI (match_operand:SI 1 "register_operand" "%0,0") + (match_operand:SI 2 "nonimmediate_operand" "d,m")) + (const_int 0))) + (set (match_operand:SI 0 "register_operand" "=d,d") + (plus:SI (match_dup 1) (match_dup 2)))] + "s390_match_ccmode(insn, CCLmode)" + "@ + alr\\t%0,%2 + al\\t%0,%2" + [(set_attr "op_type" "RR,RX") + (set_attr "atype" "reg,mem")]) + +(define_insn "*addsi3_cconly" + [(set (reg 33) + (compare (plus:SI (match_operand:SI 1 "register_operand" "%0,0") + (match_operand:SI 2 "general_operand" "d,m")) + (const_int 0))) + (clobber (match_scratch:SI 0 "=d,d"))] + "s390_match_ccmode(insn, CCLmode)" + "@ + alr\\t%0,%2 + al\\t%0,%2" + [(set_attr "op_type" "RR,RX") + (set_attr "atype" "reg,mem")]) + +(define_insn "*addsi3_cconly2" + [(set (reg 33) + (compare (match_operand:SI 1 "register_operand" "%0,0") + (neg:SI (match_operand:SI 2 "general_operand" "d,m")))) + (clobber (match_scratch:SI 0 "=d,d"))] + "s390_match_ccmode(insn, CCLmode)" + "@ + alr\\t%0,%2 + al\\t%0,%2" + [(set_attr 
"op_type" "RR,RX") + (set_attr "atype" "reg,mem")]) + +(define_insn "addsi3" + [(set (match_operand:SI 0 "register_operand" "=d,d,d") + (plus:SI (match_operand:SI 1 "register_operand" "%0,0,0") + (match_operand:SI 2 "general_operand" "d,K,m"))) + (clobber (reg:CC 33))] + "" + "@ + ar\\t%0,%2 + ahi\\t%0,%h2 + a\\t%0,%2" + [(set_attr "op_type" "RR,RI,RX") + (set_attr "atype" "reg,reg,mem")]) + +(define_insn "*addsi3_inv" + [(set (match_operand:SI 0 "register_operand" "=d,d,d") + (plus:SI (match_operand:SI 1 "general_operand" "%d,K,m") + (match_operand:SI 2 "register_operand" "0,0,0"))) + (clobber (reg:CC 33))] + "" + "@ + ar\\t%0,%1 + ahi\\t%0,%h1 + a\\t%0,%1" + [(set_attr "op_type" "RR,RI,RX") + (set_attr "atype" "reg,reg,mem")]) + +(define_insn "*la_31" + [(set (match_operand:SI 0 "register_operand" "=d") + (match_operand:QI 1 "address_operand" "p"))] + "legitimate_la_operand_p (operands[1])" + "la\\t%0,%a1" + [(set_attr "op_type" "RX") + (set_attr "atype" "mem") + (set_attr "type" "la")]) + +(define_expand "reload_insi" + [(parallel [(match_operand:SI 0 "register_operand" "=a") + (match_operand:SI 1 "s390_plus_operand" "") + (match_operand:DI 2 "register_operand" "=&a")])] + "!TARGET_64BIT" + " +{ + s390_expand_plus_operand (operands[0], operands[1], operands[2]); + DONE; +}") + + +; +; addhi3 instruction pattern(s). +; + +(define_insn "addhi3" + [(set (match_operand:HI 0 "register_operand" "=d,d,d") + (plus:HI (match_operand:HI 1 "register_operand" "%0,0,0") + (match_operand:HI 2 "general_operand" "d,K,m"))) + (clobber (reg:CC 33))] + "" + "@ + ar\\t%0,%2 + ahi\\t%0,%h2 + ah\\t%0,%2" + [(set_attr "op_type" "RR,RI,RX") + (set_attr "atype" "reg,reg,mem")]) + + +; +; addqi3 instruction pattern(s). 
+; + +(define_insn "addqi3" + [(set (match_operand:QI 0 "register_operand" "=d,d") + (plus:QI (match_operand:QI 1 "register_operand" "%0,0") + (match_operand:QI 2 "general_operand" "a,n"))) + (clobber (reg:CC 33))] + "" + "@ + ar\\t%0,%2 + ahi\\t%0,%h2" + [(set_attr "op_type" "RX,RX") + (set_attr "atype" "reg,mem")]) + + +; +; adddf3 instruction pattern(s). +; + +(define_expand "adddf3" + [(parallel + [(set (match_operand:DF 0 "register_operand" "=f,f") + (plus:DF (match_operand:DF 1 "register_operand" "%0,0") + (match_operand:DF 2 "general_operand" "f,m"))) + (clobber (reg:CC 33))])] + "TARGET_HARD_FLOAT" + "") + +(define_insn "*adddf3" + [(set (match_operand:DF 0 "register_operand" "=f,f") + (plus:DF (match_operand:DF 1 "register_operand" "%0,0") + (match_operand:DF 2 "general_operand" "f,m"))) + (clobber (reg:CC 33))] + "TARGET_HARD_FLOAT && TARGET_IEEE_FLOAT" + "@ + adbr\\t%0,%2 + adb\\t%0,%2" + [(set_attr "op_type" "RRE,RXE") + (set_attr "atype" "reg,mem")]) + +(define_insn "*adddf3_ibm" + [(set (match_operand:DF 0 "register_operand" "=f,f") + (plus:DF (match_operand:DF 1 "register_operand" "%0,0") + (match_operand:DF 2 "general_operand" "f,m"))) + (clobber (reg:CC 33))] + "TARGET_HARD_FLOAT && TARGET_IBM_FLOAT" + "@ + adr\\t%0,%2 + ad\\t%0,%2" + [(set_attr "op_type" "RR,RX") + (set_attr "atype" "reg,mem")]) + +; +; addsf3 instruction pattern(s). 
;

; Expander only wraps the add in a parallel with the CC clobber;
; instruction selection picks the IEEE or IBM variant below.
(define_expand "addsf3"
  [(parallel
    [(set (match_operand:SF 0 "register_operand" "=f,f")
          (plus:SF (match_operand:SF 1 "register_operand" "%0,0")
                   (match_operand:SF 2 "general_operand" "f,m")))
     (clobber (reg:CC 33))])]
  "TARGET_HARD_FLOAT"
  "")

; IEEE (binary floating point) single add.
(define_insn "*addsf3"
  [(set (match_operand:SF 0 "register_operand" "=f,f")
        (plus:SF (match_operand:SF 1 "register_operand" "%0,0")
                 (match_operand:SF 2 "general_operand" "f,m")))
   (clobber (reg:CC 33))]
  "TARGET_HARD_FLOAT && TARGET_IEEE_FLOAT"
  "@
   aebr\\t%0,%2
   aeb\\t%0,%2"
  [(set_attr "op_type" "RRE,RXE")
   (set_attr "atype"   "reg,mem")])

; IBM (hexadecimal floating point) single add.
; Renamed from a duplicate "*addsf3" to "*addsf3_ibm" for consistency
; with *adddf3_ibm / *subsf3_ibm / *mulsf3_ibm in this file.
(define_insn "*addsf3_ibm"
  [(set (match_operand:SF 0 "register_operand" "=f,f")
        (plus:SF (match_operand:SF 1 "register_operand" "%0,0")
                 (match_operand:SF 2 "general_operand" "f,m")))
   (clobber (reg:CC 33))]
  "TARGET_HARD_FLOAT && TARGET_IBM_FLOAT"
  "@
   aer\\t%0,%2
   ae\\t%0,%2"
  [(set_attr "op_type" "RR,RX")
   (set_attr "atype"   "reg,mem")])


;;
;;- Subtract instructions.
;;

;
; subdi3 instruction pattern(s).
;

; 64-bit DImode subtract.  Matched in preference to the generic
; "subdi3" pair sequence below when TARGET_64BIT (recog tries this
; pattern first; the RTL of both patterns is identical).
; Fixed: op_type for the memory alternative was "RRE", but "sg" is an
; RXE-format instruction (cf. adddi3_64's "ag").
(define_insn "*subdi3_64"
  [(set (match_operand:DI 0 "register_operand" "=d,d")
        (minus:DI (match_operand:DI 1 "register_operand" "0,0")
                  (match_operand:DI 2 "general_operand" "d,m") ) )
   (clobber (reg:CC 33))]
  "TARGET_64BIT"
  "@
   sgr\\t%0,%2
   sg\\t%0,%2"
  [(set_attr "op_type" "RRE,RXE")
   (set_attr "atype"   "reg,mem")])

; 31-bit DImode subtract as a register-pair sequence: subtract the
; high words, logically subtract the low words, then propagate the
; borrow (brc 11 skips the "ahi %0,-1" correction when no borrow
; occurred).  Condition is "" so gen_subdi3 is always available; on
; 64-bit targets recog resolves the identical RTL to *subdi3_64 above.
(define_insn "subdi3"
  [(set (match_operand:DI 0 "register_operand" "=d,d")
        (minus:DI (match_operand:DI 1 "register_operand" "0,0")
                  (match_operand:DI 2 "general_operand" "d,m")))
   (clobber (reg:CC 33))]
  ""
  "*
{
  switch (which_alternative)
    {
      case 0: /* d <- d */
        output_asm_insn (\"sr\\t%0,%2\", operands);
        output_asm_insn (\"slr\\t%N0,%N2\", operands);
        break;
      case 1: /* d <- m */
        output_asm_insn (\"s\\t%0,%2\", operands);
        output_asm_insn (\"sl\\t%N0,%N2\", operands);
        break;

      default:
        abort ();
    }

  output_asm_insn (\"brc\\t11,.+8\", operands);
  return \"ahi\\t%0,-1\";
}"
  [(set_attr "op_type" "NN,NN")
   (set_attr "atype"   "reg,mem")
   (set_attr "type"    "other,other")
   (set_attr "length"  "12,16")])

;
; subsi3 instruction pattern(s).
+; + +(define_insn "*subsi3_cc" + [(set (reg 33) + (compare (minus:SI (match_operand:SI 1 "register_operand" "0,0") + (match_operand:SI 2 "general_operand" "d,m")) + (const_int 0))) + (set (match_operand:SI 0 "register_operand" "=d,d") + (minus:SI (match_dup 1) (match_dup 2)))] + "s390_match_ccmode(insn, CCLmode)" + "@ + slr\\t%0,%2 + sl\\t%0,%2" + [(set_attr "op_type" "RR,RX") + (set_attr "atype" "reg,mem")]) + +(define_insn "*subsi3_cconly" + [(set (reg 33) + (compare (minus:SI (match_operand:SI 1 "register_operand" "0,0") + (match_operand:SI 2 "general_operand" "d,m")) + (const_int 0))) + (clobber (match_scratch:SI 0 "=d,d"))] + "s390_match_ccmode(insn, CCLmode)" + "@ + slr\\t%0,%2 + sl\\t%0,%2" + [(set_attr "op_type" "RR,RX") + (set_attr "atype" "reg,mem")]) + +(define_insn "subsi3" + [(set (match_operand:SI 0 "register_operand" "=d,d") + (minus:SI (match_operand:SI 1 "register_operand" "0,0") + (match_operand:SI 2 "general_operand" "d,m"))) + (clobber (reg:CC 33))] + "" + "@ + sr\\t%0,%2 + s\\t%0,%2" + [(set_attr "op_type" "RR,RX") + (set_attr "atype" "reg,mem")]) + +; +; subhi3 instruction pattern(s). +; + +(define_insn "subhi3" + [(set (match_operand:HI 0 "register_operand" "=d,d") + (minus:HI (match_operand:HI 1 "register_operand" "0,0") + (match_operand:HI 2 "general_operand" "d,m"))) + (clobber (reg:CC 33))] + "" + "@ + sr\\t%0,%2 + sh\\t%0,%2" + [(set_attr "op_type" "RR,RX") + (set_attr "atype" "reg,mem")]) + +; +; subqi3 instruction pattern(s). +; + +(define_insn "subqi3" + [(set (match_operand:QI 0 "register_operand" "=d") + (minus:QI (match_operand:QI 1 "register_operand" "0") + (match_operand:QI 2 "register_operand" "d"))) + (clobber (reg:CC 33))] + "" + "sr\\t%0,%2" + [(set_attr "op_type" "RR")]) + +; +; subdf3 instruction pattern(s). 
+; + +(define_expand "subdf3" + [(parallel + [(set (match_operand:DF 0 "register_operand" "=f,f") + (minus:DF (match_operand:DF 1 "register_operand" "0,0") + (match_operand:DF 2 "general_operand" "f,m"))) + (clobber (reg:CC 33))])] + "TARGET_HARD_FLOAT" + "") + +(define_insn "*subdf3" + [(set (match_operand:DF 0 "register_operand" "=f,f") + (minus:DF (match_operand:DF 1 "register_operand" "0,0") + (match_operand:DF 2 "general_operand" "f,m"))) + (clobber (reg:CC 33))] + "TARGET_HARD_FLOAT && TARGET_IEEE_FLOAT" + "@ + sdbr\\t%0,%2 + sdb\\t%0,%2" + [(set_attr "op_type" "RRE,RXE") + (set_attr "atype" "reg,mem")]) + +(define_insn "*subdf3_ibm" + [(set (match_operand:DF 0 "register_operand" "=f,f") + (minus:DF (match_operand:DF 1 "register_operand" "0,0") + (match_operand:DF 2 "general_operand" "f,m"))) + (clobber (reg:CC 33))] + "TARGET_HARD_FLOAT && TARGET_IBM_FLOAT" + "@ + sdr\\t%0,%2 + sd\\t%0,%2" + [(set_attr "op_type" "RR,RX") + (set_attr "atype" "reg,mem")]) + +; +; subsf3 instruction pattern(s). 
+; + +(define_expand "subsf3" + [(parallel + [(set (match_operand:SF 0 "register_operand" "=f,f") + (minus:SF (match_operand:SF 1 "register_operand" "0,0") + (match_operand:SF 2 "general_operand" "f,m"))) + (clobber (reg:CC 33))])] + "TARGET_HARD_FLOAT" + "") + +(define_insn "*subsf3" + [(set (match_operand:SF 0 "register_operand" "=f,f") + (minus:SF (match_operand:SF 1 "register_operand" "0,0") + (match_operand:SF 2 "general_operand" "f,m"))) + (clobber (reg:CC 33))] + "TARGET_HARD_FLOAT && TARGET_IEEE_FLOAT" + "@ + sebr\\t%0,%2 + seb\\t%0,%2" + [(set_attr "op_type" "RRE,RXE") + (set_attr "atype" "reg,mem")]) + +(define_insn "*subsf3_ibm" + [(set (match_operand:SF 0 "register_operand" "=f,f") + (minus:SF (match_operand:SF 1 "register_operand" "0,0") + (match_operand:SF 2 "general_operand" "f,m"))) + (clobber (reg:CC 33))] + "TARGET_HARD_FLOAT && TARGET_IBM_FLOAT" + "@ + ser\\t%0,%2 + se\\t%0,%2" + [(set_attr "op_type" "RR,RX") + (set_attr "atype" "reg,mem")]) + + +;; +;;- Multiply instructions. +;; + +; +; muldi3 instruction pattern(s). +; + +(define_insn "muldi3" + [(set (match_operand:DI 0 "register_operand" "=d,d,d") + (mult:DI (match_operand:DI 1 "register_operand" "%0,0,0") + (match_operand:DI 2 "general_operand" "d,K,m"))) + (clobber (reg:CC 33))] + "TARGET_64BIT" + "@ + msgr\\t%0,%2 + mghi\\t%0,%h2 + msg\\t%0,%2" + [(set_attr "op_type" "RRE,RI,RX") + (set_attr "atype" "reg,reg,mem") + (set_attr "type" "imul")]) + +; +; mulsi3 instruction pattern(s). +; + +(define_insn "mulsi3" + [(set (match_operand:SI 0 "register_operand" "=d,d,d") + (mult:SI (match_operand:SI 1 "register_operand" "%0,0,0") + (match_operand:SI 2 "general_operand" "d,K,m"))) + (clobber (reg:CC 33))] + "" + "@ + msr\\t%0,%2 + mhi\\t%0,%h2 + ms\\t%0,%2" + [(set_attr "op_type" "RRE,RI,RX") + (set_attr "atype" "reg,reg,mem") + (set_attr "type" "imul")]) + +; +; mulsidi3 instruction pattern(s). 
+; + +(define_expand "mulsidi3" + [(set (match_operand:DI 0 "register_operand" "") + (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "")) + (sign_extend:DI (match_operand:SI 2 "nonimmediate_operand" ""))))] + "!TARGET_64BIT" + " +{ + rtx insn; + + emit_insn (gen_zero_extendsidi2 (operands[0], operands[1])); + insn = emit_insn (gen_mulsi_6432 (operands[0], operands[0], operands[2])); + + REG_NOTES (insn) = + gen_rtx_EXPR_LIST (REG_EQUAL, + gen_rtx_MULT (DImode, + gen_rtx_SIGN_EXTEND (DImode, operands[1]), + gen_rtx_SIGN_EXTEND (DImode, operands[2])), + REG_NOTES (insn)); + DONE; +}") + +(define_insn "mulsi_6432" + [(set (match_operand:DI 0 "register_operand" "=d,d") + (mult:DI (sign_extend:DI + (truncate:SI (match_operand:DI 1 "register_operand" "0,0"))) + (sign_extend:DI + (match_operand:SI 2 "nonimmediate_operand" "d,m")))) + (clobber (reg:CC 33))] + "!TARGET_64BIT" + "@ + mr\\t%0,%2 + m\\t%0,%2" + [(set_attr "op_type" "RR,RX") + (set_attr "atype" "reg,mem") + (set_attr "type" "imul")]) + +; +; muldf3 instruction pattern(s). 
+; + +(define_expand "muldf3" + [(parallel + [(set (match_operand:DF 0 "register_operand" "=f,f") + (mult:DF (match_operand:DF 1 "register_operand" "%0,0") + (match_operand:DF 2 "general_operand" "f,m"))) + (clobber (reg:CC 33))])] + "TARGET_HARD_FLOAT" + "") + +(define_insn "*muldf3" + [(set (match_operand:DF 0 "register_operand" "=f,f") + (mult:DF (match_operand:DF 1 "register_operand" "%0,0") + (match_operand:DF 2 "general_operand" "f,m"))) + (clobber (reg:CC 33))] + "TARGET_HARD_FLOAT && TARGET_IEEE_FLOAT" + "@ + mdbr\\t%0,%2 + mdb\\t%0,%2" + [(set_attr "op_type" "RRE,RXE") + (set_attr "type" "fmul") + (set_attr "atype" "reg,mem")]) + +(define_insn "*muldf3_ibm" + [(set (match_operand:DF 0 "register_operand" "=f,f") + (mult:DF (match_operand:DF 1 "register_operand" "%0,0") + (match_operand:DF 2 "general_operand" "f,m"))) + (clobber (reg:CC 33))] + "TARGET_HARD_FLOAT && TARGET_IBM_FLOAT" + "@ + mdr\\t%0,%2 + md\\t%0,%2" + [(set_attr "op_type" "RR,RX") + (set_attr "type" "fmul") + (set_attr "atype" "reg,mem")]) + +; +; mulsf3 instruction pattern(s). 
+; + +(define_expand "mulsf3" + [(parallel + [(set (match_operand:SF 0 "register_operand" "=f,f") + (mult:SF (match_operand:SF 1 "register_operand" "%0,0") + (match_operand:SF 2 "general_operand" "f,m"))) + (clobber (reg:CC 33))])] + "TARGET_HARD_FLOAT" + "") + +(define_insn "*mulsf3" + [(set (match_operand:SF 0 "register_operand" "=f,f") + (mult:SF (match_operand:SF 1 "register_operand" "%0,0") + (match_operand:SF 2 "general_operand" "f,m"))) + (clobber (reg:CC 33))] + "TARGET_HARD_FLOAT && TARGET_IEEE_FLOAT" + "@ + meebr\\t%0,%2 + meeb\\t%0,%2" + [(set_attr "op_type" "RRE,RXE") + (set_attr "type" "fmul") + (set_attr "atype" "reg,mem")]) + +(define_insn "*mulsf3_ibm" + [(set (match_operand:SF 0 "register_operand" "=f,f") + (mult:SF (match_operand:SF 1 "register_operand" "%0,0") + (match_operand:SF 2 "general_operand" "f,m"))) + (clobber (reg:CC 33))] + "TARGET_HARD_FLOAT && TARGET_IBM_FLOAT" + "@ + mer\\t%0,%2 + me\\t%0,%2" + [(set_attr "op_type" "RR,RX") + (set_attr "type" "fmul") + (set_attr "atype" "reg,mem")]) + + +;; +;;- Divide and modulo instructions. +;; + +; +; divmoddi4 instruction pattern(s). 
+; + +(define_expand "divmoddi4" + [(parallel [(set (match_operand:DI 0 "general_operand" "") + (div:DI (match_operand:DI 1 "general_operand" "") + (match_operand:DI 2 "general_operand" ""))) + (set (match_operand:DI 3 "general_operand" "") + (mod:DI (match_dup 1) (match_dup 2)))]) + (clobber (match_dup 4))] + "TARGET_64BIT" + " +{ + rtx insn, div_equal, mod_equal, equal; + + div_equal = gen_rtx_DIV (DImode, operands[1], operands[2]); + mod_equal = gen_rtx_MOD (DImode, operands[1], operands[2]); + equal = gen_rtx_IOR (TImode, + gen_rtx_ZERO_EXTEND (TImode, div_equal), + gen_rtx_ASHIFT (TImode, + gen_rtx_ZERO_EXTEND (TImode, mod_equal), + GEN_INT (64))); + + operands[4] = gen_reg_rtx(TImode); + emit_insn (gen_rtx_CLOBBER (VOIDmode, operands[4])); + emit_move_insn (gen_lowpart (DImode, operands[4]), operands[1]); + emit_move_insn (gen_highpart (DImode, operands[4]), const0_rtx); + insn = emit_insn (gen_divmodtidi3 (operands[4], operands[4], operands[2])); + REG_NOTES (insn) = + gen_rtx_EXPR_LIST (REG_EQUAL, equal, REG_NOTES (insn)); + + insn = emit_move_insn (operands[0], gen_lowpart (DImode, operands[4])); + REG_NOTES (insn) = + gen_rtx_EXPR_LIST (REG_EQUAL, div_equal, REG_NOTES (insn)); + + insn = emit_move_insn (operands[3], gen_highpart (DImode, operands[4])); + REG_NOTES (insn) = + gen_rtx_EXPR_LIST (REG_EQUAL, mod_equal, REG_NOTES (insn)); + + DONE; +}") + +(define_insn "divmodtidi3" + [(set (match_operand:TI 0 "register_operand" "=d,d") + (ior:TI + (zero_extend:TI + (div:DI (truncate:DI (match_operand:TI 1 "register_operand" "0,0")) + (match_operand:DI 2 "general_operand" "d,m"))) + (ashift:TI + (zero_extend:TI + (mod:DI (truncate:DI (match_dup 1)) + (match_dup 2))) + (const_int 64))))] + "TARGET_64BIT" + "@ + dsgr\\t%0,%2 + dsg\\t%0,%2" + [(set_attr "op_type" "RRE,RXE") + (set_attr "type" "idiv") + (set_attr "atype" "reg,mem")]) + +(define_insn "divmodtisi3" + [(set (match_operand:TI 0 "register_operand" "=d,d") + (ior:TI + (zero_extend:TI + (div:DI 
(truncate:DI (match_operand:TI 1 "register_operand" "0,0")) + (sign_extend:DI (match_operand:SI 2 "nonimmediate_operand" "d,m")))) + (ashift:TI + (zero_extend:TI + (mod:DI (truncate:DI (match_dup 1)) + (sign_extend:DI (match_dup 2)))) + (const_int 64))))] + "TARGET_64BIT" + "@ + dsgfr\\t%0,%2 + dsgf\\t%0,%2" + [(set_attr "op_type" "RRE,RXE") + (set_attr "type" "idiv") + (set_attr "atype" "reg,mem")]) + +; +; udivmoddi4 instruction pattern(s). +; + +(define_expand "udivmoddi4" + [(parallel [(set (match_operand:DI 0 "general_operand" "") + (udiv:DI (match_operand:DI 1 "general_operand" "") + (match_operand:DI 2 "nonimmediate_operand" ""))) + (set (match_operand:DI 3 "general_operand" "") + (umod:DI (match_dup 1) (match_dup 2)))]) + (clobber (match_dup 4))] + "TARGET_64BIT" + " +{ + rtx insn, div_equal, mod_equal, equal; + + div_equal = gen_rtx_UDIV (DImode, operands[1], operands[2]); + mod_equal = gen_rtx_UMOD (DImode, operands[1], operands[2]); + equal = gen_rtx_IOR (TImode, + gen_rtx_ZERO_EXTEND (TImode, div_equal), + gen_rtx_ASHIFT (TImode, + gen_rtx_ZERO_EXTEND (TImode, mod_equal), + GEN_INT (64))); + + operands[4] = gen_reg_rtx(TImode); + emit_insn (gen_rtx_CLOBBER (VOIDmode, operands[4])); + emit_move_insn (gen_lowpart (DImode, operands[4]), operands[1]); + emit_move_insn (gen_highpart (DImode, operands[4]), const0_rtx); + insn = emit_insn (gen_udivmodtidi3 (operands[4], operands[4], operands[2])); + REG_NOTES (insn) = + gen_rtx_EXPR_LIST (REG_EQUAL, equal, REG_NOTES (insn)); + + insn = emit_move_insn (operands[0], gen_lowpart (DImode, operands[4])); + REG_NOTES (insn) = + gen_rtx_EXPR_LIST (REG_EQUAL, div_equal, REG_NOTES (insn)); + + insn = emit_move_insn (operands[3], gen_highpart (DImode, operands[4])); + REG_NOTES (insn) = + gen_rtx_EXPR_LIST (REG_EQUAL, mod_equal, REG_NOTES (insn)); + + DONE; +}") + +(define_insn "udivmodtidi3" + [(set (match_operand:TI 0 "register_operand" "=d,d") + (ior:TI (zero_extend:TI + (truncate:DI + (udiv:TI (match_operand:TI 1 
"register_operand" "0,0") + (zero_extend:TI + (match_operand:DI 2 "nonimmediate_operand" "d,m"))))) + (ashift:TI + (zero_extend:TI + (truncate:DI + (umod:TI (match_dup 1) (zero_extend:TI (match_dup 2))))) + (const_int 64))))] + "TARGET_64BIT" + "@ + dlgr\\t%0,%2 + dlg\\t%0,%2" + [(set_attr "op_type" "RRE,RXE") + (set_attr "type" "idiv") + (set_attr "atype" "reg,mem")]) + +; +; divmodsi4 instruction pattern(s). +; + +(define_expand "divmodsi4" + [(parallel [(set (match_operand:SI 0 "general_operand" "") + (div:SI (match_operand:SI 1 "general_operand" "") + (match_operand:SI 2 "nonimmediate_operand" ""))) + (set (match_operand:SI 3 "general_operand" "") + (mod:SI (match_dup 1) (match_dup 2)))]) + (clobber (match_dup 4))] + "!TARGET_64BIT" + " +{ + rtx insn, div_equal, mod_equal, equal; + + div_equal = gen_rtx_DIV (SImode, operands[1], operands[2]); + mod_equal = gen_rtx_MOD (SImode, operands[1], operands[2]); + equal = gen_rtx_IOR (DImode, + gen_rtx_ZERO_EXTEND (DImode, div_equal), + gen_rtx_ASHIFT (DImode, + gen_rtx_ZERO_EXTEND (DImode, mod_equal), + GEN_INT (32))); + + operands[4] = gen_reg_rtx(DImode); + emit_insn (gen_extendsidi2 (operands[4], operands[1])); + insn = emit_insn (gen_divmoddisi3 (operands[4], operands[4], operands[2])); + REG_NOTES (insn) = + gen_rtx_EXPR_LIST (REG_EQUAL, equal, REG_NOTES (insn)); + + insn = emit_move_insn (operands[0], gen_lowpart (SImode, operands[4])); + REG_NOTES (insn) = + gen_rtx_EXPR_LIST (REG_EQUAL, div_equal, REG_NOTES (insn)); + + insn = emit_move_insn (operands[3], gen_highpart (SImode, operands[4])); + REG_NOTES (insn) = + gen_rtx_EXPR_LIST (REG_EQUAL, mod_equal, REG_NOTES (insn)); + + DONE; +}") + +(define_insn "divmoddisi3" + [(set (match_operand:DI 0 "register_operand" "=d,d") + (ior:DI (zero_extend:DI + (truncate:SI + (div:DI (match_operand:DI 1 "register_operand" "0,0") + (sign_extend:DI + (match_operand:SI 2 "nonimmediate_operand" "d,m"))))) + (ashift:DI + (zero_extend:DI + (truncate:SI + (mod:DI (match_dup 1) 
(sign_extend:SI (match_dup 2))))) + (const_int 32))))] + "!TARGET_64BIT" + "@ + dr\\t%0,%2 + d\\t%0,%2" + [(set_attr "op_type" "RR,RX") + (set_attr "type" "idiv") + (set_attr "atype" "reg,mem")]) + +; +; udivsi3 and umodsi3 instruction pattern(s). +; + + +(define_expand "udivsi3" + [(set (match_operand:SI 0 "register_operand" "=d") + (udiv:SI (match_operand:SI 1 "general_operand" "") + (match_operand:SI 2 "general_operand" ""))) + (clobber (match_dup 3))] + "!TARGET_64BIT" + " +{ + rtx insn, udiv_equal, umod_equal, equal; + + udiv_equal = gen_rtx_UDIV (SImode, operands[1], operands[2]); + umod_equal = gen_rtx_UMOD (SImode, operands[1], operands[2]); + equal = gen_rtx_IOR (DImode, + gen_rtx_ZERO_EXTEND (DImode, udiv_equal), + gen_rtx_ASHIFT (DImode, + gen_rtx_ZERO_EXTEND (DImode, umod_equal), + GEN_INT (32))); + + operands[3] = gen_reg_rtx (DImode); + + if (CONSTANT_P (operands[2])) + { + if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) < 0) + { + rtx label1 = gen_label_rtx (); + + operands[1] = make_safe_from (operands[1], operands[0]); + emit_move_insn (operands[0], const0_rtx); + emit_insn (gen_cmpsi (operands[1], operands[2])); + emit_jump_insn (gen_bltu (label1)); + emit_move_insn (operands[0], const1_rtx); + emit_label (label1); + } + else + { + operands[2] = force_reg (SImode, operands[2]); + operands[2] = make_safe_from (operands[2], operands[0]); + + emit_insn (gen_zero_extendsidi2 (operands[3], operands[1])); + insn = emit_insn (gen_divmoddisi3 (operands[3], operands[3], + operands[2])); + REG_NOTES (insn) = + gen_rtx_EXPR_LIST (REG_EQUAL, equal, REG_NOTES (insn)); + + insn = emit_move_insn (operands[0], + gen_lowpart (SImode, operands[3])); + REG_NOTES (insn) = + gen_rtx_EXPR_LIST (REG_EQUAL, + udiv_equal, REG_NOTES (insn)); + } + } + else + { + rtx label1 = gen_label_rtx (); + rtx label2 = gen_label_rtx (); + rtx label3 = gen_label_rtx (); + + operands[1] = force_reg (SImode, operands[1]); + operands[1] = make_safe_from (operands[1], 
operands[0]); + operands[2] = force_reg (SImode, operands[2]); + operands[2] = make_safe_from (operands[2], operands[0]); + + emit_move_insn (operands[0], const0_rtx); + emit_insn (gen_cmpsi (operands[2], operands[1])); + emit_jump_insn (gen_bgtu (label3)); + emit_insn (gen_cmpsi (operands[2], const1_rtx)); + emit_jump_insn (gen_blt (label2)); + emit_insn (gen_cmpsi (operands[2], const1_rtx)); + emit_jump_insn (gen_beq (label1)); + emit_insn (gen_zero_extendsidi2 (operands[3], operands[1])); + insn = emit_insn (gen_divmoddisi3 (operands[3], operands[3], + operands[2])); + REG_NOTES (insn) = + gen_rtx_EXPR_LIST (REG_EQUAL, equal, REG_NOTES (insn)); + + insn = emit_move_insn (operands[0], + gen_lowpart (SImode, operands[3])); + REG_NOTES (insn) = + gen_rtx_EXPR_LIST (REG_EQUAL, + udiv_equal, REG_NOTES (insn)); + emit_jump (label3); + emit_label (label1); + emit_move_insn (operands[0], operands[1]); + emit_jump (label3); + emit_label (label2); + emit_move_insn (operands[0], const1_rtx); + emit_label (label3); + } + emit_move_insn (operands[0], operands[0]); + DONE; +}") + +(define_expand "umodsi3" + [(set (match_operand:SI 0 "register_operand" "=d") + (umod:SI (match_operand:SI 1 "nonimmediate_operand" "") + (match_operand:SI 2 "nonimmediate_operand" ""))) + (clobber (match_dup 3))] + "!TARGET_64BIT" + " +{ + rtx insn, udiv_equal, umod_equal, equal; + + udiv_equal = gen_rtx_UDIV (SImode, operands[1], operands[2]); + umod_equal = gen_rtx_UMOD (SImode, operands[1], operands[2]); + equal = gen_rtx_IOR (DImode, + gen_rtx_ZERO_EXTEND (DImode, udiv_equal), + gen_rtx_ASHIFT (DImode, + gen_rtx_ZERO_EXTEND (DImode, umod_equal), + GEN_INT (32))); + + operands[3] = gen_reg_rtx (DImode); + + if (CONSTANT_P (operands[2])) + { + if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) <= 0) + { + rtx label1 = gen_label_rtx (); + + operands[1] = make_safe_from (operands[1], operands[0]); + emit_move_insn (operands[0], operands[1]); + emit_insn (gen_cmpsi (operands[0], 
operands[2])); + emit_jump_insn (gen_bltu (label1)); + emit_insn (gen_abssi2 (operands[0], operands[2])); + emit_insn (gen_addsi3 (operands[0], operands[0], operands[1])); + emit_label (label1); + } + else + { + operands[2] = force_reg (SImode, operands[2]); + operands[2] = make_safe_from (operands[2], operands[0]); + + emit_insn (gen_zero_extendsidi2 (operands[3], operands[1])); + insn = emit_insn (gen_divmoddisi3 (operands[3], operands[3], + operands[2])); + REG_NOTES (insn) = + gen_rtx_EXPR_LIST (REG_EQUAL, equal, REG_NOTES (insn)); + + insn = emit_move_insn (operands[0], + gen_highpart (SImode, operands[3])); + REG_NOTES (insn) = + gen_rtx_EXPR_LIST (REG_EQUAL, + umod_equal, REG_NOTES (insn)); + } + } + else + { + rtx label1 = gen_label_rtx (); + rtx label2 = gen_label_rtx (); + rtx label3 = gen_label_rtx (); + + operands[1] = force_reg (SImode, operands[1]); + operands[1] = make_safe_from (operands[1], operands[0]); + operands[2] = force_reg (SImode, operands[2]); + operands[2] = make_safe_from (operands[2], operands[0]); + + emit_move_insn(operands[0], operands[1]); + emit_insn (gen_cmpsi (operands[2], operands[1])); + emit_jump_insn (gen_bgtu (label3)); + emit_insn (gen_cmpsi (operands[2], const1_rtx)); + emit_jump_insn (gen_blt (label2)); + emit_insn (gen_cmpsi (operands[2], const1_rtx)); + emit_jump_insn (gen_beq (label1)); + emit_insn (gen_zero_extendsidi2 (operands[3], operands[1])); + insn = emit_insn (gen_divmoddisi3 (operands[3], operands[3], + operands[2])); + REG_NOTES (insn) = + gen_rtx_EXPR_LIST (REG_EQUAL, equal, REG_NOTES (insn)); + + insn = emit_move_insn (operands[0], + gen_highpart (SImode, operands[3])); + REG_NOTES (insn) = + gen_rtx_EXPR_LIST (REG_EQUAL, + umod_equal, REG_NOTES (insn)); + emit_jump (label3); + emit_label (label1); + emit_move_insn (operands[0], const0_rtx); + emit_jump (label3); + emit_label (label2); + emit_insn (gen_subsi3 (operands[0], operands[0], operands[2])); + emit_label (label3); + } + DONE; +}") + +; +; divdf3 
instruction pattern(s). +; + +(define_expand "divdf3" + [(parallel + [(set (match_operand:DF 0 "register_operand" "=f,f") + (div:DF (match_operand:DF 1 "register_operand" "0,0") + (match_operand:DF 2 "general_operand" "f,m"))) + (clobber (reg:CC 33))])] + "TARGET_HARD_FLOAT" + "") + +(define_insn "*divdf3" + [(set (match_operand:DF 0 "register_operand" "=f,f") + (div:DF (match_operand:DF 1 "register_operand" "0,0") + (match_operand:DF 2 "general_operand" "f,m"))) + (clobber (reg:CC 33))] + "TARGET_HARD_FLOAT && TARGET_IEEE_FLOAT" + "@ + ddbr\\t%0,%2 + ddb\\t%0,%2" + [(set_attr "op_type" "RRE,RXE") + (set_attr "type" "fdiv") + (set_attr "atype" "reg,mem")]) + +(define_insn "*divdf3_ibm" + [(set (match_operand:DF 0 "register_operand" "=f,f") + (div:DF (match_operand:DF 1 "register_operand" "0,0") + (match_operand:DF 2 "general_operand" "f,m"))) + (clobber (reg:CC 33))] + "TARGET_HARD_FLOAT && TARGET_IBM_FLOAT" + "@ + ddr\\t%0,%2 + dd\\t%0,%2" + [(set_attr "op_type" "RR,RX") + (set_attr "type" "fdiv") + (set_attr "atype" "reg,mem")]) + +; +; divsf3 instruction pattern(s). 
;

; Expander only wraps the divide in a parallel with the CC clobber;
; instruction selection picks the IEEE or IBM variant below.
(define_expand "divsf3"
  [(parallel
    [(set (match_operand:SF 0 "register_operand" "=f,f")
          (div:SF (match_operand:SF 1 "register_operand" "0,0")
                  (match_operand:SF 2 "general_operand" "f,m")))
     (clobber (reg:CC 33))])]
  "TARGET_HARD_FLOAT"
  "")

; IEEE (binary floating point) single divide.
(define_insn "*divsf3"
  [(set (match_operand:SF 0 "register_operand" "=f,f")
        (div:SF (match_operand:SF 1 "register_operand" "0,0")
                (match_operand:SF 2 "general_operand" "f,m")))
   (clobber (reg:CC 33))]
  "TARGET_HARD_FLOAT && TARGET_IEEE_FLOAT"
  "@
   debr\\t%0,%2
   deb\\t%0,%2"
  [(set_attr "op_type" "RRE,RXE")
   (set_attr "type"    "fdiv")
   (set_attr "atype"   "reg,mem")])

; IBM (hexadecimal floating point) single divide.
; Renamed from a duplicate "*divsf3" to "*divsf3_ibm" for consistency
; with *divdf3_ibm above.
(define_insn "*divsf3_ibm"
  [(set (match_operand:SF 0 "register_operand" "=f,f")
        (div:SF (match_operand:SF 1 "register_operand" "0,0")
                (match_operand:SF 2 "general_operand" "f,m")))
   (clobber (reg:CC 33))]
  "TARGET_HARD_FLOAT && TARGET_IBM_FLOAT"
  "@
   der\\t%0,%2
   de\\t%0,%2"
  [(set_attr "op_type" "RR,RX")
   (set_attr "type"    "fdiv")
   (set_attr "atype"   "reg,mem")])


;;
;;- And instructions.
;;

;
; anddi3 instruction pattern(s).
+; + +(define_insn "*anddi3_cc" + [(set (reg 33) + (compare (and:DI (match_operand:DI 1 "register_operand" "%0,0") + (match_operand:DI 2 "general_operand" "d,m")) + (const_int 0))) + (set (match_operand:DI 0 "register_operand" "=d,d") + (and:DI (match_dup 1) (match_dup 2)))] + "s390_match_ccmode(insn, CCTmode) && TARGET_64BIT" + "@ + ngr\\t%0,%2 + ng\\t%0,%2" + [(set_attr "op_type" "RRE,RXE") + (set_attr "atype" "reg,mem")]) + +(define_insn "*anddi3_cconly" + [(set (reg 33) + (compare (and:DI (match_operand:DI 1 "register_operand" "%0,0") + (match_operand:DI 2 "general_operand" "d,m")) + (const_int 0))) + (clobber (match_scratch:DI 0 "=d,d"))] + "s390_match_ccmode(insn, CCTmode) && TARGET_64BIT" + "@ + ngr\\t%0,%2 + ng\\t%0,%2" + [(set_attr "op_type" "RRE,RXE") + (set_attr "atype" "reg,mem")]) + +(define_insn "*anddi3_ni" + [(set (match_operand:DI 0 "register_operand" "=d") + (and:DI (match_operand:DI 1 "register_operand" "%0") + (match_operand:DI 2 "immediate_operand" "n"))) + (clobber (reg:CC 33))] + "TARGET_64BIT && s390_single_hi (operands[2], DImode, -1) >= 0" + "* +{ + int part = s390_single_hi (operands[2], DImode, -1); + operands[2] = GEN_INT (s390_extract_hi (operands[2], DImode, part)); + + switch (part) + { + case 0: return \"nihh\\t%0,%x2\"; + case 1: return \"nihl\\t%0,%x2\"; + case 2: return \"nilh\\t%0,%x2\"; + case 3: return \"nill\\t%0,%x2\"; + default: abort (); + } +}" + [(set_attr "op_type" "RI") + (set_attr "atype" "reg")]) + +(define_insn "anddi3" + [(set (match_operand:DI 0 "register_operand" "=d,d") + (and:DI (match_operand:DI 1 "register_operand" "%0,0") + (match_operand:DI 2 "general_operand" "d,m"))) + (clobber (reg:CC 33))] + "TARGET_64BIT" + "@ + ngr\\t%0,%2 + ng\\t%0,%2" + [(set_attr "op_type" "RRE,RXE") + (set_attr "atype" "reg,mem")]) + +(define_insn "*anddi3_ss" + [(set (match_operand:DI 0 "s_operand" "=Qo") + (and:DI (match_dup 0) + (match_operand:DI 1 "s_imm_operand" "Qo"))) + (clobber (reg:CC 33))] + "" + "nc\\t%O0(8,%R0),%1" + 
[(set_attr "op_type" "SS") + (set_attr "atype" "mem")]) + +(define_insn "*anddi3_ss_inv" + [(set (match_operand:DI 0 "s_operand" "=Qo") + (and:DI (match_operand:DI 1 "s_imm_operand" "Qo") + (match_dup 0))) + (clobber (reg:CC 33))] + "" + "nc\\t%O0(8,%R0),%1" + [(set_attr "op_type" "SS") + (set_attr "atype" "mem")]) + +; +; andsi3 instruction pattern(s). +; + +(define_insn "*andsi3_cc" + [(set (reg 33) + (compare (and:SI (match_operand:SI 1 "register_operand" "%0,0") + (match_operand:SI 2 "general_operand" "d,m")) + (const_int 0))) + (set (match_operand:SI 0 "register_operand" "=d,d") + (and:SI (match_dup 1) (match_dup 2)))] + "s390_match_ccmode(insn, CCTmode)" + "@ + nr\\t%0,%2 + n\\t%0,%2" + [(set_attr "op_type" "RR,RX") + (set_attr "atype" "reg,mem")]) + +(define_insn "*andsi3_cconly" + [(set (reg 33) + (compare (and:SI (match_operand:SI 1 "register_operand" "%0,0") + (match_operand:SI 2 "general_operand" "d,m")) + (const_int 0))) + (clobber (match_scratch:SI 0 "=d,d"))] + "s390_match_ccmode(insn, CCTmode)" + "@ + nr\\t%0,%2 + n\\t%0,%2" + [(set_attr "op_type" "RR,RX") + (set_attr "atype" "reg,mem")]) + +(define_insn "*andsi3_ni" + [(set (match_operand:SI 0 "register_operand" "=d") + (and:SI (match_operand:SI 1 "register_operand" "%0") + (match_operand:SI 2 "immediate_operand" "n"))) + (clobber (reg:CC 33))] + "TARGET_64BIT && s390_single_hi (operands[2], SImode, -1) >= 0" + "* +{ + int part = s390_single_hi (operands[2], SImode, -1); + operands[2] = GEN_INT (s390_extract_hi (operands[2], SImode, part)); + + switch (part) + { + case 0: return \"nilh\\t%0,%x2\"; + case 1: return \"nill\\t%0,%x2\"; + default: abort (); + } +}" + [(set_attr "op_type" "RI") + (set_attr "atype" "reg")]) + +(define_insn "andsi3" + [(set (match_operand:SI 0 "register_operand" "=d,d") + (and:SI (match_operand:SI 1 "register_operand" "%0,0") + (match_operand:SI 2 "general_operand" "d,m"))) + (clobber (reg:CC 33))] + "" + "@ + nr\\t%0,%2 + n\\t%0,%2" + [(set_attr "op_type" "RR,RX") + 
(set_attr "atype" "reg,mem")]) + +(define_insn "*andsi3_ss" + [(set (match_operand:SI 0 "s_operand" "=Qo") + (and:SI (match_dup 0) + (match_operand:SI 1 "s_imm_operand" "Qo"))) + (clobber (reg:CC 33))] + "" + "nc\\t%O0(4,%R0),%1" + [(set_attr "op_type" "SS") + (set_attr "atype" "mem")]) + +(define_insn "*andsi3_ss_inv" + [(set (match_operand:SI 0 "s_operand" "=Qo") + (and:SI (match_operand:SI 1 "s_imm_operand" "Qo") + (match_dup 0))) + (clobber (reg:CC 33))] + "" + "nc\\t%O0(4,%R0),%1" + [(set_attr "op_type" "SS") + (set_attr "atype" "mem")]) + +; +; andhi3 instruction pattern(s). +; + +(define_insn "*andhi3_ni" + [(set (match_operand:HI 0 "register_operand" "=d,d") + (and:HI (match_operand:HI 1 "register_operand" "%0,0") + (match_operand:HI 2 "nonmemory_operand" "d,n"))) + (clobber (reg:CC 33))] + "TARGET_64BIT" + "@ + nr\\t%0,%2 + nill\\t%0,%x2" + [(set_attr "op_type" "RR,RI") + (set_attr "atype" "reg")]) + +(define_insn "andhi3" + [(set (match_operand:HI 0 "register_operand" "=d") + (and:HI (match_operand:HI 1 "register_operand" "%0") + (match_operand:HI 2 "nonmemory_operand" "d"))) + (clobber (reg:CC 33))] + "" + "nr\\t%0,%2" + [(set_attr "op_type" "RR") + (set_attr "atype" "reg")]) + +(define_insn "*andhi3_ss" + [(set (match_operand:HI 0 "s_operand" "=Qo") + (and:HI (match_dup 0) + (match_operand:HI 1 "s_imm_operand" "Qo"))) + (clobber (reg:CC 33))] + "" + "nc\\t%O0(2,%R0),%1" + [(set_attr "op_type" "SS") + (set_attr "atype" "mem")]) + +(define_insn "*andhi3_ss_inv" + [(set (match_operand:HI 0 "s_operand" "=Qo") + (and:HI (match_operand:HI 1 "s_imm_operand" "Qo") + (match_dup 0))) + (clobber (reg:CC 33))] + "" + "nc\\t%O0(2,%R0),%1" + [(set_attr "op_type" "SS") + (set_attr "atype" "mem")]) + +; +; andqi3 instruction pattern(s). 
+; + +(define_insn "*andqi3_ni" + [(set (match_operand:QI 0 "register_operand" "=d,d") + (and:QI (match_operand:QI 1 "register_operand" "%0,0") + (match_operand:QI 2 "nonmemory_operand" "d,n"))) + (clobber (reg:CC 33))] + "TARGET_64BIT" + "@ + nr\\t%0,%2 + nill\\t%0,%b2" + [(set_attr "op_type" "RR,RI") + (set_attr "atype" "reg")]) + +(define_insn "andqi3" + [(set (match_operand:QI 0 "register_operand" "=d") + (and:QI (match_operand:QI 1 "register_operand" "%0") + (match_operand:QI 2 "nonmemory_operand" "d"))) + (clobber (reg:CC 33))] + "" + "nr\\t%0,%2" + [(set_attr "op_type" "RR") + (set_attr "atype" "reg")]) + +(define_insn "*andqi3_ss" + [(set (match_operand:QI 0 "s_operand" "=Qo,Qo") + (and:QI (match_dup 0) + (match_operand:QI 1 "s_imm_operand" "n,Qo"))) + (clobber (reg:CC 33))] + "" + "@ + ni\\t%0,%b1 + nc\\t%O0(1,%R0),%1" + [(set_attr "op_type" "SI,SS") + (set_attr "atype" "mem")]) + +(define_insn "*andqi3_ss_inv" + [(set (match_operand:QI 0 "s_operand" "=Qo,Qo") + (and:QI (match_operand:QI 1 "s_imm_operand" "n,Qo") + (match_dup 0))) + (clobber (reg:CC 33))] + "" + "@ + ni\\t%0,%b1 + nc\\t%O0(1,%R0),%1" + [(set_attr "op_type" "SI,SS") + (set_attr "atype" "mem")]) + + +;; +;;- Bit set (inclusive or) instructions. +;; + +; +; iordi3 instruction pattern(s). 
+; + +(define_insn "*iordi3_cc" + [(set (reg 33) + (compare (ior:DI (match_operand:DI 1 "register_operand" "%0,0") + (match_operand:DI 2 "general_operand" "d,m")) + (const_int 0))) + (set (match_operand:DI 0 "register_operand" "=d,d") + (ior:DI (match_dup 1) (match_dup 2)))] + "s390_match_ccmode(insn, CCTmode) && TARGET_64BIT" + "@ + ogr\\t%0,%2 + og\\t%0,%2" + [(set_attr "op_type" "RRE,RXE") + (set_attr "atype" "reg,mem")]) + +(define_insn "*iordi3_cconly" + [(set (reg 33) + (compare (ior:DI (match_operand:DI 1 "register_operand" "%0,0") + (match_operand:DI 2 "general_operand" "d,m")) + (const_int 0))) + (clobber (match_scratch:DI 0 "=d,d"))] + "s390_match_ccmode(insn, CCTmode) && TARGET_64BIT" + "@ + ogr\\t%0,%2 + og\\t%0,%2" + [(set_attr "op_type" "RRE,RXE") + (set_attr "atype" "reg,mem")]) + +(define_insn "*iordi3_oi" + [(set (match_operand:DI 0 "register_operand" "=d") + (ior:DI (match_operand:DI 1 "register_operand" "%0") + (match_operand:DI 2 "immediate_operand" "n"))) + (clobber (reg:CC 33))] + "TARGET_64BIT && s390_single_hi (operands[2], DImode, 0) >= 0" + "* +{ + int part = s390_single_hi (operands[2], DImode, 0); + operands[2] = GEN_INT (s390_extract_hi (operands[2], DImode, part)); + + switch (part) + { + case 0: return \"oihh\\t%0,%x2\"; + case 1: return \"oihl\\t%0,%x2\"; + case 2: return \"oilh\\t%0,%x2\"; + case 3: return \"oill\\t%0,%x2\"; + default: abort (); + } +}" + [(set_attr "op_type" "RI") + (set_attr "atype" "reg")]) + +(define_insn "iordi3" + [(set (match_operand:DI 0 "register_operand" "=d,d") + (ior:DI (match_operand:DI 1 "register_operand" "%0,0") + (match_operand:DI 2 "general_operand" "d,m"))) + (clobber (reg:CC 33))] + "TARGET_64BIT" + "@ + ogr\\t%0,%2 + og\\t%0,%2" + [(set_attr "op_type" "RRE,RXE") + (set_attr "atype" "reg,mem")]) + +(define_insn "*iordi3_ss" + [(set (match_operand:DI 0 "s_operand" "=Qo") + (ior:DI (match_dup 0) + (match_operand:DI 1 "s_imm_operand" "Qo"))) + (clobber (reg:CC 33))] + "" + "oc\\t%O0(8,%R0),%1" + 
[(set_attr "op_type" "SS") + (set_attr "atype" "mem")]) + +(define_insn "*iordi3_ss_inv" + [(set (match_operand:DI 0 "s_operand" "=Qo") + (ior:DI (match_operand:DI 1 "s_imm_operand" "Qo") + (match_dup 0))) + (clobber (reg:CC 33))] + "" + "oc\\t%O0(8,%R0),%1" + [(set_attr "op_type" "SS") + (set_attr "atype" "mem")]) + +; +; iorsi3 instruction pattern(s). +; + +(define_insn "*iorsi3_cc" + [(set (reg 33) + (compare (ior:SI (match_operand:SI 1 "register_operand" "%0,0") + (match_operand:SI 2 "general_operand" "d,m")) + (const_int 0))) + (set (match_operand:SI 0 "register_operand" "=d,d") + (ior:SI (match_dup 1) (match_dup 2)))] + "s390_match_ccmode(insn, CCTmode)" + "@ + or\\t%0,%2 + o\\t%0,%2" + [(set_attr "op_type" "RR,RX") + (set_attr "atype" "reg,mem")]) + +(define_insn "*iorsi3_cconly" + [(set (reg 33) + (compare (ior:SI (match_operand:SI 1 "register_operand" "%0,0") + (match_operand:SI 2 "general_operand" "d,m")) + (const_int 0))) + (clobber (match_scratch:SI 0 "=d,d"))] + "s390_match_ccmode(insn, CCTmode)" + "@ + or\\t%0,%2 + o\\t%0,%2" + [(set_attr "op_type" "RR,RX") + (set_attr "atype" "reg,mem")]) + +(define_insn "*iorsi3_oi" + [(set (match_operand:SI 0 "register_operand" "=d") + (ior:SI (match_operand:SI 1 "register_operand" "%0") + (match_operand:SI 2 "immediate_operand" "n"))) + (clobber (reg:CC 33))] + "TARGET_64BIT && s390_single_hi (operands[2], SImode, 0) >= 0" + "* +{ + int part = s390_single_hi (operands[2], SImode, 0); + operands[2] = GEN_INT (s390_extract_hi (operands[2], SImode, part)); + + switch (part) + { + case 0: return \"oilh\\t%0,%x2\"; + case 1: return \"oill\\t%0,%x2\"; + default: abort (); + } +}" + [(set_attr "op_type" "RI") + (set_attr "atype" "reg")]) + +(define_insn "iorsi3" + [(set (match_operand:SI 0 "register_operand" "=d,d") + (ior:SI (match_operand:SI 1 "register_operand" "%0,0") + (match_operand:SI 2 "general_operand" "d,m"))) + (clobber (reg:CC 33))] + "" + "@ + or\\t%0,%2 + o\\t%0,%2" + [(set_attr "op_type" "RR,RX") + 
(set_attr "atype" "reg,mem")]) + +(define_insn "*iorsi3_ss" + [(set (match_operand:SI 0 "s_operand" "=Qo") + (ior:SI (match_dup 0) + (match_operand:SI 1 "s_imm_operand" "Qo"))) + (clobber (reg:CC 33))] + "" + "oc\\t%O0(4,%R0),%1" + [(set_attr "op_type" "SS") + (set_attr "atype" "mem")]) + +(define_insn "*iorsi3_ss_inv" + [(set (match_operand:SI 0 "s_operand" "=Qo") + (ior:SI (match_operand:SI 1 "s_imm_operand" "Qo") + (match_dup 0))) + (clobber (reg:CC 33))] + "" + "oc\\t%O0(4,%R0),%1" + [(set_attr "op_type" "SS") + (set_attr "atype" "mem")]) + +; +; iorhi3 instruction pattern(s). +; + +(define_insn "*iorhi3_oi" + [(set (match_operand:HI 0 "register_operand" "=d,d") + (ior:HI (match_operand:HI 1 "register_operand" "%0,0") + (match_operand:HI 2 "nonmemory_operand" "d,n"))) + (clobber (reg:CC 33))] + "TARGET_64BIT" + "@ + or\\t%0,%2 + oill\\t%0,%x2" + [(set_attr "op_type" "RR,RI") + (set_attr "atype" "reg")]) + +(define_insn "iorhi3" + [(set (match_operand:HI 0 "register_operand" "=d") + (ior:HI (match_operand:HI 1 "register_operand" "%0") + (match_operand:HI 2 "nonmemory_operand" "d"))) + (clobber (reg:CC 33))] + "" + "or\\t%0,%2" + [(set_attr "op_type" "RR") + (set_attr "atype" "reg")]) + +(define_insn "*iorhi3_ss" + [(set (match_operand:HI 0 "s_operand" "=Qo") + (ior:HI (match_dup 0) + (match_operand:HI 1 "s_imm_operand" "Qo"))) + (clobber (reg:CC 33))] + "" + "oc\\t%O0(2,%R0),%1" + [(set_attr "op_type" "SS") + (set_attr "atype" "mem")]) + +(define_insn "*iorhi3_ss_inv" + [(set (match_operand:HI 0 "s_operand" "=Qo") + (ior:HI (match_operand:HI 1 "s_imm_operand" "Qo") + (match_dup 0))) + (clobber (reg:CC 33))] + "" + "oc\\t%O0(2,%R0),%1" + [(set_attr "op_type" "SS") + (set_attr "atype" "mem")]) + +; +; iorqi3 instruction pattern(s). 
+;
+
+(define_insn "*iorqi3_oi"
+  [(set (match_operand:QI 0 "register_operand" "=d,d")
+        (ior:QI (match_operand:QI 1 "register_operand" "%0,0")
+                (match_operand:QI 2 "nonmemory_operand" "d,n")))
+   (clobber (reg:CC 33))]
+  "TARGET_64BIT"
+  "@
+   or\\t%0,%2
+   oill\\t%0,%b2"
+  [(set_attr "op_type" "RR,RI")
+   (set_attr "atype" "reg")])
+
+(define_insn "iorqi3"
+  [(set (match_operand:QI 0 "register_operand" "=d")
+        (ior:QI (match_operand:QI 1 "register_operand" "%0")
+                (match_operand:QI 2 "nonmemory_operand" "d")))
+   (clobber (reg:CC 33))]
+  ""
+  "or\\t%0,%2"
+  [(set_attr "op_type" "RR")
+   (set_attr "atype" "reg")])
+
+; Both alternatives (SI-format OI and SS-format OC) operate on storage,
+; so atype is "mem" for both -- consistent with *andqi3_ss / *xorqi3_ss.
+(define_insn "*iorqi3_ss"
+  [(set (match_operand:QI 0 "s_operand" "=Qo,Qo")
+        (ior:QI (match_dup 0)
+                (match_operand:QI 1 "s_imm_operand" "n,Qo")))
+   (clobber (reg:CC 33))]
+  ""
+  "@
+   oi\\t%0,%b1
+   oc\\t%O0(1,%R0),%1"
+  [(set_attr "op_type" "SI,SS")
+   (set_attr "atype" "mem")])
+
+(define_insn "*iorqi3_ss_inv"
+  [(set (match_operand:QI 0 "s_operand" "=Qo,Qo")
+        (ior:QI (match_operand:QI 1 "s_imm_operand" "n,Qo")
+                (match_dup 0)))
+   (clobber (reg:CC 33))]
+  ""
+  "@
+   oi\\t%0,%b1
+   oc\\t%O0(1,%R0),%1"
+  [(set_attr "op_type" "SI,SS")
+   (set_attr "atype" "mem")])
+
+
+;;
+;;- Xor instructions.
+;;
+
+;
+; xordi3 instruction pattern(s).
+;
+
+(define_insn "*xordi3_cc"
+  [(set (reg 33)
+        (compare (xor:DI (match_operand:DI 1 "register_operand" "%0,0")
+                         (match_operand:DI 2 "general_operand" "d,m"))
+                 (const_int 0)))
+   (set (match_operand:DI 0 "register_operand" "=d,d")
+        (xor:DI (match_dup 1) (match_dup 2)))]
+  "s390_match_ccmode(insn, CCTmode) && TARGET_64BIT"
+  "@
+   xgr\\t%0,%2
+   xg\\t%0,%2"
+  [(set_attr "op_type" "RRE,RXE")
+   (set_attr "atype" "reg,mem")])
+
+; Second alternative fixed from "xr" (32-bit RR form) to "xg": the
+; operand is DImode with an "m" constraint and op_type RXE, exactly as
+; in *xordi3_cc and xordi3 above/below.
+(define_insn "*xordi3_cconly"
+  [(set (reg 33)
+        (compare (xor:DI (match_operand:DI 1 "register_operand" "%0,0")
+                         (match_operand:DI 2 "general_operand" "d,m"))
+                 (const_int 0)))
+   (clobber (match_scratch:DI 0 "=d,d"))]
+  "s390_match_ccmode(insn, CCTmode) && TARGET_64BIT"
+  "@
+   xgr\\t%0,%2
+   xg\\t%0,%2"
+  [(set_attr "op_type" "RRE,RXE")
+   (set_attr "atype" "reg,mem")])
+
+(define_insn "xordi3"
+  [(set (match_operand:DI 0 "register_operand" "=d,d")
+        (xor:DI (match_operand:DI 1 "register_operand" "%0,0")
+                (match_operand:DI 2 "general_operand" "d,m")))
+   (clobber (reg:CC 33))]
+  "TARGET_64BIT"
+  "@
+   xgr\\t%0,%2
+   xg\\t%0,%2"
+  [(set_attr "op_type" "RRE,RXE")
+   (set_attr "atype" "reg,mem")])
+
+(define_insn "*xordi3_ss"
+  [(set (match_operand:DI 0 "s_operand" "=Qo")
+        (xor:DI (match_dup 0)
+                (match_operand:DI 1 "s_imm_operand" "Qo")))
+   (clobber (reg:CC 33))]
+  ""
+  "xc\\t%O0(8,%R0),%1"
+  [(set_attr "op_type" "SS")
+   (set_attr "atype" "mem")])
+
+(define_insn "*xordi3_ss_inv"
+  [(set (match_operand:DI 0 "s_operand" "=Qo")
+        (xor:DI (match_operand:DI 1 "s_imm_operand" "Qo")
+                (match_dup 0)))
+   (clobber (reg:CC 33))]
+  ""
+  "xc\\t%O0(8,%R0),%1"
+  [(set_attr "op_type" "SS")
+   (set_attr "atype" "mem")])
+
+;
+; xorsi3 instruction pattern(s).
+; + +(define_insn "*xorsi3_cc" + [(set (reg 33) + (compare (xor:SI (match_operand:SI 1 "register_operand" "%0,0") + (match_operand:SI 2 "general_operand" "d,m")) + (const_int 0))) + (set (match_operand:SI 0 "register_operand" "=d,d") + (xor:SI (match_dup 1) (match_dup 2)))] + "s390_match_ccmode(insn, CCTmode)" + "@ + xr\\t%0,%2 + x\\t%0,%2" + [(set_attr "op_type" "RR,RX") + (set_attr "atype" "reg,mem")]) + +(define_insn "*xorsi3_cconly" + [(set (reg 33) + (compare (xor:SI (match_operand:SI 1 "register_operand" "%0,0") + (match_operand:SI 2 "general_operand" "d,m")) + (const_int 0))) + (clobber (match_scratch:SI 0 "=d,d"))] + "s390_match_ccmode(insn, CCTmode)" + "@ + xr\\t%0,%2 + x\\t%0,%2" + [(set_attr "op_type" "RR,RX") + (set_attr "atype" "reg,mem")]) + +(define_insn "xorsi3" + [(set (match_operand:SI 0 "register_operand" "=d,d") + (xor:SI (match_operand:SI 1 "register_operand" "%0,0") + (match_operand:SI 2 "general_operand" "d,m"))) + (clobber (reg:CC 33))] + "" + "@ + xr\\t%0,%2 + x\\t%0,%2" + [(set_attr "op_type" "RR,RX") + (set_attr "atype" "reg,mem")]) + +(define_insn "*xorsi3_ss" + [(set (match_operand:SI 0 "s_operand" "=Qo") + (xor:SI (match_dup 0) + (match_operand:SI 1 "s_imm_operand" "Qo"))) + (clobber (reg:CC 33))] + "" + "xc\\t%O0(4,%R0),%1" + [(set_attr "op_type" "SS") + (set_attr "atype" "mem")]) + +(define_insn "*xorsi3_ss_inv" + [(set (match_operand:SI 0 "s_operand" "=Qo") + (xor:SI (match_operand:SI 1 "s_imm_operand" "Qo") + (match_dup 0))) + (clobber (reg:CC 33))] + "" + "xc\\t%O0(4,%R0),%1" + [(set_attr "op_type" "SS") + (set_attr "atype" "mem")]) + +; +; xorhi3 instruction pattern(s). 
+; + +(define_insn "xorhi3" + [(set (match_operand:HI 0 "register_operand" "=d") + (xor:HI (match_operand:HI 1 "register_operand" "%0") + (match_operand:HI 2 "nonmemory_operand" "d"))) + (clobber (reg:CC 33))] + "" + "xr\\t%0,%2" + [(set_attr "op_type" "RR") + (set_attr "atype" "reg")]) + +(define_insn "*xorhi3_ss" + [(set (match_operand:HI 0 "s_operand" "=Qo") + (xor:HI (match_dup 0) + (match_operand:HI 1 "s_imm_operand" "Qo"))) + (clobber (reg:CC 33))] + "" + "xc\\t%O0(2,%R0),%1" + [(set_attr "op_type" "SS") + (set_attr "atype" "mem")]) + +(define_insn "*xorhi3_ss_inv" + [(set (match_operand:HI 0 "s_operand" "=Qo") + (xor:HI (match_operand:HI 1 "s_imm_operand" "Qo") + (match_dup 0))) + (clobber (reg:CC 33))] + "" + "xc\\t%O0(2,%R0),%1" + [(set_attr "op_type" "SS") + (set_attr "atype" "mem")]) + +; +; xorqi3 instruction pattern(s). +; + +(define_insn "xorqi3" + [(set (match_operand:QI 0 "register_operand" "=d") + (xor:QI (match_operand:QI 1 "register_operand" "%0") + (match_operand:QI 2 "nonmemory_operand" "d"))) + (clobber (reg:CC 33))] + "" + "xr\\t%0,%2" + [(set_attr "op_type" "RR") + (set_attr "atype" "reg")]) + +(define_insn "*xorqi3_ss" + [(set (match_operand:QI 0 "s_operand" "=Qo,Qo") + (xor:QI (match_dup 0) + (match_operand:QI 1 "s_imm_operand" "n,Qo"))) + (clobber (reg:CC 33))] + "" + "@ + xi\\t%0,%b1 + xc\\t%O0(1,%R0),%1" + [(set_attr "op_type" "SI,SS") + (set_attr "atype" "mem")]) + +(define_insn "*xorqi3_ss_inv" + [(set (match_operand:QI 0 "s_operand" "=Qo,Qo") + (xor:QI (match_operand:QI 1 "s_imm_operand" "n,Qo") + (match_dup 0))) + (clobber (reg:CC 33))] + "" + "@ + xi\\t%0,%b1 + xc\\t%O0(1,%R0),%1" + [(set_attr "op_type" "SI,SS") + (set_attr "atype" "mem")]) + + +;; +;;- Negate instructions. +;; + +; +; negdi2 instruction pattern(s). 
+;
+
+(define_expand "negdi2"
+  [(parallel
+    [(set (match_operand:DI 0 "register_operand" "=d")
+          (neg:DI (match_operand:DI 1 "register_operand" "d")))
+     (clobber (reg:CC 33))])]
+  ""
+  "")
+
+; lcgr is a 64-bit RRE-format instruction (cf. lpgr in absdi2, tagged
+; RRE in this file); op_type corrected from "RR".
+(define_insn "*negdi2_64"
+  [(set (match_operand:DI 0 "register_operand" "=d")
+        (neg:DI (match_operand:DI 1 "register_operand" "d")))
+   (clobber (reg:CC 33))]
+  "TARGET_64BIT"
+  "lcgr\\t%0,%1"
+  [(set_attr "op_type" "RRE")])
+
+; 31-bit DImode negate: complement both halves, then subtract one from
+; the high word (bctr) unless the low word was zero.
+(define_insn "*negdi2_31"
+  [(set (match_operand:DI 0 "register_operand" "=d")
+        (neg:DI (match_operand:DI 1 "register_operand" "d")))
+   (clobber (reg:CC 33))]
+  "!TARGET_64BIT"
+  "*
+{
+  rtx xop[1];
+  xop[0] = gen_label_rtx ();
+  output_asm_insn (\"lcr\\t%0,%1\", operands);
+  output_asm_insn (\"lcr\\t%N0,%N1\", operands);
+  output_asm_insn (\"je\\t%l0\", xop);
+  output_asm_insn (\"bctr\\t%0,0\", operands);
+  ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, \"L\",
+                             CODE_LABEL_NUMBER (xop[0]));
+  return \"\";
+}"
+  [(set_attr "op_type" "NN")
+   (set_attr "type" "other")
+   (set_attr "length" "10")])
+
+;
+; negsi2 instruction pattern(s).
+;
+
+(define_insn "negsi2"
+  [(set (match_operand:SI 0 "register_operand" "=d")
+        (neg:SI (match_operand:SI 1 "register_operand" "d")))
+   (clobber (reg:CC 33))]
+  ""
+  "lcr\\t%0,%1"
+  [(set_attr "op_type" "RR")])
+
+;
+; negdf2 instruction pattern(s).
+;
+
+(define_expand "negdf2"
+  [(parallel
+    [(set (match_operand:DF 0 "register_operand" "=f")
+          (neg:DF (match_operand:DF 1 "register_operand" "f")))
+     (clobber (reg:CC 33))])]
+  "TARGET_HARD_FLOAT"
+  "")
+
+(define_insn "*negdf2"
+  [(set (match_operand:DF 0 "register_operand" "=f")
+        (neg:DF (match_operand:DF 1 "register_operand" "f")))
+   (clobber (reg:CC 33))]
+  "TARGET_HARD_FLOAT && TARGET_IEEE_FLOAT"
+  "lcdbr\\t%0,%1"
+  [(set_attr "op_type" "RRE")])
+
+(define_insn "*negdf2_ibm"
+  [(set (match_operand:DF 0 "register_operand" "=f")
+        (neg:DF (match_operand:DF 1 "register_operand" "f")))
+   (clobber (reg:CC 33))]
+  "TARGET_HARD_FLOAT && TARGET_IBM_FLOAT"
+  "lcdr\\t%0,%1"
+  [(set_attr "op_type" "RR")])
+
+;
+; negsf2 instruction pattern(s).
+;
+
+(define_expand "negsf2"
+  [(parallel
+    [(set (match_operand:SF 0 "register_operand" "=f")
+          (neg:SF (match_operand:SF 1 "register_operand" "f")))
+     (clobber (reg:CC 33))])]
+  "TARGET_HARD_FLOAT"
+  "")
+
+(define_insn "*negsf2"
+  [(set (match_operand:SF 0 "register_operand" "=f")
+        (neg:SF (match_operand:SF 1 "register_operand" "f")))
+   (clobber (reg:CC 33))]
+  "TARGET_HARD_FLOAT && TARGET_IEEE_FLOAT"
+  "lcebr\\t%0,%1"
+  [(set_attr "op_type" "RRE")])
+
+; IBM floating-point variant.  Renamed from a second "*negsf2" so the
+; two variants carry distinct pattern names, mirroring *negdf2_ibm above.
+(define_insn "*negsf2_ibm"
+  [(set (match_operand:SF 0 "register_operand" "=f")
+        (neg:SF (match_operand:SF 1 "register_operand" "f")))
+   (clobber (reg:CC 33))]
+  "TARGET_HARD_FLOAT && TARGET_IBM_FLOAT"
+  "lcer\\t%0,%1"
+  [(set_attr "op_type" "RR")])
+
+
+;;
+;;- Absolute value instructions.
+;;
+
+;
+; absdi2 instruction pattern(s).
+;
+
+(define_insn "absdi2"
+  [(set (match_operand:DI 0 "register_operand" "=d")
+        (abs:DI (match_operand:DI 1 "register_operand" "d")))
+   (clobber (reg:CC 33))]
+  "TARGET_64BIT"
+  "lpgr\\t%0,%1"
+  [(set_attr "op_type" "RRE")])
+
+;
+; abssi2 instruction pattern(s).
+; + +(define_insn "abssi2" + [(set (match_operand:SI 0 "register_operand" "=d") + (abs:SI (match_operand:SI 1 "register_operand" "d"))) + (clobber (reg:CC 33))] + "" + "lpr\\t%0,%1" + [(set_attr "op_type" "RR")]) + +; +; absdf2 instruction pattern(s). +; + +(define_expand "absdf2" + [(parallel + [(set (match_operand:DF 0 "register_operand" "=f") + (abs:DF (match_operand:DF 1 "register_operand" "f"))) + (clobber (reg:CC 33))])] + "TARGET_HARD_FLOAT" + "") + +(define_insn "*absdf2" + [(set (match_operand:DF 0 "register_operand" "=f") + (abs:DF (match_operand:DF 1 "register_operand" "f"))) + (clobber (reg:CC 33))] + "TARGET_HARD_FLOAT && TARGET_IEEE_FLOAT" + "lpdbr\\t%0,%1" + [(set_attr "op_type" "RRE")]) + +(define_insn "*absdf2_ibm" + [(set (match_operand:DF 0 "register_operand" "=f") + (abs:DF (match_operand:DF 1 "register_operand" "f"))) + (clobber (reg:CC 33))] + "TARGET_HARD_FLOAT && TARGET_IBM_FLOAT" + "lpdr\\t%0,%1" + [(set_attr "op_type" "RR")]) + +; +; abssf2 instruction pattern(s). +; + +(define_expand "abssf2" + [(parallel + [(set (match_operand:SF 0 "register_operand" "=f") + (abs:SF (match_operand:SF 1 "register_operand" "f"))) + (clobber (reg:CC 33))])] + "TARGET_HARD_FLOAT" + "") + +(define_insn "*abssf2" + [(set (match_operand:SF 0 "register_operand" "=f") + (abs:SF (match_operand:SF 1 "register_operand" "f"))) + (clobber (reg:CC 33))] + "TARGET_HARD_FLOAT && TARGET_IEEE_FLOAT" + "lpebr\\t%0,%1" + [(set_attr "op_type" "RRE")]) + +(define_insn "*abssf2_ibm" + [(set (match_operand:SF 0 "register_operand" "=f") + (abs:SF (match_operand:SF 1 "register_operand" "f"))) + (clobber (reg:CC 33))] + "TARGET_HARD_FLOAT && TARGET_IBM_FLOAT" + "lper\\t%0,%1" + [(set_attr "op_type" "RR")]) + +;; +;;- Square root instructions. +;; + +; +; sqrtdf2 instruction pattern(s). 
+;
+
+; sqdb is an RXE-format instruction (like deb/adb elsewhere in this
+; file); op_type corrected from "RRE,RSE".
+(define_insn "sqrtdf2"
+  [(set (match_operand:DF 0 "register_operand" "=f,f")
+        (sqrt:DF (match_operand:DF 1 "general_operand" "f,m")))]
+  "TARGET_HARD_FLOAT && TARGET_IEEE_FLOAT"
+  "@
+   sqdbr\\t%0,%1
+   sqdb\\t%0,%1"
+  [(set_attr "op_type" "RRE,RXE")])
+
+;
+; sqrtsf2 instruction pattern(s).
+;
+
+; sqeb is likewise RXE-format; op_type corrected from "RRE,RSE".
+(define_insn "sqrtsf2"
+  [(set (match_operand:SF 0 "register_operand" "=f,f")
+        (sqrt:SF (match_operand:SF 1 "general_operand" "f,m")))]
+  "TARGET_HARD_FLOAT && TARGET_IEEE_FLOAT"
+  "@
+   sqebr\\t%0,%1
+   sqeb\\t%0,%1"
+  [(set_attr "op_type" "RRE,RXE")])
+
+;;
+;;- One complement instructions.
+;;
+
+;
+; one_cmpldi2 instruction pattern(s).
+;
+
+(define_expand "one_cmpldi2"
+  [(parallel
+    [(set (match_operand:DI 0 "register_operand" "")
+          (xor:DI (match_operand:DI 1 "register_operand" "")
+                  (const_int -1)))
+     (clobber (reg:CC 33))])]
+  "TARGET_64BIT"
+  "")
+
+;
+; one_cmplsi2 instruction pattern(s).
+;
+
+(define_expand "one_cmplsi2"
+  [(parallel
+    [(set (match_operand:SI 0 "register_operand" "")
+          (xor:SI (match_operand:SI 1 "register_operand" "")
+                  (const_int -1)))
+     (clobber (reg:CC 33))])]
+  ""
+  "")
+
+;
+; one_cmplhi2 instruction pattern(s).
+;
+
+(define_expand "one_cmplhi2"
+  [(parallel
+    [(set (match_operand:HI 0 "register_operand" "")
+          (xor:HI (match_operand:HI 1 "register_operand" "")
+                  (const_int -1)))
+     (clobber (reg:CC 33))])]
+  ""
+  "")
+
+;
+; one_cmplqi2 instruction pattern(s).
+;
+
+(define_expand "one_cmplqi2"
+  [(parallel
+    [(set (match_operand:QI 0 "register_operand" "")
+          (xor:QI (match_operand:QI 1 "register_operand" "")
+                  (const_int -1)))
+     (clobber (reg:CC 33))])]
+  ""
+  "")
+
+
+;;
+;;- Rotate instructions.
+;;
+
+;
+; rotldi3 instruction pattern(s).
+; + +(define_insn "rotldi3" + [(set (match_operand:DI 0 "register_operand" "=d,d") + (rotate:DI (match_operand:DI 1 "register_operand" "d,d") + (match_operand:SI 2 "nonmemory_operand" "J,a")))] + "TARGET_64BIT" + "@ + rllg\\t%0,%1,%c2 + rllg\\t%0,%1,0(%2)" + [(set_attr "op_type" "RSE")]) + +; +; rotlsi3 instruction pattern(s). +; + +(define_insn "rotlsi3" + [(set (match_operand:SI 0 "register_operand" "=d,d") + (rotate:SI (match_operand:SI 1 "register_operand" "d,d") + (match_operand:SI 2 "nonmemory_operand" "J,a")))] + "TARGET_64BIT" + "@ + rll\\t%0,%1,%c2 + rll\\t%0,%1,0(%2)" + [(set_attr "op_type" "RSE")]) + + +;; +;;- Arithmetic shift instructions. +;; + +; +; ashldi3 instruction pattern(s). +; + +(define_expand "ashldi3" + [(set (match_operand:DI 0 "register_operand" "") + (ashift:DI (match_operand:DI 1 "register_operand" "") + (match_operand:SI 2 "nonmemory_operand" "")))] + "" + "") + +(define_insn "*ashldi3_31" + [(set (match_operand:DI 0 "register_operand" "=d,d") + (ashift:DI (match_operand:DI 1 "register_operand" "0,0") + (match_operand:SI 2 "nonmemory_operand" "J,a")))] + "!TARGET_64BIT" + "@ + sldl\\t%0,%c2 + sldl\\t%0,0(%2)" + [(set_attr "op_type" "RS")]) + +(define_insn "*ashldi3_64" + [(set (match_operand:DI 0 "register_operand" "=d,d") + (ashift:DI (match_operand:DI 1 "register_operand" "d,d") + (match_operand:SI 2 "nonmemory_operand" "J,a")))] + "TARGET_64BIT" + "@ + sllg\\t%0,%1,%2 + sllg\\t%0,%1,0(%2)" + [(set_attr "op_type" "RSE")]) + +; +; ashrdi3 instruction pattern(s). 
+; + +(define_expand "ashrdi3" + [(parallel + [(set (match_operand:DI 0 "register_operand" "") + (ashiftrt:DI (match_operand:DI 1 "register_operand" "") + (match_operand:SI 2 "nonmemory_operand" ""))) + (clobber (reg:CC 33))])] + "" + "") + +(define_insn "*ashrdi3_cc_31" + [(set (reg 33) + (compare (ashiftrt:DI (match_operand:DI 1 "register_operand" "0,0") + (match_operand:SI 2 "nonmemory_operand" "J,a")) + (const_int 0))) + (set (match_operand:DI 0 "register_operand" "=d,d") + (ashiftrt:DI (match_dup 1) (match_dup 2)))] + "!TARGET_64BIT && s390_match_ccmode(insn, CCSmode)" + "@ + srda\\t%0,%c2 + srda\\t%0,0(%2)" + [(set_attr "op_type" "RS")]) + +(define_insn "*ashrdi3_cconly_31" + [(set (reg 33) + (compare (ashiftrt:DI (match_operand:DI 1 "register_operand" "0,0") + (match_operand:SI 2 "nonmemory_operand" "J,a")) + (const_int 0))) + (clobber (match_scratch:DI 0 "=d,d"))] + "!TARGET_64BIT && s390_match_ccmode(insn, CCSmode)" + "@ + srda\\t%0,%c2 + srda\\t%0,0(%2)" + [(set_attr "op_type" "RS")]) + +(define_insn "*ashrdi3_31" + [(set (match_operand:DI 0 "register_operand" "=d,d") + (ashiftrt:DI (match_operand:DI 1 "register_operand" "0,0") + (match_operand:SI 2 "nonmemory_operand" "J,a"))) + (clobber (reg:CC 33))] + "!TARGET_64BIT" + "@ + srda\\t%0,%c2 + srda\\t%0,0(%2)" + [(set_attr "op_type" "RS")]) + +(define_insn "*ashrdi3_cc_64" + [(set (reg 33) + (compare (ashiftrt:DI (match_operand:DI 1 "register_operand" "d,d") + (match_operand:SI 2 "nonmemory_operand" "J,a")) + (const_int 0))) + (set (match_operand:DI 0 "register_operand" "=d,d") + (ashiftrt:DI (match_dup 1) (match_dup 2)))] + "s390_match_ccmode(insn, CCSmode) && TARGET_64BIT" + "@ + srag\\t%0,%1,%c2 + srag\\t%0,%1,0(%2)" + [(set_attr "op_type" "RSE")]) + +(define_insn "*ashrdi3_cconly_64" + [(set (reg 33) + (compare (ashiftrt:DI (match_operand:DI 1 "register_operand" "d,d") + (match_operand:SI 2 "nonmemory_operand" "J,a")) + (const_int 0))) + (clobber (match_scratch:DI 0 "=d,d"))] + "s390_match_ccmode(insn, 
CCSmode) && TARGET_64BIT" + "@ + srag\\t%0,%1,%c2 + srag\\t%0,%1,0(%2)" + [(set_attr "op_type" "RSE")]) + +(define_insn "*ashrdi3_64" + [(set (match_operand:DI 0 "register_operand" "=d,d") + (ashiftrt:DI (match_operand:DI 1 "register_operand" "d,d") + (match_operand:SI 2 "nonmemory_operand" "J,a"))) + (clobber (reg:CC 33))] + "TARGET_64BIT" + "@ + srag\\t%0,%1,%c2 + srag\\t%0,%1,0(%2)" + [(set_attr "op_type" "RSE")]) + +; +; ashlsi3 instruction pattern(s). +; + +(define_insn "ashlsi3" + [(set (match_operand:SI 0 "register_operand" "=d,d") + (ashift:SI (match_operand:SI 1 "register_operand" "0,0") + (match_operand:SI 2 "nonmemory_operand" "J,a")))] + "" + "@ + sll\\t%0,%c2 + sll\\t%0,0(%2)" + [(set_attr "op_type" "RS")]) + +; +; ashrsi3 instruction pattern(s). +; + +(define_insn "*ashrsi3_cc" + [(set (reg 33) + (compare (ashiftrt:SI (match_operand:SI 1 "register_operand" "0,0") + (match_operand:SI 2 "nonmemory_operand" "J,a")) + (const_int 0))) + (set (match_operand:SI 0 "register_operand" "=d,d") + (ashiftrt:SI (match_dup 1) (match_dup 2)))] + "s390_match_ccmode(insn, CCSmode)" + "@ + sra\\t%0,%c2 + sra\\t%0,0(%2)" + [(set_attr "op_type" "RS")]) + +(define_insn "*ashrsi3_cconly" + [(set (reg 33) + (compare (ashiftrt:SI (match_operand:SI 1 "register_operand" "0,0") + (match_operand:SI 2 "nonmemory_operand" "J,a")) + (const_int 0))) + (clobber (match_scratch:SI 0 "=d,d"))] + "s390_match_ccmode(insn, CCSmode)" + "@ + sra\\t%0,%c2 + sra\\t%0,0(%2)" + [(set_attr "op_type" "RS")]) + +(define_insn "ashrsi3" + [(set (match_operand:SI 0 "register_operand" "=d,d") + (ashiftrt:SI (match_operand:SI 1 "register_operand" "0,0") + (match_operand:SI 2 "nonmemory_operand" "J,a"))) + (clobber (reg:CC 33))] + "" + "@ + sra\\t%0,%c2 + sra\\t%0,0(%2)" + [(set_attr "op_type" "RS")]) + + +;; +;;- logical shift instructions. +;; + +; +; lshrdi3 instruction pattern(s). 
+; + +(define_expand "lshrdi3" + [(set (match_operand:DI 0 "register_operand" "") + (lshiftrt:DI (match_operand:DI 1 "register_operand" "") + (match_operand:SI 2 "nonmemory_operand" "")))] + "" + "") + +(define_insn "*lshrdi3_31" + [(set (match_operand:DI 0 "register_operand" "=d,d") + (lshiftrt:DI (match_operand:DI 1 "register_operand" "0,0") + (match_operand:SI 2 "nonmemory_operand" "J,a")))] + "!TARGET_64BIT" + "@ + srdl\\t%0,%c2 + srdl\\t%0,0(%2)" + [(set_attr "op_type" "RS,RS")]) + +(define_insn "*lshrdi3_64" + [(set (match_operand:DI 0 "register_operand" "=d,d") + (lshiftrt:DI (match_operand:DI 1 "register_operand" "d,d") + (match_operand:SI 2 "nonmemory_operand" "J,a")))] + "TARGET_64BIT" + "@ + srlg\\t%0,%1,%c2 + srlg\\t%0,%1,0(%2)" + [(set_attr "op_type" "RSE,RSE")]) + +; +; lshrsi3 instruction pattern(s). +; + +(define_insn "lshrsi3" + [(set (match_operand:SI 0 "register_operand" "=d,d") + (lshiftrt:SI (match_operand:SI 1 "register_operand" "0,0") + (match_operand:SI 2 "nonmemory_operand" "J,a")))] + "" + "@ + srl\\t%0,%c2 + srl\\t%0,0(%2)" + [(set_attr "op_type" "RS")]) + + +;; +;; Branch instruction patterns. 
+;; + +(define_expand "beq" + [(set (reg:CCZ 33) (compare:CCZ (match_dup 1) (match_dup 2))) + (set (pc) + (if_then_else (eq (reg:CCZ 33) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + "{ operands[1] = s390_compare_op0; operands[2] = s390_compare_op1; }") + +(define_expand "bne" + [(set (reg:CCZ 33) (compare:CCZ (match_dup 1) (match_dup 2))) + (set (pc) + (if_then_else (ne (reg:CCZ 33) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + "{ operands[1] = s390_compare_op0; operands[2] = s390_compare_op1; }") + +(define_expand "bgt" + [(set (reg:CCS 33) (compare:CCS (match_dup 1) (match_dup 2))) + (set (pc) + (if_then_else (gt (reg:CCS 33) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + "{ operands[1] = s390_compare_op0; operands[2] = s390_compare_op1; }") + +(define_expand "bgtu" + [(set (reg:CCU 33) (compare:CCU (match_dup 1) (match_dup 2))) + (set (pc) + (if_then_else (gtu (reg:CCU 33) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + "{ operands[1] = s390_compare_op0; operands[2] = s390_compare_op1; }") + +(define_expand "blt" + [(set (reg:CCS 33) (compare:CCS (match_dup 1) (match_dup 2))) + (set (pc) + (if_then_else (lt (reg:CCS 33) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + "{ operands[1] = s390_compare_op0; operands[2] = s390_compare_op1; }") + +(define_expand "bltu" + [(set (reg:CCU 33) (compare:CCU (match_dup 1) (match_dup 2))) + (set (pc) + (if_then_else (ltu (reg:CCU 33) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + "{ operands[1] = s390_compare_op0; operands[2] = s390_compare_op1; }") + +(define_expand "bge" + [(set (reg:CCS 33) (compare:CCS (match_dup 1) (match_dup 2))) + (set (pc) + (if_then_else (ge (reg:CCS 33) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + "{ operands[1] = s390_compare_op0; operands[2] = s390_compare_op1; }") + +(define_expand "bgeu" + [(set (reg:CCU 33) (compare:CCU (match_dup 1) 
(match_dup 2))) + (set (pc) + (if_then_else (geu (reg:CCU 33) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + "{ operands[1] = s390_compare_op0; operands[2] = s390_compare_op1; }") + +(define_expand "ble" + [(set (reg:CCS 33) (compare:CCS (match_dup 1) (match_dup 2))) + (set (pc) + (if_then_else (le (reg:CCS 33) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + "{ operands[1] = s390_compare_op0; operands[2] = s390_compare_op1; }") + +(define_expand "bleu" + [(set (reg:CCU 33) (compare:CCU (match_dup 1) (match_dup 2))) + (set (pc) + (if_then_else (leu (reg:CCU 33) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + "{ operands[1] = s390_compare_op0; operands[2] = s390_compare_op1; }") + +(define_expand "bunordered" + [(set (reg:CCS 33) (compare:CCS (match_dup 1) (match_dup 2))) + (set (pc) + (if_then_else (unordered (reg:CCS 33) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + "{ operands[1] = s390_compare_op0; operands[2] = s390_compare_op1; }") + +(define_expand "bordered" + [(set (reg:CCS 33) (compare:CCS (match_dup 1) (match_dup 2))) + (set (pc) + (if_then_else (ordered (reg:CCS 33) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + "{ operands[1] = s390_compare_op0; operands[2] = s390_compare_op1; }") + +(define_expand "buneq" + [(set (reg:CCS 33) (compare:CCS (match_dup 1) (match_dup 2))) + (set (pc) + (if_then_else (uneq (reg:CCS 33) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + "{ operands[1] = s390_compare_op0; operands[2] = s390_compare_op1; }") + +(define_expand "bungt" + [(set (reg:CCS 33) (compare:CCS (match_dup 1) (match_dup 2))) + (set (pc) + (if_then_else (ungt (reg:CCS 33) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + "{ operands[1] = s390_compare_op0; operands[2] = s390_compare_op1; }") + +(define_expand "bunlt" + [(set (reg:CCS 33) (compare:CCS (match_dup 1) (match_dup 2))) + (set (pc) + (if_then_else 
(unlt (reg:CCS 33) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + "{ operands[1] = s390_compare_op0; operands[2] = s390_compare_op1; }") + +(define_expand "bunge" + [(set (reg:CCS 33) (compare:CCS (match_dup 1) (match_dup 2))) + (set (pc) + (if_then_else (unge (reg:CCS 33) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + "{ operands[1] = s390_compare_op0; operands[2] = s390_compare_op1; }") + +(define_expand "bunle" + [(set (reg:CCS 33) (compare:CCS (match_dup 1) (match_dup 2))) + (set (pc) + (if_then_else (unle (reg:CCS 33) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + "{ operands[1] = s390_compare_op0; operands[2] = s390_compare_op1; }") + +(define_expand "bltgt" + [(set (reg:CCS 33) (compare:CCS (match_dup 1) (match_dup 2))) + (set (pc) + (if_then_else (ltgt (reg:CCS 33) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + "{ operands[1] = s390_compare_op0; operands[2] = s390_compare_op1; }") + + +;; +;;- Conditional jump instructions. 
+;; + +(define_insn "cjump" + [(set (pc) + (if_then_else + (match_operator 1 "comparison_operator" [(reg 33) (const_int 0)]) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + "* +{ + if (get_attr_length (insn) == 4) + return \"j%C1\\t%l0\"; + else if (TARGET_64BIT) + return \"jg%C1\\t%l0\"; + else + abort (); +}" + [(set_attr "op_type" "RI") + (set (attr "length") + (cond [(lt (abs (minus (pc) (match_dup 0))) (const_int 60000)) + (const_int 4) + (ne (symbol_ref "TARGET_64BIT") (const_int 0)) + (const_int 6) + (ne (symbol_ref "s390_pool_overflow") (const_int 0)) + (if_then_else (eq (symbol_ref "flag_pic") (const_int 0)) + (const_int 12) (const_int 14)) + (eq (symbol_ref "flag_pic") (const_int 0)) + (const_int 6)] (const_int 8)))]) + +(define_insn "*cjump_long" + [(set (pc) + (if_then_else + (match_operator 1 "comparison_operator" [(reg 33) (const_int 0)]) + (match_operand 0 "address_operand" "p") + (pc)))] + "" + "* +{ + if (get_attr_op_type (insn) == OP_TYPE_RR) + return \"b%C1r\\t%0\"; + else + return \"b%C1\\t%a0\"; +}" + [(set (attr "op_type") + (if_then_else (match_operand 0 "register_operand" "") + (const_string "RR") (const_string "RX"))) + (set_attr "atype" "mem")]) + + +;; +;;- Negated conditional jump instructions. 
+;; + +(define_insn "icjump" + [(set (pc) + (if_then_else + (match_operator 1 "comparison_operator" [(reg 33) (const_int 0)]) + (pc) + (label_ref (match_operand 0 "" ""))))] + "" + "* +{ + if (get_attr_length (insn) == 4) + return \"j%D1\\t%l0\"; + else if (TARGET_64BIT) + return \"jg%D1\\t%l0\"; + else + abort (); +}" + [(set_attr "op_type" "RI") + (set (attr "length") + (cond [(lt (abs (minus (pc) (match_dup 0))) (const_int 60000)) + (const_int 4) + (ne (symbol_ref "TARGET_64BIT") (const_int 0)) + (const_int 6) + (ne (symbol_ref "s390_pool_overflow") (const_int 0)) + (if_then_else (eq (symbol_ref "flag_pic") (const_int 0)) + (const_int 12) (const_int 14)) + (eq (symbol_ref "flag_pic") (const_int 0)) + (const_int 6)] (const_int 8)))]) + +(define_insn "*icjump_long" + [(set (pc) + (if_then_else + (match_operator 1 "comparison_operator" [(reg 33) (const_int 0)]) + (pc) + (match_operand 0 "address_operand" "p")))] + "" + "* +{ + if (get_attr_op_type (insn) == OP_TYPE_RR) + return \"b%D1r\\t%0\"; + else + return \"b%D1\\t%a0\"; +}" + [(set (attr "op_type") + (if_then_else (match_operand 0 "register_operand" "") + (const_string "RR") (const_string "RX"))) + (set_attr "atype" "mem")]) + + +;; +;;- Subtract one and jump if not zero. 
+;; + +;(define_expand "decrement_and_branch_on_count" +; [(use (match_operand 0 "register_operand" "")) +; (use (label_ref (match_operand 1 "" "")))] +; "" +; " +;{ +;/* if (TARGET_64BIT) +; emit_jump_insn (gen_brctdi (operands[0], operands[1])); +; else */ +; emit_jump_insn (gen_brctsi (operands[0], operands[1])); +; DONE; +;}") +; +;(define_insn "brctsi" +; [(set (pc) +; (if_then_else +; (ne (match_operand:SI 0 "register_operand" "+a") +; (const_int 1)) +; (label_ref (match_operand 1 "" "")) +; (pc))) +; (set (match_dup 0) +; (plus:SI (match_dup 0) (const_int -1)))] +; "" +; "brct\\t%0,%l1" +; [(set_attr "op_type" "RI") +; (set_attr "type" "branch")] +;) +; +;(define_insn "ibrctsi" +; [(set (pc) +; (if_then_else +; (eq (match_operand:SI 0 "register_operand" "+a") +; (const_int 1)) +; (pc) +; (label_ref (match_operand 1 "" "")))) +; (set (match_dup 0) +; (plus:SI (match_dup 0) (const_int -1)))] +; "" +; "brct\\t%0,%l1" +; [(set_attr "op_type" "RI") +; (set_attr "type" "branch")] +;) + + +;; +;;- Unconditional jump instructions. +;; + +; +; jump instruction pattern(s). +; + +(define_insn "jump" + [(set (pc) (label_ref (match_operand 0 "" "")))] + "" + "* +{ + if (get_attr_length (insn) == 4) + return \"j\\t%l0\"; + else if (TARGET_64BIT) + return \"jg\\t%l0\"; + else + abort (); +}" + [(set_attr "op_type" "RI") + (set (attr "length") + (cond [(lt (abs (minus (pc) (match_dup 0))) (const_int 60000)) + (const_int 4) + (ne (symbol_ref "TARGET_64BIT") (const_int 0)) + (const_int 6) + (eq (symbol_ref "flag_pic") (const_int 0)) + (const_int 6)] (const_int 8)))]) + +; +; indirect-jump instruction pattern(s). 
+; + +(define_insn "indirect_jump" + [(set (pc) (match_operand 0 "address_operand" "p"))] + "" + "* +{ + if (get_attr_op_type (insn) == OP_TYPE_RR) + return \"br\\t%0\"; + else + return \"b\\t%a0\"; +}" + [(set (attr "op_type") + (if_then_else (match_operand 0 "register_operand" "") + (const_string "RR") (const_string "RX"))) + (set_attr "atype" "mem")]) + +; +; casesi instruction pattern(s). +; + +(define_insn "casesi_jump" + [(set (pc) (match_operand 0 "address_operand" "p")) + (use (label_ref (match_operand 1 "" "")))] + "" + "* +{ + if (get_attr_op_type (insn) == OP_TYPE_RR) + return \"br\\t%0\"; + else + return \"b\\t%a0\"; +}" + [(set (attr "op_type") + (if_then_else (match_operand 0 "register_operand" "") + (const_string "RR") (const_string "RX"))) + (set_attr "atype" "mem")]) + +(define_expand "casesi" + [(match_operand:SI 0 "general_operand" "") + (match_operand:SI 1 "general_operand" "") + (match_operand:SI 2 "general_operand" "") + (label_ref (match_operand 3 "" "")) + (label_ref (match_operand 4 "" ""))] + "" + " +{ + rtx index = gen_reg_rtx (SImode); + rtx base = gen_reg_rtx (Pmode); + rtx target = gen_reg_rtx (Pmode); + + emit_move_insn (index, operands[0]); + emit_insn (gen_subsi3 (index, index, operands[1])); + emit_cmp_and_jump_insns (index, operands[2], GTU, NULL_RTX, SImode, 1, + operands[4]); + + if (Pmode != SImode) + index = convert_to_mode (Pmode, index, 1); + if (GET_CODE (index) != REG) + index = copy_to_mode_reg (Pmode, index); + + if (TARGET_64BIT) + emit_insn (gen_ashldi3 (index, index, GEN_INT (3))); + else + emit_insn (gen_ashlsi3 (index, index, GEN_INT (2))); + + emit_move_insn (base, gen_rtx_LABEL_REF (Pmode, operands[3])); + + index = gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, base, index)); + emit_move_insn (target, index); + + if (flag_pic) + target = gen_rtx_PLUS (Pmode, base, target); + emit_jump_insn (gen_casesi_jump (target, operands[3])); + + DONE; +}") + + +;; +;;- Jump to subroutine. 
+;; +;; + +; +; untyped call instruction pattern(s). +; + +;; Call subroutine returning any type. +(define_expand "untyped_call" + [(parallel [(call (match_operand 0 "" "") + (const_int 0)) + (match_operand 1 "" "") + (match_operand 2 "" "")])] + "" + " +{ + int i; + + emit_call_insn (gen_call (operands[0], const0_rtx, const0_rtx)); + + for (i = 0; i < XVECLEN (operands[2], 0); i++) + { + rtx set = XVECEXP (operands[2], 0, i); + emit_move_insn (SET_DEST (set), SET_SRC (set)); + } + + /* The optimizer does not know that the call sets the function value + registers we stored in the result block. We avoid problems by + claiming that all hard registers are used and clobbered at this + point. */ + emit_insn (gen_blockage ()); + + DONE; +}") + +;; UNSPEC_VOLATILE is considered to use and clobber all hard registers and +;; all of memory. This blocks insns from being moved across this point. + +(define_insn "blockage" + [(unspec_volatile [(const_int 0)] 0)] + "" + "" + [(set_attr "type" "none")]) + + + +; +; call instruction pattern(s). +; + +(define_expand "call" + [(call (match_operand 0 "" "") + (match_operand 1 "" "")) + (use (match_operand 2 "" ""))] + "" + " +{ + int plt_call = 0; + rtx insn; + + /* Direct function calls need special treatment. */ + if (GET_CODE (XEXP (operands[0], 0)) == SYMBOL_REF) + { + rtx sym = XEXP (operands[0], 0); + + /* When calling a global routine in PIC mode, we must + replace the symbol itself with the PLT stub. */ + if (flag_pic && !SYMBOL_REF_FLAG (sym)) + { + sym = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, sym), 113); + sym = gen_rtx_CONST (Pmode, sym); + + plt_call = 1; + } + + /* Unless we can use the bras(l) insn, force the + routine address into a register. */ + if (!TARGET_SMALL_EXEC && !TARGET_64BIT) + { + rtx target = gen_reg_rtx (Pmode); + emit_move_insn (target, sym); + sym = target; + } + + operands[0] = gen_rtx_MEM (QImode, sym); + } + + /* Emit insn. 
*/ + insn = emit_call_insn (gen_call_exp (operands[0], operands[1], + gen_rtx_REG (Pmode, RETURN_REGNUM))); + + /* In 31-bit, we must load the GOT register even if the + compiler doesn't know about it, because the PLT glue + code uses it. In 64-bit, this is not necessary. */ + if (plt_call && !TARGET_64BIT) + { + current_function_uses_pic_offset_table = 1; + use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx); + } + + DONE; +}") + +(define_expand "call_exp" + [(parallel [(call (match_operand 0 "" "") + (match_operand 1 "" "")) + (clobber (match_operand 2 "" ""))])] + "" + "") + +(define_insn "brasl" + [(call (mem:QI (match_operand:DI 0 "bras_sym_operand" "X")) + (match_operand:SI 1 "const_int_operand" "n")) + (clobber (match_operand:DI 2 "register_operand" "=r"))] + "TARGET_64BIT" + "brasl\\t%2,%0" + [(set_attr "op_type" "RIL") + (set_attr "type" "jsr")]) + +(define_insn "bras" + [(call (mem:QI (match_operand:SI 0 "bras_sym_operand" "X")) + (match_operand:SI 1 "const_int_operand" "n")) + (clobber (match_operand:SI 2 "register_operand" "=r"))] + "TARGET_SMALL_EXEC" + "bras\\t%2,%0" + [(set_attr "op_type" "RI") + (set_attr "type" "jsr")]) + +(define_insn "basr_64" + [(call (mem:QI (match_operand:DI 0 "register_operand" "a")) + (match_operand:SI 1 "const_int_operand" "n")) + (clobber (match_operand:DI 2 "register_operand" "=r"))] + "TARGET_64BIT" + "basr\\t%2,%0" + [(set_attr "op_type" "RR") + (set_attr "type" "jsr") + (set_attr "atype" "mem")]) + +(define_insn "basr_31" + [(call (mem:QI (match_operand:SI 0 "register_operand" "a")) + (match_operand:SI 1 "const_int_operand" "n")) + (clobber (match_operand:SI 2 "register_operand" "=r"))] + "!TARGET_64BIT" + "basr\\t%2,%0" + [(set_attr "op_type" "RR") + (set_attr "type" "jsr") + (set_attr "atype" "mem")]) + +(define_insn "bas_64" + [(call (mem:QI (match_operand:QI 0 "address_operand" "p")) + (match_operand:SI 1 "const_int_operand" "n")) + (clobber (match_operand:DI 2 "register_operand" "=r"))] + 
"TARGET_64BIT" + "bas\\t%2,%a0" + [(set_attr "op_type" "RX") + (set_attr "type" "jsr") + (set_attr "atype" "mem")]) + +(define_insn "bas_31" + [(call (mem:QI (match_operand:QI 0 "address_operand" "p")) + (match_operand:SI 1 "const_int_operand" "n")) + (clobber (match_operand:SI 2 "register_operand" "=r"))] + "!TARGET_64BIT" + "bas\\t%2,%a0" + [(set_attr "op_type" "RX") + (set_attr "type" "jsr") + (set_attr "atype" "mem")]) + + +; +; call_value instruction pattern(s). +; + +(define_expand "call_value" + [(set (match_operand 0 "" "") + (call (match_operand 1 "" "") + (match_operand 2 "" ""))) + (use (match_operand 3 "" ""))] + "" + " +{ + int plt_call = 0; + rtx insn; + + /* Direct function calls need special treatment. */ + if (GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF) + { + rtx sym = XEXP (operands[1], 0); + + /* When calling a global routine in PIC mode, we must + replace the symbol itself with the PLT stub. */ + if (flag_pic && !SYMBOL_REF_FLAG (sym)) + { + sym = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, sym), 113); + sym = gen_rtx_CONST (Pmode, sym); + + plt_call = 1; + } + + /* Unless we can use the bras(l) insn, force the + routine address into a register. */ + if (!TARGET_SMALL_EXEC && !TARGET_64BIT) + { + rtx target = gen_reg_rtx (Pmode); + emit_move_insn (target, sym); + sym = target; + } + + operands[1] = gen_rtx_MEM (QImode, sym); + } + + /* Emit insn. */ + insn = emit_call_insn ( + gen_call_value_exp (operands[0], operands[1], operands[2], + gen_rtx_REG (Pmode, RETURN_REGNUM))); + + /* In 31-bit, we must load the GOT register even if the + compiler doesn't know about it, because the PLT glue + code uses it. In 64-bit, this is not necessary. 
*/ + if (plt_call && !TARGET_64BIT) + { + current_function_uses_pic_offset_table = 1; + use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx); + } + + DONE; +}") + +(define_expand "call_value_exp" + [(parallel [(set (match_operand 0 "" "") + (call (match_operand 1 "" "") + (match_operand 2 "" ""))) + (clobber (match_operand 3 "" ""))])] + "" + "") + +(define_insn "brasl_r" + [(set (match_operand 0 "register_operand" "=df") + (call (mem:QI (match_operand:DI 1 "bras_sym_operand" "X")) + (match_operand:SI 2 "const_int_operand" "n"))) + (clobber (match_operand:DI 3 "register_operand" "=r"))] + "TARGET_64BIT" + "brasl\\t%3,%1" + [(set_attr "op_type" "RIL") + (set_attr "type" "jsr")]) + +(define_insn "bras_r" + [(set (match_operand 0 "register_operand" "=df") + (call (mem:QI (match_operand:SI 1 "bras_sym_operand" "X")) + (match_operand:SI 2 "const_int_operand" "n"))) + (clobber (match_operand:SI 3 "register_operand" "=r"))] + "TARGET_SMALL_EXEC" + "bras\\t%3,%1" + [(set_attr "op_type" "RI") + (set_attr "type" "jsr")]) + +(define_insn "basr_r_64" + [(set (match_operand 0 "register_operand" "=df") + (call (mem:QI (match_operand:DI 1 "register_operand" "a")) + (match_operand:SI 2 "const_int_operand" "n"))) + (clobber (match_operand:DI 3 "register_operand" "=r"))] + "TARGET_64BIT" + "basr\\t%3,%1" + [(set_attr "op_type" "RR") + (set_attr "type" "jsr")]) + +(define_insn "basr_r_31" + [(set (match_operand 0 "register_operand" "=df") + (call (mem:QI (match_operand:SI 1 "register_operand" "a")) + (match_operand:SI 2 "const_int_operand" "n"))) + (clobber (match_operand:SI 3 "register_operand" "=r"))] + "!TARGET_64BIT" + "basr\\t%3,%1" + [(set_attr "op_type" "RR") + (set_attr "type" "jsr") + (set_attr "atype" "mem")]) + +(define_insn "bas_r_64" + [(set (match_operand 0 "register_operand" "=df") + (call (mem:QI (match_operand:QI 1 "address_operand" "p")) + (match_operand:SI 2 "const_int_operand" "n"))) + (clobber (match_operand:DI 3 "register_operand" "=r"))] + 
"TARGET_64BIT" + "bas\\t%3,%a1" + [(set_attr "op_type" "RX") + (set_attr "type" "jsr") + (set_attr "atype" "mem")]) + +(define_insn "bas_r_31" + [(set (match_operand 0 "register_operand" "=df") + (call (mem:QI (match_operand:QI 1 "address_operand" "p")) + (match_operand:SI 2 "const_int_operand" "n"))) + (clobber (match_operand:SI 3 "register_operand" "=r"))] + "!TARGET_64BIT" + "bas\\t%3,%a1" + [(set_attr "op_type" "RX") + (set_attr "type" "jsr") + (set_attr "atype" "mem")]) + + +;; +;;- Miscellaneous instructions. +;; + +; +; allocate stack instruction pattern(s). +; + +(define_expand "allocate_stack" + [(set (reg 15) + (plus (reg 15) (match_operand 1 "general_operand" ""))) + (set (match_operand 0 "general_operand" "") + (reg 15))] + "" + " +{ + rtx stack = gen_rtx (REG, Pmode, STACK_POINTER_REGNUM); + rtx chain = gen_rtx (MEM, Pmode, stack); + rtx temp = gen_reg_rtx (Pmode); + + emit_move_insn (temp, chain); + + if (TARGET_64BIT) + emit_insn (gen_adddi3 (stack, stack, negate_rtx (Pmode, operands[1]))); + else + emit_insn (gen_addsi3 (stack, stack, negate_rtx (Pmode, operands[1]))); + + emit_move_insn (chain, temp); + + emit_move_insn (operands[0], virtual_stack_dynamic_rtx); + DONE; +}") + + +; +; setjmp/longjmp instruction pattern(s). 
+; + +(define_expand "builtin_setjmp_setup" + [(unspec [(match_operand 0 "register_operand" "a")] 1)] + "" + " +{ + rtx base = gen_rtx_MEM (Pmode, plus_constant (operands[0], 4 * GET_MODE_SIZE (Pmode))); + rtx basereg = gen_rtx_REG (Pmode, BASE_REGISTER); + + emit_move_insn (base, basereg); + DONE; +}") + +(define_expand "builtin_setjmp_receiver" + [(unspec_volatile [(label_ref (match_operand 0 "" ""))] 2)] + "flag_pic" + " +{ + rtx gotreg = gen_rtx_REG (Pmode, PIC_OFFSET_TABLE_REGNUM); + rtx got = gen_rtx_SYMBOL_REF (Pmode, \"_GLOBAL_OFFSET_TABLE_\"); + SYMBOL_REF_FLAG (got) = 1; + + emit_move_insn (gotreg, got); + emit_insn (gen_rtx_USE (VOIDmode, gotreg)); + DONE; +}") + +(define_expand "builtin_longjmp" + [(unspec_volatile [(match_operand 0 "register_operand" "r")] 3)] + "" + " +{ + /* The elements of the buffer are, in order: */ + rtx fp = gen_rtx_MEM (Pmode, operands[0]); + rtx lab = gen_rtx_MEM (Pmode, plus_constant (operands[0], GET_MODE_SIZE (Pmode))); + rtx stack = gen_rtx_MEM (Pmode, plus_constant (operands[0], 2 * GET_MODE_SIZE (Pmode))); + rtx base = gen_rtx_MEM (Pmode, plus_constant (operands[0], 4 * GET_MODE_SIZE (Pmode))); + rtx basereg = gen_rtx_REG (Pmode, BASE_REGISTER); + rtx jmp = gen_rtx_REG (Pmode, 14); + + emit_move_insn (jmp, lab); + emit_move_insn (basereg, base); + emit_stack_restore (SAVE_NONLOCAL, stack, NULL_RTX); + emit_move_insn (hard_frame_pointer_rtx, fp); + + emit_insn (gen_rtx_USE (VOIDmode, hard_frame_pointer_rtx)); + emit_insn (gen_rtx_USE (VOIDmode, stack_pointer_rtx)); + emit_insn (gen_rtx_USE (VOIDmode, basereg)); + emit_indirect_jump (jmp); + DONE; +}") + + +;; These patterns say how to save and restore the stack pointer. We need not +;; save the stack pointer at function level since we are careful to +;; preserve the backchain. At block level, we have to restore the backchain +;; when we restore the stack pointer. +;; +;; For nonlocal gotos, we must save both the stack pointer and its +;; backchain and restore both. 
Note that in the nonlocal case, the +;; save area is a memory location. + +(define_expand "save_stack_function" + [(match_operand 0 "general_operand" "") + (match_operand 1 "general_operand" "")] + "" + "DONE;") + +(define_expand "restore_stack_function" + [(match_operand 0 "general_operand" "") + (match_operand 1 "general_operand" "")] + "" + "DONE;") + +(define_expand "restore_stack_block" + [(use (match_operand 0 "register_operand" "")) + (set (match_dup 2) (match_dup 3)) + (set (match_dup 0) (match_operand 1 "register_operand" "")) + (set (match_dup 3) (match_dup 2))] + "" + " +{ + operands[2] = gen_reg_rtx (Pmode); + operands[3] = gen_rtx_MEM (Pmode, operands[0]); +}") + +(define_expand "save_stack_nonlocal" + [(match_operand 0 "memory_operand" "") + (match_operand 1 "register_operand" "")] + "" + " +{ + rtx temp = gen_reg_rtx (Pmode); + + /* Copy the backchain to the first word, sp to the second. */ + emit_move_insn (temp, gen_rtx_MEM (Pmode, operands[1])); + emit_move_insn (operand_subword (operands[0], 0, 0, + TARGET_64BIT ? TImode : DImode), + temp); + emit_move_insn (operand_subword (operands[0], 1, 0, + TARGET_64BIT ? TImode : DImode), + operands[1]); + DONE; +}") + +(define_expand "restore_stack_nonlocal" + [(match_operand 0 "register_operand" "") + (match_operand 1 "memory_operand" "")] + "" + " +{ + rtx temp = gen_reg_rtx (Pmode); + + /* Restore the backchain from the first word, sp from the second. */ + emit_move_insn (temp, + operand_subword (operands[1], 0, 0, + TARGET_64BIT ? TImode : DImode)); + emit_move_insn (operands[0], + operand_subword (operands[1], 1, 0, + TARGET_64BIT ? TImode : DImode)); + emit_move_insn (gen_rtx_MEM (Pmode, operands[0]), temp); + DONE; +}") + + +; +; nop instruction pattern(s). +; + +(define_insn "nop" + [(const_int 0)] + "" + "lr\\t0,0" + [(set_attr "op_type" "RR")]) + + +; +; Special literal pool access instruction pattern(s). 
+; + +(define_insn "consttable_qi" + [(unspec_volatile [(match_operand:QI 0 "consttable_operand" "X")] 200)] + "" + "* +{ + assemble_integer (operands[0], 1, BITS_PER_UNIT, 1); + return \"\"; +}" + [(set_attr "op_type" "NN") + (set_attr "length" "1")]) + +(define_insn "consttable_hi" + [(unspec_volatile [(match_operand:HI 0 "consttable_operand" "X")] 201)] + "" + "* +{ + assemble_integer (operands[0], 2, 2*BITS_PER_UNIT, 1); + return \"\"; +}" + [(set_attr "op_type" "NN") + (set_attr "length" "2")]) + +(define_insn "consttable_si" + [(unspec_volatile [(match_operand:SI 0 "consttable_operand" "X")] 202)] + "" + "* +{ + if (!TARGET_64BIT && flag_pic && SYMBOLIC_CONST (operands[0])) + return \".long\\t%0\"; + + assemble_integer (operands[0], 4, 4*BITS_PER_UNIT, 1); + return \"\"; +}" + [(set_attr "op_type" "NN") + (set_attr "length" "4")]) + +(define_insn "consttable_di" + [(unspec_volatile [(match_operand:DI 0 "consttable_operand" "X")] 203)] + "" + "* +{ + assemble_integer (operands[0], 8, 8*BITS_PER_UNIT, 1); + return \"\"; +}" + [(set_attr "op_type" "NN") + (set_attr "length" "8")]) + +(define_insn "consttable_sf" + [(unspec_volatile [(match_operand:SF 0 "consttable_operand" "X")] 204)] + "" + "* +{ + REAL_VALUE_TYPE r; + + if (GET_CODE (operands[0]) != CONST_DOUBLE) + abort (); + + REAL_VALUE_FROM_CONST_DOUBLE (r, operands[0]); + assemble_real (r, SFmode, 4*BITS_PER_UNIT); + return \"\"; +}" + [(set_attr "op_type" "NN") + (set_attr "length" "4")]) + +(define_insn "consttable_df" + [(unspec_volatile [(match_operand:DF 0 "consttable_operand" "X")] 205)] + "" + "* +{ + REAL_VALUE_TYPE r; + + if (GET_CODE (operands[0]) != CONST_DOUBLE) + abort (); + + REAL_VALUE_FROM_CONST_DOUBLE (r, operands[0]); + assemble_real (r, DFmode, 8*BITS_PER_UNIT); + return \"\"; +}" + [(set_attr "op_type" "NN") + (set_attr "length" "8")]) + +(define_insn "pool_start_31" + [(unspec_volatile [(const_int 0)] 206)] + "!TARGET_64BIT" + ".align\\t4" + [(set_attr "op_type" "NN") + (set_attr 
"length" "2")]) + +(define_insn "pool_end_31" + [(unspec_volatile [(const_int 0)] 207)] + "!TARGET_64BIT" + ".align\\t2" + [(set_attr "op_type" "NN") + (set_attr "length" "2")]) + +(define_insn "pool_start_64" + [(unspec_volatile [(const_int 0)] 206)] + "TARGET_64BIT" + ".section\\t.rodata\;.align\\t8" + [(set_attr "op_type" "NN") + (set_attr "length" "0")]) + +(define_insn "pool_end_64" + [(unspec_volatile [(const_int 0)] 207)] + "TARGET_64BIT" + ".previous" + [(set_attr "op_type" "NN") + (set_attr "length" "0")]) + +(define_insn "reload_base" + [(set (match_operand:SI 0 "register_operand" "=a") + (unspec:SI [(label_ref (match_operand 1 "" ""))] 210))] + "!TARGET_64BIT" + "basr\\t%0,0\;la\\t%0,%1-.(%0)" + [(set_attr "op_type" "NN") + (set_attr "type" "la") + (set_attr "length" "6")]) + +(define_insn "reload_base2" + [(set (match_operand:SI 0 "register_operand" "=a") + (unspec:SI [(label_ref (match_operand 1 "" ""))] 211))] + "!TARGET_64BIT" + "la\\t%0,%1-.(%0)" + [(set_attr "op_type" "NN") + (set_attr "type" "la") + (set_attr "length" "4")]) + + +;; +;; Insns related to generating the function prologue and epilogue. 
+;; + + +(define_expand "prologue" + [(use (const_int 0))] + "" + " +{ + s390_emit_prologue (); + DONE; +}") + +(define_expand "epilogue" + [(use (const_int 1))] + "" + " +{ + s390_emit_epilogue (); + DONE; +}") + + +(define_insn "*return_si" + [(return) + (use (match_operand:SI 0 "register_operand" "a"))] + "!TARGET_64BIT" + "br\\t%0" + [(set_attr "op_type" "RR") + (set_attr "type" "jsr") + (set_attr "atype" "mem")]) + +(define_insn "*return_di" + [(return) + (use (match_operand:DI 0 "register_operand" "a"))] + "TARGET_64BIT" + "br\\t%0" + [(set_attr "op_type" "RR") + (set_attr "type" "jsr") + (set_attr "atype" "mem")]) + + +(define_insn "lit" + [(set (reg 13) (pc)) + (unspec_volatile [(const_int 0)] 200)] + "" + "* +{ + s390_output_constant_pool (asm_out_file); + return \"\"; +}" + [(set_attr "op_type" "NN") + (set_attr "type" "integer")]) + + +;; +;; Peephole optimization patterns. +;; + +(define_peephole + [(set (match_operand:SI 0 "memory_operand" "m") + (match_operand:SI 1 "register_operand" "d")) + (set (match_dup 1) + (match_dup 0))] + "" + "st\\t%1,%0") + +(define_peephole + [(set (match_operand:SI 0 "memory_operand" "m") + (match_operand:SI 1 "register_operand" "d")) + (set (match_dup 0) + (match_dup 1))] + "" + "st\\t%1,%0") + +(define_peephole + [(set (match_operand:SI 0 "register_operand" "") + (match_operand:SI 1 "register_operand" "")) + (parallel + [(set (match_dup 0) + (plus:SI (match_dup 0) + (match_operand:SI 2 "immediate_operand" ""))) + (clobber (reg:CC 33))])] + "(REGNO (operands[0]) == STACK_POINTER_REGNUM || + REGNO (operands[1]) == STACK_POINTER_REGNUM || + REGNO (operands[0]) == BASE_REGISTER || + REGNO (operands[1]) == BASE_REGISTER) && + INTVAL (operands[2]) > 0 && INTVAL (operands[2]) < 4096" + "la\\t%0,%c2(%1)") + +; +; peepholes for fast char instructions +; + +;(define_peephole +; [(set (match_operand:QI 0 "register_operand" "d") +; (match_operand:QI 1 "s_operand" "Q")) +; (set (match_operand:SI 2 "register_operand" "0") +; 
(zero_extend:SI (match_dup 0)))] +; "REGNO(operands[0]) == REGNO(operands[2])" +; "icm\\t%0,8,%1\;srl\\t%0,24") + +;(define_peephole +; [(set (match_operand:QI 0 "register_operand" "d") +; (match_operand:QI 1 "s_operand" "Q")) +; (set (match_operand:SI 2 "register_operand" "0") +; (sign_extend:SI (match_dup 0)))] +; "REGNO(operands[0]) == REGNO(operands[2])" +; "icm\\t%0,8,%1\;sra\\t%0,24") + +(define_peephole + [(set (match_operand:QI 0 "register_operand" "d") + (match_operand:QI 1 "immediate_operand" "J")) + (set (match_operand:SI 2 "register_operand" "0" ) + (sign_extend:SI (match_dup 0) ) )] + "REGNO(operands[0]) == REGNO(operands[2])" + "lhi\\t%0,%h1") + +; +; peepholes for fast short instructions +; + +;(define_peephole +; [(set (match_operand:HI 0 "register_operand" "d") +; (match_operand:HI 1 "s_operand" "Q")) +; (set (match_operand:SI 2 "register_operand" "0" ) +; (zero_extend:SI (match_dup 0)))] +; "REGNO(operands[0]) == REGNO(operands[2])" +; "icm\\t%0,12,%1\;srl\\t%0,16") + +(define_peephole + [(set (match_operand:HI 0 "register_operand" "d") + (match_operand:HI 1 "memory_operand" "m")) + (set (match_operand:SI 2 "register_operand" "0" ) + (sign_extend:SI (match_dup 0)))] + "REGNO(operands[0]) == REGNO(operands[2])" + "lh\\t%0,%1") + +(define_peephole + [(set (match_operand:HI 0 "register_operand" "d") + (match_operand:HI 1 "immediate_operand" "K")) + (set (match_operand:SI 2 "register_operand" "0" ) + (sign_extend:SI (match_dup 0) ) )] + "REGNO(operands[0]) == REGNO(operands[2])" + "lhi\\t%0,%h1") + +; +; peepholes for divide instructions +; + +(define_peephole + [(set (match_operand:DI 0 "register_operand" "d") + (match_operand:DI 1 "memory_operand" "m")) + (set (match_dup 0) + (lshiftrt:DI (match_dup 0) + (match_operand:SI 2 "immediate_operand" "J"))) + (set (match_dup 0) + (div:SI (match_dup 0) + (match_operand:SI 3 "nonimmediate_operand" "g"))) + (set (match_dup 1) + (match_dup 0))] + "" + "* +{ + output_asm_insn (\"l\\t%0,%1\", operands); + 
output_asm_insn (\"srdl\\t%0,%b2\", operands); + + if (REG_P (operands[3])) + output_asm_insn (\"dr\\t%0,%3\", operands); + else + output_asm_insn (\"d\\t%0,%3\", operands); + + return \"st\\t%N0,%N1\"; +}") + +(define_peephole + [(set (match_operand:DI 0 "register_operand" "d") + (match_operand:DI 1 "memory_operand" "m")) + (set (match_dup 0) + (lshiftrt:DI (match_dup 0) + (match_operand:SI 2 "immediate_operand" "J"))) + (set (match_dup 0) + (mod:SI (match_dup 0) + (match_operand:SI 3 "nonimmediate_operand" "g"))) + (set (match_dup 1) + (match_dup 0))] + "" + "* +{ + output_asm_insn (\"l\\t%0,%1\", operands); + output_asm_insn (\"srdl\\t%0,%b2\", operands); + + if (REG_P (operands[3])) + output_asm_insn (\"dr\\t%0,%3\", operands); + else + output_asm_insn (\"d\\t%0,%3\", operands); + + return \"st\\t%0,%1\"; +}") + diff --git a/contrib/gcc/config/s390/s390x.h b/contrib/gcc/config/s390/s390x.h new file mode 100644 index 0000000..c79acf5 --- /dev/null +++ b/contrib/gcc/config/s390/s390x.h @@ -0,0 +1,27 @@ +/* Definitions of target machine for IBM zSeries 64-bit + Copyright (C) 2002 Free Software Foundation, Inc. + Contributed by Hartmut Penner (hpenner@de.ibm.com) and + Ulrich Weigand (uweigand@de.ibm.com). +This file is part of GNU CC. + +GNU CC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2, or (at your option) +any later version. + +GNU CC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GNU CC; see the file COPYING. If not, write to +the Free Software Foundation, 59 Temple Place - Suite 330, +Boston, MA 02111-1307, USA. 
*/ + +#ifndef _S390X_H +#define _S390X_H + +#define DEFAULT_TARGET_64BIT + +#endif diff --git a/contrib/gcc/config/s390/t-linux b/contrib/gcc/config/s390/t-linux new file mode 100644 index 0000000..bc8d519 --- /dev/null +++ b/contrib/gcc/config/s390/t-linux @@ -0,0 +1,7 @@ +# The crtbegin and crtend must not depend on a small GOT +CRTSTUFF_T_CFLAGS = -O2 -fPIC +CRTSTUFF_T_CFLAGS_S = -O2 -fPIC + +# Compile libgcc2.a with pic. +TARGET_LIBGCC2_CFLAGS = -fPIC -include $(srcdir)/config/s390/fixdfdi.h + diff --git a/contrib/gcc/config/s390/t-linux64 b/contrib/gcc/config/s390/t-linux64 new file mode 100644 index 0000000..d5a9278 --- /dev/null +++ b/contrib/gcc/config/s390/t-linux64 @@ -0,0 +1,3 @@ +# Override t-slibgcc-elf-ver to export some libgcc symbols with +# the symbol versions that glibc used. +SHLIB_MAPFILES = $(srcdir)/libgcc-std.ver $(srcdir)/config/s390/libgcc-glibc.ver |