path: root/gnu/usr.bin/cc
author     phk <phk@FreeBSD.org>  1994-08-08 04:45:29 +0000
committer  phk <phk@FreeBSD.org>  1994-08-08 04:45:29 +0000
commit     65124546420002b47db351951fc327c02c55218d (patch)
tree       8666c6a8a3c824aea8b85e2ed913833228b4ae86 /gnu/usr.bin/cc
parent     1c1ce074d822b9bf8f172a7ef77173812bb0f5fa (diff)
parent     4c5e434fdaef40513ed9bc2059aa202f7e1b9929 (diff)
This commit was generated by cvs2svn to compensate for changes in r1933,
which included commits to RCS files with non-trunk default branches.
Diffstat (limited to 'gnu/usr.bin/cc')
-rw-r--r--  gnu/usr.bin/cc/Makefile.inc          3
-rw-r--r--  gnu/usr.bin/cc/README               16
-rw-r--r--  gnu/usr.bin/cc/legal/gen-protos.c  155
-rw-r--r--  gnu/usr.bin/cc/legal/md           5295
4 files changed, 5468 insertions(+), 1 deletion(-)
diff --git a/gnu/usr.bin/cc/Makefile.inc b/gnu/usr.bin/cc/Makefile.inc
index 21d3580..95525cb 100644
--- a/gnu/usr.bin/cc/Makefile.inc
+++ b/gnu/usr.bin/cc/Makefile.inc
@@ -3,8 +3,9 @@
 #
 CFLAGS+= -I${.CURDIR} -I${.CURDIR}/../include
+CFLAGS+= -Dbsd4_4
 CFLAGS+= -DGCC_INCLUDE_DIR=\"FOO\"
 CFLAGS+= -DDEFAULT_TARGET_VERSION=\"2.6.0\"
-CFLAGS+= -DDEFAULT_TARGET_MACHINE=\"i386-unknown-freebsd\"
+CFLAGS+= -DDEFAULT_TARGET_MACHINE=\"i386--freebsd\"
 CFLAGS+= -DMD_EXEC_PREFIX=\"/usr/libexec/\"
 CFLAGS+= -DSTANDARD_STARTFILE_PREFIX=\"/usr/lib\"
diff --git a/gnu/usr.bin/cc/README b/gnu/usr.bin/cc/README
new file mode 100644
index 0000000..01303ea
--- /dev/null
+++ b/gnu/usr.bin/cc/README
@@ -0,0 +1,16 @@
+
+$FreeBSD$
+
+This directory contains gcc in a form that uses "bmake" makefiles.
+This is not the place to start if you want to hack gcc.
+We have included everything here that is part of the gcc source
+code, but still, don't use this as a hacking base.
+
+If you suspect a problem with gcc, or just want to hack it in general,
+get a complete gcc-X.Y.Z.tar.gz from somewhere, and use that.
+
+Please look in the directory src/gnu/gnu2bmake to find the tools
+to generate these files.
+
+Thank you.
+
diff --git a/gnu/usr.bin/cc/legal/gen-protos.c b/gnu/usr.bin/cc/legal/gen-protos.c
new file mode 100644
index 0000000..094ce2a
--- /dev/null
+++ b/gnu/usr.bin/cc/legal/gen-protos.c
@@ -0,0 +1,155 @@
+/* gen-protos.c - massages a list of prototypes, for use by fixproto.
+ Copyright (C) 1993, 1994 Free Software Foundation, Inc.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 2, or (at your option) any
+later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+#include <stdio.h>
+#include <ctype.h>
+#include "hconfig.h"
+#include "scan.h"
+
+#define HASH_SIZE 2503 /* a prime */
+
+int hash_tab[HASH_SIZE];
+int verbose = 0;
+
+sstring linebuf;
+
+/* Avoid error if config defines abort as fancy_abort.
+ It's not worth "really" implementing this because ordinary
+ compiler users never run fix-header. */
+
+void
+fancy_abort ()
+{
+ abort ();
+}
+
+int
+main (argc, argv)
+ int argc;
+ char** argv;
+{
+ FILE *inf = stdin;
+ FILE *outf = stdout;
+ int next_index = 0;
+ int i, i0;
+
+ fprintf (outf, "struct fn_decl std_protos[] = {\n");
+
+ for (;;)
+ {
+ int c = skip_spaces (inf, ' ');
+ int param_nesting = 1;
+ char *param_start, *param_end, *decl_start,
+ *name_start, *name_end;
+ register char *ptr;
+ if (c == EOF)
+ break;
+ linebuf.ptr = linebuf.base;
+ ungetc (c, inf);
+ c = read_upto (inf, &linebuf, '\n');
+ if (linebuf.base[0] == '#') /* skip cpp command */
+ continue;
+ if (linebuf.base[0] == '\0') /* skip empty line */
+ continue;
+
+ ptr = linebuf.ptr - 1;
+ while (*ptr == ' ' || *ptr == '\t') ptr--;
+ if (*ptr-- != ';')
+ {
+ fprintf (stderr, "Funny input line: %s\n", linebuf.base);
+ continue;
+ }
+ while (*ptr == ' ' || *ptr == '\t') ptr--;
+ if (*ptr != ')')
+ {
+ fprintf (stderr, "Funny input line: %s\n", linebuf.base);
+ continue;
+ }
+ param_end = ptr;
+ for (;;)
+ {
+ int c = *--ptr;
+ if (c == '(' && --param_nesting == 0)
+ break;
+ else if (c == ')')
+ param_nesting++;
+ }
+ param_start = ptr+1;
+
+ ptr--;
+ while (*ptr == ' ' || *ptr == '\t') ptr--;
+
+ if (!isalnum (*ptr))
+ {
+ if (verbose)
+ fprintf (stderr, "%s: Can't handle this complex prototype: %s\n",
+ argv[0], linebuf.base);
+ continue;
+ }
+ name_end = ptr+1;
+
+ while (isalnum (*ptr) || *ptr == '_') --ptr;
+ name_start = ptr+1;
+ while (*ptr == ' ' || *ptr == '\t') ptr--;
+ ptr[1] = 0;
+ *name_end = 0;
+ *param_end = 0;
+ *name_end = 0;
+
+ decl_start = linebuf.base;
+ if (strncmp (decl_start, "typedef ", 8) == 0)
+ continue;
+ if (strncmp (decl_start, "extern ", 7) == 0)
+ decl_start += 7;
+
+
+ /* NOTE: If you edit this,
+ also edit lookup_std_proto in fix-header.c !! */
+ i = hash (name_start) % HASH_SIZE;
+ i0 = i;
+ if (hash_tab[i] != 0)
+ {
+ for (;;)
+ {
+ i = (i+1) % HASH_SIZE;
+ if (i == i0)
+ abort ();
+ if (hash_tab[i] == 0)
+ break;
+ }
+ }
+ hash_tab[i] = next_index;
+
+ fprintf (outf, " {\"%s\", \"%s\", \"%s\" },\n",
+ name_start, decl_start, param_start);
+
+ next_index++;
+
+ if (c == EOF)
+ break;
+ }
+ fprintf (outf, "{0, 0, 0}\n};\n");
+
+
+ fprintf (outf, "#define HASH_SIZE %d\n", HASH_SIZE);
+ fprintf (outf, "short hash_tab[HASH_SIZE] = {\n");
+ for (i = 0; i < HASH_SIZE; i++)
+ fprintf (outf, " %d,\n", hash_tab[i]);
+ fprintf (outf, "};\n");
+
+ return 0;
+}
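
As a rough, hypothetical illustration of what gen-protos emits (the input
line, the field names and the struct stand-in below are made up for this
sketch, not taken from a real run), each input prototype is split into the
function name, the text preceding the name with "extern " stripped, and the
text between the parentheses:

/* Hypothetical, abridged sketch of the generated table for the single
   input line "extern int fseek (FILE *, long, int);".  The real struct
   fn_decl comes from the accompanying headers; a stand-in is declared
   here so the fragment compiles on its own.  */
struct fn_decl { const char *name; const char *decl; const char *params; };

struct fn_decl std_protos[] = {
  {"fseek", "int", "FILE *, long, int" },
  {0, 0, 0}
};
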
diff --git a/gnu/usr.bin/cc/legal/md b/gnu/usr.bin/cc/legal/md
new file mode 100644
index 0000000..3e43fb0
--- /dev/null
+++ b/gnu/usr.bin/cc/legal/md
@@ -0,0 +1,5295 @@
+;; GCC machine description for Intel 80386.
+;; Copyright (C) 1988, 1994 Free Software Foundation, Inc.
+;; Mostly by William Schelter.
+
+;; This file is part of GNU CC.
+
+;; GNU CC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+
+;; GNU CC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GNU CC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+
+
+;; The original PO technology requires these to be ordered by speed,
+;; so that assigner will pick the fastest.
+
+;; See file "rtl.def" for documentation on define_insn, match_*, et al.
+
+;; Macro #define NOTICE_UPDATE_CC in file i386.h handles condition code
+;; updates for most instructions.
+
+;; Macro REG_CLASS_FROM_LETTER in file i386.h defines the register
+;; constraint letters.
+
+;; the special asm out single letter directives following a '%' are:
+;; 'z' mov%z1 would be movl, movw, or movb depending on the mode of
+;; operands[1].
+;; 'L' Print the opcode suffix for a 32-bit integer opcode.
+;; 'W' Print the opcode suffix for a 16-bit integer opcode.
+;; 'B' Print the opcode suffix for an 8-bit integer opcode.
+;; 'S' Print the opcode suffix for a 32-bit float opcode.
+;; 'Q' Print the opcode suffix for a 64-bit float opcode.
+
+;; 'b' Print the QImode name of the register for the indicated operand.
+;; %b0 would print %al if operands[0] is reg 0.
+;; 'w' Likewise, print the HImode name of the register.
+;; 'k' Likewise, print the SImode name of the register.
+;; 'h' Print the QImode name for a "high" register, either ah, bh, ch or dh.
+;; 'y' Print "st(0)" instead of "st" as a register.
+;; 'T' Print the opcode suffix for an 80-bit extended real XFmode float opcode.
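
As an informal aside, the '%z' suffix described above is simply a function
of the operand's mode, and the register directives follow the same idea:
for reg 0, '%b0', '%w0' and '%k0' print %al, %ax and %eax.  The sketch
below (plain C, hypothetical names, not GCC code) mirrors the suffix
mapping:

/* Hypothetical sketch of the '%z' suffix selection; not GCC code.  */
#include <stdio.h>

enum machine_mode { QImode, HImode, SImode };

static char
suffix_for_mode (enum machine_mode mode)
{
  switch (mode)
    {
    case QImode: return 'b';   /* 8-bit:  e.g. movb */
    case HImode: return 'w';   /* 16-bit: e.g. movw */
    case SImode: return 'l';   /* 32-bit: e.g. movl */
    }
  return '?';
}

int
main (void)
{
  printf ("mov%c\n", suffix_for_mode (HImode));   /* prints "movw" */
  return 0;
}
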
+
+;; UNSPEC usage:
+;; 0 This is a `scas' operation. The mode of the UNSPEC is always SImode.
+;; operand 0 is the memory address to scan.
+;; operand 1 is a register containing the value to scan for. The mode
+;; of the scas opcode will be the same as the mode of this operand.
+;; operand 2 is the known alignment of operand 0.
+;; 1 This is a `sin' operation. The mode of the UNSPEC is MODE_FLOAT.
+;; operand 0 is the argument for `sin'.
+;; 2 This is a `cos' operation. The mode of the UNSPEC is MODE_FLOAT.
+;; operand 0 is the argument for `cos'.
+
+;; "movl MEM,REG / testl REG,REG" is faster on a 486 than "cmpl $0,MEM".
+;; But restricting MEM here would mean that gcc could not remove a redundant
+;; test in cases like "incl MEM / je TARGET".
+;;
+;; We don't want to allow a constant operand for test insns because
+;; (set (cc0) (const_int foo)) has no mode information. Such insns will
+;; be folded while optimizing anyway.
+
+;; All test insns have expanders that save the operands away without
+;; actually generating RTL. The bCOND or sCOND (emitted immediately
+;; after the tstM or cmp) will actually emit the tstM or cmpM.
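
A minimal stand-alone sketch of that "save the operands now, emit the
compare later" handshake follows; only the idea and the i386_compare_gen /
i386_compare_op0 / i386_compare_op1 names come from this file, everything
else is a made-up stand-in:

/* Hypothetical sketch, not GCC internals.  The cmpM/tstM expander only
   records what to compare; the later branch expander calls the saved
   generator and then emits the conditional jump.  */
#include <stdio.h>

typedef int operand;                            /* stand-in for rtx */
static void (*compare_gen) (operand, operand);  /* like i386_compare_gen */
static operand compare_op0, compare_op1;

static void
emit_cmpl (operand a, operand b)
{
  printf ("cmpl op%d, op%d\n", b, a);   /* like AS2 (cmp%L0,%1,%0) */
}

static void
expand_cmpsi (operand a, operand b)     /* like the "cmpsi" expander */
{
  compare_gen = emit_cmpl;              /* remember how to compare... */
  compare_op0 = a;
  compare_op1 = b;                      /* ...but emit nothing yet    */
}

static void
expand_beq (void)                       /* like a bCOND expander */
{
  compare_gen (compare_op0, compare_op1);   /* now emit the real compare */
  printf ("je target\n");
}

int
main (void)
{
  expand_cmpsi (0, 1);
  expand_beq ();        /* prints the cmpl, then the je */
  return 0;
}
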
+
+(define_insn "tstsi_1"
+ [(set (cc0)
+ (match_operand:SI 0 "nonimmediate_operand" "rm"))]
+ ""
+ "*
+{
+ if (REG_P (operands[0]))
+ return AS2 (test%L0,%0,%0);
+
+ operands[1] = const0_rtx;
+ return AS2 (cmp%L0,%1,%0);
+}")
+
+(define_expand "tstsi"
+ [(set (cc0)
+ (match_operand:SI 0 "nonimmediate_operand" ""))]
+ ""
+ "
+{
+ i386_compare_gen = gen_tstsi_1;
+ i386_compare_op0 = operands[0];
+ DONE;
+}")
+
+(define_insn "tsthi_1"
+ [(set (cc0)
+ (match_operand:HI 0 "nonimmediate_operand" "rm"))]
+ ""
+ "*
+{
+ if (REG_P (operands[0]))
+ return AS2 (test%W0,%0,%0);
+
+ operands[1] = const0_rtx;
+ return AS2 (cmp%W0,%1,%0);
+}")
+
+(define_expand "tsthi"
+ [(set (cc0)
+ (match_operand:HI 0 "nonimmediate_operand" ""))]
+ ""
+ "
+{
+ i386_compare_gen = gen_tsthi_1;
+ i386_compare_op0 = operands[0];
+ DONE;
+}")
+
+(define_insn "tstqi_1"
+ [(set (cc0)
+ (match_operand:QI 0 "nonimmediate_operand" "qm"))]
+ ""
+ "*
+{
+ if (REG_P (operands[0]))
+ return AS2 (test%B0,%0,%0);
+
+ operands[1] = const0_rtx;
+ return AS2 (cmp%B0,%1,%0);
+}")
+
+(define_expand "tstqi"
+ [(set (cc0)
+ (match_operand:QI 0 "nonimmediate_operand" ""))]
+ ""
+ "
+{
+ i386_compare_gen = gen_tstqi_1;
+ i386_compare_op0 = operands[0];
+ DONE;
+}")
+
+(define_insn "tstsf_cc"
+ [(set (cc0)
+ (match_operand:SF 0 "register_operand" "f"))
+ (clobber (match_scratch:HI 1 "=a"))]
+ "TARGET_80387 && ! TARGET_IEEE_FP"
+ "*
+{
+ if (! STACK_TOP_P (operands[0]))
+ abort ();
+
+ output_asm_insn (\"ftst\", operands);
+
+ if (find_regno_note (insn, REG_DEAD, FIRST_STACK_REG))
+ output_asm_insn (AS1 (fstp,%y0), operands);
+
+ return (char *) output_fp_cc0_set (insn);
+}")
+
+;; Don't generate tstsf if generating IEEE code, since the `ftst' opcode
+;; isn't IEEE compliant.
+
+(define_expand "tstsf"
+ [(parallel [(set (cc0)
+ (match_operand:SF 0 "register_operand" ""))
+ (clobber (match_scratch:HI 1 ""))])]
+ "TARGET_80387 && ! TARGET_IEEE_FP"
+ "
+{
+ i386_compare_gen = gen_tstsf_cc;
+ i386_compare_op0 = operands[0];
+ DONE;
+}")
+
+(define_insn "tstdf_cc"
+ [(set (cc0)
+ (match_operand:DF 0 "register_operand" "f"))
+ (clobber (match_scratch:HI 1 "=a"))]
+ "TARGET_80387 && ! TARGET_IEEE_FP"
+ "*
+{
+ if (! STACK_TOP_P (operands[0]))
+ abort ();
+
+ output_asm_insn (\"ftst\", operands);
+
+ if (find_regno_note (insn, REG_DEAD, FIRST_STACK_REG))
+ output_asm_insn (AS1 (fstp,%y0), operands);
+
+ return (char *) output_fp_cc0_set (insn);
+}")
+
+;; Don't generate tstdf if generating IEEE code, since the `ftst' opcode
+;; isn't IEEE compliant.
+
+(define_expand "tstdf"
+ [(parallel [(set (cc0)
+ (match_operand:DF 0 "register_operand" ""))
+ (clobber (match_scratch:HI 1 ""))])]
+ "TARGET_80387 && ! TARGET_IEEE_FP"
+ "
+{
+ i386_compare_gen = gen_tstdf_cc;
+ i386_compare_op0 = operands[0];
+ DONE;
+}")
+
+(define_insn "tstxf_cc"
+ [(set (cc0)
+ (match_operand:XF 0 "register_operand" "f"))
+ (clobber (match_scratch:HI 1 "=a"))]
+ "TARGET_80387 && ! TARGET_IEEE_FP"
+ "*
+{
+ if (! STACK_TOP_P (operands[0]))
+ abort ();
+
+ output_asm_insn (\"ftst\", operands);
+
+ if (find_regno_note (insn, REG_DEAD, FIRST_STACK_REG))
+ output_asm_insn (AS1 (fstp,%y0), operands);
+
+ return (char *) output_fp_cc0_set (insn);
+}")
+
+;; Don't generate tstxf if generating IEEE code, since the `ftst' opcode
+;; isn't IEEE compliant.
+
+(define_expand "tstxf"
+ [(parallel [(set (cc0)
+ (match_operand:XF 0 "register_operand" ""))
+ (clobber (match_scratch:HI 1 ""))])]
+ "TARGET_80387 && ! TARGET_IEEE_FP"
+ "
+{
+ i386_compare_gen = gen_tstxf_cc;
+ i386_compare_op0 = operands[0];
+ DONE;
+}")
+
+;;- compare instructions. See comments above tstM patterns about
+;; expansion of these insns.
+
+(define_insn "cmpsi_1"
+ [(set (cc0)
+ (compare (match_operand:SI 0 "nonimmediate_operand" "mr,r")
+ (match_operand:SI 1 "general_operand" "ri,mr")))]
+ "GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM"
+ "*
+{
+ if (CONSTANT_P (operands[0]) || GET_CODE (operands[1]) == MEM)
+ {
+ cc_status.flags |= CC_REVERSED;
+ return AS2 (cmp%L0,%0,%1);
+ }
+ return AS2 (cmp%L0,%1,%0);
+}")
+
+(define_expand "cmpsi"
+ [(set (cc0)
+ (compare (match_operand:SI 0 "nonimmediate_operand" "")
+ (match_operand:SI 1 "general_operand" "")))]
+ ""
+ "
+{
+ if (GET_CODE (operands[0]) == MEM && GET_CODE (operands[1]) == MEM)
+ operands[0] = force_reg (SImode, operands[0]);
+
+ i386_compare_gen = gen_cmpsi_1;
+ i386_compare_op0 = operands[0];
+ i386_compare_op1 = operands[1];
+ DONE;
+}")
+
+(define_insn "cmphi_1"
+ [(set (cc0)
+ (compare (match_operand:HI 0 "nonimmediate_operand" "mr,r")
+ (match_operand:HI 1 "general_operand" "ri,mr")))]
+ "GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM"
+ "*
+{
+ if (CONSTANT_P (operands[0]) || GET_CODE (operands[1]) == MEM)
+ {
+ cc_status.flags |= CC_REVERSED;
+ return AS2 (cmp%W0,%0,%1);
+ }
+ return AS2 (cmp%W0,%1,%0);
+}")
+
+(define_expand "cmphi"
+ [(set (cc0)
+ (compare (match_operand:HI 0 "nonimmediate_operand" "")
+ (match_operand:HI 1 "general_operand" "")))]
+ ""
+ "
+{
+ if (GET_CODE (operands[0]) == MEM && GET_CODE (operands[1]) == MEM)
+ operands[0] = force_reg (HImode, operands[0]);
+
+ i386_compare_gen = gen_cmphi_1;
+ i386_compare_op0 = operands[0];
+ i386_compare_op1 = operands[1];
+ DONE;
+}")
+
+(define_insn "cmpqi_1"
+ [(set (cc0)
+ (compare (match_operand:QI 0 "nonimmediate_operand" "q,mq")
+ (match_operand:QI 1 "general_operand" "qm,nq")))]
+ "GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM"
+ "*
+{
+ if (CONSTANT_P (operands[0]) || GET_CODE (operands[1]) == MEM)
+ {
+ cc_status.flags |= CC_REVERSED;
+ return AS2 (cmp%B0,%0,%1);
+ }
+ return AS2 (cmp%B0,%1,%0);
+}")
+
+(define_expand "cmpqi"
+ [(set (cc0)
+ (compare (match_operand:QI 0 "nonimmediate_operand" "")
+ (match_operand:QI 1 "general_operand" "")))]
+ ""
+ "
+{
+ if (GET_CODE (operands[0]) == MEM && GET_CODE (operands[1]) == MEM)
+ operands[0] = force_reg (QImode, operands[0]);
+
+ i386_compare_gen = gen_cmpqi_1;
+ i386_compare_op0 = operands[0];
+ i386_compare_op1 = operands[1];
+ DONE;
+}")
+
+;; These implement floating point compares.  For each of XFmode, DFmode
+;; and SFmode, there is the normal insn, and an insn where the second
+;; operand is converted to the desired mode.
+
+(define_insn ""
+ [(set (cc0)
+ (match_operator 2 "VOIDmode_compare_op"
+ [(match_operand:XF 0 "nonimmediate_operand" "f")
+ (match_operand:XF 1 "nonimmediate_operand" "f")]))
+ (clobber (match_scratch:HI 3 "=a"))]
+ "TARGET_80387
+ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
+ "* return (char *) output_float_compare (insn, operands);")
+
+(define_insn ""
+ [(set (cc0)
+ (match_operator 2 "VOIDmode_compare_op"
+ [(match_operand:XF 0 "register_operand" "f")
+ (float:XF
+ (match_operand:SI 1 "nonimmediate_operand" "rm"))]))
+ (clobber (match_scratch:HI 3 "=a"))]
+ "TARGET_80387"
+ "* return (char *) output_float_compare (insn, operands);")
+
+(define_insn ""
+ [(set (cc0)
+ (match_operator 2 "VOIDmode_compare_op"
+ [(float:XF
+ (match_operand:SI 0 "nonimmediate_operand" "rm"))
+ (match_operand:XF 1 "register_operand" "f")]))
+ (clobber (match_scratch:HI 3 "=a"))]
+ "TARGET_80387"
+ "* return (char *) output_float_compare (insn, operands);")
+
+(define_insn ""
+ [(set (cc0)
+ (match_operator 2 "VOIDmode_compare_op"
+ [(match_operand:XF 0 "register_operand" "f")
+ (float_extend:XF
+ (match_operand:DF 1 "nonimmediate_operand" "fm"))]))
+ (clobber (match_scratch:HI 3 "=a"))]
+ "TARGET_80387"
+ "* return (char *) output_float_compare (insn, operands);")
+
+(define_insn ""
+ [(set (cc0)
+ (match_operator 2 "VOIDmode_compare_op"
+ [(match_operand:XF 0 "register_operand" "f")
+ (float_extend:XF
+ (match_operand:SF 1 "nonimmediate_operand" "fm"))]))
+ (clobber (match_scratch:HI 3 "=a"))]
+ "TARGET_80387"
+ "* return (char *) output_float_compare (insn, operands);")
+
+(define_insn ""
+ [(set (cc0)
+ (compare:CCFPEQ (match_operand:XF 0 "register_operand" "f")
+ (match_operand:XF 1 "register_operand" "f")))
+ (clobber (match_scratch:HI 2 "=a"))]
+ "TARGET_80387"
+ "* return (char *) output_float_compare (insn, operands);")
+
+(define_insn ""
+ [(set (cc0)
+ (match_operator 2 "VOIDmode_compare_op"
+ [(match_operand:DF 0 "nonimmediate_operand" "f,fm")
+ (match_operand:DF 1 "nonimmediate_operand" "fm,f")]))
+ (clobber (match_scratch:HI 3 "=a,a"))]
+ "TARGET_80387
+ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
+ "* return (char *) output_float_compare (insn, operands);")
+
+(define_insn ""
+ [(set (cc0)
+ (match_operator 2 "VOIDmode_compare_op"
+ [(match_operand:DF 0 "register_operand" "f")
+ (float:DF
+ (match_operand:SI 1 "nonimmediate_operand" "rm"))]))
+ (clobber (match_scratch:HI 3 "=a"))]
+ "TARGET_80387"
+ "* return (char *) output_float_compare (insn, operands);")
+
+(define_insn ""
+ [(set (cc0)
+ (match_operator 2 "VOIDmode_compare_op"
+ [(float:DF
+ (match_operand:SI 0 "nonimmediate_operand" "rm"))
+ (match_operand:DF 1 "register_operand" "f")]))
+ (clobber (match_scratch:HI 3 "=a"))]
+ "TARGET_80387"
+ "* return (char *) output_float_compare (insn, operands);")
+
+(define_insn ""
+ [(set (cc0)
+ (match_operator 2 "VOIDmode_compare_op"
+ [(match_operand:DF 0 "register_operand" "f")
+ (float_extend:DF
+ (match_operand:SF 1 "nonimmediate_operand" "fm"))]))
+ (clobber (match_scratch:HI 3 "=a"))]
+ "TARGET_80387"
+ "* return (char *) output_float_compare (insn, operands);")
+
+(define_insn ""
+ [(set (cc0)
+ (match_operator 2 "VOIDmode_compare_op"
+ [(float_extend:DF
+ (match_operand:SF 0 "nonimmediate_operand" "fm"))
+ (match_operand:DF 1 "register_operand" "f")]))
+ (clobber (match_scratch:HI 3 "=a"))]
+ "TARGET_80387"
+ "* return (char *) output_float_compare (insn, operands);")
+
+(define_insn ""
+ [(set (cc0)
+ (compare:CCFPEQ (match_operand:DF 0 "register_operand" "f")
+ (match_operand:DF 1 "register_operand" "f")))
+ (clobber (match_scratch:HI 2 "=a"))]
+ "TARGET_80387"
+ "* return (char *) output_float_compare (insn, operands);")
+
+;; These two insns will never be generated by combine due to the mode of
+;; the COMPARE.
+;(define_insn ""
+; [(set (cc0)
+; (compare:CCFPEQ (match_operand:DF 0 "register_operand" "f")
+; (float_extend:DF
+; (match_operand:SF 1 "register_operand" "f"))))
+; (clobber (match_scratch:HI 2 "=a"))]
+; "TARGET_80387"
+; "* return (char *) output_float_compare (insn, operands);")
+;
+;(define_insn ""
+; [(set (cc0)
+; (compare:CCFPEQ (float_extend:DF
+; (match_operand:SF 0 "register_operand" "f"))
+; (match_operand:DF 1 "register_operand" "f")))
+; (clobber (match_scratch:HI 2 "=a"))]
+; "TARGET_80387"
+; "* return (char *) output_float_compare (insn, operands);")
+
+(define_insn "cmpsf_cc_1"
+ [(set (cc0)
+ (match_operator 2 "VOIDmode_compare_op"
+ [(match_operand:SF 0 "nonimmediate_operand" "f,fm")
+ (match_operand:SF 1 "nonimmediate_operand" "fm,f")]))
+ (clobber (match_scratch:HI 3 "=a,a"))]
+ "TARGET_80387
+ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
+ "* return (char *) output_float_compare (insn, operands);")
+
+(define_insn ""
+ [(set (cc0)
+ (match_operator 2 "VOIDmode_compare_op"
+ [(match_operand:SF 0 "register_operand" "f")
+ (float:SF
+ (match_operand:SI 1 "nonimmediate_operand" "rm"))]))
+ (clobber (match_scratch:HI 3 "=a"))]
+ "TARGET_80387"
+ "* return (char *) output_float_compare (insn, operands);")
+
+(define_insn ""
+ [(set (cc0)
+ (match_operator 2 "VOIDmode_compare_op"
+ [(float:SF
+ (match_operand:SI 0 "nonimmediate_operand" "rm"))
+ (match_operand:SF 1 "register_operand" "f")]))
+ (clobber (match_scratch:HI 3 "=a"))]
+ "TARGET_80387"
+ "* return (char *) output_float_compare (insn, operands);")
+
+(define_insn ""
+ [(set (cc0)
+ (compare:CCFPEQ (match_operand:SF 0 "register_operand" "f")
+ (match_operand:SF 1 "register_operand" "f")))
+ (clobber (match_scratch:HI 2 "=a"))]
+ "TARGET_80387"
+ "* return (char *) output_float_compare (insn, operands);")
+
+(define_expand "cmpxf"
+ [(set (cc0)
+ (compare (match_operand:XF 0 "register_operand" "")
+ (match_operand:XF 1 "nonimmediate_operand" "")))]
+ "TARGET_80387"
+ "
+{
+ i386_compare_gen = gen_cmpxf_cc;
+ i386_compare_gen_eq = gen_cmpxf_ccfpeq;
+ i386_compare_op0 = operands[0];
+ i386_compare_op1 = operands[1];
+ DONE;
+}")
+
+(define_expand "cmpdf"
+ [(set (cc0)
+ (compare (match_operand:DF 0 "register_operand" "")
+ (match_operand:DF 1 "nonimmediate_operand" "")))]
+ "TARGET_80387"
+ "
+{
+ i386_compare_gen = gen_cmpdf_cc;
+ i386_compare_gen_eq = gen_cmpdf_ccfpeq;
+ i386_compare_op0 = operands[0];
+ i386_compare_op1 = operands[1];
+ DONE;
+}")
+
+(define_expand "cmpsf"
+ [(set (cc0)
+ (compare (match_operand:SF 0 "register_operand" "")
+ (match_operand:SF 1 "nonimmediate_operand" "")))]
+ "TARGET_80387"
+ "
+{
+ i386_compare_gen = gen_cmpsf_cc;
+ i386_compare_gen_eq = gen_cmpsf_ccfpeq;
+ i386_compare_op0 = operands[0];
+ i386_compare_op1 = operands[1];
+ DONE;
+}")
+
+(define_expand "cmpxf_cc"
+ [(parallel [(set (cc0)
+ (compare (match_operand:XF 0 "register_operand" "")
+ (match_operand:XF 1 "register_operand" "")))
+ (clobber (match_scratch:HI 2 ""))])]
+ "TARGET_80387"
+ "")
+
+(define_expand "cmpxf_ccfpeq"
+ [(parallel [(set (cc0)
+ (compare:CCFPEQ (match_operand:XF 0 "register_operand" "")
+ (match_operand:XF 1 "register_operand" "")))
+ (clobber (match_scratch:HI 2 ""))])]
+ "TARGET_80387"
+ "
+{
+ if (! register_operand (operands[1], XFmode))
+ operands[1] = copy_to_mode_reg (XFmode, operands[1]);
+}")
+
+(define_expand "cmpdf_cc"
+ [(parallel [(set (cc0)
+ (compare (match_operand:DF 0 "register_operand" "")
+ (match_operand:DF 1 "register_operand" "")))
+ (clobber (match_scratch:HI 2 ""))])]
+ "TARGET_80387"
+ "")
+
+(define_expand "cmpdf_ccfpeq"
+ [(parallel [(set (cc0)
+ (compare:CCFPEQ (match_operand:DF 0 "register_operand" "")
+ (match_operand:DF 1 "register_operand" "")))
+ (clobber (match_scratch:HI 2 ""))])]
+ "TARGET_80387"
+ "
+{
+ if (! register_operand (operands[1], DFmode))
+ operands[1] = copy_to_mode_reg (DFmode, operands[1]);
+}")
+
+(define_expand "cmpsf_cc"
+ [(parallel [(set (cc0)
+ (compare (match_operand:SF 0 "register_operand" "")
+ (match_operand:SF 1 "register_operand" "")))
+ (clobber (match_scratch:HI 2 ""))])]
+ "TARGET_80387"
+ "")
+
+(define_expand "cmpsf_ccfpeq"
+ [(parallel [(set (cc0)
+ (compare:CCFPEQ (match_operand:SF 0 "register_operand" "")
+ (match_operand:SF 1 "register_operand" "")))
+ (clobber (match_scratch:HI 2 ""))])]
+ "TARGET_80387"
+ "
+{
+ if (! register_operand (operands[1], SFmode))
+ operands[1] = copy_to_mode_reg (SFmode, operands[1]);
+}")
+
+;; logical compare
+
+(define_insn ""
+ [(set (cc0)
+ (and:SI (match_operand:SI 0 "general_operand" "%ro")
+ (match_operand:SI 1 "general_operand" "ri")))]
+ ""
+ "*
+{
+ /* For small integers, we may actually use testb. */
+ if (GET_CODE (operands[1]) == CONST_INT
+ && ! (GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))
+ && (! REG_P (operands[0]) || QI_REG_P (operands[0])))
+ {
+ /* We may set the sign bit spuriously. */
+
+ if ((INTVAL (operands[1]) & ~0xff) == 0)
+ {
+ cc_status.flags |= CC_NOT_NEGATIVE;
+ return AS2 (test%B0,%1,%b0);
+ }
+
+ if ((INTVAL (operands[1]) & ~0xff00) == 0)
+ {
+ cc_status.flags |= CC_NOT_NEGATIVE;
+ operands[1] = GEN_INT (INTVAL (operands[1]) >> 8);
+
+ if (QI_REG_P (operands[0]))
+ return AS2 (test%B0,%1,%h0);
+ else
+ {
+ operands[0] = adj_offsettable_operand (operands[0], 1);
+ return AS2 (test%B0,%1,%b0);
+ }
+ }
+
+ if (GET_CODE (operands[0]) == MEM
+ && (INTVAL (operands[1]) & ~0xff0000) == 0)
+ {
+ cc_status.flags |= CC_NOT_NEGATIVE;
+ operands[1] = GEN_INT (INTVAL (operands[1]) >> 16);
+ operands[0] = adj_offsettable_operand (operands[0], 2);
+ return AS2 (test%B0,%1,%b0);
+ }
+
+ if (GET_CODE (operands[0]) == MEM
+ && (INTVAL (operands[1]) & ~0xff000000) == 0)
+ {
+ operands[1] = GEN_INT ((INTVAL (operands[1]) >> 24) & 0xff);
+ operands[0] = adj_offsettable_operand (operands[0], 3);
+ return AS2 (test%B0,%1,%b0);
+ }
+ }
+
+ if (CONSTANT_P (operands[1]) || GET_CODE (operands[0]) == MEM)
+ return AS2 (test%L0,%1,%0);
+
+ return AS2 (test%L1,%0,%1);
+}")
+
+(define_insn ""
+ [(set (cc0)
+ (and:HI (match_operand:HI 0 "general_operand" "%ro")
+ (match_operand:HI 1 "general_operand" "ri")))]
+ ""
+ "*
+{
+ if (GET_CODE (operands[1]) == CONST_INT
+ && ! (GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))
+ && (! REG_P (operands[0]) || QI_REG_P (operands[0])))
+ {
+ if ((INTVAL (operands[1]) & 0xff00) == 0)
+ {
+ /* ??? This might not be necessary. */
+ if (INTVAL (operands[1]) & 0xffff0000)
+ operands[1] = GEN_INT (INTVAL (operands[1]) & 0xff);
+
+ /* We may set the sign bit spuriously. */
+ cc_status.flags |= CC_NOT_NEGATIVE;
+ return AS2 (test%B0,%1,%b0);
+ }
+
+ if ((INTVAL (operands[1]) & 0xff) == 0)
+ {
+ operands[1] = GEN_INT ((INTVAL (operands[1]) >> 8) & 0xff);
+
+ if (QI_REG_P (operands[0]))
+ return AS2 (test%B0,%1,%h0);
+ else
+ {
+ operands[0] = adj_offsettable_operand (operands[0], 1);
+ return AS2 (test%B0,%1,%b0);
+ }
+ }
+ }
+
+ if (CONSTANT_P (operands[1]) || GET_CODE (operands[0]) == MEM)
+ return AS2 (test%W0,%1,%0);
+
+ return AS2 (test%W1,%0,%1);
+}")
+
+(define_insn ""
+ [(set (cc0)
+ (and:QI (match_operand:QI 0 "general_operand" "%qm")
+ (match_operand:QI 1 "general_operand" "qi")))]
+ ""
+ "*
+{
+ if (CONSTANT_P (operands[1]) || GET_CODE (operands[0]) == MEM)
+ return AS2 (test%B0,%1,%0);
+
+ return AS2 (test%B1,%0,%1);
+}")
+
+;; move instructions.
+;; There is one for each machine mode,
+;; and each is preceded by a corresponding push-insn pattern
+;; (since pushes are not general_operands on the 386).
+
+(define_insn ""
+ [(set (match_operand:SI 0 "push_operand" "=<")
+ (match_operand:SI 1 "general_operand" "g"))]
+ "! TARGET_486"
+ "push%L0 %1")
+
+;; On a 486, it is faster to move MEM to a REG and then push, rather than
+;; push MEM directly.
+
+(define_insn ""
+ [(set (match_operand:SI 0 "push_operand" "=<")
+ (match_operand:SI 1 "general_operand" "ri"))]
+ "TARGET_486"
+ "push%L0 %1")
+
+;; General case of fullword move.
+
+;; If generating PIC code and operands[1] is a symbolic CONST, emit a
+;; move to get the address of the symbolic object from the GOT.
+
+(define_expand "movsi"
+ [(set (match_operand:SI 0 "general_operand" "")
+ (match_operand:SI 1 "general_operand" ""))]
+ ""
+ "
+{
+ extern int flag_pic;
+
+ if (flag_pic && SYMBOLIC_CONST (operands[1]))
+ emit_pic_move (operands, SImode);
+}")
+
+;; On i486, incl reg is faster than movl $1,reg.
+
+(define_insn ""
+ [(set (match_operand:SI 0 "general_operand" "=g,r")
+ (match_operand:SI 1 "general_operand" "ri,m"))]
+ ""
+ "*
+{
+ rtx link;
+ if (operands[1] == const0_rtx && REG_P (operands[0]))
+ return AS2 (xor%L0,%0,%0);
+
+ if (operands[1] == const1_rtx
+ && (link = find_reg_note (insn, REG_WAS_0, 0))
+ /* Make sure the insn that stored the 0 is still present. */
+ && ! INSN_DELETED_P (XEXP (link, 0))
+ && GET_CODE (XEXP (link, 0)) != NOTE
+ /* Make sure cross jumping didn't happen here. */
+ && no_labels_between_p (XEXP (link, 0), insn)
+ /* Make sure the reg hasn't been clobbered. */
+ && ! reg_set_between_p (operands[0], XEXP (link, 0), insn))
+ /* Fastest way to change a 0 to a 1. */
+ return AS1 (inc%L0,%0);
+
+ return AS2 (mov%L0,%1,%0);
+}")
+
+(define_insn ""
+ [(set (match_operand:HI 0 "push_operand" "=<")
+ (match_operand:HI 1 "general_operand" "g"))]
+ ""
+ "push%W0 %1")
+
+;; On i486, an incl and movl are both faster than incw and movw.
+
+(define_insn "movhi"
+ [(set (match_operand:HI 0 "general_operand" "=g,r")
+ (match_operand:HI 1 "general_operand" "ri,m"))]
+ ""
+ "*
+{
+ rtx link;
+ if (REG_P (operands[0]) && operands[1] == const0_rtx)
+ return AS2 (xor%L0,%k0,%k0);
+
+ if (REG_P (operands[0]) && operands[1] == const1_rtx
+ && (link = find_reg_note (insn, REG_WAS_0, 0))
+ /* Make sure the insn that stored the 0 is still present. */
+ && ! INSN_DELETED_P (XEXP (link, 0))
+ && GET_CODE (XEXP (link, 0)) != NOTE
+ /* Make sure cross jumping didn't happen here. */
+ && no_labels_between_p (XEXP (link, 0), insn)
+ /* Make sure the reg hasn't been clobbered. */
+ && ! reg_set_between_p (operands[0], XEXP (link, 0), insn))
+ /* Fastest way to change a 0 to a 1. */
+ return AS1 (inc%L0,%k0);
+
+ if (REG_P (operands[0]))
+ {
+ if (REG_P (operands[1]))
+ return AS2 (mov%L0,%k1,%k0);
+ else if (CONSTANT_P (operands[1]))
+ return AS2 (mov%L0,%1,%k0);
+ }
+
+ return AS2 (mov%W0,%1,%0);
+}")
+
+(define_insn "movstricthi"
+ [(set (strict_low_part (match_operand:HI 0 "general_operand" "+g,r"))
+ (match_operand:HI 1 "general_operand" "ri,m"))]
+ ""
+ "*
+{
+ rtx link;
+ if (operands[1] == const0_rtx && REG_P (operands[0]))
+ return AS2 (xor%W0,%0,%0);
+
+ if (operands[1] == const1_rtx
+ && (link = find_reg_note (insn, REG_WAS_0, 0))
+ /* Make sure the insn that stored the 0 is still present. */
+ && ! INSN_DELETED_P (XEXP (link, 0))
+ && GET_CODE (XEXP (link, 0)) != NOTE
+ /* Make sure cross jumping didn't happen here. */
+ && no_labels_between_p (XEXP (link, 0), insn)
+ /* Make sure the reg hasn't been clobbered. */
+ && ! reg_set_between_p (operands[0], XEXP (link, 0), insn))
+ /* Fastest way to change a 0 to a 1. */
+ return AS1 (inc%W0,%0);
+
+ return AS2 (mov%W0,%1,%0);
+}")
+
+;; emit_push_insn when it calls move_by_pieces
+;; requires an insn to "push a byte".
+;; But actually we use pushw, which has the effect of rounding
+;; the amount pushed up to a halfword.
+(define_insn ""
+ [(set (match_operand:QI 0 "push_operand" "=<")
+ (match_operand:QI 1 "general_operand" "q"))]
+ ""
+ "*
+{
+ operands[1] = gen_rtx (REG, HImode, REGNO (operands[1]));
+ return AS1 (push%W0,%1);
+}")
+
+;; On i486, incb reg is faster than movb $1,reg.
+
+;; ??? Do a recognizer for zero_extract that looks just like this, but reads
+;; or writes %ah, %bh, %ch, %dh.
+
+(define_insn "movqi"
+ [(set (match_operand:QI 0 "general_operand" "=q,*r,qm")
+ (match_operand:QI 1 "general_operand" "*g,q,qn"))]
+ ""
+ "*
+{
+ rtx link;
+ if (operands[1] == const0_rtx && REG_P (operands[0]))
+ return AS2 (xor%B0,%0,%0);
+
+ if (operands[1] == const1_rtx
+ && (link = find_reg_note (insn, REG_WAS_0, 0))
+ /* Make sure the insn that stored the 0 is still present. */
+ && ! INSN_DELETED_P (XEXP (link, 0))
+ && GET_CODE (XEXP (link, 0)) != NOTE
+ /* Make sure cross jumping didn't happen here. */
+ && no_labels_between_p (XEXP (link, 0), insn)
+ /* Make sure the reg hasn't been clobbered. */
+ && ! reg_set_between_p (operands[0], XEXP (link, 0), insn))
+ /* Fastest way to change a 0 to a 1. */
+ return AS1 (inc%B0,%0);
+
+ /* If mov%B0 isn't allowed for one of these regs, use mov%L0. */
+ if (NON_QI_REG_P (operands[0]) || NON_QI_REG_P (operands[1]))
+ return (AS2 (mov%L0,%k1,%k0));
+
+ return (AS2 (mov%B0,%1,%0));
+}")
+
+;; If it becomes necessary to support movstrictqi into %esi or %edi,
+;; use the insn sequence:
+;;
+;; shrdl $8,srcreg,dstreg
+;; rorl $24,dstreg
+;;
+;; If operands[1] is a constant, then an andl/orl sequence would be
+;; faster.
+
+(define_insn "movstrictqi"
+ [(set (strict_low_part (match_operand:QI 0 "general_operand" "+qm,q"))
+ (match_operand:QI 1 "general_operand" "*qn,m"))]
+ ""
+ "*
+{
+ rtx link;
+ if (operands[1] == const0_rtx && REG_P (operands[0]))
+ return AS2 (xor%B0,%0,%0);
+
+ if (operands[1] == const1_rtx
+ && (link = find_reg_note (insn, REG_WAS_0, 0))
+ /* Make sure the insn that stored the 0 is still present. */
+ && ! INSN_DELETED_P (XEXP (link, 0))
+ && GET_CODE (XEXP (link, 0)) != NOTE
+ /* Make sure cross jumping didn't happen here. */
+ && no_labels_between_p (XEXP (link, 0), insn)
+ /* Make sure the reg hasn't been clobbered. */
+ && ! reg_set_between_p (operands[0], XEXP (link, 0), insn))
+ /* Fastest way to change a 0 to a 1. */
+ return AS1 (inc%B0,%0);
+
+ /* If mov%B0 isn't allowed for one of these regs, use mov%L0. */
+ if (NON_QI_REG_P (operands[0]) || NON_QI_REG_P (operands[1]))
+ {
+ abort ();
+ return (AS2 (mov%L0,%k1,%k0));
+ }
+
+ return AS2 (mov%B0,%1,%0);
+}")
+
+(define_insn ""
+ [(set (match_operand:SF 0 "push_operand" "=<,<")
+ (match_operand:SF 1 "general_operand" "gF,f"))]
+ ""
+ "*
+{
+ if (STACK_REG_P (operands[1]))
+ {
+ rtx xops[3];
+
+ if (! STACK_TOP_P (operands[1]))
+ abort ();
+
+ xops[0] = AT_SP (SFmode);
+ xops[1] = GEN_INT (4);
+ xops[2] = stack_pointer_rtx;
+
+ output_asm_insn (AS2 (sub%L2,%1,%2), xops);
+
+ if (find_regno_note (insn, REG_DEAD, FIRST_STACK_REG))
+ output_asm_insn (AS1 (fstp%S0,%0), xops);
+ else
+ output_asm_insn (AS1 (fst%S0,%0), xops);
+ RET;
+ }
+ return AS1 (push%L1,%1);
+}")
+
+;; Allow MEM-MEM moves before reload. The reload class for such a
+;; move will be ALL_REGS. PREFERRED_RELOAD_CLASS will narrow this to
+;; GENERAL_REGS. For the purposes of regclass, prefer FLOAT_REGS.
+
+(define_insn "movsf"
+ [(set (match_operand:SF 0 "general_operand" "=*rfm,*rf,f,!*rm")
+ (match_operand:SF 1 "general_operand" "*rf,*rfm,fG,fF"))]
+ ""
+ "*
+{
+ int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
+
+ /* First handle a `pop' insn or a `fld %st(0)' */
+
+ if (STACK_TOP_P (operands[0]) && STACK_TOP_P (operands[1]))
+ {
+ if (stack_top_dies)
+ return AS1 (fstp,%y0);
+ else
+ return AS1 (fld,%y0);
+ }
+
+ /* Handle a transfer between the 387 and a 386 register */
+
+ if (STACK_TOP_P (operands[0]) && NON_STACK_REG_P (operands[1]))
+ {
+ output_op_from_reg (operands[1], AS1 (fld%z0,%y1));
+ RET;
+ }
+
+ if (STACK_TOP_P (operands[1]) && NON_STACK_REG_P (operands[0]))
+ {
+ output_to_reg (operands[0], stack_top_dies);
+ RET;
+ }
+
+ /* Handle other kinds of writes from the 387 */
+
+ if (STACK_TOP_P (operands[1]))
+ {
+ if (stack_top_dies)
+ return AS1 (fstp%z0,%y0);
+ else
+ return AS1 (fst%z0,%y0);
+ }
+
+ /* Handle other kinds of reads to the 387 */
+
+ if (STACK_TOP_P (operands[0]) && GET_CODE (operands[1]) == CONST_DOUBLE)
+ return (char *) output_move_const_single (operands);
+
+ if (STACK_TOP_P (operands[0]))
+ return AS1 (fld%z1,%y1);
+
+ /* Handle all SFmode moves not involving the 387 */
+
+ return (char *) singlemove_string (operands);
+}")
+
+;; This should change to handle the memory operands[1] without doing a DF push.
+(define_insn ""
+ [(set (match_operand:DF 0 "push_operand" "=<,<")
+ (match_operand:DF 1 "general_operand" "gF,f"))]
+ ""
+ "*
+{
+ if (STACK_REG_P (operands[1]))
+ {
+ rtx xops[3];
+
+ xops[0] = AT_SP (SFmode);
+ xops[1] = GEN_INT (8);
+ xops[2] = stack_pointer_rtx;
+
+ output_asm_insn (AS2 (sub%L2,%1,%2), xops);
+
+ if (find_regno_note (insn, REG_DEAD, FIRST_STACK_REG))
+ output_asm_insn (AS1 (fstp%Q0,%0), xops);
+ else
+ output_asm_insn (AS1 (fst%Q0,%0), xops);
+
+ RET;
+ }
+ else
+ return (char *) output_move_double (operands);
+}")
+
+(define_insn "swapdf"
+ [(set (match_operand:DF 0 "register_operand" "f")
+ (match_operand:DF 1 "register_operand" "f"))
+ (set (match_dup 1)
+ (match_dup 0))]
+ ""
+ "*
+{
+ if (STACK_TOP_P (operands[0]))
+ return AS1 (fxch,%1);
+ else
+ return AS1 (fxch,%0);
+}")
+
+;; Allow MEM-MEM moves before reload. The reload class for such a
+;; move will be ALL_REGS. PREFERRED_RELOAD_CLASS will narrow this to
+;; GENERAL_REGS. For the purposes of regclass, prefer FLOAT_REGS.
+
+(define_insn "movdf"
+ [(set (match_operand:DF 0 "general_operand" "=*rfm,*rf,f,!*rm")
+ (match_operand:DF 1 "general_operand" "*rf,*rfm,fG,fF"))]
+ ""
+ "*
+{
+ int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
+
+ /* First handle a `pop' insn or a `fld %st(0)' */
+
+ if (STACK_TOP_P (operands[0]) && STACK_TOP_P (operands[1]))
+ {
+ if (stack_top_dies)
+ return AS1 (fstp,%y0);
+ else
+ return AS1 (fld,%y0);
+ }
+
+ /* Handle a transfer between the 387 and a 386 register */
+
+ if (STACK_TOP_P (operands[0]) && NON_STACK_REG_P (operands[1]))
+ {
+ output_op_from_reg (operands[1], AS1 (fld%z0,%y1));
+ RET;
+ }
+
+ if (STACK_TOP_P (operands[1]) && NON_STACK_REG_P (operands[0]))
+ {
+ output_to_reg (operands[0], stack_top_dies);
+ RET;
+ }
+
+ /* Handle other kinds of writes from the 387 */
+
+ if (STACK_TOP_P (operands[1]))
+ {
+ if (stack_top_dies)
+ return AS1 (fstp%z0,%y0);
+ else
+ return AS1 (fst%z0,%y0);
+ }
+
+ /* Handle other kinds of reads to the 387 */
+
+ if (STACK_TOP_P (operands[0]) && GET_CODE (operands[1]) == CONST_DOUBLE)
+ return (char *) output_move_const_single (operands);
+
+ if (STACK_TOP_P (operands[0]))
+ return AS1 (fld%z1,%y1);
+
+ /* Handle all DFmode moves not involving the 387 */
+
+ return (char *) output_move_double (operands);
+}")
+
+(define_insn ""
+ [(set (match_operand:XF 0 "push_operand" "=<,<")
+ (match_operand:XF 1 "general_operand" "gF,f"))]
+ ""
+ "*
+{
+ if (STACK_REG_P (operands[1]))
+ {
+ rtx xops[3];
+
+ xops[0] = AT_SP (SFmode);
+ xops[1] = GEN_INT (12);
+ xops[2] = stack_pointer_rtx;
+
+ output_asm_insn (AS2 (sub%L2,%1,%2), xops);
+ output_asm_insn (AS1 (fstp%T0,%0), xops);
+ if (! find_regno_note (insn, REG_DEAD, FIRST_STACK_REG))
+ output_asm_insn (AS1 (fld%T0,%0), xops);
+
+ RET;
+ }
+ else
+ return (char *) output_move_double (operands);
+ }")
+
+(define_insn "swapxf"
+ [(set (match_operand:XF 0 "register_operand" "f")
+ (match_operand:XF 1 "register_operand" "f"))
+ (set (match_dup 1)
+ (match_dup 0))]
+ ""
+ "*
+{
+ if (STACK_TOP_P (operands[0]))
+ return AS1 (fxch,%1);
+ else
+ return AS1 (fxch,%0);
+}")
+
+(define_insn "movxf"
+ [(set (match_operand:XF 0 "general_operand" "=f,fm,!*rf,!*rm")
+ (match_operand:XF 1 "general_operand" "fmG,f,*rfm,*rfF"))]
+;; [(set (match_operand:XF 0 "general_operand" "=*rf,*rfm,f,!*rm")
+;; (match_operand:XF 1 "general_operand" "*rfm,*rf,fG,fF"))]
+ ""
+ "*
+{
+ int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
+
+ /* First handle a `pop' insn or a `fld %st(0)' */
+
+ if (STACK_TOP_P (operands[0]) && STACK_TOP_P (operands[1]))
+ {
+ if (stack_top_dies)
+ return AS1 (fstp,%y0);
+ else
+ return AS1 (fld,%y0);
+ }
+
+ /* Handle a transfer between the 387 and a 386 register */
+
+ if (STACK_TOP_P (operands[0]) && NON_STACK_REG_P (operands[1]))
+ {
+ output_op_from_reg (operands[1], AS1 (fld%z0,%y1));
+ RET;
+ }
+
+ if (STACK_TOP_P (operands[1]) && NON_STACK_REG_P (operands[0]))
+ {
+ output_to_reg (operands[0], stack_top_dies);
+ RET;
+ }
+
+ /* Handle other kinds of writes from the 387 */
+
+ if (STACK_TOP_P (operands[1]))
+ {
+ output_asm_insn (AS1 (fstp%z0,%y0), operands);
+ if (! stack_top_dies)
+ return AS1 (fld%z0,%y0);
+
+ RET;
+ }
+
+ /* Handle other kinds of reads to the 387 */
+
+ if (STACK_TOP_P (operands[0]) && GET_CODE (operands[1]) == CONST_DOUBLE)
+ return (char *) output_move_const_single (operands);
+
+ if (STACK_TOP_P (operands[0]))
+ return AS1 (fld%z1,%y1);
+
+ /* Handle all XFmode moves not involving the 387 */
+
+ return (char *) output_move_double (operands);
+}")
+
+(define_insn ""
+ [(set (match_operand:DI 0 "push_operand" "=<")
+ (match_operand:DI 1 "general_operand" "roiF"))]
+ ""
+ "*
+{
+ return (char *) output_move_double (operands);
+}")
+
+(define_insn "movdi"
+ [(set (match_operand:DI 0 "general_operand" "=r,rm")
+ (match_operand:DI 1 "general_operand" "m,riF"))]
+ ""
+ "*
+{
+ return (char *) output_move_double (operands);
+}")
+
+;;- conversion instructions
+;;- NONE
+
+;;- zero extension instructions
+;; See comments by `andsi' for when andl is faster than movzx.
+
+(define_insn "zero_extendhisi2"
+ [(set (match_operand:SI 0 "general_operand" "=r")
+ (zero_extend:SI
+ (match_operand:HI 1 "nonimmediate_operand" "rm")))]
+ ""
+ "*
+{
+ if ((TARGET_486 || REGNO (operands[0]) == 0)
+ && REG_P (operands[1]) && REGNO (operands[0]) == REGNO (operands[1]))
+ {
+ rtx xops[2];
+ xops[0] = operands[0];
+ xops[1] = GEN_INT (0xffff);
+ output_asm_insn (AS2 (and%L0,%1,%k0), xops);
+ RET;
+ }
+
+#ifdef INTEL_SYNTAX
+ return AS2 (movzx,%1,%0);
+#else
+ return AS2 (movz%W0%L0,%1,%0);
+#endif
+}")
+
+(define_insn "zero_extendqihi2"
+ [(set (match_operand:HI 0 "general_operand" "=r")
+ (zero_extend:HI
+ (match_operand:QI 1 "nonimmediate_operand" "qm")))]
+ ""
+ "*
+{
+ if ((TARGET_486 || REGNO (operands[0]) == 0)
+ && REG_P (operands[1]) && REGNO (operands[0]) == REGNO (operands[1]))
+ {
+ rtx xops[2];
+ xops[0] = operands[0];
+ xops[1] = GEN_INT (0xff);
+ output_asm_insn (AS2 (and%L0,%1,%k0), xops);
+ RET;
+ }
+
+#ifdef INTEL_SYNTAX
+ return AS2 (movzx,%1,%0);
+#else
+ return AS2 (movz%B0%W0,%1,%0);
+#endif
+}")
+
+(define_insn "zero_extendqisi2"
+ [(set (match_operand:SI 0 "general_operand" "=r")
+ (zero_extend:SI
+ (match_operand:QI 1 "nonimmediate_operand" "qm")))]
+ ""
+ "*
+{
+ if ((TARGET_486 || REGNO (operands[0]) == 0)
+ && REG_P (operands[1]) && REGNO (operands[0]) == REGNO (operands[1]))
+ {
+ rtx xops[2];
+ xops[0] = operands[0];
+ xops[1] = GEN_INT (0xff);
+ output_asm_insn (AS2 (and%L0,%1,%k0), xops);
+ RET;
+ }
+
+#ifdef INTEL_SYNTAX
+ return AS2 (movzx,%1,%0);
+#else
+ return AS2 (movz%B0%L0,%1,%0);
+#endif
+}")
+
+(define_insn "zero_extendsidi2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI
+ (match_operand:SI 1 "register_operand" "0")))]
+ ""
+ "*
+{
+ operands[0] = gen_rtx (REG, SImode, REGNO (operands[0]) + 1);
+ return AS2 (xor%L0,%0,%0);
+}")
+
+;;- sign extension instructions
+
+(define_insn "extendsidi2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (sign_extend:DI
+ (match_operand:SI 1 "register_operand" "0")))]
+ ""
+ "*
+{
+ if (REGNO (operands[0]) == 0)
+ {
+ /* This used to be cwtl, but that extends HI to SI somehow. */
+#ifdef INTEL_SYNTAX
+ return \"cdq\";
+#else
+ return \"cltd\";
+#endif
+ }
+
+ operands[1] = gen_rtx (REG, SImode, REGNO (operands[0]) + 1);
+ output_asm_insn (AS2 (mov%L0,%0,%1), operands);
+
+ operands[0] = GEN_INT (31);
+ return AS2 (sar%L1,%0,%1);
+}")
+
+;; Note that the i386 programmers' manual says that the opcodes
+;; are named movsx..., but the assembler on Unix does not accept that.
+;; We use what the Unix assembler expects.
+
+(define_insn "extendhisi2"
+ [(set (match_operand:SI 0 "general_operand" "=r")
+ (sign_extend:SI
+ (match_operand:HI 1 "nonimmediate_operand" "rm")))]
+ ""
+ "*
+{
+ if (REGNO (operands[0]) == 0
+ && REG_P (operands[1]) && REGNO (operands[1]) == 0)
+#ifdef INTEL_SYNTAX
+ return \"cwde\";
+#else
+ return \"cwtl\";
+#endif
+
+#ifdef INTEL_SYNTAX
+ return AS2 (movsx,%1,%0);
+#else
+ return AS2 (movs%W0%L0,%1,%0);
+#endif
+}")
+
+(define_insn "extendqihi2"
+ [(set (match_operand:HI 0 "general_operand" "=r")
+ (sign_extend:HI
+ (match_operand:QI 1 "nonimmediate_operand" "qm")))]
+ ""
+ "*
+{
+ if (REGNO (operands[0]) == 0
+ && REG_P (operands[1]) && REGNO (operands[1]) == 0)
+ return \"cbtw\";
+
+#ifdef INTEL_SYNTAX
+ return AS2 (movsx,%1,%0);
+#else
+ return AS2 (movs%B0%W0,%1,%0);
+#endif
+}")
+
+(define_insn "extendqisi2"
+ [(set (match_operand:SI 0 "general_operand" "=r")
+ (sign_extend:SI
+ (match_operand:QI 1 "nonimmediate_operand" "qm")))]
+ ""
+ "*
+{
+#ifdef INTEL_SYNTAX
+ return AS2 (movsx,%1,%0);
+#else
+ return AS2 (movs%B0%L0,%1,%0);
+#endif
+}")
+
+;; Conversions between float and double.
+
+(define_insn "extendsfdf2"
+ [(set (match_operand:DF 0 "general_operand" "=fm,f")
+ (float_extend:DF
+ (match_operand:SF 1 "general_operand" "f,fm")))]
+ "TARGET_80387"
+ "*
+{
+ int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
+
+ if (NON_STACK_REG_P (operands[1]))
+ {
+ output_op_from_reg (operands[1], AS1 (fld%z0,%y1));
+ RET;
+ }
+
+ if (NON_STACK_REG_P (operands[0]))
+ {
+ output_to_reg (operands[0], stack_top_dies);
+ RET;
+ }
+
+ if (STACK_TOP_P (operands[0]))
+ return AS1 (fld%z1,%y1);
+
+ if (GET_CODE (operands[0]) == MEM)
+ {
+ if (stack_top_dies)
+ return AS1 (fstp%z0,%y0);
+ else
+ return AS1 (fst%z0,%y0);
+ }
+
+ abort ();
+}")
+
+(define_insn "extenddfxf2"
+ [(set (match_operand:XF 0 "general_operand" "=fm,f,f,!*r")
+ (float_extend:XF
+ (match_operand:DF 1 "general_operand" "f,fm,!*r,f")))]
+ "TARGET_80387"
+ "*
+{
+ int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
+
+ if (NON_STACK_REG_P (operands[1]))
+ {
+ output_op_from_reg (operands[1], AS1 (fld%z0,%y1));
+ RET;
+ }
+
+ if (NON_STACK_REG_P (operands[0]))
+ {
+ output_to_reg (operands[0], stack_top_dies);
+ RET;
+ }
+
+ if (STACK_TOP_P (operands[0]))
+ return AS1 (fld%z1,%y1);
+
+ if (GET_CODE (operands[0]) == MEM)
+ {
+ output_asm_insn (AS1 (fstp%z0,%y0), operands);
+ if (! stack_top_dies)
+ return AS1 (fld%z0,%y0);
+ RET;
+ }
+
+ abort ();
+}")
+
+(define_insn "extendsfxf2"
+ [(set (match_operand:XF 0 "general_operand" "=fm,f,f,!*r")
+ (float_extend:XF
+ (match_operand:SF 1 "general_operand" "f,fm,!*r,f")))]
+ "TARGET_80387"
+ "*
+{
+ int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
+
+ if (NON_STACK_REG_P (operands[1]))
+ {
+ output_op_from_reg (operands[1], AS1 (fld%z0,%y1));
+ RET;
+ }
+
+ if (NON_STACK_REG_P (operands[0]))
+ {
+ output_to_reg (operands[0], stack_top_dies);
+ RET;
+ }
+
+ if (STACK_TOP_P (operands[0]))
+ return AS1 (fld%z1,%y1);
+
+ if (GET_CODE (operands[0]) == MEM)
+ {
+ output_asm_insn (AS1 (fstp%z0,%y0), operands);
+ if (! stack_top_dies)
+ return AS1 (fld%z0,%y0);
+ RET;
+ }
+
+ abort ();
+}")
+
+(define_expand "truncdfsf2"
+ [(parallel [(set (match_operand:SF 0 "nonimmediate_operand" "")
+ (float_truncate:SF
+ (match_operand:DF 1 "register_operand" "")))
+ (clobber (match_dup 2))])]
+ "TARGET_80387"
+ "
+{
+ operands[2] = (rtx) assign_386_stack_local (SFmode, 0);
+}")
+
+;; This cannot output into an f-reg because there is no way to be sure
+;; of truncating in that case. Otherwise this is just like a simple move
+;; insn. So we pretend we can output to a reg in order to get better
+;; register preferencing, but we really use a stack slot.
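
A small plain-C illustration of the point (values made up): once the value
has been stored into an SFmode-sized slot and reloaded, it really has
SFmode precision, which is what the clobbered stack slot guarantees in the
pattern below.

/* Illustration only, not GCC code.  The volatile store models the
   memory slot; the reloaded value is the genuinely truncated one.  */
#include <stdio.h>

int
main (void)
{
  double d = 1.0 + 1e-10;           /* distinct from 1.0 in DFmode     */
  volatile float slot = (float) d;  /* store: rounded to SFmode here   */
  float f = slot;                   /* reload the truncated value      */
  printf ("%d\n", f == 1.0f);       /* prints 1: SFmode result is 1.0  */
  return 0;
}
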
+
+(define_insn ""
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=f,m")
+ (float_truncate:SF
+ (match_operand:DF 1 "register_operand" "0,f")))
+ (clobber (match_operand:SF 2 "memory_operand" "m,m"))]
+ "TARGET_80387"
+ "*
+{
+ int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
+
+ if (GET_CODE (operands[0]) == MEM)
+ {
+ if (stack_top_dies)
+ return AS1 (fstp%z0,%0);
+ else
+ return AS1 (fst%z0,%0);
+ }
+ else if (STACK_TOP_P (operands[0]))
+ {
+ output_asm_insn (AS1 (fstp%z2,%y2), operands);
+ return AS1 (fld%z2,%y2);
+ }
+ else
+ abort ();
+}")
+
+(define_insn "truncxfsf2"
+ [(set (match_operand:SF 0 "general_operand" "=m,!*r")
+ (float_truncate:SF
+ (match_operand:XF 1 "register_operand" "f,f")))]
+ "TARGET_80387"
+ "*
+{
+ int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
+
+ if (NON_STACK_REG_P (operands[0]))
+ {
+ if (stack_top_dies == 0)
+ {
+ output_asm_insn (AS1 (fld,%y1), operands);
+ stack_top_dies = 1;
+ }
+ output_to_reg (operands[0], stack_top_dies);
+ RET;
+ }
+ else if (GET_CODE (operands[0]) == MEM)
+ {
+ if (stack_top_dies)
+ return AS1 (fstp%z0,%0);
+ else
+ {
+ output_asm_insn (AS1 (fld,%y1), operands);
+ return AS1 (fstp%z0,%0);
+ }
+ }
+ else
+ abort ();
+}")
+
+(define_insn "truncxfdf2"
+ [(set (match_operand:DF 0 "general_operand" "=m,!*r")
+ (float_truncate:DF
+ (match_operand:XF 1 "register_operand" "f,f")))]
+ "TARGET_80387"
+ "*
+{
+ int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
+
+ if (NON_STACK_REG_P (operands[0]))
+ {
+ if (stack_top_dies == 0)
+ {
+ output_asm_insn (AS1 (fld,%y1), operands);
+ stack_top_dies = 1;
+ }
+ output_to_reg (operands[0], stack_top_dies);
+ RET;
+ }
+ else if (GET_CODE (operands[0]) == MEM)
+ {
+ if (stack_top_dies)
+ return AS1 (fstp%z0,%0);
+ else
+ {
+ output_asm_insn (AS1 (fld,%y1), operands);
+ return AS1 (fstp%z0,%0);
+ }
+ }
+ else
+ abort ();
+}")
+
+
+;; The 387 requires that the stack top dies after converting to DImode.
+
+;; Represent an unsigned conversion from MODE_FLOAT to SImode by first
+;; doing a signed conversion to DImode, and then taking just the low
+;; part.
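
A plain-C illustration of why that works (hypothetical values, not GCC
code): for any value in [0, 2^32) the signed conversion to a 64-bit
integer is exact, and its low 32 bits are exactly the unsigned result.

/* Illustration only; assumes a 64-bit long long and 32-bit unsigned int. */
#include <stdio.h>

int
main (void)
{
  double d = 3000000000.0;                     /* too big for signed 32-bit  */
  long long wide = (long long) d;              /* signed conversion (DImode) */
  unsigned int narrow = (unsigned int) wide;   /* take just the low part     */
  printf ("%u\n", narrow);                     /* prints 3000000000          */
  return 0;
}
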
+
+(define_expand "fixuns_truncxfsi2"
+ [(set (match_dup 4)
+ (match_operand:XF 1 "register_operand" ""))
+ (parallel [(set (match_dup 2)
+ (fix:DI (fix:XF (match_dup 4))))
+ (clobber (match_dup 4))
+ (clobber (match_dup 5))
+ (clobber (match_dup 6))
+ (clobber (match_scratch:SI 7 ""))])
+ (set (match_operand:SI 0 "general_operand" "")
+ (match_dup 3))]
+ "TARGET_80387"
+ "
+{
+ operands[2] = gen_reg_rtx (DImode);
+ operands[3] = gen_lowpart (SImode, operands[2]);
+ operands[4] = gen_reg_rtx (XFmode);
+ operands[5] = (rtx) assign_386_stack_local (SImode, 0);
+ operands[6] = (rtx) assign_386_stack_local (SImode, 1);
+}")
+
+(define_expand "fixuns_truncdfsi2"
+ [(set (match_dup 4)
+ (match_operand:DF 1 "register_operand" ""))
+ (parallel [(set (match_dup 2)
+ (fix:DI (fix:DF (match_dup 4))))
+ (clobber (match_dup 4))
+ (clobber (match_dup 5))
+ (clobber (match_dup 6))
+ (clobber (match_scratch:SI 7 ""))])
+ (set (match_operand:SI 0 "general_operand" "")
+ (match_dup 3))]
+ "TARGET_80387"
+ "
+{
+ operands[2] = gen_reg_rtx (DImode);
+ operands[3] = gen_lowpart (SImode, operands[2]);
+ operands[4] = gen_reg_rtx (DFmode);
+ operands[5] = (rtx) assign_386_stack_local (SImode, 0);
+ operands[6] = (rtx) assign_386_stack_local (SImode, 1);
+}")
+
+(define_expand "fixuns_truncsfsi2"
+ [(set (match_dup 4)
+ (match_operand:SF 1 "register_operand" ""))
+ (parallel [(set (match_dup 2)
+ (fix:DI (fix:SF (match_dup 4))))
+ (clobber (match_dup 4))
+ (clobber (match_dup 5))
+ (clobber (match_dup 6))
+ (clobber (match_scratch:SI 7 ""))])
+ (set (match_operand:SI 0 "general_operand" "")
+ (match_dup 3))]
+ "TARGET_80387"
+ "
+{
+ operands[2] = gen_reg_rtx (DImode);
+ operands[3] = gen_lowpart (SImode, operands[2]);
+ operands[4] = gen_reg_rtx (SFmode);
+ operands[5] = (rtx) assign_386_stack_local (SImode, 0);
+ operands[6] = (rtx) assign_386_stack_local (SImode, 1);
+}")
+
+;; Signed conversion to DImode.
+
+(define_expand "fix_truncxfdi2"
+ [(set (match_dup 2)
+ (match_operand:XF 1 "register_operand" ""))
+ (parallel [(set (match_operand:DI 0 "general_operand" "")
+ (fix:DI (fix:XF (match_dup 2))))
+ (clobber (match_dup 2))
+ (clobber (match_dup 3))
+ (clobber (match_dup 4))
+ (clobber (match_scratch:SI 5 ""))])]
+ "TARGET_80387"
+ "
+{
+ operands[1] = copy_to_mode_reg (XFmode, operands[1]);
+ operands[2] = gen_reg_rtx (XFmode);
+ operands[3] = (rtx) assign_386_stack_local (SImode, 0);
+ operands[4] = (rtx) assign_386_stack_local (SImode, 1);
+}")
+
+(define_expand "fix_truncdfdi2"
+ [(set (match_dup 2)
+ (match_operand:DF 1 "register_operand" ""))
+ (parallel [(set (match_operand:DI 0 "general_operand" "")
+ (fix:DI (fix:DF (match_dup 2))))
+ (clobber (match_dup 2))
+ (clobber (match_dup 3))
+ (clobber (match_dup 4))
+ (clobber (match_scratch:SI 5 ""))])]
+ "TARGET_80387"
+ "
+{
+ operands[1] = copy_to_mode_reg (DFmode, operands[1]);
+ operands[2] = gen_reg_rtx (DFmode);
+ operands[3] = (rtx) assign_386_stack_local (SImode, 0);
+ operands[4] = (rtx) assign_386_stack_local (SImode, 1);
+}")
+
+(define_expand "fix_truncsfdi2"
+ [(set (match_dup 2)
+ (match_operand:SF 1 "register_operand" ""))
+ (parallel [(set (match_operand:DI 0 "general_operand" "")
+ (fix:DI (fix:SF (match_dup 2))))
+ (clobber (match_dup 2))
+ (clobber (match_dup 3))
+ (clobber (match_dup 4))
+ (clobber (match_scratch:SI 5 ""))])]
+ "TARGET_80387"
+ "
+{
+ operands[1] = copy_to_mode_reg (SFmode, operands[1]);
+ operands[2] = gen_reg_rtx (SFmode);
+ operands[3] = (rtx) assign_386_stack_local (SImode, 0);
+ operands[4] = (rtx) assign_386_stack_local (SImode, 1);
+}")
+
+;; These match a signed conversion of XFmode, DFmode or SFmode to DImode.
+
+(define_insn ""
+ [(set (match_operand:DI 0 "general_operand" "=rm")
+ (fix:DI (fix:XF (match_operand:XF 1 "register_operand" "f"))))
+ (clobber (match_dup 1))
+ (clobber (match_operand:SI 2 "memory_operand" "m"))
+ (clobber (match_operand:SI 3 "memory_operand" "m"))
+ (clobber (match_scratch:SI 4 "=&q"))]
+ "TARGET_80387"
+ "* return (char *) output_fix_trunc (insn, operands);")
+
+(define_insn ""
+ [(set (match_operand:DI 0 "general_operand" "=rm")
+ (fix:DI (fix:DF (match_operand:DF 1 "register_operand" "f"))))
+ (clobber (match_dup 1))
+ (clobber (match_operand:SI 2 "memory_operand" "m"))
+ (clobber (match_operand:SI 3 "memory_operand" "m"))
+ (clobber (match_scratch:SI 4 "=&q"))]
+ "TARGET_80387"
+ "* return (char *) output_fix_trunc (insn, operands);")
+
+(define_insn ""
+ [(set (match_operand:DI 0 "general_operand" "=rm")
+ (fix:DI (fix:SF (match_operand:SF 1 "register_operand" "f"))))
+ (clobber (match_dup 1))
+ (clobber (match_operand:SI 2 "memory_operand" "m"))
+ (clobber (match_operand:SI 3 "memory_operand" "m"))
+ (clobber (match_scratch:SI 4 "=&q"))]
+ "TARGET_80387"
+ "* return (char *) output_fix_trunc (insn, operands);")
+
+;; Signed MODE_FLOAT conversion to SImode.
+
+(define_expand "fix_truncxfsi2"
+ [(parallel [(set (match_operand:SI 0 "general_operand" "")
+ (fix:SI
+ (fix:XF (match_operand:XF 1 "register_operand" ""))))
+ (clobber (match_dup 2))
+ (clobber (match_dup 3))
+ (clobber (match_scratch:SI 4 ""))])]
+ "TARGET_80387"
+ "
+{
+ operands[2] = (rtx) assign_386_stack_local (SImode, 0);
+ operands[3] = (rtx) assign_386_stack_local (SImode, 1);
+}")
+
+(define_expand "fix_truncdfsi2"
+ [(parallel [(set (match_operand:SI 0 "general_operand" "")
+ (fix:SI
+ (fix:DF (match_operand:DF 1 "register_operand" ""))))
+ (clobber (match_dup 2))
+ (clobber (match_dup 3))
+ (clobber (match_scratch:SI 4 ""))])]
+ "TARGET_80387"
+ "
+{
+ operands[2] = (rtx) assign_386_stack_local (SImode, 0);
+ operands[3] = (rtx) assign_386_stack_local (SImode, 1);
+}")
+
+(define_expand "fix_truncsfsi2"
+ [(parallel [(set (match_operand:SI 0 "general_operand" "")
+ (fix:SI
+ (fix:SF (match_operand:SF 1 "register_operand" ""))))
+ (clobber (match_dup 2))
+ (clobber (match_dup 3))
+ (clobber (match_scratch:SI 4 ""))])]
+ "TARGET_80387"
+ "
+{
+ operands[2] = (rtx) assign_386_stack_local (SImode, 0);
+ operands[3] = (rtx) assign_386_stack_local (SImode, 1);
+}")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "general_operand" "=rm")
+ (fix:SI (fix:XF (match_operand:XF 1 "register_operand" "f"))))
+ (clobber (match_operand:SI 2 "memory_operand" "m"))
+ (clobber (match_operand:SI 3 "memory_operand" "m"))
+ (clobber (match_scratch:SI 4 "=&q"))]
+ "TARGET_80387"
+ "* return (char *) output_fix_trunc (insn, operands);")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "general_operand" "=rm")
+ (fix:SI (fix:DF (match_operand:DF 1 "register_operand" "f"))))
+ (clobber (match_operand:SI 2 "memory_operand" "m"))
+ (clobber (match_operand:SI 3 "memory_operand" "m"))
+ (clobber (match_scratch:SI 4 "=&q"))]
+ "TARGET_80387"
+ "* return (char *) output_fix_trunc (insn, operands);")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "general_operand" "=rm")
+ (fix:SI (fix:SF (match_operand:SF 1 "register_operand" "f"))))
+ (clobber (match_operand:SI 2 "memory_operand" "m"))
+ (clobber (match_operand:SI 3 "memory_operand" "m"))
+ (clobber (match_scratch:SI 4 "=&q"))]
+ "TARGET_80387"
+ "* return (char *) output_fix_trunc (insn, operands);")
+
+;; Conversion between fixed point and floating point.
+;; The actual pattern that matches these is at the end of this file.
+
+;; ??? Possibly represent floatunssidf2 here in gcc2.
+
+(define_expand "floatsisf2"
+ [(set (match_operand:SF 0 "register_operand" "")
+ (float:SF (match_operand:SI 1 "nonimmediate_operand" "")))]
+ "TARGET_80387"
+ "")
+
+(define_expand "floatdisf2"
+ [(set (match_operand:SF 0 "register_operand" "")
+ (float:SF (match_operand:DI 1 "nonimmediate_operand" "")))]
+ "TARGET_80387"
+ "")
+
+(define_expand "floatsidf2"
+ [(set (match_operand:DF 0 "register_operand" "")
+ (float:DF (match_operand:SI 1 "nonimmediate_operand" "")))]
+ "TARGET_80387"
+ "")
+
+(define_expand "floatdidf2"
+ [(set (match_operand:DF 0 "register_operand" "")
+ (float:DF (match_operand:DI 1 "nonimmediate_operand" "")))]
+ "TARGET_80387"
+ "")
+
+(define_expand "floatsixf2"
+ [(set (match_operand:XF 0 "register_operand" "")
+ (float:XF (match_operand:SI 1 "nonimmediate_operand" "")))]
+ "TARGET_80387"
+ "")
+
+(define_expand "floatdixf2"
+ [(set (match_operand:XF 0 "register_operand" "")
+ (float:XF (match_operand:DI 1 "nonimmediate_operand" "")))]
+ "TARGET_80387"
+ "")
+
+;; This will convert from SImode or DImode to MODE_FLOAT.
+
+(define_insn ""
+ [(set (match_operand:XF 0 "register_operand" "=f")
+ (float:XF (match_operand:DI 1 "general_operand" "rm")))]
+ "TARGET_80387"
+ "*
+{
+ if (NON_STACK_REG_P (operands[1]))
+ {
+ output_op_from_reg (operands[1], AS1 (fild%z0,%1));
+ RET;
+ }
+ else if (GET_CODE (operands[1]) == MEM)
+ return AS1 (fild%z1,%1);
+ else
+ abort ();
+}")
+
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (float:DF (match_operand:DI 1 "nonimmediate_operand" "rm")))]
+ "TARGET_80387"
+ "*
+{
+ if (NON_STACK_REG_P (operands[1]))
+ {
+ output_op_from_reg (operands[1], AS1 (fild%z0,%1));
+ RET;
+ }
+ else if (GET_CODE (operands[1]) == MEM)
+ return AS1 (fild%z1,%1);
+ else
+ abort ();
+}")
+
+(define_insn ""
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (float:SF (match_operand:DI 1 "nonimmediate_operand" "rm")))]
+ "TARGET_80387"
+ "*
+{
+ if (NON_STACK_REG_P (operands[1]))
+ {
+ output_op_from_reg (operands[1], AS1 (fild%z0,%1));
+ RET;
+ }
+ else if (GET_CODE (operands[1]) == MEM)
+ return AS1 (fild%z1,%1);
+ else
+ abort ();
+}")
+
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (float:DF (match_operand:SI 1 "nonimmediate_operand" "rm")))]
+ "TARGET_80387"
+ "*
+{
+ if (NON_STACK_REG_P (operands[1]))
+ {
+ output_op_from_reg (operands[1], AS1 (fild%z0,%1));
+ RET;
+ }
+ else if (GET_CODE (operands[1]) == MEM)
+ return AS1 (fild%z1,%1);
+ else
+ abort ();
+}")
+
+(define_insn ""
+ [(set (match_operand:XF 0 "register_operand" "=f,f")
+ (float:XF (match_operand:SI 1 "general_operand" "m,!*r")))]
+ "TARGET_80387"
+ "*
+{
+ if (NON_STACK_REG_P (operands[1]))
+ {
+ output_op_from_reg (operands[1], AS1 (fild%z0,%1));
+ RET;
+ }
+ else if (GET_CODE (operands[1]) == MEM)
+ return AS1 (fild%z1,%1);
+ else
+ abort ();
+}")
+
+(define_insn ""
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (float:SF (match_operand:SI 1 "nonimmediate_operand" "rm")))]
+ "TARGET_80387"
+ "*
+{
+ if (NON_STACK_REG_P (operands[1]))
+ {
+ output_op_from_reg (operands[1], AS1 (fild%z0,%1));
+ RET;
+ }
+ else if (GET_CODE (operands[1]) == MEM)
+ return AS1 (fild%z1,%1);
+ else
+ abort ();
+}")
+
+;;- add instructions
+
+(define_insn "adddi3"
+ [(set (match_operand:DI 0 "general_operand" "=&r,ro")
+ (plus:DI (match_operand:DI 1 "general_operand" "%0,0")
+ (match_operand:DI 2 "general_operand" "o,riF")))]
+ ""
+ "*
+{
+ rtx low[3], high[3];
+
+ CC_STATUS_INIT;
+
+ split_di (operands, 3, low, high);
+
+ if (GET_CODE (low[2]) != CONST_INT || INTVAL (low[2]) != 0)
+ {
+ output_asm_insn (AS2 (add%L0,%2,%0), low);
+ output_asm_insn (AS2 (adc%L0,%2,%0), high);
+ }
+ else
+ output_asm_insn (AS2 (add%L0,%2,%0), high);
+ RET;
+}")
+
+;; On a 486, it is faster to do movl/addl than to do a single leal if
+;; operands[1] and operands[2] are both registers.
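+;; As an illustration only (the registers here are arbitrary, not taken
+;; from the pattern below), "a = b + c" with all three values in
+;; registers can be emitted as
+;;	movl %ebx,%eax
+;;	addl %ecx,%eax
+;; which the 486 prefers over the single
+;;	leal (%ebx,%ecx),%eax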
+
+(define_insn "addsi3"
+ [(set (match_operand:SI 0 "general_operand" "=?r,rm,r")
+ (plus:SI (match_operand:SI 1 "general_operand" "%r,0,0")
+ (match_operand:SI 2 "general_operand" "ri,ri,rm")))]
+ ""
+ "*
+{
+ if (REG_P (operands[0]) && REGNO (operands[0]) != REGNO (operands[1]))
+ {
+ if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
+ return AS2 (add%L0,%1,%0);
+
+ if (! TARGET_486 || ! REG_P (operands[2]))
+ {
+ CC_STATUS_INIT;
+
+ if (operands[2] == stack_pointer_rtx)
+ {
+ rtx temp;
+
+ temp = operands[1];
+ operands[1] = operands[2];
+ operands[2] = temp;
+ }
+ if (operands[2] != stack_pointer_rtx)
+ {
+ operands[1] = SET_SRC (PATTERN (insn));
+ return AS2 (lea%L0,%a1,%0);
+ }
+ }
+
+ output_asm_insn (AS2 (mov%L0,%1,%0), operands);
+ }
+
+ if (operands[2] == const1_rtx)
+ return AS1 (inc%L0,%0);
+
+ if (operands[2] == constm1_rtx)
+ return AS1 (dec%L0,%0);
+
+ return AS2 (add%L0,%2,%0);
+}")
+
+;; ??? `lea' here, for three operand add? If leaw is used, only %bx,
+;; %si and %di can appear in SET_SRC, and output_asm_insn might not be
+;; able to handle the operand. But leal always works?
+
+(define_insn "addhi3"
+ [(set (match_operand:HI 0 "general_operand" "=rm,r")
+ (plus:HI (match_operand:HI 1 "general_operand" "%0,0")
+ (match_operand:HI 2 "general_operand" "ri,rm")))]
+ ""
+ "*
+{
+ /* ??? what about offsettable memory references? */
+ if (QI_REG_P (operands[0])
+ && GET_CODE (operands[2]) == CONST_INT
+ && (INTVAL (operands[2]) & 0xff) == 0)
+ {
+ int byteval = (INTVAL (operands[2]) >> 8) & 0xff;
+ CC_STATUS_INIT;
+
+ if (byteval == 1)
+ return AS1 (inc%B0,%h0);
+ else if (byteval == 255)
+ return AS1 (dec%B0,%h0);
+
+ operands[2] = GEN_INT (byteval);
+ return AS2 (add%B0,%2,%h0);
+ }
+
+ if (operands[2] == const1_rtx)
+ return AS1 (inc%W0,%0);
+
+ if (operands[2] == constm1_rtx
+ || (GET_CODE (operands[2]) == CONST_INT
+ && INTVAL (operands[2]) == 65535))
+ return AS1 (dec%W0,%0);
+
+ return AS2 (add%W0,%2,%0);
+}")
+
+(define_insn "addqi3"
+ [(set (match_operand:QI 0 "general_operand" "=qm,q")
+ (plus:QI (match_operand:QI 1 "general_operand" "%0,0")
+ (match_operand:QI 2 "general_operand" "qn,qmn")))]
+ ""
+ "*
+{
+ if (operands[2] == const1_rtx)
+ return AS1 (inc%B0,%0);
+
+ if (operands[2] == constm1_rtx
+ || (GET_CODE (operands[2]) == CONST_INT
+ && INTVAL (operands[2]) == 255))
+ return AS1 (dec%B0,%0);
+
+ return AS2 (add%B0,%2,%0);
+}")
+
+;Lennart Augustsson <augustss@cs.chalmers.se>
+;says this pattern just makes slower code:
+; pushl %ebp
+; addl $-80,(%esp)
+;instead of
+; leal -80(%ebp),%eax
+; pushl %eax
+;
+;(define_insn ""
+; [(set (match_operand:SI 0 "push_operand" "=<")
+; (plus:SI (match_operand:SI 1 "general_operand" "%r")
+; (match_operand:SI 2 "general_operand" "ri")))]
+; ""
+; "*
+;{
+; rtx xops[4];
+; xops[0] = operands[0];
+; xops[1] = operands[1];
+; xops[2] = operands[2];
+; xops[3] = gen_rtx (MEM, SImode, stack_pointer_rtx);
+; output_asm_insn (\"push%z1 %1\", xops);
+; output_asm_insn (AS2 (add%z3,%2,%3), xops);
+; RET;
+;}")
+
+;; addsi3 is faster, so put this after.
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (match_operand:QI 1 "address_operand" "p"))]
+ ""
+ "*
+{
+ CC_STATUS_INIT;
+ /* Adding a constant to a register is faster with an add. */
+ /* ??? can this ever happen? */
+ if (GET_CODE (operands[1]) == PLUS
+ && GET_CODE (XEXP (operands[1], 1)) == CONST_INT
+ && rtx_equal_p (operands[0], XEXP (operands[1], 0)))
+ {
+ operands[1] = XEXP (operands[1], 1);
+
+ if (operands[1] == const1_rtx)
+ return AS1 (inc%L0,%0);
+
+ if (operands[1] == constm1_rtx)
+ return AS1 (dec%L0,%0);
+
+ return AS2 (add%L0,%1,%0);
+ }
+ return AS2 (lea%L0,%a1,%0);
+}")
+
+;; The patterns that match these are at the end of this file.
+
+(define_expand "addxf3"
+ [(set (match_operand:XF 0 "register_operand" "")
+ (plus:XF (match_operand:XF 1 "nonimmediate_operand" "")
+ (match_operand:XF 2 "nonimmediate_operand" "")))]
+ "TARGET_80387"
+ "")
+
+(define_expand "adddf3"
+ [(set (match_operand:DF 0 "register_operand" "")
+ (plus:DF (match_operand:DF 1 "nonimmediate_operand" "")
+ (match_operand:DF 2 "nonimmediate_operand" "")))]
+ "TARGET_80387"
+ "")
+
+(define_expand "addsf3"
+ [(set (match_operand:SF 0 "register_operand" "")
+ (plus:SF (match_operand:SF 1 "nonimmediate_operand" "")
+ (match_operand:SF 2 "nonimmediate_operand" "")))]
+ "TARGET_80387"
+ "")
+
+;;- subtract instructions
+
+(define_insn "subdi3"
+ [(set (match_operand:DI 0 "general_operand" "=&r,ro")
+ (minus:DI (match_operand:DI 1 "general_operand" "0,0")
+ (match_operand:DI 2 "general_operand" "o,riF")))]
+ ""
+ "*
+{
+ rtx low[3], high[3];
+
+ CC_STATUS_INIT;
+
+ split_di (operands, 3, low, high);
+
+ if (GET_CODE (low[2]) != CONST_INT || INTVAL (low[2]) != 0)
+ {
+ output_asm_insn (AS2 (sub%L0,%2,%0), low);
+ output_asm_insn (AS2 (sbb%L0,%2,%0), high);
+ }
+ else
+ output_asm_insn (AS2 (sub%L0,%2,%0), high);
+
+ RET;
+}")
+
+(define_insn "subsi3"
+ [(set (match_operand:SI 0 "general_operand" "=rm,r")
+ (minus:SI (match_operand:SI 1 "general_operand" "0,0")
+ (match_operand:SI 2 "general_operand" "ri,rm")))]
+ ""
+ "* return AS2 (sub%L0,%2,%0);")
+
+(define_insn "subhi3"
+ [(set (match_operand:HI 0 "general_operand" "=rm,r")
+ (minus:HI (match_operand:HI 1 "general_operand" "0,0")
+ (match_operand:HI 2 "general_operand" "ri,rm")))]
+ ""
+ "* return AS2 (sub%W0,%2,%0);")
+
+(define_insn "subqi3"
+ [(set (match_operand:QI 0 "general_operand" "=qm,q")
+ (minus:QI (match_operand:QI 1 "general_operand" "0,0")
+ (match_operand:QI 2 "general_operand" "qn,qmn")))]
+ ""
+ "* return AS2 (sub%B0,%2,%0);")
+
+;; The patterns that match these are at the end of this file.
+
+(define_expand "subxf3"
+ [(set (match_operand:XF 0 "register_operand" "")
+ (minus:XF (match_operand:XF 1 "nonimmediate_operand" "")
+ (match_operand:XF 2 "nonimmediate_operand" "")))]
+ "TARGET_80387"
+ "")
+
+(define_expand "subdf3"
+ [(set (match_operand:DF 0 "register_operand" "")
+ (minus:DF (match_operand:DF 1 "nonimmediate_operand" "")
+ (match_operand:DF 2 "nonimmediate_operand" "")))]
+ "TARGET_80387"
+ "")
+
+(define_expand "subsf3"
+ [(set (match_operand:SF 0 "register_operand" "")
+ (minus:SF (match_operand:SF 1 "nonimmediate_operand" "")
+ (match_operand:SF 2 "nonimmediate_operand" "")))]
+ "TARGET_80387"
+ "")
+
+;;- multiply instructions
+
+;(define_insn "mulqi3"
+; [(set (match_operand:QI 0 "general_operand" "=a")
+; (mult:QI (match_operand:QI 1 "general_operand" "%0")
+; (match_operand:QI 2 "general_operand" "qm")))]
+; ""
+; "imul%B0 %2,%0")
+
+(define_insn ""
+ [(set (match_operand:HI 0 "general_operand" "=r")
+ (mult:HI (match_operand:HI 1 "general_operand" "%0")
+ (match_operand:HI 2 "general_operand" "r")))]
+ "GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) == 0x80"
+ "* return AS2 (imul%W0,%2,%0);")
+
+(define_insn "mulhi3"
+ [(set (match_operand:HI 0 "general_operand" "=r,r")
+ (mult:HI (match_operand:HI 1 "general_operand" "%0,rm")
+ (match_operand:HI 2 "general_operand" "g,i")))]
+ ""
+ "*
+{
+ if (GET_CODE (operands[1]) == REG
+ && REGNO (operands[1]) == REGNO (operands[0])
+ && (GET_CODE (operands[2]) == MEM || GET_CODE (operands[2]) == REG))
+ /* Assembler has weird restrictions. */
+ return AS2 (imul%W0,%2,%0);
+ return AS3 (imul%W0,%2,%1,%0);
+}")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "general_operand" "=r")
+ (mult:SI (match_operand:SI 1 "general_operand" "%0")
+ (match_operand:SI 2 "general_operand" "r")))]
+ "GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) == 0x80"
+ "* return AS2 (imul%L0,%2,%0);")
+
+(define_insn "mulsi3"
+ [(set (match_operand:SI 0 "general_operand" "=r,r")
+ (mult:SI (match_operand:SI 1 "general_operand" "%0,rm")
+ (match_operand:SI 2 "general_operand" "g,i")))]
+ ""
+ "*
+{
+ if (GET_CODE (operands[1]) == REG
+ && REGNO (operands[1]) == REGNO (operands[0])
+ && (GET_CODE (operands[2]) == MEM || GET_CODE (operands[2]) == REG))
+ /* Assembler has weird restrictions. */
+ return AS2 (imul%L0,%2,%0);
+ return AS3 (imul%L0,%2,%1,%0);
+}")
+
+(define_insn "umulqihi3"
+ [(set (match_operand:HI 0 "general_operand" "=a")
+ (mult:HI (zero_extend:HI (match_operand:QI 1 "nonimmediate_operand" "%0"))
+ (zero_extend:HI (match_operand:QI 2 "nonimmediate_operand" "qm"))))]
+ ""
+ "mul%B0 %2")
+
+(define_insn "mulqihi3"
+ [(set (match_operand:HI 0 "general_operand" "=a")
+ (mult:HI (sign_extend:HI (match_operand:QI 1 "nonimmediate_operand" "%0"))
+ (sign_extend:HI (match_operand:QI 2 "nonimmediate_operand" "qm"))))]
+ ""
+ "imul%B0 %2")
+
+(define_insn "umulsidi3"
+ [(set (match_operand:DI 0 "register_operand" "=A")
+ (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "%0"))
+ (zero_extend:DI (match_operand:SI 2 "nonimmediate_operand" "rm"))))]
+ ""
+ "mul%L0 %2")
+
+(define_insn "mulsidi3"
+ [(set (match_operand:DI 0 "register_operand" "=A")
+ (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "%0"))
+ (sign_extend:DI (match_operand:SI 2 "nonimmediate_operand" "rm"))))]
+ ""
+ "imul%L0 %2")
+
+;; The patterns that match these are at the end of this file.
+
+(define_expand "mulxf3"
+ [(set (match_operand:XF 0 "register_operand" "")
+ (mult:XF (match_operand:XF 1 "nonimmediate_operand" "")
+ (match_operand:XF 2 "nonimmediate_operand" "")))]
+ "TARGET_80387"
+ "")
+
+(define_expand "muldf3"
+ [(set (match_operand:DF 0 "register_operand" "")
+ (mult:DF (match_operand:DF 1 "nonimmediate_operand" "")
+ (match_operand:DF 2 "nonimmediate_operand" "")))]
+ "TARGET_80387"
+ "")
+
+(define_expand "mulsf3"
+ [(set (match_operand:SF 0 "register_operand" "")
+ (mult:SF (match_operand:SF 1 "nonimmediate_operand" "")
+ (match_operand:SF 2 "nonimmediate_operand" "")))]
+ "TARGET_80387"
+ "")
+
+;;- divide instructions
+
+(define_insn "divqi3"
+ [(set (match_operand:QI 0 "general_operand" "=a")
+ (div:QI (match_operand:HI 1 "general_operand" "0")
+ (match_operand:QI 2 "general_operand" "qm")))]
+ ""
+ "idiv%B0 %2")
+
+(define_insn "udivqi3"
+ [(set (match_operand:QI 0 "general_operand" "=a")
+ (udiv:QI (match_operand:HI 1 "general_operand" "0")
+ (match_operand:QI 2 "general_operand" "qm")))]
+ ""
+ "div%B0 %2")
+
+;; The patterns that match these are at the end of this file.
+
+(define_expand "divxf3"
+ [(set (match_operand:XF 0 "register_operand" "")
+ (div:XF (match_operand:XF 1 "nonimmediate_operand" "")
+ (match_operand:XF 2 "nonimmediate_operand" "")))]
+ "TARGET_80387"
+ "")
+
+(define_expand "divdf3"
+ [(set (match_operand:DF 0 "register_operand" "")
+ (div:DF (match_operand:DF 1 "nonimmediate_operand" "")
+ (match_operand:DF 2 "nonimmediate_operand" "")))]
+ "TARGET_80387"
+ "")
+
+(define_expand "divsf3"
+ [(set (match_operand:SF 0 "register_operand" "")
+ (div:SF (match_operand:SF 1 "nonimmediate_operand" "")
+ (match_operand:SF 2 "nonimmediate_operand" "")))]
+ "TARGET_80387"
+ "")
+
+;; Remainder instructions.
+
+(define_insn "divmodsi4"
+ [(set (match_operand:SI 0 "register_operand" "=a")
+ (div:SI (match_operand:SI 1 "register_operand" "0")
+ (match_operand:SI 2 "general_operand" "rm")))
+ (set (match_operand:SI 3 "register_operand" "=&d")
+ (mod:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "*
+{
+#ifdef INTEL_SYNTAX
+ output_asm_insn (\"cdq\", operands);
+#else
+ output_asm_insn (\"cltd\", operands);
+#endif
+ return AS1 (idiv%L0,%2);
+}")
+
+(define_insn "divmodhi4"
+ [(set (match_operand:HI 0 "register_operand" "=a")
+ (div:HI (match_operand:HI 1 "register_operand" "0")
+ (match_operand:HI 2 "general_operand" "rm")))
+ (set (match_operand:HI 3 "register_operand" "=&d")
+ (mod:HI (match_dup 1) (match_dup 2)))]
+ ""
+ "cwtd\;idiv%W0 %2")
+
+;; ??? Can we make gcc zero extend operand[0]?
+(define_insn "udivmodsi4"
+ [(set (match_operand:SI 0 "register_operand" "=a")
+ (udiv:SI (match_operand:SI 1 "register_operand" "0")
+ (match_operand:SI 2 "general_operand" "rm")))
+ (set (match_operand:SI 3 "register_operand" "=&d")
+ (umod:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "*
+{
+ output_asm_insn (AS2 (xor%L3,%3,%3), operands);
+ return AS1 (div%L0,%2);
+}")
+
+;; ??? Can we make gcc zero extend operand[0]?
+(define_insn "udivmodhi4"
+ [(set (match_operand:HI 0 "register_operand" "=a")
+ (udiv:HI (match_operand:HI 1 "register_operand" "0")
+ (match_operand:HI 2 "general_operand" "rm")))
+ (set (match_operand:HI 3 "register_operand" "=&d")
+ (umod:HI (match_dup 1) (match_dup 2)))]
+ ""
+ "*
+{
+ output_asm_insn (AS2 (xor%W0,%3,%3), operands);
+ return AS1 (div%W0,%2);
+}")
+
+/*
+;; This should be a valid double-length division which we may want to add.
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=a")
+ (udiv:DI (match_operand:DI 1 "register_operand" "a")
+ (match_operand:SI 2 "general_operand" "rm")))
+ (set (match_operand:SI 3 "register_operand" "=d")
+ (umod:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "div%L0 %2,%0")
+*/
+
+;;- and instructions
+
+;; On i386,
+;; movzbl %bl,%ebx
+;; is faster than
+;; andl $255,%ebx
+;;
+;; but if the reg is %eax, then the "andl" is faster.
+;;
+;; On i486, the "andl" is always faster than the "movzbl".
+;;
+;; On both i386 and i486, a three operand AND is as fast with movzbl or
+;; movzwl as with andl, if operands[0] != operands[1].
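+;; For instance (hypothetical registers, not taken from the pattern
+;; below), "ecx = ebx & 0xff" with a distinct destination can be either
+;;	movzbl %bl,%ecx
+;; or
+;;	movl %ebx,%ecx
+;;	andl $255,%ecx
+;; and the two forms run at comparable speed.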
+
+;; The `r' in `rm' for operand 3 looks redundant, but it causes
+;; optional reloads to be generated if op 3 is a pseudo in a stack slot.
+
+;; ??? What if we only change one byte of an offsettable memory reference?
+(define_insn "andsi3"
+ [(set (match_operand:SI 0 "general_operand" "=r,r,rm,r")
+ (and:SI (match_operand:SI 1 "general_operand" "%rm,qm,0,0")
+ (match_operand:SI 2 "general_operand" "L,K,ri,rm")))]
+ ""
+ "*
+{
+ if (GET_CODE (operands[2]) == CONST_INT
+ && ! (GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0])))
+ {
+ if (INTVAL (operands[2]) == 0xffff && REG_P (operands[0])
+ && (! REG_P (operands[1])
+ || REGNO (operands[0]) != 0 || REGNO (operands[1]) != 0)
+ && (! TARGET_486 || ! rtx_equal_p (operands[0], operands[1])))
+ {
+ /* ??? tege: Should forget CC_STATUS only if we clobber a
+ remembered operand. Fix that later. */
+ CC_STATUS_INIT;
+#ifdef INTEL_SYNTAX
+ return AS2 (movzx,%w1,%0);
+#else
+ return AS2 (movz%W0%L0,%w1,%0);
+#endif
+ }
+
+ if (INTVAL (operands[2]) == 0xff && REG_P (operands[0])
+ && !(REG_P (operands[1]) && NON_QI_REG_P (operands[1]))
+ && (! REG_P (operands[1])
+ || REGNO (operands[0]) != 0 || REGNO (operands[1]) != 0)
+ && (! TARGET_486 || ! rtx_equal_p (operands[0], operands[1])))
+ {
+ /* ??? tege: Should forget CC_STATUS only if we clobber a
+ remembered operand. Fix that later. */
+ CC_STATUS_INIT;
+#ifdef INTEL_SYNTAX
+ return AS2 (movzx,%b1,%0);
+#else
+ return AS2 (movz%B0%L0,%b1,%0);
+#endif
+ }
+
+ if (QI_REG_P (operands[0]) && ~(INTVAL (operands[2]) | 0xff) == 0)
+ {
+ CC_STATUS_INIT;
+
+ if (INTVAL (operands[2]) == 0xffffff00)
+ {
+ operands[2] = const0_rtx;
+ return AS2 (mov%B0,%2,%b0);
+ }
+
+ operands[2] = GEN_INT (INTVAL (operands[2]) & 0xff);
+ return AS2 (and%B0,%2,%b0);
+ }
+
+ if (QI_REG_P (operands[0]) && ~(INTVAL (operands[2]) | 0xff00) == 0)
+ {
+ CC_STATUS_INIT;
+
+ if (INTVAL (operands[2]) == 0xffff00ff)
+ {
+ operands[2] = const0_rtx;
+ return AS2 (mov%B0,%2,%h0);
+ }
+
+ operands[2] = GEN_INT ((INTVAL (operands[2]) >> 8) & 0xff);
+ return AS2 (and%B0,%2,%h0);
+ }
+
+ if (GET_CODE (operands[0]) == MEM && INTVAL (operands[2]) == 0xffff0000)
+ {
+ operands[2] = const0_rtx;
+ return AS2 (mov%W0,%2,%w0);
+ }
+ }
+
+ return AS2 (and%L0,%2,%0);
+}")
+
+(define_insn "andhi3"
+ [(set (match_operand:HI 0 "general_operand" "=rm,r")
+ (and:HI (match_operand:HI 1 "general_operand" "%0,0")
+ (match_operand:HI 2 "general_operand" "ri,rm")))]
+ ""
+ "*
+{
+ if (GET_CODE (operands[2]) == CONST_INT
+ && ! (GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0])))
+ {
+ /* Can we ignore the upper byte? */
+ if ((! REG_P (operands[0]) || QI_REG_P (operands[0]))
+ && (INTVAL (operands[2]) & 0xff00) == 0xff00)
+ {
+ CC_STATUS_INIT;
+
+ if ((INTVAL (operands[2]) & 0xff) == 0)
+ {
+ operands[2] = const0_rtx;
+ return AS2 (mov%B0,%2,%b0);
+ }
+
+ operands[2] = GEN_INT (INTVAL (operands[2]) & 0xff);
+ return AS2 (and%B0,%2,%b0);
+ }
+
+ /* Can we ignore the lower byte? */
+ /* ??? what about offsettable memory references? */
+ if (QI_REG_P (operands[0]) && (INTVAL (operands[2]) & 0xff) == 0xff)
+ {
+ CC_STATUS_INIT;
+
+ if ((INTVAL (operands[2]) & 0xff00) == 0)
+ {
+ operands[2] = const0_rtx;
+ return AS2 (mov%B0,%2,%h0);
+ }
+
+ operands[2] = GEN_INT ((INTVAL (operands[2]) >> 8) & 0xff);
+ return AS2 (and%B0,%2,%h0);
+ }
+ }
+
+ return AS2 (and%W0,%2,%0);
+}")
+
+(define_insn "andqi3"
+ [(set (match_operand:QI 0 "general_operand" "=qm,q")
+ (and:QI (match_operand:QI 1 "general_operand" "%0,0")
+ (match_operand:QI 2 "general_operand" "qn,qmn")))]
+ ""
+ "* return AS2 (and%B0,%2,%0);")
+
+/* I am nervous about these two; add them later.
+;I presume this means that we have something small in, say, op0 = %eax,
+;and we want to AND it with memory, so we can do it with just an
+;"andb m,%al" and have success.
+(define_insn ""
+ [(set (match_operand:SI 0 "general_operand" "=r")
+ (and:SI (zero_extend:SI
+ (match_operand:HI 1 "nonimmediate_operand" "rm"))
+ (match_operand:SI 2 "general_operand" "0")))]
+ "GET_CODE (operands[2]) == CONST_INT
+ && (unsigned int) INTVAL (operands[2]) < (1 << GET_MODE_BITSIZE (HImode))"
+ "and%W0 %1,%0")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "general_operand" "=q")
+ (and:SI
+ (zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "qm"))
+ (match_operand:SI 2 "general_operand" "0")))]
+ "GET_CODE (operands[2]) == CONST_INT
+ && (unsigned int) INTVAL (operands[2]) < (1 << GET_MODE_BITSIZE (QImode))"
+ "and%L0 %1,%0")
+
+*/
+
+;;- Bit set (inclusive or) instructions
+
+;; ??? What if we only change one byte of an offsettable memory reference?
+(define_insn "iorsi3"
+ [(set (match_operand:SI 0 "general_operand" "=rm,r")
+ (ior:SI (match_operand:SI 1 "general_operand" "%0,0")
+ (match_operand:SI 2 "general_operand" "ri,rm")))]
+ ""
+ "*
+{
+ if (GET_CODE (operands[2]) == CONST_INT
+ && ! (GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0])))
+ {
+ if ((! REG_P (operands[0]) || QI_REG_P (operands[0]))
+ && (INTVAL (operands[2]) & ~0xff) == 0)
+ {
+ CC_STATUS_INIT;
+
+ if (INTVAL (operands[2]) == 0xff)
+ return AS2 (mov%B0,%2,%b0);
+
+ return AS2 (or%B0,%2,%b0);
+ }
+
+ if (QI_REG_P (operands[0]) && (INTVAL (operands[2]) & ~0xff00) == 0)
+ {
+ CC_STATUS_INIT;
+ operands[2] = GEN_INT (INTVAL (operands[2]) >> 8);
+
+ if (INTVAL (operands[2]) == 0xff)
+ return AS2 (mov%B0,%2,%h0);
+
+ return AS2 (or%B0,%2,%h0);
+ }
+ }
+
+ return AS2 (or%L0,%2,%0);
+}")
+
+(define_insn "iorhi3"
+ [(set (match_operand:HI 0 "general_operand" "=rm,r")
+ (ior:HI (match_operand:HI 1 "general_operand" "%0,0")
+ (match_operand:HI 2 "general_operand" "ri,rm")))]
+ ""
+ "*
+{
+ if (GET_CODE (operands[2]) == CONST_INT
+ && ! (GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0])))
+ {
+ /* Can we ignore the upper byte? */
+ if ((! REG_P (operands[0]) || QI_REG_P (operands[0]))
+ && (INTVAL (operands[2]) & 0xff00) == 0)
+ {
+ CC_STATUS_INIT;
+ if (INTVAL (operands[2]) & 0xffff0000)
+ operands[2] = GEN_INT (INTVAL (operands[2]) & 0xffff);
+
+ if (INTVAL (operands[2]) == 0xff)
+ return AS2 (mov%B0,%2,%b0);
+
+ return AS2 (or%B0,%2,%b0);
+ }
+
+ /* Can we ignore the lower byte? */
+ /* ??? what about offsettable memory references? */
+ if (QI_REG_P (operands[0])
+ && (INTVAL (operands[2]) & 0xff) == 0)
+ {
+ CC_STATUS_INIT;
+ operands[2] = GEN_INT ((INTVAL (operands[2]) >> 8) & 0xff);
+
+ if (INTVAL (operands[2]) == 0xff)
+ return AS2 (mov%B0,%2,%h0);
+
+ return AS2 (or%B0,%2,%h0);
+ }
+ }
+
+ return AS2 (or%W0,%2,%0);
+}")
+
+(define_insn "iorqi3"
+ [(set (match_operand:QI 0 "general_operand" "=qm,q")
+ (ior:QI (match_operand:QI 1 "general_operand" "%0,0")
+ (match_operand:QI 2 "general_operand" "qn,qmn")))]
+ ""
+ "* return AS2 (or%B0,%2,%0);")
+
+;;- xor instructions
+
+;; ??? What if we only change one byte of an offsettable memory reference?
+(define_insn "xorsi3"
+ [(set (match_operand:SI 0 "general_operand" "=rm,r")
+ (xor:SI (match_operand:SI 1 "general_operand" "%0,0")
+ (match_operand:SI 2 "general_operand" "ri,rm")))]
+ ""
+ "*
+{
+ if (GET_CODE (operands[2]) == CONST_INT
+ && ! (GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0])))
+ {
+ if ((! REG_P (operands[0]) || QI_REG_P (operands[0]))
+ && (INTVAL (operands[2]) & ~0xff) == 0)
+ {
+ CC_STATUS_INIT;
+
+ if (INTVAL (operands[2]) == 0xff)
+ return AS1 (not%B0,%b0);
+
+ return AS2 (xor%B0,%2,%b0);
+ }
+
+ if (QI_REG_P (operands[0]) && (INTVAL (operands[2]) & ~0xff00) == 0)
+ {
+ CC_STATUS_INIT;
+ operands[2] = GEN_INT (INTVAL (operands[2]) >> 8);
+
+ if (INTVAL (operands[2]) == 0xff)
+ return AS1 (not%B0,%h0);
+
+ return AS2 (xor%B0,%2,%h0);
+ }
+ }
+
+ return AS2 (xor%L0,%2,%0);
+}")
+
+(define_insn "xorhi3"
+ [(set (match_operand:HI 0 "general_operand" "=rm,r")
+ (xor:HI (match_operand:HI 1 "general_operand" "%0,0")
+ (match_operand:HI 2 "general_operand" "ri,rm")))]
+ ""
+ "*
+{
+ if (GET_CODE (operands[2]) == CONST_INT
+ && ! (GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0])))
+ {
+ /* Can we ignore the upper byte? */
+ if ((! REG_P (operands[0]) || QI_REG_P (operands[0]))
+ && (INTVAL (operands[2]) & 0xff00) == 0)
+ {
+ CC_STATUS_INIT;
+ if (INTVAL (operands[2]) & 0xffff0000)
+ operands[2] = GEN_INT (INTVAL (operands[2]) & 0xffff);
+
+ if (INTVAL (operands[2]) == 0xff)
+ return AS1 (not%B0,%b0);
+
+ return AS2 (xor%B0,%2,%b0);
+ }
+
+ /* Can we ignore the lower byte? */
+ /* ??? what about offsettable memory references? */
+ if (QI_REG_P (operands[0])
+ && (INTVAL (operands[2]) & 0xff) == 0)
+ {
+ CC_STATUS_INIT;
+ operands[2] = GEN_INT ((INTVAL (operands[2]) >> 8) & 0xff);
+
+ if (INTVAL (operands[2]) == 0xff)
+ return AS1 (not%B0,%h0);
+
+ return AS2 (xor%B0,%2,%h0);
+ }
+ }
+
+ return AS2 (xor%W0,%2,%0);
+}")
+
+(define_insn "xorqi3"
+ [(set (match_operand:QI 0 "general_operand" "=qm,q")
+ (xor:QI (match_operand:QI 1 "general_operand" "%0,0")
+ (match_operand:QI 2 "general_operand" "qn,qm")))]
+ ""
+ "* return AS2 (xor%B0,%2,%0);")
+
+;;- negation instructions
+
+(define_insn "negdi2"
+ [(set (match_operand:DI 0 "general_operand" "=&ro")
+ (neg:DI (match_operand:DI 1 "general_operand" "0")))]
+ ""
+ "*
+{
+ rtx xops[2], low[1], high[1];
+
+ CC_STATUS_INIT;
+
+ split_di (operands, 1, low, high);
+ xops[0] = const0_rtx;
+ xops[1] = high[0];
+
+ output_asm_insn (AS1 (neg%L0,%0), low);
+ output_asm_insn (AS2 (adc%L1,%0,%1), xops);
+ output_asm_insn (AS1 (neg%L0,%0), high);
+ RET;
+}")
+
+(define_insn "negsi2"
+ [(set (match_operand:SI 0 "general_operand" "=rm")
+ (neg:SI (match_operand:SI 1 "general_operand" "0")))]
+ ""
+ "neg%L0 %0")
+
+(define_insn "neghi2"
+ [(set (match_operand:HI 0 "general_operand" "=rm")
+ (neg:HI (match_operand:HI 1 "general_operand" "0")))]
+ ""
+ "neg%W0 %0")
+
+(define_insn "negqi2"
+ [(set (match_operand:QI 0 "general_operand" "=qm")
+ (neg:QI (match_operand:QI 1 "general_operand" "0")))]
+ ""
+ "neg%B0 %0")
+
+(define_insn "negsf2"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (neg:SF (match_operand:SF 1 "general_operand" "0")))]
+ "TARGET_80387"
+ "fchs")
+
+(define_insn "negdf2"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (neg:DF (match_operand:DF 1 "general_operand" "0")))]
+ "TARGET_80387"
+ "fchs")
+
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (neg:DF (float_extend:DF (match_operand:SF 1 "general_operand" "0"))))]
+ "TARGET_80387"
+ "fchs")
+
+(define_insn "negxf2"
+ [(set (match_operand:XF 0 "register_operand" "=f")
+ (neg:XF (match_operand:XF 1 "general_operand" "0")))]
+ "TARGET_80387"
+ "fchs")
+
+(define_insn ""
+ [(set (match_operand:XF 0 "register_operand" "=f")
+ (neg:XF (float_extend:XF (match_operand:DF 1 "general_operand" "0"))))]
+ "TARGET_80387"
+ "fchs")
+
+;; Absolute value instructions
+
+(define_insn "abssf2"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (abs:SF (match_operand:SF 1 "general_operand" "0")))]
+ "TARGET_80387"
+ "fabs")
+
+(define_insn "absdf2"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (abs:DF (match_operand:DF 1 "general_operand" "0")))]
+ "TARGET_80387"
+ "fabs")
+
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (abs:DF (float_extend:DF (match_operand:SF 1 "general_operand" "0"))))]
+ "TARGET_80387"
+ "fabs")
+
+(define_insn "absxf2"
+ [(set (match_operand:XF 0 "register_operand" "=f")
+ (abs:XF (match_operand:XF 1 "general_operand" "0")))]
+ "TARGET_80387"
+ "fabs")
+
+(define_insn ""
+ [(set (match_operand:XF 0 "register_operand" "=f")
+ (abs:XF (float_extend:XF (match_operand:DF 1 "general_operand" "0"))))]
+ "TARGET_80387"
+ "fabs")
+
+(define_insn "sqrtsf2"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (sqrt:SF (match_operand:SF 1 "general_operand" "0")))]
+ "! TARGET_NO_FANCY_MATH_387 && TARGET_80387
+ && (TARGET_IEEE_FP || flag_fast_math) "
+ "fsqrt")
+
+(define_insn "sqrtdf2"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (sqrt:DF (match_operand:DF 1 "general_operand" "0")))]
+ "! TARGET_NO_FANCY_MATH_387 && TARGET_80387
+ && (TARGET_IEEE_FP || flag_fast_math) "
+ "fsqrt")
+
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (sqrt:DF (float_extend:DF
+ (match_operand:SF 1 "general_operand" "0"))))]
+ "! TARGET_NO_FANCY_MATH_387 && TARGET_80387
+ && (TARGET_IEEE_FP || flag_fast_math) "
+ "fsqrt")
+
+(define_insn "sqrtxf2"
+ [(set (match_operand:XF 0 "register_operand" "=f")
+ (sqrt:XF (match_operand:XF 1 "general_operand" "0")))]
+ "! TARGET_NO_FANCY_MATH_387 && TARGET_80387
+ && (TARGET_IEEE_FP || flag_fast_math) "
+ "fsqrt")
+
+(define_insn ""
+ [(set (match_operand:XF 0 "register_operand" "=f")
+ (sqrt:XF (float_extend:XF
+ (match_operand:DF 1 "general_operand" "0"))))]
+ "! TARGET_NO_FANCY_MATH_387 && TARGET_80387
+ && (TARGET_IEEE_FP || flag_fast_math) "
+ "fsqrt")
+
+(define_insn ""
+ [(set (match_operand:XF 0 "register_operand" "=f")
+ (sqrt:XF (float_extend:XF
+ (match_operand:SF 1 "general_operand" "0"))))]
+ "! TARGET_NO_FANCY_MATH_387 && TARGET_80387
+ && (TARGET_IEEE_FP || flag_fast_math) "
+ "fsqrt")
+
+(define_insn "sindf2"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (unspec:DF [(match_operand:DF 1 "register_operand" "0")] 1))]
+ "! TARGET_NO_FANCY_MATH_387 && TARGET_80387
+ && (TARGET_IEEE_FP || flag_fast_math) "
+ "fsin")
+
+(define_insn "sinsf2"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (unspec:SF [(match_operand:SF 1 "register_operand" "0")] 1))]
+ "! TARGET_NO_FANCY_MATH_387 && TARGET_80387
+ && (TARGET_IEEE_FP || flag_fast_math) "
+ "fsin")
+
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (unspec:DF [(float_extend:DF
+ (match_operand:SF 1 "register_operand" "0"))] 1))]
+ "! TARGET_NO_FANCY_MATH_387 && TARGET_80387
+ && (TARGET_IEEE_FP || flag_fast_math) "
+ "fsin")
+
+(define_insn "cosdf2"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (unspec:DF [(match_operand:DF 1 "register_operand" "0")] 2))]
+ "! TARGET_NO_FANCY_MATH_387 && TARGET_80387
+ && (TARGET_IEEE_FP || flag_fast_math) "
+ "fcos")
+
+(define_insn "cossf2"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (unspec:SF [(match_operand:SF 1 "register_operand" "0")] 2))]
+ "! TARGET_NO_FANCY_MATH_387 && TARGET_80387
+ && (TARGET_IEEE_FP || flag_fast_math) "
+ "fcos")
+
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (unspec:DF [(float_extend:DF
+ (match_operand:SF 1 "register_operand" "0"))] 2))]
+ "! TARGET_NO_FANCY_MATH_387 && TARGET_80387
+ && (TARGET_IEEE_FP || flag_fast_math) "
+ "fcos")
+
+;;- one's complement instructions
+
+(define_insn "one_cmplsi2"
+ [(set (match_operand:SI 0 "general_operand" "=rm")
+ (not:SI (match_operand:SI 1 "general_operand" "0")))]
+ ""
+ "not%L0 %0")
+
+(define_insn "one_cmplhi2"
+ [(set (match_operand:HI 0 "general_operand" "=rm")
+ (not:HI (match_operand:HI 1 "general_operand" "0")))]
+ ""
+ "not%W0 %0")
+
+(define_insn "one_cmplqi2"
+ [(set (match_operand:QI 0 "general_operand" "=qm")
+ (not:QI (match_operand:QI 1 "general_operand" "0")))]
+ ""
+ "not%B0 %0")
+
+;;- arithmetic shift instructions
+
+;; DImode shifts are implemented using the i386 "shift double" opcode,
+;; which is written as "sh[lr]d[lw] imm,reg,reg/mem". If the shift count
+;; is variable, then the count is in %cl and the "imm" operand is dropped
+;; from the assembler input.
+
+;; This instruction shifts the target reg/mem as usual, but instead of
+;; shifting in zeros, bits are shifted in from reg operand. If the insn
+;; is a left shift double, bits are taken from the high order bits of
+;; reg, else if the insn is a shift right double, bits are taken from the
+;; low order bits of reg. So if %eax is "1234" and %edx is "5678",
+;; "shldl $8,%edx,%eax" leaves %edx unchanged and sets %eax to "2345".
+
+;; Since sh[lr]d does not change the `reg' operand, that is done
+;; separately, making all shifts emit pairs of shift double and normal
+;; shift. Since sh[lr]d does not shift more than 31 bits, and we wish to
+;; support a 63 bit shift, each shift where the count is in a reg expands
+;; to three pairs. If the overall shift is by N bits, then the first two
+;; pairs shift by N / 2 and the last pair by N & 1.
+
+;; If the shift count is a constant, we need never emit more than one
+;; shift pair, instead using moves and sign extension for counts greater
+;; than 31.
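+;; As a worked example (illustrative only): for a variable left shift by
+;; N = 35, "rorb $1,%cl" leaves N / 2 = 17 in the bits the hardware uses
+;; (35 = 00100011b rotates to 10010001b, and only the low five bits
+;; matter for a 32-bit shift count).  Two shift-double/shift pairs then
+;; shift by 17 + 17 = 34 bits, "shrb $7,%cl" recovers N & 1 = 1, and the
+;; final pair shifts the last bit, giving 35 in all.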
+
+(define_expand "ashldi3"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (ashift:DI (match_operand:DI 1 "register_operand" "")
+ (match_operand:QI 2 "nonmemory_operand" "")))]
+ ""
+ "
+{
+ if (GET_CODE (operands[2]) != CONST_INT
+ || ! CONST_OK_FOR_LETTER_P (INTVAL (operands[2]), 'J'))
+ {
+ operands[2] = copy_to_mode_reg (QImode, operands[2]);
+ emit_insn (gen_ashldi3_non_const_int (operands[0], operands[1],
+ operands[2]));
+ }
+ else
+ emit_insn (gen_ashldi3_const_int (operands[0], operands[1], operands[2]));
+
+ DONE;
+}")
+
+(define_insn "ashldi3_const_int"
+ [(set (match_operand:DI 0 "register_operand" "=&r")
+ (ashift:DI (match_operand:DI 1 "register_operand" "0")
+ (match_operand:QI 2 "const_int_operand" "J")))]
+ ""
+ "*
+{
+ rtx xops[4], low[1], high[1];
+
+ CC_STATUS_INIT;
+
+ split_di (operands, 1, low, high);
+ xops[0] = operands[2];
+ xops[1] = const1_rtx;
+ xops[2] = low[0];
+ xops[3] = high[0];
+
+ if (INTVAL (xops[0]) > 31)
+ {
+ output_asm_insn (AS2 (mov%L3,%2,%3), xops); /* Fast shift by 32 */
+ output_asm_insn (AS2 (xor%L2,%2,%2), xops);
+
+ if (INTVAL (xops[0]) > 32)
+ {
+ xops[0] = GEN_INT (INTVAL (xops[0]) - 32);
+ output_asm_insn (AS2 (sal%L3,%0,%3), xops); /* Remaining shift */
+ }
+ }
+ else
+ {
+ output_asm_insn (AS3 (shld%L3,%0,%2,%3), xops);
+ output_asm_insn (AS2 (sal%L2,%0,%2), xops);
+ }
+ RET;
+}")
+
+(define_insn "ashldi3_non_const_int"
+ [(set (match_operand:DI 0 "register_operand" "=&r")
+ (ashift:DI (match_operand:DI 1 "register_operand" "0")
+ (match_operand:QI 2 "register_operand" "c")))
+ (clobber (match_dup 2))]
+ ""
+ "*
+{
+ rtx xops[4], low[1], high[1];
+
+ CC_STATUS_INIT;
+
+ split_di (operands, 1, low, high);
+ xops[0] = operands[2];
+ xops[1] = const1_rtx;
+ xops[2] = low[0];
+ xops[3] = high[0];
+
+ output_asm_insn (AS2 (ror%B0,%1,%0), xops); /* shift count / 2 */
+
+ output_asm_insn (AS3_SHIFT_DOUBLE (shld%L3,%0,%2,%3), xops);
+ output_asm_insn (AS2 (sal%L2,%0,%2), xops);
+ output_asm_insn (AS3_SHIFT_DOUBLE (shld%L3,%0,%2,%3), xops);
+ output_asm_insn (AS2 (sal%L2,%0,%2), xops);
+
+ xops[1] = GEN_INT (7); /* shift count & 1 */
+
+ output_asm_insn (AS2 (shr%B0,%1,%0), xops);
+
+ output_asm_insn (AS3_SHIFT_DOUBLE (shld%L3,%0,%2,%3), xops);
+ output_asm_insn (AS2 (sal%L2,%0,%2), xops);
+
+ RET;
+}")
+
+;; On i386 and i486, "addl reg,reg" is faster than "sall $1,reg".
+;; On i486, movl/sall appears slightly faster than leal, but the leal
+;; is smaller - use leal for now unless the shift count is 1.
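+;; For example (illustrative registers, not emitted verbatim below), a
+;; left shift of %ebx by 2 into the distinct register %ecx becomes
+;;	leal 0(,%ebx,4),%ecx
+;; while a 486 shift by 1 with a distinct destination prefers
+;;	movl %ebx,%ecx
+;;	addl %ebx,%ecx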
+
+(define_insn "ashlsi3"
+ [(set (match_operand:SI 0 "general_operand" "=r,rm")
+ (ashift:SI (match_operand:SI 1 "general_operand" "r,0")
+ (match_operand:SI 2 "nonmemory_operand" "M,cI")))]
+ ""
+ "*
+{
+ if (REG_P (operands[0]) && REGNO (operands[0]) != REGNO (operands[1]))
+ {
+ if (TARGET_486 && INTVAL (operands[2]) == 1)
+ {
+ output_asm_insn (AS2 (mov%L0,%1,%0), operands);
+ return AS2 (add%L0,%1,%0);
+ }
+ else
+ {
+ CC_STATUS_INIT;
+
+ if (operands[1] == stack_pointer_rtx)
+ {
+ output_asm_insn (AS2 (mov%L0,%1,%0), operands);
+ operands[1] = operands[0];
+ }
+ operands[1] = gen_rtx (MULT, SImode, operands[1],
+ GEN_INT (1 << INTVAL (operands[2])));
+ return AS2 (lea%L0,%a1,%0);
+ }
+ }
+
+ if (REG_P (operands[2]))
+ return AS2 (sal%L0,%b2,%0);
+
+ if (REG_P (operands[0]) && operands[2] == const1_rtx)
+ return AS2 (add%L0,%0,%0);
+
+ return AS2 (sal%L0,%2,%0);
+}")
+
+(define_insn "ashlhi3"
+ [(set (match_operand:HI 0 "general_operand" "=rm")
+ (ashift:HI (match_operand:HI 1 "general_operand" "0")
+ (match_operand:HI 2 "nonmemory_operand" "cI")))]
+ ""
+ "*
+{
+ if (REG_P (operands[2]))
+ return AS2 (sal%W0,%b2,%0);
+
+ if (REG_P (operands[0]) && operands[2] == const1_rtx)
+ return AS2 (add%W0,%0,%0);
+
+ return AS2 (sal%W0,%2,%0);
+}")
+
+(define_insn "ashlqi3"
+ [(set (match_operand:QI 0 "general_operand" "=qm")
+ (ashift:QI (match_operand:QI 1 "general_operand" "0")
+ (match_operand:QI 2 "nonmemory_operand" "cI")))]
+ ""
+ "*
+{
+ if (REG_P (operands[2]))
+ return AS2 (sal%B0,%b2,%0);
+
+ if (REG_P (operands[0]) && operands[2] == const1_rtx)
+ return AS2 (add%B0,%0,%0);
+
+ return AS2 (sal%B0,%2,%0);
+}")
+
+;; See comment above `ashldi3' about how this works.
+
+(define_expand "ashrdi3"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (ashiftrt:DI (match_operand:DI 1 "register_operand" "")
+ (match_operand:QI 2 "nonmemory_operand" "")))]
+ ""
+ "
+{
+ if (GET_CODE (operands[2]) != CONST_INT
+ || ! CONST_OK_FOR_LETTER_P (INTVAL (operands[2]), 'J'))
+ {
+ operands[2] = copy_to_mode_reg (QImode, operands[2]);
+ emit_insn (gen_ashrdi3_non_const_int (operands[0], operands[1],
+ operands[2]));
+ }
+ else
+ emit_insn (gen_ashrdi3_const_int (operands[0], operands[1], operands[2]));
+
+ DONE;
+}")
+
+(define_insn "ashrdi3_const_int"
+ [(set (match_operand:DI 0 "register_operand" "=&r")
+ (ashiftrt:DI (match_operand:DI 1 "register_operand" "0")
+ (match_operand:QI 2 "const_int_operand" "J")))]
+ ""
+ "*
+{
+ rtx xops[4], low[1], high[1];
+
+ CC_STATUS_INIT;
+
+ split_di (operands, 1, low, high);
+ xops[0] = operands[2];
+ xops[1] = const1_rtx;
+ xops[2] = low[0];
+ xops[3] = high[0];
+
+ if (INTVAL (xops[0]) > 31)
+ {
+ xops[1] = GEN_INT (31);
+ output_asm_insn (AS2 (mov%L2,%3,%2), xops);
+ output_asm_insn (AS2 (sar%L3,%1,%3), xops); /* shift by 32 */
+
+ if (INTVAL (xops[0]) > 32)
+ {
+ xops[0] = GEN_INT (INTVAL (xops[0]) - 32);
+ output_asm_insn (AS2 (sar%L2,%0,%2), xops); /* Remaining shift */
+ }
+ }
+ else
+ {
+ output_asm_insn (AS3 (shrd%L2,%0,%3,%2), xops);
+ output_asm_insn (AS2 (sar%L3,%0,%3), xops);
+ }
+
+ RET;
+}")
+
+(define_insn "ashrdi3_non_const_int"
+ [(set (match_operand:DI 0 "register_operand" "=&r")
+ (ashiftrt:DI (match_operand:DI 1 "register_operand" "0")
+ (match_operand:QI 2 "register_operand" "c")))
+ (clobber (match_dup 2))]
+ ""
+ "*
+{
+ rtx xops[4], low[1], high[1];
+
+ CC_STATUS_INIT;
+
+ split_di (operands, 1, low, high);
+ xops[0] = operands[2];
+ xops[1] = const1_rtx;
+ xops[2] = low[0];
+ xops[3] = high[0];
+
+ output_asm_insn (AS2 (ror%B0,%1,%0), xops); /* shift count / 2 */
+
+ output_asm_insn (AS3_SHIFT_DOUBLE (shrd%L2,%0,%3,%2), xops);
+ output_asm_insn (AS2 (sar%L3,%0,%3), xops);
+ output_asm_insn (AS3_SHIFT_DOUBLE (shrd%L2,%0,%3,%2), xops);
+ output_asm_insn (AS2 (sar%L3,%0,%3), xops);
+
+ xops[1] = GEN_INT (7); /* shift count & 1 */
+
+ output_asm_insn (AS2 (shr%B0,%1,%0), xops);
+
+ output_asm_insn (AS3_SHIFT_DOUBLE (shrd%L2,%0,%3,%2), xops);
+ output_asm_insn (AS2 (sar%L3,%0,%3), xops);
+
+ RET;
+}")
+
+(define_insn "ashrsi3"
+ [(set (match_operand:SI 0 "general_operand" "=rm")
+ (ashiftrt:SI (match_operand:SI 1 "general_operand" "0")
+ (match_operand:SI 2 "nonmemory_operand" "cI")))]
+ ""
+ "*
+{
+ if (REG_P (operands[2]))
+ return AS2 (sar%L0,%b2,%0);
+ else
+ return AS2 (sar%L0,%2,%0);
+}")
+
+(define_insn "ashrhi3"
+ [(set (match_operand:HI 0 "general_operand" "=rm")
+ (ashiftrt:HI (match_operand:HI 1 "general_operand" "0")
+ (match_operand:HI 2 "nonmemory_operand" "cI")))]
+ ""
+ "*
+{
+ if (REG_P (operands[2]))
+ return AS2 (sar%W0,%b2,%0);
+ else
+ return AS2 (sar%W0,%2,%0);
+}")
+
+(define_insn "ashrqi3"
+ [(set (match_operand:QI 0 "general_operand" "=qm")
+ (ashiftrt:QI (match_operand:QI 1 "general_operand" "0")
+ (match_operand:QI 2 "nonmemory_operand" "cI")))]
+ ""
+ "*
+{
+ if (REG_P (operands[2]))
+ return AS2 (sar%B0,%b2,%0);
+ else
+ return AS2 (sar%B0,%2,%0);
+}")
+
+;;- logical shift instructions
+
+;; See comment above `ashldi3' about how this works.
+
+(define_expand "lshrdi3"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (lshiftrt:DI (match_operand:DI 1 "register_operand" "")
+ (match_operand:QI 2 "nonmemory_operand" "")))]
+ ""
+ "
+{
+ if (GET_CODE (operands[2]) != CONST_INT
+ || ! CONST_OK_FOR_LETTER_P (INTVAL (operands[2]), 'J'))
+ {
+ operands[2] = copy_to_mode_reg (QImode, operands[2]);
+ emit_insn (gen_lshrdi3_non_const_int (operands[0], operands[1],
+ operands[2]));
+ }
+ else
+ emit_insn (gen_lshrdi3_const_int (operands[0], operands[1], operands[2]));
+
+ DONE;
+}")
+
+(define_insn "lshrdi3_const_int"
+ [(set (match_operand:DI 0 "register_operand" "=&r")
+ (lshiftrt:DI (match_operand:DI 1 "register_operand" "0")
+ (match_operand:QI 2 "const_int_operand" "J")))]
+ ""
+ "*
+{
+ rtx xops[4], low[1], high[1];
+
+ CC_STATUS_INIT;
+
+ split_di (operands, 1, low, high);
+ xops[0] = operands[2];
+ xops[1] = const1_rtx;
+ xops[2] = low[0];
+ xops[3] = high[0];
+
+ if (INTVAL (xops[0]) > 31)
+ {
+ output_asm_insn (AS2 (mov%L2,%3,%2), xops); /* Fast shift by 32 */
+ output_asm_insn (AS2 (xor%L3,%3,%3), xops);
+
+ if (INTVAL (xops[0]) > 32)
+ {
+ xops[0] = GEN_INT (INTVAL (xops[0]) - 32);
+ output_asm_insn (AS2 (shr%L2,%0,%2), xops); /* Remaining shift */
+ }
+ }
+ else
+ {
+ output_asm_insn (AS3 (shrd%L2,%0,%3,%2), xops);
+ output_asm_insn (AS2 (shr%L3,%0,%3), xops);
+ }
+
+ RET;
+}")
+
+(define_insn "lshrdi3_non_const_int"
+ [(set (match_operand:DI 0 "register_operand" "=&r")
+ (lshiftrt:DI (match_operand:DI 1 "register_operand" "0")
+ (match_operand:QI 2 "register_operand" "c")))
+ (clobber (match_dup 2))]
+ ""
+ "*
+{
+ rtx xops[4], low[1], high[1];
+
+ CC_STATUS_INIT;
+
+ split_di (operands, 1, low, high);
+ xops[0] = operands[2];
+ xops[1] = const1_rtx;
+ xops[2] = low[0];
+ xops[3] = high[0];
+
+ output_asm_insn (AS2 (ror%B0,%1,%0), xops); /* shift count / 2 */
+
+ output_asm_insn (AS3_SHIFT_DOUBLE (shrd%L2,%0,%3,%2), xops);
+ output_asm_insn (AS2 (shr%L3,%0,%3), xops);
+ output_asm_insn (AS3_SHIFT_DOUBLE (shrd%L2,%0,%3,%2), xops);
+ output_asm_insn (AS2 (shr%L3,%0,%3), xops);
+
+ xops[1] = GEN_INT (7); /* shift count & 1 */
+
+ output_asm_insn (AS2 (shr%B0,%1,%0), xops);
+
+ output_asm_insn (AS3_SHIFT_DOUBLE (shrd%L2,%0,%3,%2), xops);
+ output_asm_insn (AS2 (shr%L3,%0,%3), xops);
+
+ RET;
+}")
+
+(define_insn "lshrsi3"
+ [(set (match_operand:SI 0 "general_operand" "=rm")
+ (lshiftrt:SI (match_operand:SI 1 "general_operand" "0")
+ (match_operand:SI 2 "nonmemory_operand" "cI")))]
+ ""
+ "*
+{
+ if (REG_P (operands[2]))
+ return AS2 (shr%L0,%b2,%0);
+ else
+    return AS2 (shr%L0,%2,%0);
+}")
+
+(define_insn "lshrhi3"
+ [(set (match_operand:HI 0 "general_operand" "=rm")
+ (lshiftrt:HI (match_operand:HI 1 "general_operand" "0")
+ (match_operand:HI 2 "nonmemory_operand" "cI")))]
+ ""
+ "*
+{
+ if (REG_P (operands[2]))
+ return AS2 (shr%W0,%b2,%0);
+ else
+ return AS2 (shr%W0,%2,%0);
+}")
+
+(define_insn "lshrqi3"
+ [(set (match_operand:QI 0 "general_operand" "=qm")
+ (lshiftrt:QI (match_operand:QI 1 "general_operand" "0")
+ (match_operand:QI 2 "nonmemory_operand" "cI")))]
+ ""
+ "*
+{
+ if (REG_P (operands[2]))
+ return AS2 (shr%B0,%b2,%0);
+ else
+ return AS2 (shr%B0,%2,%0);
+}")
+
+;;- rotate instructions
+
+(define_insn "rotlsi3"
+ [(set (match_operand:SI 0 "general_operand" "=rm")
+ (rotate:SI (match_operand:SI 1 "general_operand" "0")
+ (match_operand:SI 2 "nonmemory_operand" "cI")))]
+ ""
+ "*
+{
+ if (REG_P (operands[2]))
+ return AS2 (rol%L0,%b2,%0);
+ else
+ return AS2 (rol%L0,%2,%0);
+}")
+
+(define_insn "rotlhi3"
+ [(set (match_operand:HI 0 "general_operand" "=rm")
+ (rotate:HI (match_operand:HI 1 "general_operand" "0")
+ (match_operand:HI 2 "nonmemory_operand" "cI")))]
+ ""
+ "*
+{
+ if (REG_P (operands[2]))
+ return AS2 (rol%W0,%b2,%0);
+ else
+ return AS2 (rol%W0,%2,%0);
+}")
+
+(define_insn "rotlqi3"
+ [(set (match_operand:QI 0 "general_operand" "=qm")
+ (rotate:QI (match_operand:QI 1 "general_operand" "0")
+ (match_operand:QI 2 "nonmemory_operand" "cI")))]
+ ""
+ "*
+{
+ if (REG_P (operands[2]))
+ return AS2 (rol%B0,%b2,%0);
+ else
+ return AS2 (rol%B0,%2,%0);
+}")
+
+(define_insn "rotrsi3"
+ [(set (match_operand:SI 0 "general_operand" "=rm")
+ (rotatert:SI (match_operand:SI 1 "general_operand" "0")
+ (match_operand:SI 2 "nonmemory_operand" "cI")))]
+ ""
+ "*
+{
+ if (REG_P (operands[2]))
+ return AS2 (ror%L0,%b2,%0);
+ else
+ return AS2 (ror%L0,%2,%0);
+}")
+
+(define_insn "rotrhi3"
+ [(set (match_operand:HI 0 "general_operand" "=rm")
+ (rotatert:HI (match_operand:HI 1 "general_operand" "0")
+ (match_operand:HI 2 "nonmemory_operand" "cI")))]
+ ""
+ "*
+{
+ if (REG_P (operands[2]))
+ return AS2 (ror%W0,%b2,%0);
+ else
+ return AS2 (ror%W0,%2,%0);
+}")
+
+(define_insn "rotrqi3"
+ [(set (match_operand:QI 0 "general_operand" "=qm")
+ (rotatert:QI (match_operand:QI 1 "general_operand" "0")
+ (match_operand:QI 2 "nonmemory_operand" "cI")))]
+ ""
+ "*
+{
+ if (REG_P (operands[2]))
+ return AS2 (ror%B0,%b2,%0);
+ else
+ return AS2 (ror%B0,%2,%0);
+}")
+
+/*
+;; This usually loses. But try a define_expand to recognize a few cases
+;; we can do efficiently, such as accessing the "high" QImode registers,
+;; %ah, %bh, %ch, %dh.
+(define_insn "insv"
+ [(set (zero_extract:SI (match_operand:SI 0 "register_operand" "+&r")
+ (match_operand:SI 1 "general_operand" "i")
+ (match_operand:SI 2 "general_operand" "i"))
+ (match_operand:SI 3 "general_operand" "ri"))]
+ ""
+ "*
+{
+ if (INTVAL (operands[1]) + INTVAL (operands[2]) > GET_MODE_BITSIZE (SImode))
+ abort ();
+ if (GET_CODE (operands[3]) == CONST_INT)
+ {
+ unsigned int mask = (1 << INTVAL (operands[1])) - 1;
+ operands[1] = GEN_INT (~(mask << INTVAL (operands[2])));
+ output_asm_insn (AS2 (and%L0,%1,%0), operands);
+ operands[3] = GEN_INT (INTVAL (operands[3]) << INTVAL (operands[2]));
+ output_asm_insn (AS2 (or%L0,%3,%0), operands);
+ }
+ else
+ {
+ operands[0] = gen_rtx (REG, SImode, REGNO (operands[0]));
+ if (INTVAL (operands[2]))
+ output_asm_insn (AS2 (ror%L0,%2,%0), operands);
+ output_asm_insn (AS3 (shrd%L0,%1,%3,%0), operands);
+ operands[2] = GEN_INT (BITS_PER_WORD
+ - INTVAL (operands[1]) - INTVAL (operands[2]));
+ if (INTVAL (operands[2]))
+ output_asm_insn (AS2 (ror%L0,%2,%0), operands);
+ }
+ RET;
+}")
+*/
+/*
+;; ??? There are problems with the mode of operand[3]. The point of this
+;; is to represent an HImode move to a "high byte" register.
+
+(define_expand "insv"
+ [(set (zero_extract:SI (match_operand:SI 0 "general_operand" "")
+ (match_operand:SI 1 "immediate_operand" "")
+ (match_operand:SI 2 "immediate_operand" ""))
+ (match_operand:QI 3 "general_operand" "ri"))]
+ ""
+ "
+{
+ if (GET_CODE (operands[1]) != CONST_INT
+ || GET_CODE (operands[2]) != CONST_INT)
+ FAIL;
+
+ if (! (INTVAL (operands[1]) == 8
+ && (INTVAL (operands[2]) == 8 || INTVAL (operands[2]) == 0))
+ && ! INTVAL (operands[1]) == 1)
+ FAIL;
+}")
+
+;; ??? Are these constraints right?
+(define_insn ""
+ [(set (zero_extract:SI (match_operand:SI 0 "general_operand" "+&qo")
+ (const_int 8)
+ (const_int 8))
+ (match_operand:QI 1 "general_operand" "qn"))]
+ ""
+ "*
+{
+ if (REG_P (operands[0]))
+ return AS2 (mov%B0,%1,%h0);
+
+ operands[0] = adj_offsettable_operand (operands[0], 1);
+ return AS2 (mov%B0,%1,%0);
+}")
+*/
+
+;; On i386, the register count for a bit operation is *not* truncated,
+;; so SHIFT_COUNT_TRUNCATED must not be defined.
+
+;; On i486, the shift & or/and code is faster than bts or btr. If
+;; operands[0] is a MEM, the bt[sr] is half as fast as the normal code.
+
+;; On i386, bts is a little faster if operands[0] is a reg, and a
+;; little slower if operands[0] is a MEM, than the shift & or/and code.
+;; Use bts & btr, since they reload better.
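+;; To make the comparison concrete (illustrative code, not what these
+;; patterns emit verbatim): setting bit %ecx of %eax is either
+;;	btsl %ecx,%eax
+;; or the shift & or sequence
+;;	movl $1,%edx
+;;	sall %cl,%edx
+;;	orl %edx,%eax
+;; with btr and a not/and pair playing the same roles for bit clearing.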
+
+;; General bit set and clear.
+(define_insn ""
+ [(set (zero_extract:SI (match_operand:SI 0 "general_operand" "+rm")
+ (const_int 1)
+ (match_operand:SI 2 "general_operand" "r"))
+ (match_operand:SI 3 "const_int_operand" "n"))]
+ "! TARGET_486 && GET_CODE (operands[2]) != CONST_INT"
+ "*
+{
+ CC_STATUS_INIT;
+
+ if (INTVAL (operands[3]) == 1)
+ return AS2 (bts%L0,%2,%0);
+ else
+ return AS2 (btr%L0,%2,%0);
+}")
+
+;; Bit complement. See comments on previous pattern.
+;; ??? Is this really worthwhile?
+(define_insn ""
+ [(set (match_operand:SI 0 "general_operand" "=rm")
+ (xor:SI (ashift:SI (const_int 1)
+ (match_operand:SI 1 "general_operand" "r"))
+ (match_operand:SI 2 "general_operand" "0")))]
+ "! TARGET_486 && GET_CODE (operands[1]) != CONST_INT"
+ "*
+{
+ CC_STATUS_INIT;
+
+ return AS2 (btc%L0,%1,%0);
+}")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "general_operand" "=rm")
+ (xor:SI (match_operand:SI 1 "general_operand" "0")
+ (ashift:SI (const_int 1)
+ (match_operand:SI 2 "general_operand" "r"))))]
+ "! TARGET_486 && GET_CODE (operands[2]) != CONST_INT"
+ "*
+{
+ CC_STATUS_INIT;
+
+ return AS2 (btc%L0,%2,%0);
+}")
+
+;; Recognizers for bit-test instructions.
+
+;; The bt opcode allows a MEM in operands[0]. But on both i386 and
+;; i486, it is faster to copy a MEM to REG and then use bt, than to use
+;; bt on the MEM directly.
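+;; That is (purely illustrative), instead of "btl %edx,mem" the faster
+;; sequence is
+;;	movl mem,%eax
+;;	btl %edx,%eax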
+
+;; ??? The first argument of a zero_extract must not be reloaded, so
+;; don't allow a MEM in the operand predicate without allowing it in the
+;; constraint.
+
+(define_insn ""
+ [(set (cc0) (zero_extract (match_operand:SI 0 "register_operand" "r")
+ (const_int 1)
+ (match_operand:SI 1 "general_operand" "r")))]
+ "GET_CODE (operands[1]) != CONST_INT"
+ "*
+{
+ cc_status.flags |= CC_Z_IN_NOT_C;
+ return AS2 (bt%L0,%1,%0);
+}")
+
+(define_insn ""
+ [(set (cc0) (zero_extract (match_operand:SI 0 "register_operand" "r")
+ (match_operand:SI 1 "const_int_operand" "n")
+ (match_operand:SI 2 "const_int_operand" "n")))]
+ ""
+ "*
+{
+ unsigned int mask;
+
+ mask = ((1 << INTVAL (operands[1])) - 1) << INTVAL (operands[2]);
+ operands[1] = GEN_INT (mask);
+
+ if (QI_REG_P (operands[0]))
+ {
+ if ((mask & ~0xff) == 0)
+ {
+ cc_status.flags |= CC_NOT_NEGATIVE;
+ return AS2 (test%B0,%1,%b0);
+ }
+
+ if ((mask & ~0xff00) == 0)
+ {
+ cc_status.flags |= CC_NOT_NEGATIVE;
+ operands[1] = GEN_INT (mask >> 8);
+ return AS2 (test%B0,%1,%h0);
+ }
+ }
+
+ return AS2 (test%L0,%1,%0);
+}")
+
+;; ??? All bets are off if operand 0 is a volatile MEM reference.
+;; The CPU may access unspecified bytes around the actual target byte.
+
+(define_insn ""
+ [(set (cc0) (zero_extract (match_operand:QI 0 "general_operand" "rm")
+ (match_operand:SI 1 "const_int_operand" "n")
+ (match_operand:SI 2 "const_int_operand" "n")))]
+ "GET_CODE (operands[0]) != MEM || ! MEM_VOLATILE_P (operands[0])"
+ "*
+{
+ unsigned int mask;
+
+ mask = ((1 << INTVAL (operands[1])) - 1) << INTVAL (operands[2]);
+ operands[1] = GEN_INT (mask);
+
+ if (! REG_P (operands[0]) || QI_REG_P (operands[0]))
+ {
+ if ((mask & ~0xff) == 0)
+ {
+ cc_status.flags |= CC_NOT_NEGATIVE;
+ return AS2 (test%B0,%1,%b0);
+ }
+
+ if ((mask & ~0xff00) == 0)
+ {
+ cc_status.flags |= CC_NOT_NEGATIVE;
+ operands[1] = GEN_INT (mask >> 8);
+
+ if (QI_REG_P (operands[0]))
+ return AS2 (test%B0,%1,%h0);
+ else
+ {
+ operands[0] = adj_offsettable_operand (operands[0], 1);
+ return AS2 (test%B0,%1,%b0);
+ }
+ }
+
+ if (GET_CODE (operands[0]) == MEM && (mask & ~0xff0000) == 0)
+ {
+ cc_status.flags |= CC_NOT_NEGATIVE;
+ operands[1] = GEN_INT (mask >> 16);
+ operands[0] = adj_offsettable_operand (operands[0], 2);
+ return AS2 (test%B0,%1,%b0);
+ }
+
+ if (GET_CODE (operands[0]) == MEM && (mask & ~0xff000000) == 0)
+ {
+ cc_status.flags |= CC_NOT_NEGATIVE;
+ operands[1] = GEN_INT (mask >> 24);
+ operands[0] = adj_offsettable_operand (operands[0], 3);
+ return AS2 (test%B0,%1,%b0);
+ }
+ }
+
+ if (CONSTANT_P (operands[1]) || GET_CODE (operands[0]) == MEM)
+ return AS2 (test%L0,%1,%0);
+
+ return AS2 (test%L1,%0,%1);
+}")
+
+;; Store-flag instructions.
+
+;; For all sCOND expanders, also expand the compare or test insn that
+;; generates cc0. Generate an equality comparison if `seq' or `sne'.
+
+;; The 386 sCOND opcodes can write to memory. But a gcc sCOND insn may
+;; not have any input reloads. A MEM write might need an input reload
+;; for the address of the MEM. So don't allow MEM as the SET_DEST.
+
+(define_expand "seq"
+ [(match_dup 1)
+ (set (match_operand:QI 0 "register_operand" "")
+ (eq:QI (cc0) (const_int 0)))]
+ ""
+ "
+{
+ if (TARGET_IEEE_FP
+ && GET_MODE_CLASS (GET_MODE (i386_compare_op0)) == MODE_FLOAT)
+ operands[1] = (*i386_compare_gen_eq)(i386_compare_op0, i386_compare_op1);
+ else
+ operands[1] = (*i386_compare_gen)(i386_compare_op0, i386_compare_op1);
+}")
+
+(define_insn ""
+ [(set (match_operand:QI 0 "register_operand" "=q")
+ (eq:QI (cc0) (const_int 0)))]
+ ""
+ "*
+{
+ if (cc_prev_status.flags & CC_Z_IN_NOT_C)
+ return AS1 (setnb,%0);
+ else
+ return AS1 (sete,%0);
+}")
+
+(define_expand "sne"
+ [(match_dup 1)
+ (set (match_operand:QI 0 "register_operand" "")
+ (ne:QI (cc0) (const_int 0)))]
+ ""
+ "
+{
+ if (TARGET_IEEE_FP
+ && GET_MODE_CLASS (GET_MODE (i386_compare_op0)) == MODE_FLOAT)
+ operands[1] = (*i386_compare_gen_eq)(i386_compare_op0, i386_compare_op1);
+ else
+ operands[1] = (*i386_compare_gen)(i386_compare_op0, i386_compare_op1);
+}")
+
+(define_insn ""
+ [(set (match_operand:QI 0 "register_operand" "=q")
+ (ne:QI (cc0) (const_int 0)))]
+ ""
+ "*
+{
+ if (cc_prev_status.flags & CC_Z_IN_NOT_C)
+ return AS1 (setb,%0);
+ else
+ return AS1 (setne,%0);
+}
+")
+
+(define_expand "sgt"
+ [(match_dup 1)
+ (set (match_operand:QI 0 "register_operand" "")
+ (gt:QI (cc0) (const_int 0)))]
+ ""
+ "operands[1] = (*i386_compare_gen)(i386_compare_op0, i386_compare_op1);")
+
+(define_insn ""
+ [(set (match_operand:QI 0 "register_operand" "=q")
+ (gt:QI (cc0) (const_int 0)))]
+ ""
+ "*
+{
+ if (TARGET_IEEE_FP && (cc_prev_status.flags & CC_IN_80387))
+ return AS1 (sete,%0);
+
+ OUTPUT_JUMP (\"setg %0\", \"seta %0\", NULL_PTR);
+}")
+
+(define_expand "sgtu"
+ [(match_dup 1)
+ (set (match_operand:QI 0 "register_operand" "")
+ (gtu:QI (cc0) (const_int 0)))]
+ ""
+ "operands[1] = (*i386_compare_gen)(i386_compare_op0, i386_compare_op1);")
+
+(define_insn ""
+ [(set (match_operand:QI 0 "register_operand" "=q")
+ (gtu:QI (cc0) (const_int 0)))]
+ ""
+ "* return \"seta %0\"; ")
+
+(define_expand "slt"
+ [(match_dup 1)
+ (set (match_operand:QI 0 "register_operand" "")
+ (lt:QI (cc0) (const_int 0)))]
+ ""
+ "operands[1] = (*i386_compare_gen)(i386_compare_op0, i386_compare_op1);")
+
+(define_insn ""
+ [(set (match_operand:QI 0 "register_operand" "=q")
+ (lt:QI (cc0) (const_int 0)))]
+ ""
+ "*
+{
+ if (TARGET_IEEE_FP && (cc_prev_status.flags & CC_IN_80387))
+ return AS1 (sete,%0);
+
+ OUTPUT_JUMP (\"setl %0\", \"setb %0\", \"sets %0\");
+}")
+
+(define_expand "sltu"
+ [(match_dup 1)
+ (set (match_operand:QI 0 "register_operand" "")
+ (ltu:QI (cc0) (const_int 0)))]
+ ""
+ "operands[1] = (*i386_compare_gen)(i386_compare_op0, i386_compare_op1);")
+
+(define_insn ""
+ [(set (match_operand:QI 0 "register_operand" "=q")
+ (ltu:QI (cc0) (const_int 0)))]
+ ""
+ "* return \"setb %0\"; ")
+
+(define_expand "sge"
+ [(match_dup 1)
+ (set (match_operand:QI 0 "register_operand" "")
+ (ge:QI (cc0) (const_int 0)))]
+ ""
+ "operands[1] = (*i386_compare_gen)(i386_compare_op0, i386_compare_op1);")
+
+(define_insn ""
+ [(set (match_operand:QI 0 "register_operand" "=q")
+ (ge:QI (cc0) (const_int 0)))]
+ ""
+ "*
+{
+ if (TARGET_IEEE_FP && (cc_prev_status.flags & CC_IN_80387))
+ return AS1 (sete,%0);
+
+ OUTPUT_JUMP (\"setge %0\", \"setae %0\", \"setns %0\");
+}")
+
+(define_expand "sgeu"
+ [(match_dup 1)
+ (set (match_operand:QI 0 "register_operand" "")
+ (geu:QI (cc0) (const_int 0)))]
+ ""
+ "operands[1] = (*i386_compare_gen)(i386_compare_op0, i386_compare_op1);")
+
+(define_insn ""
+ [(set (match_operand:QI 0 "register_operand" "=q")
+ (geu:QI (cc0) (const_int 0)))]
+ ""
+ "* return \"setae %0\"; ")
+
+(define_expand "sle"
+ [(match_dup 1)
+ (set (match_operand:QI 0 "register_operand" "")
+ (le:QI (cc0) (const_int 0)))]
+ ""
+ "operands[1] = (*i386_compare_gen)(i386_compare_op0, i386_compare_op1);")
+
+(define_insn ""
+ [(set (match_operand:QI 0 "register_operand" "=q")
+ (le:QI (cc0) (const_int 0)))]
+ ""
+ "*
+{
+ if (TARGET_IEEE_FP && (cc_prev_status.flags & CC_IN_80387))
+ return AS1 (setb,%0);
+
+ OUTPUT_JUMP (\"setle %0\", \"setbe %0\", NULL_PTR);
+}")
+
+(define_expand "sleu"
+ [(match_dup 1)
+ (set (match_operand:QI 0 "register_operand" "")
+ (leu:QI (cc0) (const_int 0)))]
+ ""
+ "operands[1] = (*i386_compare_gen)(i386_compare_op0, i386_compare_op1);")
+
+(define_insn ""
+ [(set (match_operand:QI 0 "register_operand" "=q")
+ (leu:QI (cc0) (const_int 0)))]
+ ""
+ "* return \"setbe %0\"; ")
+
+;; Basic conditional jump instructions.
+;; We ignore the overflow flag for signed branch instructions.
+
+;; For all bCOND expanders, also expand the compare or test insn that
+;; generates cc0. Generate an equality comparison if `beq' or `bne'.
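+;;
+;; As a rough sketch (register and label names purely illustrative), a signed
+;; integer test such as `if (a > b) goto lab;' expands through "bgt" into a
+;; compare insn plus the matching branch recognizer, roughly
+;;
+;;	cmpl %edx,%eax
+;;	jg .Llab
+;;
+;; while the unsigned "bgtu" form would emit `ja' in place of `jg'.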
+
+(define_expand "beq"
+ [(match_dup 1)
+ (set (pc)
+ (if_then_else (eq (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ if (TARGET_IEEE_FP
+ && GET_MODE_CLASS (GET_MODE (i386_compare_op0)) == MODE_FLOAT)
+ operands[1] = (*i386_compare_gen_eq)(i386_compare_op0, i386_compare_op1);
+ else
+ operands[1] = (*i386_compare_gen)(i386_compare_op0, i386_compare_op1);
+}")
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else (eq (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "*
+{
+ if (cc_prev_status.flags & CC_Z_IN_NOT_C)
+ return \"jnc %l0\";
+ else
+ return \"je %l0\";
+}")
+
+(define_expand "bne"
+ [(match_dup 1)
+ (set (pc)
+ (if_then_else (ne (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ if (TARGET_IEEE_FP
+ && GET_MODE_CLASS (GET_MODE (i386_compare_op0)) == MODE_FLOAT)
+ operands[1] = (*i386_compare_gen_eq)(i386_compare_op0, i386_compare_op1);
+ else
+ operands[1] = (*i386_compare_gen)(i386_compare_op0, i386_compare_op1);
+}")
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else (ne (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "*
+{
+ if (cc_prev_status.flags & CC_Z_IN_NOT_C)
+ return \"jc %l0\";
+ else
+ return \"jne %l0\";
+}")
+
+(define_expand "bgt"
+ [(match_dup 1)
+ (set (pc)
+ (if_then_else (gt (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "operands[1] = (*i386_compare_gen)(i386_compare_op0, i386_compare_op1);")
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else (gt (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "*
+{
+ if (TARGET_IEEE_FP && (cc_prev_status.flags & CC_IN_80387))
+ return AS1 (je,%l0);
+
+ OUTPUT_JUMP (\"jg %l0\", \"ja %l0\", NULL_PTR);
+}")
+
+(define_expand "bgtu"
+ [(match_dup 1)
+ (set (pc)
+ (if_then_else (gtu (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "operands[1] = (*i386_compare_gen)(i386_compare_op0, i386_compare_op1);")
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else (gtu (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "ja %l0")
+
+(define_expand "blt"
+ [(match_dup 1)
+ (set (pc)
+ (if_then_else (lt (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "operands[1] = (*i386_compare_gen)(i386_compare_op0, i386_compare_op1);")
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else (lt (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "*
+{
+ if (TARGET_IEEE_FP && (cc_prev_status.flags & CC_IN_80387))
+ return AS1 (je,%l0);
+
+ OUTPUT_JUMP (\"jl %l0\", \"jb %l0\", \"js %l0\");
+}")
+
+(define_expand "bltu"
+ [(match_dup 1)
+ (set (pc)
+ (if_then_else (ltu (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "operands[1] = (*i386_compare_gen)(i386_compare_op0, i386_compare_op1);")
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else (ltu (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "jb %l0")
+
+(define_expand "bge"
+ [(match_dup 1)
+ (set (pc)
+ (if_then_else (ge (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "operands[1] = (*i386_compare_gen)(i386_compare_op0, i386_compare_op1);")
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else (ge (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "*
+{
+ if (TARGET_IEEE_FP && (cc_prev_status.flags & CC_IN_80387))
+ return AS1 (je,%l0);
+
+ OUTPUT_JUMP (\"jge %l0\", \"jae %l0\", \"jns %l0\");
+}")
+
+(define_expand "bgeu"
+ [(match_dup 1)
+ (set (pc)
+ (if_then_else (geu (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "operands[1] = (*i386_compare_gen)(i386_compare_op0, i386_compare_op1);")
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else (geu (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "jae %l0")
+
+(define_expand "ble"
+ [(match_dup 1)
+ (set (pc)
+ (if_then_else (le (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "operands[1] = (*i386_compare_gen)(i386_compare_op0, i386_compare_op1);")
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else (le (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "*
+{
+ if (TARGET_IEEE_FP && (cc_prev_status.flags & CC_IN_80387))
+ return AS1 (jb,%l0);
+
+ OUTPUT_JUMP (\"jle %l0\", \"jbe %l0\", NULL_PTR);
+}")
+
+(define_expand "bleu"
+ [(match_dup 1)
+ (set (pc)
+ (if_then_else (leu (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "operands[1] = (*i386_compare_gen)(i386_compare_op0, i386_compare_op1);")
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else (leu (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "jbe %l0")
+
+;; Negated conditional jump instructions.
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else (eq (cc0)
+ (const_int 0))
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+ "*
+{
+ if (cc_prev_status.flags & CC_Z_IN_NOT_C)
+ return \"jc %l0\";
+ else
+ return \"jne %l0\";
+}")
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else (ne (cc0)
+ (const_int 0))
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+ "*
+{
+ if (cc_prev_status.flags & CC_Z_IN_NOT_C)
+ return \"jnc %l0\";
+ else
+ return \"je %l0\";
+}")
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else (gt (cc0)
+ (const_int 0))
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+ "*
+{
+ if (TARGET_IEEE_FP && (cc_prev_status.flags & CC_IN_80387))
+ return AS1 (jne,%l0);
+
+ OUTPUT_JUMP (\"jle %l0\", \"jbe %l0\", NULL_PTR);
+}")
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else (gtu (cc0)
+ (const_int 0))
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+ "jbe %l0")
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else (lt (cc0)
+ (const_int 0))
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+ "*
+{
+ if (TARGET_IEEE_FP && (cc_prev_status.flags & CC_IN_80387))
+ return AS1 (jne,%l0);
+
+ OUTPUT_JUMP (\"jge %l0\", \"jae %l0\", \"jns %l0\");
+}")
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else (ltu (cc0)
+ (const_int 0))
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+ "jae %l0")
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else (ge (cc0)
+ (const_int 0))
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+ "*
+{
+ if (TARGET_IEEE_FP && (cc_prev_status.flags & CC_IN_80387))
+ return AS1 (jne,%l0);
+
+ OUTPUT_JUMP (\"jl %l0\", \"jb %l0\", \"js %l0\");
+}")
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else (geu (cc0)
+ (const_int 0))
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+ "jb %l0")
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else (le (cc0)
+ (const_int 0))
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+ "*
+{
+ if (TARGET_IEEE_FP && (cc_prev_status.flags & CC_IN_80387))
+ return AS1 (jae,%l0);
+
+ OUTPUT_JUMP (\"jg %l0\", \"ja %l0\", NULL_PTR);
+}")
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else (leu (cc0)
+ (const_int 0))
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+ "ja %l0")
+
+;; Unconditional and other jump instructions
+
+(define_insn "jump"
+ [(set (pc)
+ (label_ref (match_operand 0 "" "")))]
+ ""
+ "jmp %l0")
+
+(define_insn "indirect_jump"
+ [(set (pc) (match_operand:SI 0 "general_operand" "rm"))]
+ ""
+ "*
+{
+ CC_STATUS_INIT;
+
+ return AS1 (jmp,%*%0);
+}")
+
+;; Implement switch statements when generating PIC code. Switches are
+;; implemented by `tablejump' when not using -fpic.
+
+;; Emit code here to do the range checking and make the index zero based.
+
+(define_expand "casesi"
+ [(set (match_dup 5)
+ (minus:SI (match_operand:SI 0 "general_operand" "")
+ (match_operand:SI 1 "general_operand" "")))
+ (set (cc0)
+ (compare:CC (match_dup 5)
+ (match_operand:SI 2 "general_operand" "")))
+ (set (pc)
+ (if_then_else (gtu (cc0)
+ (const_int 0))
+ (label_ref (match_operand 4 "" ""))
+ (pc)))
+ (parallel
+ [(set (pc)
+ (minus:SI (reg:SI 3)
+ (mem:SI (plus:SI (mult:SI (match_dup 5)
+ (const_int 4))
+ (label_ref (match_operand 3 "" ""))))))
+ (clobber (match_scratch:SI 6 ""))])]
+ "flag_pic"
+ "
+{
+ operands[5] = gen_reg_rtx (SImode);
+ current_function_uses_pic_offset_table = 1;
+}")
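+
+;; As a sketch of the interface (values illustrative), a source-level
+;; `switch (i)' whose cases run from 3 to 7 reaches this expander with
+;; operand 0 = i, operand 1 = 3 (lower bound), operand 2 = 4 (range),
+;; operand 3 = the jump-table label and operand 4 = the default label;
+;; the RTL above rebases the index, branches to the default label on a
+;; gtu result, and otherwise performs the PIC table jump.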
+
+;; Implement a casesi insn.
+
+;; Each entry in the "addr_diff_vec" looks like this as the result of the
+;; two rules below:
+;;
+;; .long _GLOBAL_OFFSET_TABLE_+[.-.L2]
+;;
+;; 1. An expression involving an external reference may only use the
+;; addition operator, and only with an assembly-time constant.
+;; The example above satisfies this because ".-.L2" is a constant.
+;;
+;; 2. The symbol _GLOBAL_OFFSET_TABLE_ is magic, and at link time is
+;; given the value of "GOT - .", where GOT is the actual address of
+;; the Global Offset Table. Therefore, the .long above actually
+;; stores the value "( GOT - . ) + [ . - .L2 ]", or "GOT - .L2". The
+;; expression "GOT - .L2" by itself would generate an error from as(1).
+;;
+;; The pattern below emits code that looks like this:
+;;
+;; movl %ebx,reg
+;; subl TABLE@GOTOFF(%ebx,index,4),reg
+;; jmp reg
+;;
+;; The addr_diff_vec contents may be directly referenced with @GOTOFF, since
+;; the addr_diff_vec is known to be part of this module.
+;;
+;; The subl above calculates "GOT - (( GOT - . ) + [ . - .L2 ])", which
+;; evaluates to just ".L2".
+
+(define_insn ""
+ [(set (pc)
+ (minus:SI (reg:SI 3)
+ (mem:SI (plus:SI
+ (mult:SI (match_operand:SI 0 "register_operand" "r")
+ (const_int 4))
+ (label_ref (match_operand 1 "" ""))))))
+ (clobber (match_scratch:SI 2 "=&r"))]
+ ""
+ "*
+{
+ rtx xops[4];
+
+ xops[0] = operands[0];
+ xops[1] = operands[1];
+ xops[2] = operands[2];
+ xops[3] = pic_offset_table_rtx;
+
+ output_asm_insn (AS2 (mov%L2,%3,%2), xops);
+ output_asm_insn (\"sub%L2 %l1@GOTOFF(%3,%0,4),%2\", xops);
+ output_asm_insn (AS1 (jmp,%*%2), xops);
+ ASM_OUTPUT_ALIGN_CODE (asm_out_file);
+ RET;
+}")
+
+(define_insn "tablejump"
+ [(set (pc) (match_operand:SI 0 "general_operand" "rm"))
+ (use (label_ref (match_operand 1 "" "")))]
+ ""
+ "*
+{
+ CC_STATUS_INIT;
+
+ return AS1 (jmp,%*%0);
+}")
+
+;; Call insns.
+
+;; If generating PIC code, the predicate indirect_operand will fail
+;; for operands[0] containing symbolic references on all of the named
+;; call* patterns. Each named pattern is followed by an unnamed pattern
+;; that matches any call to a symbolic CONST (i.e., a symbol_ref).  The
+;; unnamed patterns are only used while generating PIC code, because
+;; otherwise the named patterns match.
+
+;; Call subroutine returning no value.
+
+(define_expand "call_pop"
+ [(parallel [(call (match_operand:QI 0 "indirect_operand" "")
+ (match_operand:SI 1 "general_operand" ""))
+ (set (reg:SI 7)
+ (plus:SI (reg:SI 7)
+ (match_operand:SI 3 "immediate_operand" "")))])]
+ ""
+ "
+{
+ rtx addr;
+
+ if (flag_pic)
+ current_function_uses_pic_offset_table = 1;
+
+ /* With half-pic, force the address into a register. */
+ addr = XEXP (operands[0], 0);
+ if (GET_CODE (addr) != REG && HALF_PIC_P () && !CONSTANT_ADDRESS_P (addr))
+ XEXP (operands[0], 0) = force_reg (Pmode, addr);
+
+ if (! expander_call_insn_operand (operands[0], QImode))
+ operands[0]
+ = change_address (operands[0], VOIDmode,
+ copy_to_mode_reg (Pmode, XEXP (operands[0], 0)));
+}")
+
+(define_insn ""
+ [(call (match_operand:QI 0 "call_insn_operand" "m")
+ (match_operand:SI 1 "general_operand" "g"))
+ (set (reg:SI 7) (plus:SI (reg:SI 7)
+ (match_operand:SI 3 "immediate_operand" "i")))]
+ ""
+ "*
+{
+ if (GET_CODE (operands[0]) == MEM
+ && ! CONSTANT_ADDRESS_P (XEXP (operands[0], 0)))
+ {
+ operands[0] = XEXP (operands[0], 0);
+ return AS1 (call,%*%0);
+ }
+ else
+ return AS1 (call,%P0);
+}")
+
+(define_insn ""
+ [(call (mem:QI (match_operand:SI 0 "symbolic_operand" ""))
+ (match_operand:SI 1 "general_operand" "g"))
+ (set (reg:SI 7) (plus:SI (reg:SI 7)
+ (match_operand:SI 3 "immediate_operand" "i")))]
+ "!HALF_PIC_P ()"
+ "call %P0")
+
+(define_expand "call"
+ [(call (match_operand:QI 0 "indirect_operand" "")
+ (match_operand:SI 1 "general_operand" ""))]
+ ;; Operand 1 not used on the i386.
+ ""
+ "
+{
+ rtx addr;
+
+ if (flag_pic)
+ current_function_uses_pic_offset_table = 1;
+
+ /* With half-pic, force the address into a register. */
+ addr = XEXP (operands[0], 0);
+ if (GET_CODE (addr) != REG && HALF_PIC_P () && !CONSTANT_ADDRESS_P (addr))
+ XEXP (operands[0], 0) = force_reg (Pmode, addr);
+
+ if (! expander_call_insn_operand (operands[0], QImode))
+ operands[0]
+ = change_address (operands[0], VOIDmode,
+ copy_to_mode_reg (Pmode, XEXP (operands[0], 0)));
+}")
+
+(define_insn ""
+ [(call (match_operand:QI 0 "call_insn_operand" "m")
+ (match_operand:SI 1 "general_operand" "g"))]
+ ;; Operand 1 not used on the i386.
+ ""
+ "*
+{
+ if (GET_CODE (operands[0]) == MEM
+ && ! CONSTANT_ADDRESS_P (XEXP (operands[0], 0)))
+ {
+ operands[0] = XEXP (operands[0], 0);
+ return AS1 (call,%*%0);
+ }
+ else
+ return AS1 (call,%P0);
+}")
+
+(define_insn ""
+ [(call (mem:QI (match_operand:SI 0 "symbolic_operand" ""))
+ (match_operand:SI 1 "general_operand" "g"))]
+ ;; Operand 1 not used on the i386.
+ "!HALF_PIC_P ()"
+ "call %P0")
+
+;; Call subroutine, returning value in operand 0
+;; (which must be a hard register).
+
+(define_expand "call_value_pop"
+ [(parallel [(set (match_operand 0 "" "")
+ (call (match_operand:QI 1 "indirect_operand" "")
+ (match_operand:SI 2 "general_operand" "")))
+ (set (reg:SI 7)
+ (plus:SI (reg:SI 7)
+ (match_operand:SI 4 "immediate_operand" "")))])]
+ ""
+ "
+{
+ rtx addr;
+
+ if (flag_pic)
+ current_function_uses_pic_offset_table = 1;
+
+ /* With half-pic, force the address into a register. */
+ addr = XEXP (operands[1], 0);
+ if (GET_CODE (addr) != REG && HALF_PIC_P () && !CONSTANT_ADDRESS_P (addr))
+ XEXP (operands[1], 0) = force_reg (Pmode, addr);
+
+ if (! expander_call_insn_operand (operands[1], QImode))
+ operands[1]
+ = change_address (operands[1], VOIDmode,
+ copy_to_mode_reg (Pmode, XEXP (operands[1], 0)));
+}")
+
+(define_insn ""
+ [(set (match_operand 0 "" "=rf")
+ (call (match_operand:QI 1 "call_insn_operand" "m")
+ (match_operand:SI 2 "general_operand" "g")))
+ (set (reg:SI 7) (plus:SI (reg:SI 7)
+ (match_operand:SI 4 "immediate_operand" "i")))]
+ ""
+ "*
+{
+ if (GET_CODE (operands[1]) == MEM
+ && ! CONSTANT_ADDRESS_P (XEXP (operands[1], 0)))
+ {
+ operands[1] = XEXP (operands[1], 0);
+ output_asm_insn (AS1 (call,%*%1), operands);
+ }
+ else
+ output_asm_insn (AS1 (call,%P1), operands);
+
+ RET;
+}")
+
+(define_insn ""
+ [(set (match_operand 0 "" "=rf")
+ (call (mem:QI (match_operand:SI 1 "symbolic_operand" ""))
+ (match_operand:SI 2 "general_operand" "g")))
+ (set (reg:SI 7) (plus:SI (reg:SI 7)
+ (match_operand:SI 4 "immediate_operand" "i")))]
+ "!HALF_PIC_P ()"
+ "call %P1")
+
+(define_expand "call_value"
+ [(set (match_operand 0 "" "")
+ (call (match_operand:QI 1 "indirect_operand" "")
+ (match_operand:SI 2 "general_operand" "")))]
+ ;; Operand 2 not used on the i386.
+ ""
+ "
+{
+ rtx addr;
+
+ if (flag_pic)
+ current_function_uses_pic_offset_table = 1;
+
+ /* With half-pic, force the address into a register. */
+ addr = XEXP (operands[1], 0);
+ if (GET_CODE (addr) != REG && HALF_PIC_P () && !CONSTANT_ADDRESS_P (addr))
+ XEXP (operands[1], 0) = force_reg (Pmode, addr);
+
+ if (! expander_call_insn_operand (operands[1], QImode))
+ operands[1]
+ = change_address (operands[1], VOIDmode,
+ copy_to_mode_reg (Pmode, XEXP (operands[1], 0)));
+}")
+
+(define_insn ""
+ [(set (match_operand 0 "" "=rf")
+ (call (match_operand:QI 1 "call_insn_operand" "m")
+ (match_operand:SI 2 "general_operand" "g")))]
+ ;; Operand 2 not used on the i386.
+ ""
+ "*
+{
+ if (GET_CODE (operands[1]) == MEM
+ && ! CONSTANT_ADDRESS_P (XEXP (operands[1], 0)))
+ {
+ operands[1] = XEXP (operands[1], 0);
+ output_asm_insn (AS1 (call,%*%1), operands);
+ }
+ else
+ output_asm_insn (AS1 (call,%P1), operands);
+
+ RET;
+}")
+
+(define_insn ""
+ [(set (match_operand 0 "" "=rf")
+ (call (mem:QI (match_operand:SI 1 "symbolic_operand" ""))
+ (match_operand:SI 2 "general_operand" "g")))]
+ ;; Operand 2 not used on the i386.
+ "!HALF_PIC_P ()"
+ "call %P1")
+
+(define_expand "untyped_call"
+ [(parallel [(call (match_operand:QI 0 "indirect_operand" "")
+ (const_int 0))
+ (match_operand:BLK 1 "memory_operand" "")
+ (match_operand 2 "" "")])]
+ ""
+ "
+{
+ rtx addr;
+
+ if (flag_pic)
+ current_function_uses_pic_offset_table = 1;
+
+ /* With half-pic, force the address into a register. */
+ addr = XEXP (operands[0], 0);
+ if (GET_CODE (addr) != REG && HALF_PIC_P () && !CONSTANT_ADDRESS_P (addr))
+ XEXP (operands[0], 0) = force_reg (Pmode, addr);
+
+ operands[1] = change_address (operands[1], DImode, XEXP (operands[1], 0));
+ if (! expander_call_insn_operand (operands[1], QImode))
+ operands[1]
+ = change_address (operands[1], VOIDmode,
+ copy_to_mode_reg (Pmode, XEXP (operands[1], 0)));
+}")
+
+(define_insn ""
+ [(call (match_operand:QI 0 "call_insn_operand" "m")
+ (const_int 0))
+ (match_operand:DI 1 "memory_operand" "o")
+ (match_operand 2 "" "")]
+ ""
+ "*
+{
+ rtx addr = operands[1];
+
+ if (GET_CODE (operands[0]) == MEM
+ && ! CONSTANT_ADDRESS_P (XEXP (operands[0], 0)))
+ {
+ operands[0] = XEXP (operands[0], 0);
+ output_asm_insn (AS1 (call,%*%0), operands);
+ }
+ else
+ output_asm_insn (AS1 (call,%P0), operands);
+
+ operands[2] = gen_rtx (REG, SImode, 0);
+ output_asm_insn (AS2 (mov%L2,%2,%1), operands);
+
+ operands[2] = gen_rtx (REG, SImode, 1);
+ operands[1] = adj_offsettable_operand (addr, 4);
+ output_asm_insn (AS2 (mov%L2,%2,%1), operands);
+
+ operands[1] = adj_offsettable_operand (addr, 8);
+ return AS1 (fnsave,%1);
+}")
+
+(define_insn ""
+ [(call (mem:QI (match_operand:SI 0 "symbolic_operand" ""))
+ (const_int 0))
+ (match_operand:DI 1 "memory_operand" "o")
+ (match_operand 2 "" "")]
+ "!HALF_PIC_P ()"
+ "*
+{
+ rtx addr = operands[1];
+
+ output_asm_insn (AS1 (call,%P0), operands);
+
+ operands[2] = gen_rtx (REG, SImode, 0);
+ output_asm_insn (AS2 (mov%L2,%2,%1), operands);
+
+ operands[2] = gen_rtx (REG, SImode, 1);
+ operands[1] = adj_offsettable_operand (addr, 4);
+ output_asm_insn (AS2 (mov%L2,%2,%1), operands);
+
+ operands[1] = adj_offsettable_operand (addr, 8);
+ return AS1 (fnsave,%1);
+}")
+
+;; We use fnsave and frstor to save and restore the floating point result.
+;; These are expensive instructions and require a large space to save the
+;; FPU state.  A more complicated alternative is to use fnstenv to store
+;; the FPU environment and test whether the stack top is valid. Store the
+;; result of the test, and if it is valid, pop and save the value. The
+;; untyped_return would check the test and optionally push the saved value.
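+;;
+;; As a rough sketch (symbol and slot names illustrative), the two sides
+;; cooperate along these lines:
+;;
+;;	call _foo		# untyped_call
+;;	movl %eax,RES
+;;	movl %edx,RES+4
+;;	fnsave RES+8
+;;	...
+;;	frstor RES+8		# untyped_return via update_return
+;;	movl RES,%eax
+;;	movl RES+4,%edx
+;;	ret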
+
+(define_expand "untyped_return"
+ [(match_operand:BLK 0 "memory_operand" "")
+ (match_operand 1 "" "")]
+ ""
+ "
+{
+ rtx valreg1 = gen_rtx (REG, SImode, 0);
+ rtx valreg2 = gen_rtx (REG, SImode, 1);
+ rtx result = operands[0];
+
+ /* Restore the FPU state. */
+ emit_insn (gen_update_return (change_address (result, SImode,
+ plus_constant (XEXP (result, 0),
+ 8))));
+
+ /* Reload the function value registers. */
+ emit_move_insn (valreg1, change_address (result, SImode, XEXP (result, 0)));
+ emit_move_insn (valreg2,
+ change_address (result, SImode,
+ plus_constant (XEXP (result, 0), 4)));
+
+ /* Put USE insns before the return. */
+ emit_insn (gen_rtx (USE, VOIDmode, valreg1));
+ emit_insn (gen_rtx (USE, VOIDmode, valreg2));
+
+ /* Construct the return. */
+ expand_null_return ();
+
+ DONE;
+}")
+
+(define_insn "update_return"
+ [(unspec:SI [(match_operand:SI 0 "memory_operand" "m")] 0)]
+ ""
+ "frstor %0")
+
+;; Insn emitted into the body of a function to return from a function.
+;; This is only done if the function's epilogue is known to be simple.
+;; See comments for simple_386_epilogue in i386.c.
+
+(define_insn "return"
+ [(return)]
+ "simple_386_epilogue ()"
+ "*
+{
+ function_epilogue (asm_out_file, get_frame_size ());
+ RET;
+}")
+
+(define_insn "nop"
+ [(const_int 0)]
+ ""
+ "nop")
+
+(define_expand "movstrsi"
+ [(parallel [(set (match_operand:BLK 0 "memory_operand" "")
+ (match_operand:BLK 1 "memory_operand" ""))
+ (use (match_operand:SI 2 "const_int_operand" ""))
+ (use (match_operand:SI 3 "const_int_operand" ""))
+ (clobber (match_scratch:SI 4 ""))
+ (clobber (match_dup 5))
+ (clobber (match_dup 6))])]
+ ""
+ "
+{
+ rtx addr0, addr1;
+
+ if (GET_CODE (operands[2]) != CONST_INT)
+ FAIL;
+
+ addr0 = copy_to_mode_reg (Pmode, XEXP (operands[0], 0));
+ addr1 = copy_to_mode_reg (Pmode, XEXP (operands[1], 0));
+
+ operands[5] = addr0;
+ operands[6] = addr1;
+
+ operands[0] = gen_rtx (MEM, BLKmode, addr0);
+ operands[1] = gen_rtx (MEM, BLKmode, addr1);
+}")
+
+;; It might seem that operands 0 & 1 could use predicate register_operand.
+;; But strength reduction might offset the MEM expression. So we let
+;; reload put the address into %edi & %esi.
+
+(define_insn ""
+ [(set (mem:BLK (match_operand:SI 0 "address_operand" "D"))
+ (mem:BLK (match_operand:SI 1 "address_operand" "S")))
+ (use (match_operand:SI 2 "const_int_operand" "n"))
+ (use (match_operand:SI 3 "immediate_operand" "i"))
+ (clobber (match_scratch:SI 4 "=&c"))
+ (clobber (match_dup 0))
+ (clobber (match_dup 1))]
+ ""
+ "*
+{
+ rtx xops[2];
+
+ output_asm_insn (\"cld\", operands);
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ if (INTVAL (operands[2]) & ~0x03)
+ {
+ xops[0] = GEN_INT ((INTVAL (operands[2]) >> 2) & 0x3fffffff);
+ xops[1] = operands[4];
+
+ output_asm_insn (AS2 (mov%L1,%0,%1), xops);
+#ifdef INTEL_SYNTAX
+ output_asm_insn (\"rep movsd\", xops);
+#else
+ output_asm_insn (\"rep\;movsl\", xops);
+#endif
+ }
+ if (INTVAL (operands[2]) & 0x02)
+ output_asm_insn (\"movsw\", operands);
+ if (INTVAL (operands[2]) & 0x01)
+ output_asm_insn (\"movsb\", operands);
+ }
+ else
+ abort ();
+ RET;
+}")
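+
+;; As a worked sketch, a constant 7-byte copy goes through the insn above as
+;;
+;;	cld
+;;	movl $1,%ecx
+;;	rep
+;;	movsl
+;;	movsw
+;;	movsb
+;;
+;; i.e. one dword via `rep movsl', then the leftover word and byte, with
+;; %esi/%edi already holding the source and destination thanks to reload.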
+
+(define_expand "cmpstrsi"
+ [(parallel [(set (match_operand:SI 0 "general_operand" "")
+ (compare:SI (match_operand:BLK 1 "general_operand" "")
+ (match_operand:BLK 2 "general_operand" "")))
+ (use (match_operand:SI 3 "general_operand" ""))
+ (use (match_operand:SI 4 "immediate_operand" ""))
+ (clobber (match_dup 5))
+ (clobber (match_dup 6))
+ (clobber (match_dup 3))])]
+ ""
+ "
+{
+ rtx addr1, addr2;
+
+ addr1 = copy_to_mode_reg (Pmode, XEXP (operands[1], 0));
+ addr2 = copy_to_mode_reg (Pmode, XEXP (operands[2], 0));
+ operands[3] = copy_to_mode_reg (SImode, operands[3]);
+
+ operands[5] = addr1;
+ operands[6] = addr2;
+
+ operands[1] = gen_rtx (MEM, BLKmode, addr1);
+ operands[2] = gen_rtx (MEM, BLKmode, addr2);
+
+}")
+
+;; memcmp recognizers. The `cmpsb' opcode does nothing if the count is
+;; zero. Emit extra code to make sure that a zero-length compare is EQ.
+
+;; It might seem that operands 0 & 1 could use predicate register_operand.
+;; But strength reduction might offset the MEM expression. So we let
+;; reload put the address into %edi & %esi.
+
+;; ??? Most comparisons have a constant length, and it's therefore
+;; possible to know that the length is non-zero, and to avoid the extra
+;; code to handle zero-length compares.
+
+(define_insn ""
+ [(set (match_operand:SI 0 "general_operand" "=&r")
+ (compare:SI (mem:BLK (match_operand:SI 1 "address_operand" "S"))
+ (mem:BLK (match_operand:SI 2 "address_operand" "D"))))
+ (use (match_operand:SI 3 "register_operand" "c"))
+ (use (match_operand:SI 4 "immediate_operand" "i"))
+ (clobber (match_dup 1))
+ (clobber (match_dup 2))
+ (clobber (match_dup 3))]
+ ""
+ "*
+{
+ rtx xops[4], label;
+
+ label = gen_label_rtx ();
+
+ output_asm_insn (\"cld\", operands);
+ output_asm_insn (AS2 (xor%L0,%0,%0), operands);
+ output_asm_insn (\"repz\;cmps%B2\", operands);
+ output_asm_insn (\"je %l0\", &label);
+
+ xops[0] = operands[0];
+ xops[1] = gen_rtx (MEM, QImode,
+ gen_rtx (PLUS, SImode, operands[1], constm1_rtx));
+ xops[2] = gen_rtx (MEM, QImode,
+ gen_rtx (PLUS, SImode, operands[2], constm1_rtx));
+ xops[3] = operands[3];
+
+ output_asm_insn (AS2 (movz%B1%L0,%1,%0), xops);
+ output_asm_insn (AS2 (movz%B2%L3,%2,%3), xops);
+
+ output_asm_insn (AS2 (sub%L0,%3,%0), xops);
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, \"L\", CODE_LABEL_NUMBER (label));
+ RET;
+}")
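+
+;; As a rough sketch (result register and label illustrative), the recognizer
+;; above comes out as
+;;
+;;	cld
+;;	xorl %eax,%eax
+;;	repz
+;;	cmpsb
+;;	je .L9
+;;	movzbl -1(%esi),%eax
+;;	movzbl -1(%edi),%ecx
+;;	subl %ecx,%eax
+;; .L9:
+;;
+;; The initial `xorl' is the extra code mentioned above: it zeroes the result
+;; and leaves ZF set, so a zero-length compare skips the byte fix-up and
+;; yields 0 (EQ).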
+
+(define_insn ""
+ [(set (cc0)
+ (compare:SI (mem:BLK (match_operand:SI 0 "address_operand" "S"))
+ (mem:BLK (match_operand:SI 1 "address_operand" "D"))))
+ (use (match_operand:SI 2 "register_operand" "c"))
+ (use (match_operand:SI 3 "immediate_operand" "i"))
+ (clobber (match_dup 0))
+ (clobber (match_dup 1))
+ (clobber (match_dup 2))]
+ ""
+ "*
+{
+ rtx xops[2];
+
+ cc_status.flags |= CC_NOT_SIGNED;
+
+ xops[0] = gen_rtx (REG, QImode, 0);
+ xops[1] = CONST0_RTX (QImode);
+
+ output_asm_insn (\"cld\", operands);
+ output_asm_insn (AS2 (test%B0,%1,%0), xops);
+ return \"repz\;cmps%B2\";
+}")
+
+(define_expand "ffssi2"
+ [(set (match_dup 2)
+ (plus:SI (ffs:SI (match_operand:SI 1 "general_operand" ""))
+ (const_int -1)))
+ (set (match_operand:SI 0 "general_operand" "")
+ (plus:SI (match_dup 2) (const_int 1)))]
+ ""
+ "operands[2] = gen_reg_rtx (SImode);")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "general_operand" "=&r")
+ (plus:SI (ffs:SI (match_operand:SI 1 "general_operand" "rm"))
+ (const_int -1)))]
+ ""
+ "*
+{
+ rtx xops[3];
+ static int ffssi_label_number;
+ char buffer[30];
+
+ xops[0] = operands[0];
+ xops[1] = operands[1];
+ xops[2] = constm1_rtx;
+ /* Can there be a way to avoid the jump here? */
+ output_asm_insn (AS2 (bsf%L0,%1,%0), xops);
+#ifdef LOCAL_LABEL_PREFIX
+ sprintf (buffer, \"jnz %sLFFSSI%d\",
+ LOCAL_LABEL_PREFIX, ffssi_label_number);
+#else
+ sprintf (buffer, \"jnz %sLFFSSI%d\",
+ \"\", ffssi_label_number);
+#endif
+ output_asm_insn (buffer, xops);
+ output_asm_insn (AS2 (mov%L0,%2,%0), xops);
+#ifdef LOCAL_LABEL_PREFIX
+ sprintf (buffer, \"%sLFFSSI%d:\",
+ LOCAL_LABEL_PREFIX, ffssi_label_number);
+#else
+ sprintf (buffer, \"%sLFFSSI%d:\",
+ \"\", ffssi_label_number);
+#endif
+ output_asm_insn (buffer, xops);
+
+ ffssi_label_number++;
+ return \"\";
+}")
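+
+;; As a worked sketch (register and label names illustrative), this comes out
+;; roughly as
+;;
+;;	bsfl %edx,%eax
+;;	jnz .LFFSSI0
+;;	movl $-1,%eax
+;; .LFFSSI0:
+;;
+;; so the insn yields ffs(x)-1: bsf gives the 0-based index of the lowest set
+;; bit (e.g. 3 for x = 8) and -1 is loaded when x is zero; the expander above
+;; then adds 1, giving ffs(8) = 4 and ffs(0) = 0.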
+
+(define_expand "ffshi2"
+ [(set (match_dup 2)
+ (plus:HI (ffs:HI (match_operand:HI 1 "general_operand" ""))
+ (const_int -1)))
+ (set (match_operand:HI 0 "general_operand" "")
+ (plus:HI (match_dup 2) (const_int 1)))]
+ ""
+ "operands[2] = gen_reg_rtx (HImode);")
+
+(define_insn ""
+ [(set (match_operand:HI 0 "general_operand" "=&r")
+ (plus:HI (ffs:HI (match_operand:SI 1 "general_operand" "rm"))
+ (const_int -1)))]
+ ""
+ "*
+{
+ rtx xops[3];
+ static int ffshi_label_number;
+ char buffer[30];
+
+ xops[0] = operands[0];
+ xops[1] = operands[1];
+ xops[2] = constm1_rtx;
+ output_asm_insn (AS2 (bsf%W0,%1,%0), xops);
+#ifdef LOCAL_LABEL_PREFIX
+ sprintf (buffer, \"jnz %sLFFSHI%d\",
+ LOCAL_LABEL_PREFIX, ffshi_label_number);
+#else
+ sprintf (buffer, \"jnz %sLFFSHI%d\",
+ \"\", ffshi_label_number);
+#endif
+ output_asm_insn (buffer, xops);
+ output_asm_insn (AS2 (mov%W0,%2,%0), xops);
+#ifdef LOCAL_LABEL_PREFIX
+ sprintf (buffer, \"%sLFFSHI%d:\",
+ LOCAL_LABEL_PREFIX, ffshi_label_number);
+#else
+ sprintf (buffer, \"%sLFFSHI%d:\",
+ \"\", ffshi_label_number);
+#endif
+ output_asm_insn (buffer, xops);
+
+ ffshi_label_number++;
+ return \"\";
+}")
+
+;; These patterns match the binary 387 instructions for addM3, subM3,
+;; mulM3 and divM3. There are three patterns for each of DFmode and
+;; SFmode. The first is the normal insn, the second the same insn but
+;; with one operand a conversion, and the third the same insn but with
+;; the other operand a conversion. The conversion may be SFmode or
+;; SImode if the target mode is DFmode, but only SImode if the target mode
+;; is SFmode.
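+;;
+;; For example (a sketch, not an exhaustive list), a source expression such
+;; as `d + i' with double d and int i matches the variant containing
+;; (float:DF (match_operand:SI ...)), so the 387 can use its integer-operand
+;; forms (e.g. fiaddl) instead of emitting a separate conversion.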
+
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=f,f")
+ (match_operator:DF 3 "binary_387_op"
+ [(match_operand:DF 1 "nonimmediate_operand" "0,fm")
+ (match_operand:DF 2 "nonimmediate_operand" "fm,0")]))]
+ "TARGET_80387"
+ "* return (char *) output_387_binary_op (insn, operands);")
+
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (match_operator:DF 3 "binary_387_op"
+ [(float:DF (match_operand:SI 1 "general_operand" "rm"))
+ (match_operand:DF 2 "general_operand" "0")]))]
+ "TARGET_80387"
+ "* return (char *) output_387_binary_op (insn, operands);")
+
+(define_insn ""
+ [(set (match_operand:XF 0 "register_operand" "=f,f")
+ (match_operator:XF 3 "binary_387_op"
+ [(match_operand:XF 1 "nonimmediate_operand" "0,f")
+ (match_operand:XF 2 "nonimmediate_operand" "f,0")]))]
+ "TARGET_80387"
+ "* return (char *) output_387_binary_op (insn, operands);")
+
+(define_insn ""
+ [(set (match_operand:XF 0 "register_operand" "=f")
+ (match_operator:XF 3 "binary_387_op"
+ [(float:XF (match_operand:SI 1 "general_operand" "rm"))
+ (match_operand:XF 2 "general_operand" "0")]))]
+ "TARGET_80387"
+ "* return (char *) output_387_binary_op (insn, operands);")
+
+(define_insn ""
+ [(set (match_operand:XF 0 "register_operand" "=f,f")
+ (match_operator:XF 3 "binary_387_op"
+ [(float_extend:XF (match_operand:SF 1 "general_operand" "fm,0"))
+ (match_operand:XF 2 "general_operand" "0,f")]))]
+ "TARGET_80387"
+ "* return (char *) output_387_binary_op (insn, operands);")
+
+(define_insn ""
+ [(set (match_operand:XF 0 "register_operand" "=f")
+ (match_operator:XF 3 "binary_387_op"
+ [(match_operand:XF 1 "general_operand" "0")
+ (float:XF (match_operand:SI 2 "general_operand" "rm"))]))]
+ "TARGET_80387"
+ "* return (char *) output_387_binary_op (insn, operands);")
+
+(define_insn ""
+ [(set (match_operand:XF 0 "register_operand" "=f,f")
+ (match_operator:XF 3 "binary_387_op"
+ [(match_operand:XF 1 "general_operand" "0,f")
+ (float_extend:XF
+ (match_operand:SF 2 "general_operand" "fm,0"))]))]
+ "TARGET_80387"
+ "* return (char *) output_387_binary_op (insn, operands);")
+
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=f,f")
+ (match_operator:DF 3 "binary_387_op"
+ [(float_extend:DF (match_operand:SF 1 "general_operand" "fm,0"))
+ (match_operand:DF 2 "general_operand" "0,f")]))]
+ "TARGET_80387"
+ "* return (char *) output_387_binary_op (insn, operands);")
+
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (match_operator:DF 3 "binary_387_op"
+ [(match_operand:DF 1 "general_operand" "0")
+ (float:DF (match_operand:SI 2 "general_operand" "rm"))]))]
+ "TARGET_80387"
+ "* return (char *) output_387_binary_op (insn, operands);")
+
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=f,f")
+ (match_operator:DF 3 "binary_387_op"
+ [(match_operand:DF 1 "general_operand" "0,f")
+ (float_extend:DF
+ (match_operand:SF 2 "general_operand" "fm,0"))]))]
+ "TARGET_80387"
+ "* return (char *) output_387_binary_op (insn, operands);")
+
+(define_insn ""
+ [(set (match_operand:SF 0 "register_operand" "=f,f")
+ (match_operator:SF 3 "binary_387_op"
+ [(match_operand:SF 1 "nonimmediate_operand" "0,fm")
+ (match_operand:SF 2 "nonimmediate_operand" "fm,0")]))]
+ "TARGET_80387"
+ "* return (char *) output_387_binary_op (insn, operands);")
+
+(define_insn ""
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (match_operator:SF 3 "binary_387_op"
+ [(float:SF (match_operand:SI 1 "general_operand" "rm"))
+ (match_operand:SF 2 "general_operand" "0")]))]
+ "TARGET_80387"
+ "* return (char *) output_387_binary_op (insn, operands);")
+
+(define_insn ""
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (match_operator:SF 3 "binary_387_op"
+ [(match_operand:SF 1 "general_operand" "0")
+ (float:SF (match_operand:SI 2 "general_operand" "rm"))]))]
+ "TARGET_80387"
+ "* return (char *) output_387_binary_op (insn, operands);")
+
+(define_expand "strlensi"
+ [(parallel [(set (match_dup 4)
+ (unspec:SI [(mem:BLK (match_operand:BLK 1 "general_operand" ""))
+ (match_operand:QI 2 "register_operand" "")
+ (match_operand:SI 3 "immediate_operand" "")] 0))
+ (clobber (match_dup 1))])
+ (set (match_dup 5)
+ (not:SI (match_dup 4)))
+ (set (match_operand:SI 0 "register_operand" "")
+ (minus:SI (match_dup 5)
+ (const_int 1)))]
+ ""
+ "
+{
+ operands[1] = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
+ operands[4] = gen_reg_rtx (SImode);
+ operands[5] = gen_reg_rtx (SImode);
+}")
+
+;; It might seem that operand 1 could use predicate register_operand.
+;; But strength reduction might offset the MEM expression. So we let
+;; reload put the address into %edi.
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=&c")
+ (unspec:SI [(mem:BLK (match_operand:SI 1 "address_operand" "D"))
+ (match_operand:QI 2 "register_operand" "a")
+ (match_operand:SI 3 "immediate_operand" "i")] 0))
+ (clobber (match_dup 1))]
+ ""
+ "*
+{
+ rtx xops[2];
+
+ xops[0] = operands[0];
+ xops[1] = constm1_rtx;
+ output_asm_insn (\"cld\", operands);
+ output_asm_insn (AS2 (mov%L0,%1,%0), xops);
+ return \"repnz\;scas%B2\";
+}")