path: root/contrib/gcc/combine.c
author    obrien <obrien@FreeBSD.org>  2002-02-01 18:16:02 +0000
committer obrien <obrien@FreeBSD.org>  2002-02-01 18:16:02 +0000
commit    c9ab9ae440a8066b2c2b85b157b1fdadcf09916a (patch)
tree      086d9d6c8fbd4fc8fe4495059332f66bc0f8d12b /contrib/gcc/combine.c
parent    2ecfd8bd04b63f335c1ec6295740a4bfd97a4fa6 (diff)
Enlist the FreeBSD-CURRENT users as testers of what is to become Gcc 3.1.0.
These bits are taken from the FSF anoncvs repo on 1-Feb-2002 08:20 PST.
Diffstat (limited to 'contrib/gcc/combine.c')
-rw-r--r--  contrib/gcc/combine.c | 4061
1 file changed, 2333 insertions(+), 1728 deletions(-)
diff --git a/contrib/gcc/combine.c b/contrib/gcc/combine.c
index eb75481..47ac3a8 100644
--- a/contrib/gcc/combine.c
+++ b/contrib/gcc/combine.c
@@ -1,24 +1,23 @@
/* Optimize by combining instructions for GNU compiler.
Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
- 1999, 2000 Free Software Foundation, Inc.
+ 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
-This file is part of GNU CC.
+This file is part of GCC.
-GNU CC is free software; you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation; either version 2, or (at your option)
-any later version.
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2, or (at your option) any later
+version.
-GNU CC is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
You should have received a copy of the GNU General Public License
-along with GNU CC; see the file COPYING. If not, write to
-the Free Software Foundation, 59 Temple Place - Suite 330,
-Boston, MA 02111-1307, USA. */
-
+along with GCC; see the file COPYING. If not, write to the Free
+Software Foundation, 59 Temple Place - Suite 330, Boston, MA
+02111-1307, USA. */
/* This module is essentially the "combiner" phase of the U. of Arizona
Portable Optimizer, but redone to work on our list-structured
@@ -59,7 +58,7 @@ Boston, MA 02111-1307, USA. */
- there are extremely rare cases (see distribute_regnotes) when a
REG_DEAD note is lost
- a LOG_LINKS entry that refers to an insn with multiple SETs may be
- removed because there is no way to know which register it was
+ removed because there is no way to know which register it was
linking
To simplify substitution, we combine only when the earlier insn(s)
@@ -77,16 +76,16 @@ Boston, MA 02111-1307, USA. */
#include "config.h"
#include "system.h"
-#include "rtl.h" /* stdio.h must precede rtl.h for FFS. */
+#include "rtl.h"
+#include "tm_p.h"
#include "flags.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "insn-config.h"
-/* Include expr.h after insn-config.h so we get HAVE_conditional_move. */
+#include "function.h"
+/* Include expr.h after insn-config.h so we get HAVE_conditional_move. */
#include "expr.h"
-#include "insn-flags.h"
-#include "insn-codes.h"
#include "insn-attr.h"
#include "recog.h"
#include "real.h"
@@ -116,12 +115,6 @@ static int combine_successes;
static int total_attempts, total_merges, total_extras, total_successes;
-/* Define a default value for REVERSIBLE_CC_MODE.
- We can never assume that a condition code mode is safe to reverse unless
- the md tells us so. */
-#ifndef REVERSIBLE_CC_MODE
-#define REVERSIBLE_CC_MODE(MODE) 0
-#endif
/* Vector mapping INSN_UIDs to cuids.
The cuids are like uids but increase monotonically always.
@@ -138,9 +131,15 @@ static int max_uid_cuid;
#define INSN_CUID(INSN) \
(INSN_UID (INSN) > max_uid_cuid ? insn_cuid (INSN) : uid_cuid[INSN_UID (INSN)])
+/* In case BITS_PER_WORD == HOST_BITS_PER_WIDE_INT, shifting by
+ BITS_PER_WORD would invoke undefined behavior. Work around it. */
+
+#define UWIDE_SHIFT_LEFT_BY_BITS_PER_WORD(val) \
+ (((unsigned HOST_WIDE_INT) (val) << (BITS_PER_WORD - 1)) << 1)
+
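A minimal standalone sketch (not part of combine.c; assumes a 64-bit
unsigned long long standing in for HOST_WIDE_INT) of why the macro
shifts twice: in C, shifting an N-bit type by N is undefined behavior,
while a shift by N-1 followed by a shift by 1 is well defined and
yields zero.

    #include <stdio.h>

    /* Stand-in for the case BITS_PER_WORD == HOST_BITS_PER_WIDE_INT == 64. */
    #define SHIFT_LEFT_BY_BITS(val) \
      (((unsigned long long) (val) << (64 - 1)) << 1)

    int main (void)
    {
      /* (1ULL << 64) would be undefined; the two-step shift safely gives 0. */
      printf ("%llu\n", SHIFT_LEFT_BY_BITS (1));
      return 0;
    }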
/* Maximum register number, which is the size of the tables below. */
-static int combine_max_regno;
+static unsigned int combine_max_regno;
/* Record last point of death of (hard or pseudo) register n. */
@@ -195,6 +194,12 @@ static rtx added_links_insn;
/* Basic block number of the block in which we are performing combines. */
static int this_basic_block;
+
+/* A bitmap indicating which blocks had registers go dead at entry.
+ After combine, we'll need to re-do global life analysis with
+ those blocks as starting points. */
+static sbitmap refresh_blocks;
+static int need_refresh;
/* The next group of arrays allows the recording of the last value assigned
to (hard or pseudo) register n. We use this information to see if a
@@ -283,7 +288,7 @@ static enum machine_mode nonzero_bits_mode;
/* Nonzero if we know that a register has some leading bits that are always
equal to the sign bit. */
-static char *reg_sign_bit_copies;
+static unsigned char *reg_sign_bit_copies;
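For intuition, a small standalone sketch (hypothetical values, 32-bit
int assumed, not from combine.c) of what reg_sign_bit_copies records:
after sign-extending a byte into a 32-bit register, at least the top
25 bits are guaranteed to equal the sign bit.

    #include <stdio.h>

    int main (void)
    {
      signed char c = -5;
      unsigned u = (unsigned) (int) c;   /* sign-extended, viewed as bits */
      unsigned sign = (u >> 31) & 1;
      int copies = 1;                    /* the sign bit itself */
      while (copies < 32 && ((u >> (31 - copies)) & 1) == sign)
        copies++;
      printf ("%d\n", copies);           /* 29 here; never below 25 for a char */
      return 0;
    }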
/* Nonzero when reg_nonzero_bits and reg_sign_bit_copies can be safely used.
It is zero while computing them and after combine has completed. This
@@ -309,193 +314,203 @@ struct undo
{
struct undo *next;
int is_int;
- union {rtx r; int i;} old_contents;
- union {rtx *r; int *i;} where;
+ union {rtx r; unsigned int i;} old_contents;
+ union {rtx *r; unsigned int *i;} where;
};
/* Record a bunch of changes to be undone, up to MAX_UNDO of them.
num_undo says how many are currently recorded.
- storage is nonzero if we must undo the allocation of new storage.
- The value of storage is what to pass to obfree.
-
other_insn is nonzero if we have modified some other insn in the process
- of working on subst_insn. It must be verified too.
-
- previous_undos is the value of undobuf.undos when we started processing
- this substitution. This will prevent gen_rtx_combine from re-used a piece
- from the previous expression. Doing so can produce circular rtl
- structures. */
+ of working on subst_insn. It must be verified too. */
struct undobuf
{
- char *storage;
struct undo *undos;
struct undo *frees;
- struct undo *previous_undos;
rtx other_insn;
};
static struct undobuf undobuf;
+/* Number of times the pseudo being substituted for
+ was found and replaced. */
+
+static int n_occurrences;
+
+static void do_SUBST PARAMS ((rtx *, rtx));
+static void do_SUBST_INT PARAMS ((unsigned int *,
+ unsigned int));
+static void init_reg_last_arrays PARAMS ((void));
+static void setup_incoming_promotions PARAMS ((void));
+static void set_nonzero_bits_and_sign_copies PARAMS ((rtx, rtx, void *));
+static int cant_combine_insn_p PARAMS ((rtx));
+static int can_combine_p PARAMS ((rtx, rtx, rtx, rtx, rtx *, rtx *));
+static int sets_function_arg_p PARAMS ((rtx));
+static int combinable_i3pat PARAMS ((rtx, rtx *, rtx, rtx, int, rtx *));
+static int contains_muldiv PARAMS ((rtx));
+static rtx try_combine PARAMS ((rtx, rtx, rtx, int *));
+static void undo_all PARAMS ((void));
+static void undo_commit PARAMS ((void));
+static rtx *find_split_point PARAMS ((rtx *, rtx));
+static rtx subst PARAMS ((rtx, rtx, rtx, int, int));
+static rtx combine_simplify_rtx PARAMS ((rtx, enum machine_mode, int, int));
+static rtx simplify_if_then_else PARAMS ((rtx));
+static rtx simplify_set PARAMS ((rtx));
+static rtx simplify_logical PARAMS ((rtx, int));
+static rtx expand_compound_operation PARAMS ((rtx));
+static rtx expand_field_assignment PARAMS ((rtx));
+static rtx make_extraction PARAMS ((enum machine_mode, rtx, HOST_WIDE_INT,
+ rtx, unsigned HOST_WIDE_INT, int,
+ int, int));
+static rtx extract_left_shift PARAMS ((rtx, int));
+static rtx make_compound_operation PARAMS ((rtx, enum rtx_code));
+static int get_pos_from_mask PARAMS ((unsigned HOST_WIDE_INT,
+ unsigned HOST_WIDE_INT *));
+static rtx force_to_mode PARAMS ((rtx, enum machine_mode,
+ unsigned HOST_WIDE_INT, rtx, int));
+static rtx if_then_else_cond PARAMS ((rtx, rtx *, rtx *));
+static rtx known_cond PARAMS ((rtx, enum rtx_code, rtx, rtx));
+static int rtx_equal_for_field_assignment_p PARAMS ((rtx, rtx));
+static rtx make_field_assignment PARAMS ((rtx));
+static rtx apply_distributive_law PARAMS ((rtx));
+static rtx simplify_and_const_int PARAMS ((rtx, enum machine_mode, rtx,
+ unsigned HOST_WIDE_INT));
+static unsigned HOST_WIDE_INT nonzero_bits PARAMS ((rtx, enum machine_mode));
+static unsigned int num_sign_bit_copies PARAMS ((rtx, enum machine_mode));
+static int merge_outer_ops PARAMS ((enum rtx_code *, HOST_WIDE_INT *,
+ enum rtx_code, HOST_WIDE_INT,
+ enum machine_mode, int *));
+static rtx simplify_shift_const PARAMS ((rtx, enum rtx_code, enum machine_mode,
+ rtx, int));
+static int recog_for_combine PARAMS ((rtx *, rtx, rtx *));
+static rtx gen_lowpart_for_combine PARAMS ((enum machine_mode, rtx));
+static rtx gen_binary PARAMS ((enum rtx_code, enum machine_mode,
+ rtx, rtx));
+static enum rtx_code simplify_comparison PARAMS ((enum rtx_code, rtx *, rtx *));
+static void update_table_tick PARAMS ((rtx));
+static void record_value_for_reg PARAMS ((rtx, rtx, rtx));
+static void check_promoted_subreg PARAMS ((rtx, rtx));
+static void record_dead_and_set_regs_1 PARAMS ((rtx, rtx, void *));
+static void record_dead_and_set_regs PARAMS ((rtx));
+static int get_last_value_validate PARAMS ((rtx *, rtx, int, int));
+static rtx get_last_value PARAMS ((rtx));
+static int use_crosses_set_p PARAMS ((rtx, int));
+static void reg_dead_at_p_1 PARAMS ((rtx, rtx, void *));
+static int reg_dead_at_p PARAMS ((rtx, rtx));
+static void move_deaths PARAMS ((rtx, rtx, int, rtx, rtx *));
+static int reg_bitfield_target_p PARAMS ((rtx, rtx));
+static void distribute_notes PARAMS ((rtx, rtx, rtx, rtx, rtx, rtx));
+static void distribute_links PARAMS ((rtx));
+static void mark_used_regs_combine PARAMS ((rtx));
+static int insn_cuid PARAMS ((rtx));
+static void record_promoted_value PARAMS ((rtx, rtx));
+static rtx reversed_comparison PARAMS ((rtx, enum machine_mode, rtx, rtx));
+static enum rtx_code combine_reversed_comparison_code PARAMS ((rtx));
+
/* Substitute NEWVAL, an rtx expression, into INTO, a place in some
insn. The substitution can be undone by undo_all. If INTO is already
set to NEWVAL, do not record this change. Because computing NEWVAL might
also call SUBST, we have to compute it before we put anything into
the undo table. */
-#define SUBST(INTO, NEWVAL) \
- do { rtx _new = (NEWVAL); \
- struct undo *_buf; \
- \
- if (undobuf.frees) \
- _buf = undobuf.frees, undobuf.frees = _buf->next; \
- else \
- _buf = (struct undo *) xmalloc (sizeof (struct undo)); \
- \
- _buf->is_int = 0; \
- _buf->where.r = &INTO; \
- _buf->old_contents.r = INTO; \
- INTO = _new; \
- if (_buf->old_contents.r == INTO) \
- _buf->next = undobuf.frees, undobuf.frees = _buf; \
- else \
- _buf->next = undobuf.undos, undobuf.undos = _buf; \
- } while (0)
+static void
+do_SUBST (into, newval)
+ rtx *into, newval;
+{
+ struct undo *buf;
+ rtx oldval = *into;
+
+ if (oldval == newval)
+ return;
+
+ if (undobuf.frees)
+ buf = undobuf.frees, undobuf.frees = buf->next;
+ else
+ buf = (struct undo *) xmalloc (sizeof (struct undo));
+
+ buf->is_int = 0;
+ buf->where.r = into;
+ buf->old_contents.r = oldval;
+ *into = newval;
+
+ buf->next = undobuf.undos, undobuf.undos = buf;
+}
+
+#define SUBST(INTO, NEWVAL) do_SUBST(&(INTO), (NEWVAL))
/* Similar to SUBST, but NEWVAL is an int expression. Note that substitution
for the value of a HOST_WIDE_INT value (including CONST_INT) is
not safe. */
-#define SUBST_INT(INTO, NEWVAL) \
- do { struct undo *_buf; \
- \
- if (undobuf.frees) \
- _buf = undobuf.frees, undobuf.frees = _buf->next; \
- else \
- _buf = (struct undo *) xmalloc (sizeof (struct undo)); \
- \
- _buf->is_int = 1; \
- _buf->where.i = (int *) &INTO; \
- _buf->old_contents.i = INTO; \
- INTO = NEWVAL; \
- if (_buf->old_contents.i == INTO) \
- _buf->next = undobuf.frees, undobuf.frees = _buf; \
- else \
- _buf->next = undobuf.undos, undobuf.undos = _buf; \
- } while (0)
+static void
+do_SUBST_INT (into, newval)
+ unsigned int *into, newval;
+{
+ struct undo *buf;
+ unsigned int oldval = *into;
-/* Number of times the pseudo being substituted for
- was found and replaced. */
+ if (oldval == newval)
+ return;
-static int n_occurrences;
+ if (undobuf.frees)
+ buf = undobuf.frees, undobuf.frees = buf->next;
+ else
+ buf = (struct undo *) xmalloc (sizeof (struct undo));
+
+ buf->is_int = 1;
+ buf->where.i = into;
+ buf->old_contents.i = oldval;
+ *into = newval;
+
+ buf->next = undobuf.undos, undobuf.undos = buf;
+}
-static void init_reg_last_arrays PROTO((void));
-static void setup_incoming_promotions PROTO((void));
-static void set_nonzero_bits_and_sign_copies PROTO((rtx, rtx));
-static int cant_combine_insn_p PROTO((rtx));
-static int can_combine_p PROTO((rtx, rtx, rtx, rtx, rtx *, rtx *));
-static int sets_function_arg_p PROTO((rtx));
-static int combinable_i3pat PROTO((rtx, rtx *, rtx, rtx, int, rtx *));
-static rtx try_combine PROTO((rtx, rtx, rtx));
-static void undo_all PROTO((void));
-static rtx *find_split_point PROTO((rtx *, rtx));
-static rtx subst PROTO((rtx, rtx, rtx, int, int));
-static rtx simplify_rtx PROTO((rtx, enum machine_mode, int, int));
-static rtx simplify_if_then_else PROTO((rtx));
-static rtx simplify_set PROTO((rtx));
-static rtx simplify_logical PROTO((rtx, int));
-static rtx expand_compound_operation PROTO((rtx));
-static rtx expand_field_assignment PROTO((rtx));
-static rtx make_extraction PROTO((enum machine_mode, rtx, int, rtx, int,
- int, int, int));
-static rtx extract_left_shift PROTO((rtx, int));
-static rtx make_compound_operation PROTO((rtx, enum rtx_code));
-static int get_pos_from_mask PROTO((unsigned HOST_WIDE_INT, int *));
-static rtx force_to_mode PROTO((rtx, enum machine_mode,
- unsigned HOST_WIDE_INT, rtx, int));
-static rtx if_then_else_cond PROTO((rtx, rtx *, rtx *));
-static rtx known_cond PROTO((rtx, enum rtx_code, rtx, rtx));
-static int rtx_equal_for_field_assignment_p PROTO((rtx, rtx));
-static rtx make_field_assignment PROTO((rtx));
-static rtx apply_distributive_law PROTO((rtx));
-static rtx simplify_and_const_int PROTO((rtx, enum machine_mode, rtx,
- unsigned HOST_WIDE_INT));
-static unsigned HOST_WIDE_INT nonzero_bits PROTO((rtx, enum machine_mode));
-static int num_sign_bit_copies PROTO((rtx, enum machine_mode));
-static int merge_outer_ops PROTO((enum rtx_code *, HOST_WIDE_INT *,
- enum rtx_code, HOST_WIDE_INT,
- enum machine_mode, int *));
-static rtx simplify_shift_const PROTO((rtx, enum rtx_code, enum machine_mode,
- rtx, int));
-static int recog_for_combine PROTO((rtx *, rtx, rtx *));
-static rtx gen_lowpart_for_combine PROTO((enum machine_mode, rtx));
-static rtx gen_rtx_combine PVPROTO((enum rtx_code code, enum machine_mode mode,
- ...));
-static rtx gen_binary PROTO((enum rtx_code, enum machine_mode,
- rtx, rtx));
-static rtx gen_unary PROTO((enum rtx_code, enum machine_mode,
- enum machine_mode, rtx));
-static enum rtx_code simplify_comparison PROTO((enum rtx_code, rtx *, rtx *));
-static int reversible_comparison_p PROTO((rtx));
-static void update_table_tick PROTO((rtx));
-static void record_value_for_reg PROTO((rtx, rtx, rtx));
-static void record_dead_and_set_regs_1 PROTO((rtx, rtx));
-static void record_dead_and_set_regs PROTO((rtx));
-static int get_last_value_validate PROTO((rtx *, rtx, int, int));
-static rtx get_last_value PROTO((rtx));
-static int use_crosses_set_p PROTO((rtx, int));
-static void reg_dead_at_p_1 PROTO((rtx, rtx));
-static int reg_dead_at_p PROTO((rtx, rtx));
-static void move_deaths PROTO((rtx, rtx, int, rtx, rtx *));
-static int reg_bitfield_target_p PROTO((rtx, rtx));
-static void distribute_notes PROTO((rtx, rtx, rtx, rtx, rtx, rtx));
-static void distribute_links PROTO((rtx));
-static void mark_used_regs_combine PROTO((rtx));
-static int insn_cuid PROTO((rtx));
+#define SUBST_INT(INTO, NEWVAL) do_SUBST_INT(&(INTO), (NEWVAL))
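A minimal standalone sketch of the undo-buffer pattern that do_SUBST
and do_SUBST_INT implement, with simplified types (int slots only) and
hypothetical names: each substitution saves the old value and its
address before overwriting, so a rejected combination can be rolled
back by replaying the saved entries.

    #include <stdio.h>
    #include <stdlib.h>

    struct undo { struct undo *next; int *where; int old; };
    static struct undo *undos;

    static void subst_int (int *into, int newval)
    {
      if (*into == newval)
        return;                          /* nothing worth recording */
      struct undo *buf = malloc (sizeof *buf);
      buf->where = into;
      buf->old = *into;
      *into = newval;
      buf->next = undos;                 /* push onto the undo stack */
      undos = buf;
    }

    static void undo_all (void)
    {
      while (undos)
        {
          struct undo *u = undos;
          *u->where = u->old;            /* restore the saved value */
          undos = u->next;
          free (u);
        }
    }

    int main (void)
    {
      int a = 1, b = 2;
      subst_int (&a, 10);
      subst_int (&b, 20);
      undo_all ();
      printf ("%d %d\n", a, b);          /* back to 1 2 */
      return 0;
    }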
/* Main entry point for combiner. F is the first insn of the function.
- NREGS is the first unused pseudo-reg number. */
+ NREGS is the first unused pseudo-reg number.
-void
+ Return non-zero if the combiner has turned an indirect jump
+ instruction into a direct jump. */
+int
combine_instructions (f, nregs)
rtx f;
- int nregs;
+ unsigned int nregs;
{
- register rtx insn, next;
+ rtx insn, next;
#ifdef HAVE_cc0
- register rtx prev;
+ rtx prev;
#endif
- register int i;
- register rtx links, nextlinks;
+ int i;
+ rtx links, nextlinks;
+
+ int new_direct_jump_p = 0;
combine_attempts = 0;
combine_merges = 0;
combine_extras = 0;
combine_successes = 0;
- undobuf.undos = undobuf.previous_undos = 0;
combine_max_regno = nregs;
- reg_nonzero_bits
- = (unsigned HOST_WIDE_INT *) alloca (nregs * sizeof (HOST_WIDE_INT));
- reg_sign_bit_copies = (char *) alloca (nregs * sizeof (char));
-
- bzero ((char *) reg_nonzero_bits, nregs * sizeof (HOST_WIDE_INT));
- bzero (reg_sign_bit_copies, nregs * sizeof (char));
-
- reg_last_death = (rtx *) alloca (nregs * sizeof (rtx));
- reg_last_set = (rtx *) alloca (nregs * sizeof (rtx));
- reg_last_set_value = (rtx *) alloca (nregs * sizeof (rtx));
- reg_last_set_table_tick = (int *) alloca (nregs * sizeof (int));
- reg_last_set_label = (int *) alloca (nregs * sizeof (int));
- reg_last_set_invalid = (char *) alloca (nregs * sizeof (char));
+ reg_nonzero_bits = ((unsigned HOST_WIDE_INT *)
+ xcalloc (nregs, sizeof (unsigned HOST_WIDE_INT)));
+ reg_sign_bit_copies
+ = (unsigned char *) xcalloc (nregs, sizeof (unsigned char));
+
+ reg_last_death = (rtx *) xmalloc (nregs * sizeof (rtx));
+ reg_last_set = (rtx *) xmalloc (nregs * sizeof (rtx));
+ reg_last_set_value = (rtx *) xmalloc (nregs * sizeof (rtx));
+ reg_last_set_table_tick = (int *) xmalloc (nregs * sizeof (int));
+ reg_last_set_label = (int *) xmalloc (nregs * sizeof (int));
+ reg_last_set_invalid = (char *) xmalloc (nregs * sizeof (char));
reg_last_set_mode
- = (enum machine_mode *) alloca (nregs * sizeof (enum machine_mode));
+ = (enum machine_mode *) xmalloc (nregs * sizeof (enum machine_mode));
reg_last_set_nonzero_bits
- = (unsigned HOST_WIDE_INT *) alloca (nregs * sizeof (HOST_WIDE_INT));
+ = (unsigned HOST_WIDE_INT *) xmalloc (nregs * sizeof (HOST_WIDE_INT));
reg_last_set_sign_bit_copies
- = (char *) alloca (nregs * sizeof (char));
+ = (char *) xmalloc (nregs * sizeof (char));
init_reg_last_arrays ();
@@ -507,7 +522,7 @@ combine_instructions (f, nregs)
if (INSN_UID (insn) > i)
i = INSN_UID (insn);
- uid_cuid = (int *) alloca ((i + 1) * sizeof (int));
+ uid_cuid = (int *) xmalloc ((i + 1) * sizeof (int));
max_uid_cuid = i;
nonzero_bits_mode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);
@@ -519,7 +534,7 @@ combine_instructions (f, nregs)
/* Compute the mapping from uids to cuids.
Cuids are numbers assigned to insns, like uids,
- except that cuids increase monotonically through the code.
+ except that cuids increase monotonically through the code.
Scan all SETs and see if we can deduce anything about what
bits are known to be zero for some registers and how many copies
@@ -536,21 +551,27 @@ combine_instructions (f, nregs)
setup_incoming_promotions ();
+ refresh_blocks = sbitmap_alloc (n_basic_blocks);
+ sbitmap_zero (refresh_blocks);
+ need_refresh = 0;
+
for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
{
uid_cuid[INSN_UID (insn)] = ++i;
subst_low_cuid = i;
subst_insn = insn;
- if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ if (INSN_P (insn))
{
- note_stores (PATTERN (insn), set_nonzero_bits_and_sign_copies);
+ note_stores (PATTERN (insn), set_nonzero_bits_and_sign_copies,
+ NULL);
record_dead_and_set_regs (insn);
#ifdef AUTO_INC_DEC
for (links = REG_NOTES (insn); links; links = XEXP (links, 1))
if (REG_NOTE_KIND (links) == REG_INC)
- set_nonzero_bits_and_sign_copies (XEXP (links, 0), NULL_RTX);
+ set_nonzero_bits_and_sign_copies (XEXP (links, 0), NULL_RTX,
+ NULL);
#endif
}
@@ -581,22 +602,38 @@ combine_instructions (f, nregs)
if (GET_CODE (insn) == CODE_LABEL)
label_tick++;
- else if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ else if (INSN_P (insn))
{
+ /* See if we know about function return values before this
+ insn based upon SUBREG flags. */
+ check_promoted_subreg (insn, PATTERN (insn));
+
/* Try this insn with each insn it links back to. */
for (links = LOG_LINKS (insn); links; links = XEXP (links, 1))
- if ((next = try_combine (insn, XEXP (links, 0), NULL_RTX)) != 0)
+ if ((next = try_combine (insn, XEXP (links, 0),
+ NULL_RTX, &new_direct_jump_p)) != 0)
goto retry;
/* Try each sequence of three linked insns ending with this one. */
for (links = LOG_LINKS (insn); links; links = XEXP (links, 1))
- for (nextlinks = LOG_LINKS (XEXP (links, 0)); nextlinks;
- nextlinks = XEXP (nextlinks, 1))
- if ((next = try_combine (insn, XEXP (links, 0),
- XEXP (nextlinks, 0))) != 0)
- goto retry;
+ {
+ rtx link = XEXP (links, 0);
+
+ /* If the linked insn has been replaced by a note, then there
+ is no point in pursuing this chain any further. */
+ if (GET_CODE (link) == NOTE)
+ continue;
+
+ for (nextlinks = LOG_LINKS (link);
+ nextlinks;
+ nextlinks = XEXP (nextlinks, 1))
+ if ((next = try_combine (insn, link,
+ XEXP (nextlinks, 0),
+ &new_direct_jump_p)) != 0)
+ goto retry;
+ }
#ifdef HAVE_cc0
/* Try to combine a jump insn that uses CC0
@@ -611,13 +648,15 @@ combine_instructions (f, nregs)
&& GET_CODE (prev) == INSN
&& sets_cc0_p (PATTERN (prev)))
{
- if ((next = try_combine (insn, prev, NULL_RTX)) != 0)
+ if ((next = try_combine (insn, prev,
+ NULL_RTX, &new_direct_jump_p)) != 0)
goto retry;
for (nextlinks = LOG_LINKS (prev); nextlinks;
nextlinks = XEXP (nextlinks, 1))
if ((next = try_combine (insn, prev,
- XEXP (nextlinks, 0))) != 0)
+ XEXP (nextlinks, 0),
+ &new_direct_jump_p)) != 0)
goto retry;
}
@@ -629,13 +668,15 @@ combine_instructions (f, nregs)
&& GET_CODE (PATTERN (insn)) == SET
&& reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (insn))))
{
- if ((next = try_combine (insn, prev, NULL_RTX)) != 0)
+ if ((next = try_combine (insn, prev,
+ NULL_RTX, &new_direct_jump_p)) != 0)
goto retry;
for (nextlinks = LOG_LINKS (prev); nextlinks;
nextlinks = XEXP (nextlinks, 1))
if ((next = try_combine (insn, prev,
- XEXP (nextlinks, 0))) != 0)
+ XEXP (nextlinks, 0),
+ &new_direct_jump_p)) != 0)
goto retry;
}
@@ -649,7 +690,8 @@ combine_instructions (f, nregs)
&& (prev = prev_nonnote_insn (XEXP (links, 0))) != 0
&& GET_CODE (prev) == INSN
&& sets_cc0_p (PATTERN (prev))
- && (next = try_combine (insn, XEXP (links, 0), prev)) != 0)
+ && (next = try_combine (insn, XEXP (links, 0),
+ prev, &new_direct_jump_p)) != 0)
goto retry;
#endif
@@ -659,7 +701,8 @@ combine_instructions (f, nregs)
for (nextlinks = XEXP (links, 1); nextlinks;
nextlinks = XEXP (nextlinks, 1))
if ((next = try_combine (insn, XEXP (links, 0),
- XEXP (nextlinks, 0))) != 0)
+ XEXP (nextlinks, 0),
+ &new_direct_jump_p)) != 0)
goto retry;
if (GET_CODE (insn) != NOTE)
@@ -670,6 +713,39 @@ combine_instructions (f, nregs)
}
}
+ delete_noop_moves (f);
+
+ if (need_refresh)
+ {
+ update_life_info (refresh_blocks, UPDATE_LIFE_GLOBAL_RM_NOTES,
+ PROP_DEATH_NOTES);
+ }
+
+ /* Clean up. */
+ sbitmap_free (refresh_blocks);
+ free (reg_nonzero_bits);
+ free (reg_sign_bit_copies);
+ free (reg_last_death);
+ free (reg_last_set);
+ free (reg_last_set_value);
+ free (reg_last_set_table_tick);
+ free (reg_last_set_label);
+ free (reg_last_set_invalid);
+ free (reg_last_set_mode);
+ free (reg_last_set_nonzero_bits);
+ free (reg_last_set_sign_bit_copies);
+ free (uid_cuid);
+
+ {
+ struct undo *undo, *next;
+ for (undo = undobuf.frees; undo; undo = next)
+ {
+ next = undo->next;
+ free (undo);
+ }
+ undobuf.frees = 0;
+ }
+
total_attempts += combine_attempts;
total_merges += combine_merges;
total_extras += combine_extras;
@@ -679,6 +755,8 @@ combine_instructions (f, nregs)
/* Make recognizer allow volatile MEMs again. */
init_recog ();
+
+ return new_direct_jump_p;
}
/* Wipe the reg_last_xxx arrays in preparation for another pass. */
@@ -686,17 +764,17 @@ combine_instructions (f, nregs)
static void
init_reg_last_arrays ()
{
- int nregs = combine_max_regno;
-
- bzero ((char *) reg_last_death, nregs * sizeof (rtx));
- bzero ((char *) reg_last_set, nregs * sizeof (rtx));
- bzero ((char *) reg_last_set_value, nregs * sizeof (rtx));
- bzero ((char *) reg_last_set_table_tick, nregs * sizeof (int));
- bzero ((char *) reg_last_set_label, nregs * sizeof (int));
- bzero (reg_last_set_invalid, nregs * sizeof (char));
- bzero ((char *) reg_last_set_mode, nregs * sizeof (enum machine_mode));
- bzero ((char *) reg_last_set_nonzero_bits, nregs * sizeof (HOST_WIDE_INT));
- bzero (reg_last_set_sign_bit_copies, nregs * sizeof (char));
+ unsigned int nregs = combine_max_regno;
+
+ memset ((char *) reg_last_death, 0, nregs * sizeof (rtx));
+ memset ((char *) reg_last_set, 0, nregs * sizeof (rtx));
+ memset ((char *) reg_last_set_value, 0, nregs * sizeof (rtx));
+ memset ((char *) reg_last_set_table_tick, 0, nregs * sizeof (int));
+ memset ((char *) reg_last_set_label, 0, nregs * sizeof (int));
+ memset (reg_last_set_invalid, 0, nregs * sizeof (char));
+ memset ((char *) reg_last_set_mode, 0, nregs * sizeof (enum machine_mode));
+ memset ((char *) reg_last_set_nonzero_bits, 0, nregs * sizeof (HOST_WIDE_INT));
+ memset (reg_last_set_sign_bit_copies, 0, nregs * sizeof (char));
}
/* Set up any promoted values for incoming argument registers. */
@@ -705,14 +783,20 @@ static void
setup_incoming_promotions ()
{
#ifdef PROMOTE_FUNCTION_ARGS
- int regno;
+ unsigned int regno;
rtx reg;
enum machine_mode mode;
int unsignedp;
rtx first = get_insns ();
+#ifndef OUTGOING_REGNO
+#define OUTGOING_REGNO(N) N
+#endif
for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
- if (FUNCTION_ARG_REGNO_P (regno)
+ /* Check whether this register can hold an incoming pointer
+ argument. FUNCTION_ARG_REGNO_P tests outgoing register
+ numbers, so translate if necessary due to register windows. */
+ if (FUNCTION_ARG_REGNO_P (OUTGOING_REGNO (regno))
&& (reg = promoted_input_arg (regno, &mode, &unsignedp)) != 0)
{
record_value_for_reg
@@ -732,15 +816,16 @@ setup_incoming_promotions ()
be happening.
Similarly, set how many bits of X are known to be copies of the sign bit
- at all locations in the function. This is the smallest number implied
+ at all locations in the function. This is the smallest number implied
by any set of X. */
static void
-set_nonzero_bits_and_sign_copies (x, set)
+set_nonzero_bits_and_sign_copies (x, set, data)
rtx x;
rtx set;
+ void *data ATTRIBUTE_UNUSED;
{
- int num;
+ unsigned int num;
if (GET_CODE (x) == REG
&& REGNO (x) >= FIRST_PSEUDO_REGISTER
@@ -776,7 +861,7 @@ set_nonzero_bits_and_sign_copies (x, set)
constant that would appear negative in the mode of X,
sign-extend it for use in reg_nonzero_bits because some
machines (maybe most) will actually do the sign-extension
- and this is the conservative approach.
+ and this is the conservative approach.
??? For 2.5, try to tighten up the MD files in this regard
instead of this kludge. */
@@ -813,7 +898,7 @@ set_nonzero_bits_and_sign_copies (x, set)
Return 0 if the combination is not allowed for any reason.
- If the combination is allowed, *PDEST will be set to the single
+ If the combination is allowed, *PDEST will be set to the single
destination of INSN and *PSRC to the single source, and this function
will return 1. */
@@ -836,20 +921,20 @@ can_combine_p (insn, i3, pred, succ, pdest, psrc)
: next_active_insn (insn) == i3);
/* Can combine only if previous insn is a SET of a REG, a SUBREG or CC0.
- or a PARALLEL consisting of such a SET and CLOBBERs.
+ or a PARALLEL consisting of such a SET and CLOBBERs.
If INSN has CLOBBER parallel parts, ignore them for our processing.
By definition, these happen during the execution of the insn. When it
is merged with another insn, all bets are off. If they are, in fact,
needed and aren't also supplied in I3, they may be added by
- recog_for_combine. Otherwise, it won't match.
+ recog_for_combine. Otherwise, it won't match.
We can also ignore a SET whose SET_DEST is mentioned in a REG_UNUSED
note.
- Get the source and destination of INSN. If more than one, can't
+ Get the source and destination of INSN. If more than one, can't
combine. */
-
+
if (GET_CODE (PATTERN (insn)) == SET)
set = PATTERN (insn);
else if (GET_CODE (PATTERN (insn)) == PARALLEL
@@ -865,27 +950,29 @@ can_combine_p (insn, i3, pred, succ, pdest, psrc)
for the SH4 port. */
case USE:
/* Combining an isolated USE doesn't make sense.
- We depend here on combinable_i3_pat to reject them. */
+ We depend here on combinable_i3pat to reject them. */
/* The code below this loop only verifies that the inputs of
the SET in INSN do not change. We call reg_set_between_p
- to verify that the REG in the USE does not change betweeen
+ to verify that the REG in the USE does not change between
I3 and INSN.
If the USE in INSN was for a pseudo register, the matching
insn pattern will likely match any register; combining this
with any other USE would only be safe if we knew that the
used registers have identical values, or if there was
something to tell them apart, e.g. different modes. For
- now, we forgo such compilcated tests and simply disallow
+ now, we forgo such complicated tests and simply disallow
combining of USES of pseudo registers with any other USE. */
if (GET_CODE (XEXP (elt, 0)) == REG
&& GET_CODE (PATTERN (i3)) == PARALLEL)
{
rtx i3pat = PATTERN (i3);
int i = XVECLEN (i3pat, 0) - 1;
- int regno = REGNO (XEXP (elt, 0));
+ unsigned int regno = REGNO (XEXP (elt, 0));
+
do
{
rtx i3elt = XVECEXP (i3pat, 0, i);
+
if (GET_CODE (i3elt) == USE
&& GET_CODE (XEXP (i3elt, 0)) == REG
&& (REGNO (XEXP (i3elt, 0)) == regno
@@ -945,6 +1032,8 @@ can_combine_p (insn, i3, pred, succ, pdest, psrc)
/* Don't combine with an insn that sets a register to itself if it has
a REG_EQUAL note. This may be part of a REG_NO_CONFLICT sequence. */
|| (rtx_equal_p (src, dest) && find_reg_note (insn, REG_EQUAL, NULL_RTX))
+ /* Can't merge an ASM_OPERANDS. */
+ || GET_CODE (src) == ASM_OPERANDS
/* Can't merge a function call. */
|| GET_CODE (src) == CALL
/* Don't eliminate a function call argument. */
@@ -1004,12 +1093,7 @@ can_combine_p (insn, i3, pred, succ, pdest, psrc)
insns. Eliminate this problem by not combining with such an insn.
Also, on some machines we don't want to extend the life of a hard
- register.
-
- This is the same test done in can_combine except that we don't test
- if SRC is a CALL operation to permit a hard register with
- SMALL_REGISTER_CLASSES, and that we have to take all_adjacent
- into account. */
+ register. */
if (GET_CODE (src) == REG
&& ((REGNO (dest) < FIRST_PSEUDO_REGISTER
@@ -1022,11 +1106,7 @@ can_combine_p (insn, i3, pred, succ, pdest, psrc)
reload can't handle a conflict with constraints of other
inputs. */
|| (REGNO (src) < FIRST_PSEUDO_REGISTER
- && (! HARD_REGNO_MODE_OK (REGNO (src), GET_MODE (src))
- || (SMALL_REGISTER_CLASSES
- && ((! all_adjacent && ! REG_USERVAR_P (src))
- || (FUNCTION_VALUE_REGNO_P (REGNO (src))
- && ! REG_USERVAR_P (src))))))))
+ && ! HARD_REGNO_MODE_OK (REGNO (src), GET_MODE (src)))))
return 0;
}
else if (GET_CODE (dest) != CC0)
@@ -1051,11 +1131,10 @@ can_combine_p (insn, i3, pred, succ, pdest, psrc)
/* Make sure succ doesn't contain a volatile reference. */
if (succ != 0 && volatile_refs_p (PATTERN (succ)))
return 0;
-
+
for (p = NEXT_INSN (insn); p != i3; p = NEXT_INSN (p))
- if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
- && p != succ && volatile_refs_p (PATTERN (p)))
- return 0;
+ if (INSN_P (p) && p != succ && volatile_refs_p (PATTERN (p)))
+ return 0;
}
/* If INSN is an asm, and DEST is a hard register, reject, since it has
@@ -1069,8 +1148,7 @@ can_combine_p (insn, i3, pred, succ, pdest, psrc)
they might affect machine state. */
for (p = NEXT_INSN (insn); p != i3; p = NEXT_INSN (p))
- if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
- && p != succ && volatile_insn_p (PATTERN (p)))
+ if (INSN_P (p) && p != succ && volatile_insn_p (PATTERN (p)))
return 0;
/* If INSN or I2 contains an autoincrement or autodecrement,
@@ -1168,10 +1246,10 @@ sets_function_arg_p (pat)
This is NOT equivalent to:
(parallel [(set (subreg:SI (reg:DI 100) 0) <foo>)
- (set (reg:DI 101) (reg:DI 100))])
+ (set (reg:DI 101) (reg:DI 100))])
Not only does this modify 100 (in which case it might still be valid
- if 100 were dead in I2), it sets 101 to the ORIGINAL value of 100.
+ if 100 were dead in I2), it sets 101 to the ORIGINAL value of 100.
We can also run into a problem if I2 sets a register that I1
uses and I1 gets directly substituted into I3 (not via I2). In that
@@ -1181,10 +1259,6 @@ sets_function_arg_p (pat)
If I1_NOT_IN_SRC is non-zero, it means that finding I1 in the source
of a SET must prevent combination from occurring.
- On machines where SMALL_REGISTER_CLASSES is non-zero, we don't combine
- if the destination of a SET is a hard register that isn't a user
- variable.
-
Before doing the above check, we first try to expand a field assignment
into a set of logical operations.
@@ -1211,7 +1285,7 @@ combinable_i3pat (i3, loc, i2dest, i1dest, i1_not_in_src, pi3dest_killed)
rtx dest = SET_DEST (set);
rtx src = SET_SRC (set);
rtx inner_dest = dest;
-
+
#if 0
rtx inner_src = src;
#endif
@@ -1253,32 +1327,25 @@ combinable_i3pat (i3, loc, i2dest, i1dest, i1_not_in_src, pi3dest_killed)
&& (reg_overlap_mentioned_p (i2dest, inner_dest)
|| (i1dest && reg_overlap_mentioned_p (i1dest, inner_dest))))
- /* This is the same test done in can_combine_p except that we
- allow a hard register with SMALL_REGISTER_CLASSES if SRC is a
- CALL operation. Moreover, we can't test all_adjacent; we don't
- have to, since this instruction will stay in place, thus we are
- not considering increasing the lifetime of INNER_DEST.
+ /* This is the same test done in can_combine_p except we can't test
+ all_adjacent; we don't have to, since this instruction will stay
+ in place, thus we are not considering increasing the lifetime of
+ INNER_DEST.
Also, if this insn sets a function argument, combining it with
something that might need a spill could clobber a previous
function argument; the all_adjacent test in can_combine_p also
checks this; here, we do a more specific test for this case. */
-
+
|| (GET_CODE (inner_dest) == REG
&& REGNO (inner_dest) < FIRST_PSEUDO_REGISTER
&& (! HARD_REGNO_MODE_OK (REGNO (inner_dest),
- GET_MODE (inner_dest))
- || (SMALL_REGISTER_CLASSES && GET_CODE (src) != CALL
- && ! REG_USERVAR_P (inner_dest)
- && (FUNCTION_VALUE_REGNO_P (REGNO (inner_dest))
- || (FUNCTION_ARG_REGNO_P (REGNO (inner_dest))
- && i3 != 0
- && sets_function_arg_p (prev_nonnote_insn (i3)))))))
+ GET_MODE (inner_dest))))
|| (i1_not_in_src && reg_overlap_mentioned_p (i1dest, src)))
return 0;
/* If DEST is used in I3, it is being killed in this insn,
- so record that for later.
+ so record that for later.
Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
STACK_POINTER_REGNUM, since these are always considered to be
live. Similarly for ARG_POINTER_REGNUM if it is fixed. */
@@ -1314,6 +1381,37 @@ combinable_i3pat (i3, loc, i2dest, i1dest, i1_not_in_src, pi3dest_killed)
return 1;
}
+/* Return 1 if X is an arithmetic expression that contains a multiplication
+ or division. We don't count multiplications by powers of two here. */
+
+static int
+contains_muldiv (x)
+ rtx x;
+{
+ switch (GET_CODE (x))
+ {
+ case MOD: case DIV: case UMOD: case UDIV:
+ return 1;
+
+ case MULT:
+ return ! (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && exact_log2 (INTVAL (XEXP (x, 1))) >= 0);
+ default:
+ switch (GET_RTX_CLASS (GET_CODE (x)))
+ {
+ case 'c': case '<': case '2':
+ return contains_muldiv (XEXP (x, 0))
+ || contains_muldiv (XEXP (x, 1));
+
+ case '1':
+ return contains_muldiv (XEXP (x, 0));
+
+ default:
+ return 0;
+ }
+ }
+}
+
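contains_muldiv leans on exact_log2 to exempt multiplications by
powers of two. A standalone stand-in for it (the real exact_log2
lives elsewhere in GCC; this sketch only mirrors its contract) shows
the intended behavior:

    #include <stdio.h>

    /* Return the log base 2 of X if X is an exact power of two, else -1. */
    static int exact_log2 (unsigned long long x)
    {
      int n = 0;
      if (x == 0 || (x & (x - 1)) != 0)
        return -1;
      while ((x >>= 1) != 0)
        n++;
      return n;
    }

    int main (void)
    {
      printf ("%d %d\n", exact_log2 (8), exact_log2 (12));  /* 3 -1 */
      return 0;
    }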
/* Determine whether INSN can be used in a combination. Return nonzero if
not. This is used in try_combine to detect early some cases where we
can't perform combinations. */
@@ -1324,18 +1422,13 @@ cant_combine_insn_p (insn)
{
rtx set;
rtx src, dest;
-
+
/* If this isn't really an insn, we can't do anything.
This can occur when flow deletes an insn that it has merged into an
auto-increment address. */
- if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
+ if (! INSN_P (insn))
return 1;
- /* For the 2.95.3 release, restrict this code to only handle the machines
- where it's strictly needed. */
- if (! SMALL_REGISTER_CLASSES)
- return 0;
-
/* Never combine loads and stores involving hard regs. The register
allocator can usually handle such reg-reg moves by tying. If we allow
the combiner to make substitutions of hard regs, we risk aborting in
@@ -1365,21 +1458,25 @@ cant_combine_insn_p (insn)
/* Try to combine the insns I1 and I2 into I3.
Here I1 and I2 appear earlier than I3.
I1 can be zero; then we combine just I2 into I3.
-
- It we are combining three insns and the resulting insn is not recognized,
+
+ If we are combining three insns and the resulting insn is not recognized,
try splitting it into two insns. If that happens, I2 and I3 are retained
and I1 is pseudo-deleted by turning it into a NOTE. Otherwise, I1 and I2
are pseudo-deleted.
- Return 0 if the combination does not work. Then nothing is changed.
+ Return 0 if the combination does not work. Then nothing is changed.
If we did the combination, return the insn at which combine should
- resume scanning. */
+ resume scanning.
+
+ Set NEW_DIRECT_JUMP_P to a non-zero value if try_combine creates a
+ new direct jump instruction. */
static rtx
-try_combine (i3, i2, i1)
- register rtx i3, i2, i1;
+try_combine (i3, i2, i1, new_direct_jump_p)
+ rtx i3, i2, i1;
+ int *new_direct_jump_p;
{
- /* New patterns for I3 and I3, respectively. */
+ /* New patterns for I3 and I2, respectively. */
rtx newpat, newi2pat = 0;
/* Indicates need to preserve SET in I1 or I2 in I3 if it is not dead. */
int added_sets_1, added_sets_2;
@@ -1388,7 +1485,7 @@ try_combine (i3, i2, i1)
/* Nonzero if I2's body now appears in I3. */
int i2_is_used;
/* INSN_CODEs for new I3, new I2, and user of condition code. */
- int insn_code_number, i2_code_number, other_code_number;
+ int insn_code_number, i2_code_number = 0, other_code_number = 0;
/* Contains I3 if the destination of I3 is used in its source, which means
that the old life of I3 is being killed. If that usage is placed into
I2 and not in I3, a REG_DEAD note must be made. */
@@ -1409,7 +1506,7 @@ try_combine (i3, i2, i1)
int maxreg;
rtx temp;
- register rtx link;
+ rtx link;
int i;
/* Exit early if one of the insns involved can't be used for
@@ -1429,14 +1526,8 @@ try_combine (i3, i2, i1)
return 0;
combine_attempts++;
-
- undobuf.undos = undobuf.previous_undos = 0;
undobuf.other_insn = 0;
- /* Save the current high-water-mark so we can free storage if we didn't
- accept this combination. */
- undobuf.storage = (char *) oballoc (0);
-
/* Reset the hard register usage information. */
CLEAR_HARD_REG_SET (newpat_used_regs);
@@ -1448,13 +1539,16 @@ try_combine (i3, i2, i1)
added_links_insn = 0;
/* First check for one important special-case that the code below will
- not handle. Namely, the case where I1 is zero, I2 has multiple sets,
+ not handle. Namely, the case where I1 is zero, I2 is a PARALLEL
and I3 is a SET whose SET_SRC is a SET_DEST in I2. In that case,
we may be able to replace that destination with the destination of I3.
This occurs in the common code where we compute both a quotient and
remainder into a structure, in which case we want to do the computation
directly into the structure to avoid register-register copies.
+ Note that this case handles both multiple sets in I2 and also
+ cases where I2 has a number of CLOBBERs or PARALLELs.
+
We make very conservative checks below and only try to handle the
most common cases of this. For example, we only handle the case
where I2 and I3 are adjacent to avoid making difficult register
@@ -1463,10 +1557,6 @@ try_combine (i3, i2, i1)
if (i1 == 0 && GET_CODE (i3) == INSN && GET_CODE (PATTERN (i3)) == SET
&& GET_CODE (SET_SRC (PATTERN (i3))) == REG
&& REGNO (SET_SRC (PATTERN (i3))) >= FIRST_PSEUDO_REGISTER
- && (! SMALL_REGISTER_CLASSES
- || (GET_CODE (SET_DEST (PATTERN (i3))) != REG
- || REGNO (SET_DEST (PATTERN (i3))) >= FIRST_PSEUDO_REGISTER
- || REG_USERVAR_P (SET_DEST (PATTERN (i3)))))
&& find_reg_note (i3, REG_DEAD, SET_SRC (PATTERN (i3)))
&& GET_CODE (PATTERN (i2)) == PARALLEL
&& ! side_effects_p (SET_DEST (PATTERN (i3)))
@@ -1501,7 +1591,9 @@ try_combine (i3, i2, i1)
if (i == XVECLEN (p2, 0))
for (i = 0; i < XVECLEN (p2, 0); i++)
- if (SET_DEST (XVECEXP (p2, 0, i)) == SET_SRC (PATTERN (i3)))
+ if ((GET_CODE (XVECEXP (p2, 0, i)) == SET
+ || GET_CODE (XVECEXP (p2, 0, i)) == CLOBBER)
+ && SET_DEST (XVECEXP (p2, 0, i)) == SET_SRC (PATTERN (i3)))
{
combine_merges++;
@@ -1514,7 +1606,7 @@ try_combine (i3, i2, i1)
/* Replace the dest in I2 with our dest and make the resulting
insn the new pattern for I3. Then skip to where we
validate the pattern. Everything was set up above. */
- SUBST (SET_DEST (XVECEXP (p2, 0, i)),
+ SUBST (SET_DEST (XVECEXP (p2, 0, i)),
SET_DEST (PATTERN (i3)));
newpat = p2;
@@ -1523,6 +1615,76 @@ try_combine (i3, i2, i1)
}
}
+ /* If I2 is setting a double-word pseudo to a constant and I3 is setting
+ one of those words to another constant, merge them by making a new
+ constant. */
+ if (i1 == 0
+ && (temp = single_set (i2)) != 0
+ && (GET_CODE (SET_SRC (temp)) == CONST_INT
+ || GET_CODE (SET_SRC (temp)) == CONST_DOUBLE)
+ && GET_CODE (SET_DEST (temp)) == REG
+ && GET_MODE_CLASS (GET_MODE (SET_DEST (temp))) == MODE_INT
+ && GET_MODE_SIZE (GET_MODE (SET_DEST (temp))) == 2 * UNITS_PER_WORD
+ && GET_CODE (PATTERN (i3)) == SET
+ && GET_CODE (SET_DEST (PATTERN (i3))) == SUBREG
+ && SUBREG_REG (SET_DEST (PATTERN (i3))) == SET_DEST (temp)
+ && GET_MODE_CLASS (GET_MODE (SET_DEST (PATTERN (i3)))) == MODE_INT
+ && GET_MODE_SIZE (GET_MODE (SET_DEST (PATTERN (i3)))) == UNITS_PER_WORD
+ && GET_CODE (SET_SRC (PATTERN (i3))) == CONST_INT)
+ {
+ HOST_WIDE_INT lo, hi;
+
+ if (GET_CODE (SET_SRC (temp)) == CONST_INT)
+ lo = INTVAL (SET_SRC (temp)), hi = lo < 0 ? -1 : 0;
+ else
+ {
+ lo = CONST_DOUBLE_LOW (SET_SRC (temp));
+ hi = CONST_DOUBLE_HIGH (SET_SRC (temp));
+ }
+
+ if (subreg_lowpart_p (SET_DEST (PATTERN (i3))))
+ {
+ /* We don't handle the case of the target word being wider
+ than a host wide int. */
+ if (HOST_BITS_PER_WIDE_INT < BITS_PER_WORD)
+ abort ();
+
+ lo &= ~(UWIDE_SHIFT_LEFT_BY_BITS_PER_WORD (1) - 1);
+ lo |= (INTVAL (SET_SRC (PATTERN (i3)))
+ & (UWIDE_SHIFT_LEFT_BY_BITS_PER_WORD (1) - 1));
+ }
+ else if (HOST_BITS_PER_WIDE_INT == BITS_PER_WORD)
+ hi = INTVAL (SET_SRC (PATTERN (i3)));
+ else if (HOST_BITS_PER_WIDE_INT >= 2 * BITS_PER_WORD)
+ {
+ int sign = -(int) ((unsigned HOST_WIDE_INT) lo
+ >> (HOST_BITS_PER_WIDE_INT - 1));
+
+ lo &= ~ (UWIDE_SHIFT_LEFT_BY_BITS_PER_WORD
+ (UWIDE_SHIFT_LEFT_BY_BITS_PER_WORD (1) - 1));
+ lo |= (UWIDE_SHIFT_LEFT_BY_BITS_PER_WORD
+ (INTVAL (SET_SRC (PATTERN (i3)))));
+ if (hi == sign)
+ hi = lo < 0 ? -1 : 0;
+ }
+ else
+ /* We don't handle the case of the higher word not fitting
+ entirely in either hi or lo. */
+ abort ();
+
+ combine_merges++;
+ subst_insn = i3;
+ subst_low_cuid = INSN_CUID (i2);
+ added_sets_2 = added_sets_1 = 0;
+ i2dest = SET_DEST (temp);
+
+ SUBST (SET_SRC (temp),
+ immed_double_const (lo, hi, GET_MODE (SET_DEST (temp))));
+
+ newpat = PATTERN (i2);
+ goto validate_replacement;
+ }
+
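A worked sketch of the word-splicing arithmetic above, assuming a
64-bit HOST_WIDE_INT and 32-bit words (the values are illustrative,
not from any target): the old low word is masked away and the new
constant from I3 is OR'd in.

    #include <stdio.h>
    #include <stdint.h>

    int main (void)
    {
      int64_t lo = 0x1111111122222222LL;  /* the constant I2 stores */
      int64_t word = 0x33333333LL;        /* the low-word constant I3 stores */
      uint64_t word_mask = ((uint64_t) 1 << 32) - 1;  /* low 32 bits */

      lo &= ~(int64_t) word_mask;         /* clear the old low word */
      lo |= word & (int64_t) word_mask;   /* splice in the new one */
      printf ("%llx\n", (unsigned long long) lo);  /* 1111111133333333 */
      return 0;
    }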
#ifndef HAVE_cc0
/* If we have no I1 and I2 looks like:
(parallel [(set (reg:CC X) (compare:CC OP (const_int 0)))
@@ -1549,7 +1711,7 @@ try_combine (i3, i2, i1)
&& rtx_equal_p (XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 0),
SET_SRC (XVECEXP (PATTERN (i2), 0, 1))))
{
- for (i = XVECLEN (PATTERN (i2), 0) - 1; i >= 2; i--)
+ for (i = XVECLEN (PATTERN (i2), 0) - 1; i >= 2; i--)
if (GET_CODE (XVECEXP (PATTERN (i2), 0, i)) != CLOBBER)
break;
@@ -1611,7 +1773,7 @@ try_combine (i3, i2, i1)
/* If I3 has an inc, then give up if I1 or I2 uses the reg that is inc'd.
We used to do this EXCEPT in one case: I3 has a post-inc in an
output operand. However, that exception can give rise to insns like
- mov r3,(r3)+
+ mov r3,(r3)+
which is a famous insn on the PDP-11 where the value of r3 used as the
source was model-dependent. Avoid this sort of thing. */
@@ -1703,8 +1865,6 @@ try_combine (i3, i2, i1)
subst_low_cuid = INSN_CUID (i2);
i2src = subst (i2src, pc_rtx, pc_rtx, 0, 0);
}
-
- undobuf.previous_undos = undobuf.undos;
}
#ifndef HAVE_cc0
@@ -1747,7 +1907,7 @@ try_combine (i3, i2, i1)
i2src, const0_rtx))
!= GET_MODE (SET_DEST (newpat))))
{
- int regno = REGNO (SET_DEST (newpat));
+ unsigned int regno = REGNO (SET_DEST (newpat));
rtx new_dest = gen_rtx_REG (compare_mode, regno);
if (regno < FIRST_PSEUDO_REGISTER
@@ -1760,13 +1920,12 @@ try_combine (i3, i2, i1)
SUBST (SET_DEST (newpat), new_dest);
SUBST (XEXP (*cc_use, 0), new_dest);
SUBST (SET_SRC (newpat),
- gen_rtx_combine (COMPARE, compare_mode,
- i2src, const0_rtx));
+ gen_rtx_COMPARE (compare_mode, i2src, const0_rtx));
}
else
undobuf.other_insn = 0;
}
-#endif
+#endif
}
else
#endif
@@ -1780,7 +1939,6 @@ try_combine (i3, i2, i1)
subst_low_cuid = INSN_CUID (i2);
newpat = subst (PATTERN (i3), i2dest, i2src, 0,
! i1_feeds_i3 && i1dest_in_i1src);
- undobuf.previous_undos = undobuf.undos;
/* Record whether i2's body now appears within i3's body. */
i2_is_used = n_occurrences;
@@ -1796,7 +1954,7 @@ try_combine (i3, i2, i1)
isn't mentioned in any SETs in NEWPAT that are field assignments. */
if (! combinable_i3pat (NULL_RTX, &newpat, i1dest, NULL_RTX,
- 0, NULL_PTR))
+ 0, (rtx*) 0))
{
undo_all ();
return 0;
@@ -1805,7 +1963,6 @@ try_combine (i3, i2, i1)
n_occurrences = 0;
subst_low_cuid = INSN_CUID (i1);
newpat = subst (newpat, i1dest, i1src, 0, 0);
- undobuf.previous_undos = undobuf.undos;
}
/* Fail if an autoincrement side-effect has been duplicated. Be careful
@@ -1843,8 +2000,8 @@ try_combine (i3, i2, i1)
rtvec old = XVEC (newpat, 0);
total_sets = XVECLEN (newpat, 0) + added_sets_1 + added_sets_2;
newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
- bcopy ((char *) &old->elem[0], (char *) XVEC (newpat, 0)->elem,
- sizeof (old->elem[0]) * old->num_elem);
+ memcpy (XVEC (newpat, 0)->elem, &old->elem[0],
+ sizeof (old->elem[0]) * old->num_elem);
}
else
{
@@ -1854,12 +2011,12 @@ try_combine (i3, i2, i1)
XVECEXP (newpat, 0, 0) = old;
}
- if (added_sets_1)
- XVECEXP (newpat, 0, --total_sets)
- = (GET_CODE (PATTERN (i1)) == PARALLEL
- ? gen_rtx_SET (VOIDmode, i1dest, i1src) : PATTERN (i1));
+ if (added_sets_1)
+ XVECEXP (newpat, 0, --total_sets)
+ = (GET_CODE (PATTERN (i1)) == PARALLEL
+ ? gen_rtx_SET (VOIDmode, i1dest, i1src) : PATTERN (i1));
- if (added_sets_2)
+ if (added_sets_2)
{
/* If there is no I1, use I2's body as is. We used to also not do
the subst call below if I2 was substituted into I3,
@@ -1920,7 +2077,7 @@ try_combine (i3, i2, i1)
/* If we were combining three insns and the result is a simple SET
with no ASM_OPERANDS that wasn't recognized, try to split it into two
- insns. There are two ways to do this. It can be split using a
+ insns. There are two ways to do this. It can be split using a
machine-specific method (like when you have an addition of a large
constant) or by combine in the function find_split_point. */
@@ -1954,21 +2111,45 @@ try_combine (i3, i2, i1)
|| (REG_N_SETS (REGNO (i2dest)) == 1 && ! added_sets_2
&& ! REG_USERVAR_P (i2dest))))
ni2dest = gen_rtx_REG (GET_MODE (SET_DEST (newpat)),
- REGNO (i2dest));
-
- m_split = split_insns
- (gen_rtx_PARALLEL (VOIDmode,
- gen_rtvec (2, newpat,
- gen_rtx_CLOBBER (VOIDmode,
- ni2dest))),
- i3);
+ REGNO (i2dest));
+
+ m_split = split_insns (gen_rtx_PARALLEL
+ (VOIDmode,
+ gen_rtvec (2, newpat,
+ gen_rtx_CLOBBER (VOIDmode,
+ ni2dest))),
+ i3);
+ /* If the split with the mode-changed register didn't work, try
+ the original register. */
+ if (! m_split && ni2dest != i2dest)
+ {
+ ni2dest = i2dest;
+ m_split = split_insns (gen_rtx_PARALLEL
+ (VOIDmode,
+ gen_rtvec (2, newpat,
+ gen_rtx_CLOBBER (VOIDmode,
+ i2dest))),
+ i3);
+ }
}
+ /* If we've split a jump pattern, we'll wind up with a sequence even
+ with one instruction. We can handle that below, so extract it. */
if (m_split && GET_CODE (m_split) == SEQUENCE
- && XVECLEN (m_split, 0) == 2
- && (next_real_insn (i2) == i3
- || ! use_crosses_set_p (PATTERN (XVECEXP (m_split, 0, 0)),
- INSN_CUID (i2))))
+ && XVECLEN (m_split, 0) == 1)
+ m_split = PATTERN (XVECEXP (m_split, 0, 0));
+
+ if (m_split && GET_CODE (m_split) != SEQUENCE)
+ {
+ insn_code_number = recog_for_combine (&m_split, i3, &new_i3_notes);
+ if (insn_code_number >= 0)
+ newpat = m_split;
+ }
+ else if (m_split && GET_CODE (m_split) == SEQUENCE
+ && XVECLEN (m_split, 0) == 2
+ && (next_real_insn (i2) == i3
+ || ! use_crosses_set_p (PATTERN (XVECEXP (m_split, 0, 0)),
+ INSN_CUID (i2))))
{
rtx i2set, i3set;
rtx newi3pat = PATTERN (XVECEXP (m_split, 0, 1));
@@ -2062,10 +2243,11 @@ try_combine (i3, i2, i1)
appeared to be a memory address. This is a kludge. */
if (split_code == MULT
&& GET_CODE (XEXP (*split, 1)) == CONST_INT
+ && INTVAL (XEXP (*split, 1)) > 0
&& (i = exact_log2 (INTVAL (XEXP (*split, 1)))) >= 0)
{
- SUBST (*split, gen_rtx_combine (ASHIFT, split_mode,
- XEXP (*split, 0), GEN_INT (i)));
+ SUBST (*split, gen_rtx_ASHIFT (split_mode,
+ XEXP (*split, 0), GEN_INT (i)));
/* Update split_code because we may not have a multiply
anymore. */
split_code = GET_CODE (*split);
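The rewrite above turns a multiply by an exact power of two into a
shift; a trivial standalone check of the equivalence (illustrative
values):

    #include <stdio.h>

    int main (void)
    {
      int x = 7;
      /* exact_log2 (8) == 3, so (mult x 8) becomes (ashift x 3). */
      printf ("%d %d\n", x * 8, x << 3);  /* both print 56 */
      return 0;
    }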
@@ -2075,11 +2257,11 @@ try_combine (i3, i2, i1)
/* If *SPLIT is a paradoxical SUBREG, when we split it, it should
be written as a ZERO_EXTEND. */
if (split_code == SUBREG && GET_CODE (SUBREG_REG (*split)) == MEM)
- SUBST (*split, gen_rtx_combine (ZERO_EXTEND, split_mode,
- XEXP (*split, 0)));
+ SUBST (*split, gen_rtx_ZERO_EXTEND (split_mode,
+ SUBREG_REG (*split)));
#endif
- newi2pat = gen_rtx_combine (SET, VOIDmode, newdest, *split);
+ newi2pat = gen_rtx_SET (VOIDmode, newdest, *split);
SUBST (*split, newdest);
i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
@@ -2176,8 +2358,7 @@ try_combine (i3, i2, i1)
|| insn != BLOCK_HEAD (this_basic_block + 1));
insn = NEXT_INSN (insn))
{
- if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
- && reg_referenced_p (ni2dest, PATTERN (insn)))
+ if (INSN_P (insn) && reg_referenced_p (ni2dest, PATTERN (insn)))
{
for (link = LOG_LINKS (insn); link;
link = XEXP (link, 1))
@@ -2189,7 +2370,7 @@ try_combine (i3, i2, i1)
}
}
}
-
+
/* Similarly, check for a case where we have a PARALLEL of two independent
SETs but we started with three insns. In this case, we can do the sets
as two separate insns. This case occurs when some SET allows two
@@ -2212,7 +2393,9 @@ try_combine (i3, i2, i1)
&& ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 1)),
XVECEXP (newpat, 0, 0))
&& ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 0)),
- XVECEXP (newpat, 0, 1)))
+ XVECEXP (newpat, 0, 1))
+ && ! (contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 0)))
+ && contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 1)))))
{
/* Normally, it doesn't matter which of the two is done first,
but it does if one references cc0. In that case, it has to
@@ -2290,15 +2473,28 @@ try_combine (i3, i2, i1)
distribute_notes (new_other_notes, undobuf.other_insn,
undobuf.other_insn, NULL_RTX, NULL_RTX, NULL_RTX);
}
+#ifdef HAVE_cc0
+ /* If I2 is the CC0 setter and I3 is the CC0 user, then check whether
+ they are adjacent to each other. */
+ {
+ rtx p = prev_nonnote_insn (i3);
+ if (p && p != i2 && GET_CODE (p) == INSN && newi2pat
+ && sets_cc0_p (newi2pat))
+ {
+ undo_all ();
+ return 0;
+ }
+ }
+#endif
- /* We now know that we can do this combination. Merge the insns and
+ /* We now know that we can do this combination. Merge the insns and
update the status of registers and LOG_LINKS. */
{
rtx i3notes, i2notes, i1notes = 0;
rtx i3links, i2links, i1links = 0;
rtx midnotes = 0;
- register int regno;
+ unsigned int regno;
/* Compute which registers we expect to eliminate. newi2pat may be setting
either i3dest or i2dest, so we must check it. Also, i1dest may be the
same as i3dest, in which case newi2pat may be setting i1dest. */
@@ -2353,7 +2549,8 @@ try_combine (i3, i2, i1)
if (i3_subst_into_i2)
{
for (i = 0; i < XVECLEN (PATTERN (i2), 0); i++)
- if (GET_CODE (SET_DEST (XVECEXP (PATTERN (i2), 0, i))) == REG
+ if (GET_CODE (XVECEXP (PATTERN (i2), 0, i)) != USE
+ && GET_CODE (SET_DEST (XVECEXP (PATTERN (i2), 0, i))) == REG
&& SET_DEST (XVECEXP (PATTERN (i2), 0, i)) != i2dest
&& ! find_reg_note (i2, REG_UNUSED,
SET_DEST (XVECEXP (PATTERN (i2), 0, i))))
@@ -2361,7 +2558,7 @@ try_combine (i3, i2, i1)
temp && (this_basic_block == n_basic_blocks - 1
|| BLOCK_HEAD (this_basic_block) != temp);
temp = NEXT_INSN (temp))
- if (temp != i3 && GET_RTX_CLASS (GET_CODE (temp)) == 'i')
+ if (temp != i3 && INSN_P (temp))
for (link = LOG_LINKS (temp); link; link = XEXP (link, 1))
if (XEXP (link, 0) == i2)
XEXP (link, 0) = i3;
@@ -2405,7 +2602,7 @@ try_combine (i3, i2, i1)
}
/* Get death notes for everything that is now used in either I3 or
- I2 and used to die in a previous insn. If we built two new
+ I2 and used to die in a previous insn. If we built two new
patterns, move from I1 to I2 then I2 to I3 so that we get the
proper movement on registers that I2 modifies. */
@@ -2434,7 +2631,7 @@ try_combine (i3, i2, i1)
/* Distribute any notes added to I2 or I3 by recog_for_combine. We
know these are REG_UNUSED and want them to go to the desired insn,
- so we always pass it as i3. We have not counted the notes in
+ so we always pass it as i3. We have not counted the notes in
reg_n_deaths yet, so we need to do so now. */
if (newi2pat && new_i2_notes)
@@ -2442,7 +2639,7 @@ try_combine (i3, i2, i1)
for (temp = new_i2_notes; temp; temp = XEXP (temp, 1))
if (GET_CODE (XEXP (temp, 0)) == REG)
REG_N_DEATHS (REGNO (XEXP (temp, 0)))++;
-
+
distribute_notes (new_i2_notes, i2, i2, NULL_RTX, NULL_RTX, NULL_RTX);
}
@@ -2451,7 +2648,7 @@ try_combine (i3, i2, i1)
for (temp = new_i3_notes; temp; temp = XEXP (temp, 1))
if (GET_CODE (XEXP (temp, 0)) == REG)
REG_N_DEATHS (REGNO (XEXP (temp, 0)))++;
-
+
distribute_notes (new_i3_notes, i3, i3, NULL_RTX, NULL_RTX, NULL_RTX);
}
@@ -2517,7 +2714,7 @@ try_combine (i3, i2, i1)
/* The insn that used to set this register doesn't exist, and
this life of the register may not exist either. See if one of
- I3's links points to an insn that sets I2DEST. If it does,
+ I3's links points to an insn that sets I2DEST. If it does,
that is now the last known value for I2DEST. If we don't update
this and I2 set the register to a value that depended on its old
contents, we will get confused. If this insn is used, thing
@@ -2538,10 +2735,6 @@ try_combine (i3, i2, i1)
{
regno = REGNO (i2dest);
REG_N_SETS (regno)--;
- if (REG_N_SETS (regno) == 0
- && ! REGNO_REG_SET_P (BASIC_BLOCK (0)->global_live_at_start,
- regno))
- REG_N_REFS (regno) = 0;
}
}
@@ -2559,33 +2752,41 @@ try_combine (i3, i2, i1)
regno = REGNO (i1dest);
if (! added_sets_1 && ! i1dest_in_i1src)
- {
- REG_N_SETS (regno)--;
- if (REG_N_SETS (regno) == 0
- && ! REGNO_REG_SET_P (BASIC_BLOCK (0)->global_live_at_start,
- regno))
- REG_N_REFS (regno) = 0;
- }
+ REG_N_SETS (regno)--;
}
/* Update reg_nonzero_bits et al for any changes that may have been made
- to this insn. */
-
- note_stores (newpat, set_nonzero_bits_and_sign_copies);
+ to this insn. The order of the set_nonzero_bits_and_sign_copies()
+ calls is important, because newi2pat can affect the nonzero_bits of
+ newpat. */
if (newi2pat)
- note_stores (newi2pat, set_nonzero_bits_and_sign_copies);
+ note_stores (newi2pat, set_nonzero_bits_and_sign_copies, NULL);
+ note_stores (newpat, set_nonzero_bits_and_sign_copies, NULL);
- /* If I3 is now an unconditional jump, ensure that it has a
+ /* Set new_direct_jump_p if a new return or simple jump instruction
+ has been created.
+
+ If I3 is now an unconditional jump, ensure that it has a
BARRIER following it since it may have initially been a
conditional jump. It may also be the last nonnote insn. */
- if ((GET_CODE (newpat) == RETURN || simplejump_p (i3))
- && ((temp = next_nonnote_insn (i3)) == NULL_RTX
- || GET_CODE (temp) != BARRIER))
- emit_barrier_after (i3);
+ if (GET_CODE (newpat) == RETURN || any_uncondjump_p (i3))
+ {
+ *new_direct_jump_p = 1;
+
+ if ((temp = next_nonnote_insn (i3)) == NULL_RTX
+ || GET_CODE (temp) != BARRIER)
+ emit_barrier_after (i3);
+ }
+ /* A no-op jump does not need a barrier, but it does need the CFG
+ cleaned up. */
+ if (GET_CODE (newpat) == SET
+ && SET_SRC (newpat) == pc_rtx
+ && SET_DEST (newpat) == pc_rtx)
+ *new_direct_jump_p = 1;
}
combine_successes++;
+ undo_commit ();
/* Clear this here, so that subsequent get_last_value calls are not
affected. */
@@ -2618,13 +2819,30 @@ undo_all ()
undobuf.frees = undo;
}
- obfree (undobuf.storage);
- undobuf.undos = undobuf.previous_undos = 0;
+ undobuf.undos = 0;
/* Clear this here, so that subsequent get_last_value calls are not
affected. */
subst_prev_insn = NULL_RTX;
}
+
+/* We've committed to accepting the changes we made. Move all
+ of the undos to the free list. */
+
+static void
+undo_commit ()
+{
+ struct undo *undo, *next;
+
+ for (undo = undobuf.undos; undo; undo = next)
+ {
+ next = undo->next;
+ undo->next = undobuf.frees;
+ undobuf.frees = undo;
+ }
+ undobuf.undos = 0;
+}
+
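The new undo_commit pairs with undo_all: both drain undobuf.undos, but undo_commit keeps the changes and merely recycles the records onto undobuf.frees. Below is a sketch of the allocation side that this free list feeds, assuming the do_SUBST-style allocator used elsewhere in this file; get_undo_entry is a hypothetical name for illustration only.

static struct undo *
get_undo_entry ()
{
  struct undo *buf;

  /* Reuse a recycled record when one is available; allocate otherwise.  */
  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = (struct undo *) xmalloc (sizeof (struct undo));

  /* Push it onto the list that undo_commit or undo_all will drain.  */
  buf->next = undobuf.undos;
  undobuf.undos = buf;
  return buf;
}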
/* Find the innermost point within the rtx at LOC, possibly LOC itself,
where we have an arithmetic expression and return that point. LOC will
@@ -2641,8 +2859,10 @@ find_split_point (loc, insn)
rtx x = *loc;
enum rtx_code code = GET_CODE (x);
rtx *split;
- int len = 0, pos, unsignedp;
- rtx inner;
+ unsigned HOST_WIDE_INT len = 0;
+ HOST_WIDE_INT pos = 0;
+ int unsignedp = 0;
+ rtx inner = NULL_RTX;
/* First special-case some codes. */
switch (code)
@@ -2664,9 +2884,9 @@ find_split_point (loc, insn)
|| GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
{
SUBST (XEXP (x, 0),
- gen_rtx_combine (LO_SUM, Pmode,
- gen_rtx_combine (HIGH, Pmode, XEXP (x, 0)),
- XEXP (x, 0)));
+ gen_rtx_LO_SUM (Pmode,
+ gen_rtx_HIGH (Pmode, XEXP (x, 0)),
+ XEXP (x, 0)));
return &XEXP (XEXP (x, 0), 0);
}
#endif
@@ -2723,7 +2943,7 @@ find_split_point (loc, insn)
return split;
}
}
-
+
/* If that didn't work, perhaps the first operand is complex and
needs to be computed separately, so make a split point there.
This will occur on machines that just support REG + CONST
@@ -2772,13 +2992,13 @@ find_split_point (loc, insn)
&& GET_CODE (XEXP (SET_DEST (x), 2)) == CONST_INT
&& GET_CODE (SET_SRC (x)) == CONST_INT
&& ((INTVAL (XEXP (SET_DEST (x), 1))
- + INTVAL (XEXP (SET_DEST (x), 2)))
+ + INTVAL (XEXP (SET_DEST (x), 2)))
<= GET_MODE_BITSIZE (GET_MODE (XEXP (SET_DEST (x), 0))))
&& ! side_effects_p (XEXP (SET_DEST (x), 0)))
{
- int pos = INTVAL (XEXP (SET_DEST (x), 2));
- int len = INTVAL (XEXP (SET_DEST (x), 1));
- int src = INTVAL (SET_SRC (x));
+ HOST_WIDE_INT pos = INTVAL (XEXP (SET_DEST (x), 2));
+ unsigned HOST_WIDE_INT len = INTVAL (XEXP (SET_DEST (x), 1));
+ unsigned HOST_WIDE_INT src = INTVAL (SET_SRC (x));
rtx dest = XEXP (SET_DEST (x), 0);
enum machine_mode mode = GET_MODE (dest);
unsigned HOST_WIDE_INT mask = ((HOST_WIDE_INT) 1 << len) - 1;
@@ -2786,14 +3006,14 @@ find_split_point (loc, insn)
if (BITS_BIG_ENDIAN)
pos = GET_MODE_BITSIZE (mode) - len - pos;
- if ((unsigned HOST_WIDE_INT) src == mask)
+ if (src == mask)
SUBST (SET_SRC (x),
gen_binary (IOR, mode, dest, GEN_INT (src << pos)));
else
SUBST (SET_SRC (x),
gen_binary (IOR, mode,
- gen_binary (AND, mode, dest,
- GEN_INT (~ (mask << pos)
+ gen_binary (AND, mode, dest,
+ GEN_INT (~(mask << pos)
& GET_MODE_MASK (mode))),
GEN_INT (src << pos)));
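The pair of SUBSTs above encodes the usual read-modify-write bit-field store: clear the LEN-bit field at POS with an AND mask, then deposit the shifted source with IOR; when the source is all ones the AND can be skipped. A standalone sketch of the same arithmetic (the function name and field geometry are invented for the example):

unsigned int
insert_field (dest, src)
     unsigned int dest, src;
{
  unsigned int len = 8, pos = 4;	/* example field geometry */
  unsigned int mask = (1U << len) - 1;	/* 0xff */

  if (src == mask)
    /* All-ones source: the IOR alone already sets the whole field.  */
    return dest | (src << pos);
  return (dest & ~(mask << pos)) | (src << pos);
}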
@@ -2824,7 +3044,7 @@ find_split_point (loc, insn)
&& GET_CODE (XEXP (SET_SRC (x), 0)) == REG
&& (pos = exact_log2 (INTVAL (XEXP (SET_SRC (x), 1)))) >= 7
&& GET_CODE (SET_DEST (x)) == REG
- && (split = find_single_use (SET_DEST (x), insn, NULL_PTR)) != 0
+ && (split = find_single_use (SET_DEST (x), insn, (rtx*) 0)) != 0
&& (GET_CODE (*split) == EQ || GET_CODE (*split) == NE)
&& XEXP (*split, 0) == SET_DEST (x)
&& XEXP (*split, 1) == const0_rtx)
@@ -2842,7 +3062,7 @@ find_split_point (loc, insn)
case NE:
/* if STORE_FLAG_VALUE is -1, this is (NE X 0) and only one bit of X
- is known to be on, this can be converted into a NEG of a shift. */
+ is known to be on, this can be converted into a NEG of a shift. */
if (STORE_FLAG_VALUE == -1 && XEXP (SET_SRC (x), 1) == const0_rtx
&& GET_MODE (SET_SRC (x)) == GET_MODE (XEXP (SET_SRC (x), 0))
&& 1 <= (pos = exact_log2
@@ -2852,10 +3072,10 @@ find_split_point (loc, insn)
enum machine_mode mode = GET_MODE (XEXP (SET_SRC (x), 0));
SUBST (SET_SRC (x),
- gen_rtx_combine (NEG, mode,
- gen_rtx_combine (LSHIFTRT, mode,
- XEXP (SET_SRC (x), 0),
- GEN_INT (pos))));
+ gen_rtx_NEG (mode,
+ gen_rtx_LSHIFTRT (mode,
+ XEXP (SET_SRC (x), 0),
+ GEN_INT (pos))));
split = find_split_point (&SET_SRC (x), insn);
if (split && split != &SET_SRC (x))
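The NE rewrite above is easiest to check on values: if only bit POS of X can be nonzero, (lshiftrt X POS) is 0 or 1, and its negation is 0 or -1, which is exactly (ne X 0) under STORE_FLAG_VALUE == -1. A minimal self-check (modern prototype, POS chosen arbitrarily):

#include <assert.h>

int
main (void)
{
  unsigned int pos = 7, x;

  for (x = 0; x <= 1; x++)	/* X is either 0 or 1 << pos */
    {
      unsigned int v = x << pos;
      assert (-(int) (v >> pos) == (v != 0 ? -1 : 0));
    }
  return 0;
}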
@@ -2910,12 +3130,11 @@ find_split_point (loc, insn)
if (unsignedp && len <= 8)
{
SUBST (SET_SRC (x),
- gen_rtx_combine
- (AND, mode,
- gen_rtx_combine (LSHIFTRT, mode,
- gen_lowpart_for_combine (mode, inner),
- GEN_INT (pos)),
- GEN_INT (((HOST_WIDE_INT) 1 << len) - 1)));
+ gen_rtx_AND (mode,
+ gen_rtx_LSHIFTRT
+ (mode, gen_lowpart_for_combine (mode, inner),
+ GEN_INT (pos)),
+ GEN_INT (((HOST_WIDE_INT) 1 << len) - 1)));
split = find_split_point (&SET_SRC (x), insn);
if (split && split != &SET_SRC (x))
@@ -2924,12 +3143,12 @@ find_split_point (loc, insn)
else
{
SUBST (SET_SRC (x),
- gen_rtx_combine
+ gen_rtx_fmt_ee
(unsignedp ? LSHIFTRT : ASHIFTRT, mode,
- gen_rtx_combine (ASHIFT, mode,
- gen_lowpart_for_combine (mode, inner),
- GEN_INT (GET_MODE_BITSIZE (mode)
- - len - pos)),
+ gen_rtx_ASHIFT (mode,
+ gen_lowpart_for_combine (mode, inner),
+ GEN_INT (GET_MODE_BITSIZE (mode)
+ - len - pos)),
GEN_INT (GET_MODE_BITSIZE (mode) - len)));
split = find_split_point (&SET_SRC (x), insn);
@@ -2973,11 +3192,11 @@ find_split_point (loc, insn)
if (GET_CODE (XEXP (x, 0)) == NOT && GET_CODE (XEXP (x, 1)) == NOT)
{
SUBST (*loc,
- gen_rtx_combine (NOT, GET_MODE (x),
- gen_rtx_combine (code == IOR ? AND : IOR,
- GET_MODE (x),
- XEXP (XEXP (x, 0), 0),
- XEXP (XEXP (x, 1), 0))));
+ gen_rtx_NOT (GET_MODE (x),
+ gen_rtx_fmt_ee (code == IOR ? AND : IOR,
+ GET_MODE (x),
+ XEXP (XEXP (x, 0), 0),
+ XEXP (XEXP (x, 1), 0))));
return find_split_point (loc, insn);
}
@@ -3041,7 +3260,7 @@ find_split_point (loc, insn)
the caller can tell whether the result is valid.
`n_occurrences' is incremented each time FROM is replaced.
-
+
IN_DEST is non-zero if we are processing the SET_DEST of a SET.
UNIQUE_COPY is non-zero if each substitution must be unique. We do this
@@ -3049,14 +3268,14 @@ find_split_point (loc, insn)
static rtx
subst (x, from, to, in_dest, unique_copy)
- register rtx x, from, to;
+ rtx x, from, to;
int in_dest;
int unique_copy;
{
- register enum rtx_code code = GET_CODE (x);
+ enum rtx_code code = GET_CODE (x);
enum machine_mode op0_mode = VOIDmode;
- register char *fmt;
- register int len, i;
+ const char *fmt;
+ int len, i;
rtx new;
/* Two expressions are equal if they are identical copies of a shared
@@ -3075,7 +3294,7 @@ subst (x, from, to, in_dest, unique_copy)
}
/* If X and FROM are the same register but different modes, they will
- not have been seen as equal above. However, flow.c will make a
+ not have been seen as equal above. However, flow.c will make a
LOG_LINKS entry for that case. If we do nothing, we will try to
rerecognize our original insn and, when it succeeds, we will
delete the feeding insn, which is incorrect.
@@ -3124,7 +3343,7 @@ subst (x, from, to, in_dest, unique_copy)
for (i = XVECLEN (x, 0) - 1; i >= 1; i--)
{
rtx dest = SET_DEST (XVECEXP (x, 0, i));
-
+
if (GET_CODE (dest) != REG
&& GET_CODE (dest) != CC0
&& GET_CODE (dest) != PC)
@@ -3164,7 +3383,7 @@ subst (x, from, to, in_dest, unique_copy)
{
if (fmt[i] == 'E')
{
- register int j;
+ int j;
for (j = XVECLEN (x, i) - 1; j >= 0; j--)
{
if (COMBINE_RTX_EQUAL_P (XVECEXP (x, i, j), from))
@@ -3190,7 +3409,16 @@ subst (x, from, to, in_dest, unique_copy)
}
else if (fmt[i] == 'e')
{
- if (COMBINE_RTX_EQUAL_P (XEXP (x, i), from))
+ /* If this is a register being set, ignore it. */
+ new = XEXP (x, i);
+ if (in_dest
+ && (code == SUBREG || code == STRICT_LOW_PART
+ || code == ZERO_EXTRACT)
+ && i == 0
+ && GET_CODE (new) == REG)
+ ;
+
+ else if (COMBINE_RTX_EQUAL_P (XEXP (x, i), from))
{
/* In general, don't install a subreg involving two
modes not tieable. It can worsen register
@@ -3217,6 +3445,18 @@ subst (x, from, to, in_dest, unique_copy)
)
return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
+#ifdef CLASS_CANNOT_CHANGE_MODE
+ if (code == SUBREG
+ && GET_CODE (to) == REG
+ && REGNO (to) < FIRST_PSEUDO_REGISTER
+ && (TEST_HARD_REG_BIT
+ (reg_class_contents[(int) CLASS_CANNOT_CHANGE_MODE],
+ REGNO (to)))
+ && CLASS_CANNOT_CHANGE_MODE_P (GET_MODE (to),
+ GET_MODE (x)))
+ return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
+#endif
+
new = (unique_copy && n_occurrences ? copy_rtx (to) : to);
n_occurrences++;
}
@@ -3225,7 +3465,7 @@ subst (x, from, to, in_dest, unique_copy)
have gone inside a MEM, in which case we want to
simplify the address. We assume here that things that
are actually part of the destination have their inner
- parts in the first expression. This is true for SUBREG,
+ parts in the first expression. This is true for SUBREG,
STRICT_LOW_PART, and ZERO_EXTRACT, which are the only
things aside from REG and MEM that should appear in a
SET_DEST. */
@@ -3259,7 +3499,7 @@ subst (x, from, to, in_dest, unique_copy)
/* If X is sufficiently simple, don't bother trying to do anything
with it. */
if (code != CONST_INT && code != REG && code != CLOBBER)
- x = simplify_rtx (x, op0_mode, i == 3, in_dest);
+ x = combine_simplify_rtx (x, op0_mode, i == 3, in_dest);
if (GET_CODE (x) == code)
break;
@@ -3283,7 +3523,7 @@ subst (x, from, to, in_dest, unique_copy)
X is returned; IN_DEST is nonzero if we are inside a SET_DEST. */
static rtx
-simplify_rtx (x, op0_mode, last, in_dest)
+combine_simplify_rtx (x, op0_mode, last, in_dest)
rtx x;
enum machine_mode op0_mode;
int last;
@@ -3292,17 +3532,13 @@ simplify_rtx (x, op0_mode, last, in_dest)
enum rtx_code code = GET_CODE (x);
enum machine_mode mode = GET_MODE (x);
rtx temp;
+ rtx reversed;
int i;
/* If this is a commutative operation, put a constant last and a complex
expression first. We don't need to do this for comparisons here. */
if (GET_RTX_CLASS (code) == 'c'
- && ((CONSTANT_P (XEXP (x, 0)) && GET_CODE (XEXP (x, 1)) != CONST_INT)
- || (GET_RTX_CLASS (GET_CODE (XEXP (x, 0))) == 'o'
- && GET_RTX_CLASS (GET_CODE (XEXP (x, 1))) != 'o')
- || (GET_CODE (XEXP (x, 0)) == SUBREG
- && GET_RTX_CLASS (GET_CODE (SUBREG_REG (XEXP (x, 0)))) == 'o'
- && GET_RTX_CLASS (GET_CODE (XEXP (x, 1))) != 'o')))
+ && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
{
temp = XEXP (x, 0);
SUBST (XEXP (x, 0), XEXP (x, 1));
@@ -3350,7 +3586,7 @@ simplify_rtx (x, op0_mode, last, in_dest)
SUBST (XEXP (x, 0), gen_binary (PLUS, mode, new, temp));
}
- /* If this is a simple operation applied to an IF_THEN_ELSE, try
+ /* If this is a simple operation applied to an IF_THEN_ELSE, try
applying it to the arms of the IF_THEN_ELSE. This often simplifies
things. Check for cases where both arms are testing the same
condition.
@@ -3373,15 +3609,15 @@ simplify_rtx (x, op0_mode, last, in_dest)
&& (GET_RTX_CLASS (GET_CODE (SUBREG_REG (XEXP (x, 0))))
== 'o'))))))
{
- rtx cond, true, false;
+ rtx cond, true_rtx, false_rtx;
- cond = if_then_else_cond (x, &true, &false);
+ cond = if_then_else_cond (x, &true_rtx, &false_rtx);
if (cond != 0
/* If everything is a comparison, what we have is highly unlikely
to be simpler, so don't use it. */
&& ! (GET_RTX_CLASS (code) == '<'
- && (GET_RTX_CLASS (GET_CODE (true)) == '<'
- || GET_RTX_CLASS (GET_CODE (false)) == '<')))
+ && (GET_RTX_CLASS (GET_CODE (true_rtx)) == '<'
+ || GET_RTX_CLASS (GET_CODE (false_rtx)) == '<')))
{
rtx cop1 = const0_rtx;
enum rtx_code cond_code = simplify_comparison (NE, &cond, &cop1);
@@ -3389,42 +3625,54 @@ simplify_rtx (x, op0_mode, last, in_dest)
if (cond_code == NE && GET_RTX_CLASS (GET_CODE (cond)) == '<')
return x;
- /* Simplify the alternative arms; this may collapse the true and
+ /* Simplify the alternative arms; this may collapse the true and
false arms to store-flag values. */
- true = subst (true, pc_rtx, pc_rtx, 0, 0);
- false = subst (false, pc_rtx, pc_rtx, 0, 0);
-
- /* Restarting if we generate a store-flag expression will cause
- us to loop. Just drop through in this case. */
-
- /* If the result values are STORE_FLAG_VALUE and zero, we can
- just make the comparison operation. */
- if (true == const_true_rtx && false == const0_rtx)
- x = gen_binary (cond_code, mode, cond, cop1);
- else if (true == const0_rtx && false == const_true_rtx)
- x = gen_binary (reverse_condition (cond_code), mode, cond, cop1);
-
- /* Likewise, we can make the negate of a comparison operation
- if the result values are - STORE_FLAG_VALUE and zero. */
- else if (GET_CODE (true) == CONST_INT
- && INTVAL (true) == - STORE_FLAG_VALUE
- && false == const0_rtx)
- x = gen_unary (NEG, mode, mode,
- gen_binary (cond_code, mode, cond, cop1));
- else if (GET_CODE (false) == CONST_INT
- && INTVAL (false) == - STORE_FLAG_VALUE
- && true == const0_rtx)
- x = gen_unary (NEG, mode, mode,
- gen_binary (reverse_condition (cond_code),
- mode, cond, cop1));
- else
- return gen_rtx_IF_THEN_ELSE (mode,
- gen_binary (cond_code, VOIDmode,
- cond, cop1),
- true, false);
+ true_rtx = subst (true_rtx, pc_rtx, pc_rtx, 0, 0);
+ false_rtx = subst (false_rtx, pc_rtx, pc_rtx, 0, 0);
+
+ /* If true_rtx and false_rtx are not general_operands, an if_then_else
+ is unlikely to be simpler. */
+ if (general_operand (true_rtx, VOIDmode)
+ && general_operand (false_rtx, VOIDmode))
+ {
+ /* Restarting if we generate a store-flag expression will cause
+ us to loop. Just drop through in this case. */
+
+ /* If the result values are STORE_FLAG_VALUE and zero, we can
+ just make the comparison operation. */
+ if (true_rtx == const_true_rtx && false_rtx == const0_rtx)
+ x = gen_binary (cond_code, mode, cond, cop1);
+ else if (true_rtx == const0_rtx && false_rtx == const_true_rtx
+ && reverse_condition (cond_code) != UNKNOWN)
+ x = gen_binary (reverse_condition (cond_code),
+ mode, cond, cop1);
+
+ /* Likewise, we can make the negate of a comparison operation
+ if the result values are - STORE_FLAG_VALUE and zero. */
+ else if (GET_CODE (true_rtx) == CONST_INT
+ && INTVAL (true_rtx) == - STORE_FLAG_VALUE
+ && false_rtx == const0_rtx)
+ x = simplify_gen_unary (NEG, mode,
+ gen_binary (cond_code, mode, cond,
+ cop1),
+ mode);
+ else if (GET_CODE (false_rtx) == CONST_INT
+ && INTVAL (false_rtx) == - STORE_FLAG_VALUE
+ && true_rtx == const0_rtx)
+ x = simplify_gen_unary (NEG, mode,
+ gen_binary (reverse_condition
+ (cond_code),
+ mode, cond, cop1),
+ mode);
+ else
+ return gen_rtx_IF_THEN_ELSE (mode,
+ gen_binary (cond_code, VOIDmode,
+ cond, cop1),
+ true_rtx, false_rtx);
- code = GET_CODE (x);
- op0_mode = VOIDmode;
+ code = GET_CODE (x);
+ op0_mode = VOIDmode;
+ }
}
}
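The store-flag collapses above mirror familiar C identities: with STORE_FLAG_VALUE == 1, (COND ? 1 : 0) is the comparison itself, (COND ? 0 : 1) the reversed comparison, and the -STORE_FLAG_VALUE cases add an outer negation. A quick check of the three shapes:

#include <assert.h>

int
main (void)
{
  int a = 3, b = 7;

  assert ((a < b ? 1 : 0) == (a < b));		/* the comparison itself */
  assert ((a < b ? 0 : 1) == (a >= b));		/* reversed comparison */
  assert ((a < b ? -1 : 0) == -(a < b));	/* negated store flag */
  return 0;
}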
@@ -3437,12 +3685,25 @@ simplify_rtx (x, op0_mode, last, in_dest)
temp = simplify_unary_operation (code, mode, XEXP (x, 0), op0_mode);
break;
case '<':
- temp = simplify_relational_operation (code, op0_mode,
- XEXP (x, 0), XEXP (x, 1));
+ {
+ enum machine_mode cmp_mode = GET_MODE (XEXP (x, 0));
+ if (cmp_mode == VOIDmode)
+ {
+ cmp_mode = GET_MODE (XEXP (x, 1));
+ if (cmp_mode == VOIDmode)
+ cmp_mode = op0_mode;
+ }
+ temp = simplify_relational_operation (code, cmp_mode,
+ XEXP (x, 0), XEXP (x, 1));
+ }
#ifdef FLOAT_STORE_FLAG_VALUE
- if (temp != 0 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
- temp = ((temp == const0_rtx) ? CONST0_RTX (GET_MODE (x))
- : immed_real_const_1 (FLOAT_STORE_FLAG_VALUE, GET_MODE (x)));
+ if (temp != 0 && GET_MODE_CLASS (mode) == MODE_FLOAT)
+ {
+ if (temp == const0_rtx)
+ temp = CONST0_RTX (mode);
+ else
+ temp = immed_real_const_1 (FLOAT_STORE_FLAG_VALUE (mode), mode);
+ }
#endif
break;
case 'c':
@@ -3457,7 +3718,12 @@ simplify_rtx (x, op0_mode, last, in_dest)
}
if (temp)
- x = temp, code = GET_CODE (temp);
+ {
+ x = temp;
+ code = GET_CODE (temp);
+ op0_mode = VOIDmode;
+ mode = GET_MODE (temp);
+ }
/* First see if we can apply the inverse distributive law. */
if (code == PLUS || code == MINUS
@@ -3465,16 +3731,17 @@ simplify_rtx (x, op0_mode, last, in_dest)
{
x = apply_distributive_law (x);
code = GET_CODE (x);
+ op0_mode = VOIDmode;
}
/* If CODE is an associative operation not otherwise handled, see if we
can associate some operands. This can win if they are constants or
- if they are logically related (i.e. (a & b) & a. */
- if ((code == PLUS || code == MINUS
- || code == MULT || code == AND || code == IOR || code == XOR
- || code == DIV || code == UDIV
+ if they are logically related (i.e. (a & b) & a). */
+ if ((code == PLUS || code == MINUS || code == MULT || code == DIV
+ || code == AND || code == IOR || code == XOR
|| code == SMAX || code == SMIN || code == UMAX || code == UMIN)
- && INTEGRAL_MODE_P (mode))
+ && ((INTEGRAL_MODE_P (mode) && code != DIV)
+ || (flag_unsafe_math_optimizations && FLOAT_MODE_P (mode))))
{
if (GET_CODE (XEXP (x, 0)) == code)
{
@@ -3482,7 +3749,7 @@ simplify_rtx (x, op0_mode, last, in_dest)
rtx inner_op0 = XEXP (XEXP (x, 0), 1);
rtx inner_op1 = XEXP (x, 1);
rtx inner;
-
+
/* Make sure we pass the constant operand if any as the second
one if this is a commutative operation. */
if (CONSTANT_P (inner_op0) && GET_RTX_CLASS (code) == 'c')
@@ -3493,7 +3760,6 @@ simplify_rtx (x, op0_mode, last, in_dest)
}
inner = simplify_binary_operation (code == MINUS ? PLUS
: code == DIV ? MULT
- : code == UDIV ? MULT
: code,
mode, inner_op0, inner_op1);
@@ -3523,121 +3789,23 @@ simplify_rtx (x, op0_mode, last, in_dest)
break;
case SUBREG:
- /* (subreg:A (mem:B X) N) becomes a modified MEM unless the SUBREG
- is paradoxical. If we can't do that safely, then it becomes
- something nonsensical so that this combination won't take place. */
-
- if (GET_CODE (SUBREG_REG (x)) == MEM
- && (GET_MODE_SIZE (mode)
- <= GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
- {
- rtx inner = SUBREG_REG (x);
- int endian_offset = 0;
- /* Don't change the mode of the MEM
- if that would change the meaning of the address. */
- if (MEM_VOLATILE_P (SUBREG_REG (x))
- || mode_dependent_address_p (XEXP (inner, 0)))
- return gen_rtx_CLOBBER (mode, const0_rtx);
+ if (op0_mode == VOIDmode)
+ op0_mode = GET_MODE (SUBREG_REG (x));
- if (BYTES_BIG_ENDIAN)
- {
- if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
- endian_offset += UNITS_PER_WORD - GET_MODE_SIZE (mode);
- if (GET_MODE_SIZE (GET_MODE (inner)) < UNITS_PER_WORD)
- endian_offset -= (UNITS_PER_WORD
- - GET_MODE_SIZE (GET_MODE (inner)));
- }
- /* Note if the plus_constant doesn't make a valid address
- then this combination won't be accepted. */
- x = gen_rtx_MEM (mode,
- plus_constant (XEXP (inner, 0),
- (SUBREG_WORD (x) * UNITS_PER_WORD
- + endian_offset)));
- RTX_UNCHANGING_P (x) = RTX_UNCHANGING_P (inner);
- MEM_COPY_ATTRIBUTES (x, inner);
- return x;
- }
-
- /* If we are in a SET_DEST, these other cases can't apply. */
- if (in_dest)
- return x;
-
- /* Changing mode twice with SUBREG => just change it once,
- or not at all if changing back to starting mode. */
- if (GET_CODE (SUBREG_REG (x)) == SUBREG)
- {
- if (mode == GET_MODE (SUBREG_REG (SUBREG_REG (x)))
- && SUBREG_WORD (x) == 0 && SUBREG_WORD (SUBREG_REG (x)) == 0)
- return SUBREG_REG (SUBREG_REG (x));
-
- SUBST_INT (SUBREG_WORD (x),
- SUBREG_WORD (x) + SUBREG_WORD (SUBREG_REG (x)));
- SUBST (SUBREG_REG (x), SUBREG_REG (SUBREG_REG (x)));
- }
-
- /* SUBREG of a hard register => just change the register number
- and/or mode. If the hard register is not valid in that mode,
- suppress this combination. If the hard register is the stack,
- frame, or argument pointer, leave this as a SUBREG. */
-
- if (GET_CODE (SUBREG_REG (x)) == REG
- && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER
- && REGNO (SUBREG_REG (x)) != FRAME_POINTER_REGNUM
-#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
- && REGNO (SUBREG_REG (x)) != HARD_FRAME_POINTER_REGNUM
-#endif
-#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
- && REGNO (SUBREG_REG (x)) != ARG_POINTER_REGNUM
-#endif
- && REGNO (SUBREG_REG (x)) != STACK_POINTER_REGNUM)
- {
- if (HARD_REGNO_MODE_OK (REGNO (SUBREG_REG (x)) + SUBREG_WORD (x),
- mode))
- return gen_rtx_REG (mode,
- REGNO (SUBREG_REG (x)) + SUBREG_WORD (x));
- else
- return gen_rtx_CLOBBER (mode, const0_rtx);
- }
-
- /* For a constant, try to pick up the part we want. Handle a full
- word and low-order part. Only do this if we are narrowing
- the constant; if it is being widened, we have no idea what
- the extra bits will have been set to. */
-
- if (CONSTANT_P (SUBREG_REG (x)) && op0_mode != VOIDmode
- && GET_MODE_SIZE (mode) == UNITS_PER_WORD
- && GET_MODE_SIZE (op0_mode) > UNITS_PER_WORD
- && GET_MODE_CLASS (mode) == MODE_INT)
- {
- temp = operand_subword (SUBREG_REG (x), SUBREG_WORD (x),
- 0, op0_mode);
- if (temp)
- return temp;
- }
-
- /* If we want a subreg of a constant, at offset 0,
- take the low bits. On a little-endian machine, that's
- always valid. On a big-endian machine, it's valid
- only if the constant's mode fits in one word. Note that we
- cannot use subreg_lowpart_p since SUBREG_REG may be VOIDmode. */
+ /* simplify_subreg can't use gen_lowpart_for_combine. */
if (CONSTANT_P (SUBREG_REG (x))
- && ((GET_MODE_SIZE (op0_mode) <= UNITS_PER_WORD
- || ! WORDS_BIG_ENDIAN)
- ? SUBREG_WORD (x) == 0
- : (SUBREG_WORD (x)
- == ((GET_MODE_SIZE (op0_mode)
- - MAX (GET_MODE_SIZE (mode), UNITS_PER_WORD))
- / UNITS_PER_WORD)))
- && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (op0_mode)
- && (! WORDS_BIG_ENDIAN
- || GET_MODE_BITSIZE (op0_mode) <= BITS_PER_WORD))
+ && subreg_lowpart_offset (mode, op0_mode) == SUBREG_BYTE (x))
return gen_lowpart_for_combine (mode, SUBREG_REG (x));
- /* A paradoxical SUBREG of a VOIDmode constant is the same constant,
- since we are saying that the high bits don't matter. */
- if (CONSTANT_P (SUBREG_REG (x)) && GET_MODE (SUBREG_REG (x)) == VOIDmode
- && GET_MODE_SIZE (mode) > GET_MODE_SIZE (op0_mode))
- return SUBREG_REG (x);
+ if (GET_MODE_CLASS (GET_MODE (SUBREG_REG (x))) == MODE_CC)
+ break;
+ {
+ rtx temp;
+ temp = simplify_subreg (mode, SUBREG_REG (x), op0_mode,
+ SUBREG_BYTE (x));
+ if (temp)
+ return temp;
+ }
/* Note that we cannot do any narrowing for non-constants since
we might have been counting on using the fact that some bits were
@@ -3649,30 +3817,30 @@ simplify_rtx (x, op0_mode, last, in_dest)
/* (not (plus X -1)) can become (neg X). */
if (GET_CODE (XEXP (x, 0)) == PLUS
&& XEXP (XEXP (x, 0), 1) == constm1_rtx)
- return gen_rtx_combine (NEG, mode, XEXP (XEXP (x, 0), 0));
+ return gen_rtx_NEG (mode, XEXP (XEXP (x, 0), 0));
/* Similarly, (not (neg X)) is (plus X -1). */
if (GET_CODE (XEXP (x, 0)) == NEG)
- return gen_rtx_combine (PLUS, mode, XEXP (XEXP (x, 0), 0),
- constm1_rtx);
+ return gen_rtx_PLUS (mode, XEXP (XEXP (x, 0), 0), constm1_rtx);
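Both rewrites are the standard two's-complement identities ~(x - 1) == -x and ~(-x) == x - 1, checked here on unsigned arithmetic, where wraparound is well defined:

#include <assert.h>

int
main (void)
{
  unsigned int x = 42;

  assert (~(x - 1) == -x);	/* (not (plus X -1)) == (neg X) */
  assert (~(-x) == x - 1);	/* (not (neg X)) == (plus X -1) */
  return 0;
}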
- /* (not (xor X C)) for C constant is (xor X D) with D = ~ C. */
+ /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
if (GET_CODE (XEXP (x, 0)) == XOR
&& GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
&& (temp = simplify_unary_operation (NOT, mode,
XEXP (XEXP (x, 0), 1),
mode)) != 0)
return gen_binary (XOR, mode, XEXP (XEXP (x, 0), 0), temp);
-
+
/* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for operands
other than 1, but that is not valid. We could do a similar
simplification for (not (lshiftrt C X)) where C is just the sign bit,
but this doesn't seem common enough to bother with. */
if (GET_CODE (XEXP (x, 0)) == ASHIFT
&& XEXP (XEXP (x, 0), 0) == const1_rtx)
- return gen_rtx_ROTATE (mode, gen_unary (NOT, mode, mode, const1_rtx),
+ return gen_rtx_ROTATE (mode, simplify_gen_unary (NOT, mode,
+ const1_rtx, mode),
XEXP (XEXP (x, 0), 1));
-
+
if (GET_CODE (XEXP (x, 0)) == SUBREG
&& subreg_lowpart_p (XEXP (x, 0))
&& (GET_MODE_SIZE (GET_MODE (XEXP (x, 0)))
@@ -3683,70 +3851,64 @@ simplify_rtx (x, op0_mode, last, in_dest)
enum machine_mode inner_mode = GET_MODE (SUBREG_REG (XEXP (x, 0)));
x = gen_rtx_ROTATE (inner_mode,
- gen_unary (NOT, inner_mode, inner_mode,
- const1_rtx),
+ simplify_gen_unary (NOT, inner_mode, const1_rtx,
+ inner_mode),
XEXP (SUBREG_REG (XEXP (x, 0)), 1));
return gen_lowpart_for_combine (mode, x);
}
-
+
/* If STORE_FLAG_VALUE is -1, (not (comparison foo bar)) can be done by
reversing the comparison code if valid. */
if (STORE_FLAG_VALUE == -1
&& GET_RTX_CLASS (GET_CODE (XEXP (x, 0))) == '<'
- && reversible_comparison_p (XEXP (x, 0)))
- return gen_rtx_combine (reverse_condition (GET_CODE (XEXP (x, 0))),
- mode, XEXP (XEXP (x, 0), 0),
- XEXP (XEXP (x, 0), 1));
+ && (reversed = reversed_comparison (x, mode, XEXP (XEXP (x, 0), 0),
+ XEXP (XEXP (x, 0), 1))))
+ return reversed;
- /* (ashiftrt foo C) where C is the number of bits in FOO minus 1
- is (lt foo (const_int 0)) if STORE_FLAG_VALUE is -1, so we can
+ /* (not (ashiftrt foo C)) where C is the number of bits in FOO minus 1
+ is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1, so we can
perform the above simplification. */
if (STORE_FLAG_VALUE == -1
- && XEXP (x, 1) == const1_rtx
&& GET_CODE (XEXP (x, 0)) == ASHIFTRT
&& GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
&& INTVAL (XEXP (XEXP (x, 0), 1)) == GET_MODE_BITSIZE (mode) - 1)
- return gen_rtx_combine (GE, mode, XEXP (XEXP (x, 0), 0), const0_rtx);
+ return gen_rtx_GE (mode, XEXP (XEXP (x, 0), 0), const0_rtx);
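The rewrite above relies on the sign-smearing property of an arithmetic right shift by the mode width minus 1: it yields -1 for negative FOO and 0 otherwise, so its NOT is -1 exactly when FOO >= 0, matching (ge foo 0) under STORE_FLAG_VALUE == -1. Illustrative check (>> on a negative int is implementation-defined in ISO C but arithmetic on GCC's targets; the 31 assumes a 32-bit int):

#include <assert.h>

int
main (void)
{
  int foo;

  for (foo = -2; foo <= 2; foo++)
    assert (~(foo >> 31) == (foo >= 0 ? -1 : 0));
  return 0;
}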
/* Apply De Morgan's laws to reduce number of patterns for machines
- with negating logical insns (and-not, nand, etc.). If result has
- only one NOT, put it first, since that is how the patterns are
- coded. */
+ with negating logical insns (and-not, nand, etc.). If result has
+ only one NOT, put it first, since that is how the patterns are
+ coded. */
if (GET_CODE (XEXP (x, 0)) == IOR || GET_CODE (XEXP (x, 0)) == AND)
- {
- rtx in1 = XEXP (XEXP (x, 0), 0), in2 = XEXP (XEXP (x, 0), 1);
-
- if (GET_CODE (in1) == NOT)
- in1 = XEXP (in1, 0);
- else
- in1 = gen_rtx_combine (NOT, GET_MODE (in1), in1);
-
- if (GET_CODE (in2) == NOT)
- in2 = XEXP (in2, 0);
- else if (GET_CODE (in2) == CONST_INT
- && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
- in2 = GEN_INT (GET_MODE_MASK (mode) & ~ INTVAL (in2));
- else
- in2 = gen_rtx_combine (NOT, GET_MODE (in2), in2);
-
- if (GET_CODE (in2) == NOT)
- {
- rtx tem = in2;
- in2 = in1; in1 = tem;
- }
-
- return gen_rtx_combine (GET_CODE (XEXP (x, 0)) == IOR ? AND : IOR,
+ {
+ rtx in1 = XEXP (XEXP (x, 0), 0), in2 = XEXP (XEXP (x, 0), 1);
+ enum machine_mode op_mode;
+
+ op_mode = GET_MODE (in1);
+ in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
+
+ op_mode = GET_MODE (in2);
+ if (op_mode == VOIDmode)
+ op_mode = mode;
+ in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
+
+ if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
+ {
+ rtx tem = in2;
+ in2 = in1; in1 = tem;
+ }
+
+ return gen_rtx_fmt_ee (GET_CODE (XEXP (x, 0)) == IOR ? AND : IOR,
mode, in1, in2);
- }
+ }
break;
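The transformation above is De Morgan's laws, ~(A | B) == ~A & ~B and ~(A & B) == ~A | ~B, with a constant operand complemented in place and a lone NOT canonicalized to the first position. A minimal numeric check of the two laws:

#include <assert.h>

int
main (void)
{
  unsigned int a = 0x0f, b = 0x3c;

  assert (~(a | b) == (~a & ~b));
  assert (~(a & b) == (~a | ~b));
  return 0;
}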
case NEG:
/* (neg (plus X 1)) can become (not X). */
if (GET_CODE (XEXP (x, 0)) == PLUS
&& XEXP (XEXP (x, 0), 1) == const1_rtx)
- return gen_rtx_combine (NOT, mode, XEXP (XEXP (x, 0), 0));
+ return gen_rtx_NOT (mode, XEXP (XEXP (x, 0), 0));
/* Similarly, (neg (not X)) is (plus X 1). */
if (GET_CODE (XEXP (x, 0)) == NOT)
@@ -3757,7 +3919,7 @@ simplify_rtx (x, op0_mode, last, in_dest)
&& (! FLOAT_MODE_P (mode)
/* x-y != -(y-x) with IEEE floating point. */
|| TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
- || flag_fast_math))
+ || flag_unsafe_math_optimizations))
return gen_binary (MINUS, mode, XEXP (XEXP (x, 0), 1),
XEXP (XEXP (x, 0), 0));
@@ -3775,16 +3937,13 @@ simplify_rtx (x, op0_mode, last, in_dest)
temp = simplify_unary_operation (NEG, mode,
XEXP (XEXP (x, 0), 0), mode);
if (temp)
- {
- SUBST (XEXP (XEXP (x, 0), 0), temp);
- return XEXP (x, 0);
- }
+ return gen_binary (ASHIFT, mode, temp, XEXP (XEXP (x, 0), 1));
}
temp = expand_compound_operation (XEXP (x, 0));
/* For C equal to the width of MODE minus 1, (neg (ashiftrt X C)) can be
- replaced by (lshiftrt X C). This will convert
+ replaced by (lshiftrt X C). This will convert
(neg (sign_extract X 1 Y)) to (zero_extract X 1 Y). */
if (GET_CODE (temp) == ASHIFTRT
@@ -3848,8 +4007,8 @@ simplify_rtx (x, op0_mode, last, in_dest)
&& (GET_CODE (XEXP (XEXP (x, 0), 0)) == SIGN_EXTEND
|| GET_CODE (XEXP (XEXP (x, 0), 0)) == ZERO_EXTEND)
&& GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == mode)
- return gen_unary (GET_CODE (XEXP (x, 0)), mode, mode,
- XEXP (XEXP (XEXP (x, 0), 0), 0));
+ return simplify_gen_unary (GET_CODE (XEXP (x, 0)), mode,
+ XEXP (XEXP (XEXP (x, 0), 0), 0), mode);
/* (truncate:SI (subreg:DI (truncate:SI X) 0)) is
(truncate:SI x). */
@@ -3859,12 +4018,16 @@ simplify_rtx (x, op0_mode, last, in_dest)
return SUBREG_REG (XEXP (x, 0));
/* If we know that the value is already truncated, we can
- replace the TRUNCATE with a SUBREG if TRULY_NOOP_TRUNCATION is
- nonzero for the corresponding modes. */
+ replace the TRUNCATE with a SUBREG if TRULY_NOOP_TRUNCATION
+ is nonzero for the corresponding modes. But don't do this
+ for an (LSHIFTRT (MULT ...)) since this will cause problems
+ with the umulXi3_highpart patterns. */
if (TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0))))
&& num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
- >= GET_MODE_BITSIZE (mode) + 1)
+ >= GET_MODE_BITSIZE (mode) + 1
+ && ! (GET_CODE (XEXP (x, 0)) == LSHIFTRT
+ && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT))
return gen_lowpart_for_combine (mode, XEXP (x, 0));
/* A truncate of a comparison can be replaced with a subreg if
@@ -3873,14 +4036,14 @@ simplify_rtx (x, op0_mode, last, in_dest)
than HOST_BITS_PER_WIDE_INT. */
if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
&& GET_RTX_CLASS (GET_CODE (XEXP (x, 0))) == '<'
- && ((HOST_WIDE_INT) STORE_FLAG_VALUE &~ GET_MODE_MASK (mode)) == 0)
+ && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
return gen_lowpart_for_combine (mode, XEXP (x, 0));
/* Similarly, a truncate of a register whose value is a
comparison can be replaced with a subreg if STORE_FLAG_VALUE
permits. */
if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
- && ((HOST_WIDE_INT) STORE_FLAG_VALUE &~ GET_MODE_MASK (mode)) == 0
+ && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0
&& (temp = get_last_value (XEXP (x, 0)))
&& GET_RTX_CLASS (GET_CODE (temp)) == '<')
return gen_lowpart_for_combine (mode, XEXP (x, 0));
@@ -3891,7 +4054,7 @@ simplify_rtx (x, op0_mode, last, in_dest)
/* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
if (GET_CODE (XEXP (x, 0)) == FLOAT_EXTEND
&& GET_MODE (XEXP (XEXP (x, 0), 0)) == mode)
- return XEXP (XEXP (x, 0), 0);
+ return XEXP (XEXP (x, 0), 0);
/* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
(OP:SF foo:SF) if OP is NEG or ABS. */
@@ -3899,8 +4062,8 @@ simplify_rtx (x, op0_mode, last, in_dest)
|| GET_CODE (XEXP (x, 0)) == NEG)
&& GET_CODE (XEXP (XEXP (x, 0), 0)) == FLOAT_EXTEND
&& GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == mode)
- return gen_unary (GET_CODE (XEXP (x, 0)), mode, mode,
- XEXP (XEXP (XEXP (x, 0), 0), 0));
+ return simplify_gen_unary (GET_CODE (XEXP (x, 0)), mode,
+ XEXP (XEXP (XEXP (x, 0), 0), 0), mode);
/* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
is (float_truncate:SF x). */
@@ -3908,7 +4071,7 @@ simplify_rtx (x, op0_mode, last, in_dest)
&& subreg_lowpart_p (XEXP (x, 0))
&& GET_CODE (SUBREG_REG (XEXP (x, 0))) == FLOAT_TRUNCATE)
return SUBREG_REG (XEXP (x, 0));
- break;
+ break;
#ifdef HAVE_cc0
case COMPARE:
@@ -3921,7 +4084,7 @@ simplify_rtx (x, op0_mode, last, in_dest)
/* In IEEE floating point, x-0 is not the same as x. */
if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
|| ! FLOAT_MODE_P (GET_MODE (XEXP (x, 0)))
- || flag_fast_math)
+ || flag_unsafe_math_optimizations)
&& XEXP (x, 1) == CONST0_RTX (GET_MODE (XEXP (x, 0))))
return XEXP (x, 0);
break;
@@ -3961,12 +4124,14 @@ simplify_rtx (x, op0_mode, last, in_dest)
/* (plus (xor (and <foo> (const_int pow2 - 1)) <c>) <-c>)
when c is (const_int (pow2 + 1) / 2) is a sign extension of a
bit-field and can be replaced by either a sign_extend or a
- sign_extract. The `and' may be a zero_extend. */
+ sign_extract. The `and' may be a zero_extend and the two
+ <c>, -<c> constants may be reversed. */
if (GET_CODE (XEXP (x, 0)) == XOR
&& GET_CODE (XEXP (x, 1)) == CONST_INT
&& GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
- && INTVAL (XEXP (x, 1)) == - INTVAL (XEXP (XEXP (x, 0), 1))
- && (i = exact_log2 (INTVAL (XEXP (XEXP (x, 0), 1)))) >= 0
+ && INTVAL (XEXP (x, 1)) == -INTVAL (XEXP (XEXP (x, 0), 1))
+ && ((i = exact_log2 (INTVAL (XEXP (XEXP (x, 0), 1)))) >= 0
+ || (i = exact_log2 (INTVAL (XEXP (x, 1)))) >= 0)
&& GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
&& ((GET_CODE (XEXP (XEXP (x, 0), 0)) == AND
&& GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
@@ -3974,7 +4139,7 @@ simplify_rtx (x, op0_mode, last, in_dest)
== ((HOST_WIDE_INT) 1 << (i + 1)) - 1))
|| (GET_CODE (XEXP (XEXP (x, 0), 0)) == ZERO_EXTEND
&& (GET_MODE_BITSIZE (GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)))
- == i + 1))))
+ == (unsigned int) i + 1))))
return simplify_shift_const
(NULL_RTX, ASHIFTRT, mode,
simplify_shift_const (NULL_RTX, ASHIFT, mode,
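The pattern above is the classic xor-and-subtract sign extension: mask to n bits, flip the field's sign bit with XOR, then subtract the sign-bit value. Worked instance for n == 8, so the mask is 0xff and <c> is 0x80:

#include <assert.h>

int
main (void)
{
  int x = 0xf0;		/* 8-bit field with its sign bit set */

  /* ((x & 0xff) ^ 0x80) - 0x80 sign-extends the low 8 bits.  */
  assert ((((x & 0xff) ^ 0x80) - 0x80) == -16);
  return 0;
}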
@@ -3987,14 +4152,13 @@ simplify_rtx (x, op0_mode, last, in_dest)
is 1. This produces better code than the alternative immediately
below. */
if (GET_RTX_CLASS (GET_CODE (XEXP (x, 0))) == '<'
- && reversible_comparison_p (XEXP (x, 0))
&& ((STORE_FLAG_VALUE == -1 && XEXP (x, 1) == const1_rtx)
- || (STORE_FLAG_VALUE == 1 && XEXP (x, 1) == constm1_rtx)))
+ || (STORE_FLAG_VALUE == 1 && XEXP (x, 1) == constm1_rtx))
+ && (reversed = reversed_comparison (XEXP (x, 0), mode,
+ XEXP (XEXP (x, 0), 0),
+ XEXP (XEXP (x, 0), 1))))
return
- gen_unary (NEG, mode, mode,
- gen_binary (reverse_condition (GET_CODE (XEXP (x, 0))),
- mode, XEXP (XEXP (x, 0), 0),
- XEXP (XEXP (x, 0), 1)));
+ simplify_gen_unary (NEG, mode, reversed, mode);
/* If only the low-order bit of X is possibly nonzero, (plus x -1)
can become (ashiftrt (ashift (xor x 1) C) C) where C is
@@ -4007,8 +4171,7 @@ simplify_rtx (x, op0_mode, last, in_dest)
&& nonzero_bits (XEXP (x, 0), mode) == 1)
return simplify_shift_const (NULL_RTX, ASHIFTRT, mode,
simplify_shift_const (NULL_RTX, ASHIFT, mode,
- gen_rtx_combine (XOR, mode,
- XEXP (x, 0), const1_rtx),
+ gen_rtx_XOR (mode, XEXP (x, 0), const1_rtx),
GET_MODE_BITSIZE (mode) - 1),
GET_MODE_BITSIZE (mode) - 1);
@@ -4020,7 +4183,17 @@ simplify_rtx (x, op0_mode, last, in_dest)
if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
&& (nonzero_bits (XEXP (x, 0), mode)
& nonzero_bits (XEXP (x, 1), mode)) == 0)
- return gen_binary (IOR, mode, XEXP (x, 0), XEXP (x, 1));
+ {
+ /* Try to simplify the expression further. */
+ rtx tor = gen_binary (IOR, mode, XEXP (x, 0), XEXP (x, 1));
+ temp = combine_simplify_rtx (tor, mode, last, in_dest);
+
+ /* If we could, great. If not, do not go ahead with the IOR
+ replacement, since PLUS appears in many special purpose
+ address arithmetic instructions. */
+ if (GET_CODE (temp) != CLOBBER && temp != tor)
+ return temp;
+ }
break;
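The nonzero_bits test above is exactly what licenses the rewrite: when no bit position can be set in both operands, the addition can never generate a carry, so PLUS and IOR compute the same value. Illustration with hand-picked disjoint masks:

#include <assert.h>

int
main (void)
{
  unsigned int x = 0xf0, y = 0x0a;	/* disjoint nonzero bits */

  assert ((x & y) == 0);
  assert (x + y == (x | y));
  return 0;
}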
case MINUS:
@@ -4029,19 +4202,19 @@ simplify_rtx (x, op0_mode, last, in_dest)
if (STORE_FLAG_VALUE == 1
&& XEXP (x, 0) == const1_rtx
&& GET_RTX_CLASS (GET_CODE (XEXP (x, 1))) == '<'
- && reversible_comparison_p (XEXP (x, 1)))
- return gen_binary (reverse_condition (GET_CODE (XEXP (x, 1))),
- mode, XEXP (XEXP (x, 1), 0),
- XEXP (XEXP (x, 1), 1));
+ && (reversed = reversed_comparison (XEXP (x, 1), mode,
+ XEXP (XEXP (x, 1), 0),
+ XEXP (XEXP (x, 1), 1))))
+ return reversed;
/* (minus <foo> (and <foo> (const_int -pow2))) becomes
(and <foo> (const_int pow2-1)) */
if (GET_CODE (XEXP (x, 1)) == AND
&& GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
- && exact_log2 (- INTVAL (XEXP (XEXP (x, 1), 1))) >= 0
+ && exact_log2 (-INTVAL (XEXP (XEXP (x, 1), 1))) >= 0
&& rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
return simplify_and_const_int (NULL_RTX, mode, XEXP (x, 0),
- - INTVAL (XEXP (XEXP (x, 1), 1)) - 1);
+ -INTVAL (XEXP (XEXP (x, 1), 1)) - 1);
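The MINUS/AND rewrite above is the alignment-remainder identity: subtracting a value rounded down to a power-of-two boundary leaves just the low bits, i.e. foo - (foo & -pow2) == foo & (pow2 - 1). Quick check:

#include <assert.h>

int
main (void)
{
  unsigned int foo = 0x1237, pow2 = 0x10;

  assert (foo - (foo & -pow2) == (foo & (pow2 - 1)));
  return 0;
}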
/* Canonicalize (minus A (plus B C)) to (minus (minus A B) C) for
integers. */
@@ -4070,6 +4243,16 @@ simplify_rtx (x, op0_mode, last, in_dest)
if (GET_CODE (x) != MULT)
return x;
}
+      /* Try to simplify a*(b/c) as (a*b)/c.  */
+ if (FLOAT_MODE_P (mode) && flag_unsafe_math_optimizations
+ && GET_CODE (XEXP (x, 0)) == DIV)
+ {
+ rtx tem = simplify_binary_operation (MULT, mode,
+ XEXP (XEXP (x, 0), 0),
+ XEXP (x, 1));
+ if (tem)
+ return gen_binary (DIV, mode, tem, XEXP (XEXP (x, 0), 1));
+ }
break;
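The reassociation above is gated on flag_unsafe_math_optimizations because the two forms can round, overflow, or underflow differently. One concrete divergence (values invented for the example):

#include <stdio.h>

int
main (void)
{
  float a = 1e20f, b = 1e20f, c = 1e20f;

  /* a*(b/c) stays finite; (a*b)/c overflows to infinity first.  */
  printf ("%g vs %g\n", (double) (a * (b / c)), (double) ((a * b) / c));
  return 0;
}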
case UDIV:
@@ -4088,6 +4271,10 @@ simplify_rtx (x, op0_mode, last, in_dest)
case EQ: case NE:
case GT: case GTU: case GE: case GEU:
case LT: case LTU: case LE: case LEU:
+ case UNEQ: case LTGT:
+ case UNGT: case UNGE:
+ case UNLT: case UNLE:
+ case UNORDERED: case ORDERED:
/* If the first operand is a condition code, we can't do anything
with it. */
if (GET_CODE (XEXP (x, 0)) == COMPARE
@@ -4095,7 +4282,7 @@ simplify_rtx (x, op0_mode, last, in_dest)
#ifdef HAVE_cc0
&& XEXP (x, 0) != cc0_rtx
#endif
- ))
+ ))
{
rtx op0 = XEXP (x, 0);
rtx op1 = XEXP (x, 1);
@@ -4121,24 +4308,29 @@ simplify_rtx (x, op0_mode, last, in_dest)
if (STORE_FLAG_VALUE == 1
&& new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
- && op1 == const0_rtx && nonzero_bits (op0, mode) == 1)
+ && op1 == const0_rtx
+ && mode == GET_MODE (op0)
+ && nonzero_bits (op0, mode) == 1)
return gen_lowpart_for_combine (mode,
expand_compound_operation (op0));
else if (STORE_FLAG_VALUE == 1
&& new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
&& op1 == const0_rtx
+ && mode == GET_MODE (op0)
&& (num_sign_bit_copies (op0, mode)
== GET_MODE_BITSIZE (mode)))
{
op0 = expand_compound_operation (op0);
- return gen_unary (NEG, mode, mode,
- gen_lowpart_for_combine (mode, op0));
+ return simplify_gen_unary (NEG, mode,
+ gen_lowpart_for_combine (mode, op0),
+ mode);
}
else if (STORE_FLAG_VALUE == 1
&& new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
&& op1 == const0_rtx
+ && mode == GET_MODE (op0)
&& nonzero_bits (op0, mode) == 1)
{
op0 = expand_compound_operation (op0);
@@ -4150,6 +4342,7 @@ simplify_rtx (x, op0_mode, last, in_dest)
else if (STORE_FLAG_VALUE == 1
&& new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
&& op1 == const0_rtx
+ && mode == GET_MODE (op0)
&& (num_sign_bit_copies (op0, mode)
== GET_MODE_BITSIZE (mode)))
{
@@ -4170,28 +4363,33 @@ simplify_rtx (x, op0_mode, last, in_dest)
else if (STORE_FLAG_VALUE == -1
&& new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
&& op1 == const0_rtx
+ && mode == GET_MODE (op0)
&& nonzero_bits (op0, mode) == 1)
{
op0 = expand_compound_operation (op0);
- return gen_unary (NEG, mode, mode,
- gen_lowpart_for_combine (mode, op0));
+ return simplify_gen_unary (NEG, mode,
+ gen_lowpart_for_combine (mode, op0),
+ mode);
}
else if (STORE_FLAG_VALUE == -1
&& new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
&& op1 == const0_rtx
+ && mode == GET_MODE (op0)
&& (num_sign_bit_copies (op0, mode)
== GET_MODE_BITSIZE (mode)))
{
op0 = expand_compound_operation (op0);
- return gen_unary (NOT, mode, mode,
- gen_lowpart_for_combine (mode, op0));
+ return simplify_gen_unary (NOT, mode,
+ gen_lowpart_for_combine (mode, op0),
+ mode);
}
/* If X is 0/1, (eq X 0) is X-1. */
else if (STORE_FLAG_VALUE == -1
&& new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
&& op1 == const0_rtx
+ && mode == GET_MODE (op0)
&& nonzero_bits (op0, mode) == 1)
{
op0 = expand_compound_operation (op0);
@@ -4222,15 +4420,15 @@ simplify_rtx (x, op0_mode, last, in_dest)
/* If the code changed, return a whole new comparison. */
if (new_code != code)
- return gen_rtx_combine (new_code, mode, op0, op1);
+ return gen_rtx_fmt_ee (new_code, mode, op0, op1);
- /* Otherwise, keep this operation, but maybe change its operands.
+ /* Otherwise, keep this operation, but maybe change its operands.
This also converts (ne (compare FOO BAR) 0) to (ne FOO BAR). */
SUBST (XEXP (x, 0), op0);
SUBST (XEXP (x, 1), op1);
}
break;
-
+
case IF_THEN_ELSE:
return simplify_if_then_else (x);
@@ -4252,7 +4450,7 @@ simplify_rtx (x, op0_mode, last, in_dest)
case XOR:
return simplify_logical (x, last);
- case ABS:
+ case ABS:
/* (abs (neg <foo>)) -> (abs <foo>) */
if (GET_CODE (XEXP (x, 0)) == NEG)
SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0));
@@ -4272,10 +4470,9 @@ simplify_rtx (x, op0_mode, last, in_dest)
== 0)))
return XEXP (x, 0);
-
/* If operand is known to be only -1 or 0, convert ABS to NEG. */
if (num_sign_bit_copies (XEXP (x, 0), mode) == GET_MODE_BITSIZE (mode))
- return gen_rtx_combine (NEG, mode, XEXP (x, 0));
+ return gen_rtx_NEG (mode, XEXP (x, 0));
break;
@@ -4299,14 +4496,14 @@ simplify_rtx (x, op0_mode, last, in_dest)
case ROTATERT:
/* If this is a shift by a constant amount, simplify it. */
if (GET_CODE (XEXP (x, 1)) == CONST_INT)
- return simplify_shift_const (x, code, mode, XEXP (x, 0),
+ return simplify_shift_const (x, code, mode, XEXP (x, 0),
INTVAL (XEXP (x, 1)));
#ifdef SHIFT_COUNT_TRUNCATED
else if (SHIFT_COUNT_TRUNCATED && GET_CODE (XEXP (x, 1)) != REG)
SUBST (XEXP (x, 1),
force_to_mode (XEXP (x, 1), GET_MODE (x),
- ((HOST_WIDE_INT) 1
+ ((HOST_WIDE_INT) 1
<< exact_log2 (GET_MODE_BITSIZE (GET_MODE (x))))
- 1,
NULL_RTX, 0));
@@ -4314,6 +4511,45 @@ simplify_rtx (x, op0_mode, last, in_dest)
break;
+ case VEC_SELECT:
+ {
+ rtx op0 = XEXP (x, 0);
+ rtx op1 = XEXP (x, 1);
+ int len;
+
+ if (GET_CODE (op1) != PARALLEL)
+ abort ();
+ len = XVECLEN (op1, 0);
+ if (len == 1
+ && GET_CODE (XVECEXP (op1, 0, 0)) == CONST_INT
+ && GET_CODE (op0) == VEC_CONCAT)
+ {
+ int offset = INTVAL (XVECEXP (op1, 0, 0)) * GET_MODE_SIZE (GET_MODE (x));
+
+ /* Try to find the element in the VEC_CONCAT. */
+ for (;;)
+ {
+ if (GET_MODE (op0) == GET_MODE (x))
+ return op0;
+ if (GET_CODE (op0) == VEC_CONCAT)
+ {
+ HOST_WIDE_INT op0_size = GET_MODE_SIZE (GET_MODE (XEXP (op0, 0)));
+		    /* Descend into whichever half of the VEC_CONCAT
+		       contains the element at this byte offset.  */
+		    if (offset < op0_size)
+		      op0 = XEXP (op0, 0);
+ else
+ {
+ offset -= op0_size;
+ op0 = XEXP (op0, 1);
+ }
+ }
+ else
+ break;
+ }
+ }
+ }
+
+ break;
+
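The loop above descends a tree of VEC_CONCATs: at each level the byte offset either falls inside the first operand or is rebased past it into the second. A scalar sketch of the same walk (the struct and all names are invented for illustration):

struct concat { int is_leaf; int size; struct concat *lo, *hi; };

static struct concat *
find_element (v, offset, want_size)
     struct concat *v;
     int offset, want_size;
{
  for (;;)
    {
      if (v->size == want_size)
	return v;			/* sizes match: found the element */
      if (v->is_leaf)
	return 0;			/* cannot descend any further */
      if (offset < v->lo->size)		/* element lies in the first half */
	v = v->lo;
      else
	{
	  offset -= v->lo->size;	/* rebase into the second half */
	  v = v->hi;
	}
    }
}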
default:
break;
}
@@ -4329,33 +4565,37 @@ simplify_if_then_else (x)
{
enum machine_mode mode = GET_MODE (x);
rtx cond = XEXP (x, 0);
- rtx true = XEXP (x, 1);
- rtx false = XEXP (x, 2);
+ rtx true_rtx = XEXP (x, 1);
+ rtx false_rtx = XEXP (x, 2);
enum rtx_code true_code = GET_CODE (cond);
int comparison_p = GET_RTX_CLASS (true_code) == '<';
rtx temp;
int i;
+ enum rtx_code false_code;
+ rtx reversed;
/* Simplify storing of the truth value. */
- if (comparison_p && true == const_true_rtx && false == const0_rtx)
+ if (comparison_p && true_rtx == const_true_rtx && false_rtx == const0_rtx)
return gen_binary (true_code, mode, XEXP (cond, 0), XEXP (cond, 1));
-
+
/* Also when the truth value has to be reversed. */
- if (comparison_p && reversible_comparison_p (cond)
- && true == const0_rtx && false == const_true_rtx)
- return gen_binary (reverse_condition (true_code),
- mode, XEXP (cond, 0), XEXP (cond, 1));
+ if (comparison_p
+ && true_rtx == const0_rtx && false_rtx == const_true_rtx
+ && (reversed = reversed_comparison (cond, mode, XEXP (cond, 0),
+ XEXP (cond, 1))))
+ return reversed;
/* Sometimes we can simplify the arm of an IF_THEN_ELSE if a register used
in it is being compared against certain values. Get the true and false
comparisons and see if that says anything about the value of each arm. */
- if (comparison_p && reversible_comparison_p (cond)
+ if (comparison_p
+ && ((false_code = combine_reversed_comparison_code (cond))
+ != UNKNOWN)
&& GET_CODE (XEXP (cond, 0)) == REG)
{
HOST_WIDE_INT nzb;
rtx from = XEXP (cond, 0);
- enum rtx_code false_code = reverse_condition (true_code);
rtx true_val = XEXP (cond, 1);
rtx false_val = true_val;
int swapped = 0;
@@ -4365,7 +4605,7 @@ simplify_if_then_else (x)
if (false_code == EQ)
{
swapped = 1, true_code = EQ, false_code = NE;
- temp = true, true = false, false = temp;
+ temp = true_rtx, true_rtx = false_rtx, false_rtx = temp;
}
/* If we are comparing against zero and the expression being tested has
@@ -4384,18 +4624,21 @@ simplify_if_then_else (x)
branch and it is used in the arm. Be careful due to the potential
of locally-shared RTL. */
- if (reg_mentioned_p (from, true))
- true = subst (known_cond (copy_rtx (true), true_code, from, true_val),
+ if (reg_mentioned_p (from, true_rtx))
+ true_rtx = subst (known_cond (copy_rtx (true_rtx), true_code,
+ from, true_val),
pc_rtx, pc_rtx, 0, 0);
- if (reg_mentioned_p (from, false))
- false = subst (known_cond (copy_rtx (false), false_code,
+ if (reg_mentioned_p (from, false_rtx))
+ false_rtx = subst (known_cond (copy_rtx (false_rtx), false_code,
from, false_val),
pc_rtx, pc_rtx, 0, 0);
- SUBST (XEXP (x, 1), swapped ? false : true);
- SUBST (XEXP (x, 2), swapped ? true : false);
+ SUBST (XEXP (x, 1), swapped ? false_rtx : true_rtx);
+ SUBST (XEXP (x, 2), swapped ? true_rtx : false_rtx);
- true = XEXP (x, 1), false = XEXP (x, 2), true_code = GET_CODE (cond);
+ true_rtx = XEXP (x, 1);
+ false_rtx = XEXP (x, 2);
+ true_code = GET_CODE (cond);
}
/* If we have (if_then_else FOO (pc) (label_ref BAR)) and FOO can be
@@ -4404,28 +4647,30 @@ simplify_if_then_else (x)
arm, the false arm is the same as the first operand of the comparison, or
the false arm is more complicated than the true arm. */
- if (comparison_p && reversible_comparison_p (cond)
- && (true == pc_rtx
- || (CONSTANT_P (true)
- && GET_CODE (false) != CONST_INT && false != pc_rtx)
- || true == const0_rtx
- || (GET_RTX_CLASS (GET_CODE (true)) == 'o'
- && GET_RTX_CLASS (GET_CODE (false)) != 'o')
- || (GET_CODE (true) == SUBREG
- && GET_RTX_CLASS (GET_CODE (SUBREG_REG (true))) == 'o'
- && GET_RTX_CLASS (GET_CODE (false)) != 'o')
- || reg_mentioned_p (true, false)
- || rtx_equal_p (false, XEXP (cond, 0))))
- {
- true_code = reverse_condition (true_code);
+ if (comparison_p
+ && combine_reversed_comparison_code (cond) != UNKNOWN
+ && (true_rtx == pc_rtx
+ || (CONSTANT_P (true_rtx)
+ && GET_CODE (false_rtx) != CONST_INT && false_rtx != pc_rtx)
+ || true_rtx == const0_rtx
+ || (GET_RTX_CLASS (GET_CODE (true_rtx)) == 'o'
+ && GET_RTX_CLASS (GET_CODE (false_rtx)) != 'o')
+ || (GET_CODE (true_rtx) == SUBREG
+ && GET_RTX_CLASS (GET_CODE (SUBREG_REG (true_rtx))) == 'o'
+ && GET_RTX_CLASS (GET_CODE (false_rtx)) != 'o')
+ || reg_mentioned_p (true_rtx, false_rtx)
+ || rtx_equal_p (false_rtx, XEXP (cond, 0))))
+ {
+ true_code = reversed_comparison_code (cond, NULL);
SUBST (XEXP (x, 0),
- gen_binary (true_code, GET_MODE (cond), XEXP (cond, 0),
- XEXP (cond, 1)));
+ reversed_comparison (cond, GET_MODE (cond), XEXP (cond, 0),
+ XEXP (cond, 1)));
- SUBST (XEXP (x, 1), false);
- SUBST (XEXP (x, 2), true);
+ SUBST (XEXP (x, 1), false_rtx);
+ SUBST (XEXP (x, 2), true_rtx);
- temp = true, true = false, false = temp, cond = XEXP (x, 0);
+ temp = true_rtx, true_rtx = false_rtx, false_rtx = temp;
+ cond = XEXP (x, 0);
/* It is possible that the conditional has been simplified out. */
true_code = GET_CODE (cond);
@@ -4434,64 +4679,69 @@ simplify_if_then_else (x)
/* If the two arms are identical, we don't need the comparison. */
- if (rtx_equal_p (true, false) && ! side_effects_p (cond))
- return true;
+ if (rtx_equal_p (true_rtx, false_rtx) && ! side_effects_p (cond))
+ return true_rtx;
/* Convert a == b ? b : a to "a". */
if (true_code == EQ && ! side_effects_p (cond)
- && rtx_equal_p (XEXP (cond, 0), false)
- && rtx_equal_p (XEXP (cond, 1), true))
- return false;
+ && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
+ && rtx_equal_p (XEXP (cond, 0), false_rtx)
+ && rtx_equal_p (XEXP (cond, 1), true_rtx))
+ return false_rtx;
else if (true_code == NE && ! side_effects_p (cond)
- && rtx_equal_p (XEXP (cond, 0), true)
- && rtx_equal_p (XEXP (cond, 1), false))
- return true;
+ && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
+ && rtx_equal_p (XEXP (cond, 0), true_rtx)
+ && rtx_equal_p (XEXP (cond, 1), false_rtx))
+ return true_rtx;
/* Look for cases where we have (abs x) or (neg (abs X)). */
if (GET_MODE_CLASS (mode) == MODE_INT
- && GET_CODE (false) == NEG
- && rtx_equal_p (true, XEXP (false, 0))
+ && GET_CODE (false_rtx) == NEG
+ && rtx_equal_p (true_rtx, XEXP (false_rtx, 0))
&& comparison_p
- && rtx_equal_p (true, XEXP (cond, 0))
- && ! side_effects_p (true))
+ && rtx_equal_p (true_rtx, XEXP (cond, 0))
+ && ! side_effects_p (true_rtx))
switch (true_code)
{
case GT:
case GE:
- return gen_unary (ABS, mode, mode, true);
+ return simplify_gen_unary (ABS, mode, true_rtx, mode);
case LT:
case LE:
- return gen_unary (NEG, mode, mode, gen_unary (ABS, mode, mode, true));
- default:
- break;
+ return
+ simplify_gen_unary (NEG, mode,
+ simplify_gen_unary (ABS, mode, true_rtx, mode),
+ mode);
+ default:
+ break;
}
/* Look for MIN or MAX. */
- if ((! FLOAT_MODE_P (mode) || flag_fast_math)
+ if ((! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
&& comparison_p
- && rtx_equal_p (XEXP (cond, 0), true)
- && rtx_equal_p (XEXP (cond, 1), false)
+ && rtx_equal_p (XEXP (cond, 0), true_rtx)
+ && rtx_equal_p (XEXP (cond, 1), false_rtx)
&& ! side_effects_p (cond))
switch (true_code)
{
case GE:
case GT:
- return gen_binary (SMAX, mode, true, false);
+ return gen_binary (SMAX, mode, true_rtx, false_rtx);
case LE:
case LT:
- return gen_binary (SMIN, mode, true, false);
+ return gen_binary (SMIN, mode, true_rtx, false_rtx);
case GEU:
case GTU:
- return gen_binary (UMAX, mode, true, false);
+ return gen_binary (UMAX, mode, true_rtx, false_rtx);
case LEU:
case LTU:
- return gen_binary (UMIN, mode, true, false);
+ return gen_binary (UMIN, mode, true_rtx, false_rtx);
default:
break;
}
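The switch above recognizes the four C-level min/max idioms. In source form (modern prototypes for brevity):

int smax (int a, int b) { return a >= b ? a : b; }
int smin (int a, int b) { return a <= b ? a : b; }
unsigned int umax (unsigned int a, unsigned int b) { return a > b ? a : b; }
unsigned int umin (unsigned int a, unsigned int b) { return a < b ? a : b; }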
-
+
/* If we have (if_then_else COND (OP Z C1) Z) and OP is an identity when its
second operand is zero, this can be done as (OP Z (mult COND C2)) where
C2 = C1 * STORE_FLAG_VALUE. Similarly if OP has an outer ZERO_EXTEND or
@@ -4502,13 +4752,13 @@ simplify_if_then_else (x)
if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
&& comparison_p && mode != VOIDmode && ! side_effects_p (x))
{
- rtx t = make_compound_operation (true, SET);
- rtx f = make_compound_operation (false, SET);
+ rtx t = make_compound_operation (true_rtx, SET);
+ rtx f = make_compound_operation (false_rtx, SET);
rtx cond_op0 = XEXP (cond, 0);
rtx cond_op1 = XEXP (cond, 1);
- enum rtx_code op, extend_op = NIL;
+ enum rtx_code op = NIL, extend_op = NIL;
enum machine_mode m = mode;
- rtx z = 0, c1;
+ rtx z = 0, c1 = NULL_RTX;
if ((GET_CODE (t) == PLUS || GET_CODE (t) == MINUS
|| GET_CODE (t) == IOR || GET_CODE (t) == XOR
@@ -4570,7 +4820,7 @@ simplify_if_then_else (x)
&& subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
&& rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
&& ((nonzero_bits (f, GET_MODE (f))
- & ~ GET_MODE_MASK (GET_MODE (XEXP (XEXP (t, 0), 0))))
+ & ~GET_MODE_MASK (GET_MODE (XEXP (XEXP (t, 0), 0))))
== 0))
{
c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
@@ -4586,14 +4836,14 @@ simplify_if_then_else (x)
&& subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
&& rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
&& ((nonzero_bits (f, GET_MODE (f))
- & ~ GET_MODE_MASK (GET_MODE (XEXP (XEXP (t, 0), 1))))
+ & ~GET_MODE_MASK (GET_MODE (XEXP (XEXP (t, 0), 1))))
== 0))
{
c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
extend_op = ZERO_EXTEND;
m = GET_MODE (XEXP (t, 0));
}
-
+
if (z)
{
temp = subst (gen_binary (true_code, m, cond_op0, cond_op1),
@@ -4604,7 +4854,7 @@ simplify_if_then_else (x)
temp = gen_binary (op, m, gen_lowpart_for_combine (m, z), temp);
if (extend_op != NIL)
- temp = gen_unary (extend_op, mode, m, temp);
+ temp = simplify_gen_unary (extend_op, mode, temp, m);
return temp;
}
@@ -4616,12 +4866,12 @@ simplify_if_then_else (x)
can actually do this more generally, but it doesn't seem worth it. */
if (true_code == NE && XEXP (cond, 1) == const0_rtx
- && false == const0_rtx && GET_CODE (true) == CONST_INT
+ && false_rtx == const0_rtx && GET_CODE (true_rtx) == CONST_INT
&& ((1 == nonzero_bits (XEXP (cond, 0), mode)
- && (i = exact_log2 (INTVAL (true))) >= 0)
+ && (i = exact_log2 (INTVAL (true_rtx))) >= 0)
|| ((num_sign_bit_copies (XEXP (cond, 0), mode)
== GET_MODE_BITSIZE (mode))
- && (i = exact_log2 (- INTVAL (true))) >= 0)))
+ && (i = exact_log2 (-INTVAL (true_rtx))) >= 0)))
return
simplify_shift_const (NULL_RTX, ASHIFT, mode,
gen_lowpart_for_combine (mode, XEXP (cond, 0)), i);
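The final shift rewrite turns a conditional load of a power of two into a shift: when XEXP (cond, 0) is known to be 0 or 1, (X != 0 ? 2^i : 0) is simply X << i. Quick check for i == 3:

#include <assert.h>

int
main (void)
{
  unsigned int x;

  for (x = 0; x <= 1; x++)	/* X is known to be 0 or 1 */
    assert ((x != 0 ? 8u : 0u) == x << 3);
  return 0;
}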
@@ -4651,7 +4901,10 @@ simplify_set (x)
low-order bits. */
if (GET_MODE_CLASS (mode) == MODE_INT)
- src = force_to_mode (src, mode, GET_MODE_MASK (mode), NULL_RTX, 0);
+ {
+ src = force_to_mode (src, mode, ~(HOST_WIDE_INT) 0, NULL_RTX, 0);
+ SUBST (SET_SRC (x), src);
+ }
/* If we are setting CC0 or if the source is a COMPARE, look for the use of
the comparison result and try to simplify it unless we already have used
@@ -4694,7 +4947,7 @@ simplify_set (x)
which case we can safely change its mode. */
if (compare_mode != GET_MODE (dest))
{
- int regno = REGNO (dest);
+ unsigned int regno = REGNO (dest);
rtx new_dest = gen_rtx_REG (compare_mode, regno);
if (regno < FIRST_PSEUDO_REGISTER
@@ -4718,8 +4971,8 @@ simplify_set (x)
{
unsigned HOST_WIDE_INT mask;
- SUBST (*cc_use, gen_rtx_combine (new_code, GET_MODE (*cc_use),
- dest, const0_rtx));
+ SUBST (*cc_use, gen_rtx_fmt_ee (new_code, GET_MODE (*cc_use),
+ dest, const0_rtx));
/* If the only change we made was to change an EQ into an NE or
vice versa, OP0 has only one bit that might be nonzero, and OP1
@@ -4766,8 +5019,7 @@ simplify_set (x)
correct mode, we need one. */
if (GET_CODE (src) != COMPARE || GET_MODE (src) != compare_mode)
{
- SUBST (SET_SRC (x),
- gen_rtx_combine (COMPARE, compare_mode, op0, op1));
+ SUBST (SET_SRC (x), gen_rtx_COMPARE (compare_mode, op0, op1));
src = SET_SRC (x);
}
else
@@ -4787,7 +5039,7 @@ simplify_set (x)
/* If we have (set x (subreg:m1 (op:m2 ...) 0)) with OP being some operation,
and X being a REG or (subreg (reg)), we may be able to convert this to
- (set (subreg:m2 x) (op)).
+ (set (subreg:m2 x) (op)).
We can always do this if M1 is narrower than M2 because that means that
we only care about the low bits of the result.
@@ -4796,7 +5048,7 @@ simplify_set (x)
perform a narrower operation than requested since the high-order bits will
be undefined. On machine where it is defined, this transformation is safe
as long as M1 and M2 have the same number of words. */
-
+
if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
&& GET_RTX_CLASS (GET_CODE (SUBREG_REG (src))) != 'o'
&& (((GET_MODE_SIZE (GET_MODE (src)) + (UNITS_PER_WORD - 1))
@@ -4807,14 +5059,14 @@ simplify_set (x)
&& (GET_MODE_SIZE (GET_MODE (src))
< GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))))
#endif
-#ifdef CLASS_CANNOT_CHANGE_SIZE
+#ifdef CLASS_CANNOT_CHANGE_MODE
&& ! (GET_CODE (dest) == REG && REGNO (dest) < FIRST_PSEUDO_REGISTER
&& (TEST_HARD_REG_BIT
- (reg_class_contents[(int) CLASS_CANNOT_CHANGE_SIZE],
+ (reg_class_contents[(int) CLASS_CANNOT_CHANGE_MODE],
REGNO (dest)))
- && (GET_MODE_SIZE (GET_MODE (src))
- != GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))))
-#endif
+ && CLASS_CANNOT_CHANGE_MODE_P (GET_MODE (src),
+ GET_MODE (SUBREG_REG (src))))
+#endif
&& (GET_CODE (dest) == REG
|| (GET_CODE (dest) == SUBREG
&& GET_CODE (SUBREG_REG (dest)) == REG)))
@@ -4834,14 +5086,14 @@ simplify_set (x)
if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
&& LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (src))) != NIL
- && SUBREG_WORD (src) == 0
+ && SUBREG_BYTE (src) == 0
&& (GET_MODE_SIZE (GET_MODE (src))
> GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))))
&& GET_CODE (SUBREG_REG (src)) == MEM)
{
SUBST (SET_SRC (x),
- gen_rtx_combine (LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (src))),
- GET_MODE (src), XEXP (src, 0)));
+ gen_rtx (LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (src))),
+ GET_MODE (src), SUBREG_REG (src)));
src = SET_SRC (x);
}
@@ -4867,29 +5119,32 @@ simplify_set (x)
== GET_MODE_BITSIZE (GET_MODE (XEXP (XEXP (src, 0), 0))))
&& ! side_effects_p (src))
{
- rtx true = (GET_CODE (XEXP (src, 0)) == NE
+ rtx true_rtx = (GET_CODE (XEXP (src, 0)) == NE
? XEXP (src, 1) : XEXP (src, 2));
- rtx false = (GET_CODE (XEXP (src, 0)) == NE
+ rtx false_rtx = (GET_CODE (XEXP (src, 0)) == NE
? XEXP (src, 2) : XEXP (src, 1));
rtx term1 = const0_rtx, term2, term3;
- if (GET_CODE (true) == IOR && rtx_equal_p (XEXP (true, 0), false))
- term1 = false, true = XEXP (true, 1), false = const0_rtx;
- else if (GET_CODE (true) == IOR
- && rtx_equal_p (XEXP (true, 1), false))
- term1 = false, true = XEXP (true, 0), false = const0_rtx;
- else if (GET_CODE (false) == IOR
- && rtx_equal_p (XEXP (false, 0), true))
- term1 = true, false = XEXP (false, 1), true = const0_rtx;
- else if (GET_CODE (false) == IOR
- && rtx_equal_p (XEXP (false, 1), true))
- term1 = true, false = XEXP (false, 0), true = const0_rtx;
-
- term2 = gen_binary (AND, GET_MODE (src), XEXP (XEXP (src, 0), 0), true);
+      if (GET_CODE (true_rtx) == IOR
+	  && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
+	term1 = false_rtx, true_rtx = XEXP (true_rtx, 1), false_rtx = const0_rtx;
+      else if (GET_CODE (true_rtx) == IOR
+	       && rtx_equal_p (XEXP (true_rtx, 1), false_rtx))
+	term1 = false_rtx, true_rtx = XEXP (true_rtx, 0), false_rtx = const0_rtx;
+      else if (GET_CODE (false_rtx) == IOR
+	       && rtx_equal_p (XEXP (false_rtx, 0), true_rtx))
+	term1 = true_rtx, false_rtx = XEXP (false_rtx, 1), true_rtx = const0_rtx;
+      else if (GET_CODE (false_rtx) == IOR
+	       && rtx_equal_p (XEXP (false_rtx, 1), true_rtx))
+	term1 = true_rtx, false_rtx = XEXP (false_rtx, 0), true_rtx = const0_rtx;
+
+ term2 = gen_binary (AND, GET_MODE (src),
+ XEXP (XEXP (src, 0), 0), true_rtx);
term3 = gen_binary (AND, GET_MODE (src),
- gen_unary (NOT, GET_MODE (src), GET_MODE (src),
- XEXP (XEXP (src, 0), 0)),
- false);
+ simplify_gen_unary (NOT, GET_MODE (src),
+ XEXP (XEXP (src, 0), 0),
+ GET_MODE (src)),
+ false_rtx);
SUBST (SET_SRC (x),
gen_binary (IOR, GET_MODE (src),
@@ -4921,25 +5176,28 @@ simplify_logical (x, last)
enum machine_mode mode = GET_MODE (x);
rtx op0 = XEXP (x, 0);
rtx op1 = XEXP (x, 1);
+ rtx reversed;
switch (GET_CODE (x))
{
case AND:
- /* Convert (A ^ B) & A to A & (~ B) since the latter is often a single
+ /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
insn (and may simplify more). */
if (GET_CODE (op0) == XOR
&& rtx_equal_p (XEXP (op0, 0), op1)
&& ! side_effects_p (op1))
x = gen_binary (AND, mode,
- gen_unary (NOT, mode, mode, XEXP (op0, 1)), op1);
+ simplify_gen_unary (NOT, mode, XEXP (op0, 1), mode),
+ op1);
if (GET_CODE (op0) == XOR
&& rtx_equal_p (XEXP (op0, 1), op1)
&& ! side_effects_p (op1))
x = gen_binary (AND, mode,
- gen_unary (NOT, mode, mode, XEXP (op0, 0)), op1);
+ simplify_gen_unary (NOT, mode, XEXP (op0, 0), mode),
+ op1);
- /* Similarly for (~ (A ^ B)) & A. */
+ /* Similarly for (~(A ^ B)) & A. */
if (GET_CODE (op0) == NOT
&& GET_CODE (XEXP (op0, 0)) == XOR
&& rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
@@ -4952,7 +5210,12 @@ simplify_logical (x, last)
&& ! side_effects_p (op1))
x = gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
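/* A standalone illustration (hypothetical helper, not from combine.c)
   of the identities the AND case above relies on:
   (A ^ B) & A == A & ~B, and (~(A ^ B)) & A == A & B.  */
#include <assert.h>

static void
check_xor_and_identities (unsigned int a, unsigned int b)
{
  assert (((a ^ b) & a) == (a & ~b));	/* (A ^ B) & A -> A & ~B */
  assert ((~(a ^ b) & a) == (a & b));	/* (~(A ^ B)) & A -> A & B */
}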
- if (GET_CODE (op1) == CONST_INT)
+ /* We can call simplify_and_const_int only if we don't lose
+ any (sign) bits when converting INTVAL (op1) to
+ "unsigned HOST_WIDE_INT". */
+ if (GET_CODE (op1) == CONST_INT
+ && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ || INTVAL (op1) > 0))
{
x = simplify_and_const_int (x, mode, op0, INTVAL (op1));
@@ -4966,12 +5229,12 @@ simplify_logical (x, last)
return gen_binary (IOR, mode,
gen_binary (AND, mode, XEXP (op0, 0),
GEN_INT (INTVAL (XEXP (op0, 1))
- & ~ INTVAL (op1))), op1);
+ & ~INTVAL (op1))), op1);
if (GET_CODE (x) != AND)
return x;
- if (GET_RTX_CLASS (GET_CODE (x)) == 'c'
+ if (GET_RTX_CLASS (GET_CODE (x)) == 'c'
|| GET_RTX_CLASS (GET_CODE (x)) == '2')
op0 = XEXP (x, 0), op1 = XEXP (x, 1);
}
@@ -4994,7 +5257,7 @@ simplify_logical (x, last)
For example, (and (ior A B) (not B)) can occur as the result of
expanding a bit field assignment. When we apply the distributive
law to this, we get (ior (and (A (not B))) (and (B (not B)))),
- which then simplifies to (and (A (not B))).
+ which then simplifies to (and (A (not B))).
If we have (and (ior A B) C), apply the distributive law and then
the inverse distributive law to see if things simplify. */
@@ -5026,7 +5289,7 @@ simplify_logical (x, last)
gen_binary (IOR, mode, XEXP (op0, 0), XEXP (op1, 0)),
gen_binary (IOR, mode, copy_rtx (XEXP (op0, 0)),
XEXP (op1, 1))));
-
+
else if (GET_CODE (op1) == NOT && GET_CODE (op0) == XOR)
return apply_distributive_law
(gen_binary (XOR, mode,
@@ -5038,7 +5301,7 @@ simplify_logical (x, last)
/* (ior A C) is C if all bits of A that might be nonzero are on in C. */
if (GET_CODE (op1) == CONST_INT
&& GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
- && (nonzero_bits (op0, mode) & ~ INTVAL (op1)) == 0)
+ && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
return op1;
/* Convert (A & B) | A to A. */
@@ -5115,6 +5378,15 @@ simplify_logical (x, last)
break;
case XOR:
+ /* If we are XORing two things that have no bits in common,
+ convert them into an IOR. This helps to detect rotations encoded
+ by pairs of shifts, and enables other simplifications. */
+
+ if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ && (nonzero_bits (op0, mode)
+ & nonzero_bits (op1, mode)) == 0)
+ return (gen_binary (IOR, mode, op0, op1));
+
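/* A standalone illustration (hypothetical helper, not from combine.c):
   when two values share no set bits, XOR and IOR agree, which is why
   the conversion above is safe.  */
#include <assert.h>

static void
check_disjoint_xor_is_ior (unsigned int x)
{
  unsigned int hi = x & 0xffff0000u;	/* no bits in common with LO */
  unsigned int lo = x & 0x0000ffffu;

  assert ((hi ^ lo) == (hi | lo));
}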
/* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
(NOT y). */
@@ -5132,7 +5404,9 @@ simplify_logical (x, last)
SUBST (XEXP (x, 1), op1);
}
else if (num_negated == 1)
- return gen_unary (NOT, mode, mode, gen_binary (XOR, mode, op0, op1));
+ return
+ simplify_gen_unary (NOT, mode, gen_binary (XOR, mode, op0, op1),
+ mode);
}
/* Convert (xor (and A B) B) to (and (not A) B). The latter may
@@ -5143,14 +5417,14 @@ simplify_logical (x, last)
&& rtx_equal_p (XEXP (op0, 1), op1)
&& ! side_effects_p (op1))
return gen_binary (AND, mode,
- gen_unary (NOT, mode, mode, XEXP (op0, 0)),
+ simplify_gen_unary (NOT, mode, XEXP (op0, 0), mode),
op1);
else if (GET_CODE (op0) == AND
&& rtx_equal_p (XEXP (op0, 0), op1)
&& ! side_effects_p (op1))
return gen_binary (AND, mode,
- gen_unary (NOT, mode, mode, XEXP (op0, 1)),
+ simplify_gen_unary (NOT, mode, XEXP (op0, 1), mode),
op1);
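/* A standalone illustration (hypothetical helper, not from combine.c)
   of the rewrite above: (xor (and A B) B) == (and (not A) B).  */
#include <assert.h>

static void
check_xor_of_and (unsigned int a, unsigned int b)
{
  assert (((a & b) ^ b) == (~a & b));
}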
/* (xor (comparison foo bar) (const_int 1)) can become the reversed
@@ -5158,9 +5432,9 @@ simplify_logical (x, last)
if (STORE_FLAG_VALUE == 1
&& op1 == const1_rtx
&& GET_RTX_CLASS (GET_CODE (op0)) == '<'
- && reversible_comparison_p (op0))
- return gen_rtx_combine (reverse_condition (GET_CODE (op0)),
- mode, XEXP (op0, 0), XEXP (op0, 1));
+ && (reversed = reversed_comparison (op0, mode, XEXP (op0, 0),
+ XEXP (op0, 1))))
+ return reversed;
/* (lshiftrt foo C) where C is the number of bits in FOO minus 1
is (lt foo (const_int 0)), so we can perform the above
@@ -5171,7 +5445,7 @@ simplify_logical (x, last)
&& GET_CODE (op0) == LSHIFTRT
&& GET_CODE (XEXP (op0, 1)) == CONST_INT
&& INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
- return gen_rtx_combine (GE, mode, XEXP (op0, 0), const0_rtx);
+ return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
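/* A standalone illustration (hypothetical helper, assuming a 32-bit
   int) of the rewrite above: with STORE_FLAG_VALUE == 1, the logical
   shift (x >> 31) yields the sign bit, so XORing it with 1 computes
   x >= 0.  */
#include <assert.h>

static void
check_lshiftrt_xor_is_ge (int x)
{
  unsigned int sign = (unsigned int) x >> 31;	/* (lshiftrt x 31) */

  assert ((sign ^ 1u) == (unsigned int) (x >= 0));
}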
/* (xor (comparison foo bar) (const_int sign-bit))
when STORE_FLAG_VALUE is the sign bit. */
@@ -5180,9 +5454,10 @@ simplify_logical (x, last)
== (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
&& op1 == const_true_rtx
&& GET_RTX_CLASS (GET_CODE (op0)) == '<'
- && reversible_comparison_p (op0))
- return gen_rtx_combine (reverse_condition (GET_CODE (op0)),
- mode, XEXP (op0, 0), XEXP (op0, 1));
+ && (reversed = reversed_comparison (op0, mode, XEXP (op0, 0),
+ XEXP (op0, 1))))
+ return reversed;
+
break;
default:
@@ -5198,7 +5473,7 @@ simplify_logical (x, last)
an AND operation, which is simpler, though only one operation.
The function expand_compound_operation is called with an rtx expression
- and will convert it to the appropriate shifts and AND operations,
+ and will convert it to the appropriate shifts and AND operations,
simplifying at each stage.
The function make_compound_operation is called to convert an expression
@@ -5209,9 +5484,9 @@ static rtx
expand_compound_operation (x)
rtx x;
{
- int pos = 0, len;
+ unsigned HOST_WIDE_INT pos = 0, len;
int unsignedp = 0;
- int modewidth;
+ unsigned int modewidth;
rtx tem;
switch (GET_CODE (x))
@@ -5238,7 +5513,7 @@ expand_compound_operation (x)
Reject MODEs bigger than a word, because we might not be able
to reference a two-register group starting with an arbitrary register
(and currently gen_lowpart might crash for a SUBREG). */
-
+
if (GET_MODE_SIZE (GET_MODE (XEXP (x, 0))) > UNITS_PER_WORD)
return x;
@@ -5280,6 +5555,20 @@ expand_compound_operation (x)
default:
return x;
}
+ /* Convert sign extension to zero extension, if we know that the high
+ bit is not set, as this is easier to optimize. It will be converted
+ back to cheaper alternative in make_extraction. */
+ if (GET_CODE (x) == SIGN_EXTEND
+ && (GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
+ && ((nonzero_bits (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
+ & ~(((unsigned HOST_WIDE_INT)
+ GET_MODE_MASK (GET_MODE (XEXP (x, 0))))
+ >> 1))
+ == 0)))
+ {
+ rtx temp = gen_rtx_ZERO_EXTEND (GET_MODE (x), XEXP (x, 0));
+ return expand_compound_operation (temp);
+ }
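/* A standalone illustration (hypothetical helper, not from combine.c)
   of the conversion above: when the high bit of the narrow value is
   known to be clear, sign extension and zero extension produce the
   same result, so the easier-to-optimize zero_extend may be used.  */
#include <assert.h>
#include <stdint.h>

static void
check_extend_equivalence (uint8_t v)
{
  if ((v & 0x80) == 0)		/* high bit not set */
    assert ((int32_t) (int8_t) v == (int32_t) v);
}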
/* We can optimize some special cases of ZERO_EXTEND. */
if (GET_CODE (x) == ZERO_EXTEND)
@@ -5291,7 +5580,7 @@ expand_compound_operation (x)
&& GET_MODE (XEXP (XEXP (x, 0), 0)) == GET_MODE (x)
&& GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
&& (nonzero_bits (XEXP (XEXP (x, 0), 0), GET_MODE (x))
- & ~ GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
+ & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
return XEXP (XEXP (x, 0), 0);
/* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)). */
@@ -5300,7 +5589,7 @@ expand_compound_operation (x)
&& subreg_lowpart_p (XEXP (x, 0))
&& GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
&& (nonzero_bits (SUBREG_REG (XEXP (x, 0)), GET_MODE (x))
- & ~ GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
+ & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
return SUBREG_REG (XEXP (x, 0));
/* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI when foo
@@ -5312,8 +5601,8 @@ expand_compound_operation (x)
&& GET_RTX_CLASS (GET_CODE (XEXP (XEXP (x, 0), 0))) == '<'
&& (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))
<= HOST_BITS_PER_WIDE_INT)
- && ((HOST_WIDE_INT) STORE_FLAG_VALUE
- & ~ GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
+ && ((HOST_WIDE_INT) STORE_FLAG_VALUE
+ & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
return XEXP (XEXP (x, 0), 0);
/* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)). */
@@ -5324,33 +5613,9 @@ expand_compound_operation (x)
&& (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))
<= HOST_BITS_PER_WIDE_INT)
&& ((HOST_WIDE_INT) STORE_FLAG_VALUE
- & ~ GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
+ & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
return SUBREG_REG (XEXP (x, 0));
- /* If sign extension is cheaper than zero extension, then use it
- if we know that no extraneous bits are set, and that the high
- bit is not set. */
- if (flag_expensive_optimizations
- && ((GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
- && ((nonzero_bits (XEXP (x, 0), GET_MODE (x))
- & ~ (((unsigned HOST_WIDE_INT)
- GET_MODE_MASK (GET_MODE (XEXP (x, 0))))
- >> 1))
- == 0))
- || (GET_RTX_CLASS (GET_CODE (XEXP (x, 0))) == '<'
- && (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))
- <= HOST_BITS_PER_WIDE_INT)
- && (((HOST_WIDE_INT) STORE_FLAG_VALUE
- & ~ (((unsigned HOST_WIDE_INT)
- GET_MODE_MASK (GET_MODE (XEXP (x, 0))))
- >> 1))
- == 0))))
- {
- rtx temp = gen_rtx_SIGN_EXTEND (GET_MODE (x), XEXP (x, 0));
-
- if (rtx_cost (temp, SET) < rtx_cost (x, SET))
- return expand_compound_operation (temp);
- }
}
/* If we reach here, we want to return a pair of shifts. The inner
@@ -5364,12 +5629,12 @@ expand_compound_operation (x)
We must check for the case where the left shift would have a negative
count. This can happen in a case like (x >> 31) & 255 on machines
that can't shift by a constant. On those machines, we would first
- combine the shift with the AND to produce a variable-position
+ combine the shift with the AND to produce a variable-position
extraction. Then the constant of 31 would be substituted in to produce
such a position. */
modewidth = GET_MODE_BITSIZE (GET_MODE (x));
- if (modewidth >= pos - len)
+ if (modewidth + len >= pos)
tem = simplify_shift_const (NULL_RTX, unsignedp ? LSHIFTRT : ASHIFTRT,
GET_MODE (x),
simplify_shift_const (NULL_RTX, ASHIFT,
@@ -5387,7 +5652,6 @@ expand_compound_operation (x)
else
/* Any other cases we can't handle. */
return x;
-
/* If we couldn't do this for some reason, return the original
expression. */
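/* A standalone sketch (hypothetical helper, assuming a 32-bit word and
   0 < LEN, POS + LEN <= 32) of the pair of shifts produced above: move
   the field up against the high end, then shift it back down, using a
   logical shift for an unsigned extraction.  */
#include <assert.h>

static unsigned int
extract_zero_ext (unsigned int x, int pos, int len)
{
  unsigned int up = x << (32 - pos - len);	/* the inner ashift */

  return up >> (32 - len);			/* the outer lshiftrt */
}

/* For example, extract_zero_ext (0xabcd, 4, 8) == 0xbc.  */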
@@ -5423,7 +5687,7 @@ expand_field_assignment (x)
{
inner = SUBREG_REG (XEXP (SET_DEST (x), 0));
len = GET_MODE_BITSIZE (GET_MODE (XEXP (SET_DEST (x), 0)));
- pos = GEN_INT (BITS_PER_WORD * SUBREG_WORD (XEXP (SET_DEST (x), 0)));
+ pos = GEN_INT (subreg_lsb (XEXP (SET_DEST (x), 0)));
}
else if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
&& GET_CODE (XEXP (SET_DEST (x), 1)) == CONST_INT)
@@ -5460,14 +5724,17 @@ expand_field_assignment (x)
/* A SUBREG between two modes that occupy the same numbers of words
can be done by moving the SUBREG to the source. */
else if (GET_CODE (SET_DEST (x)) == SUBREG
+ /* We need SUBREGs to compute nonzero_bits properly. */
+ && nonzero_sign_valid
&& (((GET_MODE_SIZE (GET_MODE (SET_DEST (x)))
+ (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
== ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x))))
+ (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
{
x = gen_rtx_SET (VOIDmode, SUBREG_REG (SET_DEST (x)),
- gen_lowpart_for_combine (GET_MODE (SUBREG_REG (SET_DEST (x))),
- SET_SRC (x)));
+ gen_lowpart_for_combine
+ (GET_MODE (SUBREG_REG (SET_DEST (x))),
+ SET_SRC (x)));
continue;
}
else
@@ -5505,22 +5772,22 @@ expand_field_assignment (x)
/* Now compute the equivalent expression. Make a copy of INNER
for the SET_DEST in case it is a MEM into which we will substitute;
we don't want shared RTL in that case. */
- x = gen_rtx_SET (VOIDmode, copy_rtx (inner),
- gen_binary (IOR, compute_mode,
- gen_binary (AND, compute_mode,
- gen_unary (NOT, compute_mode,
- compute_mode,
- gen_binary (ASHIFT,
- compute_mode,
- mask, pos)),
- inner),
- gen_binary (ASHIFT, compute_mode,
- gen_binary (AND, compute_mode,
- gen_lowpart_for_combine
- (compute_mode,
- SET_SRC (x)),
- mask),
- pos)));
+ x = gen_rtx_SET
+ (VOIDmode, copy_rtx (inner),
+ gen_binary (IOR, compute_mode,
+ gen_binary (AND, compute_mode,
+ simplify_gen_unary (NOT, compute_mode,
+ gen_binary (ASHIFT,
+ compute_mode,
+ mask, pos),
+ compute_mode),
+ inner),
+ gen_binary (ASHIFT, compute_mode,
+ gen_binary (AND, compute_mode,
+ gen_lowpart_for_combine
+ (compute_mode, SET_SRC (x)),
+ mask),
+ pos)));
}
return x;
@@ -5537,7 +5804,7 @@ expand_field_assignment (x)
code that understands the USE is this routine. If it is not removed,
it will cause the resulting insn not to match.
- UNSIGNEDP is non-zero for an unsigned reference and zero for a
+ UNSIGNEDP is non-zero for an unsigned reference and zero for a
signed reference.
IN_DEST is non-zero if this is a reference in the destination of a
@@ -5558,9 +5825,9 @@ make_extraction (mode, inner, pos, pos_rtx, len,
unsignedp, in_dest, in_compare)
enum machine_mode mode;
rtx inner;
- int pos;
+ HOST_WIDE_INT pos;
rtx pos_rtx;
- int len;
+ unsigned HOST_WIDE_INT len;
int unsignedp;
int in_dest, in_compare;
{
@@ -5577,7 +5844,7 @@ make_extraction (mode, inner, pos, pos_rtx, len,
int spans_byte = 0;
rtx new = 0;
rtx orig_pos_rtx = pos_rtx;
- int orig_pos;
+ HOST_WIDE_INT orig_pos;
/* Get some information about INNER and get the innermost object. */
if (GET_CODE (inner) == USE)
@@ -5621,8 +5888,7 @@ make_extraction (mode, inner, pos, pos_rtx, len,
&& GET_CODE (inner) != MEM
&& (! in_dest
|| (GET_CODE (inner) == REG
- && (movstrict_optab->handlers[(int) tmode].insn_code
- != CODE_FOR_nothing))))
+ && have_insn_for (STRICT_LOW_PART, tmode))))
|| (GET_CODE (inner) == MEM && pos_rtx == 0
&& (pos
% (STRICT_ALIGNMENT ? GET_MODE_ALIGNMENT (tmode)
@@ -5636,62 +5902,93 @@ make_extraction (mode, inner, pos, pos_rtx, len,
{
/* If INNER is a MEM, make a new MEM that encompasses just the desired
field. If the original and current mode are the same, we need not
- adjust the offset. Otherwise, we do if bytes big endian.
+ adjust the offset. Otherwise, we do if bytes big endian.
If INNER is not a MEM, get a piece consisting of just the field
of interest (in this case POS % BITS_PER_WORD must be 0). */
if (GET_CODE (inner) == MEM)
{
- int offset;
+ HOST_WIDE_INT offset;
+
/* POS counts from lsb, but make OFFSET count in memory order. */
if (BYTES_BIG_ENDIAN)
offset = (GET_MODE_BITSIZE (is_mode) - len - pos) / BITS_PER_UNIT;
else
offset = pos / BITS_PER_UNIT;
- new = gen_rtx_MEM (tmode, plus_constant (XEXP (inner, 0), offset));
- RTX_UNCHANGING_P (new) = RTX_UNCHANGING_P (inner);
- MEM_COPY_ATTRIBUTES (new, inner);
+ new = adjust_address_nv (inner, tmode, offset);
}
else if (GET_CODE (inner) == REG)
{
/* We can't call gen_lowpart_for_combine here since we always want
a SUBREG and it would sometimes return a new hard register. */
if (tmode != inner_mode)
- new = gen_rtx_SUBREG (tmode, inner,
- (WORDS_BIG_ENDIAN
- && GET_MODE_SIZE (inner_mode) > UNITS_PER_WORD
- ? (((GET_MODE_SIZE (inner_mode)
- - GET_MODE_SIZE (tmode))
- / UNITS_PER_WORD)
- - pos / BITS_PER_WORD)
- : pos / BITS_PER_WORD));
+ {
+ HOST_WIDE_INT final_word = pos / BITS_PER_WORD;
+
+ if (WORDS_BIG_ENDIAN
+ && GET_MODE_SIZE (inner_mode) > UNITS_PER_WORD)
+ final_word = ((GET_MODE_SIZE (inner_mode)
+ - GET_MODE_SIZE (tmode))
+ / UNITS_PER_WORD) - final_word;
+
+ final_word *= UNITS_PER_WORD;
+ if (BYTES_BIG_ENDIAN
+ && GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (tmode))
+ final_word += (GET_MODE_SIZE (inner_mode)
+ - GET_MODE_SIZE (tmode)) % UNITS_PER_WORD;
+
+ new = gen_rtx_SUBREG (tmode, inner, final_word);
+ }
else
new = inner;
}
else
new = force_to_mode (inner, tmode,
len >= HOST_BITS_PER_WIDE_INT
- ? GET_MODE_MASK (tmode)
- : ((HOST_WIDE_INT) 1 << len) - 1,
+ ? ~(unsigned HOST_WIDE_INT) 0
+ : ((unsigned HOST_WIDE_INT) 1 << len) - 1,
NULL_RTX, 0);
- /* If this extraction is going into the destination of a SET,
+ /* If this extraction is going into the destination of a SET,
make a STRICT_LOW_PART unless we made a MEM. */
if (in_dest)
return (GET_CODE (new) == MEM ? new
: (GET_CODE (new) != SUBREG
? gen_rtx_CLOBBER (tmode, const0_rtx)
- : gen_rtx_combine (STRICT_LOW_PART, VOIDmode, new)));
+ : gen_rtx_STRICT_LOW_PART (VOIDmode, new)));
+
+ if (mode == tmode)
+ return new;
+
+ /* If we know that no extraneous bits are set, and that the high
+ bit is not set, convert the extraction to the cheaper of
+ sign and zero extension, which are equivalent in these cases. */
+ if (flag_expensive_optimizations
+ && (GET_MODE_BITSIZE (tmode) <= HOST_BITS_PER_WIDE_INT
+ && ((nonzero_bits (new, tmode)
+ & ~(((unsigned HOST_WIDE_INT)
+ GET_MODE_MASK (tmode))
+ >> 1))
+ == 0)))
+ {
+ rtx temp = gen_rtx_ZERO_EXTEND (mode, new);
+ rtx temp1 = gen_rtx_SIGN_EXTEND (mode, new);
+
+ /* Prefer ZERO_EXTEND, since it gives more information to
+ backends. */
+ if (rtx_cost (temp, SET) <= rtx_cost (temp1, SET))
+ return temp;
+ return temp1;
+ }
/* Otherwise, sign- or zero-extend unless we already are in the
proper mode. */
- return (mode == tmode ? new
- : gen_rtx_combine (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
- mode, new));
+ return (gen_rtx_fmt_e (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
+ mode, new));
}
/* Unless this is a COMPARE or we have a funny memory reference,
@@ -5701,62 +5998,40 @@ make_extraction (mode, inner, pos, pos_rtx, len,
&& ! in_compare && ! spans_byte && unsignedp)
return 0;
- /* Unless we are allowed to span bytes, reject this if we would be
- spanning bytes or if the position is not a constant and the length
- is not 1. In all other cases, we would only be going outside
- out object in cases when an original shift would have been
+ /* Unless we are allowed to span bytes or INNER is not MEM, reject this if
+ we would be spanning bytes or if the position is not a constant and the
+ length is not 1. In all other cases, we would only be going outside
+ our object in cases when an original shift would have been
undefined. */
- if (! spans_byte
+ if (! spans_byte && GET_CODE (inner) == MEM
&& ((pos_rtx == 0 && pos + len > GET_MODE_BITSIZE (is_mode))
|| (pos_rtx != 0 && len != 1)))
return 0;
/* Get the mode to use should INNER not be a MEM, the mode for the position,
and the mode for the result. */
-#ifdef HAVE_insv
- if (in_dest)
- {
- wanted_inner_reg_mode
- = (insn_operand_mode[(int) CODE_FOR_insv][0] == VOIDmode
- ? word_mode
- : insn_operand_mode[(int) CODE_FOR_insv][0]);
- pos_mode = (insn_operand_mode[(int) CODE_FOR_insv][2] == VOIDmode
- ? word_mode : insn_operand_mode[(int) CODE_FOR_insv][2]);
- extraction_mode = (insn_operand_mode[(int) CODE_FOR_insv][3] == VOIDmode
- ? word_mode
- : insn_operand_mode[(int) CODE_FOR_insv][3]);
+ if (in_dest && mode_for_extraction (EP_insv, -1) != MAX_MACHINE_MODE)
+ {
+ wanted_inner_reg_mode = mode_for_extraction (EP_insv, 0);
+ pos_mode = mode_for_extraction (EP_insv, 2);
+ extraction_mode = mode_for_extraction (EP_insv, 3);
}
-#endif
-#ifdef HAVE_extzv
- if (! in_dest && unsignedp)
+ if (! in_dest && unsignedp
+ && mode_for_extraction (EP_extzv, -1) != MAX_MACHINE_MODE)
{
- wanted_inner_reg_mode
- = (insn_operand_mode[(int) CODE_FOR_extzv][1] == VOIDmode
- ? word_mode
- : insn_operand_mode[(int) CODE_FOR_extzv][1]);
- pos_mode = (insn_operand_mode[(int) CODE_FOR_extzv][3] == VOIDmode
- ? word_mode : insn_operand_mode[(int) CODE_FOR_extzv][3]);
- extraction_mode = (insn_operand_mode[(int) CODE_FOR_extzv][0] == VOIDmode
- ? word_mode
- : insn_operand_mode[(int) CODE_FOR_extzv][0]);
+ wanted_inner_reg_mode = mode_for_extraction (EP_extzv, 1);
+ pos_mode = mode_for_extraction (EP_extzv, 3);
+ extraction_mode = mode_for_extraction (EP_extzv, 0);
}
-#endif
-#ifdef HAVE_extv
- if (! in_dest && ! unsignedp)
+ if (! in_dest && ! unsignedp
+ && mode_for_extraction (EP_extv, -1) != MAX_MACHINE_MODE)
{
- wanted_inner_reg_mode
- = (insn_operand_mode[(int) CODE_FOR_extv][1] == VOIDmode
- ? word_mode
- : insn_operand_mode[(int) CODE_FOR_extv][1]);
- pos_mode = (insn_operand_mode[(int) CODE_FOR_extv][3] == VOIDmode
- ? word_mode : insn_operand_mode[(int) CODE_FOR_extv][3]);
- extraction_mode = (insn_operand_mode[(int) CODE_FOR_extv][0] == VOIDmode
- ? word_mode
- : insn_operand_mode[(int) CODE_FOR_extv][0]);
+ wanted_inner_reg_mode = mode_for_extraction (EP_extv, 1);
+ pos_mode = mode_for_extraction (EP_extv, 3);
+ extraction_mode = mode_for_extraction (EP_extv, 0);
}
-#endif
/* Never narrow an object, since that might not be safe. */
@@ -5797,8 +6072,7 @@ make_extraction (mode, inner, pos, pos_rtx, len,
pos = width - len - pos;
else
pos_rtx
- = gen_rtx_combine (MINUS, GET_MODE (pos_rtx),
- GEN_INT (width - len), pos_rtx);
+ = gen_rtx_MINUS (GET_MODE (pos_rtx), GEN_INT (width - len), pos_rtx);
/* POS may be less than 0 now, but we check for that below.
Note that it can only be less than 0 if GET_CODE (inner) != MEM. */
}
@@ -5818,7 +6092,7 @@ make_extraction (mode, inner, pos, pos_rtx, len,
/* The computations below will be correct if the machine is big
endian in both bits and bytes or little endian in bits and bytes.
If it is mixed, we must adjust. */
-
+
/* If bytes are big endian and we had a paradoxical SUBREG, we must
adjust OFFSET to compensate. */
if (BYTES_BIG_ENDIAN
@@ -5840,13 +6114,7 @@ make_extraction (mode, inner, pos, pos_rtx, len,
- GET_MODE_SIZE (wanted_inner_mode) - offset);
if (offset != 0 || inner_mode != wanted_inner_mode)
- {
- rtx newmem = gen_rtx_MEM (wanted_inner_mode,
- plus_constant (XEXP (inner, 0), offset));
- RTX_UNCHANGING_P (newmem) = RTX_UNCHANGING_P (inner);
- MEM_COPY_ATTRIBUTES (newmem, inner);
- inner = newmem;
- }
+ inner = adjust_address_nv (inner, wanted_inner_mode, offset);
}
/* If INNER is not memory, we can always get it into the proper mode. If we
@@ -5862,8 +6130,9 @@ make_extraction (mode, inner, pos, pos_rtx, len,
inner = force_to_mode (inner, wanted_inner_mode,
pos_rtx
|| len + orig_pos >= HOST_BITS_PER_WIDE_INT
- ? GET_MODE_MASK (wanted_inner_mode)
- : (((HOST_WIDE_INT) 1 << len) - 1) << orig_pos,
+ ? ~(unsigned HOST_WIDE_INT) 0
+ : ((((unsigned HOST_WIDE_INT) 1 << len) - 1)
+ << orig_pos),
NULL_RTX, 0);
}
@@ -5871,7 +6140,30 @@ make_extraction (mode, inner, pos, pos_rtx, len,
have to zero extend. Otherwise, we can just use a SUBREG. */
if (pos_rtx != 0
&& GET_MODE_SIZE (pos_mode) > GET_MODE_SIZE (GET_MODE (pos_rtx)))
- pos_rtx = gen_rtx_combine (ZERO_EXTEND, pos_mode, pos_rtx);
+ {
+ rtx temp = gen_rtx_ZERO_EXTEND (pos_mode, pos_rtx);
+
+ /* If we know that no extraneous bits are set, and that the high
+ bit is not set, convert the extraction to the cheaper one, either
+ SIGN_EXTEND or ZERO_EXTEND, which are equivalent in these
+ cases. */
+ if (flag_expensive_optimizations
+ && (GET_MODE_BITSIZE (GET_MODE (pos_rtx)) <= HOST_BITS_PER_WIDE_INT
+ && ((nonzero_bits (pos_rtx, GET_MODE (pos_rtx))
+ & ~(((unsigned HOST_WIDE_INT)
+ GET_MODE_MASK (GET_MODE (pos_rtx)))
+ >> 1))
+ == 0)))
+ {
+ rtx temp1 = gen_rtx_SIGN_EXTEND (pos_mode, pos_rtx);
+
+ /* Prefer ZERO_EXTEND, since it gives more information to
+ backends. */
+ if (rtx_cost (temp1, SET) < rtx_cost (temp, SET))
+ temp = temp1;
+ }
+ pos_rtx = temp;
+ }
else if (pos_rtx != 0
&& GET_MODE_SIZE (pos_mode) < GET_MODE_SIZE (GET_MODE (pos_rtx)))
pos_rtx = gen_lowpart_for_combine (pos_mode, pos_rtx);
@@ -5886,7 +6178,7 @@ make_extraction (mode, inner, pos, pos_rtx, len,
pos_rtx = GEN_INT (pos);
/* Make the required operation. See if we can use existing rtx. */
- new = gen_rtx_combine (unsignedp ? ZERO_EXTRACT : SIGN_EXTRACT,
+ new = gen_rtx_fmt_eee (unsignedp ? ZERO_EXTRACT : SIGN_EXTRACT,
extraction_mode, inner, GEN_INT (len), pos_rtx);
if (! in_dest)
new = gen_lowpart_for_combine (mode, new);
@@ -5920,7 +6212,7 @@ extract_left_shift (x, count)
case NEG: case NOT:
if ((tem = extract_left_shift (XEXP (x, 0), count)) != 0)
- return gen_unary (code, mode, mode, tem);
+ return simplify_gen_unary (code, mode, tem, mode);
break;
@@ -5930,11 +6222,11 @@ extract_left_shift (x, count)
if (GET_CODE (XEXP (x,1)) == CONST_INT
&& (INTVAL (XEXP (x, 1)) & ((((HOST_WIDE_INT) 1 << count)) - 1)) == 0
&& (tem = extract_left_shift (XEXP (x, 0), count)) != 0)
- return gen_binary (code, mode, tem,
+ return gen_binary (code, mode, tem,
GEN_INT (INTVAL (XEXP (x, 1)) >> count));
break;
-
+
default:
break;
}
@@ -5948,7 +6240,7 @@ extract_left_shift (x, count)
Return the new rtx, usually just X.
- Also, for machines like the Vax that don't have logical shift insns,
+ Also, for machines like the VAX that don't have logical shift insns,
try to convert logical to arithmetic shift operations in cases where
they are equivalent. This undoes the canonicalizations to logical
shifts done elsewhere.
@@ -5973,7 +6265,7 @@ make_compound_operation (x, in_code)
int i;
rtx new = 0;
rtx tem;
- char *fmt;
+ const char *fmt;
/* Select the code to be used in recursive calls. Once we are inside an
address, we stay there. If we have a comparison, set to COMPARE,
@@ -5997,9 +6289,9 @@ make_compound_operation (x, in_code)
&& INTVAL (XEXP (x, 1)) >= 0)
{
new = make_compound_operation (XEXP (x, 0), next_code);
- new = gen_rtx_combine (MULT, mode, new,
- GEN_INT ((HOST_WIDE_INT) 1
- << INTVAL (XEXP (x, 1))));
+ new = gen_rtx_MULT (mode, new,
+ GEN_INT ((HOST_WIDE_INT) 1
+ << INTVAL (XEXP (x, 1))));
}
break;
@@ -6039,11 +6331,11 @@ make_compound_operation (x, in_code)
&& (i = exact_log2 (INTVAL (XEXP (x, 1)) + 1)) >= 0)
{
/* Apply the distributive law, and then try to make extractions. */
- new = gen_rtx_combine (GET_CODE (XEXP (x, 0)), mode,
- gen_rtx_AND (mode, XEXP (XEXP (x, 0), 0),
- XEXP (x, 1)),
- gen_rtx_AND (mode, XEXP (XEXP (x, 0), 1),
- XEXP (x, 1)));
+ new = gen_rtx_fmt_ee (GET_CODE (XEXP (x, 0)), mode,
+ gen_rtx_AND (mode, XEXP (XEXP (x, 0), 0),
+ XEXP (x, 1)),
+ gen_rtx_AND (mode, XEXP (XEXP (x, 0), 1),
+ XEXP (x, 1)));
new = make_compound_operation (new, in_code);
}
@@ -6065,10 +6357,9 @@ make_compound_operation (x, in_code)
/* On machines without logical shifts, if the operand of the AND is
a logical shift and our mask turns off all the propagated sign
bits, we can replace the logical shift with an arithmetic shift. */
- else if (ashr_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
- && (lshr_optab->handlers[(int) mode].insn_code
- == CODE_FOR_nothing)
- && GET_CODE (XEXP (x, 0)) == LSHIFTRT
+ else if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
+ && !have_insn_for (LSHIFTRT, mode)
+ && have_insn_for (ASHIFTRT, mode)
&& GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
&& INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
&& INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
@@ -6079,10 +6370,10 @@ make_compound_operation (x, in_code)
mask >>= INTVAL (XEXP (XEXP (x, 0), 1));
if ((INTVAL (XEXP (x, 1)) & ~mask) == 0)
SUBST (XEXP (x, 0),
- gen_rtx_combine (ASHIFTRT, mode,
- make_compound_operation (XEXP (XEXP (x, 0), 0),
- next_code),
- XEXP (XEXP (x, 0), 1)));
+ gen_rtx_ASHIFTRT (mode,
+ make_compound_operation
+ (XEXP (XEXP (x, 0), 0), next_code),
+ XEXP (XEXP (x, 0), 1)));
}
/* If the constant is one less than a power of two, this might be
@@ -6109,15 +6400,15 @@ make_compound_operation (x, in_code)
case LSHIFTRT:
/* If the sign bit is known to be zero, replace this with an
arithmetic shift. */
- if (ashr_optab->handlers[(int) mode].insn_code == CODE_FOR_nothing
- && lshr_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
+ if (have_insn_for (ASHIFTRT, mode)
+ && ! have_insn_for (LSHIFTRT, mode)
&& mode_width <= HOST_BITS_PER_WIDE_INT
&& (nonzero_bits (XEXP (x, 0), mode) & (1 << (mode_width - 1))) == 0)
{
- new = gen_rtx_combine (ASHIFTRT, mode,
- make_compound_operation (XEXP (x, 0),
- next_code),
- XEXP (x, 1));
+ new = gen_rtx_ASHIFTRT (mode,
+ make_compound_operation (XEXP (x, 0),
+ next_code),
+ XEXP (x, 1));
break;
}
@@ -6139,13 +6430,14 @@ make_compound_operation (x, in_code)
INTVAL (rhs) - INTVAL (XEXP (lhs, 1)),
NULL_RTX, mode_width - INTVAL (rhs),
code == LSHIFTRT, 0, in_code == COMPARE);
+ break;
}
/* See if we have operations between an ASHIFTRT and an ASHIFT.
If so, try to merge the shifts into a SIGN_EXTEND. We could
also do this for some cases of SIGN_EXTRACT, but it doesn't
seem worth the effort; the case checked for occurs on Alpha. */
-
+
if (GET_RTX_CLASS (GET_CODE (lhs)) != 'o'
&& ! (GET_CODE (lhs) == SUBREG
&& (GET_RTX_CLASS (GET_CODE (SUBREG_REG (lhs))) == 'o'))
@@ -6155,7 +6447,7 @@ make_compound_operation (x, in_code)
new = make_extraction (mode, make_compound_operation (new, next_code),
0, NULL_RTX, mode_width - INTVAL (rhs),
code == LSHIFTRT, 0, in_code == COMPARE);
-
+
break;
case SUBREG:
@@ -6168,11 +6460,11 @@ make_compound_operation (x, in_code)
&& GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (tem))
&& subreg_lowpart_p (x))
{
- rtx newer = force_to_mode (tem, mode,
- GET_MODE_MASK (mode), NULL_RTX, 0);
+ rtx newer = force_to_mode (tem, mode, ~(HOST_WIDE_INT) 0,
+ NULL_RTX, 0);
/* If we have something other than a SUBREG, we might have
- done an expansion, so rerun outselves. */
+ done an expansion, so rerun ourselves. */
if (GET_CODE (newer) != SUBREG)
newer = make_compound_operation (newer, in_code);
@@ -6190,13 +6482,13 @@ make_compound_operation (x, in_code)
if (GET_MODE_SIZE (mode) > GET_MODE_SIZE (GET_MODE (tem))
|| (GET_MODE_SIZE (mode) >
GET_MODE_SIZE (GET_MODE (XEXP (tem, 0)))))
- tem = gen_rtx_combine (GET_CODE (tem), mode, XEXP (tem, 0));
+ tem = gen_rtx_fmt_e (GET_CODE (tem), mode, XEXP (tem, 0));
else
tem = gen_lowpart_for_combine (mode, XEXP (tem, 0));
return tem;
}
break;
-
+
default:
break;
}
@@ -6220,30 +6512,32 @@ make_compound_operation (x, in_code)
}
/* Given M see if it is a value that would select a field of bits
- within an item, but not the entire word. Return -1 if not.
- Otherwise, return the starting position of the field, where 0 is the
- low-order bit.
+ within an item, but not the entire word. Return -1 if not.
+ Otherwise, return the starting position of the field, where 0 is the
+ low-order bit.
*PLEN is set to the length of the field. */
static int
get_pos_from_mask (m, plen)
unsigned HOST_WIDE_INT m;
- int *plen;
+ unsigned HOST_WIDE_INT *plen;
{
/* Get the bit number of the first 1 bit from the right, -1 if none. */
- int pos = exact_log2 (m & - m);
+ int pos = exact_log2 (m & -m);
+ int len;
if (pos < 0)
return -1;
/* Now shift off the low-order zero bits and see if we have a power of
two minus 1. */
- *plen = exact_log2 ((m >> pos) + 1);
+ len = exact_log2 ((m >> pos) + 1);
- if (*plen <= 0)
+ if (len <= 0)
return -1;
+ *plen = len;
return pos;
}
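/* A worked example (hypothetical helper, not from combine.c) of the
   arithmetic in get_pos_from_mask for m == 0x0ff0: the lowest set bit
   gives POS, and (m >> POS) + 1 being a power of 2 gives LEN.  */
#include <assert.h>

static void
check_get_pos_from_mask_example (void)
{
  unsigned int m = 0x0ff0;

  assert ((m & -m) == 0x0010);		/* lowest set bit => pos == 4 */
  assert (((m >> 4) + 1) == 0x0100);	/* power of 2 => len == 8 */
}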
@@ -6256,7 +6550,7 @@ get_pos_from_mask (m, plen)
Return a possibly simplified expression, but always convert X to
MODE. If X is a CONST_INT, AND the CONST_INT with MASK.
- Also, if REG is non-zero and X is a register equal in value to REG,
+ Also, if REG is non-zero and X is a register equal in value to REG,
replace X with REG.
If JUST_SELECT is nonzero, don't optimize by noticing that bits in MASK
@@ -6279,7 +6573,7 @@ force_to_mode (x, mode, mask, reg, just_select)
/* If this is a CALL or ASM_OPERANDS, don't do anything. Some of the
code below will do the wrong thing since the mode of such an
- expression is VOIDmode.
+ expression is VOIDmode.
Also do nothing if X is a CLOBBER; this can happen if X was
the return value from a call to gen_lowpart_for_combine. */
@@ -6290,9 +6584,7 @@ force_to_mode (x, mode, mask, reg, just_select)
that the operation is valid in MODE, in which case we do the operation
in MODE. */
op_mode = ((GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (x))
- && code_to_optab[(int) code] != 0
- && (code_to_optab[(int) code]->handlers[(int) mode].insn_code
- != CODE_FOR_nothing))
+ && have_insn_for (code, mode))
? mode : GET_MODE (x));
/* It is not valid to do a right-shift in a narrower mode
@@ -6311,9 +6603,10 @@ force_to_mode (x, mode, mask, reg, just_select)
if (op_mode)
fuller_mask = (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT
? GET_MODE_MASK (op_mode)
- : ((HOST_WIDE_INT) 1 << (floor_log2 (mask) + 1)) - 1);
+ : (((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mask) + 1))
+ - 1));
else
- fuller_mask = ~ (HOST_WIDE_INT) 0;
+ fuller_mask = ~(HOST_WIDE_INT) 0;
/* Determine what bits of X are guaranteed to be (non)zero. */
nonzero = nonzero_bits (x, mode);
@@ -6334,19 +6627,19 @@ force_to_mode (x, mode, mask, reg, just_select)
if (width > 0 && width < HOST_BITS_PER_WIDE_INT
&& (cval & ((HOST_WIDE_INT) 1 << (width - 1))) != 0)
cval |= (HOST_WIDE_INT) -1 << width;
-
+
return GEN_INT (cval);
}
/* If X is narrower than MODE and we want all the bits in X's mode, just
get X in the proper mode. */
if (GET_MODE_SIZE (GET_MODE (x)) < GET_MODE_SIZE (mode)
- && (GET_MODE_MASK (GET_MODE (x)) & ~ mask) == 0)
+ && (GET_MODE_MASK (GET_MODE (x)) & ~mask) == 0)
return gen_lowpart_for_combine (mode, x);
/* If we aren't changing the mode, X is not a SUBREG, and all zero bits in
MASK are already known to be zero in X, we need not do anything. */
- if (GET_MODE (x) == mode && code != SUBREG && (~ mask & nonzero) == 0)
+ if (GET_MODE (x) == mode && code != SUBREG && (~mask & nonzero) == 0)
return x;
switch (code)
@@ -6361,7 +6654,7 @@ force_to_mode (x, mode, mask, reg, just_select)
spanned the boundary of the MEM. If we are now masking so it is
within that boundary, we don't need the USE any more. */
if (! BITS_BIG_ENDIAN
- && (mask & ~ GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
+ && (mask & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
return force_to_mode (XEXP (x, 0), mode, mask, reg, next_select);
break;
@@ -6389,7 +6682,7 @@ force_to_mode (x, mode, mask, reg, just_select)
< GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
|| (0 == (mask
& GET_MODE_MASK (GET_MODE (x))
- & ~ GET_MODE_MASK (GET_MODE (SUBREG_REG (x)))))))
+ & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x)))))))
return force_to_mode (SUBREG_REG (x), mode, mask, reg, next_select);
break;
@@ -6408,7 +6701,8 @@ force_to_mode (x, mode, mask, reg, just_select)
need it. */
if (GET_CODE (x) == AND && GET_CODE (XEXP (x, 1)) == CONST_INT
- && (unsigned HOST_WIDE_INT) INTVAL (XEXP (x, 1)) == mask)
+ && ((INTVAL (XEXP (x, 1)) & GET_MODE_MASK (GET_MODE (x)))
+ == (HOST_WIDE_INT) mask))
x = XEXP (x, 0);
/* If it remains an AND, try making another AND with the bits
@@ -6421,7 +6715,7 @@ force_to_mode (x, mode, mask, reg, just_select)
&& GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT)
{
HOST_WIDE_INT cval = (INTVAL (XEXP (x, 1))
- | (GET_MODE_MASK (GET_MODE (x)) & ~ mask));
+ | (GET_MODE_MASK (GET_MODE (x)) & ~mask));
int width = GET_MODE_BITSIZE (GET_MODE (x));
rtx y;
@@ -6448,7 +6742,7 @@ force_to_mode (x, mode, mask, reg, just_select)
This may eliminate that PLUS and, later, the AND. */
{
- int width = GET_MODE_BITSIZE (mode);
+ unsigned int width = GET_MODE_BITSIZE (mode);
unsigned HOST_WIDE_INT smask = mask;
/* If MODE is narrower than HOST_WIDE_INT and mask is a negative
@@ -6466,23 +6760,23 @@ force_to_mode (x, mode, mask, reg, just_select)
&& (XEXP (x, 0) == stack_pointer_rtx
|| XEXP (x, 0) == frame_pointer_rtx))
{
- int sp_alignment = STACK_BOUNDARY / BITS_PER_UNIT;
- unsigned HOST_WIDE_INT sp_mask = GET_MODE_MASK (mode);
-
- sp_mask &= ~ (sp_alignment - 1);
- if ((sp_mask & ~ smask) == 0
- && ((INTVAL (XEXP (x, 1)) - STACK_BIAS) & ~ smask) != 0)
+ int sp_alignment = STACK_BOUNDARY / BITS_PER_UNIT;
+ unsigned HOST_WIDE_INT sp_mask = GET_MODE_MASK (mode);
+
+ sp_mask &= ~(sp_alignment - 1);
+ if ((sp_mask & ~smask) == 0
+ && ((INTVAL (XEXP (x, 1)) - STACK_BIAS) & ~smask) != 0)
return force_to_mode (plus_constant (XEXP (x, 0),
- ((INTVAL (XEXP (x, 1)) -
+ ((INTVAL (XEXP (x, 1)) -
STACK_BIAS) & smask)
+ STACK_BIAS),
- mode, smask, reg, next_select);
- }
+ mode, smask, reg, next_select);
+ }
#endif
- if ((nonzero_bits (XEXP (x, 0), mode) & ~ smask) == 0
- && (INTVAL (XEXP (x, 1)) & ~ smask) != 0)
+ if ((nonzero_bits (XEXP (x, 0), mode) & ~smask) == 0
+ && (INTVAL (XEXP (x, 1)) & ~smask) != 0)
return force_to_mode (plus_constant (XEXP (x, 0),
- (INTVAL (XEXP (x, 1))
+ (INTVAL (XEXP (x, 1))
& smask)),
mode, smask, reg, next_select);
}
@@ -6490,7 +6784,6 @@ force_to_mode (x, mode, mask, reg, just_select)
/* ... fall through ... */
- case MINUS:
case MULT:
/* For PLUS, MINUS and MULT, we need any bits less significant than the
most significant bit in MASK since carries from those bits will
@@ -6498,6 +6791,33 @@ force_to_mode (x, mode, mask, reg, just_select)
mask = fuller_mask;
goto binop;
+ case MINUS:
+ /* If X is (minus C Y) where C's least set bit is larger than any bit
+ in the mask, then we may replace with (neg Y). */
+ if (GET_CODE (XEXP (x, 0)) == CONST_INT
+ && (((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 0))
+ & -INTVAL (XEXP (x, 0))))
+ > mask))
+ {
+ x = simplify_gen_unary (NEG, GET_MODE (x), XEXP (x, 1),
+ GET_MODE (x));
+ return force_to_mode (x, mode, mask, reg, next_select);
+ }
+
+ /* Similarly, if C contains every bit in the mask, then we may
+ replace with (not Y). */
+ if (GET_CODE (XEXP (x, 0)) == CONST_INT
+ && ((INTVAL (XEXP (x, 0)) | (HOST_WIDE_INT) mask)
+ == INTVAL (XEXP (x, 0))))
+ {
+ x = simplify_gen_unary (NOT, GET_MODE (x),
+ XEXP (x, 1), GET_MODE (x));
+ return force_to_mode (x, mode, mask, reg, next_select);
+ }
+
+ mask = fuller_mask;
+ goto binop;
+
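/* A standalone illustration (hypothetical helper, specialized to the
   contiguous low-bit mask 0xff) of the two MINUS rewrites above: when
   C's least set bit is above every mask bit, (C - y) behaves like
   (neg y) under the mask, and when C covers every mask bit it behaves
   like (not y).  */
#include <assert.h>

static void
check_minus_under_mask (unsigned int y)
{
  unsigned int mask = 0xff;

  assert (((0x100u - y) & mask) == ((0u - y) & mask));	/* (neg Y) */
  assert (((0x1ffu - y) & mask) == (~y & mask));	/* (not Y) */
}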
case IOR:
case XOR:
/* If X is (ior (lshiftrt FOO C1) C2), try to commute the IOR and
@@ -6514,10 +6834,10 @@ force_to_mode (x, mode, mask, reg, just_select)
+ floor_log2 (INTVAL (XEXP (x, 1))))
< GET_MODE_BITSIZE (GET_MODE (x)))
&& (INTVAL (XEXP (x, 1))
- & ~ nonzero_bits (XEXP (x, 0), GET_MODE (x))) == 0)
+ & ~nonzero_bits (XEXP (x, 0), GET_MODE (x))) == 0)
{
temp = GEN_INT ((INTVAL (XEXP (x, 1)) & mask)
- << INTVAL (XEXP (XEXP (x, 0), 1)));
+ << INTVAL (XEXP (XEXP (x, 0), 1)));
temp = gen_binary (GET_CODE (x), GET_MODE (x),
XEXP (XEXP (x, 0), 0), temp);
x = gen_binary (LSHIFTRT, GET_MODE (x), temp,
@@ -6527,7 +6847,7 @@ force_to_mode (x, mode, mask, reg, just_select)
binop:
/* For most binary operations, just propagate into the operation and
- change the mode if we have an operation of that mode. */
+ change the mode if we have an operation of that mode. */
op0 = gen_lowpart_for_combine (op_mode,
force_to_mode (XEXP (x, 0), mode, mask,
@@ -6543,7 +6863,7 @@ force_to_mode (x, mode, mask, reg, just_select)
if (GET_CODE (op1) == CONST_INT && (code == IOR || code == XOR)
&& (INTVAL (op1) & mask) != 0)
op1 = GEN_INT (INTVAL (op1) & mask);
-
+
if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
x = gen_binary (code, op_mode, op0, op1);
break;
@@ -6562,7 +6882,7 @@ force_to_mode (x, mode, mask, reg, just_select)
&& (nonzero_bits (XEXP (x, 1), GET_MODE (XEXP (x, 1)))
< (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode))))
break;
-
+
/* If the shift count is a constant and we can do arithmetic in
the mode of the shift, refine which bits we need. Otherwise, use the
conservative form of the mask. */
@@ -6579,7 +6899,7 @@ force_to_mode (x, mode, mask, reg, just_select)
mask, reg, next_select));
if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0))
- x = gen_binary (code, op_mode, op0, XEXP (x, 1));
+ x = gen_binary (code, op_mode, op0, XEXP (x, 1));
break;
case LSHIFTRT:
@@ -6592,18 +6912,19 @@ force_to_mode (x, mode, mask, reg, just_select)
&& GET_MODE_BITSIZE (op_mode) <= HOST_BITS_PER_WIDE_INT)
{
rtx inner = XEXP (x, 0);
+ unsigned HOST_WIDE_INT inner_mask;
/* Select the mask of the bits we need for the shift operand. */
- mask <<= INTVAL (XEXP (x, 1));
+ inner_mask = mask << INTVAL (XEXP (x, 1));
/* We can only change the mode of the shift if we can do arithmetic
- in the mode of the shift and MASK is no wider than the width of
- OP_MODE. */
+ in the mode of the shift and INNER_MASK is no wider than the
+ width of OP_MODE. */
if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT
- || (mask & ~ GET_MODE_MASK (op_mode)) != 0)
+ || (inner_mask & ~GET_MODE_MASK (op_mode)) != 0)
op_mode = GET_MODE (x);
- inner = force_to_mode (inner, op_mode, mask, reg, next_select);
+ inner = force_to_mode (inner, op_mode, inner_mask, reg, next_select);
if (GET_MODE (x) != op_mode || inner != XEXP (x, 0))
x = gen_binary (LSHIFTRT, op_mode, inner, XEXP (x, 1));
@@ -6615,16 +6936,24 @@ force_to_mode (x, mode, mask, reg, just_select)
if (GET_CODE (x) == LSHIFTRT
&& GET_CODE (XEXP (x, 1)) == CONST_INT
+ /* The shift puts one of the sign bit copies in the least significant
+ bit. */
&& ((INTVAL (XEXP (x, 1))
+ num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
>= GET_MODE_BITSIZE (GET_MODE (x)))
&& exact_log2 (mask + 1) >= 0
- && (num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
+ /* Number of bits left after the shift must be more than the mask
+ needs. */
+ && ((INTVAL (XEXP (x, 1)) + exact_log2 (mask + 1))
+ <= GET_MODE_BITSIZE (GET_MODE (x)))
+ /* Must be more sign bit copies than the mask needs. */
+ && ((int) num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
>= exact_log2 (mask + 1)))
x = gen_binary (LSHIFTRT, GET_MODE (x), XEXP (x, 0),
GEN_INT (GET_MODE_BITSIZE (GET_MODE (x))
- exact_log2 (mask + 1)));
- break;
+
+ goto shiftrt;
case ASHIFTRT:
/* If we are just looking for the sign bit, we don't need this shift at
@@ -6646,13 +6975,13 @@ force_to_mode (x, mode, mask, reg, just_select)
{
int i = -1;
- /* If the considered data is wider then HOST_WIDE_INT, we can't
+ /* If the considered data is wider than HOST_WIDE_INT, we can't
represent a mask for all its bits in a single scalar.
But we only care about the lower bits, so calculate these. */
if (GET_MODE_BITSIZE (GET_MODE (x)) > HOST_BITS_PER_WIDE_INT)
{
- nonzero = ~ (HOST_WIDE_INT) 0;
+ nonzero = ~(HOST_WIDE_INT) 0;
/* GET_MODE_BITSIZE (GET_MODE (x)) - INTVAL (XEXP (x, 1))
is the number of bits a full-width mask would have set.
@@ -6671,7 +7000,7 @@ force_to_mode (x, mode, mask, reg, just_select)
nonzero >>= INTVAL (XEXP (x, 1));
}
- if ((mask & ~ nonzero) == 0
+ if ((mask & ~nonzero) == 0
|| (i = exact_log2 (mask)) >= 0)
{
x = simplify_shift_const
@@ -6689,7 +7018,9 @@ force_to_mode (x, mode, mask, reg, just_select)
if (mask == 1)
x = gen_binary (LSHIFTRT, GET_MODE (x), XEXP (x, 0), XEXP (x, 1));
- /* If this is a sign-extension operation that just affects bits
+ shiftrt:
+
+ /* If this is a zero- or sign-extension operation that just affects bits
we don't care about, remove it. Be sure the call above returned
something that is still a shift. */
@@ -6724,10 +7055,10 @@ force_to_mode (x, mode, mask, reg, just_select)
INTVAL (temp), reg, next_select));
}
break;
-
+
case NEG:
/* If we just want the low-order bit, the NEG isn't needed since it
- won't change the low-order bit. */
+ won't change the low-order bit. */
if (mask == 1)
return force_to_mode (XEXP (x, 0), mode, mask, reg, just_select);
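/* A standalone illustration (hypothetical helper, not from combine.c):
   negation never changes the low-order bit, so under mask 1 the NEG
   can simply be dropped, as the code above does.  */
#include <assert.h>

static void
check_neg_keeps_low_bit (unsigned int x)
{
  assert (((0u - x) & 1u) == (x & 1u));
}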
@@ -6765,14 +7096,14 @@ force_to_mode (x, mode, mask, reg, just_select)
force_to_mode (XEXP (x, 0), mode, mask,
reg, next_select));
if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0))
- x = gen_unary (code, op_mode, op_mode, op0);
+ x = simplify_gen_unary (code, op_mode, op0, op_mode);
break;
case NE:
/* (and (ne FOO 0) CONST) can be (and FOO CONST) if CONST is included
in STORE_FLAG_VALUE and FOO has a single bit that might be nonzero,
which is equal to STORE_FLAG_VALUE. */
- if ((mask & ~ STORE_FLAG_VALUE) == 0 && XEXP (x, 1) == const0_rtx
+ if ((mask & ~STORE_FLAG_VALUE) == 0 && XEXP (x, 1) == const0_rtx
&& exact_log2 (nonzero_bits (XEXP (x, 0), mode)) >= 0
&& nonzero_bits (XEXP (x, 0), mode) == STORE_FLAG_VALUE)
return force_to_mode (XEXP (x, 0), mode, mask, reg, next_select);
@@ -6792,7 +7123,7 @@ force_to_mode (x, mode, mask, reg, just_select)
force_to_mode (XEXP (x, 2), mode,
mask, reg,next_select)));
break;
-
+
default:
break;
}
@@ -6815,17 +7146,26 @@ if_then_else_cond (x, ptrue, pfalse)
{
enum machine_mode mode = GET_MODE (x);
enum rtx_code code = GET_CODE (x);
- int size = GET_MODE_BITSIZE (mode);
rtx cond0, cond1, true0, true1, false0, false1;
unsigned HOST_WIDE_INT nz;
+ /* If we are comparing a value against zero, we are done. */
+ if ((code == NE || code == EQ)
+ && GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) == 0)
+ {
+ *ptrue = (code == NE) ? const_true_rtx : const0_rtx;
+ *pfalse = (code == NE) ? const0_rtx : const_true_rtx;
+ return XEXP (x, 0);
+ }
+
/* If this is a unary operation whose operand has one of two values, apply
our opcode to compute those values. */
- if (GET_RTX_CLASS (code) == '1'
- && (cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0)) != 0)
+ else if (GET_RTX_CLASS (code) == '1'
+ && (cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0)) != 0)
{
- *ptrue = gen_unary (code, mode, GET_MODE (XEXP (x, 0)), true0);
- *pfalse = gen_unary (code, mode, GET_MODE (XEXP (x, 0)), false0);
+ *ptrue = simplify_gen_unary (code, mode, true0, GET_MODE (XEXP (x, 0)));
+ *pfalse = simplify_gen_unary (code, mode, false0,
+ GET_MODE (XEXP (x, 0)));
return cond0;
}
@@ -6865,7 +7205,7 @@ if_then_else_cond (x, ptrue, pfalse)
if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
&& (code == PLUS || code == IOR || code == XOR || code == MINUS
- || code == UMAX)
+ || code == UMAX)
&& GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
{
rtx op0 = XEXP (XEXP (x, 0), 1);
@@ -6876,26 +7216,27 @@ if_then_else_cond (x, ptrue, pfalse)
if (GET_RTX_CLASS (GET_CODE (cond0)) == '<'
&& GET_RTX_CLASS (GET_CODE (cond1)) == '<'
- && reversible_comparison_p (cond1)
- && ((GET_CODE (cond0) == reverse_condition (GET_CODE (cond1))
+ && ((GET_CODE (cond0) == combine_reversed_comparison_code (cond1)
&& rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
&& rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
|| ((swap_condition (GET_CODE (cond0))
- == reverse_condition (GET_CODE (cond1)))
+ == combine_reversed_comparison_code (cond1))
&& rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
&& rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
&& ! side_effects_p (x))
{
*ptrue = gen_binary (MULT, mode, op0, const_true_rtx);
- *pfalse = gen_binary (MULT, mode,
- (code == MINUS
- ? gen_unary (NEG, mode, mode, op1) : op1),
+ *pfalse = gen_binary (MULT, mode,
+ (code == MINUS
+ ? simplify_gen_unary (NEG, mode, op1,
+ mode)
+ : op1),
const_true_rtx);
return cond0;
}
}
- /* Similarly for MULT, AND and UMIN, execpt that for these the result
+ /* Similarly for MULT, AND and UMIN, except that for these the result
is always zero. */
if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
&& (code == MULT || code == AND || code == UMIN)
@@ -6906,12 +7247,11 @@ if_then_else_cond (x, ptrue, pfalse)
if (GET_RTX_CLASS (GET_CODE (cond0)) == '<'
&& GET_RTX_CLASS (GET_CODE (cond1)) == '<'
- && reversible_comparison_p (cond1)
- && ((GET_CODE (cond0) == reverse_condition (GET_CODE (cond1))
+ && ((GET_CODE (cond0) == combine_reversed_comparison_code (cond1)
&& rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
&& rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
|| ((swap_condition (GET_CODE (cond0))
- == reverse_condition (GET_CODE (cond1)))
+ == combine_reversed_comparison_code (cond1))
&& rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
&& rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
&& ! side_effects_p (x))
@@ -6939,18 +7279,16 @@ if_then_else_cond (x, ptrue, pfalse)
return cond0;
}
- /* If X is a normal SUBREG with both inner and outer modes integral,
- we can narrow both the true and false values of the inner expression,
- if there is a condition. */
- else if (code == SUBREG && GET_MODE_CLASS (mode) == MODE_INT
- && GET_MODE_CLASS (GET_MODE (SUBREG_REG (x))) == MODE_INT
- && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))
+ /* If X is a SUBREG, we can narrow both the true and false values
+ of the inner expression, if there is a condition. */
+ else if (code == SUBREG
&& 0 != (cond0 = if_then_else_cond (SUBREG_REG (x),
&true0, &false0)))
{
- *ptrue = force_to_mode (true0, mode, GET_MODE_MASK (mode), NULL_RTX, 0);
- *pfalse
- = force_to_mode (false0, mode, GET_MODE_MASK (mode), NULL_RTX, 0);
+ *ptrue = simplify_gen_subreg (mode, true0,
+ GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
+ *pfalse = simplify_gen_subreg (mode, false0,
+ GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
return cond0;
}
@@ -6961,16 +7299,28 @@ if_then_else_cond (x, ptrue, pfalse)
|| ((cond0 = get_last_value (x)) != 0 && CONSTANT_P (cond0)))
;
- /* If X is known to be either 0 or -1, those are the true and
+ /* If we're in BImode, canonicalize on 0 and STORE_FLAG_VALUE, as that
+ will be least confusing to the rest of the compiler. */
+ else if (mode == BImode)
+ {
+ *ptrue = GEN_INT (STORE_FLAG_VALUE), *pfalse = const0_rtx;
+ return x;
+ }
+
+ /* If X is known to be either 0 or -1, those are the true and
false values when testing X. */
- else if (num_sign_bit_copies (x, mode) == size)
+ else if (x == constm1_rtx || x == const0_rtx
+ || (mode != VOIDmode
+ && num_sign_bit_copies (x, mode) == GET_MODE_BITSIZE (mode)))
{
*ptrue = constm1_rtx, *pfalse = const0_rtx;
return x;
}
/* Likewise for 0 or a single bit. */
- else if (exact_log2 (nz = nonzero_bits (x, mode)) >= 0)
+ else if (mode != VOIDmode
+ && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ && exact_log2 (nz = nonzero_bits (x, mode)) >= 0)
{
*ptrue = GEN_INT (nz), *pfalse = const0_rtx;
return x;
@@ -6997,13 +7347,21 @@ known_cond (x, cond, reg, val)
{
enum rtx_code code = GET_CODE (x);
rtx temp;
- char *fmt;
+ const char *fmt;
int i, j;
if (side_effects_p (x))
return x;
- if (cond == EQ && rtx_equal_p (x, reg))
+ /* If either operand of the condition is a floating point value,
+ then we have to avoid collapsing an EQ comparison. */
+ if (cond == EQ
+ && rtx_equal_p (x, reg)
+ && ! FLOAT_MODE_P (GET_MODE (x))
+ && ! FLOAT_MODE_P (GET_MODE (val)))
+ return val;
+
+ if (cond == UNEQ && rtx_equal_p (x, reg))
return val;
/* If X is (abs REG) and we know something about REG's relationship
@@ -7015,8 +7373,9 @@ known_cond (x, cond, reg, val)
case GE: case GT: case EQ:
return XEXP (x, 0);
case LT: case LE:
- return gen_unary (NEG, GET_MODE (XEXP (x, 0)), GET_MODE (XEXP (x, 0)),
- XEXP (x, 0));
+ return simplify_gen_unary (NEG, GET_MODE (XEXP (x, 0)),
+ XEXP (x, 0),
+ GET_MODE (XEXP (x, 0)));
default:
break;
}
@@ -7032,17 +7391,28 @@ known_cond (x, cond, reg, val)
if (rtx_equal_p (XEXP (x, 0), reg) && rtx_equal_p (XEXP (x, 1), val))
{
if (GET_RTX_CLASS (code) == '<')
- return (comparison_dominates_p (cond, code) ? const_true_rtx
- : (comparison_dominates_p (cond,
- reverse_condition (code))
- ? const0_rtx : x));
+ {
+ if (comparison_dominates_p (cond, code))
+ return const_true_rtx;
+ code = combine_reversed_comparison_code (x);
+ if (code != UNKNOWN
+ && comparison_dominates_p (cond, code))
+ return const0_rtx;
+ else
+ return x;
+ }
else if (code == SMAX || code == SMIN
|| code == UMIN || code == UMAX)
{
int unsignedp = (code == UMIN || code == UMAX);
- if (code == SMAX || code == UMAX)
+ /* Do not reverse the condition when it is NE or EQ.
+ This is because we cannot conclude anything about
+ the value of 'SMAX (x, y)' when x is not equal to y,
+ but we can when x equals y. */
+ if ((code == SMAX || code == UMAX)
+ && ! (cond == EQ || cond == NE))
cond = reverse_condition (cond);
switch (cond)
@@ -7127,7 +7497,8 @@ make_field_assignment (x)
rtx assign;
rtx rhs, lhs;
HOST_WIDE_INT c1;
- int pos, len;
+ HOST_WIDE_INT pos;
+ unsigned HOST_WIDE_INT len;
rtx other;
enum machine_mode mode;
@@ -7150,7 +7521,7 @@ make_field_assignment (x)
else if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == SUBREG
&& subreg_lowpart_p (XEXP (src, 0))
- && (GET_MODE_SIZE (GET_MODE (XEXP (src, 0)))
+ && (GET_MODE_SIZE (GET_MODE (XEXP (src, 0)))
< GET_MODE_SIZE (GET_MODE (SUBREG_REG (XEXP (src, 0)))))
&& GET_CODE (SUBREG_REG (XEXP (src, 0))) == ROTATE
&& INTVAL (XEXP (SUBREG_REG (XEXP (src, 0)), 0)) == -2
@@ -7202,7 +7573,7 @@ make_field_assignment (x)
else
return x;
- pos = get_pos_from_mask ((~ c1) & GET_MODE_MASK (GET_MODE (dest)), &len);
+ pos = get_pos_from_mask ((~c1) & GET_MODE_MASK (GET_MODE (dest)), &len);
if (pos < 0 || pos + len > GET_MODE_BITSIZE (GET_MODE (dest))
|| GET_MODE_BITSIZE (GET_MODE (dest)) > HOST_BITS_PER_WIDE_INT
|| (c1 & nonzero_bits (other, GET_MODE (dest))) != 0)
@@ -7214,7 +7585,7 @@ make_field_assignment (x)
/* The mode to use for the source is the mode of the assignment, or of
what is inside a possible STRICT_LOW_PART. */
- mode = (GET_CODE (assign) == STRICT_LOW_PART
+ mode = (GET_CODE (assign) == STRICT_LOW_PART
? GET_MODE (XEXP (assign, 0)) : GET_MODE (assign));
/* Shift OTHER right POS places and make it the source, restricting it
@@ -7224,11 +7595,11 @@ make_field_assignment (x)
GET_MODE (src), other, pos),
mode,
GET_MODE_BITSIZE (mode) >= HOST_BITS_PER_WIDE_INT
- ? GET_MODE_MASK (mode)
- : ((HOST_WIDE_INT) 1 << len) - 1,
+ ? ~(unsigned HOST_WIDE_INT) 0
+ : ((unsigned HOST_WIDE_INT) 1 << len) - 1,
dest, 0);
- return gen_rtx_combine (SET, VOIDmode, assign, src);
+ return gen_rtx_SET (VOIDmode, assign, src);
}
/* See if X is of the form (+ (* a c) (* b c)) and convert to (* (+ a b) c)
@@ -7291,7 +7662,7 @@ apply_distributive_law (x)
case SUBREG:
/* Non-paradoxical SUBREGs distribute over all operations, provided
- the inner modes and word numbers are the same, this is an extraction
+ the inner modes and byte offsets are the same, this is an extraction
of a low-order part, we don't convert an fp operation to int or
vice versa, and we would not be converting a single-word
operation into a multi-word operation. The latter test is not
@@ -7302,7 +7673,7 @@ apply_distributive_law (x)
We produce the result slightly differently in this case. */
if (GET_MODE (SUBREG_REG (lhs)) != GET_MODE (SUBREG_REG (rhs))
- || SUBREG_WORD (lhs) != SUBREG_WORD (rhs)
+ || SUBREG_BYTE (lhs) != SUBREG_BYTE (rhs)
|| ! subreg_lowpart_p (lhs)
|| (GET_MODE_CLASS (GET_MODE (lhs))
!= GET_MODE_CLASS (GET_MODE (SUBREG_REG (lhs))))
@@ -7345,7 +7716,7 @@ apply_distributive_law (x)
if (code == XOR && inner_code == IOR)
{
inner_code = AND;
- other = gen_unary (NOT, GET_MODE (x), GET_MODE (x), other);
+ other = simplify_gen_unary (NOT, GET_MODE (x), other, GET_MODE (x));
}
/* We may be able to continue distributing the result, so call
@@ -7369,7 +7740,6 @@ simplify_and_const_int (x, mode, varop, constop)
unsigned HOST_WIDE_INT constop;
{
unsigned HOST_WIDE_INT nonzero;
- int width = GET_MODE_BITSIZE (mode);
int i;
/* Simplify VAROP knowing that we will be only looking at some of the
@@ -7387,19 +7757,6 @@ simplify_and_const_int (x, mode, varop, constop)
nonzero = nonzero_bits (varop, mode) & GET_MODE_MASK (mode);
- /* If this would be an entire word for the target, but is not for
- the host, then sign-extend on the host so that the number will look
- the same way on the host that it would on the target.
-
- For example, when building a 64 bit alpha hosted 32 bit sparc
- targeted compiler, then we want the 32 bit unsigned value -1 to be
- represented as a 64 bit value -1, and not as 0x00000000ffffffff.
- The later confuses the sparc backend. */
-
- if (BITS_PER_WORD < HOST_BITS_PER_WIDE_INT && BITS_PER_WORD == width
- && (nonzero & ((HOST_WIDE_INT) 1 << (width - 1))))
- nonzero |= ((HOST_WIDE_INT) (-1) << width);
-
/* Turn off all bits in the constant that are known to already be zero.
Thus, if the AND isn't needed at all, we will have CONSTOP == NONZERO_BITS
which is tested below. */
@@ -7415,7 +7772,7 @@ simplify_and_const_int (x, mode, varop, constop)
if (GET_CODE (varop) == NEG && nonzero_bits (XEXP (varop, 0), mode) == 1
&& (i = exact_log2 (constop)) >= 0)
return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (varop, 0), i);
-
+
/* If VAROP is an IOR or XOR, apply the AND to both branches of the IOR
or XOR, then try to apply the distributive law. This may eliminate
operations if either branch can be simplified because of the AND.
@@ -7433,6 +7790,23 @@ simplify_and_const_int (x, mode, varop, constop)
simplify_and_const_int (NULL_RTX, GET_MODE (varop),
XEXP (varop, 1), constop))));
+ /* If VAROP is PLUS, and the constant is a mask of low bits, distribute
+ the AND and see if one of the operands simplifies to zero. If so, we
+ may eliminate it. */
+
+ if (GET_CODE (varop) == PLUS
+ && exact_log2 (constop + 1) >= 0)
+ {
+ rtx o0, o1;
+
+ o0 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 0), constop);
+ o1 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 1), constop);
+ if (o0 == const0_rtx)
+ return o1;
+ if (o1 == const0_rtx)
+ return o0;
+ }
+
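The new PLUS case rests on the fact that masking by 2^k - 1 distributes over addition modulo 2^k. A minimal standalone check of that identity (illustrative C, not part of the patch):

#include <assert.h>
#include <stdint.h>

int
main (void)
{
  uint32_t m = 0xff;     /* 2^8 - 1: a mask of low bits */
  uint32_t a = 0x1200;   /* a & m == 0 */
  uint32_t b = 0x3456;

  /* Low-bit masking distributes over addition modulo 2^k...  */
  assert (((a + b) & m) == (((a & m) + (b & m)) & m));
  /* ...so an operand that masks to zero can simply be dropped.  */
  assert (((a + b) & m) == (b & m));
  return 0;
}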
/* Get VAROP in MODE. Try to get a SUBREG if not. Don't make a new SUBREG
if we already had one (just check for the simplest cases). */
if (x && GET_CODE (XEXP (x, 0)) == SUBREG
@@ -7449,18 +7823,22 @@ simplify_and_const_int (x, mode, varop, constop)
/* If we are only masking insignificant bits, return VAROP. */
if (constop == nonzero)
x = varop;
-
- /* Otherwise, return an AND. See how much, if any, of X we can use. */
- else if (x == 0 || GET_CODE (x) != AND || GET_MODE (x) != mode)
- x = gen_binary (AND, mode, varop, GEN_INT (constop));
-
else
{
- if (GET_CODE (XEXP (x, 1)) != CONST_INT
- || (unsigned HOST_WIDE_INT) INTVAL (XEXP (x, 1)) != constop)
- SUBST (XEXP (x, 1), GEN_INT (constop));
+ /* Otherwise, return an AND. */
+ constop = trunc_int_for_mode (constop, mode);
+ /* See how much, if any, of X we can use. */
+ if (x == 0 || GET_CODE (x) != AND || GET_MODE (x) != mode)
+ x = gen_binary (AND, mode, varop, GEN_INT (constop));
- SUBST (XEXP (x, 0), varop);
+ else
+ {
+ if (GET_CODE (XEXP (x, 1)) != CONST_INT
+ || (unsigned HOST_WIDE_INT) INTVAL (XEXP (x, 1)) != constop)
+ SUBST (XEXP (x, 1), GEN_INT (constop));
+
+ SUBST (XEXP (x, 0), varop);
+ }
}
return x;
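Here, as in merge_outer_ops further down, the removed sign-extend-on-the-host boilerplate is replaced by a single call to trunc_int_for_mode. A rough standalone model of its effect for an 8-bit mode (the helper name below is made up for illustration):

#include <assert.h>

static long
trunc_for_8bit_mode (long c)
{
  c &= 0xff;          /* keep only the mode's bits */
  if (c & 0x80)       /* sign bit of the mode is set... */
    c |= ~0xffL;      /* ...so sign-extend on the host */
  return c;
}

int
main (void)
{
  assert (trunc_for_8bit_mode (0xff) == -1L);   /* canonical, host-width -1 */
  assert (trunc_for_8bit_mode (0x7f) == 0x7fL); /* non-negative: unchanged */
  return 0;
}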
@@ -7487,7 +7865,7 @@ nonzero_bits (x, mode)
unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
unsigned HOST_WIDE_INT inner_nz;
enum rtx_code code;
- int mode_width = GET_MODE_BITSIZE (mode);
+ unsigned int mode_width = GET_MODE_BITSIZE (mode);
rtx tem;
/* For floating-point values, assume all bits are needed. */
@@ -7509,7 +7887,7 @@ nonzero_bits (x, mode)
#ifndef WORD_REGISTER_OPERATIONS
/* If MODE is wider than X, but both are a single word for both the host
- and target machines, we can compute this from which bits of the
+ and target machines, we can compute this from which bits of the
object might be nonzero in its own mode, taking into account the fact
that on many CISC machines, accessing an object in a wider mode
causes the high-order bits to become undefined. So they are
@@ -7521,7 +7899,7 @@ nonzero_bits (x, mode)
&& GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (GET_MODE (x)))
{
nonzero &= nonzero_bits (x, GET_MODE (x));
- nonzero |= GET_MODE_MASK (mode) & ~ GET_MODE_MASK (GET_MODE (x));
+ nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x));
return nonzero;
}
#endif
@@ -7530,11 +7908,11 @@ nonzero_bits (x, mode)
switch (code)
{
case REG:
-#ifdef POINTERS_EXTEND_UNSIGNED
+#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
/* If pointers extend unsigned and this is a pointer in Pmode, say that
all the bits above ptr_mode are known to be zero. */
if (POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
- && REGNO_POINTER_FLAG (REGNO (x)))
+ && REG_POINTER (x))
nonzero &= GET_MODE_MASK (ptr_mode);
#endif
@@ -7556,20 +7934,20 @@ nonzero_bits (x, mode)
&& REGNO (x) <= LAST_VIRTUAL_REGISTER))
#ifdef STACK_BIAS
&& !STACK_BIAS
-#endif
+#endif
)
{
int sp_alignment = STACK_BOUNDARY / BITS_PER_UNIT;
#ifdef PUSH_ROUNDING
- if (REGNO (x) == STACK_POINTER_REGNUM)
+ if (REGNO (x) == STACK_POINTER_REGNUM && PUSH_ARGS)
sp_alignment = MIN (PUSH_ROUNDING (1), sp_alignment);
#endif
/* We must return here, otherwise we may get a worse result from
one of the choices below. There is nothing useful below as
far as the stack pointer is concerned. */
- return nonzero &= ~ (sp_alignment - 1);
+ return nonzero &= ~(sp_alignment - 1);
}
#endif
@@ -7580,8 +7958,11 @@ nonzero_bits (x, mode)
if (reg_last_set_value[REGNO (x)] != 0
&& reg_last_set_mode[REGNO (x)] == mode
- && (REG_N_SETS (REGNO (x)) == 1
- || reg_last_set_label[REGNO (x)] == label_tick)
+ && (reg_last_set_label[REGNO (x)] == label_tick
+ || (REGNO (x) >= FIRST_PSEUDO_REGISTER
+ && REG_N_SETS (REGNO (x)) == 1
+ && ! REGNO_REG_SET_P (BASIC_BLOCK (0)->global_live_at_start,
+ REGNO (x))))
&& INSN_CUID (reg_last_set[REGNO (x)]) < subst_low_cuid)
return reg_last_set_nonzero_bits[REGNO (x)];
@@ -7594,7 +7975,7 @@ nonzero_bits (x, mode)
constant that would appear negative in the mode of X,
sign-extend it for use in reg_nonzero_bits because some
machines (maybe most) will actually do the sign-extension
- and this is the conservative approach.
+ and this is the conservative approach.
??? For 2.5, try to tighten up the MD files in this regard
instead of this kludge. */
@@ -7612,7 +7993,14 @@ nonzero_bits (x, mode)
return nonzero_bits (tem, mode);
}
else if (nonzero_sign_valid && reg_nonzero_bits[REGNO (x)])
- return reg_nonzero_bits[REGNO (x)] & nonzero;
+ {
+ unsigned HOST_WIDE_INT mask = reg_nonzero_bits[REGNO (x)];
+
+ if (GET_MODE_BITSIZE (GET_MODE (x)) < mode_width)
+ /* We don't know anything about the upper bits. */
+ mask |= GET_MODE_MASK (mode) ^ GET_MODE_MASK (GET_MODE (x));
+ return nonzero & mask;
+ }
else
return nonzero;
@@ -7637,10 +8025,12 @@ nonzero_bits (x, mode)
break;
case EQ: case NE:
- case GT: case GTU:
- case LT: case LTU:
- case GE: case GEU:
- case LE: case LEU:
+ case UNEQ: case LTGT:
+ case GT: case GTU: case UNGT:
+ case LT: case LTU: case UNLT:
+ case GE: case GEU: case UNGE:
+ case LE: case LEU: case UNLE:
+ case UNORDERED: case ORDERED:
/* If this produces an integer result, we know which bits are set.
Code here used to clear bits outside the mode of X, but that is
@@ -7661,7 +8051,7 @@ nonzero_bits (x, mode)
#endif
if (GET_MODE_SIZE (GET_MODE (x)) < mode_width)
- nonzero |= (GET_MODE_MASK (mode) & ~ GET_MODE_MASK (GET_MODE (x)));
+ nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x)));
break;
case ABS:
@@ -7696,7 +8086,7 @@ nonzero_bits (x, mode)
& (((HOST_WIDE_INT) 1
<< (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0))) - 1))))
inner_nz |= (GET_MODE_MASK (mode)
- & ~ GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
+ & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
}
nonzero &= inner_nz;
@@ -7732,7 +8122,7 @@ nonzero_bits (x, mode)
= (nz0 & ((HOST_WIDE_INT) 1 << (mode_width - 1)));
HOST_WIDE_INT op1_maybe_minusp
= (nz1 & ((HOST_WIDE_INT) 1 << (mode_width - 1)));
- int result_width = mode_width;
+ unsigned int result_width = mode_width;
int result_low = 0;
switch (code)
@@ -7740,20 +8130,20 @@ nonzero_bits (x, mode)
case PLUS:
#ifdef STACK_BIAS
if (STACK_BIAS
- && (XEXP (x, 0) == stack_pointer_rtx
- || XEXP (x, 0) == frame_pointer_rtx)
- && GET_CODE (XEXP (x, 1)) == CONST_INT)
+ && (XEXP (x, 0) == stack_pointer_rtx
+ || XEXP (x, 0) == frame_pointer_rtx)
+ && GET_CODE (XEXP (x, 1)) == CONST_INT)
{
int sp_alignment = STACK_BOUNDARY / BITS_PER_UNIT;
- nz0 = (GET_MODE_MASK (mode) & ~ (sp_alignment - 1));
- nz1 = INTVAL (XEXP (x, 1)) - STACK_BIAS;
- width0 = floor_log2 (nz0) + 1;
- width1 = floor_log2 (nz1) + 1;
- low0 = floor_log2 (nz0 & -nz0);
- low1 = floor_log2 (nz1 & -nz1);
+ nz0 = (GET_MODE_MASK (mode) & ~(sp_alignment - 1));
+ nz1 = INTVAL (XEXP (x, 1)) - STACK_BIAS;
+ width0 = floor_log2 (nz0) + 1;
+ width1 = floor_log2 (nz1) + 1;
+ low0 = floor_log2 (nz0 & -nz0);
+ low1 = floor_log2 (nz1 & -nz1);
}
-#endif
+#endif
result_width = MAX (width0, width1) + 1;
result_low = MIN (low0, low1);
break;
@@ -7765,18 +8155,26 @@ nonzero_bits (x, mode)
result_low = low0 + low1;
break;
case DIV:
+ if (width1 == 0)
+ break;
if (! op0_maybe_minusp && ! op1_maybe_minusp)
result_width = width0;
break;
case UDIV:
+ if (width1 == 0)
+ break;
result_width = width0;
break;
case MOD:
+ if (width1 == 0)
+ break;
if (! op0_maybe_minusp && ! op1_maybe_minusp)
result_width = MIN (width0, width1);
result_low = MIN (low0, low1);
break;
case UMOD:
+ if (width1 == 0)
+ break;
result_width = MIN (width0, width1);
result_low = MIN (low0, low1);
break;
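The new width1 == 0 guards bail out when nonzero_bits proves the divisor is zero, where no width claim is meaningful; otherwise the bounds are the usual division ones, e.g. for the unsigned cases (illustrative check):

#include <assert.h>
#include <stdint.h>

int
main (void)
{
  uint32_t x = 0xdeadbeefu;
  uint32_t y = 0x1f;        /* the divisor fits in 5 bits */

  assert ((x % y) < y);     /* so the remainder fits in 5 bits too */
  assert ((x / y) <= x);    /* and the quotient is no wider than x */
  return 0;
}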
@@ -7788,7 +8186,17 @@ nonzero_bits (x, mode)
nonzero &= ((HOST_WIDE_INT) 1 << result_width) - 1;
if (result_low > 0)
- nonzero &= ~ (((HOST_WIDE_INT) 1 << result_low) - 1);
+ nonzero &= ~(((HOST_WIDE_INT) 1 << result_low) - 1);
+
+#ifdef POINTERS_EXTEND_UNSIGNED
+ /* If pointers extend unsigned and this is an addition or subtraction
+ to a pointer in Pmode, all the bits above ptr_mode are known to be
+ zero. */
+ if (POINTERS_EXTEND_UNSIGNED > 0 && GET_MODE (x) == Pmode
+ && (code == PLUS || code == MINUS)
+ && GET_CODE (XEXP (x, 0)) == REG && REG_POINTER (XEXP (x, 0)))
+ nonzero &= GET_MODE_MASK (ptr_mode);
+#endif
}
break;
@@ -7820,8 +8228,10 @@ nonzero_bits (x, mode)
/* If this is a typical RISC machine, we only have to worry
about the way loads are extended. */
if (LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) == SIGN_EXTEND
- ? (nonzero
- & (1L << (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))) - 1)))
+ ? (((nonzero
+ & (((unsigned HOST_WIDE_INT) 1
+ << (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))) - 1))))
+ != 0))
: LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) != ZERO_EXTEND)
#endif
{
@@ -7831,7 +8241,7 @@ nonzero_bits (x, mode)
if (GET_MODE_SIZE (GET_MODE (x))
> GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
nonzero |= (GET_MODE_MASK (GET_MODE (x))
- & ~ GET_MODE_MASK (GET_MODE (SUBREG_REG (x))));
+ & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x))));
}
}
break;
@@ -7851,7 +8261,7 @@ nonzero_bits (x, mode)
&& INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
{
enum machine_mode inner_mode = GET_MODE (x);
- int width = GET_MODE_BITSIZE (inner_mode);
+ unsigned int width = GET_MODE_BITSIZE (inner_mode);
int count = INTVAL (XEXP (x, 1));
unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (inner_mode);
unsigned HOST_WIDE_INT op_nonzero = nonzero_bits (XEXP (x, 0), mode);
@@ -7859,7 +8269,7 @@ nonzero_bits (x, mode)
unsigned HOST_WIDE_INT outer = 0;
if (mode_width > width)
- outer = (op_nonzero & nonzero & ~ mode_mask);
+ outer = (op_nonzero & nonzero & ~mode_mask);
if (code == LSHIFTRT)
inner >>= count;
@@ -7892,7 +8302,7 @@ nonzero_bits (x, mode)
nonzero &= (nonzero_bits (XEXP (x, 1), mode)
| nonzero_bits (XEXP (x, 2), mode));
break;
-
+
default:
break;
}
@@ -7908,13 +8318,13 @@ nonzero_bits (x, mode)
VOIDmode, X will be used in its own mode. The returned value will always
be between 1 and the number of bits in MODE. */
-static int
+static unsigned int
num_sign_bit_copies (x, mode)
rtx x;
enum machine_mode mode;
{
enum rtx_code code = GET_CODE (x);
- int bitwidth;
+ unsigned int bitwidth;
int num0, num1, result;
unsigned HOST_WIDE_INT nonzero;
rtx tem;
@@ -7933,9 +8343,12 @@ num_sign_bit_copies (x, mode)
/* For a smaller object, just ignore the high bits. */
if (bitwidth < GET_MODE_BITSIZE (GET_MODE (x)))
- return MAX (1, (num_sign_bit_copies (x, GET_MODE (x))
- - (GET_MODE_BITSIZE (GET_MODE (x)) - bitwidth)));
-
+ {
+ num0 = num_sign_bit_copies (x, GET_MODE (x));
+ return MAX (1,
+ num0 - (int) (GET_MODE_BITSIZE (GET_MODE (x)) - bitwidth));
+ }
+
if (GET_MODE (x) != VOIDmode && bitwidth > GET_MODE_BITSIZE (GET_MODE (x)))
{
#ifndef WORD_REGISTER_OPERATIONS
@@ -7960,26 +8373,30 @@ num_sign_bit_copies (x, mode)
{
case REG:
-#ifdef POINTERS_EXTEND_UNSIGNED
+#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
/* If pointers extend signed and this is a pointer in Pmode, say that
all the bits above ptr_mode are known to be sign bit copies. */
if (! POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode && mode == Pmode
- && REGNO_POINTER_FLAG (REGNO (x)))
+ && REG_POINTER (x))
return GET_MODE_BITSIZE (Pmode) - GET_MODE_BITSIZE (ptr_mode) + 1;
#endif
if (reg_last_set_value[REGNO (x)] != 0
&& reg_last_set_mode[REGNO (x)] == mode
- && (REG_N_SETS (REGNO (x)) == 1
- || reg_last_set_label[REGNO (x)] == label_tick)
+ && (reg_last_set_label[REGNO (x)] == label_tick
+ || (REGNO (x) >= FIRST_PSEUDO_REGISTER
+ && REG_N_SETS (REGNO (x)) == 1
+ && ! REGNO_REG_SET_P (BASIC_BLOCK (0)->global_live_at_start,
+ REGNO (x))))
&& INSN_CUID (reg_last_set[REGNO (x)]) < subst_low_cuid)
return reg_last_set_sign_bit_copies[REGNO (x)];
- tem = get_last_value (x);
+ tem = get_last_value (x);
if (tem != 0)
return num_sign_bit_copies (tem, mode);
- if (nonzero_sign_valid && reg_sign_bit_copies[REGNO (x)] != 0)
+ if (nonzero_sign_valid && reg_sign_bit_copies[REGNO (x)] != 0
+ && GET_MODE_BITSIZE (GET_MODE (x)) == bitwidth)
return reg_sign_bit_copies[REGNO (x)];
break;
@@ -7987,7 +8404,8 @@ num_sign_bit_copies (x, mode)
#ifdef LOAD_EXTEND_OP
/* Some RISC machines sign-extend all loads smaller than a word. */
if (LOAD_EXTEND_OP (GET_MODE (x)) == SIGN_EXTEND)
- return MAX (1, bitwidth - GET_MODE_BITSIZE (GET_MODE (x)) + 1);
+ return MAX (1, ((int) bitwidth
+ - (int) GET_MODE_BITSIZE (GET_MODE (x)) + 1));
#endif
break;
@@ -7997,7 +8415,7 @@ num_sign_bit_copies (x, mode)
nonzero = INTVAL (x) & GET_MODE_MASK (mode);
if (bitwidth <= HOST_BITS_PER_WIDE_INT
&& (nonzero & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
- nonzero = (~ nonzero) & GET_MODE_MASK (mode);
+ nonzero = (~nonzero) & GET_MODE_MASK (mode);
return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
@@ -8007,16 +8425,20 @@ num_sign_bit_copies (x, mode)
high-order bits are known to be sign bit copies. */
if (SUBREG_PROMOTED_VAR_P (x) && ! SUBREG_PROMOTED_UNSIGNED_P (x))
- return MAX (bitwidth - GET_MODE_BITSIZE (GET_MODE (x)) + 1,
- num_sign_bit_copies (SUBREG_REG (x), mode));
+ {
+ num0 = num_sign_bit_copies (SUBREG_REG (x), mode);
+ return MAX ((int) bitwidth
+ - (int) GET_MODE_BITSIZE (GET_MODE (x)) + 1,
+ num0);
+ }
/* For a smaller object, just ignore the high bits. */
if (bitwidth <= GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))))
{
num0 = num_sign_bit_copies (SUBREG_REG (x), VOIDmode);
return MAX (1, (num0
- - (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x)))
- - bitwidth)));
+ - (int) (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x)))
+ - bitwidth)));
}
#ifdef WORD_REGISTER_OPERATIONS
@@ -8041,18 +8463,18 @@ num_sign_bit_copies (x, mode)
case SIGN_EXTRACT:
if (GET_CODE (XEXP (x, 1)) == CONST_INT)
- return MAX (1, bitwidth - INTVAL (XEXP (x, 1)));
+ return MAX (1, (int) bitwidth - INTVAL (XEXP (x, 1)));
break;
- case SIGN_EXTEND:
+ case SIGN_EXTEND:
return (bitwidth - GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))
+ num_sign_bit_copies (XEXP (x, 0), VOIDmode));
case TRUNCATE:
/* For a smaller object, just ignore the high bits. */
num0 = num_sign_bit_copies (XEXP (x, 0), VOIDmode);
- return MAX (1, (num0 - (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))
- - bitwidth)));
+ return MAX (1, (num0 - (int) (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))
+ - bitwidth)));
case NOT:
return num_sign_bit_copies (XEXP (x, 0), mode);
@@ -8062,11 +8484,12 @@ num_sign_bit_copies (x, mode)
of sign bit copies, we can just subtract that amount from the
number. */
if (GET_CODE (XEXP (x, 1)) == CONST_INT
- && INTVAL (XEXP (x, 1)) >= 0 && INTVAL (XEXP (x, 1)) < bitwidth)
+ && INTVAL (XEXP (x, 1)) >= 0
+ && INTVAL (XEXP (x, 1)) < (int) bitwidth)
{
num0 = num_sign_bit_copies (XEXP (x, 0), mode);
return MAX (1, num0 - (code == ROTATE ? INTVAL (XEXP (x, 1))
- : bitwidth - INTVAL (XEXP (x, 1))));
+ : (int) bitwidth - INTVAL (XEXP (x, 1))));
}
break;
@@ -8114,8 +8537,21 @@ num_sign_bit_copies (x, mode)
num0 = num_sign_bit_copies (XEXP (x, 0), mode);
num1 = num_sign_bit_copies (XEXP (x, 1), mode);
- return MAX (1, MIN (num0, num1) - 1);
-
+ result = MAX (1, MIN (num0, num1) - 1);
+
+#ifdef POINTERS_EXTEND_UNSIGNED
+ /* If pointers extend signed and this is an addition or subtraction
+ to a pointer in Pmode, all the bits above ptr_mode are known to be
+ sign bit copies. */
+ if (! POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
+ && (code == PLUS || code == MINUS)
+ && GET_CODE (XEXP (x, 0)) == REG && REG_POINTER (XEXP (x, 0)))
+ result = MAX ((int) (GET_MODE_BITSIZE (Pmode)
+ - GET_MODE_BITSIZE (ptr_mode) + 1),
+ result);
+#endif
+ return result;
+
case MULT:
/* The number of bits of the product is the sum of the number of
bits of both terms. However, unless one of the terms is known
@@ -8147,9 +8583,9 @@ num_sign_bit_copies (x, mode)
return 1;
else
return num_sign_bit_copies (XEXP (x, 0), mode);
-
+
case UMOD:
- /* The result must be <= the scond operand. */
+ /* The result must be <= the second operand. */
return num_sign_bit_copies (XEXP (x, 1), mode);
case DIV:
@@ -8181,7 +8617,7 @@ num_sign_bit_copies (x, mode)
num0 = num_sign_bit_copies (XEXP (x, 0), mode);
if (GET_CODE (XEXP (x, 1)) == CONST_INT
&& INTVAL (XEXP (x, 1)) > 0)
- num0 = MIN (bitwidth, num0 + INTVAL (XEXP (x, 1)));
+ num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));
return num0;
@@ -8189,7 +8625,7 @@ num_sign_bit_copies (x, mode)
/* Left shifts destroy copies. */
if (GET_CODE (XEXP (x, 1)) != CONST_INT
|| INTVAL (XEXP (x, 1)) < 0
- || INTVAL (XEXP (x, 1)) >= bitwidth)
+ || INTVAL (XEXP (x, 1)) >= (int) bitwidth)
return 1;
num0 = num_sign_bit_copies (XEXP (x, 0), mode);
@@ -8201,11 +8637,19 @@ num_sign_bit_copies (x, mode)
return MIN (num0, num1);
case EQ: case NE: case GE: case GT: case LE: case LT:
+ case UNEQ: case LTGT: case UNGE: case UNGT: case UNLE: case UNLT:
case GEU: case GTU: case LEU: case LTU:
- if (STORE_FLAG_VALUE == -1)
- return bitwidth;
+ case UNORDERED: case ORDERED:
+ /* If the constant is negative, take its 1's complement and remask.
+ Then see how many zero bits we have. */
+ nonzero = STORE_FLAG_VALUE;
+ if (bitwidth <= HOST_BITS_PER_WIDE_INT
+ && (nonzero & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
+ nonzero = (~nonzero) & GET_MODE_MASK (mode);
+
+ return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
break;
-
+
default:
break;
}
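A worked instance of the computation just added, for a 32-bit mode: STORE_FLAG_VALUE == 1 yields 31 sign-bit copies, STORE_FLAG_VALUE == -1 yields all 32. Standalone sketch (floor_log2 here is a stand-in for GCC's helper):

#include <assert.h>
#include <stdint.h>

static int
floor_log2 (uint32_t x)      /* index of the highest set bit */
{
  int i = -1;
  while (x)
    x >>= 1, i++;
  return i;
}

static int
sign_bit_copies (int32_t sfv)
{
  uint32_t nz = (uint32_t) sfv;
  if (nz & 0x80000000u)      /* negative: complement and remask */
    nz = ~nz;
  return nz == 0 ? 32 : 32 - floor_log2 (nz) - 1;
}

int
main (void)
{
  assert (sign_bit_copies (1) == 31);
  assert (sign_bit_copies (-1) == 32);
  return 0;
}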
@@ -8234,7 +8678,7 @@ num_sign_bit_copies (x, mode)
This function will always return 0 unless called during combine, which
implies that it must be called from a define_split. */
-int
+unsigned int
extended_count (x, mode, unsignedp)
rtx x;
enum machine_mode mode;
@@ -8245,8 +8689,9 @@ extended_count (x, mode, unsignedp)
return (unsignedp
? (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
- && (GET_MODE_BITSIZE (mode) - 1
- - floor_log2 (nonzero_bits (x, mode))))
+ ? (GET_MODE_BITSIZE (mode) - 1
+ - floor_log2 (nonzero_bits (x, mode)))
+ : 0)
: num_sign_bit_copies (x, mode) - 1);
}
@@ -8257,7 +8702,7 @@ extended_count (x, mode, unsignedp)
(with *POP0 being done last).
Return 1 if we can do the operation and update *POP0 and *PCONST0 with
- the resulting operation. *PCOMP_P is set to 1 if we would need to
+ the resulting operation. *PCOMP_P is set to 1 if we would need to
complement the innermost operand, otherwise it is unchanged.
MODE is the mode in which the operation will be done. No bits outside
@@ -8282,7 +8727,6 @@ merge_outer_ops (pop0, pconst0, op1, const1, mode, pcomp_p)
{
enum rtx_code op0 = *pop0;
HOST_WIDE_INT const0 = *pconst0;
- int width = GET_MODE_BITSIZE (mode);
const0 &= GET_MODE_MASK (mode);
const1 &= GET_MODE_MASK (mode);
@@ -8351,7 +8795,7 @@ merge_outer_ops (pop0, pconst0, op1, const1, mode, pcomp_p)
op0 = AND, *pcomp_p = 1;
else /* op1 == IOR */
/* (a | b) ^ b == a & ~b */
- op0 = AND, *pconst0 = ~ const0;
+ op0 = AND, *pconst0 = ~const0;
break;
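The rewrite above depends on the identity (a | b) ^ b == a & ~b; an exhaustive one-byte check (illustrative C):

#include <assert.h>

int
main (void)
{
  unsigned a, b;

  for (a = 0; a < 256; a++)
    for (b = 0; b < 256; b++)
      assert (((a | b) ^ b) == (a & ~b));
  return 0;
}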
case AND:
@@ -8377,18 +8821,10 @@ merge_outer_ops (pop0, pconst0, op1, const1, mode, pcomp_p)
&& op0 == AND)
op0 = NIL;
- /* If this would be an entire word for the target, but is not for
- the host, then sign-extend on the host so that the number will look
- the same way on the host that it would on the target.
-
- For example, when building a 64 bit alpha hosted 32 bit sparc
- targeted compiler, then we want the 32 bit unsigned value -1 to be
- represented as a 64 bit value -1, and not as 0x00000000ffffffff.
- The later confuses the sparc backend. */
-
- if (BITS_PER_WORD < HOST_BITS_PER_WIDE_INT && BITS_PER_WORD == width
- && (const0 & ((HOST_WIDE_INT) 1 << (width - 1))))
- const0 |= ((HOST_WIDE_INT) (-1) << width);
+ /* ??? Slightly redundant with the above mask, but not entirely.
+ Moving this above means we'd have to sign-extend the mode mask
+ for the final test. */
+ const0 = trunc_int_for_mode (const0, mode);
*pop0 = op0;
*pconst0 = const0;
@@ -8405,18 +8841,19 @@ merge_outer_ops (pop0, pconst0, op1, const1, mode, pcomp_p)
are ASHIFTRT and ROTATE, which are always done in their original mode, */
static rtx
-simplify_shift_const (x, code, result_mode, varop, count)
+simplify_shift_const (x, code, result_mode, varop, orig_count)
rtx x;
enum rtx_code code;
enum machine_mode result_mode;
rtx varop;
- int count;
+ int orig_count;
{
enum rtx_code orig_code = code;
- int orig_count = count;
+ unsigned int count;
+ int signed_count;
enum machine_mode mode = result_mode;
enum machine_mode shift_mode, tmode;
- int mode_words
+ unsigned int mode_words
= (GET_MODE_SIZE (mode) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD;
/* We form (outer_op (code varop count) (outer_const)). */
enum rtx_code outer_op = NIL;
@@ -8425,17 +8862,27 @@ simplify_shift_const (x, code, result_mode, varop, count)
int complement_p = 0;
rtx new;
+ /* Make sure to truncate the "natural" shift on the way in. We don't
+ want to do this inside the loop as it makes it more difficult to
+ combine shifts. */
+#ifdef SHIFT_COUNT_TRUNCATED
+ if (SHIFT_COUNT_TRUNCATED)
+ orig_count &= GET_MODE_BITSIZE (mode) - 1;
+#endif
+
/* If we were given an invalid count, don't do anything except exactly
what was requested. */
- if (count < 0 || count > GET_MODE_BITSIZE (mode))
+ if (orig_count < 0 || orig_count >= (int) GET_MODE_BITSIZE (mode))
{
if (x)
return x;
- return gen_rtx_fmt_ee (code, mode, varop, GEN_INT (count));
+ return gen_rtx_fmt_ee (code, mode, varop, GEN_INT (orig_count));
}
+ count = orig_count;
+
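On SHIFT_COUNT_TRUNCATED targets the hardware masks the count, so it is now canonicalized once on entry rather than inside the loop; e.g. in a 32-bit mode a count of 33 acts as 1. (In C, shifting by the width or more is undefined, which is exactly why the mask is applied before any simplification.) A trivial check:

#include <assert.h>

int
main (void)
{
  int orig_count = 33;
  int mode_bitsize = 32;

  orig_count &= mode_bitsize - 1;   /* 33 & 31 == 1 */
  assert (orig_count == 1);
  return 0;
}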
/* Unless one of the branches of the `if' in this loop does a `continue',
we will `break' the loop after the `if'. */
@@ -8472,7 +8919,7 @@ simplify_shift_const (x, code, result_mode, varop, count)
Since these shifts are being produced by the compiler by combining
multiple operations, each of which are defined, we know what the
result is supposed to be. */
-
+
if (count > GET_MODE_BITSIZE (shift_mode) - 1)
{
if (code == ASHIFTRT)
@@ -8489,12 +8936,6 @@ simplify_shift_const (x, code, result_mode, varop, count)
}
}
- /* Negative counts are invalid and should not have been made (a
- programmer-specified negative count should have been handled
- above). */
- else if (count < 0)
- abort ();
-
/* An arithmetic right shift of a quantity known to be -1 or 0
is a no-op. */
if (code == ASHIFTRT
@@ -8518,7 +8959,7 @@ simplify_shift_const (x, code, result_mode, varop, count)
/* We simplify the tests below and elsewhere by converting
ASHIFTRT to LSHIFTRT if we know the sign bit is clear.
`make_compound_operation' will convert it to an ASHIFTRT for
- those machines (such as Vax) that don't have a LSHIFTRT. */
+ those machines (such as VAX) that don't have a LSHIFTRT. */
if (GET_MODE_BITSIZE (shift_mode) <= HOST_BITS_PER_WIDE_INT
&& code == ASHIFTRT
&& ((nonzero_bits (varop, shift_mode)
@@ -8550,16 +8991,12 @@ simplify_shift_const (x, code, result_mode, varop, count)
&& (tmode = mode_for_size (GET_MODE_BITSIZE (mode) - count,
MODE_INT, 1)) != BLKmode)
{
- if (BYTES_BIG_ENDIAN)
- new = gen_rtx_MEM (tmode, XEXP (varop, 0));
- else
- new = gen_rtx_MEM (tmode,
- plus_constant (XEXP (varop, 0),
- count / BITS_PER_UNIT));
- RTX_UNCHANGING_P (new) = RTX_UNCHANGING_P (varop);
- MEM_COPY_ATTRIBUTES (new, varop);
- varop = gen_rtx_combine (code == ASHIFTRT ? SIGN_EXTEND
- : ZERO_EXTEND, mode, new);
+ new = adjust_address_nv (varop, tmode,
+ BYTES_BIG_ENDIAN ? 0
+ : count / BITS_PER_UNIT);
+
+ varop = gen_rtx_fmt_e (code == ASHIFTRT ? SIGN_EXTEND
+ : ZERO_EXTEND, mode, new);
count = 0;
continue;
}
@@ -8580,13 +9017,13 @@ simplify_shift_const (x, code, result_mode, varop, count)
else
{
new = copy_rtx (XEXP (varop, 0));
- SUBST (XEXP (new, 0),
+ SUBST (XEXP (new, 0),
plus_constant (XEXP (new, 0),
count / BITS_PER_UNIT));
}
- varop = gen_rtx_combine (code == ASHIFTRT ? SIGN_EXTEND
- : ZERO_EXTEND, mode, new);
+ varop = gen_rtx_fmt_e (code == ASHIFTRT ? SIGN_EXTEND
+ : ZERO_EXTEND, mode, new);
count = 0;
continue;
}
@@ -8617,8 +9054,9 @@ simplify_shift_const (x, code, result_mode, varop, count)
if (GET_CODE (XEXP (varop, 1)) == CONST_INT
&& exact_log2 (INTVAL (XEXP (varop, 1))) >= 0)
{
- varop = gen_binary (ASHIFT, GET_MODE (varop), XEXP (varop, 0),
- GEN_INT (exact_log2 (INTVAL (XEXP (varop, 1)))));;
+ varop
+ = gen_binary (ASHIFT, GET_MODE (varop), XEXP (varop, 0),
+ GEN_INT (exact_log2 (INTVAL (XEXP (varop, 1)))));
continue;
}
break;
@@ -8628,16 +9066,22 @@ simplify_shift_const (x, code, result_mode, varop, count)
if (GET_CODE (XEXP (varop, 1)) == CONST_INT
&& exact_log2 (INTVAL (XEXP (varop, 1))) >= 0)
{
- varop = gen_binary (LSHIFTRT, GET_MODE (varop), XEXP (varop, 0),
- GEN_INT (exact_log2 (INTVAL (XEXP (varop, 1)))));
+ varop
+ = gen_binary (LSHIFTRT, GET_MODE (varop), XEXP (varop, 0),
+ GEN_INT (exact_log2 (INTVAL (XEXP (varop, 1)))));
continue;
}
break;
case ASHIFTRT:
- /* If we are extracting just the sign bit of an arithmetic right
- shift, that shift is not needed. */
- if (code == LSHIFTRT && count == GET_MODE_BITSIZE (result_mode) - 1)
+ /* If we are extracting just the sign bit of an arithmetic
+ right shift, that shift is not needed. However, the sign
+ bit of a wider mode may be different from what would be
+ interpreted as the sign bit in a narrower mode, so, if
+ the result is narrower, don't discard the shift. */
+ if (code == LSHIFTRT && count == GET_MODE_BITSIZE (result_mode) - 1
+ && (GET_MODE_BITSIZE (result_mode)
+ >= GET_MODE_BITSIZE (GET_MODE (varop))))
{
varop = XEXP (varop, 0);
continue;
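Illustrative check of the identity this case relies on (assuming arithmetic right shifts of signed ints, as on the targets this code models): the sign bit of x >> n is still the sign bit of x, so extracting only the sign bit makes the inner arithmetic shift redundant.

#include <assert.h>
#include <stdint.h>

int
main (void)
{
  int32_t x = -12345;
  int n;

  for (n = 0; n < 31; n++)
    assert (((uint32_t) (x >> n) >> 31) == ((uint32_t) x >> 31));
  return 0;
}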
@@ -8657,7 +9101,7 @@ simplify_shift_const (x, code, result_mode, varop, count)
&& GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
{
enum rtx_code first_code = GET_CODE (varop);
- int first_count = INTVAL (XEXP (varop, 1));
+ unsigned int first_count = INTVAL (XEXP (varop, 1));
unsigned HOST_WIDE_INT mask;
rtx mask_rtx;
@@ -8673,9 +9117,9 @@ simplify_shift_const (x, code, result_mode, varop, count)
- GET_MODE_BITSIZE (GET_MODE (varop))) == count)
{
/* C3 has the low-order C1 bits zero. */
-
+
mask = (GET_MODE_MASK (mode)
- & ~ (((HOST_WIDE_INT) 1 << first_count) - 1));
+ & ~(((HOST_WIDE_INT) 1 << first_count) - 1));
varop = simplify_and_const_int (NULL_RTX, result_mode,
XEXP (varop, 0), mask);
@@ -8685,11 +9129,11 @@ simplify_shift_const (x, code, result_mode, varop, count)
code = ASHIFTRT;
continue;
}
-
+
/* If this was (ashiftrt (ashift foo C1) C2) and FOO has more
than C1 high-order bits equal to the sign bit, we can convert
this to either an ASHIFT or an ASHIFTRT depending on the
- two counts.
+ two counts.
We cannot do this if VAROP's mode is not SHIFT_MODE. */
@@ -8698,10 +9142,14 @@ simplify_shift_const (x, code, result_mode, varop, count)
&& (num_sign_bit_copies (XEXP (varop, 0), shift_mode)
> first_count))
{
- count -= first_count;
- if (count < 0)
- count = - count, code = ASHIFT;
varop = XEXP (varop, 0);
+
+ signed_count = count - first_count;
+ if (signed_count < 0)
+ count = -signed_count, code = ASHIFT;
+ else
+ count = signed_count;
+
continue;
}
@@ -8742,7 +9190,7 @@ simplify_shift_const (x, code, result_mode, varop, count)
break;
/* To compute the mask to apply after the shift, shift the
- nonzero bits of the inner shift the same way the
+ nonzero bits of the inner shift the same way the
outer shift will. */
mask_rtx = GEN_INT (nonzero_bits (varop, GET_MODE (varop)));
@@ -8750,7 +9198,7 @@ simplify_shift_const (x, code, result_mode, varop, count)
mask_rtx
= simplify_binary_operation (code, result_mode, mask_rtx,
GEN_INT (count));
-
+
/* Give up if we can't compute an outer operation to use. */
if (mask_rtx == 0
|| GET_CODE (mask_rtx) != CONST_INT
@@ -8761,22 +9209,25 @@ simplify_shift_const (x, code, result_mode, varop, count)
/* If the shifts are in the same direction, we add the
counts. Otherwise, we subtract them. */
+ signed_count = count;
if ((code == ASHIFTRT || code == LSHIFTRT)
== (first_code == ASHIFTRT || first_code == LSHIFTRT))
- count += first_count;
+ signed_count += first_count;
else
- count -= first_count;
+ signed_count -= first_count;
- /* If COUNT is positive, the new shift is usually CODE,
+ /* If COUNT is positive, the new shift is usually CODE,
except for the two exceptions below, in which case it is
FIRST_CODE. If the count is negative, FIRST_CODE should
always be used. */
- if (count > 0
+ if (signed_count > 0
&& ((first_code == ROTATE && code == ASHIFT)
|| (first_code == ASHIFTRT && code == LSHIFTRT)))
- code = first_code;
- else if (count < 0)
- code = first_code, count = - count;
+ code = first_code, count = signed_count;
+ else if (signed_count < 0)
+ code = first_code, count = -signed_count;
+ else
+ count = signed_count;
varop = XEXP (varop, 0);
continue;
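Same-direction shifts add their counts; opposite directions subtract, with the sign of the combined count picking the direction, which is what the new signed_count bookkeeping tracks. Spot check for the same-direction case (illustrative C):

#include <assert.h>
#include <stdint.h>

int
main (void)
{
  uint32_t x = 0xdeadbeefu;

  assert (((x >> 3) >> 2) == (x >> 5));
  assert (((x << 7) << 1) == (x << 8));
  return 0;
}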
@@ -8793,7 +9244,7 @@ simplify_shift_const (x, code, result_mode, varop, count)
XEXP (varop, 0),
GEN_INT (count))))
{
- varop = gen_rtx_combine (code, mode, new, XEXP (varop, 1));
+ varop = gen_rtx_fmt_ee (code, mode, new, XEXP (varop, 1));
count = 0;
continue;
}
@@ -8801,8 +9252,8 @@ simplify_shift_const (x, code, result_mode, varop, count)
case NOT:
/* Make this fit the case below. */
- varop = gen_rtx_combine (XOR, mode, XEXP (varop, 0),
- GEN_INT (GET_MODE_MASK (mode)));
+ varop = gen_rtx_XOR (mode, XEXP (varop, 0),
+ GEN_INT (GET_MODE_MASK (mode)));
continue;
case IOR:
@@ -8823,11 +9274,11 @@ simplify_shift_const (x, code, result_mode, varop, count)
&& rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
{
count = 0;
- varop = gen_rtx_combine (LE, GET_MODE (varop), XEXP (varop, 1),
- const0_rtx);
+ varop = gen_rtx_LE (GET_MODE (varop), XEXP (varop, 1),
+ const0_rtx);
if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
- varop = gen_rtx_combine (NEG, GET_MODE (varop), varop);
+ varop = gen_rtx_NEG (GET_MODE (varop), varop);
continue;
}
@@ -8842,7 +9293,7 @@ simplify_shift_const (x, code, result_mode, varop, count)
&& (new = simplify_binary_operation (code, result_mode,
XEXP (varop, 1),
GEN_INT (count))) != 0
- && GET_CODE(new) == CONST_INT
+ && GET_CODE (new) == CONST_INT
&& merge_outer_ops (&outer_op, &outer_const, GET_CODE (varop),
INTVAL (new), result_mode, &complement_p))
{
@@ -8877,7 +9328,8 @@ simplify_shift_const (x, code, result_mode, varop, count)
&& count == GET_MODE_BITSIZE (result_mode) - 1
&& GET_MODE_BITSIZE (result_mode) <= HOST_BITS_PER_WIDE_INT
&& ((STORE_FLAG_VALUE
- & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (result_mode) - 1))))
+ & ((HOST_WIDE_INT) 1
+ << (GET_MODE_BITSIZE (result_mode) - 1))))
&& nonzero_bits (XEXP (varop, 0), result_mode) == 1
&& merge_outer_ops (&outer_op, &outer_const, XOR,
(HOST_WIDE_INT) 1, result_mode,
@@ -8962,7 +9414,7 @@ simplify_shift_const (x, code, result_mode, varop, count)
&& (new = simplify_binary_operation (ASHIFT, result_mode,
XEXP (varop, 1),
GEN_INT (count))) != 0
- && GET_CODE(new) == CONST_INT
+ && GET_CODE (new) == CONST_INT
&& merge_outer_ops (&outer_op, &outer_const, PLUS,
INTVAL (new), result_mode, &complement_p))
{
@@ -8988,11 +9440,11 @@ simplify_shift_const (x, code, result_mode, varop, count)
&& rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
{
count = 0;
- varop = gen_rtx_combine (GT, GET_MODE (varop), XEXP (varop, 1),
- const0_rtx);
+ varop = gen_rtx_GT (GET_MODE (varop), XEXP (varop, 1),
+ const0_rtx);
if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
- varop = gen_rtx_combine (NEG, GET_MODE (varop), varop);
+ varop = gen_rtx_NEG (GET_MODE (varop), varop);
continue;
}
@@ -9010,17 +9462,17 @@ simplify_shift_const (x, code, result_mode, varop, count)
{
rtx varop_inner = XEXP (varop, 0);
- varop_inner = gen_rtx_combine (LSHIFTRT,
- GET_MODE (varop_inner),
- XEXP (varop_inner, 0),
- GEN_INT (count + INTVAL (XEXP (varop_inner, 1))));
- varop = gen_rtx_combine (TRUNCATE, GET_MODE (varop),
- varop_inner);
+ varop_inner
+ = gen_rtx_LSHIFTRT (GET_MODE (varop_inner),
+ XEXP (varop_inner, 0),
+ GEN_INT
+ (count + INTVAL (XEXP (varop_inner, 1))));
+ varop = gen_rtx_TRUNCATE (GET_MODE (varop), varop_inner);
count = 0;
continue;
}
break;
-
+
default:
break;
}
@@ -9068,13 +9520,7 @@ simplify_shift_const (x, code, result_mode, varop, count)
if (new != 0)
x = new;
else
- {
- if (x == 0 || GET_CODE (x) != code || GET_MODE (x) != shift_mode)
- x = gen_rtx_combine (code, shift_mode, varop, const_rtx);
-
- SUBST (XEXP (x, 0), varop);
- SUBST (XEXP (x, 1), const_rtx);
- }
+ x = gen_rtx_fmt_ee (code, shift_mode, varop, const_rtx);
/* If we have an outer operation and we just made a shift, it is
possible that we could have simplified the shift were it not
@@ -9091,36 +9537,19 @@ simplify_shift_const (x, code, result_mode, varop, count)
if (orig_code == LSHIFTRT && result_mode != shift_mode)
x = simplify_and_const_int (NULL_RTX, shift_mode, x,
GET_MODE_MASK (result_mode) >> orig_count);
-
+
/* Do the remainder of the processing in RESULT_MODE. */
x = gen_lowpart_for_combine (result_mode, x);
/* If COMPLEMENT_P is set, we have to complement X before doing the outer
operation. */
if (complement_p)
- x = gen_unary (NOT, result_mode, result_mode, x);
+ x = simplify_gen_unary (NOT, result_mode, x, result_mode);
if (outer_op != NIL)
{
if (GET_MODE_BITSIZE (result_mode) < HOST_BITS_PER_WIDE_INT)
- {
- int width = GET_MODE_BITSIZE (result_mode);
-
- outer_const &= GET_MODE_MASK (result_mode);
-
- /* If this would be an entire word for the target, but is not for
- the host, then sign-extend on the host so that the number will
- look the same way on the host that it would on the target.
-
- For example, when building a 64 bit alpha hosted 32 bit sparc
- targeted compiler, then we want the 32 bit unsigned value -1 to be
- represented as a 64 bit value -1, and not as 0x00000000ffffffff.
- The later confuses the sparc backend. */
-
- if (BITS_PER_WORD < HOST_BITS_PER_WIDE_INT && BITS_PER_WORD == width
- && (outer_const & ((HOST_WIDE_INT) 1 << (width - 1))))
- outer_const |= ((HOST_WIDE_INT) (-1) << width);
- }
+ outer_const = trunc_int_for_mode (outer_const, result_mode);
if (outer_op == AND)
x = simplify_and_const_int (NULL_RTX, result_mode, x, outer_const);
@@ -9129,13 +9558,13 @@ simplify_shift_const (x, code, result_mode, varop, count)
equivalent to a constant. This should be rare. */
x = GEN_INT (outer_const);
else if (GET_RTX_CLASS (outer_op) == '1')
- x = gen_unary (outer_op, result_mode, result_mode, x);
+ x = simplify_gen_unary (outer_op, result_mode, x, result_mode);
else
x = gen_binary (outer_op, result_mode, x, GEN_INT (outer_const));
}
return x;
-}
+}
/* Like recog, but we receive the address of a pointer to a new pattern.
We try to match the rtx that the pointer points to.
@@ -9156,11 +9585,12 @@ recog_for_combine (pnewpat, insn, pnotes)
rtx insn;
rtx *pnotes;
{
- register rtx pat = *pnewpat;
+ rtx pat = *pnewpat;
int insn_code_number;
int num_clobbers_to_add = 0;
int i;
rtx notes = 0;
+ rtx old_notes;
/* If PAT is a PARALLEL, check to see if it contains the CLOBBER
we use to indicate that something didn't match. If we find such a
@@ -9171,7 +9601,10 @@ recog_for_combine (pnewpat, insn, pnotes)
&& XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
return -1;
- /* Is the result of combination a valid instruction? */
+ /* Remove the old notes prior to trying to recognize the new pattern. */
+ old_notes = REG_NOTES (insn);
+ REG_NOTES (insn) = 0;
+
insn_code_number = recog (pat, insn, &num_clobbers_to_add);
/* If it isn't, there is the possibility that we previously had an insn
@@ -9200,14 +9633,21 @@ recog_for_combine (pnewpat, insn, pnotes)
insn_code_number = recog (pat, insn, &num_clobbers_to_add);
}
+ /* Recognize all noop sets; these will be killed by a follow-up pass. */
+ if (insn_code_number < 0 && GET_CODE (pat) == SET && set_noop_p (pat))
+ insn_code_number = NOOP_MOVE_INSN_CODE, num_clobbers_to_add = 0;
+
+ REG_NOTES (insn) = old_notes;
+
/* If we had any clobbers to add, make a new pattern that contains
them. Then check to make sure that all of them are dead. */
if (num_clobbers_to_add)
{
rtx newpat = gen_rtx_PARALLEL (VOIDmode,
- gen_rtvec (GET_CODE (pat) == PARALLEL
- ? XVECLEN (pat, 0) + num_clobbers_to_add
- : num_clobbers_to_add + 1));
+ rtvec_alloc (GET_CODE (pat) == PARALLEL
+ ? (XVECLEN (pat, 0)
+ + num_clobbers_to_add)
+ : num_clobbers_to_add + 1));
if (GET_CODE (pat) == PARALLEL)
for (i = 0; i < XVECLEN (pat, 0); i++)
@@ -9249,7 +9689,7 @@ recog_for_combine (pnewpat, insn, pnotes)
static rtx
gen_lowpart_for_combine (mode, x)
enum machine_mode mode;
- register rtx x;
+ rtx x;
{
rtx result;
@@ -9277,21 +9717,22 @@ gen_lowpart_for_combine (mode, x)
}
result = gen_lowpart_common (mode, x);
+#ifdef CLASS_CANNOT_CHANGE_MODE
if (result != 0
&& GET_CODE (result) == SUBREG
&& GET_CODE (SUBREG_REG (result)) == REG
&& REGNO (SUBREG_REG (result)) >= FIRST_PSEUDO_REGISTER
- && (GET_MODE_SIZE (GET_MODE (result))
- != GET_MODE_SIZE (GET_MODE (SUBREG_REG (result)))))
- REG_CHANGES_SIZE (REGNO (SUBREG_REG (result))) = 1;
+ && CLASS_CANNOT_CHANGE_MODE_P (GET_MODE (result),
+ GET_MODE (SUBREG_REG (result))))
+ REG_CHANGES_MODE (REGNO (SUBREG_REG (result))) = 1;
+#endif
if (result)
return result;
if (GET_CODE (x) == MEM)
{
- register int offset = 0;
- rtx new;
+ int offset = 0;
/* Refuse to work on a volatile memory ref or one with a mode-dependent
address. */
@@ -9307,6 +9748,7 @@ gen_lowpart_for_combine (mode, x)
if (WORDS_BIG_ENDIAN)
offset = (MAX (GET_MODE_SIZE (GET_MODE (x)), UNITS_PER_WORD)
- MAX (GET_MODE_SIZE (mode), UNITS_PER_WORD));
+
if (BYTES_BIG_ENDIAN)
{
/* Adjust the address so that the address-after-the-data is
@@ -9314,107 +9756,31 @@ gen_lowpart_for_combine (mode, x)
offset -= (MIN (UNITS_PER_WORD, GET_MODE_SIZE (mode))
- MIN (UNITS_PER_WORD, GET_MODE_SIZE (GET_MODE (x))));
}
- new = gen_rtx_MEM (mode, plus_constant (XEXP (x, 0), offset));
- RTX_UNCHANGING_P (new) = RTX_UNCHANGING_P (x);
- MEM_COPY_ATTRIBUTES (new, x);
- return new;
+
+ return adjust_address_nv (x, mode, offset);
}
/* If X is a comparison operator, rewrite it in a new mode. This
probably won't match, but may allow further simplifications. */
else if (GET_RTX_CLASS (GET_CODE (x)) == '<')
- return gen_rtx_combine (GET_CODE (x), mode, XEXP (x, 0), XEXP (x, 1));
+ return gen_rtx_fmt_ee (GET_CODE (x), mode, XEXP (x, 0), XEXP (x, 1));
/* If we couldn't simplify X any other way, just enclose it in a
SUBREG. Normally, this SUBREG won't match, but some patterns may
include an explicit SUBREG or we may simplify it further in combine. */
else
{
- int word = 0;
+ int offset = 0;
+ rtx res;
- if (WORDS_BIG_ENDIAN && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
- word = ((GET_MODE_SIZE (GET_MODE (x))
- - MAX (GET_MODE_SIZE (mode), UNITS_PER_WORD))
- / UNITS_PER_WORD);
- return gen_rtx_SUBREG (mode, x, word);
+ offset = subreg_lowpart_offset (mode, GET_MODE (x));
+ res = simplify_gen_subreg (mode, x, GET_MODE (x), offset);
+ if (res)
+ return res;
+ return gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
}
}
-/* Make an rtx expression. This is a subset of gen_rtx and only supports
- expressions of 1, 2, or 3 operands, each of which are rtx expressions.
-
- If the identical expression was previously in the insn (in the undobuf),
- it will be returned. Only if it is not found will a new expression
- be made. */
-
-/*VARARGS2*/
-static rtx
-gen_rtx_combine VPROTO((enum rtx_code code, enum machine_mode mode, ...))
-{
-#ifndef ANSI_PROTOTYPES
- enum rtx_code code;
- enum machine_mode mode;
-#endif
- va_list p;
- int n_args;
- rtx args[3];
- int j;
- char *fmt;
- rtx rt;
- struct undo *undo;
-
- VA_START (p, mode);
-
-#ifndef ANSI_PROTOTYPES
- code = va_arg (p, enum rtx_code);
- mode = va_arg (p, enum machine_mode);
-#endif
-
- n_args = GET_RTX_LENGTH (code);
- fmt = GET_RTX_FORMAT (code);
-
- if (n_args == 0 || n_args > 3)
- abort ();
-
- /* Get each arg and verify that it is supposed to be an expression. */
- for (j = 0; j < n_args; j++)
- {
- if (*fmt++ != 'e')
- abort ();
-
- args[j] = va_arg (p, rtx);
- }
-
- /* See if this is in undobuf. Be sure we don't use objects that came
- from another insn; this could produce circular rtl structures. */
-
- for (undo = undobuf.undos; undo != undobuf.previous_undos; undo = undo->next)
- if (!undo->is_int
- && GET_CODE (undo->old_contents.r) == code
- && GET_MODE (undo->old_contents.r) == mode)
- {
- for (j = 0; j < n_args; j++)
- if (XEXP (undo->old_contents.r, j) != args[j])
- break;
-
- if (j == n_args)
- return undo->old_contents.r;
- }
-
- /* Otherwise make a new rtx. We know we have 1, 2, or 3 args.
- Use rtx_alloc instead of gen_rtx because it's faster on RISC. */
- rt = rtx_alloc (code);
- PUT_MODE (rt, mode);
- XEXP (rt, 0) = args[0];
- if (n_args > 1)
- {
- XEXP (rt, 1) = args[1];
- if (n_args > 2)
- XEXP (rt, 2) = args[2];
- }
- return rt;
-}
-
/* These routines make binary and unary operations by first seeing if they
fold; if not, a new expression is allocated. */
@@ -9428,15 +9794,14 @@ gen_binary (code, mode, op0, op1)
rtx tem;
if (GET_RTX_CLASS (code) == 'c'
- && (GET_CODE (op0) == CONST_INT
- || (CONSTANT_P (op0) && GET_CODE (op1) != CONST_INT)))
+ && swap_commutative_operands_p (op0, op1))
tem = op0, op0 = op1, op1 = tem;
- if (GET_RTX_CLASS (code) == '<')
+ if (GET_RTX_CLASS (code) == '<')
{
enum machine_mode op_mode = GET_MODE (op0);
- /* Strip the COMPARE from (REL_OP (compare X Y) 0) to get
+ /* Strip the COMPARE from (REL_OP (compare X Y) 0) to get
just (REL_OP X Y). */
if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
{
@@ -9457,36 +9822,17 @@ gen_binary (code, mode, op0, op1)
/* Put complex operands first and constants second. */
if (GET_RTX_CLASS (code) == 'c'
- && ((CONSTANT_P (op0) && GET_CODE (op1) != CONST_INT)
- || (GET_RTX_CLASS (GET_CODE (op0)) == 'o'
- && GET_RTX_CLASS (GET_CODE (op1)) != 'o')
- || (GET_CODE (op0) == SUBREG
- && GET_RTX_CLASS (GET_CODE (SUBREG_REG (op0))) == 'o'
- && GET_RTX_CLASS (GET_CODE (op1)) != 'o')))
- return gen_rtx_combine (code, mode, op1, op0);
+ && swap_commutative_operands_p (op0, op1))
+ return gen_rtx_fmt_ee (code, mode, op1, op0);
/* If we are turning off bits already known off in OP0, we need not do
an AND. */
else if (code == AND && GET_CODE (op1) == CONST_INT
&& GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
- && (nonzero_bits (op0, mode) & ~ INTVAL (op1)) == 0)
+ && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
return op0;
- return gen_rtx_combine (code, mode, op0, op1);
-}
-
-static rtx
-gen_unary (code, mode, op0_mode, op0)
- enum rtx_code code;
- enum machine_mode mode, op0_mode;
- rtx op0;
-{
- rtx result = simplify_unary_operation (code, mode, op0, op0_mode);
-
- if (result)
- return result;
-
- return gen_rtx_combine (code, mode, op0);
+ return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* Simplify a comparison between *POP0 and *POP1 where CODE is the
@@ -9554,7 +9900,7 @@ simplify_comparison (code, pop0, pop1)
&& (code != GT && code != LT && code != GE && code != LE))
|| (GET_CODE (op0) == ASHIFTRT
&& (code != GTU && code != LTU
- && code != GEU && code != GEU)))
+ && code != GEU && code != LEU)))
&& GET_CODE (XEXP (op0, 1)) == CONST_INT
&& INTVAL (XEXP (op0, 1)) >= 0
&& INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
@@ -9569,8 +9915,8 @@ simplify_comparison (code, pop0, pop1)
else if (GET_CODE (op0) == ASHIFT)
mask = (mask & (mask << shift_count)) >> shift_count;
- if ((nonzero_bits (XEXP (op0, 0), mode) & ~ mask) == 0
- && (nonzero_bits (XEXP (op1, 0), mode) & ~ mask) == 0)
+ if ((nonzero_bits (XEXP (op0, 0), mode) & ~mask) == 0
+ && (nonzero_bits (XEXP (op1, 0), mode) & ~mask) == 0)
op0 = XEXP (op0, 0), op1 = XEXP (op1, 0);
else
break;
@@ -9589,16 +9935,16 @@ simplify_comparison (code, pop0, pop1)
operations from some narrower mode even though a SUBREG is not
present. */
- else if (GET_CODE (op0) == AND && GET_CODE (op1) == AND
- && GET_CODE (XEXP (op0, 1)) == CONST_INT
- && GET_CODE (XEXP (op1, 1)) == CONST_INT)
+ else if (GET_CODE (op0) == AND && GET_CODE (op1) == AND
+ && GET_CODE (XEXP (op0, 1)) == CONST_INT
+ && GET_CODE (XEXP (op1, 1)) == CONST_INT)
{
rtx inner_op0 = XEXP (op0, 0);
rtx inner_op1 = XEXP (op1, 0);
HOST_WIDE_INT c0 = INTVAL (XEXP (op0, 1));
HOST_WIDE_INT c1 = INTVAL (XEXP (op1, 1));
int changed = 0;
-
+
if (GET_CODE (inner_op0) == SUBREG && GET_CODE (inner_op1) == SUBREG
&& (GET_MODE_SIZE (GET_MODE (inner_op0))
> GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner_op0))))
@@ -9649,11 +9995,11 @@ simplify_comparison (code, pop0, pop1)
else
break;
}
-
+
/* If the first operand is a constant, swap the operands and adjust the
comparison code appropriately, but don't do this if the second operand
is already a constant integer. */
- if (CONSTANT_P (op0) && GET_CODE (op1) != CONST_INT)
+ if (swap_commutative_operands_p (op0, op1))
{
tem = op0, op0 = op1, op1 = tem;
code = swap_condition (code);
@@ -9667,7 +10013,7 @@ simplify_comparison (code, pop0, pop1)
while (GET_CODE (op1) == CONST_INT)
{
enum machine_mode mode = GET_MODE (op0);
- int mode_width = GET_MODE_BITSIZE (mode);
+ unsigned int mode_width = GET_MODE_BITSIZE (mode);
unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
int equality_comparison_p;
int sign_bit_comparison_p;
@@ -9687,9 +10033,8 @@ simplify_comparison (code, pop0, pop1)
/* Get the constant we are comparing against and turn off all bits
not on in our mode. */
- const_op = INTVAL (op1);
- if (mode_width <= HOST_BITS_PER_WIDE_INT)
- const_op &= mask;
+ const_op = trunc_int_for_mode (INTVAL (op1), mode);
+ op1 = GEN_INT (const_op);
/* If we are comparing against a constant power of two and the value
being compared can only have that single bit nonzero (e.g., it was
@@ -9719,7 +10064,7 @@ simplify_comparison (code, pop0, pop1)
}
/* Do some canonicalizations based on the comparison code. We prefer
- comparisons against zero and then prefer equality comparisons.
+ comparisons against zero and then prefer equality comparisons.
If we can reduce the size of a constant, we will do that too. */
switch (code)
@@ -9767,7 +10112,7 @@ simplify_comparison (code, pop0, pop1)
break;
case GT:
- /* > C is equivalent to >= (C + 1); we do this for C < 0*/
+ /* > C is equivalent to >= (C + 1); we do this for C < 0. */
if (const_op < 0)
{
const_op += 1;
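The canonicalization above uses the integer identity x > C iff x >= C + 1 to move toward GE when C < 0. A spot check (illustrative C):

#include <assert.h>

int
main (void)
{
  int x;

  for (x = -10; x <= 10; x++)
    assert ((x > -3) == (x >= -2));
  return 0;
}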
@@ -9863,7 +10208,7 @@ simplify_comparison (code, pop0, pop1)
equality_comparison_p = (code == EQ || code == NE);
sign_bit_comparison_p = ((code == LT || code == GE) && const_op == 0);
unsigned_comparison_p = (code == LTU || code == LEU || code == GTU
- || code == LEU);
+ || code == GEU);
/* If this is a sign bit comparison and we can do arithmetic in
MODE, say that we will only be needing the sign bit of OP0. */
@@ -9883,7 +10228,7 @@ simplify_comparison (code, pop0, pop1)
case ZERO_EXTRACT:
/* If we are extracting a single bit from a variable position in
a constant that has only a single bit set and are comparing it
- with zero, we can convert this into an equality comparison
+ with zero, we can convert this into an equality comparison
between the position and the location of the single bit. */
if (GET_CODE (XEXP (op0, 0)) == CONST_INT
@@ -9893,14 +10238,15 @@ simplify_comparison (code, pop0, pop1)
{
if (BITS_BIG_ENDIAN)
{
-#ifdef HAVE_extzv
- mode = insn_operand_mode[(int) CODE_FOR_extzv][1];
- if (mode == VOIDmode)
- mode = word_mode;
- i = (GET_MODE_BITSIZE (mode) - 1 - i);
-#else
- i = BITS_PER_WORD - 1 - i;
-#endif
+ enum machine_mode new_mode
+ = mode_for_extraction (EP_extzv, 1);
+ if (new_mode == MAX_MACHINE_MODE)
+ i = BITS_PER_WORD - 1 - i;
+ else
+ {
+ mode = new_mode;
+ i = (GET_MODE_BITSIZE (mode) - 1 - i);
+ }
}
op0 = XEXP (op0, 2);
@@ -10008,7 +10354,7 @@ simplify_comparison (code, pop0, pop1)
continue;
}
- /* ... fall through ... */
+ /* Fall through. */
case ABS:
/* ABS is ignorable inside an equality comparison with zero. */
@@ -10018,11 +10364,10 @@ simplify_comparison (code, pop0, pop1)
continue;
}
break;
-
case SIGN_EXTEND:
/* Can simplify (compare (zero/sign_extend FOO) CONST)
- to (compare FOO CONST) if CONST fits in FOO's mode and we
+ to (compare FOO CONST) if CONST fits in FOO's mode and we
are either testing inequality or have an unsigned comparison
with ZERO_EXTEND or a signed comparison with SIGN_EXTEND. */
if (! unsigned_comparison_p
@@ -10052,12 +10397,12 @@ simplify_comparison (code, pop0, pop1)
&& GET_CODE (SUBREG_REG (op0)) == PLUS
&& GET_CODE (XEXP (SUBREG_REG (op0), 1)) == CONST_INT
&& INTVAL (XEXP (SUBREG_REG (op0), 1)) < 0
- && (- INTVAL (XEXP (SUBREG_REG (op0), 1))
- < (HOST_WIDE_INT)(GET_MODE_MASK (mode) / 2))
+ && (-INTVAL (XEXP (SUBREG_REG (op0), 1))
+ < (HOST_WIDE_INT) (GET_MODE_MASK (mode) / 2))
&& (unsigned HOST_WIDE_INT) const_op < GET_MODE_MASK (mode) / 2
&& (0 == (nonzero_bits (XEXP (SUBREG_REG (op0), 0),
GET_MODE (SUBREG_REG (op0)))
- & ~ GET_MODE_MASK (mode))
+ & ~GET_MODE_MASK (mode))
|| (num_sign_bit_copies (XEXP (SUBREG_REG (op0), 0),
GET_MODE (SUBREG_REG (op0)))
> (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
@@ -10113,6 +10458,11 @@ simplify_comparison (code, pop0, pop1)
break;
case MINUS:
+ /* We used to optimize signed comparisons against zero, but that
+ was incorrect. Unsigned comparisons against zero (GTU, LEU)
+ arrive here as equality comparisons, or (GEU, LTU) are
+ optimized away. No need to special-case them. */
+
/* (eq (minus A B) C) -> (eq A (plus B C)) or
(eq B (minus A C)), whichever simplifies. We can only do
this for equality comparisons due to pathological cases involving
@@ -10162,8 +10512,10 @@ simplify_comparison (code, pop0, pop1)
break;
case EQ: case NE:
- case LT: case LTU: case LE: case LEU:
- case GT: case GTU: case GE: case GEU:
+ case UNEQ: case LTGT:
+ case LT: case LTU: case UNLT: case LE: case LEU: case UNLE:
+ case GT: case GTU: case UNGT: case GE: case GEU: case UNGE:
+ case UNORDERED: case ORDERED:
/* We can't do anything if OP0 is a condition code value, rather
than an actual data value. */
if (const_op != 0
@@ -10181,20 +10533,27 @@ simplify_comparison (code, pop0, pop1)
/* Check for the cases where we simply want the result of the
earlier test or the opposite of that result. */
- if (code == NE
- || (code == EQ && reversible_comparison_p (op0))
+ if (code == NE || code == EQ
|| (GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT
&& GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
&& (STORE_FLAG_VALUE
& (((HOST_WIDE_INT) 1
<< (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
- && (code == LT
- || (code == GE && reversible_comparison_p (op0)))))
+ && (code == LT || code == GE)))
{
- code = (code == LT || code == NE
- ? GET_CODE (op0) : reverse_condition (GET_CODE (op0)));
- op0 = tem, op1 = tem1;
- continue;
+ enum rtx_code new_code;
+ if (code == LT || code == NE)
+ new_code = GET_CODE (op0);
+ else
+ new_code = combine_reversed_comparison_code (op0);
+
+ if (new_code != UNKNOWN)
+ {
+ code = new_code;
+ op0 = tem;
+ op1 = tem1;
+ continue;
+ }
}
break;
@@ -10219,9 +10578,9 @@ simplify_comparison (code, pop0, pop1)
&& XEXP (XEXP (op0, 0), 0) == const1_rtx)
{
op0 = simplify_and_const_int
- (op0, mode, gen_rtx_combine (LSHIFTRT, mode,
- XEXP (op0, 1),
- XEXP (XEXP (op0, 0), 1)),
+ (op0, mode, gen_rtx_LSHIFTRT (mode,
+ XEXP (op0, 1),
+ XEXP (XEXP (op0, 0), 1)),
(HOST_WIDE_INT) 1);
continue;
}
@@ -10239,7 +10598,7 @@ simplify_comparison (code, pop0, pop1)
{
mask = ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
<< INTVAL (XEXP (XEXP (op0, 0), 1)));
- if ((~ STORE_FLAG_VALUE & mask) == 0
+ if ((~STORE_FLAG_VALUE & mask) == 0
&& (GET_RTX_CLASS (GET_CODE (XEXP (XEXP (op0, 0), 0))) == '<'
|| ((tem = get_last_value (XEXP (XEXP (op0, 0), 0))) != 0
&& GET_RTX_CLASS (GET_CODE (tem)) == '<')))
@@ -10285,12 +10644,17 @@ simplify_comparison (code, pop0, pop1)
represents the low part, permute the SUBREG and the AND and
try again. */
if (GET_CODE (XEXP (op0, 0)) == SUBREG
- && ((mode_width
- >= GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (XEXP (op0, 0)))))
+ && (0
#ifdef WORD_REGISTER_OPERATIONS
- || subreg_lowpart_p (XEXP (op0, 0))
+ || ((mode_width
+ > (GET_MODE_BITSIZE
+ (GET_MODE (SUBREG_REG (XEXP (op0, 0))))))
+ && mode_width <= BITS_PER_WORD)
#endif
- )
+ || ((mode_width
+ <= (GET_MODE_BITSIZE
+ (GET_MODE (SUBREG_REG (XEXP (op0, 0))))))
+ && subreg_lowpart_p (XEXP (op0, 0))))
#ifndef WORD_REGISTER_OPERATIONS
/* It is unsafe to commute the AND into the SUBREG if the SUBREG
is paradoxical and WORD_REGISTER_OPERATIONS is not defined.
@@ -10305,13 +10669,13 @@ simplify_comparison (code, pop0, pop1)
&& mode_width <= HOST_BITS_PER_WIDE_INT
&& (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (XEXP (op0, 0))))
<= HOST_BITS_PER_WIDE_INT)
- && (INTVAL (XEXP (op0, 1)) & ~ mask) == 0
- && 0 == (~ GET_MODE_MASK (GET_MODE (SUBREG_REG (XEXP (op0, 0))))
+ && (INTVAL (XEXP (op0, 1)) & ~mask) == 0
+ && 0 == (~GET_MODE_MASK (GET_MODE (SUBREG_REG (XEXP (op0, 0))))
& INTVAL (XEXP (op0, 1)))
&& (unsigned HOST_WIDE_INT) INTVAL (XEXP (op0, 1)) != mask
&& ((unsigned HOST_WIDE_INT) INTVAL (XEXP (op0, 1))
!= GET_MODE_MASK (GET_MODE (SUBREG_REG (XEXP (op0, 0))))))
-
+
{
op0
= gen_lowpart_for_combine
@@ -10321,6 +10685,21 @@ simplify_comparison (code, pop0, pop1)
continue;
}
+ /* Convert (ne (and (lshiftrt (not X)) 1) 0) to
+ (eq (and (lshiftrt X) 1) 0). */
+ if (const_op == 0 && equality_comparison_p
+ && XEXP (op0, 1) == const1_rtx
+ && GET_CODE (XEXP (op0, 0)) == LSHIFTRT
+ && GET_CODE (XEXP (XEXP (op0, 0), 0)) == NOT)
+ {
+ op0 = simplify_and_const_int
+ (op0, mode,
+ gen_rtx_LSHIFTRT (mode, XEXP (XEXP (XEXP (op0, 0), 0), 0),
+ XEXP (XEXP (op0, 0), 1)),
+ (HOST_WIDE_INT) 1);
+ code = (code == NE ? EQ : NE);
+ continue;
+ }
break;
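The new transform relies on bit n of ~X being set exactly when bit n of X is clear, so (ne (and (lshiftrt (not X)) 1) 0) can flip to (eq (and (lshiftrt X) 1) 0). Exhaustive one-byte check (illustrative C):

#include <assert.h>

int
main (void)
{
  unsigned x;
  int n;

  for (x = 0; x < 256; x++)
    for (n = 0; n < 8; n++)
      assert ((((~x >> n) & 1) != 0) == (((x >> n) & 1) == 0));
  return 0;
}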
case ASHIFT:
@@ -10337,11 +10716,15 @@ simplify_comparison (code, pop0, pop1)
& (((HOST_WIDE_INT) 1 << INTVAL (XEXP (op0, 1))) - 1)) == 0)
&& mode_width <= HOST_BITS_PER_WIDE_INT
&& (nonzero_bits (XEXP (op0, 0), mode)
- & ~ (mask >> (INTVAL (XEXP (op0, 1))
- + ! equality_comparison_p))) == 0)
+ & ~(mask >> (INTVAL (XEXP (op0, 1))
+ + ! equality_comparison_p))) == 0)
{
- const_op >>= INTVAL (XEXP (op0, 1));
- op1 = GEN_INT (const_op);
+ /* We must perform a logical shift, not an arithmetic one,
+ as we want the top N bits of C to be zero. */
+ unsigned HOST_WIDE_INT temp = const_op & GET_MODE_MASK (mode);
+
+ temp >>= INTVAL (XEXP (op0, 1));
+ op1 = GEN_INT (trunc_int_for_mode (temp, mode));
op0 = XEXP (op0, 0);
continue;
}
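The logical-shift fix matters because the comparison constant may be negative in the mode: undoing (x << 1) == C for an 8-bit mode with C == 0x80 needs 0x40, not the sign-smeared 0xC0. A sketch (assumes a two's-complement host with arithmetic signed shifts):

#include <assert.h>
#include <stdint.h>

int
main (void)
{
  int8_t c = (int8_t) 0x80;              /* negative in the mode */
  uint8_t logical = (uint8_t) c >> 1;    /* 0x40: top bit cleared */
  int8_t arithmetic = c >> 1;            /* 0xc0: sign bit smeared in */

  assert (logical == 0x40);
  assert ((uint8_t) arithmetic == 0xc0);
  return 0;
}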
@@ -10393,13 +10776,39 @@ simplify_comparison (code, pop0, pop1)
&& (tmode = mode_for_size (mode_width - INTVAL (XEXP (op0, 1)),
MODE_INT, 1)) != BLKmode
&& ((unsigned HOST_WIDE_INT) const_op <= GET_MODE_MASK (tmode)
- || ((unsigned HOST_WIDE_INT) - const_op
+ || ((unsigned HOST_WIDE_INT) -const_op
<= GET_MODE_MASK (tmode))))
{
op0 = gen_lowpart_for_combine (tmode, XEXP (XEXP (op0, 0), 0));
continue;
}
+ /* Likewise if OP0 is a PLUS of a sign extension with a
+ constant, which is usually represented with the PLUS
+ between the shifts. */
+ if (! unsigned_comparison_p
+ && GET_CODE (XEXP (op0, 1)) == CONST_INT
+ && GET_CODE (XEXP (op0, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
+ && GET_CODE (XEXP (XEXP (op0, 0), 0)) == ASHIFT
+ && XEXP (op0, 1) == XEXP (XEXP (XEXP (op0, 0), 0), 1)
+ && (tmode = mode_for_size (mode_width - INTVAL (XEXP (op0, 1)),
+ MODE_INT, 1)) != BLKmode
+ && ((unsigned HOST_WIDE_INT) const_op <= GET_MODE_MASK (tmode)
+ || ((unsigned HOST_WIDE_INT) -const_op
+ <= GET_MODE_MASK (tmode))))
+ {
+ rtx inner = XEXP (XEXP (XEXP (op0, 0), 0), 0);
+ rtx add_const = XEXP (XEXP (op0, 0), 1);
+ rtx new_const = gen_binary (ASHIFTRT, GET_MODE (op0), add_const,
+ XEXP (op0, 1));
+
+ op0 = gen_binary (PLUS, tmode,
+ gen_lowpart_for_combine (tmode, inner),
+ new_const);
+ continue;
+ }
+
/* ... fall through ... */
case LSHIFTRT:
/* If we have (compare (xshiftrt FOO N) (const_int C)) and
@@ -10434,7 +10843,7 @@ simplify_comparison (code, pop0, pop1)
continue;
}
break;
-
+
default:
break;
}
@@ -10454,6 +10863,7 @@ simplify_comparison (code, pop0, pop1)
if (GET_CODE (op0) == SUBREG && subreg_lowpart_p (op0)
&& GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
+ && GET_MODE_CLASS (GET_MODE (SUBREG_REG (op0))) == MODE_INT
&& (code == NE || code == EQ)
&& ((GET_MODE_SIZE (GET_MODE (op0))
> GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0))))))
@@ -10464,15 +10874,16 @@ simplify_comparison (code, pop0, pop1)
else if (GET_CODE (op0) == SUBREG && subreg_lowpart_p (op0)
&& GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
+ && GET_MODE_CLASS (GET_MODE (SUBREG_REG (op0))) == MODE_INT
&& (code == NE || code == EQ)
&& (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
<= HOST_BITS_PER_WIDE_INT)
&& (nonzero_bits (SUBREG_REG (op0), GET_MODE (SUBREG_REG (op0)))
- & ~ GET_MODE_MASK (GET_MODE (op0))) == 0
+ & ~GET_MODE_MASK (GET_MODE (op0))) == 0
&& (tem = gen_lowpart_for_combine (GET_MODE (SUBREG_REG (op0)),
op1),
(nonzero_bits (tem, GET_MODE (SUBREG_REG (op0)))
- & ~ GET_MODE_MASK (GET_MODE (op0))) == 0))
+ & ~GET_MODE_MASK (GET_MODE (op0))) == 0))
op0 = SUBREG_REG (op0), op1 = tem;
/* We now do the opposite procedure: Some machines don't have compare
@@ -10484,12 +10895,12 @@ simplify_comparison (code, pop0, pop1)
mode = GET_MODE (op0);
if (mode != VOIDmode && GET_MODE_CLASS (mode) == MODE_INT
&& GET_MODE_SIZE (mode) < UNITS_PER_WORD
- && cmp_optab->handlers[(int) mode].insn_code == CODE_FOR_nothing)
+ && ! have_insn_for (COMPARE, mode))
for (tmode = GET_MODE_WIDER_MODE (mode);
(tmode != VOIDmode
&& GET_MODE_BITSIZE (tmode) <= HOST_BITS_PER_WIDE_INT);
tmode = GET_MODE_WIDER_MODE (tmode))
- if (cmp_optab->handlers[(int) tmode].insn_code != CODE_FOR_nothing)
+ if (have_insn_for (COMPARE, tmode))
{
/* If the only nonzero bits in OP0 and OP1 are those in the
narrower mode and this is an equality or unsigned comparison,
@@ -10497,13 +10908,23 @@ simplify_comparison (code, pop0, pop1)
values, in which case it is true for all comparisons. */
if (((code == EQ || code == NE
|| code == GEU || code == GTU || code == LEU || code == LTU)
- && (nonzero_bits (op0, tmode) & ~ GET_MODE_MASK (mode)) == 0
- && (nonzero_bits (op1, tmode) & ~ GET_MODE_MASK (mode)) == 0)
+ && (nonzero_bits (op0, tmode) & ~GET_MODE_MASK (mode)) == 0
+ && (nonzero_bits (op1, tmode) & ~GET_MODE_MASK (mode)) == 0)
|| ((num_sign_bit_copies (op0, tmode)
> GET_MODE_BITSIZE (tmode) - GET_MODE_BITSIZE (mode))
&& (num_sign_bit_copies (op1, tmode)
> GET_MODE_BITSIZE (tmode) - GET_MODE_BITSIZE (mode))))
{
+ /* If OP0 is an AND and we don't have an AND in MODE either,
+ make a new AND in the proper mode. */
+ if (GET_CODE (op0) == AND
+ && !have_insn_for (AND, mode))
+ op0 = gen_binary (AND, tmode,
+ gen_lowpart_for_combine (tmode,
+ XEXP (op0, 0)),
+ gen_lowpart_for_combine (tmode,
+ XEXP (op0, 1)));
+
op0 = gen_lowpart_for_combine (tmode, op0);
op1 = gen_lowpart_for_combine (tmode, op1);
break;
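The condition above widens a comparison only when nonzero_bits shows both operands already fit the narrow mode (or enough sign-bit copies exist). A brute-force standalone check of the unsigned/equality half of that property (plain C, not GCC code), using 8-bit operands zero-extended to 32 bits:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main (void)
{
  for (unsigned a = 0; a < 256; a++)
    for (unsigned b = 0; b < 256; b++)
      {
        uint8_t na = (uint8_t) a, nb = (uint8_t) b;  /* narrow mode */
        uint32_t wa = na, wb = nb;                   /* wider mode */

        /* Neither operand has bits outside the narrow mode's mask,
           so EQ and the unsigned orderings agree across modes.  */
        assert ((na == nb) == (wa == wb));  /* EQ  */
        assert ((na <  nb) == (wa <  wb));  /* LTU */
        assert ((na >  nb) == (wa >  wb));  /* GTU */
      }

  puts ("narrow and wide comparisons agree");
  return 0;
}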
@@ -10536,41 +10957,38 @@ simplify_comparison (code, pop0, pop1)
return code;
}
-/* Return 1 if we know that X, a comparison operation, is not operating
- on a floating-point value or is EQ or NE, meaning that we can safely
- reverse it. */
-
-static int
-reversible_comparison_p (x)
- rtx x;
+/* Like jump.c's reversed_comparison_code, but use combine infrastructure for
+ searching backward. */
+static enum rtx_code
+combine_reversed_comparison_code (exp)
+ rtx exp;
{
- if (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
- || flag_fast_math
- || GET_CODE (x) == NE || GET_CODE (x) == EQ)
- return 1;
-
- switch (GET_MODE_CLASS (GET_MODE (XEXP (x, 0))))
- {
- case MODE_INT:
- case MODE_PARTIAL_INT:
- case MODE_COMPLEX_INT:
- return 1;
-
- case MODE_CC:
- /* If the mode of the condition codes tells us that this is safe,
- we need look no further. */
- if (REVERSIBLE_CC_MODE (GET_MODE (XEXP (x, 0))))
- return 1;
-
- /* Otherwise try and find where the condition codes were last set and
- use that. */
- x = get_last_value (XEXP (x, 0));
- return (x && GET_CODE (x) == COMPARE
- && ! FLOAT_MODE_P (GET_MODE (XEXP (x, 0))));
-
- default:
- return 0;
- }
+ enum rtx_code code1 = reversed_comparison_code (exp, NULL);
+ rtx x;
+
+ if (code1 != UNKNOWN
+ || GET_MODE_CLASS (GET_MODE (XEXP (exp, 0))) != MODE_CC)
+ return code1;
+ /* Otherwise try and find where the condition codes were last set and
+ use that. */
+ x = get_last_value (XEXP (exp, 0));
+ if (!x || GET_CODE (x) != COMPARE)
+ return UNKNOWN;
+ return reversed_comparison_code_parts (GET_CODE (exp),
+ XEXP (x, 0), XEXP (x, 1), NULL);
+}
+/* Return comparison with reversed code of EXP and operands OP0 and OP1.
+ Return NULL_RTX in case we fail to do the reversal. */
+static rtx
+reversed_comparison (exp, mode, op0, op1)
+ rtx exp, op0, op1;
+ enum machine_mode mode;
+{
+ enum rtx_code reversed_code = combine_reversed_comparison_code (exp);
+ if (reversed_code == UNKNOWN)
+ return NULL_RTX;
+ else
+ return gen_binary (reversed_code, mode, op0, op1);
}
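combine_reversed_comparison_code defers to the generic reverser and only then chases the last COMPARE that set the condition register. A hypothetical, much-simplified sketch of the underlying reversal table, including the IEEE caveat that makes an UNKNOWN result necessary (the enum here is illustrative, not the real rtx_code):

#include <stdio.h>

enum cmp_code { EQ, NE, LT, GE, GT, LE, UNKNOWN };

static enum cmp_code
reverse_cmp (enum cmp_code code, int is_ieee_float)
{
  /* Only EQ/NE reverse safely for IEEE floats: !(a < b) does not
     imply a >= b when either operand is a NaN.  */
  if (is_ieee_float && code != EQ && code != NE)
    return UNKNOWN;

  switch (code)
    {
    case EQ: return NE;
    case NE: return EQ;
    case LT: return GE;
    case GE: return LT;
    case GT: return LE;
    case LE: return GT;
    default: return UNKNOWN;
    }
}

int main (void)
{
  printf ("reverse of LT (int):   %d\n", reverse_cmp (LT, 0)); /* GE */
  printf ("reverse of LT (float): %d\n", reverse_cmp (LT, 1)); /* UNKNOWN */
  return 0;
}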
/* Utility function for following routine. Called when X is part of a value
@@ -10581,22 +10999,24 @@ static void
update_table_tick (x)
rtx x;
{
- register enum rtx_code code = GET_CODE (x);
- register char *fmt = GET_RTX_FORMAT (code);
- register int i;
+ enum rtx_code code = GET_CODE (x);
+ const char *fmt = GET_RTX_FORMAT (code);
+ int i;
if (code == REG)
{
- int regno = REGNO (x);
- int endregno = regno + (regno < FIRST_PSEUDO_REGISTER
- ? HARD_REGNO_NREGS (regno, GET_MODE (x)) : 1);
+ unsigned int regno = REGNO (x);
+ unsigned int endregno
+ = regno + (regno < FIRST_PSEUDO_REGISTER
+ ? HARD_REGNO_NREGS (regno, GET_MODE (x)) : 1);
+ unsigned int r;
- for (i = regno; i < endregno; i++)
- reg_last_set_table_tick[i] = label_tick;
+ for (r = regno; r < endregno; r++)
+ reg_last_set_table_tick[r] = label_tick;
return;
}
-
+
for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
/* Note that we can't have an "E" in values stored; see
get_last_value_validate. */
@@ -10615,10 +11035,11 @@ record_value_for_reg (reg, insn, value)
rtx insn;
rtx value;
{
- int regno = REGNO (reg);
- int endregno = regno + (regno < FIRST_PSEUDO_REGISTER
- ? HARD_REGNO_NREGS (regno, GET_MODE (reg)) : 1);
- int i;
+ unsigned int regno = REGNO (reg);
+ unsigned int endregno
+ = regno + (regno < FIRST_PSEUDO_REGISTER
+ ? HARD_REGNO_NREGS (regno, GET_MODE (reg)) : 1);
+ unsigned int i;
/* If VALUE contains REG and we have a previous value for REG, substitute
the previous value. */
@@ -10629,20 +11050,33 @@ record_value_for_reg (reg, insn, value)
/* Set things up so get_last_value is allowed to see anything set up to
our insn. */
subst_low_cuid = INSN_CUID (insn);
- tem = get_last_value (reg);
+ tem = get_last_value (reg);
+
+ /* If TEM is simply a binary operation with two CLOBBERs as operands,
+ it isn't going to be useful and will take a lot of time to process,
+ so just use the CLOBBER. */
if (tem)
- value = replace_rtx (copy_rtx (value), reg, tem);
+ {
+ if ((GET_RTX_CLASS (GET_CODE (tem)) == '2'
+ || GET_RTX_CLASS (GET_CODE (tem)) == 'c')
+ && GET_CODE (XEXP (tem, 0)) == CLOBBER
+ && GET_CODE (XEXP (tem, 1)) == CLOBBER)
+ tem = XEXP (tem, 0);
+
+ value = replace_rtx (copy_rtx (value), reg, tem);
+ }
}
/* For each register modified, show we don't know its value, that
we don't know about its bitwise content, that its value has been
updated, and that we don't know the location of the death of the
register. */
- for (i = regno; i < endregno; i ++)
+ for (i = regno; i < endregno; i++)
{
if (insn)
reg_last_set[i] = insn;
+
reg_last_set_value[i] = 0;
reg_last_set_mode[i] = 0;
reg_last_set_nonzero_bits[i] = 0;
@@ -10697,16 +11131,17 @@ record_value_for_reg (reg, insn, value)
}
}
-/* Used for communication between the following two routines. */
-static rtx record_dead_insn;
-
/* Called via note_stores from record_dead_and_set_regs to handle one
- SET or CLOBBER in an insn. */
+ SET or CLOBBER in an insn. DATA is the instruction in which the
+ set is occurring. */
static void
-record_dead_and_set_regs_1 (dest, setter)
+record_dead_and_set_regs_1 (dest, setter, data)
rtx dest, setter;
+ void *data;
{
+ rtx record_dead_insn = (rtx) data;
+
if (GET_CODE (dest) == SUBREG)
dest = SUBREG_REG (dest);
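This hunk belongs to a wider interface change: note_stores now threads a void *data argument through to its callback, so the insn no longer travels via the file-static record_dead_insn deleted above. A small standalone sketch of that callback-with-context pattern (hypothetical walker; not the real note_stores signature):

#include <stdio.h>

typedef void (*store_fn) (int dest_regno, void *data);

/* Toy stand-in for note_stores: report every store to the callback,
   passing the caller's context pointer along untouched.  */
static void
walk_stores (const int *dests, int n, store_fn fn, void *data)
{
  for (int i = 0; i < n; i++)
    fn (dests[i], data);
}

static void
record_store (int dest_regno, void *data)
{
  int insn_uid = *(int *) data;   /* recover the caller's context */
  printf ("insn %d sets reg %d\n", insn_uid, dest_regno);
}

int main (void)
{
  int dests[] = { 3, 7 };
  int uid = 42;

  walk_stores (dests, 2, record_store, &uid);
  return 0;
}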
@@ -10748,16 +11183,16 @@ static void
record_dead_and_set_regs (insn)
rtx insn;
{
- register rtx link;
- int i;
+ rtx link;
+ unsigned int i;
for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
{
if (REG_NOTE_KIND (link) == REG_DEAD
&& GET_CODE (XEXP (link, 0)) == REG)
{
- int regno = REGNO (XEXP (link, 0));
- int endregno
+ unsigned int regno = REGNO (XEXP (link, 0));
+ unsigned int endregno
= regno + (regno < FIRST_PSEUDO_REGISTER
? HARD_REGNO_NREGS (regno, GET_MODE (XEXP (link, 0)))
: 1);
@@ -10772,7 +11207,7 @@ record_dead_and_set_regs (insn)
if (GET_CODE (insn) == CALL_INSN)
{
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
- if (call_used_regs[i])
+ if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
{
reg_last_set_value[i] = 0;
reg_last_set_mode[i] = 0;
@@ -10782,10 +11217,98 @@ record_dead_and_set_regs (insn)
}
last_call_cuid = mem_last_set = INSN_CUID (insn);
+
+ /* Don't bother recording what this insn does. It might set the
+ return value register, but we can't combine into a call
+ pattern anyway, so there's no point trying (and it may cause
+ a crash, if e.g. we wind up asking for last_set_value of a
+ SUBREG of the return value register). */
+ return;
}
- record_dead_insn = insn;
- note_stores (PATTERN (insn), record_dead_and_set_regs_1);
+ note_stores (PATTERN (insn), record_dead_and_set_regs_1, insn);
+}
+
+/* If a SUBREG has the promoted bit set, it is in fact a property of the
+ register present in the SUBREG, so for each such SUBREG go back and
+ adjust nonzero and sign bit information of the registers that are
+ known to have some zero/sign bits set.
+
+ This is needed because when combine blows the SUBREGs away, the
+ information on zero/sign bits is lost and further combines can be
+ missed because of that. */
+
+static void
+record_promoted_value (insn, subreg)
+ rtx insn;
+ rtx subreg;
+{
+ rtx links, set;
+ unsigned int regno = REGNO (SUBREG_REG (subreg));
+ enum machine_mode mode = GET_MODE (subreg);
+
+ if (GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT)
+ return;
+
+ for (links = LOG_LINKS (insn); links;)
+ {
+ insn = XEXP (links, 0);
+ set = single_set (insn);
+
+ if (! set || GET_CODE (SET_DEST (set)) != REG
+ || REGNO (SET_DEST (set)) != regno
+ || GET_MODE (SET_DEST (set)) != GET_MODE (SUBREG_REG (subreg)))
+ {
+ links = XEXP (links, 1);
+ continue;
+ }
+
+ if (reg_last_set[regno] == insn)
+ {
+ if (SUBREG_PROMOTED_UNSIGNED_P (subreg))
+ reg_last_set_nonzero_bits[regno] &= GET_MODE_MASK (mode);
+ }
+
+ if (GET_CODE (SET_SRC (set)) == REG)
+ {
+ regno = REGNO (SET_SRC (set));
+ links = LOG_LINKS (insn);
+ }
+ else
+ break;
+ }
+}
+
+/* Scan X for promoted SUBREGs. For each one found,
+ note what it implies to the registers used in it. */
+
+static void
+check_promoted_subreg (insn, x)
+ rtx insn;
+ rtx x;
+{
+ if (GET_CODE (x) == SUBREG && SUBREG_PROMOTED_VAR_P (x)
+ && GET_CODE (SUBREG_REG (x)) == REG)
+ record_promoted_value (insn, x);
+ else
+ {
+ const char *format = GET_RTX_FORMAT (GET_CODE (x));
+ int i, j;
+
+ for (i = 0; i < GET_RTX_LENGTH (GET_CODE (x)); i++)
+ switch (format[i])
+ {
+ case 'e':
+ check_promoted_subreg (insn, XEXP (x, i));
+ break;
+ case 'V':
+ case 'E':
+ if (XVEC (x, i) != 0)
+ for (j = 0; j < XVECLEN (x, i); j++)
+ check_promoted_subreg (insn, XVECEXP (x, i, j));
+ break;
+ }
+ }
}
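check_promoted_subreg walks an rtx the way most of combine does: GET_RTX_FORMAT yields a per-code format string and the walker recurses on 'e' operands and loops over 'E'/'V' vectors. A standalone sketch of that format-driven traversal with a toy node type (only 'e' is modeled here):

#include <stdio.h>

struct node
{
  const char *name;
  const char *fmt;          /* one char per operand, as in GET_RTX_FORMAT */
  struct node *ops[2];
};

static void
walk (struct node *x)
{
  printf ("visiting %s\n", x->name);
  for (int i = 0; x->fmt[i] != '\0'; i++)
    if (x->fmt[i] == 'e' && x->ops[i])
      walk (x->ops[i]);     /* recurse exactly where the format says to */
}

int main (void)
{
  struct node reg    = { "reg",    "",   { 0, 0 } };
  struct node subreg = { "subreg", "e",  { &reg, 0 } };
  struct node plus   = { "plus",   "ee", { &subreg, &reg } };

  walk (&plus);
  return 0;
}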
/* Utility routine for the following function. Verify that all the registers
@@ -10806,22 +11329,26 @@ get_last_value_validate (loc, insn, tick, replace)
int replace;
{
rtx x = *loc;
- char *fmt = GET_RTX_FORMAT (GET_CODE (x));
+ const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
int len = GET_RTX_LENGTH (GET_CODE (x));
int i;
if (GET_CODE (x) == REG)
{
- int regno = REGNO (x);
- int endregno = regno + (regno < FIRST_PSEUDO_REGISTER
- ? HARD_REGNO_NREGS (regno, GET_MODE (x)) : 1);
- int j;
+ unsigned int regno = REGNO (x);
+ unsigned int endregno
+ = regno + (regno < FIRST_PSEUDO_REGISTER
+ ? HARD_REGNO_NREGS (regno, GET_MODE (x)) : 1);
+ unsigned int j;
for (j = regno; j < endregno; j++)
if (reg_last_set_invalid[j]
- /* If this is a pseudo-register that was only set once, it is
- always valid. */
- || (! (regno >= FIRST_PSEUDO_REGISTER && REG_N_SETS (regno) == 1)
+ /* If this is a pseudo-register that was only set once and not
+ live at the beginning of the function, it is always valid. */
+ || (! (regno >= FIRST_PSEUDO_REGISTER
+ && REG_N_SETS (regno) == 1
+ && (! REGNO_REG_SET_P
+ (BASIC_BLOCK (0)->global_live_at_start, regno)))
&& reg_last_set_label[j] > tick))
{
if (replace)
@@ -10861,7 +11388,7 @@ static rtx
get_last_value (x)
rtx x;
{
- int regno;
+ unsigned int regno;
rtx value;
/* If this is a non-paradoxical SUBREG, get the value of its operand and
@@ -10880,12 +11407,22 @@ get_last_value (x)
regno = REGNO (x);
value = reg_last_set_value[regno];
- /* If we don't have a value or if it isn't for this basic block,
- return 0. */
+  /* If we don't have a value, or if it isn't for this basic block and
+     it's either a hard register, set more than once, or live at the
+     beginning of the function, return 0.
+
+     If it's not live at the beginning of the function, then the reg
+     is always set before being used (it is never used without being
+     set).  And if it's set only once, and always set before use, then
+     all uses must see the same last value, even if it's not from this
+     basic block.  */
if (value == 0
- || (REG_N_SETS (regno) != 1
- && reg_last_set_label[regno] != label_tick))
+ || (reg_last_set_label[regno] != label_tick
+ && (regno < FIRST_PSEUDO_REGISTER
+ || REG_N_SETS (regno) != 1
+ || (REGNO_REG_SET_P
+ (BASIC_BLOCK (0)->global_live_at_start, regno)))))
return 0;
/* If the value was set in a later insn than the ones we are processing,
@@ -10914,26 +11451,26 @@ get_last_value (x)
static int
use_crosses_set_p (x, from_cuid)
- register rtx x;
+ rtx x;
int from_cuid;
{
- register char *fmt;
- register int i;
- register enum rtx_code code = GET_CODE (x);
+ const char *fmt;
+ int i;
+ enum rtx_code code = GET_CODE (x);
if (code == REG)
{
- register int regno = REGNO (x);
- int endreg = regno + (regno < FIRST_PSEUDO_REGISTER
- ? HARD_REGNO_NREGS (regno, GET_MODE (x)) : 1);
-
+ unsigned int regno = REGNO (x);
+ unsigned endreg = regno + (regno < FIRST_PSEUDO_REGISTER
+ ? HARD_REGNO_NREGS (regno, GET_MODE (x)) : 1);
+
#ifdef PUSH_ROUNDING
/* Don't allow uses of the stack pointer to be moved,
because we don't know whether the move crosses a push insn. */
- if (regno == STACK_POINTER_REGNUM)
+ if (regno == STACK_POINTER_REGNUM && PUSH_ARGS)
return 1;
#endif
- for (;regno < endreg; regno++)
+ for (; regno < endreg; regno++)
if (reg_last_set[regno]
&& INSN_CUID (reg_last_set[regno]) > from_cuid)
return 1;
@@ -10949,7 +11486,7 @@ use_crosses_set_p (x, from_cuid)
{
if (fmt[i] == 'E')
{
- register int j;
+ int j;
for (j = XVECLEN (x, i) - 1; j >= 0; j--)
if (use_crosses_set_p (XVECEXP (x, i, j), from_cuid))
return 1;
@@ -10964,26 +11501,27 @@ use_crosses_set_p (x, from_cuid)
/* Define three variables used for communication between the following
routines. */
-static int reg_dead_regno, reg_dead_endregno;
+static unsigned int reg_dead_regno, reg_dead_endregno;
static int reg_dead_flag;
/* Function called via note_stores from reg_dead_at_p.
- If DEST is within [reg_dead_regno, reg_dead_endregno), set
+ If DEST is within [reg_dead_regno, reg_dead_endregno), set
reg_dead_flag to 1 if X is a CLOBBER and to -1 it is a SET. */
static void
-reg_dead_at_p_1 (dest, x)
+reg_dead_at_p_1 (dest, x, data)
rtx dest;
rtx x;
+ void *data ATTRIBUTE_UNUSED;
{
- int regno, endregno;
+ unsigned int regno, endregno;
if (GET_CODE (dest) != REG)
return;
regno = REGNO (dest);
- endregno = regno + (regno < FIRST_PSEUDO_REGISTER
+ endregno = regno + (regno < FIRST_PSEUDO_REGISTER
? HARD_REGNO_NREGS (regno, GET_MODE (dest)) : 1);
if (reg_dead_endregno > regno && reg_dead_regno < endregno)
@@ -11003,7 +11541,8 @@ reg_dead_at_p (reg, insn)
rtx reg;
rtx insn;
{
- int block, i;
+ int block;
+ unsigned int i;
/* Set variables for reg_dead_at_p_1. */
reg_dead_regno = REGNO (reg);
@@ -11027,7 +11566,7 @@ reg_dead_at_p (reg, insn)
for (; insn && GET_CODE (insn) != CODE_LABEL && GET_CODE (insn) != BARRIER;
insn = prev_nonnote_insn (insn))
{
- note_stores (PATTERN (insn), reg_dead_at_p_1);
+ note_stores (PATTERN (insn), reg_dead_at_p_1, NULL);
if (reg_dead_flag)
return reg_dead_flag == 1 ? 1 : 0;
@@ -11062,8 +11601,8 @@ static void
mark_used_regs_combine (x)
rtx x;
{
- register RTX_CODE code = GET_CODE (x);
- register int regno;
+ RTX_CODE code = GET_CODE (x);
+ unsigned int regno;
int i;
switch (code)
@@ -11097,6 +11636,8 @@ mark_used_regs_combine (x)
If so, mark all of them just like the first. */
if (regno < FIRST_PSEUDO_REGISTER)
{
+ unsigned int endregno, r;
+
/* None of this applies to the stack, frame or arg pointers */
if (regno == STACK_POINTER_REGNUM
#if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM
@@ -11108,9 +11649,9 @@ mark_used_regs_combine (x)
|| regno == FRAME_POINTER_REGNUM)
return;
- i = HARD_REGNO_NREGS (regno, GET_MODE (x));
- while (i-- > 0)
- SET_HARD_REG_BIT (newpat_used_regs, regno + i);
+ endregno = regno + HARD_REGNO_NREGS (regno, GET_MODE (x));
+ for (r = regno; r < endregno; r++)
+ SET_HARD_REG_BIT (newpat_used_regs, r);
}
return;
@@ -11118,7 +11659,7 @@ mark_used_regs_combine (x)
{
/* If setting a MEM, or a SUBREG of a MEM, then note any hard regs in
the address. */
- register rtx testreg = SET_DEST (x);
+ rtx testreg = SET_DEST (x);
while (GET_CODE (testreg) == SUBREG
|| GET_CODE (testreg) == ZERO_EXTRACT
@@ -11140,23 +11681,22 @@ mark_used_regs_combine (x)
/* Recursively scan the operands of this expression. */
{
- register char *fmt = GET_RTX_FORMAT (code);
+ const char *fmt = GET_RTX_FORMAT (code);
for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
{
- if (fmt[i] == 'e')
+ if (fmt[i] == 'e')
mark_used_regs_combine (XEXP (x, i));
- else if (fmt[i] == 'E')
- {
- register int j;
+ else if (fmt[i] == 'E')
+ {
+ int j;
- for (j = 0; j < XVECLEN (x, i); j++)
- mark_used_regs_combine (XVECEXP (x, i, j));
- }
+ for (j = 0; j < XVECLEN (x, i); j++)
+ mark_used_regs_combine (XVECEXP (x, i, j));
+ }
}
}
}
-
/* Remove register number REGNO from the dead registers list of INSN.
@@ -11164,10 +11704,10 @@ mark_used_regs_combine (x)
rtx
remove_death (regno, insn)
- int regno;
+ unsigned int regno;
rtx insn;
{
- register rtx note = find_regno_note (insn, REG_DEAD, regno);
+ rtx note = find_regno_note (insn, REG_DEAD, regno);
if (note)
{
@@ -11181,7 +11721,7 @@ remove_death (regno, insn)
/* For each register (hardware or pseudo) used within expression X, if its
death is in an instruction with cuid between FROM_CUID (inclusive) and
TO_INSN (exclusive), put a REG_DEAD note for that register in the
- list headed by PNOTES.
+ list headed by PNOTES.
That said, don't move registers killed by maybe_kill_insn.
@@ -11196,19 +11736,19 @@ move_deaths (x, maybe_kill_insn, from_cuid, to_insn, pnotes)
rtx to_insn;
rtx *pnotes;
{
- register char *fmt;
- register int len, i;
- register enum rtx_code code = GET_CODE (x);
+ const char *fmt;
+ int len, i;
+ enum rtx_code code = GET_CODE (x);
if (code == REG)
{
- register int regno = REGNO (x);
- register rtx where_dead = reg_last_death[regno];
- register rtx before_dead, after_dead;
+ unsigned int regno = REGNO (x);
+ rtx where_dead = reg_last_death[regno];
+ rtx before_dead, after_dead;
/* Don't move the register if it gets killed in between from and to */
if (maybe_kill_insn && reg_set_p (x, maybe_kill_insn)
- && !reg_referenced_p (x, maybe_kill_insn))
+ && ! reg_referenced_p (x, maybe_kill_insn))
return;
/* WHERE_DEAD could be a USE insn made by combine, so first we
@@ -11216,6 +11756,7 @@ move_deaths (x, maybe_kill_insn, from_cuid, to_insn, pnotes)
before_dead = where_dead;
while (before_dead && INSN_UID (before_dead) > max_uid_cuid)
before_dead = PREV_INSN (before_dead);
+
after_dead = where_dead;
while (after_dead && INSN_UID (after_dead) > max_uid_cuid)
after_dead = NEXT_INSN (after_dead);
@@ -11241,12 +11782,13 @@ move_deaths (x, maybe_kill_insn, from_cuid, to_insn, pnotes)
&& (GET_MODE_SIZE (GET_MODE (XEXP (note, 0)))
> GET_MODE_SIZE (GET_MODE (x))))
{
- int deadregno = REGNO (XEXP (note, 0));
- int deadend
+ unsigned int deadregno = REGNO (XEXP (note, 0));
+ unsigned int deadend
= (deadregno + HARD_REGNO_NREGS (deadregno,
GET_MODE (XEXP (note, 0))));
- int ourend = regno + HARD_REGNO_NREGS (regno, GET_MODE (x));
- int i;
+ unsigned int ourend
+ = regno + HARD_REGNO_NREGS (regno, GET_MODE (x));
+ unsigned int i;
for (i = deadregno; i < deadend; i++)
if (i < regno || i >= ourend)
@@ -11255,6 +11797,7 @@ move_deaths (x, maybe_kill_insn, from_cuid, to_insn, pnotes)
gen_rtx_REG (reg_raw_mode[i], i),
REG_NOTES (where_dead));
}
+
/* If we didn't find any note, or if we found a REG_DEAD note that
covers only part of the given reg, and we have a multi-reg hard
register, then to be safe we must check for REG_DEAD notes
@@ -11267,8 +11810,9 @@ move_deaths (x, maybe_kill_insn, from_cuid, to_insn, pnotes)
&& regno < FIRST_PSEUDO_REGISTER
&& HARD_REGNO_NREGS (regno, GET_MODE (x)) > 1)
{
- int ourend = regno + HARD_REGNO_NREGS (regno, GET_MODE (x));
- int i, offset;
+ unsigned int ourend
+ = regno + HARD_REGNO_NREGS (regno, GET_MODE (x));
+ unsigned int i, offset;
rtx oldnotes = 0;
if (note)
@@ -11305,6 +11849,7 @@ move_deaths (x, maybe_kill_insn, from_cuid, to_insn, pnotes)
that accesses one word of a multi-word item, some
	     piece of every register in the expression is used by
this insn, so remove any old death. */
+ /* ??? So why do we test for equality of the sizes? */
if (GET_CODE (dest) == ZERO_EXTRACT
|| GET_CODE (dest) == STRICT_LOW_PART
@@ -11343,7 +11888,7 @@ move_deaths (x, maybe_kill_insn, from_cuid, to_insn, pnotes)
{
if (fmt[i] == 'E')
{
- register int j;
+ int j;
for (j = XVECLEN (x, i) - 1; j >= 0; j--)
move_deaths (XVECEXP (x, i, j), maybe_kill_insn, from_cuid,
to_insn, pnotes);
@@ -11367,7 +11912,7 @@ reg_bitfield_target_p (x, body)
{
rtx dest = SET_DEST (body);
rtx target;
- int regno, tregno, endregno, endtregno;
+ unsigned int regno, tregno, endregno, endtregno;
if (GET_CODE (dest) == ZERO_EXTRACT)
target = XEXP (dest, 0);
@@ -11398,7 +11943,7 @@ reg_bitfield_target_p (x, body)
return 1;
return 0;
-}
+}
/* Given a chain of REG_NOTES originally from FROM_INSN, try to place them
as appropriate. I3 and I2 are the insns resulting from the combination
@@ -11435,6 +11980,7 @@ distribute_notes (notes, from_insn, i3, i2, elim_i2, elim_i1)
switch (REG_NOTE_KIND (note))
{
case REG_BR_PROB:
+ case REG_BR_PRED:
case REG_EXEC_COUNT:
/* Doesn't matter much where we put this, as long as it's somewhere.
It is preferable to keep these notes on branches, which is most
@@ -11442,10 +11988,46 @@ distribute_notes (notes, from_insn, i3, i2, elim_i2, elim_i1)
place = i3;
break;
+ case REG_VTABLE_REF:
+ /* ??? Should remain with *a particular* memory load. Given the
+ nature of vtable data, the last insn seems relatively safe. */
+ place = i3;
+ break;
+
+ case REG_NON_LOCAL_GOTO:
+ if (GET_CODE (i3) == JUMP_INSN)
+ place = i3;
+ else if (i2 && GET_CODE (i2) == JUMP_INSN)
+ place = i2;
+ else
+ abort ();
+ break;
+
case REG_EH_REGION:
- /* This note must remain with the call. It should not be possible
- for both I2 and I3 to be a call. */
- if (GET_CODE (i3) == CALL_INSN)
+ /* These notes must remain with the call or trapping instruction. */
+ if (GET_CODE (i3) == CALL_INSN)
+ place = i3;
+ else if (i2 && GET_CODE (i2) == CALL_INSN)
+ place = i2;
+ else if (flag_non_call_exceptions)
+ {
+ if (may_trap_p (i3))
+ place = i3;
+ else if (i2 && may_trap_p (i2))
+ place = i2;
+ /* ??? Otherwise assume we've combined things such that we
+ can now prove that the instructions can't trap. Drop the
+ note in this case. */
+ }
+ else
+ abort ();
+ break;
+
+ case REG_NORETURN:
+ case REG_SETJMP:
+ /* These notes must remain with the call. It should not be
+ possible for both I2 and I3 to be a call. */
+ if (GET_CODE (i3) == CALL_INSN)
place = i3;
else if (i2 && GET_CODE (i2) == CALL_INSN)
place = i2;
@@ -11486,7 +12068,8 @@ distribute_notes (notes, from_insn, i3, i2, elim_i2, elim_i1)
is one already. */
else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3))
&& ! (GET_CODE (XEXP (note, 0)) == REG
- ? find_regno_note (i3, REG_DEAD, REGNO (XEXP (note, 0)))
+ ? find_regno_note (i3, REG_DEAD,
+ REGNO (XEXP (note, 0)))
: find_reg_note (i3, REG_DEAD, XEXP (note, 0))))
{
PUT_REG_NOTE_KIND (note, REG_DEAD);
@@ -11496,7 +12079,6 @@ distribute_notes (notes, from_insn, i3, i2, elim_i2, elim_i1)
case REG_EQUAL:
case REG_EQUIV:
- case REG_NONNEG:
case REG_NOALIAS:
/* These notes say something about results of an insn. We can
only support them if they used to be on I3 in which case they
@@ -11543,7 +12125,7 @@ distribute_notes (notes, from_insn, i3, i2, elim_i2, elim_i1)
if (i2
&& (reg_mentioned_p (XEXP (note, 0), PATTERN (i2))
- || ((tem = find_reg_note (i2, REG_EQUAL, NULL_RTX))
+ || ((tem = find_reg_note (i2, REG_EQUAL, NULL_RTX))
&& GET_CODE (XEXP (tem, 0)) == LABEL_REF
&& XEXP (XEXP (tem, 0), 0) == XEXP (note, 0))))
{
@@ -11552,11 +12134,33 @@ distribute_notes (notes, from_insn, i3, i2, elim_i2, elim_i1)
else
place = i2;
}
+
+ /* Don't attach REG_LABEL note to a JUMP_INSN which has
+ JUMP_LABEL already. Instead, decrement LABEL_NUSES. */
+ if (place && GET_CODE (place) == JUMP_INSN && JUMP_LABEL (place))
+ {
+ if (JUMP_LABEL (place) != XEXP (note, 0))
+ abort ();
+ if (GET_CODE (JUMP_LABEL (place)) == CODE_LABEL)
+ LABEL_NUSES (JUMP_LABEL (place))--;
+ place = 0;
+ }
+ if (place2 && GET_CODE (place2) == JUMP_INSN && JUMP_LABEL (place2))
+ {
+ if (JUMP_LABEL (place2) != XEXP (note, 0))
+ abort ();
+ if (GET_CODE (JUMP_LABEL (place2)) == CODE_LABEL)
+ LABEL_NUSES (JUMP_LABEL (place2))--;
+ place2 = 0;
+ }
break;
+ case REG_NONNEG:
case REG_WAS_0:
- /* It is too much trouble to try to see if this note is still
- correct in all situations. It is better to simply delete it. */
+ /* These notes say something about the value of a register prior
+ to the execution of an insn. It is too much trouble to see
+ if the note is still correct in all situations. It is better
+ to simply delete it. */
break;
case REG_RETVAL:
@@ -11571,6 +12175,10 @@ distribute_notes (notes, from_insn, i3, i2, elim_i2, elim_i1)
place = prev_real_insn (from_insn);
if (tem && place)
XEXP (tem, 0) = place;
+ /* If we're deleting the last remaining instruction of a
+ libcall sequence, don't add the notes. */
+ else if (XEXP (note, 0) == from_insn)
+ tem = place = 0;
}
break;
@@ -11584,6 +12192,10 @@ distribute_notes (notes, from_insn, i3, i2, elim_i2, elim_i1)
place = next_real_insn (from_insn);
if (tem && place)
XEXP (tem, 0) = place;
+ /* If we're deleting the last remaining instruction of a
+ libcall sequence, don't add the notes. */
+ else if (XEXP (note, 0) == from_insn)
+ tem = place = 0;
}
break;
@@ -11604,7 +12216,7 @@ distribute_notes (notes, from_insn, i3, i2, elim_i2, elim_i1)
if (from_insn
&& GET_CODE (from_insn) == CALL_INSN
- && find_reg_fusage (from_insn, USE, XEXP (note, 0)))
+ && find_reg_fusage (from_insn, USE, XEXP (note, 0)))
place = from_insn;
else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3)))
place = i3;
@@ -11612,28 +12224,23 @@ distribute_notes (notes, from_insn, i3, i2, elim_i2, elim_i1)
&& reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
place = i2;
- if (XEXP (note, 0) == elim_i2 || XEXP (note, 0) == elim_i1)
+ if (rtx_equal_p (XEXP (note, 0), elim_i2)
+ || rtx_equal_p (XEXP (note, 0), elim_i1))
break;
- /* If the register is used in both I2 and I3 and it dies in I3,
- we might have added another reference to it. If reg_n_refs
- was 2, bump it to 3. This has to be correct since the
- register must have been set somewhere. The reason this is
- done is because local-alloc.c treats 2 references as a
- special case. */
-
- if (place == i3 && i2 != 0 && GET_CODE (XEXP (note, 0)) == REG
- && REG_N_REFS (REGNO (XEXP (note, 0)))== 2
- && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
- REG_N_REFS (REGNO (XEXP (note, 0))) = 3;
-
if (place == 0)
{
- for (tem = prev_nonnote_insn (i3);
- place == 0 && tem
- && (GET_CODE (tem) == INSN || GET_CODE (tem) == CALL_INSN);
- tem = prev_nonnote_insn (tem))
+ basic_block bb = BASIC_BLOCK (this_basic_block);
+
+ for (tem = PREV_INSN (i3); place == 0; tem = PREV_INSN (tem))
{
+ if (! INSN_P (tem))
+ {
+ if (tem == bb->head)
+ break;
+ continue;
+ }
+
/* If the register is being set at TEM, see if that is all
TEM is doing. If so, delete TEM. Otherwise, make this
into a REG_UNUSED note instead. */
@@ -11647,14 +12254,14 @@ distribute_notes (notes, from_insn, i3, i2, elim_i2, elim_i1)
if (set != 0)
for (inner_dest = SET_DEST (set);
- GET_CODE (inner_dest) == STRICT_LOW_PART
- || GET_CODE (inner_dest) == SUBREG
- || GET_CODE (inner_dest) == ZERO_EXTRACT;
+ (GET_CODE (inner_dest) == STRICT_LOW_PART
+ || GET_CODE (inner_dest) == SUBREG
+ || GET_CODE (inner_dest) == ZERO_EXTRACT);
inner_dest = XEXP (inner_dest, 0))
;
/* Verify that it was the set, and not a clobber that
- modified the register.
+ modified the register.
CC0 targets must be careful to maintain setter/user
pairs. If we cannot delete the setter due to side
@@ -11671,7 +12278,7 @@ distribute_notes (notes, from_insn, i3, i2, elim_i2, elim_i1)
)
{
/* Move the notes and links of TEM elsewhere.
- This might delete other dead insns recursively.
+ This might delete other dead insns recursively.
First set the pattern to something that won't use
any register. */
@@ -11697,7 +12304,8 @@ distribute_notes (notes, from_insn, i3, i2, elim_i2, elim_i1)
distribute_links (LOG_LINKS (cc0_setter));
PUT_CODE (cc0_setter, NOTE);
- NOTE_LINE_NUMBER (cc0_setter) = NOTE_INSN_DELETED;
+ NOTE_LINE_NUMBER (cc0_setter)
+ = NOTE_INSN_DELETED;
NOTE_SOURCE_FILE (cc0_setter) = 0;
}
#endif
@@ -11713,62 +12321,61 @@ distribute_notes (notes, from_insn, i3, i2, elim_i2, elim_i1)
if (! find_regno_note (tem, REG_UNUSED,
REGNO (XEXP (note, 0))))
REG_NOTES (tem)
- = gen_rtx_EXPR_LIST (REG_UNUSED,
- XEXP (note, 0),
+ = gen_rtx_EXPR_LIST (REG_UNUSED, XEXP (note, 0),
REG_NOTES (tem));
}
else
{
PUT_REG_NOTE_KIND (note, REG_UNUSED);
-
+
/* If there isn't already a REG_UNUSED note, put one
here. */
if (! find_regno_note (tem, REG_UNUSED,
REGNO (XEXP (note, 0))))
place = tem;
break;
- }
- }
- else if (reg_referenced_p (XEXP (note, 0), PATTERN (tem))
- || (GET_CODE (tem) == CALL_INSN
- && find_reg_fusage (tem, USE, XEXP (note, 0))))
- {
- place = tem;
-
- /* If we are doing a 3->2 combination, and we have a
- register which formerly died in i3 and was not used
- by i2, which now no longer dies in i3 and is used in
- i2 but does not die in i2, and place is between i2
- and i3, then we may need to move a link from place to
- i2. */
- if (i2 && INSN_UID (place) <= max_uid_cuid
- && INSN_CUID (place) > INSN_CUID (i2)
- && from_insn && INSN_CUID (from_insn) > INSN_CUID (i2)
- && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
- {
- rtx links = LOG_LINKS (place);
- LOG_LINKS (place) = 0;
- distribute_links (links);
- }
+ }
+ }
+ else if (reg_referenced_p (XEXP (note, 0), PATTERN (tem))
+ || (GET_CODE (tem) == CALL_INSN
+ && find_reg_fusage (tem, USE, XEXP (note, 0))))
+ {
+ place = tem;
+
+ /* If we are doing a 3->2 combination, and we have a
+ register which formerly died in i3 and was not used
+ by i2, which now no longer dies in i3 and is used in
+ i2 but does not die in i2, and place is between i2
+ and i3, then we may need to move a link from place to
+ i2. */
+ if (i2 && INSN_UID (place) <= max_uid_cuid
+ && INSN_CUID (place) > INSN_CUID (i2)
+ && from_insn
+ && INSN_CUID (from_insn) > INSN_CUID (i2)
+ && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
+ {
+ rtx links = LOG_LINKS (place);
+ LOG_LINKS (place) = 0;
+ distribute_links (links);
+ }
+ break;
+ }
+
+ if (tem == bb->head)
break;
- }
}
-
- /* If we haven't found an insn for the death note and it
- is still a REG_DEAD note, but we have hit a CODE_LABEL,
- insert a USE insn for the register at that label and
- put the death node there. This prevents problems with
- call-state tracking in caller-save.c. */
- if (REG_NOTE_KIND (note) == REG_DEAD && place == 0 && tem != 0)
+
+ /* We haven't found an insn for the death note and it
+ is still a REG_DEAD note, but we have hit the beginning
+ of the block. If the existing life info says the reg
+ was dead, there's nothing left to do. Otherwise, we'll
+ need to do a global life update after combine. */
+ if (REG_NOTE_KIND (note) == REG_DEAD && place == 0
+ && REGNO_REG_SET_P (bb->global_live_at_start,
+ REGNO (XEXP (note, 0))))
{
- place
- = emit_insn_after (gen_rtx_USE (VOIDmode, XEXP (note, 0)),
- tem);
-
- /* If this insn was emitted between blocks, then update
- BLOCK_HEAD of the current block to include it. */
- if (BLOCK_END (this_basic_block - 1) == tem)
- BLOCK_HEAD (this_basic_block) = place;
+ SET_BIT (refresh_blocks, this_basic_block);
+ need_refresh = 1;
}
}
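The rewritten search above no longer stops at the first note or label; it walks clean back to bb->head, and if the note is still homeless it schedules a global life-info refresh instead of emitting a USE. A deliberately simplified standalone model of that control flow (toy insn type; the real loop also inspects each insn it lands on):

#include <stdio.h>

struct insn { int uid; int is_real; struct insn *prev; };

/* Walk PREV_INSN from I3, skipping non-insns, giving up at HEAD.
   NULL models the "mark the block for refresh" path.  */
static struct insn *
find_place (struct insn *i3, struct insn *head)
{
  for (struct insn *tem = i3->prev; ; tem = tem->prev)
    {
      if (! tem->is_real)
        {
          if (tem == head)
            return NULL;
          continue;
        }
      return tem;     /* first candidate; the real code examines it */
    }
}

int main (void)
{
  struct insn head = { 1, 0, NULL };
  struct insn note = { 2, 0, &head };
  struct insn i3   = { 3, 1, &note };

  struct insn *place = find_place (&i3, &head);
  if (place)
    printf ("place = insn %d\n", place->uid);
  else
    puts ("hit block head: refresh life info");
  return 0;
}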
@@ -11778,10 +12385,18 @@ distribute_notes (notes, from_insn, i3, i2, elim_i2, elim_i1)
which is what `dead_or_set_p' checks, so also check for it being
set partially. */
-
if (place && REG_NOTE_KIND (note) == REG_DEAD)
{
- int regno = REGNO (XEXP (note, 0));
+ unsigned int regno = REGNO (XEXP (note, 0));
+
+ /* Similarly, if the instruction on which we want to place
+	     the note is a noop, we'll need to do a global live update
+ after we remove them in delete_noop_moves. */
+ if (noop_move_p (place))
+ {
+ SET_BIT (refresh_blocks, this_basic_block);
+ need_refresh = 1;
+ }
if (dead_or_set_p (place, XEXP (note, 0))
|| reg_bitfield_target_p (XEXP (note, 0), PATTERN (place)))
@@ -11799,85 +12414,76 @@ distribute_notes (notes, from_insn, i3, i2, elim_i2, elim_i1)
/* If this is a death note for a hard reg that is occupying
multiple registers, ensure that we are still using all
parts of the object. If we find a piece of the object
- that is unused, we must add a USE for that piece before
- PLACE and put the appropriate REG_DEAD note on it.
-
- An alternative would be to put a REG_UNUSED for the pieces
- on the insn that set the register, but that can't be done if
- it is not in the same block. It is simpler, though less
- efficient, to add the USE insns. */
+ that is unused, we must arrange for an appropriate REG_DEAD
+ note to be added for it. However, we can't just emit a USE
+ and tag the note to it, since the register might actually
+	   be dead; so we recurse, and the recursive call then finds
+ the previous insn that used this register. */
if (place && regno < FIRST_PSEUDO_REGISTER
&& HARD_REGNO_NREGS (regno, GET_MODE (XEXP (note, 0))) > 1)
{
- int endregno
+ unsigned int endregno
= regno + HARD_REGNO_NREGS (regno,
GET_MODE (XEXP (note, 0)));
int all_used = 1;
- int i;
+ unsigned int i;
for (i = regno; i < endregno; i++)
- if (! refers_to_regno_p (i, i + 1, PATTERN (place), 0)
- && ! find_regno_fusage (place, USE, i))
- {
- rtx piece = gen_rtx_REG (reg_raw_mode[i], i);
- rtx p;
-
- /* See if we already placed a USE note for this
- register in front of PLACE. */
- for (p = place;
- GET_CODE (PREV_INSN (p)) == INSN
- && GET_CODE (PATTERN (PREV_INSN (p))) == USE;
- p = PREV_INSN (p))
- if (rtx_equal_p (piece,
- XEXP (PATTERN (PREV_INSN (p)), 0)))
- {
- p = 0;
- break;
- }
-
- if (p)
- {
- rtx use_insn
- = emit_insn_before (gen_rtx_USE (VOIDmode,
- piece),
- p);
- REG_NOTES (use_insn)
- = gen_rtx_EXPR_LIST (REG_DEAD, piece,
- REG_NOTES (use_insn));
- }
-
- all_used = 0;
- }
-
- /* Check for the case where the register dying partially
- overlaps the register set by this insn. */
- if (all_used)
- for (i = regno; i < endregno; i++)
- if (dead_or_set_regno_p (place, i))
- {
- all_used = 0;
- break;
- }
+ if ((! refers_to_regno_p (i, i + 1, PATTERN (place), 0)
+ && ! find_regno_fusage (place, USE, i))
+ || dead_or_set_regno_p (place, i))
+ all_used = 0;
if (! all_used)
{
/* Put only REG_DEAD notes for pieces that are
- still used and that are not already dead or set. */
+ not already dead or set. */
- for (i = regno; i < endregno; i++)
+ for (i = regno; i < endregno;
+ i += HARD_REGNO_NREGS (i, reg_raw_mode[i]))
{
rtx piece = gen_rtx_REG (reg_raw_mode[i], i);
+ basic_block bb = BASIC_BLOCK (this_basic_block);
- if ((reg_referenced_p (piece, PATTERN (place))
- || (GET_CODE (place) == CALL_INSN
- && find_reg_fusage (place, USE, piece)))
- && ! dead_or_set_p (place, piece)
+ if (! dead_or_set_p (place, piece)
&& ! reg_bitfield_target_p (piece,
PATTERN (place)))
- REG_NOTES (place)
- = gen_rtx_EXPR_LIST (REG_DEAD,
- piece, REG_NOTES (place));
+ {
+ rtx new_note
+ = gen_rtx_EXPR_LIST (REG_DEAD, piece, NULL_RTX);
+
+ distribute_notes (new_note, place, place,
+ NULL_RTX, NULL_RTX, NULL_RTX);
+ }
+ else if (! refers_to_regno_p (i, i + 1,
+ PATTERN (place), 0)
+ && ! find_regno_fusage (place, USE, i))
+ for (tem = PREV_INSN (place); ;
+ tem = PREV_INSN (tem))
+ {
+ if (! INSN_P (tem))
+ {
+ if (tem == bb->head)
+ {
+ SET_BIT (refresh_blocks,
+ this_basic_block);
+ need_refresh = 1;
+ break;
+ }
+ continue;
+ }
+ if (dead_or_set_p (tem, piece)
+ || reg_bitfield_target_p (piece,
+ PATTERN (tem)))
+ {
+ REG_NOTES (tem)
+ = gen_rtx_EXPR_LIST (REG_UNUSED, piece,
+ REG_NOTES (tem));
+ break;
+ }
+ }
+
}
place = 0;
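Note the new loop step above: the walk over the hard register advances by HARD_REGNO_NREGS (i, reg_raw_mode[i]) rather than one register number at a time, so each natural piece is handled as a unit. A toy standalone rendering of that stepping (the per-piece widths are invented):

#include <stdio.h>

/* Invented stand-in for HARD_REGNO_NREGS (i, reg_raw_mode[i]):
   pretend even-numbered regs start two-register pieces.  */
static int
piece_nregs (int regno)
{
  return regno % 2 == 0 ? 2 : 1;
}

int main (void)
{
  int regno = 4, endregno = 8;

  for (int i = regno; i < endregno; i += piece_nregs (i))
    printf ("piece at hard reg %d, %d register(s) wide\n",
            i, piece_nregs (i));
  return 0;
}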
@@ -11937,7 +12543,7 @@ distribute_links (links)
/* If the insn that this link points to is a NOTE or isn't a single
set, ignore it. In the latter case, it isn't clear what we
- can do other than ignore the link, since we can't tell which
+ can do other than ignore the link, since we can't tell which
register it was for. Such links wouldn't be used by combine
anyway.
@@ -11969,15 +12575,14 @@ distribute_links (links)
(insn && (this_basic_block == n_basic_blocks - 1
|| BLOCK_HEAD (this_basic_block + 1) != insn));
insn = NEXT_INSN (insn))
- if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
- && reg_overlap_mentioned_p (reg, PATTERN (insn)))
+ if (INSN_P (insn) && reg_overlap_mentioned_p (reg, PATTERN (insn)))
{
if (reg_referenced_p (reg, PATTERN (insn)))
place = insn;
break;
}
else if (GET_CODE (insn) == CALL_INSN
- && find_reg_fusage (insn, USE, reg))
+ && find_reg_fusage (insn, USE, reg))
{
place = insn;
break;
@@ -12001,7 +12606,7 @@ distribute_links (links)
/* Set added_links_insn to the earliest insn we added a
link to. */
- if (added_links_insn == 0
+ if (added_links_insn == 0
|| INSN_CUID (added_links_insn) > INSN_CUID (place))
added_links_insn = place;
}