path: root/contrib/gcc/rtlanal.c
author     kan <kan@FreeBSD.org>    2007-05-19 01:19:51 +0000
committer  kan <kan@FreeBSD.org>    2007-05-19 01:19:51 +0000
commit     1f9ea4d0a40cca64d60cf4dab152349da7b9dddf (patch)
tree       0cb530c9c38af219e6dda2994c078b6b2b9ad853 /contrib/gcc/rtlanal.c
parent     4895159b2b4f648051c1f139faa7b6dc50c2bfcb (diff)
download   FreeBSD-src-1f9ea4d0a40cca64d60cf4dab152349da7b9dddf.zip
           FreeBSD-src-1f9ea4d0a40cca64d60cf4dab152349da7b9dddf.tar.gz
GCC 4.2.0 release.
Diffstat (limited to 'contrib/gcc/rtlanal.c')
-rw-r--r--   contrib/gcc/rtlanal.c   3088
1 file changed, 2029 insertions, 1059 deletions
diff --git a/contrib/gcc/rtlanal.c b/contrib/gcc/rtlanal.c
index fcb426d..54f2f72 100644
--- a/contrib/gcc/rtlanal.c
+++ b/contrib/gcc/rtlanal.c
@@ -1,6 +1,7 @@
-/* Analyze RTL for C-Compiler
+/* Analyze RTL for GNU compiler.
Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
- 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
+ 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software
+ Foundation, Inc.
This file is part of GCC.
@@ -16,8 +17,8 @@ for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to the Free
-Software Foundation, 59 Temple Place - Suite 330, Boston, MA
-02111-1307, USA. */
+Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+02110-1301, USA. */
#include "config.h"
@@ -29,26 +30,59 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
#include "hard-reg-set.h"
#include "insn-config.h"
#include "recog.h"
+#include "target.h"
+#include "output.h"
#include "tm_p.h"
#include "flags.h"
-#include "basic-block.h"
#include "real.h"
+#include "regs.h"
+#include "function.h"
/* Forward declarations */
-static int global_reg_mentioned_p_1 (rtx *, void *);
static void set_of_1 (rtx, rtx, void *);
-static void insn_dependent_p_1 (rtx, rtx, void *);
+static bool covers_regno_p (rtx, unsigned int);
+static bool covers_regno_no_parallel_p (rtx, unsigned int);
static int rtx_referenced_p_1 (rtx *, void *);
static int computed_jump_p_1 (rtx);
static void parms_set (rtx, rtx, void *);
-static bool hoist_test_store (rtx, rtx, regset);
-static void hoist_update_store (rtx, rtx *, rtx, rtx);
+
+static unsigned HOST_WIDE_INT cached_nonzero_bits (rtx, enum machine_mode,
+ rtx, enum machine_mode,
+ unsigned HOST_WIDE_INT);
+static unsigned HOST_WIDE_INT nonzero_bits1 (rtx, enum machine_mode, rtx,
+ enum machine_mode,
+ unsigned HOST_WIDE_INT);
+static unsigned int cached_num_sign_bit_copies (rtx, enum machine_mode, rtx,
+ enum machine_mode,
+ unsigned int);
+static unsigned int num_sign_bit_copies1 (rtx, enum machine_mode, rtx,
+ enum machine_mode, unsigned int);
+
+/* Offset of the first 'e', 'E' or 'V' operand for each rtx code, or
+ -1 if a code has no such operand. */
+static int non_rtx_starting_operands[NUM_RTX_CODE];
/* Bit flags that specify the machine subtype we are compiling for.
Bits are tested using macros TARGET_... defined in the tm.h file
and set by `-m...' switches. Must be defined in rtlanal.c. */
int target_flags;
+
+/* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
+ If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
+ SIGN_EXTEND then while narrowing we also have to enforce the
+ representation and sign-extend the value to mode DESTINATION_REP.
+
+ If the value is already sign-extended to DESTINATION_REP mode we
+ can just switch to DESTINATION mode on it. For each pair of
+ integral modes SOURCE and DESTINATION, when truncating from SOURCE
+ to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
+ contains the number of high-order bits in SOURCE that have to be
+ copies of the sign-bit so that we can do this mode-switch to
+ DESTINATION. */
+
+static unsigned int
+num_sign_bit_copies_in_rep[MAX_MODE_INT + 1][MAX_MODE_INT + 1];
/* Return 1 if the value of X is unstable
(would be different at a different point in the program).
@@ -65,12 +99,8 @@ rtx_unstable_p (rtx x)
switch (code)
{
case MEM:
- return ! RTX_UNCHANGING_P (x) || rtx_unstable_p (XEXP (x, 0));
-
- case QUEUED:
- return 1;
+ return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));
- case ADDRESSOF:
case CONST:
case CONST_INT:
case CONST_DOUBLE:
@@ -83,8 +113,7 @@ rtx_unstable_p (rtx x)
/* As in rtx_varies_p, we have to use the actual rtx, not reg number. */
if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
/* The arg pointer varies if it is not a fixed register. */
- || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])
- || RTX_UNCHANGING_P (x))
+ || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
return 0;
#ifndef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
/* ??? When call-clobbered, the value is stable modulo the restore
@@ -144,10 +173,7 @@ rtx_varies_p (rtx x, int for_alias)
switch (code)
{
case MEM:
- return ! RTX_UNCHANGING_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);
-
- case QUEUED:
- return 1;
+ return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);
case CONST:
case CONST_INT:
@@ -157,10 +183,6 @@ rtx_varies_p (rtx x, int for_alias)
case LABEL_REF:
return 0;
- case ADDRESSOF:
- /* This will resolve to some offset from the frame pointer. */
- return 0;
-
case REG:
/* Note that we have to test for the actual rtx used for the frame
and arg pointers and not just the register number in case we have
@@ -217,10 +239,13 @@ rtx_varies_p (rtx x, int for_alias)
return 0;
}
-/* Return 0 if the use of X as an address in a MEM can cause a trap. */
+/* Return nonzero if the use of X as an address in a MEM can cause a trap.
+ MODE is the mode of the MEM (not that of X) and UNALIGNED_MEMS controls
+ whether nonzero is returned for unaligned memory accesses on strict
+ alignment machines. */
-int
-rtx_addr_can_trap_p (rtx x)
+static int
+rtx_addr_can_trap_p_1 (rtx x, enum machine_mode mode, bool unaligned_mems)
{
enum rtx_code code = GET_CODE (x);
@@ -232,10 +257,6 @@ rtx_addr_can_trap_p (rtx x)
case LABEL_REF:
return 0;
- case ADDRESSOF:
- /* This will resolve to some offset from the frame pointer. */
- return 0;
-
case REG:
/* As in rtx_varies_p, we have to use the actual rtx, not reg number. */
if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
@@ -250,27 +271,54 @@ rtx_addr_can_trap_p (rtx x)
return 1;
case CONST:
- return rtx_addr_can_trap_p (XEXP (x, 0));
+ return rtx_addr_can_trap_p_1 (XEXP (x, 0), mode, unaligned_mems);
case PLUS:
- /* An address is assumed not to trap if it is an address that can't
- trap plus a constant integer or it is the pic register plus a
- constant. */
- return ! ((! rtx_addr_can_trap_p (XEXP (x, 0))
- && GET_CODE (XEXP (x, 1)) == CONST_INT)
- || (XEXP (x, 0) == pic_offset_table_rtx
- && CONSTANT_P (XEXP (x, 1))));
+ /* An address is assumed not to trap if:
+ - it is an address that can't trap plus a constant integer,
+ with the proper remainder modulo the mode size if we are
+ considering unaligned memory references. */
+ if (!rtx_addr_can_trap_p_1 (XEXP (x, 0), mode, unaligned_mems)
+ && GET_CODE (XEXP (x, 1)) == CONST_INT)
+ {
+ HOST_WIDE_INT offset;
+
+ if (!STRICT_ALIGNMENT
+ || !unaligned_mems
+ || GET_MODE_SIZE (mode) == 0)
+ return 0;
+
+ offset = INTVAL (XEXP (x, 1));
+
+#ifdef SPARC_STACK_BOUNDARY_HACK
+ /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
+ the real alignment of %sp. However, when it does this, the
+ alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY. */
+ if (SPARC_STACK_BOUNDARY_HACK
+ && (XEXP (x, 0) == stack_pointer_rtx
+ || XEXP (x, 0) == hard_frame_pointer_rtx))
+ offset -= STACK_POINTER_OFFSET;
+#endif
+
+ return offset % GET_MODE_SIZE (mode) != 0;
+ }
+
+ /* - or it is the pic register plus a constant. */
+ if (XEXP (x, 0) == pic_offset_table_rtx && CONSTANT_P (XEXP (x, 1)))
+ return 0;
+
+ return 1;
case LO_SUM:
case PRE_MODIFY:
- return rtx_addr_can_trap_p (XEXP (x, 1));
+ return rtx_addr_can_trap_p_1 (XEXP (x, 1), mode, unaligned_mems);
case PRE_DEC:
case PRE_INC:
case POST_DEC:
case POST_INC:
case POST_MODIFY:
- return rtx_addr_can_trap_p (XEXP (x, 0));
+ return rtx_addr_can_trap_p_1 (XEXP (x, 0), mode, unaligned_mems);
default:
break;
@@ -280,6 +328,14 @@ rtx_addr_can_trap_p (rtx x)
return 1;
}
+/* Return nonzero if the use of X as an address in a MEM can cause a trap. */
+
+int
+rtx_addr_can_trap_p (rtx x)
+{
+ return rtx_addr_can_trap_p_1 (x, VOIDmode, false);
+}
+
/* Return true if X is an address that is known to not be zero. */
bool
@@ -295,10 +351,6 @@ nonzero_address_p (rtx x)
case LABEL_REF:
return true;
- case ADDRESSOF:
- /* This will resolve to some offset from the frame pointer. */
- return true;
-
case REG:
/* As in rtx_varies_p, we have to use the actual rtx, not reg number. */
if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
@@ -316,17 +368,7 @@ nonzero_address_p (rtx x)
case PLUS:
if (GET_CODE (XEXP (x, 1)) == CONST_INT)
- {
- /* Pointers aren't allowed to wrap. If we've got a register
- that is known to be a pointer, and a positive offset, then
- the composite can't be zero. */
- if (INTVAL (XEXP (x, 1)) > 0
- && REG_P (XEXP (x, 0))
- && REG_POINTER (XEXP (x, 0)))
- return true;
-
- return nonzero_address_p (XEXP (x, 0));
- }
+ return nonzero_address_p (XEXP (x, 0));
/* Handle PIC references. */
else if (XEXP (x, 0) == pic_offset_table_rtx
&& CONSTANT_P (XEXP (x, 1)))
@@ -439,210 +481,6 @@ get_related_value (rtx x)
return 0;
}
-/* Given a tablejump insn INSN, return the RTL expression for the offset
- into the jump table. If the offset cannot be determined, then return
- NULL_RTX.
-
- If EARLIEST is nonzero, it is a pointer to a place where the earliest
- insn used in locating the offset was found. */
-
-rtx
-get_jump_table_offset (rtx insn, rtx *earliest)
-{
- rtx label;
- rtx table;
- rtx set;
- rtx old_insn;
- rtx x;
- rtx old_x;
- rtx y;
- rtx old_y;
- int i;
-
- if (!tablejump_p (insn, &label, &table) || !(set = single_set (insn)))
- return NULL_RTX;
-
- x = SET_SRC (set);
-
- /* Some targets (eg, ARM) emit a tablejump that also
- contains the out-of-range target. */
- if (GET_CODE (x) == IF_THEN_ELSE
- && GET_CODE (XEXP (x, 2)) == LABEL_REF)
- x = XEXP (x, 1);
-
- /* Search backwards and locate the expression stored in X. */
- for (old_x = NULL_RTX; GET_CODE (x) == REG && x != old_x;
- old_x = x, x = find_last_value (x, &insn, NULL_RTX, 0))
- ;
-
- /* If X is an expression using a relative address then strip
- off the addition / subtraction of PC, PIC_OFFSET_TABLE_REGNUM,
- or the jump table label. */
- if (GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC
- && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS))
- {
- for (i = 0; i < 2; i++)
- {
- old_insn = insn;
- y = XEXP (x, i);
-
- if (y == pc_rtx || y == pic_offset_table_rtx)
- break;
-
- for (old_y = NULL_RTX; GET_CODE (y) == REG && y != old_y;
- old_y = y, y = find_last_value (y, &old_insn, NULL_RTX, 0))
- ;
-
- if ((GET_CODE (y) == LABEL_REF && XEXP (y, 0) == label))
- break;
- }
-
- if (i >= 2)
- return NULL_RTX;
-
- x = XEXP (x, 1 - i);
-
- for (old_x = NULL_RTX; GET_CODE (x) == REG && x != old_x;
- old_x = x, x = find_last_value (x, &insn, NULL_RTX, 0))
- ;
- }
-
- /* Strip off any sign or zero extension. */
- if (GET_CODE (x) == SIGN_EXTEND || GET_CODE (x) == ZERO_EXTEND)
- {
- x = XEXP (x, 0);
-
- for (old_x = NULL_RTX; GET_CODE (x) == REG && x != old_x;
- old_x = x, x = find_last_value (x, &insn, NULL_RTX, 0))
- ;
- }
-
- /* If X isn't a MEM then this isn't a tablejump we understand. */
- if (GET_CODE (x) != MEM)
- return NULL_RTX;
-
- /* Strip off the MEM. */
- x = XEXP (x, 0);
-
- for (old_x = NULL_RTX; GET_CODE (x) == REG && x != old_x;
- old_x = x, x = find_last_value (x, &insn, NULL_RTX, 0))
- ;
-
- /* If X isn't a PLUS than this isn't a tablejump we understand. */
- if (GET_CODE (x) != PLUS)
- return NULL_RTX;
-
- /* At this point we should have an expression representing the jump table
- plus an offset. Examine each operand in order to determine which one
- represents the jump table. Knowing that tells us that the other operand
- must represent the offset. */
- for (i = 0; i < 2; i++)
- {
- old_insn = insn;
- y = XEXP (x, i);
-
- for (old_y = NULL_RTX; GET_CODE (y) == REG && y != old_y;
- old_y = y, y = find_last_value (y, &old_insn, NULL_RTX, 0))
- ;
-
- if ((GET_CODE (y) == CONST || GET_CODE (y) == LABEL_REF)
- && reg_mentioned_p (label, y))
- break;
- }
-
- if (i >= 2)
- return NULL_RTX;
-
- x = XEXP (x, 1 - i);
-
- /* Strip off the addition / subtraction of PIC_OFFSET_TABLE_REGNUM. */
- if (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS)
- for (i = 0; i < 2; i++)
- if (XEXP (x, i) == pic_offset_table_rtx)
- {
- x = XEXP (x, 1 - i);
- break;
- }
-
- if (earliest)
- *earliest = insn;
-
- /* Return the RTL expression representing the offset. */
- return x;
-}
-
-/* A subroutine of global_reg_mentioned_p, returns 1 if *LOC mentions
- a global register. */
-
-static int
-global_reg_mentioned_p_1 (rtx *loc, void *data ATTRIBUTE_UNUSED)
-{
- int regno;
- rtx x = *loc;
-
- if (! x)
- return 0;
-
- switch (GET_CODE (x))
- {
- case SUBREG:
- if (GET_CODE (SUBREG_REG (x)) == REG)
- {
- if (REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER
- && global_regs[subreg_regno (x)])
- return 1;
- return 0;
- }
- break;
-
- case REG:
- regno = REGNO (x);
- if (regno < FIRST_PSEUDO_REGISTER && global_regs[regno])
- return 1;
- return 0;
-
- case SCRATCH:
- case PC:
- case CC0:
- case CONST_INT:
- case CONST_DOUBLE:
- case CONST:
- case LABEL_REF:
- return 0;
-
- case CALL:
- /* A non-constant call might use a global register. */
- return 1;
-
- default:
- break;
- }
-
- return 0;
-}
-
-/* Returns nonzero if X mentions a global register. */
-
-int
-global_reg_mentioned_p (rtx x)
-{
- if (INSN_P (x))
- {
- if (GET_CODE (x) == CALL_INSN)
- {
- if (! CONST_OR_PURE_CALL_P (x))
- return 1;
- x = CALL_INSN_FUNCTION_USAGE (x);
- if (x == 0)
- return 0;
- }
- else
- x = PATTERN (x);
- }
-
- return for_each_rtx (&x, global_reg_mentioned_p_1, NULL);
-}
-
/* Return the number of places FIND appears within X. If COUNT_DEST is
zero, we do not count occurrences inside the destination of a SET. */
@@ -672,7 +510,7 @@ count_occurrences (rtx x, rtx find, int count_dest)
return 0;
case MEM:
- if (GET_CODE (find) == MEM && rtx_equal_p (x, find))
+ if (MEM_P (find) && rtx_equal_p (x, find))
return 1;
break;
@@ -731,7 +569,7 @@ reg_mentioned_p (rtx reg, rtx in)
{
/* Compare registers by number. */
case REG:
- return GET_CODE (reg) == REG && REGNO (in) == REGNO (reg);
+ return REG_P (reg) && REGNO (in) == REGNO (reg);
/* These codes have no constituent expressions
and are unique. */
@@ -781,20 +619,7 @@ no_labels_between_p (rtx beg, rtx end)
if (beg == end)
return 0;
for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
- if (GET_CODE (p) == CODE_LABEL)
- return 0;
- return 1;
-}
-
-/* Return 1 if in between BEG and END, exclusive of BEG and END, there is
- no JUMP_INSN insn. */
-
-int
-no_jumps_between_p (rtx beg, rtx end)
-{
- rtx p;
- for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
- if (GET_CODE (p) == JUMP_INSN)
+ if (LABEL_P (p))
return 0;
return 1;
}
@@ -813,9 +638,7 @@ reg_used_between_p (rtx reg, rtx from_insn, rtx to_insn)
for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
if (INSN_P (insn)
&& (reg_overlap_mentioned_p (reg, PATTERN (insn))
- || (GET_CODE (insn) == CALL_INSN
- && (find_reg_fusage (insn, USE, reg)
- || find_reg_fusage (insn, CLOBBER, reg)))))
+ || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
return 1;
return 0;
}
@@ -840,9 +663,9 @@ reg_referenced_p (rtx x, rtx body)
it is mentioned in the destination. */
if (GET_CODE (SET_DEST (body)) != CC0
&& GET_CODE (SET_DEST (body)) != PC
- && GET_CODE (SET_DEST (body)) != REG
+ && !REG_P (SET_DEST (body))
&& ! (GET_CODE (SET_DEST (body)) == SUBREG
- && GET_CODE (SUBREG_REG (SET_DEST (body))) == REG
+ && REG_P (SUBREG_REG (SET_DEST (body)))
&& (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (body))))
+ (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
== ((GET_MODE_SIZE (GET_MODE (SET_DEST (body)))
@@ -882,7 +705,7 @@ reg_referenced_p (rtx x, rtx body)
return 0;
case CLOBBER:
- if (GET_CODE (XEXP (body, 0)) == MEM)
+ if (MEM_P (XEXP (body, 0)))
if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))
return 1;
return 0;
@@ -896,27 +719,6 @@ reg_referenced_p (rtx x, rtx body)
return 0;
}
}
-
-/* Nonzero if register REG is referenced in an insn between
- FROM_INSN and TO_INSN (exclusive of those two). Sets of REG do
- not count. */
-
-int
-reg_referenced_between_p (rtx reg, rtx from_insn, rtx to_insn)
-{
- rtx insn;
-
- if (from_insn == to_insn)
- return 0;
-
- for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
- if (INSN_P (insn)
- && (reg_referenced_p (reg, PATTERN (insn))
- || (GET_CODE (insn) == CALL_INSN
- && find_reg_fusage (insn, USE, reg))))
- return 1;
- return 0;
-}
/* Nonzero if register REG is set or clobbered in an insn between
FROM_INSN and TO_INSN (exclusive of those two). */
@@ -943,16 +745,12 @@ reg_set_p (rtx reg, rtx insn)
check if a side-effect of the insn clobbers REG. */
if (INSN_P (insn)
&& (FIND_REG_INC_NOTE (insn, reg)
- || (GET_CODE (insn) == CALL_INSN
- /* We'd like to test call_used_regs here, but rtlanal.c can't
- reference that variable due to its use in genattrtab. So
- we'll just be more conservative.
-
- ??? Unless we could ensure that the CALL_INSN_FUNCTION_USAGE
- information holds all clobbered registers. */
- && ((GET_CODE (reg) == REG
- && REGNO (reg) < FIRST_PSEUDO_REGISTER)
- || GET_CODE (reg) == MEM
+ || (CALL_P (insn)
+ && ((REG_P (reg)
+ && REGNO (reg) < FIRST_PSEUDO_REGISTER
+ && TEST_HARD_REG_BIT (regs_invalidated_by_call,
+ REGNO (reg)))
+ || MEM_P (reg)
|| find_reg_fusage (insn, CLOBBER, reg)))))
return 1;
@@ -960,51 +758,6 @@ reg_set_p (rtx reg, rtx insn)
}
/* Similar to reg_set_between_p, but check all registers in X. Return 0
- only if none of them are modified between START and END. Do not
- consider non-registers one way or the other. */
-
-int
-regs_set_between_p (rtx x, rtx start, rtx end)
-{
- enum rtx_code code = GET_CODE (x);
- const char *fmt;
- int i, j;
-
- switch (code)
- {
- case CONST_INT:
- case CONST_DOUBLE:
- case CONST_VECTOR:
- case CONST:
- case SYMBOL_REF:
- case LABEL_REF:
- case PC:
- case CC0:
- return 0;
-
- case REG:
- return reg_set_between_p (x, start, end);
-
- default:
- break;
- }
-
- fmt = GET_RTX_FORMAT (code);
- for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
- {
- if (fmt[i] == 'e' && regs_set_between_p (XEXP (x, i), start, end))
- return 1;
-
- else if (fmt[i] == 'E')
- for (j = XVECLEN (x, i) - 1; j >= 0; j--)
- if (regs_set_between_p (XVECEXP (x, i, j), start, end))
- return 1;
- }
-
- return 0;
-}
-
-/* Similar to reg_set_between_p, but check all registers in X. Return 0
only if none of them are modified between START and END. Return 1 if
X contains a MEM; this routine does use memory aliasing. */
@@ -1034,10 +787,10 @@ modified_between_p (rtx x, rtx start, rtx end)
return 1;
case MEM:
- if (RTX_UNCHANGING_P (x))
- return 0;
if (modified_between_p (XEXP (x, 0), start, end))
return 1;
+ if (MEM_READONLY_P (x))
+ return 0;
for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
if (memory_modified_in_insn_p (x, insn))
return 1;
@@ -1092,10 +845,10 @@ modified_in_p (rtx x, rtx insn)
return 1;
case MEM:
- if (RTX_UNCHANGING_P (x))
- return 0;
if (modified_in_p (XEXP (x, 0), insn))
return 1;
+ if (MEM_READONLY_P (x))
+ return 0;
if (memory_modified_in_insn_p (x, insn))
return 1;
return 0;
@@ -1122,41 +875,6 @@ modified_in_p (rtx x, rtx insn)
return 0;
}
-
-/* Return true if anything in insn X is (anti,output,true) dependent on
- anything in insn Y. */
-
-int
-insn_dependent_p (rtx x, rtx y)
-{
- rtx tmp;
-
- if (! INSN_P (x) || ! INSN_P (y))
- abort ();
-
- tmp = PATTERN (y);
- note_stores (PATTERN (x), insn_dependent_p_1, &tmp);
- if (tmp == NULL_RTX)
- return 1;
-
- tmp = PATTERN (x);
- note_stores (PATTERN (y), insn_dependent_p_1, &tmp);
- if (tmp == NULL_RTX)
- return 1;
-
- return 0;
-}
-
-/* A helper routine for insn_dependent_p called through note_stores. */
-
-static void
-insn_dependent_p_1 (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data)
-{
- rtx * pinsn = (rtx *) data;
-
- if (*pinsn && reg_mentioned_p (x, *pinsn))
- *pinsn = NULL_RTX;
-}
/* Helper function for set_of. */
struct set_of_data
@@ -1170,7 +888,7 @@ set_of_1 (rtx x, rtx pat, void *data1)
{
struct set_of_data *data = (struct set_of_data *) (data1);
if (rtx_equal_p (x, data->pat)
- || (GET_CODE (x) != MEM && reg_overlap_mentioned_p (data->pat, x)))
+ || (!MEM_P (x) && reg_overlap_mentioned_p (data->pat, x)))
data->found = pat;
}
@@ -1283,11 +1001,10 @@ set_noop_p (rtx set)
if (dst == pc_rtx && src == pc_rtx)
return 1;
- if (GET_CODE (dst) == MEM && GET_CODE (src) == MEM)
+ if (MEM_P (dst) && MEM_P (src))
return rtx_equal_p (dst, src) && !side_effects_p (dst);
- if (GET_CODE (dst) == SIGN_EXTRACT
- || GET_CODE (dst) == ZERO_EXTRACT)
+ if (GET_CODE (dst) == ZERO_EXTRACT)
return rtx_equal_p (XEXP (dst, 0), src)
&& ! BYTES_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
&& !side_effects_p (src);
@@ -1303,7 +1020,7 @@ set_noop_p (rtx set)
dst = SUBREG_REG (dst);
}
- return (GET_CODE (src) == REG && GET_CODE (dst) == REG
+ return (REG_P (src) && REG_P (dst)
&& REGNO (src) == REGNO (dst));
}
@@ -1365,7 +1082,7 @@ find_last_value (rtx x, rtx *pinsn, rtx valid_to, int allow_hwreg)
{
rtx p;
- for (p = PREV_INSN (*pinsn); p && GET_CODE (p) != CODE_LABEL;
+ for (p = PREV_INSN (*pinsn); p && !LABEL_P (p);
p = PREV_INSN (p))
if (INSN_P (p))
{
@@ -1383,7 +1100,7 @@ find_last_value (rtx x, rtx *pinsn, rtx valid_to, int allow_hwreg)
|| ! modified_between_p (src, PREV_INSN (p), valid_to))
/* Reject hard registers because we don't usually want
to use them; we'd rather use a pseudo. */
- && (! (GET_CODE (src) == REG
+ && (! (REG_P (src)
&& REGNO (src) < FIRST_PSEUDO_REGISTER) || allow_hwreg))
{
*pinsn = p;
@@ -1441,19 +1158,19 @@ refers_to_regno_p (unsigned int regno, unsigned int endregno, rtx x,
return (endregno > x_regno
&& regno < x_regno + (x_regno < FIRST_PSEUDO_REGISTER
- ? HARD_REGNO_NREGS (x_regno, GET_MODE (x))
+ ? hard_regno_nregs[x_regno][GET_MODE (x)]
: 1));
case SUBREG:
/* If this is a SUBREG of a hard reg, we can see exactly which
registers are being modified. Otherwise, handle normally. */
- if (GET_CODE (SUBREG_REG (x)) == REG
+ if (REG_P (SUBREG_REG (x))
&& REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
{
unsigned int inner_regno = subreg_regno (x);
unsigned int inner_endregno
= inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
- ? HARD_REGNO_NREGS (regno, GET_MODE (x)) : 1);
+ ? hard_regno_nregs[inner_regno][GET_MODE (x)] : 1);
return endregno > inner_regno && regno < inner_endregno;
}
@@ -1467,11 +1184,11 @@ refers_to_regno_p (unsigned int regno, unsigned int endregno, rtx x,
treat each word individually. */
&& ((GET_CODE (SET_DEST (x)) == SUBREG
&& loc != &SUBREG_REG (SET_DEST (x))
- && GET_CODE (SUBREG_REG (SET_DEST (x))) == REG
+ && REG_P (SUBREG_REG (SET_DEST (x)))
&& REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
&& refers_to_regno_p (regno, endregno,
SUBREG_REG (SET_DEST (x)), loc))
- || (GET_CODE (SET_DEST (x)) != REG
+ || (!REG_P (SET_DEST (x))
&& refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
return 1;
@@ -1523,18 +1240,22 @@ reg_overlap_mentioned_p (rtx x, rtx in)
{
unsigned int regno, endregno;
- /* Overly conservative. */
- if (GET_CODE (x) == STRICT_LOW_PART
- || GET_CODE (x) == ZERO_EXTRACT
- || GET_CODE (x) == SIGN_EXTRACT)
- x = XEXP (x, 0);
-
- /* If either argument is a constant, then modifying X can not affect IN. */
- if (CONSTANT_P (x) || CONSTANT_P (in))
+ /* If either argument is a constant, then modifying X can not
+ affect IN. Here we look at IN, we can profitably combine
+ CONSTANT_P (x) with the switch statement below. */
+ if (CONSTANT_P (in))
return 0;
+ recurse:
switch (GET_CODE (x))
{
+ case STRICT_LOW_PART:
+ case ZERO_EXTRACT:
+ case SIGN_EXTRACT:
+ /* Overly conservative. */
+ x = XEXP (x, 0);
+ goto recurse;
+
case SUBREG:
regno = REGNO (SUBREG_REG (x));
if (regno < FIRST_PSEUDO_REGISTER)
@@ -1545,7 +1266,7 @@ reg_overlap_mentioned_p (rtx x, rtx in)
regno = REGNO (x);
do_reg:
endregno = regno + (regno < FIRST_PSEUDO_REGISTER
- ? HARD_REGNO_NREGS (regno, GET_MODE (x)) : 1);
+ ? hard_regno_nregs[regno][GET_MODE (x)] : 1);
return refers_to_regno_p (regno, endregno, in, (rtx*) 0);
case MEM:
@@ -1553,13 +1274,23 @@ reg_overlap_mentioned_p (rtx x, rtx in)
const char *fmt;
int i;
- if (GET_CODE (in) == MEM)
+ if (MEM_P (in))
return 1;
fmt = GET_RTX_FORMAT (GET_CODE (in));
for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
- if (fmt[i] == 'e' && reg_overlap_mentioned_p (x, XEXP (in, i)))
- return 1;
+ if (fmt[i] == 'e')
+ {
+ if (reg_overlap_mentioned_p (x, XEXP (in, i)))
+ return 1;
+ }
+ else if (fmt[i] == 'E')
+ {
+ int j;
+ for (j = XVECLEN (in, i) - 1; j >= 0; --j)
+ if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))
+ return 1;
+ }
return 0;
}
@@ -1577,63 +1308,14 @@ reg_overlap_mentioned_p (rtx x, rtx in)
for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
if (XEXP (XVECEXP (x, 0, i), 0) != 0
&& reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))
- return 1;
+ return 1;
return 0;
}
default:
- break;
+ gcc_assert (CONSTANT_P (x));
+ return 0;
}
-
- abort ();
-}
-
-/* Return the last value to which REG was set prior to INSN. If we can't
- find it easily, return 0.
-
- We only return a REG, SUBREG, or constant because it is too hard to
- check if a MEM remains unchanged. */
-
-rtx
-reg_set_last (rtx x, rtx insn)
-{
- rtx orig_insn = insn;
-
- /* Scan backwards until reg_set_last_1 changed one of the above flags.
- Stop when we reach a label or X is a hard reg and we reach a
- CALL_INSN (if reg_set_last_last_regno is a hard reg).
-
- If we find a set of X, ensure that its SET_SRC remains unchanged. */
-
- /* We compare with <= here, because reg_set_last_last_regno
- is actually the number of the first reg *not* in X. */
- for (;
- insn && GET_CODE (insn) != CODE_LABEL
- && ! (GET_CODE (insn) == CALL_INSN
- && REGNO (x) <= FIRST_PSEUDO_REGISTER);
- insn = PREV_INSN (insn))
- if (INSN_P (insn))
- {
- rtx set = set_of (x, insn);
- /* OK, this function modify our register. See if we understand it. */
- if (set)
- {
- rtx last_value;
- if (GET_CODE (set) != SET || SET_DEST (set) != x)
- return 0;
- last_value = SET_SRC (x);
- if (CONSTANT_P (last_value)
- || ((GET_CODE (last_value) == REG
- || GET_CODE (last_value) == SUBREG)
- && ! reg_set_between_p (last_value,
- insn, orig_insn)))
- return last_value;
- else
- return 0;
- }
- }
-
- return 0;
}
/* Call FUN on each register or MEM that is stored into or clobbered by X.
@@ -1658,10 +1340,9 @@ note_stores (rtx x, void (*fun) (rtx, rtx, void *), void *data)
rtx dest = SET_DEST (x);
while ((GET_CODE (dest) == SUBREG
- && (GET_CODE (SUBREG_REG (dest)) != REG
+ && (!REG_P (SUBREG_REG (dest))
|| REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
|| GET_CODE (dest) == ZERO_EXTRACT
- || GET_CODE (dest) == SIGN_EXTRACT
|| GET_CODE (dest) == STRICT_LOW_PART)
dest = XEXP (dest, 0);
@@ -1733,7 +1414,7 @@ note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
return;
case CLOBBER:
- if (GET_CODE (XEXP (body, 0)) == MEM)
+ if (MEM_P (XEXP (body, 0)))
(*fun) (&XEXP (XEXP (body, 0), 0), data);
return;
@@ -1754,7 +1435,7 @@ note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
dest = XEXP (dest, 0);
- if (GET_CODE (dest) == MEM)
+ if (MEM_P (dest))
(*fun) (&XEXP (dest, 0), data);
}
return;
@@ -1770,8 +1451,8 @@ note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
This will be true if X is (cc0) or if X is a register and
X dies in INSN or because INSN entirely sets X.
- "Entirely set" means set directly and not through a SUBREG,
- ZERO_EXTRACT or SIGN_EXTRACT, so no trace of the old contents remains.
+ "Entirely set" means set directly and not through a SUBREG, or
+ ZERO_EXTRACT, so no trace of the old contents remains.
Likewise, REG_INC does not count.
REG may be a hard or pseudo reg. Renumbering is not taken into account,
@@ -1793,12 +1474,11 @@ dead_or_set_p (rtx insn, rtx x)
if (GET_CODE (x) == CC0)
return 1;
- if (GET_CODE (x) != REG)
- abort ();
+ gcc_assert (REG_P (x));
regno = REGNO (x);
last_regno = (regno >= FIRST_PSEUDO_REGISTER ? regno
- : regno + HARD_REGNO_NREGS (regno, GET_MODE (x)) - 1);
+ : regno + hard_regno_nregs[regno][GET_MODE (x)] - 1);
for (i = regno; i <= last_regno; i++)
if (! dead_or_set_regno_p (insn, i))
@@ -1807,20 +1487,71 @@ dead_or_set_p (rtx insn, rtx x)
return 1;
}
+/* Return TRUE iff DEST is a register or subreg of a register and
+ doesn't change the number of words of the inner register, and any
+ part of the register is TEST_REGNO. */
+
+static bool
+covers_regno_no_parallel_p (rtx dest, unsigned int test_regno)
+{
+ unsigned int regno, endregno;
+
+ if (GET_CODE (dest) == SUBREG
+ && (((GET_MODE_SIZE (GET_MODE (dest))
+ + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
+ == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
+ + UNITS_PER_WORD - 1) / UNITS_PER_WORD)))
+ dest = SUBREG_REG (dest);
+
+ if (!REG_P (dest))
+ return false;
+
+ regno = REGNO (dest);
+ endregno = (regno >= FIRST_PSEUDO_REGISTER ? regno + 1
+ : regno + hard_regno_nregs[regno][GET_MODE (dest)]);
+ return (test_regno >= regno && test_regno < endregno);
+}
+
+/* Like covers_regno_no_parallel_p, but also handles PARALLELs where
+ any member matches the covers_regno_no_parallel_p criteria. */
+
+static bool
+covers_regno_p (rtx dest, unsigned int test_regno)
+{
+ if (GET_CODE (dest) == PARALLEL)
+ {
+ /* Some targets place small structures in registers for return
+ values of functions, and those registers are wrapped in
+ PARALLELs that we may see as the destination of a SET. */
+ int i;
+
+ for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
+ {
+ rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
+ if (inner != NULL_RTX
+ && covers_regno_no_parallel_p (inner, test_regno))
+ return true;
+ }
+
+ return false;
+ }
+ else
+ return covers_regno_no_parallel_p (dest, test_regno);
+}
+
/* Utility function for dead_or_set_p to check an individual register. Also
called from flow.c. */
int
dead_or_set_regno_p (rtx insn, unsigned int test_regno)
{
- unsigned int regno, endregno;
rtx pattern;
/* See if there is a death note for something that includes TEST_REGNO. */
if (find_regno_note (insn, REG_DEAD, test_regno))
return 1;
- if (GET_CODE (insn) == CALL_INSN
+ if (CALL_P (insn)
&& find_regno_fusage (insn, CLOBBER, test_regno))
return 1;
@@ -1830,28 +1561,7 @@ dead_or_set_regno_p (rtx insn, unsigned int test_regno)
pattern = COND_EXEC_CODE (pattern);
if (GET_CODE (pattern) == SET)
- {
- rtx dest = SET_DEST (pattern);
-
- /* A value is totally replaced if it is the destination or the
- destination is a SUBREG of REGNO that does not change the number of
- words in it. */
- if (GET_CODE (dest) == SUBREG
- && (((GET_MODE_SIZE (GET_MODE (dest))
- + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
- == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
- + UNITS_PER_WORD - 1) / UNITS_PER_WORD)))
- dest = SUBREG_REG (dest);
-
- if (GET_CODE (dest) != REG)
- return 0;
-
- regno = REGNO (dest);
- endregno = (regno >= FIRST_PSEUDO_REGISTER ? regno + 1
- : regno + HARD_REGNO_NREGS (regno, GET_MODE (dest)));
-
- return (test_regno >= regno && test_regno < endregno);
- }
+ return covers_regno_p (SET_DEST (pattern), test_regno);
else if (GET_CODE (pattern) == PARALLEL)
{
int i;
@@ -1863,27 +1573,9 @@ dead_or_set_regno_p (rtx insn, unsigned int test_regno)
if (GET_CODE (body) == COND_EXEC)
body = COND_EXEC_CODE (body);
- if (GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
- {
- rtx dest = SET_DEST (body);
-
- if (GET_CODE (dest) == SUBREG
- && (((GET_MODE_SIZE (GET_MODE (dest))
- + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
- == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
- + UNITS_PER_WORD - 1) / UNITS_PER_WORD)))
- dest = SUBREG_REG (dest);
-
- if (GET_CODE (dest) != REG)
- continue;
-
- regno = REGNO (dest);
- endregno = (regno >= FIRST_PSEUDO_REGISTER ? regno + 1
- : regno + HARD_REGNO_NREGS (regno, GET_MODE (dest)));
-
- if (test_regno >= regno && test_regno < endregno)
- return 1;
- }
+ if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
+ && covers_regno_p (SET_DEST (body), test_regno))
+ return 1;
}
}
@@ -1898,13 +1590,21 @@ find_reg_note (rtx insn, enum reg_note kind, rtx datum)
{
rtx link;
+ gcc_assert (insn);
+
/* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN. */
if (! INSN_P (insn))
return 0;
+ if (datum == 0)
+ {
+ for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
+ if (REG_NOTE_KIND (link) == kind)
+ return link;
+ return 0;
+ }
for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
- if (REG_NOTE_KIND (link) == kind
- && (datum == 0 || datum == XEXP (link, 0)))
+ if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
return link;
return 0;
}
@@ -1927,12 +1627,12 @@ find_regno_note (rtx insn, enum reg_note kind, unsigned int regno)
if (REG_NOTE_KIND (link) == kind
/* Verify that it is a register, so that scratch and MEM won't cause a
problem here. */
- && GET_CODE (XEXP (link, 0)) == REG
+ && REG_P (XEXP (link, 0))
&& REGNO (XEXP (link, 0)) <= regno
&& ((REGNO (XEXP (link, 0))
+ (REGNO (XEXP (link, 0)) >= FIRST_PSEUDO_REGISTER ? 1
- : HARD_REGNO_NREGS (REGNO (XEXP (link, 0)),
- GET_MODE (XEXP (link, 0)))))
+ : hard_regno_nregs[REGNO (XEXP (link, 0))]
+ [GET_MODE (XEXP (link, 0))]))
> regno))
return link;
return 0;
@@ -1967,13 +1667,12 @@ find_reg_fusage (rtx insn, enum rtx_code code, rtx datum)
{
/* If it's not a CALL_INSN, it can't possibly have a
CALL_INSN_FUNCTION_USAGE field, so don't bother checking. */
- if (GET_CODE (insn) != CALL_INSN)
+ if (!CALL_P (insn))
return 0;
- if (! datum)
- abort ();
+ gcc_assert (datum);
- if (GET_CODE (datum) != REG)
+ if (!REG_P (datum))
{
rtx link;
@@ -1994,7 +1693,7 @@ find_reg_fusage (rtx insn, enum rtx_code code, rtx datum)
if (regno < FIRST_PSEUDO_REGISTER)
{
unsigned int end_regno
- = regno + HARD_REGNO_NREGS (regno, GET_MODE (datum));
+ = regno + hard_regno_nregs[regno][GET_MODE (datum)];
unsigned int i;
for (i = regno; i < end_regno; i++)
@@ -2018,7 +1717,7 @@ find_regno_fusage (rtx insn, enum rtx_code code, unsigned int regno)
to pseudo registers, so don't bother checking. */
if (regno >= FIRST_PSEUDO_REGISTER
- || GET_CODE (insn) != CALL_INSN )
+ || !CALL_P (insn) )
return 0;
for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
@@ -2027,9 +1726,9 @@ find_regno_fusage (rtx insn, enum rtx_code code, unsigned int regno)
rtx op, reg;
if (GET_CODE (op = XEXP (link, 0)) == code
- && GET_CODE (reg = XEXP (op, 0)) == REG
+ && REG_P (reg = XEXP (op, 0))
&& (regnote = REGNO (reg)) <= regno
- && regnote + HARD_REGNO_NREGS (regnote, GET_MODE (reg)) > regno)
+ && regnote + hard_regno_nregs[regnote][GET_MODE (reg)] > regno)
return 1;
}
@@ -2043,7 +1742,7 @@ pure_call_p (rtx insn)
{
rtx link;
- if (GET_CODE (insn) != CALL_INSN || ! CONST_OR_PURE_CALL_P (insn))
+ if (!CALL_P (insn) || ! CONST_OR_PURE_CALL_P (insn))
return 0;
/* Look for the note that differentiates const and pure functions. */
@@ -2052,7 +1751,7 @@ pure_call_p (rtx insn)
rtx u, m;
if (GET_CODE (u = XEXP (link, 0)) == USE
- && GET_CODE (m = XEXP (u, 0)) == MEM && GET_MODE (m) == BLKmode
+ && MEM_P (m = XEXP (u, 0)) && GET_MODE (m) == BLKmode
&& GET_CODE (XEXP (m, 0)) == SCRATCH)
return 1;
}
@@ -2083,7 +1782,7 @@ remove_note (rtx insn, rtx note)
return;
}
- abort ();
+ gcc_unreachable ();
}
/* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
@@ -2340,14 +2039,25 @@ side_effects_p (rtx x)
return 0;
}
-/* Return nonzero if evaluating rtx X might cause a trap. */
+enum may_trap_p_flags
+{
+ MTP_UNALIGNED_MEMS = 1,
+ MTP_AFTER_MOVE = 2
+};
+/* Return nonzero if evaluating rtx X might cause a trap.
+ (FLAGS & MTP_UNALIGNED_MEMS) controls whether nonzero is returned for
+ unaligned memory accesses on strict alignment machines. If
+ (FLAGS & AFTER_MOVE) is true, returns nonzero even in case the expression
+ cannot trap at its current location, but it might become trapping if moved
+ elsewhere. */
-int
-may_trap_p (rtx x)
+static int
+may_trap_p_1 (rtx x, unsigned flags)
{
int i;
enum rtx_code code;
const char *fmt;
+ bool unaligned_mems = (flags & MTP_UNALIGNED_MEMS) != 0;
if (x == 0)
return 0;
@@ -2377,9 +2087,15 @@ may_trap_p (rtx x)
/* Memory ref can trap unless it's a static var or a stack slot. */
case MEM:
- if (MEM_NOTRAP_P (x))
+ if (/* MEM_NOTRAP_P only relates to the actual position of the memory
+ reference; moving it out of condition might cause its address
+ become invalid. */
+ !(flags & MTP_AFTER_MOVE)
+ && MEM_NOTRAP_P (x)
+ && (!STRICT_ALIGNMENT || !unaligned_mems))
return 0;
- return rtx_addr_can_trap_p (XEXP (x, 0));
+ return
+ rtx_addr_can_trap_p_1 (XEXP (x, 0), GET_MODE (x), unaligned_mems);
/* Division by a non-constant might trap. */
case DIV:
@@ -2388,11 +2104,9 @@ may_trap_p (rtx x)
case UMOD:
if (HONOR_SNANS (GET_MODE (x)))
return 1;
- if (! CONSTANT_P (XEXP (x, 1))
- || (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT
- && flag_trapping_math))
- return 1;
- if (XEXP (x, 1) == const0_rtx)
+ if (SCALAR_FLOAT_MODE_P (GET_MODE (x)))
+ return flag_trapping_math;
+ if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))
return 1;
break;
@@ -2405,6 +2119,7 @@ may_trap_p (rtx x)
case GT:
case LE:
case LT:
+ case LTGT:
case COMPARE:
/* Some floating point comparisons may trap. */
if (!flag_trapping_math)
@@ -2440,12 +2155,13 @@ may_trap_p (rtx x)
case NEG:
case ABS:
+ case SUBREG:
/* These operations don't trap even with floating point. */
break;
default:
/* Any floating arithmetic may trap. */
- if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT
+ if (SCALAR_FLOAT_MODE_P (GET_MODE (x))
&& flag_trapping_math)
return 1;
}
@@ -2455,19 +2171,82 @@ may_trap_p (rtx x)
{
if (fmt[i] == 'e')
{
- if (may_trap_p (XEXP (x, i)))
+ if (may_trap_p_1 (XEXP (x, i), flags))
return 1;
}
else if (fmt[i] == 'E')
{
int j;
for (j = 0; j < XVECLEN (x, i); j++)
- if (may_trap_p (XVECEXP (x, i, j)))
+ if (may_trap_p_1 (XVECEXP (x, i, j), flags))
return 1;
}
}
return 0;
}
+
+/* Return nonzero if evaluating rtx X might cause a trap. */
+
+int
+may_trap_p (rtx x)
+{
+ return may_trap_p_1 (x, 0);
+}
+
+/* Return nonzero if evaluating rtx X might cause a trap, when the expression
+ is moved from its current location by some optimization. */
+
+int
+may_trap_after_code_motion_p (rtx x)
+{
+ return may_trap_p_1 (x, MTP_AFTER_MOVE);
+}
+
+/* Same as above, but additionally return nonzero if evaluating rtx X might
+ cause a fault. We define a fault for the purpose of this function as a
+ erroneous execution condition that cannot be encountered during the normal
+ execution of a valid program; the typical example is an unaligned memory
+ access on a strict alignment machine. The compiler guarantees that it
+ doesn't generate code that will fault from a valid program, but this
+ guarantee doesn't mean anything for individual instructions. Consider
+ the following example:
+
+ struct S { int d; union { char *cp; int *ip; }; };
+
+ int foo(struct S *s)
+ {
+ if (s->d == 1)
+ return *s->ip;
+ else
+ return *s->cp;
+ }
+
+ on a strict alignment machine. In a valid program, foo will never be
+ invoked on a structure for which d is equal to 1 and the underlying
+ unique field of the union not aligned on a 4-byte boundary, but the
+ expression *s->ip might cause a fault if considered individually.
+
+ At the RTL level, potentially problematic expressions will almost always
+ verify may_trap_p; for example, the above dereference can be emitted as
+ (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
+ However, suppose that foo is inlined in a caller that causes s->cp to
+ point to a local character variable and guarantees that s->d is not set
+ to 1; foo may have been effectively translated into pseudo-RTL as:
+
+ if ((reg:SI) == 1)
+ (set (reg:SI) (mem:SI (%fp - 7)))
+ else
+ (set (reg:QI) (mem:QI (%fp - 7)))
+
+ Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
+ memory reference to a stack slot, but it will certainly cause a fault
+ on a strict alignment machine. */
+
+int
+may_trap_or_fault_p (rtx x)
+{
+ return may_trap_p_1 (x, MTP_UNALIGNED_MEMS);
+}
/* Return nonzero if X contains a comparison that is not either EQ or NE,
i.e., an inequality. */
@@ -2562,8 +2341,7 @@ replace_rtx (rtx x, rtx from, rtx to)
x = simplify_subreg (GET_MODE (x), new,
GET_MODE (SUBREG_REG (x)),
SUBREG_BYTE (x));
- if (! x)
- abort ();
+ gcc_assert (x);
}
else
SUBREG_REG (x) = new;
@@ -2578,8 +2356,7 @@ replace_rtx (rtx x, rtx from, rtx to)
{
x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
new, GET_MODE (XEXP (x, 0)));
- if (! x)
- abort ();
+ gcc_assert (x);
}
else
XEXP (x, 0) = new;
@@ -2600,106 +2377,6 @@ replace_rtx (rtx x, rtx from, rtx to)
return x;
}
-/* Throughout the rtx X, replace many registers according to REG_MAP.
- Return the replacement for X (which may be X with altered contents).
- REG_MAP[R] is the replacement for register R, or 0 for don't replace.
- NREGS is the length of REG_MAP; regs >= NREGS are not mapped.
-
- We only support REG_MAP entries of REG or SUBREG. Also, hard registers
- should not be mapped to pseudos or vice versa since validate_change
- is not called.
-
- If REPLACE_DEST is 1, replacements are also done in destinations;
- otherwise, only sources are replaced. */
-
-rtx
-replace_regs (rtx x, rtx *reg_map, unsigned int nregs, int replace_dest)
-{
- enum rtx_code code;
- int i;
- const char *fmt;
-
- if (x == 0)
- return x;
-
- code = GET_CODE (x);
- switch (code)
- {
- case SCRATCH:
- case PC:
- case CC0:
- case CONST_INT:
- case CONST_DOUBLE:
- case CONST_VECTOR:
- case CONST:
- case SYMBOL_REF:
- case LABEL_REF:
- return x;
-
- case REG:
- /* Verify that the register has an entry before trying to access it. */
- if (REGNO (x) < nregs && reg_map[REGNO (x)] != 0)
- {
- /* SUBREGs can't be shared. Always return a copy to ensure that if
- this replacement occurs more than once then each instance will
- get distinct rtx. */
- if (GET_CODE (reg_map[REGNO (x)]) == SUBREG)
- return copy_rtx (reg_map[REGNO (x)]);
- return reg_map[REGNO (x)];
- }
- return x;
-
- case SUBREG:
- /* Prevent making nested SUBREGs. */
- if (GET_CODE (SUBREG_REG (x)) == REG && REGNO (SUBREG_REG (x)) < nregs
- && reg_map[REGNO (SUBREG_REG (x))] != 0
- && GET_CODE (reg_map[REGNO (SUBREG_REG (x))]) == SUBREG)
- {
- rtx map_val = reg_map[REGNO (SUBREG_REG (x))];
- return simplify_gen_subreg (GET_MODE (x), map_val,
- GET_MODE (SUBREG_REG (x)),
- SUBREG_BYTE (x));
- }
- break;
-
- case SET:
- if (replace_dest)
- SET_DEST (x) = replace_regs (SET_DEST (x), reg_map, nregs, 0);
-
- else if (GET_CODE (SET_DEST (x)) == MEM
- || GET_CODE (SET_DEST (x)) == STRICT_LOW_PART)
- /* Even if we are not to replace destinations, replace register if it
- is CONTAINED in destination (destination is memory or
- STRICT_LOW_PART). */
- XEXP (SET_DEST (x), 0) = replace_regs (XEXP (SET_DEST (x), 0),
- reg_map, nregs, 0);
- else if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT)
- /* Similarly, for ZERO_EXTRACT we replace all operands. */
- break;
-
- SET_SRC (x) = replace_regs (SET_SRC (x), reg_map, nregs, 0);
- return x;
-
- default:
- break;
- }
-
- fmt = GET_RTX_FORMAT (code);
- for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
- {
- if (fmt[i] == 'e')
- XEXP (x, i) = replace_regs (XEXP (x, i), reg_map, nregs, replace_dest);
- else if (fmt[i] == 'E')
- {
- int j;
- for (j = 0; j < XVECLEN (x, i); j++)
- XVECEXP (x, i, j) = replace_regs (XVECEXP (x, i, j), reg_map,
- nregs, replace_dest);
- }
- }
- return x;
-}
-
/* Replace occurrences of the old label in *X with the new one.
DATA is a REPLACE_LABEL_DATA containing the old and new labels. */
@@ -2742,7 +2419,7 @@ replace_label (rtx *x, void *data)
/* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
field. This is not handled by for_each_rtx because it doesn't
handle unprinted ('0') fields. */
- if (GET_CODE (l) == JUMP_INSN && JUMP_LABEL (l) == old_label)
+ if (JUMP_P (l) && JUMP_LABEL (l) == old_label)
JUMP_LABEL (l) = new_label;
if ((GET_CODE (l) == LABEL_REF
@@ -2774,7 +2451,7 @@ rtx_referenced_p_1 (rtx *body, void *x)
return y == NULL_RTX;
/* Return true if a label_ref *BODY refers to label Y. */
- if (GET_CODE (*body) == LABEL_REF && GET_CODE (y) == CODE_LABEL)
+ if (GET_CODE (*body) == LABEL_REF && LABEL_P (y))
return XEXP (*body, 0) == y;
/* If *BODY is a reference to pool constant traverse the constant. */
@@ -2802,10 +2479,10 @@ tablejump_p (rtx insn, rtx *labelp, rtx *tablep)
{
rtx label, table;
- if (GET_CODE (insn) == JUMP_INSN
+ if (JUMP_P (insn)
&& (label = JUMP_LABEL (insn)) != NULL_RTX
&& (table = next_active_insn (label)) != NULL_RTX
- && GET_CODE (table) == JUMP_INSN
+ && JUMP_P (table)
&& (GET_CODE (PATTERN (table)) == ADDR_VEC
|| GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
{
@@ -2880,7 +2557,7 @@ int
computed_jump_p (rtx insn)
{
int i;
- if (GET_CODE (insn) == JUMP_INSN)
+ if (JUMP_P (insn))
{
rtx pat = PATTERN (insn);
@@ -2912,6 +2589,82 @@ computed_jump_p (rtx insn)
return 0;
}
+/* Optimized loop of for_each_rtx, trying to avoid useless recursive
+ calls. Processes the subexpressions of EXP and passes them to F. */
+static int
+for_each_rtx_1 (rtx exp, int n, rtx_function f, void *data)
+{
+ int result, i, j;
+ const char *format = GET_RTX_FORMAT (GET_CODE (exp));
+ rtx *x;
+
+ for (; format[n] != '\0'; n++)
+ {
+ switch (format[n])
+ {
+ case 'e':
+ /* Call F on X. */
+ x = &XEXP (exp, n);
+ result = (*f) (x, data);
+ if (result == -1)
+ /* Do not traverse sub-expressions. */
+ continue;
+ else if (result != 0)
+ /* Stop the traversal. */
+ return result;
+
+ if (*x == NULL_RTX)
+ /* There are no sub-expressions. */
+ continue;
+
+ i = non_rtx_starting_operands[GET_CODE (*x)];
+ if (i >= 0)
+ {
+ result = for_each_rtx_1 (*x, i, f, data);
+ if (result != 0)
+ return result;
+ }
+ break;
+
+ case 'V':
+ case 'E':
+ if (XVEC (exp, n) == 0)
+ continue;
+ for (j = 0; j < XVECLEN (exp, n); ++j)
+ {
+ /* Call F on X. */
+ x = &XVECEXP (exp, n, j);
+ result = (*f) (x, data);
+ if (result == -1)
+ /* Do not traverse sub-expressions. */
+ continue;
+ else if (result != 0)
+ /* Stop the traversal. */
+ return result;
+
+ if (*x == NULL_RTX)
+ /* There are no sub-expressions. */
+ continue;
+
+ i = non_rtx_starting_operands[GET_CODE (*x)];
+ if (i >= 0)
+ {
+ result = for_each_rtx_1 (*x, i, f, data);
+ if (result != 0)
+ return result;
+ }
+ }
+ break;
+
+ default:
+ /* Nothing to do. */
+ break;
+ }
+ }
+
+ return 0;
+}
+
/* Traverse X via depth-first search, calling F for each
sub-expression (including X itself). F is also passed the DATA.
If F returns -1, do not traverse sub-expressions, but continue
@@ -2929,8 +2682,6 @@ int
for_each_rtx (rtx *x, rtx_function f, void *data)
{
int result;
- int length;
- const char *format;
int i;
/* Call F on X. */
@@ -2946,43 +2697,14 @@ for_each_rtx (rtx *x, rtx_function f, void *data)
/* There are no sub-expressions. */
return 0;
- length = GET_RTX_LENGTH (GET_CODE (*x));
- format = GET_RTX_FORMAT (GET_CODE (*x));
-
- for (i = 0; i < length; ++i)
- {
- switch (format[i])
- {
- case 'e':
- result = for_each_rtx (&XEXP (*x, i), f, data);
- if (result != 0)
- return result;
- break;
-
- case 'V':
- case 'E':
- if (XVEC (*x, i) != 0)
- {
- int j;
- for (j = 0; j < XVECLEN (*x, i); ++j)
- {
- result = for_each_rtx (&XVECEXP (*x, i, j), f, data);
- if (result != 0)
- return result;
- }
- }
- break;
-
- default:
- /* Nothing to do. */
- break;
- }
-
- }
+ i = non_rtx_starting_operands[GET_CODE (*x)];
+ if (i < 0)
+ return 0;
- return 0;
+ return for_each_rtx_1 (*x, i, f, data);
}
+
/* Searches X for any reference to REGNO, returning the rtx of the
reference found if any. Otherwise, returns NULL_RTX. */
@@ -2993,7 +2715,7 @@ regno_use_in (unsigned int regno, rtx x)
int i, j;
rtx tem;
- if (GET_CODE (x) == REG && REGNO (x) == regno)
+ if (REG_P (x) && REGNO (x) == regno)
return x;
fmt = GET_RTX_FORMAT (GET_CODE (x));
@@ -3022,32 +2744,61 @@ regno_use_in (unsigned int regno, rtx x)
int
commutative_operand_precedence (rtx op)
{
+ enum rtx_code code = GET_CODE (op);
+
/* Constants always come the second operand. Prefer "nice" constants. */
- if (GET_CODE (op) == CONST_INT)
- return -5;
- if (GET_CODE (op) == CONST_DOUBLE)
- return -4;
- if (CONSTANT_P (op))
- return -3;
-
- /* SUBREGs of objects should come second. */
- if (GET_CODE (op) == SUBREG
- && GET_RTX_CLASS (GET_CODE (SUBREG_REG (op))) == 'o')
- return -2;
-
- /* If only one operand is a `neg', `not',
- `mult', `plus', or `minus' expression, it will be the first
- operand. */
- if (GET_CODE (op) == NEG || GET_CODE (op) == NOT
- || GET_CODE (op) == MULT || GET_CODE (op) == PLUS
- || GET_CODE (op) == MINUS)
- return 2;
-
- /* Complex expressions should be the first, so decrease priority
- of objects. */
- if (GET_RTX_CLASS (GET_CODE (op)) == 'o')
- return -1;
- return 0;
+ if (code == CONST_INT)
+ return -7;
+ if (code == CONST_DOUBLE)
+ return -6;
+ op = avoid_constant_pool_reference (op);
+ code = GET_CODE (op);
+
+ switch (GET_RTX_CLASS (code))
+ {
+ case RTX_CONST_OBJ:
+ if (code == CONST_INT)
+ return -5;
+ if (code == CONST_DOUBLE)
+ return -4;
+ return -3;
+
+ case RTX_EXTRA:
+ /* SUBREGs of objects should come second. */
+ if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
+ return -2;
+
+ if (!CONSTANT_P (op))
+ return 0;
+ else
+ /* As for RTX_CONST_OBJ. */
+ return -3;
+
+ case RTX_OBJ:
+ /* Complex expressions should be the first, so decrease priority
+ of objects. */
+ return -1;
+
+ case RTX_COMM_ARITH:
+ /* Prefer operands that are themselves commutative to be first.
+ This helps to make things linear. In particular,
+ (and (and (reg) (reg)) (not (reg))) is canonical. */
+ return 4;
+
+ case RTX_BIN_ARITH:
+ /* If only one operand is a binary expression, it will be the first
+ operand. In particular, (plus (minus (reg) (reg)) (neg (reg)))
+ is canonical, although it will usually be further simplified. */
+ return 2;
+
+ case RTX_UNARY:
+ /* Then prefer NEG and NOT. */
+ if (code == NEG || code == NOT)
+ return 1;
+
+ default:
+ return 0;
+ }
}
/* Return 1 iff it is necessary to swap operands of commutative operation
@@ -3082,82 +2833,6 @@ auto_inc_p (rtx x)
return 0;
}
-/* Return 1 if the sequence of instructions beginning with FROM and up
- to and including TO is safe to move. If NEW_TO is non-NULL, and
- the sequence is not already safe to move, but can be easily
- extended to a sequence which is safe, then NEW_TO will point to the
- end of the extended sequence.
-
- For now, this function only checks that the region contains whole
- exception regions, but it could be extended to check additional
- conditions as well. */
-
-int
-insns_safe_to_move_p (rtx from, rtx to, rtx *new_to)
-{
- int eh_region_count = 0;
- int past_to_p = 0;
- rtx r = from;
-
- /* By default, assume the end of the region will be what was
- suggested. */
- if (new_to)
- *new_to = to;
-
- while (r)
- {
- if (GET_CODE (r) == NOTE)
- {
- switch (NOTE_LINE_NUMBER (r))
- {
- case NOTE_INSN_EH_REGION_BEG:
- ++eh_region_count;
- break;
-
- case NOTE_INSN_EH_REGION_END:
- if (eh_region_count == 0)
- /* This sequence of instructions contains the end of
- an exception region, but not he beginning. Moving
- it will cause chaos. */
- return 0;
-
- --eh_region_count;
- break;
-
- default:
- break;
- }
- }
- else if (past_to_p)
- /* If we've passed TO, and we see a non-note instruction, we
- can't extend the sequence to a movable sequence. */
- return 0;
-
- if (r == to)
- {
- if (!new_to)
- /* It's OK to move the sequence if there were matched sets of
- exception region notes. */
- return eh_region_count == 0;
-
- past_to_p = 1;
- }
-
- /* It's OK to move the sequence if there were matched sets of
- exception region notes. */
- if (past_to_p && eh_region_count == 0)
- {
- *new_to = r;
- return 1;
- }
-
- /* Go to the next instruction. */
- r = NEXT_INSN (r);
- }
-
- return 0;
-}
-
/* Return nonzero if IN contains a piece of rtl that has the address LOC. */
int
loc_mentioned_in_p (rtx *loc, rtx in)
@@ -3168,7 +2843,7 @@ loc_mentioned_in_p (rtx *loc, rtx in)
for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
{
- if (loc == &in->u.fld[i].rtx)
+ if (loc == &in->u.fld[i].rt_rtx)
return 1;
if (fmt[i] == 'e')
{
@@ -3183,48 +2858,58 @@ loc_mentioned_in_p (rtx *loc, rtx in)
return 0;
}
-/* Given a subreg X, return the bit offset where the subreg begins
- (counting from the least significant bit of the reg). */
+/* Helper function for subreg_lsb. Given a subreg's OUTER_MODE, INNER_MODE,
+ and SUBREG_BYTE, return the bit offset where the subreg begins
+ (counting from the least significant bit of the operand). */
unsigned int
-subreg_lsb (rtx x)
+subreg_lsb_1 (enum machine_mode outer_mode,
+ enum machine_mode inner_mode,
+ unsigned int subreg_byte)
{
- enum machine_mode inner_mode = GET_MODE (SUBREG_REG (x));
- enum machine_mode mode = GET_MODE (x);
unsigned int bitpos;
unsigned int byte;
unsigned int word;
/* A paradoxical subreg begins at bit position 0. */
- if (GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (inner_mode))
+ if (GET_MODE_BITSIZE (outer_mode) > GET_MODE_BITSIZE (inner_mode))
return 0;
if (WORDS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
/* If the subreg crosses a word boundary ensure that
it also begins and ends on a word boundary. */
- if ((SUBREG_BYTE (x) % UNITS_PER_WORD
- + GET_MODE_SIZE (mode)) > UNITS_PER_WORD
- && (SUBREG_BYTE (x) % UNITS_PER_WORD
- || GET_MODE_SIZE (mode) % UNITS_PER_WORD))
- abort ();
+ gcc_assert (!((subreg_byte % UNITS_PER_WORD
+ + GET_MODE_SIZE (outer_mode)) > UNITS_PER_WORD
+ && (subreg_byte % UNITS_PER_WORD
+ || GET_MODE_SIZE (outer_mode) % UNITS_PER_WORD)));
if (WORDS_BIG_ENDIAN)
word = (GET_MODE_SIZE (inner_mode)
- - (SUBREG_BYTE (x) + GET_MODE_SIZE (mode))) / UNITS_PER_WORD;
+ - (subreg_byte + GET_MODE_SIZE (outer_mode))) / UNITS_PER_WORD;
else
- word = SUBREG_BYTE (x) / UNITS_PER_WORD;
+ word = subreg_byte / UNITS_PER_WORD;
bitpos = word * BITS_PER_WORD;
if (BYTES_BIG_ENDIAN)
byte = (GET_MODE_SIZE (inner_mode)
- - (SUBREG_BYTE (x) + GET_MODE_SIZE (mode))) % UNITS_PER_WORD;
+ - (subreg_byte + GET_MODE_SIZE (outer_mode))) % UNITS_PER_WORD;
else
- byte = SUBREG_BYTE (x) % UNITS_PER_WORD;
+ byte = subreg_byte % UNITS_PER_WORD;
bitpos += byte * BITS_PER_UNIT;
return bitpos;
}
+/* Given a subreg X, return the bit offset where the subreg begins
+ (counting from the least significant bit of the reg). */
+
+unsigned int
+subreg_lsb (rtx x)
+{
+ return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
+ SUBREG_BYTE (x));
+}
+
/* This function returns the regno offset of a subreg expression.
xregno - A regno of an inner hard subreg_reg (or what will become one).
xmode - The mode of xregno.
@@ -3239,11 +2924,15 @@ subreg_regno_offset (unsigned int xregno, enum machine_mode xmode,
int mode_multiple, nregs_multiple;
int y_offset;
- if (xregno >= FIRST_PSEUDO_REGISTER)
- abort ();
+ gcc_assert (xregno < FIRST_PSEUDO_REGISTER);
- nregs_xmode = HARD_REGNO_NREGS (xregno, xmode);
- nregs_ymode = HARD_REGNO_NREGS (xregno, ymode);
+ /* Adjust nregs_xmode to allow for 'holes'. */
+ if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode))
+ nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
+ else
+ nregs_xmode = hard_regno_nregs[xregno][xmode];
+
+ nregs_ymode = hard_regno_nregs[xregno][ymode];
/* If this is a big endian paradoxical subreg, which uses more actual
hard registers than the original register, we must return a negative
@@ -3257,10 +2946,9 @@ subreg_regno_offset (unsigned int xregno, enum machine_mode xmode,
if (offset == 0 || nregs_xmode == nregs_ymode)
return 0;
- /* size of ymode must not be greater than the size of xmode. */
+ /* Size of ymode must not be greater than the size of xmode. */
mode_multiple = GET_MODE_SIZE (xmode) / GET_MODE_SIZE (ymode);
- if (mode_multiple == 0)
- abort ();
+ gcc_assert (mode_multiple != 0);
y_offset = offset / GET_MODE_SIZE (ymode);
nregs_multiple = nregs_xmode / nregs_ymode;
@@ -3273,7 +2961,7 @@ subreg_regno_offset (unsigned int xregno, enum machine_mode xmode,
xmode - The mode of xregno.
offset - The byte offset.
ymode - The mode of a top level SUBREG (or what may become one).
- RETURN - The regno offset which would be used. */
+ RETURN - Whether the offset is representable. */
bool
subreg_offset_representable_p (unsigned int xregno, enum machine_mode xmode,
unsigned int offset, enum machine_mode ymode)
@@ -3281,33 +2969,74 @@ subreg_offset_representable_p (unsigned int xregno, enum machine_mode xmode,
int nregs_xmode, nregs_ymode;
int mode_multiple, nregs_multiple;
int y_offset;
+ int regsize_xmode, regsize_ymode;
+
+ gcc_assert (xregno < FIRST_PSEUDO_REGISTER);
- if (xregno >= FIRST_PSEUDO_REGISTER)
- abort ();
+ /* If there are holes in a non-scalar mode in registers, we expect
+ that it is made up of its units concatenated together. */
+ if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode))
+ {
+ enum machine_mode xmode_unit;
- nregs_xmode = HARD_REGNO_NREGS (xregno, xmode);
- nregs_ymode = HARD_REGNO_NREGS (xregno, ymode);
+ nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
+ if (GET_MODE_INNER (xmode) == VOIDmode)
+ xmode_unit = xmode;
+ else
+ xmode_unit = GET_MODE_INNER (xmode);
+ gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode_unit));
+ gcc_assert (nregs_xmode
+ == (GET_MODE_NUNITS (xmode)
+ * HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode_unit)));
+ gcc_assert (hard_regno_nregs[xregno][xmode]
+ == (hard_regno_nregs[xregno][xmode_unit]
+ * GET_MODE_NUNITS (xmode)));
+
+ /* You can only ask for a SUBREG of a value with holes in the middle
+ if you don't cross the holes. (Such a SUBREG should be done by
+ picking a different register class, or doing it in memory if
+ necessary.) An example of a value with holes is XCmode on 32-bit
+ x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
+ 3 for each part, but in memory it's two 128-bit parts.
+ Padding is assumed to be at the end (not necessarily the 'high part')
+ of each unit. */
+ if ((offset / GET_MODE_SIZE (xmode_unit) + 1
+ < GET_MODE_NUNITS (xmode))
+ && (offset / GET_MODE_SIZE (xmode_unit)
+ != ((offset + GET_MODE_SIZE (ymode) - 1)
+ / GET_MODE_SIZE (xmode_unit))))
+ return false;
+ }
+ else
+ nregs_xmode = hard_regno_nregs[xregno][xmode];
+
+ nregs_ymode = hard_regno_nregs[xregno][ymode];
- /* paradoxical subregs are always valid. */
+ /* Paradoxical subregs are otherwise valid. */
if (offset == 0
&& nregs_ymode > nregs_xmode
&& (GET_MODE_SIZE (ymode) > UNITS_PER_WORD
? WORDS_BIG_ENDIAN : BYTES_BIG_ENDIAN))
return true;
- /* Lowpart subregs are always valid. */
+ /* If registers store different numbers of bits in the different
+ modes, we cannot generally form this subreg. */
+ regsize_xmode = GET_MODE_SIZE (xmode) / nregs_xmode;
+ regsize_ymode = GET_MODE_SIZE (ymode) / nregs_ymode;
+ if (regsize_xmode > regsize_ymode && nregs_ymode > 1)
+ return false;
+ if (regsize_ymode > regsize_xmode && nregs_xmode > 1)
+ return false;
+
+ /* Lowpart subregs are otherwise valid. */
if (offset == subreg_lowpart_offset (ymode, xmode))
return true;
-#ifdef ENABLE_CHECKING
- /* This should always pass, otherwise we don't know how to verify the
- constraint. These conditions may be relaxed but subreg_offset would
- need to be redesigned. */
- if (GET_MODE_SIZE (xmode) % GET_MODE_SIZE (ymode)
- || GET_MODE_SIZE (ymode) % nregs_ymode
- || nregs_xmode % nregs_ymode)
- abort ();
-#endif
+ /* This should always pass, otherwise we don't know how to verify
+ the constraint. These conditions may be relaxed but
+ subreg_regno_offset would need to be redesigned. */
+ gcc_assert ((GET_MODE_SIZE (xmode) % GET_MODE_SIZE (ymode)) == 0);
+ gcc_assert ((nregs_xmode % nregs_ymode) == 0);
/* The XMODE value can be seen as a vector of NREGS_XMODE
values. The subreg must represent a lowpart of given field.
@@ -3317,18 +3046,16 @@ subreg_offset_representable_p (unsigned int xregno, enum machine_mode xmode,
/ nregs_xmode,
MODE_INT, 0));
- /* size of ymode must not be greater than the size of xmode. */
+ /* Size of ymode must not be greater than the size of xmode. */
mode_multiple = GET_MODE_SIZE (xmode) / GET_MODE_SIZE (ymode);
- if (mode_multiple == 0)
- abort ();
+ gcc_assert (mode_multiple != 0);
y_offset = offset / GET_MODE_SIZE (ymode);
nregs_multiple = nregs_xmode / nregs_ymode;
-#ifdef ENABLE_CHECKING
- if (offset % GET_MODE_SIZE (ymode)
- || mode_multiple % nregs_multiple)
- abort ();
-#endif
+
+ gcc_assert ((offset % GET_MODE_SIZE (ymode)) == 0);
+ gcc_assert ((mode_multiple % nregs_multiple) == 0);
+
return (!(y_offset % (mode_multiple / nregs_multiple)));
}
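A minimal sketch of the final modulo test in subreg_offset_representable_p, assuming no register padding and that the earlier register-size and lowpart checks have already run; all names are invented:

#include <stdio.h>
#include <stdbool.h>

static bool
example_offset_representable (int xmode_size, int nregs_xmode,
                              int ymode_size, int nregs_ymode, int offset)
{
  int mode_multiple = xmode_size / ymode_size;    /* YMODE fields per XMODE value */
  int nregs_multiple = nregs_xmode / nregs_ymode; /* register groups per value    */
  int y_offset = offset / ymode_size;             /* which field OFFSET selects   */

  /* Each register group covers mode_multiple / nregs_multiple fields;
     the subreg is representable only at multiples of that count.  */
  return (y_offset % (mode_multiple / nregs_multiple)) == 0;
}

int
main (void)
{
  /* An 8-byte value held in one 8-byte register, viewed 4 bytes at a
     time: offset 0 is representable, but offset 4 names the high half
     of the register, which no register-number offset can express.  */
  printf ("%d\n", example_offset_representable (8, 1, 4, 1, 0));  /* 1 */
  printf ("%d\n", example_offset_representable (8, 1, 4, 1, 4));  /* 0 */
  return 0;
}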
@@ -3367,12 +3094,15 @@ parms_set (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data)
}
/* Look backward for first parameter to be loaded.
+ Note that loads of all parameters will not necessarily be
+ found if CSE has eliminated some of them (e.g., an argument
+ to the outer function is passed down as a parameter).
Do not skip BOUNDARY. */
rtx
find_first_parameter_load (rtx call_insn, rtx boundary)
{
struct parms_set_data parm;
- rtx p, before;
+ rtx p, before, first_set;
/* Since different machines initialize their parameter registers
in different orders, assume nothing. Collect the set of all
@@ -3381,10 +3111,9 @@ find_first_parameter_load (rtx call_insn, rtx boundary)
parm.nregs = 0;
for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1))
if (GET_CODE (XEXP (p, 0)) == USE
- && GET_CODE (XEXP (XEXP (p, 0), 0)) == REG)
+ && REG_P (XEXP (XEXP (p, 0), 0)))
{
- if (REGNO (XEXP (XEXP (p, 0), 0)) >= FIRST_PSEUDO_REGISTER)
- abort ();
+ gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER);
/* We only care about registers which can hold function
arguments. */
@@ -3395,6 +3124,7 @@ find_first_parameter_load (rtx call_insn, rtx boundary)
parm.nregs++;
}
before = call_insn;
+ first_set = call_insn;
/* Search backward for the first set of a register in this set. */
while (parm.nregs && before != boundary)
@@ -3403,24 +3133,34 @@ find_first_parameter_load (rtx call_insn, rtx boundary)
/* It is possible that some loads got CSEed from one call to
another. Stop in that case. */
- if (GET_CODE (before) == CALL_INSN)
+ if (CALL_P (before))
break;
      /* Our caller must either ensure that we will find all sets
         (in case the code has not been optimized yet), or guard
         against possible labels by setting BOUNDARY to the preceding
         CODE_LABEL.  */
- if (GET_CODE (before) == CODE_LABEL)
+ if (LABEL_P (before))
{
- if (before != boundary)
- abort ();
+ gcc_assert (before == boundary);
break;
}
if (INSN_P (before))
- note_stores (PATTERN (before), parms_set, &parm);
+ {
+ int nregs_old = parm.nregs;
+ note_stores (PATTERN (before), parms_set, &parm);
+ /* If we found something that did not set a parameter reg,
+ we're done. Do not keep going, as that might result
+ in hoisting an insn before the setting of a pseudo
+ that is used by the hoisted insn. */
+ if (nregs_old != parm.nregs)
+ first_set = before;
+ else
+ break;
+ }
}
- return before;
+ return first_set;
}
/* Return true if we should avoid inserting code between INSN and preceding
@@ -3433,14 +3173,14 @@ keep_with_call_p (rtx insn)
if (INSN_P (insn) && (set = single_set (insn)) != NULL)
{
- if (GET_CODE (SET_DEST (set)) == REG
+ if (REG_P (SET_DEST (set))
&& REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER
&& fixed_regs[REGNO (SET_DEST (set))]
&& general_operand (SET_SRC (set), VOIDmode))
return true;
- if (GET_CODE (SET_SRC (set)) == REG
+ if (REG_P (SET_SRC (set))
&& FUNCTION_VALUE_REGNO_P (REGNO (SET_SRC (set)))
- && GET_CODE (SET_DEST (set)) == REG
+ && REG_P (SET_DEST (set))
&& REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
return true;
/* There may be a stack pop just after the call and before the store
@@ -3456,285 +3196,1515 @@ keep_with_call_p (rtx insn)
return false;
}
-/* Return true when store to register X can be hoisted to the place
- with LIVE registers (can be NULL). Value VAL contains destination
- whose value will be used. */
+/* Return true if LABEL is a target of JUMP_INSN. This applies only
+ to non-complex jumps. That is, direct unconditional, conditional,
+ and tablejumps, but not computed jumps or returns. It also does
+ not apply to the fallthru case of a conditional jump. */
-static bool
-hoist_test_store (rtx x, rtx val, regset live)
+bool
+label_is_jump_target_p (rtx label, rtx jump_insn)
{
- if (GET_CODE (x) == SCRATCH)
- return true;
+ rtx tmp = JUMP_LABEL (jump_insn);
- if (rtx_equal_p (x, val))
+ if (label == tmp)
return true;
- /* Allow subreg of X in case it is not writing just part of multireg pseudo.
- Then we would need to update all users to care hoisting the store too.
- Caller may represent that by specifying whole subreg as val. */
-
- if (GET_CODE (x) == SUBREG && rtx_equal_p (SUBREG_REG (x), val))
+ if (tablejump_p (jump_insn, NULL, &tmp))
{
- if (GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))) > UNITS_PER_WORD
- && GET_MODE_BITSIZE (GET_MODE (x)) <
- GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))))
- return false;
- return true;
+ rtvec vec = XVEC (PATTERN (tmp),
+ GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC);
+ int i, veclen = GET_NUM_ELEM (vec);
+
+ for (i = 0; i < veclen; ++i)
+ if (XEXP (RTVEC_ELT (vec, i), 0) == label)
+ return true;
}
- if (GET_CODE (x) == SUBREG)
- x = SUBREG_REG (x);
- /* Anything except register store is not hoistable. This includes the
- partial stores to registers. */
+ return false;
+}
- if (!REG_P (x))
- return false;
+
+/* Return an estimate of the cost of computing rtx X.
+ One use is in cse, to decide which expression to keep in the hash table.
+ Another is in rtl generation, to pick the cheapest way to multiply.
+ Other uses like the latter are expected in the future. */
+
+int
+rtx_cost (rtx x, enum rtx_code outer_code ATTRIBUTE_UNUSED)
+{
+ int i, j;
+ enum rtx_code code;
+ const char *fmt;
+ int total;
+
+ if (x == 0)
+ return 0;
- /* Pseudo registers can be always replaced by another pseudo to avoid
- the side effect, for hard register we must ensure that they are dead.
- Eventually we may want to add code to try turn pseudos to hards, but it
- is unlikely useful. */
+ /* Compute the default costs of certain things.
+ Note that targetm.rtx_costs can override the defaults. */
+
+ code = GET_CODE (x);
+ switch (code)
+ {
+ case MULT:
+ total = COSTS_N_INSNS (5);
+ break;
+ case DIV:
+ case UDIV:
+ case MOD:
+ case UMOD:
+ total = COSTS_N_INSNS (7);
+ break;
+ case USE:
+ /* Used in combine.c as a marker. */
+ total = 0;
+ break;
+ default:
+ total = COSTS_N_INSNS (1);
+ }
- if (REGNO (x) < FIRST_PSEUDO_REGISTER)
+ switch (code)
{
- int regno = REGNO (x);
- int n = HARD_REGNO_NREGS (regno, GET_MODE (x));
+ case REG:
+ return 0;
- if (!live)
- return false;
- if (REGNO_REG_SET_P (live, regno))
- return false;
- while (--n > 0)
- if (REGNO_REG_SET_P (live, regno + n))
- return false;
+ case SUBREG:
+ total = 0;
+ /* If we can't tie these modes, make this expensive. The larger
+ the mode, the more expensive it is. */
+ if (! MODES_TIEABLE_P (GET_MODE (x), GET_MODE (SUBREG_REG (x))))
+ return COSTS_N_INSNS (2
+ + GET_MODE_SIZE (GET_MODE (x)) / UNITS_PER_WORD);
+ break;
+
+ default:
+ if (targetm.rtx_costs (x, code, outer_code, &total))
+ return total;
+ break;
}
- return true;
+
+ /* Sum the costs of the sub-rtx's, plus cost of this operation,
+ which is already in total. */
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ if (fmt[i] == 'e')
+ total += rtx_cost (XEXP (x, i), code);
+ else if (fmt[i] == 'E')
+ for (j = 0; j < XVECLEN (x, i); j++)
+ total += rtx_cost (XVECEXP (x, i, j), code);
+
+ return total;
}
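The recursion in rtx_cost can be illustrated on a toy expression tree; the node type and cost table below are invented, only the shape of the computation (a per-operator default cost plus the summed cost of the operands) mirrors the function above:

#include <stdio.h>

#define COSTS_N_INSNS(n) ((n) * 4)

enum op { OP_REG, OP_PLUS, OP_MULT, OP_DIV };

struct expr
{
  enum op code;
  struct expr *ops[2];   /* NULL for leaves */
};

static int
example_cost (const struct expr *x)
{
  int total, i;

  if (!x)
    return 0;

  switch (x->code)
    {
    case OP_REG:  return 0;                     /* registers are free */
    case OP_MULT: total = COSTS_N_INSNS (5); break;
    case OP_DIV:  total = COSTS_N_INSNS (7); break;
    default:      total = COSTS_N_INSNS (1); break;
    }

  /* Sum in the costs of the sub-expressions, as rtx_cost does for
     each 'e'/'E' operand.  */
  for (i = 0; i < 2; i++)
    total += example_cost (x->ops[i]);

  return total;
}

int
main (void)
{
  struct expr a = { OP_REG,  { NULL, NULL } };
  struct expr b = { OP_REG,  { NULL, NULL } };
  struct expr m = { OP_MULT, { &a, &b } };
  struct expr p = { OP_PLUS, { &m, &a } };      /* (a * b) + a */

  printf ("%d\n", example_cost (&p));           /* 4 + 20 + 0 = 24 */
  return 0;
}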
+
+/* Return cost of address expression X.
+ Expect that X is properly formed address reference. */
+int
+address_cost (rtx x, enum machine_mode mode)
+{
+ /* We may be asked for the cost of various unusual addresses, such as the
+ operands of push instructions. It is not worthwhile to complicate the
+ target hook for such cases. */
-/* Return true if INSN can be hoisted to place with LIVE hard registers
- (LIVE can be NULL when unknown). VAL is expected to be stored by the insn
- and used by the hoisting pass. */
+ if (!memory_address_p (mode, x))
+ return 1000;
-bool
-can_hoist_insn_p (rtx insn, rtx val, regset live)
+ return targetm.address_cost (x);
+}
+
+/* If the target doesn't override, compute the cost as with arithmetic. */
+
+int
+default_address_cost (rtx x)
{
- rtx pat = PATTERN (insn);
- int i;
+ return rtx_cost (x, MEM);
+}
+
- /* It probably does not worth the complexity to handle multiple
- set stores. */
- if (!single_set (insn))
- return false;
- /* We can move CALL_INSN, but we need to check that all caller clobbered
- regs are dead. */
- if (GET_CODE (insn) == CALL_INSN)
- return false;
- /* In future we will handle hoisting of libcall sequences, but
- give up for now. */
- if (find_reg_note (insn, REG_RETVAL, NULL_RTX))
- return false;
- switch (GET_CODE (pat))
+unsigned HOST_WIDE_INT
+nonzero_bits (rtx x, enum machine_mode mode)
+{
+ return cached_nonzero_bits (x, mode, NULL_RTX, VOIDmode, 0);
+}
+
+unsigned int
+num_sign_bit_copies (rtx x, enum machine_mode mode)
+{
+ return cached_num_sign_bit_copies (x, mode, NULL_RTX, VOIDmode, 0);
+}
+
+/* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
+ It avoids exponential behavior in nonzero_bits1 when X has
+ identical subexpressions on the first or the second level. */
+
+static unsigned HOST_WIDE_INT
+cached_nonzero_bits (rtx x, enum machine_mode mode, rtx known_x,
+ enum machine_mode known_mode,
+ unsigned HOST_WIDE_INT known_ret)
+{
+ if (x == known_x && mode == known_mode)
+ return known_ret;
+
+ /* Try to find identical subexpressions. If found call
+ nonzero_bits1 on X with the subexpressions as KNOWN_X and the
+ precomputed value for the subexpression as KNOWN_RET. */
+
+ if (ARITHMETIC_P (x))
{
- case SET:
- if (!hoist_test_store (SET_DEST (pat), val, live))
- return false;
+ rtx x0 = XEXP (x, 0);
+ rtx x1 = XEXP (x, 1);
+
+ /* Check the first level. */
+ if (x0 == x1)
+ return nonzero_bits1 (x, mode, x0, mode,
+ cached_nonzero_bits (x0, mode, known_x,
+ known_mode, known_ret));
+
+ /* Check the second level. */
+ if (ARITHMETIC_P (x0)
+ && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
+ return nonzero_bits1 (x, mode, x1, mode,
+ cached_nonzero_bits (x1, mode, known_x,
+ known_mode, known_ret));
+
+ if (ARITHMETIC_P (x1)
+ && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
+ return nonzero_bits1 (x, mode, x0, mode,
+ cached_nonzero_bits (x0, mode, known_x,
+ known_mode, known_ret));
+ }
+
+ return nonzero_bits1 (x, mode, known_x, known_mode, known_ret);
+}
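A loose standalone analogue of the caching idea in cached_nonzero_bits: when a node's two operands are the identical object, evaluate the shared operand once and pass the result down as (known_x, known_ret) so the second walk returns immediately. The sketch folds the wrapper and worker into one invented function and measures the call count on a chain of shared nodes:

#include <stdio.h>

struct node
{
  struct node *op0, *op1;   /* NULL for leaves */
  unsigned bits;            /* leaf payload */
};

static unsigned long calls;

static unsigned
bits_of (const struct node *x, const struct node *known_x, unsigned known_ret)
{
  calls++;
  if (x == known_x)
    return known_ret;                      /* precomputed: stop here */
  if (!x->op0)
    return x->bits;                        /* leaf */
  if (x->op0 == x->op1)
    {
      /* Shared operand: evaluate it once, then short-circuit it.  */
      unsigned v = bits_of (x->op0, known_x, known_ret);
      return bits_of (x->op0, x->op0, v) | bits_of (x->op1, x->op0, v);
    }
  return bits_of (x->op0, known_x, known_ret)
         | bits_of (x->op1, known_x, known_ret);
}

int
main (void)
{
  enum { DEPTH = 20 };
  struct node nodes[DEPTH + 1] = { { NULL, NULL, 0x0f } };
  unsigned result;
  int i;

  for (i = 1; i <= DEPTH; i++)
    nodes[i].op0 = nodes[i].op1 = &nodes[i - 1];

  result = bits_of (&nodes[DEPTH], NULL, 0);
  /* Without the short-circuit this walk would take 2^20 calls;
     with it the count stays linear in the depth (61 here).  */
  printf ("result %#x after %lu calls\n", result, calls);
  return 0;
}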
+
+/* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
+ We don't let nonzero_bits recur into num_sign_bit_copies, because that
+ is less useful. We can't allow both, because that results in exponential
+ run time recursion. There is a nullstone testcase that triggered
+ this. This macro avoids accidental uses of num_sign_bit_copies. */
+#define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior
+
+/* Given an expression, X, compute which bits in X can be nonzero.
+ We don't care about bits outside of those defined in MODE.
+
+ For most X this is simply GET_MODE_MASK (GET_MODE (MODE)), but if X is
+ an arithmetic operation, we can do better. */
+
+static unsigned HOST_WIDE_INT
+nonzero_bits1 (rtx x, enum machine_mode mode, rtx known_x,
+ enum machine_mode known_mode,
+ unsigned HOST_WIDE_INT known_ret)
+{
+ unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
+ unsigned HOST_WIDE_INT inner_nz;
+ enum rtx_code code;
+ unsigned int mode_width = GET_MODE_BITSIZE (mode);
+
+ /* For floating-point values, assume all bits are needed. */
+ if (FLOAT_MODE_P (GET_MODE (x)) || FLOAT_MODE_P (mode))
+ return nonzero;
+
+ /* If X is wider than MODE, use its mode instead. */
+ if (GET_MODE_BITSIZE (GET_MODE (x)) > mode_width)
+ {
+ mode = GET_MODE (x);
+ nonzero = GET_MODE_MASK (mode);
+ mode_width = GET_MODE_BITSIZE (mode);
+ }
+
+ if (mode_width > HOST_BITS_PER_WIDE_INT)
+ /* Our only callers in this case look for single bit values. So
+ just return the mode mask. Those tests will then be false. */
+ return nonzero;
+
+#ifndef WORD_REGISTER_OPERATIONS
+ /* If MODE is wider than X, but both are a single word for both the host
+ and target machines, we can compute this from which bits of the
+ object might be nonzero in its own mode, taking into account the fact
+ that on many CISC machines, accessing an object in a wider mode
+ causes the high-order bits to become undefined. So they are
+ not known to be zero. */
+
+ if (GET_MODE (x) != VOIDmode && GET_MODE (x) != mode
+ && GET_MODE_BITSIZE (GET_MODE (x)) <= BITS_PER_WORD
+ && GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
+ && GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (GET_MODE (x)))
+ {
+ nonzero &= cached_nonzero_bits (x, GET_MODE (x),
+ known_x, known_mode, known_ret);
+ nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x));
+ return nonzero;
+ }
+#endif
+
+ code = GET_CODE (x);
+ switch (code)
+ {
+ case REG:
+#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
+ /* If pointers extend unsigned and this is a pointer in Pmode, say that
+ all the bits above ptr_mode are known to be zero. */
+ if (POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
+ && REG_POINTER (x))
+ nonzero &= GET_MODE_MASK (ptr_mode);
+#endif
+
+ /* Include declared information about alignment of pointers. */
+ /* ??? We don't properly preserve REG_POINTER changes across
+ pointer-to-integer casts, so we can't trust it except for
+ things that we know must be pointers. See execute/960116-1.c. */
+ if ((x == stack_pointer_rtx
+ || x == frame_pointer_rtx
+ || x == arg_pointer_rtx)
+ && REGNO_POINTER_ALIGN (REGNO (x)))
+ {
+ unsigned HOST_WIDE_INT alignment
+ = REGNO_POINTER_ALIGN (REGNO (x)) / BITS_PER_UNIT;
+
+#ifdef PUSH_ROUNDING
+ /* If PUSH_ROUNDING is defined, it is possible for the
+ stack to be momentarily aligned only to that amount,
+ so we pick the least alignment. */
+ if (x == stack_pointer_rtx && PUSH_ARGS)
+ alignment = MIN ((unsigned HOST_WIDE_INT) PUSH_ROUNDING (1),
+ alignment);
+#endif
+
+ nonzero &= ~(alignment - 1);
+ }
+
+ {
+ unsigned HOST_WIDE_INT nonzero_for_hook = nonzero;
+ rtx new = rtl_hooks.reg_nonzero_bits (x, mode, known_x,
+ known_mode, known_ret,
+ &nonzero_for_hook);
+
+ if (new)
+ nonzero_for_hook &= cached_nonzero_bits (new, mode, known_x,
+ known_mode, known_ret);
+
+ return nonzero_for_hook;
+ }
+
+ case CONST_INT:
+#ifdef SHORT_IMMEDIATES_SIGN_EXTEND
+ /* If X is negative in MODE, sign-extend the value. */
+ if (INTVAL (x) > 0 && mode_width < BITS_PER_WORD
+ && 0 != (INTVAL (x) & ((HOST_WIDE_INT) 1 << (mode_width - 1))))
+ return (INTVAL (x) | ((HOST_WIDE_INT) (-1) << mode_width));
+#endif
+
+ return INTVAL (x);
+
+ case MEM:
+#ifdef LOAD_EXTEND_OP
+ /* In many, if not most, RISC machines, reading a byte from memory
+ zeros the rest of the register. Noticing that fact saves a lot
+ of extra zero-extends. */
+ if (LOAD_EXTEND_OP (GET_MODE (x)) == ZERO_EXTEND)
+ nonzero &= GET_MODE_MASK (GET_MODE (x));
+#endif
break;
- case USE:
- /* USES do have sick semantics, so do not move them. */
- return false;
+
+ case EQ: case NE:
+ case UNEQ: case LTGT:
+ case GT: case GTU: case UNGT:
+ case LT: case LTU: case UNLT:
+ case GE: case GEU: case UNGE:
+ case LE: case LEU: case UNLE:
+ case UNORDERED: case ORDERED:
+ /* If this produces an integer result, we know which bits are set.
+ Code here used to clear bits outside the mode of X, but that is
+ now done above. */
+ /* Mind that MODE is the mode the caller wants to look at this
+ operation in, and not the actual operation mode. We can wind
+ up with (subreg:DI (gt:V4HI x y)), and we don't have anything
+ that describes the results of a vector compare. */
+ if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
+ && mode_width <= HOST_BITS_PER_WIDE_INT)
+ nonzero = STORE_FLAG_VALUE;
break;
- case CLOBBER:
- if (!hoist_test_store (XEXP (pat, 0), val, live))
- return false;
+
+ case NEG:
+#if 0
+ /* Disabled to avoid exponential mutual recursion between nonzero_bits
+ and num_sign_bit_copies. */
+ if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
+ == GET_MODE_BITSIZE (GET_MODE (x)))
+ nonzero = 1;
+#endif
+
+ if (GET_MODE_SIZE (GET_MODE (x)) < mode_width)
+ nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x)));
break;
- case PARALLEL:
- for (i = 0; i < XVECLEN (pat, 0); i++)
+
+ case ABS:
+#if 0
+ /* Disabled to avoid exponential mutual recursion between nonzero_bits
+ and num_sign_bit_copies. */
+ if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
+ == GET_MODE_BITSIZE (GET_MODE (x)))
+ nonzero = 1;
+#endif
+ break;
+
+ case TRUNCATE:
+ nonzero &= (cached_nonzero_bits (XEXP (x, 0), mode,
+ known_x, known_mode, known_ret)
+ & GET_MODE_MASK (mode));
+ break;
+
+ case ZERO_EXTEND:
+ nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
+ known_x, known_mode, known_ret);
+ if (GET_MODE (XEXP (x, 0)) != VOIDmode)
+ nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
+ break;
+
+ case SIGN_EXTEND:
+ /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
+ Otherwise, show all the bits in the outer mode but not the inner
+ may be nonzero. */
+ inner_nz = cached_nonzero_bits (XEXP (x, 0), mode,
+ known_x, known_mode, known_ret);
+ if (GET_MODE (XEXP (x, 0)) != VOIDmode)
{
- rtx x = XVECEXP (pat, 0, i);
- switch (GET_CODE (x))
- {
- case SET:
- if (!hoist_test_store (SET_DEST (x), val, live))
- return false;
+ inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
+ if (inner_nz
+ & (((HOST_WIDE_INT) 1
+ << (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0))) - 1))))
+ inner_nz |= (GET_MODE_MASK (mode)
+ & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
+ }
+
+ nonzero &= inner_nz;
+ break;
+
+ case AND:
+ nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
+ known_x, known_mode, known_ret)
+ & cached_nonzero_bits (XEXP (x, 1), mode,
+ known_x, known_mode, known_ret);
+ break;
+
+ case XOR: case IOR:
+ case UMIN: case UMAX: case SMIN: case SMAX:
+ {
+ unsigned HOST_WIDE_INT nonzero0 =
+ cached_nonzero_bits (XEXP (x, 0), mode,
+ known_x, known_mode, known_ret);
+
+ /* Don't call nonzero_bits for the second time if it cannot change
+ anything. */
+ if ((nonzero & nonzero0) != nonzero)
+ nonzero &= nonzero0
+ | cached_nonzero_bits (XEXP (x, 1), mode,
+ known_x, known_mode, known_ret);
+ }
+ break;
+
+ case PLUS: case MINUS:
+ case MULT:
+ case DIV: case UDIV:
+ case MOD: case UMOD:
+ /* We can apply the rules of arithmetic to compute the number of
+ high- and low-order zero bits of these operations. We start by
+ computing the width (position of the highest-order nonzero bit)
+ and the number of low-order zero bits for each value. */
+ {
+ unsigned HOST_WIDE_INT nz0 =
+ cached_nonzero_bits (XEXP (x, 0), mode,
+ known_x, known_mode, known_ret);
+ unsigned HOST_WIDE_INT nz1 =
+ cached_nonzero_bits (XEXP (x, 1), mode,
+ known_x, known_mode, known_ret);
+ int sign_index = GET_MODE_BITSIZE (GET_MODE (x)) - 1;
+ int width0 = floor_log2 (nz0) + 1;
+ int width1 = floor_log2 (nz1) + 1;
+ int low0 = floor_log2 (nz0 & -nz0);
+ int low1 = floor_log2 (nz1 & -nz1);
+ HOST_WIDE_INT op0_maybe_minusp
+ = (nz0 & ((HOST_WIDE_INT) 1 << sign_index));
+ HOST_WIDE_INT op1_maybe_minusp
+ = (nz1 & ((HOST_WIDE_INT) 1 << sign_index));
+ unsigned int result_width = mode_width;
+ int result_low = 0;
+
+ switch (code)
+ {
+ case PLUS:
+ result_width = MAX (width0, width1) + 1;
+ result_low = MIN (low0, low1);
+ break;
+ case MINUS:
+ result_low = MIN (low0, low1);
+ break;
+ case MULT:
+ result_width = width0 + width1;
+ result_low = low0 + low1;
+ break;
+ case DIV:
+ if (width1 == 0)
break;
- case USE:
- /* We need to fix callers to really ensure availability
- of all values insn uses, but for now it is safe to prohibit
- hoisting of any insn having such a hidden uses. */
- return false;
+ if (! op0_maybe_minusp && ! op1_maybe_minusp)
+ result_width = width0;
+ break;
+ case UDIV:
+ if (width1 == 0)
break;
- case CLOBBER:
- if (!hoist_test_store (SET_DEST (x), val, live))
- return false;
+ result_width = width0;
+ break;
+ case MOD:
+ if (width1 == 0)
break;
- default:
+ if (! op0_maybe_minusp && ! op1_maybe_minusp)
+ result_width = MIN (width0, width1);
+ result_low = MIN (low0, low1);
+ break;
+ case UMOD:
+ if (width1 == 0)
break;
+ result_width = MIN (width0, width1);
+ result_low = MIN (low0, low1);
+ break;
+ default:
+ gcc_unreachable ();
+ }
+
+ if (result_width < mode_width)
+ nonzero &= ((HOST_WIDE_INT) 1 << result_width) - 1;
+
+ if (result_low > 0)
+ nonzero &= ~(((HOST_WIDE_INT) 1 << result_low) - 1);
+
+#ifdef POINTERS_EXTEND_UNSIGNED
+ /* If pointers extend unsigned and this is an addition or subtraction
+ to a pointer in Pmode, all the bits above ptr_mode are known to be
+ zero. */
+ if (POINTERS_EXTEND_UNSIGNED > 0 && GET_MODE (x) == Pmode
+ && (code == PLUS || code == MINUS)
+ && REG_P (XEXP (x, 0)) && REG_POINTER (XEXP (x, 0)))
+ nonzero &= GET_MODE_MASK (ptr_mode);
+#endif
+ }
+ break;
+
+ case ZERO_EXTRACT:
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
+ nonzero &= ((HOST_WIDE_INT) 1 << INTVAL (XEXP (x, 1))) - 1;
+ break;
+
+ case SUBREG:
+ /* If this is a SUBREG formed for a promoted variable that has
+ been zero-extended, we know that at least the high-order bits
+ are zero, though others might be too. */
+
+ if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x) > 0)
+ nonzero = GET_MODE_MASK (GET_MODE (x))
+ & cached_nonzero_bits (SUBREG_REG (x), GET_MODE (x),
+ known_x, known_mode, known_ret);
+
+ /* If the inner mode is a single word for both the host and target
+ machines, we can compute this from which bits of the inner
+ object might be nonzero. */
+ if (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))) <= BITS_PER_WORD
+ && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x)))
+ <= HOST_BITS_PER_WIDE_INT))
+ {
+ nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
+ known_x, known_mode, known_ret);
+
+#if defined (WORD_REGISTER_OPERATIONS) && defined (LOAD_EXTEND_OP)
+ /* If this is a typical RISC machine, we only have to worry
+ about the way loads are extended. */
+ if ((LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) == SIGN_EXTEND
+ ? (((nonzero
+ & (((unsigned HOST_WIDE_INT) 1
+ << (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))) - 1))))
+ != 0))
+ : LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) != ZERO_EXTEND)
+ || !MEM_P (SUBREG_REG (x)))
+#endif
+ {
+ /* On many CISC machines, accessing an object in a wider mode
+ causes the high-order bits to become undefined. So they are
+ not known to be zero. */
+ if (GET_MODE_SIZE (GET_MODE (x))
+ > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
+ nonzero |= (GET_MODE_MASK (GET_MODE (x))
+ & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x))));
}
}
break;
+
+ case ASHIFTRT:
+ case LSHIFTRT:
+ case ASHIFT:
+ case ROTATE:
+ /* The nonzero bits are in two classes: any bits within MODE
+ that aren't in GET_MODE (x) are always significant. The rest of the
+ nonzero bits are those that are significant in the operand of
+ the shift when shifted the appropriate number of bits. This
+ shows that high-order bits are cleared by the right shift and
+ low-order bits by left shifts. */
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && INTVAL (XEXP (x, 1)) >= 0
+ && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
+ {
+ enum machine_mode inner_mode = GET_MODE (x);
+ unsigned int width = GET_MODE_BITSIZE (inner_mode);
+ int count = INTVAL (XEXP (x, 1));
+ unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (inner_mode);
+ unsigned HOST_WIDE_INT op_nonzero =
+ cached_nonzero_bits (XEXP (x, 0), mode,
+ known_x, known_mode, known_ret);
+ unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
+ unsigned HOST_WIDE_INT outer = 0;
+
+ if (mode_width > width)
+ outer = (op_nonzero & nonzero & ~mode_mask);
+
+ if (code == LSHIFTRT)
+ inner >>= count;
+ else if (code == ASHIFTRT)
+ {
+ inner >>= count;
+
+ /* If the sign bit may have been nonzero before the shift, we
+ need to mark all the places it could have been copied to
+ by the shift as possibly nonzero. */
+ if (inner & ((HOST_WIDE_INT) 1 << (width - 1 - count)))
+ inner |= (((HOST_WIDE_INT) 1 << count) - 1) << (width - count);
+ }
+ else if (code == ASHIFT)
+ inner <<= count;
+ else
+ inner = ((inner << (count % width)
+ | (inner >> (width - (count % width)))) & mode_mask);
+
+ nonzero &= (outer | inner);
+ }
+ break;
+
+ case FFS:
+ case POPCOUNT:
+ /* This is at most the number of bits in the mode. */
+ nonzero = ((HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1;
+ break;
+
+ case CLZ:
+ /* If CLZ has a known value at zero, then the nonzero bits are
+ that value, plus the number of bits in the mode minus one. */
+ if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
+ nonzero |= ((HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
+ else
+ nonzero = -1;
+ break;
+
+ case CTZ:
+ /* If CTZ has a known value at zero, then the nonzero bits are
+ that value, plus the number of bits in the mode minus one. */
+ if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
+ nonzero |= ((HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
+ else
+ nonzero = -1;
+ break;
+
+ case PARITY:
+ nonzero = 1;
+ break;
+
+ case IF_THEN_ELSE:
+ {
+ unsigned HOST_WIDE_INT nonzero_true =
+ cached_nonzero_bits (XEXP (x, 1), mode,
+ known_x, known_mode, known_ret);
+
+ /* Don't call nonzero_bits for the second time if it cannot change
+ anything. */
+ if ((nonzero & nonzero_true) != nonzero)
+ nonzero &= nonzero_true
+ | cached_nonzero_bits (XEXP (x, 2), mode,
+ known_x, known_mode, known_ret);
+ }
+ break;
+
default:
- abort ();
+ break;
}
- return true;
+
+ return nonzero;
}
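The width/low-order-zero-bit arithmetic used above for PLUS and MULT can be tried in isolation; the helpers and masks below are invented examples, not GCC's nonzero_bits:

#include <stdio.h>

static int
floor_log2 (unsigned long long x)
{
  int n = -1;
  while (x)
    {
      x >>= 1;
      n++;
    }
  return n;
}

/* Rebuild a conservative nonzero-bit mask from a bound on the result's
   width and its number of trailing zero bits.  */
static unsigned long long
mask_for (int width, int low, int mode_width)
{
  unsigned long long nonzero = ~0ULL >> (64 - mode_width);

  if (width < mode_width)
    nonzero &= (1ULL << width) - 1;
  if (low > 0)
    nonzero &= ~((1ULL << low) - 1);
  return nonzero;
}

int
main (void)
{
  unsigned long long nz0 = 0x00f0, nz1 = 0x0030;  /* operand masks */
  int width0 = floor_log2 (nz0) + 1;              /* 8 */
  int width1 = floor_log2 (nz1) + 1;              /* 6 */
  int low0 = floor_log2 (nz0 & -nz0);             /* 4 */
  int low1 = floor_log2 (nz1 & -nz1);             /* 4 */

  /* PLUS: at most one bit wider than the wider operand, and the low
     zero bits common to both operands stay zero.  */
  printf ("plus: %#llx\n",
          mask_for (width0 > width1 ? width0 + 1 : width1 + 1,
                    low0 < low1 ? low0 : low1, 16));         /* 0x1f0 */

  /* MULT: the widths add, and so do the trailing-zero counts.  */
  printf ("mult: %#llx\n",
          mask_for (width0 + width1, low0 + low1, 16));      /* 0x3f00 */
  return 0;
}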
-/* Update store after hoisting - replace all stores to pseudo registers
- by new ones to avoid clobbering of values except for store to VAL that will
- be updated to NEW. */
+/* See the macro definition above. */
+#undef cached_num_sign_bit_copies
-static void
-hoist_update_store (rtx insn, rtx *xp, rtx val, rtx new)
+
+/* The function cached_num_sign_bit_copies is a wrapper around
+ num_sign_bit_copies1. It avoids exponential behavior in
+ num_sign_bit_copies1 when X has identical subexpressions on the
+ first or the second level. */
+
+static unsigned int
+cached_num_sign_bit_copies (rtx x, enum machine_mode mode, rtx known_x,
+ enum machine_mode known_mode,
+ unsigned int known_ret)
{
- rtx x = *xp;
+ if (x == known_x && mode == known_mode)
+ return known_ret;
- if (GET_CODE (x) == SCRATCH)
- return;
+ /* Try to find identical subexpressions. If found call
+ num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
+ the precomputed value for the subexpression as KNOWN_RET. */
- if (GET_CODE (x) == SUBREG && SUBREG_REG (x) == val)
- validate_change (insn, xp,
- simplify_gen_subreg (GET_MODE (x), new, GET_MODE (new),
- SUBREG_BYTE (x)), 1);
- if (rtx_equal_p (x, val))
- {
- validate_change (insn, xp, new, 1);
- return;
- }
- if (GET_CODE (x) == SUBREG)
+ if (ARITHMETIC_P (x))
{
- xp = &SUBREG_REG (x);
- x = *xp;
+ rtx x0 = XEXP (x, 0);
+ rtx x1 = XEXP (x, 1);
+
+ /* Check the first level. */
+ if (x0 == x1)
+ return
+ num_sign_bit_copies1 (x, mode, x0, mode,
+ cached_num_sign_bit_copies (x0, mode, known_x,
+ known_mode,
+ known_ret));
+
+ /* Check the second level. */
+ if (ARITHMETIC_P (x0)
+ && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
+ return
+ num_sign_bit_copies1 (x, mode, x1, mode,
+ cached_num_sign_bit_copies (x1, mode, known_x,
+ known_mode,
+ known_ret));
+
+ if (ARITHMETIC_P (x1)
+ && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
+ return
+ num_sign_bit_copies1 (x, mode, x0, mode,
+ cached_num_sign_bit_copies (x0, mode, known_x,
+ known_mode,
+ known_ret));
}
- if (!REG_P (x))
- abort ();
-
- /* We've verified that hard registers are dead, so we may keep the side
- effect. Otherwise replace it by new pseudo. */
- if (REGNO (x) >= FIRST_PSEUDO_REGISTER)
- validate_change (insn, xp, gen_reg_rtx (GET_MODE (x)), 1);
- REG_NOTES (insn)
- = alloc_EXPR_LIST (REG_UNUSED, *xp, REG_NOTES (insn));
+ return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
}
-/* Create a copy of INSN after AFTER replacing store of VAL to NEW
- and each other side effect to pseudo register by new pseudo register. */
+/* Return the number of bits at the high-order end of X that are known to
+ be equal to the sign bit. X will be used in mode MODE; if MODE is
+ VOIDmode, X will be used in its own mode. The returned value will always
+ be between 1 and the number of bits in MODE. */
-rtx
-hoist_insn_after (rtx insn, rtx after, rtx val, rtx new)
+static unsigned int
+num_sign_bit_copies1 (rtx x, enum machine_mode mode, rtx known_x,
+ enum machine_mode known_mode,
+ unsigned int known_ret)
{
- rtx pat;
- int i;
- rtx note;
+ enum rtx_code code = GET_CODE (x);
+ unsigned int bitwidth = GET_MODE_BITSIZE (mode);
+ int num0, num1, result;
+ unsigned HOST_WIDE_INT nonzero;
+
+ /* If we weren't given a mode, use the mode of X. If the mode is still
+ VOIDmode, we don't know anything. Likewise if one of the modes is
+ floating-point. */
- insn = emit_copy_of_insn_after (insn, after);
- pat = PATTERN (insn);
+ if (mode == VOIDmode)
+ mode = GET_MODE (x);
- /* Remove REG_UNUSED notes as we will re-emit them. */
- while ((note = find_reg_note (insn, REG_UNUSED, NULL_RTX)))
- remove_note (insn, note);
+ if (mode == VOIDmode || FLOAT_MODE_P (mode) || FLOAT_MODE_P (GET_MODE (x)))
+ return 1;
- /* To get this working callers must ensure to move everything referenced
- by REG_EQUAL/REG_EQUIV notes too. Lets remove them, it is probably
- easier. */
- while ((note = find_reg_note (insn, REG_EQUAL, NULL_RTX)))
- remove_note (insn, note);
- while ((note = find_reg_note (insn, REG_EQUIV, NULL_RTX)))
- remove_note (insn, note);
+ /* For a smaller object, just ignore the high bits. */
+ if (bitwidth < GET_MODE_BITSIZE (GET_MODE (x)))
+ {
+ num0 = cached_num_sign_bit_copies (x, GET_MODE (x),
+ known_x, known_mode, known_ret);
+ return MAX (1,
+ num0 - (int) (GET_MODE_BITSIZE (GET_MODE (x)) - bitwidth));
+ }
- /* Remove REG_DEAD notes as they might not be valid anymore in case
- we create redundancy. */
- while ((note = find_reg_note (insn, REG_DEAD, NULL_RTX)))
- remove_note (insn, note);
- switch (GET_CODE (pat))
+ if (GET_MODE (x) != VOIDmode && bitwidth > GET_MODE_BITSIZE (GET_MODE (x)))
{
- case SET:
- hoist_update_store (insn, &SET_DEST (pat), val, new);
+#ifndef WORD_REGISTER_OPERATIONS
+ /* If this machine does not do all register operations on the entire
+ register and MODE is wider than the mode of X, we can say nothing
+ at all about the high-order bits. */
+ return 1;
+#else
+ /* Likewise on machines that do, if the mode of the object is smaller
+ than a word and loads of that size don't sign extend, we can say
+ nothing about the high order bits. */
+ if (GET_MODE_BITSIZE (GET_MODE (x)) < BITS_PER_WORD
+#ifdef LOAD_EXTEND_OP
+ && LOAD_EXTEND_OP (GET_MODE (x)) != SIGN_EXTEND
+#endif
+ )
+ return 1;
+#endif
+ }
+
+ switch (code)
+ {
+ case REG:
+
+#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
+ /* If pointers extend signed and this is a pointer in Pmode, say that
+ all the bits above ptr_mode are known to be sign bit copies. */
+ if (! POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode && mode == Pmode
+ && REG_POINTER (x))
+ return GET_MODE_BITSIZE (Pmode) - GET_MODE_BITSIZE (ptr_mode) + 1;
+#endif
+
+ {
+ unsigned int copies_for_hook = 1, copies = 1;
+ rtx new = rtl_hooks.reg_num_sign_bit_copies (x, mode, known_x,
+ known_mode, known_ret,
+ &copies_for_hook);
+
+ if (new)
+ copies = cached_num_sign_bit_copies (new, mode, known_x,
+ known_mode, known_ret);
+
+ if (copies > 1 || copies_for_hook > 1)
+ return MAX (copies, copies_for_hook);
+
+ /* Else, use nonzero_bits to guess num_sign_bit_copies (see below). */
+ }
break;
- case USE:
+
+ case MEM:
+#ifdef LOAD_EXTEND_OP
+ /* Some RISC machines sign-extend all loads smaller than a word. */
+ if (LOAD_EXTEND_OP (GET_MODE (x)) == SIGN_EXTEND)
+ return MAX (1, ((int) bitwidth
+ - (int) GET_MODE_BITSIZE (GET_MODE (x)) + 1));
+#endif
break;
- case CLOBBER:
- hoist_update_store (insn, &XEXP (pat, 0), val, new);
+
+ case CONST_INT:
+ /* If the constant is negative, take its 1's complement and remask.
+ Then see how many zero bits we have. */
+ nonzero = INTVAL (x) & GET_MODE_MASK (mode);
+ if (bitwidth <= HOST_BITS_PER_WIDE_INT
+ && (nonzero & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
+ nonzero = (~nonzero) & GET_MODE_MASK (mode);
+
+ return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
+
+ case SUBREG:
+ /* If this is a SUBREG for a promoted object that is sign-extended
+ and we are looking at it in a wider mode, we know that at least the
+ high-order bits are known to be sign bit copies. */
+
+ if (SUBREG_PROMOTED_VAR_P (x) && ! SUBREG_PROMOTED_UNSIGNED_P (x))
+ {
+ num0 = cached_num_sign_bit_copies (SUBREG_REG (x), mode,
+ known_x, known_mode, known_ret);
+ return MAX ((int) bitwidth
+ - (int) GET_MODE_BITSIZE (GET_MODE (x)) + 1,
+ num0);
+ }
+
+ /* For a smaller object, just ignore the high bits. */
+ if (bitwidth <= GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))))
+ {
+ num0 = cached_num_sign_bit_copies (SUBREG_REG (x), VOIDmode,
+ known_x, known_mode, known_ret);
+ return MAX (1, (num0
+ - (int) (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x)))
+ - bitwidth)));
+ }
+
+#ifdef WORD_REGISTER_OPERATIONS
+#ifdef LOAD_EXTEND_OP
+ /* For paradoxical SUBREGs on machines where all register operations
+ affect the entire register, just look inside. Note that we are
+ passing MODE to the recursive call, so the number of sign bit copies
+ will remain relative to that mode, not the inner mode. */
+
+ /* This works only if loads sign extend. Otherwise, if we get a
+ reload for the inner part, it may be loaded from the stack, and
+ then we lose all sign bit copies that existed before the store
+ to the stack. */
+
+ if ((GET_MODE_SIZE (GET_MODE (x))
+ > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
+ && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) == SIGN_EXTEND
+ && MEM_P (SUBREG_REG (x)))
+ return cached_num_sign_bit_copies (SUBREG_REG (x), mode,
+ known_x, known_mode, known_ret);
+#endif
+#endif
break;
- case PARALLEL:
+
+ case SIGN_EXTRACT:
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT)
+ return MAX (1, (int) bitwidth - INTVAL (XEXP (x, 1)));
+ break;
+
+ case SIGN_EXTEND:
+ return (bitwidth - GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))
+ + cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
+ known_x, known_mode, known_ret));
+
+ case TRUNCATE:
+ /* For a smaller object, just ignore the high bits. */
+ num0 = cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
+ known_x, known_mode, known_ret);
+ return MAX (1, (num0 - (int) (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))
+ - bitwidth)));
+
+ case NOT:
+ return cached_num_sign_bit_copies (XEXP (x, 0), mode,
+ known_x, known_mode, known_ret);
+
+ case ROTATE: case ROTATERT:
+ /* If we are rotating left by a number of bits less than the number
+ of sign bit copies, we can just subtract that amount from the
+ number. */
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && INTVAL (XEXP (x, 1)) >= 0
+ && INTVAL (XEXP (x, 1)) < (int) bitwidth)
+ {
+ num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
+ known_x, known_mode, known_ret);
+ return MAX (1, num0 - (code == ROTATE ? INTVAL (XEXP (x, 1))
+ : (int) bitwidth - INTVAL (XEXP (x, 1))));
+ }
+ break;
+
+ case NEG:
+ /* In general, this subtracts one sign bit copy. But if the value
+ is known to be positive, the number of sign bit copies is the
+ same as that of the input. Finally, if the input has just one bit
+ that might be nonzero, all the bits are copies of the sign bit. */
+ num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
+ known_x, known_mode, known_ret);
+ if (bitwidth > HOST_BITS_PER_WIDE_INT)
+ return num0 > 1 ? num0 - 1 : 1;
+
+ nonzero = nonzero_bits (XEXP (x, 0), mode);
+ if (nonzero == 1)
+ return bitwidth;
+
+ if (num0 > 1
+ && (((HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero))
+ num0--;
+
+ return num0;
+
+ case IOR: case AND: case XOR:
+ case SMIN: case SMAX: case UMIN: case UMAX:
+ /* Logical operations will preserve the number of sign-bit copies.
+ MIN and MAX operations always return one of the operands. */
+ num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
+ known_x, known_mode, known_ret);
+ num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
+ known_x, known_mode, known_ret);
+ return MIN (num0, num1);
+
+ case PLUS: case MINUS:
+ /* For addition and subtraction, we can have a 1-bit carry. However,
+ if we are subtracting 1 from a positive number, there will not
+ be such a carry. Furthermore, if the positive number is known to
+ be 0 or 1, we know the result is either -1 or 0. */
+
+ if (code == PLUS && XEXP (x, 1) == constm1_rtx
+ && bitwidth <= HOST_BITS_PER_WIDE_INT)
+ {
+ nonzero = nonzero_bits (XEXP (x, 0), mode);
+ if ((((HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero) == 0)
+ return (nonzero == 1 || nonzero == 0 ? bitwidth
+ : bitwidth - floor_log2 (nonzero) - 1);
+ }
+
+ num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
+ known_x, known_mode, known_ret);
+ num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
+ known_x, known_mode, known_ret);
+ result = MAX (1, MIN (num0, num1) - 1);
+
+#ifdef POINTERS_EXTEND_UNSIGNED
+ /* If pointers extend signed and this is an addition or subtraction
+ to a pointer in Pmode, all the bits above ptr_mode are known to be
+ sign bit copies. */
+ if (! POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
+ && (code == PLUS || code == MINUS)
+ && REG_P (XEXP (x, 0)) && REG_POINTER (XEXP (x, 0)))
+ result = MAX ((int) (GET_MODE_BITSIZE (Pmode)
+ - GET_MODE_BITSIZE (ptr_mode) + 1),
+ result);
+#endif
+ return result;
+
+ case MULT:
+ /* The number of bits of the product is the sum of the number of
+ bits of both terms. However, unless one of the terms is known
+ to be positive, we must allow for an additional bit since negating
+ a negative number can remove one sign bit copy. */
+
+ num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
+ known_x, known_mode, known_ret);
+ num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
+ known_x, known_mode, known_ret);
+
+ result = bitwidth - (bitwidth - num0) - (bitwidth - num1);
+ if (result > 0
+ && (bitwidth > HOST_BITS_PER_WIDE_INT
+ || (((nonzero_bits (XEXP (x, 0), mode)
+ & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
+ && ((nonzero_bits (XEXP (x, 1), mode)
+ & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))))
+ result--;
+
+ return MAX (1, result);
+
+ case UDIV:
+ /* The result must be <= the first operand. If the first operand
+ has the high bit set, we know nothing about the number of sign
+ bit copies. */
+ if (bitwidth > HOST_BITS_PER_WIDE_INT)
+ return 1;
+ else if ((nonzero_bits (XEXP (x, 0), mode)
+ & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
+ return 1;
+ else
+ return cached_num_sign_bit_copies (XEXP (x, 0), mode,
+ known_x, known_mode, known_ret);
+
+ case UMOD:
+ /* The result must be <= the second operand. */
+ return cached_num_sign_bit_copies (XEXP (x, 1), mode,
+ known_x, known_mode, known_ret);
+
+ case DIV:
+ /* Similar to unsigned division, except that we have to worry about
+ the case where the divisor is negative, in which case we have
+ to add 1. */
+ result = cached_num_sign_bit_copies (XEXP (x, 0), mode,
+ known_x, known_mode, known_ret);
+ if (result > 1
+ && (bitwidth > HOST_BITS_PER_WIDE_INT
+ || (nonzero_bits (XEXP (x, 1), mode)
+ & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
+ result--;
+
+ return result;
+
+ case MOD:
+ result = cached_num_sign_bit_copies (XEXP (x, 1), mode,
+ known_x, known_mode, known_ret);
+ if (result > 1
+ && (bitwidth > HOST_BITS_PER_WIDE_INT
+ || (nonzero_bits (XEXP (x, 1), mode)
+ & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
+ result--;
+
+ return result;
+
+ case ASHIFTRT:
+ /* Shifts by a constant add to the number of bits equal to the
+ sign bit. */
+ num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
+ known_x, known_mode, known_ret);
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && INTVAL (XEXP (x, 1)) > 0)
+ num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));
+
+ return num0;
+
+ case ASHIFT:
+ /* Left shifts destroy copies. */
+ if (GET_CODE (XEXP (x, 1)) != CONST_INT
+ || INTVAL (XEXP (x, 1)) < 0
+ || INTVAL (XEXP (x, 1)) >= (int) bitwidth)
+ return 1;
+
+ num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
+ known_x, known_mode, known_ret);
+ return MAX (1, num0 - INTVAL (XEXP (x, 1)));
+
+ case IF_THEN_ELSE:
+ num0 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
+ known_x, known_mode, known_ret);
+ num1 = cached_num_sign_bit_copies (XEXP (x, 2), mode,
+ known_x, known_mode, known_ret);
+ return MIN (num0, num1);
+
+ case EQ: case NE: case GE: case GT: case LE: case LT:
+ case UNEQ: case LTGT: case UNGE: case UNGT: case UNLE: case UNLT:
+ case GEU: case GTU: case LEU: case LTU:
+ case UNORDERED: case ORDERED:
+ /* If the constant is negative, take its 1's complement and remask.
+ Then see how many zero bits we have. */
+ nonzero = STORE_FLAG_VALUE;
+ if (bitwidth <= HOST_BITS_PER_WIDE_INT
+ && (nonzero & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
+ nonzero = (~nonzero) & GET_MODE_MASK (mode);
+
+ return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
+
+ default:
+ break;
+ }
+
+ /* If we haven't been able to figure it out by one of the above rules,
+ see if some of the high-order bits are known to be zero. If so,
+ count those bits and return one less than that amount. If we can't
+ safely compute the mask for this mode, always return BITWIDTH. */
+
+ bitwidth = GET_MODE_BITSIZE (mode);
+ if (bitwidth > HOST_BITS_PER_WIDE_INT)
+ return 1;
+
+ nonzero = nonzero_bits (x, mode);
+ return nonzero & ((HOST_WIDE_INT) 1 << (bitwidth - 1))
+ ? 1 : bitwidth - floor_log2 (nonzero) - 1;
+}
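The CONST_INT rule above (complement a negative value, then count the guaranteed high-order sign-bit copies) in a standalone sketch; the function name and width handling are invented:

#include <stdio.h>

static int
floor_log2 (unsigned long long x)
{
  int n = -1;
  while (x)
    {
      x >>= 1;
      n++;
    }
  return n;
}

static int
const_sign_bit_copies (long long value, int bitwidth)
{
  unsigned long long mask = ~0ULL >> (64 - bitwidth);
  unsigned long long nonzero = (unsigned long long) value & mask;

  /* If the value is negative in this width, look at its complement.  */
  if (nonzero & (1ULL << (bitwidth - 1)))
    nonzero = ~nonzero & mask;

  return nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1;
}

int
main (void)
{
  printf ("%d\n", const_sign_bit_copies (1, 32));    /* 31 */
  printf ("%d\n", const_sign_bit_copies (-1, 32));   /* 32 */
  printf ("%d\n", const_sign_bit_copies (-200, 32)); /* 24 */
  return 0;
}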
+
+/* Calculate the rtx_cost of a single instruction. A return value of
+ zero indicates an instruction pattern without a known cost. */
+
+int
+insn_rtx_cost (rtx pat)
+{
+ int i, cost;
+ rtx set;
+
+ /* Extract the single set rtx from the instruction pattern.
+ We can't use single_set since we only have the pattern. */
+ if (GET_CODE (pat) == SET)
+ set = pat;
+ else if (GET_CODE (pat) == PARALLEL)
+ {
+ set = NULL_RTX;
for (i = 0; i < XVECLEN (pat, 0); i++)
{
rtx x = XVECEXP (pat, 0, i);
- switch (GET_CODE (x))
+ if (GET_CODE (x) == SET)
{
- case SET:
- hoist_update_store (insn, &SET_DEST (x), val, new);
- break;
- case USE:
- break;
- case CLOBBER:
- hoist_update_store (insn, &SET_DEST (x), val, new);
- break;
- default:
- break;
+ if (set)
+ return 0;
+ set = x;
}
}
- break;
- default:
- abort ();
+ if (!set)
+ return 0;
}
- if (!apply_change_group ())
- abort ();
+ else
+ return 0;
- return insn;
+ cost = rtx_cost (SET_SRC (set), SET);
+ return cost > 0 ? cost : COSTS_N_INSNS (1);
}
+/* Given an insn INSN and condition COND, return the condition in a
+ canonical form to simplify testing by callers. Specifically:
+
+ (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
+ (2) Both operands will be machine operands; (cc0) will have been replaced.
+ (3) If an operand is a constant, it will be the second operand.
+ (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
+ for GE, GEU, and LEU.
+
+ If the condition cannot be understood, or is an inequality floating-point
+ comparison which needs to be reversed, 0 will be returned.
+
+ If REVERSE is nonzero, then reverse the condition prior to canonicalizing it.
+
+ If EARLIEST is nonzero, it is a pointer to a place where the earliest
+ insn used in locating the condition was found. If a replacement test
+ of the condition is desired, it should be placed in front of that
+ insn and we will be sure that the inputs are still valid.
+
+ If WANT_REG is nonzero, we wish the condition to be relative to that
+ register, if possible. Therefore, do not canonicalize the condition
+ further. If ALLOW_CC_MODE is nonzero, allow the condition returned
+ to be a compare to a CC mode register.
+
+ If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
+ and at INSN. */
+
rtx
-hoist_insn_to_edge (rtx insn, edge e, rtx val, rtx new)
+canonicalize_condition (rtx insn, rtx cond, int reverse, rtx *earliest,
+ rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
{
- rtx new_insn;
+ enum rtx_code code;
+ rtx prev = insn;
+ rtx set;
+ rtx tem;
+ rtx op0, op1;
+ int reverse_code = 0;
+ enum machine_mode mode;
+ basic_block bb = BLOCK_FOR_INSN (insn);
+
+ code = GET_CODE (cond);
+ mode = GET_MODE (cond);
+ op0 = XEXP (cond, 0);
+ op1 = XEXP (cond, 1);
+
+ if (reverse)
+ code = reversed_comparison_code (cond, insn);
+ if (code == UNKNOWN)
+ return 0;
+
+ if (earliest)
+ *earliest = insn;
- /* We cannot insert instructions on an abnormal critical edge.
- It will be easier to find the culprit if we die now. */
- if ((e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e))
- abort ();
+ /* If we are comparing a register with zero, see if the register is set
+ in the previous insn to a COMPARE or a comparison operation. Perform
+ the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
+ in cse.c */
- /* Do not use emit_insn_on_edge as we want to preserve notes and similar
- stuff. We also emit CALL_INSNS and firends. */
- if (e->insns == NULL_RTX)
+ while ((GET_RTX_CLASS (code) == RTX_COMPARE
+ || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
+ && op1 == CONST0_RTX (GET_MODE (op0))
+ && op0 != want_reg)
{
- start_sequence ();
- emit_note (NOTE_INSN_DELETED);
+ /* Set nonzero when we find something of interest. */
+ rtx x = 0;
+
+#ifdef HAVE_cc0
+ /* If comparison with cc0, import actual comparison from compare
+ insn. */
+ if (op0 == cc0_rtx)
+ {
+ if ((prev = prev_nonnote_insn (prev)) == 0
+ || !NONJUMP_INSN_P (prev)
+ || (set = single_set (prev)) == 0
+ || SET_DEST (set) != cc0_rtx)
+ return 0;
+
+ op0 = SET_SRC (set);
+ op1 = CONST0_RTX (GET_MODE (op0));
+ if (earliest)
+ *earliest = prev;
+ }
+#endif
+
+ /* If this is a COMPARE, pick up the two things being compared. */
+ if (GET_CODE (op0) == COMPARE)
+ {
+ op1 = XEXP (op0, 1);
+ op0 = XEXP (op0, 0);
+ continue;
+ }
+ else if (!REG_P (op0))
+ break;
+
+ /* Go back to the previous insn. Stop if it is not an INSN. We also
+ stop if it isn't a single set or if it has a REG_INC note because
+ we don't want to bother dealing with it. */
+
+ if ((prev = prev_nonnote_insn (prev)) == 0
+ || !NONJUMP_INSN_P (prev)
+ || FIND_REG_INC_NOTE (prev, NULL_RTX)
+ /* In cfglayout mode, there do not have to be labels at the
+ beginning of a block, or jumps at the end, so the previous
+ conditions would not stop us when we reach bb boundary. */
+ || BLOCK_FOR_INSN (prev) != bb)
+ break;
+
+ set = set_of (op0, prev);
+
+ if (set
+ && (GET_CODE (set) != SET
+ || !rtx_equal_p (SET_DEST (set), op0)))
+ break;
+
+ /* If this is setting OP0, get what it sets it to if it looks
+ relevant. */
+ if (set)
+ {
+ enum machine_mode inner_mode = GET_MODE (SET_DEST (set));
+#ifdef FLOAT_STORE_FLAG_VALUE
+ REAL_VALUE_TYPE fsfv;
+#endif
+
+ /* ??? We may not combine comparisons done in a CCmode with
+ comparisons not done in a CCmode. This is to aid targets
+ like Alpha that have an IEEE compliant EQ instruction, and
+ a non-IEEE compliant BEQ instruction. The use of CCmode is
+ actually artificial, simply to prevent the combination, but
+ should not affect other platforms.
+
+ However, we must allow VOIDmode comparisons to match either
+ CCmode or non-CCmode comparison, because some ports have
+ modeless comparisons inside branch patterns.
+
+ ??? This mode check should perhaps look more like the mode check
+ in simplify_comparison in combine. */
+
+ if ((GET_CODE (SET_SRC (set)) == COMPARE
+ || (((code == NE
+ || (code == LT
+ && GET_MODE_CLASS (inner_mode) == MODE_INT
+ && (GET_MODE_BITSIZE (inner_mode)
+ <= HOST_BITS_PER_WIDE_INT)
+ && (STORE_FLAG_VALUE
+ & ((HOST_WIDE_INT) 1
+ << (GET_MODE_BITSIZE (inner_mode) - 1))))
+#ifdef FLOAT_STORE_FLAG_VALUE
+ || (code == LT
+ && SCALAR_FLOAT_MODE_P (inner_mode)
+ && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
+ REAL_VALUE_NEGATIVE (fsfv)))
+#endif
+ ))
+ && COMPARISON_P (SET_SRC (set))))
+ && (((GET_MODE_CLASS (mode) == MODE_CC)
+ == (GET_MODE_CLASS (inner_mode) == MODE_CC))
+ || mode == VOIDmode || inner_mode == VOIDmode))
+ x = SET_SRC (set);
+ else if (((code == EQ
+ || (code == GE
+ && (GET_MODE_BITSIZE (inner_mode)
+ <= HOST_BITS_PER_WIDE_INT)
+ && GET_MODE_CLASS (inner_mode) == MODE_INT
+ && (STORE_FLAG_VALUE
+ & ((HOST_WIDE_INT) 1
+ << (GET_MODE_BITSIZE (inner_mode) - 1))))
+#ifdef FLOAT_STORE_FLAG_VALUE
+ || (code == GE
+ && SCALAR_FLOAT_MODE_P (inner_mode)
+ && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
+ REAL_VALUE_NEGATIVE (fsfv)))
+#endif
+ ))
+ && COMPARISON_P (SET_SRC (set))
+ && (((GET_MODE_CLASS (mode) == MODE_CC)
+ == (GET_MODE_CLASS (inner_mode) == MODE_CC))
+ || mode == VOIDmode || inner_mode == VOIDmode))
+
+ {
+ reverse_code = 1;
+ x = SET_SRC (set);
+ }
+ else
+ break;
+ }
+
+ else if (reg_set_p (op0, prev))
+ /* If this sets OP0, but not directly, we have to give up. */
+ break;
+
+ if (x)
+ {
+ /* If the caller is expecting the condition to be valid at INSN,
+ make sure X doesn't change before INSN. */
+ if (valid_at_insn_p)
+ if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
+ break;
+ if (COMPARISON_P (x))
+ code = GET_CODE (x);
+ if (reverse_code)
+ {
+ code = reversed_comparison_code (x, prev);
+ if (code == UNKNOWN)
+ return 0;
+ reverse_code = 0;
+ }
+
+ op0 = XEXP (x, 0), op1 = XEXP (x, 1);
+ if (earliest)
+ *earliest = prev;
+ }
}
- else
- push_to_sequence (e->insns);
- new_insn = hoist_insn_after (insn, get_last_insn (), val, new);
+ /* If constant is first, put it last. */
+ if (CONSTANT_P (op0))
+ code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
+
+ /* If OP0 is the result of a comparison, we weren't able to find what
+ was really being compared, so fail. */
+ if (!allow_cc_mode
+ && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
+ return 0;
+
+ /* Canonicalize any ordered comparison with integers involving equality
+ if we can do computations in the relevant mode and we do not
+ overflow. */
+
+ if (GET_MODE_CLASS (GET_MODE (op0)) != MODE_CC
+ && GET_CODE (op1) == CONST_INT
+ && GET_MODE (op0) != VOIDmode
+ && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
+ {
+ HOST_WIDE_INT const_val = INTVAL (op1);
+ unsigned HOST_WIDE_INT uconst_val = const_val;
+ unsigned HOST_WIDE_INT max_val
+ = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
+
+ switch (code)
+ {
+ case LE:
+ if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
+ code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0));
+ break;
+
+ /* When cross-compiling, const_val might be sign-extended from
+ BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */
+ case GE:
+ if ((HOST_WIDE_INT) (const_val & max_val)
+ != (((HOST_WIDE_INT) 1
+ << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
+ code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
+ break;
+
+ case LEU:
+ if (uconst_val < max_val)
+ code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0));
+ break;
+
+ case GEU:
+ if (uconst_val != 0)
+ code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0));
+ break;
- e->insns = get_insns ();
- end_sequence ();
- return new_insn;
+ default:
+ break;
+ }
+ }
+
+ /* Never return CC0; return zero instead. */
+ if (CC0_P (op0))
+ return 0;
+
+ return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
}
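Rule (4) above, rewriting LE/GE/LEU/GEU against a constant into the strict forms when the adjusted constant stays in range, sketched with invented types; this covers only the constant-adjustment step, not the comparison-tracing logic, and assumes the constant is already sign-extended to the operand's mode:

#include <stdio.h>

enum cmp { LT, LE, GT, GE, LTU, LEU, GTU, GEU };

static const char *const names[] =
  { "LT", "LE", "GT", "GE", "LTU", "LEU", "GTU", "GEU" };

static enum cmp
canonicalize (enum cmp code, long long *val, unsigned long long mode_mask)
{
  unsigned long long uval = (unsigned long long) *val & mode_mask;
  long long smax = (long long) (mode_mask >> 1);   /* signed max of the mode */
  long long smin = -smax - 1;                      /* signed min of the mode */

  switch (code)
    {
    case LE:   /* (LE x c) -> (LT x c+1), unless c is already the max.  */
      if (*val != smax)      { *val += 1; return LT; }
      break;
    case GE:   /* (GE x c) -> (GT x c-1), unless c is already the min.  */
      if (*val != smin)      { *val -= 1; return GT; }
      break;
    case LEU:
      if (uval != mode_mask) { *val = (long long) (uval + 1); return LTU; }
      break;
    case GEU:
      if (uval != 0)         { *val = (long long) (uval - 1); return GTU; }
      break;
    default:
      break;
    }
  return code;
}

int
main (void)
{
  long long v = 7;
  enum cmp c = canonicalize (LE, &v, 0xffffffffULL);   /* (LE x 7)  */
  printf ("%s %lld\n", names[c], v);                   /* LT 8      */

  v = 0x7fffffff;                                      /* signed max: keep */
  c = canonicalize (LE, &v, 0xffffffffULL);
  printf ("%s %lld\n", names[c], v);                   /* LE 2147483647 */
  return 0;
}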
-/* Return true if LABEL is a target of JUMP_INSN. This applies only
- to non-complex jumps. That is, direct unconditional, conditional,
- and tablejumps, but not computed jumps or returns. It also does
- not apply to the fallthru case of a conditional jump. */
+/* Given a jump insn JUMP, return the condition that will cause it to branch
+ to its JUMP_LABEL. If the condition cannot be understood, or is an
+ inequality floating-point comparison which needs to be reversed, 0 will
+ be returned.
+
+ If EARLIEST is nonzero, it is a pointer to a place where the earliest
+ insn used in locating the condition was found. If a replacement test
+ of the condition is desired, it should be placed in front of that
+ insn and we will be sure that the inputs are still valid. If EARLIEST
+ is null, the returned condition will be valid at INSN.
+
+ If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
+ compare CC mode register.
+
+ VALID_AT_INSN_P is the same as for canonicalize_condition. */
+
+rtx
+get_condition (rtx jump, rtx *earliest, int allow_cc_mode, int valid_at_insn_p)
+{
+ rtx cond;
+ int reverse;
+ rtx set;
+
+ /* If this is not a standard conditional jump, we can't parse it. */
+ if (!JUMP_P (jump)
+ || ! any_condjump_p (jump))
+ return 0;
+ set = pc_set (jump);
+
+ cond = XEXP (SET_SRC (set), 0);
+
+ /* If this branches to JUMP_LABEL when the condition is false, reverse
+ the condition. */
+ reverse
+ = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
+ && XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (jump);
+
+ return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
+ allow_cc_mode, valid_at_insn_p);
+}
+
+/* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
+ TARGET_MODE_REP_EXTENDED.
+
+ Note that we assume that the property of
+ TARGET_MODE_REP_EXTENDED(B, C) is sticky to the integral modes
+ narrower than mode B. I.e., if A is a mode narrower than B then in
+ order to be able to operate on it in mode B, mode A needs to
+ satisfy the requirements set by the representation of mode B. */
+
+static void
+init_num_sign_bit_copies_in_rep (void)
+{
+ enum machine_mode mode, in_mode;
+
+ for (in_mode = GET_CLASS_NARROWEST_MODE (MODE_INT); in_mode != VOIDmode;
+ in_mode = GET_MODE_WIDER_MODE (mode))
+ for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != in_mode;
+ mode = GET_MODE_WIDER_MODE (mode))
+ {
+ enum machine_mode i;
+
+ /* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
+ extends to the next widest mode. */
+ gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN
+ || GET_MODE_WIDER_MODE (mode) == in_mode);
+
+ /* We are in in_mode. Count how many bits outside of mode
+ have to be copies of the sign-bit. */
+ for (i = mode; i != in_mode; i = GET_MODE_WIDER_MODE (i))
+ {
+ enum machine_mode wider = GET_MODE_WIDER_MODE (i);
+
+ if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND
+ /* We can only check sign-bit copies starting from the
+ top-bit. In order to be able to check the bits we
+ have already seen we pretend that subsequent bits
+ have to be sign-bit copies too. */
+ || num_sign_bit_copies_in_rep [in_mode][mode])
+ num_sign_bit_copies_in_rep [in_mode][mode]
+ += GET_MODE_BITSIZE (wider) - GET_MODE_BITSIZE (i);
+ }
+ }
+}
+
+/* Suppose that truncation from the machine mode of X to MODE is not a
+ no-op. See if there is anything special about X so that we can
+ assume it already contains a truncated value of MODE. */
bool
-label_is_jump_target_p (rtx label, rtx jump_insn)
+truncated_to_mode (enum machine_mode mode, rtx x)
{
- rtx tmp = JUMP_LABEL (jump_insn);
+ /* This register has already been used in MODE without explicit
+ truncation. */
+ if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
+ return true;
- if (label == tmp)
+ /* See if we already satisfy the requirements of MODE. If yes we
+ can just switch to MODE. */
+ if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
+ && (num_sign_bit_copies (x, GET_MODE (x))
+ >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
return true;
- if (tablejump_p (jump_insn, NULL, &tmp))
+ return false;
+}
+
+/* Initialize non_rtx_starting_operands, which is used to speed up
+ for_each_rtx. */
+void
+init_rtlanal (void)
+{
+ int i;
+ for (i = 0; i < NUM_RTX_CODE; i++)
{
- rtvec vec = XVEC (PATTERN (tmp),
- GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC);
- int i, veclen = GET_NUM_ELEM (vec);
-
- for (i = 0; i < veclen; ++i)
- if (XEXP (RTVEC_ELT (vec, i), 0) == label)
- return true;
+ const char *format = GET_RTX_FORMAT (i);
+ const char *first = strpbrk (format, "eEV");
+ non_rtx_starting_operands[i] = first ? first - format : -1;
}
- return false;
+ init_num_sign_bit_copies_in_rep ();
+}
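The strpbrk scan that fills non_rtx_starting_operands can be demonstrated on a few hand-written format strings; the strings below are only illustrative stand-ins for GET_RTX_FORMAT entries ('e' and 'E' mark rtx-valued operands, other letters scalar operands):

#include <stdio.h>
#include <string.h>

int
main (void)
{
  const char *formats[] = { "ee", "wE", "s", "iuu0000" };
  size_t i;

  for (i = 0; i < sizeof formats / sizeof formats[0]; i++)
    {
      /* Offset of the first 'e', 'E' or 'V' operand, or -1 if none.  */
      const char *first = strpbrk (formats[i], "eEV");
      printf ("%-8s -> %d\n", formats[i],
              first ? (int) (first - formats[i]) : -1);
    }
  return 0;
}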
+
+/* Check whether this is a constant pool constant. */
+bool
+constant_pool_constant_p (rtx x)
+{
+ x = avoid_constant_pool_reference (x);
+ return GET_CODE (x) == CONST_DOUBLE;
}