Diffstat (limited to 'contrib/gcc/jump.c')
-rw-r--r--  contrib/gcc/jump.c  693
1 file changed, 562 insertions(+), 131 deletions(-)
diff --git a/contrib/gcc/jump.c b/contrib/gcc/jump.c
index 88a6c3a..ff4680c 100644
--- a/contrib/gcc/jump.c
+++ b/contrib/gcc/jump.c
@@ -1,5 +1,5 @@
/* Optimize jump instructions, for GNU compiler.
- Copyright (C) 1987, 88, 89, 91-94, 1995 Free Software Foundation, Inc.
+ Copyright (C) 1987, 88, 89, 91-97, 1998 Free Software Foundation, Inc.
This file is part of GNU CC.
@@ -52,14 +52,19 @@ Boston, MA 02111-1307, USA. */
from other passes as well. */
#include "config.h"
+#include "system.h"
#include "rtl.h"
#include "flags.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "insn-config.h"
#include "insn-flags.h"
+#include "insn-attr.h"
+#include "recog.h"
#include "expr.h"
#include "real.h"
+#include "except.h"
+#include "toplev.h"
/* ??? Eventually must record somehow the labels used by jumps
from nested functions. */
@@ -100,7 +105,7 @@ int can_reach_end;
Normally they are not significant, because if A and B jump to C,
and R dies in A, it must die in B. But this might not be true after
stack register conversion, and we must compare death notes in that
- case. */
+ case. */
static int cross_jump_death_matters = 0;
@@ -113,7 +118,11 @@ static void mark_jump_label PROTO((rtx, rtx, int));
static void delete_computation PROTO((rtx));
static void delete_from_jump_chain PROTO((rtx));
static int delete_labelref_insn PROTO((rtx, rtx, int));
+static void mark_modified_reg PROTO((rtx, rtx));
static void redirect_tablejump PROTO((rtx, rtx));
+#ifndef HAVE_cc0
+static rtx find_insert_position PROTO((rtx, rtx));
+#endif
/* Delete no-op jumps and optimize jumps to jumps
and jumps around jumps.
@@ -143,6 +152,7 @@ jump_optimize (f, cross_jump, noop_moves, after_regscan)
{
register rtx insn, next, note;
int changed;
+ int old_max_reg;
int first = 1;
int max_uid = 0;
rtx last_insn;
@@ -173,6 +183,12 @@ jump_optimize (f, cross_jump, noop_moves, after_regscan)
max_uid++;
+ /* If we are performing cross jump optimizations, then initialize
+ tables mapping UIDs to EH regions to avoid incorrect movement
+ of insns from one EH region to another. */
+ if (flag_exceptions && cross_jump)
+ init_insn_eh_region (f, max_uid);
+
/* Delete insns following barriers, up to next label. */
for (insn = f; insn;)
@@ -208,11 +224,10 @@ jump_optimize (f, cross_jump, noop_moves, after_regscan)
also make a chain of all returns. */
for (insn = f; insn; insn = NEXT_INSN (insn))
- if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
- && ! INSN_DELETED_P (insn))
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
{
mark_jump_label (PATTERN (insn), insn, cross_jump);
- if (GET_CODE (insn) == JUMP_INSN)
+ if (! INSN_DELETED_P (insn) && GET_CODE (insn) == JUMP_INSN)
{
if (JUMP_LABEL (insn) != 0 && simplejump_p (insn))
{
@@ -234,6 +249,16 @@ jump_optimize (f, cross_jump, noop_moves, after_regscan)
for (insn = forced_labels; insn; insn = XEXP (insn, 1))
LABEL_NUSES (XEXP (insn, 0))++;
+ check_exception_handler_labels ();
+
+ /* Keep track of labels used for marking handlers for exception
+ regions; they cannot usually be deleted. */
+
+ for (insn = exception_handler_labels; insn; insn = XEXP (insn, 1))
+ LABEL_NUSES (XEXP (insn, 0))++;
+
+ exception_optimize ();
+
/* Delete all labels already not referenced.
Also find the last insn. */
@@ -283,6 +308,9 @@ jump_optimize (f, cross_jump, noop_moves, after_regscan)
/* Zero the "deleted" flag of all the "deleted" insns. */
for (insn = f; insn; insn = NEXT_INSN (insn))
INSN_DELETED_P (insn) = 0;
+
+ /* Show that the jump chain is not valid. */
+ jump_chain = 0;
return;
}
@@ -381,9 +409,14 @@ jump_optimize (f, cross_jump, noop_moves, after_regscan)
if (GET_CODE (p) != INSN)
break;
pbody = PATTERN (p);
- if (GET_CODE (pbody) == SET)
+ if (GET_CODE (pbody) != SET)
break;
dest = SET_DEST (pbody);
+ /* Allow a no-op move between the adjust and the push. */
+ if (GET_CODE (dest) == REG
+ && GET_CODE (SET_SRC (pbody)) == REG
+ && REGNO (dest) == REGNO (SET_SRC (pbody)))
+ continue;
if (! (GET_CODE (dest) == MEM
&& GET_CODE (XEXP (dest, 0)) == POST_INC
&& XEXP (XEXP (dest, 0), 0) == stack_pointer_rtx))
@@ -438,28 +471,40 @@ jump_optimize (f, cross_jump, noop_moves, after_regscan)
sreg, NULL_PTR, dreg,
GET_MODE (SET_SRC (body)));
-#ifdef PRESERVE_DEATH_INFO_REGNO_P
- /* Deleting insn could lose a death-note for SREG or DREG
- so don't do it if final needs accurate death-notes. */
- if (! PRESERVE_DEATH_INFO_REGNO_P (sreg)
- && ! PRESERVE_DEATH_INFO_REGNO_P (dreg))
-#endif
+ if (tem != 0
+ && GET_MODE (tem) == GET_MODE (SET_DEST (body)))
{
/* DREG may have been the target of a REG_DEAD note in
the insn which makes INSN redundant. If so, reorg
would still think it is dead. So search for such a
note and delete it if we find it. */
- for (trial = prev_nonnote_insn (insn);
- trial && GET_CODE (trial) != CODE_LABEL;
- trial = prev_nonnote_insn (trial))
- if (find_regno_note (trial, REG_DEAD, dreg))
- {
- remove_death (dreg, trial);
- break;
- }
-
- if (tem != 0
- && GET_MODE (tem) == GET_MODE (SET_DEST (body)))
+ if (! find_regno_note (insn, REG_UNUSED, dreg))
+ for (trial = prev_nonnote_insn (insn);
+ trial && GET_CODE (trial) != CODE_LABEL;
+ trial = prev_nonnote_insn (trial))
+ if (find_regno_note (trial, REG_DEAD, dreg))
+ {
+ remove_death (dreg, trial);
+ break;
+ }
+#ifdef PRESERVE_DEATH_INFO_REGNO_P
+ /* Deleting insn could lose a death-note for SREG
+ so don't do it if final needs accurate
+ death-notes. */
+ if (PRESERVE_DEATH_INFO_REGNO_P (sreg)
+ && (trial = find_regno_note (insn, REG_DEAD, sreg)))
+ {
+ /* Change this into a USE so that we won't emit
+ code for it, but still can keep the note. */
+ PATTERN (insn)
+ = gen_rtx_USE (VOIDmode, XEXP (trial, 0));
+ INSN_CODE (insn) = -1;
+ /* Remove all reg notes but the REG_DEAD one. */
+ REG_NOTES (insn) = trial;
+ XEXP (trial, 1) = NULL_RTX;
+ }
+ else
+#endif
delete_insn (insn);
}
}
@@ -499,7 +544,7 @@ jump_optimize (f, cross_jump, noop_moves, after_regscan)
else if (GET_CODE (body) == PARALLEL)
{
/* If each part is a set between two identical registers or
- a USE or CLOBBER, delete the insn. */
+ a USE or CLOBBER, delete the insn. */
int i, sreg, dreg;
rtx tem;
@@ -547,11 +592,11 @@ jump_optimize (f, cross_jump, noop_moves, after_regscan)
if (set && GET_CODE (SET_DEST (set)) == REG
&& REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
- && regno_first_uid[REGNO (SET_DEST (set))] == INSN_UID (insn)
+ && REGNO_FIRST_UID (REGNO (SET_DEST (set))) == INSN_UID (insn)
/* We use regno_last_note_uid so as not to delete the setting
of a reg that's used in notes. A subsequent optimization
might arrange to use that reg for real. */
- && regno_last_note_uid[REGNO (SET_DEST (set))] == INSN_UID (insn)
+ && REGNO_LAST_NOTE_UID (REGNO (SET_DEST (set))) == INSN_UID (insn)
&& ! side_effects_p (SET_SRC (set))
&& ! find_reg_note (insn, REG_RETVAL, 0))
delete_insn (insn);
@@ -559,6 +604,7 @@ jump_optimize (f, cross_jump, noop_moves, after_regscan)
/* Now iterate optimizing jumps until nothing changes over one pass. */
changed = 1;
+ old_max_reg = max_reg_num ();
while (changed)
{
changed = 0;
@@ -568,8 +614,9 @@ jump_optimize (f, cross_jump, noop_moves, after_regscan)
rtx reallabelprev;
rtx temp, temp1, temp2, temp3, temp4, temp5, temp6;
rtx nlabel;
- int this_is_simplejump, this_is_condjump, reversep;
+ int this_is_simplejump, this_is_condjump, reversep = 0;
int this_is_condjump_in_parallel;
+
#if 0
/* If NOT the first iteration, if this is the last jump pass
(just before final), do the special peephole optimizations.
@@ -682,7 +729,15 @@ jump_optimize (f, cross_jump, noop_moves, after_regscan)
&& (temp1 = prev_nonnote_insn (JUMP_LABEL (insn))) != 0
&& (GET_CODE (temp1) == BARRIER
|| (GET_CODE (temp1) == INSN
- && rtx_equal_p (PATTERN (temp), PATTERN (temp1)))))
+ && rtx_equal_p (PATTERN (temp), PATTERN (temp1))))
+ /* Don't do this optimization if we have a loop containing only
+ the USE instruction, and the loop start label has a usage
+ count of 1. This is because we will redo this optimization
+ every time through the outer loop, and jump opt will never
+ exit. */
+ && ! ((temp2 = prev_nonnote_insn (temp)) != 0
+ && temp2 == JUMP_LABEL (insn)
+ && LABEL_NUSES (temp2) == 1))
{
if (GET_CODE (temp1) == BARRIER)
{
@@ -729,16 +784,14 @@ jump_optimize (f, cross_jump, noop_moves, after_regscan)
&& GET_CODE (temp3) == INSN
&& (temp4 = single_set (temp3)) != 0
&& GET_CODE (temp1 = SET_DEST (temp4)) == REG
-#ifdef SMALL_REGISTER_CLASSES
- && REGNO (temp1) >= FIRST_PSEUDO_REGISTER
-#endif
+ && (! SMALL_REGISTER_CLASSES
+ || REGNO (temp1) >= FIRST_PSEUDO_REGISTER)
&& (temp2 = next_active_insn (insn)) != 0
&& GET_CODE (temp2) == INSN
&& (temp4 = single_set (temp2)) != 0
&& rtx_equal_p (SET_DEST (temp4), temp1)
- && (GET_CODE (SET_SRC (temp4)) == REG
- || GET_CODE (SET_SRC (temp4)) == SUBREG
- || CONSTANT_P (SET_SRC (temp4)))
+ && ! side_effects_p (SET_SRC (temp4))
+ && ! may_trap_p (SET_SRC (temp4))
&& (REG_NOTES (temp2) == 0
|| ((REG_NOTE_KIND (REG_NOTES (temp2)) == REG_EQUAL
|| REG_NOTE_KIND (REG_NOTES (temp2)) == REG_EQUIV)
@@ -765,7 +818,10 @@ jump_optimize (f, cross_jump, noop_moves, after_regscan)
or a jump to somewhere else. */
rtx target = JUMP_LABEL (temp);
int nuses = LABEL_NUSES (target);
- rtx p, q;
+ rtx p;
+#ifdef HAVE_cc0
+ rtx q;
+#endif
/* Set P to the first jump insn that goes around "x = a;". */
for (p = temp; nuses && p; p = prev_nonnote_insn (p))
@@ -806,7 +862,7 @@ jump_optimize (f, cross_jump, noop_moves, after_regscan)
&& ! reg_referenced_between_p (temp1, p, NEXT_INSN (temp3))
&& ! reg_set_between_p (temp1, p, temp3)
&& (GET_CODE (SET_SRC (temp4)) == CONST_INT
- || ! reg_set_between_p (SET_SRC (temp4), p, temp2)))
+ || ! modified_between_p (SET_SRC (temp4), p, temp2)))
{
emit_insn_after_with_line_notes (PATTERN (temp2), p, temp2);
delete_insn (temp2);
@@ -833,6 +889,93 @@ jump_optimize (f, cross_jump, noop_moves, after_regscan)
}
}
+ /* Simplify if (...) { x = a; goto l; } x = b; by converting it
+ to x = a; if (...) goto l; x = b;
+ if A is sufficiently simple, the test doesn't involve X,
+ and nothing in the test modifies A or X.
+
+ If we have small register classes, we also can't do this if X
+ is a hard register.
+
+ If the "x = a;" insn has any REG_NOTES, we don't do this because
+ of the possibility that we are running after CSE and there is a
+ REG_EQUAL note that is only valid if the branch has already been
+ taken. If we move the insn with the REG_EQUAL note, we may
+ fold the comparison to always be false in a later CSE pass.
+ (We could also delete the REG_NOTES when moving the insn, but it
+ seems simpler to not move it.) An exception is that we can move
+ the insn if the only note is a REG_EQUAL or REG_EQUIV whose
+ value is the same as "a".
+
+ INSN is the goto.
+
+ We set:
+
+ TEMP to the jump insn preceding "x = a;"
+ TEMP1 to X
+ TEMP2 to the insn that sets "x = b;"
+ TEMP3 to the insn that sets "x = a;"
+ TEMP4 to the set of "x = a"; */
+
+ if (this_is_simplejump
+ && (temp2 = next_active_insn (insn)) != 0
+ && GET_CODE (temp2) == INSN
+ && (temp4 = single_set (temp2)) != 0
+ && GET_CODE (temp1 = SET_DEST (temp4)) == REG
+ && (! SMALL_REGISTER_CLASSES
+ || REGNO (temp1) >= FIRST_PSEUDO_REGISTER)
+ && (temp3 = prev_active_insn (insn)) != 0
+ && GET_CODE (temp3) == INSN
+ && (temp4 = single_set (temp3)) != 0
+ && rtx_equal_p (SET_DEST (temp4), temp1)
+ && ! side_effects_p (SET_SRC (temp4))
+ && ! may_trap_p (SET_SRC (temp4))
+ && (REG_NOTES (temp3) == 0
+ || ((REG_NOTE_KIND (REG_NOTES (temp3)) == REG_EQUAL
+ || REG_NOTE_KIND (REG_NOTES (temp3)) == REG_EQUIV)
+ && XEXP (REG_NOTES (temp3), 1) == 0
+ && rtx_equal_p (XEXP (REG_NOTES (temp3), 0),
+ SET_SRC (temp4))))
+ && (temp = prev_active_insn (temp3)) != 0
+ && condjump_p (temp) && ! simplejump_p (temp)
+ /* TEMP must skip over the "x = a;" insn. */
+ && prev_real_insn (JUMP_LABEL (temp)) == insn
+ && no_labels_between_p (temp, insn))
+ {
+ rtx prev_label = JUMP_LABEL (temp);
+ rtx insert_after = prev_nonnote_insn (temp);
+
+#ifdef HAVE_cc0
+ /* We cannot insert anything between a set of cc and its use. */
+ if (insert_after && GET_RTX_CLASS (GET_CODE (insert_after)) == 'i'
+ && sets_cc0_p (PATTERN (insert_after)))
+ insert_after = prev_nonnote_insn (insert_after);
+#endif
+ ++LABEL_NUSES (prev_label);
+
+ if (insert_after
+ && no_labels_between_p (insert_after, temp)
+ && ! reg_referenced_between_p (temp1, insert_after, temp3)
+ && ! reg_referenced_between_p (temp1, temp3,
+ NEXT_INSN (temp2))
+ && ! reg_set_between_p (temp1, insert_after, temp)
+ && ! modified_between_p (SET_SRC (temp4), insert_after, temp)
+ && invert_jump (temp, JUMP_LABEL (insn)))
+ {
+ emit_insn_after_with_line_notes (PATTERN (temp3),
+ insert_after, temp3);
+ delete_insn (temp3);
+ delete_insn (insn);
+ /* Set NEXT to an insn that we know won't go away. */
+ next = temp2;
+ changed = 1;
+ }
+ if (prev_label && --LABEL_NUSES (prev_label) == 0)
+ delete_insn (prev_label);
+ if (changed)
+ continue;
+ }
+
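At the source level, the rewrite added in this hunk amounts to the following hand-written sketch (hypothetical names; per the conditions above, A must be simple, the test must not involve or modify X or A, and X must not be a hard register under small register classes):

int before (int cond, int a, int b)
{
  int x;
  if (cond)
    {
      x = a;
      goto l;
    }
  x = b;
l:
  return x;
}

int after (int cond, int a, int b)
{
  int x = a;      /* "x = a" hoisted above the branch */
  if (cond)
    goto l;       /* the jump now skips only "x = b" */
  x = b;
l:
  return x;
}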
#ifndef HAVE_cc0
/* If we have if (...) x = exp; and branches are expensive,
EXP is a single insn, does not have any side effects, cannot
@@ -847,7 +990,7 @@ jump_optimize (f, cross_jump, noop_moves, after_regscan)
We set:
TEMP to the "x = exp;" insn.
- TEMP1 to the single set in the "x = exp; insn.
+ TEMP1 to the single set in the "x = exp;" insn.
TEMP2 to "x". */
if (! reload_completed
@@ -862,10 +1005,8 @@ jump_optimize (f, cross_jump, noop_moves, after_regscan)
&& JUMP_LABEL (temp2) == JUMP_LABEL (insn)))
&& (temp1 = single_set (temp)) != 0
&& (temp2 = SET_DEST (temp1), GET_CODE (temp2) == REG)
- && GET_MODE_CLASS (GET_MODE (temp2)) == MODE_INT
-#ifdef SMALL_REGISTER_CLASSES
- && REGNO (temp2) >= FIRST_PSEUDO_REGISTER
-#endif
+ && (! SMALL_REGISTER_CLASSES
+ || REGNO (temp2) >= FIRST_PSEUDO_REGISTER)
&& GET_CODE (SET_SRC (temp1)) != REG
&& GET_CODE (SET_SRC (temp1)) != SUBREG
&& GET_CODE (SET_SRC (temp1)) != CONST_INT
@@ -875,13 +1016,20 @@ jump_optimize (f, cross_jump, noop_moves, after_regscan)
{
rtx new = gen_reg_rtx (GET_MODE (temp2));
- if (validate_change (temp, &SET_DEST (temp1), new, 0))
+ if ((temp3 = find_insert_position (insn, temp))
+ && validate_change (temp, &SET_DEST (temp1), new, 0))
{
next = emit_insn_after (gen_move_insn (temp2, new), insn);
emit_insn_after_with_line_notes (PATTERN (temp),
- PREV_INSN (insn), temp);
+ PREV_INSN (temp3), temp);
delete_insn (temp);
reallabelprev = prev_active_insn (JUMP_LABEL (insn));
+
+ if (after_regscan)
+ {
+ reg_scan_update (temp3, NEXT_INSN (next), old_max_reg);
+ old_max_reg = max_reg_num ();
+ }
}
}
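Roughly, the hoisting in the hunks above has this source-level shape (a sketch with made-up expressions; EXP must be side-effect free, non-trapping, and cheap, and find_insert_position supplies a safe point for the unconditional computation):

int before (int cond, int a, int b, int x)
{
  if (cond)
    x = a * b + 1;        /* EXP is branched around when COND is false */
  return x;
}

int after (int cond, int a, int b, int x)
{
  int tmp = a * b + 1;    /* EXP computed unconditionally into a new pseudo */
  if (cond)
    x = tmp;              /* only a cheap move remains under the branch */
  return x;
}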
@@ -905,9 +1053,8 @@ jump_optimize (f, cross_jump, noop_moves, after_regscan)
&& (temp1 = single_set (temp)) != 0
&& (temp2 = SET_DEST (temp1), GET_CODE (temp2) == REG)
&& GET_MODE_CLASS (GET_MODE (temp2)) == MODE_INT
-#ifdef SMALL_REGISTER_CLASSES
- && REGNO (temp2) >= FIRST_PSEUDO_REGISTER
-#endif
+ && (! SMALL_REGISTER_CLASSES
+ || REGNO (temp2) >= FIRST_PSEUDO_REGISTER)
&& ! side_effects_p (SET_SRC (temp1))
&& ! may_trap_p (SET_SRC (temp1))
&& rtx_cost (SET_SRC (temp1), SET) < 10
@@ -919,23 +1066,34 @@ jump_optimize (f, cross_jump, noop_moves, after_regscan)
{
rtx new = gen_reg_rtx (GET_MODE (temp2));
- if (validate_change (temp, &SET_DEST (temp1), new, 0))
+ if ((temp5 = find_insert_position (insn, temp))
+ && (temp6 = find_insert_position (insn, temp3))
+ && validate_change (temp, &SET_DEST (temp1), new, 0))
{
+ /* Use the earliest of temp5 and temp6. */
+ if (temp5 != insn)
+ temp6 = temp5;
next = emit_insn_after (gen_move_insn (temp2, new), insn);
emit_insn_after_with_line_notes (PATTERN (temp),
- PREV_INSN (insn), temp);
+ PREV_INSN (temp6), temp);
emit_insn_after_with_line_notes
(replace_rtx (PATTERN (temp3), temp2, new),
- PREV_INSN (insn), temp3);
+ PREV_INSN (temp6), temp3);
delete_insn (temp);
delete_insn (temp3);
reallabelprev = prev_active_insn (JUMP_LABEL (insn));
+
+ if (after_regscan)
+ {
+ reg_scan_update (temp6, NEXT_INSN (next), old_max_reg);
+ old_max_reg = max_reg_num ();
+ }
}
}
/* Finally, handle the case where two insns are used to
compute EXP but a temporary register is used. Here we must
- ensure that the temporary register is not used anywhere else. */
+ ensure that the temporary register is not used anywhere else. */
if (! reload_completed
&& after_regscan
@@ -958,17 +1116,16 @@ jump_optimize (f, cross_jump, noop_moves, after_regscan)
&& (temp5 = SUBREG_REG (temp5),
GET_CODE (temp5) == REG))))
&& REGNO (temp5) >= FIRST_PSEUDO_REGISTER
- && regno_first_uid[REGNO (temp5)] == INSN_UID (temp)
- && regno_last_uid[REGNO (temp5)] == INSN_UID (temp3)
+ && REGNO_FIRST_UID (REGNO (temp5)) == INSN_UID (temp)
+ && REGNO_LAST_UID (REGNO (temp5)) == INSN_UID (temp3)
&& ! side_effects_p (SET_SRC (temp1))
&& ! may_trap_p (SET_SRC (temp1))
&& rtx_cost (SET_SRC (temp1), SET) < 10
&& (temp4 = single_set (temp3)) != 0
&& (temp2 = SET_DEST (temp4), GET_CODE (temp2) == REG)
&& GET_MODE_CLASS (GET_MODE (temp2)) == MODE_INT
-#ifdef SMALL_REGISTER_CLASSES
- && REGNO (temp2) >= FIRST_PSEUDO_REGISTER
-#endif
+ && (! SMALL_REGISTER_CLASSES
+ || REGNO (temp2) >= FIRST_PSEUDO_REGISTER)
&& rtx_equal_p (SET_DEST (temp4), temp2)
&& ! side_effects_p (SET_SRC (temp4))
&& ! may_trap_p (SET_SRC (temp4))
@@ -976,16 +1133,27 @@ jump_optimize (f, cross_jump, noop_moves, after_regscan)
{
rtx new = gen_reg_rtx (GET_MODE (temp2));
- if (validate_change (temp3, &SET_DEST (temp4), new, 0))
+ if ((temp5 = find_insert_position (insn, temp))
+ && (temp6 = find_insert_position (insn, temp3))
+ && validate_change (temp3, &SET_DEST (temp4), new, 0))
{
+ /* Use the earliest of temp5 and temp6. */
+ if (temp5 != insn)
+ temp6 = temp5;
next = emit_insn_after (gen_move_insn (temp2, new), insn);
emit_insn_after_with_line_notes (PATTERN (temp),
- PREV_INSN (insn), temp);
+ PREV_INSN (temp6), temp);
emit_insn_after_with_line_notes (PATTERN (temp3),
- PREV_INSN (insn), temp3);
+ PREV_INSN (temp6), temp3);
delete_insn (temp);
delete_insn (temp3);
reallabelprev = prev_active_insn (JUMP_LABEL (insn));
+
+ if (after_regscan)
+ {
+ reg_scan_update (temp6, NEXT_INSN (next), old_max_reg);
+ old_max_reg = max_reg_num ();
+ }
}
}
#endif /* HAVE_cc0 */
@@ -1019,19 +1187,19 @@ jump_optimize (f, cross_jump, noop_moves, after_regscan)
&& GET_CODE (temp) == INSN
&& GET_CODE (PATTERN (temp)) == SET
&& GET_CODE (temp1 = SET_DEST (PATTERN (temp))) == REG
-#ifdef SMALL_REGISTER_CLASSES
- && REGNO (temp1) >= FIRST_PSEUDO_REGISTER
-#endif
- && (GET_CODE (temp2 = SET_SRC (PATTERN (temp))) == REG
- || GET_CODE (temp2) == SUBREG
- /* ??? How about floating point constants? */
- || GET_CODE (temp2) == CONST_INT)
+ && (! SMALL_REGISTER_CLASSES
+ || REGNO (temp1) >= FIRST_PSEUDO_REGISTER)
+ && ! side_effects_p (temp2 = SET_SRC (PATTERN (temp)))
+ && ! may_trap_p (temp2)
/* Allow either form, but prefer the former if both apply.
There is no point in using the old value of TEMP1 if
it is a register, since cse will alias them. It can
lose if the old value were a hard register since CSE
- won't replace hard registers. */
- && (((temp3 = reg_set_last (temp1, insn)) != 0)
+ won't replace hard registers. Avoid using TEMP3 if
+ small register classes and it is a hard register. */
+ && (((temp3 = reg_set_last (temp1, insn)) != 0
+ && ! (SMALL_REGISTER_CLASSES && GET_CODE (temp3) == REG
+ && REGNO (temp3) < FIRST_PSEUDO_REGISTER))
/* Make the latter case look like x = x; if (...) x = b; */
|| (temp3 = temp1, 1))
/* INSN must either branch to the insn after TEMP or the insn
@@ -1045,13 +1213,9 @@ jump_optimize (f, cross_jump, noop_moves, after_regscan)
We could handle BLKmode if (1) emit_store_flag could
and (2) we could find the size reliably. */
&& GET_MODE (XEXP (temp4, 0)) != BLKmode
- /* No point in doing any of this if branches are cheap or we
- don't have conditional moves. */
- && (BRANCH_COST >= 2
-#ifdef HAVE_conditional_move
- || 1
-#endif
- )
+ /* Even if branches are cheap, the store_flag optimization
+ can win when the operation to be performed can be
+ expressed directly. */
#ifdef HAVE_cc0
/* If the previous insn sets CC0 and something else, we can't
do this since we are going to delete that insn. */
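The store-flag optimization referred to above replaces the branch with a direct computation of the comparison result; a minimal sketch:

int before (int a)
{
  int x = 0;
  if (a == 0)             /* conditional jump around the store */
    x = 1;
  return x;
}

int after (int a)
{
  return a == 0;          /* emit_store_flag form: no branch at all */
}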
@@ -1101,7 +1265,7 @@ jump_optimize (f, cross_jump, noop_moves, after_regscan)
if (target)
{
- rtx seq1,seq2;
+ rtx seq1,seq2,last;
/* Save the conditional move sequence but don't emit it
yet. On some machines, like the alpha, it is possible
@@ -1121,13 +1285,22 @@ jump_optimize (f, cross_jump, noop_moves, after_regscan)
end_sequence ();
emit_insns_before (seq1, temp5);
- emit_insns_before (seq2, insn);
+ /* Insert conditional move after insn, to be sure that
+ the jump and a possible compare won't be separated. */
+ last = emit_insns_after (seq2, insn);
/* ??? We can also delete the insn that sets X to A.
Flow will do it too though. */
delete_insn (temp);
next = NEXT_INSN (insn);
delete_jump (insn);
+
+ if (after_regscan)
+ {
+ reg_scan_update (seq1, NEXT_INSN (last), old_max_reg);
+ old_max_reg = max_reg_num ();
+ }
+
changed = 1;
continue;
}
@@ -1160,8 +1333,21 @@ jump_optimize (f, cross_jump, noop_moves, after_regscan)
can reverse the condition. See if (3) applies possibly
by reversing the condition. Prefer reversing to (4) when
branches are very expensive. */
- && ((reversep = 0, temp2 == const0_rtx)
- || (temp3 == const0_rtx
+ && (((BRANCH_COST >= 2
+ || STORE_FLAG_VALUE == -1
+ || (STORE_FLAG_VALUE == 1
+ /* Check that the mask is a power of two,
+ so that it can probably be generated
+ with a shift. */
+ && GET_CODE (temp3) == CONST_INT
+ && exact_log2 (INTVAL (temp3)) >= 0))
+ && (reversep = 0, temp2 == const0_rtx))
+ || ((BRANCH_COST >= 2
+ || STORE_FLAG_VALUE == -1
+ || (STORE_FLAG_VALUE == 1
+ && GET_CODE (temp2) == CONST_INT
+ && exact_log2 (INTVAL (temp2)) >= 0))
+ && temp3 == const0_rtx
&& (reversep = can_reverse_comparison_p (temp4, insn)))
|| (BRANCH_COST >= 2
&& GET_CODE (temp2) == CONST_INT
@@ -1297,6 +1483,13 @@ jump_optimize (f, cross_jump, noop_moves, after_regscan)
delete_insn (temp);
next = NEXT_INSN (insn);
delete_jump (insn);
+
+ if (after_regscan)
+ {
+ reg_scan_update (seq, NEXT_INSN (next), old_max_reg);
+ old_max_reg = max_reg_num ();
+ }
+
changed = 1;
continue;
}
@@ -1362,6 +1555,7 @@ jump_optimize (f, cross_jump, noop_moves, after_regscan)
into our sequence. */
if ((temp5 = prev_active_insn (insn)) != 0
+ && no_labels_between_p (temp5, insn)
&& GET_CODE (temp5) == INSN
&& (temp6 = single_set (temp5)) != 0
&& rtx_equal_p (temp2, SET_DEST (temp6))
@@ -1415,6 +1609,13 @@ jump_optimize (f, cross_jump, noop_moves, after_regscan)
delete_insn (prev_nonnote_insn (insn));
#endif
delete_insn (insn);
+
+ if (after_regscan)
+ {
+ reg_scan_update (seq, NEXT_INSN (next), old_max_reg);
+ old_max_reg = max_reg_num ();
+ }
+
changed = 1;
continue;
}
@@ -1505,7 +1706,7 @@ jump_optimize (f, cross_jump, noop_moves, after_regscan)
else if (ultimate && GET_CODE (ultimate) != RETURN)
ultimate = XEXP (ultimate, 0);
- if (ultimate)
+ if (ultimate && JUMP_LABEL(insn) != ultimate)
changed |= redirect_jump (insn, ultimate);
}
}
@@ -1730,10 +1931,96 @@ jump_optimize (f, cross_jump, noop_moves, after_regscan)
&& (next_active_insn (JUMP_LABEL (insn))
== next_active_insn (JUMP_LABEL (temp))))
{
- delete_jump (insn);
- changed = 1;
- continue;
+ rtx tem = temp;
+
+ /* ??? Optional. Disables some optimizations, but makes
+ gcov output more accurate with -O. */
+ if (flag_test_coverage && !reload_completed)
+ for (tem = insn; tem != temp; tem = NEXT_INSN (tem))
+ if (GET_CODE (tem) == NOTE && NOTE_LINE_NUMBER (tem) > 0)
+ break;
+
+ if (tem == temp)
+ {
+ delete_jump (insn);
+ changed = 1;
+ continue;
+ }
}
+#ifdef HAVE_trap
+ /* Detect a conditional jump jumping over an unconditional trap. */
+ else if (HAVE_trap
+ && this_is_condjump && ! this_is_simplejump
+ && reallabelprev != 0
+ && GET_CODE (reallabelprev) == INSN
+ && GET_CODE (PATTERN (reallabelprev)) == TRAP_IF
+ && TRAP_CONDITION (PATTERN (reallabelprev)) == const_true_rtx
+ && prev_active_insn (reallabelprev) == insn
+ && no_labels_between_p (insn, reallabelprev)
+ && (temp2 = get_condition (insn, &temp4))
+ && can_reverse_comparison_p (temp2, insn))
+ {
+ rtx new = gen_cond_trap (reverse_condition (GET_CODE (temp2)),
+ XEXP (temp2, 0), XEXP (temp2, 1),
+ TRAP_CODE (PATTERN (reallabelprev)));
+
+ if (new)
+ {
+ emit_insn_before (new, temp4);
+ delete_insn (reallabelprev);
+ delete_jump (insn);
+ changed = 1;
+ continue;
+ }
+ }
+ /* Detect a jump jumping to an unconditional trap. */
+ else if (HAVE_trap && this_is_condjump
+ && (temp = next_active_insn (JUMP_LABEL (insn)))
+ && GET_CODE (temp) == INSN
+ && GET_CODE (PATTERN (temp)) == TRAP_IF
+ && (this_is_simplejump
+ || (temp2 = get_condition (insn, &temp4))))
+ {
+ rtx tc = TRAP_CONDITION (PATTERN (temp));
+
+ if (tc == const_true_rtx
+ || (! this_is_simplejump && rtx_equal_p (temp2, tc)))
+ {
+ rtx new;
+ /* Replace an unconditional jump to a trap with a trap. */
+ if (this_is_simplejump)
+ {
+ emit_barrier_after (emit_insn_before (gen_trap (), insn));
+ delete_jump (insn);
+ changed = 1;
+ continue;
+ }
+ new = gen_cond_trap (GET_CODE (temp2), XEXP (temp2, 0),
+ XEXP (temp2, 1),
+ TRAP_CODE (PATTERN (temp)));
+ if (new)
+ {
+ emit_insn_before (new, temp4);
+ delete_jump (insn);
+ changed = 1;
+ continue;
+ }
+ }
+ /* If the trap condition and jump condition are mutually
+ exclusive, redirect the jump to the following insn. */
+ else if (GET_RTX_CLASS (GET_CODE (tc)) == '<'
+ && ! this_is_simplejump
+ && swap_condition (GET_CODE (temp2)) == GET_CODE (tc)
+ && rtx_equal_p (XEXP (tc, 0), XEXP (temp2, 0))
+ && rtx_equal_p (XEXP (tc, 1), XEXP (temp2, 1))
+ && redirect_jump (insn, get_label_after (temp)))
+ {
+ changed = 1;
+ continue;
+ }
+ }
+#endif
+
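In source terms, the first trap case above behaves like this sketch, with __builtin_trap as a stand-in for the target's trap instruction (the second case similarly replaces a jump whose target is an unconditional trap with a trap at the jump site):

void before (int x)
{
  if (x != 0)
    goto ok;              /* conditional jump over the trap */
  __builtin_trap ();
ok:
  return;
}

void after (int x)
{
  if (x == 0)             /* reversed condition folded into a conditional trap */
    __builtin_trap ();
}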
/* Detect a conditional jump jumping over an unconditional jump. */
else if ((this_is_condjump || this_is_condjump_in_parallel)
@@ -1916,7 +2203,7 @@ jump_optimize (f, cross_jump, noop_moves, after_regscan)
/* Now that the jump has been tensioned,
try cross jumping: check for identical code
- before the jump and before its target label. */
+ before the jump and before its target label. */
/* First, cross jumping of conditional jumps: */
@@ -1947,11 +2234,11 @@ jump_optimize (f, cross_jump, noop_moves, after_regscan)
/* Make the old conditional jump
into an unconditional one. */
SET_SRC (PATTERN (insn))
- = gen_rtx (LABEL_REF, VOIDmode, JUMP_LABEL (insn));
+ = gen_rtx_LABEL_REF (VOIDmode, JUMP_LABEL (insn));
INSN_CODE (insn) = -1;
emit_barrier_after (insn);
/* Add to jump_chain unless this is a new label
- whose UID is too large. */
+ whose UID is too large. */
if (INSN_UID (JUMP_LABEL (insn)) < max_jump_chain)
{
jump_chain[INSN_UID (insn)]
@@ -2147,6 +2434,11 @@ duplicate_loop_exit_test (loop_start)
is a NOTE_INSN_LOOP_BEG because this means we have a nested loop
is a NOTE_INSN_BLOCK_{BEG,END} because duplicating these notes
is not valid
+
+
+ We also do not do this if we find an insn with ASM_OPERANDS. While
+ this restriction should not be necessary, copying an insn with
+ ASM_OPERANDS can confuse asm_noperands in some cases.
Also, don't do this if the exit code is more than 20 insns. */
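For context, duplicate_loop_exit_test copies the bottom-of-loop exit test to the loop entry, effectively rotating a while loop into a guarded do-while; a hypothetical source-level view:

int before (int n)
{
  int i = 0, s = 0;
  while (i < n)           /* emitted as: goto test; body: ...; test: if (i < n) goto body */
    s += i++;
  return s;
}

int after (int n)
{
  int i = 0, s = 0;
  if (i < n)              /* duplicated exit test now guards loop entry */
    do
      s += i++;
    while (i < n);
  return s;
}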
@@ -2172,18 +2464,29 @@ duplicate_loop_exit_test (loop_start)
This can be avoided by checking here for NOTE_INSN_LOOP_CONT. */
if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG
- || NOTE_LINE_NUMBER (insn) == NOTE_INSN_BLOCK_BEG
- || NOTE_LINE_NUMBER (insn) == NOTE_INSN_BLOCK_END
|| NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_CONT)
return 0;
+
+ if (optimize < 2
+ && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_BLOCK_BEG
+ || NOTE_LINE_NUMBER (insn) == NOTE_INSN_BLOCK_END))
+ /* If we were to duplicate this code, we would not move
+ the BLOCK notes, and so debugging the moved code would
+ be difficult. Thus, we only move the code with -O2 or
+ higher. */
+ return 0;
+
break;
case JUMP_INSN:
case INSN:
if (++num_insns > 20
|| find_reg_note (insn, REG_RETVAL, NULL_RTX)
- || find_reg_note (insn, REG_LIBCALL, NULL_RTX))
+ || find_reg_note (insn, REG_LIBCALL, NULL_RTX)
+ || asm_noperands (PATTERN (insn)) > 0)
return 0;
break;
+ default:
+ break;
}
}
@@ -2202,10 +2505,10 @@ duplicate_loop_exit_test (loop_start)
|| (GET_CODE (reg) == SUBREG
&& (reg = SUBREG_REG (reg), GET_CODE (reg) == REG)))
&& REGNO (reg) >= FIRST_PSEUDO_REGISTER
- && regno_first_uid[REGNO (reg)] == INSN_UID (insn))
+ && REGNO_FIRST_UID (REGNO (reg)) == INSN_UID (insn))
{
for (p = NEXT_INSN (insn); p != lastexit; p = NEXT_INSN (p))
- if (regno_last_uid[REGNO (reg)] == INSN_UID (p))
+ if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (p))
break;
if (p != lastexit)
@@ -2252,8 +2555,9 @@ duplicate_loop_exit_test (loop_start)
for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
if (REG_NOTE_KIND (link) != REG_LABEL)
REG_NOTES (copy)
- = copy_rtx (gen_rtx (EXPR_LIST, REG_NOTE_KIND (link),
- XEXP (link, 0), REG_NOTES (copy)));
+ = copy_rtx (gen_rtx_EXPR_LIST (REG_NOTE_KIND (link),
+ XEXP (link, 0),
+ REG_NOTES (copy)));
if (reg_map && REG_NOTES (copy))
replace_regs (REG_NOTES (copy), reg_map, max_reg, 1);
break;
@@ -2379,7 +2683,6 @@ find_cross_jump (e1, e2, minimum, f1, f2)
rtx last1 = 0, last2 = 0;
rtx afterlast1 = 0, afterlast2 = 0;
- rtx prev1;
*f1 = 0;
*f2 = 0;
@@ -2412,6 +2715,13 @@ find_cross_jump (e1, e2, minimum, f1, f2)
if (i2 == 0 || GET_CODE (i1) != GET_CODE (i2))
break;
+ /* Avoid moving insns across EH regions.
+
+ ??? This is only necessary if i1 or i2 can throw an exception. */
+ if (flag_exceptions
+ && !in_same_eh_region (i1, i2))
+ break;
+
p1 = PATTERN (i1);
p2 = PATTERN (i2);
@@ -2433,13 +2743,13 @@ find_cross_jump (e1, e2, minimum, f1, f2)
#ifdef STACK_REGS
/* If cross_jump_death_matters is not 0, the insn's mode
indicates whether or not the insn contains any stack-like
- regs. */
+ regs. */
if (!lose && cross_jump_death_matters && GET_MODE (i1) == QImode)
{
/* If register stack conversion has already been done, then
death notes must also be compared before it is certain that
- the two instruction streams match. */
+ the two instruction streams match. */
rtx note;
HARD_REG_SET i1_regset, i2_regset;
@@ -2466,7 +2776,19 @@ find_cross_jump (e1, e2, minimum, f1, f2)
}
#endif
- if (lose || GET_CODE (p1) != GET_CODE (p2)
+ /* Don't allow old-style asm or volatile extended asms to be accepted
+ for cross jumping purposes. It is conceptually correct to allow
+ them, since cross-jumping preserves the dynamic instruction order
+ even though it is changing the static instruction order. However,
+ if an asm is being used to emit an assembler pseudo-op, such as
+ the MIPS `.set reorder' pseudo-op, then the static instruction order
+ matters and it must be preserved. */
+ if (GET_CODE (p1) == ASM_INPUT || GET_CODE (p2) == ASM_INPUT
+ || (GET_CODE (p1) == ASM_OPERANDS && MEM_VOLATILE_P (p1))
+ || (GET_CODE (p2) == ASM_OPERANDS && MEM_VOLATILE_P (p2)))
+ lose = 1;
+
+ if (lose || GET_CODE (p1) != GET_CODE (p2)
|| ! rtx_renumbered_equal_p (p1, p2))
{
/* The following code helps take care of G++ cleanups. */
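A hypothetical example of what the new guard rejects: cross jumping would merge the textually identical tails of the two arms, but an assembler pseudo-op's position in the static instruction stream must be preserved:

void f (int c)
{
  if (c)
    __asm__ __volatile__ (".set reorder");  /* MIPS-style pseudo-op */
  else
    __asm__ __volatile__ (".set reorder");  /* identical text, but must not be merged */
}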
@@ -2922,6 +3244,9 @@ comparison_dominates_p (code1, code2)
if (code2 == GEU || code2 == NE)
return 1;
break;
+
+ default:
+ break;
}
return 0;
@@ -3054,7 +3379,8 @@ follow_jumps (label)
(depth < 10
&& (insn = next_active_insn (value)) != 0
&& GET_CODE (insn) == JUMP_INSN
- && (JUMP_LABEL (insn) != 0 || GET_CODE (PATTERN (insn)) == RETURN)
+ && ((JUMP_LABEL (insn) != 0 && simplejump_p (insn))
+ || GET_CODE (PATTERN (insn)) == RETURN)
&& (next = NEXT_INSN (insn))
&& GET_CODE (next) == BARRIER);
depth++)
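The chain being collapsed looks like this at the source level (hypothetical); the stricter test above now requires each intermediate hop to be a simple jump:

void f (int c)
{
  if (c)
    goto L1;              /* after threading, effectively targets L2 directly */
  return;
L1:
  goto L2;                /* unconditional hop that follow_jumps sees through */
L2:
  return;
}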
@@ -3067,7 +3393,10 @@ follow_jumps (label)
if (!reload_completed)
for (tem = value; tem != insn; tem = NEXT_INSN (tem))
if (GET_CODE (tem) == NOTE
- && NOTE_LINE_NUMBER (tem) == NOTE_INSN_LOOP_BEG)
+ && (NOTE_LINE_NUMBER (tem) == NOTE_INSN_LOOP_BEG
+ /* ??? Optional. Disables some optimizations, but makes
+ gcov output more accurate with -O. */
+ || (flag_test_coverage && NOTE_LINE_NUMBER (tem) > 0)))
return value;
/* If we have found a cycle, make the insn jump to itself. */
@@ -3188,12 +3517,16 @@ mark_jump_label (x, insn, cross_jump)
break;
else if (! cross_jump
&& (NOTE_LINE_NUMBER (next) == NOTE_INSN_LOOP_BEG
- || NOTE_LINE_NUMBER (next) == NOTE_INSN_FUNCTION_END))
+ || NOTE_LINE_NUMBER (next) == NOTE_INSN_FUNCTION_END
+ /* ??? Optional. Disables some optimizations, but
+ makes gcov output more accurate with -O. */
+ || (flag_test_coverage && NOTE_LINE_NUMBER (next) > 0)))
break;
}
XEXP (x, 0) = label;
- ++LABEL_NUSES (label);
+ if (! insn || ! INSN_DELETED_P (insn))
+ ++LABEL_NUSES (label);
if (insn)
{
@@ -3210,17 +3543,21 @@ mark_jump_label (x, insn, cross_jump)
is one. */
else if (! find_reg_note (insn, REG_LABEL, label))
{
- rtx next = next_real_insn (label);
- /* Don't record labels that refer to dispatch tables.
- This is not necessary, since the tablejump
- references the same label.
- And if we did record them, flow.c would make worse code. */
- if (next == 0
- || ! (GET_CODE (next) == JUMP_INSN
- && (GET_CODE (PATTERN (next)) == ADDR_VEC
- || GET_CODE (PATTERN (next)) == ADDR_DIFF_VEC)))
- REG_NOTES (insn) = gen_rtx (EXPR_LIST, REG_LABEL, label,
- REG_NOTES (insn));
+ /* This code used to ignore labels which referred to dispatch
+ tables to avoid flow.c generating worse code.
+
+ However, in the presence of global optimizations like
+ gcse which call find_basic_blocks without calling
+ life_analysis, not recording such labels will lead
+ to compiler aborts because of inconsistencies in the
+ flow graph. So we go ahead and record the label.
+
+ It may also be the case that the optimization argument
+ is no longer valid because of the more accurate cfg
+ we build in find_basic_blocks -- it no longer pessimizes
+ code when it finds a REG_LABEL note. */
+ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_LABEL, label,
+ REG_NOTES (insn));
}
}
return;
@@ -3230,13 +3567,17 @@ mark_jump_label (x, insn, cross_jump)
ADDR_DIFF_VEC. Don't set the JUMP_LABEL of a vector. */
case ADDR_VEC:
case ADDR_DIFF_VEC:
- {
- int eltnum = code == ADDR_DIFF_VEC ? 1 : 0;
+ if (! INSN_DELETED_P (insn))
+ {
+ int eltnum = code == ADDR_DIFF_VEC ? 1 : 0;
- for (i = 0; i < XVECLEN (x, eltnum); i++)
- mark_jump_label (XVECEXP (x, eltnum, i), NULL_RTX, cross_jump);
- return;
- }
+ for (i = 0; i < XVECLEN (x, eltnum); i++)
+ mark_jump_label (XVECEXP (x, eltnum, i), NULL_RTX, cross_jump);
+ }
+ return;
+
+ default:
+ break;
}
fmt = GET_RTX_FORMAT (code);
@@ -3304,12 +3645,23 @@ delete_computation (insn)
delete_computation (prev);
else
/* Otherwise, show that cc0 won't be used. */
- REG_NOTES (prev) = gen_rtx (EXPR_LIST, REG_UNUSED,
- cc0_rtx, REG_NOTES (prev));
+ REG_NOTES (prev) = gen_rtx_EXPR_LIST (REG_UNUSED,
+ cc0_rtx, REG_NOTES (prev));
}
}
#endif
+#ifdef INSN_SCHEDULING
+ /* ?!? The schedulers do not keep REG_DEAD notes accurate after
+ reload has completed. The schedulers need to be fixed. Until
+ they are, we must not rely on the death notes here. */
+ if (reload_completed && flag_schedule_insns_after_reload)
+ {
+ delete_insn (insn);
+ return;
+ }
+#endif
+
for (note = REG_NOTES (insn); note; note = next)
{
rtx our_prev;
@@ -3603,7 +3955,21 @@ invert_jump (jump, nlabel)
return 0;
if (redirect_jump (jump, nlabel))
- return 1;
+ {
+ if (flag_branch_probabilities)
+ {
+ rtx note = find_reg_note (jump, REG_BR_PROB, 0);
+
+ /* An inverted jump means that a probability taken becomes a
+ probability not taken. Subtract the branch probability from the
+ probability base to convert it back to a taken probability.
+ (We don't flip the probability on a branch that's never taken.) */
+ if (note && XINT (XEXP (note, 0), 0) >= 0)
+ XINT (XEXP (note, 0), 0) = REG_BR_PROB_BASE - XINT (XEXP (note, 0), 0);
+ }
+
+ return 1;
+ }
if (! invert_exp (PATTERN (jump), jump))
/* This should just be putting it back the way it was. */
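A quick worked example of the note adjustment above, assuming the conventional REG_BR_PROB_BASE of 10000: a branch annotated as taken 30% of the time carries a probability of 3000, and after inversion it becomes 10000 - 3000 = 7000, i.e. taken 70% of the time.

#define REG_BR_PROB_BASE 10000

int inverted_prob (int prob)
{
  return REG_BR_PROB_BASE - prob;   /* e.g. 3000 -> 7000 */
}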
@@ -3640,9 +4006,9 @@ invert_exp (x, insn)
if (can_reverse_comparison_p (comp, insn)
&& validate_change (insn, &XEXP (x, 0),
- gen_rtx (reverse_condition (GET_CODE (comp)),
- GET_MODE (comp), XEXP (comp, 0),
- XEXP (comp, 1)), 0))
+ gen_rtx_fmt_ee (reverse_condition (GET_CODE (comp)),
+ GET_MODE (comp), XEXP (comp, 0),
+ XEXP (comp, 1)), 0))
return 1;
tem = XEXP (x, 1);
@@ -3783,22 +4149,22 @@ redirect_exp (loc, olabel, nlabel, insn)
if (nlabel)
XEXP (x, 0) = nlabel;
else
- return validate_change (insn, loc, gen_rtx (RETURN, VOIDmode), 0);
+ return validate_change (insn, loc, gen_rtx_RETURN (VOIDmode), 0);
return 1;
}
}
else if (code == RETURN && olabel == 0)
{
- x = gen_rtx (LABEL_REF, VOIDmode, nlabel);
+ x = gen_rtx_LABEL_REF (VOIDmode, nlabel);
if (loc == &PATTERN (insn))
- x = gen_rtx (SET, VOIDmode, pc_rtx, x);
+ x = gen_rtx_SET (VOIDmode, pc_rtx, x);
return validate_change (insn, loc, x, 0);
}
if (code == SET && nlabel == 0 && SET_DEST (x) == pc_rtx
&& GET_CODE (SET_SRC (x)) == LABEL_REF
&& XEXP (SET_SRC (x), 0) == olabel)
- return validate_change (insn, loc, gen_rtx (RETURN, VOIDmode), 0);
+ return validate_change (insn, loc, gen_rtx_RETURN (VOIDmode), 0);
fmt = GET_RTX_FORMAT (code);
for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
@@ -3990,6 +4356,13 @@ rtx_renumbered_equal_p (x, y)
case SYMBOL_REF:
return XSTR (x, 0) == XSTR (y, 0);
+
+ case CODE_LABEL:
+ /* If we didn't match EQ equality above, they aren't the same. */
+ return 0;
+
+ default:
+ break;
}
/* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent. */
@@ -4134,7 +4507,7 @@ static int modified_mem;
static void
mark_modified_reg (dest, x)
rtx dest;
- rtx x;
+ rtx x ATTRIBUTE_UNUSED;
{
int regno, i;
@@ -4279,7 +4652,10 @@ thread_jumps (f, max_reg, flag_before_loop)
if (rtx_equal_for_thread_p (b1op0, b2op0, b2)
&& rtx_equal_for_thread_p (b1op1, b2op1, b2)
&& (comparison_dominates_p (code1, code2)
- || comparison_dominates_p (code1, reverse_condition (code2))))
+ || (comparison_dominates_p (code1, reverse_condition (code2))
+ && can_reverse_comparison_p (XEXP (SET_SRC (PATTERN (b1)),
+ 0),
+ b1))))
{
t1 = prev_nonnote_insn (b1);
t2 = prev_nonnote_insn (b2);
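A hypothetical case the dominance test covers: once the first comparison is known true, the second comparison on the same operands is already decided, so the first branch can be threaded past the second. The added can_reverse_comparison_p check guards the reversed-condition variant, which is not always safe (for example, for IEEE floating-point comparisons):

int g (int x, int y)
{
  if (x == y)
    {
      /* nothing here modifies x or y */
      if (x >= y)         /* dominated: EQ true implies GE true */
        return 1;
    }
  return 0;
}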
@@ -4308,6 +4684,7 @@ thread_jumps (f, max_reg, flag_before_loop)
rtx prev = PREV_INSN (new_label);
if (flag_before_loop
+ && GET_CODE (prev) == NOTE
&& NOTE_LINE_NUMBER (prev) == NOTE_INSN_LOOP_BEG)
{
/* Don't thread to the loop label. If a loop
@@ -4366,6 +4743,13 @@ rtx_equal_for_thread_p (x, y, yinsn)
if (GET_MODE (x) != GET_MODE (y))
return 0;
+ /* For floating-point, consider everything unequal. This is a bit
+ pessimistic, but this pass would only rarely do anything for FP
+ anyway. */
+ if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
+ && FLOAT_MODE_P (GET_MODE (x)) && ! flag_fast_math)
+ return 0;
+
/* For commutative operations, the RTXs match if the operands match in any
order. Also handle the simple binary and unary cases without a loop. */
if (code == EQ || code == NE || GET_RTX_CLASS (code) == 'c')
@@ -4401,7 +4785,7 @@ rtx_equal_for_thread_p (x, y, yinsn)
/* If this is the first time we are seeing a register on the `Y'
side, see if it is the last use. If not, we can't thread the
jump, so mark it as not equivalent. */
- if (regno_last_uid[REGNO (y)] != INSN_UID (yinsn))
+ if (REGNO_LAST_UID (REGNO (y)) != INSN_UID (yinsn))
return 0;
return 1;
@@ -4413,7 +4797,7 @@ rtx_equal_for_thread_p (x, y, yinsn)
case MEM:
/* If memory modified or either volatile, not equivalent.
- Else, check address. */
+ Else, check address. */
if (modified_mem || MEM_VOLATILE_P (x) || MEM_VOLATILE_P (y))
return 0;
@@ -4450,6 +4834,9 @@ rtx_equal_for_thread_p (x, y, yinsn)
case SYMBOL_REF:
return XSTR (x, 0) == XSTR (y, 0);
+
+ default:
+ break;
}
if (x == y)
@@ -4511,3 +4898,47 @@ rtx_equal_for_thread_p (x, y, yinsn)
}
return 1;
}
+
+
+#ifndef HAVE_cc0
+/* Return the insn that NEW can be safely inserted in front of starting at
+ the jump insn INSN. Return 0 if it is not safe to do this jump
+ optimization. Note that NEW must contain a single set. */
+
+static rtx
+find_insert_position (insn, new)
+ rtx insn;
+ rtx new;
+{
+ int i;
+ rtx prev;
+
+ /* If NEW does not clobber anything, it is safe to insert NEW before INSN. */
+ if (GET_CODE (PATTERN (new)) != PARALLEL)
+ return insn;
+
+ for (i = XVECLEN (PATTERN (new), 0) - 1; i >= 0; i--)
+ if (GET_CODE (XVECEXP (PATTERN (new), 0, i)) == CLOBBER
+ && reg_overlap_mentioned_p (XEXP (XVECEXP (PATTERN (new), 0, i), 0),
+ insn))
+ break;
+
+ if (i < 0)
+ return insn;
+
+ /* There is a good chance that the previous insn PREV sets the thing
+ being clobbered (often the CC in a hard reg). If PREV does not
+ use what NEW sets, we can insert NEW before PREV. */
+
+ prev = prev_active_insn (insn);
+ for (i = XVECLEN (PATTERN (new), 0) - 1; i >= 0; i--)
+ if (GET_CODE (XVECEXP (PATTERN (new), 0, i)) == CLOBBER
+ && reg_overlap_mentioned_p (XEXP (XVECEXP (PATTERN (new), 0, i), 0),
+ insn)
+ && ! modified_in_p (XEXP (XVECEXP (PATTERN (new), 0, i), 0),
+ prev))
+ return 0;
+
+ return reg_mentioned_p (SET_DEST (single_set (new)), prev) ? 0 : prev;
+}
+#endif /* !HAVE_cc0 */
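As the earlier hunks show, callers treat a zero return as "no safe position, abandon the transformation"; the call pattern, adapted from this patch, is roughly:

/* pos, insn, temp, temp1 and new as in the hunks above (sketch only) */
if ((pos = find_insert_position (insn, temp))
    && validate_change (temp, &SET_DEST (temp1), new, 0))
  emit_insn_after_with_line_notes (PATTERN (temp), PREV_INSN (pos), temp);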