summaryrefslogtreecommitdiffstats
path: root/contrib/gcc/cfgcleanup.c
diff options
context:
space:
mode:
Diffstat (limited to 'contrib/gcc/cfgcleanup.c')
-rw-r--r--contrib/gcc/cfgcleanup.c429
1 files changed, 234 insertions, 195 deletions
diff --git a/contrib/gcc/cfgcleanup.c b/contrib/gcc/cfgcleanup.c
index 9589d1f..008c8da 100644
--- a/contrib/gcc/cfgcleanup.c
+++ b/contrib/gcc/cfgcleanup.c
@@ -43,20 +43,18 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
#include "recog.h"
#include "toplev.h"
#include "cselib.h"
+#include "params.h"
#include "tm_p.h"
#include "target.h"
-#include "obstack.h"
-
/* cleanup_cfg maintains following flags for each basic block. */
enum bb_flags
{
- /* Set if life info needs to be recomputed for given BB. */
- BB_UPDATE_LIFE = 1,
/* Set if BB is the forwarder block to avoid too many
forwarder_block_p calls. */
- BB_FORWARDER_BLOCK = 2
+ BB_FORWARDER_BLOCK = 1,
+ BB_NONTHREADABLE_BLOCK = 2
};
#define BB_FLAGS(BB) (enum bb_flags) (BB)->aux
@@ -75,7 +73,6 @@ static int flow_find_cross_jump PARAMS ((int, basic_block, basic_block,
rtx *, rtx *));
static bool insns_match_p PARAMS ((int, rtx, rtx));
-static bool delete_unreachable_blocks PARAMS ((void));
static bool label_is_jump_target_p PARAMS ((rtx, rtx));
static bool tail_recursion_label_p PARAMS ((rtx));
static void merge_blocks_move_predecessor_nojumps PARAMS ((basic_block,
@@ -91,6 +88,7 @@ static edge thread_jump PARAMS ((int, edge, basic_block));
static bool mark_effect PARAMS ((rtx, bitmap));
static void notice_new_block PARAMS ((basic_block));
static void update_forwarder_flag PARAMS ((basic_block));
+static int mentions_nonequal_regs PARAMS ((rtx *, void *));
/* Set flags for newly created block. */
@@ -101,7 +99,6 @@ notice_new_block (bb)
if (!bb)
return;
- BB_SET_FLAG (bb, BB_UPDATE_LIFE);
if (forwarder_block_p (bb))
BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
}
@@ -149,7 +146,7 @@ try_simplify_condjump (cbranch_block)
unconditional jump. */
jump_block = cbranch_fallthru_edge->dest;
if (jump_block->pred->pred_next
- || jump_block->index == n_basic_blocks - 1
+ || jump_block->next_bb == EXIT_BLOCK_PTR
|| !FORWARDER_BLOCK_P (jump_block))
return false;
jump_dest_block = jump_block->succ->dest;
@@ -192,8 +189,8 @@ try_simplify_condjump (cbranch_block)
static bool
mark_effect (exp, nonequal)
- rtx exp;
- regset nonequal;
+ rtx exp;
+ regset nonequal;
{
int regno;
rtx dest;
@@ -201,43 +198,69 @@ mark_effect (exp, nonequal)
{
/* In case we do clobber the register, mark it as equal, as we know the
value is dead so it don't have to match. */
- case CLOBBER:
- if (REG_P (XEXP (exp, 0)))
- {
- dest = XEXP (exp, 0);
- regno = REGNO (dest);
- CLEAR_REGNO_REG_SET (nonequal, regno);
- if (regno < FIRST_PSEUDO_REGISTER)
- {
- int n = HARD_REGNO_NREGS (regno, GET_MODE (dest));
- while (--n > 0)
- CLEAR_REGNO_REG_SET (nonequal, regno + n);
- }
- }
- return false;
+ case CLOBBER:
+ if (REG_P (XEXP (exp, 0)))
+ {
+ dest = XEXP (exp, 0);
+ regno = REGNO (dest);
+ CLEAR_REGNO_REG_SET (nonequal, regno);
+ if (regno < FIRST_PSEUDO_REGISTER)
+ {
+ int n = HARD_REGNO_NREGS (regno, GET_MODE (dest));
+ while (--n > 0)
+ CLEAR_REGNO_REG_SET (nonequal, regno + n);
+ }
+ }
+ return false;
- case SET:
- if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
- return false;
- dest = SET_DEST (exp);
- if (dest == pc_rtx)
- return false;
- if (!REG_P (dest))
- return true;
- regno = REGNO (dest);
- SET_REGNO_REG_SET (nonequal, regno);
- if (regno < FIRST_PSEUDO_REGISTER)
- {
- int n = HARD_REGNO_NREGS (regno, GET_MODE (dest));
- while (--n > 0)
- SET_REGNO_REG_SET (nonequal, regno + n);
- }
+ case SET:
+ if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
return false;
-
- default:
+ dest = SET_DEST (exp);
+ if (dest == pc_rtx)
return false;
+ if (!REG_P (dest))
+ return true;
+ regno = REGNO (dest);
+ SET_REGNO_REG_SET (nonequal, regno);
+ if (regno < FIRST_PSEUDO_REGISTER)
+ {
+ int n = HARD_REGNO_NREGS (regno, GET_MODE (dest));
+ while (--n > 0)
+ SET_REGNO_REG_SET (nonequal, regno + n);
+ }
+ return false;
+
+ default:
+ return false;
}
}
+
+/* Return nonzero if X is a register set in regset DATA.
+   Called via for_each_rtx. */
+static int
+mentions_nonequal_regs (x, data)
+ rtx *x;
+ void *data;
+{
+ regset nonequal = (regset) data;
+ if (REG_P (*x))
+ {
+ int regno;
+
+ regno = REGNO (*x);
+ if (REGNO_REG_SET_P (nonequal, regno))
+ return 1;
+ if (regno < FIRST_PSEUDO_REGISTER)
+ {
+ int n = HARD_REGNO_NREGS (regno, GET_MODE (*x));
+ while (--n > 0)
+ if (REGNO_REG_SET_P (nonequal, regno + n))
+ return 1;
+ }
+ }
+ return 0;
+}
/* Attempt to prove that the basic block B will have no side effects and
   always continues in the same edge if reached via E.  Return the edge
   if it exists, NULL otherwise. */
@@ -255,18 +278,29 @@ thread_jump (mode, e, b)
regset nonequal;
bool failed = false;
+ if (BB_FLAGS (b) & BB_NONTHREADABLE_BLOCK)
+ return NULL;
+
/* At the moment, we do handle only conditional jumps, but later we may
want to extend this code to tablejumps and others. */
if (!e->src->succ->succ_next || e->src->succ->succ_next->succ_next)
return NULL;
if (!b->succ || !b->succ->succ_next || b->succ->succ_next->succ_next)
- return NULL;
+ {
+ BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
+ return NULL;
+ }
/* Second branch must end with onlyjump, as we will eliminate the jump. */
- if (!any_condjump_p (e->src->end) || !any_condjump_p (b->end)
- || !onlyjump_p (b->end))
+ if (!any_condjump_p (e->src->end))
return NULL;
+ if (!any_condjump_p (b->end) || !onlyjump_p (b->end))
+ {
+ BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
+ return NULL;
+ }
+
set1 = pc_set (e->src->end);
set2 = pc_set (b->end);
if (((e->flags & EDGE_FALLTHRU) != 0)
@@ -300,7 +334,10 @@ thread_jump (mode, e, b)
for (insn = NEXT_INSN (b->head); insn != NEXT_INSN (b->end);
insn = NEXT_INSN (insn))
if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
- return NULL;
+ {
+ BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
+ return NULL;
+ }
cselib_init ();
@@ -319,26 +356,34 @@ thread_jump (mode, e, b)
for (insn = NEXT_INSN (b->head); insn != NEXT_INSN (b->end) && !failed;
insn = NEXT_INSN (insn))
- {
- if (INSN_P (insn))
- {
- rtx pat = PATTERN (insn);
-
- if (GET_CODE (pat) == PARALLEL)
- {
- for (i = 0; i < XVECLEN (pat, 0); i++)
- failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
- }
- else
- failed |= mark_effect (pat, nonequal);
- }
+ {
+ if (INSN_P (insn))
+ {
+ rtx pat = PATTERN (insn);
+
+ if (GET_CODE (pat) == PARALLEL)
+ {
+ for (i = 0; i < XVECLEN (pat, 0); i++)
+ failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
+ }
+ else
+ failed |= mark_effect (pat, nonequal);
+ }
- cselib_process_insn (insn);
- }
+ cselib_process_insn (insn);
+ }
/* Later we should clear nonequal of dead registers. So far we don't
have life information in cfg_cleanup. */
if (failed)
+ {
+ BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
+ goto failed_exit;
+ }
+
+ /* cond2 must not mention any register that is not equal to the
+ former block. */
+ if (for_each_rtx (&cond2, mentions_nonequal_regs, nonequal))
goto failed_exit;
/* In case liveness information is available, we need to prove equivalence
@@ -455,10 +500,10 @@ try_forward_edges (mode, b)
For fallthru forwarders, the LOOP_BEG note must appear between
the header of block and CODE_LABEL of the loop, for non forwarders
it must appear before the JUMP_INSN. */
- if (mode & CLEANUP_PRE_LOOP)
+ if ((mode & CLEANUP_PRE_LOOP) && optimize)
{
rtx insn = (target->succ->flags & EDGE_FALLTHRU
- ? target->head : prev_nonnote_insn (target->end));
+ ? target->head : prev_nonnote_insn (target->end));
if (GET_CODE (insn) != NOTE)
insn = NEXT_INSN (insn);
@@ -469,14 +514,23 @@ try_forward_edges (mode, b)
&& NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
break;
- if (insn && GET_CODE (insn) == NOTE)
+ if (GET_CODE (insn) == NOTE)
+ break;
+
+ /* Do not clean up branches to just past the end of a loop
+ at this time; it can mess up the loop optimizer's
+ recognition of some patterns. */
+
+ insn = PREV_INSN (target->head);
+ if (insn && GET_CODE (insn) == NOTE
+ && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
break;
}
counter++;
target = new_target;
threaded |= new_target_threaded;
- }
+ }
if (counter >= n_basic_blocks)
{
@@ -499,7 +553,7 @@ try_forward_edges (mode, b)
{
notice_new_block (redirect_edge_and_branch_force (e, target));
if (rtl_dump_file)
- fprintf (rtl_dump_file, "Conditionals threaded.\n");
+ fprintf (rtl_dump_file, "Conditionals threaded.\n");
}
else if (!redirect_edge_and_branch (e, target))
{
@@ -519,7 +573,6 @@ try_forward_edges (mode, b)
if (!FORWARDER_BLOCK_P (b) && forwarder_block_p (b))
BB_SET_FLAG (b, BB_FORWARDER_BLOCK);
- BB_SET_FLAG (b, BB_UPDATE_LIFE);
do
{
@@ -569,7 +622,7 @@ try_forward_edges (mode, b)
&& first == threaded_edges [n]->src)
n++;
t = first->succ;
- }
+ }
t->count -= edge_count;
if (t->count < 0)
@@ -643,7 +696,6 @@ merge_blocks_move_predecessor_nojumps (a, b)
basic_block a, b;
{
rtx barrier;
- int index;
barrier = next_nonnote_insn (a->end);
if (GET_CODE (barrier) != BARRIER)
@@ -663,20 +715,16 @@ merge_blocks_move_predecessor_nojumps (a, b)
/* Scramble the insn chain. */
if (a->end != PREV_INSN (b->head))
reorder_insns_nobb (a->head, a->end, PREV_INSN (b->head));
- BB_SET_FLAG (a, BB_UPDATE_LIFE);
+ a->flags |= BB_DIRTY;
if (rtl_dump_file)
fprintf (rtl_dump_file, "Moved block %d before %d and merged.\n",
a->index, b->index);
- /* Swap the records for the two blocks around. Although we are deleting B,
- A is now where B was and we want to compact the BB array from where
- A used to be. */
- BASIC_BLOCK (a->index) = b;
- BASIC_BLOCK (b->index) = a;
- index = a->index;
- a->index = b->index;
- b->index = index;
+ /* Swap the records for the two blocks around. */
+
+ unlink_block (a);
+ link_block (a, b->prev_bb);
/* Now blocks A and B are contiguous. Merge them. */
merge_blocks_nomove (a, b);
@@ -729,13 +777,12 @@ merge_blocks_move_successor_nojumps (a, b)
/* Restore the real end of b. */
b->end = real_b_end;
- /* Now blocks A and B are contiguous. Merge them. */
- merge_blocks_nomove (a, b);
- BB_SET_FLAG (a, BB_UPDATE_LIFE);
-
if (rtl_dump_file)
fprintf (rtl_dump_file, "Moved block %d after %d and merged.\n",
b->index, a->index);
+
+ /* Now blocks A and B are contiguous. Merge them. */
+ merge_blocks_nomove (a, b);
}
/* Attempt to merge basic blocks that are potentially non-adjacent.
@@ -760,18 +807,12 @@ merge_blocks (e, b, c, mode)
if (e->flags & EDGE_FALLTHRU)
{
int b_index = b->index, c_index = c->index;
- /* We need to update liveness in case C already has broken liveness
- or B ends by conditional jump to next instructions that will be
- removed. */
- if ((BB_FLAGS (c) & BB_UPDATE_LIFE)
- || GET_CODE (b->end) == JUMP_INSN)
- BB_SET_FLAG (b, BB_UPDATE_LIFE);
merge_blocks_nomove (b, c);
update_forwarder_flag (b);
if (rtl_dump_file)
fprintf (rtl_dump_file, "Merged %d and %d without moving.\n",
- b_index, c_index);
+ b_index, c_index);
return true;
}
@@ -831,8 +872,6 @@ merge_blocks (e, b, c, mode)
bb = force_nonfallthru (b_fallthru_edge);
if (bb)
notice_new_block (bb);
- else
- BB_SET_FLAG (b_fallthru_edge->src, BB_UPDATE_LIFE);
}
merge_blocks_move_predecessor_nojumps (b, c);
@@ -847,8 +886,8 @@ merge_blocks (e, b, c, mode)
static bool
insns_match_p (mode, i1, i2)
- int mode ATTRIBUTE_UNUSED;
- rtx i1, i2;
+ int mode ATTRIBUTE_UNUSED;
+ rtx i1, i2;
{
rtx p1, p2;
@@ -873,8 +912,9 @@ insns_match_p (mode, i1, i2)
equal, they were constructed identically. */
if (GET_CODE (i1) == CALL_INSN
- && !rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
- CALL_INSN_FUNCTION_USAGE (i2)))
+ && (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
+ CALL_INSN_FUNCTION_USAGE (i2))
+ || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2)))
return false;
#ifdef STACK_REGS
@@ -988,10 +1028,10 @@ flow_find_cross_jump (mode, bb1, bb2, f1, f2)
while (true)
{
/* Ignore notes. */
- while (!active_insn_p (i1) && i1 != bb1->head)
+ while (!INSN_P (i1) && i1 != bb1->head)
i1 = PREV_INSN (i1);
- while (!active_insn_p (i2) && i2 != bb2->head)
+ while (!INSN_P (i2) && i2 != bb2->head)
i2 = PREV_INSN (i2);
if (i1 == bb1->head || i2 == bb2->head)
@@ -1000,8 +1040,8 @@ flow_find_cross_jump (mode, bb1, bb2, f1, f2)
if (!insns_match_p (mode, i1, i2))
break;
- /* Don't begin a cross-jump with a USE or CLOBBER insn. */
- if (active_insn_p (i1))
+ /* Don't begin a cross-jump with a NOTE insn. */
+ if (INSN_P (i1))
{
/* If the merged insns have different REG_EQUAL notes, then
remove them. */
@@ -1018,10 +1058,10 @@ flow_find_cross_jump (mode, bb1, bb2, f1, f2)
remove_note (i1, equiv1);
remove_note (i2, equiv2);
}
-
+
afterlast1 = last1, afterlast2 = last2;
last1 = i1, last2 = i2;
- ninsns++;
+ ninsns++;
}
i1 = PREV_INSN (i1);
@@ -1040,13 +1080,13 @@ flow_find_cross_jump (mode, bb1, bb2, f1, f2)
Two, it keeps line number notes as matched as may be. */
if (ninsns)
{
- while (last1 != bb1->head && !active_insn_p (PREV_INSN (last1)))
+ while (last1 != bb1->head && !INSN_P (PREV_INSN (last1)))
last1 = PREV_INSN (last1);
if (last1 != bb1->head && GET_CODE (PREV_INSN (last1)) == CODE_LABEL)
last1 = PREV_INSN (last1);
- while (last2 != bb2->head && !active_insn_p (PREV_INSN (last2)))
+ while (last2 != bb2->head && !INSN_P (PREV_INSN (last2)))
last2 = PREV_INSN (last2);
if (last2 != bb2->head && GET_CODE (PREV_INSN (last2)) == CODE_LABEL)
@@ -1078,9 +1118,11 @@ outgoing_edges_match (mode, bb1, bb2)
/* If BB1 has only one successor, we may be looking at either an
unconditional jump, or a fake edge to exit. */
if (bb1->succ && !bb1->succ->succ_next
- && !(bb1->succ->flags & (EDGE_COMPLEX | EDGE_FAKE)))
+ && (bb1->succ->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
+ && (GET_CODE (bb1->end) != JUMP_INSN || simplejump_p (bb1->end)))
return (bb2->succ && !bb2->succ->succ_next
- && (bb2->succ->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0);
+ && (bb2->succ->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
+ && (GET_CODE (bb2->end) != JUMP_INSN || simplejump_p (bb2->end)));
/* Match conditional jumps - this may get tricky when fallthru and branch
edges are crossed. */
@@ -1096,7 +1138,7 @@ outgoing_edges_match (mode, bb1, bb2)
enum rtx_code code1, code2;
if (!bb2->succ
- || !bb2->succ->succ_next
+ || !bb2->succ->succ_next
|| bb2->succ->succ_next->succ_next
|| !any_condjump_p (bb2->end)
|| !onlyjump_p (bb2->end))
@@ -1175,8 +1217,8 @@ outgoing_edges_match (mode, bb1, bb2)
roughly similar. */
if (match
&& !optimize_size
- && bb1->frequency > BB_FREQ_MAX / 1000
- && bb2->frequency > BB_FREQ_MAX / 1000)
+ && maybe_hot_bb_p (bb1)
+ && maybe_hot_bb_p (bb2))
{
int prob2;
@@ -1207,7 +1249,7 @@ outgoing_edges_match (mode, bb1, bb2)
return match;
}
- /* Generic case - we are seeing an computed jump, table jump or trapping
+ /* Generic case - we are seeing a computed jump, table jump or trapping
instruction. */
/* First ensure that the instructions match. There may be many outgoing
@@ -1245,9 +1287,9 @@ outgoing_edges_match (mode, bb1, bb2)
if (fallthru1)
{
basic_block d1 = (forwarder_block_p (fallthru1->dest)
- ? fallthru1->dest->succ->dest: fallthru1->dest);
+ ? fallthru1->dest->succ->dest: fallthru1->dest);
basic_block d2 = (forwarder_block_p (fallthru2->dest)
- ? fallthru2->dest->succ->dest: fallthru2->dest);
+ ? fallthru2->dest->succ->dest: fallthru2->dest);
if (d1 != d2)
return false;
@@ -1279,21 +1321,21 @@ try_crossjump_to_edge (mode, e1, e2)
{
int nmatch;
basic_block src1 = e1->src, src2 = e2->src;
- basic_block redirect_to;
+ basic_block redirect_to, redirect_from, to_remove;
rtx newpos1, newpos2;
edge s;
- rtx last;
- rtx label;
/* Search backward through forwarder blocks. We don't need to worry
about multiple entry or chained forwarders, as they will be optimized
away. We do this to look past the unconditional jump following a
conditional jump that is required due to the current CFG shape. */
if (src1->pred
+ && !src1->pred->pred_next
&& FORWARDER_BLOCK_P (src1))
e1 = src1->pred, src1 = e1->src;
if (src2->pred
+ && !src2->pred->pred_next
&& FORWARDER_BLOCK_P (src2))
e2 = src2->pred, src2 = e2->src;
@@ -1344,6 +1386,8 @@ try_crossjump_to_edge (mode, e1, e2)
redirect_to->count += src1->count;
redirect_to->frequency += src1->frequency;
+  /* We may have some registers visible through the block. */
+ redirect_to->flags |= BB_DIRTY;
/* Recompute the frequencies and counts of outgoing edges. */
for (s = redirect_to->succ; s; s = s->succ_next)
@@ -1407,29 +1451,14 @@ try_crossjump_to_edge (mode, e1, e2)
if (GET_CODE (newpos1) == NOTE)
newpos1 = NEXT_INSN (newpos1);
- last = src1->end;
- /* Emit the jump insn. */
- label = block_label (redirect_to);
- emit_jump_insn_after (gen_jump (label), src1->end);
- JUMP_LABEL (src1->end) = label;
- LABEL_NUSES (label)++;
+ redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
+ to_remove = redirect_from->succ->dest;
- /* Delete the now unreachable instructions. */
- delete_insn_chain (newpos1, last);
+ redirect_edge_and_branch_force (redirect_from->succ, redirect_to);
+ flow_delete_block (to_remove);
- /* Make sure there is a barrier after the new jump. */
- last = next_nonnote_insn (src1->end);
- if (!last || GET_CODE (last) != BARRIER)
- emit_barrier_after (src1->end);
-
- /* Update CFG. */
- while (src1->succ)
- remove_edge (src1->succ);
- make_single_succ_edge (src1, redirect_to, 0);
-
- BB_SET_FLAG (src1, BB_UPDATE_LIFE);
- update_forwarder_flag (src1);
+ update_forwarder_flag (redirect_from);
return true;
}
@@ -1445,6 +1474,7 @@ try_crossjump_bb (mode, bb)
{
edge e, e2, nexte2, nexte, fallthru;
bool changed;
+ int n = 0, max;
/* Nothing to do if there is not at least two incoming edges. */
if (!bb->pred || !bb->pred->pred_next)
@@ -1453,9 +1483,15 @@ try_crossjump_bb (mode, bb)
/* It is always cheapest to redirect a block that ends in a branch to
a block that falls through into BB, as that adds no branches to the
program. We'll try that combination first. */
- for (fallthru = bb->pred; fallthru; fallthru = fallthru->pred_next)
- if (fallthru->flags & EDGE_FALLTHRU)
- break;
+ fallthru = NULL;
+ max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES);
+ for (e = bb->pred; e ; e = e->pred_next, n++)
+ {
+ if (e->flags & EDGE_FALLTHRU)
+ fallthru = e;
+ if (n > max)
+ return false;
+ }
changed = false;
for (e = bb->pred; e; e = nexte)
@@ -1530,17 +1566,19 @@ static bool
try_optimize_cfg (mode)
int mode;
{
- int i;
bool changed_overall = false;
bool changed;
int iterations = 0;
- sbitmap blocks;
+ basic_block bb, b;
if (mode & CLEANUP_CROSSJUMP)
add_noreturn_fake_exit_edges ();
- for (i = 0; i < n_basic_blocks; i++)
- update_forwarder_flag (BASIC_BLOCK (i));
+ FOR_EACH_BB (bb)
+ update_forwarder_flag (bb);
+
+ if (mode & CLEANUP_UPDATE_LIFE)
+ clear_bb_flags ();
if (! (* targetm.cannot_modify_jumps_p) ())
{
@@ -1557,16 +1595,16 @@ try_optimize_cfg (mode)
"\n\ntry_optimize_cfg iteration %i\n\n",
iterations);
- for (i = 0; i < n_basic_blocks;)
+ for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR;)
{
- basic_block c, b = BASIC_BLOCK (i);
+ basic_block c;
edge s;
bool changed_here = false;
/* Delete trivially dead basic blocks. */
while (b->pred == NULL)
{
- c = BASIC_BLOCK (b->index - 1);
+ c = b->prev_bb;
if (rtl_dump_file)
fprintf (rtl_dump_file, "Deleting block %i.\n",
b->index);
@@ -1620,7 +1658,7 @@ try_optimize_cfg (mode)
"Deleting fallthru block %i.\n",
b->index);
- c = BASIC_BLOCK (b->index ? b->index - 1 : 1);
+ c = b->prev_bb == ENTRY_BLOCK_PTR ? b->next_bb : b->prev_bb;
redirect_edge_succ_nodup (b->pred, b->succ->dest);
flow_delete_block (b);
changed = true;
@@ -1638,17 +1676,15 @@ try_optimize_cfg (mode)
/* If the jump insn has side effects,
we can't kill the edge. */
&& (GET_CODE (b->end) != JUMP_INSN
- || (onlyjump_p (b->end)
- && !tablejump_p (b->end)))
+ || (flow2_completed
+ ? simplejump_p (b->end)
+ : onlyjump_p (b->end)))
&& merge_blocks (s, b, c, mode))
changed_here = true;
/* Simplify branch over branch. */
if ((mode & CLEANUP_EXPENSIVE) && try_simplify_condjump (b))
- {
- BB_SET_FLAG (b, BB_UPDATE_LIFE);
- changed_here = true;
- }
+ changed_here = true;
/* If B has a single outgoing edge, but uses a
non-trivial jump instruction without side-effects, we
@@ -1661,7 +1697,6 @@ try_optimize_cfg (mode)
&& onlyjump_p (b->end)
&& redirect_edge_and_branch (b->succ, b->succ->dest))
{
- BB_SET_FLAG (b, BB_UPDATE_LIFE);
update_forwarder_flag (b);
changed_here = true;
}
@@ -1678,7 +1713,7 @@ try_optimize_cfg (mode)
/* Don't get confused by the index shift caused by
deleting blocks. */
if (!changed_here)
- i = b->index + 1;
+ b = b->next_bb;
else
changed = true;
}
@@ -1700,64 +1735,33 @@ try_optimize_cfg (mode)
if (mode & CLEANUP_CROSSJUMP)
remove_fake_edges ();
- if ((mode & CLEANUP_UPDATE_LIFE) && changed_overall)
- {
- bool found = 0;
-
- blocks = sbitmap_alloc (n_basic_blocks);
- sbitmap_zero (blocks);
- for (i = 0; i < n_basic_blocks; i++)
- if (BB_FLAGS (BASIC_BLOCK (i)) & BB_UPDATE_LIFE)
- {
- found = 1;
- SET_BIT (blocks, i);
- }
-
- if (found)
- update_life_info (blocks, UPDATE_LIFE_GLOBAL,
- PROP_DEATH_NOTES | PROP_SCAN_DEAD_CODE
- | PROP_KILL_DEAD_CODE);
- sbitmap_free (blocks);
- }
-
- for (i = 0; i < n_basic_blocks; i++)
- BASIC_BLOCK (i)->aux = NULL;
+ clear_aux_for_blocks ();
return changed_overall;
}
/* Delete all unreachable basic blocks. */
-static bool
+bool
delete_unreachable_blocks ()
{
- int i, j;
bool changed = false;
+ basic_block b, next_bb;
find_unreachable_blocks ();
- /* Delete all unreachable basic blocks. Do compaction concurrently,
- as otherwise we can wind up with O(N^2) behaviour here when we
- have oodles of dead code. */
+ /* Delete all unreachable basic blocks. */
- for (i = j = 0; i < n_basic_blocks; ++i)
+ for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR; b = next_bb)
{
- basic_block b = BASIC_BLOCK (i);
+ next_bb = b->next_bb;
if (!(b->flags & BB_REACHABLE))
{
- flow_delete_block_noexpunge (b);
- expunge_block_nocompact (b);
+ flow_delete_block (b);
changed = true;
}
- else
- {
- BASIC_BLOCK (j) = b;
- b->index = j++;
- }
}
- n_basic_blocks = j;
- basic_block_info->num_elements = j;
if (changed)
tidy_fallthru_edges ();
@@ -1773,13 +1777,48 @@ cleanup_cfg (mode)
bool changed = false;
timevar_push (TV_CLEANUP_CFG);
- changed = delete_unreachable_blocks ();
- if (try_optimize_cfg (mode))
- delete_unreachable_blocks (), changed = true;
+ if (delete_unreachable_blocks ())
+ {
+ changed = true;
+      /* We've possibly created trivially dead code.  Clean it up right
+	 now to introduce more opportunities for try_optimize_cfg. */
+ if (!(mode & (CLEANUP_NO_INSN_DEL
+ | CLEANUP_UPDATE_LIFE | CLEANUP_PRE_SIBCALL))
+ && !reload_completed)
+ delete_trivially_dead_insns (get_insns(), max_reg_num ());
+ }
+
+ compact_blocks ();
+
+ while (try_optimize_cfg (mode))
+ {
+ delete_unreachable_blocks (), changed = true;
+ if (mode & CLEANUP_UPDATE_LIFE)
+ {
+	/* Cleaning up CFG introduces more opportunities for dead code
+	   removal that in turn may introduce more opportunities for
+	   cleaning up the CFG. */
+ if (!update_life_info_in_dirty_blocks (UPDATE_LIFE_GLOBAL_RM_NOTES,
+ PROP_DEATH_NOTES
+ | PROP_SCAN_DEAD_CODE
+ | PROP_KILL_DEAD_CODE
+ | PROP_LOG_LINKS))
+ break;
+ }
+ else if (!(mode & (CLEANUP_NO_INSN_DEL | CLEANUP_PRE_SIBCALL))
+ && (mode & CLEANUP_EXPENSIVE)
+ && !reload_completed)
+ {
+ if (!delete_trivially_dead_insns (get_insns(), max_reg_num ()))
+ break;
+ }
+ else
+ break;
+ delete_dead_jumptables ();
+ }
/* Kill the data we won't maintain. */
free_EXPR_LIST_list (&label_value_list);
- free_EXPR_LIST_list (&tail_recursion_label_list);
timevar_pop (TV_CLEANUP_CFG);
return changed;
OpenPOWER on IntegriCloud