Diffstat (limited to 'contrib/gcc/sched-ebb.c')
-rw-r--r--  contrib/gcc/sched-ebb.c  405
1 files changed, 338 insertions, 67 deletions
diff --git a/contrib/gcc/sched-ebb.c b/contrib/gcc/sched-ebb.c
index 47dd83f..c95bb45 100644
--- a/contrib/gcc/sched-ebb.c
+++ b/contrib/gcc/sched-ebb.c
@@ -1,6 +1,6 @@
/* Instruction scheduling pass.
Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
- 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
+ 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
and currently maintained by, Jim Wilson (wilson@cygnus.com)
@@ -23,6 +23,8 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
#include "config.h"
#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
#include "toplev.h"
#include "rtl.h"
#include "tm_p.h"
@@ -37,7 +39,9 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
#include "toplev.h"
#include "recog.h"
#include "cfglayout.h"
+#include "params.h"
#include "sched-int.h"
+#include "target.h"
/* The number of insns to be scheduled in total. */
static int target_n_insns;
@@ -45,21 +49,25 @@ static int target_n_insns;
static int sched_n_insns;
/* Implementations of the sched_info functions for region scheduling. */
-static void init_ready_list PARAMS ((struct ready_list *));
-static int can_schedule_ready_p PARAMS ((rtx));
-static int new_ready PARAMS ((rtx));
-static int schedule_more_p PARAMS ((void));
-static const char *ebb_print_insn PARAMS ((rtx, int));
-static int rank PARAMS ((rtx, rtx));
-static int contributes_to_priority PARAMS ((rtx, rtx));
-static void compute_jump_reg_dependencies PARAMS ((rtx, regset, regset,
- regset));
-static void schedule_ebb PARAMS ((rtx, rtx));
+static void init_ready_list (struct ready_list *);
+static int can_schedule_ready_p (rtx);
+static int new_ready (rtx);
+static int schedule_more_p (void);
+static const char *ebb_print_insn (rtx, int);
+static int rank (rtx, rtx);
+static int contributes_to_priority (rtx, rtx);
+static void compute_jump_reg_dependencies (rtx, regset, regset, regset);
+static basic_block earliest_block_with_similiar_load (basic_block, rtx);
+static void add_deps_for_risky_insns (rtx, rtx);
+static basic_block schedule_ebb (rtx, rtx);
+static basic_block fix_basic_block_boundaries (basic_block, basic_block, rtx,
+ rtx);
+static void add_missing_bbs (rtx, basic_block, basic_block);
/* Return nonzero if there are more insns that should be scheduled. */
static int
-schedule_more_p ()
+schedule_more_p (void)
{
return sched_n_insns < target_n_insns;
}
@@ -68,8 +76,7 @@ schedule_more_p ()
once before scheduling a set of insns. */
static void
-init_ready_list (ready)
- struct ready_list *ready;
+init_ready_list (struct ready_list *ready)
{
rtx prev_head = current_sched_info->prev_head;
rtx next_tail = current_sched_info->next_tail;
@@ -88,17 +95,9 @@ init_ready_list (ready)
Count number of insns in the target block being scheduled. */
for (insn = NEXT_INSN (prev_head); insn != next_tail; insn = NEXT_INSN (insn))
{
- rtx next;
-
- if (! INSN_P (insn))
- continue;
- next = NEXT_INSN (insn);
-
- if (INSN_DEP_COUNT (insn) == 0
- && (! INSN_P (next) || SCHED_GROUP_P (next) == 0))
+ if (INSN_DEP_COUNT (insn) == 0)
ready_add (ready, insn);
- if (!(SCHED_GROUP_P (insn)))
- target_n_insns++;
+ target_n_insns++;
}
}
@@ -106,8 +105,7 @@ init_ready_list (ready)
insn can be scheduled, nonzero if we should silently discard it. */
static int
-can_schedule_ready_p (insn)
- rtx insn ATTRIBUTE_UNUSED;
+can_schedule_ready_p (rtx insn ATTRIBUTE_UNUSED)
{
sched_n_insns++;
return 1;
@@ -117,8 +115,7 @@ can_schedule_ready_p (insn)
if it should be moved to the ready list or the queue, or zero if we
should silently discard it. */
static int
-new_ready (next)
- rtx next ATTRIBUTE_UNUSED;
+new_ready (rtx next ATTRIBUTE_UNUSED)
{
return 1;
}
@@ -129,9 +126,7 @@ new_ready (next)
to be formatted so that multiple output lines will line up nicely. */
static const char *
-ebb_print_insn (insn, aligned)
- rtx insn;
- int aligned ATTRIBUTE_UNUSED;
+ebb_print_insn (rtx insn, int aligned ATTRIBUTE_UNUSED)
{
static char tmp[80];
@@ -144,9 +139,17 @@ ebb_print_insn (insn, aligned)
is to be preferred. Zero if they are equally good. */
static int
-rank (insn1, insn2)
- rtx insn1 ATTRIBUTE_UNUSED, insn2 ATTRIBUTE_UNUSED;
+rank (rtx insn1, rtx insn2)
{
+ basic_block bb1 = BLOCK_FOR_INSN (insn1);
+ basic_block bb2 = BLOCK_FOR_INSN (insn2);
+
+ if (bb1->count > bb2->count
+ || bb1->frequency > bb2->frequency)
+ return -1;
+ if (bb1->count < bb2->count
+ || bb1->frequency < bb2->frequency)
+ return 1;
return 0;
}
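
Not part of the patch, but as a minimal illustration: the new rank () body prefers the insn whose basic block looks hotter, returning -1 when the first block has a higher profile count or a higher estimated frequency, and 1 in the opposite case. A self-contained C sketch of the same comparison, with made-up profile numbers:

    /* Illustrative sketch only; mirrors the comparison added to rank ()
       above, using hypothetical count/frequency values.  A negative
       result means the first block is treated as hotter.  */
    #include <stdio.h>

    struct bb_profile { long long count; int frequency; };

    static int hotter (struct bb_profile b1, struct bb_profile b2)
    {
      if (b1.count > b2.count || b1.frequency > b2.frequency)
        return -1;
      if (b1.count < b2.count || b1.frequency < b2.frequency)
        return 1;
      return 0;
    }

    int main (void)
    {
      struct bb_profile hot = { 1000, 900 }, cold = { 10, 50 };
      printf ("%d\n", hotter (hot, cold));  /* prints -1 */
      return 0;
    }
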
@@ -155,21 +158,20 @@ rank (insn1, insn2)
calculations. */
static int
-contributes_to_priority (next, insn)
- rtx next ATTRIBUTE_UNUSED, insn ATTRIBUTE_UNUSED;
+contributes_to_priority (rtx next ATTRIBUTE_UNUSED,
+ rtx insn ATTRIBUTE_UNUSED)
{
return 1;
}
-/* INSN is a JUMP_INSN, COND_SET is the set of registers that are
- conditionally set before INSN. Store the set of registers that
- must be considered as used by this jump in USED and that of
- registers that must be considered as set in SET. */
+ /* INSN is a JUMP_INSN, COND_SET is the set of registers that are
+ conditionally set before INSN. Store the set of registers that
+ must be considered as used by this jump in USED and that of
+ registers that must be considered as set in SET. */
static void
-compute_jump_reg_dependencies (insn, cond_set, used, set)
- rtx insn;
- regset cond_set, used, set;
+compute_jump_reg_dependencies (rtx insn, regset cond_set, regset used,
+ regset set)
{
basic_block b = BLOCK_FOR_INSN (insn);
edge e;
@@ -203,21 +205,286 @@ static struct sched_info ebb_sched_info =
NULL, NULL,
NULL, NULL,
- 0, 1
+ 0, 1, 0
};
+/* It is possible that ebb scheduling eliminated some blocks.
+ Place blocks from FIRST to LAST before BEFORE. */
+
+static void
+add_missing_bbs (rtx before, basic_block first, basic_block last)
+{
+ for (; last != first->prev_bb; last = last->prev_bb)
+ {
+ before = emit_note_before (NOTE_INSN_BASIC_BLOCK, before);
+ NOTE_BASIC_BLOCK (before) = last;
+ BB_HEAD (last) = before;
+ BB_END (last) = before;
+ update_bb_for_insn (last);
+ }
+}
+
+/* Fixup the CFG after EBB scheduling. Re-recognize the basic
+ block boundaries in between HEAD and TAIL and update basic block
+ structures between BB and LAST. */
+
+static basic_block
+fix_basic_block_boundaries (basic_block bb, basic_block last, rtx head,
+ rtx tail)
+{
+ rtx insn = head;
+ rtx last_inside = BB_HEAD (bb);
+ rtx aftertail = NEXT_INSN (tail);
+
+ head = BB_HEAD (bb);
+
+ for (; insn != aftertail; insn = NEXT_INSN (insn))
+ {
+ if (GET_CODE (insn) == CODE_LABEL)
+ abort ();
+ /* Create new basic blocks just before first insn. */
+ if (inside_basic_block_p (insn))
+ {
+ if (!last_inside)
+ {
+ rtx note;
+
+ /* Re-emit the basic block note for newly found BB header. */
+ if (GET_CODE (insn) == CODE_LABEL)
+ {
+ note = emit_note_after (NOTE_INSN_BASIC_BLOCK, insn);
+ head = insn;
+ last_inside = note;
+ }
+ else
+ {
+ note = emit_note_before (NOTE_INSN_BASIC_BLOCK, insn);
+ head = note;
+ last_inside = insn;
+ }
+ }
+ else
+ last_inside = insn;
+ }
+ /* Control flow instruction terminate basic block. It is possible
+ that we've eliminated some basic blocks (made them empty).
+ Find the proper basic block using BLOCK_FOR_INSN and arrange things in
+ a sensible way by inserting empty basic blocks as needed. */
+ if (control_flow_insn_p (insn) || (insn == tail && last_inside))
+ {
+ basic_block curr_bb = BLOCK_FOR_INSN (insn);
+ rtx note;
+
+ if (!control_flow_insn_p (insn))
+ curr_bb = last;
+ if (bb == last->next_bb)
+ {
+ edge f;
+ rtx h;
+
+ /* An obscure special case, where we do have partially dead
+ instruction scheduled after last control flow instruction.
+ In this case we can create new basic block. It is
+ always exactly one basic block last in the sequence. Handle
+ it by splitting the edge and repositioning the block.
+ This is somewhat hackish, but at least avoid cut&paste
+
+ A safer solution can be to bring the code into sequence,
+ do the split and re-emit it back in case this will ever
+ trigger problem. */
+ f = bb->prev_bb->succ;
+ while (f && !(f->flags & EDGE_FALLTHRU))
+ f = f->succ_next;
+
+ if (f)
+ {
+ last = curr_bb = split_edge (f);
+ h = BB_HEAD (curr_bb);
+ BB_HEAD (curr_bb) = head;
+ BB_END (curr_bb) = insn;
+ /* Edge splitting created misplaced BASIC_BLOCK note, kill
+ it. */
+ delete_insn (h);
+ }
+ /* It may happen that code got moved past unconditional jump in
+ case the code is completely dead. Kill it. */
+ else
+ {
+ rtx next = next_nonnote_insn (insn);
+ delete_insn_chain (head, insn);
+ /* We keep some notes in the way that may split barrier from the
+ jump. */
+ if (GET_CODE (next) == BARRIER)
+ {
+ emit_barrier_after (prev_nonnote_insn (head));
+ delete_insn (next);
+ }
+ insn = NULL;
+ }
+ }
+ else
+ {
+ BB_HEAD (curr_bb) = head;
+ BB_END (curr_bb) = insn;
+ add_missing_bbs (BB_HEAD (curr_bb), bb, curr_bb->prev_bb);
+ }
+ note = GET_CODE (head) == CODE_LABEL ? NEXT_INSN (head) : head;
+ NOTE_BASIC_BLOCK (note) = curr_bb;
+ update_bb_for_insn (curr_bb);
+ bb = curr_bb->next_bb;
+ last_inside = NULL;
+ if (!insn)
+ break;
+ }
+ }
+ add_missing_bbs (BB_HEAD (last->next_bb), bb, last);
+ return bb->prev_bb;
+}
+
+/* Returns the earliest block in EBB currently being processed where a
+ "similar load" 'insn2' is found, and hence LOAD_INSN can move
+ speculatively into the found block. All the following must hold:
+
+ (1) both loads have 1 base register (PFREE_CANDIDATEs).
+ (2) load_insn and load2 have a def-use dependence upon
+ the same insn 'insn1'.
+
+ From all these we can conclude that the two loads access memory
+ addresses that differ at most by a constant, and hence if moving
+ load_insn would cause an exception, it would have been caused by
+ load2 anyhow.
+
+ The function uses list (given by LAST_BLOCK) of already processed
+ blocks in EBB. The list is formed in `add_deps_for_risky_insns'. */
+
+static basic_block
+earliest_block_with_similiar_load (basic_block last_block, rtx load_insn)
+{
+ rtx back_link;
+ basic_block bb, earliest_block = NULL;
+
+ for (back_link = LOG_LINKS (load_insn);
+ back_link;
+ back_link = XEXP (back_link, 1))
+ {
+ rtx insn1 = XEXP (back_link, 0);
+
+ if (GET_MODE (back_link) == VOIDmode)
+ {
+ /* Found a DEF-USE dependence (insn1, load_insn). */
+ rtx fore_link;
+
+ for (fore_link = INSN_DEPEND (insn1);
+ fore_link;
+ fore_link = XEXP (fore_link, 1))
+ {
+ rtx insn2 = XEXP (fore_link, 0);
+ basic_block insn2_block = BLOCK_FOR_INSN (insn2);
+
+ if (GET_MODE (fore_link) == VOIDmode)
+ {
+ if (earliest_block != NULL
+ && earliest_block->index < insn2_block->index)
+ continue;
+
+ /* Found a DEF-USE dependence (insn1, insn2). */
+ if (haifa_classify_insn (insn2) != PFREE_CANDIDATE)
+ /* insn2 not guaranteed to be a 1 base reg load. */
+ continue;
+
+ for (bb = last_block; bb; bb = bb->aux)
+ if (insn2_block == bb)
+ break;
+
+ if (!bb)
+ /* insn2 is the similar load. */
+ earliest_block = insn2_block;
+ }
+ }
+ }
+ }
+
+ return earliest_block;
+}
+
+/* The following function adds dependencies between jumps and risky
+ insns in given ebb. */
+
+static void
+add_deps_for_risky_insns (rtx head, rtx tail)
+{
+ rtx insn, prev;
+ int class;
+ rtx last_jump = NULL_RTX;
+ rtx next_tail = NEXT_INSN (tail);
+ basic_block last_block = NULL, bb;
+
+ for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
+ if (GET_CODE (insn) == JUMP_INSN)
+ {
+ bb = BLOCK_FOR_INSN (insn);
+ bb->aux = last_block;
+ last_block = bb;
+ last_jump = insn;
+ }
+ else if (INSN_P (insn) && last_jump != NULL_RTX)
+ {
+ class = haifa_classify_insn (insn);
+ prev = last_jump;
+ switch (class)
+ {
+ case PFREE_CANDIDATE:
+ if (flag_schedule_speculative_load)
+ {
+ bb = earliest_block_with_similiar_load (last_block, insn);
+ if (bb)
+ {
+ bb = bb->aux;
+ if (!bb)
+ break;
+ prev = BB_END (bb);
+ }
+ }
+ /* Fall through. */
+ case TRAP_RISKY:
+ case IRISKY:
+ case PRISKY_CANDIDATE:
+ /* ??? We could implement better checking PRISKY_CANDIDATEs
+ analogous to sched-rgn.c. */
+ /* We can not change the mode of the backward
+ dependency because REG_DEP_ANTI has the lowest
+ rank. */
+ if (add_dependence (insn, prev, REG_DEP_ANTI))
+ add_forward_dependence (prev, insn, REG_DEP_ANTI);
+ break;
+
+ default:
+ break;
+ }
+ }
+ /* Maintain the invariant that bb->aux is clear after use. */
+ while (last_block)
+ {
+ bb = last_block->aux;
+ last_block->aux = NULL;
+ last_block = bb;
+ }
+}
+
/* Schedule a single extended basic block, defined by the boundaries HEAD
and TAIL. */
-static void
-schedule_ebb (head, tail)
- rtx head, tail;
+static basic_block
+schedule_ebb (rtx head, rtx tail)
{
int n_insns;
+ basic_block b;
struct deps tmp_deps;
+ basic_block first_bb = BLOCK_FOR_INSN (head);
+ basic_block last_bb = BLOCK_FOR_INSN (tail);
if (no_real_insns_p (head, tail))
- return;
+ return BLOCK_FOR_INSN (tail);
init_deps_global ();
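
A hedged aside on the hunk above (illustrative, not part of the patch): add_deps_for_risky_insns threads the blocks of the jumps it has seen onto an intrusive list through bb->aux, earliest_block_with_similiar_load walks that chain, and the final loop unthreads it so the invariant that aux is clear after use holds. A minimal sketch of that pattern, with a hypothetical block type standing in for GCC's basic_block:

    /* Hypothetical stand-in for the bb->aux chaining used in
       add_deps_for_risky_insns.  Illustrative only.  */
    #include <stddef.h>
    #include <stdio.h>

    struct block { int index; struct block *aux; };

    int main (void)
    {
      struct block blocks[3] = { { 0, NULL }, { 1, NULL }, { 2, NULL } };
      struct block *last_block = NULL, *bb;
      int i;

      /* Push each block seen while scanning the ebb (most recent first).  */
      for (i = 0; i < 3; i++)
        {
          blocks[i].aux = last_block;
          last_block = &blocks[i];
        }

      /* Walk the chain, as the similar-load search does.  */
      for (bb = last_block; bb; bb = bb->aux)
        printf ("visited block %d\n", bb->index);

      /* Restore the invariant that aux is clear after use.  */
      while (last_block)
        {
          bb = last_block->aux;
          last_block->aux = NULL;
          last_block = bb;
        }
      return 0;
    }
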
@@ -229,6 +496,11 @@ schedule_ebb (head, tail)
/* Compute INSN_DEPEND. */
compute_forward_dependences (head, tail);
+ add_deps_for_risky_insns (head, tail);
+
+ if (targetm.sched.dependencies_evaluation_hook)
+ targetm.sched.dependencies_evaluation_hook (head, tail);
+
/* Set priorities. */
n_insns = set_priorities (head, tail);
@@ -237,7 +509,7 @@ schedule_ebb (head, tail)
if (write_symbols != NO_DEBUG)
{
- save_line_notes (0, head, tail);
+ save_line_notes (first_bb->index, head, tail);
rm_line_notes (head, tail);
}
@@ -277,18 +549,26 @@ schedule_ebb (head, tail)
if (write_symbols != NO_DEBUG)
restore_line_notes (head, tail);
+ b = fix_basic_block_boundaries (first_bb, last_bb, head, tail);
finish_deps_global ();
+ return b;
}
/* The one entry point in this file. DUMP_FILE is the dump file for
this pass. */
void
-schedule_ebbs (dump_file)
- FILE *dump_file;
+schedule_ebbs (FILE *dump_file)
{
basic_block bb;
+ int probability_cutoff;
+
+ if (profile_info && flag_branch_probabilities)
+ probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY_FEEDBACK);
+ else
+ probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY);
+ probability_cutoff = REG_BR_PROB_BASE / 100 * probability_cutoff;
/* Taking care of this degenerate case makes the rest of
this code simpler. */
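
As a hedged worked example of the cutoff arithmetic above (not part of the patch): the TRACER_MIN_BRANCH_PROBABILITY parameters are percentages, and REG_BR_PROB_BASE is 10000 in GCC's rtl.h, so the computation rescales the percentage into edge-probability units. Assuming a parameter value of 50 (the real defaults live in params.def), the cutoff and the later e->probability <= probability_cutoff test behave like this:

    /* Illustrative arithmetic only; the parameter value 50 is an
       assumption, REG_BR_PROB_BASE is 10000 as in GCC's rtl.h.  */
    #include <stdio.h>

    #define REG_BR_PROB_BASE 10000

    int main (void)
    {
      int param = 50;                                    /* hypothetical 50% */
      int probability_cutoff = REG_BR_PROB_BASE / 100 * param;  /* 5000 */
      int edge_probability = 3000;                       /* a 30% fallthrough edge */

      /* The ebb stops growing across edges no likelier than the cutoff.  */
      printf ("cutoff=%d stop=%d\n", probability_cutoff,
              edge_probability <= probability_cutoff);   /* cutoff=5000 stop=1 */
      return 0;
    }
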
@@ -305,32 +585,23 @@ schedule_ebbs (dump_file)
/* Schedule every region in the subroutine. */
FOR_EACH_BB (bb)
{
- rtx head = bb->head;
+ rtx head = BB_HEAD (bb);
rtx tail;
for (;;)
{
edge e;
- tail = bb->end;
+ tail = BB_END (bb);
if (bb->next_bb == EXIT_BLOCK_PTR
- || GET_CODE (bb->next_bb->head) == CODE_LABEL)
+ || GET_CODE (BB_HEAD (bb->next_bb)) == CODE_LABEL)
break;
for (e = bb->succ; e; e = e->succ_next)
if ((e->flags & EDGE_FALLTHRU) != 0)
break;
if (! e)
break;
- if (GET_CODE (tail) == JUMP_INSN)
- {
- rtx x = find_reg_note (tail, REG_BR_PROB, 0);
- if (x)
- {
- int pred_val = INTVAL (XEXP (x, 0));
- if (pred_val > REG_BR_PROB_BASE / 2)
- break;
- }
- }
-
+ if (e->probability <= probability_cutoff)
+ break;
bb = bb->next_bb;
}
@@ -348,11 +619,11 @@ schedule_ebbs (dump_file)
break;
}
- schedule_ebb (head, tail);
+ bb = schedule_ebb (head, tail);
}
- /* It doesn't make much sense to try and update life information here - we
- probably messed up even the flow graph. */
+ /* Updating life info can be done by local propagation over the modified
+ superblocks. */
/* Reposition the prologue and epilogue notes in case we moved the
prologue/epilogue insns. */