path: root/contrib/gcc/sched-rgn.c
Diffstat (limited to 'contrib/gcc/sched-rgn.c')
-rw-r--r-- contrib/gcc/sched-rgn.c | 307
1 file changed, 178 insertions(+), 129 deletions(-)
diff --git a/contrib/gcc/sched-rgn.c b/contrib/gcc/sched-rgn.c
index ab65e2c..88545b2 100644
--- a/contrib/gcc/sched-rgn.c
+++ b/contrib/gcc/sched-rgn.c
@@ -62,6 +62,7 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
#include "recog.h"
#include "cfglayout.h"
#include "sched-int.h"
+#include "target.h"
/* Define when we want to do count REG_DEAD notes before and after scheduling
for sanity checking. We can't do that when conditional execution is used,
@@ -152,7 +153,7 @@ static int *containing_rgn;
void debug_regions PARAMS ((void));
static void find_single_block_region PARAMS ((void));
-static void find_rgns PARAMS ((struct edge_list *, sbitmap *));
+static void find_rgns PARAMS ((struct edge_list *, dominance_info));
static int too_large PARAMS ((int, int *, int *));
extern void debug_live PARAMS ((int, int));
@@ -269,16 +270,13 @@ static edgeset *ancestor_edges;
static void compute_dom_prob_ps PARAMS ((int));
-#define ABS_VALUE(x) (((x)<0)?(-(x)):(x))
#define INSN_PROBABILITY(INSN) (SRC_PROB (BLOCK_TO_BB (BLOCK_NUM (INSN))))
#define IS_SPECULATIVE_INSN(INSN) (IS_SPECULATIVE (BLOCK_TO_BB (BLOCK_NUM (INSN))))
#define INSN_BB(INSN) (BLOCK_TO_BB (BLOCK_NUM (INSN)))
/* Parameters affecting the decision of rank_for_schedule().
??? Nope. But MIN_PROBABILITY is used in compute_trg_info. */
-#define MIN_DIFF_PRIORITY 2
#define MIN_PROBABILITY 40
-#define MIN_PROB_DIFF 10
/* Speculative scheduling functions. */
static int check_live_1 PARAMS ((int, rtx));
@@ -318,7 +316,7 @@ static void free_pending_lists PARAMS ((void));
static int
is_cfg_nonregular ()
{
- int b;
+ basic_block b;
rtx insn;
RTX_CODE code;
@@ -345,8 +343,8 @@ is_cfg_nonregular ()
/* If we have non-jumping insns which refer to labels, then we consider
the cfg not well structured. */
/* Check for labels referred to other than by jumps. */
- for (b = 0; b < n_basic_blocks; b++)
- for (insn = BLOCK_HEAD (b);; insn = NEXT_INSN (insn))
+ FOR_EACH_BB (b)
+ for (insn = b->head;; insn = NEXT_INSN (insn))
{
code = GET_CODE (insn);
if (GET_RTX_CLASS (code) == 'i' && code != JUMP_INSN)
@@ -360,7 +358,7 @@ is_cfg_nonregular ()
return 1;
}
- if (insn == BLOCK_END (b))
+ if (insn == b->end)
break;
}
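
The hunk above shows the recurring pattern of this patch: index loops over 0 .. n_basic_blocks-1 become FOR_EACH_BB walks, and BLOCK_HEAD/BLOCK_END lookups become direct b->head/b->end field accesses, so the code stops assuming a dense block index space. A minimal sketch of the idiom, assuming the GCC 3.3-era CFG API from basic-block.h (the helper name is hypothetical):

/* Visit every insn of every basic block without indexing
   BASIC_BLOCK (i).  Hypothetical helper, for illustration only.  */
static void
walk_all_insns ()
{
  basic_block b;
  rtx insn;

  FOR_EACH_BB (b)
    for (insn = b->head; ; insn = NEXT_INSN (insn))
      {
        /* ... inspect INSN here ... */
        if (insn == b->end)
          break;
      }
}
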
@@ -381,6 +379,7 @@ build_control_flow (edge_list)
struct edge_list *edge_list;
{
int i, unreachable, num_edges;
+ basic_block b;
/* This already accounts for entry/exit edges. */
num_edges = NUM_EDGES (edge_list);
@@ -392,10 +391,8 @@ build_control_flow (edge_list)
test is redundant with the one in find_rgns, but it's much
cheaper to go ahead and catch the trivial case here. */
unreachable = 0;
- for (i = 0; i < n_basic_blocks; i++)
+ FOR_EACH_BB (b)
{
- basic_block b = BASIC_BLOCK (i);
-
if (b->pred == NULL
|| (b->pred->src == b
&& b->pred->pred_next == NULL))
@@ -403,8 +400,8 @@ build_control_flow (edge_list)
}
/* ??? We can kill these soon. */
- in_edges = (int *) xcalloc (n_basic_blocks, sizeof (int));
- out_edges = (int *) xcalloc (n_basic_blocks, sizeof (int));
+ in_edges = (int *) xcalloc (last_basic_block, sizeof (int));
+ out_edges = (int *) xcalloc (last_basic_block, sizeof (int));
edge_table = (haifa_edge *) xcalloc (num_edges, sizeof (haifa_edge));
nr_edges = 0;
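
These allocations switch from n_basic_blocks to last_basic_block because the two counts now differ: n_basic_blocks is the number of live blocks, while last_basic_block is one past the highest bb->index in use, and the index space can contain gaps once blocks have been deleted. Any array subscripted by bb->index must therefore span last_basic_block entries. A sketch of the sizing rule, under the same API assumptions:

/* Per-block-index scratch array: size it by last_basic_block, not
   n_basic_blocks, or sparse indices overrun the allocation.  */
basic_block b;
int *mark = (int *) xcalloc (last_basic_block, sizeof (int));

FOR_EACH_BB (b)
  mark[b->index] = 1;   /* b->index < last_basic_block always holds */

free (mark);
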
@@ -543,17 +540,19 @@ debug_regions ()
static void
find_single_block_region ()
{
- int i;
+ basic_block bb;
+
+ nr_regions = 0;
- for (i = 0; i < n_basic_blocks; i++)
+ FOR_EACH_BB (bb)
{
- rgn_bb_table[i] = i;
- RGN_NR_BLOCKS (i) = 1;
- RGN_BLOCKS (i) = i;
- CONTAINING_RGN (i) = i;
- BLOCK_TO_BB (i) = 0;
+ rgn_bb_table[nr_regions] = bb->index;
+ RGN_NR_BLOCKS (nr_regions) = 1;
+ RGN_BLOCKS (nr_regions) = nr_regions;
+ CONTAINING_RGN (bb->index) = nr_regions;
+ BLOCK_TO_BB (bb->index) = 0;
+ nr_regions++;
}
- nr_regions = n_basic_blocks;
}
/* Update number of blocks and the estimate for number of insns
@@ -576,17 +575,17 @@ too_large (block, num_bbs, num_insns)
/* Update_loop_relations(blk, hdr): Check if the loop headed by max_hdr[blk]
is still an inner loop. Put in max_hdr[blk] the header of the most inner
loop containing blk. */
-#define UPDATE_LOOP_RELATIONS(blk, hdr) \
-{ \
- if (max_hdr[blk] == -1) \
- max_hdr[blk] = hdr; \
- else if (dfs_nr[max_hdr[blk]] > dfs_nr[hdr]) \
- RESET_BIT (inner, hdr); \
- else if (dfs_nr[max_hdr[blk]] < dfs_nr[hdr]) \
- { \
- RESET_BIT (inner,max_hdr[blk]); \
- max_hdr[blk] = hdr; \
- } \
+#define UPDATE_LOOP_RELATIONS(blk, hdr) \
+{ \
+ if (max_hdr[blk] == -1) \
+ max_hdr[blk] = hdr; \
+ else if (dfs_nr[max_hdr[blk]] > dfs_nr[hdr]) \
+ RESET_BIT (inner, hdr); \
+ else if (dfs_nr[max_hdr[blk]] < dfs_nr[hdr]) \
+ { \
+ RESET_BIT (inner,max_hdr[blk]); \
+ max_hdr[blk] = hdr; \
+ } \
}
/* Find regions for interblock scheduling.
@@ -622,7 +621,7 @@ too_large (block, num_bbs, num_insns)
static void
find_rgns (edge_list, dom)
struct edge_list *edge_list;
- sbitmap *dom;
+ dominance_info dom;
{
int *max_hdr, *dfs_nr, *stack, *degree;
char no_loops = 1;
@@ -630,6 +629,7 @@ find_rgns (edge_list, dom)
int count = 0, sp, idx = 0, current_edge = out_edges[0];
int num_bbs, num_insns, unreachable;
int too_large_failure;
+ basic_block bb;
/* Note if an edge has been passed. */
sbitmap passed;
@@ -637,7 +637,7 @@ find_rgns (edge_list, dom)
/* Note if a block is a natural loop header. */
sbitmap header;
- /* Note if a block is an natural inner loop header. */
+ /* Note if a block is a natural inner loop header. */
sbitmap inner;
/* Note if a block is in the block queue. */
@@ -658,26 +658,26 @@ find_rgns (edge_list, dom)
STACK, SP and DFS_NR are only used during the first traversal. */
/* Allocate and initialize variables for the first traversal. */
- max_hdr = (int *) xmalloc (n_basic_blocks * sizeof (int));
- dfs_nr = (int *) xcalloc (n_basic_blocks, sizeof (int));
+ max_hdr = (int *) xmalloc (last_basic_block * sizeof (int));
+ dfs_nr = (int *) xcalloc (last_basic_block, sizeof (int));
stack = (int *) xmalloc (nr_edges * sizeof (int));
- inner = sbitmap_alloc (n_basic_blocks);
+ inner = sbitmap_alloc (last_basic_block);
sbitmap_ones (inner);
- header = sbitmap_alloc (n_basic_blocks);
+ header = sbitmap_alloc (last_basic_block);
sbitmap_zero (header);
passed = sbitmap_alloc (nr_edges);
sbitmap_zero (passed);
- in_queue = sbitmap_alloc (n_basic_blocks);
+ in_queue = sbitmap_alloc (last_basic_block);
sbitmap_zero (in_queue);
- in_stack = sbitmap_alloc (n_basic_blocks);
+ in_stack = sbitmap_alloc (last_basic_block);
sbitmap_zero (in_stack);
- for (i = 0; i < n_basic_blocks; i++)
+ for (i = 0; i < last_basic_block; i++)
max_hdr[i] = -1;
/* DFS traversal to find inner loops in the cfg. */
@@ -771,8 +771,8 @@ find_rgns (edge_list, dom)
the entry node by placing a nonzero value in dfs_nr. Thus if
dfs_nr is zero for any block, then it must be unreachable. */
unreachable = 0;
- for (i = 0; i < n_basic_blocks; i++)
- if (dfs_nr[i] == 0)
+ FOR_EACH_BB (bb)
+ if (dfs_nr[bb->index] == 0)
{
unreachable = 1;
break;
@@ -782,8 +782,8 @@ find_rgns (edge_list, dom)
to hold degree counts. */
degree = dfs_nr;
- for (i = 0; i < n_basic_blocks; i++)
- degree[i] = 0;
+ FOR_EACH_BB (bb)
+ degree[bb->index] = 0;
for (i = 0; i < num_edges; i++)
{
edge e = INDEX_EDGE (edge_list, i);
@@ -808,12 +808,12 @@ find_rgns (edge_list, dom)
/* Find blocks which are inner loop headers. We still have non-reducible
loops to consider at this point. */
- for (i = 0; i < n_basic_blocks; i++)
+ FOR_EACH_BB (bb)
{
- if (TEST_BIT (header, i) && TEST_BIT (inner, i))
+ if (TEST_BIT (header, bb->index) && TEST_BIT (inner, bb->index))
{
edge e;
- int j;
+ basic_block jbb;
/* Now check that the loop is reducible. We do this separate
from finding inner loops so that we do not find a reducible
@@ -826,15 +826,15 @@ find_rgns (edge_list, dom)
If there exists a block that is not dominated by the loop
header, then the block is reachable from outside the loop
and thus the loop is not a natural loop. */
- for (j = 0; j < n_basic_blocks; j++)
+ FOR_EACH_BB (jbb)
{
/* First identify blocks in the loop, except for the loop
entry block. */
- if (i == max_hdr[j] && i != j)
+ if (bb->index == max_hdr[jbb->index] && bb != jbb)
{
/* Now verify that the block is dominated by the loop
header. */
- if (!TEST_BIT (dom[j], i))
+ if (!dominated_by_p (dom, jbb, bb))
break;
}
}
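
The reducibility check above now queries the dominator relation through the opaque dominance_info handle with dominated_by_p, where the old code indexed a row of a caller-built sbitmap matrix. A sketch of the before/after query, assuming the GCC 3.3-era dominance API (block and header stand for arbitrary basic_block variables):

/* Old: sbitmap *dom;  one matrix row per block index.
     if (TEST_BIT (dom[block->index], header->index)) ...
   New: opaque handle plus a predicate.  */
if (dominated_by_p (dom, block, header))
  {
    /* BLOCK is dominated by HEADER, hence inside HEADER's
       natural loop.  */
  }
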
@@ -842,25 +842,25 @@ find_rgns (edge_list, dom)
/* If we exited the loop early, then I is the header of
a non-reducible loop and we should quit processing it
now. */
- if (j != n_basic_blocks)
+ if (jbb != EXIT_BLOCK_PTR)
continue;
/* I is a header of an inner loop, or block 0 in a subroutine
with no loops at all. */
head = tail = -1;
too_large_failure = 0;
- loop_head = max_hdr[i];
+ loop_head = max_hdr[bb->index];
/* Decrease degree of all I's successors for topological
ordering. */
- for (e = BASIC_BLOCK (i)->succ; e; e = e->succ_next)
+ for (e = bb->succ; e; e = e->succ_next)
if (e->dest != EXIT_BLOCK_PTR)
--degree[e->dest->index];
/* Estimate # insns, and count # blocks in the region. */
num_bbs = 1;
- num_insns = (INSN_LUID (BLOCK_END (i))
- - INSN_LUID (BLOCK_HEAD (i)));
+ num_insns = (INSN_LUID (bb->end)
+ - INSN_LUID (bb->head));
/* Find all loop latches (blocks with back edges to the loop
header) or all the leaf blocks if the cfg has no loops.
@@ -868,17 +868,17 @@ find_rgns (edge_list, dom)
Place those blocks into the queue. */
if (no_loops)
{
- for (j = 0; j < n_basic_blocks; j++)
+ FOR_EACH_BB (jbb)
/* Leaf nodes have only a single successor which must
be EXIT_BLOCK. */
- if (BASIC_BLOCK (j)->succ
- && BASIC_BLOCK (j)->succ->dest == EXIT_BLOCK_PTR
- && BASIC_BLOCK (j)->succ->succ_next == NULL)
+ if (jbb->succ
+ && jbb->succ->dest == EXIT_BLOCK_PTR
+ && jbb->succ->succ_next == NULL)
{
- queue[++tail] = j;
- SET_BIT (in_queue, j);
+ queue[++tail] = jbb->index;
+ SET_BIT (in_queue, jbb->index);
- if (too_large (j, &num_bbs, &num_insns))
+ if (too_large (jbb->index, &num_bbs, &num_insns))
{
too_large_failure = 1;
break;
@@ -889,14 +889,14 @@ find_rgns (edge_list, dom)
{
edge e;
- for (e = BASIC_BLOCK (i)->pred; e; e = e->pred_next)
+ for (e = bb->pred; e; e = e->pred_next)
{
if (e->src == ENTRY_BLOCK_PTR)
continue;
node = e->src->index;
- if (max_hdr[node] == loop_head && node != i)
+ if (max_hdr[node] == loop_head && node != bb->index)
{
/* This is a loop latch. */
queue[++tail] = node;
@@ -958,7 +958,7 @@ find_rgns (edge_list, dom)
tail = -1;
break;
}
- else if (!TEST_BIT (in_queue, node) && node != i)
+ else if (!TEST_BIT (in_queue, node) && node != bb->index)
{
queue[++tail] = node;
SET_BIT (in_queue, node);
@@ -975,12 +975,12 @@ find_rgns (edge_list, dom)
if (tail >= 0 && !too_large_failure)
{
/* Place the loop header into list of region blocks. */
- degree[i] = -1;
- rgn_bb_table[idx] = i;
+ degree[bb->index] = -1;
+ rgn_bb_table[idx] = bb->index;
RGN_NR_BLOCKS (nr_regions) = num_bbs;
RGN_BLOCKS (nr_regions) = idx++;
- CONTAINING_RGN (i) = nr_regions;
- BLOCK_TO_BB (i) = count = 0;
+ CONTAINING_RGN (bb->index) = nr_regions;
+ BLOCK_TO_BB (bb->index) = count = 0;
/* Remove blocks from queue[] when their in degree
becomes zero. Repeat until no blocks are left on the
@@ -1019,14 +1019,14 @@ find_rgns (edge_list, dom)
/* Any block that did not end up in a region is placed into a region
by itself. */
- for (i = 0; i < n_basic_blocks; i++)
- if (degree[i] >= 0)
+ FOR_EACH_BB (bb)
+ if (degree[bb->index] >= 0)
{
- rgn_bb_table[idx] = i;
+ rgn_bb_table[idx] = bb->index;
RGN_NR_BLOCKS (nr_regions) = 1;
RGN_BLOCKS (nr_regions) = idx++;
- CONTAINING_RGN (i) = nr_regions++;
- BLOCK_TO_BB (i) = 0;
+ CONTAINING_RGN (bb->index) = nr_regions++;
+ BLOCK_TO_BB (bb->index) = 0;
}
free (max_hdr);
@@ -1093,7 +1093,7 @@ compute_dom_prob_ps (bb)
if (CONTAINING_RGN (TO_BLOCK (nxt_out_edge)) !=
CONTAINING_RGN (BB_TO_BLOCK (bb)))
++nr_rgn_out_edges;
- SET_BIT (pot_split[bb], EDGE_TO_BIT (nxt_out_edge));
+ SET_BIT (pot_split[bb], EDGE_TO_BIT (nxt_out_edge));
nxt_out_edge = NEXT_OUT (nxt_out_edge);
}
@@ -1194,8 +1194,8 @@ compute_trg_info (trg)
add the TO block to the update block list. This list can end
up with a lot of duplicates. We need to weed them out to avoid
overrunning the end of the bblst_table. */
- update_blocks = (char *) alloca (n_basic_blocks);
- memset (update_blocks, 0, n_basic_blocks);
+ update_blocks = (char *) alloca (last_basic_block);
+ memset (update_blocks, 0, last_basic_block);
update_idx = 0;
for (j = 0; j < el.nr_members; j++)
@@ -1561,17 +1561,17 @@ enum INSN_TRAP_CLASS
/* Non-zero if block bb_to is equal to, or reachable from block bb_from. */
#define IS_REACHABLE(bb_from, bb_to) \
-(bb_from == bb_to \
+ (bb_from == bb_to \
|| IS_RGN_ENTRY (bb_from) \
- || (TEST_BIT (ancestor_edges[bb_to], \
- EDGE_TO_BIT (IN_EDGES (BB_TO_BLOCK (bb_from))))))
+ || (TEST_BIT (ancestor_edges[bb_to], \
+ EDGE_TO_BIT (IN_EDGES (BB_TO_BLOCK (bb_from))))))
/* Non-zero iff the address is comprised of at most 1 register. */
#define CONST_BASED_ADDRESS_P(x) \
(GET_CODE (x) == REG \
- || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS \
- || (GET_CODE (x) == LO_SUM)) \
- && (CONSTANT_P (XEXP (x, 0)) \
+ || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS \
+ || (GET_CODE (x) == LO_SUM)) \
+ && (CONSTANT_P (XEXP (x, 0)) \
|| CONSTANT_P (XEXP (x, 1)))))
/* Turns on the fed_by_spec_load flag for insns fed by load_insn. */
@@ -2030,7 +2030,7 @@ init_ready_list (ready)
next = NEXT_INSN (insn);
if (INSN_DEP_COUNT (insn) == 0
- && (SCHED_GROUP_P (next) == 0 || ! INSN_P (next)))
+ && (! INSN_P (next) || SCHED_GROUP_P (next) == 0))
ready_add (ready, insn);
if (!(SCHED_GROUP_P (insn)))
target_n_insns++;
@@ -2057,7 +2057,14 @@ init_ready_list (ready)
if (!CANT_MOVE (insn)
&& (!IS_SPECULATIVE_INSN (insn)
- || (insn_issue_delay (insn) <= 3
+ || ((((!targetm.sched.use_dfa_pipeline_interface
+ || !(*targetm.sched.use_dfa_pipeline_interface) ())
+ && insn_issue_delay (insn) <= 3)
+ || (targetm.sched.use_dfa_pipeline_interface
+ && (*targetm.sched.use_dfa_pipeline_interface) ()
+ && (recog_memoized (insn) < 0
+ || min_insn_conflict_delay (curr_state,
+ insn, insn) <= 3)))
&& check_live (insn, bb_src)
&& is_exception_free (insn, bb_src, target_bb))))
{
@@ -2069,8 +2076,8 @@ init_ready_list (ready)
next = next_nonnote_insn (insn);
if (INSN_DEP_COUNT (insn) == 0
&& (! next
- || SCHED_GROUP_P (next) == 0
- || ! INSN_P (next)))
+ || ! INSN_P (next)
+ || SCHED_GROUP_P (next) == 0))
ready_add (ready, insn);
}
}
@@ -2165,7 +2172,15 @@ new_ready (next)
&& (!IS_VALID (INSN_BB (next))
|| CANT_MOVE (next)
|| (IS_SPECULATIVE_INSN (next)
- && (insn_issue_delay (next) > 3
+ && (0
+ || (targetm.sched.use_dfa_pipeline_interface
+ && (*targetm.sched.use_dfa_pipeline_interface) ()
+ && recog_memoized (next) >= 0
+ && min_insn_conflict_delay (curr_state, next,
+ next) > 3)
+ || ((!targetm.sched.use_dfa_pipeline_interface
+ || !(*targetm.sched.use_dfa_pipeline_interface) ())
+ && insn_issue_delay (next) > 3)
|| !check_live (next, INSN_BB (next))
|| !is_exception_free (next, INSN_BB (next), target_bb)))))
return 0;
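
init_ready_list and new_ready above both gain the same two-armed test: when the target enables the DFA pipeline description via the targetm.sched.use_dfa_pipeline_interface hook (a possibly-null function pointer, hence the double check), issue delay is measured with min_insn_conflict_delay, treating unrecognizable insns as conflict-free; otherwise the old function-unit model's insn_issue_delay still applies. A sketch of that dispatch folded into one predicate — the helper name is hypothetical, and the threshold of 3 mirrors the code above:

/* Nonzero if INSN's issue delay is small enough to allow
   speculative motion, under whichever pipeline model the
   target selected.  */
static int
issue_delay_small_p (insn)
     rtx insn;
{
  if (targetm.sched.use_dfa_pipeline_interface
      && (*targetm.sched.use_dfa_pipeline_interface) ())
    /* DFA model: no reservation means no conflict; otherwise
       ask the automaton.  */
    return (recog_memoized (insn) < 0
            || min_insn_conflict_delay (curr_state, insn, insn) <= 3);

  /* Old function-unit model.  */
  return insn_issue_delay (insn) <= 3;
}
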
@@ -2481,7 +2496,7 @@ propagate_deps (bb, pred_deps)
succ_deps->last_pending_memory_flush
= concat_INSN_LIST (pred_deps->last_pending_memory_flush,
succ_deps->last_pending_memory_flush);
-
+
succ_deps->pending_lists_length += pred_deps->pending_lists_length;
succ_deps->pending_flush_length += pred_deps->pending_flush_length;
@@ -2589,14 +2604,27 @@ debug_dependencies ()
fprintf (sched_dump, "\n;; --- Region Dependences --- b %d bb %d \n",
BB_TO_BLOCK (bb), bb);
- fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%11s%6s\n",
- "insn", "code", "bb", "dep", "prio", "cost", "blockage", "units");
- fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%11s%6s\n",
- "----", "----", "--", "---", "----", "----", "--------", "-----");
+ if (targetm.sched.use_dfa_pipeline_interface
+ && (*targetm.sched.use_dfa_pipeline_interface) ())
+ {
+ fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%14s\n",
+ "insn", "code", "bb", "dep", "prio", "cost",
+ "reservation");
+ fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%14s\n",
+ "----", "----", "--", "---", "----", "----",
+ "-----------");
+ }
+ else
+ {
+ fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%11s%6s\n",
+ "insn", "code", "bb", "dep", "prio", "cost", "blockage", "units");
+ fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%11s%6s\n",
+ "----", "----", "--", "---", "----", "----", "--------", "-----");
+ }
+
for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
{
rtx link;
- int unit, range;
if (! INSN_P (insn))
{
@@ -2616,22 +2644,46 @@ debug_dependencies ()
continue;
}
- unit = insn_unit (insn);
- range = (unit < 0
- || function_units[unit].blockage_range_function == 0) ? 0 :
- function_units[unit].blockage_range_function (insn);
- fprintf (sched_dump,
- ";; %s%5d%6d%6d%6d%6d%6d %3d -%3d ",
- (SCHED_GROUP_P (insn) ? "+" : " "),
- INSN_UID (insn),
- INSN_CODE (insn),
- INSN_BB (insn),
- INSN_DEP_COUNT (insn),
- INSN_PRIORITY (insn),
- insn_cost (insn, 0, 0),
- (int) MIN_BLOCKAGE_COST (range),
- (int) MAX_BLOCKAGE_COST (range));
- insn_print_units (insn);
+ if (targetm.sched.use_dfa_pipeline_interface
+ && (*targetm.sched.use_dfa_pipeline_interface) ())
+ {
+ fprintf (sched_dump,
+ ";; %s%5d%6d%6d%6d%6d%6d ",
+ (SCHED_GROUP_P (insn) ? "+" : " "),
+ INSN_UID (insn),
+ INSN_CODE (insn),
+ INSN_BB (insn),
+ INSN_DEP_COUNT (insn),
+ INSN_PRIORITY (insn),
+ insn_cost (insn, 0, 0));
+
+ if (recog_memoized (insn) < 0)
+ fprintf (sched_dump, "nothing");
+ else
+ print_reservation (sched_dump, insn);
+ }
+ else
+ {
+ int unit = insn_unit (insn);
+ int range
+ = (unit < 0
+ || function_units[unit].blockage_range_function == 0
+ ? 0
+ : function_units[unit].blockage_range_function (insn));
+ fprintf (sched_dump,
+ ";; %s%5d%6d%6d%6d%6d%6d %3d -%3d ",
+ (SCHED_GROUP_P (insn) ? "+" : " "),
+ INSN_UID (insn),
+ INSN_CODE (insn),
+ INSN_BB (insn),
+ INSN_DEP_COUNT (insn),
+ INSN_PRIORITY (insn),
+ insn_cost (insn, 0, 0),
+ (int) MIN_BLOCKAGE_COST (range),
+ (int) MAX_BLOCKAGE_COST (range));
+ insn_print_units (insn);
+ }
+
fprintf (sched_dump, "\t: ");
for (link = INSN_DEPEND (insn); link; link = XEXP (link, 1))
fprintf (sched_dump, "%d ", INSN_UID (XEXP (link, 0)));
@@ -2835,8 +2887,8 @@ init_regions ()
nr_regions = 0;
rgn_table = (region *) xmalloc ((n_basic_blocks) * sizeof (region));
rgn_bb_table = (int *) xmalloc ((n_basic_blocks) * sizeof (int));
- block_to_bb = (int *) xmalloc ((n_basic_blocks) * sizeof (int));
- containing_rgn = (int *) xmalloc ((n_basic_blocks) * sizeof (int));
+ block_to_bb = (int *) xmalloc ((last_basic_block) * sizeof (int));
+ containing_rgn = (int *) xmalloc ((last_basic_block) * sizeof (int));
/* Compute regions for scheduling. */
if (reload_completed
@@ -2854,11 +2906,9 @@ init_regions ()
}
else
{
- sbitmap *dom;
+ dominance_info dom;
struct edge_list *edge_list;
- dom = sbitmap_vector_alloc (n_basic_blocks, n_basic_blocks);
-
/* The scheduler runs after flow; therefore, we can't blindly call
back into find_basic_blocks since doing so could invalidate the
info in global_live_at_start.
@@ -2873,7 +2923,7 @@ init_regions ()
edge_list = create_edge_list ();
/* Compute the dominators and post dominators. */
- calculate_dominance_info (NULL, dom, CDI_DOMINATORS);
+ dom = calculate_dominance_info (CDI_DOMINATORS);
/* build_control_flow will return nonzero if it detects unreachable
blocks or any other irregularity with the cfg which prevents
@@ -2891,14 +2941,14 @@ init_regions ()
/* For now. This will move as more and more of haifa is converted
to using the cfg code in flow.c. */
- free (dom);
+ free_dominance_info (dom);
}
}
if (CHECK_DEAD_NOTES)
{
- blocks = sbitmap_alloc (n_basic_blocks);
+ blocks = sbitmap_alloc (last_basic_block);
deaths_in_region = (int *) xmalloc (sizeof (int) * nr_regions);
/* Remove all death notes from the subroutine. */
for (rgn = 0; rgn < nr_regions; rgn++)
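
With the conversion above, init_regions no longer allocates the n_basic_blocks-by-n_basic_blocks sbitmap matrix itself: calculate_dominance_info now returns an opaque dominance_info, which must be released with free_dominance_info rather than a bare free. A sketch of the new lifecycle, same API assumptions:

/* Dominance data lifecycle after the conversion.  */
dominance_info dom = calculate_dominance_info (CDI_DOMINATORS);

/* ... queries go through dominated_by_p (dom, bb1, bb2) ... */

free_dominance_info (dom);  /* not free (dom); the handle owns its storage */
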
@@ -2927,14 +2977,13 @@ schedule_insns (dump_file)
sbitmap large_region_blocks, blocks;
int rgn;
int any_large_regions;
+ basic_block bb;
/* Taking care of this degenerate case makes the rest of
this code simpler. */
if (n_basic_blocks == 0)
return;
- scope_to_insns_initialize ();
-
nr_inter = 0;
nr_spec = 0;
@@ -2943,7 +2992,7 @@ schedule_insns (dump_file)
init_regions ();
current_sched_info = &region_sched_info;
-
+
/* Schedule every region in the subroutine. */
for (rgn = 0; rgn < nr_regions; rgn++)
schedule_region (rgn);
@@ -2952,7 +3001,7 @@ schedule_insns (dump_file)
first so that we can verify that live_at_start didn't change. Then
do all other blocks. */
/* ??? There is an outside possibility that update_life_info, or more
- to the point propagate_block, could get called with non-zero flags
+ to the point propagate_block, could get called with nonzero flags
more than once for one basic block. This would be kinda bad if it
were to happen, since REG_INFO would be accumulated twice for the
block, and we'd have twice the REG_DEAD notes.
@@ -2962,13 +3011,15 @@ schedule_insns (dump_file)
best way to test for this kind of thing... */
allocate_reg_life_data ();
- compute_bb_for_insn (get_max_uid ());
+ compute_bb_for_insn ();
any_large_regions = 0;
- large_region_blocks = sbitmap_alloc (n_basic_blocks);
- sbitmap_ones (large_region_blocks);
+ large_region_blocks = sbitmap_alloc (last_basic_block);
+ sbitmap_zero (large_region_blocks);
+ FOR_EACH_BB (bb)
+ SET_BIT (large_region_blocks, bb->index);
- blocks = sbitmap_alloc (n_basic_blocks);
+ blocks = sbitmap_alloc (last_basic_block);
sbitmap_zero (blocks);
/* Update life information. For regions consisting of multiple blocks
@@ -3021,8 +3072,6 @@ schedule_insns (dump_file)
if (write_symbols != NO_DEBUG)
rm_redundant_line_notes ();
- scope_to_insns_finalize ();
-
if (sched_verbose)
{
if (reload_completed == 0 && flag_schedule_interblock)