author      kan <kan@FreeBSD.org>    2004-07-28 03:11:36 +0000
committer   kan <kan@FreeBSD.org>    2004-07-28 03:11:36 +0000
commit      5e00ec74d8ce58f99801200d4d3d0412c7cc1b28 (patch)
tree        052f4bb635f2bea2c5e350bd60c902be100a0d1e /contrib/gcc/sched-rgn.c
parent      87b8398a7d9f9bf0e28bbcd54a4fc27db2125f38 (diff)
download    FreeBSD-src-5e00ec74d8ce58f99801200d4d3d0412c7cc1b28.zip
            FreeBSD-src-5e00ec74d8ce58f99801200d4d3d0412c7cc1b28.tar.gz
Gcc 3.4.2 20040728.
Diffstat (limited to 'contrib/gcc/sched-rgn.c')
-rw-r--r--  contrib/gcc/sched-rgn.c | 673
1 file changed, 175 insertions, 498 deletions
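
Most of the 498 deleted lines below come from two mechanical cleanups in the GCC 3.4 sources rather than from scheduler changes: the PARAMS compatibility macro (which expands to its argument list on ISO C compilers and to an empty list on K&R ones) was dropped, so every prototype and old-style definition becomes a plain ISO C declaration, and the head/end fields of a basic block are now reached through the BB_HEAD/BB_END accessor macros. As a rough sketch of the prototype rewrite, using new_edge from the hunks below:

    /* GCC 3.3 style: prototype wrapped in PARAMS, K&R-style definition.  */
    static void new_edge PARAMS ((int, int));

    static void
    new_edge (source, target)
         int source, target;
    {
      /* ... function body unchanged ...  */
    }

    /* GCC 3.4 style: plain ISO C prototype and definition.  */
    static void new_edge (int, int);

    static void
    new_edge (int source, int target)
    {
      /* ... function body unchanged ...  */
    }

The larger hunks further down remove whole helpers (may_trap_exp, haifa_classify_insn) and the explicit SCHED_GROUP_P walking; those are substantive 3.4 changes, not just reformatting.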
diff --git a/contrib/gcc/sched-rgn.c b/contrib/gcc/sched-rgn.c
index 935b8e2..b8e474d 100644
--- a/contrib/gcc/sched-rgn.c
+++ b/contrib/gcc/sched-rgn.c
@@ -1,6 +1,6 @@
/* Instruction scheduling pass.
Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
- 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
+ 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
and currently maintained by, Jim Wilson (wilson@cygnus.com)
@@ -47,6 +47,8 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
#include "config.h"
#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
#include "toplev.h"
#include "rtl.h"
#include "tm_p.h"
@@ -114,9 +116,9 @@ static int *out_edges;
#define IN_EDGES(block) (in_edges[block])
#define OUT_EDGES(block) (out_edges[block])
-static int is_cfg_nonregular PARAMS ((void));
-static int build_control_flow PARAMS ((struct edge_list *));
-static void new_edge PARAMS ((int, int));
+static int is_cfg_nonregular (void);
+static int build_control_flow (struct edge_list *);
+static void new_edge (int, int);
/* A region is the main entity for interblock scheduling: insns
are allowed to move between blocks in the same region, along
@@ -140,7 +142,7 @@ static int *rgn_bb_table;
/* Topological order of blocks in the region (if b2 is reachable from
b1, block_to_bb[b2] > block_to_bb[b1]). Note: A basic block is
always referred to by either block or b, while its topological
- order name (in the region) is refered to by bb. */
+ order name (in the region) is referred to by bb. */
static int *block_to_bb;
/* The number of the region containing a block. */
@@ -151,12 +153,12 @@ static int *containing_rgn;
#define BLOCK_TO_BB(block) (block_to_bb[block])
#define CONTAINING_RGN(block) (containing_rgn[block])
-void debug_regions PARAMS ((void));
-static void find_single_block_region PARAMS ((void));
-static void find_rgns PARAMS ((struct edge_list *, dominance_info));
-static int too_large PARAMS ((int, int *, int *));
+void debug_regions (void);
+static void find_single_block_region (void);
+static void find_rgns (struct edge_list *);
+static int too_large (int, int *, int *);
-extern void debug_live PARAMS ((int, int));
+extern void debug_live (int, int);
/* Blocks of the current region being scheduled. */
static int current_nr_blocks;
@@ -173,10 +175,9 @@ typedef struct
bitlst;
static int bitlst_table_last;
-static int bitlst_table_size;
static int *bitlst_table;
-static void extract_bitlst PARAMS ((sbitmap, bitlst *));
+static void extract_bitlst (sbitmap, bitlst *);
/* Target info declarations.
@@ -217,10 +218,10 @@ static int target_bb;
typedef bitlst edgelst;
/* Target info functions. */
-static void split_edges PARAMS ((int, int, edgelst *));
-static void compute_trg_info PARAMS ((int));
-void debug_candidate PARAMS ((int));
-void debug_candidates PARAMS ((int));
+static void split_edges (int, int, edgelst *);
+static void compute_trg_info (int);
+void debug_candidate (int);
+void debug_candidates (int);
/* Dominators array: dom[i] contains the sbitmap of dominators of
bb i in the region. */
@@ -268,42 +269,40 @@ static edgeset *pot_split;
/* For every bb, a set of its ancestor edges. */
static edgeset *ancestor_edges;
-static void compute_dom_prob_ps PARAMS ((int));
+static void compute_dom_prob_ps (int);
#define INSN_PROBABILITY(INSN) (SRC_PROB (BLOCK_TO_BB (BLOCK_NUM (INSN))))
#define IS_SPECULATIVE_INSN(INSN) (IS_SPECULATIVE (BLOCK_TO_BB (BLOCK_NUM (INSN))))
#define INSN_BB(INSN) (BLOCK_TO_BB (BLOCK_NUM (INSN)))
/* Parameters affecting the decision of rank_for_schedule().
- ??? Nope. But MIN_PROBABILITY is used in copmute_trg_info. */
+ ??? Nope. But MIN_PROBABILITY is used in compute_trg_info. */
#define MIN_PROBABILITY 40
/* Speculative scheduling functions. */
-static int check_live_1 PARAMS ((int, rtx));
-static void update_live_1 PARAMS ((int, rtx));
-static int check_live PARAMS ((rtx, int));
-static void update_live PARAMS ((rtx, int));
-static void set_spec_fed PARAMS ((rtx));
-static int is_pfree PARAMS ((rtx, int, int));
-static int find_conditional_protection PARAMS ((rtx, int));
-static int is_conditionally_protected PARAMS ((rtx, int, int));
-static int may_trap_exp PARAMS ((rtx, int));
-static int haifa_classify_insn PARAMS ((rtx));
-static int is_prisky PARAMS ((rtx, int, int));
-static int is_exception_free PARAMS ((rtx, int, int));
-
-static bool sets_likely_spilled PARAMS ((rtx));
-static void sets_likely_spilled_1 PARAMS ((rtx, rtx, void *));
-static void add_branch_dependences PARAMS ((rtx, rtx));
-static void compute_block_backward_dependences PARAMS ((int));
-void debug_dependencies PARAMS ((void));
-
-static void init_regions PARAMS ((void));
-static void schedule_region PARAMS ((int));
-static rtx concat_INSN_LIST PARAMS ((rtx, rtx));
-static void concat_insn_mem_list PARAMS ((rtx, rtx, rtx *, rtx *));
-static void propagate_deps PARAMS ((int, struct deps *));
-static void free_pending_lists PARAMS ((void));
+static int check_live_1 (int, rtx);
+static void update_live_1 (int, rtx);
+static int check_live (rtx, int);
+static void update_live (rtx, int);
+static void set_spec_fed (rtx);
+static int is_pfree (rtx, int, int);
+static int find_conditional_protection (rtx, int);
+static int is_conditionally_protected (rtx, int, int);
+static int is_prisky (rtx, int, int);
+static int is_exception_free (rtx, int, int);
+
+static bool sets_likely_spilled (rtx);
+static void sets_likely_spilled_1 (rtx, rtx, void *);
+static void add_branch_dependences (rtx, rtx);
+static void compute_block_backward_dependences (int);
+void debug_dependencies (void);
+
+static void init_regions (void);
+static void schedule_region (int);
+static rtx concat_INSN_LIST (rtx, rtx);
+static void concat_insn_mem_list (rtx, rtx, rtx *, rtx *);
+static void propagate_deps (int, struct deps *);
+static void free_pending_lists (void);
/* Functions for construction of the control flow graph. */
@@ -314,7 +313,7 @@ static void free_pending_lists PARAMS ((void));
have nonlocal gotos. */
static int
-is_cfg_nonregular ()
+is_cfg_nonregular (void)
{
basic_block b;
rtx insn;
@@ -344,7 +343,7 @@ is_cfg_nonregular ()
the cfg not well structured. */
/* Check for labels referred to other thn by jumps. */
FOR_EACH_BB (b)
- for (insn = b->head;; insn = NEXT_INSN (insn))
+ for (insn = BB_HEAD (b); ; insn = NEXT_INSN (insn))
{
code = GET_CODE (insn);
if (GET_RTX_CLASS (code) == 'i' && code != JUMP_INSN)
@@ -358,7 +357,7 @@ is_cfg_nonregular ()
return 1;
}
- if (insn == b->end)
+ if (insn == BB_END (b))
break;
}
@@ -375,8 +374,7 @@ is_cfg_nonregular ()
prevent cross block scheduling. */
static int
-build_control_flow (edge_list)
- struct edge_list *edge_list;
+build_control_flow (struct edge_list *edge_list)
{
int i, unreachable, num_edges;
basic_block b;
@@ -400,9 +398,9 @@ build_control_flow (edge_list)
}
/* ??? We can kill these soon. */
- in_edges = (int *) xcalloc (last_basic_block, sizeof (int));
- out_edges = (int *) xcalloc (last_basic_block, sizeof (int));
- edge_table = (haifa_edge *) xcalloc (num_edges, sizeof (haifa_edge));
+ in_edges = xcalloc (last_basic_block, sizeof (int));
+ out_edges = xcalloc (last_basic_block, sizeof (int));
+ edge_table = xcalloc (num_edges, sizeof (haifa_edge));
nr_edges = 0;
for (i = 0; i < num_edges; i++)
@@ -427,8 +425,7 @@ build_control_flow (edge_list)
integer lists. */
static void
-new_edge (source, target)
- int source, target;
+new_edge (int source, int target)
{
int e, next_edge;
int curr_edge, fst_edge;
@@ -482,9 +479,7 @@ new_edge (source, target)
/* Translate a bit-set SET to a list BL of the bit-set members. */
static void
-extract_bitlst (set, bl)
- sbitmap set;
- bitlst *bl;
+extract_bitlst (sbitmap set, bitlst *bl)
{
int i;
@@ -508,7 +503,7 @@ extract_bitlst (set, bl)
/* Print the regions, for debugging purposes. Callable from debugger. */
void
-debug_regions ()
+debug_regions (void)
{
int rgn, bb;
@@ -538,7 +533,7 @@ debug_regions ()
scheduling. */
static void
-find_single_block_region ()
+find_single_block_region (void)
{
basic_block bb;
@@ -560,12 +555,11 @@ find_single_block_region ()
scheduling (compile time considerations), otherwise return 0. */
static int
-too_large (block, num_bbs, num_insns)
- int block, *num_bbs, *num_insns;
+too_large (int block, int *num_bbs, int *num_insns)
{
(*num_bbs)++;
- (*num_insns) += (INSN_LUID (BLOCK_END (block)) -
- INSN_LUID (BLOCK_HEAD (block)));
+ (*num_insns) += (INSN_LUID (BB_END (BASIC_BLOCK (block))) -
+ INSN_LUID (BB_HEAD (BASIC_BLOCK (block))));
if ((*num_bbs > MAX_RGN_BLOCKS) || (*num_insns > MAX_RGN_INSNS))
return 1;
else
@@ -619,14 +613,13 @@ too_large (block, num_bbs, num_insns)
of edge tables. That would simplify it somewhat. */
static void
-find_rgns (edge_list, dom)
- struct edge_list *edge_list;
- dominance_info dom;
+find_rgns (struct edge_list *edge_list)
{
int *max_hdr, *dfs_nr, *stack, *degree;
char no_loops = 1;
int node, child, loop_head, i, head, tail;
- int count = 0, sp, idx = 0, current_edge = out_edges[0];
+ int count = 0, sp, idx = 0;
+ int current_edge = out_edges[ENTRY_BLOCK_PTR->succ->dest->index];
int num_bbs, num_insns, unreachable;
int too_large_failure;
basic_block bb;
@@ -658,9 +651,9 @@ find_rgns (edge_list, dom)
STACK, SP and DFS_NR are only used during the first traversal. */
/* Allocate and initialize variables for the first traversal. */
- max_hdr = (int *) xmalloc (last_basic_block * sizeof (int));
- dfs_nr = (int *) xcalloc (last_basic_block, sizeof (int));
- stack = (int *) xmalloc (nr_edges * sizeof (int));
+ max_hdr = xmalloc (last_basic_block * sizeof (int));
+ dfs_nr = xcalloc (last_basic_block, sizeof (int));
+ stack = xmalloc (nr_edges * sizeof (int));
inner = sbitmap_alloc (last_basic_block);
sbitmap_ones (inner);
@@ -801,10 +794,10 @@ find_rgns (edge_list, dom)
if (no_loops)
SET_BIT (header, 0);
- /* Second travsersal:find reducible inner loops and topologically sort
+ /* Second traversal:find reducible inner loops and topologically sort
block of each region. */
- queue = (int *) xmalloc (n_basic_blocks * sizeof (int));
+ queue = xmalloc (n_basic_blocks * sizeof (int));
/* Find blocks which are inner loop headers. We still have non-reducible
loops to consider at this point. */
@@ -834,7 +827,7 @@ find_rgns (edge_list, dom)
{
/* Now verify that the block is dominated by the loop
header. */
- if (!dominated_by_p (dom, jbb, bb))
+ if (!dominated_by_p (CDI_DOMINATORS, jbb, bb))
break;
}
}
@@ -859,8 +852,8 @@ find_rgns (edge_list, dom)
/* Estimate # insns, and count # blocks in the region. */
num_bbs = 1;
- num_insns = (INSN_LUID (bb->end)
- - INSN_LUID (bb->head));
+ num_insns = (INSN_LUID (BB_END (bb))
+ - INSN_LUID (BB_HEAD (bb)));
/* Find all loop latches (blocks with back edges to the loop
header) or all the leaf blocks in the cfg has no loops.
@@ -1045,8 +1038,7 @@ find_rgns (edge_list, dom)
Assume that these values were already computed for bb's predecessors. */
static void
-compute_dom_prob_ps (bb)
- int bb;
+compute_dom_prob_ps (int bb)
{
int nxt_in_edge, fst_in_edge, pred;
int fst_out_edge, nxt_out_edge, nr_out_edges, nr_rgn_out_edges;
@@ -1124,12 +1116,9 @@ compute_dom_prob_ps (bb)
Note that bb_trg dominates bb_src. */
static void
-split_edges (bb_src, bb_trg, bl)
- int bb_src;
- int bb_trg;
- edgelst *bl;
+split_edges (int bb_src, int bb_trg, edgelst *bl)
{
- sbitmap src = (edgeset) sbitmap_alloc (pot_split[bb_src]->n_bits);
+ sbitmap src = sbitmap_alloc (pot_split[bb_src]->n_bits);
sbitmap_copy (src, pot_split[bb_src]);
sbitmap_difference (src, src, pot_split[bb_trg]);
@@ -1142,8 +1131,7 @@ split_edges (bb_src, bb_trg, bl)
For speculative sources, compute their update-blocks and split-blocks. */
static void
-compute_trg_info (trg)
- int trg;
+compute_trg_info (int trg)
{
candidate *sp;
edgelst el;
@@ -1194,7 +1182,7 @@ compute_trg_info (trg)
add the TO block to the update block list. This list can end
up with a lot of duplicates. We need to weed them out to avoid
overrunning the end of the bblst_table. */
- update_blocks = (char *) alloca (last_basic_block);
+ update_blocks = alloca (last_basic_block);
memset (update_blocks, 0, last_basic_block);
update_idx = 0;
@@ -1241,8 +1229,7 @@ compute_trg_info (trg)
/* Print candidates info, for debugging purposes. Callable from debugger. */
void
-debug_candidate (i)
- int i;
+debug_candidate (int i)
{
if (!candidate_table[i].is_valid)
return;
@@ -1279,8 +1266,7 @@ debug_candidate (i)
/* Print candidates info, for debugging purposes. Callable from debugger. */
void
-debug_candidates (trg)
- int trg;
+debug_candidates (int trg)
{
int i;
@@ -1290,15 +1276,13 @@ debug_candidates (trg)
debug_candidate (i);
}
-/* Functions for speculative scheduing. */
+/* Functions for speculative scheduling. */
/* Return 0 if x is a set of a register alive in the beginning of one
of the split-blocks of src, otherwise return 1. */
static int
-check_live_1 (src, x)
- int src;
- rtx x;
+check_live_1 (int src, rtx x)
{
int i;
int regno;
@@ -1356,7 +1340,7 @@ check_live_1 (src, x)
}
else
{
- /* Check for psuedo registers. */
+ /* Check for pseudo registers. */
for (i = 0; i < candidate_table[src].split_bbs.nr_members; i++)
{
int b = candidate_table[src].split_bbs.first_member[i];
@@ -1376,9 +1360,7 @@ check_live_1 (src, x)
of every update-block of src. */
static void
-update_live_1 (src, x)
- int src;
- rtx x;
+update_live_1 (int src, rtx x)
{
int i;
int regno;
@@ -1444,9 +1426,7 @@ update_live_1 (src, x)
ready-list or before the scheduling. */
static int
-check_live (insn, src)
- rtx insn;
- int src;
+check_live (rtx insn, int src)
{
/* Find the registers set by instruction. */
if (GET_CODE (PATTERN (insn)) == SET
@@ -1471,9 +1451,7 @@ check_live (insn, src)
block src to trg. */
static void
-update_live (insn, src)
- rtx insn;
- int src;
+update_live (rtx insn, int src)
{
/* Find the registers set by instruction. */
if (GET_CODE (PATTERN (insn)) == SET
@@ -1489,96 +1467,17 @@ update_live (insn, src)
}
}
-/* Exception Free Loads:
-
- We define five classes of speculative loads: IFREE, IRISKY,
- PFREE, PRISKY, and MFREE.
-
- IFREE loads are loads that are proved to be exception-free, just
- by examining the load insn. Examples for such loads are loads
- from TOC and loads of global data.
-
- IRISKY loads are loads that are proved to be exception-risky,
- just by examining the load insn. Examples for such loads are
- volatile loads and loads from shared memory.
-
- PFREE loads are loads for which we can prove, by examining other
- insns, that they are exception-free. Currently, this class consists
- of loads for which we are able to find a "similar load", either in
- the target block, or, if only one split-block exists, in that split
- block. Load2 is similar to load1 if both have same single base
- register. We identify only part of the similar loads, by finding
- an insn upon which both load1 and load2 have a DEF-USE dependence.
-
- PRISKY loads are loads for which we can prove, by examining other
- insns, that they are exception-risky. Currently we have two proofs for
- such loads. The first proof detects loads that are probably guarded by a
- test on the memory address. This proof is based on the
- backward and forward data dependence information for the region.
- Let load-insn be the examined load.
- Load-insn is PRISKY iff ALL the following hold:
-
- - insn1 is not in the same block as load-insn
- - there is a DEF-USE dependence chain (insn1, ..., load-insn)
- - test-insn is either a compare or a branch, not in the same block
- as load-insn
- - load-insn is reachable from test-insn
- - there is a DEF-USE dependence chain (insn1, ..., test-insn)
-
- This proof might fail when the compare and the load are fed
- by an insn not in the region. To solve this, we will add to this
- group all loads that have no input DEF-USE dependence.
-
- The second proof detects loads that are directly or indirectly
- fed by a speculative load. This proof is affected by the
- scheduling process. We will use the flag fed_by_spec_load.
- Initially, all insns have this flag reset. After a speculative
- motion of an insn, if insn is either a load, or marked as
- fed_by_spec_load, we will also mark as fed_by_spec_load every
- insn1 for which a DEF-USE dependence (insn, insn1) exists. A
- load which is fed_by_spec_load is also PRISKY.
-
- MFREE (maybe-free) loads are all the remaining loads. They may be
- exception-free, but we cannot prove it.
-
- Now, all loads in IFREE and PFREE classes are considered
- exception-free, while all loads in IRISKY and PRISKY classes are
- considered exception-risky. As for loads in the MFREE class,
- these are considered either exception-free or exception-risky,
- depending on whether we are pessimistic or optimistic. We have
- to take the pessimistic approach to assure the safety of
- speculative scheduling, but we can take the optimistic approach
- by invoking the -fsched_spec_load_dangerous option. */
-
-enum INSN_TRAP_CLASS
-{
- TRAP_FREE = 0, IFREE = 1, PFREE_CANDIDATE = 2,
- PRISKY_CANDIDATE = 3, IRISKY = 4, TRAP_RISKY = 5
-};
-
-#define WORST_CLASS(class1, class2) \
-((class1 > class2) ? class1 : class2)
-
-/* Non-zero if block bb_to is equal to, or reachable from block bb_from. */
+/* Nonzero if block bb_to is equal to, or reachable from block bb_from. */
#define IS_REACHABLE(bb_from, bb_to) \
(bb_from == bb_to \
|| IS_RGN_ENTRY (bb_from) \
|| (TEST_BIT (ancestor_edges[bb_to], \
EDGE_TO_BIT (IN_EDGES (BB_TO_BLOCK (bb_from))))))
-/* Non-zero iff the address is comprised from at most 1 register. */
-#define CONST_BASED_ADDRESS_P(x) \
- (GET_CODE (x) == REG \
- || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS \
- || (GET_CODE (x) == LO_SUM)) \
- && (CONSTANT_P (XEXP (x, 0)) \
- || CONSTANT_P (XEXP (x, 1)))))
-
/* Turns on the fed_by_spec_load flag for insns fed by load_insn. */
static void
-set_spec_fed (load_insn)
- rtx load_insn;
+set_spec_fed (rtx load_insn)
{
rtx link;
@@ -1591,9 +1490,7 @@ set_spec_fed (load_insn)
branch depending on insn, that guards the speculative load. */
static int
-find_conditional_protection (insn, load_insn_bb)
- rtx insn;
- int load_insn_bb;
+find_conditional_protection (rtx insn, int load_insn_bb)
{
rtx link;
@@ -1628,9 +1525,7 @@ find_conditional_protection (insn, load_insn_bb)
Locate the branch by following INSN_DEPEND from insn1. */
static int
-is_conditionally_protected (load_insn, bb_src, bb_trg)
- rtx load_insn;
- int bb_src, bb_trg;
+is_conditionally_protected (rtx load_insn, int bb_src, int bb_trg)
{
rtx link;
@@ -1680,9 +1575,7 @@ is_conditionally_protected (load_insn, bb_src, bb_trg)
load2 anyhow. */
static int
-is_pfree (load_insn, bb_src, bb_trg)
- rtx load_insn;
- int bb_src, bb_trg;
+is_pfree (rtx load_insn, int bb_src, int bb_trg)
{
rtx back_link;
candidate *candp = candidate_table + bb_src;
@@ -1728,168 +1621,12 @@ is_pfree (load_insn, bb_src, bb_trg)
return 0;
} /* is_pfree */
-/* Returns a class that insn with GET_DEST(insn)=x may belong to,
- as found by analyzing insn's expression. */
-
-static int
-may_trap_exp (x, is_store)
- rtx x;
- int is_store;
-{
- enum rtx_code code;
-
- if (x == 0)
- return TRAP_FREE;
- code = GET_CODE (x);
- if (is_store)
- {
- if (code == MEM && may_trap_p (x))
- return TRAP_RISKY;
- else
- return TRAP_FREE;
- }
- if (code == MEM)
- {
- /* The insn uses memory: a volatile load. */
- if (MEM_VOLATILE_P (x))
- return IRISKY;
- /* An exception-free load. */
- if (!may_trap_p (x))
- return IFREE;
- /* A load with 1 base register, to be further checked. */
- if (CONST_BASED_ADDRESS_P (XEXP (x, 0)))
- return PFREE_CANDIDATE;
- /* No info on the load, to be further checked. */
- return PRISKY_CANDIDATE;
- }
- else
- {
- const char *fmt;
- int i, insn_class = TRAP_FREE;
-
- /* Neither store nor load, check if it may cause a trap. */
- if (may_trap_p (x))
- return TRAP_RISKY;
- /* Recursive step: walk the insn... */
- fmt = GET_RTX_FORMAT (code);
- for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
- {
- if (fmt[i] == 'e')
- {
- int tmp_class = may_trap_exp (XEXP (x, i), is_store);
- insn_class = WORST_CLASS (insn_class, tmp_class);
- }
- else if (fmt[i] == 'E')
- {
- int j;
- for (j = 0; j < XVECLEN (x, i); j++)
- {
- int tmp_class = may_trap_exp (XVECEXP (x, i, j), is_store);
- insn_class = WORST_CLASS (insn_class, tmp_class);
- if (insn_class == TRAP_RISKY || insn_class == IRISKY)
- break;
- }
- }
- if (insn_class == TRAP_RISKY || insn_class == IRISKY)
- break;
- }
- return insn_class;
- }
-}
-
-/* Classifies insn for the purpose of verifying that it can be
- moved speculatively, by examining it's patterns, returning:
- TRAP_RISKY: store, or risky non-load insn (e.g. division by variable).
- TRAP_FREE: non-load insn.
- IFREE: load from a globaly safe location.
- IRISKY: volatile load.
- PFREE_CANDIDATE, PRISKY_CANDIDATE: load that need to be checked for
- being either PFREE or PRISKY. */
-
-static int
-haifa_classify_insn (insn)
- rtx insn;
-{
- rtx pat = PATTERN (insn);
- int tmp_class = TRAP_FREE;
- int insn_class = TRAP_FREE;
- enum rtx_code code;
-
- if (GET_CODE (pat) == PARALLEL)
- {
- int i, len = XVECLEN (pat, 0);
-
- for (i = len - 1; i >= 0; i--)
- {
- code = GET_CODE (XVECEXP (pat, 0, i));
- switch (code)
- {
- case CLOBBER:
- /* Test if it is a 'store'. */
- tmp_class = may_trap_exp (XEXP (XVECEXP (pat, 0, i), 0), 1);
- break;
- case SET:
- /* Test if it is a store. */
- tmp_class = may_trap_exp (SET_DEST (XVECEXP (pat, 0, i)), 1);
- if (tmp_class == TRAP_RISKY)
- break;
- /* Test if it is a load. */
- tmp_class
- = WORST_CLASS (tmp_class,
- may_trap_exp (SET_SRC (XVECEXP (pat, 0, i)),
- 0));
- break;
- case COND_EXEC:
- case TRAP_IF:
- tmp_class = TRAP_RISKY;
- break;
- default:
- ;
- }
- insn_class = WORST_CLASS (insn_class, tmp_class);
- if (insn_class == TRAP_RISKY || insn_class == IRISKY)
- break;
- }
- }
- else
- {
- code = GET_CODE (pat);
- switch (code)
- {
- case CLOBBER:
- /* Test if it is a 'store'. */
- tmp_class = may_trap_exp (XEXP (pat, 0), 1);
- break;
- case SET:
- /* Test if it is a store. */
- tmp_class = may_trap_exp (SET_DEST (pat), 1);
- if (tmp_class == TRAP_RISKY)
- break;
- /* Test if it is a load. */
- tmp_class =
- WORST_CLASS (tmp_class,
- may_trap_exp (SET_SRC (pat), 0));
- break;
- case COND_EXEC:
- case TRAP_IF:
- tmp_class = TRAP_RISKY;
- break;
- default:;
- }
- insn_class = tmp_class;
- }
-
- return insn_class;
-}
-
/* Return 1 if load_insn is prisky (i.e. if load_insn is fed by
a load moved speculatively, or if load_insn is protected by
a compare on load_insn's address). */
static int
-is_prisky (load_insn, bb_src, bb_trg)
- rtx load_insn;
- int bb_src, bb_trg;
+is_prisky (rtx load_insn, int bb_src, int bb_trg)
{
if (FED_BY_SPEC_LOAD (load_insn))
return 1;
@@ -1909,9 +1646,7 @@ is_prisky (load_insn, bb_src, bb_trg)
and 0 otherwise. */
static int
-is_exception_free (insn, bb_src, bb_trg)
- rtx insn;
- int bb_src, bb_trg;
+is_exception_free (rtx insn, int bb_src, int bb_trg)
{
int insn_class = haifa_classify_insn (insn);
@@ -1960,20 +1695,19 @@ static int sched_n_insns;
static int last_was_jump;
/* Implementations of the sched_info functions for region scheduling. */
-static void init_ready_list PARAMS ((struct ready_list *));
-static int can_schedule_ready_p PARAMS ((rtx));
-static int new_ready PARAMS ((rtx));
-static int schedule_more_p PARAMS ((void));
-static const char *rgn_print_insn PARAMS ((rtx, int));
-static int rgn_rank PARAMS ((rtx, rtx));
-static int contributes_to_priority PARAMS ((rtx, rtx));
-static void compute_jump_reg_dependencies PARAMS ((rtx, regset, regset,
- regset));
+static void init_ready_list (struct ready_list *);
+static int can_schedule_ready_p (rtx);
+static int new_ready (rtx);
+static int schedule_more_p (void);
+static const char *rgn_print_insn (rtx, int);
+static int rgn_rank (rtx, rtx);
+static int contributes_to_priority (rtx, rtx);
+static void compute_jump_reg_dependencies (rtx, regset, regset, regset);
/* Return nonzero if there are more insns that should be scheduled. */
static int
-schedule_more_p ()
+schedule_more_p (void)
{
return ! last_was_jump && sched_target_n_insns < target_n_insns;
}
@@ -1982,8 +1716,7 @@ schedule_more_p ()
once before scheduling a set of insns. */
static void
-init_ready_list (ready)
- struct ready_list *ready;
+init_ready_list (struct ready_list *ready)
{
rtx prev_head = current_sched_info->prev_head;
rtx next_tail = current_sched_info->next_tail;
@@ -2002,8 +1735,7 @@ init_ready_list (ready)
/* Prepare current target block info. */
if (current_nr_blocks > 1)
{
- candidate_table = (candidate *) xmalloc (current_nr_blocks
- * sizeof (candidate));
+ candidate_table = xmalloc (current_nr_blocks * sizeof (candidate));
bblst_last = 0;
/* bblst_table holds split blocks and update blocks for each block after
@@ -2011,11 +1743,10 @@ init_ready_list (ready)
the TO blocks of region edges, so there can be at most rgn_nr_edges
of them. */
bblst_size = (current_nr_blocks - target_bb) * rgn_nr_edges;
- bblst_table = (int *) xmalloc (bblst_size * sizeof (int));
+ bblst_table = xmalloc (bblst_size * sizeof (int));
bitlst_table_last = 0;
- bitlst_table_size = rgn_nr_edges;
- bitlst_table = (int *) xmalloc (rgn_nr_edges * sizeof (int));
+ bitlst_table = xmalloc (rgn_nr_edges * sizeof (int));
compute_trg_info (target_bb);
}
@@ -2024,17 +1755,15 @@ init_ready_list (ready)
Count number of insns in the target block being scheduled. */
for (insn = NEXT_INSN (prev_head); insn != next_tail; insn = NEXT_INSN (insn))
{
- rtx next;
-
- if (! INSN_P (insn))
- continue;
- next = NEXT_INSN (insn);
+ if (INSN_DEP_COUNT (insn) == 0)
+ {
+ ready_add (ready, insn);
- if (INSN_DEP_COUNT (insn) == 0
- && (! INSN_P (next) || SCHED_GROUP_P (next) == 0))
- ready_add (ready, insn);
- if (!(SCHED_GROUP_P (insn)))
- target_n_insns++;
+ if (targetm.sched.adjust_priority)
+ INSN_PRIORITY (insn) =
+ (*targetm.sched.adjust_priority) (insn, INSN_PRIORITY (insn));
+ }
+ target_n_insns++;
}
/* Add to ready list all 'ready' insns in valid source blocks.
@@ -2068,19 +1797,14 @@ init_ready_list (ready)
insn, insn) <= 3)))
&& check_live (insn, bb_src)
&& is_exception_free (insn, bb_src, target_bb))))
- {
- rtx next;
-
- /* Note that we haven't squirreled away the notes for
- blocks other than the current. So if this is a
- speculative insn, NEXT might otherwise be a note. */
- next = next_nonnote_insn (insn);
- if (INSN_DEP_COUNT (insn) == 0
- && (! next
- || ! INSN_P (next)
- || SCHED_GROUP_P (next) == 0))
- ready_add (ready, insn);
- }
+ if (INSN_DEP_COUNT (insn) == 0)
+ {
+ ready_add (ready, insn);
+
+ if (targetm.sched.adjust_priority)
+ INSN_PRIORITY (insn) =
+ (*targetm.sched.adjust_priority) (insn, INSN_PRIORITY (insn));
+ }
}
}
}
@@ -2089,8 +1813,7 @@ init_ready_list (ready)
insn can be scheduled, nonzero if we should silently discard it. */
static int
-can_schedule_ready_p (insn)
- rtx insn;
+can_schedule_ready_p (rtx insn)
{
if (GET_CODE (insn) == JUMP_INSN)
last_was_jump = 1;
@@ -2098,7 +1821,6 @@ can_schedule_ready_p (insn)
/* An interblock motion? */
if (INSN_BB (insn) != target_bb)
{
- rtx temp;
basic_block b1;
if (IS_SPECULATIVE_INSN (insn))
@@ -2115,39 +1837,30 @@ can_schedule_ready_p (insn)
}
nr_inter++;
- /* Find the beginning of the scheduling group. */
- /* ??? Ought to update basic block here, but later bits of
- schedule_block assumes the original insn block is
- still intact. */
-
- temp = insn;
- while (SCHED_GROUP_P (temp))
- temp = PREV_INSN (temp);
-
/* Update source block boundaries. */
- b1 = BLOCK_FOR_INSN (temp);
- if (temp == b1->head && insn == b1->end)
+ b1 = BLOCK_FOR_INSN (insn);
+ if (insn == BB_HEAD (b1) && insn == BB_END (b1))
{
/* We moved all the insns in the basic block.
Emit a note after the last insn and update the
begin/end boundaries to point to the note. */
rtx note = emit_note_after (NOTE_INSN_DELETED, insn);
- b1->head = note;
- b1->end = note;
+ BB_HEAD (b1) = note;
+ BB_END (b1) = note;
}
- else if (insn == b1->end)
+ else if (insn == BB_END (b1))
{
/* We took insns from the end of the basic block,
so update the end of block boundary so that it
points to the first insn we did not move. */
- b1->end = PREV_INSN (temp);
+ BB_END (b1) = PREV_INSN (insn);
}
- else if (temp == b1->head)
+ else if (insn == BB_HEAD (b1))
{
/* We took insns from the start of the basic block,
so update the start of block boundary so that
it points to the first insn we did not move. */
- b1->head = NEXT_INSN (insn);
+ BB_HEAD (b1) = NEXT_INSN (insn);
}
}
else
@@ -2164,8 +1877,7 @@ can_schedule_ready_p (insn)
if it should be moved to the ready list or the queue, or zero if we
should silently discard it. */
static int
-new_ready (next)
- rtx next;
+new_ready (rtx next)
{
/* For speculative insns, before inserting to ready/queue,
check live, exception-free, and issue-delay. */
@@ -2194,9 +1906,7 @@ new_ready (next)
to be formatted so that multiple output lines will line up nicely. */
static const char *
-rgn_print_insn (insn, aligned)
- rtx insn;
- int aligned;
+rgn_print_insn (rtx insn, int aligned)
{
static char tmp[80];
@@ -2217,8 +1927,7 @@ rgn_print_insn (insn, aligned)
is to be preferred. Zero if they are equally good. */
static int
-rgn_rank (insn1, insn2)
- rtx insn1, insn2;
+rgn_rank (rtx insn1, rtx insn2)
{
/* Some comparison make sense in interblock scheduling only. */
if (INSN_BB (insn1) != INSN_BB (insn2))
@@ -2249,8 +1958,7 @@ rgn_rank (insn1, insn2)
calculations. */
static int
-contributes_to_priority (next, insn)
- rtx next, insn;
+contributes_to_priority (rtx next, rtx insn)
{
return BLOCK_NUM (next) == BLOCK_NUM (insn);
}
@@ -2261,11 +1969,10 @@ contributes_to_priority (next, insn)
registers that must be considered as set in SET. */
static void
-compute_jump_reg_dependencies (insn, cond_set, used, set)
- rtx insn ATTRIBUTE_UNUSED;
- regset cond_set ATTRIBUTE_UNUSED;
- regset used ATTRIBUTE_UNUSED;
- regset set ATTRIBUTE_UNUSED;
+compute_jump_reg_dependencies (rtx insn ATTRIBUTE_UNUSED,
+ regset cond_exec ATTRIBUTE_UNUSED,
+ regset used ATTRIBUTE_UNUSED,
+ regset set ATTRIBUTE_UNUSED)
{
/* Nothing to do here, since we postprocess jumps in
add_branch_dependences. */
@@ -2287,14 +1994,13 @@ static struct sched_info region_sched_info =
NULL, NULL,
NULL, NULL,
- 0, 0
+ 0, 0, 0
};
/* Determine if PAT sets a CLASS_LIKELY_SPILLED_P register. */
static bool
-sets_likely_spilled (pat)
- rtx pat;
+sets_likely_spilled (rtx pat)
{
bool ret = false;
note_stores (pat, sets_likely_spilled_1, &ret);
@@ -2302,9 +2008,7 @@ sets_likely_spilled (pat)
}
static void
-sets_likely_spilled_1 (x, pat, data)
- rtx x, pat;
- void *data;
+sets_likely_spilled_1 (rtx x, rtx pat, void *data)
{
bool *ret = (bool *) data;
@@ -2319,8 +2023,7 @@ sets_likely_spilled_1 (x, pat, data)
block. */
static void
-add_branch_dependences (head, tail)
- rtx head, tail;
+add_branch_dependences (rtx head, rtx tail)
{
rtx insn, last;
@@ -2333,7 +2036,7 @@ add_branch_dependences (head, tail)
end since moving them results in worse register allocation. Uses remain
at the end to ensure proper register allocation.
- cc0 setters remaim at the end because they can't be moved away from
+ cc0 setters remain at the end because they can't be moved away from
their cc0 user.
Insns setting CLASS_LIKELY_SPILLED_P registers (usually return values)
@@ -2366,17 +2069,6 @@ add_branch_dependences (head, tail)
CANT_MOVE (insn) = 1;
last = insn;
- /* Skip over insns that are part of a group.
- Make each insn explicitly depend on the previous insn.
- This ensures that only the group header will ever enter
- the ready queue (and, when scheduled, will automatically
- schedule the SCHED_GROUP_P block). */
- while (SCHED_GROUP_P (insn))
- {
- rtx temp = prev_nonnote_insn (insn);
- add_dependence (insn, temp, REG_DEP_ANTI);
- insn = temp;
- }
}
/* Don't overrun the bounds of the basic block. */
@@ -2398,10 +2090,6 @@ add_branch_dependences (head, tail)
add_dependence (last, insn, REG_DEP_ANTI);
INSN_REF_COUNT (insn) = 1;
-
- /* Skip over insns that are part of a group. */
- while (SCHED_GROUP_P (insn))
- insn = prev_nonnote_insn (insn);
}
}
@@ -2416,8 +2104,7 @@ static struct deps *bb_deps;
/* Duplicate the INSN_LIST elements of COPY and prepend them to OLD. */
static rtx
-concat_INSN_LIST (copy, old)
- rtx copy, old;
+concat_INSN_LIST (rtx copy, rtx old)
{
rtx new = old;
for (; copy ; copy = XEXP (copy, 1))
@@ -2426,9 +2113,8 @@ concat_INSN_LIST (copy, old)
}
static void
-concat_insn_mem_list (copy_insns, copy_mems, old_insns_p, old_mems_p)
- rtx copy_insns, copy_mems;
- rtx *old_insns_p, *old_mems_p;
+concat_insn_mem_list (rtx copy_insns, rtx copy_mems, rtx *old_insns_p,
+ rtx *old_mems_p)
{
rtx new_insns = *old_insns_p;
rtx new_mems = *old_mems_p;
@@ -2448,9 +2134,7 @@ concat_insn_mem_list (copy_insns, copy_mems, old_insns_p, old_mems_p)
/* After computing the dependencies for block BB, propagate the dependencies
found in TMP_DEPS to the successors of the block. */
static void
-propagate_deps (bb, pred_deps)
- int bb;
- struct deps *pred_deps;
+propagate_deps (int bb, struct deps *pred_deps)
{
int b = BB_TO_BLOCK (bb);
int e, first_edge;
@@ -2536,7 +2220,7 @@ propagate_deps (bb, pred_deps)
/* Compute backward dependences inside bb. In a multiple blocks region:
(1) a bb is analyzed after its predecessors, and (2) the lists in
effect at the end of bb (after analyzing for bb) are inherited by
- bb's successrs.
+ bb's successors.
Specifically for reg-reg data dependences, the block insns are
scanned by sched_analyze () top-to-bottom. Two lists are
@@ -2551,8 +2235,7 @@ propagate_deps (bb, pred_deps)
similar, and the result is interblock dependences in the region. */
static void
-compute_block_backward_dependences (bb)
- int bb;
+compute_block_backward_dependences (int bb)
{
rtx head, tail;
struct deps tmp_deps;
@@ -2575,7 +2258,7 @@ compute_block_backward_dependences (bb)
them to the unused_*_list variables, so that they can be reused. */
static void
-free_pending_lists ()
+free_pending_lists (void)
{
int bb;
@@ -2591,7 +2274,7 @@ free_pending_lists ()
/* Print dependences for debugging, callable from debugger. */
void
-debug_dependencies ()
+debug_dependencies (void)
{
int bb;
@@ -2704,8 +2387,7 @@ debug_dependencies ()
scheduled after its flow predecessors. */
static void
-schedule_region (rgn)
- int rgn;
+schedule_region (int rgn)
{
int bb;
int rgn_n_insns = 0;
@@ -2717,8 +2399,8 @@ schedule_region (rgn)
init_deps_global ();
- /* Initializations for region data dependence analyisis. */
- bb_deps = (struct deps *) xmalloc (sizeof (struct deps) * current_nr_blocks);
+ /* Initializations for region data dependence analysis. */
+ bb_deps = xmalloc (sizeof (struct deps) * current_nr_blocks);
for (bb = 0; bb < current_nr_blocks; bb++)
init_deps (bb_deps + bb);
@@ -2733,6 +2415,10 @@ schedule_region (rgn)
get_block_head_tail (BB_TO_BLOCK (bb), &head, &tail);
compute_forward_dependences (head, tail);
+
+ if (targetm.sched.dependencies_evaluation_hook)
+ targetm.sched.dependencies_evaluation_hook (head, tail);
+
}
/* Set priorities. */
@@ -2749,17 +2435,17 @@ schedule_region (rgn)
{
int i;
- prob = (float *) xmalloc ((current_nr_blocks) * sizeof (float));
+ prob = xmalloc ((current_nr_blocks) * sizeof (float));
dom = sbitmap_vector_alloc (current_nr_blocks, current_nr_blocks);
sbitmap_vector_zero (dom, current_nr_blocks);
/* Edge to bit. */
rgn_nr_edges = 0;
- edge_to_bit = (int *) xmalloc (nr_edges * sizeof (int));
+ edge_to_bit = xmalloc (nr_edges * sizeof (int));
for (i = 1; i < nr_edges; i++)
if (CONTAINING_RGN (FROM_BLOCK (i)) == rgn)
EDGE_TO_BIT (i) = rgn_nr_edges++;
- rgn_edges = (int *) xmalloc (rgn_nr_edges * sizeof (int));
+ rgn_edges = xmalloc (rgn_nr_edges * sizeof (int));
rgn_nr_edges = 0;
for (i = 1; i < nr_edges; i++)
@@ -2799,7 +2485,7 @@ schedule_region (rgn)
/* rm_other_notes only removes notes which are _inside_ the
block---that is, it won't remove notes before the first real insn
- or after the last real insn of the block. So if the first insn
+ or after the last real insn of the block. So if the first insn
has a REG_SAVE_NOTE which would otherwise be emitted before the
insn, it is redundant with the note before the start of the
block, and so we have to take it out. */
@@ -2830,10 +2516,10 @@ schedule_region (rgn)
sched_rgn_n_insns += sched_n_insns;
/* Update target block boundaries. */
- if (head == BLOCK_HEAD (b))
- BLOCK_HEAD (b) = current_sched_info->head;
- if (tail == BLOCK_END (b))
- BLOCK_END (b) = current_sched_info->tail;
+ if (head == BB_HEAD (BASIC_BLOCK (b)))
+ BB_HEAD (BASIC_BLOCK (b)) = current_sched_info->head;
+ if (tail == BB_END (BASIC_BLOCK (b)))
+ BB_END (BASIC_BLOCK (b)) = current_sched_info->tail;
/* Clean up. */
if (current_nr_blocks > 1)
@@ -2884,16 +2570,16 @@ static int *deaths_in_region;
/* Initialize data structures for region scheduling. */
static void
-init_regions ()
+init_regions (void)
{
sbitmap blocks;
int rgn;
nr_regions = 0;
- rgn_table = (region *) xmalloc ((n_basic_blocks) * sizeof (region));
- rgn_bb_table = (int *) xmalloc ((n_basic_blocks) * sizeof (int));
- block_to_bb = (int *) xmalloc ((last_basic_block) * sizeof (int));
- containing_rgn = (int *) xmalloc ((last_basic_block) * sizeof (int));
+ rgn_table = xmalloc ((n_basic_blocks) * sizeof (region));
+ rgn_bb_table = xmalloc ((n_basic_blocks) * sizeof (int));
+ block_to_bb = xmalloc ((last_basic_block) * sizeof (int));
+ containing_rgn = xmalloc ((last_basic_block) * sizeof (int));
/* Compute regions for scheduling. */
if (reload_completed
@@ -2911,24 +2597,16 @@ init_regions ()
}
else
{
- dominance_info dom;
struct edge_list *edge_list;
- /* The scheduler runs after flow; therefore, we can't blindly call
- back into find_basic_blocks since doing so could invalidate the
- info in global_live_at_start.
-
- Consider a block consisting entirely of dead stores; after life
- analysis it would be a block of NOTE_INSN_DELETED notes. If
- we call find_basic_blocks again, then the block would be removed
- entirely and invalidate our the register live information.
-
- We could (should?) recompute register live information. Doing
- so may even be beneficial. */
+ /* The scheduler runs after estimate_probabilities; therefore, we
+ can't blindly call back into find_basic_blocks since doing so
+ could invalidate the branch probability info. We could,
+ however, call cleanup_cfg. */
edge_list = create_edge_list ();
/* Compute the dominators and post dominators. */
- dom = calculate_dominance_info (CDI_DOMINATORS);
+ calculate_dominance_info (CDI_DOMINATORS);
/* build_control_flow will return nonzero if it detects unreachable
blocks or any other irregularity with the cfg which prevents
@@ -2936,7 +2614,7 @@ init_regions ()
if (build_control_flow (edge_list) != 0)
find_single_block_region ();
else
- find_rgns (edge_list, dom);
+ find_rgns (edge_list);
if (sched_verbose >= 3)
debug_regions ();
@@ -2946,7 +2624,7 @@ init_regions ()
/* For now. This will move as more and more of haifa is converted
to using the cfg code in flow.c. */
- free_dominance_info (dom);
+ free_dominance_info (CDI_DOMINATORS);
}
}
@@ -2954,7 +2632,7 @@ init_regions ()
if (CHECK_DEAD_NOTES)
{
blocks = sbitmap_alloc (last_basic_block);
- deaths_in_region = (int *) xmalloc (sizeof (int) * nr_regions);
+ deaths_in_region = xmalloc (sizeof (int) * nr_regions);
/* Remove all death notes from the subroutine. */
for (rgn = 0; rgn < nr_regions; rgn++)
{
@@ -2976,8 +2654,7 @@ init_regions ()
this pass. */
void
-schedule_insns (dump_file)
- FILE *dump_file;
+schedule_insns (FILE *dump_file)
{
sbitmap large_region_blocks, blocks;
int rgn;