path: root/contrib/gcc/sched-deps.c
author:    kan <kan@FreeBSD.org>  2004-07-28 03:11:36 +0000
committer: kan <kan@FreeBSD.org>  2004-07-28 03:11:36 +0000
commit:    5e00ec74d8ce58f99801200d4d3d0412c7cc1b28 (patch)
tree:      052f4bb635f2bea2c5e350bd60c902be100a0d1e /contrib/gcc/sched-deps.c
parent:    87b8398a7d9f9bf0e28bbcd54a4fc27db2125f38 (diff)
Gcc 3.4.2 20040728.
Diffstat (limited to 'contrib/gcc/sched-deps.c')
-rw-r--r--  contrib/gcc/sched-deps.c | 527
1 file changed, 227 insertions(+), 300 deletions(-)
diff --git a/contrib/gcc/sched-deps.c b/contrib/gcc/sched-deps.c
index e261608..0549e11 100644
--- a/contrib/gcc/sched-deps.c
+++ b/contrib/gcc/sched-deps.c
@@ -1,7 +1,7 @@
/* Instruction scheduling pass. This file computes dependencies between
instructions.
Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
- 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
+ 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
and currently maintained by, Jim Wilson (wilson@cygnus.com)
@@ -24,6 +24,8 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
#include "config.h"
#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
#include "toplev.h"
#include "rtl.h"
#include "tm_p.h"
@@ -40,9 +42,8 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
#include "sched-int.h"
#include "params.h"
#include "cselib.h"
+#include "df.h"
-extern char *reg_known_equiv_p;
-extern rtx *reg_known_value;
static regset_head reg_pending_sets_head;
static regset_head reg_pending_clobbers_head;
@@ -51,7 +52,18 @@ static regset_head reg_pending_uses_head;
static regset reg_pending_sets;
static regset reg_pending_clobbers;
static regset reg_pending_uses;
-static bool reg_pending_barrier;
+
+/* The following enumeration values tell us what dependencies we
+ should use to implement the barrier. We use true-dependencies for
+ TRUE_BARRIER and anti-dependencies for MOVE_BARRIER. */
+enum reg_pending_barrier_mode
+{
+ NOT_A_BARRIER = 0,
+ MOVE_BARRIER,
+ TRUE_BARRIER
+};
+
+static enum reg_pending_barrier_mode reg_pending_barrier;
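
As the barrier hunks later in sched_analyze_insn show, the mode only decides
which kind of dependence gets attached to the insns already recorded in the
reg_last lists. A minimal standalone C sketch of that selection, with
illustrative names (GCC encodes a true dependence as reg-note kind 0 and the
others as REG_DEP_ANTI / REG_DEP_OUTPUT):

#include <stdio.h>

enum dep_kind { DEP_TRUE = 0, DEP_ANTI, DEP_OUTPUT };
enum barrier_mode { NOT_A_BARRIER = 0, MOVE_BARRIER, TRUE_BARRIER };

/* A TRUE_BARRIER (e.g. a volatile asm, or a jump followed by a
   BARRIER) must behave as if it consumed earlier results, so prior
   sets and clobbers feed it through true dependences; a MOVE_BARRIER
   (e.g. a setjmp call) only needs ordering, so the cheaper
   anti-dependence suffices.  Register uses always get
   anti-dependences.  */
enum dep_kind
barrier_dep_kind (enum barrier_mode mode)
{
  return mode == TRUE_BARRIER ? DEP_TRUE : DEP_ANTI;
}

int
main (void)
{
  printf ("TRUE_BARRIER -> kind %d, MOVE_BARRIER -> kind %d\n",
	  barrier_dep_kind (TRUE_BARRIER), barrier_dep_kind (MOVE_BARRIER));
  return 0;
}
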
/* To speed up the test for duplicate dependency links we keep a
record of dependencies created by add_dependence when the average
@@ -66,45 +78,45 @@ static bool reg_pending_barrier;
has enough entries to represent a dependency on any other insn in
   the insn chain.  Once the bitmap for the true-dependency cache is
   allocated, the remaining two are allocated as well.  */
-static sbitmap *true_dependency_cache;
-static sbitmap *anti_dependency_cache;
-static sbitmap *output_dependency_cache;
+static bitmap_head *true_dependency_cache;
+static bitmap_head *anti_dependency_cache;
+static bitmap_head *output_dependency_cache;
+int cache_size;
/* To speed up checking consistency of formed forward insn
dependencies we use the following cache. Another possible solution
could be switching off checking duplication of insns in forward
dependencies. */
#ifdef ENABLE_CHECKING
-static sbitmap *forward_dependency_cache;
+static bitmap_head *forward_dependency_cache;
#endif
-static int deps_may_trap_p PARAMS ((rtx));
-static void add_dependence_list PARAMS ((rtx, rtx, enum reg_note));
-static void add_dependence_list_and_free PARAMS ((rtx, rtx *, enum reg_note));
-static void remove_dependence PARAMS ((rtx, rtx));
-static void set_sched_group_p PARAMS ((rtx));
+static int deps_may_trap_p (rtx);
+static void add_dependence_list (rtx, rtx, enum reg_note);
+static void add_dependence_list_and_free (rtx, rtx *, enum reg_note);
+static void set_sched_group_p (rtx);
-static void flush_pending_lists PARAMS ((struct deps *, rtx, int, int));
-static void sched_analyze_1 PARAMS ((struct deps *, rtx, rtx));
-static void sched_analyze_2 PARAMS ((struct deps *, rtx, rtx));
-static void sched_analyze_insn PARAMS ((struct deps *, rtx, rtx, rtx));
-static rtx group_leader PARAMS ((rtx));
+static void flush_pending_lists (struct deps *, rtx, int, int);
+static void sched_analyze_1 (struct deps *, rtx, rtx);
+static void sched_analyze_2 (struct deps *, rtx, rtx);
+static void sched_analyze_insn (struct deps *, rtx, rtx, rtx);
-static rtx get_condition PARAMS ((rtx));
-static int conditions_mutex_p PARAMS ((rtx, rtx));
+static rtx get_condition (rtx);
+static int conditions_mutex_p (rtx, rtx);
/* Return nonzero if a load of the memory reference MEM can cause a trap. */
static int
-deps_may_trap_p (mem)
- rtx mem;
+deps_may_trap_p (rtx mem)
{
rtx addr = XEXP (mem, 0);
- if (REG_P (addr)
- && REGNO (addr) >= FIRST_PSEUDO_REGISTER
- && reg_known_value[REGNO (addr)])
- addr = reg_known_value[REGNO (addr)];
+ if (REG_P (addr) && REGNO (addr) >= FIRST_PSEUDO_REGISTER)
+ {
+ rtx t = get_reg_known_value (REGNO (addr));
+ if (t)
+ addr = t;
+ }
return rtx_addr_can_trap_p (addr);
}
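
The rewritten deps_may_trap_p also swaps direct access to alias.c's exported
reg_known_value array (whose extern, along with reg_known_equiv_p, is deleted
above) for the get_reg_known_value accessor. A sketch of the accessor pattern
in standalone C, with hypothetical names; the real accessors live in alias.c:

#include <stddef.h>
#include <stdio.h>

#define N_REGS 128

/* Hypothetical module-private table, analogous to reg_known_value
   after this change: no longer a bare extern indexed by every client.  */
static void *known_value[N_REGS];

/* Accessor in the style of get_reg_known_value: it can bounds-check,
   and NULL cleanly means "nothing known".  */
void *
get_known_value (size_t regno)
{
  return regno < N_REGS ? known_value[regno] : NULL;
}

int
main (void)
{
  int dummy = 42;
  known_value[7] = &dummy;
  /* Caller pattern matching the new deps_may_trap_p: substitute the
     known value only when one exists.  */
  void *addr = get_known_value (7);
  printf ("reg 7 %s a known value\n", addr ? "has" : "lacks");
  return 0;
}
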
@@ -112,9 +124,7 @@ deps_may_trap_p (mem)
if LIST does not contain INSN. */
rtx
-find_insn_list (insn, list)
- rtx insn;
- rtx list;
+find_insn_list (rtx insn, rtx list)
{
while (list)
{
@@ -128,8 +138,7 @@ find_insn_list (insn, list)
/* Find the condition under which INSN is executed. */
static rtx
-get_condition (insn)
- rtx insn;
+get_condition (rtx insn)
{
rtx pat = PATTERN (insn);
rtx cond;
@@ -160,8 +169,7 @@ get_condition (insn)
/* Return nonzero if conditions COND1 and COND2 can never be both true. */
static int
-conditions_mutex_p (cond1, cond2)
- rtx cond1, cond2;
+conditions_mutex_p (rtx cond1, rtx cond2)
{
if (GET_RTX_CLASS (GET_CODE (cond1)) == '<'
&& GET_RTX_CLASS (GET_CODE (cond2)) == '<'
@@ -173,28 +181,26 @@ conditions_mutex_p (cond1, cond2)
}
/* Add ELEM wrapped in an INSN_LIST with reg note kind DEP_TYPE to the
- LOG_LINKS of INSN, if not already there. DEP_TYPE indicates the type
- of dependence that this link represents. */
+ LOG_LINKS of INSN, if not already there. DEP_TYPE indicates the
+ type of dependence that this link represents. The function returns
+ nonzero if a new entry has been added to insn's LOG_LINK. */
-void
-add_dependence (insn, elem, dep_type)
- rtx insn;
- rtx elem;
- enum reg_note dep_type;
+int
+add_dependence (rtx insn, rtx elem, enum reg_note dep_type)
{
- rtx link, next;
+ rtx link;
int present_p;
rtx cond1, cond2;
/* Don't depend an insn on itself. */
if (insn == elem)
- return;
+ return 0;
/* We can get a dependency on deleted insns due to optimizations in
the register allocation and reloading or due to splitting. Any
such dependency is useless and can be ignored. */
if (GET_CODE (elem) == NOTE)
- return;
+ return 0;
/* flow.c doesn't handle conditional lifetimes entirely correctly;
calls mess up the conditional lifetimes. */
@@ -213,38 +219,7 @@ add_dependence (insn, elem, dep_type)
/* Make sure second instruction doesn't affect condition of first
instruction if switched. */
&& !modified_in_p (cond2, insn))
- return;
- }
-
- /* If elem is part of a sequence that must be scheduled together, then
- make the dependence point to the last insn of the sequence.
- When HAVE_cc0, it is possible for NOTEs to exist between users and
- setters of the condition codes, so we must skip past notes here.
- Otherwise, NOTEs are impossible here. */
- next = next_nonnote_insn (elem);
- if (next && INSN_P (next) && SCHED_GROUP_P (next))
- {
- /* Notes will never intervene here though, so don't bother checking
- for them. */
- /* Hah! Wrong. */
- /* We must reject CODE_LABELs, so that we don't get confused by one
- that has LABEL_PRESERVE_P set, which is represented by the same
- bit in the rtl as SCHED_GROUP_P. A CODE_LABEL can never be
- SCHED_GROUP_P. */
-
- rtx nnext;
- while ((nnext = next_nonnote_insn (next)) != NULL
- && INSN_P (nnext)
- && SCHED_GROUP_P (nnext))
- next = nnext;
-
- /* Again, don't depend an insn on itself. */
- if (insn == next)
- return;
-
- /* Make the dependence to NEXT, the last insn of the group, instead
- of the original ELEM. */
- elem = next;
+ return 0;
}
present_p = 1;
@@ -258,7 +233,7 @@ add_dependence (insn, elem, dep_type)
elem is a CALL is still required. */
if (GET_CODE (insn) == CALL_INSN
&& (INSN_BB (elem) != INSN_BB (insn)))
- return;
+ return 0;
#endif
/* If we already have a dependency for ELEM, then we do not need to
@@ -270,19 +245,20 @@ add_dependence (insn, elem, dep_type)
if (anti_dependency_cache == NULL || output_dependency_cache == NULL)
abort ();
- if (TEST_BIT (true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem)))
+ if (bitmap_bit_p (&true_dependency_cache[INSN_LUID (insn)],
+ INSN_LUID (elem)))
/* Do nothing (present_set_type is already 0). */
;
- else if (TEST_BIT (anti_dependency_cache[INSN_LUID (insn)],
+ else if (bitmap_bit_p (&anti_dependency_cache[INSN_LUID (insn)],
INSN_LUID (elem)))
present_dep_type = REG_DEP_ANTI;
- else if (TEST_BIT (output_dependency_cache[INSN_LUID (insn)],
+ else if (bitmap_bit_p (&output_dependency_cache[INSN_LUID (insn)],
INSN_LUID (elem)))
present_dep_type = REG_DEP_OUTPUT;
else
present_p = 0;
if (present_p && (int) dep_type >= (int) present_dep_type)
- return;
+ return 0;
}
#endif
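
This consultation is the fast path the caches buy: one bit test per
dependence kind, indexed by the two insns' LUIDs, decides whether an
equal-or-stronger edge already exists before LOG_LINKS is ever walked. A
self-contained miniature using a dense bit matrix (GCC now keeps one
lazily-filled bitmap_head per LUID instead, so sparse rows stay cheap):

#define MAX_INSNS 64	/* illustrative; GCC sizes the caches by luid */

static unsigned char true_cache[MAX_INSNS][MAX_INSNS / 8];
static unsigned char anti_cache[MAX_INSNS][MAX_INSNS / 8];

static int
bit_test (unsigned char row[], int i)
{
  return (row[i / 8] >> (i % 8)) & 1;
}

static void
bit_set (unsigned char row[], int i)
{
  row[i / 8] |= (unsigned char) (1 << (i % 8));
}

/* Returns nonzero iff a new edge was recorded, mirroring the new
   add_dependence contract.  A true dependence subsumes an anti
   dependence, so an existing true edge suppresses both kinds.  */
int
add_dep (int insn, int elem, int is_anti)
{
  if (bit_test (true_cache[insn], elem)
      || (is_anti && bit_test (anti_cache[insn], elem)))
    return 0;			/* duplicate or stronger edge present */
  bit_set (is_anti ? anti_cache[insn] : true_cache[insn], elem);
  return 1;			/* the real code also links into LOG_LINKS */
}
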
@@ -297,12 +273,12 @@ add_dependence (insn, elem, dep_type)
if (true_dependency_cache != NULL)
{
if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
- RESET_BIT (anti_dependency_cache[INSN_LUID (insn)],
- INSN_LUID (elem));
+ bitmap_clear_bit (&anti_dependency_cache[INSN_LUID (insn)],
+ INSN_LUID (elem));
else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT
&& output_dependency_cache)
- RESET_BIT (output_dependency_cache[INSN_LUID (insn)],
- INSN_LUID (elem));
+ bitmap_clear_bit (&output_dependency_cache[INSN_LUID (insn)],
+ INSN_LUID (elem));
else
abort ();
}
@@ -319,18 +295,18 @@ add_dependence (insn, elem, dep_type)
if (true_dependency_cache != NULL)
{
if ((int) REG_NOTE_KIND (link) == 0)
- SET_BIT (true_dependency_cache[INSN_LUID (insn)],
- INSN_LUID (elem));
+ bitmap_set_bit (&true_dependency_cache[INSN_LUID (insn)],
+ INSN_LUID (elem));
else if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
- SET_BIT (anti_dependency_cache[INSN_LUID (insn)],
- INSN_LUID (elem));
+ bitmap_set_bit (&anti_dependency_cache[INSN_LUID (insn)],
+ INSN_LUID (elem));
else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
- SET_BIT (output_dependency_cache[INSN_LUID (insn)],
- INSN_LUID (elem));
+ bitmap_set_bit (&output_dependency_cache[INSN_LUID (insn)],
+ INSN_LUID (elem));
}
#endif
- return;
- }
+ return 0;
+ }
/* Might want to check one level of transitivity to save conses. */
link = alloc_INSN_LIST (elem, LOG_LINKS (insn));
@@ -345,21 +321,20 @@ add_dependence (insn, elem, dep_type)
if (true_dependency_cache != NULL)
{
if ((int) dep_type == 0)
- SET_BIT (true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
+ bitmap_set_bit (&true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
else if (dep_type == REG_DEP_ANTI)
- SET_BIT (anti_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
+ bitmap_set_bit (&anti_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
else if (dep_type == REG_DEP_OUTPUT)
- SET_BIT (output_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
+ bitmap_set_bit (&output_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
}
#endif
+ return 1;
}
/* A convenience wrapper to operate on an entire list. */
static void
-add_dependence_list (insn, list, dep_type)
- rtx insn, list;
- enum reg_note dep_type;
+add_dependence_list (rtx insn, rtx list, enum reg_note dep_type)
{
for (; list; list = XEXP (list, 1))
add_dependence (insn, XEXP (list, 0), dep_type);
@@ -368,10 +343,7 @@ add_dependence_list (insn, list, dep_type)
/* Similar, but free *LISTP at the same time. */
static void
-add_dependence_list_and_free (insn, listp, dep_type)
- rtx insn;
- rtx *listp;
- enum reg_note dep_type;
+add_dependence_list_and_free (rtx insn, rtx *listp, enum reg_note dep_type)
{
rtx list, next;
for (list = *listp, *listp = NULL; list ; list = next)
@@ -382,102 +354,18 @@ add_dependence_list_and_free (insn, listp, dep_type)
}
}
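
Note the loop header just above: the list is detached from *listp before the
walk, and the next pointer is read before each node goes back to the
INSN_LIST free pool. (remove_dependence and group_leader, deleted just below,
lose their last callers once set_sched_group_p is rewritten.) The same
walk-and-release shape in standalone C, with malloc/free standing in for
GCC's list allocator:

#include <stdlib.h>

struct node
{
  int payload;
  struct node *next;
};

void
consume_and_free (struct node **listp, void (*fn) (int))
{
  struct node *list, *next;
  /* Detach up front, exactly like "list = *listp, *listp = NULL".  */
  for (list = *listp, *listp = NULL; list; list = next)
    {
      next = list->next;	/* must be read before freeing */
      fn (list->payload);
      free (list);
    }
}
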
-/* Remove ELEM wrapped in an INSN_LIST from the LOG_LINKS
- of INSN. Abort if not found. */
-
-static void
-remove_dependence (insn, elem)
- rtx insn;
- rtx elem;
-{
- rtx prev, link, next;
- int found = 0;
-
- for (prev = 0, link = LOG_LINKS (insn); link; link = next)
- {
- next = XEXP (link, 1);
- if (XEXP (link, 0) == elem)
- {
- if (prev)
- XEXP (prev, 1) = next;
- else
- LOG_LINKS (insn) = next;
-
-#ifdef INSN_SCHEDULING
- /* If we are removing a dependency from the LOG_LINKS list,
- make sure to remove it from the cache too. */
- if (true_dependency_cache != NULL)
- {
- if (REG_NOTE_KIND (link) == 0)
- RESET_BIT (true_dependency_cache[INSN_LUID (insn)],
- INSN_LUID (elem));
- else if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
- RESET_BIT (anti_dependency_cache[INSN_LUID (insn)],
- INSN_LUID (elem));
- else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
- RESET_BIT (output_dependency_cache[INSN_LUID (insn)],
- INSN_LUID (elem));
- }
-#endif
-
- free_INSN_LIST_node (link);
-
- found = 1;
- }
- else
- prev = link;
- }
-
- if (!found)
- abort ();
- return;
-}
-
-/* Return an insn which represents a SCHED_GROUP, which is
- the last insn in the group. */
-
-static rtx
-group_leader (insn)
- rtx insn;
-{
- rtx prev;
-
- do
- {
- prev = insn;
- insn = next_nonnote_insn (insn);
- }
- while (insn && INSN_P (insn) && SCHED_GROUP_P (insn));
-
- return prev;
-}
-
/* Set SCHED_GROUP_P and care for the rest of the bookkeeping that
goes along with that. */
static void
-set_sched_group_p (insn)
- rtx insn;
+set_sched_group_p (rtx insn)
{
- rtx link, prev;
+ rtx prev;
SCHED_GROUP_P (insn) = 1;
- /* There may be a note before this insn now, but all notes will
- be removed before we actually try to schedule the insns, so
- it won't cause a problem later. We must avoid it here though. */
prev = prev_nonnote_insn (insn);
-
- /* Make a copy of all dependencies on the immediately previous insn,
- and add to this insn. This is so that all the dependencies will
- apply to the group. Remove an explicit dependence on this insn
- as SCHED_GROUP_P now represents it. */
-
- if (find_insn_list (prev, LOG_LINKS (insn)))
- remove_dependence (insn, prev);
-
- for (link = LOG_LINKS (prev); link; link = XEXP (link, 1))
- add_dependence (insn, XEXP (link, 0), REG_NOTE_KIND (link));
+ add_dependence (insn, prev, REG_DEP_ANTI);
}
/* Process an insn's memory dependencies. There are four kinds of
@@ -496,9 +384,8 @@ set_sched_group_p (insn)
so that we can do memory aliasing on it. */
void
-add_insn_mem_dependence (deps, insn_list, mem_list, insn, mem)
- struct deps *deps;
- rtx *insn_list, *mem_list, insn, mem;
+add_insn_mem_dependence (struct deps *deps, rtx *insn_list, rtx *mem_list,
+ rtx insn, rtx mem)
{
rtx link;
@@ -510,7 +397,7 @@ add_insn_mem_dependence (deps, insn_list, mem_list, insn, mem)
mem = shallow_copy_rtx (mem);
XEXP (mem, 0) = cselib_subst_to_values (XEXP (mem, 0));
}
- link = alloc_EXPR_LIST (VOIDmode, mem, *mem_list);
+ link = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);
*mem_list = link;
deps->pending_lists_length++;
@@ -521,10 +408,8 @@ add_insn_mem_dependence (deps, insn_list, mem_list, insn, mem)
dependencies for a read operation, similarly with FOR_WRITE. */
static void
-flush_pending_lists (deps, insn, for_read, for_write)
- struct deps *deps;
- rtx insn;
- int for_read, for_write;
+flush_pending_lists (struct deps *deps, rtx insn, int for_read,
+ int for_write)
{
if (for_write)
{
@@ -549,10 +434,7 @@ flush_pending_lists (deps, insn, for_read, for_write)
destination of X, and reads of everything mentioned. */
static void
-sched_analyze_1 (deps, x, insn)
- struct deps *deps;
- rtx x;
- rtx insn;
+sched_analyze_1 (struct deps *deps, rtx x, rtx insn)
{
int regno;
rtx dest = XEXP (x, 0);
@@ -580,6 +462,19 @@ sched_analyze_1 (deps, x, insn)
while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
|| GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
{
+ if (GET_CODE (dest) == STRICT_LOW_PART
+ || GET_CODE (dest) == ZERO_EXTRACT
+ || GET_CODE (dest) == SIGN_EXTRACT
+ || read_modify_subreg_p (dest))
+ {
+ /* These both read and modify the result. We must handle
+ them as writes to get proper dependencies for following
+ instructions. We must handle them as reads to get proper
+ dependencies from this to previous instructions.
+ Thus we need to call sched_analyze_2. */
+
+ sched_analyze_2 (deps, XEXP (dest, 0), insn);
+ }
if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
{
/* The second and third arguments are values read by this insn. */
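
The inserted block makes the read half of a read-modify-write destination
explicit: STRICT_LOW_PART, the extracts, and partial SUBREGs
(read_modify_subreg_p) leave the untouched bits live, so the destination must
generate use-style edges to earlier writers as well as def-style edges for
later readers, hence the recursive sched_analyze_2 call. The bookkeeping
shape as a standalone C sketch, with record_use/record_def as hypothetical
hooks:

#include <stdio.h>

/* Hypothetical dependence hooks; in sched-deps.c the counterparts are
   sched_analyze_2 (reads) and sched_analyze_1's write handling.  */
void
record_use (int insn, int reg)
{
  printf ("insn %d uses reg %d\n", insn, reg);
}

void
record_def (int insn, int reg)
{
  printf ("insn %d defines reg %d\n", insn, reg);
}

/* A full store kills the old value; a partial store both consumes it
   and produces a new one, so it needs edges in both directions.  */
void
analyze_store (int insn, int reg, int is_partial)
{
  if (is_partial)
    record_use (insn, reg);	/* order after earlier producers */
  record_def (insn, reg);	/* order before later consumers */
}
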
@@ -628,10 +523,12 @@ sched_analyze_1 (deps, x, insn)
/* Pseudos that are REG_EQUIV to something may be replaced
by that during reloading. We need only add dependencies for
the address in the REG_EQUIV note. */
- if (!reload_completed
- && reg_known_equiv_p[regno]
- && GET_CODE (reg_known_value[regno]) == MEM)
- sched_analyze_2 (deps, XEXP (reg_known_value[regno], 0), insn);
+ if (!reload_completed && get_reg_known_equiv_p (regno))
+ {
+ rtx t = get_reg_known_value (regno);
+ if (GET_CODE (t) == MEM)
+ sched_analyze_2 (deps, XEXP (t, 0), insn);
+ }
/* Don't let it cross a call after scheduling if it doesn't
already cross one. */
@@ -650,6 +547,7 @@ sched_analyze_1 (deps, x, insn)
cselib_lookup (XEXP (t, 0), Pmode, 1);
XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
}
+ t = canon_rtx (t);
if (deps->pending_lists_length > MAX_PENDING_LIST_LENGTH)
{
@@ -703,10 +601,7 @@ sched_analyze_1 (deps, x, insn)
/* Analyze the uses of memory and registers in rtx X in INSN. */
static void
-sched_analyze_2 (deps, x, insn)
- struct deps *deps;
- rtx x;
- rtx insn;
+sched_analyze_2 (struct deps *deps, rtx x, rtx insn)
{
int i;
int j;
@@ -735,6 +630,9 @@ sched_analyze_2 (deps, x, insn)
case CC0:
/* User of CC0 depends on immediately preceding insn. */
set_sched_group_p (insn);
+ /* Don't move CC0 setter to another block (it can set up the
+ same flag for previous CC0 users which is safe). */
+ CANT_MOVE (prev_nonnote_insn (insn)) = 1;
return;
#endif
@@ -763,10 +661,12 @@ sched_analyze_2 (deps, x, insn)
/* Pseudos that are REG_EQUIV to something may be replaced
by that during reloading. We need only add dependencies for
the address in the REG_EQUIV note. */
- if (!reload_completed
- && reg_known_equiv_p[regno]
- && GET_CODE (reg_known_value[regno]) == MEM)
- sched_analyze_2 (deps, XEXP (reg_known_value[regno], 0), insn);
+ if (!reload_completed && get_reg_known_equiv_p (regno))
+ {
+ rtx t = get_reg_known_value (regno);
+ if (GET_CODE (t) == MEM)
+ sched_analyze_2 (deps, XEXP (t, 0), insn);
+ }
/* If the register does not already cross any calls, then add this
insn to the sched_before_next_call list so that it will still
@@ -791,6 +691,7 @@ sched_analyze_2 (deps, x, insn)
cselib_lookup (XEXP (t, 0), Pmode, 1);
XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
}
+ t = canon_rtx (t);
pending = deps->pending_read_insns;
pending_mem = deps->pending_read_mems;
while (pending)
@@ -846,7 +747,7 @@ sched_analyze_2 (deps, x, insn)
mode. An insn should not be moved across this even if it only uses
pseudo-regs because it might give an incorrectly rounded result. */
if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
- reg_pending_barrier = true;
+ reg_pending_barrier = TRUE_BARRIER;
/* For all ASM_OPERANDS, we must traverse the vector of input operands.
We can not just fall through here since then we would be confused
@@ -903,10 +804,7 @@ sched_analyze_2 (deps, x, insn)
/* Analyze an INSN with pattern X to find all dependencies. */
static void
-sched_analyze_insn (deps, x, insn, loop_notes)
- struct deps *deps;
- rtx x, insn;
- rtx loop_notes;
+sched_analyze_insn (struct deps *deps, rtx x, rtx insn, rtx loop_notes)
{
RTX_CODE code = GET_CODE (x);
rtx link;
@@ -965,7 +863,7 @@ sched_analyze_insn (deps, x, insn, loop_notes)
sched_analyze_2 (deps, XEXP (link, 0), insn);
}
if (find_reg_note (insn, REG_SETJMP, NULL))
- reg_pending_barrier = true;
+ reg_pending_barrier = MOVE_BARRIER;
}
if (GET_CODE (insn) == JUMP_INSN)
@@ -973,7 +871,7 @@ sched_analyze_insn (deps, x, insn, loop_notes)
rtx next;
next = next_nonnote_insn (insn);
if (next && GET_CODE (next) == BARRIER)
- reg_pending_barrier = true;
+ reg_pending_barrier = TRUE_BARRIER;
else
{
rtx pending, pending_mem;
@@ -983,7 +881,15 @@ sched_analyze_insn (deps, x, insn, loop_notes)
(*current_sched_info->compute_jump_reg_dependencies)
(insn, &deps->reg_conditional_sets, &tmp_uses, &tmp_sets);
- IOR_REG_SET (reg_pending_uses, &tmp_uses);
+ /* Make latency of jump equal to 0 by using anti-dependence. */
+ EXECUTE_IF_SET_IN_REG_SET (&tmp_uses, 0, i,
+ {
+ struct deps_reg *reg_last = &deps->reg_last[i];
+ add_dependence_list (insn, reg_last->sets, REG_DEP_ANTI);
+ add_dependence_list (insn, reg_last->clobbers, REG_DEP_ANTI);
+ reg_last->uses_length++;
+ reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
+ });
IOR_REG_SET (reg_pending_sets, &tmp_sets);
CLEAR_REG_SET (&tmp_uses);
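
Replacing the old IOR into reg_pending_uses with direct anti-dependences is a
latency trick: a true dependence charges the producer's result latency, while
an anti-dependence merely enforces order. Modelled as a toy edge-cost
function, assuming a list scheduler where only flow dependences delay the
consumer (targets can refine this, so treat it as a sketch):

enum dep_kind { DEP_TRUE, DEP_ANTI, DEP_OUTPUT };

/* Only a true (flow) dependence makes the consumer wait for the
   producer's result; ordering-only edges contribute no delay.  This
   is why modelling the jump's condition-register reads as
   anti-dependences gives the jump an effective latency of 0.  */
int
dep_cost (int producer_latency, enum dep_kind kind)
{
  return kind == DEP_TRUE ? producer_latency : 0;
}
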
@@ -1035,7 +941,7 @@ sched_analyze_insn (deps, x, insn, loop_notes)
|| INTVAL (XEXP (link, 0)) == NOTE_INSN_LOOP_END
|| INTVAL (XEXP (link, 0)) == NOTE_INSN_EH_REGION_BEG
|| INTVAL (XEXP (link, 0)) == NOTE_INSN_EH_REGION_END)
- reg_pending_barrier = true;
+ reg_pending_barrier = MOVE_BARRIER;
link = XEXP (link, 1);
}
@@ -1047,19 +953,25 @@ sched_analyze_insn (deps, x, insn, loop_notes)
where block boundaries fall. This is mighty confusing elsewhere.
Therefore, prevent such an instruction from being moved. */
if (can_throw_internal (insn))
- reg_pending_barrier = true;
+ reg_pending_barrier = MOVE_BARRIER;
/* Add dependencies if a scheduling barrier was found. */
if (reg_pending_barrier)
{
+      /* In the case of a barrier, most of the added dependencies are
+	 not real, so we use anti-dependences here.  */
if (GET_CODE (PATTERN (insn)) == COND_EXEC)
{
EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i,
{
struct deps_reg *reg_last = &deps->reg_last[i];
add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI);
- add_dependence_list (insn, reg_last->sets, 0);
- add_dependence_list (insn, reg_last->clobbers, 0);
+ add_dependence_list
+ (insn, reg_last->sets,
+ reg_pending_barrier == TRUE_BARRIER ? 0 : REG_DEP_ANTI);
+ add_dependence_list
+ (insn, reg_last->clobbers,
+ reg_pending_barrier == TRUE_BARRIER ? 0 : REG_DEP_ANTI);
});
}
else
@@ -1069,8 +981,12 @@ sched_analyze_insn (deps, x, insn, loop_notes)
struct deps_reg *reg_last = &deps->reg_last[i];
add_dependence_list_and_free (insn, &reg_last->uses,
REG_DEP_ANTI);
- add_dependence_list_and_free (insn, &reg_last->sets, 0);
- add_dependence_list_and_free (insn, &reg_last->clobbers, 0);
+ add_dependence_list_and_free
+ (insn, &reg_last->sets,
+ reg_pending_barrier == TRUE_BARRIER ? 0 : REG_DEP_ANTI);
+ add_dependence_list_and_free
+ (insn, &reg_last->clobbers,
+ reg_pending_barrier == TRUE_BARRIER ? 0 : REG_DEP_ANTI);
reg_last->uses_length = 0;
reg_last->clobbers_length = 0;
});
@@ -1085,7 +1001,7 @@ sched_analyze_insn (deps, x, insn, loop_notes)
flush_pending_lists (deps, insn, true, true);
CLEAR_REG_SET (&deps->reg_conditional_sets);
- reg_pending_barrier = false;
+ reg_pending_barrier = NOT_A_BARRIER;
}
else
{
@@ -1238,9 +1154,7 @@ sched_analyze_insn (deps, x, insn, loop_notes)
for every dependency. */
void
-sched_analyze (deps, head, tail)
- struct deps *deps;
- rtx head, tail;
+sched_analyze (struct deps *deps, rtx head, rtx tail)
{
rtx insn;
rtx loop_notes = 0;
@@ -1284,7 +1198,7 @@ sched_analyze (deps, head, tail)
{
/* This is setjmp. Assume that all registers, not just
hard registers, may be clobbered by this call. */
- reg_pending_barrier = true;
+ reg_pending_barrier = MOVE_BARRIER;
}
else
{
@@ -1373,7 +1287,7 @@ sched_analyze (deps, head, tail)
/* Now that we have completed handling INSN, check and see if it is
a CLOBBER beginning a libcall block. If it is, record the
- end of the libcall sequence.
+ end of the libcall sequence.
We want to schedule libcall blocks as a unit before reload. While
this restricts scheduling, it preserves the meaning of a libcall
@@ -1384,7 +1298,7 @@ sched_analyze (deps, head, tail)
a libcall block. */
if (!reload_completed
/* Note we may have nested libcall sequences. We only care about
- the outermost libcall sequence. */
+ the outermost libcall sequence. */
&& deps->libcall_block_tail_insn == 0
/* The sequence must start with a clobber of a register. */
&& GET_CODE (insn) == INSN
@@ -1420,17 +1334,52 @@ sched_analyze (deps, head, tail)
abort ();
}
+
+/* The following function adds a forward dependence (FROM, TO) with
+   the given DEP_TYPE.  The forward dependence must not already exist.  */
+
+void
+add_forward_dependence (rtx from, rtx to, enum reg_note dep_type)
+{
+ rtx new_link;
+
+#ifdef ENABLE_CHECKING
+ /* If add_dependence is working properly there should never
+ be notes, deleted insns or duplicates in the backward
+ links. Thus we need not check for them here.
+
+ However, if we have enabled checking we might as well go
+ ahead and verify that add_dependence worked properly. */
+ if (GET_CODE (from) == NOTE
+ || INSN_DELETED_P (from)
+ || (forward_dependency_cache != NULL
+ && bitmap_bit_p (&forward_dependency_cache[INSN_LUID (from)],
+ INSN_LUID (to)))
+ || (forward_dependency_cache == NULL
+ && find_insn_list (to, INSN_DEPEND (from))))
+ abort ();
+ if (forward_dependency_cache != NULL)
+ bitmap_bit_p (&forward_dependency_cache[INSN_LUID (from)],
+ INSN_LUID (to));
+#endif
+
+ new_link = alloc_INSN_LIST (to, INSN_DEPEND (from));
+
+ PUT_REG_NOTE_KIND (new_link, dep_type);
+
+ INSN_DEPEND (from) = new_link;
+ INSN_DEP_COUNT (to) += 1;
+}
+
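
add_forward_dependence factors the INSN_DEPEND / INSN_DEP_COUNT bookkeeping
out of compute_forward_dependences, which shrinks to the two-line loop below;
the group_leader redirection disappears because SCHED_GROUP_P groups are now
held together by the anti-dependence added in set_sched_group_p. One oddity
worth noting: in the ENABLE_CHECKING block the final bitmap_bit_p call
discards its result, where bitmap_set_bit looks like the intended call, since
nothing else records the edge in forward_dependency_cache. A standalone
sketch that inverts backward adjacency lists into forward ones and maintains
each consumer's pending-producer count:

#include <stdlib.h>

struct dep { int insn; struct dep *next; };

#define N 8			   /* illustrative insn count */
static struct dep *log_links[N];   /* backward: consumer -> its producers */
static struct dep *insn_depend[N]; /* forward: producer -> its consumers */
static int dep_count[N];	   /* unresolved producers per consumer */

void
add_forward_dep (int from, int to)
{
  struct dep *d = malloc (sizeof *d);
  d->insn = to;
  d->next = insn_depend[from];
  insn_depend[from] = d;
  dep_count[to] += 1;		/* consumer waits on one more producer */
}

/* Mirror of the new compute_forward_dependences loop: every backward
   edge (insn <- producer) becomes a forward edge (producer -> insn).  */
void
compute_forward_deps (void)
{
  for (int insn = 0; insn < N; insn++)
    for (struct dep *link = log_links[insn]; link; link = link->next)
      add_forward_dep (link->insn, insn);
}
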
/* Examine insns in the range [ HEAD, TAIL ] and use the backward
dependences from LOG_LINKS to build forward dependences in
INSN_DEPEND. */
void
-compute_forward_dependences (head, tail)
- rtx head, tail;
+compute_forward_dependences (rtx head, rtx tail)
{
rtx insn, link;
rtx next_tail;
- enum reg_note dep_type;
next_tail = NEXT_INSN (tail);
for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
@@ -1438,44 +1387,8 @@ compute_forward_dependences (head, tail)
if (! INSN_P (insn))
continue;
- insn = group_leader (insn);
-
for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
- {
- rtx x = group_leader (XEXP (link, 0));
- rtx new_link;
-
- if (x != XEXP (link, 0))
- continue;
-
-#ifdef ENABLE_CHECKING
- /* If add_dependence is working properly there should never
- be notes, deleted insns or duplicates in the backward
- links. Thus we need not check for them here.
-
- However, if we have enabled checking we might as well go
- ahead and verify that add_dependence worked properly. */
- if (GET_CODE (x) == NOTE
- || INSN_DELETED_P (x)
- || (forward_dependency_cache != NULL
- && TEST_BIT (forward_dependency_cache[INSN_LUID (x)],
- INSN_LUID (insn)))
- || (forward_dependency_cache == NULL
- && find_insn_list (insn, INSN_DEPEND (x))))
- abort ();
- if (forward_dependency_cache != NULL)
- SET_BIT (forward_dependency_cache[INSN_LUID (x)],
- INSN_LUID (insn));
-#endif
-
- new_link = alloc_INSN_LIST (insn, INSN_DEPEND (x));
-
- dep_type = REG_NOTE_KIND (link);
- PUT_REG_NOTE_KIND (new_link, dep_type);
-
- INSN_DEPEND (x) = new_link;
- INSN_DEP_COUNT (insn) += 1;
- }
+ add_forward_dependence (XEXP (link, 0), insn, REG_NOTE_KIND (link));
}
}
@@ -1483,14 +1396,12 @@ compute_forward_dependences (head, tail)
n_bbs is the number of region blocks. */
void
-init_deps (deps)
- struct deps *deps;
+init_deps (struct deps *deps)
{
int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
deps->max_reg = max_reg;
- deps->reg_last = (struct deps_reg *)
- xcalloc (max_reg, sizeof (struct deps_reg));
+ deps->reg_last = xcalloc (max_reg, sizeof (struct deps_reg));
INIT_REG_SET (&deps->reg_last_in_use);
INIT_REG_SET (&deps->reg_conditional_sets);
@@ -1510,8 +1421,7 @@ init_deps (deps)
/* Free insn lists found in DEPS. */
void
-free_deps (deps)
- struct deps *deps;
+free_deps (struct deps *deps)
{
int i;
@@ -1522,7 +1432,7 @@ free_deps (deps)
free_INSN_LIST_list (&deps->last_pending_memory_flush);
/* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
- times. For a test case with 42000 regs and 8000 small basic blocks,
+ times. For a testcase with 42000 regs and 8000 small basic blocks,
this loop accounted for nearly 60% (84 sec) of the total -O2 runtime. */
EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i,
{
@@ -1541,12 +1451,11 @@ free_deps (deps)
}
/* If it is profitable to use them, initialize caches for tracking
- dependency informatino. LUID is the number of insns to be scheduled,
+ dependency information. LUID is the number of insns to be scheduled,
it is used in the estimate of profitability. */
void
-init_dependency_caches (luid)
- int luid;
+init_dependency_caches (int luid)
{
/* ?!? We could save some memory by computing a per-region luid mapping
which could reduce both the number of vectors in the cache and the size
@@ -1556,34 +1465,52 @@ init_dependency_caches (luid)
what we consider "very high". */
if (luid / n_basic_blocks > 100 * 5)
{
- true_dependency_cache = sbitmap_vector_alloc (luid, luid);
- sbitmap_vector_zero (true_dependency_cache, luid);
- anti_dependency_cache = sbitmap_vector_alloc (luid, luid);
- sbitmap_vector_zero (anti_dependency_cache, luid);
- output_dependency_cache = sbitmap_vector_alloc (luid, luid);
- sbitmap_vector_zero (output_dependency_cache, luid);
+ int i;
+ true_dependency_cache = xmalloc (luid * sizeof (bitmap_head));
+ anti_dependency_cache = xmalloc (luid * sizeof (bitmap_head));
+ output_dependency_cache = xmalloc (luid * sizeof (bitmap_head));
#ifdef ENABLE_CHECKING
- forward_dependency_cache = sbitmap_vector_alloc (luid, luid);
- sbitmap_vector_zero (forward_dependency_cache, luid);
+ forward_dependency_cache = xmalloc (luid * sizeof (bitmap_head));
#endif
+ for (i = 0; i < luid; i++)
+ {
+ bitmap_initialize (&true_dependency_cache[i], 0);
+ bitmap_initialize (&anti_dependency_cache[i], 0);
+ bitmap_initialize (&output_dependency_cache[i], 0);
+#ifdef ENABLE_CHECKING
+ bitmap_initialize (&forward_dependency_cache[i], 0);
+#endif
+ }
+ cache_size = luid;
}
}
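
The former luid x luid sbitmap matrices were allocated eagerly, costing
O(luid^2) bits up front; an array of bitmap heads starts every row empty, so
memory now tracks the number of recorded dependencies, and cache_size
remembers the array length for free_dependency_caches below. The same
allocate/initialize/tear-down shape, sketched with a trivial lazily-grown
bitset standing in for bitmap_head:

#include <stdlib.h>

/* Minimal stand-in for bitmap_head: starts empty, so an untouched row
   costs only the struct itself.  */
struct bitset_head
{
  unsigned char *bits;		/* allocated on first use */
  size_t nbytes;
};

static struct bitset_head *true_cache;
static int cache_len;		/* plays the role of cache_size */

void
init_caches (int luid)
{
  true_cache = malloc ((size_t) luid * sizeof *true_cache);
  for (int i = 0; i < luid; i++)
    {
      true_cache[i].bits = NULL;	/* bitmap_initialize analogue */
      true_cache[i].nbytes = 0;
    }
  cache_len = luid;
}

void
free_caches (void)
{
  for (int i = 0; i < cache_len; i++)
    free (true_cache[i].bits);		/* bitmap_clear analogue */
  free (true_cache);
  true_cache = NULL;
}
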
/* Free the caches allocated in init_dependency_caches. */
void
-free_dependency_caches ()
+free_dependency_caches (void)
{
if (true_dependency_cache)
{
- sbitmap_vector_free (true_dependency_cache);
+ int i;
+
+ for (i = 0; i < cache_size; i++)
+ {
+ bitmap_clear (&true_dependency_cache[i]);
+ bitmap_clear (&anti_dependency_cache[i]);
+ bitmap_clear (&output_dependency_cache[i]);
+#ifdef ENABLE_CHECKING
+ bitmap_clear (&forward_dependency_cache[i]);
+#endif
+ }
+ free (true_dependency_cache);
true_dependency_cache = NULL;
- sbitmap_vector_free (anti_dependency_cache);
+ free (anti_dependency_cache);
anti_dependency_cache = NULL;
- sbitmap_vector_free (output_dependency_cache);
+ free (output_dependency_cache);
output_dependency_cache = NULL;
#ifdef ENABLE_CHECKING
- sbitmap_vector_free (forward_dependency_cache);
+ free (forward_dependency_cache);
forward_dependency_cache = NULL;
#endif
}
@@ -1593,18 +1520,18 @@ free_dependency_caches ()
code. */
void
-init_deps_global ()
+init_deps_global (void)
{
reg_pending_sets = INITIALIZE_REG_SET (reg_pending_sets_head);
reg_pending_clobbers = INITIALIZE_REG_SET (reg_pending_clobbers_head);
reg_pending_uses = INITIALIZE_REG_SET (reg_pending_uses_head);
- reg_pending_barrier = false;
+ reg_pending_barrier = NOT_A_BARRIER;
}
/* Free everything used by the dependency analysis code. */
void
-finish_deps_global ()
+finish_deps_global (void)
{
FREE_REG_SET (reg_pending_sets);
FREE_REG_SET (reg_pending_clobbers);