authorkan <kan@FreeBSD.org>2007-05-19 01:19:51 +0000
committerkan <kan@FreeBSD.org>2007-05-19 01:19:51 +0000
commit1f9ea4d0a40cca64d60cf4dab152349da7b9dddf (patch)
tree0cb530c9c38af219e6dda2994c078b6b2b9ad853 /contrib/gcc/predict.c
parent4895159b2b4f648051c1f139faa7b6dc50c2bfcb (diff)
GCC 4.2.0 release.
Diffstat (limited to 'contrib/gcc/predict.c')
-rw-r--r--  contrib/gcc/predict.c | 1478
1 file changed, 1077 insertions(+), 401 deletions(-)
diff --git a/contrib/gcc/predict.c b/contrib/gcc/predict.c
index 9d885fc..9030763 100644
--- a/contrib/gcc/predict.c
+++ b/contrib/gcc/predict.c
@@ -16,8 +16,8 @@ for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to the Free
-Software Foundation, 59 Temple Place - Suite 330, Boston, MA
-02111-1307, USA. */
+Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+02110-1301, USA. */
/* References:
@@ -52,7 +52,13 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
#include "sreal.h"
#include "params.h"
#include "target.h"
-#include "loop.h"
+#include "cfgloop.h"
+#include "tree-flow.h"
+#include "ggc.h"
+#include "tree-dump.h"
+#include "tree-pass.h"
+#include "timevar.h"
+#include "tree-scalar-evolution.h"
#include "cfgloop.h"
/* real constants: 0, 1, 1-1/REG_BR_PROB_BASE, REG_BR_PROB_BASE,
@@ -66,15 +72,12 @@ static sreal real_zero, real_one, real_almost_one, real_br_prob_base,
#define PROB_VERY_LIKELY (REG_BR_PROB_BASE - PROB_VERY_UNLIKELY)
#define PROB_ALWAYS (REG_BR_PROB_BASE)
-static bool predicted_by_p (basic_block, enum br_predictor);
static void combine_predictions_for_insn (rtx, basic_block);
-static void dump_prediction (enum br_predictor, int, basic_block, int);
-static void estimate_loops_at_level (struct loop *loop);
-static void propagate_freq (struct loop *);
+static void dump_prediction (FILE *, enum br_predictor, int, basic_block, int);
+static void estimate_loops_at_level (struct loop *, bitmap);
+static void propagate_freq (struct loop *, bitmap);
static void estimate_bb_frequencies (struct loops *);
-static void counts_to_freqs (void);
-static void process_note_predictions (basic_block, int *);
-static void process_note_prediction (basic_block, int *, int, int);
+static void predict_paths_leading_to (basic_block, int *, enum br_predictor, enum prediction);
static bool last_basic_block_p (basic_block);
static void compute_function_frequency (void);
static void choose_function_section (void);
@@ -149,8 +152,8 @@ probably_never_executed_bb_p (basic_block bb)
/* Return true if one of the outgoing edges is already predicted by
PREDICTOR. */
-static bool
-predicted_by_p (basic_block bb, enum br_predictor predictor)
+bool
+rtl_predicted_by_p (basic_block bb, enum br_predictor predictor)
{
rtx note;
if (!INSN_P (BB_END (bb)))
@@ -162,11 +165,62 @@ predicted_by_p (basic_block bb, enum br_predictor predictor)
return false;
}
-void
+/* Return true if one of the outgoing edges is already predicted by
+ PREDICTOR. */
+
+bool
+tree_predicted_by_p (basic_block bb, enum br_predictor predictor)
+{
+ struct edge_prediction *i;
+ for (i = bb->predictions; i; i = i->ep_next)
+ if (i->ep_predictor == predictor)
+ return true;
+ return false;
+}
+
+/* Return true when the probability of edge is reliable.
+
+ The profile guessing code is good at predicting branch outcome (i.e.
+ taken/not taken), which it gets right slightly over 75% of the time.
+ It is however notoriously poor at predicting the probability itself.
+ In general the guessed profile appears a lot flatter (with probabilities
+ closer to 50%) than reality, so it is a bad idea to use it to drive
+ optimizations such as those disabling dynamic branch prediction for
+ well predictable branches.
+
+ There are two exceptions - edges leading to noreturn blocks and edges
+ predicted by the number-of-iterations heuristics are predicted well.
+ This predicate should be able to distinguish those, but at the moment
+ it simply checks for the noreturn heuristic, the only one giving a
+ probability over 99% or below 1%. In the future we might want to
+ propagate reliability information across the CFG if it proves useful
+ in multiple places. */
+static bool
+probability_reliable_p (int prob)
+{
+ return (profile_status == PROFILE_READ
+ || (profile_status == PROFILE_GUESSED
+ && (prob <= HITRATE (1) || prob >= HITRATE (99))));
+}
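A minimal standalone sketch of the guessed-profile case above. REG_BR_PROB_BASE and HITRATE come from the surrounding GCC headers; the definitions restated below are assumptions matching their usual form in predict.h, not quotations:

#include <stdio.h>

#define REG_BR_PROB_BASE 10000
#define HITRATE(val) ((int) ((val) * REG_BR_PROB_BASE + 50) / 100)

/* Guessed probabilities are trusted only at the extremes, mirroring
   probability_reliable_p for PROFILE_GUESSED.  */
static int
guessed_reliable_p (int prob)
{
  return prob <= HITRATE (1) || prob >= HITRATE (99);
}

int
main (void)
{
  printf ("%d\n", guessed_reliable_p (HITRATE (50)));   /* 0: flat guess */
  printf ("%d\n", guessed_reliable_p (HITRATE (100)));  /* 1: noreturn-like */
  return 0;
}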
+
+/* Same predicate as above, working on edges. */
+bool
+edge_probability_reliable_p (edge e)
+{
+ return probability_reliable_p (e->probability);
+}
+
+/* Same predicate as edge_probability_reliable_p, working on notes. */
+bool
+br_prob_note_reliable_p (rtx note)
+{
+ gcc_assert (REG_NOTE_KIND (note) == REG_BR_PROB);
+ return probability_reliable_p (INTVAL (XEXP (note, 0)));
+}
+
+static void
predict_insn (rtx insn, enum br_predictor predictor, int probability)
{
- if (!any_condjump_p (insn))
- abort ();
+ gcc_assert (any_condjump_p (insn));
if (!flag_guess_branch_prob)
return;
@@ -195,7 +249,7 @@ predict_insn_def (rtx insn, enum br_predictor predictor,
/* Predict edge E with given probability if possible. */
void
-predict_edge (edge e, enum br_predictor predictor, int probability)
+rtl_predict_edge (edge e, enum br_predictor predictor, int probability)
{
rtx last_insn;
last_insn = BB_END (e->src);
@@ -212,15 +266,51 @@ predict_edge (edge e, enum br_predictor predictor, int probability)
predict_insn (last_insn, predictor, probability);
}
+/* Predict edge E with the given PROBABILITY. */
+void
+tree_predict_edge (edge e, enum br_predictor predictor, int probability)
+{
+ gcc_assert (profile_status != PROFILE_GUESSED);
+ if ((e->src != ENTRY_BLOCK_PTR && EDGE_COUNT (e->src->succs) > 1)
+ && flag_guess_branch_prob && optimize)
+ {
+ struct edge_prediction *i = ggc_alloc (sizeof (struct edge_prediction));
+
+ i->ep_next = e->src->predictions;
+ e->src->predictions = i;
+ i->ep_probability = probability;
+ i->ep_predictor = predictor;
+ i->ep_edge = e;
+ }
+}
+
+/* Remove all predictions on the given basic block that are attached
+ to edge E. */
+void
+remove_predictions_associated_with_edge (edge e)
+{
+ if (e->src->predictions)
+ {
+ struct edge_prediction **prediction = &e->src->predictions;
+ while (*prediction)
+ {
+ if ((*prediction)->ep_edge == e)
+ *prediction = (*prediction)->ep_next;
+ else
+ prediction = &((*prediction)->ep_next);
+ }
+ }
+}
+
/* Return true when we can store a prediction on insn INSN.
At the moment we represent predictions only on conditional
jumps, not on computed jumps or other complicated cases. */
static bool
can_predict_insn_p (rtx insn)
{
- return (GET_CODE (insn) == JUMP_INSN
+ return (JUMP_P (insn)
&& any_condjump_p (insn)
- && BLOCK_FOR_INSN (insn)->succ->succ_next);
+ && EDGE_COUNT (BLOCK_FOR_INSN (insn)->succs) >= 2);
}
/* Predict edge E by given predictor if possible. */
@@ -256,34 +346,55 @@ invert_br_probabilities (rtx insn)
/* Dump information about the branch prediction to the output file. */
static void
-dump_prediction (enum br_predictor predictor, int probability,
+dump_prediction (FILE *file, enum br_predictor predictor, int probability,
basic_block bb, int used)
{
- edge e = bb->succ;
+ edge e;
+ edge_iterator ei;
- if (!rtl_dump_file)
+ if (!file)
return;
- while (e && (e->flags & EDGE_FALLTHRU))
- e = e->succ_next;
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ if (! (e->flags & EDGE_FALLTHRU))
+ break;
- fprintf (rtl_dump_file, " %s heuristics%s: %.1f%%",
+ fprintf (file, " %s heuristics%s: %.1f%%",
predictor_info[predictor].name,
used ? "" : " (ignored)", probability * 100.0 / REG_BR_PROB_BASE);
if (bb->count)
{
- fprintf (rtl_dump_file, " exec ");
- fprintf (rtl_dump_file, HOST_WIDEST_INT_PRINT_DEC, bb->count);
+ fprintf (file, " exec ");
+ fprintf (file, HOST_WIDEST_INT_PRINT_DEC, bb->count);
if (e)
{
- fprintf (rtl_dump_file, " hit ");
- fprintf (rtl_dump_file, HOST_WIDEST_INT_PRINT_DEC, e->count);
- fprintf (rtl_dump_file, " (%.1f%%)", e->count * 100.0 / bb->count);
+ fprintf (file, " hit ");
+ fprintf (file, HOST_WIDEST_INT_PRINT_DEC, e->count);
+ fprintf (file, " (%.1f%%)", e->count * 100.0 / bb->count);
}
}
- fprintf (rtl_dump_file, "\n");
+ fprintf (file, "\n");
+}
+
+/* We cannot predict the probabilities of the outgoing edges of BB.
+ Set them evenly and hope for the best. */
+static void
+set_even_probabilities (basic_block bb)
+{
+ int nedges = 0;
+ edge e;
+ edge_iterator ei;
+
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ if (!(e->flags & (EDGE_EH | EDGE_FAKE)))
+ nedges ++;
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ if (!(e->flags & (EDGE_EH | EDGE_FAKE)))
+ e->probability = (REG_BR_PROB_BASE + nedges / 2) / nedges;
+ else
+ e->probability = 0;
}
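As a worked example of the rounding above, assuming REG_BR_PROB_BASE is 10000 as elsewhere in this file: a block with three ordinary successors gives each edge (10000 + 1) / 3 = 3333.

#include <stdio.h>

#define REG_BR_PROB_BASE 10000

int
main (void)
{
  /* Mimic set_even_probabilities for three non-EH, non-fake edges.  */
  int nedges = 3;
  int prob = (REG_BR_PROB_BASE + nedges / 2) / nedges;
  printf ("each edge gets %d of %d\n", prob, REG_BR_PROB_BASE);  /* 3333 */
  return 0;
}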
/* Combine all REG_BR_PRED notes into single probability and attach REG_BR_PROB
@@ -292,8 +403,8 @@ dump_prediction (enum br_predictor predictor, int probability,
static void
combine_predictions_for_insn (rtx insn, basic_block bb)
{
- rtx prob_note = find_reg_note (insn, REG_BR_PROB, 0);
- rtx *pnote = &REG_NOTES (insn);
+ rtx prob_note;
+ rtx *pnote;
rtx note;
int best_probability = PROB_EVEN;
int best_predictor = END_PREDICTORS;
@@ -302,13 +413,20 @@ combine_predictions_for_insn (rtx insn, basic_block bb)
bool first_match = false;
bool found = false;
- if (rtl_dump_file)
- fprintf (rtl_dump_file, "Predictions for insn %i bb %i\n", INSN_UID (insn),
+ if (!can_predict_insn_p (insn))
+ {
+ set_even_probabilities (bb);
+ return;
+ }
+
+ prob_note = find_reg_note (insn, REG_BR_PROB, 0);
+ pnote = &REG_NOTES (insn);
+ if (dump_file)
+ fprintf (dump_file, "Predictions for insn %i bb %i\n", INSN_UID (insn),
bb->index);
/* We implement "first match" heuristics and use probability guessed
- by predictor with smallest index. In the future we will use better
- probability combination techniques. */
+ by predictor with smallest index. */
for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
if (REG_NOTE_KIND (note) == REG_BR_PRED)
{
@@ -340,16 +458,19 @@ combine_predictions_for_insn (rtx insn, basic_block bb)
first_match = true;
if (!found)
- dump_prediction (PRED_NO_PREDICTION, combined_probability, bb, true);
+ dump_prediction (dump_file, PRED_NO_PREDICTION,
+ combined_probability, bb, true);
else
{
- dump_prediction (PRED_DS_THEORY, combined_probability, bb, !first_match);
- dump_prediction (PRED_FIRST_MATCH, best_probability, bb, first_match);
+ dump_prediction (dump_file, PRED_DS_THEORY, combined_probability,
+ bb, !first_match);
+ dump_prediction (dump_file, PRED_FIRST_MATCH, best_probability,
+ bb, first_match);
}
if (first_match)
combined_probability = best_probability;
- dump_prediction (PRED_COMBINED, combined_probability, bb, true);
+ dump_prediction (dump_file, PRED_COMBINED, combined_probability, bb, true);
while (*pnote)
{
@@ -358,7 +479,7 @@ combine_predictions_for_insn (rtx insn, basic_block bb)
int predictor = INTVAL (XEXP (XEXP (*pnote, 0), 0));
int probability = INTVAL (XEXP (XEXP (*pnote, 0), 1));
- dump_prediction (predictor, probability, bb,
+ dump_prediction (dump_file, predictor, probability, bb,
!first_match || best_predictor == predictor);
*pnote = XEXP (*pnote, 1);
}
@@ -374,30 +495,148 @@ combine_predictions_for_insn (rtx insn, basic_block bb)
/* Save the prediction into the CFG in case we are seeing a non-degenerate
conditional jump. */
- if (bb->succ->succ_next)
+ if (!single_succ_p (bb))
{
BRANCH_EDGE (bb)->probability = combined_probability;
FALLTHRU_EDGE (bb)->probability
= REG_BR_PROB_BASE - combined_probability;
}
}
+ else if (!single_succ_p (bb))
+ {
+ int prob = INTVAL (XEXP (prob_note, 0));
+
+ BRANCH_EDGE (bb)->probability = prob;
+ FALLTHRU_EDGE (bb)->probability = REG_BR_PROB_BASE - prob;
+ }
+ else
+ single_succ_edge (bb)->probability = REG_BR_PROB_BASE;
}
-/* Statically estimate the probability that a branch will be taken.
- ??? In the next revision there will be a number of other predictors added
- from the above references. Further, each heuristic will be factored out
- into its own function for clarity (and to facilitate the combination of
- predictions). */
+/* Combine predictions into single probability and store them into CFG.
+ Remove now useless prediction entries. */
-void
-estimate_probability (struct loops *loops_info)
+static void
+combine_predictions_for_bb (basic_block bb)
+{
+ int best_probability = PROB_EVEN;
+ int best_predictor = END_PREDICTORS;
+ int combined_probability = REG_BR_PROB_BASE / 2;
+ int d;
+ bool first_match = false;
+ bool found = false;
+ struct edge_prediction *pred;
+ int nedges = 0;
+ edge e, first = NULL, second = NULL;
+ edge_iterator ei;
+
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ if (!(e->flags & (EDGE_EH | EDGE_FAKE)))
+ {
+ nedges ++;
+ if (first && !second)
+ second = e;
+ if (!first)
+ first = e;
+ }
+
+ /* When there is no successor or only one choice, prediction is easy.
+
+ We are lazy for now and predict only basic blocks with two outgoing
+ edges. It is possible to predict the generic case too, but we have
+ to ignore the first-match heuristics and do more involved combining.
+ Implement this later. */
+ if (nedges != 2)
+ {
+ if (!bb->count)
+ set_even_probabilities (bb);
+ bb->predictions = NULL;
+ if (dump_file)
+ fprintf (dump_file, "%i edges in bb %i predicted to even probabilities\n",
+ nedges, bb->index);
+ return;
+ }
+
+ if (dump_file)
+ fprintf (dump_file, "Predictions for bb %i\n", bb->index);
+
+ /* We implement "first match" heuristics and use probability guessed
+ by predictor with smallest index. */
+ for (pred = bb->predictions; pred; pred = pred->ep_next)
+ {
+ int predictor = pred->ep_predictor;
+ int probability = pred->ep_probability;
+
+ if (pred->ep_edge != first)
+ probability = REG_BR_PROB_BASE - probability;
+
+ found = true;
+ if (best_predictor > predictor)
+ best_probability = probability, best_predictor = predictor;
+
+ d = (combined_probability * probability
+ + (REG_BR_PROB_BASE - combined_probability)
+ * (REG_BR_PROB_BASE - probability));
+
+ /* Use FP math to avoid overflows of 32bit integers. */
+ if (d == 0)
+ /* If one probability is 0% and one 100%, avoid division by zero. */
+ combined_probability = REG_BR_PROB_BASE / 2;
+ else
+ combined_probability = (((double) combined_probability) * probability
+ * REG_BR_PROB_BASE / d + 0.5);
+ }
+
+ /* Decide which heuristic to use. In case we didn't match anything,
+ use the no-prediction heuristic; in case we did match, use either
+ first match or Dempster-Shafer theory depending on the flags. */
+
+ if (predictor_info [best_predictor].flags & PRED_FLAG_FIRST_MATCH)
+ first_match = true;
+
+ if (!found)
+ dump_prediction (dump_file, PRED_NO_PREDICTION, combined_probability, bb, true);
+ else
+ {
+ dump_prediction (dump_file, PRED_DS_THEORY, combined_probability, bb,
+ !first_match);
+ dump_prediction (dump_file, PRED_FIRST_MATCH, best_probability, bb,
+ first_match);
+ }
+
+ if (first_match)
+ combined_probability = best_probability;
+ dump_prediction (dump_file, PRED_COMBINED, combined_probability, bb, true);
+
+ for (pred = bb->predictions; pred; pred = pred->ep_next)
+ {
+ int predictor = pred->ep_predictor;
+ int probability = pred->ep_probability;
+
+ if (pred->ep_edge != EDGE_SUCC (bb, 0))
+ probability = REG_BR_PROB_BASE - probability;
+ dump_prediction (dump_file, predictor, probability, bb,
+ !first_match || best_predictor == predictor);
+ }
+ bb->predictions = NULL;
+
+ if (!bb->count)
+ {
+ first->probability = combined_probability;
+ second->probability = REG_BR_PROB_BASE - combined_probability;
+ }
+}
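The loop above folds each predictor into the running estimate with a two-hypothesis Dempster-Shafer style combination. A standalone sketch of just that arithmetic; the predictor values fed in below are hypothetical:

#include <stdio.h>

#define REG_BR_PROB_BASE 10000

/* Combine the running estimate with one predictor's probability,
   mirroring the d/combined_probability computation above.  */
static int
combine (int combined, int prob)
{
  double d = (double) combined * prob
             + (double) (REG_BR_PROB_BASE - combined)
               * (REG_BR_PROB_BASE - prob);
  if (d == 0)  /* One says 0%, the other 100%: fall back to even.  */
    return REG_BR_PROB_BASE / 2;
  return (int) ((double) combined * prob * REG_BR_PROB_BASE / d + 0.5);
}

int
main (void)
{
  int p = REG_BR_PROB_BASE / 2;  /* Start from the even prior.  */
  p = combine (p, 8800);         /* Hypothetical 88% predictor.  */
  p = combine (p, 7000);         /* Hypothetical 70% predictor.  */
  printf ("combined: %d\n", p);  /* Agreeing predictors reinforce: 9448.  */
  return 0;
}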
+
+/* Predict edge probabilities by exploiting loop structure.
+ When RTLSIMPLELOOPS is set, attempt to count the number of iterations
+ by analyzing the RTL; otherwise use the tree-based approach. */
+static void
+predict_loops (struct loops *loops_info, bool rtlsimpleloops)
{
- basic_block bb;
unsigned i;
- connect_infinite_loops_to_exit ();
- calculate_dominance_info (CDI_DOMINATORS);
- calculate_dominance_info (CDI_POST_DOMINATORS);
+ if (!rtlsimpleloops)
+ scev_initialize (loops_info);
/* Try to predict out blocks in a loop that are not part of a
natural loop. */
@@ -405,36 +644,81 @@ estimate_probability (struct loops *loops_info)
{
basic_block bb, *bbs;
unsigned j;
- int exits;
+ unsigned n_exits;
struct loop *loop = loops_info->parray[i];
- struct loop_desc desc;
+ struct niter_desc desc;
unsigned HOST_WIDE_INT niter;
+ edge *exits;
- flow_loop_scan (loop, LOOP_EXIT_EDGES);
- exits = loop->num_exits;
+ exits = get_loop_exit_edges (loop, &n_exits);
- if (simple_loop_p (loop, &desc) && desc.const_iter)
+ if (rtlsimpleloops)
+ {
+ iv_analysis_loop_init (loop);
+ find_simple_exit (loop, &desc);
+
+ if (desc.simple_p && desc.const_iter)
+ {
+ int prob;
+ niter = desc.niter + 1;
+ if (niter == 0) /* We might overflow here. */
+ niter = desc.niter;
+ if (niter
+ > (unsigned int) PARAM_VALUE (PARAM_MAX_PREDICTED_ITERATIONS))
+ niter = PARAM_VALUE (PARAM_MAX_PREDICTED_ITERATIONS);
+
+ prob = (REG_BR_PROB_BASE
+ - (REG_BR_PROB_BASE + niter /2) / niter);
+ /* The branch prediction algorithm gives 0 frequency to everything
+ after the end of a loop that has 0 probability of finishing. */
+ if (prob == REG_BR_PROB_BASE)
+ prob = REG_BR_PROB_BASE - 1;
+ predict_edge (desc.in_edge, PRED_LOOP_ITERATIONS,
+ prob);
+ }
+ }
+ else
{
- int prob;
- niter = desc.niter + 1;
- if (niter == 0) /* We might overflow here. */
- niter = desc.niter;
-
- prob = (REG_BR_PROB_BASE
- - (REG_BR_PROB_BASE + niter /2) / niter);
- /* Branch prediction algorithm gives 0 frequency for everything
- after the end of loop for loop having 0 probability to finish. */
- if (prob == REG_BR_PROB_BASE)
- prob = REG_BR_PROB_BASE - 1;
- predict_edge (desc.in_edge, PRED_LOOP_ITERATIONS,
- prob);
+ struct tree_niter_desc niter_desc;
+
+ for (j = 0; j < n_exits; j++)
+ {
+ tree niter = NULL;
+
+ if (number_of_iterations_exit (loop, exits[j], &niter_desc, false))
+ niter = niter_desc.niter;
+ if (!niter || TREE_CODE (niter_desc.niter) != INTEGER_CST)
+ niter = loop_niter_by_eval (loop, exits[j]);
+
+ if (TREE_CODE (niter) == INTEGER_CST)
+ {
+ int probability;
+ int max = PARAM_VALUE (PARAM_MAX_PREDICTED_ITERATIONS);
+ if (host_integerp (niter, 1)
+ && tree_int_cst_lt (niter,
+ build_int_cstu (NULL_TREE, max - 1)))
+ {
+ HOST_WIDE_INT nitercst = tree_low_cst (niter, 1) + 1;
+ probability = ((REG_BR_PROB_BASE + nitercst / 2)
+ / nitercst);
+ }
+ else
+ probability = ((REG_BR_PROB_BASE + max / 2) / max);
+
+ predict_edge (exits[j], PRED_LOOP_ITERATIONS, probability);
+ }
+ }
+
}
+ free (exits);
bbs = get_loop_body (loop);
+
for (j = 0; j < loop->num_nodes; j++)
{
int header_found = 0;
edge e;
+ edge_iterator ei;
bb = bbs[j];
@@ -442,186 +726,654 @@ estimate_probability (struct loops *loops_info)
statements construct loops via "non-loop" constructs
in the source language and are better handled
separately. */
- if (!can_predict_insn_p (BB_END (bb))
+ if ((rtlsimpleloops && !can_predict_insn_p (BB_END (bb)))
|| predicted_by_p (bb, PRED_CONTINUE))
continue;
/* Loop branch heuristics - predict an edge back to a
loop's head as taken. */
- for (e = bb->succ; e; e = e->succ_next)
- if (e->dest == loop->header
- && e->src == loop->latch)
- {
- header_found = 1;
- predict_edge_def (e, PRED_LOOP_BRANCH, TAKEN);
- }
+ if (bb == loop->latch)
+ {
+ e = find_edge (loop->latch, loop->header);
+ if (e)
+ {
+ header_found = 1;
+ predict_edge_def (e, PRED_LOOP_BRANCH, TAKEN);
+ }
+ }
/* Loop exit heuristics - predict an edge exiting the loop if the
conditional has no loop header successors as not taken. */
if (!header_found)
- for (e = bb->succ; e; e = e->succ_next)
- if (e->dest->index < 0
- || !flow_bb_inside_loop_p (loop, e->dest))
- predict_edge
- (e, PRED_LOOP_EXIT,
- (REG_BR_PROB_BASE
- - predictor_info [(int) PRED_LOOP_EXIT].hitrate)
- / exits);
+ {
+ /* For a loop with many exits we don't want to predict every exit
+ with a fairly large probability, because if all exits are
+ considered in a row, the loop would be predicted to iterate
+ almost never. The code dividing the probability by the number
+ of exits is very rough. It should compute the number of exits
+ taken along each path through the function (not the overall
+ number of exits, which can be a lot higher for loops with wide
+ switch statements in them) and take the n-th root.
+
+ We limit the minimal probability to 2% to keep
+ EDGE_PROBABILITY_RELIABLE from trusting the branch prediction,
+ as this was causing a regression in the perl benchmark containing
+ such a wide loop. */
+
+ int probability = ((REG_BR_PROB_BASE
+ - predictor_info [(int) PRED_LOOP_EXIT].hitrate)
+ / n_exits);
+ if (probability < HITRATE (2))
+ probability = HITRATE (2);
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ if (e->dest->index < NUM_FIXED_BLOCKS
+ || !flow_bb_inside_loop_p (loop, e->dest))
+ predict_edge (e, PRED_LOOP_EXIT, probability);
+ }
}
/* Free basic blocks from get_loop_body. */
free (bbs);
}
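Both branches above turn a known iteration count into an exit-edge probability with the same rounded division. A worked example, assuming REG_BR_PROB_BASE is 10000: a loop that runs 10 iterations exits on roughly one latch test in ten.

#include <stdio.h>

#define REG_BR_PROB_BASE 10000

int
main (void)
{
  unsigned niter = 10;
  int exit_prob = (REG_BR_PROB_BASE + niter / 2) / niter;  /* 1000: ~10% */
  int back_prob = REG_BR_PROB_BASE - exit_prob;            /* 9000: ~90% */
  printf ("exit %d, back %d\n", exit_prob, back_prob);
  return 0;
}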
- /* Attempt to predict conditional jumps using a number of heuristics. */
- FOR_EACH_BB (bb)
+ if (!rtlsimpleloops)
{
- rtx last_insn = BB_END (bb);
- rtx cond, earliest;
- edge e;
+ scev_finalize ();
+ current_loops = NULL;
+ }
+}
- if (! can_predict_insn_p (last_insn))
- continue;
+/* Attempt to predict probabilities of BB outgoing edges using local
+ properties. */
+static void
+bb_estimate_probability_locally (basic_block bb)
+{
+ rtx last_insn = BB_END (bb);
+ rtx cond;
+
+ if (! can_predict_insn_p (last_insn))
+ return;
+ cond = get_condition (last_insn, NULL, false, false);
+ if (! cond)
+ return;
- for (e = bb->succ; e; e = e->succ_next)
+ /* Try "pointer heuristic."
+ A comparison ptr == 0 is predicted as false.
+ Similarly, a comparison ptr1 == ptr2 is predicted as false. */
+ if (COMPARISON_P (cond)
+ && ((REG_P (XEXP (cond, 0)) && REG_POINTER (XEXP (cond, 0)))
+ || (REG_P (XEXP (cond, 1)) && REG_POINTER (XEXP (cond, 1)))))
+ {
+ if (GET_CODE (cond) == EQ)
+ predict_insn_def (last_insn, PRED_POINTER, NOT_TAKEN);
+ else if (GET_CODE (cond) == NE)
+ predict_insn_def (last_insn, PRED_POINTER, TAKEN);
+ }
+ else
+
+ /* Try "opcode heuristic."
+ EQ tests are usually false and NE tests are usually true. Also,
+ most quantities are positive, so we can make the appropriate guesses
+ about signed comparisons against zero. */
+ switch (GET_CODE (cond))
+ {
+ case CONST_INT:
+ /* Unconditional branch. */
+ predict_insn_def (last_insn, PRED_UNCONDITIONAL,
+ cond == const0_rtx ? NOT_TAKEN : TAKEN);
+ break;
+
+ case EQ:
+ case UNEQ:
+ /* Floating point comparisons appear to behave in a very
+ unpredictable way because of the special role of = tests in
+ FP code. */
+ if (FLOAT_MODE_P (GET_MODE (XEXP (cond, 0))))
+ ;
+ /* Comparisons with 0 are often used for booleans and there is
+ nothing useful to predict about them. */
+ else if (XEXP (cond, 1) == const0_rtx
+ || XEXP (cond, 0) == const0_rtx)
+ ;
+ else
+ predict_insn_def (last_insn, PRED_OPCODE_NONEQUAL, NOT_TAKEN);
+ break;
+
+ case NE:
+ case LTGT:
+ /* Floating point comparisons appear to behave in a very
+ unpredictable way because of the special role of = tests in
+ FP code. */
+ if (FLOAT_MODE_P (GET_MODE (XEXP (cond, 0))))
+ ;
+ /* Comparisons with 0 are often used for booleans and there is
+ nothing useful to predict about them. */
+ else if (XEXP (cond, 1) == const0_rtx
+ || XEXP (cond, 0) == const0_rtx)
+ ;
+ else
+ predict_insn_def (last_insn, PRED_OPCODE_NONEQUAL, TAKEN);
+ break;
+
+ case ORDERED:
+ predict_insn_def (last_insn, PRED_FPOPCODE, TAKEN);
+ break;
+
+ case UNORDERED:
+ predict_insn_def (last_insn, PRED_FPOPCODE, NOT_TAKEN);
+ break;
+
+ case LE:
+ case LT:
+ if (XEXP (cond, 1) == const0_rtx || XEXP (cond, 1) == const1_rtx
+ || XEXP (cond, 1) == constm1_rtx)
+ predict_insn_def (last_insn, PRED_OPCODE_POSITIVE, NOT_TAKEN);
+ break;
+
+ case GE:
+ case GT:
+ if (XEXP (cond, 1) == const0_rtx || XEXP (cond, 1) == const1_rtx
+ || XEXP (cond, 1) == constm1_rtx)
+ predict_insn_def (last_insn, PRED_OPCODE_POSITIVE, TAKEN);
+ break;
+
+ default:
+ break;
+ }
+}
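A hypothetical C fragment showing the source patterns the pointer and opcode heuristics above key on; the function itself is illustrative only:

#include <stddef.h>
#include <stdio.h>

static int
sum_nonnegative (const int *p, int n)
{
  int s = 0;
  if (p == NULL)      /* Pointer heuristic: ptr == 0 predicted false.  */
    return 0;
  for (int i = 0; i < n; i++)
    {
      if (p[i] < 0)   /* Opcode heuristic: x < 0 predicted not taken.  */
        continue;
      s += p[i];
    }
  return s;
}

int
main (void)
{
  int a[3] = { 1, -2, 3 };
  printf ("%d\n", sum_nonnegative (a, 3));  /* 4 */
  return 0;
}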
+
+/* Set edge->probability for each successor edge of BB. */
+void
+guess_outgoing_edge_probabilities (basic_block bb)
+{
+ bb_estimate_probability_locally (bb);
+ combine_predictions_for_insn (BB_END (bb), bb);
+}
+
+/* Return the constant value EXPR is likely to have at execution time, or
+ NULL if unknown. The function is used by the builtin_expect branch
+ predictor, so the evidence must come from that construct and possible
+ additional constant folding.
+
+ We may want to implement a more involved value guess (such as value range
+ propagation based prediction), but such tricks shall go into a new
+ implementation. */
+
+static tree
+expr_expected_value (tree expr, bitmap visited)
+{
+ if (TREE_CONSTANT (expr))
+ return expr;
+ else if (TREE_CODE (expr) == SSA_NAME)
+ {
+ tree def = SSA_NAME_DEF_STMT (expr);
+
+ /* If we were already here, break the infinite cycle. */
+ if (bitmap_bit_p (visited, SSA_NAME_VERSION (expr)))
+ return NULL;
+ bitmap_set_bit (visited, SSA_NAME_VERSION (expr));
+
+ if (TREE_CODE (def) == PHI_NODE)
{
- /* Predict early returns to be probable, as we've already taken
- care for error returns and other are often used for fast paths
- trought function. */
- if ((e->dest == EXIT_BLOCK_PTR
- || (e->dest->succ && !e->dest->succ->succ_next
- && e->dest->succ->dest == EXIT_BLOCK_PTR))
- && !predicted_by_p (bb, PRED_NULL_RETURN)
- && !predicted_by_p (bb, PRED_CONST_RETURN)
- && !predicted_by_p (bb, PRED_NEGATIVE_RETURN)
- && !last_basic_block_p (e->dest))
- predict_edge_def (e, PRED_EARLY_RETURN, TAKEN);
+ /* All the arguments of the PHI node must have the same expected
+ constant value. */
+ int i;
+ tree val = NULL, new_val;
- /* Look for block we are guarding (ie we dominate it,
- but it doesn't postdominate us). */
- if (e->dest != EXIT_BLOCK_PTR && e->dest != bb
- && dominated_by_p (CDI_DOMINATORS, e->dest, e->src)
- && !dominated_by_p (CDI_POST_DOMINATORS, e->src, e->dest))
+ for (i = 0; i < PHI_NUM_ARGS (def); i++)
{
- rtx insn;
-
- /* The call heuristic claims that a guarded function call
- is improbable. This is because such calls are often used
- to signal exceptional situations such as printing error
- messages. */
- for (insn = BB_HEAD (e->dest); insn != NEXT_INSN (BB_END (e->dest));
- insn = NEXT_INSN (insn))
- if (GET_CODE (insn) == CALL_INSN
- /* Constant and pure calls are hardly used to signalize
- something exceptional. */
- && ! CONST_OR_PURE_CALL_P (insn))
- {
- predict_edge_def (e, PRED_CALL, NOT_TAKEN);
- break;
- }
+ tree arg = PHI_ARG_DEF (def, i);
+
+ /* If this PHI has itself as an argument, we cannot
+ determine the expected value of this argument. However,
+ if we can find an expected constant value for the other
+ PHI args then we can still be sure that this is
+ likely a constant. So be optimistic and just
+ continue with the next argument. */
+ if (arg == PHI_RESULT (def))
+ continue;
+
+ new_val = expr_expected_value (arg, visited);
+ if (!new_val)
+ return NULL;
+ if (!val)
+ val = new_val;
+ else if (!operand_equal_p (val, new_val, false))
+ return NULL;
+ }
+ return val;
+ }
+ if (TREE_CODE (def) != MODIFY_EXPR || TREE_OPERAND (def, 0) != expr)
+ return NULL;
+ return expr_expected_value (TREE_OPERAND (def, 1), visited);
+ }
+ else if (TREE_CODE (expr) == CALL_EXPR)
+ {
+ tree decl = get_callee_fndecl (expr);
+ if (!decl)
+ return NULL;
+ if (DECL_BUILT_IN_CLASS (decl) == BUILT_IN_NORMAL
+ && DECL_FUNCTION_CODE (decl) == BUILT_IN_EXPECT)
+ {
+ tree arglist = TREE_OPERAND (expr, 1);
+ tree val;
+
+ if (arglist == NULL_TREE
+ || TREE_CHAIN (arglist) == NULL_TREE)
+ return NULL;
+ val = TREE_VALUE (TREE_CHAIN (TREE_OPERAND (expr, 1)));
+ if (TREE_CONSTANT (val))
+ return val;
+ return TREE_VALUE (TREE_CHAIN (TREE_OPERAND (expr, 1)));
+ }
+ }
+ if (BINARY_CLASS_P (expr) || COMPARISON_CLASS_P (expr))
+ {
+ tree op0, op1, res;
+ op0 = expr_expected_value (TREE_OPERAND (expr, 0), visited);
+ if (!op0)
+ return NULL;
+ op1 = expr_expected_value (TREE_OPERAND (expr, 1), visited);
+ if (!op1)
+ return NULL;
+ res = fold_build2 (TREE_CODE (expr), TREE_TYPE (expr), op0, op1);
+ if (TREE_CONSTANT (res))
+ return res;
+ return NULL;
+ }
+ if (UNARY_CLASS_P (expr))
+ {
+ tree op0, res;
+ op0 = expr_expected_value (TREE_OPERAND (expr, 0), visited);
+ if (!op0)
+ return NULL;
+ res = fold_build1 (TREE_CODE (expr), TREE_TYPE (expr), op0);
+ if (TREE_CONSTANT (res))
+ return res;
+ return NULL;
+ }
+ return NULL;
+}
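What the walk above recovers in practice: the second argument of __builtin_expect supplies the expected value, which is then folded through comparisons, PHIs and assignments. A small hypothetical user of that machinery; likely/unlikely are the conventional wrappers, not names from this file:

#include <stdio.h>

#define likely(x)   __builtin_expect (!!(x), 1)
#define unlikely(x) __builtin_expect (!!(x), 0)

static int
process (int fd)
{
  if (unlikely (fd < 0))          /* Expected value 0: the error path  */
    {                             /* is predicted not taken.           */
      fprintf (stderr, "bad descriptor\n");
      return -1;
    }
  return fd * 2;                  /* Fast path, predicted taken.  */
}

int
main (void)
{
  return process (3) < 0;
}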
+
+/* Get rid of all builtin_expect calls we no longer need. */
+static void
+strip_builtin_expect (void)
+{
+ basic_block bb;
+ FOR_EACH_BB (bb)
+ {
+ block_stmt_iterator bi;
+ for (bi = bsi_start (bb); !bsi_end_p (bi); bsi_next (&bi))
+ {
+ tree stmt = bsi_stmt (bi);
+ tree fndecl;
+ tree arglist;
+
+ if (TREE_CODE (stmt) == MODIFY_EXPR
+ && TREE_CODE (TREE_OPERAND (stmt, 1)) == CALL_EXPR
+ && (fndecl = get_callee_fndecl (TREE_OPERAND (stmt, 1)))
+ && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
+ && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_EXPECT
+ && (arglist = TREE_OPERAND (TREE_OPERAND (stmt, 1), 1))
+ && TREE_CHAIN (arglist))
+ {
+ TREE_OPERAND (stmt, 1) = TREE_VALUE (arglist);
+ update_stmt (stmt);
}
}
+ }
+}
+
+/* Predict using opcode of the last statement in basic block. */
+static void
+tree_predict_by_opcode (basic_block bb)
+{
+ tree stmt = last_stmt (bb);
+ edge then_edge;
+ tree cond;
+ tree op0;
+ tree type;
+ tree val;
+ bitmap visited;
+ edge_iterator ei;
+
+ if (!stmt || TREE_CODE (stmt) != COND_EXPR)
+ return;
+ FOR_EACH_EDGE (then_edge, ei, bb->succs)
+ if (then_edge->flags & EDGE_TRUE_VALUE)
+ break;
+ cond = TREE_OPERAND (stmt, 0);
+ if (!COMPARISON_CLASS_P (cond))
+ return;
+ op0 = TREE_OPERAND (cond, 0);
+ type = TREE_TYPE (op0);
+ visited = BITMAP_ALLOC (NULL);
+ val = expr_expected_value (cond, visited);
+ BITMAP_FREE (visited);
+ if (val)
+ {
+ if (integer_zerop (val))
+ predict_edge_def (then_edge, PRED_BUILTIN_EXPECT, NOT_TAKEN);
+ else
+ predict_edge_def (then_edge, PRED_BUILTIN_EXPECT, TAKEN);
+ return;
+ }
+ /* Try "pointer heuristic."
+ A comparison ptr == 0 is predicted as false.
+ Similarly, a comparison ptr1 == ptr2 is predicted as false. */
+ if (POINTER_TYPE_P (type))
+ {
+ if (TREE_CODE (cond) == EQ_EXPR)
+ predict_edge_def (then_edge, PRED_TREE_POINTER, NOT_TAKEN);
+ else if (TREE_CODE (cond) == NE_EXPR)
+ predict_edge_def (then_edge, PRED_TREE_POINTER, TAKEN);
+ }
+ else
- cond = get_condition (last_insn, &earliest, false);
- if (! cond)
- continue;
+ /* Try "opcode heuristic."
+ EQ tests are usually false and NE tests are usually true. Also,
+ most quantities are positive, so we can make the appropriate guesses
+ about signed comparisons against zero. */
+ switch (TREE_CODE (cond))
+ {
+ case EQ_EXPR:
+ case UNEQ_EXPR:
+ /* Floating point comparisons appear to behave in a very
+ unpredictable way because of the special role of = tests in
+ FP code. */
+ if (FLOAT_TYPE_P (type))
+ ;
+ /* Comparisons with 0 are often used for booleans and there is
+ nothing useful to predict about them. */
+ else if (integer_zerop (op0)
+ || integer_zerop (TREE_OPERAND (cond, 1)))
+ ;
+ else
+ predict_edge_def (then_edge, PRED_TREE_OPCODE_NONEQUAL, NOT_TAKEN);
+ break;
+
+ case NE_EXPR:
+ case LTGT_EXPR:
+ /* Floating point comparisons appear to behave in a very
+ unpredictable way because of the special role of = tests in
+ FP code. */
+ if (FLOAT_TYPE_P (type))
+ ;
+ /* Comparisons with 0 are often used for booleans and there is
+ nothing useful to predict about them. */
+ else if (integer_zerop (op0)
+ || integer_zerop (TREE_OPERAND (cond, 1)))
+ ;
+ else
+ predict_edge_def (then_edge, PRED_TREE_OPCODE_NONEQUAL, TAKEN);
+ break;
+
+ case ORDERED_EXPR:
+ predict_edge_def (then_edge, PRED_TREE_FPOPCODE, TAKEN);
+ break;
+
+ case UNORDERED_EXPR:
+ predict_edge_def (then_edge, PRED_TREE_FPOPCODE, NOT_TAKEN);
+ break;
+
+ case LE_EXPR:
+ case LT_EXPR:
+ if (integer_zerop (TREE_OPERAND (cond, 1))
+ || integer_onep (TREE_OPERAND (cond, 1))
+ || integer_all_onesp (TREE_OPERAND (cond, 1))
+ || real_zerop (TREE_OPERAND (cond, 1))
+ || real_onep (TREE_OPERAND (cond, 1))
+ || real_minus_onep (TREE_OPERAND (cond, 1)))
+ predict_edge_def (then_edge, PRED_TREE_OPCODE_POSITIVE, NOT_TAKEN);
+ break;
+
+ case GE_EXPR:
+ case GT_EXPR:
+ if (integer_zerop (TREE_OPERAND (cond, 1))
+ || integer_onep (TREE_OPERAND (cond, 1))
+ || integer_all_onesp (TREE_OPERAND (cond, 1))
+ || real_zerop (TREE_OPERAND (cond, 1))
+ || real_onep (TREE_OPERAND (cond, 1))
+ || real_minus_onep (TREE_OPERAND (cond, 1)))
+ predict_edge_def (then_edge, PRED_TREE_OPCODE_POSITIVE, TAKEN);
+ break;
+
+ default:
+ break;
+ }
+}
- /* Try "pointer heuristic."
- A comparison ptr == 0 is predicted as false.
- Similarly, a comparison ptr1 == ptr2 is predicted as false. */
- if (GET_RTX_CLASS (GET_CODE (cond)) == '<'
- && ((REG_P (XEXP (cond, 0)) && REG_POINTER (XEXP (cond, 0)))
- || (REG_P (XEXP (cond, 1)) && REG_POINTER (XEXP (cond, 1)))))
+/* Try to guess whether the return value indicates an error code. */
+static enum br_predictor
+return_prediction (tree val, enum prediction *prediction)
+{
+ /* VOID. */
+ if (!val)
+ return PRED_NO_PREDICTION;
+ /* Different heuristics for pointers and scalars. */
+ if (POINTER_TYPE_P (TREE_TYPE (val)))
+ {
+ /* NULL is usually not returned. */
+ if (integer_zerop (val))
{
- if (GET_CODE (cond) == EQ)
- predict_insn_def (last_insn, PRED_POINTER, NOT_TAKEN);
- else if (GET_CODE (cond) == NE)
- predict_insn_def (last_insn, PRED_POINTER, TAKEN);
+ *prediction = NOT_TAKEN;
+ return PRED_NULL_RETURN;
}
- else
+ }
+ else if (INTEGRAL_TYPE_P (TREE_TYPE (val)))
+ {
+ /* Negative return values are often used to indicate
+ errors. */
+ if (TREE_CODE (val) == INTEGER_CST
+ && tree_int_cst_sgn (val) < 0)
+ {
+ *prediction = NOT_TAKEN;
+ return PRED_NEGATIVE_RETURN;
+ }
+ /* Constant return values seem to be commonly taken.
+ Zero/one often represent booleans, so exclude them from the
+ heuristics. */
+ if (TREE_CONSTANT (val)
+ && (!integer_zerop (val) && !integer_onep (val)))
+ {
+ *prediction = TAKEN;
+ return PRED_CONST_RETURN;
+ }
+ }
+ return PRED_NO_PREDICTION;
+}
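A hypothetical function showing the return sites each predictor above targets:

#include <stdio.h>

static int
parse (const char *s)
{
  if (!s)
    return -1;     /* Negative constant: PRED_NEGATIVE_RETURN, error path.  */
  if (s[0] == 'x')
    return 42;     /* Non-0/1 constant: PRED_CONST_RETURN, predicted TAKEN.  */
  return s[0];     /* Variable value: PRED_NO_PREDICTION.  */
}

int
main (void)
{
  printf ("%d %d %d\n", parse (NULL), parse ("x"), parse ("a"));
  return 0;
}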
- /* Try "opcode heuristic."
- EQ tests are usually false and NE tests are usually true. Also,
- most quantities are positive, so we can make the appropriate guesses
- about signed comparisons against zero. */
- switch (GET_CODE (cond))
- {
- case CONST_INT:
- /* Unconditional branch. */
- predict_insn_def (last_insn, PRED_UNCONDITIONAL,
- cond == const0_rtx ? NOT_TAKEN : TAKEN);
- break;
+/* Find the basic block with the return expression and look up a possible
+ return value, trying to apply the RETURN_PREDICTION heuristics. */
+static void
+apply_return_prediction (int *heads)
+{
+ tree return_stmt = NULL;
+ tree return_val;
+ edge e;
+ tree phi;
+ int phi_num_args, i;
+ enum br_predictor pred;
+ enum prediction direction;
+ edge_iterator ei;
- case EQ:
- case UNEQ:
- /* Floating point comparisons appears to behave in a very
- unpredictable way because of special role of = tests in
- FP code. */
- if (FLOAT_MODE_P (GET_MODE (XEXP (cond, 0))))
- ;
- /* Comparisons with 0 are often used for booleans and there is
- nothing useful to predict about them. */
- else if (XEXP (cond, 1) == const0_rtx
- || XEXP (cond, 0) == const0_rtx)
- ;
- else
- predict_insn_def (last_insn, PRED_OPCODE_NONEQUAL, NOT_TAKEN);
- break;
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
+ {
+ return_stmt = last_stmt (e->src);
+ if (TREE_CODE (return_stmt) == RETURN_EXPR)
+ break;
+ }
+ if (!e)
+ return;
+ return_val = TREE_OPERAND (return_stmt, 0);
+ if (!return_val)
+ return;
+ if (TREE_CODE (return_val) == MODIFY_EXPR)
+ return_val = TREE_OPERAND (return_val, 1);
+ if (TREE_CODE (return_val) != SSA_NAME
+ || !SSA_NAME_DEF_STMT (return_val)
+ || TREE_CODE (SSA_NAME_DEF_STMT (return_val)) != PHI_NODE)
+ return;
+ for (phi = SSA_NAME_DEF_STMT (return_val); phi; phi = PHI_CHAIN (phi))
+ if (PHI_RESULT (phi) == return_val)
+ break;
+ if (!phi)
+ return;
+ phi_num_args = PHI_NUM_ARGS (phi);
+ pred = return_prediction (PHI_ARG_DEF (phi, 0), &direction);
+
+ /* Avoid the degenerate case where all return values from the function
+ belong to the same category (i.e. they are all positive constants)
+ so we can hardly say anything about them. */
+ for (i = 1; i < phi_num_args; i++)
+ if (pred != return_prediction (PHI_ARG_DEF (phi, i), &direction))
+ break;
+ if (i != phi_num_args)
+ for (i = 0; i < phi_num_args; i++)
+ {
+ pred = return_prediction (PHI_ARG_DEF (phi, i), &direction);
+ if (pred != PRED_NO_PREDICTION)
+ predict_paths_leading_to (PHI_ARG_EDGE (phi, i)->src, heads, pred,
+ direction);
+ }
+}
- case NE:
- case LTGT:
- /* Floating point comparisons appears to behave in a very
- unpredictable way because of special role of = tests in
- FP code. */
- if (FLOAT_MODE_P (GET_MODE (XEXP (cond, 0))))
- ;
- /* Comparisons with 0 are often used for booleans and there is
- nothing useful to predict about them. */
- else if (XEXP (cond, 1) == const0_rtx
- || XEXP (cond, 0) == const0_rtx)
- ;
- else
- predict_insn_def (last_insn, PRED_OPCODE_NONEQUAL, TAKEN);
- break;
+/* Look for basic blocks that contain unlikely-to-happen events
+ (such as noreturn calls) and mark all paths leading to execution
+ of these basic blocks as unlikely. */
- case ORDERED:
- predict_insn_def (last_insn, PRED_FPOPCODE, TAKEN);
- break;
+static void
+tree_bb_level_predictions (void)
+{
+ basic_block bb;
+ int *heads;
- case UNORDERED:
- predict_insn_def (last_insn, PRED_FPOPCODE, NOT_TAKEN);
- break;
+ heads = XNEWVEC (int, last_basic_block);
+ memset (heads, ENTRY_BLOCK, sizeof (int) * last_basic_block);
+ heads[ENTRY_BLOCK_PTR->next_bb->index] = last_basic_block;
- case LE:
- case LT:
- if (XEXP (cond, 1) == const0_rtx || XEXP (cond, 1) == const1_rtx
- || XEXP (cond, 1) == constm1_rtx)
- predict_insn_def (last_insn, PRED_OPCODE_POSITIVE, NOT_TAKEN);
- break;
+ apply_return_prediction (heads);
- case GE:
- case GT:
- if (XEXP (cond, 1) == const0_rtx || XEXP (cond, 1) == const1_rtx
- || XEXP (cond, 1) == constm1_rtx)
- predict_insn_def (last_insn, PRED_OPCODE_POSITIVE, TAKEN);
- break;
+ FOR_EACH_BB (bb)
+ {
+ block_stmt_iterator bsi = bsi_last (bb);
- default:
- break;
- }
+ for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi))
+ {
+ tree stmt = bsi_stmt (bsi);
+ switch (TREE_CODE (stmt))
+ {
+ case MODIFY_EXPR:
+ if (TREE_CODE (TREE_OPERAND (stmt, 1)) == CALL_EXPR)
+ {
+ stmt = TREE_OPERAND (stmt, 1);
+ goto call_expr;
+ }
+ break;
+ case CALL_EXPR:
+call_expr:;
+ if (call_expr_flags (stmt) & ECF_NORETURN)
+ predict_paths_leading_to (bb, heads, PRED_NORETURN,
+ NOT_TAKEN);
+ break;
+ default:
+ break;
+ }
+ }
}
- /* Attach the combined probability to each conditional jump. */
+ free (heads);
+}
+
+/* Predict branch probabilities and estimate profile of the tree CFG. */
+static unsigned int
+tree_estimate_probability (void)
+{
+ basic_block bb;
+ struct loops loops_info;
+
+ flow_loops_find (&loops_info);
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ flow_loops_dump (&loops_info, dump_file, NULL, 0);
+
+ add_noreturn_fake_exit_edges ();
+ connect_infinite_loops_to_exit ();
+ calculate_dominance_info (CDI_DOMINATORS);
+ calculate_dominance_info (CDI_POST_DOMINATORS);
+
+ tree_bb_level_predictions ();
+
+ mark_irreducible_loops (&loops_info);
+ predict_loops (&loops_info, false);
+
FOR_EACH_BB (bb)
- if (GET_CODE (BB_END (bb)) == JUMP_INSN
- && any_condjump_p (BB_END (bb))
- && bb->succ->succ_next != NULL)
- combine_predictions_for_insn (BB_END (bb), bb);
+ {
+ edge e;
+ edge_iterator ei;
- free_dominance_info (CDI_POST_DOMINATORS);
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ {
+ /* Predict early returns to be probable, as we've already taken
+ care of error returns, and other cases are often used for
+ fast paths through the function. */
+ if (e->dest == EXIT_BLOCK_PTR
+ && TREE_CODE (last_stmt (bb)) == RETURN_EXPR
+ && !single_pred_p (bb))
+ {
+ edge e1;
+ edge_iterator ei1;
+
+ FOR_EACH_EDGE (e1, ei1, bb->preds)
+ if (!predicted_by_p (e1->src, PRED_NULL_RETURN)
+ && !predicted_by_p (e1->src, PRED_CONST_RETURN)
+ && !predicted_by_p (e1->src, PRED_NEGATIVE_RETURN)
+ && !last_basic_block_p (e1->src))
+ predict_edge_def (e1, PRED_TREE_EARLY_RETURN, NOT_TAKEN);
+ }
+
+ /* Look for the block we are guarding (i.e. we dominate it,
+ but it doesn't postdominate us). */
+ if (e->dest != EXIT_BLOCK_PTR && e->dest != bb
+ && dominated_by_p (CDI_DOMINATORS, e->dest, e->src)
+ && !dominated_by_p (CDI_POST_DOMINATORS, e->src, e->dest))
+ {
+ block_stmt_iterator bi;
+
+ /* The call heuristic claims that a guarded function call
+ is improbable. This is because such calls are often used
+ to signal exceptional situations such as printing error
+ messages. */
+ for (bi = bsi_start (e->dest); !bsi_end_p (bi);
+ bsi_next (&bi))
+ {
+ tree stmt = bsi_stmt (bi);
+ if ((TREE_CODE (stmt) == CALL_EXPR
+ || (TREE_CODE (stmt) == MODIFY_EXPR
+ && TREE_CODE (TREE_OPERAND (stmt, 1)) == CALL_EXPR))
+ /* Constant and pure calls are hardly used to signal
+ something exceptional. */
+ && TREE_SIDE_EFFECTS (stmt))
+ {
+ predict_edge_def (e, PRED_CALL, NOT_TAKEN);
+ break;
+ }
+ }
+ }
+ }
+ tree_predict_by_opcode (bb);
+ }
+ FOR_EACH_BB (bb)
+ combine_predictions_for_bb (bb);
- remove_fake_edges ();
- estimate_bb_frequencies (loops_info);
+ strip_builtin_expect ();
+ estimate_bb_frequencies (&loops_info);
+ free_dominance_info (CDI_POST_DOMINATORS);
+ remove_fake_exit_edges ();
+ flow_loops_free (&loops_info);
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ dump_tree_cfg (dump_file, dump_flags);
+ if (profile_status == PROFILE_ABSENT)
+ profile_status = PROFILE_GUESSED;
+ return 0;
}
/* __builtin_expect dropped tokens into the insn stream describing expected
@@ -655,7 +1407,7 @@ expected_value_to_br_prob (void)
case JUMP_INSN:
/* Look for simple conditional branches. If we haven't got an
expected value yet, no point going further. */
- if (GET_CODE (insn) != JUMP_INSN || ev == NULL_RTX
+ if (!JUMP_P (insn) || ev == NULL_RTX
|| ! any_condjump_p (insn))
continue;
break;
@@ -677,7 +1429,8 @@ expected_value_to_br_prob (void)
(lt r70, r71)
Could use cselib to try and reduce this further. */
cond = XEXP (SET_SRC (pc_set (insn)), 0);
- cond = canonicalize_condition (insn, cond, 0, NULL, ev_reg, false);
+ cond = canonicalize_condition (insn, cond, 0, NULL, ev_reg,
+ false, false);
if (! cond || XEXP (cond, 0) != ev_reg
|| GET_CODE (XEXP (cond, 1)) != CONST_INT)
continue;
@@ -690,8 +1443,7 @@ expected_value_to_br_prob (void)
cond = simplify_rtx (cond);
/* Turn the condition into a scaled branch probability. */
- if (cond != const_true_rtx && cond != const0_rtx)
- abort ();
+ gcc_assert (cond == const_true_rtx || cond == const0_rtx);
predict_insn_def (insn, PRED_BUILTIN_EXPECT,
cond == const_true_rtx ? TAKEN : NOT_TAKEN);
}
@@ -707,8 +1459,8 @@ last_basic_block_p (basic_block bb)
return (bb->next_bb == EXIT_BLOCK_PTR
|| (bb->next_bb->next_bb == EXIT_BLOCK_PTR
- && bb->succ && !bb->succ->succ_next
- && bb->succ->dest->next_bb == EXIT_BLOCK_PTR));
+ && single_succ_p (bb)
+ && single_succ (bb)->next_bb == EXIT_BLOCK_PTR));
}
/* Sets branch probabilities according to PREDiction and
@@ -718,15 +1470,14 @@ last_basic_block_p (basic_block bb)
on demand, so -1 may be there in case this was not needed yet). */
static void
-process_note_prediction (basic_block bb, int *heads, int pred, int flags)
+predict_paths_leading_to (basic_block bb, int *heads, enum br_predictor pred,
+ enum prediction taken)
{
edge e;
+ edge_iterator ei;
int y;
- bool taken;
-
- taken = flags & IS_TAKEN;
- if (heads[bb->index] < 0)
+ if (heads[bb->index] == ENTRY_BLOCK)
{
/* This is the first time we need this field in the heads array; so
find the first dominator that we do not post-dominate (we are
@@ -735,7 +1486,7 @@ process_note_prediction (basic_block bb, int *heads, int pred, int flags)
basic_block next_ai = get_immediate_dominator (CDI_DOMINATORS, bb);
int head;
- while (heads[next_ai->index] < 0)
+ while (heads[next_ai->index] == ENTRY_BLOCK)
{
if (!dominated_by_p (CDI_POST_DOMINATORS, next_ai, bb))
break;
@@ -750,10 +1501,7 @@ process_note_prediction (basic_block bb, int *heads, int pred, int flags)
while (next_ai != bb)
{
next_ai = ai;
- if (heads[ai->index] == ENTRY_BLOCK)
- ai = ENTRY_BLOCK_PTR;
- else
- ai = BASIC_BLOCK (heads[ai->index]);
+ ai = BASIC_BLOCK (heads[ai->index]);
heads[next_ai->index] = head;
}
}
@@ -761,99 +1509,13 @@ process_note_prediction (basic_block bb, int *heads, int pred, int flags)
/* Now find the edge that leads to our branch and apply the prediction. */
- if (y == last_basic_block || !can_predict_insn_p (BB_END (BASIC_BLOCK (y))))
+ if (y == last_basic_block)
return;
- for (e = BASIC_BLOCK (y)->succ; e; e = e->succ_next)
- if (e->dest->index >= 0
+ FOR_EACH_EDGE (e, ei, BASIC_BLOCK (y)->succs)
+ if (e->dest->index >= NUM_FIXED_BLOCKS
&& dominated_by_p (CDI_POST_DOMINATORS, e->dest, bb))
predict_edge_def (e, pred, taken);
}
-
-/* Gathers NOTE_INSN_PREDICTIONs in given basic block and turns them
- into branch probabilities. For description of heads array, see
- process_note_prediction. */
-
-static void
-process_note_predictions (basic_block bb, int *heads)
-{
- rtx insn;
- edge e;
-
- /* Additionally, we check here for blocks with no successors. */
- int contained_noreturn_call = 0;
- int was_bb_head = 0;
- int noreturn_block = 1;
-
- for (insn = BB_END (bb); insn;
- was_bb_head |= (insn == BB_HEAD (bb)), insn = PREV_INSN (insn))
- {
- if (GET_CODE (insn) != NOTE)
- {
- if (was_bb_head)
- break;
- else
- {
- /* Noreturn calls cause program to exit, therefore they are
- always predicted as not taken. */
- if (GET_CODE (insn) == CALL_INSN
- && find_reg_note (insn, REG_NORETURN, NULL))
- contained_noreturn_call = 1;
- continue;
- }
- }
- if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_PREDICTION)
- {
- int alg = (int) NOTE_PREDICTION_ALG (insn);
- /* Process single prediction note. */
- process_note_prediction (bb,
- heads,
- alg, (int) NOTE_PREDICTION_FLAGS (insn));
- delete_insn (insn);
- }
- }
- for (e = bb->succ; e; e = e->succ_next)
- if (!(e->flags & EDGE_FAKE))
- noreturn_block = 0;
- if (contained_noreturn_call)
- {
- /* This block ended from other reasons than because of return.
- If it is because of noreturn call, this should certainly not
- be taken. Otherwise it is probably some error recovery. */
- process_note_prediction (bb, heads, PRED_NORETURN, NOT_TAKEN);
- }
-}
-
-/* Gathers NOTE_INSN_PREDICTIONs and turns them into
- branch probabilities. */
-
-void
-note_prediction_to_br_prob (void)
-{
- basic_block bb;
- int *heads;
-
- /* To enable handling of noreturn blocks. */
- add_noreturn_fake_exit_edges ();
- connect_infinite_loops_to_exit ();
-
- calculate_dominance_info (CDI_POST_DOMINATORS);
- calculate_dominance_info (CDI_DOMINATORS);
-
- heads = xmalloc (sizeof (int) * last_basic_block);
- memset (heads, -1, sizeof (int) * last_basic_block);
- heads[ENTRY_BLOCK_PTR->next_bb->index] = last_basic_block;
-
- /* Process all prediction notes. */
-
- FOR_EACH_BB (bb)
- process_note_predictions (bb, heads);
-
- free_dominance_info (CDI_POST_DOMINATORS);
- free_dominance_info (CDI_DOMINATORS);
- free (heads);
-
- remove_fake_edges ();
-}
/* This is used to carry information about basic blocks. It is
attached to the AUX field of the standard CFG block. */
@@ -866,9 +1528,6 @@ typedef struct block_info_def
/* To keep queue of basic blocks to process. */
basic_block next;
- /* True if block needs to be visited in propagate_freq. */
- unsigned int tovisit:1;
-
/* Number of predecessors we need to visit first. */
int npredecessors;
} *block_info;
@@ -876,11 +1535,11 @@ typedef struct block_info_def
/* Similar information for edges. */
typedef struct edge_info_def
{
- /* In case edge is an loopback edge, the probability edge will be reached
+ /* In case edge is a loopback edge, the probability edge will be reached
in case header is. Estimated number of iterations of the loop can be
then computed as 1 / (1 - back_edge_prob). */
sreal back_edge_prob;
- /* True if the edge is an loopback edge in the natural loop. */
+ /* True if the edge is a loopback edge in the natural loop. */
unsigned int back_edge:1;
} *edge_info;
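The iteration estimate in the comment is the usual geometric series argument: if the back edge is taken with probability p each time the header executes, the expected trip count is 1 / (1 - p). A quick numeric check with a hypothetical value:

#include <stdio.h>

int
main (void)
{
  /* back_edge_prob = 0.9: the loop continues 90% of the time,
     so it is expected to run 1 / (1 - 0.9) = 10 iterations.  */
  double back_edge_prob = 0.9;
  printf ("estimated iterations: %.1f\n", 1.0 / (1.0 - back_edge_prob));
  return 0;
}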
@@ -891,38 +1550,47 @@ typedef struct edge_info_def
Propagate the frequencies for LOOP. */
static void
-propagate_freq (struct loop *loop)
+propagate_freq (struct loop *loop, bitmap tovisit)
{
basic_block head = loop->header;
basic_block bb;
basic_block last;
+ unsigned i;
edge e;
basic_block nextbb;
+ bitmap_iterator bi;
/* For each basic block we need to visit, count the number of its
predecessors that we need to visit first. */
- FOR_EACH_BB (bb)
+ EXECUTE_IF_SET_IN_BITMAP (tovisit, 0, i, bi)
{
- if (BLOCK_INFO (bb)->tovisit)
+ edge_iterator ei;
+ int count = 0;
+
+ /* The outermost "loop" includes the exit block, which we can not
+ look up via BASIC_BLOCK. Detect this and use EXIT_BLOCK_PTR
+ directly. Do the same for the entry block. */
+ bb = BASIC_BLOCK (i);
+
+ FOR_EACH_EDGE (e, ei, bb->preds)
{
- int count = 0;
-
- for (e = bb->pred; e; e = e->pred_next)
- if (BLOCK_INFO (e->src)->tovisit && !(e->flags & EDGE_DFS_BACK))
- count++;
- else if (BLOCK_INFO (e->src)->tovisit
- && rtl_dump_file && !EDGE_INFO (e)->back_edge)
- fprintf (rtl_dump_file,
- "Irreducible region hit, ignoring edge to %i->%i\n",
- e->src->index, bb->index);
- BLOCK_INFO (bb)->npredecessors = count;
+ bool visit = bitmap_bit_p (tovisit, e->src->index);
+
+ if (visit && !(e->flags & EDGE_DFS_BACK))
+ count++;
+ else if (visit && dump_file && !EDGE_INFO (e)->back_edge)
+ fprintf (dump_file,
+ "Irreducible region hit, ignoring edge to %i->%i\n",
+ e->src->index, bb->index);
}
+ BLOCK_INFO (bb)->npredecessors = count;
}
memcpy (&BLOCK_INFO (head)->frequency, &real_one, sizeof (real_one));
last = head;
for (bb = head; bb; bb = nextbb)
{
+ edge_iterator ei;
sreal cyclic_probability, frequency;
memcpy (&cyclic_probability, &real_zero, sizeof (real_zero));
@@ -935,12 +1603,12 @@ propagate_freq (struct loop *loop)
if (bb != head)
{
#ifdef ENABLE_CHECKING
- for (e = bb->pred; e; e = e->pred_next)
- if (BLOCK_INFO (e->src)->tovisit && !(e->flags & EDGE_DFS_BACK))
- abort ();
+ FOR_EACH_EDGE (e, ei, bb->preds)
+ gcc_assert (!bitmap_bit_p (tovisit, e->src->index)
+ || (e->flags & EDGE_DFS_BACK));
#endif
- for (e = bb->pred; e; e = e->pred_next)
+ FOR_EACH_EDGE (e, ei, bb->preds)
if (EDGE_INFO (e)->back_edge)
{
sreal_add (&cyclic_probability, &cyclic_probability,
@@ -982,26 +1650,25 @@ propagate_freq (struct loop *loop)
}
}
- BLOCK_INFO (bb)->tovisit = 0;
-
- /* Compute back edge frequencies. */
- for (e = bb->succ; e; e = e->succ_next)
- if (e->dest == head)
- {
- sreal tmp;
-
- /* EDGE_INFO (e)->back_edge_prob
- = ((e->probability * BLOCK_INFO (bb)->frequency)
- / REG_BR_PROB_BASE); */
+ bitmap_clear_bit (tovisit, bb->index);
- sreal_init (&tmp, e->probability, 0);
- sreal_mul (&tmp, &tmp, &BLOCK_INFO (bb)->frequency);
- sreal_mul (&EDGE_INFO (e)->back_edge_prob,
- &tmp, &real_inv_br_prob_base);
- }
+ e = find_edge (bb, head);
+ if (e)
+ {
+ sreal tmp;
+
+ /* EDGE_INFO (e)->back_edge_prob
+ = ((e->probability * BLOCK_INFO (bb)->frequency)
+ / REG_BR_PROB_BASE); */
+
+ sreal_init (&tmp, e->probability, 0);
+ sreal_mul (&tmp, &tmp, &BLOCK_INFO (bb)->frequency);
+ sreal_mul (&EDGE_INFO (e)->back_edge_prob,
+ &tmp, &real_inv_br_prob_base);
+ }
/* Propagate to successor blocks. */
- for (e = bb->succ; e; e = e->succ_next)
+ FOR_EACH_EDGE (e, ei, bb->succs)
if (!(e->flags & EDGE_DFS_BACK)
&& BLOCK_INFO (e->dest)->npredecessors)
{
@@ -1012,17 +1679,17 @@ propagate_freq (struct loop *loop)
nextbb = e->dest;
else
BLOCK_INFO (last)->next = e->dest;
-
+
last = e->dest;
}
- }
+ }
}
}
/* Estimate probabilities of loopback edges in loops at same nest level. */
static void
-estimate_loops_at_level (struct loop *first_loop)
+estimate_loops_at_level (struct loop *first_loop, bitmap tovisit)
{
struct loop *loop;
@@ -1032,9 +1699,10 @@ estimate_loops_at_level (struct loop *first_loop)
basic_block *bbs;
unsigned i;
- estimate_loops_at_level (loop->inner);
+ estimate_loops_at_level (loop->inner, tovisit);
- if (loop->latch->succ) /* Do not do this for dummy function loop. */
+ /* Do not do this for dummy function loop. */
+ if (EDGE_COUNT (loop->latch->succs) > 0)
{
/* Find current loop back edge and mark it. */
e = loop_latch_edge (loop);
@@ -1043,25 +1711,28 @@ estimate_loops_at_level (struct loop *first_loop)
bbs = get_loop_body (loop);
for (i = 0; i < loop->num_nodes; i++)
- BLOCK_INFO (bbs[i])->tovisit = 1;
+ bitmap_set_bit (tovisit, bbs[i]->index);
free (bbs);
- propagate_freq (loop);
+ propagate_freq (loop, tovisit);
}
}
-/* Convert counts measured by profile driven feedback to frequencies. */
+/* Convert counts measured by profile driven feedback to frequencies.
+ Return nonzero iff there was any nonzero execution count. */
-static void
+int
counts_to_freqs (void)
{
- gcov_type count_max = 1;
+ gcov_type count_max, true_count_max = 0;
basic_block bb;
FOR_EACH_BB (bb)
- count_max = MAX (bb->count, count_max);
+ true_count_max = MAX (bb->count, true_count_max);
+ count_max = MAX (true_count_max, 1);
FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
bb->frequency = (bb->count * BB_FREQ_MAX + count_max / 2) / count_max;
+ return true_count_max;
}
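The conversion above rescales raw profile counts onto the fixed frequency range with rounding. A worked example, assuming BB_FREQ_MAX is 10000 (its usual value in basic-block.h):

#include <stdio.h>

#define BB_FREQ_MAX 10000
typedef long long gcov_type;

int
main (void)
{
  /* A block executed 250 times in a function whose hottest block ran
     1000 times gets frequency 2500 on the 0..BB_FREQ_MAX scale.  */
  gcov_type count = 250, count_max = 1000;
  int freq = (int) ((count * BB_FREQ_MAX + count_max / 2) / count_max);
  printf ("frequency: %d\n", freq);
  return 0;
}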
/* Return true if function is likely to be expensive, so there is no point to
@@ -1078,8 +1749,7 @@ expensive_function_p (int threshold)
/* We cannot compute accurately for large thresholds due to scaled
frequencies. */
- if (threshold > BB_FREQ_MAX)
- abort ();
+ gcc_assert (threshold <= BB_FREQ_MAX);
/* Frequencies are out of range. This either means that function contains
internal loop executing more than BB_FREQ_MAX times or profile feedback
@@ -1114,11 +1784,10 @@ estimate_bb_frequencies (struct loops *loops)
basic_block bb;
sreal freq_max;
- if (flag_branch_probabilities)
- counts_to_freqs ();
- else
+ if (!flag_branch_probabilities || !counts_to_freqs ())
{
static int real_values_initialized = 0;
+ bitmap tovisit;
if (!real_values_initialized)
{
@@ -1133,43 +1802,19 @@ estimate_bb_frequencies (struct loops *loops)
}
mark_dfs_back_edges ();
- /* Fill in the probability values in flowgraph based on the REG_BR_PROB
- notes. */
- FOR_EACH_BB (bb)
- {
- rtx last_insn = BB_END (bb);
-
- if (!can_predict_insn_p (last_insn))
- {
- /* We can predict only conditional jumps at the moment.
- Expect each edge to be equally probable.
- ?? In the future we want to make abnormal edges improbable. */
- int nedges = 0;
- edge e;
-
- for (e = bb->succ; e; e = e->succ_next)
- {
- nedges++;
- if (e->probability != 0)
- break;
- }
- if (!e)
- for (e = bb->succ; e; e = e->succ_next)
- e->probability = (REG_BR_PROB_BASE + nedges / 2) / nedges;
- }
- }
- ENTRY_BLOCK_PTR->succ->probability = REG_BR_PROB_BASE;
+ single_succ_edge (ENTRY_BLOCK_PTR)->probability = REG_BR_PROB_BASE;
/* Set up block info for each basic block. */
+ tovisit = BITMAP_ALLOC (NULL);
alloc_aux_for_blocks (sizeof (struct block_info_def));
alloc_aux_for_edges (sizeof (struct edge_info_def));
FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
{
edge e;
+ edge_iterator ei;
- BLOCK_INFO (bb)->tovisit = 0;
- for (e = bb->succ; e; e = e->succ_next)
+ FOR_EACH_EDGE (e, ei, bb->succs)
{
sreal_init (&EDGE_INFO (e)->back_edge_prob, e->probability, 0);
sreal_mul (&EDGE_INFO (e)->back_edge_prob,
@@ -1180,7 +1825,7 @@ estimate_bb_frequencies (struct loops *loops)
/* First compute probabilities locally for each loop from innermost
to outermost to examine probabilities for back edges. */
- estimate_loops_at_level (loops->tree_root);
+ estimate_loops_at_level (loops->tree_root, tovisit);
memcpy (&freq_max, &real_zero, sizeof (real_zero));
FOR_EACH_BB (bb)
@@ -1199,6 +1844,7 @@ estimate_bb_frequencies (struct loops *loops)
free_aux_for_blocks ();
free_aux_for_edges ();
+ BITMAP_FREE (tovisit);
}
compute_function_frequency ();
if (flag_reorder_functions)
@@ -1238,6 +1884,13 @@ choose_function_section (void)
of all instances. For now just never set frequency for these. */
|| DECL_ONE_ONLY (current_function_decl))
return;
+
+ /* If we are doing the partitioning optimization, let the optimization
+ choose the correct section into which to put things. */
+
+ if (flag_reorder_blocks_and_partition)
+ return;
+
if (cfun->function_frequency == FUNCTION_FREQUENCY_HOT)
DECL_SECTION_NAME (current_function_decl) =
build_string (strlen (HOT_TEXT_SECTION_NAME), HOT_TEXT_SECTION_NAME);
@@ -1246,3 +1899,26 @@ choose_function_section (void)
build_string (strlen (UNLIKELY_EXECUTED_TEXT_SECTION_NAME),
UNLIKELY_EXECUTED_TEXT_SECTION_NAME);
}
+
+static bool
+gate_estimate_probability (void)
+{
+ return flag_guess_branch_prob;
+}
+
+struct tree_opt_pass pass_profile =
+{
+ "profile", /* name */
+ gate_estimate_probability, /* gate */
+ tree_estimate_probability, /* execute */
+ NULL, /* sub */
+ NULL, /* next */
+ 0, /* static_pass_number */
+ TV_BRANCH_PROB, /* tv_id */
+ PROP_cfg, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ TODO_ggc_collect | TODO_verify_ssa, /* todo_flags_finish */
+ 0 /* letter */
+};