--- a/gcc/cfganal.cc
+++ b/gcc/cfganal.cc
@@ -740,7 +740,7 @@ post_order_compute (int *post_order, bool include_entry_exit,
}
-/* Helper routine for inverted_post_order_compute
+/* Helper routine for inverted_rev_post_order_compute,
flow_dfs_compute_reverse_execute, and the reverse-CFG
depth first search in dominance.cc.
BB has to belong to a region of CFG
@@ -820,12 +820,14 @@ dfs_find_deadend (basic_block bb)
and start looking for a "dead end" from that block
and do another inverted traversal from that block. */
-void
-inverted_post_order_compute (vec<int> *post_order,
- sbitmap *start_points)
+int
+inverted_rev_post_order_compute (struct function *fn,
+ int *rev_post_order,
+ sbitmap *start_points)
{
basic_block bb;
- post_order->reserve_exact (n_basic_blocks_for_fn (cfun));
+
+ int rev_post_order_num = n_basic_blocks_for_fn (fn) - 1;
if (flag_checking)
verify_no_unreachable_blocks ();
@@ -855,17 +857,17 @@ inverted_post_order_compute (vec<int> *post_order,
}
}
else
-  /* Put all blocks that have no successor into the initial work list. */
-  FOR_ALL_BB_FN (bb, cfun)
-    if (EDGE_COUNT (bb->succs) == 0)
-      {
-        /* Push the initial edge on to the stack. */
-        if (EDGE_COUNT (bb->preds) > 0)
-          {
-            stack.quick_push (ei_start (bb->preds));
-            bitmap_set_bit (visited, bb->index);
-          }
-      }
+    /* Put all blocks that have no successor into the initial work list. */
+    FOR_ALL_BB_FN (bb, cfun)
+      if (EDGE_COUNT (bb->succs) == 0)
+        {
+          /* Push the initial edge on to the stack. */
+          if (EDGE_COUNT (bb->preds) > 0)
+            {
+              stack.quick_push (ei_start (bb->preds));
+              bitmap_set_bit (visited, bb->index);
+            }
+        }
do
{
@@ -893,13 +895,13 @@ inverted_post_order_compute (vec<int> *post_order,
time, check its predecessors. */
stack.quick_push (ei_start (pred->preds));
else
- post_order->quick_push (pred->index);
+ rev_post_order[rev_post_order_num--] = pred->index;
}
else
{
if (bb != EXIT_BLOCK_PTR_FOR_FN (cfun)
&& ei_one_before_end_p (ei))
- post_order->quick_push (bb->index);
+ rev_post_order[rev_post_order_num--] = bb->index;
if (!ei_one_before_end_p (ei))
ei_next (&stack.last ());
@@ -957,7 +959,8 @@ inverted_post_order_compute (vec<int> *post_order,
while (!stack.is_empty ());
/* EXIT_BLOCK is always included. */
- post_order->quick_push (EXIT_BLOCK);
+ rev_post_order[rev_post_order_num--] = EXIT_BLOCK;
+ return n_basic_blocks_for_fn (fn);
}
/* Compute the depth first search order of FN and store in the array
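The mechanical trick above is the back-to-front store through rev_post_order_num: emitting post-order numbers into the tail of the array yields the reverse post order directly, with no separate reversal pass. A stand-alone sketch of that trick (plain C++ on a made-up toy graph, not GCC internals):

  #include <vector>

  // Post-order DFS that writes into the tail of the output array: the
  // last block to finish lands in slot 0, so the array reads as
  // reverse post order without ever being reversed.
  static void
  dfs (int u, const std::vector<std::vector<int>> &succs,
       std::vector<bool> &visited, std::vector<int> &rpo, int &slot)
  {
    visited[u] = true;
    for (int v : succs[u])
      if (!visited[v])
        dfs (v, succs, visited, rpo, slot);
    rpo[slot--] = u;  // post-order emission, back to front
  }

  int
  main ()
  {
    // Tiny diamond CFG: 0 -> {1, 2} -> 3.
    std::vector<std::vector<int>> succs = { {1, 2}, {3}, {3}, {} };
    std::vector<bool> visited (4, false);
    std::vector<int> rpo (4);
    int slot = 3;  // n_basic_blocks - 1, as in the patch
    dfs (0, succs, visited, rpo, slot);
    // rpo is now { 0, 2, 1, 3 }: every block precedes its successors.
    return 0;
  }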
--- a/gcc/cfganal.h
+++ b/gcc/cfganal.h
@@ -66,7 +66,8 @@ extern void add_noreturn_fake_exit_edges (void);
extern void connect_infinite_loops_to_exit (void);
extern int post_order_compute (int *, bool, bool);
extern basic_block dfs_find_deadend (basic_block);
-extern void inverted_post_order_compute (vec<int> *postorder, sbitmap *start_points = 0);
+extern int inverted_rev_post_order_compute (struct function *,
+ int *, sbitmap *start_points = 0);
extern int pre_and_rev_post_order_compute_fn (struct function *,
int *, int *, bool);
extern int pre_and_rev_post_order_compute (int *, int *, bool);
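Every caller converted below follows the same migration pattern. A sketch of the new calling convention, distilled from those call sites (GCC-internal names as used in the patch):

  /* One slot per basic block; the function fills the array in reverse
     post order of the inverted CFG and returns the number of entries.  */
  int *rpo = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
  int n = inverted_rev_post_order_compute (cfun, rpo);
  for (int i = 0; i < n; ++i)
    {
      basic_block bb = BASIC_BLOCK_FOR_FN (cfun, rpo[i]);
      /* Visit BB; successors tend to be seen before predecessors,
         which is the order backward dataflow problems want.  */
    }
  free (rpo);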
--- a/gcc/lcm.cc
+++ b/gcc/lcm.cc
@@ -102,17 +102,18 @@ compute_antinout_edge (sbitmap *antloc, sbitmap *transp, sbitmap *antin,
optimistic initialization of ANTIN above. Use reverse postorder
on the inverted graph to make the backward dataflow problem require
fewer iterations. */
- auto_vec<int> postorder;
- inverted_post_order_compute (&postorder);
- for (int i = postorder.length () - 1; i >= 0; --i)
+ int *rpo = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
+ int n = inverted_rev_post_order_compute (cfun, rpo);
+ for (int i = 0; i < n; ++i)
{
- bb = BASIC_BLOCK_FOR_FN (cfun, postorder[i]);
+ bb = BASIC_BLOCK_FOR_FN (cfun, rpo[i]);
if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
|| bb == ENTRY_BLOCK_PTR_FOR_FN (cfun))
continue;
*qin++ = bb;
bb->aux = bb;
}
+ free (rpo);
qin = worklist;
qend = &worklist[n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS];
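The loop-direction flip here is mechanical: the old code walked the inverted post order backwards, and rpo[i] == postorder[n - 1 - i], so the forward walk over the new array visits blocks in exactly the same order. A toy sanity check of that index mapping (hand-made arrays, not GCC code):

  #include <cassert>

  int
  main ()
  {
    const int n = 4;
    int postorder[n] = { 3, 1, 2, 0 };  /* old: walked from the end */
    int rpo[n]       = { 0, 2, 1, 3 };  /* new: same order, stored reversed */
    for (int i = 0; i < n; ++i)
      assert (rpo[i] == postorder[n - 1 - i]);
    return 0;
  }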
--- a/gcc/lra-lives.cc
+++ b/gcc/lra-lives.cc
@@ -1405,19 +1405,20 @@ lra_create_live_ranges_1 (bool all_p, bool dead_insn_p)
point_freq_vec.truncate (0);
point_freq_vec.reserve_exact (new_length);
lra_point_freq = point_freq_vec.address ();
- auto_vec<int, 20> post_order_rev_cfg;
- inverted_post_order_compute (&post_order_rev_cfg);
- lra_assert (post_order_rev_cfg.length () == (unsigned) n_basic_blocks_for_fn (cfun));
+ int *rpo = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
+ int n = inverted_rev_post_order_compute (cfun, rpo);
+ lra_assert (n == n_basic_blocks_for_fn (cfun));
bb_live_change_p = false;
- for (i = post_order_rev_cfg.length () - 1; i >= 0; --i)
+ for (i = 0; i < n; ++i)
{
- bb = BASIC_BLOCK_FOR_FN (cfun, post_order_rev_cfg[i]);
+ bb = BASIC_BLOCK_FOR_FN (cfun, rpo[i]);
if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
    || bb == ENTRY_BLOCK_PTR_FOR_FN (cfun))
continue;
if (process_bb_lives (bb, curr_point, dead_insn_p))
bb_live_change_p = true;
}
+ free (rpo);
if (bb_live_change_p)
{
/* We need to clear pseudo live info as some pseudos can
--- a/gcc/tree-ssa-dce.cc
+++ b/gcc/tree-ssa-dce.cc
@@ -1095,7 +1095,7 @@ remove_dead_stmt (gimple_stmt_iterator *i, basic_block bb,
nothing to the program, then we not only remove it, but we need to update
the CFG. We can choose any of the edges out of BB as long as we are sure not to
close infinite loops. This is done by always choosing the edge closer to
- exit in inverted_post_order_compute order. */
+ exit in inverted_rev_post_order_compute order. */
if (is_ctrl_stmt (stmt))
{
edge_iterator ei;
@@ -1111,17 +1111,18 @@ remove_dead_stmt (gimple_stmt_iterator *i, basic_block bb,
{
if (!bb_postorder)
{
- auto_vec<int, 20> postorder;
- inverted_post_order_compute (&postorder,
- &bb_contains_live_stmts);
+ int *rpo = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
+ int n = inverted_rev_post_order_compute (cfun, rpo,
+ &bb_contains_live_stmts);
bb_postorder = XNEWVEC (int, last_basic_block_for_fn (cfun));
- for (unsigned int i = 0; i < postorder.length (); ++i)
- bb_postorder[postorder[i]] = i;
+ for (int i = 0; i < n; ++i)
+ bb_postorder[rpo[i]] = i;
+ free (rpo);
}
FOR_EACH_EDGE (e2, ei, bb->succs)
if (!e || e2->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
|| bb_postorder [e->dest->index]
- < bb_postorder [e2->dest->index])
+ >= bb_postorder [e2->dest->index])
e = e2;
}
gcc_assert (e);
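The flipped comparison is the subtle part of this hunk: bb_postorder now holds positions in the reverse post order of the inverted CFG, so "closer to exit" means a smaller index rather than a larger one, and the edge selection keeps the minimum instead of the maximum. A toy illustration with a hand-numbered chain (not GCC code):

  #include <cassert>

  int
  main ()
  {
    /* Chain A -> B -> EXIT.  The inverted-CFG post order is
       [A, B, EXIT], so the old numbering gave the exit-nearest block
       the largest value; the reverse post order is [EXIT, B, A],
       giving it the smallest.  Hence '<' (keep the max) became
       '>=' (keep the min).  */
    enum { A, B, EXIT_BB };
    int old_num[3] = { 0, 1, 2 };  /* index in inverted post order */
    int new_num[3] = { 2, 1, 0 };  /* index in inverted reverse post order */
    assert (old_num[EXIT_BB] > old_num[B]);  /* closer to exit = larger */
    assert (new_num[EXIT_BB] < new_num[B]);  /* closer to exit = smaller */
    return 0;
  }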
--- a/gcc/tree-ssa-pre.cc
+++ b/gcc/tree-ssa-pre.cc
@@ -2464,8 +2464,8 @@ compute_antic (void)
/* For ANTIC computation we need a postorder that also guarantees that
a block with a single successor is visited after its successor.
RPO on the inverted CFG has this property. */
- auto_vec<int, 20> postorder;
- inverted_post_order_compute (&postorder);
+ int *rpo = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
+ int n = inverted_rev_post_order_compute (cfun, rpo);
auto_sbitmap worklist (last_basic_block_for_fn (cfun) + 1);
bitmap_clear (worklist);
@@ -2481,11 +2481,11 @@ compute_antic (void)
for PA ANTIC computation. */
num_iterations++;
changed = false;
- for (i = postorder.length () - 1; i >= 0; i--)
+ for (i = 0; i < n; ++i)
{
- if (bitmap_bit_p (worklist, postorder[i]))
+ if (bitmap_bit_p (worklist, rpo[i]))
{
- basic_block block = BASIC_BLOCK_FOR_FN (cfun, postorder[i]);
+ basic_block block = BASIC_BLOCK_FOR_FN (cfun, rpo[i]);
bitmap_clear_bit (worklist, block->index);
if (compute_antic_aux (block,
bitmap_bit_p (has_abnormal_preds,
@@ -2513,15 +2513,17 @@ compute_antic (void)
if (do_partial_partial)
{
/* For partial antic we ignore backedges and thus we do not need
- to perform any iteration when we process blocks in postorder. */
- for (i = postorder.length () - 1; i >= 0; i--)
+ to perform any iteration when we process blocks in rpo. */
+ for (i = 0; i < n; ++i)
{
- basic_block block = BASIC_BLOCK_FOR_FN (cfun, postorder[i]);
+ basic_block block = BASIC_BLOCK_FOR_FN (cfun, rpo[i]);
compute_partial_antic_aux (block,
bitmap_bit_p (has_abnormal_preds,
block->index));
}
}
+
+ free (rpo);
}
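As a cross-check of the ordering property compute_antic relies on ("a block with a single successor is visited after its successor"): if B's only successor is S, then in the inverted CFG the only way to reach B is through S, so any reverse post order of the inverted CFG places S before B. A toy verification for the chain ENTRY -> B -> S -> EXIT (hand-built order, not GCC code):

  #include <cassert>

  int
  main ()
  {
    /* The inverted-CFG DFS runs EXIT -> S -> B -> ENTRY; the post order
       is [ENTRY, B, S, EXIT], so its reverse is [EXIT, S, B, ENTRY].  */
    enum { ENTRY, B, S, EXIT_BB };
    int rpo[4] = { EXIT_BB, S, B, ENTRY };
    int pos[4];
    for (int i = 0; i < 4; ++i)
      pos[rpo[i]] = i;
    assert (pos[S] < pos[B]);  /* successor visited first, B after */
    return 0;
  }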