@@ -24178,6 +24178,10 @@ aarch64_sched_fusion_priority (rtx_insn *insn, int max_pri,
static int
aarch64_sched_adjust_priority (rtx_insn *insn, int priority)
{
+ /* Skip any NOTE insn from an empty block. */
+ if (!INSN_P (insn))
+ return priority;
+
rtx x = PATTERN (insn);
if (GET_CODE (x) == SET)
@@ -1207,6 +1207,11 @@ recompute_todo_spec (rtx_insn *next, bool for_backtrack)
int n_replace = 0;
bool first_p = true;
+ /* Since we no longer skip empty blocks, it's possible to
+ see a NOTE insn here; return early if so. */
+ if (NOTE_P (next))
+ return 0;
+
if (sd_lists_empty_p (next, SD_LIST_BACK))
/* NEXT has all its dependencies resolved. */
return 0;
@@ -1726,6 +1731,11 @@ setup_insn_reg_pressure_info (rtx_insn *insn)
int *max_reg_pressure;
static int death[N_REG_CLASSES];
+ /* Since we no longer skip empty blocks, it's possible to
+ schedule a NOTE insn now; check for that first. */
+ if (NOTE_P (insn))
+ return;
+
gcc_checking_assert (!DEBUG_INSN_P (insn));
excess_cost_change = 0;
@@ -4017,10 +4027,10 @@ schedule_insn (rtx_insn *insn)
/* Scheduling instruction should have all its dependencies resolved and
should have been removed from the ready list. */
- gcc_assert (sd_lists_empty_p (insn, SD_LIST_HARD_BACK));
+ gcc_assert (NOTE_P (insn) || sd_lists_empty_p (insn, SD_LIST_HARD_BACK));
/* Reset debug insns invalidated by moving this insn. */
- if (MAY_HAVE_DEBUG_BIND_INSNS && !DEBUG_INSN_P (insn))
+ if (MAY_HAVE_DEBUG_BIND_INSNS && NONDEBUG_INSN_P (insn))
for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
sd_iterator_cond (&sd_it, &dep);)
{
@@ -4106,61 +4116,66 @@ schedule_insn (rtx_insn *insn)
check_clobbered_conditions (insn);
- /* Update dependent instructions. First, see if by scheduling this insn
- now we broke a dependence in a way that requires us to change another
- insn. */
- for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
- sd_iterator_cond (&sd_it, &dep); sd_iterator_next (&sd_it))
+ /* Since we no longer skip empty blocks, it's possible to
+ schedule a NOTE insn now; check for that first. */
+ if (!NOTE_P (insn))
{
- struct dep_replacement *desc = DEP_REPLACE (dep);
- rtx_insn *pro = DEP_PRO (dep);
- if (QUEUE_INDEX (pro) != QUEUE_SCHEDULED
- && desc != NULL && desc->insn == pro)
- apply_replacement (dep, false);
- }
+ /* Update dependent instructions. First, see if by scheduling this insn
+ now we broke a dependence in a way that requires us to change another
+ insn. */
+ for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
+ sd_iterator_cond (&sd_it, &dep); sd_iterator_next (&sd_it))
+ {
+ struct dep_replacement *desc = DEP_REPLACE (dep);
+ rtx_insn *pro = DEP_PRO (dep);
+ if (QUEUE_INDEX (pro) != QUEUE_SCHEDULED && desc != NULL
+ && desc->insn == pro)
+ apply_replacement (dep, false);
+ }
- /* Go through and resolve forward dependencies. */
- for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
- sd_iterator_cond (&sd_it, &dep);)
- {
- rtx_insn *next = DEP_CON (dep);
- bool cancelled = (DEP_STATUS (dep) & DEP_CANCELLED) != 0;
+ /* Go through and resolve forward dependencies. */
+ for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
+ sd_iterator_cond (&sd_it, &dep);)
+ {
+ rtx_insn *next = DEP_CON (dep);
+ bool cancelled = (DEP_STATUS (dep) & DEP_CANCELLED) != 0;
- /* Resolve the dependence between INSN and NEXT.
- sd_resolve_dep () moves current dep to another list thus
- advancing the iterator. */
- sd_resolve_dep (sd_it);
+ /* Resolve the dependence between INSN and NEXT.
+ sd_resolve_dep () moves current dep to another list thus
+ advancing the iterator. */
+ sd_resolve_dep (sd_it);
- if (cancelled)
- {
- if (must_restore_pattern_p (next, dep))
- restore_pattern (dep, false);
- continue;
- }
+ if (cancelled)
+ {
+ if (must_restore_pattern_p (next, dep))
+ restore_pattern (dep, false);
+ continue;
+ }
- /* Don't bother trying to mark next as ready if insn is a debug
- insn. If insn is the last hard dependency, it will have
- already been discounted. */
- if (DEBUG_INSN_P (insn) && !DEBUG_INSN_P (next))
- continue;
+ /* Don't bother trying to mark next as ready if insn is a debug
+ insn. If insn is the last hard dependency, it will have
+ already been discounted. */
+ if (DEBUG_INSN_P (insn) && !DEBUG_INSN_P (next))
+ continue;
- if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
- {
- int effective_cost;
+ if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
+ {
+ int effective_cost;
- effective_cost = try_ready (next);
+ effective_cost = try_ready (next);
- if (effective_cost >= 0
- && SCHED_GROUP_P (next)
- && advance < effective_cost)
- advance = effective_cost;
- }
- else
- /* Check always has only one forward dependence (to the first insn in
- the recovery block), therefore, this will be executed only once. */
- {
- gcc_assert (sd_lists_empty_p (insn, SD_LIST_FORW));
- fix_recovery_deps (RECOVERY_BLOCK (insn));
+ if (effective_cost >= 0 && SCHED_GROUP_P (next)
+ && advance < effective_cost)
+ advance = effective_cost;
+ }
+ else
+ /* Check always has only one forward dependence (to the first insn
+ in the recovery block), therefore, this will be executed only
+ once. */
+ {
+ gcc_assert (sd_lists_empty_p (insn, SD_LIST_FORW));
+ fix_recovery_deps (RECOVERY_BLOCK (insn));
+ }
}
}
@@ -4170,9 +4185,9 @@ schedule_insn (rtx_insn *insn)
may use this information to decide how the instruction should
be aligned. */
if (issue_rate > 1
+ && NONDEBUG_INSN_P (insn)
&& GET_CODE (PATTERN (insn)) != USE
- && GET_CODE (PATTERN (insn)) != CLOBBER
- && !DEBUG_INSN_P (insn))
+ && GET_CODE (PATTERN (insn)) != CLOBBER)
{
if (reload_completed)
PUT_MODE (insn, clock_var > last_clock_var ? TImode : VOIDmode);
@@ -5033,20 +5048,6 @@ get_ebb_head_tail (basic_block beg, basic_block end,
*tailp = end_tail;
}
-/* Return true if there are no real insns in the range [ HEAD, TAIL ]. */
-
-bool
-no_real_insns_p (const rtx_insn *head, const rtx_insn *tail)
-{
- while (head != NEXT_INSN (tail))
- {
- if (!NOTE_P (head) && !LABEL_P (head))
- return false;
- head = NEXT_INSN (head);
- }
- return true;
-}
-
/* Restore-other-notes: NOTE_LIST is the end of a chain of notes
previously found among the insns. Insert them just before HEAD. */
rtx_insn *
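Throughout this patch, the bail-outs that used no_real_insns_p () are
replaced by checks for the one shape an empty block can take after
get_ebb_head_tail (): head and tail pointing at the same NOTE
(typically the block's NOTE_INSN_BASIC_BLOCK). A minimal sketch of
that invariant as a hypothetical helper, not something the patch adds:

  /* Hypothetical helper: true iff the range [HEAD, TAIL] is a
     note-only block, the shape that replaces the old
     no_real_insns_p () bail-outs.  */
  static bool
  bb_note_only_p (const rtx_insn *head, const rtx_insn *tail)
  {
    return head == tail && NOTE_P (head);
  }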
@@ -6224,8 +6225,12 @@ commit_schedule (rtx_insn *prev_head, rtx_insn *tail, basic_block *target_bb)
scheduled_insns.iterate (i, &insn);
i++)
{
- if (control_flow_insn_p (last_scheduled_insn)
- || current_sched_info->advance_target_bb (*target_bb, insn))
+ /* Since we no longer skip empty blocks, it's possible to
+ schedule a NOTE insn now; check for that here to avoid
+ an unexpected target bb advance. */
+ if ((control_flow_insn_p (last_scheduled_insn)
+ || current_sched_info->advance_target_bb (*target_bb, insn))
+ && !NOTE_P (insn))
{
*target_bb = current_sched_info->advance_target_bb (*target_bb, 0);
@@ -6245,7 +6250,7 @@ commit_schedule (rtx_insn *prev_head, rtx_insn *tail, basic_block *target_bb)
(*current_sched_info->begin_move_insn) (insn, last_scheduled_insn);
move_insn (insn, last_scheduled_insn,
current_sched_info->next_tail);
- if (!DEBUG_INSN_P (insn))
+ if (NONDEBUG_INSN_P (insn))
reemit_notes (insn);
last_scheduled_insn = insn;
}
@@ -6296,7 +6301,7 @@ prune_ready_list (state_t temp_state, bool first_cycle_insn_p,
int cost = 0;
const char *reason = "resource conflict";
- if (DEBUG_INSN_P (insn))
+ if (DEBUG_INSN_P (insn) || NOTE_P (insn))
continue;
if (sched_group_found && !SCHED_GROUP_P (insn)
@@ -6504,7 +6509,7 @@ schedule_block (basic_block *target_bb, state_t init_state)
and caused problems because schedule_block and compute_forward_dependences
had different notions of what the "head" insn was. */
- gcc_assert (head != tail || INSN_P (head));
+ gcc_assert (head != tail || INSN_P (head) || NOTE_P (head));
haifa_recovery_bb_recently_added_p = false;
@@ -6539,15 +6544,15 @@ schedule_block (basic_block *target_bb, state_t init_state)
if (targetm.sched.init)
targetm.sched.init (sched_dump, sched_verbose, ready.veclen);
+ gcc_assert (((NOTE_P (prev_head) || DEBUG_INSN_P (prev_head))
+ && BLOCK_FOR_INSN (prev_head) == *target_bb)
+ || (head == tail && NOTE_P (head)));
+
/* We start inserting insns after PREV_HEAD. */
last_scheduled_insn = prev_head;
last_nondebug_scheduled_insn = NULL;
nonscheduled_insns_begin = NULL;
- gcc_assert ((NOTE_P (last_scheduled_insn)
- || DEBUG_INSN_P (last_scheduled_insn))
- && BLOCK_FOR_INSN (last_scheduled_insn) == *target_bb);
-
/* Initialize INSN_QUEUE. Q_SIZE is the total number of insns in the
queue. */
q_ptr = 0;
@@ -6725,15 +6730,16 @@ schedule_block (basic_block *target_bb, state_t init_state)
}
}
- /* We don't want md sched reorder to even see debug isns, so put
- them out right away. */
- if (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0))
+ /* We don't want md sched reorder to even see debug and note insns,
+ so put them out right away. */
+ if (ready.n_ready
+ && !NONDEBUG_INSN_P (ready_element (&ready, 0))
&& (*current_sched_info->schedule_more_p) ())
{
- while (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
+ while (ready.n_ready && !NONDEBUG_INSN_P (ready_element (&ready, 0)))
{
rtx_insn *insn = ready_remove_first (&ready);
- gcc_assert (DEBUG_INSN_P (insn));
+ gcc_assert (DEBUG_INSN_P (insn) || NOTE_P (insn));
(*current_sched_info->begin_schedule_ready) (insn);
scheduled_insns.safe_push (insn);
last_scheduled_insn = insn;
@@ -7145,17 +7151,18 @@ schedule_block (basic_block *target_bb, state_t init_state)
int
set_priorities (rtx_insn *head, rtx_insn *tail)
{
+ /* Since we no longer skip empty blocks, it's possible to
+ see a NOTE insn now; no priority needs to be computed for
+ such a block, so return early. */
+ if (head == tail && !INSN_P (head))
+ return 1;
+
rtx_insn *insn;
- int n_insn;
+ int n_insn = 0;
int sched_max_insns_priority =
current_sched_info->sched_max_insns_priority;
rtx_insn *prev_head;
- if (head == tail && ! INSN_P (head))
- gcc_unreachable ();
-
- n_insn = 0;
-
prev_head = PREV_INSN (head);
for (insn = tail; insn != prev_head; insn = PREV_INSN (insn))
{
@@ -7688,7 +7695,8 @@ fix_tick_ready (rtx_insn *next)
{
int tick, delay;
- if (!DEBUG_INSN_P (next) && !sd_lists_empty_p (next, SD_LIST_RES_BACK))
+ if (NONDEBUG_INSN_P (next)
+ && !sd_lists_empty_p (next, SD_LIST_RES_BACK))
{
int full_p;
sd_iterator_def sd_it;
@@ -2695,8 +2695,8 @@ do { \
/* During sched, 1 if RTX is an insn that must be scheduled together
with the preceding insn. */
#define SCHED_GROUP_P(RTX) \
- (RTL_FLAG_CHECK4 ("SCHED_GROUP_P", (RTX), DEBUG_INSN, INSN, \
- JUMP_INSN, CALL_INSN)->in_struct)
+ (RTL_FLAG_CHECK5 ("SCHED_GROUP_P", (RTX), DEBUG_INSN, INSN, \
+ JUMP_INSN, CALL_INSN, NOTE)->in_struct)
/* For a SET rtx, SET_DEST is the place that is set
and SET_SRC is the value it is set to. */
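With NOTEs now able to reach the ready list, SCHED_GROUP_P can be read
on a NOTE, so NOTE must join the codes the flag-checking machinery
accepts. Under --enable-checking, RTL_FLAG_CHECKn aborts when the rtx
code is not one of those listed; roughly the following pattern,
paraphrased rather than the verbatim rtl.h expansion:

  /* Paraphrased checked access: reading the flag on an rtx whose
     code is none of the five listed codes aborts via
     rtl_check_failed_flag ().  */
  if (GET_CODE (_rtx) != DEBUG_INSN && GET_CODE (_rtx) != INSN
      && GET_CODE (_rtx) != JUMP_INSN && GET_CODE (_rtx) != CALL_INSN
      && GET_CODE (_rtx) != NOTE)
    rtl_check_failed_flag ("SCHED_GROUP_P", _rtx, __FILE__, __LINE__,
                           __FUNCTION__);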
@@ -478,12 +478,10 @@ schedule_ebb (rtx_insn *head, rtx_insn *tail, bool modulo_scheduling)
a note or two. */
while (head != tail)
{
- if (NOTE_P (head) || DEBUG_INSN_P (head))
+ if (LABEL_P (head) || NOTE_P (head) || DEBUG_INSN_P (head))
head = NEXT_INSN (head);
else if (NOTE_P (tail) || DEBUG_INSN_P (tail))
tail = PREV_INSN (tail);
- else if (LABEL_P (head))
- head = NEXT_INSN (head);
else
break;
}
@@ -491,10 +489,8 @@ schedule_ebb (rtx_insn *head, rtx_insn *tail, bool modulo_scheduling)
first_bb = BLOCK_FOR_INSN (head);
last_bb = BLOCK_FOR_INSN (tail);
- if (no_real_insns_p (head, tail))
- return BLOCK_FOR_INSN (tail);
-
- gcc_assert (INSN_P (head) && INSN_P (tail));
+ gcc_assert ((NOTE_P (head) && head == tail)
+ || (INSN_P (head) && INSN_P (tail)));
if (!bitmap_bit_p (&dont_calc_deps, first_bb->index))
{
@@ -1397,7 +1397,6 @@ extern void free_global_sched_pressure_data (void);
extern int haifa_classify_insn (const_rtx);
extern void get_ebb_head_tail (basic_block, basic_block,
rtx_insn **, rtx_insn **);
-extern bool no_real_insns_p (const rtx_insn *, const rtx_insn *);
extern int insn_sched_cost (rtx_insn *);
extern int dep_cost_1 (dep_t, dw_t);
@@ -228,6 +228,9 @@ static edgeset *pot_split;
/* For every bb, a set of its ancestor edges. */
static edgeset *ancestor_edges;
+/* A set bit indicates the corresponding bb was initially empty. */
+static bitmap rgn_init_empty_bb;
+
#define INSN_PROBABILITY(INSN) (SRC_PROB (BLOCK_TO_BB (BLOCK_NUM (INSN))))
/* Speculative scheduling functions. */
@@ -2757,10 +2760,6 @@ free_block_dependencies (int bb)
rtx_insn *tail;
get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
-
- if (no_real_insns_p (head, tail))
- return;
-
sched_free_deps (head, tail, true);
}
@@ -3024,9 +3023,6 @@ compute_priorities (void)
gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb));
get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
- if (no_real_insns_p (head, tail))
- continue;
-
rgn_n_insns += set_priorities (head, tail);
}
current_sched_info->sched_max_insns_priority++;
@@ -3157,12 +3153,6 @@ schedule_region (int rgn)
last_bb = EBB_LAST_BB (bb);
get_ebb_head_tail (first_bb, last_bb, &head, &tail);
-
- if (no_real_insns_p (head, tail))
- {
- gcc_assert (first_bb == last_bb);
- continue;
- }
sched_setup_bb_reg_pressure_info (first_bb, PREV_INSN (head));
}
}
@@ -3178,13 +3168,6 @@ schedule_region (int rgn)
get_ebb_head_tail (first_bb, last_bb, &head, &tail);
- if (no_real_insns_p (head, tail))
- {
- gcc_assert (first_bb == last_bb);
- save_state_for_fallthru_edge (last_bb, bb_state[first_bb->index]);
- continue;
- }
-
current_sched_info->prev_head = PREV_INSN (head);
current_sched_info->next_tail = NEXT_INSN (tail);
@@ -3216,6 +3199,14 @@ schedule_region (int rgn)
/* Clean up. */
if (current_nr_blocks > 1)
free_trg_info ();
+
+ /* This now-empty block was not empty initially, meaning the only
+ NOTE inside was not counted when computing rgn_n_insns, so fix
+ it up now. */
+ if (head == tail
+ && NOTE_P (head)
+ && !bitmap_bit_p (rgn_init_empty_bb, bb))
+ rgn_n_insns++;
}
/* Sanity check: verify that all region insns were scheduled. */
@@ -3448,7 +3439,16 @@ sched_rgn_local_init (int rgn)
continue;
FOR_EACH_EDGE (e, ei, block->succs)
e->aux = NULL;
- }
+ }
+ }
+
+ rgn_init_empty_bb = BITMAP_ALLOC (NULL);
+ for (bb = 0; bb < current_nr_blocks; bb++)
+ {
+ rtx_insn *head, *tail;
+ get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
+ if (head == tail && NOTE_P (head))
+ bitmap_set_bit (rgn_init_empty_bb, bb);
}
}
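This bitmap closes a counting hole: a block that is empty from the
start now has its single NOTE accounted for by set_priorities ()
returning 1 (see the haifa-sched.cc hunk above), while a block emptied
later by interblock motion had only its real insns counted, yet its
leftover NOTE is still scheduled. A hypothetical condensation of the
fixup in schedule_region (), not code the patch adds:

  /* Hypothetical helper: the rgn_n_insns adjustment for a block
     that is note-only at its scheduling point.  */
  static int
  note_only_bb_adjustment (int bb, rtx_insn *head, rtx_insn *tail)
  {
    if (head == tail && NOTE_P (head)
        && !bitmap_bit_p (rgn_init_empty_bb, bb))
      /* Emptied by interblock motion: the NOTE will be scheduled
         and counted, but set_priorities () never counted it, so
         compensate by one.  */
      return 1;
    return 0;
  }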
@@ -3461,6 +3461,7 @@ sched_rgn_local_free (void)
sbitmap_vector_free (pot_split);
sbitmap_vector_free (ancestor_edges);
free (rgn_edges);
+ BITMAP_FREE (rgn_init_empty_bb);
}
/* Free data computed for the finished region. */
@@ -7213,9 +7213,6 @@ sel_region_target_finish (bool reset_sched_cycles_p)
find_ebb_boundaries (EBB_FIRST_BB (i), scheduled_blocks);
- if (no_real_insns_p (current_sched_info->head, current_sched_info->tail))
- continue;
-
if (reset_sched_cycles_p)
reset_sched_cycles_in_current_ebb ();