/* Instruction scheduling pass.
   Copyright (C) 1992-2015 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* Instruction scheduling pass.  This file, along with sched-deps.c,
   contains the generic parts.  The actual entry point for
   the normal instruction scheduling pass is found in sched-rgn.c.

   We compute insn priorities based on data dependencies.  Flow
   analysis only creates a fraction of the data dependencies we must
   observe: namely, only those dependencies which the combiner can be
   expected to use.  For this pass, we must therefore create the
   remaining dependencies we need to observe: register dependencies,
   memory dependencies, dependencies to keep function calls in order,
   and the dependence between a conditional branch and the setting of
   condition codes are all dealt with here.

   The scheduler first traverses the data flow graph, starting with
   the last instruction, and proceeding to the first, assigning values
   to insn_priority as it goes.  This sorts the instructions
   topologically by data dependence.
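
   For illustration, the recurrence this backward walk computes is
   essentially (a sketch; the real priority () function below also
   filters dependencies through contributes_to_priority_p):

     priority (insn) = insn_cost (insn)   if insn has no forward deps
     priority (insn) = max over forward deps DEP of insn:
                         dep_cost (DEP) + priority (DEP_CON (DEP))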

   Once priorities have been established, we order the insns using
   list scheduling.  This works as follows: starting with a list of
   all the ready insns, and sorted according to priority number, we
   schedule the insn from the end of the list by placing its
   predecessors in the list according to their priority order.  We
   consider this insn scheduled by setting the pointer to the "end" of
   the list to point to the previous insn.  When an insn has no
   predecessors, we either queue it until sufficient time has elapsed
   or add it to the ready list.  As the instructions are scheduled or
   when stalls are introduced, the queue advances and dumps insns into
   the ready list.  When all insns down to the lowest priority have
   been scheduled, the critical path of the basic block has been made
   as short as possible.  The remaining insns are then scheduled in
   remaining slots.

   The following list shows the order in which we want to break ties
   among insns in the ready list (a comparator sketch follows the
   list):

   1.  choose insn with the longest path to end of bb, ties
   broken by shortest path
   2.  choose insn with least contribution to register pressure,
   ties broken by
   3.  prefer in-block upon interblock motion, ties broken by
   4.  prefer useful upon speculative motion, ties broken by
   5.  choose insn with largest control flow probability, ties
   broken by
   6.  choose insn with the least dependences upon the previously
   scheduled insn, or finally
   7.  choose the insn which has the most insns dependent on it.
   8.  choose insn with lowest UID.
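
   As an illustrative sketch only (the real ordering lives in
   rank_for_schedule later in this file, which also handles debug insns
   and the pressure-sensitive modes), rules 1 and 8 of a qsort-style
   comparator would look like:

     if (INSN_PRIORITY (a) != INSN_PRIORITY (b))
       return INSN_PRIORITY (b) - INSN_PRIORITY (a);
     ...
     return INSN_UID (a) - INSN_UID (b);

   with rules 2-7 applied between the two, each one consulted only when
   every earlier rule ties.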

   Memory references complicate matters.  Only if we can be certain
   that memory references are not part of the data dependency graph
   (via true, anti, or output dependence), can we move operations past
   memory references.  To first approximation, reads can be done
   independently, while writes introduce dependencies.  Better
   approximations will yield fewer dependencies.

   Before reload, an extended analysis of interblock data dependences
   is required for interblock scheduling.  This is performed in
   compute_block_dependences ().

   Dependencies set up by memory references are treated in exactly the
   same way as other dependencies, by using insn backward dependences
   INSN_BACK_DEPS.  INSN_BACK_DEPS are translated into forward dependences
   INSN_FORW_DEPS for the purpose of forward list scheduling.

   Having optimized the critical path, we may have also unduly
   extended the lifetimes of some registers.  If an operation requires
   that constants be loaded into registers, it is certainly desirable
   to load those constants as early as necessary, but no earlier.
   I.e., it will not do to load up a bunch of registers at the
   beginning of a basic block only to use them at the end, if they
   could be loaded later, since this may result in excessive register
   utilization.

   Note that since branches are never in basic blocks, but only end
   basic blocks, this pass will not move branches.  But that is ok,
   since we can use GNU's delayed branch scheduling pass to take care
   of this case.

   Also note that no further optimizations based on algebraic
   identities are performed, so this pass would be a good one to
   perform instruction splitting, such as breaking up a multiply
   instruction into shifts and adds where that is profitable.

   Given the memory aliasing analysis that this pass should perform,
   it should be possible to remove redundant stores to memory, and to
   load values from registers instead of hitting memory.

   Before reload, speculative insns are moved only if a 'proof' exists
   that no exception will be caused by this, and if no live registers
   exist that inhibit the motion (live register constraints are not
   represented by data dependence edges).

   This pass must update information that subsequent passes expect to
   be correct.  Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
   reg_n_calls_crossed, and reg_live_length.  Also, BB_HEAD, BB_END.

   The information in the line number notes is carefully retained by
   this pass.  Notes that refer to the starting and ending of
   exception regions are also carefully retained by this pass.  All
   other NOTE insns are grouped in their same relative order at the
   beginning of basic blocks and regions that have been scheduled.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "diagnostic-core.h"
#include "hard-reg-set.h"
#include "rtl.h"
#include "tm_p.h"
#include "regs.h"
#include "hash-set.h"
#include "vec.h"
#include "machmode.h"
#include "input.h"
#include "function.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
#include "recog.h"
#include "dominance.h"
#include "cfg.h"
#include "cfgrtl.h"
#include "cfgbuild.h"
#include "predict.h"
#include "basic-block.h"
#include "sched-int.h"
#include "target.h"
#include "common/common-target.h"
#include "params.h"
#include "dbgcnt.h"
#include "cselib.h"
#include "ira.h"
#include "emit-rtl.h"  /* FIXME: Can go away once crtl is moved to rtl.h.  */
#include "hash-table.h"
#include "dumpfile.h"

#ifdef INSN_SCHEDULING

/* True if we do register pressure relief through live-range
   shrinkage.  */
static bool live_range_shrinkage_p;

/* Switch on live range shrinkage.  */
void
initialize_live_range_shrinkage (void)
{
  live_range_shrinkage_p = true;
}

/* Switch off live range shrinkage.  */
void
finish_live_range_shrinkage (void)
{
  live_range_shrinkage_p = false;
}

/* issue_rate is the number of insns that can be scheduled in the same
   machine cycle.  It can be defined in the config/mach/mach.h file,
   otherwise we set it to 1.  */

int issue_rate;

/* This can be set to true by a backend if the scheduler should not
   enable a DCE pass.  */
bool sched_no_dce;

/* The current initiation interval used when modulo scheduling.  */
static int modulo_ii;

/* The maximum number of stages we are prepared to handle.  */
static int modulo_max_stages;

/* The number of insns that exist in each iteration of the loop.  We use this
   to detect when we've scheduled all insns from the first iteration.  */
static int modulo_n_insns;

/* The current count of insns in the first iteration of the loop that have
   already been scheduled.  */
static int modulo_insns_scheduled;

/* The maximum uid of insns from the first iteration of the loop.  */
static int modulo_iter0_max_uid;

/* The number of times we should attempt to backtrack when modulo scheduling.
   Decreased each time we have to backtrack.  */
static int modulo_backtracks_left;

/* The stage in which the last insn from the original loop was
   scheduled.  */
static int modulo_last_stage;

/* sched-verbose controls the amount of debugging output the
   scheduler prints.  It is controlled by -fsched-verbose=N:
   N>0 and no -DSR : the output is directed to stderr.
   N>=10 will direct the printouts to stderr (regardless of -dSR).
   N=1: same as -dSR.
   N=2: bb's probabilities, detailed ready list info, unit/insn info.
   N=3: rtl at abort point, control-flow, regions info.
   N=5: dependences info.  */
int sched_verbose = 0;

/* Debugging file.  All printouts are sent to dump, which is always set,
   either to stderr, or to the dump listing file (-dRS).  */
FILE *sched_dump = 0;

/* This is a placeholder for the scheduler parameters common
   to all schedulers.  */
struct common_sched_info_def *common_sched_info;

#define INSN_TICK(INSN) (HID (INSN)->tick)
#define INSN_EXACT_TICK(INSN) (HID (INSN)->exact_tick)
#define INSN_TICK_ESTIMATE(INSN) (HID (INSN)->tick_estimate)
#define INTER_TICK(INSN) (HID (INSN)->inter_tick)
#define FEEDS_BACKTRACK_INSN(INSN) (HID (INSN)->feeds_backtrack_insn)
#define SHADOW_P(INSN) (HID (INSN)->shadow_p)
#define MUST_RECOMPUTE_SPEC_P(INSN) (HID (INSN)->must_recompute_spec)
/* Cached cost of the instruction.  Use insn_cost to get cost of the
   insn.  -1 here means that the field is not initialized.  */
#define INSN_COST(INSN) (HID (INSN)->cost)

/* If INSN_TICK of an instruction is equal to INVALID_TICK,
   then it should be recalculated from scratch.  */
#define INVALID_TICK (-(max_insn_queue_index + 1))
/* The minimal value of the INSN_TICK of an instruction.  */
#define MIN_TICK (-max_insn_queue_index)

/* Original order of insns in the ready list.
   Used to keep order of normal insns while separating DEBUG_INSNs.  */
#define INSN_RFS_DEBUG_ORIG_ORDER(INSN) (HID (INSN)->rfs_debug_orig_order)

/* The deciding reason for INSN's place in the ready list.  */
#define INSN_LAST_RFS_WIN(INSN) (HID (INSN)->last_rfs_win)

/* List of important notes we must keep around.  This is a pointer to the
   last element in the list.  */
rtx_insn *note_list;

static struct spec_info_def spec_info_var;
/* Description of the speculative part of the scheduling.
   If NULL - no speculation.  */
spec_info_t spec_info = NULL;

/* True, if recovery block was added during scheduling of current block.
   Used to determine if we need to fix INSN_TICKs.  */
static bool haifa_recovery_bb_recently_added_p;

/* True, if recovery block was added during this scheduling pass.
   Used to determine if we should have empty memory pools of dependencies
   after finishing current region.  */
bool haifa_recovery_bb_ever_added_p;

/* Counters of different types of speculative instructions.  */
static int nr_begin_data, nr_be_in_data, nr_begin_control, nr_be_in_control;

/* Array used in {unlink, restore}_bb_notes.  */
static rtx_insn **bb_header = 0;

/* Basic block after which recovery blocks will be created.  */
static basic_block before_recovery;

/* Basic block just before the EXIT_BLOCK and after recovery, if we have
   created it.  */
basic_block after_recovery;

/* FALSE if we add bb to another region, so we don't need to initialize it.  */
bool adding_bb_to_current_region_p = true;

/* An instruction is ready to be scheduled when all insns preceding it
   have already been scheduled.  It is important to ensure that all
   insns which use its result will not be executed until its result
   has been computed.  An insn is maintained in one of four structures:

   (P) the "Pending" set of insns which cannot be scheduled until
   their dependencies have been satisfied.
   (Q) the "Queued" set of insns that can be scheduled when sufficient
   time has passed.
   (R) the "Ready" list of unscheduled, uncommitted insns.
   (S) the "Scheduled" list of insns.

   Initially, all insns are either "Pending" or "Ready" depending on
   whether their dependencies are satisfied.

   Insns move from the "Ready" list to the "Scheduled" list as they
   are committed to the schedule.  As this occurs, the insns in the
   "Pending" list have their dependencies satisfied and move to either
   the "Ready" list or the "Queued" set depending on whether
   sufficient time has passed to make them ready.  As time passes,
   insns move from the "Queued" set to the "Ready" list.

   The "Pending" list (P) are the insns in the INSN_FORW_DEPS of the
   unscheduled insns, i.e., those that are ready, queued, and pending.
   The "Queued" set (Q) is implemented by the variable `insn_queue'.
   The "Ready" list (R) is implemented by the variables `ready' and
   `n_ready'.
   The "Scheduled" list (S) is the new insn chain built by this pass.

   The transition (R->S) is implemented in the scheduling loop in
   `schedule_block' when the best insn to schedule is chosen.
   The transitions (P->R and P->Q) are implemented in `schedule_insn' as
   insns move from the ready list to the scheduled list.
   The transition (Q->R) is implemented in `queue_to_ready' as time
   passes or stalls are introduced.  */
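
/* In summary, an illustrative sketch of the lifecycle just described
   (not a separate data structure):

     P --(last dep resolved, zero delay)--->  R
     P --(last dep resolved, nonzero delay)-> Q
     Q --(queue advances `delay' cycles)---->  R
     R --(chosen by schedule_block)--------->  S  */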

/* Implement a circular buffer to delay instructions until sufficient
   time has passed.  For the new pipeline description interface,
   MAX_INSN_QUEUE_INDEX is a power of two minus one which is not less
   than the maximal instruction execution time computed by genattr.c from
   the maximal time of functional unit reservations.  This is the longest
   time an insn may be queued.  */

static rtx_insn_list **insn_queue;
static int q_ptr = 0;
static int q_size = 0;
#define NEXT_Q(X) (((X)+1) & max_insn_queue_index)
#define NEXT_Q_AFTER(X, C) (((X)+C) & max_insn_queue_index)
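
/* For illustration only -- a hedged sketch of how an insn lands on the
   circular queue to issue DELAY cycles from now (the real logic, with
   dumping and verification, lives in queue_insn):

     int slot = NEXT_Q_AFTER (q_ptr, delay);
     insn_queue[slot] = alloc_INSN_LIST (insn, insn_queue[slot]);
     q_size++;

   Because max_insn_queue_index is 2**k - 1, the AND in NEXT_Q_AFTER
   masks the sum back into the buffer's index range.  */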

#define QUEUE_SCHEDULED (-3)
#define QUEUE_NOWHERE   (-2)
#define QUEUE_READY     (-1)
/* QUEUE_SCHEDULED - INSN is scheduled.
   QUEUE_NOWHERE   - INSN isn't scheduled yet and is neither in
   queue, nor ready list.
   QUEUE_READY     - INSN is in ready list.
   N >= 0 - INSN queued for X [where NEXT_Q_AFTER (q_ptr, X) == N] cycles.  */

#define QUEUE_INDEX(INSN) (HID (INSN)->queue_index)

/* The following variable value refers to all current and future
   reservations of the processor units.  */
state_t curr_state;

/* The following variable value is size of memory representing all
   current and future reservations of the processor units.  */
size_t dfa_state_size;

/* The following array is used to find the best insn from ready when
   the automaton pipeline interface is used.  */
signed char *ready_try = NULL;

/* The ready list.  */
struct ready_list ready = {NULL, 0, 0, 0, 0};

/* The pointer to the ready list (to be removed).  */
static struct ready_list *readyp = &ready;

/* Scheduling clock.  */
static int clock_var;

/* Clock at which the previous instruction was issued.  */
static int last_clock_var;

/* Set to true if, when queuing a shadow insn, we discover that it would be
   scheduled too late.  */
static bool must_backtrack;

/* The following variable value is the number of essential insns issued on
   the current cycle.  An insn is essential if it changes the
   processor's state.  */
int cycle_issued_insns;

/* This records the actual schedule.  It is built up during the main phase
   of schedule_block, and afterwards used to reorder the insns in the RTL.  */
static vec<rtx_insn *> scheduled_insns;

static int may_trap_exp (const_rtx, int);

/* Nonzero iff the address is composed of at most 1 register.  */
#define CONST_BASED_ADDRESS_P(x)			\
  (REG_P (x)						\
   || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS	\
	|| (GET_CODE (x) == LO_SUM))			\
       && (CONSTANT_P (XEXP (x, 0))			\
	   || CONSTANT_P (XEXP (x, 1)))))
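
/* For example (illustrative only), these addresses satisfy the macro:

     (reg R)
     (plus (reg R) (const_int 4))
     (lo_sum (reg R) (symbol_ref "x"))

   while (plus (reg R1) (reg R2)) does not, since neither operand of
   the PLUS is constant.  */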

/* Returns a class that insn with GET_DEST(insn)=x may belong to,
   as found by analyzing insn's expression.  */

static int haifa_classify_insn (const_rtx);
static int haifa_luid_for_non_insn (rtx x);

/* Haifa version of sched_info hooks common to all headers.  */
const struct common_sched_info_def haifa_common_sched_info =
  {
    NULL, /* fix_recovery_cfg */
    NULL, /* add_block */
    NULL, /* estimate_number_of_insns */
    haifa_luid_for_non_insn, /* luid_for_non_insn */
    SCHED_PASS_UNKNOWN /* sched_pass_id */
  };

/* Mapping from instruction UID to its Logical UID.  */
vec<int> sched_luids = vNULL;

/* Next LUID to assign to an instruction.  */
int sched_max_luid = 1;

/* Haifa Instruction Data.  */
vec<haifa_insn_data_def> h_i_d = vNULL;

void (* sched_init_only_bb) (basic_block, basic_block);

/* Split block function.  Different schedulers might use different functions
   to keep their internal data consistent.  */
basic_block (* sched_split_block) (basic_block, rtx);

/* Create empty basic block after the specified block.  */
basic_block (* sched_create_empty_bb) (basic_block);

/* Return the number of cycles until INSN is expected to be ready.
   Return zero if it already is.  */
static int
insn_delay (rtx_insn *insn)
{
  return MAX (INSN_TICK (insn) - clock_var, 0);
}

static int
may_trap_exp (const_rtx x, int is_store)
{
  enum rtx_code code;

  if (x == 0)
    return TRAP_FREE;
  code = GET_CODE (x);
  if (is_store)
    {
      if (code == MEM && may_trap_p (x))
        return TRAP_RISKY;
      else
        return TRAP_FREE;
    }
  if (code == MEM)
    {
      /* The insn uses memory:  a volatile load.  */
      if (MEM_VOLATILE_P (x))
        return IRISKY;
      /* An exception-free load.  */
      if (!may_trap_p (x))
        return IFREE;
      /* A load with 1 base register, to be further checked.  */
      if (CONST_BASED_ADDRESS_P (XEXP (x, 0)))
        return PFREE_CANDIDATE;
      /* No info on the load, to be further checked.  */
      return PRISKY_CANDIDATE;
    }
  else
    {
      const char *fmt;
      int i, insn_class = TRAP_FREE;

      /* Neither store nor load, check if it may cause a trap.  */
      if (may_trap_p (x))
        return TRAP_RISKY;
      /* Recursive step: walk the insn...  */
      fmt = GET_RTX_FORMAT (code);
      for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
        {
          if (fmt[i] == 'e')
            {
              int tmp_class = may_trap_exp (XEXP (x, i), is_store);
              insn_class = WORST_CLASS (insn_class, tmp_class);
            }
          else if (fmt[i] == 'E')
            {
              int j;
              for (j = 0; j < XVECLEN (x, i); j++)
                {
                  int tmp_class = may_trap_exp (XVECEXP (x, i, j), is_store);
                  insn_class = WORST_CLASS (insn_class, tmp_class);
                  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
                    break;
                }
            }
          if (insn_class == TRAP_RISKY || insn_class == IRISKY)
            break;
        }
      return insn_class;
    }
}

/* Classifies rtx X of an insn for the purpose of verifying that X can be
   executed speculatively (and consequently the insn can be moved
   speculatively), by examining X, returning:
   TRAP_RISKY: store, or risky non-load insn (e.g. division by variable).
   TRAP_FREE: non-load insn.
   IFREE: load from a globally safe location.
   IRISKY: volatile load.
   PFREE_CANDIDATE, PRISKY_CANDIDATE: loads that need to be checked for
   being either PFREE or PRISKY.  */

static int
haifa_classify_rtx (const_rtx x)
{
  int tmp_class = TRAP_FREE;
  int insn_class = TRAP_FREE;
  enum rtx_code code;

  if (GET_CODE (x) == PARALLEL)
    {
      int i, len = XVECLEN (x, 0);

      for (i = len - 1; i >= 0; i--)
        {
          tmp_class = haifa_classify_rtx (XVECEXP (x, 0, i));
          insn_class = WORST_CLASS (insn_class, tmp_class);
          if (insn_class == TRAP_RISKY || insn_class == IRISKY)
            break;
        }
    }
  else
    {
      code = GET_CODE (x);
      switch (code)
        {
        case CLOBBER:
          /* Test if it is a 'store'.  */
          tmp_class = may_trap_exp (XEXP (x, 0), 1);
          break;
        case SET:
          /* Test if it is a store.  */
          tmp_class = may_trap_exp (SET_DEST (x), 1);
          if (tmp_class == TRAP_RISKY)
            break;
          /* Test if it is a load.  */
          tmp_class =
            WORST_CLASS (tmp_class,
                         may_trap_exp (SET_SRC (x), 0));
          break;
        case COND_EXEC:
          tmp_class = haifa_classify_rtx (COND_EXEC_CODE (x));
          if (tmp_class == TRAP_RISKY)
            break;
          tmp_class = WORST_CLASS (tmp_class,
                                   may_trap_exp (COND_EXEC_TEST (x), 0));
          break;
        case TRAP_IF:
          tmp_class = TRAP_RISKY;
          break;
        default:;
        }
      insn_class = tmp_class;
    }

  return insn_class;
}

static int
haifa_classify_insn (const_rtx insn)
{
  return haifa_classify_rtx (PATTERN (insn));
}
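
/* Illustrative examples only (dump-style notation, not calls made in
   this file):

     (set (reg) (mem (reg)))    -> PFREE_CANDIDATE (trapping load, one
                                   base register)
     (set (reg) (mem/v ...))    -> IRISKY (volatile load)
     (set (mem ...) (reg))      -> TRAP_RISKY if the store may trap
     (trap_if ...)              -> TRAP_RISKY

   haifa_classify_insn simply classifies PATTERN (insn) this way.  */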

/* After the scheduler initialization function has been called, this function
   can be called to enable modulo scheduling.  II is the initiation interval
   we should use, it affects the delays for delay_pairs that were recorded as
   separated by a given number of stages.

   MAX_STAGES provides us with a limit
   after which we give up scheduling; the caller must have unrolled at least
   as many copies of the loop body and recorded delay_pairs for them.

   INSNS is the number of real (non-debug) insns in one iteration of
   the loop.  MAX_UID can be used to test whether an insn belongs to
   the first iteration of the loop; all of them have a uid lower than
   MAX_UID.  */
void
set_modulo_params (int ii, int max_stages, int insns, int max_uid)
{
  modulo_ii = ii;
  modulo_max_stages = max_stages;
  modulo_n_insns = insns;
  modulo_iter0_max_uid = max_uid;
  modulo_backtracks_left = PARAM_VALUE (PARAM_MAX_MODULO_BACKTRACK_ATTEMPTS);
}

/* A structure to record a pair of insns where the first one is a real
   insn that has delay slots, and the second is its delayed shadow.
   I1 is scheduled normally and will emit an assembly instruction,
   while I2 describes the side effect that takes place at the
   transition between cycles CYCLES and (CYCLES + 1) after I1.  */
struct delay_pair
{
  struct delay_pair *next_same_i1;
  rtx_insn *i1, *i2;
  int cycles;
  /* When doing modulo scheduling, a delay_pair can also be used to
     show that I1 and I2 are the same insn in a different stage.  If that
     is the case, STAGES will be nonzero.  */
  int stages;
};

/* Helpers for delay hashing.  */

struct delay_i1_hasher : typed_noop_remove <delay_pair>
{
  typedef delay_pair value_type;
  typedef void compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
};

/* Returns a hash value for X, based on hashing just I1.  */

inline hashval_t
delay_i1_hasher::hash (const value_type *x)
{
  return htab_hash_pointer (x->i1);
}

/* Return true if I1 of pair X is the same as that of pair Y.  */

inline bool
delay_i1_hasher::equal (const value_type *x, const compare_type *y)
{
  return x->i1 == y;
}

struct delay_i2_hasher : typed_free_remove <delay_pair>
{
  typedef delay_pair value_type;
  typedef void compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
};

/* Returns a hash value for X, based on hashing just I2.  */

inline hashval_t
delay_i2_hasher::hash (const value_type *x)
{
  return htab_hash_pointer (x->i2);
}

/* Return true if I2 of pair X is the same as that of pair Y.  */

inline bool
delay_i2_hasher::equal (const value_type *x, const compare_type *y)
{
  return x->i2 == y;
}

/* Two hash tables to record delay_pairs, one indexed by I1 and the other
   indexed by I2.  */
static hash_table<delay_i1_hasher> *delay_htab;
static hash_table<delay_i2_hasher> *delay_htab_i2;

/* Called through htab_traverse.  Walk the hashtable using I2 as
   index, and delete all elements involving an UID higher than
   that pointed to by *DATA.  */
int
haifa_htab_i2_traverse (delay_pair **slot, int *data)
{
  int maxuid = *data;
  struct delay_pair *p = *slot;
  if (INSN_UID (p->i2) >= maxuid || INSN_UID (p->i1) >= maxuid)
    {
      delay_htab_i2->clear_slot (slot);
    }
  return 1;
}

/* Called through htab_traverse.  Walk the hashtable using I1 as
   index, and delete all elements involving an UID higher than
   that pointed to by *DATA.  */
int
haifa_htab_i1_traverse (delay_pair **pslot, int *data)
{
  int maxuid = *data;
  struct delay_pair *p, *first, **pprev;

  if (INSN_UID ((*pslot)->i1) >= maxuid)
    {
      delay_htab->clear_slot (pslot);
      return 1;
    }
  pprev = &first;
  for (p = *pslot; p; p = p->next_same_i1)
    {
      if (INSN_UID (p->i2) < maxuid)
        {
          *pprev = p;
          pprev = &p->next_same_i1;
        }
    }
  *pprev = NULL;
  if (first == NULL)
    delay_htab->clear_slot (pslot);
  else
    *pslot = first;
  return 1;
}

/* Discard all delay pairs which involve an insn with an UID higher
   than MAX_UID.  */
void
discard_delay_pairs_above (int max_uid)
{
  delay_htab->traverse <int *, haifa_htab_i1_traverse> (&max_uid);
  delay_htab_i2->traverse <int *, haifa_htab_i2_traverse> (&max_uid);
}

/* This function can be called by a port just before it starts the final
   scheduling pass.  It records the fact that an instruction with delay
   slots has been split into two insns, I1 and I2.  The first one will be
   scheduled normally and initiates the operation.  The second one is a
   shadow which must follow a specific number of cycles after I1; its only
   purpose is to show the side effect that occurs at that cycle in the RTL.
   If a JUMP_INSN or a CALL_INSN has been split, I1 should be a normal INSN,
   while I2 retains the original insn type.

   There are two ways in which the number of cycles can be specified,
   involving the CYCLES and STAGES arguments to this function.  If STAGES
   is zero, we just use the value of CYCLES.  Otherwise, STAGES is a factor
   which is multiplied by MODULO_II to give the number of cycles.  This is
   only useful if the caller also calls set_modulo_params to enable modulo
   scheduling.  */
void
record_delay_slot_pair (rtx_insn *i1, rtx_insn *i2, int cycles, int stages)
{
  struct delay_pair *p = XNEW (struct delay_pair);
  struct delay_pair **slot;

  p->i1 = i1;
  p->i2 = i2;
  p->cycles = cycles;
  p->stages = stages;

  if (!delay_htab)
    {
      delay_htab = new hash_table<delay_i1_hasher> (10);
      delay_htab_i2 = new hash_table<delay_i2_hasher> (10);
    }
  slot = delay_htab->find_slot_with_hash (i1, htab_hash_pointer (i1), INSERT);
  p->next_same_i1 = *slot;
  *slot = p;
  slot = delay_htab_i2->find_slot (p, INSERT);
  *slot = p;
}
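
/* A hedged usage sketch (hypothetical port code, not part of this
   file): after splitting a delay-slot insn into I1 and its shadow I2,
   a port could record that I2 takes effect two cycles after I1 with

     record_delay_slot_pair (i1, i2, 2, 0);

   With a nonzero STAGES argument and set_modulo_params in effect, the
   effective distance becomes STAGES * modulo_ii cycles instead.  */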

/* Examine the delay pair hashtable to see if INSN is a shadow for another,
   and return the other insn if so.  Return NULL otherwise.  */
rtx_insn *
real_insn_for_shadow (rtx_insn *insn)
{
  struct delay_pair *pair;

  if (!delay_htab)
    return NULL;

  pair = delay_htab_i2->find_with_hash (insn, htab_hash_pointer (insn));
  if (!pair || pair->stages > 0)
    return NULL;
  return pair->i1;
}

/* For a pair P of insns, return the fixed distance in cycles from the first
   insn after which the second must be scheduled.  */
static int
pair_delay (struct delay_pair *p)
{
  if (p->stages == 0)
    return p->cycles;
  else
    return p->stages * modulo_ii;
}

/* Given an insn INSN, add a dependence on its delayed shadow if it
   has one.  Also try to find situations where shadows depend on each other
   and add dependencies to the real insns to limit the amount of backtracking
   needed.  */
void
add_delay_dependencies (rtx_insn *insn)
{
  struct delay_pair *pair;
  sd_iterator_def sd_it;
  dep_t dep;

  if (!delay_htab)
    return;

  pair = delay_htab_i2->find_with_hash (insn, htab_hash_pointer (insn));
  if (!pair)
    return;
  add_dependence (insn, pair->i1, REG_DEP_ANTI);
  if (pair->stages)
    return;

  FOR_EACH_DEP (pair->i2, SD_LIST_BACK, sd_it, dep)
    {
      rtx_insn *pro = DEP_PRO (dep);
      struct delay_pair *other_pair
        = delay_htab_i2->find_with_hash (pro, htab_hash_pointer (pro));
      if (!other_pair || other_pair->stages)
        continue;
      if (pair_delay (other_pair) >= pair_delay (pair))
        {
          if (sched_verbose >= 4)
            {
              fprintf (sched_dump, ";;\tadding dependence %d <- %d\n",
                       INSN_UID (other_pair->i1),
                       INSN_UID (pair->i1));
              fprintf (sched_dump, ";;\tpair1 %d <- %d, cost %d\n",
                       INSN_UID (pair->i1),
                       INSN_UID (pair->i2),
                       pair_delay (pair));
              fprintf (sched_dump, ";;\tpair2 %d <- %d, cost %d\n",
                       INSN_UID (other_pair->i1),
                       INSN_UID (other_pair->i2),
                       pair_delay (other_pair));
            }
          add_dependence (pair->i1, other_pair->i1, REG_DEP_ANTI);
        }
    }
}

/* Forward declarations.  */

static int priority (rtx_insn *);
static int autopref_rank_for_schedule (const rtx_insn *, const rtx_insn *);
static int rank_for_schedule (const void *, const void *);
static void swap_sort (rtx_insn **, int);
static void queue_insn (rtx_insn *, int, const char *);
static int schedule_insn (rtx_insn *);
static void adjust_priority (rtx_insn *);
static void advance_one_cycle (void);
static void extend_h_i_d (void);

/* Notes handling mechanism:
   =========================
   Generally, NOTES are saved before scheduling and restored after scheduling.
   The scheduler distinguishes between two types of notes:

   (1) LOOP_BEGIN, LOOP_END, SETJMP, EHREGION_BEG, EHREGION_END notes:
   Before scheduling a region, a pointer to the note is added to the insn
   that follows or precedes it.  (This happens as part of the data dependence
   computation).  After scheduling an insn, the pointer contained in it is
   used for regenerating the corresponding note (in reemit_notes).

   (2) All other notes (e.g. INSN_DELETED):  Before scheduling a block,
   these notes are put in a list (in rm_other_notes() and
   unlink_other_notes ()).  After scheduling the block, these notes are
   inserted at the beginning of the block (in schedule_block()).  */

static void ready_add (struct ready_list *, rtx_insn *, bool);
static rtx_insn *ready_remove_first (struct ready_list *);
static rtx_insn *ready_remove_first_dispatch (struct ready_list *ready);

static void queue_to_ready (struct ready_list *);
static int early_queue_to_ready (state_t, struct ready_list *);

/* The following functions are used to implement multi-pass scheduling
   on the first cycle.  */
static rtx_insn *ready_remove (struct ready_list *, int);
static void ready_remove_insn (rtx);

static void fix_inter_tick (rtx_insn *, rtx_insn *);
static int fix_tick_ready (rtx_insn *);
static void change_queue_index (rtx_insn *, int);

/* The following functions are used to implement scheduling of data/control
   speculative instructions.  */

static void extend_h_i_d (void);
static void init_h_i_d (rtx_insn *);
static int haifa_speculate_insn (rtx_insn *, ds_t, rtx *);
static void generate_recovery_code (rtx_insn *);
static void process_insn_forw_deps_be_in_spec (rtx, rtx_insn *, ds_t);
static void begin_speculative_block (rtx_insn *);
static void add_to_speculative_block (rtx_insn *);
static void init_before_recovery (basic_block *);
static void create_check_block_twin (rtx_insn *, bool);
static void fix_recovery_deps (basic_block);
static bool haifa_change_pattern (rtx_insn *, rtx);
static void dump_new_block_header (int, basic_block, rtx_insn *, rtx_insn *);
static void restore_bb_notes (basic_block);
static void fix_jump_move (rtx_insn *);
static void move_block_after_check (rtx_insn *);
static void move_succs (vec<edge, va_gc> **, basic_block);
static void sched_remove_insn (rtx_insn *);
static void clear_priorities (rtx_insn *, rtx_vec_t *);
static void calc_priorities (rtx_vec_t);
static void add_jump_dependencies (rtx_insn *, rtx_insn *);

#endif /* INSN_SCHEDULING */

/* Point to state used for the current scheduling pass.  */
struct haifa_sched_info *current_sched_info;

#ifndef INSN_SCHEDULING
void
schedule_insns (void)
{
}
#else

/* Do register pressure sensitive insn scheduling if the flag is set
   to a nonzero value.  */
enum sched_pressure_algorithm sched_pressure;

/* Map regno -> its pressure class.  The map is defined only when
   SCHED_PRESSURE != SCHED_PRESSURE_NONE.  */
enum reg_class *sched_regno_pressure_class;

/* The current register pressure.  Only elements corresponding to pressure
   classes are defined.  */
static int curr_reg_pressure[N_REG_CLASSES];

/* Saved value of the previous array.  */
static int saved_reg_pressure[N_REG_CLASSES];

/* Registers living at given scheduling point.  */
static bitmap curr_reg_live;

/* Saved value of the previous array.  */
static bitmap saved_reg_live;

/* Registers mentioned in the current region.  */
static bitmap region_ref_regs;

/* Effective number of available registers of a given class (see comment
   in sched_pressure_start_bb).  */
static int sched_class_regs_num[N_REG_CLASSES];
/* Number of call_used_regs.  This is a helper for calculating
   sched_class_regs_num.  */
static int call_used_regs_num[N_REG_CLASSES];

/* Initiate register pressure relative info for scheduling the current
   region.  Currently it only clears registers mentioned in the
   current region.  */
void
sched_init_region_reg_pressure_info (void)
{
  bitmap_clear (region_ref_regs);
}

/* PRESSURE[CL] describes the pressure on register class CL.  Update it
   for the birth (if BIRTH_P) or death (if !BIRTH_P) of register REGNO.
   LIVE tracks the set of live registers; if it is null, assume that
   every birth or death is genuine.  */
static void
mark_regno_birth_or_death (bitmap live, int *pressure, int regno, bool birth_p)
{
  enum reg_class pressure_class;

  pressure_class = sched_regno_pressure_class[regno];
  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      if (pressure_class != NO_REGS)
        {
          if (birth_p)
            {
              if (!live || bitmap_set_bit (live, regno))
                pressure[pressure_class]
                  += (ira_reg_class_max_nregs
                      [pressure_class][PSEUDO_REGNO_MODE (regno)]);
            }
          else
            {
              if (!live || bitmap_clear_bit (live, regno))
                pressure[pressure_class]
                  -= (ira_reg_class_max_nregs
                      [pressure_class][PSEUDO_REGNO_MODE (regno)]);
            }
        }
    }
  else if (pressure_class != NO_REGS
           && ! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
    {
      if (birth_p)
        {
          if (!live || bitmap_set_bit (live, regno))
            pressure[pressure_class]++;
        }
      else
        {
          if (!live || bitmap_clear_bit (live, regno))
            pressure[pressure_class]--;
        }
    }
}

/* Initiate current register pressure related info from living
   registers given by LIVE.  */
static void
initiate_reg_pressure_info (bitmap live)
{
  int i;
  unsigned int j;
  bitmap_iterator bi;

  for (i = 0; i < ira_pressure_classes_num; i++)
    curr_reg_pressure[ira_pressure_classes[i]] = 0;
  bitmap_clear (curr_reg_live);
  EXECUTE_IF_SET_IN_BITMAP (live, 0, j, bi)
    if (sched_pressure == SCHED_PRESSURE_MODEL
        || current_nr_blocks == 1
        || bitmap_bit_p (region_ref_regs, j))
      mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure, j, true);
}

/* Mark registers in X as mentioned in the current region.  */
static void
setup_ref_regs (rtx x)
{
  int i, j;
  const RTX_CODE code = GET_CODE (x);
  const char *fmt;

  if (REG_P (x))
    {
      unsigned int regno = REGNO (x);
      if (HARD_REGISTER_NUM_P (regno))
        bitmap_set_range (region_ref_regs, regno,
                          hard_regno_nregs[regno][GET_MODE (x)]);
      else
        bitmap_set_bit (region_ref_regs, REGNO (x));
      return;
    }
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      setup_ref_regs (XEXP (x, i));
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
        setup_ref_regs (XVECEXP (x, i, j));
}

/* Initiate current register pressure related info at the start of
   basic block BB.  */
static void
initiate_bb_reg_pressure_info (basic_block bb)
{
  unsigned int i ATTRIBUTE_UNUSED;
  rtx_insn *insn;

  if (current_nr_blocks > 1)
    FOR_BB_INSNS (bb, insn)
      if (NONDEBUG_INSN_P (insn))
        setup_ref_regs (PATTERN (insn));
  initiate_reg_pressure_info (df_get_live_in (bb));
#ifdef EH_RETURN_DATA_REGNO
  if (bb_has_eh_pred (bb))
    for (i = 0; ; ++i)
      {
        unsigned int regno = EH_RETURN_DATA_REGNO (i);

        if (regno == INVALID_REGNUM)
          break;
        if (! bitmap_bit_p (df_get_live_in (bb), regno))
          mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure,
                                     regno, true);
      }
#endif
}

/* Save current register pressure related info.  */
static void
save_reg_pressure (void)
{
  int i;

  for (i = 0; i < ira_pressure_classes_num; i++)
    saved_reg_pressure[ira_pressure_classes[i]]
      = curr_reg_pressure[ira_pressure_classes[i]];
  bitmap_copy (saved_reg_live, curr_reg_live);
}

/* Restore saved register pressure related info.  */
static void
restore_reg_pressure (void)
{
  int i;

  for (i = 0; i < ira_pressure_classes_num; i++)
    curr_reg_pressure[ira_pressure_classes[i]]
      = saved_reg_pressure[ira_pressure_classes[i]];
  bitmap_copy (curr_reg_live, saved_reg_live);
}

/* Return TRUE if the register is dying after its USE.  */
static bool
dying_use_p (struct reg_use_data *use)
{
  struct reg_use_data *next;

  for (next = use->next_regno_use; next != use; next = next->next_regno_use)
    if (NONDEBUG_INSN_P (next->insn)
        && QUEUE_INDEX (next->insn) != QUEUE_SCHEDULED)
      return false;

  return true;
}
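
/* For illustration: the uses of a given regno form a circular list
   through next_regno_use.  Assuming insns I1 and I2 both use pseudo R,
   the ring looks like

     use(R in I1) -> use(R in I2) -> use(R in I1) -> ...

   and the use in I1 is "dying" iff every other use on the ring belongs
   to an insn that is already scheduled (or is a debug insn).  */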

/* Print info about the current register pressure and its excess for
   each pressure class.  */
static void
print_curr_reg_pressure (void)
{
  int i;
  enum reg_class cl;

  fprintf (sched_dump, ";;\t");
  for (i = 0; i < ira_pressure_classes_num; i++)
    {
      cl = ira_pressure_classes[i];
      gcc_assert (curr_reg_pressure[cl] >= 0);
      fprintf (sched_dump, "  %s:%d(%d)", reg_class_names[cl],
               curr_reg_pressure[cl],
               curr_reg_pressure[cl] - sched_class_regs_num[cl]);
    }
  fprintf (sched_dump, "\n");
}

/* Determine if INSN has a condition that is clobbered if a register
   in SET_REGS is modified.  */
static bool
cond_clobbered_p (rtx_insn *insn, HARD_REG_SET set_regs)
{
  rtx pat = PATTERN (insn);
  gcc_assert (GET_CODE (pat) == COND_EXEC);
  if (TEST_HARD_REG_BIT (set_regs, REGNO (XEXP (COND_EXEC_TEST (pat), 0))))
    {
      sd_iterator_def sd_it;
      dep_t dep;
      haifa_change_pattern (insn, ORIG_PAT (insn));
      FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
        DEP_STATUS (dep) &= ~DEP_CANCELLED;
      TODO_SPEC (insn) = HARD_DEP;
      if (sched_verbose >= 2)
        fprintf (sched_dump,
                 ";;\t\tdequeue insn %s because of clobbered condition\n",
                 (*current_sched_info->print_insn) (insn, 0));
      return true;
    }

  return false;
}

/* This function should be called after modifying the pattern of INSN,
   to update scheduler data structures as needed.  */
static void
update_insn_after_change (rtx_insn *insn)
{
  sd_iterator_def sd_it;
  dep_t dep;

  dfa_clear_single_insn_cache (insn);

  sd_it = sd_iterator_start (insn,
                             SD_LIST_FORW | SD_LIST_BACK | SD_LIST_RES_BACK);
  while (sd_iterator_cond (&sd_it, &dep))
    {
      DEP_COST (dep) = UNKNOWN_DEP_COST;
      sd_iterator_next (&sd_it);
    }

  /* Invalidate INSN_COST, so it'll be recalculated.  */
  INSN_COST (insn) = -1;
  /* Invalidate INSN_TICK, so it'll be recalculated.  */
  INSN_TICK (insn) = INVALID_TICK;

  /* Invalidate autoprefetch data entry.  */
  INSN_AUTOPREF_MULTIPASS_DATA (insn)[0].status
    = AUTOPREF_MULTIPASS_DATA_UNINITIALIZED;
  INSN_AUTOPREF_MULTIPASS_DATA (insn)[1].status
    = AUTOPREF_MULTIPASS_DATA_UNINITIALIZED;
}

/* Two VECs, one to hold dependencies for which pattern replacements
   need to be applied or restored at the start of the next cycle, and
   another to hold an integer that is either one, to apply the
   corresponding replacement, or zero to restore it.  */
static vec<dep_t> next_cycle_replace_deps;
static vec<int> next_cycle_apply;

static void apply_replacement (dep_t, bool);
static void restore_pattern (dep_t, bool);

/* Look at the remaining dependencies for insn NEXT, and compute and return
   the TODO_SPEC value we should use for it.  This is called after one of
   NEXT's dependencies has been resolved.
   We also perform pattern replacements for predication, and for broken
   replacement dependencies.  The latter is only done if FOR_BACKTRACK is
   false.  */

static ds_t
recompute_todo_spec (rtx_insn *next, bool for_backtrack)
{
  ds_t new_ds;
  sd_iterator_def sd_it;
  dep_t dep, modify_dep = NULL;
  int n_spec = 0;
  int n_control = 0;
  int n_replace = 0;
  bool first_p = true;

  if (sd_lists_empty_p (next, SD_LIST_BACK))
    /* NEXT has all its dependencies resolved.  */
    return 0;

  if (!sd_lists_empty_p (next, SD_LIST_HARD_BACK))
    return HARD_DEP;

  /* If NEXT is intended to sit adjacent to this instruction, we don't
     want to try to break any dependencies.  Treat it as a HARD_DEP.  */
  if (SCHED_GROUP_P (next))
    return HARD_DEP;

  /* Now we've got NEXT with speculative deps only.
     1. Look at the deps to see what we have to do.
     2. Check if we can do 'todo'.  */
  new_ds = 0;

  FOR_EACH_DEP (next, SD_LIST_BACK, sd_it, dep)
    {
      rtx_insn *pro = DEP_PRO (dep);
      ds_t ds = DEP_STATUS (dep) & SPECULATIVE;

      if (DEBUG_INSN_P (pro) && !DEBUG_INSN_P (next))
        continue;

      if (ds)
        {
          n_spec++;
          if (first_p)
            {
              first_p = false;

              new_ds = ds;
            }
          else
            new_ds = ds_merge (new_ds, ds);
        }
      else if (DEP_TYPE (dep) == REG_DEP_CONTROL)
        {
          if (QUEUE_INDEX (pro) != QUEUE_SCHEDULED)
            {
              n_control++;
              modify_dep = dep;
            }
          DEP_STATUS (dep) &= ~DEP_CANCELLED;
        }
      else if (DEP_REPLACE (dep) != NULL)
        {
          if (QUEUE_INDEX (pro) != QUEUE_SCHEDULED)
            {
              n_replace++;
              modify_dep = dep;
            }
          DEP_STATUS (dep) &= ~DEP_CANCELLED;
        }
    }

  if (n_replace > 0 && n_control == 0 && n_spec == 0)
    {
      if (!dbg_cnt (sched_breakdep))
        return HARD_DEP;
      FOR_EACH_DEP (next, SD_LIST_BACK, sd_it, dep)
        {
          struct dep_replacement *desc = DEP_REPLACE (dep);
          if (desc != NULL)
            {
              if (desc->insn == next && !for_backtrack)
                {
                  gcc_assert (n_replace == 1);
                  apply_replacement (dep, true);
                }
              DEP_STATUS (dep) |= DEP_CANCELLED;
            }
        }
      return 0;
    }
  else if (n_control == 1 && n_replace == 0 && n_spec == 0)
    {
      rtx_insn *pro, *other;
      rtx new_pat;
      rtx cond = NULL_RTX;
      bool success;
      rtx_insn *prev = NULL;
      int i;
      unsigned regno;

      if ((current_sched_info->flags & DO_PREDICATION) == 0
          || (ORIG_PAT (next) != NULL_RTX
              && PREDICATED_PAT (next) == NULL_RTX))
        return HARD_DEP;

      pro = DEP_PRO (modify_dep);
      other = real_insn_for_shadow (pro);
      if (other != NULL_RTX)
        pro = other;

      cond = sched_get_reverse_condition_uncached (pro);
      regno = REGNO (XEXP (cond, 0));

      /* Find the last scheduled insn that modifies the condition register.
         We can stop looking once we find the insn we depend on through the
         REG_DEP_CONTROL; if the condition register isn't modified after it,
         we know that it still has the right value.  */
      if (QUEUE_INDEX (pro) == QUEUE_SCHEDULED)
        FOR_EACH_VEC_ELT_REVERSE (scheduled_insns, i, prev)
          {
            HARD_REG_SET t;

            find_all_hard_reg_sets (prev, &t, true);
            if (TEST_HARD_REG_BIT (t, regno))
              return HARD_DEP;
            if (prev == pro)
              break;
          }
      if (ORIG_PAT (next) == NULL_RTX)
        {
          ORIG_PAT (next) = PATTERN (next);

          new_pat = gen_rtx_COND_EXEC (VOIDmode, cond, PATTERN (next));
          success = haifa_change_pattern (next, new_pat);
          if (!success)
            return HARD_DEP;
          PREDICATED_PAT (next) = new_pat;
        }
      else if (PATTERN (next) != PREDICATED_PAT (next))
        {
          bool success = haifa_change_pattern (next,
                                               PREDICATED_PAT (next));
          gcc_assert (success);
        }
      DEP_STATUS (modify_dep) |= DEP_CANCELLED;
      return DEP_CONTROL;
    }

  if (PREDICATED_PAT (next) != NULL_RTX)
    {
      int tick = INSN_TICK (next);
      bool success = haifa_change_pattern (next,
                                           ORIG_PAT (next));
      INSN_TICK (next) = tick;
      gcc_assert (success);
    }

  /* We can't handle the case where there are both speculative and control
     dependencies, so we return HARD_DEP in such a case.  Also fail if
     we have speculative dependencies with not enough points, or more than
     one control dependency.  */
  if ((n_spec > 0 && (n_control > 0 || n_replace > 0))
      || (n_spec > 0
          /* Too few points?  */
          && ds_weak (new_ds) < spec_info->data_weakness_cutoff)
      || (n_control > 0 && n_replace > 0))
    return HARD_DEP;

  return new_ds;
}

/* Pointer to the last instruction scheduled.  */
static rtx_insn *last_scheduled_insn;

/* Pointer to the last nondebug instruction scheduled within the
   block, or the prev_head of the scheduling block.  Used by
   rank_for_schedule, so that insns independent of the last scheduled
   insn will be preferred over dependent instructions.  */
static rtx last_nondebug_scheduled_insn;

/* Pointer that iterates through the list of unscheduled insns if we
   have a dbg_cnt enabled.  It always points at an insn prior to the
   first unscheduled one.  */
static rtx_insn *nonscheduled_insns_begin;

/* Compute cost of executing INSN.
   This is the number of cycles between instruction issue and
   instruction results.  */
int
insn_cost (rtx_insn *insn)
{
  int cost;

  if (sel_sched_p ())
    {
      if (recog_memoized (insn) < 0)
        return 0;

      cost = insn_default_latency (insn);
      if (cost < 0)
        cost = 0;

      return cost;
    }

  cost = INSN_COST (insn);

  if (cost < 0)
    {
      /* A USE insn, or something else we don't need to
         understand.  We can't pass these directly to
         result_ready_cost or insn_default_latency because it will
         trigger a fatal error for unrecognizable insns.  */
      if (recog_memoized (insn) < 0)
        {
          INSN_COST (insn) = 0;
          return 0;
        }
      else
        {
          cost = insn_default_latency (insn);
          if (cost < 0)
            cost = 0;

          INSN_COST (insn) = cost;
        }
    }

  return cost;
}

/* Compute cost of dependence LINK.
   This is the number of cycles between instruction issue and
   instruction results.
   ??? We also use this function to call recog_memoized on all insns.  */
static int
dep_cost_1 (dep_t link, dw_t dw)
{
  rtx_insn *insn = DEP_PRO (link);
  rtx_insn *used = DEP_CON (link);
  int cost;

  if (DEP_COST (link) != UNKNOWN_DEP_COST)
    return DEP_COST (link);

  if (delay_htab)
    {
      struct delay_pair *delay_entry;
      delay_entry
        = delay_htab_i2->find_with_hash (used, htab_hash_pointer (used));
      if (delay_entry)
        {
          if (delay_entry->i1 == insn)
            {
              DEP_COST (link) = pair_delay (delay_entry);
              return DEP_COST (link);
            }
        }
    }

  /* A USE insn should never require the value used to be computed.
     This allows the computation of a function's result and parameter
     values to overlap the return and call.  We don't care about the
     dependence cost when only decreasing register pressure.  */
  if (recog_memoized (used) < 0)
    {
      cost = 0;
      recog_memoized (insn);
    }
  else
    {
      enum reg_note dep_type = DEP_TYPE (link);

      cost = insn_cost (insn);

      if (INSN_CODE (insn) >= 0)
        {
          if (dep_type == REG_DEP_ANTI)
            cost = 0;
          else if (dep_type == REG_DEP_OUTPUT)
            {
              cost = (insn_default_latency (insn)
                      - insn_default_latency (used));
              if (cost <= 0)
                cost = 1;
            }
          else if (bypass_p (insn))
            cost = insn_latency (insn, used);
        }

      if (targetm.sched.adjust_cost_2)
        cost = targetm.sched.adjust_cost_2 (used, (int) dep_type, insn, cost,
                                            dw);
      else if (targetm.sched.adjust_cost != NULL)
        {
          /* This variable is used for backward compatibility with the
             targets.  */
          rtx_insn_list *dep_cost_rtx_link =
            alloc_INSN_LIST (NULL_RTX, NULL);

          /* Make it self-cycled, so that if someone tries to walk over this
             incomplete list he/she will be caught in an endless loop.  */
          XEXP (dep_cost_rtx_link, 1) = dep_cost_rtx_link;

          /* Targets use only REG_NOTE_KIND of the link.  */
          PUT_REG_NOTE_KIND (dep_cost_rtx_link, DEP_TYPE (link));

          cost = targetm.sched.adjust_cost (used, dep_cost_rtx_link,
                                            insn, cost);

          free_INSN_LIST_node (dep_cost_rtx_link);
        }

      if (cost < 0)
        cost = 0;
    }

  DEP_COST (link) = cost;
  return cost;
}

/* Compute cost of dependence LINK.
   This is the number of cycles between instruction issue and
   instruction results.  */
int
dep_cost (dep_t link)
{
  return dep_cost_1 (link, 0);
}
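
/* A worked example of the rules above (illustrative numbers only):
   if producer PRO has a default latency of 3 and consumer CON of 1,
   then before any target adjust_cost hook runs,

     true dependence   PRO -> CON : cost = insn_cost (PRO) = 3
     anti dependence   PRO -> CON : cost = 0
     output dependence PRO -> CON : cost = 3 - 1 = 2 (minimum 1).  */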

/* Use this sel-sched.c friendly function in reorder2 instead of increasing
   INSN_PRIORITY explicitly.  */
void
increase_insn_priority (rtx_insn *insn, int amount)
{
  if (!sel_sched_p ())
    {
      /* We're dealing with haifa-sched.c INSN_PRIORITY.  */
      if (INSN_PRIORITY_KNOWN (insn))
        INSN_PRIORITY (insn) += amount;
    }
  else
    {
      /* In sel-sched.c INSN_PRIORITY is not kept up to date.
         Use EXPR_PRIORITY instead.  */
      sel_add_to_insn_priority (insn, amount);
    }
}

/* Return 'true' if DEP should be included in priority calculations.  */
static bool
contributes_to_priority_p (dep_t dep)
{
  if (DEBUG_INSN_P (DEP_CON (dep))
      || DEBUG_INSN_P (DEP_PRO (dep)))
    return false;

  /* Critical path is meaningful in block boundaries only.  */
  if (!current_sched_info->contributes_to_priority (DEP_CON (dep),
                                                    DEP_PRO (dep)))
    return false;

  if (DEP_REPLACE (dep) != NULL)
    return false;

  /* If flag COUNT_SPEC_IN_CRITICAL_PATH is set,
     then speculative instructions will less likely be
     scheduled.  That is because the priority of
     their producers will increase, and, thus, the
     producers will more likely be scheduled, thus,
     resolving the dependence.  */
  if (sched_deps_info->generate_spec_deps
      && !(spec_info->flags & COUNT_SPEC_IN_CRITICAL_PATH)
      && (DEP_STATUS (dep) & SPECULATIVE))
    return false;

  return true;
}

/* Compute the number of nondebug deps in list LIST for INSN.  */
static int
dep_list_size (rtx insn, sd_list_types_def list)
{
  sd_iterator_def sd_it;
  dep_t dep;
  int dbgcount = 0, nodbgcount = 0;

  if (!MAY_HAVE_DEBUG_INSNS)
    return sd_lists_size (insn, list);

  FOR_EACH_DEP (insn, list, sd_it, dep)
    {
      if (DEBUG_INSN_P (DEP_CON (dep)))
        dbgcount++;
      else if (!DEBUG_INSN_P (DEP_PRO (dep)))
        nodbgcount++;
    }

  gcc_assert (dbgcount + nodbgcount == sd_lists_size (insn, list));

  return nodbgcount;
}

/* Compute the priority number for INSN.  */
static int
priority (rtx_insn *insn)
{
  if (! INSN_P (insn))
    return 0;

  /* We should not be interested in priority of an already scheduled insn.  */
  gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);

  if (!INSN_PRIORITY_KNOWN (insn))
    {
      int this_priority = -1;

      if (sched_fusion)
        {
          int this_fusion_priority;

          targetm.sched.fusion_priority (insn, FUSION_MAX_PRIORITY,
                                         &this_fusion_priority, &this_priority);
          INSN_FUSION_PRIORITY (insn) = this_fusion_priority;
        }
      else if (dep_list_size (insn, SD_LIST_FORW) == 0)
        /* ??? We should set INSN_PRIORITY to insn_cost when an insn has
           some forward deps but all of them are ignored by
           contributes_to_priority hook.  At the moment we set priority of
           such insn to 0.  */
        this_priority = insn_cost (insn);
      else
        {
          rtx_insn *prev_first, *twin;
          basic_block rec;

          /* For recovery check instructions we calculate priority slightly
             differently than for normal instructions.  Instead of walking
             through INSN_FORW_DEPS (check) list, we walk through
             INSN_FORW_DEPS list of each instruction in the corresponding
             recovery block.  */

          /* Selective scheduling does not define RECOVERY_BLOCK macro.  */
          rec = sel_sched_p () ? NULL : RECOVERY_BLOCK (insn);
          if (!rec || rec == EXIT_BLOCK_PTR_FOR_FN (cfun))
            {
              prev_first = PREV_INSN (insn);
              twin = insn;
            }
          else
            {
              prev_first = NEXT_INSN (BB_HEAD (rec));
              twin = PREV_INSN (BB_END (rec));
            }

          do
            {
              sd_iterator_def sd_it;
              dep_t dep;

              FOR_EACH_DEP (twin, SD_LIST_FORW, sd_it, dep)
                {
                  rtx_insn *next;
                  int next_priority;

                  next = DEP_CON (dep);

                  if (BLOCK_FOR_INSN (next) != rec)
                    {
                      int cost;

                      if (!contributes_to_priority_p (dep))
                        continue;

                      if (twin == insn)
                        cost = dep_cost (dep);
                      else
                        {
                          struct _dep _dep1, *dep1 = &_dep1;

                          init_dep (dep1, insn, next, REG_DEP_ANTI);

                          cost = dep_cost (dep1);
                        }

                      next_priority = cost + priority (next);

                      if (next_priority > this_priority)
                        this_priority = next_priority;
                    }
                }

              twin = PREV_INSN (twin);
            }
          while (twin != prev_first);
        }

      if (this_priority < 0)
        {
          gcc_assert (this_priority == -1);

          this_priority = insn_cost (insn);
        }

      INSN_PRIORITY (insn) = this_priority;
      INSN_PRIORITY_STATUS (insn) = 1;
    }

  return INSN_PRIORITY (insn);
}

/* Macros and functions for keeping the priority queue sorted, and
   dealing with queuing and dequeuing of instructions.  */

/* For each pressure class CL, set DEATH[CL] to the number of registers
   in that class that die in INSN.  */
static void
calculate_reg_deaths (rtx_insn *insn, int *death)
{
  int i;
  struct reg_use_data *use;

  for (i = 0; i < ira_pressure_classes_num; i++)
    death[ira_pressure_classes[i]] = 0;
  for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
    if (dying_use_p (use))
      mark_regno_birth_or_death (0, death, use->regno, true);
}

/* Setup info about the current register pressure impact of scheduling
   INSN at the current scheduling point.  */
static void
setup_insn_reg_pressure_info (rtx_insn *insn)
{
  int i, change, before, after, hard_regno;
  int excess_cost_change;
  machine_mode mode;
  enum reg_class cl;
  struct reg_pressure_data *pressure_info;
  int *max_reg_pressure;
  static int death[N_REG_CLASSES];

  gcc_checking_assert (!DEBUG_INSN_P (insn));

  excess_cost_change = 0;
  calculate_reg_deaths (insn, death);
  pressure_info = INSN_REG_PRESSURE (insn);
  max_reg_pressure = INSN_MAX_REG_PRESSURE (insn);
  gcc_assert (pressure_info != NULL && max_reg_pressure != NULL);
  for (i = 0; i < ira_pressure_classes_num; i++)
    {
      cl = ira_pressure_classes[i];
      gcc_assert (curr_reg_pressure[cl] >= 0);
      change = (int) pressure_info[i].set_increase - death[cl];
      before = MAX (0, max_reg_pressure[i] - sched_class_regs_num[cl]);
      after = MAX (0, max_reg_pressure[i] + change
                   - sched_class_regs_num[cl]);
      hard_regno = ira_class_hard_regs[cl][0];
      gcc_assert (hard_regno >= 0);
      mode = reg_raw_mode[hard_regno];
      excess_cost_change += ((after - before)
                             * (ira_memory_move_cost[mode][cl][0]
                                + ira_memory_move_cost[mode][cl][1]));
    }
  INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insn) = excess_cost_change;
}
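
/* A worked example (illustrative numbers only): suppose a class CL has
   sched_class_regs_num[CL] = 4, the insn's max-pressure snapshot for CL
   is 5, and its net change is -1 (one CL register dies, none is set).
   Then

     before = MAX (0, 5 - 4) = 1,   after = MAX (0, 5 - 1 - 4) = 0

   so the insn's excess cost change decreases by one (memory-in +
   memory-out) move cost: scheduling it now is rewarded for relieving
   pressure.  */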

/* This is the first page of code related to SCHED_PRESSURE_MODEL.
   It tries to make the scheduler take register pressure into account
   without introducing too many unnecessary stalls.  It hooks into the
   main scheduling algorithm at several points:

   - Before scheduling starts, model_start_schedule constructs a
     "model schedule" for the current block.  This model schedule is
     chosen solely to keep register pressure down.  It does not take the
     target's pipeline or the original instruction order into account,
     except as a tie-breaker.  It also doesn't work to a particular
     pressure limit.

     This model schedule gives us an idea of what pressure can be
     achieved for the block and gives us an example of a schedule that
     keeps to that pressure.  It also makes the final schedule less
     dependent on the original instruction order.  This is important
     because the original order can either be "wide" (many values live
     at once, such as in user-scheduled code) or "narrow" (few values
     live at once, such as after loop unrolling, where several
     iterations are executed sequentially).

     We do not apply this model schedule to the rtx stream.  We simply
     record it in model_schedule.  We also compute the maximum pressure,
     MP, that was seen during this schedule.

   - Instructions are added to the ready queue even if they require
     a stall.  The length of the stall is instead computed as:

        MAX (INSN_TICK (INSN) - clock_var, 0)

     (= insn_delay).  This allows rank_for_schedule to choose between
     introducing a deliberate stall or increasing pressure.

   - Before sorting the ready queue, model_set_excess_costs assigns
     a pressure-based cost to each ready instruction in the queue.
     This is the instruction's INSN_REG_PRESSURE_EXCESS_COST_CHANGE
     (ECC for short) and is effectively measured in cycles.

   - rank_for_schedule ranks instructions based on:

        ECC (insn) + insn_delay (insn)

     So, for example, an instruction X1 with an ECC of 1 that can issue
     now will win over an instruction X0 with an ECC of zero that would
     introduce a stall of one cycle.  However, an instruction X2 with an
     ECC of 2 that can issue now will lose to both X0 and X1.

   - When an instruction is scheduled, model_recompute updates the model
     schedule with the new pressures (some of which might now exceed the
     original maximum pressure MP).  model_update_limit_points then searches
     for the new point of maximum pressure, if not already known.  */

/* Used to separate high-verbosity debug information for SCHED_PRESSURE_MODEL
   from surrounding debug information.  */
#define MODEL_BAR \
  ";;\t\t+------------------------------------------------------\n"

/* Information about the pressure on a particular register class at a
   particular point of the model schedule.  */
struct model_pressure_data {
  /* The pressure at this point of the model schedule, or -1 if the
     point is associated with an instruction that has already been
     scheduled.  */
  int ref_pressure;

  /* The maximum pressure during or after this point of the model schedule.  */
  int max_pressure;
};

/* Per-instruction information that is used while building the model
   schedule.  Here, "schedule" refers to the model schedule rather
   than the main schedule.  */
struct model_insn_info {
  /* The instruction itself.  */
  rtx_insn *insn;

  /* If this instruction is in model_worklist, these fields link to the
     previous (higher-priority) and next (lower-priority) instructions
     in the list.  */
  struct model_insn_info *prev;
  struct model_insn_info *next;

  /* While constructing the schedule, QUEUE_INDEX describes whether an
     instruction has already been added to the schedule (QUEUE_SCHEDULED),
     is in model_worklist (QUEUE_READY), or neither (QUEUE_NOWHERE).
     old_queue records the value that QUEUE_INDEX had before scheduling
     started, so that we can restore it once the schedule is complete.  */
  int old_queue;

  /* The relative importance of an unscheduled instruction.  Higher
     values indicate greater importance.  */
  unsigned int model_priority;

  /* The length of the longest path of satisfied true dependencies
     that leads to this instruction.  */
  unsigned int depth;

  /* The length of the longest path of dependencies of any kind
     that leads from this instruction.  */
  unsigned int alap;

  /* The number of predecessor nodes that must still be scheduled.  */
  int unscheduled_preds;
};

/* Information about the pressure limit for a particular register class.
   This structure is used when applying a model schedule to the main
   schedule.  */
struct model_pressure_limit {
  /* The maximum register pressure seen in the original model schedule.  */
  int orig_pressure;

  /* The maximum register pressure seen in the current model schedule
     (which excludes instructions that have already been scheduled).  */
  int pressure;

  /* The point of the current model schedule at which PRESSURE is first
     reached.  It is set to -1 if the value needs to be recomputed.  */
  int point;
};

/* Describes a particular way of measuring register pressure.  */
struct model_pressure_group {
  /* Index PCI describes the maximum pressure on ira_pressure_classes[PCI].  */
  struct model_pressure_limit limits[N_REG_CLASSES];

  /* Index (POINT * ira_num_pressure_classes + PCI) describes the pressure
     on register class ira_pressure_classes[PCI] at point POINT of the
     current model schedule.  A POINT of model_num_insns describes the
     pressure at the end of the schedule.  */
  struct model_pressure_data *model;
};

/* Index POINT gives the instruction at point POINT of the model schedule.
   This array doesn't change during main scheduling.  */
static vec<rtx_insn *> model_schedule;

/* The list of instructions in the model worklist, sorted in order of
   decreasing priority.  */
static struct model_insn_info *model_worklist;

/* Index I describes the instruction with INSN_LUID I.  */
static struct model_insn_info *model_insns;

/* The number of instructions in the model schedule.  */
static int model_num_insns;

/* The index of the first instruction in model_schedule that hasn't yet been
   added to the main schedule, or model_num_insns if all of them have.  */
static int model_curr_point;

/* Describes the pressure before each instruction in the model schedule.  */
static struct model_pressure_group model_before_pressure;

/* The first unused model_priority value (as used in model_insn_info).  */
static unsigned int model_next_priority;
1954 /* The model_pressure_data for ira_pressure_classes[PCI] in GROUP
1955 at point POINT of the model schedule. */
1956 #define MODEL_PRESSURE_DATA(GROUP, POINT, PCI) \
1957 (&(GROUP)->model[(POINT) * ira_pressure_classes_num + (PCI)])
1959 /* The maximum pressure on ira_pressure_classes[PCI] in GROUP at or
1960 after point POINT of the model schedule. */
1961 #define MODEL_MAX_PRESSURE(GROUP, POINT, PCI) \
1962 (MODEL_PRESSURE_DATA (GROUP, POINT, PCI)->max_pressure)
1964 /* The pressure on ira_pressure_classes[PCI] in GROUP at point POINT
1965 of the model schedule. */
1966 #define MODEL_REF_PRESSURE(GROUP, POINT, PCI) \
1967 (MODEL_PRESSURE_DATA (GROUP, POINT, PCI)->ref_pressure)
1969 /* Information about INSN that is used when creating the model schedule. */
1970 #define MODEL_INSN_INFO(INSN) \
1971 (&model_insns[INSN_LUID (INSN)])
1973 /* The instruction at point POINT of the model schedule. */
1974 #define MODEL_INSN(POINT) \
1975 (model_schedule[POINT])
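/* [Editorial illustration -- not part of the original file.] A sketch of
   how the flat MODEL array is addressed through the macros above. The
   entry for pressure class PCI at point POINT lives at offset
   POINT * ira_pressure_classes_num + PCI, so dumping every class at one
   point could look like this (dump_model_point is hypothetical; the real
   dump routines are model_dump_pressure_points and
   model_dump_pressure_summary below):

   static void
   dump_model_point (struct model_pressure_group *group, int point)
   {
     for (int pci = 0; pci < ira_pressure_classes_num; pci++)
       fprintf (sched_dump, " %s:%d",
                reg_class_names[ira_pressure_classes[pci]],
                MODEL_REF_PRESSURE (group, point, pci));
   }  */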
1978 /* Return INSN's index in the model schedule, or model_num_insns if it
1979 doesn't belong to that schedule. */
1981 static int
1982 model_index (rtx_insn *insn)
1983 {
1984 if (INSN_MODEL_INDEX (insn) == 0)
1985 return model_num_insns;
1986 return INSN_MODEL_INDEX (insn) - 1;
1987 }
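/* [Editorial note.] INSN_MODEL_INDEX is biased by 1 so that 0 can mean
   "not in the model schedule"; model_index undoes the bias. For example,
   model_add_to_schedule below stores point + 1, so immediately after:

     model_add_to_schedule (insn);
     gcc_assert (model_index (insn)
                 == (int) model_schedule.length () - 1);

   holds for an insn pushed at the end of the schedule.  */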
1989 /* Make sure that GROUP->limits is up-to-date for the current point
1990 of the model schedule. */
1992 static void
1993 model_update_limit_points_in_group (struct model_pressure_group *group)
1994 {
1995 int pci, max_pressure, point;
1997 for (pci = 0; pci < ira_pressure_classes_num; pci++)
1998 {
1999 /* We may have passed the final point at which the pressure in
2000 group->limits[pci].pressure was reached. Update the limit if so. */
2001 max_pressure = MODEL_MAX_PRESSURE (group, model_curr_point, pci);
2002 group->limits[pci].pressure = max_pressure;
2004 /* Find the point at which MAX_PRESSURE is first reached. We need
2005 to search in three cases:
2007 - We've already moved past the previous pressure point.
2008 In this case we search forward from model_curr_point.
2010 - We scheduled the previous point of maximum pressure ahead of
2011 its position in the model schedule, but doing so didn't bring
2012 the pressure point earlier. In this case we search forward
2013 from that previous pressure point.
2015 - Scheduling an instruction early caused the maximum pressure
2016 to decrease. In this case we will have set the pressure
2017 point to -1, and we search forward from model_curr_point. */
2018 point = MAX (group->limits[pci].point, model_curr_point);
2019 while (point < model_num_insns
2020 && MODEL_REF_PRESSURE (group, point, pci) < max_pressure)
2021 point++;
2022 group->limits[pci].point = point;
2024 gcc_assert (MODEL_REF_PRESSURE (group, point, pci) == max_pressure);
2025 gcc_assert (MODEL_MAX_PRESSURE (group, point, pci) == max_pressure);
2026 }
2027 }
2029 /* Make sure that all register-pressure limits are up-to-date for the
2030 current position in the model schedule. */
2032 static void
2033 model_update_limit_points (void)
2034 {
2035 model_update_limit_points_in_group (&model_before_pressure);
2036 }
2038 /* Return the model_index of the last unscheduled use in chain USE
2039 outside of USE's instruction. Return -1 if there are no other uses,
2040 or model_num_insns if the register is live at the end of the block. */
2042 static int
2043 model_last_use_except (struct reg_use_data *use)
2044 {
2045 struct reg_use_data *next;
2046 int last, index;
2048 last = -1;
2049 for (next = use->next_regno_use; next != use; next = next->next_regno_use)
2050 if (NONDEBUG_INSN_P (next->insn)
2051 && QUEUE_INDEX (next->insn) != QUEUE_SCHEDULED)
2052 {
2053 index = model_index (next->insn);
2054 if (index == model_num_insns)
2055 return model_num_insns;
2056 if (last < index)
2057 last = index;
2058 }
2060 return last;
2061 }
2062 /* An instruction with model_index POINT has just been scheduled, and it
2063 adds DELTA to the pressure on ira_pressure_classes[PCI] after POINT - 1.
2064 Update MODEL_REF_PRESSURE (GROUP, POINT, PCI) and
2065 MODEL_MAX_PRESSURE (GROUP, POINT, PCI) accordingly. */
2067 static void
2068 model_start_update_pressure (struct model_pressure_group *group,
2069 int point, int pci, int delta)
2070 {
2071 int next_max_pressure;
2073 if (point == model_num_insns)
2074 {
2075 /* The instruction wasn't part of the model schedule; it was moved
2076 from a different block. Update the pressure for the end of
2077 the model schedule. */
2078 MODEL_REF_PRESSURE (group, point, pci) += delta;
2079 MODEL_MAX_PRESSURE (group, point, pci) += delta;
2080 }
2081 else
2082 {
2083 /* Record that this instruction has been scheduled. Nothing now
2084 changes between POINT and POINT + 1, so get the maximum pressure
2085 from the latter. If the maximum pressure decreases, the new
2086 pressure point may be before POINT. */
2087 MODEL_REF_PRESSURE (group, point, pci) = -1;
2088 next_max_pressure = MODEL_MAX_PRESSURE (group, point + 1, pci);
2089 if (MODEL_MAX_PRESSURE (group, point, pci) > next_max_pressure)
2090 {
2091 MODEL_MAX_PRESSURE (group, point, pci) = next_max_pressure;
2092 if (group->limits[pci].point == point)
2093 group->limits[pci].point = -1;
2094 }
2095 }
2096 }
2098 /* Record that scheduling a later instruction has changed the pressure
2099 at point POINT of the model schedule by DELTA (which might be 0).
2100 Update GROUP accordingly. Return nonzero if these changes might
2101 trigger changes to previous points as well. */
2103 static bool
2104 model_update_pressure (struct model_pressure_group *group,
2105 int point, int pci, int delta)
2106 {
2107 int ref_pressure, max_pressure, next_max_pressure;
2109 /* If POINT hasn't yet been scheduled, update its pressure. */
2110 ref_pressure = MODEL_REF_PRESSURE (group, point, pci);
2111 if (ref_pressure >= 0 && delta != 0)
2112 {
2113 ref_pressure += delta;
2114 MODEL_REF_PRESSURE (group, point, pci) = ref_pressure;
2116 /* Check whether the maximum pressure in the overall schedule
2117 has increased. (This means that the MODEL_MAX_PRESSURE of
2118 every point <= POINT will need to increase too; see below.) */
2119 if (group->limits[pci].pressure < ref_pressure)
2120 group->limits[pci].pressure = ref_pressure;
2122 /* If we are at maximum pressure, and the maximum pressure
2123 point was previously unknown or later than POINT,
2124 bring it forward. */
2125 if (group->limits[pci].pressure == ref_pressure
2126 && !IN_RANGE (group->limits[pci].point, 0, point))
2127 group->limits[pci].point = point;
2129 /* If POINT used to be the point of maximum pressure, but isn't
2130 any longer, we need to recalculate it using a forward walk. */
2131 if (group->limits[pci].pressure > ref_pressure
2132 && group->limits[pci].point == point)
2133 group->limits[pci].point = -1;
2134 }
2136 /* Update the maximum pressure at POINT. Changes here might also
2137 affect the maximum pressure at POINT - 1. */
2138 next_max_pressure = MODEL_MAX_PRESSURE (group, point + 1, pci);
2139 max_pressure = MAX (ref_pressure, next_max_pressure);
2140 if (MODEL_MAX_PRESSURE (group, point, pci) != max_pressure)
2141 {
2142 MODEL_MAX_PRESSURE (group, point, pci) = max_pressure;
2143 return true;
2144 }
2145 return false;
2146 }
2148 /* INSN has just been scheduled. Update the model schedule accordingly. */
2150 static void
2151 model_recompute (rtx_insn *insn)
2152 {
2153 struct {
2154 int last_use;
2155 int regno;
2156 } uses[FIRST_PSEUDO_REGISTER + MAX_RECOG_OPERANDS];
2157 struct reg_use_data *use;
2158 struct reg_pressure_data *reg_pressure;
2159 int delta[N_REG_CLASSES];
2160 int pci, point, mix, new_last, cl, ref_pressure, queue;
2161 unsigned int i, num_uses, num_pending_births;
2162 bool print_p;
2164 /* The destinations of INSN were previously live from POINT onwards, but are
2165 now live from model_curr_point onwards. Set up DELTA accordingly. */
2166 point = model_index (insn);
2167 reg_pressure = INSN_REG_PRESSURE (insn);
2168 for (pci = 0; pci < ira_pressure_classes_num; pci++)
2169 {
2170 cl = ira_pressure_classes[pci];
2171 delta[cl] = reg_pressure[pci].set_increase;
2172 }
2174 /* Record which registers previously died at POINT, but which now die
2175 before POINT. Adjust DELTA so that it represents the effect of
2176 this change after POINT - 1. Set NUM_PENDING_BIRTHS to the number of
2177 registers that will be born in the range [model_curr_point, POINT). */
2178 num_uses = 0;
2179 num_pending_births = 0;
2180 for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
2181 {
2182 new_last = model_last_use_except (use);
2183 if (new_last < point)
2184 {
2185 gcc_assert (num_uses < ARRAY_SIZE (uses));
2186 uses[num_uses].last_use = new_last;
2187 uses[num_uses].regno = use->regno;
2188 /* This register is no longer live after POINT - 1. */
2189 mark_regno_birth_or_death (NULL, delta, use->regno, false);
2190 num_uses++;
2191 if (new_last >= 0)
2192 num_pending_births++;
2193 }
2194 }
2196 /* Update the MODEL_REF_PRESSURE and MODEL_MAX_PRESSURE for POINT.
2197 Also set each group pressure limit for POINT. */
2198 for (pci = 0; pci < ira_pressure_classes_num; pci++)
2200 cl = ira_pressure_classes[pci];
2201 model_start_update_pressure (&model_before_pressure,
2202 point, pci, delta[cl]);
2205 /* Walk the model schedule backwards, starting immediately before POINT. */
2206 print_p = false;
2207 if (point != model_curr_point)
2208 do
2209 {
2210 point--;
2211 insn = MODEL_INSN (point);
2212 queue = QUEUE_INDEX (insn);
2214 if (queue != QUEUE_SCHEDULED)
2215 {
2216 /* DELTA describes the effect of the move on the register pressure
2217 after POINT. Make it describe the effect on the pressure
2218 before POINT. */
2219 i = 0;
2220 while (i < num_uses)
2221 {
2222 if (uses[i].last_use == point)
2223 {
2224 /* This register is now live again. */
2225 mark_regno_birth_or_death (NULL, delta,
2226 uses[i].regno, true);
2228 /* Remove this use from the array. */
2229 uses[i] = uses[num_uses - 1];
2230 num_uses--;
2231 num_pending_births--;
2232 }
2233 else
2234 i++;
2235 }
2237 if (sched_verbose >= 5)
2238 {
2239 if (!print_p)
2240 {
2241 fprintf (sched_dump, MODEL_BAR);
2242 fprintf (sched_dump, ";;\t\t| New pressure for model"
2243 " schedule\n");
2244 fprintf (sched_dump, MODEL_BAR);
2245 print_p = true;
2246 }
2248 fprintf (sched_dump, ";;\t\t| %3d %4d %-30s ",
2249 point, INSN_UID (insn),
2250 str_pattern_slim (PATTERN (insn)));
2251 for (pci = 0; pci < ira_pressure_classes_num; pci++)
2252 {
2253 cl = ira_pressure_classes[pci];
2254 ref_pressure = MODEL_REF_PRESSURE (&model_before_pressure,
2255 point, pci);
2256 fprintf (sched_dump, " %s:[%d->%d]",
2257 reg_class_names[ira_pressure_classes[pci]],
2258 ref_pressure, ref_pressure + delta[cl]);
2259 }
2260 fprintf (sched_dump, "\n");
2261 }
2262 }
2264 /* Adjust the pressure at POINT. Set MIX to nonzero if POINT - 1
2265 might have changed as well. */
2266 mix = num_pending_births;
2267 for (pci = 0; pci < ira_pressure_classes_num; pci++)
2268 {
2269 cl = ira_pressure_classes[pci];
2270 mix |= delta[cl];
2271 mix |= model_update_pressure (&model_before_pressure,
2272 point, pci, delta[cl]);
2273 }
2274 }
2275 while (mix && point > model_curr_point);
2277 if (print_p)
2278 fprintf (sched_dump, MODEL_BAR);
2279 }
2281 /* After DEP, which was cancelled, has been resolved for insn NEXT,
2282 check whether the insn's pattern needs restoring. */
2283 static bool
2284 must_restore_pattern_p (rtx_insn *next, dep_t dep)
2285 {
2286 if (QUEUE_INDEX (next) == QUEUE_SCHEDULED)
2287 return false;
2289 if (DEP_TYPE (dep) == REG_DEP_CONTROL)
2290 {
2291 gcc_assert (ORIG_PAT (next) != NULL_RTX);
2292 gcc_assert (next == DEP_CON (dep));
2293 }
2294 else
2295 {
2296 struct dep_replacement *desc = DEP_REPLACE (dep);
2297 if (desc->insn != next)
2298 {
2299 gcc_assert (*desc->loc == desc->orig);
2300 return false;
2301 }
2302 }
2303 return true;
2304 }
2306 /* model_spill_cost (CL, P, P') returns the cost of increasing the
2307 pressure on CL from P to P'. We use this to calculate a "base ECC",
2308 baseECC (CL, X), for each pressure class CL and each instruction X.
2309 Supposing X changes the pressure on CL from P to P', and that the
2310 maximum pressure on CL in the current model schedule is MP', then:
2312 * if X occurs before or at the next point of maximum pressure in
2313 the model schedule and P' > MP', then:
2315 baseECC (CL, X) = model_spill_cost (CL, MP, P')
2317 The idea is that the pressure after scheduling a fixed set of
2318 instructions -- in this case, the set up to and including the
2319 next maximum pressure point -- is going to be the same regardless
2320 of the order; we simply want to keep the intermediate pressure
2321 under control. Thus X has a cost of zero unless scheduling it
2322 now would exceed MP'.
2324 If all increases in the set are by the same amount, no zero-cost
2325 instruction will ever cause the pressure to exceed MP'. However,
2326 if X is instead moved past an instruction X' with pressure in the
2327 range (MP' - (P' - P), MP'), the pressure at X' will increase
2328 beyond MP'. Since baseECC is very much a heuristic anyway,
2329 it doesn't seem worth the overhead of tracking cases like these.
2331 The cost of exceeding MP' is always based on the original maximum
2332 pressure MP. This is so that going 2 registers over the original
2333 limit has the same cost regardless of whether it comes from two
2334 separate +1 deltas or from a single +2 delta.
2336 * if X occurs after the next point of maximum pressure in the model
2337 schedule and P' > P, then:
2339 baseECC (CL, X) = model_spill_cost (CL, MP, MP' + (P' - P))
2341 That is, if we move X forward across a point of maximum pressure,
2342 and if X increases the pressure by P' - P, then we conservatively
2343 assume that scheduling X next would increase the maximum pressure
2344 by P' - P. Again, the cost of doing this is based on the original
2345 maximum pressure MP, for the same reason as above.
2347 * if P' < P, P > MP, and X occurs at or after the next point of
2348 maximum pressure, then:
2350 baseECC (CL, X) = -model_spill_cost (CL, MAX (MP, P'), P)
2352 That is, if we have already exceeded the original maximum pressure MP,
2353 and if X might reduce the maximum pressure again -- or at least push
2354 it further back, and thus allow more scheduling freedom -- it is given
2355 a negative cost to reflect the improvement.
2357 * otherwise,
2359 baseECC (CL, X) = 0
2361 In this case, X is not expected to affect the maximum pressure MP',
2362 so it has zero cost.
2364 We then create a combined value baseECC (X) that is the sum of
2365 baseECC (CL, X) for each pressure class CL.
2367 baseECC (X) could itself be used as the ECC value described above.
2368 However, this is often too conservative, in the sense that it
2369 tends to make high-priority instructions that increase pressure
2370 wait too long in cases where introducing a spill would be better.
2371 For this reason the final ECC is a priority-adjusted form of
2372 baseECC (X). Specifically, we calculate:
2374 P (X) = INSN_PRIORITY (X) - insn_delay (X) - baseECC (X)
2375 baseP = MAX { P (X) | baseECC (X) <= 0 }
2377 Then:
2379 ECC (X) = MAX (MIN (baseP - P (X), baseECC (X)), 0)
2381 Thus an instruction's effect on pressure is ignored if it has a high
2382 enough priority relative to the ones that don't increase pressure.
2383 Negative values of baseECC (X) do not increase the priority of X
2384 itself, but they do make it harder for other instructions to
2385 increase the pressure further.
2387 This pressure cost is deliberately timid. The intention has been
2388 to choose a heuristic that rarely interferes with the normal list
2389 scheduler in cases where that scheduler would produce good code.
2390 We simply want to curb some of its worst excesses. */
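/* [Editorial worked example with invented numbers.] Suppose
   baseECC (X) = 2, INSN_PRIORITY (X) = 10 and insn_delay (X) = 0, so
   P (X) = 10 - 0 - 2 = 8. If the best instruction with baseECC <= 0
   has P = 9, then baseP = 9 and:

     ECC (X) = MAX (MIN (baseP - P (X), baseECC (X)), 0)
             = MAX (MIN (9 - 8, 2), 0)
             = 1

   X pays only part of its pressure cost because its priority is close
   to baseP; with P (X) >= 9 the cost would have been ignored entirely.  */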
2392 /* Return the cost of increasing the pressure in class CL from FROM to TO.
2394 Here we use the very simplistic cost model that every register above
2395 sched_class_regs_num[CL] has a spill cost of 1. We could use other
2396 measures instead, such as one based on MEMORY_MOVE_COST. However:
2398 (1) In order for an instruction to be scheduled, the higher cost
2399 would need to be justified in a single saving of that many stalls.
2400 This is overly pessimistic, because the benefit of spilling is
2401 often to avoid a sequence of several short stalls rather than
2402 a single long one.
2404 (2) The cost is still arbitrary. Because we are not allocating
2405 registers during scheduling, we have no way of knowing for
2406 sure how many memory accesses will be required by each spill,
2407 where the spills will be placed within the block, or even
2408 which block(s) will contain the spills.
2410 So a higher cost than 1 is often too conservative in practice,
2411 forcing blocks to contain unnecessary stalls instead of spill code.
2412 The simple cost below seems to be the best compromise. It reduces
2413 the interference with the normal list scheduler, which helps make
2414 it more suitable for a default-on option. */
2416 static int
2417 model_spill_cost (int cl, int from, int to)
2418 {
2419 from = MAX (from, sched_class_regs_num[cl]);
2420 return MAX (to, from) - from;
2421 }
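/* [Editorial worked example with invented numbers.] With
   sched_class_regs_num[cl] == 16:

     model_spill_cost (cl, 14, 18)
       => from = MAX (14, 16) = 16
       => MAX (18, 16) - 16 = 2

   Only the two registers above the class limit are charged; an increase
   that stays at or below 16 costs nothing.  */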
2423 /* Return baseECC (ira_pressure_classes[PCI], POINT), given that
2424 P = curr_reg_pressure[ira_pressure_classes[PCI]] and that
2425 P' = P + DELTA. */
2427 static int
2428 model_excess_group_cost (struct model_pressure_group *group,
2429 int point, int pci, int delta)
2430 {
2431 int pressure, cl;
2433 cl = ira_pressure_classes[pci];
2434 if (delta < 0 && point >= group->limits[pci].point)
2435 {
2436 pressure = MAX (group->limits[pci].orig_pressure,
2437 curr_reg_pressure[cl] + delta);
2438 return -model_spill_cost (cl, pressure, curr_reg_pressure[cl]);
2439 }
2441 if (delta > 0)
2442 {
2443 if (point > group->limits[pci].point)
2444 pressure = group->limits[pci].pressure + delta;
2445 else
2446 pressure = curr_reg_pressure[cl] + delta;
2448 if (pressure > group->limits[pci].pressure)
2449 return model_spill_cost (cl, group->limits[pci].orig_pressure,
2450 pressure);
2451 }
2453 return 0;
2454 }
2456 /* Return baseECC (MODEL_INSN (INSN)). Dump the costs to sched_dump
2457 if PRINT_P. */
2459 static int
2460 model_excess_cost (rtx_insn *insn, bool print_p)
2461 {
2462 int point, pci, cl, cost, this_cost, delta;
2463 struct reg_pressure_data *insn_reg_pressure;
2464 int insn_death[N_REG_CLASSES];
2466 calculate_reg_deaths (insn, insn_death);
2467 point = model_index (insn);
2468 insn_reg_pressure = INSN_REG_PRESSURE (insn);
2469 cost = 0;
2471 if (print_p)
2472 fprintf (sched_dump, ";;\t\t| %3d %4d | %4d %+3d |", point,
2473 INSN_UID (insn), INSN_PRIORITY (insn), insn_delay (insn));
2475 /* Sum up the individual costs for each register class. */
2476 for (pci = 0; pci < ira_pressure_classes_num; pci++)
2477 {
2478 cl = ira_pressure_classes[pci];
2479 delta = insn_reg_pressure[pci].set_increase - insn_death[cl];
2480 this_cost = model_excess_group_cost (&model_before_pressure,
2481 point, pci, delta);
2482 cost += this_cost;
2483 if (print_p)
2484 fprintf (sched_dump, " %s:[%d base cost %d]",
2485 reg_class_names[cl], delta, this_cost);
2486 }
2488 if (print_p)
2489 fprintf (sched_dump, "\n");
2491 return cost;
2492 }
2494 /* Dump the next points of maximum pressure for GROUP. */
2496 static void
2497 model_dump_pressure_points (struct model_pressure_group *group)
2498 {
2499 int pci, cl;
2501 fprintf (sched_dump, ";;\t\t| pressure points");
2502 for (pci = 0; pci < ira_pressure_classes_num; pci++)
2503 {
2504 cl = ira_pressure_classes[pci];
2505 fprintf (sched_dump, " %s:[%d->%d at ", reg_class_names[cl],
2506 curr_reg_pressure[cl], group->limits[pci].pressure);
2507 if (group->limits[pci].point < model_num_insns)
2508 fprintf (sched_dump, "%d:%d]", group->limits[pci].point,
2509 INSN_UID (MODEL_INSN (group->limits[pci].point)));
2510 else
2511 fprintf (sched_dump, "end]");
2512 }
2513 fprintf (sched_dump, "\n");
2514 }
2516 /* Set INSN_REG_PRESSURE_EXCESS_COST_CHANGE for INSNS[0...COUNT-1]. */
2518 static void
2519 model_set_excess_costs (rtx_insn **insns, int count)
2520 {
2521 int i, cost, priority_base, priority;
2522 bool print_p;
2524 /* Record the baseECC value for each instruction in the model schedule,
2525 except that negative costs are converted to zero ones now rather than
2526 later. Do not assign a cost to debug instructions, since they must
2527 not change code-generation decisions. Experiments suggest we also
2528 get better results by not assigning a cost to instructions from
2529 a different block.
2531 Set PRIORITY_BASE to baseP in the block comment above. This is the
2532 maximum priority of the "cheap" instructions, which should always
2533 include the next model instruction. */
2534 priority_base = 0;
2535 print_p = false;
2536 for (i = 0; i < count; i++)
2537 if (INSN_MODEL_INDEX (insns[i]))
2538 {
2539 if (sched_verbose >= 6 && !print_p)
2540 {
2541 fprintf (sched_dump, MODEL_BAR);
2542 fprintf (sched_dump, ";;\t\t| Pressure costs for ready queue\n");
2543 model_dump_pressure_points (&model_before_pressure);
2544 fprintf (sched_dump, MODEL_BAR);
2545 print_p = true;
2546 }
2547 cost = model_excess_cost (insns[i], print_p);
2548 if (cost <= 0)
2549 {
2550 priority = INSN_PRIORITY (insns[i]) - insn_delay (insns[i]) - cost;
2551 priority_base = MAX (priority_base, priority);
2552 cost = 0;
2553 }
2554 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insns[i]) = cost;
2555 }
2556 if (print_p)
2557 fprintf (sched_dump, MODEL_BAR);
2559 /* Use MAX (baseECC, 0) and baseP to calculate ECC for each
2560 instruction. */
2561 for (i = 0; i < count; i++)
2562 {
2563 cost = INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insns[i]);
2564 priority = INSN_PRIORITY (insns[i]) - insn_delay (insns[i]);
2565 if (cost > 0 && priority > priority_base)
2566 {
2567 cost += priority_base - priority;
2568 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insns[i]) = MAX (cost, 0);
2569 }
2570 }
2571 }
2574 /* Enum of rank_for_schedule heuristic decisions. */
2575 enum rfs_decision {
2576 RFS_LIVE_RANGE_SHRINK1, RFS_LIVE_RANGE_SHRINK2,
2577 RFS_SCHED_GROUP, RFS_PRESSURE_DELAY, RFS_PRESSURE_TICK,
2578 RFS_FEEDS_BACKTRACK_INSN, RFS_PRIORITY, RFS_SPECULATION,
2579 RFS_SCHED_RANK, RFS_LAST_INSN, RFS_PRESSURE_INDEX,
2580 RFS_DEP_COUNT, RFS_TIE, RFS_FUSION, RFS_N };
2582 /* Corresponding strings for print outs. */
2583 static const char *rfs_str[RFS_N] = {
2584 "RFS_LIVE_RANGE_SHRINK1", "RFS_LIVE_RANGE_SHRINK2",
2585 "RFS_SCHED_GROUP", "RFS_PRESSURE_DELAY", "RFS_PRESSURE_TICK",
2586 "RFS_FEEDS_BACKTRACK_INSN", "RFS_PRIORITY", "RFS_SPECULATION",
2587 "RFS_SCHED_RANK", "RFS_LAST_INSN", "RFS_PRESSURE_INDEX",
2588 "RFS_DEP_COUNT", "RFS_TIE", "RFS_FUSION" };
2590 /* Statistical breakdown of rank_for_schedule decisions. */
2591 typedef struct { unsigned stats[RFS_N]; } rank_for_schedule_stats_t;
2592 static rank_for_schedule_stats_t rank_for_schedule_stats;
2594 /* Return the result of comparing insns TMP and TMP2 and update
2595 Rank_For_Schedule statistics. */
2596 static int
2597 rfs_result (enum rfs_decision decision, int result, rtx tmp, rtx tmp2)
2598 {
2599 ++rank_for_schedule_stats.stats[decision];
2600 if (result < 0)
2601 INSN_LAST_RFS_WIN (tmp) = decision;
2602 else if (result > 0)
2603 INSN_LAST_RFS_WIN (tmp2) = decision;
2604 else
2605 gcc_unreachable ();
2606 return result;
2607 }
2609 /* Sorting predicate to move DEBUG_INSNs to the top of ready list, while
2610 keeping normal insns in original order. */
2612 static int
2613 rank_for_schedule_debug (const void *x, const void *y)
2614 {
2615 rtx_insn *tmp = *(rtx_insn * const *) y;
2616 rtx_insn *tmp2 = *(rtx_insn * const *) x;
2618 /* Schedule debug insns as early as possible. */
2619 if (DEBUG_INSN_P (tmp) && !DEBUG_INSN_P (tmp2))
2620 return -1;
2621 else if (!DEBUG_INSN_P (tmp) && DEBUG_INSN_P (tmp2))
2622 return 1;
2623 else if (DEBUG_INSN_P (tmp) && DEBUG_INSN_P (tmp2))
2624 return INSN_LUID (tmp) - INSN_LUID (tmp2);
2625 else
2626 return INSN_RFS_DEBUG_ORIG_ORDER (tmp2) - INSN_RFS_DEBUG_ORIG_ORDER (tmp);
2627 }
2629 /* Returns a positive value if x is preferred; returns a negative value if
2630 y is preferred. Should never return 0, since that will make the sort
2631 unstable. */
2633 static int
2634 rank_for_schedule (const void *x, const void *y)
2635 {
2636 rtx_insn *tmp = *(rtx_insn * const *) y;
2637 rtx_insn *tmp2 = *(rtx_insn * const *) x;
2638 int tmp_class, tmp2_class;
2639 int val, priority_val, info_val, diff;
2641 if (live_range_shrinkage_p)
2642 {
2643 /* Don't use SCHED_PRESSURE_MODEL -- it results in much worse
2644 code. */
2645 gcc_assert (sched_pressure == SCHED_PRESSURE_WEIGHTED);
2646 if ((INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp) < 0
2647 || INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2) < 0)
2648 && (diff = (INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp)
2649 - INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2))) != 0)
2650 return rfs_result (RFS_LIVE_RANGE_SHRINK1, diff, tmp, tmp2);
2651 /* Sort by INSN_LUID (original insn order), so that we make the
2652 sort stable. This minimizes instruction movement, thus
2653 minimizing sched's effect on debugging and cross-jumping. */
2654 return rfs_result (RFS_LIVE_RANGE_SHRINK2,
2655 INSN_LUID (tmp) - INSN_LUID (tmp2), tmp, tmp2);
2656 }
2658 /* The insn in a schedule group should be issued first. */
2659 if (flag_sched_group_heuristic &&
2660 SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
2661 return rfs_result (RFS_SCHED_GROUP, SCHED_GROUP_P (tmp2) ? 1 : -1,
2662 tmp, tmp2);
2664 /* Make sure that priority of TMP and TMP2 are initialized. */
2665 gcc_assert (INSN_PRIORITY_KNOWN (tmp) && INSN_PRIORITY_KNOWN (tmp2));
2667 if (sched_fusion)
2668 {
2669 /* The instruction that has the same fusion priority as the last
2670 instruction is the instruction we picked next. If that is not
2671 the case, we sort ready list firstly by fusion priority, then
2672 by priority, and at last by INSN_LUID. */
2673 int a = INSN_FUSION_PRIORITY (tmp);
2674 int b = INSN_FUSION_PRIORITY (tmp2);
2675 int last = -1;
2677 if (last_nondebug_scheduled_insn
2678 && !NOTE_P (last_nondebug_scheduled_insn)
2679 && BLOCK_FOR_INSN (tmp)
2680 == BLOCK_FOR_INSN (last_nondebug_scheduled_insn))
2681 last = INSN_FUSION_PRIORITY (last_nondebug_scheduled_insn);
2683 if (a != last && b != last)
2684 {
2685 if (a == b)
2686 {
2687 a = INSN_PRIORITY (tmp);
2688 b = INSN_PRIORITY (tmp2);
2689 }
2690 if (a != b)
2691 return rfs_result (RFS_FUSION, b - a, tmp, tmp2);
2692 else
2693 return rfs_result (RFS_FUSION,
2694 INSN_LUID (tmp) - INSN_LUID (tmp2), tmp, tmp2);
2695 }
2696 else if (a == b)
2697 {
2698 gcc_assert (last_nondebug_scheduled_insn
2699 && !NOTE_P (last_nondebug_scheduled_insn));
2700 last = INSN_PRIORITY (last_nondebug_scheduled_insn);
2702 a = abs (INSN_PRIORITY (tmp) - last);
2703 b = abs (INSN_PRIORITY (tmp2) - last);
2704 if (a != b)
2705 return rfs_result (RFS_FUSION, a - b, tmp, tmp2);
2706 else
2707 return rfs_result (RFS_FUSION,
2708 INSN_LUID (tmp) - INSN_LUID (tmp2), tmp, tmp2);
2709 }
2710 else if (a == last)
2711 return rfs_result (RFS_FUSION, -1, tmp, tmp2);
2712 else
2713 return rfs_result (RFS_FUSION, 1, tmp, tmp2);
2714 }
2716 if (sched_pressure != SCHED_PRESSURE_NONE)
2717 {
2718 /* Prefer insn whose scheduling results in the smallest register
2719 pressure excess. */
2720 if ((diff = (INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp)
2721 + insn_delay (tmp)
2722 - INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2)
2723 - insn_delay (tmp2))))
2724 return rfs_result (RFS_PRESSURE_DELAY, diff, tmp, tmp2);
2725 }
2727 if (sched_pressure != SCHED_PRESSURE_NONE
2728 && (INSN_TICK (tmp2) > clock_var || INSN_TICK (tmp) > clock_var)
2729 && INSN_TICK (tmp2) != INSN_TICK (tmp))
2730 {
2731 diff = INSN_TICK (tmp) - INSN_TICK (tmp2);
2732 return rfs_result (RFS_PRESSURE_TICK, diff, tmp, tmp2);
2733 }
2735 /* If we are doing backtracking in this schedule, prefer insns that
2736 have forward dependencies with negative cost against an insn that
2737 was already scheduled. */
2738 if (current_sched_info->flags & DO_BACKTRACKING)
2739 {
2740 priority_val = FEEDS_BACKTRACK_INSN (tmp2) - FEEDS_BACKTRACK_INSN (tmp);
2741 if (priority_val)
2742 return rfs_result (RFS_FEEDS_BACKTRACK_INSN, priority_val, tmp, tmp2);
2743 }
2745 /* Prefer insn with higher priority. */
2746 priority_val = INSN_PRIORITY (tmp2) - INSN_PRIORITY (tmp);
2748 if (flag_sched_critical_path_heuristic && priority_val)
2749 return rfs_result (RFS_PRIORITY, priority_val, tmp, tmp2);
2751 if (PARAM_VALUE (PARAM_SCHED_AUTOPREF_QUEUE_DEPTH) >= 0)
2752 {
2753 int autopref = autopref_rank_for_schedule (tmp, tmp2);
2754 if (autopref != 0)
2755 return autopref;
2756 }
2758 /* Prefer speculative insn with greater dependencies weakness. */
2759 if (flag_sched_spec_insn_heuristic && spec_info)
2760 {
2761 ds_t ds1, ds2;
2762 dw_t dw1, dw2;
2763 int dw;
2765 ds1 = TODO_SPEC (tmp) & SPECULATIVE;
2766 if (ds1)
2767 dw1 = ds_weak (ds1);
2768 else
2769 dw1 = NO_DEP_WEAK;
2771 ds2 = TODO_SPEC (tmp2) & SPECULATIVE;
2772 if (ds2)
2773 dw2 = ds_weak (ds2);
2774 else
2775 dw2 = NO_DEP_WEAK;
2777 dw = dw2 - dw1;
2778 if (dw > (NO_DEP_WEAK / 8) || dw < -(NO_DEP_WEAK / 8))
2779 return rfs_result (RFS_SPECULATION, dw, tmp, tmp2);
2780 }
2782 info_val = (*current_sched_info->rank) (tmp, tmp2);
2783 if (flag_sched_rank_heuristic && info_val)
2784 return rfs_result (RFS_SCHED_RANK, info_val, tmp, tmp2);
2786 /* Compare insns based on their relation to the last scheduled
2787 non-debug insn. */
2788 if (flag_sched_last_insn_heuristic && last_nondebug_scheduled_insn)
2789 {
2790 dep_t dep1;
2791 dep_t dep2;
2792 rtx last = last_nondebug_scheduled_insn;
2794 /* Classify the instructions into three classes:
2795 1) Data dependent on last scheduled insn.
2796 2) Anti/Output dependent on last scheduled insn.
2797 3) Independent of last scheduled insn, or has latency of one.
2798 Choose the insn from the highest numbered class if different. */
2799 dep1 = sd_find_dep_between (last, tmp, true);
2801 if (dep1 == NULL || dep_cost (dep1) == 1)
2802 tmp_class = 3;
2803 else if (/* Data dependence. */
2804 DEP_TYPE (dep1) == REG_DEP_TRUE)
2805 tmp_class = 1;
2806 else
2807 tmp_class = 2;
2809 dep2 = sd_find_dep_between (last, tmp2, true);
2811 if (dep2 == NULL || dep_cost (dep2) == 1)
2812 tmp2_class = 3;
2813 else if (/* Data dependence. */
2814 DEP_TYPE (dep2) == REG_DEP_TRUE)
2815 tmp2_class = 1;
2816 else
2817 tmp2_class = 2;
2819 if ((val = tmp2_class - tmp_class))
2820 return rfs_result (RFS_LAST_INSN, val, tmp, tmp2);
2821 }
2823 /* Prefer instructions that occur earlier in the model schedule. */
2824 if (sched_pressure == SCHED_PRESSURE_MODEL
2825 && INSN_BB (tmp) == target_bb && INSN_BB (tmp2) == target_bb)
2826 {
2827 diff = model_index (tmp) - model_index (tmp2);
2828 gcc_assert (diff != 0);
2829 return rfs_result (RFS_PRESSURE_INDEX, diff, tmp, tmp2);
2830 }
2832 /* Prefer the insn which has more later insns that depend on it.
2833 This gives the scheduler more freedom when scheduling later
2834 instructions at the expense of added register pressure. */
2836 val = (dep_list_size (tmp2, SD_LIST_FORW)
2837 - dep_list_size (tmp, SD_LIST_FORW));
2839 if (flag_sched_dep_count_heuristic && val != 0)
2840 return rfs_result (RFS_DEP_COUNT, val, tmp, tmp2);
2842 /* If insns are equally good, sort by INSN_LUID (original insn order),
2843 so that we make the sort stable. This minimizes instruction movement,
2844 thus minimizing sched's effect on debugging and cross-jumping. */
2845 return rfs_result (RFS_TIE, INSN_LUID (tmp) - INSN_LUID (tmp2), tmp, tmp2);
2846 }
2848 /* Resort the array A in which only element at index N may be out of order. */
2850 HAIFA_INLINE static void
2851 swap_sort (rtx_insn **a, int n)
2852 {
2853 rtx_insn *insn = a[n - 1];
2854 int i = n - 2;
2856 while (i >= 0 && rank_for_schedule (a + i, &insn) >= 0)
2857 {
2858 a[i + 1] = a[i];
2859 i -= 1;
2860 }
2861 a[i + 1] = insn;
2862 }
2864 /* Add INSN to the insn queue so that it can be executed at least
2865 N_CYCLES after the currently executing insn. Preserve insns
2866 chain for debugging purposes. REASON will be printed in debugging
2867 output. */
2869 HAIFA_INLINE static void
2870 queue_insn (rtx_insn *insn, int n_cycles, const char *reason)
2871 {
2872 int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
2873 rtx_insn_list *link = alloc_INSN_LIST (insn, insn_queue[next_q]);
2874 int new_tick;
2876 gcc_assert (n_cycles <= max_insn_queue_index);
2877 gcc_assert (!DEBUG_INSN_P (insn));
2879 insn_queue[next_q] = link;
2880 q_size += 1;
2882 if (sched_verbose >= 2)
2883 {
2884 fprintf (sched_dump, ";;\t\tReady-->Q: insn %s: ",
2885 (*current_sched_info->print_insn) (insn, 0));
2887 fprintf (sched_dump, "queued for %d cycles (%s).\n", n_cycles, reason);
2890 QUEUE_INDEX (insn) = next_q;
2892 if (current_sched_info->flags & DO_BACKTRACKING)
2893 {
2894 new_tick = clock_var + n_cycles;
2895 if (INSN_TICK (insn) == INVALID_TICK || INSN_TICK (insn) < new_tick)
2896 INSN_TICK (insn) = new_tick;
2898 if (INSN_EXACT_TICK (insn) != INVALID_TICK
2899 && INSN_EXACT_TICK (insn) < clock_var + n_cycles)
2900 {
2901 must_backtrack = true;
2902 if (sched_verbose >= 2)
2903 fprintf (sched_dump, ";;\t\tcausing a backtrack.\n");
2908 /* Remove INSN from queue. */
2909 static void
2910 queue_remove (rtx_insn *insn)
2911 {
2912 gcc_assert (QUEUE_INDEX (insn) >= 0);
2913 remove_free_INSN_LIST_elem (insn, &insn_queue[QUEUE_INDEX (insn)]);
2914 q_size -= 1;
2915 QUEUE_INDEX (insn) = QUEUE_NOWHERE;
2916 }
2918 /* Return a pointer to the bottom of the ready list, i.e. the insn
2919 with the lowest priority. */
2921 HAIFA_INLINE static rtx_insn **
2922 ready_lastpos (struct ready_list *ready)
2923 {
2924 gcc_assert (ready->n_ready >= 1);
2925 return ready->vec + ready->first - ready->n_ready + 1;
2926 }
2928 /* Add an element INSN to the ready list so that it ends up with the
2929 lowest/highest priority depending on FIRST_P. */
2931 HAIFA_INLINE static void
2932 ready_add (struct ready_list *ready, rtx_insn *insn, bool first_p)
2933 {
2934 if (!first_p)
2935 {
2936 if (ready->first == ready->n_ready)
2937 {
2938 memmove (ready->vec + ready->veclen - ready->n_ready,
2939 ready_lastpos (ready),
2940 ready->n_ready * sizeof (rtx));
2941 ready->first = ready->veclen - 1;
2942 }
2943 ready->vec[ready->first - ready->n_ready] = insn;
2944 }
2945 else
2946 {
2947 if (ready->first == ready->veclen - 1)
2948 {
2949 if (ready->n_ready)
2950 /* ready_lastpos() fails when called with (ready->n_ready == 0). */
2951 memmove (ready->vec + ready->veclen - ready->n_ready - 1,
2952 ready_lastpos (ready),
2953 ready->n_ready * sizeof (rtx));
2954 ready->first = ready->veclen - 2;
2955 }
2956 ready->vec[++(ready->first)] = insn;
2957 }
2959 ready->n_ready++;
2960 if (DEBUG_INSN_P (insn))
2961 ready->n_debug++;
2963 gcc_assert (QUEUE_INDEX (insn) != QUEUE_READY);
2964 QUEUE_INDEX (insn) = QUEUE_READY;
2966 if (INSN_EXACT_TICK (insn) != INVALID_TICK
2967 && INSN_EXACT_TICK (insn) < clock_var)
2968 {
2969 must_backtrack = true;
2970 }
2971 }
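/* [Editorial note.] The ready list stores its N_READY elements at
   decreasing addresses starting at VEC[FIRST], i.e. the highest-priority
   insn is VEC[FIRST] and the lowest is VEC[FIRST - N_READY + 1]:

     rtx_insn *highest = ready->vec[ready->first];
     rtx_insn **lowest = ready_lastpos (ready);

   ready_add therefore extends the list downwards for a low-priority insn
   and bumps FIRST for a high-priority one, falling back to memmove only
   when either end of VEC is reached.  */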
2973 /* Remove the element with the highest priority from the ready list and
2974 return it. */
2976 HAIFA_INLINE static rtx_insn *
2977 ready_remove_first (struct ready_list *ready)
2978 {
2979 rtx_insn *t;
2981 gcc_assert (ready->n_ready);
2982 t = ready->vec[ready->first--];
2983 ready->n_ready--;
2984 if (DEBUG_INSN_P (t))
2985 ready->n_debug--;
2986 /* If the queue becomes empty, reset it. */
2987 if (ready->n_ready == 0)
2988 ready->first = ready->veclen - 1;
2990 gcc_assert (QUEUE_INDEX (t) == QUEUE_READY);
2991 QUEUE_INDEX (t) = QUEUE_NOWHERE;
2993 return t;
2994 }
2996 /* The following code implements multi-pass scheduling for the first
2997 cycle. In other words, we will try to choose the ready insn that
2998 permits starting the maximum number of insns on the same cycle. */
3000 /* Return a pointer to the element INDEX from the ready list. INDEX for
3001 the insn with the highest priority is 0, and the lowest priority has
3002 N_READY - 1. */
3004 rtx_insn *
3005 ready_element (struct ready_list *ready, int index)
3006 {
3007 gcc_assert (ready->n_ready && index < ready->n_ready);
3009 return ready->vec[ready->first - index];
3010 }
3012 /* Remove the element INDEX from the ready list and return it. INDEX
3013 for the insn with the highest priority is 0, and the lowest priority
3014 has N_READY - 1. */
3016 HAIFA_INLINE static rtx_insn *
3017 ready_remove (struct ready_list *ready, int index)
3018 {
3019 rtx_insn *t;
3020 int i;
3022 if (index == 0)
3023 return ready_remove_first (ready);
3024 gcc_assert (ready->n_ready && index < ready->n_ready);
3025 t = ready->vec[ready->first - index];
3026 ready->n_ready--;
3027 if (DEBUG_INSN_P (t))
3028 ready->n_debug--;
3029 for (i = index; i < ready->n_ready; i++)
3030 ready->vec[ready->first - i] = ready->vec[ready->first - i - 1];
3031 QUEUE_INDEX (t) = QUEUE_NOWHERE;
3033 return t;
3034 }
3035 /* Remove INSN from the ready list. */
3036 void
3037 ready_remove_insn (rtx insn)
3038 {
3039 int i;
3041 for (i = 0; i < readyp->n_ready; i++)
3042 if (ready_element (readyp, i) == insn)
3043 {
3044 ready_remove (readyp, i);
3045 return;
3046 }
3047 gcc_unreachable ();
3048 }
3050 /* Calculate difference of two statistics set WAS and NOW.
3051 Result returned in WAS. */
3052 static void
3053 rank_for_schedule_stats_diff (rank_for_schedule_stats_t *was,
3054 const rank_for_schedule_stats_t *now)
3055 {
3056 for (int i = 0; i < RFS_N; ++i)
3057 was->stats[i] = now->stats[i] - was->stats[i];
3058 }
3060 /* Print rank_for_schedule statistics. */
3061 static void
3062 print_rank_for_schedule_stats (const char *prefix,
3063 const rank_for_schedule_stats_t *stats,
3064 struct ready_list *ready)
3065 {
3066 for (int i = 0; i < RFS_N; ++i)
3067 if (stats->stats[i])
3068 {
3069 fprintf (sched_dump, "%s%20s: %u", prefix, rfs_str[i], stats->stats[i]);
3071 if (ready != NULL)
3072 /* Print out insns that won due to RFS_<I>. */
3073 {
3074 rtx_insn **p = ready_lastpos (ready);
3076 fprintf (sched_dump, ":");
3077 /* Start with 1 since least-priority insn didn't have any wins. */
3078 for (int j = 1; j < ready->n_ready; ++j)
3079 if (INSN_LAST_RFS_WIN (p[j]) == i)
3080 fprintf (sched_dump, " %s",
3081 (*current_sched_info->print_insn) (p[j], 0));
3082 }
3083 fprintf (sched_dump, "\n");
3084 }
3085 }
3087 /* Separate DEBUG_INSNS from normal insns. DEBUG_INSNs go to the end
3088 of the ready list. */
3089 static void
3090 ready_sort_debug (struct ready_list *ready)
3091 {
3092 int i;
3093 rtx_insn **first = ready_lastpos (ready);
3095 for (i = 0; i < ready->n_ready; ++i)
3096 if (!DEBUG_INSN_P (first[i]))
3097 INSN_RFS_DEBUG_ORIG_ORDER (first[i]) = i;
3099 qsort (first, ready->n_ready, sizeof (rtx), rank_for_schedule_debug);
3100 }
3102 /* Sort non-debug insns in the ready list READY by ascending priority.
3103 Assumes that all debug insns are separated from the real insns. */
3104 static void
3105 ready_sort_real (struct ready_list *ready)
3106 {
3107 int i;
3108 rtx_insn **first = ready_lastpos (ready);
3109 int n_ready_real = ready->n_ready - ready->n_debug;
3111 if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
3112 for (i = 0; i < n_ready_real; ++i)
3113 setup_insn_reg_pressure_info (first[i]);
3114 else if (sched_pressure == SCHED_PRESSURE_MODEL
3115 && model_curr_point < model_num_insns)
3116 model_set_excess_costs (first, n_ready_real);
3118 rank_for_schedule_stats_t stats1;
3119 if (sched_verbose >= 4)
3120 stats1 = rank_for_schedule_stats;
3122 if (n_ready_real == 2)
3123 swap_sort (first, n_ready_real);
3124 else if (n_ready_real > 2)
3125 qsort (first, n_ready_real, sizeof (rtx), rank_for_schedule);
3127 if (sched_verbose >= 4)
3128 {
3129 rank_for_schedule_stats_diff (&stats1, &rank_for_schedule_stats);
3130 print_rank_for_schedule_stats (";;\t\t", &stats1, ready);
3131 }
3132 }
3134 /* Sort the ready list READY by ascending priority. */
3135 static void
3136 ready_sort (struct ready_list *ready)
3137 {
3138 if (ready->n_debug > 0)
3139 ready_sort_debug (ready);
3141 ready_sort_real (ready);
3142 }
3144 /* PREV is an insn that is ready to execute. Adjust its priority if that
3145 will help shorten or lengthen register lifetimes as appropriate. Also
3146 provide a hook for the target to tweak itself. */
3148 HAIFA_INLINE static void
3149 adjust_priority (rtx_insn *prev)
3150 {
3151 /* ??? There used to be code here to try and estimate how an insn
3152 affected register lifetimes, but it did it by looking at REG_DEAD
3153 notes, which we removed in schedule_region. Nor did it try to
3154 take into account register pressure or anything useful like that.
3156 Revisit when we have a machine model to work with and not before. */
3158 if (targetm.sched.adjust_priority)
3159 INSN_PRIORITY (prev) =
3160 targetm.sched.adjust_priority (prev, INSN_PRIORITY (prev));
3161 }
3163 /* Advance DFA state STATE on one cycle. */
3164 void
3165 advance_state (state_t state)
3166 {
3167 if (targetm.sched.dfa_pre_advance_cycle)
3168 targetm.sched.dfa_pre_advance_cycle ();
3170 if (targetm.sched.dfa_pre_cycle_insn)
3171 state_transition (state,
3172 targetm.sched.dfa_pre_cycle_insn ());
3174 state_transition (state, NULL);
3176 if (targetm.sched.dfa_post_cycle_insn)
3177 state_transition (state,
3178 targetm.sched.dfa_post_cycle_insn ());
3180 if (targetm.sched.dfa_post_advance_cycle)
3181 targetm.sched.dfa_post_advance_cycle ();
3182 }
3184 /* Advance time on one cycle. */
3185 HAIFA_INLINE static void
3186 advance_one_cycle (void)
3187 {
3188 advance_state (curr_state);
3189 if (sched_verbose >= 4)
3190 fprintf (sched_dump, ";;\tAdvance the current state.\n");
3193 /* Update register pressure after scheduling INSN. */
3194 static void
3195 update_register_pressure (rtx_insn *insn)
3196 {
3197 struct reg_use_data *use;
3198 struct reg_set_data *set;
3200 gcc_checking_assert (!DEBUG_INSN_P (insn));
3202 for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
3203 if (dying_use_p (use))
3204 mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure,
3205 use->regno, false);
3206 for (set = INSN_REG_SET_LIST (insn); set != NULL; set = set->next_insn_set)
3207 mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure,
3208 set->regno, true);
3209 }
3211 /* Set up or update (if UPDATE_P) max register pressure (see its
3212 meaning in sched-int.h::_haifa_insn_data) for all current BB insns
3213 after insn AFTER. */
3214 static void
3215 setup_insn_max_reg_pressure (rtx_insn *after, bool update_p)
3216 {
3217 int i, p;
3218 bool eq_p;
3219 rtx_insn *insn;
3220 static int max_reg_pressure[N_REG_CLASSES];
3222 save_reg_pressure ();
3223 for (i = 0; i < ira_pressure_classes_num; i++)
3224 max_reg_pressure[ira_pressure_classes[i]]
3225 = curr_reg_pressure[ira_pressure_classes[i]];
3226 for (insn = NEXT_INSN (after);
3227 insn != NULL_RTX && ! BARRIER_P (insn)
3228 && BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (after);
3229 insn = NEXT_INSN (insn))
3230 if (NONDEBUG_INSN_P (insn))
3231 {
3232 eq_p = true;
3233 for (i = 0; i < ira_pressure_classes_num; i++)
3234 {
3235 p = max_reg_pressure[ira_pressure_classes[i]];
3236 if (INSN_MAX_REG_PRESSURE (insn)[i] != p)
3237 {
3238 eq_p = false;
3239 INSN_MAX_REG_PRESSURE (insn)[i]
3240 = max_reg_pressure[ira_pressure_classes[i]];
3241 }
3242 }
3243 if (update_p && eq_p)
3244 break;
3245 update_register_pressure (insn);
3246 for (i = 0; i < ira_pressure_classes_num; i++)
3247 if (max_reg_pressure[ira_pressure_classes[i]]
3248 < curr_reg_pressure[ira_pressure_classes[i]])
3249 max_reg_pressure[ira_pressure_classes[i]]
3250 = curr_reg_pressure[ira_pressure_classes[i]];
3251 }
3252 restore_reg_pressure ();
3253 }
3255 /* Update the current register pressure after scheduling INSN. Update
3256 also max register pressure for unscheduled insns of the current
3257 BB. */
3258 static void
3259 update_reg_and_insn_max_reg_pressure (rtx_insn *insn)
3260 {
3261 int i;
3262 int before[N_REG_CLASSES];
3264 for (i = 0; i < ira_pressure_classes_num; i++)
3265 before[i] = curr_reg_pressure[ira_pressure_classes[i]];
3266 update_register_pressure (insn);
3267 for (i = 0; i < ira_pressure_classes_num; i++)
3268 if (curr_reg_pressure[ira_pressure_classes[i]] != before[i])
3269 break;
3270 if (i < ira_pressure_classes_num)
3271 setup_insn_max_reg_pressure (insn, true);
3272 }
3274 /* Set up register pressure at the beginning of basic block BB whose
3275 insns start after insn AFTER. Set up also max register pressure
3276 for all insns of the basic block. */
3277 void
3278 sched_setup_bb_reg_pressure_info (basic_block bb, rtx_insn *after)
3279 {
3280 gcc_assert (sched_pressure == SCHED_PRESSURE_WEIGHTED);
3281 initiate_bb_reg_pressure_info (bb);
3282 setup_insn_max_reg_pressure (after, false);
3283 }
3285 /* If doing predication while scheduling, verify whether INSN, which
3286 has just been scheduled, clobbers the conditions of any
3287 instructions that must be predicated in order to break their
3288 dependencies. If so, remove them from the queues so that they will
3289 only be scheduled once their control dependency is resolved. */
3291 static void
3292 check_clobbered_conditions (rtx insn)
3293 {
3294 HARD_REG_SET t;
3295 int i;
3297 if ((current_sched_info->flags & DO_PREDICATION) == 0)
3298 return;
3300 find_all_hard_reg_sets (insn, &t, true);
3302 restart:
3303 for (i = 0; i < ready.n_ready; i++)
3304 {
3305 rtx_insn *x = ready_element (&ready, i);
3306 if (TODO_SPEC (x) == DEP_CONTROL && cond_clobbered_p (x, t))
3307 {
3308 ready_remove_insn (x);
3309 goto restart;
3310 }
3311 }
3312 for (i = 0; i <= max_insn_queue_index; i++)
3313 {
3314 rtx_insn_list *link;
3315 int q = NEXT_Q_AFTER (q_ptr, i);
3317 restart_queue:
3318 for (link = insn_queue[q]; link; link = link->next ())
3319 {
3320 rtx_insn *x = link->insn ();
3321 if (TODO_SPEC (x) == DEP_CONTROL && cond_clobbered_p (x, t))
3322 {
3323 queue_remove (x);
3324 goto restart_queue;
3325 }
3326 }
3327 }
3328 }
3330 /* Return (in order):
3332 - positive if INSN adversely affects the pressure on one
3333 register class
3335 - negative if INSN reduces the pressure on one register class
3336 without increasing it on another
3337 - 0 if INSN doesn't affect the pressure on any register class. */
3339 static int
3340 model_classify_pressure (struct model_insn_info *insn)
3341 {
3342 struct reg_pressure_data *reg_pressure;
3343 int death[N_REG_CLASSES];
3344 int pci, cl, sum;
3346 calculate_reg_deaths (insn->insn, death);
3347 reg_pressure = INSN_REG_PRESSURE (insn->insn);
3348 sum = 0;
3349 for (pci = 0; pci < ira_pressure_classes_num; pci++)
3350 {
3351 cl = ira_pressure_classes[pci];
3352 if (death[cl] < reg_pressure[pci].set_increase)
3353 return 1;
3354 sum += reg_pressure[pci].set_increase - death[cl];
3355 }
3356 return sum;
3357 }
3359 /* Return true if INSN1 should come before INSN2 in the model schedule. */
3361 static bool
3362 model_order_p (struct model_insn_info *insn1, struct model_insn_info *insn2)
3363 {
3364 unsigned int height1, height2;
3365 unsigned int priority1, priority2;
3367 /* Prefer instructions with a higher model priority. */
3368 if (insn1->model_priority != insn2->model_priority)
3369 return insn1->model_priority > insn2->model_priority;
3371 /* Combine the length of the longest path of satisfied true dependencies
3372 that leads to each instruction (depth) with the length of the longest
3373 path of any dependencies that leads from the instruction (alap).
3374 Prefer instructions with the greatest combined length. If the combined
3375 lengths are equal, prefer instructions with the greatest depth.
3377 The idea is that, if we have a set S of "equal" instructions that each
3378 have ALAP value X, and we pick one such instruction I, any true-dependent
3379 successors of I that have ALAP value X - 1 should be preferred over S.
3380 This encourages the schedule to be "narrow" rather than "wide".
3381 However, if I is a low-priority instruction that we decided to
3382 schedule because of its model_classify_pressure, and if there
3383 is a set of higher-priority instructions T, the aforementioned
3384 successors of I should not have the edge over T. */
3385 height1 = insn1->depth + insn1->alap;
3386 height2 = insn2->depth + insn2->alap;
3387 if (height1 != height2)
3388 return height1 > height2;
3389 if (insn1->depth != insn2->depth)
3390 return insn1->depth > insn2->depth;
3392 /* We have no real preference between INSN1 and INSN2 as far as attempts
3393 to reduce pressure go. Prefer instructions with higher priorities. */
3394 priority1 = INSN_PRIORITY (insn1->insn);
3395 priority2 = INSN_PRIORITY (insn2->insn);
3396 if (priority1 != priority2)
3397 return priority1 > priority2;
3399 /* Use the original rtl sequence as a tie-breaker. */
3400 return insn1 < insn2;
3401 }
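/* [Editorial worked example with invented numbers.] Given equal
   model_priority values:

     insn1: depth = 4, alap = 3  =>  height = 7
     insn2: depth = 2, alap = 5  =>  height = 7

   The heights tie, so insn1 is preferred for its greater depth; only if
   depth also tied would INSN_PRIORITY, and finally the original rtl
   order, decide.  */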
3403 /* Add INSN to the model worklist immediately after PREV. Add it to the
3404 beginning of the list if PREV is null. */
3406 static void
3407 model_add_to_worklist_at (struct model_insn_info *insn,
3408 struct model_insn_info *prev)
3409 {
3410 gcc_assert (QUEUE_INDEX (insn->insn) == QUEUE_NOWHERE);
3411 QUEUE_INDEX (insn->insn) = QUEUE_READY;
3413 insn->prev = prev;
3414 if (prev)
3415 {
3416 insn->next = prev->next;
3417 prev->next = insn;
3418 }
3419 else
3420 {
3421 insn->next = model_worklist;
3422 model_worklist = insn;
3423 }
3424 if (insn->next)
3425 insn->next->prev = insn;
3426 }
3428 /* Remove INSN from the model worklist. */
3430 static void
3431 model_remove_from_worklist (struct model_insn_info *insn)
3432 {
3433 gcc_assert (QUEUE_INDEX (insn->insn) == QUEUE_READY);
3434 QUEUE_INDEX (insn->insn) = QUEUE_NOWHERE;
3436 if (insn->prev)
3437 insn->prev->next = insn->next;
3438 else
3439 model_worklist = insn->next;
3440 if (insn->next)
3441 insn->next->prev = insn->prev;
3442 }
3444 /* Add INSN to the model worklist. Start looking for a suitable position
3445 between neighbors PREV and NEXT, testing at most MAX_SCHED_READY_INSNS
3446 insns either side. A null PREV indicates the beginning of the list and
3447 a null NEXT indicates the end. */
3449 static void
3450 model_add_to_worklist (struct model_insn_info *insn,
3451 struct model_insn_info *prev,
3452 struct model_insn_info *next)
3453 {
3454 int count;
3456 count = MAX_SCHED_READY_INSNS;
3457 if (count > 0 && prev && model_order_p (insn, prev))
3458 do
3459 {
3460 count--;
3461 prev = prev->prev;
3462 }
3463 while (count > 0 && prev && model_order_p (insn, prev));
3464 else
3465 while (count > 0 && next && model_order_p (next, insn))
3466 {
3467 count--;
3468 prev = next;
3469 next = next->next;
3470 }
3471 model_add_to_worklist_at (insn, prev);
3472 }
3474 /* INSN may now have a higher priority (in the model_order_p sense)
3475 than before. Move it up the worklist if necessary. */
3477 static void
3478 model_promote_insn (struct model_insn_info *insn)
3479 {
3480 struct model_insn_info *prev;
3481 int count;
3483 prev = insn->prev;
3484 count = MAX_SCHED_READY_INSNS;
3485 while (count > 0 && prev && model_order_p (insn, prev))
3486 {
3487 count--;
3488 prev = prev->prev;
3489 }
3490 if (prev != insn->prev)
3491 {
3492 model_remove_from_worklist (insn);
3493 model_add_to_worklist_at (insn, prev);
3494 }
3495 }
3497 /* Add INSN to the end of the model schedule. */
3499 static void
3500 model_add_to_schedule (rtx_insn *insn)
3501 {
3502 unsigned int point;
3504 gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);
3505 QUEUE_INDEX (insn) = QUEUE_SCHEDULED;
3507 point = model_schedule.length ();
3508 model_schedule.quick_push (insn);
3509 INSN_MODEL_INDEX (insn) = point + 1;
3510 }
3512 /* Analyze the instructions that are to be scheduled, setting up
3513 MODEL_INSN_INFO (...) and model_num_insns accordingly. Add ready
3514 instructions to model_worklist. */
3516 static void
3517 model_analyze_insns (void)
3518 {
3519 rtx_insn *start, *end, *iter;
3520 sd_iterator_def sd_it;
3521 dep_t dep;
3522 struct model_insn_info *insn, *con;
3524 model_num_insns = 0;
3525 start = PREV_INSN (current_sched_info->next_tail);
3526 end = current_sched_info->prev_head;
3527 for (iter = start; iter != end; iter = PREV_INSN (iter))
3528 if (NONDEBUG_INSN_P (iter))
3529 {
3530 insn = MODEL_INSN_INFO (iter);
3531 insn->insn = iter;
3532 FOR_EACH_DEP (iter, SD_LIST_FORW, sd_it, dep)
3533 {
3534 con = MODEL_INSN_INFO (DEP_CON (dep));
3535 if (con->insn && insn->alap < con->alap + 1)
3536 insn->alap = con->alap + 1;
3537 }
3539 insn->old_queue = QUEUE_INDEX (iter);
3540 QUEUE_INDEX (iter) = QUEUE_NOWHERE;
3542 insn->unscheduled_preds = dep_list_size (iter, SD_LIST_HARD_BACK);
3543 if (insn->unscheduled_preds == 0)
3544 model_add_to_worklist (insn, NULL, model_worklist);
3546 model_num_insns++;
3547 }
3548 }
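/* [Editorial worked example.] For a dependence chain i1 -> i2 -> i3 the
   backward walk above visits i3 first, so:

     alap (i3) = 0, alap (i2) = 1, alap (i1) = 2

   and only i1, which has no hard backward dependencies, enters
   model_worklist at this point; i2 and i3 are added later as their
   predecessors are scheduled.  */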
3550 /* The global state describes the register pressure at the start of the
3551 model schedule. Initialize GROUP accordingly. */
3553 static void
3554 model_init_pressure_group (struct model_pressure_group *group)
3555 {
3556 int pci, cl;
3558 for (pci = 0; pci < ira_pressure_classes_num; pci++)
3559 {
3560 cl = ira_pressure_classes[pci];
3561 group->limits[pci].pressure = curr_reg_pressure[cl];
3562 group->limits[pci].point = 0;
3563 }
3564 /* Use index model_num_insns to record the state after the last
3565 instruction in the model schedule. */
3566 group->model = XNEWVEC (struct model_pressure_data,
3567 (model_num_insns + 1) * ira_pressure_classes_num);
3568 }
3570 /* Record that MODEL_REF_PRESSURE (GROUP, POINT, PCI) is PRESSURE.
3571 Update the maximum pressure for the whole schedule. */
3573 static void
3574 model_record_pressure (struct model_pressure_group *group,
3575 int point, int pci, int pressure)
3576 {
3577 MODEL_REF_PRESSURE (group, point, pci) = pressure;
3578 if (group->limits[pci].pressure < pressure)
3580 group->limits[pci].pressure = pressure;
3581 group->limits[pci].point = point;
3582 }
3583 }
3585 /* INSN has just been added to the end of the model schedule. Record its
3586 register-pressure information. */
3588 static void
3589 model_record_pressures (struct model_insn_info *insn)
3590 {
3591 struct reg_pressure_data *reg_pressure;
3592 int point, pci, cl, delta;
3593 int death[N_REG_CLASSES];
3595 point = model_index (insn->insn);
3596 if (sched_verbose >= 2)
3597 {
3598 if (point == 0)
3599 {
3600 fprintf (sched_dump, "\n;;\tModel schedule:\n;;\n");
3601 fprintf (sched_dump, ";;\t| idx insn | mpri hght dpth prio |\n");
3602 }
3603 fprintf (sched_dump, ";;\t| %3d %4d | %4d %4d %4d %4d | %-30s ",
3604 point, INSN_UID (insn->insn), insn->model_priority,
3605 insn->depth + insn->alap, insn->depth,
3606 INSN_PRIORITY (insn->insn),
3607 str_pattern_slim (PATTERN (insn->insn)));
3608 }
3609 calculate_reg_deaths (insn->insn, death);
3610 reg_pressure = INSN_REG_PRESSURE (insn->insn);
3611 for (pci = 0; pci < ira_pressure_classes_num; pci++)
3612 {
3613 cl = ira_pressure_classes[pci];
3614 delta = reg_pressure[pci].set_increase - death[cl];
3615 if (sched_verbose >= 2)
3616 fprintf (sched_dump, " %s:[%d,%+d]", reg_class_names[cl],
3617 curr_reg_pressure[cl], delta);
3618 model_record_pressure (&model_before_pressure, point, pci,
3619 curr_reg_pressure[cl]);
3620 }
3621 if (sched_verbose >= 2)
3622 fprintf (sched_dump, "\n");
3625 /* All instructions have been added to the model schedule. Record the
3626 final register pressure in GROUP and set up all MODEL_MAX_PRESSUREs. */
3628 static void
3629 model_record_final_pressures (struct model_pressure_group *group)
3630 {
3631 int point, pci, max_pressure, ref_pressure, cl;
3633 for (pci = 0; pci < ira_pressure_classes_num; pci++)
3634 {
3635 /* Record the final pressure for this class. */
3636 cl = ira_pressure_classes[pci];
3637 point = model_num_insns;
3638 ref_pressure = curr_reg_pressure[cl];
3639 model_record_pressure (group, point, pci, ref_pressure);
3641 /* Record the original maximum pressure. */
3642 group->limits[pci].orig_pressure = group->limits[pci].pressure;
3644 /* Update the MODEL_MAX_PRESSURE for every point of the schedule. */
3645 max_pressure = ref_pressure;
3646 MODEL_MAX_PRESSURE (group, point, pci) = max_pressure;
3647 while (point > 0)
3648 {
3649 point--;
3650 ref_pressure = MODEL_REF_PRESSURE (group, point, pci);
3651 max_pressure = MAX (max_pressure, ref_pressure);
3652 MODEL_MAX_PRESSURE (group, point, pci) = max_pressure;
3653 }
3654 }
3655 }
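/* [Editorial worked example with invented numbers.] If one class has
   reference pressures 2, 5, 3, 4 at points 0..3 and a final pressure of
   1 at point 4, the backward loop above fills in the suffix maxima:

     MODEL_MAX_PRESSURE: 5, 5, 4, 4, 1  (points 0..4)

   so MODEL_MAX_PRESSURE (group, point, pci) always gives the worst
   pressure at or after POINT.  */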
3657 /* Update all successors of INSN, given that INSN has just been scheduled. */
3659 static void
3660 model_add_successors_to_worklist (struct model_insn_info *insn)
3661 {
3662 sd_iterator_def sd_it;
3663 struct model_insn_info *con;
3664 dep_t dep;
3666 FOR_EACH_DEP (insn->insn, SD_LIST_FORW, sd_it, dep)
3667 {
3668 con = MODEL_INSN_INFO (DEP_CON (dep));
3669 /* Ignore debug instructions, and instructions from other blocks. */
3670 if (con->insn)
3671 {
3672 con->unscheduled_preds--;
3674 /* Update the depth field of each true-dependent successor.
3675 Increasing the depth gives them a higher priority than
3676 before. */
3677 if (DEP_TYPE (dep) == REG_DEP_TRUE && con->depth < insn->depth + 1)
3678 {
3679 con->depth = insn->depth + 1;
3680 if (QUEUE_INDEX (con->insn) == QUEUE_READY)
3681 model_promote_insn (con);
3682 }
3684 /* If this is a true dependency, or if there are no remaining
3685 dependencies for CON (meaning that CON only had non-true
3686 dependencies), make sure that CON is on the worklist.
3687 We don't bother otherwise because it would tend to fill the
3688 worklist with a lot of low-priority instructions that are not
3689 yet ready to issue. */
3690 if ((con->depth > 0 || con->unscheduled_preds == 0)
3691 && QUEUE_INDEX (con->insn) == QUEUE_NOWHERE)
3692 model_add_to_worklist (con, insn, insn->next);
3693 }
3694 }
3695 }
3697 /* Give INSN a higher priority than any current instruction, then give
3698 unscheduled predecessors of INSN a higher priority still. If any of
3699 those predecessors are not on the model worklist, do the same for its
3700 predecessors, and so on. */
3702 static void
3703 model_promote_predecessors (struct model_insn_info *insn)
3704 {
3705 struct model_insn_info *pro, *first;
3706 sd_iterator_def sd_it;
3707 dep_t dep;
3709 if (sched_verbose >= 7)
3710 fprintf (sched_dump, ";;\t+--- priority of %d = %d, priority of",
3711 INSN_UID (insn->insn), model_next_priority);
3712 insn->model_priority = model_next_priority++;
3713 model_remove_from_worklist (insn);
3714 model_add_to_worklist_at (insn, NULL);
3716 first = NULL;
3717 for (;;)
3718 {
3719 FOR_EACH_DEP (insn->insn, SD_LIST_HARD_BACK, sd_it, dep)
3720 {
3721 pro = MODEL_INSN_INFO (DEP_PRO (dep));
3722 /* The first test is to ignore debug instructions, and instructions
3723 from other blocks. */
3724 if (pro->insn
3725 && pro->model_priority != model_next_priority
3726 && QUEUE_INDEX (pro->insn) != QUEUE_SCHEDULED)
3727 {
3728 pro->model_priority = model_next_priority;
3729 if (sched_verbose >= 7)
3730 fprintf (sched_dump, " %d", INSN_UID (pro->insn));
3731 if (QUEUE_INDEX (pro->insn) == QUEUE_READY)
3732 {
3733 /* PRO is already in the worklist, but it now has
3734 a higher priority than before. Move it to the
3735 appropriate place. */
3736 model_remove_from_worklist (pro);
3737 model_add_to_worklist (pro, NULL, model_worklist);
3738 }
3739 else
3740 {
3741 /* PRO isn't in the worklist. Recursively process
3742 its predecessors until we find one that is. */
3743 pro->next = first;
3744 first = pro;
3745 }
3746 }
3747 }
3748 if (!first)
3749 break;
3750 insn = first;
3751 first = insn->next;
3752 }
3753 if (sched_verbose >= 7)
3754 fprintf (sched_dump, " = %d\n", model_next_priority);
3755 model_next_priority++;
3756 }
3758 /* Pick one instruction from model_worklist and process it. */
3760 static void
3761 model_choose_insn (void)
3762 {
3763 struct model_insn_info *insn, *fallback;
3764 int count;
3766 if (sched_verbose >= 7)
3767 {
3768 fprintf (sched_dump, ";;\t+--- worklist:\n");
3769 insn = model_worklist;
3770 count = MAX_SCHED_READY_INSNS;
3771 while (count > 0 && insn)
3772 {
3773 fprintf (sched_dump, ";;\t+--- %d [%d, %d, %d, %d]\n",
3774 INSN_UID (insn->insn), insn->model_priority,
3775 insn->depth + insn->alap, insn->depth,
3776 INSN_PRIORITY (insn->insn));
3777 count--;
3778 insn = insn->next;
3779 }
3780 }
3782 /* Look for a ready instruction whose model_classify_priority is zero
3783 or negative, picking the highest-priority one. Adding such an
3784 instruction to the schedule now should do no harm, and may actually
3785 help.
3787 Failing that, see whether there is an instruction with the highest
3788 extant model_priority that is not yet ready, but which would reduce
3789 pressure if it became ready. This is designed to catch cases like:
3791 (set (mem (reg R1)) (reg R2))
3793 where the instruction is the last remaining use of R1 and where the
3794 value of R2 is not yet available (or vice versa). The death of R1
3795 means that this instruction already reduces pressure. It is of
3796 course possible that the computation of R2 involves other registers
3797 that are hard to kill, but such cases are rare enough for this
3798 heuristic to be a win in general.
3800 Failing that, just pick the highest-priority instruction in the
3801 worklist. */
3802 count = MAX_SCHED_READY_INSNS;
3803 insn = model_worklist;
3804 fallback = 0;
3805 for (;;)
3806 {
3807 if (count == 0 || !insn)
3808 {
3809 insn = fallback ? fallback : model_worklist;
3810 break;
3811 }
3812 if (insn->unscheduled_preds)
3813 {
3814 if (model_worklist->model_priority == insn->model_priority
3815 && !fallback
3816 && model_classify_pressure (insn) < 0)
3817 fallback = insn;
3818 }
3819 else
3820 {
3821 if (model_classify_pressure (insn) <= 0)
3822 break;
3823 }
3824 count--;
3825 insn = insn->next;
3826 }
3828 if (sched_verbose >= 7 && insn != model_worklist)
3829 {
3830 if (insn->unscheduled_preds)
3831 fprintf (sched_dump, ";;\t+--- promoting insn %d, with dependencies\n",
3832 INSN_UID (insn->insn));
3834 fprintf (sched_dump, ";;\t+--- promoting insn %d, which is ready\n",
3835 INSN_UID (insn->insn));
3836 }
3837 if (insn->unscheduled_preds)
3838 /* INSN isn't yet ready to issue. Give all its predecessors the
3839 highest priority. */
3840 model_promote_predecessors (insn);
3841 else
3842 {
3843 /* INSN is ready. Add it to the end of model_schedule and
3844 process its successors. */
3845 model_add_successors_to_worklist (insn);
3846 model_remove_from_worklist (insn);
3847 model_add_to_schedule (insn->insn);
3848 model_record_pressures (insn);
3849 update_register_pressure (insn->insn);
3850 }
3851 }
3853 /* Restore all QUEUE_INDEXs to the values that they had before
3854 model_start_schedule was called. */
3856 static void
3857 model_reset_queue_indices (void)
3858 {
3859 unsigned int i;
3860 rtx_insn *insn;
3862 FOR_EACH_VEC_ELT (model_schedule, i, insn)
3863 QUEUE_INDEX (insn) = MODEL_INSN_INFO (insn)->old_queue;
3864 }
3866 /* We have calculated the model schedule and spill costs. Print a summary
3867 to sched_dump. */
3869 static void
3870 model_dump_pressure_summary (void)
3871 {
3872 int pci, cl;
3874 fprintf (sched_dump, ";; Pressure summary:");
3875 for (pci = 0; pci < ira_pressure_classes_num; pci++)
3877 cl = ira_pressure_classes[pci];
3878 fprintf (sched_dump, " %s:%d", reg_class_names[cl],
3879 model_before_pressure.limits[pci].pressure);
3880 }
3881 fprintf (sched_dump, "\n\n");
3882 }
3884 /* Initialize the SCHED_PRESSURE_MODEL information for the current
3885 scheduling region. */
3887 static void
3888 model_start_schedule (basic_block bb)
3889 {
3890 model_next_priority = 1;
3891 model_schedule.create (sched_max_luid);
3892 model_insns = XCNEWVEC (struct model_insn_info, sched_max_luid);
3894 gcc_assert (bb == BLOCK_FOR_INSN (NEXT_INSN (current_sched_info->prev_head)));
3895 initiate_reg_pressure_info (df_get_live_in (bb));
3897 model_analyze_insns ();
3898 model_init_pressure_group (&model_before_pressure);
3899 while (model_worklist)
3900 model_choose_insn ();
3901 gcc_assert (model_num_insns == (int) model_schedule.length ());
3902 if (sched_verbose >= 2)
3903 fprintf (sched_dump, "\n");
3905 model_record_final_pressures (&model_before_pressure);
3906 model_reset_queue_indices ();
3908 XDELETEVEC (model_insns);
3910 model_curr_point = 0;
3911 initiate_reg_pressure_info (df_get_live_in (bb));
3912 if (sched_verbose >= 1)
3913 model_dump_pressure_summary ();
3916 /* Free the information associated with GROUP. */
3919 model_finalize_pressure_group (struct model_pressure_group *group)
3921 XDELETEVEC (group->model);
3924 /* Free the information created by model_start_schedule. */
3927 model_end_schedule (void)
3929 model_finalize_pressure_group (&model_before_pressure);
3930 model_schedule.release ();
3933 /* Prepare reg pressure scheduling for basic block BB. */
3935 sched_pressure_start_bb (basic_block bb)
/* Set the number of available registers for each class taking into account
   the relative probability of the current basic block versus the function
   prologue and epilogue:
   * If the basic block executes much more often than the prologue/epilogue
   (e.g., inside a hot loop), then the cost of a spill in the prologue is
   close to nil, so the effective number of available registers is
   (ira_class_hard_regs_num[cl] - 0).
   * If the basic block executes as often as the prologue/epilogue, then a
   spill in the block is as costly as one in the prologue, so the effective
   number of available registers is
   (ira_class_hard_regs_num[cl] - call_used_regs_num[cl]).
   Note that, all else being equal, we prefer to spill in the prologue,
   since that leaves "extra" registers for the other basic blocks of the
   function.
   * If the basic block is on the cold path of the function and executes
   rarely, then we should always prefer to spill in the block rather than
   in the prologue/epilogue.  The effective number of available registers
   is again (ira_class_hard_regs_num[cl] - call_used_regs_num[cl]).  */
3956 int entry_freq = ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency;
3957 int bb_freq = bb->frequency;
3961 if (entry_freq == 0)
3962 entry_freq = bb_freq = 1;
3964 if (bb_freq < entry_freq)
3965 bb_freq = entry_freq;
3967 for (i = 0; i < ira_pressure_classes_num; ++i)
3969 enum reg_class cl = ira_pressure_classes[i];
3970 sched_class_regs_num[cl] = ira_class_hard_regs_num[cl];
3971 sched_class_regs_num[cl]
3972 -= (call_used_regs_num[cl] * entry_freq) / bb_freq;
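/* A worked example with made-up numbers: for a class CL with 8 hard
   registers, 4 of them call-used, entry_freq == 1000 and
   bb_freq == 4000, we keep 8 - (4 * 1000) / 4000 = 7 registers; at
   bb_freq == entry_freq this drops to 8 - 4 = 4, matching the two
   boundary cases described above.  */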
3976 if (sched_pressure == SCHED_PRESSURE_MODEL)
3977 model_start_schedule (bb);
3980 /* A structure that holds local state for the loop in schedule_block. */
3981 struct sched_block_state
3983 /* True if no real insns have been scheduled in the current cycle. */
3984 bool first_cycle_insn_p;
3985 /* True if a shadow insn has been scheduled in the current cycle, which
3986 means that no more normal insns can be issued. */
3987 bool shadows_only_p;
3988 /* True if we're winding down a modulo schedule, which means that we only
3989 issue insns with INSN_EXACT_TICK set. */
3990 bool modulo_epilogue;
3991 /* Initialized with the machine's issue rate every cycle, and updated
3992 by calls to the variable_issue hook. */
/* INSN is the "currently executing insn".  Launch each insn which was
   waiting on INSN.  The global ready list contains the insns that are
   ready to fire, and clock_var holds the current cycle.  The function
   returns the necessary cycle advance after issuing the insn (it is
   nonzero for insns in a schedule group).  */
4003 schedule_insn (rtx_insn *insn)
4005 sd_iterator_def sd_it;
4010 if (sched_verbose >= 1)
4012 struct reg_pressure_data *pressure_info;
4013 fprintf (sched_dump, ";;\t%3i--> %s %-40s:",
4014 clock_var, (*current_sched_info->print_insn) (insn, 1),
4015 str_pattern_slim (PATTERN (insn)));
4017 if (recog_memoized (insn) < 0)
4018 fprintf (sched_dump, "nothing");
4020 print_reservation (sched_dump, insn);
4021 pressure_info = INSN_REG_PRESSURE (insn);
4022 if (pressure_info != NULL)
4024 fputc (':', sched_dump);
4025 for (i = 0; i < ira_pressure_classes_num; i++)
4026 fprintf (sched_dump, "%s%s%+d(%d)",
4027 scheduled_insns.length () > 1
4029 < INSN_LUID (scheduled_insns[scheduled_insns.length () - 2]) ? "@" : "",
4030 reg_class_names[ira_pressure_classes[i]],
4031 pressure_info[i].set_increase, pressure_info[i].change);
4033 if (sched_pressure == SCHED_PRESSURE_MODEL
4034 && model_curr_point < model_num_insns
4035 && model_index (insn) == model_curr_point)
4036 fprintf (sched_dump, ":model %d", model_curr_point);
4037 fputc ('\n', sched_dump);
4040 if (sched_pressure == SCHED_PRESSURE_WEIGHTED && !DEBUG_INSN_P (insn))
4041 update_reg_and_insn_max_reg_pressure (insn);
/* The instruction being scheduled should have all its dependencies
   resolved and should have been removed from the ready list.  */
4045 gcc_assert (sd_lists_empty_p (insn, SD_LIST_HARD_BACK));
4047 /* Reset debug insns invalidated by moving this insn. */
4048 if (MAY_HAVE_DEBUG_INSNS && !DEBUG_INSN_P (insn))
4049 for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
4050 sd_iterator_cond (&sd_it, &dep);)
4052 rtx_insn *dbg = DEP_PRO (dep);
4053 struct reg_use_data *use, *next;
4055 if (DEP_STATUS (dep) & DEP_CANCELLED)
4057 sd_iterator_next (&sd_it);
4061 gcc_assert (DEBUG_INSN_P (dbg));
4063 if (sched_verbose >= 6)
4064 fprintf (sched_dump, ";;\t\tresetting: debug insn %d\n",
4067 /* ??? Rather than resetting the debug insn, we might be able
4068 to emit a debug temp before the just-scheduled insn, but
4069 this would involve checking that the expression at the
4070 point of the debug insn is equivalent to the expression
4071 before the just-scheduled insn. They might not be: the
4072 expression in the debug insn may depend on other insns not
4073 yet scheduled that set MEMs, REGs or even other debug
4074 insns. It's not clear that attempting to preserve debug
4075 information in these cases is worth the effort, given how
4076 uncommon these resets are and the likelihood that the debug
4077 temps introduced won't survive the schedule change. */
4078 INSN_VAR_LOCATION_LOC (dbg) = gen_rtx_UNKNOWN_VAR_LOC ();
4079 df_insn_rescan (dbg);
4081 /* Unknown location doesn't use any registers. */
4082 for (use = INSN_REG_USE_LIST (dbg); use != NULL; use = next)
4084 struct reg_use_data *prev = use;
4086 /* Remove use from the cyclic next_regno_use chain first. */
4087 while (prev->next_regno_use != use)
4088 prev = prev->next_regno_use;
4089 prev->next_regno_use = use->next_regno_use;
4090 next = use->next_insn_use;
4093 INSN_REG_USE_LIST (dbg) = NULL;
4095 /* We delete rather than resolve these deps, otherwise we
4096 crash in sched_free_deps(), because forward deps are
4097 expected to be released before backward deps. */
4098 sd_delete_dep (sd_it);
4101 gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);
4102 QUEUE_INDEX (insn) = QUEUE_SCHEDULED;
4104 if (sched_pressure == SCHED_PRESSURE_MODEL
4105 && model_curr_point < model_num_insns
4106 && NONDEBUG_INSN_P (insn))
4108 if (model_index (insn) == model_curr_point)
4111 while (model_curr_point < model_num_insns
4112 && (QUEUE_INDEX (MODEL_INSN (model_curr_point))
4113 == QUEUE_SCHEDULED));
4115 model_recompute (insn);
4116 model_update_limit_points ();
4117 update_register_pressure (insn);
4118 if (sched_verbose >= 2)
4119 print_curr_reg_pressure ();
4122 gcc_assert (INSN_TICK (insn) >= MIN_TICK);
4123 if (INSN_TICK (insn) > clock_var)
/* INSN has been prematurely moved from the queue to the ready list.
   This is possible only if the following flags are set.  */
4126 gcc_assert (flag_sched_stalled_insns || sched_fusion);
4128 /* ??? Probably, if INSN is scheduled prematurely, we should leave
4129 INSN_TICK untouched. This is a machine-dependent issue, actually. */
4130 INSN_TICK (insn) = clock_var;
4132 check_clobbered_conditions (insn);
/* Update dependent instructions.  First, see if by scheduling this insn
   now we broke a dependence in a way that requires us to change another
   insn.  */
4137 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
4138 sd_iterator_cond (&sd_it, &dep); sd_iterator_next (&sd_it))
4140 struct dep_replacement *desc = DEP_REPLACE (dep);
4141 rtx_insn *pro = DEP_PRO (dep);
4142 if (QUEUE_INDEX (pro) != QUEUE_SCHEDULED
4143 && desc != NULL && desc->insn == pro)
4144 apply_replacement (dep, false);
4147 /* Go through and resolve forward dependencies. */
4148 for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
4149 sd_iterator_cond (&sd_it, &dep);)
4151 rtx_insn *next = DEP_CON (dep);
4152 bool cancelled = (DEP_STATUS (dep) & DEP_CANCELLED) != 0;
4154 /* Resolve the dependence between INSN and NEXT.
4155 sd_resolve_dep () moves current dep to another list thus
4156 advancing the iterator. */
4157 sd_resolve_dep (sd_it);
4161 if (must_restore_pattern_p (next, dep))
4162 restore_pattern (dep, false);
4166 /* Don't bother trying to mark next as ready if insn is a debug
4167 insn. If insn is the last hard dependency, it will have
4168 already been discounted. */
4169 if (DEBUG_INSN_P (insn) && !DEBUG_INSN_P (next))
4172 if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
4176 effective_cost = try_ready (next);
4178 if (effective_cost >= 0
4179 && SCHED_GROUP_P (next)
4180 && advance < effective_cost)
4181 advance = effective_cost;
/* A check insn always has only one forward dependence (to the first
   insn in the recovery block); therefore, this will be executed only
   once.  */
4187 gcc_assert (sd_lists_empty_p (insn, SD_LIST_FORW));
4188 fix_recovery_deps (RECOVERY_BLOCK (insn));
/* Annotate the instruction with issue information -- TImode
   indicates that the instruction is expected not to be able
   to issue on the same cycle as the previous insn.  A machine
   may use this information to decide how the instruction should
   be aligned.  */
4198 && GET_CODE (PATTERN (insn)) != USE
4199 && GET_CODE (PATTERN (insn)) != CLOBBER
4200 && !DEBUG_INSN_P (insn))
4202 if (reload_completed)
4203 PUT_MODE (insn, clock_var > last_clock_var ? TImode : VOIDmode);
4204 last_clock_var = clock_var;
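/* An illustrative consumer (a sketch, not something done here): a
   backend walking the scheduled insns can detect cycle boundaries
   with "if (GET_MODE (insn) == TImode) start_new_bundle ();", where
   start_new_bundle is a hypothetical backend helper.  */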
4207 if (nonscheduled_insns_begin != NULL_RTX)
4208 /* Indicate to debug counters that INSN is scheduled. */
4209 nonscheduled_insns_begin = insn;
/* Functions for handling notes.  */

/* Add the note list that ends at FROM_END to the end of TO_ENDP.  */
4218 concat_note_lists (rtx_insn *from_end, rtx_insn **to_endp)
4220 rtx_insn *from_start;
/* It's easy when we have nothing to concat.  */
4223 if (from_end == NULL)
/* It's also easy when the destination is empty.  */
4227 if (*to_endp == NULL)
4229 *to_endp = from_end;
4233 from_start = from_end;
4234 while (PREV_INSN (from_start) != NULL)
4235 from_start = PREV_INSN (from_start);
4237 SET_PREV_INSN (from_start) = *to_endp;
4238 SET_NEXT_INSN (*to_endp) = from_start;
4239 *to_endp = from_end;
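/* E.g., splicing a FROM list n1 <-> n2 (FROM_END == n2) onto a TO list
   t1 <-> t2 (*TO_ENDP == t2) yields t1 <-> t2 <-> n1 <-> n2 and leaves
   *TO_ENDP pointing at n2.  */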
4242 /* Delete notes between HEAD and TAIL and put them in the chain
4243 of notes ended by NOTE_LIST. */
4245 remove_notes (rtx_insn *head, rtx_insn *tail)
4247 rtx_insn *next_tail, *insn, *next;
4250 if (head == tail && !INSN_P (head))
4253 next_tail = NEXT_INSN (tail);
4254 for (insn = head; insn != next_tail; insn = next)
4256 next = NEXT_INSN (insn);
4260 switch (NOTE_KIND (insn))
4262 case NOTE_INSN_BASIC_BLOCK:
4265 case NOTE_INSN_EPILOGUE_BEG:
4269 add_reg_note (next, REG_SAVE_NOTE,
4270 GEN_INT (NOTE_INSN_EPILOGUE_BEG));
/* Add the note to the list that ends at NOTE_LIST.  */
4279 SET_PREV_INSN (insn) = note_list;
4280 SET_NEXT_INSN (insn) = NULL_RTX;
4282 SET_NEXT_INSN (note_list) = insn;
4287 gcc_assert ((sel_sched_p () || insn != tail) && insn != head);
4291 /* A structure to record enough data to allow us to backtrack the scheduler to
4292 a previous state. */
4293 struct haifa_saved_data
4295 /* Next entry on the list. */
4296 struct haifa_saved_data *next;
4298 /* Backtracking is associated with scheduling insns that have delay slots.
4299 DELAY_PAIR points to the structure that contains the insns involved, and
4300 the number of cycles between them. */
4301 struct delay_pair *delay_pair;
4303 /* Data used by the frontend (e.g. sched-ebb or sched-rgn). */
4304 void *fe_saved_data;
4305 /* Data used by the backend. */
4306 void *be_saved_data;
4308 /* Copies of global state. */
4309 int clock_var, last_clock_var;
4310 struct ready_list ready;
4313 rtx_insn *last_scheduled_insn;
4314 rtx last_nondebug_scheduled_insn;
4315 rtx_insn *nonscheduled_insns_begin;
4316 int cycle_issued_insns;
4318 /* Copies of state used in the inner loop of schedule_block. */
4319 struct sched_block_state sched_block;
4321 /* We don't need to save q_ptr, as its value is arbitrary and we can set it
4322 to 0 when restoring. */
4324 rtx_insn_list **insn_queue;
/* Describe pattern replacements that occurred since this backtrack point
   was placed.  */
4328 vec<dep_t> replacement_deps;
4329 vec<int> replace_apply;
/* A copy of the next-cycle replacement vectors at the time of the
   backtrack point.  */
4333 vec<dep_t> next_cycle_deps;
4334 vec<int> next_cycle_apply;
4337 /* A record, in reverse order, of all scheduled insns which have delay slots
4338 and may require backtracking. */
4339 static struct haifa_saved_data *backtrack_queue;
/* For every dependency of INSN, set the FEEDS_BACKTRACK_INSN bit
   according to SET_P.  */
4344 mark_backtrack_feeds (rtx insn, int set_p)
4346 sd_iterator_def sd_it;
4348 FOR_EACH_DEP (insn, SD_LIST_HARD_BACK, sd_it, dep)
4350 FEEDS_BACKTRACK_INSN (DEP_PRO (dep)) = set_p;
4354 /* Save the current scheduler state so that we can backtrack to it
4355 later if necessary. PAIR gives the insns that make it necessary to
4356 save this point. SCHED_BLOCK is the local state of schedule_block
that needs to be saved.  */
4359 save_backtrack_point (struct delay_pair *pair,
4360 struct sched_block_state sched_block)
4363 struct haifa_saved_data *save = XNEW (struct haifa_saved_data);
4365 save->curr_state = xmalloc (dfa_state_size);
4366 memcpy (save->curr_state, curr_state, dfa_state_size);
4368 save->ready.first = ready.first;
4369 save->ready.n_ready = ready.n_ready;
4370 save->ready.n_debug = ready.n_debug;
4371 save->ready.veclen = ready.veclen;
4372 save->ready.vec = XNEWVEC (rtx_insn *, ready.veclen);
4373 memcpy (save->ready.vec, ready.vec, ready.veclen * sizeof (rtx));
4375 save->insn_queue = XNEWVEC (rtx_insn_list *, max_insn_queue_index + 1);
4376 save->q_size = q_size;
4377 for (i = 0; i <= max_insn_queue_index; i++)
4379 int q = NEXT_Q_AFTER (q_ptr, i);
4380 save->insn_queue[i] = copy_INSN_LIST (insn_queue[q]);
4383 save->clock_var = clock_var;
4384 save->last_clock_var = last_clock_var;
4385 save->cycle_issued_insns = cycle_issued_insns;
4386 save->last_scheduled_insn = last_scheduled_insn;
4387 save->last_nondebug_scheduled_insn = last_nondebug_scheduled_insn;
4388 save->nonscheduled_insns_begin = nonscheduled_insns_begin;
4390 save->sched_block = sched_block;
4392 save->replacement_deps.create (0);
4393 save->replace_apply.create (0);
4394 save->next_cycle_deps = next_cycle_replace_deps.copy ();
4395 save->next_cycle_apply = next_cycle_apply.copy ();
4397 if (current_sched_info->save_state)
4398 save->fe_saved_data = (*current_sched_info->save_state) ();
4400 if (targetm.sched.alloc_sched_context)
4402 save->be_saved_data = targetm.sched.alloc_sched_context ();
4403 targetm.sched.init_sched_context (save->be_saved_data, false);
4406 save->be_saved_data = NULL;
4408 save->delay_pair = pair;
4410 save->next = backtrack_queue;
4411 backtrack_queue = save;
4415 mark_backtrack_feeds (pair->i2, 1);
4416 INSN_TICK (pair->i2) = INVALID_TICK;
4417 INSN_EXACT_TICK (pair->i2) = clock_var + pair_delay (pair);
4418 SHADOW_P (pair->i2) = pair->stages == 0;
4419 pair = pair->next_same_i1;
4423 /* Walk the ready list and all queues. If any insns have unresolved backwards
4424 dependencies, these must be cancelled deps, broken by predication. Set or
4425 clear (depending on SET) the DEP_CANCELLED bit in DEP_STATUS. */
4428 toggle_cancelled_flags (bool set)
4431 sd_iterator_def sd_it;
4434 if (ready.n_ready > 0)
4436 rtx_insn **first = ready_lastpos (&ready);
4437 for (i = 0; i < ready.n_ready; i++)
4438 FOR_EACH_DEP (first[i], SD_LIST_BACK, sd_it, dep)
4439 if (!DEBUG_INSN_P (DEP_PRO (dep)))
4442 DEP_STATUS (dep) |= DEP_CANCELLED;
4444 DEP_STATUS (dep) &= ~DEP_CANCELLED;
4447 for (i = 0; i <= max_insn_queue_index; i++)
4449 int q = NEXT_Q_AFTER (q_ptr, i);
4450 rtx_insn_list *link;
4451 for (link = insn_queue[q]; link; link = link->next ())
4453 rtx_insn *insn = link->insn ();
4454 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
4455 if (!DEBUG_INSN_P (DEP_PRO (dep)))
4458 DEP_STATUS (dep) |= DEP_CANCELLED;
4460 DEP_STATUS (dep) &= ~DEP_CANCELLED;
/* Undo the replacements that have occurred after backtrack point SAVE
   was placed.  */
4469 undo_replacements_for_backtrack (struct haifa_saved_data *save)
4471 while (!save->replacement_deps.is_empty ())
4473 dep_t dep = save->replacement_deps.pop ();
4474 int apply_p = save->replace_apply.pop ();
4477 restore_pattern (dep, true);
4479 apply_replacement (dep, true);
4481 save->replacement_deps.release ();
4482 save->replace_apply.release ();
4485 /* Pop entries from the SCHEDULED_INSNS vector up to and including INSN.
   Restore their dependencies to an unresolved state, and mark them as
   queued nowhere.  */
4490 unschedule_insns_until (rtx insn)
4492 auto_vec<rtx_insn *> recompute_vec;
4494 /* Make two passes over the insns to be unscheduled. First, we clear out
4495 dependencies and other trivial bookkeeping. */
4499 sd_iterator_def sd_it;
4502 last = scheduled_insns.pop ();
/* This will be changed by restore_backtrack_point if the insn is in
   any queue.  */
4506 QUEUE_INDEX (last) = QUEUE_NOWHERE;
4508 INSN_TICK (last) = INVALID_TICK;
4510 if (modulo_ii > 0 && INSN_UID (last) < modulo_iter0_max_uid)
4511 modulo_insns_scheduled--;
4513 for (sd_it = sd_iterator_start (last, SD_LIST_RES_FORW);
4514 sd_iterator_cond (&sd_it, &dep);)
4516 rtx_insn *con = DEP_CON (dep);
4517 sd_unresolve_dep (sd_it);
4518 if (!MUST_RECOMPUTE_SPEC_P (con))
4520 MUST_RECOMPUTE_SPEC_P (con) = 1;
4521 recompute_vec.safe_push (con);
4529 /* A second pass, to update ready and speculation status for insns
4530 depending on the unscheduled ones. The first pass must have
4531 popped the scheduled_insns vector up to the point where we
   restart scheduling, as recompute_todo_spec requires it to be
   up-to-date.  */
4534 while (!recompute_vec.is_empty ())
4538 con = recompute_vec.pop ();
4539 MUST_RECOMPUTE_SPEC_P (con) = 0;
4540 if (!sd_lists_empty_p (con, SD_LIST_HARD_BACK))
4542 TODO_SPEC (con) = HARD_DEP;
4543 INSN_TICK (con) = INVALID_TICK;
4544 if (PREDICATED_PAT (con) != NULL_RTX)
4545 haifa_change_pattern (con, ORIG_PAT (con));
4547 else if (QUEUE_INDEX (con) != QUEUE_SCHEDULED)
4548 TODO_SPEC (con) = recompute_todo_spec (con, true);
4552 /* Restore scheduler state from the topmost entry on the backtracking queue.
4553 PSCHED_BLOCK_P points to the local data of schedule_block that we must
4554 overwrite with the saved data.
4555 The caller must already have called unschedule_insns_until. */
4558 restore_last_backtrack_point (struct sched_block_state *psched_block)
4561 struct haifa_saved_data *save = backtrack_queue;
4563 backtrack_queue = save->next;
4565 if (current_sched_info->restore_state)
4566 (*current_sched_info->restore_state) (save->fe_saved_data);
4568 if (targetm.sched.alloc_sched_context)
4570 targetm.sched.set_sched_context (save->be_saved_data);
4571 targetm.sched.free_sched_context (save->be_saved_data);
/* Do this first since it clobbers INSN_TICK of the involved
   instructions.  */
4576 undo_replacements_for_backtrack (save);
/* Clear the QUEUE_INDEX of everything in the ready list or one
   of the queues.  */
4580 if (ready.n_ready > 0)
4582 rtx_insn **first = ready_lastpos (&ready);
4583 for (i = 0; i < ready.n_ready; i++)
4585 rtx_insn *insn = first[i];
4586 QUEUE_INDEX (insn) = QUEUE_NOWHERE;
4587 INSN_TICK (insn) = INVALID_TICK;
4590 for (i = 0; i <= max_insn_queue_index; i++)
4592 int q = NEXT_Q_AFTER (q_ptr, i);
4594 for (rtx_insn_list *link = insn_queue[q]; link; link = link->next ())
4596 rtx_insn *x = link->insn ();
4597 QUEUE_INDEX (x) = QUEUE_NOWHERE;
4598 INSN_TICK (x) = INVALID_TICK;
4600 free_INSN_LIST_list (&insn_queue[q]);
4604 ready = save->ready;
4606 if (ready.n_ready > 0)
4608 rtx_insn **first = ready_lastpos (&ready);
4609 for (i = 0; i < ready.n_ready; i++)
4611 rtx_insn *insn = first[i];
4612 QUEUE_INDEX (insn) = QUEUE_READY;
4613 TODO_SPEC (insn) = recompute_todo_spec (insn, true);
4614 INSN_TICK (insn) = save->clock_var;
4619 q_size = save->q_size;
4620 for (i = 0; i <= max_insn_queue_index; i++)
4622 int q = NEXT_Q_AFTER (q_ptr, i);
4624 insn_queue[q] = save->insn_queue[q];
4626 for (rtx_insn_list *link = insn_queue[q]; link; link = link->next ())
4628 rtx_insn *x = link->insn ();
4629 QUEUE_INDEX (x) = i;
4630 TODO_SPEC (x) = recompute_todo_spec (x, true);
4631 INSN_TICK (x) = save->clock_var + i;
4634 free (save->insn_queue);
4636 toggle_cancelled_flags (true);
4638 clock_var = save->clock_var;
4639 last_clock_var = save->last_clock_var;
4640 cycle_issued_insns = save->cycle_issued_insns;
4641 last_scheduled_insn = save->last_scheduled_insn;
4642 last_nondebug_scheduled_insn = save->last_nondebug_scheduled_insn;
4643 nonscheduled_insns_begin = save->nonscheduled_insns_begin;
4645 *psched_block = save->sched_block;
4647 memcpy (curr_state, save->curr_state, dfa_state_size);
4648 free (save->curr_state);
4650 mark_backtrack_feeds (save->delay_pair->i2, 0);
4652 gcc_assert (next_cycle_replace_deps.is_empty ());
4653 next_cycle_replace_deps = save->next_cycle_deps.copy ();
4654 next_cycle_apply = save->next_cycle_apply.copy ();
4658 for (save = backtrack_queue; save; save = save->next)
4660 mark_backtrack_feeds (save->delay_pair->i2, 1);
4664 /* Discard all data associated with the topmost entry in the backtrack
4665 queue. If RESET_TICK is false, we just want to free the data. If true,
4666 we are doing this because we discovered a reason to backtrack. In the
4667 latter case, also reset the INSN_TICK for the shadow insn. */
4669 free_topmost_backtrack_point (bool reset_tick)
4671 struct haifa_saved_data *save = backtrack_queue;
4674 backtrack_queue = save->next;
4678 struct delay_pair *pair = save->delay_pair;
4681 INSN_TICK (pair->i2) = INVALID_TICK;
4682 INSN_EXACT_TICK (pair->i2) = INVALID_TICK;
4683 pair = pair->next_same_i1;
4685 undo_replacements_for_backtrack (save);
4689 save->replacement_deps.release ();
4690 save->replace_apply.release ();
4693 if (targetm.sched.free_sched_context)
4694 targetm.sched.free_sched_context (save->be_saved_data);
4695 if (current_sched_info->restore_state)
4696 free (save->fe_saved_data);
4697 for (i = 0; i <= max_insn_queue_index; i++)
4698 free_INSN_LIST_list (&save->insn_queue[i]);
4699 free (save->insn_queue);
4700 free (save->curr_state);
4701 free (save->ready.vec);
4705 /* Free the entire backtrack queue. */
4707 free_backtrack_queue (void)
4709 while (backtrack_queue)
4710 free_topmost_backtrack_point (false);
4713 /* Apply a replacement described by DESC. If IMMEDIATELY is false, we
4714 may have to postpone the replacement until the start of the next cycle,
4715 at which point we will be called again with IMMEDIATELY true. This is
4716 only done for machines which have instruction packets with explicit
parallelism, however.  */
4719 apply_replacement (dep_t dep, bool immediately)
4721 struct dep_replacement *desc = DEP_REPLACE (dep);
4722 if (!immediately && targetm.sched.exposed_pipeline && reload_completed)
4724 next_cycle_replace_deps.safe_push (dep);
4725 next_cycle_apply.safe_push (1);
4731 if (QUEUE_INDEX (desc->insn) == QUEUE_SCHEDULED)
4734 if (sched_verbose >= 5)
4735 fprintf (sched_dump, "applying replacement for insn %d\n",
4736 INSN_UID (desc->insn));
4738 success = validate_change (desc->insn, desc->loc, desc->newval, 0);
4739 gcc_assert (success);
4741 update_insn_after_change (desc->insn);
4742 if ((TODO_SPEC (desc->insn) & (HARD_DEP | DEP_POSTPONED)) == 0)
4743 fix_tick_ready (desc->insn);
4745 if (backtrack_queue != NULL)
4747 backtrack_queue->replacement_deps.safe_push (dep);
4748 backtrack_queue->replace_apply.safe_push (1);
4753 /* We have determined that a pattern involved in DEP must be restored.
4754 If IMMEDIATELY is false, we may have to postpone the replacement
4755 until the start of the next cycle, at which point we will be called
4756 again with IMMEDIATELY true. */
4758 restore_pattern (dep_t dep, bool immediately)
4760 rtx_insn *next = DEP_CON (dep);
4761 int tick = INSN_TICK (next);
/* If we already scheduled the insn, the modified version is
   correct.  */
4765 if (QUEUE_INDEX (next) == QUEUE_SCHEDULED)
4768 if (!immediately && targetm.sched.exposed_pipeline && reload_completed)
4770 next_cycle_replace_deps.safe_push (dep);
4771 next_cycle_apply.safe_push (0);
4776 if (DEP_TYPE (dep) == REG_DEP_CONTROL)
4778 if (sched_verbose >= 5)
4779 fprintf (sched_dump, "restoring pattern for insn %d\n",
4781 haifa_change_pattern (next, ORIG_PAT (next));
4785 struct dep_replacement *desc = DEP_REPLACE (dep);
4788 if (sched_verbose >= 5)
4789 fprintf (sched_dump, "restoring pattern for insn %d\n",
4790 INSN_UID (desc->insn));
4791 tick = INSN_TICK (desc->insn);
4793 success = validate_change (desc->insn, desc->loc, desc->orig, 0);
4794 gcc_assert (success);
4795 update_insn_after_change (desc->insn);
4796 if (backtrack_queue != NULL)
4798 backtrack_queue->replacement_deps.safe_push (dep);
4799 backtrack_queue->replace_apply.safe_push (0);
4802 INSN_TICK (next) = tick;
4803 if (TODO_SPEC (next) == DEP_POSTPONED)
4806 if (sd_lists_empty_p (next, SD_LIST_BACK))
4807 TODO_SPEC (next) = 0;
4808 else if (!sd_lists_empty_p (next, SD_LIST_HARD_BACK))
4809 TODO_SPEC (next) = HARD_DEP;
/* Perform pattern replacements that were queued up until the next
   cycle.  */
4815 perform_replacements_new_cycle (void)
4819 FOR_EACH_VEC_ELT (next_cycle_replace_deps, i, dep)
4821 int apply_p = next_cycle_apply[i];
4823 apply_replacement (dep, true);
4825 restore_pattern (dep, true);
4827 next_cycle_replace_deps.truncate (0);
4828 next_cycle_apply.truncate (0);
/* Compute INSN_TICK_ESTIMATE for INSN.  PROCESSED is a bitmap of
   instructions we've previously encountered; a set bit prevents
   recursion.  BUDGET is a limit on how far ahead we look; it is
   reduced on recursive calls.  Return true if we produced a good
   estimate, or false if we exceeded the budget.  */
4837 estimate_insn_tick (bitmap processed, rtx_insn *insn, int budget)
4839 sd_iterator_def sd_it;
4841 int earliest = INSN_TICK (insn);
4843 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
4845 rtx_insn *pro = DEP_PRO (dep);
4848 if (DEP_STATUS (dep) & DEP_CANCELLED)
4851 if (QUEUE_INDEX (pro) == QUEUE_SCHEDULED)
4852 gcc_assert (INSN_TICK (pro) + dep_cost (dep) <= INSN_TICK (insn));
4855 int cost = dep_cost (dep);
4858 if (!bitmap_bit_p (processed, INSN_LUID (pro)))
4860 if (!estimate_insn_tick (processed, pro, budget - cost))
4863 gcc_assert (INSN_TICK_ESTIMATE (pro) != INVALID_TICK);
4864 t = INSN_TICK_ESTIMATE (pro) + cost;
4865 if (earliest == INVALID_TICK || t > earliest)
4869 bitmap_set_bit (processed, INSN_LUID (insn));
4870 INSN_TICK_ESTIMATE (insn) = earliest;
4874 /* Examine the pair of insns in P, and estimate (optimistically, assuming
4875 infinite resources) the cycle in which the delayed shadow can be issued.
4876 Return the number of cycles that must pass before the real insn can be
4877 issued in order to meet this constraint. */
4879 estimate_shadow_tick (struct delay_pair *p)
4881 bitmap_head processed;
4884 bitmap_initialize (&processed, 0);
4886 cutoff = !estimate_insn_tick (&processed, p->i2,
4887 max_insn_queue_index + pair_delay (p));
4888 bitmap_clear (&processed);
4890 return max_insn_queue_index;
4891 t = INSN_TICK_ESTIMATE (p->i2) - (clock_var + pair_delay (p) + 1);
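/* A worked example with made-up numbers: if the optimistic estimate for
   the shadow is tick 10, clock_var is 4 and the pair delay is 3, then
   t = 10 - (4 + 3 + 1) = 2, i.e. the real insn must wait two more
   cycles before the constraint becomes satisfiable.  */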
4897 /* If INSN has no unresolved backwards dependencies, add it to the schedule and
4898 recursively resolve all its forward dependencies. */
4900 resolve_dependencies (rtx_insn *insn)
4902 sd_iterator_def sd_it;
4905 /* Don't use sd_lists_empty_p; it ignores debug insns. */
4906 if (DEPS_LIST_FIRST (INSN_HARD_BACK_DEPS (insn)) != NULL
4907 || DEPS_LIST_FIRST (INSN_SPEC_BACK_DEPS (insn)) != NULL)
4910 if (sched_verbose >= 4)
4911 fprintf (sched_dump, ";;\tquickly resolving %d\n", INSN_UID (insn));
4913 if (QUEUE_INDEX (insn) >= 0)
4914 queue_remove (insn);
4916 scheduled_insns.safe_push (insn);
4918 /* Update dependent instructions. */
4919 for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
4920 sd_iterator_cond (&sd_it, &dep);)
4922 rtx_insn *next = DEP_CON (dep);
4924 if (sched_verbose >= 4)
4925 fprintf (sched_dump, ";;\t\tdep %d against %d\n", INSN_UID (insn),
4928 /* Resolve the dependence between INSN and NEXT.
4929 sd_resolve_dep () moves current dep to another list thus
4930 advancing the iterator. */
4931 sd_resolve_dep (sd_it);
4933 if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
4935 resolve_dependencies (next);
/* A check insn always has only one forward dependence (to the first
   insn in the recovery block); therefore, this will be executed only
   once.  */
4941 gcc_assert (sd_lists_empty_p (insn, SD_LIST_FORW));
/* Return the head and tail pointers of ebb starting at BEG and ending
   at END.  */
4950 get_ebb_head_tail (basic_block beg, basic_block end,
4951 rtx_insn **headp, rtx_insn **tailp)
4953 rtx_insn *beg_head = BB_HEAD (beg);
4954 rtx_insn * beg_tail = BB_END (beg);
4955 rtx_insn * end_head = BB_HEAD (end);
4956 rtx_insn * end_tail = BB_END (end);
/* Don't include any notes or labels at the beginning of the BEG
   basic block, or notes at the end of the END basic block.  */
4961 if (LABEL_P (beg_head))
4962 beg_head = NEXT_INSN (beg_head);
4964 while (beg_head != beg_tail)
4965 if (NOTE_P (beg_head))
4966 beg_head = NEXT_INSN (beg_head);
4967 else if (DEBUG_INSN_P (beg_head))
4969 rtx_insn * note, *next;
4971 for (note = NEXT_INSN (beg_head);
4975 next = NEXT_INSN (note);
4978 if (sched_verbose >= 9)
4979 fprintf (sched_dump, "reorder %i\n", INSN_UID (note));
4981 reorder_insns_nobb (note, note, PREV_INSN (beg_head));
4983 if (BLOCK_FOR_INSN (note) != beg)
4984 df_insn_change_bb (note, beg);
4986 else if (!DEBUG_INSN_P (note))
4998 end_head = beg_head;
4999 else if (LABEL_P (end_head))
5000 end_head = NEXT_INSN (end_head);
5002 while (end_head != end_tail)
5003 if (NOTE_P (end_tail))
5004 end_tail = PREV_INSN (end_tail);
5005 else if (DEBUG_INSN_P (end_tail))
5007 rtx_insn * note, *prev;
5009 for (note = PREV_INSN (end_tail);
5013 prev = PREV_INSN (note);
5016 if (sched_verbose >= 9)
5017 fprintf (sched_dump, "reorder %i\n", INSN_UID (note));
5019 reorder_insns_nobb (note, note, end_tail);
5021 if (end_tail == BB_END (end))
5022 BB_END (end) = note;
5024 if (BLOCK_FOR_INSN (note) != end)
5025 df_insn_change_bb (note, end);
5027 else if (!DEBUG_INSN_P (note))
5039 /* Return nonzero if there are no real insns in the range [ HEAD, TAIL ]. */
5042 no_real_insns_p (const rtx_insn *head, const rtx_insn *tail)
5044 while (head != NEXT_INSN (tail))
5046 if (!NOTE_P (head) && !LABEL_P (head))
5048 head = NEXT_INSN (head);
5053 /* Restore-other-notes: NOTE_LIST is the end of a chain of notes
5054 previously found among the insns. Insert them just before HEAD. */
5056 restore_other_notes (rtx_insn *head, basic_block head_bb)
5060 rtx_insn *note_head = note_list;
5063 head_bb = BLOCK_FOR_INSN (head);
5065 head = NEXT_INSN (bb_note (head_bb));
5067 while (PREV_INSN (note_head))
5069 set_block_for_insn (note_head, head_bb);
5070 note_head = PREV_INSN (note_head);
/* The loop above missed this note.  */
5073 set_block_for_insn (note_head, head_bb);
5075 SET_PREV_INSN (note_head) = PREV_INSN (head);
5076 SET_NEXT_INSN (PREV_INSN (head)) = note_head;
5077 SET_PREV_INSN (head) = note_list;
5078 SET_NEXT_INSN (note_list) = head;
5080 if (BLOCK_FOR_INSN (head) != head_bb)
5081 BB_END (head_bb) = note_list;
5089 /* When we know we are going to discard the schedule due to a failed attempt
5090 at modulo scheduling, undo all replacements. */
5092 undo_all_replacements (void)
5097 FOR_EACH_VEC_ELT (scheduled_insns, i, insn)
5099 sd_iterator_def sd_it;
5102 /* See if we must undo a replacement. */
5103 for (sd_it = sd_iterator_start (insn, SD_LIST_RES_FORW);
5104 sd_iterator_cond (&sd_it, &dep); sd_iterator_next (&sd_it))
5106 struct dep_replacement *desc = DEP_REPLACE (dep);
5108 validate_change (desc->insn, desc->loc, desc->orig, 0);
/* Return the first non-scheduled insn in the current scheduling block.
   This is mostly used for debug-counter purposes.  */
5116 first_nonscheduled_insn (void)
5118 rtx_insn *insn = (nonscheduled_insns_begin != NULL_RTX
5119 ? nonscheduled_insns_begin
5120 : current_sched_info->prev_head);
5124 insn = next_nonnote_nondebug_insn (insn);
5126 while (QUEUE_INDEX (insn) == QUEUE_SCHEDULED);
/* Move insns that became ready to fire from the queue to the ready list.  */
5134 queue_to_ready (struct ready_list *ready)
5137 rtx_insn_list *link;
5140 q_ptr = NEXT_Q (q_ptr);
5142 if (dbg_cnt (sched_insn) == false)
/* If the debug counter is activated, do not requeue the first
   nonscheduled insn.  */
5145 skip_insn = first_nonscheduled_insn ();
5147 skip_insn = NULL_RTX;
/* Add all pending insns that can be scheduled without stalls to the
   ready list.  */
5151 for (link = insn_queue[q_ptr]; link; link = link->next ())
5153 insn = link->insn ();
5156 if (sched_verbose >= 2)
5157 fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
5158 (*current_sched_info->print_insn) (insn, 0));
5160 /* If the ready list is full, delay the insn for 1 cycle.
5161 See the comment in schedule_block for the rationale. */
5162 if (!reload_completed
5163 && (ready->n_ready - ready->n_debug > MAX_SCHED_READY_INSNS
5164 || (sched_pressure == SCHED_PRESSURE_MODEL
5165 /* Limit pressure recalculations to MAX_SCHED_READY_INSNS
5166 instructions too. */
5167 && model_index (insn) > (model_curr_point
5168 + MAX_SCHED_READY_INSNS)))
5169 && !(sched_pressure == SCHED_PRESSURE_MODEL
5170 && model_curr_point < model_num_insns
5171 /* Always allow the next model instruction to issue. */
5172 && model_index (insn) == model_curr_point)
5173 && !SCHED_GROUP_P (insn)
5174 && insn != skip_insn)
5176 if (sched_verbose >= 2)
5177 fprintf (sched_dump, "keeping in queue, ready full\n");
5178 queue_insn (insn, 1, "ready full");
5182 ready_add (ready, insn, false);
5183 if (sched_verbose >= 2)
5184 fprintf (sched_dump, "moving to ready without stalls\n");
5187 free_INSN_LIST_list (&insn_queue[q_ptr]);
5189 /* If there are no ready insns, stall until one is ready and add all
5190 of the pending insns at that point to the ready list. */
5191 if (ready->n_ready == 0)
5195 for (stalls = 1; stalls <= max_insn_queue_index; stalls++)
5197 if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
5199 for (; link; link = link->next ())
5201 insn = link->insn ();
5204 if (sched_verbose >= 2)
5205 fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
5206 (*current_sched_info->print_insn) (insn, 0));
5208 ready_add (ready, insn, false);
5209 if (sched_verbose >= 2)
5210 fprintf (sched_dump, "moving to ready with %d stalls\n", stalls);
5212 free_INSN_LIST_list (&insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]);
5214 advance_one_cycle ();
5219 advance_one_cycle ();
5222 q_ptr = NEXT_Q_AFTER (q_ptr, stalls);
5223 clock_var += stalls;
5224 if (sched_verbose >= 2)
5225 fprintf (sched_dump, ";;\tAdvancing clock by %d cycle[s] to %d\n",
5230 /* Used by early_queue_to_ready. Determines whether it is "ok" to
5231 prematurely move INSN from the queue to the ready list. Currently,
5232 if a target defines the hook 'is_costly_dependence', this function
5233 uses the hook to check whether there exist any dependences which are
5234 considered costly by the target, between INSN and other insns that
5235 have already been scheduled. Dependences are checked up to Y cycles
   back, with default Y=1; the flag -fsched-stalled-insns-dep=Y allows
   controlling this value.
   (Other considerations could be taken into account instead (or in
   addition) depending on user flags and target hooks.)  */
5242 ok_for_early_queue_removal (rtx insn)
5244 if (targetm.sched.is_costly_dependence)
5248 int i = scheduled_insns.length ();
5249 for (n_cycles = flag_sched_stalled_insns_dep; n_cycles; n_cycles--)
5255 prev_insn = scheduled_insns[i];
5257 if (!NOTE_P (prev_insn))
5261 dep = sd_find_dep_between (prev_insn, insn, true);
5265 cost = dep_cost (dep);
5267 if (targetm.sched.is_costly_dependence (dep, cost,
5268 flag_sched_stalled_insns_dep - n_cycles))
5273 if (GET_MODE (prev_insn) == TImode) /* end of dispatch group */
/* Remove insns from the queue before they become "ready" with respect
   to FU latency considerations.  */
5290 early_queue_to_ready (state_t state, struct ready_list *ready)
5293 rtx_insn_list *link;
5294 rtx_insn_list *next_link;
5295 rtx_insn_list *prev_link;
5298 state_t temp_state = alloca (dfa_state_size);
5300 int insns_removed = 0;
   Flag '-fsched-stalled-insns=X' determines the aggressiveness of this
   function:
5306 X == 0: There is no limit on how many queued insns can be removed
5307 prematurely. (flag_sched_stalled_insns = -1).
5309 X >= 1: Only X queued insns can be removed prematurely in each
5310 invocation. (flag_sched_stalled_insns = X).
5312 Otherwise: Early queue removal is disabled.
   (flag_sched_stalled_insns = 0).  */
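/* For example, a hypothetical invocation

     gcc -O2 -fsched-stalled-insns=2 -fsched-stalled-insns-dep=3 ...

   permits at most two premature removals per call of this function,
   with dependences examined up to three cycles back.  */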
5316 if (! flag_sched_stalled_insns)
5319 for (stalls = 0; stalls <= max_insn_queue_index; stalls++)
5321 if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
5323 if (sched_verbose > 6)
5324 fprintf (sched_dump, ";; look at index %d + %d\n", q_ptr, stalls);
5329 next_link = link->next ();
5330 insn = link->insn ();
5331 if (insn && sched_verbose > 6)
5332 print_rtl_single (sched_dump, insn);
5334 memcpy (temp_state, state, dfa_state_size);
5335 if (recog_memoized (insn) < 0)
/* Set a non-negative cost to indicate that it's not ready, to
   avoid an infinite Q->R->Q->R... cycle.  */
5340 cost = state_transition (temp_state, insn);
5342 if (sched_verbose >= 6)
5343 fprintf (sched_dump, "transition cost = %d\n", cost);
5345 move_to_ready = false;
5348 move_to_ready = ok_for_early_queue_removal (insn);
5349 if (move_to_ready == true)
5351 /* move from Q to R */
5353 ready_add (ready, insn, false);
5356 XEXP (prev_link, 1) = next_link;
5358 insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = next_link;
5360 free_INSN_LIST_node (link);
5362 if (sched_verbose >= 2)
5363 fprintf (sched_dump, ";;\t\tEarly Q-->Ready: insn %s\n",
5364 (*current_sched_info->print_insn) (insn, 0));
5367 if (insns_removed == flag_sched_stalled_insns)
5368 /* Remove no more than flag_sched_stalled_insns insns
5369 from Q at a time. */
5370 return insns_removed;
5374 if (move_to_ready == false)
5381 } /* for stalls.. */
5383 return insns_removed;
5387 /* Print the ready list for debugging purposes.
   If READY_TRY is non-zero then only print insns that max_issue
   will consider.  */
5391 debug_ready_list_1 (struct ready_list *ready, signed char *ready_try)
5396 if (ready->n_ready == 0)
5398 fprintf (sched_dump, "\n");
5402 p = ready_lastpos (ready);
5403 for (i = 0; i < ready->n_ready; i++)
5405 if (ready_try != NULL && ready_try[ready->n_ready - i - 1])
5408 fprintf (sched_dump, " %s:%d",
5409 (*current_sched_info->print_insn) (p[i], 0),
5411 if (sched_pressure != SCHED_PRESSURE_NONE)
5412 fprintf (sched_dump, "(cost=%d",
5413 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (p[i]));
5414 fprintf (sched_dump, ":prio=%d", INSN_PRIORITY (p[i]));
5415 if (INSN_TICK (p[i]) > clock_var)
5416 fprintf (sched_dump, ":delay=%d", INSN_TICK (p[i]) - clock_var);
5417 if (sched_pressure == SCHED_PRESSURE_MODEL)
5418 fprintf (sched_dump, ":idx=%d",
5419 model_index (p[i]));
5420 if (sched_pressure != SCHED_PRESSURE_NONE)
5421 fprintf (sched_dump, ")");
5423 fprintf (sched_dump, "\n");
/* Print the ready list.  Callable from the debugger.  */
5428 debug_ready_list (struct ready_list *ready)
5430 debug_ready_list_1 (ready, NULL);
5433 /* Search INSN for REG_SAVE_NOTE notes and convert them back into insn
5434 NOTEs. This is used for NOTE_INSN_EPILOGUE_BEG, so that sched-ebb
5435 replaces the epilogue note in the correct basic block. */
5437 reemit_notes (rtx_insn *insn)
5440 rtx_insn *last = insn;
5442 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
5444 if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
5446 enum insn_note note_type = (enum insn_note) INTVAL (XEXP (note, 0));
5448 last = emit_note_before (note_type, last);
5449 remove_note (insn, note);
5454 /* Move INSN. Reemit notes if needed. Update CFG, if needed. */
5456 move_insn (rtx_insn *insn, rtx_insn *last, rtx nt)
5458 if (PREV_INSN (insn) != last)
5464 bb = BLOCK_FOR_INSN (insn);
5466 /* BB_HEAD is either LABEL or NOTE. */
5467 gcc_assert (BB_HEAD (bb) != insn);
5469 if (BB_END (bb) == insn)
/* If this is the last instruction in BB, move the end marker one
   instruction up.  */
/* Jumps are always placed at the end of a basic block.  */
5474 jump_p = control_flow_insn_p (insn);
5477 || ((common_sched_info->sched_pass_id == SCHED_RGN_PASS)
5478 && IS_SPECULATION_BRANCHY_CHECK_P (insn))
5479 || (common_sched_info->sched_pass_id
5480 == SCHED_EBB_PASS));
5482 gcc_assert (BLOCK_FOR_INSN (PREV_INSN (insn)) == bb);
5484 BB_END (bb) = PREV_INSN (insn);
5487 gcc_assert (BB_END (bb) != last);
/* We move the block note along with the jump.  */
5494 note = NEXT_INSN (insn);
5495 while (NOTE_NOT_BB_P (note) && note != nt)
5496 note = NEXT_INSN (note);
5500 || BARRIER_P (note)))
5501 note = NEXT_INSN (note);
5503 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
5508 SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (note);
5509 SET_PREV_INSN (NEXT_INSN (note)) = PREV_INSN (insn);
5511 SET_NEXT_INSN (note) = NEXT_INSN (last);
5512 SET_PREV_INSN (NEXT_INSN (last)) = note;
5514 SET_NEXT_INSN (last) = insn;
5515 SET_PREV_INSN (insn) = last;
5517 bb = BLOCK_FOR_INSN (last);
5521 fix_jump_move (insn);
5523 if (BLOCK_FOR_INSN (insn) != bb)
5524 move_block_after_check (insn);
5526 gcc_assert (BB_END (bb) == last);
5529 df_insn_change_bb (insn, bb);
5531 /* Update BB_END, if needed. */
5532 if (BB_END (bb) == last)
5536 SCHED_GROUP_P (insn) = 0;
/* Return true if scheduling INSN will finish the current clock cycle.  */
5541 insn_finishes_cycle_p (rtx_insn *insn)
5543 if (SCHED_GROUP_P (insn))
/* After issuing INSN, the rest of the sched_group will be forced to
   issue in order.  Don't make any plans for the rest of the cycle.  */
5548 /* Finishing the block will, apparently, finish the cycle. */
5549 if (current_sched_info->insn_finishes_block_p
5550 && current_sched_info->insn_finishes_block_p (insn))
/* Functions to model the cache auto-prefetcher.

   Some CPUs have a cache auto-prefetcher, which /seems/ to initiate
   memory prefetches if it sees instructions with consecutive memory
   accesses in the instruction stream.  Details of such hardware units
   are not published, so we can only guess what exactly is going on there.
   In the scheduler, we model an abstract auto-prefetcher.  If there are
   memory insns in the ready list (or the queue) that have the same memory
   base but different offsets, then we delay the insns with larger offsets
   until insns with smaller offsets get scheduled.  If
   PARAM_SCHED_AUTOPREF_QUEUE_DEPTH is "1", then we look at the ready list;
   if it is N>1, then we also look through N-1 queue entries.
   If the param is N>=0, then rank_for_schedule will consider
   auto-prefetching among its heuristics.
   A param value of "-1" disables modelling of the auto-prefetcher.  */
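/* As an example (hypothetical RTL), if the ready list contains

     (set (reg R2) (mem (plus (reg R1) (const_int 8))))
     (set (reg R3) (mem (reg R1)))

   both accesses share base R1, so the model delays the R1+8 load until
   the R1+0 load has been scheduled, presenting the hardware prefetcher
   with ascending addresses.  */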
5572 /* Initialize autoprefetcher model data for INSN. */
5574 autopref_multipass_init (const rtx_insn *insn, int write)
5576 autopref_multipass_data_t data = &INSN_AUTOPREF_MULTIPASS_DATA (insn)[write];
5578 gcc_assert (data->status == AUTOPREF_MULTIPASS_DATA_UNINITIALIZED);
5579 data->base = NULL_RTX;
/* Mark the insn entry initialized, but not relevant for the
   auto-prefetcher.  */
5582 data->status = AUTOPREF_MULTIPASS_DATA_IRRELEVANT;
5584 rtx set = single_set (insn);
5585 if (set == NULL_RTX)
5588 rtx mem = write ? SET_DEST (set) : SET_SRC (set);
5592 struct address_info info;
5593 decompose_mem_address (&info, mem);
5595 /* TODO: Currently only (base+const) addressing is supported. */
5596 if (info.base == NULL || !REG_P (*info.base)
5597 || (info.disp != NULL && !CONST_INT_P (*info.disp)))
5600 /* This insn is relevant for auto-prefetcher. */
5601 data->base = *info.base;
5602 data->offset = info.disp ? INTVAL (*info.disp) : 0;
5603 data->status = AUTOPREF_MULTIPASS_DATA_NORMAL;
5606 /* Helper function for rank_for_schedule sorting. */
5608 autopref_rank_for_schedule (const rtx_insn *insn1, const rtx_insn *insn2)
5610 for (int write = 0; write < 2; ++write)
5612 autopref_multipass_data_t data1
5613 = &INSN_AUTOPREF_MULTIPASS_DATA (insn1)[write];
5614 autopref_multipass_data_t data2
5615 = &INSN_AUTOPREF_MULTIPASS_DATA (insn2)[write];
5617 if (data1->status == AUTOPREF_MULTIPASS_DATA_UNINITIALIZED)
5618 autopref_multipass_init (insn1, write);
5619 if (data1->status == AUTOPREF_MULTIPASS_DATA_IRRELEVANT)
5622 if (data2->status == AUTOPREF_MULTIPASS_DATA_UNINITIALIZED)
5623 autopref_multipass_init (insn2, write);
5624 if (data2->status == AUTOPREF_MULTIPASS_DATA_IRRELEVANT)
5627 if (!rtx_equal_p (data1->base, data2->base))
5630 return data1->offset - data2->offset;
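/* E.g., with hypothetical reads from R1+4 and R1+12 this returns
   4 - 12 = -8, sorting the access with the lower offset first.  */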
/* True if the header of the debug dump was printed.  */
5637 static bool autopref_multipass_dfa_lookahead_guard_started_dump_p;
5639 /* Helper for autopref_multipass_dfa_lookahead_guard.
5640 Return "1" if INSN1 should be delayed in favor of INSN2. */
5642 autopref_multipass_dfa_lookahead_guard_1 (const rtx_insn *insn1,
5643 const rtx_insn *insn2, int write)
5645 autopref_multipass_data_t data1
5646 = &INSN_AUTOPREF_MULTIPASS_DATA (insn1)[write];
5647 autopref_multipass_data_t data2
5648 = &INSN_AUTOPREF_MULTIPASS_DATA (insn2)[write];
5650 if (data2->status == AUTOPREF_MULTIPASS_DATA_UNINITIALIZED)
5651 autopref_multipass_init (insn2, write);
5652 if (data2->status == AUTOPREF_MULTIPASS_DATA_IRRELEVANT)
5655 if (rtx_equal_p (data1->base, data2->base)
5656 && data1->offset > data2->offset)
5658 if (sched_verbose >= 2)
5660 if (!autopref_multipass_dfa_lookahead_guard_started_dump_p)
5662 fprintf (sched_dump,
5663 ";;\t\tnot trying in max_issue due to autoprefetch "
5665 autopref_multipass_dfa_lookahead_guard_started_dump_p = true;
5668 fprintf (sched_dump, " %d(%d)", INSN_UID (insn1), INSN_UID (insn2));
   We could also have hooked the autoprefetcher model into the
   first_cycle_multipass_backtrack / first_cycle_multipass_issue hooks
   to enable intelligent selection of "[r1+0]=r2; [r1+4]=r3" on the same
   cycle (e.g., once "[r1+0]=r2" is issued in max_issue(), "[r1+4]=r3"
   gets unblocked).  We don't bother with this yet because the target of
   interest (ARM Cortex-A15) can issue only one memory operation per
   cycle.  */
5686 /* Implementation of first_cycle_multipass_dfa_lookahead_guard hook.
5687 Return "1" if INSN1 should not be considered in max_issue due to
5688 auto-prefetcher considerations. */
5690 autopref_multipass_dfa_lookahead_guard (rtx_insn *insn1, int ready_index)
5694 /* Exit early if the param forbids this or if we're not entering here through
5695 normal haifa scheduling. This can happen if selective scheduling is
5696 explicitly enabled. */
5697 if (!insn_queue || PARAM_VALUE (PARAM_SCHED_AUTOPREF_QUEUE_DEPTH) <= 0)
5700 if (sched_verbose >= 2 && ready_index == 0)
5701 autopref_multipass_dfa_lookahead_guard_started_dump_p = false;
5703 for (int write = 0; write < 2; ++write)
5705 autopref_multipass_data_t data1
5706 = &INSN_AUTOPREF_MULTIPASS_DATA (insn1)[write];
5708 if (data1->status == AUTOPREF_MULTIPASS_DATA_UNINITIALIZED)
5709 autopref_multipass_init (insn1, write);
5710 if (data1->status == AUTOPREF_MULTIPASS_DATA_IRRELEVANT)
5713 if (ready_index == 0
5714 && data1->status == AUTOPREF_MULTIPASS_DATA_DONT_DELAY)
/* We allow only a single delay on privileged instructions.
   Doing otherwise would cause an infinite loop.  */
5718 if (sched_verbose >= 2)
5720 if (!autopref_multipass_dfa_lookahead_guard_started_dump_p)
5722 fprintf (sched_dump,
5723 ";;\t\tnot trying in max_issue due to autoprefetch "
5725 autopref_multipass_dfa_lookahead_guard_started_dump_p = true;
5728 fprintf (sched_dump, " *%d*", INSN_UID (insn1));
5733 for (int i2 = 0; i2 < ready.n_ready; ++i2)
5735 rtx_insn *insn2 = get_ready_element (i2);
5738 r = autopref_multipass_dfa_lookahead_guard_1 (insn1, insn2, write);
5741 if (ready_index == 0)
5744 data1->status = AUTOPREF_MULTIPASS_DATA_DONT_DELAY;
5750 if (PARAM_VALUE (PARAM_SCHED_AUTOPREF_QUEUE_DEPTH) == 1)
/* Everything from the current queue slot should have been moved to
   the ready list.  */
5755 gcc_assert (insn_queue[NEXT_Q_AFTER (q_ptr, 0)] == NULL_RTX);
5757 int n_stalls = PARAM_VALUE (PARAM_SCHED_AUTOPREF_QUEUE_DEPTH) - 1;
5758 if (n_stalls > max_insn_queue_index)
5759 n_stalls = max_insn_queue_index;
5761 for (int stalls = 1; stalls <= n_stalls; ++stalls)
5763 for (rtx_insn_list *link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)];
5765 link = link->next ())
5767 rtx_insn *insn2 = link->insn ();
5768 r = autopref_multipass_dfa_lookahead_guard_1 (insn1, insn2,
5772 /* Queue INSN1 until INSN2 can issue. */
5774 if (ready_index == 0)
5775 data1->status = AUTOPREF_MULTIPASS_DATA_DONT_DELAY;
5783 if (sched_verbose >= 2
5784 && autopref_multipass_dfa_lookahead_guard_started_dump_p
5785 && (ready_index == ready.n_ready - 1 || r < 0))
5786 /* This does not /always/ trigger. We don't output EOL if the last
5787 insn is not recognized (INSN_CODE < 0) and lookahead_guard is not
5788 called. We can live with this. */
5789 fprintf (sched_dump, "\n");
5794 /* Define type for target data used in multipass scheduling. */
5795 #ifndef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T
5796 # define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T int
5798 typedef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T first_cycle_multipass_data_t;
/* The following structure describes an entry of the stack of choices.  */
5803 /* Ordinal number of the issued insn in the ready queue. */
/* The number of remaining insns whose issue we should still try.  */
5807 /* The number of issued essential insns. */
5809 /* State after issuing the insn. */
5811 /* Target-specific data. */
5812 first_cycle_multipass_data_t target_data;
/* The following array implements the stack of choices used by
   function max_issue.  */
5817 static struct choice_entry *choice_stack;
5819 /* This holds the value of the target dfa_lookahead hook. */
/* The following variable holds the maximal number of tries of issuing
   insns for the first cycle multipass insn scheduling.  We define
   this value as constant*(DFA_LOOKAHEAD**ISSUE_RATE).  We would not
   need this constraint if all real insns (with non-negative codes)
   had reservations, because in that case the algorithm complexity is
   O(DFA_LOOKAHEAD**ISSUE_RATE).  Unfortunately, the DFA descriptions
   might be incomplete and such insns might occur.  For such
   descriptions, the complexity of the algorithm (without the constraint)
   could reach DFA_LOOKAHEAD ** N, where N is the queue length.  */
5831 static int max_lookahead_tries;
/* The following function returns the maximal (or close to maximal) number
   of insns which can be issued on the same cycle, one of which is the
   insn with the best rank (the first insn in READY).  To do this, the
   function tries different samples of ready insns.  READY is the current
   queue `ready'.  The global array READY_TRY reflects which insns are
   already issued in this try.  The function stops immediately if it
   reaches a solution in which all instructions can be issued.  INDEX
   will contain the index of the best insn in READY.  This function is
   used only for first cycle multipass scheduling.
5845 This function expects recognized insns only. All USEs,
5846 CLOBBERs, etc must be filtered elsewhere. */
5848 max_issue (struct ready_list *ready, int privileged_n, state_t state,
5849 bool first_cycle_insn_p, int *index)
5851 int n, i, all, n_ready, best, delay, tries_num;
5853 struct choice_entry *top;
5859 n_ready = ready->n_ready;
5860 gcc_assert (dfa_lookahead >= 1 && privileged_n >= 0
5861 && privileged_n <= n_ready);
5863 /* Init MAX_LOOKAHEAD_TRIES. */
5864 if (max_lookahead_tries == 0)
5866 max_lookahead_tries = 100;
5867 for (i = 0; i < issue_rate; i++)
5868 max_lookahead_tries *= dfa_lookahead;
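/* E.g., with dfa_lookahead == 4 and issue_rate == 2 (hypothetical
   parameters) this caps the search at 100 * 4 * 4 = 1600 tries.  */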
/* Init MORE_ISSUE.  */
5872 more_issue = issue_rate - cycle_issued_insns;
5873 gcc_assert (more_issue >= 0);
5875 /* The number of the issued insns in the best solution. */
5880 /* Set initial state of the search. */
5881 memcpy (top->state, state, dfa_state_size);
5882 top->rest = dfa_lookahead;
5884 if (targetm.sched.first_cycle_multipass_begin)
5885 targetm.sched.first_cycle_multipass_begin (&top->target_data,
5887 first_cycle_insn_p);
/* Count the number of insns to search among.  */
5890 for (all = i = 0; i < n_ready; i++)
5894 if (sched_verbose >= 2)
5896 fprintf (sched_dump, ";;\t\tmax_issue among %d insns:", all);
5897 debug_ready_list_1 (ready, ready_try);
5900 /* I is the index of the insn to try next. */
5905 if (/* If we've reached a dead end or searched enough of what we have
5908 /* or have nothing else to try... */
5910 /* or should not issue more. */
5911 || top->n >= more_issue)
5913 /* ??? (... || i == n_ready). */
5914 gcc_assert (i <= n_ready);
5916 /* We should not issue more than issue_rate instructions. */
5917 gcc_assert (top->n <= more_issue);
5919 if (top == choice_stack)
5922 if (best < top - choice_stack)
/* Try to find an issued privileged insn.  */
5928 while (n && !ready_try[--n])
5932 if (/* If all insns are equally good... */
5934 /* Or a privileged insn will be issued. */
5936 /* Then we have a solution. */
5938 best = top - choice_stack;
/* This is the index of the insn issued first in this
   solution.  */
5941 *index = choice_stack [1].index;
5942 if (top->n == more_issue || best == all)
5947 /* Set ready-list index to point to the last insn
5948 ('i++' below will advance it to the next insn). */
5954 if (targetm.sched.first_cycle_multipass_backtrack)
5955 targetm.sched.first_cycle_multipass_backtrack (&top->target_data,
5956 ready_try, n_ready);
5959 memcpy (state, top->state, dfa_state_size);
5961 else if (!ready_try [i])
5964 if (tries_num > max_lookahead_tries)
5966 insn = ready_element (ready, i);
5967 delay = state_transition (state, insn);
5970 if (state_dead_lock_p (state)
5971 || insn_finishes_cycle_p (insn))
5972 /* We won't issue any more instructions in the next
5979 if (memcmp (top->state, state, dfa_state_size) != 0)
5982 /* Advance to the next choice_entry. */
5984 /* Initialize it. */
5985 top->rest = dfa_lookahead;
5988 memcpy (top->state, state, dfa_state_size);
5991 if (targetm.sched.first_cycle_multipass_issue)
5992 targetm.sched.first_cycle_multipass_issue (&top->target_data,
6002 /* Increase ready-list index. */
6006 if (targetm.sched.first_cycle_multipass_end)
6007 targetm.sched.first_cycle_multipass_end (best != 0
6008 ? &choice_stack[1].target_data
6011 /* Restore the original state of the DFA. */
6012 memcpy (state, choice_stack->state, dfa_state_size);
/* The following function chooses an insn from READY and modifies
   READY.  It is used only for first cycle multipass scheduling.
   Return:
   -1 if the cycle should be advanced,
   0 if INSN_PTR is set to point to the desirable insn,
   1 if choose_ready () should be restarted without advancing the cycle.  */
6025 choose_ready (struct ready_list *ready, bool first_cycle_insn_p,
6026 rtx_insn **insn_ptr)
6028 if (dbg_cnt (sched_insn) == false)
6030 if (nonscheduled_insns_begin == NULL_RTX)
6031 nonscheduled_insns_begin = current_sched_info->prev_head;
6033 rtx_insn *insn = first_nonscheduled_insn ();
6035 if (QUEUE_INDEX (insn) == QUEUE_READY)
6036 /* INSN is in the ready_list. */
6038 ready_remove_insn (insn);
6043 /* INSN is in the queue. Advance cycle to move it to the ready list. */
6044 gcc_assert (QUEUE_INDEX (insn) >= 0);
6048 if (dfa_lookahead <= 0 || SCHED_GROUP_P (ready_element (ready, 0))
6049 || DEBUG_INSN_P (ready_element (ready, 0)))
6051 if (targetm.sched.dispatch (NULL, IS_DISPATCH_ON))
6052 *insn_ptr = ready_remove_first_dispatch (ready);
6054 *insn_ptr = ready_remove_first (ready);
6060 /* Try to choose the best insn. */
6064 insn = ready_element (ready, 0);
6065 if (INSN_CODE (insn) < 0)
6067 *insn_ptr = ready_remove_first (ready);
6071 /* Filter the search space. */
6072 for (i = 0; i < ready->n_ready; i++)
6076 insn = ready_element (ready, i);
6078 /* If this insn is recognizable we should have already
6079 recognized it earlier.
6080 ??? Not very clear where this is supposed to be done.
6082 gcc_checking_assert (INSN_CODE (insn) >= 0
6083 || recog_memoized (insn) < 0);
6084 if (INSN_CODE (insn) < 0)
6086 /* Non-recognized insns at position 0 are handled above. */
6092 if (targetm.sched.first_cycle_multipass_dfa_lookahead_guard)
6095 = (targetm.sched.first_cycle_multipass_dfa_lookahead_guard
6098 if (ready_try[i] < 0)
6099 /* Queue instruction for several cycles.
6100 We need to restart choose_ready as we have changed
6103 change_queue_index (insn, -ready_try[i]);
6107 /* Make sure that we didn't end up with 0'th insn filtered out.
6108 Don't be tempted to make life easier for backends and just
6109 requeue 0'th insn if (ready_try[0] == 0) and restart
6110 choose_ready. Backends should be very considerate about
6111 requeueing instructions -- especially the highest priority
6112 one at position 0. */
6113 gcc_assert (ready_try[i] == 0 || i > 0);
6118 gcc_assert (ready_try[i] == 0);
6119 /* INSN made it through the scrutiny of filters! */
6122 if (max_issue (ready, 1, curr_state, first_cycle_insn_p, &index) == 0)
6124 *insn_ptr = ready_remove_first (ready);
6125 if (sched_verbose >= 4)
6126 fprintf (sched_dump, ";;\t\tChosen insn (but can't issue) : %s \n",
6127 (*current_sched_info->print_insn) (*insn_ptr, 0));
6132 if (sched_verbose >= 4)
6133 fprintf (sched_dump, ";;\t\tChosen insn : %s\n",
6134 (*current_sched_info->print_insn)
6135 (ready_element (ready, index), 0));
6137 *insn_ptr = ready_remove (ready, index);
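/* A minimal sketch of how the return values above are consumed
   (illustrative only; schedule_block below does the real dispatch,
   including the restart label):

     rtx_insn *insn;
     int res = choose_ready (&ready, ls.first_cycle_insn_p, &insn);
     if (res < 0)
       ;   // cycle must be advanced
     else if (res > 0)
       goto restart_choose_ready;   // ready list changed, pick again
     else
       ;   // INSN was chosen and removed from the ready list
*/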
6143 /* This function is called when we have successfully scheduled a
6144 block. It uses the schedule stored in the scheduled_insns vector
6145 to rearrange the RTL. PREV_HEAD is used as the anchor to which we
6146 append the scheduled insns; TAIL is the insn after the scheduled
6147 block. TARGET_BB is the argument passed to schedule_block. */
6150 commit_schedule (rtx_insn *prev_head, rtx_insn *tail, basic_block *target_bb)
6155 last_scheduled_insn = prev_head;
6157 scheduled_insns.iterate (i, &insn);
6160 if (control_flow_insn_p (last_scheduled_insn)
6161 || current_sched_info->advance_target_bb (*target_bb, insn))
6163 *target_bb = current_sched_info->advance_target_bb (*target_bb, 0);
6169 x = next_real_insn (last_scheduled_insn);
6171 dump_new_block_header (1, *target_bb, x, tail);
6174 last_scheduled_insn = bb_note (*target_bb);
6177 if (current_sched_info->begin_move_insn)
6178 (*current_sched_info->begin_move_insn) (insn, last_scheduled_insn);
6179 move_insn (insn, last_scheduled_insn,
6180 current_sched_info->next_tail);
6181 if (!DEBUG_INSN_P (insn))
6182 reemit_notes (insn);
6183 last_scheduled_insn = insn;
6186 scheduled_insns.truncate (0);
6189 /* Examine all insns on the ready list and queue those which can't be
6190 issued in this cycle. TEMP_STATE is temporary scheduler state we
6191 can use as scratch space. If FIRST_CYCLE_INSN_P is true, no insns
6192 have been issued for the current cycle, which means it is valid to
6193 issue an asm statement.
6195 If SHADOWS_ONLY_P is true, we eliminate all real insns and only
6196 leave those for which SHADOW_P is true. If MODULO_EPILOGUE is true,
6197 we only leave insns which have an INSN_EXACT_TICK. */
6200 prune_ready_list (state_t temp_state, bool first_cycle_insn_p,
6201 bool shadows_only_p, bool modulo_epilogue_p)
6204 bool sched_group_found = false;
6205 int min_cost_group = 1;
6210 for (i = 0; i < ready.n_ready; i++)
6212 rtx_insn *insn = ready_element (&ready, i);
6213 if (SCHED_GROUP_P (insn))
6215 sched_group_found = true;
6220 /* Make two passes if there's a SCHED_GROUP_P insn; make sure to handle
6221 such an insn first and note its cost, then schedule all other insns
6222 for one cycle later. */
6223 for (pass = sched_group_found ? 0 : 1; pass < 2; )
6225 int n = ready.n_ready;
6226 for (i = 0; i < n; i++)
6228 rtx_insn *insn = ready_element (&ready, i);
6230 const char *reason = "resource conflict";
6232 if (DEBUG_INSN_P (insn))
6235 if (sched_group_found && !SCHED_GROUP_P (insn))
6239 cost = min_cost_group;
6240 reason = "not in sched group";
6242 else if (modulo_epilogue_p
6243 && INSN_EXACT_TICK (insn) == INVALID_TICK)
6245 cost = max_insn_queue_index;
6246 reason = "not an epilogue insn";
6248 else if (shadows_only_p && !SHADOW_P (insn))
6251 reason = "not a shadow";
6253 else if (recog_memoized (insn) < 0)
6255 if (!first_cycle_insn_p
6256 && (GET_CODE (PATTERN (insn)) == ASM_INPUT
6257 || asm_noperands (PATTERN (insn)) >= 0))
6261 else if (sched_pressure != SCHED_PRESSURE_NONE)
6263 if (sched_pressure == SCHED_PRESSURE_MODEL
6264 && INSN_TICK (insn) <= clock_var)
6266 memcpy (temp_state, curr_state, dfa_state_size);
6267 if (state_transition (temp_state, insn) >= 0)
6268 INSN_TICK (insn) = clock_var + 1;
6278 struct delay_pair *delay_entry;
6280 = delay_htab->find_with_hash (insn,
6281 htab_hash_pointer (insn));
6282 while (delay_entry && delay_cost == 0)
6284 delay_cost = estimate_shadow_tick (delay_entry);
6285 if (delay_cost > max_insn_queue_index)
6286 delay_cost = max_insn_queue_index;
6287 delay_entry = delay_entry->next_same_i1;
6291 memcpy (temp_state, curr_state, dfa_state_size);
6292 cost = state_transition (temp_state, insn);
6297 if (cost < delay_cost)
6300 reason = "shadow tick";
6305 if (SCHED_GROUP_P (insn) && cost > min_cost_group)
6306 min_cost_group = cost;
6307 ready_remove (&ready, i);
6308 /* Normally we'd want to queue INSN for COST cycles. However,
6309 if SCHED_GROUP_P is set, then we must ensure that nothing
6310 else comes between INSN and its predecessor. If there is
6311 some other insn ready to fire on the next cycle, then that
6312 invariant would be broken.
6314 So when SCHED_GROUP_P is set, just queue this insn for a
6316 queue_insn (insn, SCHED_GROUP_P (insn) ? 1 : cost, reason);
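/* A minimal sketch of the DFA cost probe used above; probe_issue_cost
   is a hypothetical helper shown only to make the idiom explicit.  The
   live state is copied into scratch space so curr_state is not
   disturbed, and a negative transition result means "issues now":

     static int
     probe_issue_cost (state_t scratch, rtx_insn *insn)
     {
       memcpy (scratch, curr_state, dfa_state_size);
       int cost = state_transition (scratch, insn);
       return cost < 0 ? 0 : cost;
     }
*/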
6326 /* Called when we detect that the schedule is impossible. We examine the
6327 backtrack queue to find the earliest insn that caused this condition. */
6329 static struct haifa_saved_data *
6330 verify_shadows (void)
6332 struct haifa_saved_data *save, *earliest_fail = NULL;
6333 for (save = backtrack_queue; save; save = save->next)
6336 struct delay_pair *pair = save->delay_pair;
6337 rtx_insn *i1 = pair->i1;
6339 for (; pair; pair = pair->next_same_i1)
6341 rtx_insn *i2 = pair->i2;
6343 if (QUEUE_INDEX (i2) == QUEUE_SCHEDULED)
6346 t = INSN_TICK (i1) + pair_delay (pair);
6349 if (sched_verbose >= 2)
6350 fprintf (sched_dump,
6351 ";;\t\tfailed delay requirements for %d/%d (%d->%d)"
6353 INSN_UID (pair->i1), INSN_UID (pair->i2),
6354 INSN_TICK (pair->i1), INSN_EXACT_TICK (pair->i2));
6355 earliest_fail = save;
6358 if (QUEUE_INDEX (i2) >= 0)
6360 int queued_for = INSN_TICK (i2);
6364 if (sched_verbose >= 2)
6365 fprintf (sched_dump,
6366 ";;\t\tfailed delay requirements for %d/%d"
6367 " (%d->%d), queued too late\n",
6368 INSN_UID (pair->i1), INSN_UID (pair->i2),
6369 INSN_TICK (pair->i1), INSN_EXACT_TICK (pair->i2));
6370 earliest_fail = save;
6377 return earliest_fail;
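/* Stated compactly, the requirement checked above is that for every
   recorded delay pair (I1, I2) with I1 scheduled, I2 can still fire no
   earlier than INSN_TICK (I1) + pair_delay (pair); the save entry of
   the earliest pair for which this has become impossible is returned
   as the backtrack point.  */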
6380 /* Print instructions together with useful scheduling information between
6381 HEAD and TAIL (inclusive). */
6383 dump_insn_stream (rtx_insn *head, rtx_insn *tail)
6385 fprintf (sched_dump, ";;\t| insn | prio |\n");
6387 rtx_insn *next_tail = NEXT_INSN (tail);
6388 for (rtx_insn *insn = head; insn != next_tail; insn = NEXT_INSN (insn))
6390 int priority = NOTE_P (insn) ? 0 : INSN_PRIORITY (insn);
6391 const char *pattern = (NOTE_P (insn)
6393 : str_pattern_slim (PATTERN (insn)));
6395 fprintf (sched_dump, ";;\t| %4d | %4d | %-30s ",
6396 INSN_UID (insn), priority, pattern);
6398 if (sched_verbose >= 4)
6400 if (NOTE_P (insn) || recog_memoized (insn) < 0)
6401 fprintf (sched_dump, "nothing");
6403 print_reservation (sched_dump, insn);
6405 fprintf (sched_dump, "\n");
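/* Illustrative output (made-up values):

     ;;	| insn | prio |
     ;;	|   17 |    3 | (set (reg:SI 65) (const_int 0))

   with the insn reservation appended when sched_verbose >= 4.  */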
6409 /* Use forward list scheduling to rearrange insns of block pointed to by
6410 TARGET_BB, possibly bringing insns from subsequent blocks in the same
6414 schedule_block (basic_block *target_bb, state_t init_state)
6417 bool success = modulo_ii == 0;
6418 struct sched_block_state ls;
6419 state_t temp_state = NULL; /* It is used for multipass scheduling. */
6420 int sort_p, advance, start_clock_var;
6422 /* Head/tail info for this block. */
6423 rtx_insn *prev_head = current_sched_info->prev_head;
6424 rtx_insn *next_tail = current_sched_info->next_tail;
6425 rtx_insn *head = NEXT_INSN (prev_head);
6426 rtx_insn *tail = PREV_INSN (next_tail);
6428 if ((current_sched_info->flags & DONT_BREAK_DEPENDENCIES) == 0
6429 && sched_pressure != SCHED_PRESSURE_MODEL && !sched_fusion)
6430 find_modifiable_mems (head, tail);
6432 /* We used to have code to avoid getting parameters moved from hard
6433 argument registers into pseudos.
6435 However, it was removed when it proved to be of marginal benefit
6436 and caused problems because schedule_block and compute_forward_dependences
6437 had different notions of what the "head" insn was. */
6439 gcc_assert (head != tail || INSN_P (head));
6441 haifa_recovery_bb_recently_added_p = false;
6443 backtrack_queue = NULL;
6448 dump_new_block_header (0, *target_bb, head, tail);
6450 if (sched_verbose >= 2)
6452 dump_insn_stream (head, tail);
6453 memset (&rank_for_schedule_stats, 0,
6454 sizeof (rank_for_schedule_stats));
6458 if (init_state == NULL)
6459 state_reset (curr_state);
6461 memcpy (curr_state, init_state, dfa_state_size);
6463 /* Clear the ready list. */
6464 ready.first = ready.veclen - 1;
6468 /* It is used for first cycle multipass scheduling. */
6469 temp_state = alloca (dfa_state_size);
6471 if (targetm.sched.init)
6472 targetm.sched.init (sched_dump, sched_verbose, ready.veclen);
6474 /* We start inserting insns after PREV_HEAD. */
6475 last_scheduled_insn = prev_head;
6476 last_nondebug_scheduled_insn = NULL_RTX;
6477 nonscheduled_insns_begin = NULL;
6479 gcc_assert ((NOTE_P (last_scheduled_insn)
6480 || DEBUG_INSN_P (last_scheduled_insn))
6481 && BLOCK_FOR_INSN (last_scheduled_insn) == *target_bb);
6483 /* Initialize INSN_QUEUE. Q_SIZE is the total number of insns in the
6488 insn_queue = XALLOCAVEC (rtx_insn_list *, max_insn_queue_index + 1);
6489 memset (insn_queue, 0, (max_insn_queue_index + 1) * sizeof (rtx));
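/* Descriptive note: INSN_QUEUE behaves as a circular buffer of insn
   lists.  An insn delayed by D cycles is linked into the slot
   NEXT_Q_AFTER (q_ptr, D), and queue_to_ready drains the slot that
   Q_PTR has advanced to on each cycle.  */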
6491 /* Start just before the beginning of time. */
6494 /* We need the queue and ready lists and clock_var to be initialized
6495 in try_ready () (which is called through init_ready_list ()). */
6496 (*current_sched_info->init_ready_list) ();
6499 sched_pressure_start_bb (*target_bb);
6501 /* The algorithm is O(n^2) in the number of ready insns at any given
6502 time in the worst case. Before reload we are more likely to have
6503 big lists so truncate them to a reasonable size. */
6504 if (!reload_completed
6505 && ready.n_ready - ready.n_debug > MAX_SCHED_READY_INSNS)
6507 ready_sort_debug (&ready);
6508 ready_sort_real (&ready);
6510 /* Find first free-standing insn past MAX_SCHED_READY_INSNS.
6511 If there are debug insns, we know they're first. */
6512 for (i = MAX_SCHED_READY_INSNS + ready.n_debug; i < ready.n_ready; i++)
6513 if (!SCHED_GROUP_P (ready_element (&ready, i)))
6516 if (sched_verbose >= 2)
6518 fprintf (sched_dump,
6519 ";;\t\tReady list on entry: %d insns: ", ready.n_ready);
6520 debug_ready_list (&ready);
6521 fprintf (sched_dump,
6522 ";;\t\t before reload => truncated to %d insns\n", i);
6525 /* Delay all insns past it for 1 cycle.  If the debug counter is
6526 activated, make an exception for the insn right after
6527 nonscheduled_insns_begin. */
6529 rtx_insn *skip_insn;
6531 if (dbg_cnt (sched_insn) == false)
6532 skip_insn = first_nonscheduled_insn ();
6536 while (i < ready.n_ready)
6540 insn = ready_remove (&ready, i);
6542 if (insn != skip_insn)
6543 queue_insn (insn, 1, "list truncated");
6546 ready_add (&ready, skip_insn, true);
6550 /* Now we can restore basic block notes and maintain precise cfg. */
6551 restore_bb_notes (*target_bb);
6553 last_clock_var = -1;
6557 gcc_assert (scheduled_insns.length () == 0);
6559 must_backtrack = false;
6560 modulo_insns_scheduled = 0;
6562 ls.modulo_epilogue = false;
6563 ls.first_cycle_insn_p = true;
6565 /* Loop until all the insns in BB are scheduled. */
6566 while ((*current_sched_info->schedule_more_p) ())
6568 perform_replacements_new_cycle ();
6571 start_clock_var = clock_var;
6575 advance_one_cycle ();
6577 /* Add to the ready list all pending insns that can be issued now.
6578 If there are no ready insns, increment clock until one
6579 is ready and add all pending insns at that point to the ready
6581 queue_to_ready (&ready);
6583 gcc_assert (ready.n_ready);
6585 if (sched_verbose >= 2)
6587 fprintf (sched_dump, ";;\t\tReady list after queue_to_ready:");
6588 debug_ready_list (&ready);
6590 advance -= clock_var - start_clock_var;
6592 while (advance > 0);
6594 if (ls.modulo_epilogue)
6596 int stage = clock_var / modulo_ii;
6597 if (stage > modulo_last_stage * 2 + 2)
6599 if (sched_verbose >= 2)
6600 fprintf (sched_dump,
6601 ";;\t\tmodulo scheduled succeeded at II %d\n",
6607 else if (modulo_ii > 0)
6609 int stage = clock_var / modulo_ii;
6610 if (stage > modulo_max_stages)
6612 if (sched_verbose >= 2)
6613 fprintf (sched_dump,
6614 ";;\t\tfailing schedule due to excessive stages\n");
6617 if (modulo_n_insns == modulo_insns_scheduled
6618 && stage > modulo_last_stage)
6620 if (sched_verbose >= 2)
6621 fprintf (sched_dump,
6622 ";;\t\tfound kernel after %d stages, II %d\n",
6624 ls.modulo_epilogue = true;
6628 prune_ready_list (temp_state, true, false, ls.modulo_epilogue);
6629 if (ready.n_ready == 0)
6634 ls.shadows_only_p = false;
6635 cycle_issued_insns = 0;
6636 ls.can_issue_more = issue_rate;
6643 if (sort_p && ready.n_ready > 0)
6645 /* Sort the ready list based on priority. This must be
6646 done every iteration through the loop, as schedule_insn
6647 may have readied additional insns that will not be
6648 sorted correctly. */
6649 ready_sort (&ready);
6651 if (sched_verbose >= 2)
6653 fprintf (sched_dump,
6654 ";;\t\tReady list after ready_sort: ");
6655 debug_ready_list (&ready);
6659 /* We don't want md sched reorder to even see debug insns, so put
6660 them out right away. */
6661 if (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0))
6662 && (*current_sched_info->schedule_more_p) ())
6664 while (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
6666 rtx_insn *insn = ready_remove_first (&ready);
6667 gcc_assert (DEBUG_INSN_P (insn));
6668 (*current_sched_info->begin_schedule_ready) (insn);
6669 scheduled_insns.safe_push (insn);
6670 last_scheduled_insn = insn;
6671 advance = schedule_insn (insn);
6672 gcc_assert (advance == 0);
6673 if (ready.n_ready > 0)
6674 ready_sort (&ready);
6678 if (ls.first_cycle_insn_p && !ready.n_ready)
6681 resume_after_backtrack:
6682 /* Allow the target to reorder the list, typically for
6683 better instruction bundling. */
6685 && (ready.n_ready == 0
6686 || !SCHED_GROUP_P (ready_element (&ready, 0))))
6688 if (ls.first_cycle_insn_p && targetm.sched.reorder)
6690 = targetm.sched.reorder (sched_dump, sched_verbose,
6691 ready_lastpos (&ready),
6692 &ready.n_ready, clock_var);
6693 else if (!ls.first_cycle_insn_p && targetm.sched.reorder2)
6695 = targetm.sched.reorder2 (sched_dump, sched_verbose,
6697 ? ready_lastpos (&ready) : NULL,
6698 &ready.n_ready, clock_var);
6701 restart_choose_ready:
6702 if (sched_verbose >= 2)
6704 fprintf (sched_dump, ";;\tReady list (t = %3d): ",
6706 debug_ready_list (&ready);
6707 if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
6708 print_curr_reg_pressure ();
6711 if (ready.n_ready == 0
6712 && ls.can_issue_more
6713 && reload_completed)
6715 /* Allow scheduling insns directly from the queue in case
6716 there's nothing better to do (ready list is empty) but
6717 there are still vacant dispatch slots in the current cycle. */
6718 if (sched_verbose >= 6)
6719 fprintf (sched_dump,";;\t\tSecond chance\n");
6720 memcpy (temp_state, curr_state, dfa_state_size);
6721 if (early_queue_to_ready (temp_state, &ready))
6722 ready_sort (&ready);
6725 if (ready.n_ready == 0
6726 || !ls.can_issue_more
6727 || state_dead_lock_p (curr_state)
6728 || !(*current_sched_info->schedule_more_p) ())
6731 /* Select and remove the insn from the ready list. */
6737 res = choose_ready (&ready, ls.first_cycle_insn_p, &insn);
6743 goto restart_choose_ready;
6745 gcc_assert (insn != NULL_RTX);
6748 insn = ready_remove_first (&ready);
6750 if (sched_pressure != SCHED_PRESSURE_NONE
6751 && INSN_TICK (insn) > clock_var)
6753 ready_add (&ready, insn, true);
6758 if (targetm.sched.dfa_new_cycle
6759 && targetm.sched.dfa_new_cycle (sched_dump, sched_verbose,
6760 insn, last_clock_var,
6761 clock_var, &sort_p))
6762 /* SORT_P is used by the target to override sorting
6763 of the ready list. This is needed when the target
6764 has modified its internal structures expecting that
6765 the insn will be issued next. As we need the insn
6766 to have the highest priority (so it will be returned by
6767 the ready_remove_first call above), we invoke
6768 ready_add (&ready, insn, true).
6769 But, still, there is one issue: INSN can be later
6770 discarded by scheduler's front end through
6771 current_sched_info->can_schedule_ready_p, hence, won't
6774 ready_add (&ready, insn, true);
6780 if (current_sched_info->can_schedule_ready_p
6781 && ! (*current_sched_info->can_schedule_ready_p) (insn))
6782 /* We normally get here only if we don't want to move
6783 insn from the split block. */
6785 TODO_SPEC (insn) = DEP_POSTPONED;
6786 goto restart_choose_ready;
6791 /* If this insn is the first part of a delay-slot pair, record a
6793 struct delay_pair *delay_entry;
6795 = delay_htab->find_with_hash (insn, htab_hash_pointer (insn));
6798 save_backtrack_point (delay_entry, ls);
6799 if (sched_verbose >= 2)
6800 fprintf (sched_dump, ";;\t\tsaving backtrack point\n");
6804 /* DECISION is made. */
6806 if (modulo_ii > 0 && INSN_UID (insn) < modulo_iter0_max_uid)
6808 modulo_insns_scheduled++;
6809 modulo_last_stage = clock_var / modulo_ii;
6811 if (TODO_SPEC (insn) & SPECULATIVE)
6812 generate_recovery_code (insn);
6814 if (targetm.sched.dispatch (NULL, IS_DISPATCH_ON))
6815 targetm.sched.dispatch_do (insn, ADD_TO_DISPATCH_WINDOW);
6817 /* Update counters, etc in the scheduler's front end. */
6818 (*current_sched_info->begin_schedule_ready) (insn);
6819 scheduled_insns.safe_push (insn);
6820 gcc_assert (NONDEBUG_INSN_P (insn));
6821 last_nondebug_scheduled_insn = last_scheduled_insn = insn;
6823 if (recog_memoized (insn) >= 0)
6825 memcpy (temp_state, curr_state, dfa_state_size);
6826 cost = state_transition (curr_state, insn);
6827 if (sched_pressure != SCHED_PRESSURE_WEIGHTED && !sched_fusion)
6828 gcc_assert (cost < 0);
6829 if (memcmp (temp_state, curr_state, dfa_state_size) != 0)
6830 cycle_issued_insns++;
6834 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
6835 || asm_noperands (PATTERN (insn)) >= 0);
6837 if (targetm.sched.variable_issue)
6839 targetm.sched.variable_issue (sched_dump, sched_verbose,
6840 insn, ls.can_issue_more);
6841 /* A naked CLOBBER or USE generates no instruction, so do
6842 not count them against the issue rate. */
6843 else if (GET_CODE (PATTERN (insn)) != USE
6844 && GET_CODE (PATTERN (insn)) != CLOBBER)
6845 ls.can_issue_more--;
6846 advance = schedule_insn (insn);
6848 if (SHADOW_P (insn))
6849 ls.shadows_only_p = true;
6851 /* After issuing an asm insn we should start a new cycle. */
6852 if (advance == 0 && asm_p)
6861 ls.first_cycle_insn_p = false;
6862 if (ready.n_ready > 0)
6863 prune_ready_list (temp_state, false, ls.shadows_only_p,
6864 ls.modulo_epilogue);
6868 if (!must_backtrack)
6869 for (i = 0; i < ready.n_ready; i++)
6871 rtx_insn *insn = ready_element (&ready, i);
6872 if (INSN_EXACT_TICK (insn) == clock_var)
6874 must_backtrack = true;
6879 if (must_backtrack && modulo_ii > 0)
6881 if (modulo_backtracks_left == 0)
6883 modulo_backtracks_left--;
6885 while (must_backtrack)
6887 struct haifa_saved_data *failed;
6888 rtx_insn *failed_insn;
6890 must_backtrack = false;
6891 failed = verify_shadows ();
6892 gcc_assert (failed);
6894 failed_insn = failed->delay_pair->i1;
6895 /* Clear these queues. */
6896 perform_replacements_new_cycle ();
6897 toggle_cancelled_flags (false);
6898 unschedule_insns_until (failed_insn);
6899 while (failed != backtrack_queue)
6900 free_topmost_backtrack_point (true);
6901 restore_last_backtrack_point (&ls);
6902 if (sched_verbose >= 2)
6903 fprintf (sched_dump, ";;\t\trewind to cycle %d\n", clock_var);
6904 /* Delay by at least a cycle. This could cause additional
6906 queue_insn (failed_insn, 1, "backtracked");
6910 if (ready.n_ready > 0)
6911 goto resume_after_backtrack;
6914 if (clock_var == 0 && ls.first_cycle_insn_p)
6920 ls.first_cycle_insn_p = true;
6922 if (ls.modulo_epilogue)
6925 if (!ls.first_cycle_insn_p || advance)
6926 advance_one_cycle ();
6927 perform_replacements_new_cycle ();
6930 /* Once again, debug insn suckiness: they can be on the ready list
6931 even if they have unresolved dependencies. To make our view
6932 of the world consistent, remove such "ready" insns. */
6933 restart_debug_insn_loop:
6934 for (i = ready.n_ready - 1; i >= 0; i--)
6938 x = ready_element (&ready, i);
6939 if (DEPS_LIST_FIRST (INSN_HARD_BACK_DEPS (x)) != NULL
6940 || DEPS_LIST_FIRST (INSN_SPEC_BACK_DEPS (x)) != NULL)
6942 ready_remove (&ready, i);
6943 goto restart_debug_insn_loop;
6946 for (i = ready.n_ready - 1; i >= 0; i--)
6950 x = ready_element (&ready, i);
6951 resolve_dependencies (x);
6953 for (i = 0; i <= max_insn_queue_index; i++)
6955 rtx_insn_list *link;
6956 while ((link = insn_queue[i]) != NULL)
6958 rtx_insn *x = link->insn ();
6959 insn_queue[i] = link->next ();
6960 QUEUE_INDEX (x) = QUEUE_NOWHERE;
6961 free_INSN_LIST_node (link);
6962 resolve_dependencies (x);
6968 undo_all_replacements ();
6973 fprintf (sched_dump, ";;\tReady list (final): ");
6974 debug_ready_list (&ready);
6977 if (modulo_ii == 0 && current_sched_info->queue_must_finish_empty)
6978 /* Sanity check -- queue must be empty now. Meaningless if region has
6980 gcc_assert (!q_size && !ready.n_ready && !ready.n_debug);
6981 else if (modulo_ii == 0)
6983 /* We must maintain QUEUE_INDEX between blocks in region. */
6984 for (i = ready.n_ready - 1; i >= 0; i--)
6988 x = ready_element (&ready, i);
6989 QUEUE_INDEX (x) = QUEUE_NOWHERE;
6990 TODO_SPEC (x) = HARD_DEP;
6994 for (i = 0; i <= max_insn_queue_index; i++)
6996 rtx_insn_list *link;
6997 for (link = insn_queue[i]; link; link = link->next ())
7002 QUEUE_INDEX (x) = QUEUE_NOWHERE;
7003 TODO_SPEC (x) = HARD_DEP;
7005 free_INSN_LIST_list (&insn_queue[i]);
7009 if (sched_pressure == SCHED_PRESSURE_MODEL)
7010 model_end_schedule ();
7014 commit_schedule (prev_head, tail, target_bb);
7016 fprintf (sched_dump, ";; total time = %d\n", clock_var);
7019 last_scheduled_insn = tail;
7021 scheduled_insns.truncate (0);
7023 if (!current_sched_info->queue_must_finish_empty
7024 || haifa_recovery_bb_recently_added_p)
7026 /* INSN_TICK (minimum clock tick at which the insn becomes
7027 ready) may not be correct for the insn in the subsequent
7028 blocks of the region. We should use a correct value of
7029 `clock_var' or modify INSN_TICK. It is better to keep
7030 clock_var value equal to 0 at the start of a basic block.
7031 Therefore we modify INSN_TICK here. */
7032 fix_inter_tick (NEXT_INSN (prev_head), last_scheduled_insn);
7035 if (targetm.sched.finish)
7037 targetm.sched.finish (sched_dump, sched_verbose);
7038 /* Target might have added some instructions to the scheduled block
7039 in its md_finish () hook. These new insns don't have any data
7040 initialized, and to identify them we extend h_i_d so that they'll
7042 sched_extend_luids ();
7045 /* Update head/tail boundaries. */
7046 head = NEXT_INSN (prev_head);
7047 tail = last_scheduled_insn;
7051 fprintf (sched_dump, ";; new head = %d\n;; new tail = %d\n",
7052 INSN_UID (head), INSN_UID (tail));
7054 if (sched_verbose >= 2)
7056 dump_insn_stream (head, tail);
7057 print_rank_for_schedule_stats (";; TOTAL ", &rank_for_schedule_stats,
7061 fprintf (sched_dump, "\n");
7064 head = restore_other_notes (head, NULL);
7066 current_sched_info->head = head;
7067 current_sched_info->tail = tail;
7069 free_backtrack_queue ();
7074 /* Set_priorities: compute priority of each insn in the block. */
7077 set_priorities (rtx_insn *head, rtx_insn *tail)
7081 int sched_max_insns_priority =
7082 current_sched_info->sched_max_insns_priority;
7083 rtx_insn *prev_head;
7085 if (head == tail && ! INSN_P (head))
7090 prev_head = PREV_INSN (head);
7091 for (insn = tail; insn != prev_head; insn = PREV_INSN (insn))
7097 (void) priority (insn);
7099 gcc_assert (INSN_PRIORITY_KNOWN (insn));
7101 sched_max_insns_priority = MAX (sched_max_insns_priority,
7102 INSN_PRIORITY (insn));
7105 current_sched_info->sched_max_insns_priority = sched_max_insns_priority;
7110 /* Set dump and sched_verbose for the desired debugging output. If no
7111 dump-file was specified, but -fsched-verbose=N (any N), print to stderr.
7112 For -fsched-verbose=N, N>=10, print everything to stderr. */
7114 setup_sched_dump (void)
7116 sched_verbose = sched_verbose_param;
7117 if (sched_verbose_param == 0 && dump_file)
7119 sched_dump = ((sched_verbose_param >= 10 || !dump_file)
7120 ? stderr : dump_file);
7123 /* Allocate data for register pressure sensitive scheduling. */
7125 alloc_global_sched_pressure_data (void)
7127 if (sched_pressure != SCHED_PRESSURE_NONE)
7129 int i, max_regno = max_reg_num ();
7131 if (sched_dump != NULL)
7132 /* We need info about pseudos for rtl dumps about pseudo
7133 classes and costs. */
7134 regstat_init_n_sets_and_refs ();
7135 ira_set_pseudo_classes (true, sched_verbose ? sched_dump : NULL);
7136 sched_regno_pressure_class
7137 = (enum reg_class *) xmalloc (max_regno * sizeof (enum reg_class));
7138 for (i = 0; i < max_regno; i++)
7139 sched_regno_pressure_class[i]
7140 = (i < FIRST_PSEUDO_REGISTER
7141 ? ira_pressure_class_translate[REGNO_REG_CLASS (i)]
7142 : ira_pressure_class_translate[reg_allocno_class (i)]);
7143 curr_reg_live = BITMAP_ALLOC (NULL);
7144 if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
7146 saved_reg_live = BITMAP_ALLOC (NULL);
7147 region_ref_regs = BITMAP_ALLOC (NULL);
7150 /* Calculate number of CALL_USED_REGS in register classes that
7151 we calculate register pressure for. */
7152 for (int c = 0; c < ira_pressure_classes_num; ++c)
7154 enum reg_class cl = ira_pressure_classes[c];
7156 call_used_regs_num[cl] = 0;
7158 for (int i = 0; i < ira_class_hard_regs_num[cl]; ++i)
7159 if (call_used_regs[ira_class_hard_regs[cl][i]])
7160 ++call_used_regs_num[cl];
7165 /* Free data for register pressure sensitive scheduling. Also called
7166 from schedule_region when stopping sched-pressure early. */
7168 free_global_sched_pressure_data (void)
7170 if (sched_pressure != SCHED_PRESSURE_NONE)
7172 if (regstat_n_sets_and_refs != NULL)
7173 regstat_free_n_sets_and_refs ();
7174 if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
7176 BITMAP_FREE (region_ref_regs);
7177 BITMAP_FREE (saved_reg_live);
7179 BITMAP_FREE (curr_reg_live);
7180 free (sched_regno_pressure_class);
7184 /* Initialize some global state for the scheduler. This function works
7185 with the common data shared between all the schedulers. It is called
7186 from the scheduler specific initialization routine. */
7191 /* Disable speculative loads when cc0 is defined.  */
7193 flag_schedule_speculative_load = 0;
7196 if (targetm.sched.dispatch (NULL, IS_DISPATCH_ON))
7197 targetm.sched.dispatch_do (NULL, DISPATCH_INIT);
7199 if (live_range_shrinkage_p)
7200 sched_pressure = SCHED_PRESSURE_WEIGHTED;
7201 else if (flag_sched_pressure
7202 && !reload_completed
7203 && common_sched_info->sched_pass_id == SCHED_RGN_PASS)
7204 sched_pressure = ((enum sched_pressure_algorithm)
7205 PARAM_VALUE (PARAM_SCHED_PRESSURE_ALGORITHM));
7207 sched_pressure = SCHED_PRESSURE_NONE;
7209 if (sched_pressure != SCHED_PRESSURE_NONE)
7210 ira_setup_eliminable_regset ();
7212 /* Initialize SPEC_INFO. */
7213 if (targetm.sched.set_sched_flags)
7215 spec_info = &spec_info_var;
7216 targetm.sched.set_sched_flags (spec_info);
7218 if (spec_info->mask != 0)
7220 spec_info->data_weakness_cutoff =
7221 (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF) * MAX_DEP_WEAK) / 100;
7222 spec_info->control_weakness_cutoff =
7223 (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF)
7224 * REG_BR_PROB_BASE) / 100;
7227 /* So we won't read anything accidentally. */
7232 /* So we won't read anything accidentally. */
7235 /* Initialize issue_rate. */
7236 if (targetm.sched.issue_rate)
7237 issue_rate = targetm.sched.issue_rate ();
7241 if (targetm.sched.first_cycle_multipass_dfa_lookahead
7242 /* Don't use max_issue with reg_pressure scheduling. Multipass
7243 scheduling and reg_pressure scheduling undo each other's decisions. */
7244 && sched_pressure == SCHED_PRESSURE_NONE)
7245 dfa_lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
7249 /* Set to "0" so that we recalculate. */
7250 max_lookahead_tries = 0;
7252 if (targetm.sched.init_dfa_pre_cycle_insn)
7253 targetm.sched.init_dfa_pre_cycle_insn ();
7255 if (targetm.sched.init_dfa_post_cycle_insn)
7256 targetm.sched.init_dfa_post_cycle_insn ();
7259 dfa_state_size = state_size ();
7261 init_alias_analysis ();
7264 df_set_flags (DF_LR_RUN_DCE);
7265 df_note_add_problem ();
7267 /* More problems needed for interloop dep calculation in SMS. */
7268 if (common_sched_info->sched_pass_id == SCHED_SMS_PASS)
7270 df_rd_add_problem ();
7271 df_chain_add_problem (DF_DU_CHAIN + DF_UD_CHAIN);
7276 /* Do not run DCE after reload, as this can kill nops inserted
7278 if (reload_completed)
7279 df_clear_flags (DF_LR_RUN_DCE);
7281 regstat_compute_calls_crossed ();
7283 if (targetm.sched.init_global)
7284 targetm.sched.init_global (sched_dump, sched_verbose, get_max_uid () + 1);
7286 alloc_global_sched_pressure_data ();
7288 curr_state = xmalloc (dfa_state_size);
7291 static void haifa_init_only_bb (basic_block, basic_block);
7293 /* Initialize data structures specific to the Haifa scheduler. */
7295 haifa_sched_init (void)
7297 setup_sched_dump ();
7300 scheduled_insns.create (0);
7302 if (spec_info != NULL)
7304 sched_deps_info->use_deps_list = 1;
7305 sched_deps_info->generate_spec_deps = 1;
7308 /* Initialize luids, dependency caches, target and h_i_d for the
7312 bbs.create (n_basic_blocks_for_fn (cfun));
7317 FOR_EACH_BB_FN (bb, cfun)
7318 bbs.quick_push (bb);
7319 sched_init_luids (bbs);
7320 sched_deps_init (true);
7321 sched_extend_target ();
7322 haifa_init_h_i_d (bbs);
7327 sched_init_only_bb = haifa_init_only_bb;
7328 sched_split_block = sched_split_block_1;
7329 sched_create_empty_bb = sched_create_empty_bb_1;
7330 haifa_recovery_bb_ever_added_p = false;
7332 nr_begin_data = nr_begin_control = nr_be_in_data = nr_be_in_control = 0;
7333 before_recovery = 0;
7339 /* Finish work with the data specific to the Haifa scheduler. */
7341 haifa_sched_finish (void)
7343 sched_create_empty_bb = NULL;
7344 sched_split_block = NULL;
7345 sched_init_only_bb = NULL;
7347 if (spec_info && spec_info->dump)
7349 char c = reload_completed ? 'a' : 'b';
7351 fprintf (spec_info->dump,
7352 ";; %s:\n", current_function_name ());
7354 fprintf (spec_info->dump,
7355 ";; Procedure %cr-begin-data-spec motions == %d\n",
7357 fprintf (spec_info->dump,
7358 ";; Procedure %cr-be-in-data-spec motions == %d\n",
7360 fprintf (spec_info->dump,
7361 ";; Procedure %cr-begin-control-spec motions == %d\n",
7362 c, nr_begin_control);
7363 fprintf (spec_info->dump,
7364 ";; Procedure %cr-be-in-control-spec motions == %d\n",
7365 c, nr_be_in_control);
7368 scheduled_insns.release ();
7370 /* Finalize h_i_d, dependency caches, and luids for the whole
7371 function. Target will be finalized in md_global_finish (). */
7372 sched_deps_finish ();
7373 sched_finish_luids ();
7374 current_sched_info = NULL;
7379 /* Free global data used during insn scheduling. This function works with
7380 the common data shared between the schedulers. */
7385 haifa_finish_h_i_d ();
7386 free_global_sched_pressure_data ();
7389 if (targetm.sched.finish_global)
7390 targetm.sched.finish_global (sched_dump, sched_verbose);
7392 end_alias_analysis ();
7394 regstat_free_calls_crossed ();
7399 /* Free all delay_pair structures that were recorded. */
7401 free_delay_pairs (void)
7405 delay_htab->empty ();
7406 delay_htab_i2->empty ();
7410 /* Fix INSN_TICKs of the instructions in the current block as well as
7411 INSN_TICKs of their dependents.
7412 HEAD and TAIL are the begin and the end of the current scheduled block. */
7414 fix_inter_tick (rtx_insn *head, rtx_insn *tail)
7416 /* Set of instructions with corrected INSN_TICK. */
7417 bitmap_head processed;
7418 /* ??? It is doubtful whether we should assume that cycle advance happens on
7419 basic block boundaries.  Basically, insns that are unconditionally ready
7420 at the start of the block are preferable to those which have
7421 a one cycle dependency on an insn from the previous block.
7422 int next_clock = clock_var + 1;
7424 bitmap_initialize (&processed, 0);
7426 /* Iterate over the scheduled instructions and fix their INSN_TICKs and
7427 the INSN_TICKs of dependent instructions, so that INSN_TICKs are consistent
7428 across different blocks. */
7429 for (tail = NEXT_INSN (tail); head != tail; head = NEXT_INSN (head))
7434 sd_iterator_def sd_it;
7437 tick = INSN_TICK (head);
7438 gcc_assert (tick >= MIN_TICK);
7440 /* Fix INSN_TICK of instruction from just scheduled block. */
7441 if (bitmap_set_bit (&processed, INSN_LUID (head)))
7445 if (tick < MIN_TICK)
7448 INSN_TICK (head) = tick;
7451 if (DEBUG_INSN_P (head))
7454 FOR_EACH_DEP (head, SD_LIST_RES_FORW, sd_it, dep)
7458 next = DEP_CON (dep);
7459 tick = INSN_TICK (next);
7461 if (tick != INVALID_TICK
7462 /* If NEXT has its INSN_TICK calculated, fix it.
7463 If not - it will be properly calculated from
7464 scratch later in fix_tick_ready. */
7465 && bitmap_set_bit (&processed, INSN_LUID (next)))
7469 if (tick < MIN_TICK)
7472 if (tick > INTER_TICK (next))
7473 INTER_TICK (next) = tick;
7475 tick = INTER_TICK (next);
7477 INSN_TICK (next) = tick;
7482 bitmap_clear (&processed);
7485 /* Check if NEXT is ready to be added to the ready or queue list.
7486 If "yes", add it to the proper list.
7488 -1 - is not ready yet,
7489 0 - added to the ready list,
7490 0 < N - queued for N cycles. */
7492 try_ready (rtx_insn *next)
7494 ds_t old_ts, new_ts;
7496 old_ts = TODO_SPEC (next);
7498 gcc_assert (!(old_ts & ~(SPECULATIVE | HARD_DEP | DEP_CONTROL | DEP_POSTPONED))
7499 && (old_ts == HARD_DEP
7500 || old_ts == DEP_POSTPONED
7501 || (old_ts & SPECULATIVE)
7502 || old_ts == DEP_CONTROL));
7504 new_ts = recompute_todo_spec (next, false);
7506 if (new_ts & (HARD_DEP | DEP_POSTPONED))
7507 gcc_assert (new_ts == old_ts
7508 && QUEUE_INDEX (next) == QUEUE_NOWHERE);
7509 else if (current_sched_info->new_ready)
7510 new_ts = current_sched_info->new_ready (next, new_ts);
7512 /* * if !(old_ts & SPECULATIVE) (e.g. HARD_DEP or 0), then insn might
7513 have its original pattern or changed (speculative) one. This is due
7514 to changing ebb in region scheduling.
7515 * But if (old_ts & SPECULATIVE), then we are pretty sure that insn
7516 has speculative pattern.
7518 We can't assert (!(new_ts & HARD_DEP) || new_ts == old_ts) here because
7519 control-speculative NEXT could have been discarded by sched-rgn.c
7520 (the same case as when discarded by can_schedule_ready_p ()). */
7522 if ((new_ts & SPECULATIVE)
7523 /* If (old_ts == new_ts), then (old_ts & SPECULATIVE) and we don't
7524 need to change anything. */
7525 && new_ts != old_ts)
7530 gcc_assert ((new_ts & SPECULATIVE) && !(new_ts & ~SPECULATIVE));
7532 res = haifa_speculate_insn (next, new_ts, &new_pat);
7537 /* It would be nice to change DEP_STATUS of all dependences,
7538 which have ((DEP_STATUS & SPECULATIVE) == new_ts) to HARD_DEP,
7539 so we won't reanalyze anything. */
7544 /* We follow the rule that every speculative insn
7545 has non-null ORIG_PAT. */
7546 if (!ORIG_PAT (next))
7547 ORIG_PAT (next) = PATTERN (next);
7551 if (!ORIG_PAT (next))
7552 /* If we are going to overwrite the original pattern of insn,
7554 ORIG_PAT (next) = PATTERN (next);
7556 res = haifa_change_pattern (next, new_pat);
7565 /* We need to restore pattern only if (new_ts == 0), because otherwise it is
7566 either correct (new_ts & SPECULATIVE),
7567 or we simply don't care (new_ts & HARD_DEP). */
7569 gcc_assert (!ORIG_PAT (next)
7570 || !IS_SPECULATION_BRANCHY_CHECK_P (next));
7572 TODO_SPEC (next) = new_ts;
7574 if (new_ts & (HARD_DEP | DEP_POSTPONED))
7576 /* We can't assert (QUEUE_INDEX (next) == QUEUE_NOWHERE) here because
7577 control-speculative NEXT could have been discarded by sched-rgn.c
7578 (the same case as when discarded by can_schedule_ready_p ()). */
7579 /*gcc_assert (QUEUE_INDEX (next) == QUEUE_NOWHERE);*/
7581 change_queue_index (next, QUEUE_NOWHERE);
7585 else if (!(new_ts & BEGIN_SPEC)
7586 && ORIG_PAT (next) && PREDICATED_PAT (next) == NULL_RTX
7587 && !IS_SPECULATION_CHECK_P (next))
7588 /* We should change the pattern of every previously speculative
7589 instruction - and we determine whether NEXT was speculative by using
7590 the ORIG_PAT field.  The one exception: speculation checks also have
7591 ORIG_PAT set, so skip them.  */
7593 bool success = haifa_change_pattern (next, ORIG_PAT (next));
7594 gcc_assert (success);
7595 ORIG_PAT (next) = 0;
7598 if (sched_verbose >= 2)
7600 fprintf (sched_dump, ";;\t\tdependencies resolved: insn %s",
7601 (*current_sched_info->print_insn) (next, 0));
7603 if (spec_info && spec_info->dump)
7605 if (new_ts & BEGIN_DATA)
7606 fprintf (spec_info->dump, "; data-spec;");
7607 if (new_ts & BEGIN_CONTROL)
7608 fprintf (spec_info->dump, "; control-spec;");
7609 if (new_ts & BE_IN_CONTROL)
7610 fprintf (spec_info->dump, "; in-control-spec;");
7612 if (TODO_SPEC (next) & DEP_CONTROL)
7613 fprintf (sched_dump, " predicated");
7614 fprintf (sched_dump, "\n");
7617 adjust_priority (next);
7619 return fix_tick_ready (next);
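/* A minimal sketch of the return-value contract documented above
   (hypothetical caller):

     int n = try_ready (next);
     if (n < 0)
       ;   // not ready yet
     else if (n == 0)
       ;   // NEXT went onto the ready list
     else
       ;   // NEXT was queued for N cycles
*/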
7622 /* Calculate INSN_TICK of NEXT and add it to either ready or queue list. */
7624 fix_tick_ready (rtx_insn *next)
7628 if (!DEBUG_INSN_P (next) && !sd_lists_empty_p (next, SD_LIST_RES_BACK))
7631 sd_iterator_def sd_it;
7634 tick = INSN_TICK (next);
7635 /* If TICK is not equal to INVALID_TICK, then update
7636 INSN_TICK of NEXT with the most recent resolved dependence
7637 cost. Otherwise, recalculate from scratch. */
7638 full_p = (tick == INVALID_TICK);
7640 FOR_EACH_DEP (next, SD_LIST_RES_BACK, sd_it, dep)
7642 rtx_insn *pro = DEP_PRO (dep);
7645 gcc_assert (INSN_TICK (pro) >= MIN_TICK);
7647 tick1 = INSN_TICK (pro) + dep_cost (dep);
7658 INSN_TICK (next) = tick;
7660 delay = tick - clock_var;
7661 if (delay <= 0 || sched_pressure != SCHED_PRESSURE_NONE || sched_fusion)
7662 delay = QUEUE_READY;
7664 change_queue_index (next, delay);
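/* As a recurrence, the loop above computes (sketch):

     INSN_TICK (next) = max over resolved back deps D of
                          INSN_TICK (DEP_PRO (D)) + dep_cost (D)

   and DELAY = INSN_TICK (next) - clock_var then selects the ready list
   (DELAY <= 0, or pressure scheduling, or fusion) versus the queue.  */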
7669 /* Move NEXT to the proper queue list with (DELAY >= 1),
7670 or add it to the ready list (DELAY == QUEUE_READY),
7671 or remove it from ready and queue lists at all (DELAY == QUEUE_NOWHERE). */
7673 change_queue_index (rtx_insn *next, int delay)
7675 int i = QUEUE_INDEX (next);
7677 gcc_assert (QUEUE_NOWHERE <= delay && delay <= max_insn_queue_index
7679 gcc_assert (i != QUEUE_SCHEDULED);
7681 if ((delay > 0 && NEXT_Q_AFTER (q_ptr, delay) == i)
7682 || (delay < 0 && delay == i))
7683 /* We have nothing to do. */
7686 /* Remove NEXT from wherever it is now. */
7687 if (i == QUEUE_READY)
7688 ready_remove_insn (next);
7690 queue_remove (next);
7692 /* Add it to the proper place. */
7693 if (delay == QUEUE_READY)
7694 ready_add (readyp, next, false);
7695 else if (delay >= 1)
7696 queue_insn (next, delay, "change queue index");
7698 if (sched_verbose >= 2)
7700 fprintf (sched_dump, ";;\t\ttick updated: insn %s",
7701 (*current_sched_info->print_insn) (next, 0));
7703 if (delay == QUEUE_READY)
7704 fprintf (sched_dump, " into ready\n");
7705 else if (delay >= 1)
7706 fprintf (sched_dump, " into queue with cost=%d\n", delay);
7708 fprintf (sched_dump, " removed from ready or queue lists\n");
7712 static int sched_ready_n_insns = -1;
7714 /* Initialize per region data structures. */
7716 sched_extend_ready_list (int new_sched_ready_n_insns)
7720 if (sched_ready_n_insns == -1)
7721 /* At the first call we need to initialize one more choice_stack
7725 sched_ready_n_insns = 0;
7726 scheduled_insns.reserve (new_sched_ready_n_insns);
7729 i = sched_ready_n_insns + 1;
7731 ready.veclen = new_sched_ready_n_insns + issue_rate;
7732 ready.vec = XRESIZEVEC (rtx_insn *, ready.vec, ready.veclen);
7734 gcc_assert (new_sched_ready_n_insns >= sched_ready_n_insns);
7736 ready_try = (signed char *) xrecalloc (ready_try, new_sched_ready_n_insns,
7737 sched_ready_n_insns,
7738 sizeof (*ready_try));
7740 /* We allocate +1 element to save initial state in the choice_stack[0]
7742 choice_stack = XRESIZEVEC (struct choice_entry, choice_stack,
7743 new_sched_ready_n_insns + 1);
7745 for (; i <= new_sched_ready_n_insns; i++)
7747 choice_stack[i].state = xmalloc (dfa_state_size);
7749 if (targetm.sched.first_cycle_multipass_init)
7750 targetm.sched.first_cycle_multipass_init (&(choice_stack[i]
7754 sched_ready_n_insns = new_sched_ready_n_insns;
7757 /* Free per region data structures. */
7759 sched_finish_ready_list (void)
7770 for (i = 0; i <= sched_ready_n_insns; i++)
7772 if (targetm.sched.first_cycle_multipass_fini)
7773 targetm.sched.first_cycle_multipass_fini (&(choice_stack[i]
7776 free (choice_stack [i].state);
7778 free (choice_stack);
7779 choice_stack = NULL;
7781 sched_ready_n_insns = -1;
7785 haifa_luid_for_non_insn (rtx x)
7787 gcc_assert (NOTE_P (x) || LABEL_P (x));
7792 /* Generates recovery code for INSN. */
7794 generate_recovery_code (rtx_insn *insn)
7796 if (TODO_SPEC (insn) & BEGIN_SPEC)
7797 begin_speculative_block (insn);
7799 /* Here we have insn with no dependencies to
7800 instructions other than CHECK_SPEC ones.  */
7802 if (TODO_SPEC (insn) & BE_IN_SPEC)
7803 add_to_speculative_block (insn);
7807 Tries to add speculative dependencies of type FS between the
7808 consumers of INSN and TWIN.  */
7810 process_insn_forw_deps_be_in_spec (rtx insn, rtx_insn *twin, ds_t fs)
7812 sd_iterator_def sd_it;
7815 FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
7820 consumer = DEP_CON (dep);
7822 ds = DEP_STATUS (dep);
7824 if (/* If we want to create speculative dep. */
7826 /* And we can do that because this is a true dep. */
7827 && (ds & DEP_TYPES) == DEP_TRUE)
7829 gcc_assert (!(ds & BE_IN_SPEC));
7831 if (/* If this dep can be overcome with 'begin speculation'. */
7833 /* Then we have a choice: keep the dep 'begin speculative'
7834 or transform it into 'be in speculative'. */
7836 if (/* In try_ready we assert that if insn once became ready
7837 it can be removed from the ready (or queue) list only
7838 due to backend decision. Hence we can't let the
7839 probability of the speculative dep decrease.  */
7840 ds_weak (ds) <= ds_weak (fs))
7844 new_ds = (ds & ~BEGIN_SPEC) | fs;
7846 if (/* consumer can 'be in speculative'. */
7847 sched_insn_is_legitimate_for_speculation_p (consumer,
7849 /* Transform it to be in speculative. */
7854 /* Mark the dep as 'be in speculative'. */
7859 dep_def _new_dep, *new_dep = &_new_dep;
7861 init_dep_1 (new_dep, twin, consumer, DEP_TYPE (dep), ds);
7862 sd_add_dep (new_dep, false);
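/* Worked example (illustrative): if DS is (DEP_TRUE | BEGIN_DATA) and
   FS carries BE_IN_DATA, the NEW_DS computed above is
   (DEP_TRUE | BE_IN_DATA): the 'begin' speculation bits are traded for
   the 'be in' bits the caller derived from the original BEGIN
   speculation.  */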
7867 /* Generates recovery code for BEGIN speculative INSN. */
7869 begin_speculative_block (rtx_insn *insn)
7871 if (TODO_SPEC (insn) & BEGIN_DATA)
7873 if (TODO_SPEC (insn) & BEGIN_CONTROL)
7876 create_check_block_twin (insn, false);
7878 TODO_SPEC (insn) &= ~BEGIN_SPEC;
7881 static void haifa_init_insn (rtx_insn *);
7883 /* Generates recovery code for BE_IN speculative INSN. */
7885 add_to_speculative_block (rtx_insn *insn)
7888 sd_iterator_def sd_it;
7890 rtx_insn_list *twins = NULL;
7891 rtx_vec_t priorities_roots;
7893 ts = TODO_SPEC (insn);
7894 gcc_assert (!(ts & ~BE_IN_SPEC));
7896 if (ts & BE_IN_DATA)
7898 if (ts & BE_IN_CONTROL)
7901 TODO_SPEC (insn) &= ~BE_IN_SPEC;
7902 gcc_assert (!TODO_SPEC (insn));
7904 DONE_SPEC (insn) |= ts;
7906 /* First we convert all simple checks to branchy. */
7907 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
7908 sd_iterator_cond (&sd_it, &dep);)
7910 rtx_insn *check = DEP_PRO (dep);
7912 if (IS_SPECULATION_SIMPLE_CHECK_P (check))
7914 create_check_block_twin (check, true);
7916 /* Restart search. */
7917 sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
7920 /* Continue search. */
7921 sd_iterator_next (&sd_it);
7924 priorities_roots.create (0);
7925 clear_priorities (insn, &priorities_roots);
7929 rtx_insn *check, *twin;
7932 /* Get the first backward dependency of INSN. */
7933 sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
7934 if (!sd_iterator_cond (&sd_it, &dep))
7935 /* INSN has no backward dependencies left. */
7938 gcc_assert ((DEP_STATUS (dep) & BEGIN_SPEC) == 0
7939 && (DEP_STATUS (dep) & BE_IN_SPEC) != 0
7940 && (DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);
7942 check = DEP_PRO (dep);
7944 gcc_assert (!IS_SPECULATION_CHECK_P (check) && !ORIG_PAT (check)
7945 && QUEUE_INDEX (check) == QUEUE_NOWHERE);
7947 rec = BLOCK_FOR_INSN (check);
7949 twin = emit_insn_before (copy_insn (PATTERN (insn)), BB_END (rec));
7950 haifa_init_insn (twin);
7952 sd_copy_back_deps (twin, insn, true);
7954 if (sched_verbose && spec_info->dump)
7955 /* INSN_BB (insn) isn't determined for twin insns yet.
7956 So we can't use current_sched_info->print_insn. */
7957 fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
7958 INSN_UID (twin), rec->index);
7960 twins = alloc_INSN_LIST (twin, twins);
7962 /* Add dependences between TWIN and all appropriate
7963 instructions from REC. */
7964 FOR_EACH_DEP (insn, SD_LIST_SPEC_BACK, sd_it, dep)
7966 rtx_insn *pro = DEP_PRO (dep);
7968 gcc_assert (DEP_TYPE (dep) == REG_DEP_TRUE);
7970 /* INSN might have dependencies from the instructions from
7971 several recovery blocks. At this iteration we process those
7972 producers that reside in REC. */
7973 if (BLOCK_FOR_INSN (pro) == rec)
7975 dep_def _new_dep, *new_dep = &_new_dep;
7977 init_dep (new_dep, pro, twin, REG_DEP_TRUE);
7978 sd_add_dep (new_dep, false);
7982 process_insn_forw_deps_be_in_spec (insn, twin, ts);
7984 /* Remove all dependencies between INSN and insns in REC. */
7985 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
7986 sd_iterator_cond (&sd_it, &dep);)
7988 rtx_insn *pro = DEP_PRO (dep);
7990 if (BLOCK_FOR_INSN (pro) == rec)
7991 sd_delete_dep (sd_it);
7993 sd_iterator_next (&sd_it);
7997 /* We couldn't have added the dependencies between INSN and TWINS earlier
7998 because that would make TWINS appear in the INSN_BACK_DEPS (INSN). */
8002 rtx_insn_list *next_node;
8004 twin = twins->insn ();
8007 dep_def _new_dep, *new_dep = &_new_dep;
8009 init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
8010 sd_add_dep (new_dep, false);
8013 next_node = twins->next ();
8014 free_INSN_LIST_node (twins);
8018 calc_priorities (priorities_roots);
8019 priorities_roots.release ();
8022 /* Extend the array pointed to by P and fill the new part with zeros.  */
8024 xrecalloc (void *p, size_t new_nmemb, size_t old_nmemb, size_t size)
8026 gcc_assert (new_nmemb >= old_nmemb);
8027 p = XRESIZEVAR (void, p, new_nmemb * size);
8028 memset (((char *) p) + old_nmemb * size, 0, (new_nmemb - old_nmemb) * size);
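/* Example use, as in sched_extend_ready_list above (NEW_N and OLD_N
   stand for the new and old element counts): grow READY_TRY while
   zeroing only the newly added tail:

     ready_try = (signed char *) xrecalloc (ready_try, new_n, old_n,
                                            sizeof (*ready_try));
*/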
8033 Find fallthru edge from PRED. */
8035 find_fallthru_edge_from (basic_block pred)
8040 succ = pred->next_bb;
8041 gcc_assert (succ->prev_bb == pred);
8043 if (EDGE_COUNT (pred->succs) <= EDGE_COUNT (succ->preds))
8045 e = find_fallthru_edge (pred->succs);
8049 gcc_assert (e->dest == succ);
8055 e = find_fallthru_edge (succ->preds);
8059 gcc_assert (e->src == pred);
8067 /* Extend per basic block data structures. */
8069 sched_extend_bb (void)
8071 /* The following is done to keep current_sched_info->next_tail non-null.  */
8072 rtx_insn *end = BB_END (EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);
8073 rtx_insn *insn = DEBUG_INSN_P (end) ? prev_nondebug_insn (end) : end;
8074 if (NEXT_INSN (end) == 0
8077 /* Don't emit a NOTE if it would end up before a BARRIER. */
8078 && !BARRIER_P (NEXT_INSN (end))))
8080 rtx_note *note = emit_note_after (NOTE_INSN_DELETED, end);
8081 /* Make note appear outside BB. */
8082 set_block_for_insn (note, NULL);
8083 BB_END (EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb) = end;
8087 /* Init per basic block data structures. */
8089 sched_init_bbs (void)
8094 /* Initialize BEFORE_RECOVERY variable. */
8096 init_before_recovery (basic_block *before_recovery_ptr)
8101 last = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
8102 e = find_fallthru_edge_from (last);
8106 /* We create two basic blocks:
8107 1. Single instruction block is inserted right after E->SRC
8109 2. Empty block right before EXIT_BLOCK.
8110 Between these two blocks recovery blocks will be emitted. */
8112 basic_block single, empty;
8116 /* If the fallthrough edge to exit we've found is from the block we've
8117 created before, don't do anything more. */
8118 if (last == after_recovery)
8121 adding_bb_to_current_region_p = false;
8123 single = sched_create_empty_bb (last);
8124 empty = sched_create_empty_bb (single);
8126 /* Add new blocks to the root loop. */
8127 if (current_loops != NULL)
8129 add_bb_to_loop (single, (*current_loops->larray)[0]);
8130 add_bb_to_loop (empty, (*current_loops->larray)[0]);
8133 single->count = last->count;
8134 empty->count = last->count;
8135 single->frequency = last->frequency;
8136 empty->frequency = last->frequency;
8137 BB_COPY_PARTITION (single, last);
8138 BB_COPY_PARTITION (empty, last);
8140 redirect_edge_succ (e, single);
8141 make_single_succ_edge (single, empty, 0);
8142 make_single_succ_edge (empty, EXIT_BLOCK_PTR_FOR_FN (cfun),
8145 label = block_label (empty);
8146 x = emit_jump_insn_after (gen_jump (label), BB_END (single));
8147 JUMP_LABEL (x) = label;
8148 LABEL_NUSES (label)++;
8149 haifa_init_insn (x);
8151 emit_barrier_after (x);
8153 sched_init_only_bb (empty, NULL);
8154 sched_init_only_bb (single, NULL);
8157 adding_bb_to_current_region_p = true;
8158 before_recovery = single;
8159 after_recovery = empty;
8161 if (before_recovery_ptr)
8162 *before_recovery_ptr = before_recovery;
8164 if (sched_verbose >= 2 && spec_info->dump)
8165 fprintf (spec_info->dump,
8166 ";;\t\tFixed fallthru to EXIT : %d->>%d->%d->>EXIT\n",
8167 last->index, single->index, empty->index);
8170 before_recovery = last;
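/* Illustrative shape of the fixup above, matching the dump message
   "Fixed fallthru to EXIT":

     before:  ... -> LAST ----------(fallthru)----------> EXIT
     after:   ... -> LAST -> SINGLE --(jump)--> EMPTY --> EXIT

   Recovery blocks are later emitted between SINGLE and EMPTY.  */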
8173 /* Returns new recovery block. */
8175 sched_create_recovery_block (basic_block *before_recovery_ptr)
8181 haifa_recovery_bb_recently_added_p = true;
8182 haifa_recovery_bb_ever_added_p = true;
8184 init_before_recovery (before_recovery_ptr);
8186 barrier = get_last_bb_insn (before_recovery);
8187 gcc_assert (BARRIER_P (barrier));
8189 label = emit_label_after (gen_label_rtx (), barrier);
8191 rec = create_basic_block (label, label, before_recovery);
8193 /* A recovery block always ends with an unconditional jump. */
8194 emit_barrier_after (BB_END (rec));
8196 if (BB_PARTITION (before_recovery) != BB_UNPARTITIONED)
8197 BB_SET_PARTITION (rec, BB_COLD_PARTITION);
8199 if (sched_verbose && spec_info->dump)
8200 fprintf (spec_info->dump, ";;\t\tGenerated recovery block rec%d\n",
8206 /* Create edges: FIRST_BB -> REC; FIRST_BB -> SECOND_BB; REC -> SECOND_BB
8207 and emit necessary jumps. */
8209 sched_create_recovery_edges (basic_block first_bb, basic_block rec,
8210 basic_block second_bb)
8216 /* This fixes the incoming edge.  */
8217 /* ??? Which other flags should be specified? */
8218 if (BB_PARTITION (first_bb) != BB_PARTITION (rec))
8219 /* Partition type is the same, if it is "unpartitioned". */
8220 edge_flags = EDGE_CROSSING;
8224 make_edge (first_bb, rec, edge_flags);
8225 label = block_label (second_bb);
8226 jump = emit_jump_insn_after (gen_jump (label), BB_END (rec));
8227 JUMP_LABEL (jump) = label;
8228 LABEL_NUSES (label)++;
8230 if (BB_PARTITION (second_bb) != BB_PARTITION (rec))
8231 /* Partition type is the same, if it is "unpartitioned". */
8233 /* Rewritten from cfgrtl.c. */
8234 if (flag_reorder_blocks_and_partition
8235 && targetm_common.have_named_sections)
8237 /* We don't need the same note for the check because
8238 any_condjump_p (check) == true. */
8239 CROSSING_JUMP_P (jump) = 1;
8241 edge_flags = EDGE_CROSSING;
8246 make_single_succ_edge (rec, second_bb, edge_flags);
8247 if (dom_info_available_p (CDI_DOMINATORS))
8248 set_immediate_dominator (CDI_DOMINATORS, rec, first_bb);
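/* Resulting shape (the edges named in the comment above):

     FIRST_BB --(check branches)--> REC --(jump)--> SECOND_BB
         \                                             ^
          `------------------(fallthru)---------------'
*/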
8251 /* This function creates recovery code for INSN. If MUTATE_P is nonzero,
8252 INSN is a simple check that should be converted to a branchy one.  */
8254 create_check_block_twin (rtx_insn *insn, bool mutate_p)
8257 rtx_insn *label, *check, *twin;
8260 sd_iterator_def sd_it;
8262 dep_def _new_dep, *new_dep = &_new_dep;
8265 gcc_assert (ORIG_PAT (insn) != NULL_RTX);
8268 todo_spec = TODO_SPEC (insn);
8271 gcc_assert (IS_SPECULATION_SIMPLE_CHECK_P (insn)
8272 && (TODO_SPEC (insn) & SPECULATIVE) == 0);
8274 todo_spec = CHECK_SPEC (insn);
8277 todo_spec &= SPECULATIVE;
8279 /* Create recovery block. */
8280 if (mutate_p || targetm.sched.needs_block_p (todo_spec))
8282 rec = sched_create_recovery_block (NULL);
8283 label = BB_HEAD (rec);
8287 rec = EXIT_BLOCK_PTR_FOR_FN (cfun);
8292 check_pat = targetm.sched.gen_spec_check (insn, label, todo_spec);
8294 if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
8296 /* To have mem_reg alive at the beginning of second_bb,
8297 we emit the check BEFORE insn, so that insn, after splitting,
8298 will be at the beginning of second_bb, which will
8299 provide us with the correct life information.  */
8300 check = emit_jump_insn_before (check_pat, insn);
8301 JUMP_LABEL (check) = label;
8302 LABEL_NUSES (label)++;
8305 check = emit_insn_before (check_pat, insn);
8307 /* Extend data structures. */
8308 haifa_init_insn (check);
8310 /* CHECK is being added to current region. Extend ready list. */
8311 gcc_assert (sched_ready_n_insns != -1);
8312 sched_extend_ready_list (sched_ready_n_insns + 1);
8314 if (current_sched_info->add_remove_insn)
8315 current_sched_info->add_remove_insn (insn, 0);
8317 RECOVERY_BLOCK (check) = rec;
8319 if (sched_verbose && spec_info->dump)
8320 fprintf (spec_info->dump, ";;\t\tGenerated check insn : %s\n",
8321 (*current_sched_info->print_insn) (check, 0));
8323 gcc_assert (ORIG_PAT (insn));
8325 /* Initialize TWIN (twin is a duplicate of original instruction
8326 in the recovery block). */
8327 if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
8329 sd_iterator_def sd_it;
8332 FOR_EACH_DEP (insn, SD_LIST_RES_BACK, sd_it, dep)
8333 if ((DEP_STATUS (dep) & DEP_OUTPUT) != 0)
8335 struct _dep _dep2, *dep2 = &_dep2;
8337 init_dep (dep2, DEP_PRO (dep), check, REG_DEP_TRUE);
8339 sd_add_dep (dep2, true);
8342 twin = emit_insn_after (ORIG_PAT (insn), BB_END (rec));
8343 haifa_init_insn (twin);
8345 if (sched_verbose && spec_info->dump)
8346 /* INSN_BB (insn) isn't determined for twin insns yet.
8347 So we can't use current_sched_info->print_insn. */
8348 fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
8349 INSN_UID (twin), rec->index);
8353 ORIG_PAT (check) = ORIG_PAT (insn);
8354 HAS_INTERNAL_DEP (check) = 1;
8356 /* ??? We probably should change all OUTPUT dependencies to
8360 /* Copy all resolved back dependencies of INSN to TWIN. This will
8361 provide correct value for INSN_TICK (TWIN). */
8362 sd_copy_back_deps (twin, insn, true);
8364 if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
8365 /* In case of branchy check, fix CFG. */
8367 basic_block first_bb, second_bb;
8370 first_bb = BLOCK_FOR_INSN (check);
8371 second_bb = sched_split_block (first_bb, check);
8373 sched_create_recovery_edges (first_bb, rec, second_bb);
8375 sched_init_only_bb (second_bb, first_bb);
8376 sched_init_only_bb (rec, EXIT_BLOCK_PTR_FOR_FN (cfun));
8378 jump = BB_END (rec);
8379 haifa_init_insn (jump);
8382 /* Move backward dependences from INSN to CHECK and
8383 move forward dependences from INSN to TWIN. */
8385 /* First, create dependencies between INSN's producers and CHECK & TWIN. */
8386 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
8388 rtx_insn *pro = DEP_PRO (dep);
8391 /* If BEGIN_DATA: [insn ~~TRUE~~> producer]:
8392 check --TRUE--> producer ??? or ANTI ???
8393 twin --TRUE--> producer
8394 twin --ANTI--> check
8396 If BEGIN_CONTROL: [insn ~~ANTI~~> producer]:
8397 check --ANTI--> producer
8398 twin --ANTI--> producer
8399 twin --ANTI--> check
8401 If BE_IN_SPEC: [insn ~~TRUE~~> producer]:
8402 check ~~TRUE~~> producer
8403 twin ~~TRUE~~> producer
8404 twin --ANTI--> check */
8406 ds = DEP_STATUS (dep);
8408 if (ds & BEGIN_SPEC)
8410 gcc_assert (!mutate_p);
8414 init_dep_1 (new_dep, pro, check, DEP_TYPE (dep), ds);
8415 sd_add_dep (new_dep, false);
8417 if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
8419 DEP_CON (new_dep) = twin;
8420 sd_add_dep (new_dep, false);
  /* Second, remove backward dependencies of INSN.  */
  for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
       sd_iterator_cond (&sd_it, &dep);)
    {
      if ((DEP_STATUS (dep) & BEGIN_SPEC)
          || mutate_p)
        /* We can delete this dep because we overcome it with
           BEGIN_SPECULATION.  */
        sd_delete_dep (sd_it);
      else
        sd_iterator_next (&sd_it);
    }
  /* Future Speculations.  Determine what BE_IN speculations will be like.  */
  fs = 0;

  /* Fields (DONE_SPEC (x) & BEGIN_SPEC) and CHECK_SPEC (x) are set only
     here.  */

  gcc_assert (!DONE_SPEC (insn));

  if (!mutate_p)
    {
      ds_t ts = TODO_SPEC (insn);

      DONE_SPEC (insn) = ts & BEGIN_SPEC;
      CHECK_SPEC (check) = ts & BEGIN_SPEC;

      /* The luckiness of future speculations depends solely on the
         initial BEGIN speculation.  */
      if (ts & BEGIN_DATA)
        fs = set_dep_weak (fs, BE_IN_DATA, get_dep_weak (ts, BEGIN_DATA));
      if (ts & BEGIN_CONTROL)
        fs = set_dep_weak (fs, BE_IN_CONTROL,
                           get_dep_weak (ts, BEGIN_CONTROL));
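
      /* (Added note: a sketch of the weakness transfer above.  ds_t --
         see its documentation in sched-int.h -- keeps a separate
         weakness bit-field per speculation type.  get_dep_weak (ts,
         BEGIN_DATA) extracts the probability-of-success of the
         BEGIN_DATA speculation, and set_dep_weak stores that same
         value into the BE_IN_DATA field of FS, so a future BE_IN
         speculation is treated as exactly as lucky as the BEGIN
         speculation it derives from.)  */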
    }
  else
    CHECK_SPEC (check) = CHECK_SPEC (insn);

  /* Future speculations: call the helper.  */
  process_insn_forw_deps_be_in_spec (insn, twin, fs);

  if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
    {
      /* Which types of dependencies to use here is, generally, a
         machine-dependent question...  But, for now, we use a true
         dependence for the check and an output dependence for the
         twin.  */
      if (!mutate_p)
        {
          init_dep (new_dep, insn, check, REG_DEP_TRUE);
          sd_add_dep (new_dep, false);

          init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
          sd_add_dep (new_dep, false);
        }
      else
        {
          if (spec_info->dump)
            fprintf (spec_info->dump, ";;\t\tRemoved simple check : %s\n",
                     (*current_sched_info->print_insn) (insn, 0));

          /* Remove all dependencies of the INSN.  */
          {
            sd_it = sd_iterator_start (insn, (SD_LIST_FORW
                                              | SD_LIST_BACK
                                              | SD_LIST_RES_BACK));
            while (sd_iterator_cond (&sd_it, &dep))
              sd_delete_dep (sd_it);
          }

          /* If former check (INSN) already was moved to the ready (or
             queue) list, add new check (CHECK) there too.  */
          if (QUEUE_INDEX (insn) != QUEUE_NOWHERE)
            try_ready (check);

          /* Remove old check from instruction stream and free its
             own dependencies.  */
          sched_remove_insn (insn);
        }

      init_dep (new_dep, check, twin, REG_DEP_ANTI);
      sd_add_dep (new_dep, false);
    }
  else
    {
      init_dep_1 (new_dep, insn, check, REG_DEP_TRUE, DEP_TRUE | DEP_OUTPUT);
      sd_add_dep (new_dep, false);
    }
  if (!mutate_p)
    {
      /* Fix priorities.  If MUTATE_P is nonzero, this is not necessary,
         because it'll be done later in add_to_speculative_block.  */
      rtx_vec_t priorities_roots = rtx_vec_t ();

      clear_priorities (twin, &priorities_roots);
      calc_priorities (priorities_roots);
      priorities_roots.release ();
    }
}
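
/* (Added note: the clear/recalc pair above is the standard pattern in
   this file for invalidating cached priorities -- clear_priorities,
   defined below, walks the backward dependencies of TWIN, marking each
   cached INSN_PRIORITY_STATUS as unknown and collecting the root insns;
   calc_priorities then calls priority () on those roots, which lazily
   recomputes everything that was cleared.)  */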

/* Removes dependencies between instructions in the recovery block REC
   and the usual region instructions.  It keeps inner dependences
   so it won't be necessary to recompute them.  */
static void
fix_recovery_deps (basic_block rec)
{
  rtx_insn *note, *insn, *jump;
  rtx_insn_list *ready_list = 0;
  bitmap_head in_ready;
  rtx_insn_list *link;

  bitmap_initialize (&in_ready, 0);

  /* NOTE - a basic block note.  */
  note = NEXT_INSN (BB_HEAD (rec));
  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
  insn = BB_END (rec);
  gcc_assert (JUMP_P (insn));
  insn = PREV_INSN (insn);

  do
    {
      sd_iterator_def sd_it;
      dep_t dep;

      for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
           sd_iterator_cond (&sd_it, &dep);)
        {
          rtx_insn *consumer = DEP_CON (dep);

          if (BLOCK_FOR_INSN (consumer) != rec)
            {
              sd_delete_dep (sd_it);

              if (bitmap_set_bit (&in_ready, INSN_LUID (consumer)))
                ready_list = alloc_INSN_LIST (consumer, ready_list);
            }
          else
            {
              gcc_assert ((DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);

              sd_iterator_next (&sd_it);
            }
        }

      insn = PREV_INSN (insn);
    }
  while (insn != note);
  bitmap_clear (&in_ready);

  /* Try to add instructions to the ready or queue list.  */
  for (link = ready_list; link; link = link->next ())
    try_ready (link->insn ());
  free_INSN_LIST_list (&ready_list);

  /* Fix the jump's dependences.  */
  insn = BB_HEAD (rec);
  jump = BB_END (rec);

  gcc_assert (LABEL_P (insn));
  insn = NEXT_INSN (insn);

  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
  add_jump_dependencies (insn, jump);
}
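
/* (Added note: after fix_recovery_deps, the recovery block REC has the
   shape  label; basic-block note; twin insns; jump  -- and the only
   dependencies left inside it are intra-block true dependencies, plus
   the anti dependencies that add_jump_dependencies creates to keep the
   trailing jump last.)  */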

/* Change pattern of INSN to NEW_PAT.  Invalidate cached haifa
   instruction data.  */
static void
haifa_change_pattern (rtx_insn *insn, rtx new_pat)
{
  int t = validate_change (insn, &PATTERN (insn), new_pat, 0);
  gcc_assert (t);

  update_insn_after_change (insn);
}

/* -1 - can't speculate,
   0 - for speculation with REQUEST mode it is OK to use
   current instruction pattern,
   1 - need to change pattern for *NEW_PAT to be speculative.  */
int
sched_speculate_insn (rtx_insn *insn, ds_t request, rtx *new_pat)
{
  gcc_assert (current_sched_info->flags & DO_SPECULATION
              && (request & SPECULATIVE)
              && sched_insn_is_legitimate_for_speculation_p (insn, request));

  if ((request & spec_info->mask) != request)
    return -1;

  if (request & BE_IN_SPEC
      && !(request & BEGIN_SPEC))
    return 0;

  return targetm.sched.speculate_insn (insn, request, new_pat);
}
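
/* (Added usage sketch, not from the original sources: one way a caller
   can consume the tri-state result documented above; INSN and REQUEST
   stand for whatever the caller is trying to speculate.

       rtx new_pat;
       switch (sched_speculate_insn (insn, request, &new_pat))
         {
         case -1:  break;  -- can't speculate: leave INSN alone
         case 0:   break;  -- the current pattern already suits REQUEST
         case 1:   -- install the speculative version of the pattern
           haifa_change_pattern (insn, new_pat);
           break;
         }
   )  */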

static int
haifa_speculate_insn (rtx_insn *insn, ds_t request, rtx *new_pat)
{
  gcc_assert (sched_deps_info->generate_spec_deps
              && !IS_SPECULATION_CHECK_P (insn));

  if (HAS_INTERNAL_DEP (insn)
      || SCHED_GROUP_P (insn))
    return -1;

  return sched_speculate_insn (insn, request, new_pat);
}

/* Print some information about block BB, which starts with HEAD and
   ends with TAIL, before scheduling it.
   I is zero if the scheduler is about to start with a fresh ebb.  */
static void
dump_new_block_header (int i, basic_block bb, rtx_insn *head, rtx_insn *tail)
{
  if (!i)
    fprintf (sched_dump,
             ";; ======================================================\n");
  else
    fprintf (sched_dump,
             ";; =====================ADVANCING TO=====================\n");
  fprintf (sched_dump,
           ";; -- basic block %d from %d to %d -- %s reload\n",
           bb->index, INSN_UID (head), INSN_UID (tail),
           (reload_completed ? "after" : "before"));
  fprintf (sched_dump,
           ";; ======================================================\n");
  fprintf (sched_dump, "\n");
}
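
/* (Added example, reconstructed from the format strings above: for a
   fresh ebb (I == 0), basic block 3 spanning insns 45..67 before
   reload, this prints

       ;; ======================================================
       ;; -- basic block 3 from 45 to 67 -- before reload
       ;; ======================================================
   )  */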

/* Unlink basic block notes and labels and save them, so they
   can be easily restored.  We unlink basic block notes in EBB to
   provide back-compatibility with the previous code, as target backends
   assume that there'll be only instructions between
   current_sched_info->{head and tail}.  We restore these notes as soon
   as we can.
   FIRST (LAST) is the first (last) basic block in the ebb.
   NB: In usual case (FIRST == LAST) nothing is really done.  */
void
unlink_bb_notes (basic_block first, basic_block last)
{
  /* We DON'T unlink basic block notes of the first block in the ebb.  */
  if (first == last)
    return;

  bb_header = XNEWVEC (rtx_insn *, last_basic_block_for_fn (cfun));

  /* Make a sentinel.  */
  if (last->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
    bb_header[last->next_bb->index] = 0;

  first = first->next_bb;
  do
    {
      rtx_insn *prev, *label, *note, *next;

      label = BB_HEAD (last);
      if (LABEL_P (label))
        note = NEXT_INSN (label);
      else
        note = label;
      gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));

      prev = PREV_INSN (label);
      next = NEXT_INSN (note);
      gcc_assert (prev && next);

      SET_NEXT_INSN (prev) = next;
      SET_PREV_INSN (next) = prev;

      bb_header[last->index] = label;

      if (last == first)
        break;

      last = last->prev_bb;
    }
  while (true);
}
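
/* (Added illustration: the unlink above splices LABEL and the basic
   block NOTE out of the doubly linked insn chain,

       prev <-> label <-> bb-note <-> next    becomes    prev <-> next

   while bb_header[bb->index] remembers LABEL so that restore_bb_notes
   can splice the label/note pair back in later.)  */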

/* Restore basic block notes.
   FIRST is the first basic block in the ebb.  */
static void
restore_bb_notes (basic_block first)
{
  if (!bb_header)
    return;

  /* We DON'T unlink basic block notes of the first block in the ebb.  */
  first = first->next_bb;
  /* Remember: FIRST is actually the second basic block in the ebb.  */

  while (first != EXIT_BLOCK_PTR_FOR_FN (cfun)
         && bb_header[first->index])
    {
      rtx_insn *prev, *label, *note, *next;

      label = bb_header[first->index];
      prev = PREV_INSN (label);
      next = NEXT_INSN (prev);

      if (LABEL_P (label))
        note = NEXT_INSN (label);
      else
        note = label;
      gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));

      bb_header[first->index] = 0;

      SET_NEXT_INSN (prev) = label;
      SET_NEXT_INSN (note) = next;
      SET_PREV_INSN (next) = note;

      first = first->next_bb;
    }

  free (bb_header);
  bb_header = 0;
}

/* Helper function.
   Fix CFG after both in- and inter-block movement of
   control_flow_insn_p JUMP.  */
static void
fix_jump_move (rtx_insn *jump)
{
  basic_block bb, jump_bb, jump_bb_next;

  bb = BLOCK_FOR_INSN (PREV_INSN (jump));
  jump_bb = BLOCK_FOR_INSN (jump);
  jump_bb_next = jump_bb->next_bb;

  gcc_assert (common_sched_info->sched_pass_id == SCHED_EBB_PASS
              || IS_SPECULATION_BRANCHY_CHECK_P (jump));

  if (!NOTE_INSN_BASIC_BLOCK_P (BB_END (jump_bb_next)))
    /* If jump_bb_next is not empty.  */
    BB_END (jump_bb) = BB_END (jump_bb_next);

  if (BB_END (bb) != PREV_INSN (jump))
    /* Then there are instructions after jump that should be placed
       in jump_bb_next.  */
    BB_END (jump_bb_next) = BB_END (bb);
  else
    /* Otherwise jump_bb_next is empty.  */
    BB_END (jump_bb_next) = NEXT_INSN (BB_HEAD (jump_bb_next));

  /* To make assertion in move_insn happy.  */
  BB_END (bb) = PREV_INSN (jump);

  update_bb_for_insn (jump_bb_next);
}

/* Fix CFG after interblock movement of control_flow_insn_p JUMP.  */
static void
move_block_after_check (rtx_insn *jump)
{
  basic_block bb, jump_bb, jump_bb_next;
  vec<edge, va_gc> *t;

  bb = BLOCK_FOR_INSN (PREV_INSN (jump));
  jump_bb = BLOCK_FOR_INSN (jump);
  jump_bb_next = jump_bb->next_bb;

  update_bb_for_insn (jump_bb);

  gcc_assert (IS_SPECULATION_CHECK_P (jump)
              || IS_SPECULATION_CHECK_P (BB_END (jump_bb_next)));

  unlink_block (jump_bb_next);
  link_block (jump_bb_next, bb);

  t = bb->succs;
  bb->succs = 0;
  move_succs (&(jump_bb->succs), bb);
  move_succs (&(jump_bb_next->succs), jump_bb);
  move_succs (&t, jump_bb_next);

  df_mark_solutions_dirty ();

  common_sched_info->fix_recovery_cfg
    (bb->index, jump_bb->index, jump_bb_next->index);
}
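
/* (Added note on the three move_succs calls above: they rotate the
   successor edge vectors to match the new block order -- BB receives
   JUMP_BB's successors, JUMP_BB receives JUMP_BB_NEXT's, and
   JUMP_BB_NEXT receives BB's original successors saved in T; the
   helper below also re-parents each moved edge.)  */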

/* Helper function for move_block_after_check.
   This function attaches edge vector pointed to by SUCCSP to
   block TO.  */
static void
move_succs (vec<edge, va_gc> **succsp, basic_block to)
{
  edge e;
  edge_iterator ei;

  gcc_assert (to->succs == 0);

  to->succs = *succsp;

  FOR_EACH_EDGE (e, ei, to->succs)
    e->src = to;

  *succsp = 0;
}

/* Remove INSN from the instruction stream.
   INSN should not have any dependencies.  */
static void
sched_remove_insn (rtx_insn *insn)
{
  sd_finish_insn (insn);

  change_queue_index (insn, QUEUE_NOWHERE);
  current_sched_info->add_remove_insn (insn, 1);
  delete_insn (insn);
}

/* Clear priorities of all instructions that are forward dependent on INSN.
   Store in vector pointed to by ROOTS_PTR insns on which priority () should
   be invoked to initialize all cleared priorities.  */
static void
clear_priorities (rtx_insn *insn, rtx_vec_t *roots_ptr)
{
  sd_iterator_def sd_it;
  dep_t dep;
  bool insn_is_root_p = true;

  gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);

  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
    {
      rtx_insn *pro = DEP_PRO (dep);

      if (INSN_PRIORITY_STATUS (pro) >= 0
          && QUEUE_INDEX (insn) != QUEUE_SCHEDULED)
        {
          /* If DEP doesn't contribute to priority then INSN itself should
             be added to priority roots.  */
          if (contributes_to_priority_p (dep))
            insn_is_root_p = false;

          INSN_PRIORITY_STATUS (pro) = -1;
          clear_priorities (pro, roots_ptr);
        }
    }

  if (insn_is_root_p)
    roots_ptr->safe_push (insn);
}

/* Recompute priorities of instructions whose priorities might have been
   changed.  ROOTS is a vector of instructions whose priority computation will
   trigger initialization of all cleared priorities.  */
static void
calc_priorities (rtx_vec_t roots)
{
  int i;
  rtx_insn *insn;

  FOR_EACH_VEC_ELT (roots, i, insn)
    priority (insn);
}

/* Add dependences between JUMP and other instructions in the recovery
   block.  INSN is the first insn in the recovery block.  */
static void
add_jump_dependencies (rtx_insn *insn, rtx_insn *jump)
{
  do
    {
      insn = NEXT_INSN (insn);
      if (insn == jump)
        break;

      if (dep_list_size (insn, SD_LIST_FORW) == 0)
        {
          dep_def _new_dep, *new_dep = &_new_dep;

          init_dep (new_dep, insn, jump, REG_DEP_ANTI);
          sd_add_dep (new_dep, false);
        }
    }
  while (1);

  gcc_assert (!sd_lists_empty_p (jump, SD_LIST_BACK));
}

/* Extend data structures for logical insn UID.  */
void
sched_extend_luids (void)
{
  int new_luids_max_uid = get_max_uid () + 1;

  sched_luids.safe_grow_cleared (new_luids_max_uid);
}

/* Initialize LUID for INSN.  */
void
sched_init_insn_luid (rtx_insn *insn)
{
  int i = INSN_P (insn) ? 1 : common_sched_info->luid_for_non_insn (insn);
  int luid;

  if (i >= 0)
    {
      luid = sched_max_luid;
      sched_max_luid += i;
    }
  else
    luid = -1;

  SET_INSN_LUID (insn, luid);
}
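
/* (Added example with illustrative hook values: suppose
   luid_for_non_insn returns 0 for notes and -1 for barriers.  Starting
   from sched_max_luid == 1, the stream  insn1, note, insn2, barrier
   gets luids 1, 2, 2, -1: the note borrows the luid of the following
   insn because the counter advances by I == 0, and the barrier gets no
   luid at all.)  */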

/* Initialize luids for BBS.
   The hook common_sched_info->luid_for_non_insn () is used to determine
   if notes, labels, etc. need luids.  */
void
sched_init_luids (bb_vec_t bbs)
{
  int i;
  basic_block bb;

  sched_extend_luids ();
  FOR_EACH_VEC_ELT (bbs, i, bb)
    {
      rtx_insn *insn;

      FOR_BB_INSNS (bb, insn)
        sched_init_insn_luid (insn);
    }
}

/* Free LUIDs.  */
void
sched_finish_luids (void)
{
  sched_luids.release ();
  sched_max_luid = 1;
}

/* Return logical uid of INSN.  Helpful while debugging.  */
int
insn_luid (rtx_insn *insn)
{
  return INSN_LUID (insn);
}

/* Extend per insn data in the target.  */
void
sched_extend_target (void)
{
  if (targetm.sched.h_i_d_extended)
    targetm.sched.h_i_d_extended ();
}

/* Extend global scheduler structures (those that live across calls to
   schedule_block) to include information about just emitted INSN.  */
static void
extend_h_i_d (void)
{
  int reserve = (get_max_uid () + 1 - h_i_d.length ());
  if (reserve > 0
      && ! h_i_d.space (reserve))
    {
      h_i_d.safe_grow_cleared (3 * get_max_uid () / 2);
      sched_extend_target ();
    }
}
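
/* (Added note on the growth policy above: growing to
   3 * get_max_uid () / 2 rather than by the exact RESERVE gives
   amortized-constant extension -- e.g. with get_max_uid () == 1000 the
   table grows to 1500 entries, so roughly the next 500 newly created
   insns need no further reallocation.)  */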

/* Initialize h_i_d entry of the INSN with default values.
   Values that are not explicitly initialized here hold zero.  */
static void
init_h_i_d (rtx_insn *insn)
{
  if (INSN_LUID (insn) > 0)
    {
      INSN_COST (insn) = -1;
      QUEUE_INDEX (insn) = QUEUE_NOWHERE;
      INSN_TICK (insn) = INVALID_TICK;
      INSN_EXACT_TICK (insn) = INVALID_TICK;
      INTER_TICK (insn) = INVALID_TICK;
      TODO_SPEC (insn) = HARD_DEP;
      INSN_AUTOPREF_MULTIPASS_DATA (insn)[0].status
        = AUTOPREF_MULTIPASS_DATA_UNINITIALIZED;
      INSN_AUTOPREF_MULTIPASS_DATA (insn)[1].status
        = AUTOPREF_MULTIPASS_DATA_UNINITIALIZED;
    }
}

/* Initialize haifa_insn_data for BBS.  */
void
haifa_init_h_i_d (bb_vec_t bbs)
{
  int i;
  basic_block bb;

  extend_h_i_d ();
  FOR_EACH_VEC_ELT (bbs, i, bb)
    {
      rtx_insn *insn;

      FOR_BB_INSNS (bb, insn)
        init_h_i_d (insn);
    }
}

/* Finalize haifa_insn_data.  */
void
haifa_finish_h_i_d (void)
{
  int i;
  haifa_insn_data_t data;
  struct reg_use_data *use, *next;

  FOR_EACH_VEC_ELT (h_i_d, i, data)
    {
      free (data->max_reg_pressure);
      free (data->reg_pressure);
      for (use = data->reg_use_list; use != NULL; use = next)
        {
          next = use->next_insn_use;
          free (use);
        }
    }
  h_i_d.release ();
}

/* Init data for the new insn INSN.  */
static void
haifa_init_insn (rtx_insn *insn)
{
  gcc_assert (insn != NULL);

  sched_extend_luids ();
  sched_init_insn_luid (insn);
  sched_extend_target ();
  sched_deps_init (false);
  extend_h_i_d ();
  init_h_i_d (insn);

  if (adding_bb_to_current_region_p)
    {
      sd_init_insn (insn);

      /* Extend dependency caches by one element.  */
      extend_dependency_caches (1, false);
    }
  if (sched_pressure != SCHED_PRESSURE_NONE)
    init_insn_reg_pressure_info (insn);
}

/* Init data for the new basic block BB which comes after AFTER.  */
static void
haifa_init_only_bb (basic_block bb, basic_block after)
{
  gcc_assert (bb != NULL);

  if (common_sched_info->add_block)
    /* This changes only data structures of the front-end.  */
    common_sched_info->add_block (bb, after);
}

/* A generic version of sched_split_block ().  */
basic_block
sched_split_block_1 (basic_block first_bb, rtx after)
{
  edge e;

  e = split_block (first_bb, after);
  gcc_assert (e->src == first_bb);

  /* sched_split_block emits note if *check == BB_END.  Probably it
     is better to rip that note off.  */

  return e->dest;
}

/* A generic version of sched_create_empty_bb ().  */
basic_block
sched_create_empty_bb_1 (basic_block after)
{
  return create_empty_bb (after);
}

/* Insert PAT as an INSN into the schedule and update the necessary data
   structures to account for it.  */
rtx_insn *
sched_emit_insn (rtx pat)
{
  rtx_insn *insn = emit_insn_before (pat, first_nonscheduled_insn ());
  haifa_init_insn (insn);

  if (current_sched_info->add_remove_insn)
    current_sched_info->add_remove_insn (insn, 0);

  (*current_sched_info->begin_schedule_ready) (insn);
  scheduled_insns.safe_push (insn);

  last_scheduled_insn = insn;
  return insn;
}

/* This function returns a candidate satisfying dispatch constraints from
   the ready list READY.  */
static rtx_insn *
ready_remove_first_dispatch (struct ready_list *ready)
{
  int i;
  rtx_insn *insn = ready_element (ready, 0);

  if (ready->n_ready == 1
      || !INSN_P (insn)
      || INSN_CODE (insn) < 0
      || !active_insn_p (insn)
      || targetm.sched.dispatch (insn, FITS_DISPATCH_WINDOW))
    return ready_remove_first (ready);

  for (i = 1; i < ready->n_ready; i++)
    {
      insn = ready_element (ready, i);

      if (!INSN_P (insn)
          || INSN_CODE (insn) < 0
          || !active_insn_p (insn))
        continue;

      if (targetm.sched.dispatch (insn, FITS_DISPATCH_WINDOW))
        {
          /* Return i-th element of ready.  */
          insn = ready_remove (ready, i);
          return insn;
        }
    }

  if (targetm.sched.dispatch (NULL, DISPATCH_VIOLATION))
    return ready_remove_first (ready);

  for (i = 1; i < ready->n_ready; i++)
    {
      insn = ready_element (ready, i);

      if (!INSN_P (insn)
          || INSN_CODE (insn) < 0
          || !active_insn_p (insn))
        continue;

      /* Return i-th element of ready.  */
      if (targetm.sched.dispatch (insn, IS_CMP))
        return ready_remove (ready, i);
    }

  return ready_remove_first (ready);
}
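
/* (Added summary of the selection policy above: take the head insn if
   it fits the dispatch window or cannot be analyzed; otherwise pick the
   first ready insn that does fit; on a reported dispatch violation fall
   back to the head; failing that, prefer a compare (IS_CMP) insn; and
   as a last resort return the head anyway.)  */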

/* Get the number of ready insns in the ready list.  */
int
number_in_ready (void)
{
  return ready.n_ready;
}

/* Get the element of the ready list with index I.  */
rtx_insn *
get_ready_element (int i)
{
  return ready_element (&ready, i);
}

#endif /* INSN_SCHEDULING */