/* Instruction scheduling pass.
   Copyright (C) 1992-2015 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* Instruction scheduling pass.  This file, along with sched-deps.c,
   contains the generic parts.  The actual entry point for
   the normal instruction scheduling pass is found in sched-rgn.c.

   We compute insn priorities based on data dependencies.  Flow
   analysis only creates a fraction of the data-dependencies we must
   observe: namely, only those dependencies which the combiner can be
   expected to use.  For this pass, we must therefore create the
   remaining dependencies we need to observe: register dependencies,
   memory dependencies, dependencies to keep function calls in order,
   and the dependence between a conditional branch and the setting of
   condition codes are all dealt with here.

   The scheduler first traverses the data flow graph, starting with
   the last instruction, and proceeding to the first, assigning values
   to insn_priority as it goes.  This sorts the instructions
   topologically by data dependence.

   Once priorities have been established, we order the insns using
   list scheduling.  This works as follows: starting with a list of
   all the ready insns, and sorted according to priority number, we
   schedule the insn from the end of the list by placing its
   predecessors in the list according to their priority order.  We
   consider this insn scheduled by setting the pointer to the "end" of
   the list to point to the previous insn.  When an insn has no
   predecessors, we either queue it until sufficient time has elapsed
   or add it to the ready list.  As the instructions are scheduled or
   when stalls are introduced, the queue advances and dumps insns into
   the ready list.  When all insns down to the lowest priority have
   been scheduled, the critical path of the basic block has been made
   as short as possible.  The remaining insns are then scheduled in
   remaining slots.

   The following list shows the order in which we want to break ties
   among insns in the ready list:

   1.  choose the insn with the longest path to the end of the bb, ties
   broken by
   2.  choose the insn with the least contribution to register pressure,
   ties broken by
   3.  prefer in-block over interblock motion, ties broken by
   4.  prefer useful over speculative motion, ties broken by
   5.  choose the insn with the largest control flow probability, ties
   broken by
   6.  choose the insn with the least dependences upon the previously
   scheduled insn, ties broken by
   7.  choose the insn which has the most insns dependent on it, or finally
   8.  choose the insn with the lowest UID.

   Memory references complicate matters.  Only if we can be certain
   that memory references are not part of the data dependency graph
   (via true, anti, or output dependence), can we move operations past
   memory references.  To first approximation, reads can be done
   independently, while writes introduce dependencies.  Better
   approximations will yield fewer dependencies.

   Before reload, an extended analysis of interblock data dependences
   is required for interblock scheduling.  This is performed in
   compute_block_dependences ().

   Dependencies set up by memory references are treated in exactly the
   same way as other dependencies, by using insn backward dependences
   INSN_BACK_DEPS.  INSN_BACK_DEPS are translated into forward dependences
   INSN_FORW_DEPS for the purpose of forward list scheduling.

   Having optimized the critical path, we may have also unduly
   extended the lifetimes of some registers.  If an operation requires
   that constants be loaded into registers, it is certainly desirable
   to load those constants as early as necessary, but no earlier.
   I.e., it will not do to load up a bunch of registers at the
   beginning of a basic block only to use them at the end, if they
   could be loaded later, since this may result in excessive register
   utilization.

   Note that since branches are never in basic blocks, but only end
   basic blocks, this pass will not move branches.  But that is ok,
   since we can use GNU's delayed branch scheduling pass to take care
   of this case.

   Also note that no further optimizations based on algebraic
   identities are performed, so this pass would be a good one to
   perform instruction splitting, such as breaking up a multiply
   instruction into shifts and adds where that is profitable.

   Given the memory aliasing analysis that this pass should perform,
   it should be possible to remove redundant stores to memory, and to
   load values from registers instead of hitting memory.

   Before reload, speculative insns are moved only if a 'proof' exists
   that no exception will be caused by this, and if no live registers
   exist that inhibit the motion (live register constraints are not
   represented by data dependence edges).

   This pass must update information that subsequent passes expect to
   be correct.  Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
   reg_n_calls_crossed, and reg_live_length.  Also, BB_HEAD, BB_END.

   The information in the line number notes is carefully retained by
   this pass.  Notes that refer to the starting and ending of
   exception regions are also carefully retained by this pass.  All
   other NOTE insns are grouped in their same relative order at the
   beginning of basic blocks and regions that have been scheduled.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "diagnostic-core.h"
#include "hard-reg-set.h"
#include "rtl.h"
#include "tm_p.h"
#include "regs.h"
#include "hashtab.h"
#include "hash-set.h"
#include "vec.h"
#include "machmode.h"
#include "input.h"
#include "function.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
#include "recog.h"
#include "dominance.h"
#include "cfg.h"
#include "cfgrtl.h"
#include "cfgbuild.h"
#include "predict.h"
#include "basic-block.h"
#include "sched-int.h"
#include "target.h"
#include "common/common-target.h"
#include "params.h"
#include "dbgcnt.h"
#include "cfgloop.h"
#include "ira.h"
#include "emit-rtl.h"  /* FIXME: Can go away once crtl is moved to rtl.h.  */
#include "hash-table.h"
#include "dumpfile.h"

#ifdef INSN_SCHEDULING

/* True if we do register pressure relief through live-range
   shrinkage.  */
static bool live_range_shrinkage_p;

/* Switch on live range shrinkage.  */
void
initialize_live_range_shrinkage (void)
{
  live_range_shrinkage_p = true;
}

/* Switch off live range shrinkage.  */
void
finish_live_range_shrinkage (void)
{
  live_range_shrinkage_p = false;
}

/* issue_rate is the number of insns that can be scheduled in the same
   machine cycle.  It can be defined in the config/mach/mach.h file,
   otherwise we set it to 1.  */

int issue_rate;

/* This can be set to true by a backend if the scheduler should not
   enable a DCE pass.  */
bool sched_no_dce;

/* The current initiation interval used when modulo scheduling.  */
static int modulo_ii;

/* The maximum number of stages we are prepared to handle.  */
static int modulo_max_stages;

/* The number of insns that exist in each iteration of the loop.  We use this
   to detect when we've scheduled all insns from the first iteration.  */
static int modulo_n_insns;

/* The current count of insns in the first iteration of the loop that have
   already been scheduled.  */
static int modulo_insns_scheduled;

/* The maximum uid of insns from the first iteration of the loop.  */
static int modulo_iter0_max_uid;

/* The number of times we should attempt to backtrack when modulo scheduling.
   Decreased each time we have to backtrack.  */
static int modulo_backtracks_left;

/* The stage in which the last insn from the original loop was
   scheduled.  */
static int modulo_last_stage;

/* sched_verbose controls the amount of debugging output the
   scheduler prints.  It is controlled by -fsched-verbose=N:
   N>0 and no -dSR : the output is directed to stderr.
   N>=10 will direct the printouts to stderr (regardless of -dSR).
   N=1: same as -dSR.
   N=2: bb's probabilities, detailed ready list info, unit/insn info.
   N=3: rtl at abort point, control-flow, regions info.
   N=5: dependences info.  */

int sched_verbose = 0;

/* Debugging file.  All printouts are sent to dump, which is always set,
   either to stderr, or to the dump listing file (-dSR).  */
FILE *sched_dump = 0;

/* This is a placeholder for the scheduler parameters common
   to all schedulers.  */
struct common_sched_info_def *common_sched_info;

#define INSN_TICK(INSN)	(HID (INSN)->tick)
#define INSN_EXACT_TICK(INSN) (HID (INSN)->exact_tick)
#define INSN_TICK_ESTIMATE(INSN) (HID (INSN)->tick_estimate)
#define INTER_TICK(INSN) (HID (INSN)->inter_tick)
#define FEEDS_BACKTRACK_INSN(INSN) (HID (INSN)->feeds_backtrack_insn)
#define SHADOW_P(INSN) (HID (INSN)->shadow_p)
#define MUST_RECOMPUTE_SPEC_P(INSN) (HID (INSN)->must_recompute_spec)
/* Cached cost of the instruction.  Use insn_cost to get cost of the
   insn.  -1 here means that the field is not initialized.  */
#define INSN_COST(INSN)	(HID (INSN)->cost)

/* If INSN_TICK of an instruction is equal to INVALID_TICK,
   then it should be recalculated from scratch.  */
#define INVALID_TICK (-(max_insn_queue_index + 1))
/* The minimal value of the INSN_TICK of an instruction.  */
#define MIN_TICK (-max_insn_queue_index)

/* Original order of insns in the ready list.
   Used to keep order of normal insns while separating DEBUG_INSNs.  */
#define INSN_RFS_DEBUG_ORIG_ORDER(INSN) (HID (INSN)->rfs_debug_orig_order)

/* The deciding reason for INSN's place in the ready list.  */
#define INSN_LAST_RFS_WIN(INSN) (HID (INSN)->last_rfs_win)

/* List of important notes we must keep around.  This is a pointer to the
   last element in the list.  */
rtx_insn *note_list;

static struct spec_info_def spec_info_var;
/* Description of the speculative part of the scheduling.
   If NULL - no speculation.  */
spec_info_t spec_info = NULL;

/* True, if recovery block was added during scheduling of current block.
   Used to determine, if we need to fix INSN_TICKs.  */
static bool haifa_recovery_bb_recently_added_p;

/* True, if recovery block was added during this scheduling pass.
   Used to determine if we should have empty memory pools of dependencies
   after finishing current region.  */
bool haifa_recovery_bb_ever_added_p;

/* Counters of different types of speculative instructions.  */
static int nr_begin_data, nr_be_in_data, nr_begin_control, nr_be_in_control;

/* Array used in {unlink, restore}_bb_notes.  */
static rtx_insn **bb_header = 0;

/* Basic block after which recovery blocks will be created.  */
static basic_block before_recovery;

/* Basic block just before the EXIT_BLOCK and after recovery, if we have
   created it.  */
basic_block after_recovery;

/* FALSE if we add bb to another region, so we don't need to initialize it.  */
bool adding_bb_to_current_region_p = true;

/* Queues, etc.  */

/* An instruction is ready to be scheduled when all insns preceding it
   have already been scheduled.  It is important to ensure that all
   insns which use its result will not be executed until its result
   has been computed.  An insn is maintained in one of four structures:

   (P) the "Pending" set of insns which cannot be scheduled until
   their dependencies have been satisfied.
   (Q) the "Queued" set of insns that can be scheduled when sufficient
   time has passed.
   (R) the "Ready" list of unscheduled, uncommitted insns.
   (S) the "Scheduled" list of insns.

   Initially, all insns are either "Pending" or "Ready" depending on
   whether their dependencies are satisfied.

   Insns move from the "Ready" list to the "Scheduled" list as they
   are committed to the schedule.  As this occurs, the insns in the
   "Pending" list have their dependencies satisfied and move to either
   the "Ready" list or the "Queued" set depending on whether
   sufficient time has passed to make them ready.  As time passes,
   insns move from the "Queued" set to the "Ready" list.

   The "Pending" list (P) consists of the insns in the INSN_FORW_DEPS
   of the unscheduled insns, i.e., of those that are ready, queued,
   or pending.
   The "Queued" set (Q) is implemented by the variable `insn_queue'.
   The "Ready" list (R) is implemented by the variables `ready' and
   `n_ready'.
   The "Scheduled" list (S) is the new insn chain built by this pass.

   The transition (R->S) is implemented in the scheduling loop in
   `schedule_block' when the best insn to schedule is chosen.
   The transitions (P->R and P->Q) are implemented in `schedule_insn' as
   insns move from the ready list to the scheduled list.
   The transition (Q->R) is implemented in `queue_to_ready' as time
   passes or stalls are introduced.  */
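
/* An illustrative walk-through of these transitions (a sketch only;
   insn names, cycle numbers and latencies are made up for the example):

     cycle 0:  A is ready (R); B depends on A with latency 2 (P).
               A is chosen and scheduled (R->S, in schedule_block).
               Resolving A's dependence releases B from P; since two
               more cycles must pass before B can issue, it is queued
               (P->Q, in schedule_insn).
     cycle 2:  B's wait has elapsed, so queue_to_ready moves it to
               the ready list (Q->R), where it can now be chosen.  */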

/* Implement a circular buffer to delay instructions until sufficient
   time has passed.  For the new pipeline description interface,
   MAX_INSN_QUEUE_INDEX is a power of two minus one which is not less
   than the maximal time of instruction execution computed by genattr.c
   on the basis of the maximal time of functional unit reservations and
   of getting a result.  This is the longest time an insn may be queued.  */

static rtx_insn_list **insn_queue;
static int q_ptr = 0;
static int q_size = 0;
#define NEXT_Q(X) (((X)+1) & max_insn_queue_index)
#define NEXT_Q_AFTER(X, C) (((X)+C) & max_insn_queue_index)
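
/* A worked example of the wrap-around arithmetic (the concrete value of
   max_insn_queue_index is target-dependent; 31 is assumed here purely
   for illustration).  Because max_insn_queue_index is a power of two
   minus one, masking with it reduces modulo the buffer size:

     NEXT_Q (31)          == (31 + 1) & 31 == 0
     NEXT_Q_AFTER (30, 5) == (30 + 5) & 31 == 3

   so an insn queued 5 cycles ahead of slot 30 lands in slot 3.  */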

#define QUEUE_SCHEDULED (-3)
#define QUEUE_NOWHERE   (-2)
#define QUEUE_READY     (-1)
/* QUEUE_SCHEDULED - INSN is scheduled.
   QUEUE_NOWHERE   - INSN isn't scheduled yet and is neither in the
   queue nor in the ready list.
   QUEUE_READY     - INSN is in ready list.
   N >= 0 - INSN queued for X [where NEXT_Q_AFTER (q_ptr, X) == N] cycles.  */

#define QUEUE_INDEX(INSN) (HID (INSN)->queue_index)

/* The following variable holds the state reflecting all current and
   future reservations of the processor units.  */
state_t curr_state;

/* The following variable is the size of the memory representing all
   current and future reservations of the processor units.  */
size_t dfa_state_size;

/* The following array is used to find the best insn from ready when
   the automaton pipeline interface is used.  */
signed char *ready_try = NULL;

/* The ready list.  */
struct ready_list ready = {NULL, 0, 0, 0, 0};

/* The pointer to the ready list (to be removed).  */
static struct ready_list *readyp = &ready;

/* Scheduling clock.  */
static int clock_var;

/* Clock at which the previous instruction was issued.  */
static int last_clock_var;

/* Set to true if, when queuing a shadow insn, we discover that it would be
   scheduled too late.  */
static bool must_backtrack;

/* The following variable is the number of essential insns issued on
   the current cycle.  An insn is essential if it changes the
   processor's state.  */
int cycle_issued_insns;

/* This records the actual schedule.  It is built up during the main phase
   of schedule_block, and afterwards used to reorder the insns in the RTL.  */
static vec<rtx_insn *> scheduled_insns;

static int may_trap_exp (const_rtx, int);

/* Nonzero iff the address is composed of at most one register.  */
#define CONST_BASED_ADDRESS_P(x)			\
  (REG_P (x)					\
   || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS	\
	|| (GET_CODE (x) == LO_SUM))			\
       && (CONSTANT_P (XEXP (x, 0))			\
	   || CONSTANT_P (XEXP (x, 1)))))

static int haifa_luid_for_non_insn (rtx x);

/* Haifa version of sched_info hooks common to all headers.  */
const struct common_sched_info_def haifa_common_sched_info =
  {
    NULL, /* fix_recovery_cfg */
    NULL, /* add_block */
    NULL, /* estimate_number_of_insns */
    haifa_luid_for_non_insn, /* luid_for_non_insn */
    SCHED_PASS_UNKNOWN /* sched_pass_id */
  };

/* Mapping from instruction UID to its Logical UID.  */
vec<int> sched_luids = vNULL;

/* Next LUID to assign to an instruction.  */
int sched_max_luid = 1;

/* Haifa Instruction Data.  */
vec<haifa_insn_data_def> h_i_d = vNULL;

void (* sched_init_only_bb) (basic_block, basic_block);

/* Split block function.  Different schedulers might use different functions
   to keep their internal data consistent.  */
basic_block (* sched_split_block) (basic_block, rtx);

/* Create empty basic block after the specified block.  */
basic_block (* sched_create_empty_bb) (basic_block);

/* Return the number of cycles until INSN is expected to be ready.
   Return zero if it already is.  */
static int
insn_delay (rtx_insn *insn)
{
  return MAX (INSN_TICK (insn) - clock_var, 0);
}
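
/* For example, if INSN_TICK (insn) is 14 and clock_var is 11, insn_delay
   returns 3: the insn is expected to become ready in three cycles.  Once
   clock_var reaches or passes 14, it returns 0.  (Values are illustrative
   only.)  */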

static int
may_trap_exp (const_rtx x, int is_store)
{
  enum rtx_code code;

  if (x == 0)
    return TRAP_FREE;
  code = GET_CODE (x);
  if (is_store)
    {
      if (code == MEM && may_trap_p (x))
	return TRAP_RISKY;
      else
	return TRAP_FREE;
    }
  if (code == MEM)
    {
      /* The insn uses memory:  a volatile load.  */
      if (MEM_VOLATILE_P (x))
	return IRISKY;
      /* An exception-free load.  */
      if (!may_trap_p (x))
	return IFREE;
      /* A load with one base register, to be further checked.  */
      if (CONST_BASED_ADDRESS_P (XEXP (x, 0)))
	return PFREE_CANDIDATE;
      /* No info on the load, to be further checked.  */
      return PRISKY_CANDIDATE;
    }
  else
    {
      const char *fmt;
      int i, insn_class = TRAP_FREE;

      /* Neither store nor load, check if it may cause a trap.  */
      if (may_trap_p (x))
	return TRAP_RISKY;
      /* Recursive step: walk the insn...  */
      fmt = GET_RTX_FORMAT (code);
      for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
	{
	  if (fmt[i] == 'e')
	    {
	      int tmp_class = may_trap_exp (XEXP (x, i), is_store);
	      insn_class = WORST_CLASS (insn_class, tmp_class);
	    }
	  else if (fmt[i] == 'E')
	    {
	      int j;
	      for (j = 0; j < XVECLEN (x, i); j++)
		{
		  int tmp_class = may_trap_exp (XVECEXP (x, i, j), is_store);
		  insn_class = WORST_CLASS (insn_class, tmp_class);
		  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
		    break;
		}
	    }
	  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
	    break;
	}
      return insn_class;
    }
}

/* Classifies rtx X of an insn for the purpose of verifying that X can be
   executed speculatively (and consequently the insn can be moved
   speculatively), by examining X, returning:
   TRAP_RISKY: store, or risky non-load insn (e.g. division by variable).
   TRAP_FREE: non-load insn.
   IFREE: load from a globally safe location.
   IRISKY: volatile load.
   PFREE_CANDIDATE, PRISKY_CANDIDATE: load that needs to be checked for
   being either PFREE or PRISKY.  */

static int
haifa_classify_rtx (const_rtx x)
{
  int tmp_class = TRAP_FREE;
  int insn_class = TRAP_FREE;
  enum rtx_code code;

  if (GET_CODE (x) == PARALLEL)
    {
      int i, len = XVECLEN (x, 0);

      for (i = len - 1; i >= 0; i--)
	{
	  tmp_class = haifa_classify_rtx (XVECEXP (x, 0, i));
	  insn_class = WORST_CLASS (insn_class, tmp_class);
	  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
	    break;
	}
    }
  else
    {
      code = GET_CODE (x);
      switch (code)
	{
	case CLOBBER:
	  /* Test if it is a 'store'.  */
	  tmp_class = may_trap_exp (XEXP (x, 0), 1);
	  break;
	case SET:
	  /* Test if it is a store.  */
	  tmp_class = may_trap_exp (SET_DEST (x), 1);
	  if (tmp_class == TRAP_RISKY)
	    break;
	  /* Test if it is a load.  */
	  tmp_class =
	    WORST_CLASS (tmp_class,
			 may_trap_exp (SET_SRC (x), 0));
	  break;
	case COND_EXEC:
	  tmp_class = haifa_classify_rtx (COND_EXEC_CODE (x));
	  if (tmp_class == TRAP_RISKY)
	    break;
	  tmp_class = WORST_CLASS (tmp_class,
				   may_trap_exp (COND_EXEC_TEST (x), 0));
	  break;
	case TRAP_IF:
	  tmp_class = TRAP_RISKY;
	  break;
	default:;
	}
      insn_class = tmp_class;
    }

  return insn_class;
}

int
haifa_classify_insn (const_rtx insn)
{
  return haifa_classify_rtx (PATTERN (insn));
}
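
/* Some illustrative classifications (a sketch only; MEM_NOTRAP_P and
   target details can change the outcome):

     (set (reg:SI 100) (mem:SI (reg:SI 101)))
       -> PFREE_CANDIDATE: a possibly trapping load whose address uses a
	  single base register, left for interblock analysis to resolve.
     (set (mem:SI (reg:SI 100)) (reg:SI 101))
       -> TRAP_RISKY: a store that may trap.
     (set (reg:SI 100) (div:SI (reg:SI 101) (reg:SI 102)))
       -> TRAP_RISKY: division by a variable may trap.  */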

/* After the scheduler initialization function has been called, this function
   can be called to enable modulo scheduling.  II is the initiation interval
   we should use; it affects the delays for delay_pairs that were recorded as
   separated by a given number of stages.

   MAX_STAGES provides us with a limit after which we give up scheduling;
   the caller must have unrolled at least as many copies of the loop body
   and recorded delay_pairs for them.

   INSNS is the number of real (non-debug) insns in one iteration of
   the loop.  MAX_UID can be used to test whether an insn belongs to
   the first iteration of the loop; all of them have a uid lower than
   MAX_UID.  */
void
set_modulo_params (int ii, int max_stages, int insns, int max_uid)
{
  modulo_ii = ii;
  modulo_max_stages = max_stages;
  modulo_n_insns = insns;
  modulo_iter0_max_uid = max_uid;
  modulo_backtracks_left = PARAM_VALUE (PARAM_MAX_MODULO_BACKTRACK_ATTEMPTS);
}

/* A structure to record a pair of insns where the first one is a real
   insn that has delay slots, and the second is its delayed shadow.
   I1 is scheduled normally and will emit an assembly instruction,
   while I2 describes the side effect that takes place at the
   transition between cycles CYCLES and (CYCLES + 1) after I1.  */
struct delay_pair
{
  struct delay_pair *next_same_i1;
  rtx_insn *i1, *i2;
  int cycles;
  /* When doing modulo scheduling, a delay_pair can also be used to
     show that I1 and I2 are the same insn in a different stage.  If that
     is the case, STAGES will be nonzero.  */
  int stages;
};

/* Helpers for delay hashing.  */

struct delay_i1_hasher : typed_noop_remove <delay_pair>
{
  typedef delay_pair value_type;
  typedef void compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
};

/* Returns a hash value for X, based on hashing just I1.  */

inline hashval_t
delay_i1_hasher::hash (const value_type *x)
{
  return htab_hash_pointer (x->i1);
}

/* Return true if I1 of pair X is the same as that of pair Y.  */

inline bool
delay_i1_hasher::equal (const value_type *x, const compare_type *y)
{
  return x->i1 == y;
}

struct delay_i2_hasher : typed_free_remove <delay_pair>
{
  typedef delay_pair value_type;
  typedef void compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
};

/* Returns a hash value for X, based on hashing just I2.  */

inline hashval_t
delay_i2_hasher::hash (const value_type *x)
{
  return htab_hash_pointer (x->i2);
}

/* Return true if I2 of pair X is the same as that of pair Y.  */

inline bool
delay_i2_hasher::equal (const value_type *x, const compare_type *y)
{
  return x->i2 == y;
}

/* Two hash tables to record delay_pairs, one indexed by I1 and the other
   indexed by I2.  */
static hash_table<delay_i1_hasher> *delay_htab;
static hash_table<delay_i2_hasher> *delay_htab_i2;

/* Called through htab_traverse.  Walk the hashtable using I2 as
   index, and delete all elements involving an UID higher than
   that pointed to by *DATA.  */
int
haifa_htab_i2_traverse (delay_pair **slot, int *data)
{
  int maxuid = *data;
  struct delay_pair *p = *slot;
  if (INSN_UID (p->i2) >= maxuid || INSN_UID (p->i1) >= maxuid)
    {
      delay_htab_i2->clear_slot (slot);
    }
  return 1;
}

/* Called through htab_traverse.  Walk the hashtable using I1 as
   index, and delete all elements involving an UID higher than
   that pointed to by *DATA.  */
int
haifa_htab_i1_traverse (delay_pair **pslot, int *data)
{
  int maxuid = *data;
  struct delay_pair *p, *first, **pprev;

  if (INSN_UID ((*pslot)->i1) >= maxuid)
    {
      delay_htab->clear_slot (pslot);
      return 1;
    }
  pprev = &first;
  for (p = *pslot; p; p = p->next_same_i1)
    {
      if (INSN_UID (p->i2) < maxuid)
	{
	  *pprev = p;
	  pprev = &p->next_same_i1;
	}
    }
  *pprev = NULL;
  if (first == NULL)
    delay_htab->clear_slot (pslot);
  else
    *pslot = first;
  return 1;
}

/* Discard all delay pairs which involve an insn with an UID higher
   than MAX_UID.  */
void
discard_delay_pairs_above (int max_uid)
{
  delay_htab->traverse <int *, haifa_htab_i1_traverse> (&max_uid);
  delay_htab_i2->traverse <int *, haifa_htab_i2_traverse> (&max_uid);
}

/* This function can be called by a port just before it starts the final
   scheduling pass.  It records the fact that an instruction with delay
   slots has been split into two insns, I1 and I2.  The first one will be
   scheduled normally and initiates the operation.  The second one is a
   shadow which must follow a specific number of cycles after I1; its only
   purpose is to show the side effect that occurs at that cycle in the RTL.
   If a JUMP_INSN or a CALL_INSN has been split, I1 should be a normal INSN,
   while I2 retains the original insn type.

   There are two ways in which the number of cycles can be specified,
   involving the CYCLES and STAGES arguments to this function.  If STAGES
   is zero, we just use the value of CYCLES.  Otherwise, STAGES is a factor
   which is multiplied by MODULO_II to give the number of cycles.  This is
   only useful if the caller also calls set_modulo_params to enable modulo
   scheduling.  */

void
record_delay_slot_pair (rtx_insn *i1, rtx_insn *i2, int cycles, int stages)
{
  struct delay_pair *p = XNEW (struct delay_pair);
  struct delay_pair **slot;

  p->i1 = i1;
  p->i2 = i2;
  p->cycles = cycles;
  p->stages = stages;

  if (!delay_htab)
    {
      delay_htab = new hash_table<delay_i1_hasher> (10);
      delay_htab_i2 = new hash_table<delay_i2_hasher> (10);
    }
  slot = delay_htab->find_slot_with_hash (i1, htab_hash_pointer (i1), INSERT);
  p->next_same_i1 = *slot;
  *slot = p;
  slot = delay_htab_i2->find_slot (p, INSERT);
  *slot = p;
}
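
/* A hypothetical usage sketch for a port, just before its final scheduling
   pass (I1 and I2 are assumed to come from splitting a delayed-branch
   insn; the setup is illustrative, not taken from a real port):

     record_delay_slot_pair (i1, i2, 2, 0);

   This asks the scheduler to place the shadow I2 exactly two cycles after
   I1.  With a nonzero STAGES argument the distance would instead be
   STAGES * modulo_ii cycles; see pair_delay below.  */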

/* Examine the delay pair hashtable to see if INSN is a shadow for another,
   and return the other insn if so.  Return NULL otherwise.  */
rtx_insn *
real_insn_for_shadow (rtx_insn *insn)
{
  struct delay_pair *pair;

  if (!delay_htab)
    return NULL;

  pair = delay_htab_i2->find_with_hash (insn, htab_hash_pointer (insn));
  if (!pair || pair->stages > 0)
    return NULL;
  return pair->i1;
}

/* For a pair P of insns, return the fixed distance in cycles from the first
   insn after which the second must be scheduled.  */
static int
pair_delay (struct delay_pair *p)
{
  if (p->stages == 0)
    return p->cycles;
  else
    return p->stages * modulo_ii;
}
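
/* For example, with an initiation interval of 4 (modulo_ii == 4, an
   illustrative value), a pair recorded with STAGES == 2 yields
   pair_delay == 2 * 4 == 8 cycles, while a pair with STAGES == 0 and
   CYCLES == 3 yields 3 cycles.  */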

/* Given an insn INSN, add a dependence on its delayed shadow if it
   has one.  Also try to find situations where shadows depend on each other
   and add dependencies to the real insns to limit the amount of backtracking
   needed.  */
void
add_delay_dependencies (rtx_insn *insn)
{
  struct delay_pair *pair;
  sd_iterator_def sd_it;
  dep_t dep;

  if (!delay_htab)
    return;

  pair = delay_htab_i2->find_with_hash (insn, htab_hash_pointer (insn));
  if (!pair)
    return;
  add_dependence (insn, pair->i1, REG_DEP_ANTI);
  if (pair->stages)
    return;

  FOR_EACH_DEP (pair->i2, SD_LIST_BACK, sd_it, dep)
    {
      rtx_insn *pro = DEP_PRO (dep);
      struct delay_pair *other_pair
	= delay_htab_i2->find_with_hash (pro, htab_hash_pointer (pro));
      if (!other_pair || other_pair->stages)
	continue;
      if (pair_delay (other_pair) >= pair_delay (pair))
	{
	  if (sched_verbose >= 4)
	    {
	      fprintf (sched_dump, ";;\tadding dependence %d <- %d\n",
		       INSN_UID (other_pair->i1),
		       INSN_UID (pair->i1));
	      fprintf (sched_dump, ";;\tpair1 %d <- %d, cost %d\n",
		       INSN_UID (pair->i1),
		       INSN_UID (pair->i2),
		       pair_delay (pair));
	      fprintf (sched_dump, ";;\tpair2 %d <- %d, cost %d\n",
		       INSN_UID (other_pair->i1),
		       INSN_UID (other_pair->i2),
		       pair_delay (other_pair));
	    }
	  add_dependence (pair->i1, other_pair->i1, REG_DEP_ANTI);
	}
    }
}

/* Forward declarations.  */

static int priority (rtx_insn *);
static int autopref_rank_for_schedule (const rtx_insn *, const rtx_insn *);
static int rank_for_schedule (const void *, const void *);
static void swap_sort (rtx_insn **, int);
static void queue_insn (rtx_insn *, int, const char *);
static int schedule_insn (rtx_insn *);
static void adjust_priority (rtx_insn *);
static void advance_one_cycle (void);
static void extend_h_i_d (void);


/* Notes handling mechanism:
   =========================
   Generally, NOTES are saved before scheduling and restored after scheduling.
   The scheduler distinguishes between two types of notes:

   (1) LOOP_BEGIN, LOOP_END, SETJMP, EHREGION_BEG, EHREGION_END notes:
   Before scheduling a region, a pointer to the note is added to the insn
   that follows or precedes it.  (This happens as part of the data dependence
   computation).  After scheduling an insn, the pointer contained in it is
   used for regenerating the corresponding note (in reemit_notes).

   (2) All other notes (e.g. INSN_DELETED):  Before scheduling a block,
   these notes are put in a list (in rm_other_notes() and
   unlink_other_notes ()).  After scheduling the block, these notes are
   inserted at the beginning of the block (in schedule_block()).  */

static void ready_add (struct ready_list *, rtx_insn *, bool);
static rtx_insn *ready_remove_first (struct ready_list *);
static rtx_insn *ready_remove_first_dispatch (struct ready_list *ready);

static void queue_to_ready (struct ready_list *);
static int early_queue_to_ready (state_t, struct ready_list *);

/* The following functions are used to implement multi-pass scheduling
   on the first cycle.  */
static rtx_insn *ready_remove (struct ready_list *, int);
static void ready_remove_insn (rtx);

static void fix_inter_tick (rtx_insn *, rtx_insn *);
static int fix_tick_ready (rtx_insn *);
static void change_queue_index (rtx_insn *, int);

/* The following functions are used to implement scheduling of data/control
   speculative instructions.  */

static void extend_h_i_d (void);
static void init_h_i_d (rtx_insn *);
static int haifa_speculate_insn (rtx_insn *, ds_t, rtx *);
static void generate_recovery_code (rtx_insn *);
static void process_insn_forw_deps_be_in_spec (rtx, rtx_insn *, ds_t);
static void begin_speculative_block (rtx_insn *);
static void add_to_speculative_block (rtx_insn *);
static void init_before_recovery (basic_block *);
static void create_check_block_twin (rtx_insn *, bool);
static void fix_recovery_deps (basic_block);
static bool haifa_change_pattern (rtx_insn *, rtx);
static void dump_new_block_header (int, basic_block, rtx_insn *, rtx_insn *);
static void restore_bb_notes (basic_block);
static void fix_jump_move (rtx_insn *);
static void move_block_after_check (rtx_insn *);
static void move_succs (vec<edge, va_gc> **, basic_block);
static void sched_remove_insn (rtx_insn *);
static void clear_priorities (rtx_insn *, rtx_vec_t *);
static void calc_priorities (rtx_vec_t);
static void add_jump_dependencies (rtx_insn *, rtx_insn *);

#endif /* INSN_SCHEDULING */

/* Point to state used for the current scheduling pass.  */
struct haifa_sched_info *current_sched_info;

#ifndef INSN_SCHEDULING
void
schedule_insns (void)
{
}
#else

/* Do register pressure sensitive insn scheduling if the flag is set
   up.  */
enum sched_pressure_algorithm sched_pressure;

/* Map regno -> its pressure class.  The map is defined only when
   SCHED_PRESSURE != SCHED_PRESSURE_NONE.  */
enum reg_class *sched_regno_pressure_class;

/* The current register pressure.  Only elements corresponding to
   pressure classes are defined.  */
static int curr_reg_pressure[N_REG_CLASSES];

/* Saved value of the previous array.  */
static int saved_reg_pressure[N_REG_CLASSES];

/* Registers live at the given scheduling point.  */
static bitmap curr_reg_live;

/* Saved value of the previous bitmap.  */
static bitmap saved_reg_live;

/* Registers mentioned in the current region.  */
static bitmap region_ref_regs;

/* Effective number of available registers of a given class (see comment
   in sched_pressure_start_bb).  */
static int sched_class_regs_num[N_REG_CLASSES];
/* Number of call_used_regs.  This is a helper for calculating
   sched_class_regs_num.  */
static int call_used_regs_num[N_REG_CLASSES];

/* Initiate register pressure relative info for scheduling the current
   region.  Currently it only clears the registers mentioned in the
   current region.  */
void
sched_init_region_reg_pressure_info (void)
{
  bitmap_clear (region_ref_regs);
}

/* PRESSURE[CL] describes the pressure on register class CL.  Update it
   for the birth (if BIRTH_P) or death (if !BIRTH_P) of register REGNO.
   LIVE tracks the set of live registers; if it is null, assume that
   every birth or death is genuine.  */
static inline void
mark_regno_birth_or_death (bitmap live, int *pressure, int regno, bool birth_p)
{
  enum reg_class pressure_class;

  pressure_class = sched_regno_pressure_class[regno];
  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      if (pressure_class != NO_REGS)
	{
	  if (birth_p)
	    {
	      if (!live || bitmap_set_bit (live, regno))
		pressure[pressure_class]
		  += (ira_reg_class_max_nregs
		      [pressure_class][PSEUDO_REGNO_MODE (regno)]);
	    }
	  else
	    {
	      if (!live || bitmap_clear_bit (live, regno))
		pressure[pressure_class]
		  -= (ira_reg_class_max_nregs
		      [pressure_class][PSEUDO_REGNO_MODE (regno)]);
	    }
	}
    }
  else if (pressure_class != NO_REGS
	   && ! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
    {
      if (birth_p)
	{
	  if (!live || bitmap_set_bit (live, regno))
	    pressure[pressure_class]++;
	}
      else
	{
	  if (!live || bitmap_clear_bit (live, regno))
	    pressure[pressure_class]--;
	}
    }
}
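
/* A worked example of the bookkeeping above (register number, mode and
   the 32-bit word size are illustrative): the birth of pseudo 200, whose
   pressure class is GENERAL_REGS and whose mode is DImode, adds
   ira_reg_class_max_nregs[GENERAL_REGS][DImode] == 2 to
   pressure[GENERAL_REGS] on a 32-bit target, whereas the birth of a
   single allocatable hard register adds exactly 1.  */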

/* Initiate current register pressure related info from living
   registers given by LIVE.  */
static void
initiate_reg_pressure_info (bitmap live)
{
  int i;
  unsigned int j;
  bitmap_iterator bi;

  for (i = 0; i < ira_pressure_classes_num; i++)
    curr_reg_pressure[ira_pressure_classes[i]] = 0;
  bitmap_clear (curr_reg_live);
  EXECUTE_IF_SET_IN_BITMAP (live, 0, j, bi)
    if (sched_pressure == SCHED_PRESSURE_MODEL
	|| current_nr_blocks == 1
	|| bitmap_bit_p (region_ref_regs, j))
      mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure, j, true);
}

/* Mark registers in X as mentioned in the current region.  */
static void
setup_ref_regs (rtx x)
{
  int i, j, regno;
  const RTX_CODE code = GET_CODE (x);
  const char *fmt;

  if (REG_P (x))
    {
      regno = REGNO (x);
      if (HARD_REGISTER_NUM_P (regno))
	bitmap_set_range (region_ref_regs, regno,
			  hard_regno_nregs[regno][GET_MODE (x)]);
      else
	bitmap_set_bit (region_ref_regs, REGNO (x));
      return;
    }
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      setup_ref_regs (XEXP (x, i));
    else if (fmt[i] == 'E')
      {
	for (j = 0; j < XVECLEN (x, i); j++)
	  setup_ref_regs (XVECEXP (x, i, j));
      }
}

/* Initiate current register pressure related info at the start of
   basic block BB.  */
static void
initiate_bb_reg_pressure_info (basic_block bb)
{
  unsigned int i ATTRIBUTE_UNUSED;
  rtx_insn *insn;

  if (current_nr_blocks > 1)
    FOR_BB_INSNS (bb, insn)
      if (NONDEBUG_INSN_P (insn))
	setup_ref_regs (PATTERN (insn));
  initiate_reg_pressure_info (df_get_live_in (bb));
#ifdef EH_RETURN_DATA_REGNO
  if (bb_has_eh_pred (bb))
    for (i = 0; ; ++i)
      {
	unsigned int regno = EH_RETURN_DATA_REGNO (i);

	if (regno == INVALID_REGNUM)
	  break;
	if (! bitmap_bit_p (df_get_live_in (bb), regno))
	  mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure,
				     regno, true);
      }
#endif
}

/* Save current register pressure related info.  */
static void
save_reg_pressure (void)
{
  int i;

  for (i = 0; i < ira_pressure_classes_num; i++)
    saved_reg_pressure[ira_pressure_classes[i]]
      = curr_reg_pressure[ira_pressure_classes[i]];
  bitmap_copy (saved_reg_live, curr_reg_live);
}

/* Restore saved register pressure related info.  */
static void
restore_reg_pressure (void)
{
  int i;

  for (i = 0; i < ira_pressure_classes_num; i++)
    curr_reg_pressure[ira_pressure_classes[i]]
      = saved_reg_pressure[ira_pressure_classes[i]];
  bitmap_copy (curr_reg_live, saved_reg_live);
}

/* Return TRUE if the register is dying after its USE.  */
static bool
dying_use_p (struct reg_use_data *use)
{
  struct reg_use_data *next;

  for (next = use->next_regno_use; next != use; next = next->next_regno_use)
    if (NONDEBUG_INSN_P (next->insn)
	&& QUEUE_INDEX (next->insn) != QUEUE_SCHEDULED)
      return false;
  return true;
}
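
/* The uses of a regno form a circular list through next_regno_use, so the
   loop above visits every other use of the register exactly once.  For
   example (insn names are illustrative), if r5 is used by insns A, B and
   C and only C is still unscheduled, dying_use_p is false for the uses in
   A and B (C's use is still outstanding) but true for the use in C.  */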

/* Print info about the current register pressure and its excess for
   each pressure class.  */
static void
print_curr_reg_pressure (void)
{
  int i;
  enum reg_class cl;

  fprintf (sched_dump, ";;\t");
  for (i = 0; i < ira_pressure_classes_num; i++)
    {
      cl = ira_pressure_classes[i];
      gcc_assert (curr_reg_pressure[cl] >= 0);
      fprintf (sched_dump, "  %s:%d(%d)", reg_class_names[cl],
	       curr_reg_pressure[cl],
	       curr_reg_pressure[cl] - sched_class_regs_num[cl]);
    }
  fprintf (sched_dump, "\n");
}

/* Determine if INSN has a condition that is clobbered if a register
   in SET_REGS is modified.  */
static bool
cond_clobbered_p (rtx_insn *insn, HARD_REG_SET set_regs)
{
  rtx pat = PATTERN (insn);
  gcc_assert (GET_CODE (pat) == COND_EXEC);
  if (TEST_HARD_REG_BIT (set_regs, REGNO (XEXP (COND_EXEC_TEST (pat), 0))))
    {
      sd_iterator_def sd_it;
      dep_t dep;
      haifa_change_pattern (insn, ORIG_PAT (insn));
      FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
	DEP_STATUS (dep) &= ~DEP_CANCELLED;
      TODO_SPEC (insn) = HARD_DEP;
      if (sched_verbose >= 2)
	fprintf (sched_dump,
		 ";;\t\tdequeue insn %s because of clobbered condition\n",
		 (*current_sched_info->print_insn) (insn, 0));
      return true;
    }

  return false;
}

/* This function should be called after modifying the pattern of INSN,
   to update scheduler data structures as needed.  */
static void
update_insn_after_change (rtx_insn *insn)
{
  sd_iterator_def sd_it;
  dep_t dep;

  dfa_clear_single_insn_cache (insn);

  sd_it = sd_iterator_start (insn,
			     SD_LIST_FORW | SD_LIST_BACK | SD_LIST_RES_BACK);
  while (sd_iterator_cond (&sd_it, &dep))
    {
      DEP_COST (dep) = UNKNOWN_DEP_COST;
      sd_iterator_next (&sd_it);
    }

  /* Invalidate INSN_COST, so it'll be recalculated.  */
  INSN_COST (insn) = -1;
  /* Invalidate INSN_TICK, so it'll be recalculated.  */
  INSN_TICK (insn) = INVALID_TICK;

  /* Invalidate autoprefetch data entry.  */
  INSN_AUTOPREF_MULTIPASS_DATA (insn)[0].status
    = AUTOPREF_MULTIPASS_DATA_UNINITIALIZED;
  INSN_AUTOPREF_MULTIPASS_DATA (insn)[1].status
    = AUTOPREF_MULTIPASS_DATA_UNINITIALIZED;
}


/* Two VECs, one to hold dependencies for which pattern replacements
   need to be applied or restored at the start of the next cycle, and
   another to hold an integer that is either one, to apply the
   corresponding replacement, or zero to restore it.  */
static vec<dep_t> next_cycle_replace_deps;
static vec<int> next_cycle_apply;

static void apply_replacement (dep_t, bool);
static void restore_pattern (dep_t, bool);

/* Look at the remaining dependencies for insn NEXT, and compute and return
   the TODO_SPEC value we should use for it.  This is called after one of
   NEXT's dependencies has been resolved.
   We also perform pattern replacements for predication, and for broken
   replacement dependencies.  The latter is only done if FOR_BACKTRACK is
   false.  */

static ds_t
recompute_todo_spec (rtx_insn *next, bool for_backtrack)
{
  ds_t new_ds;
  sd_iterator_def sd_it;
  dep_t dep, modify_dep = NULL;
  int n_spec = 0;
  int n_control = 0;
  int n_replace = 0;
  bool first_p = true;

  if (sd_lists_empty_p (next, SD_LIST_BACK))
    /* NEXT has all its dependencies resolved.  */
    return 0;

  if (!sd_lists_empty_p (next, SD_LIST_HARD_BACK))
    return HARD_DEP;

  /* If NEXT is intended to sit adjacent to this instruction, we don't
     want to try to break any dependencies.  Treat it as a HARD_DEP.  */
  if (SCHED_GROUP_P (next))
    return HARD_DEP;

  /* Now we've got NEXT with speculative deps only.
     1. Look at the deps to see what we have to do.
     2. Check if we can do 'todo'.  */
  new_ds = 0;

  FOR_EACH_DEP (next, SD_LIST_BACK, sd_it, dep)
    {
      rtx_insn *pro = DEP_PRO (dep);
      ds_t ds = DEP_STATUS (dep) & SPECULATIVE;

      if (DEBUG_INSN_P (pro) && !DEBUG_INSN_P (next))
	continue;

      if (ds)
	{
	  n_spec++;
	  if (first_p)
	    {
	      first_p = false;

	      new_ds = ds;
	    }
	  else
	    new_ds = ds_merge (new_ds, ds);
	}
      else if (DEP_TYPE (dep) == REG_DEP_CONTROL)
	{
	  if (QUEUE_INDEX (pro) != QUEUE_SCHEDULED)
	    {
	      n_control++;
	      modify_dep = dep;
	    }
	  DEP_STATUS (dep) &= ~DEP_CANCELLED;
	}
      else if (DEP_REPLACE (dep) != NULL)
	{
	  if (QUEUE_INDEX (pro) != QUEUE_SCHEDULED)
	    {
	      n_replace++;
	      modify_dep = dep;
	    }
	  DEP_STATUS (dep) &= ~DEP_CANCELLED;
	}
    }

  if (n_replace > 0 && n_control == 0 && n_spec == 0)
    {
      if (!dbg_cnt (sched_breakdep))
	return HARD_DEP;
      FOR_EACH_DEP (next, SD_LIST_BACK, sd_it, dep)
	{
	  struct dep_replacement *desc = DEP_REPLACE (dep);
	  if (desc != NULL)
	    {
	      if (desc->insn == next && !for_backtrack)
		{
		  gcc_assert (n_replace == 1);
		  apply_replacement (dep, true);
		}
	      DEP_STATUS (dep) |= DEP_CANCELLED;
	    }
	}
      return 0;
    }

  else if (n_control == 1 && n_replace == 0 && n_spec == 0)
    {
      rtx_insn *pro, *other;
      rtx new_pat;
      rtx cond = NULL_RTX;
      bool success;
      rtx_insn *prev = NULL;
      int i;
      unsigned regno;

      if ((current_sched_info->flags & DO_PREDICATION) == 0
	  || (ORIG_PAT (next) != NULL_RTX
	      && PREDICATED_PAT (next) == NULL_RTX))
	return HARD_DEP;

      pro = DEP_PRO (modify_dep);
      other = real_insn_for_shadow (pro);
      if (other != NULL_RTX)
	pro = other;

      cond = sched_get_reverse_condition_uncached (pro);
      regno = REGNO (XEXP (cond, 0));

      /* Find the last scheduled insn that modifies the condition register.
	 We can stop looking once we find the insn we depend on through the
	 REG_DEP_CONTROL; if the condition register isn't modified after it,
	 we know that it still has the right value.  */
      if (QUEUE_INDEX (pro) == QUEUE_SCHEDULED)
	FOR_EACH_VEC_ELT_REVERSE (scheduled_insns, i, prev)
	  {
	    HARD_REG_SET t;

	    find_all_hard_reg_sets (prev, &t, true);
	    if (TEST_HARD_REG_BIT (t, regno))
	      return HARD_DEP;
	    if (prev == pro)
	      break;
	  }
      if (ORIG_PAT (next) == NULL_RTX)
	{
	  ORIG_PAT (next) = PATTERN (next);

	  new_pat = gen_rtx_COND_EXEC (VOIDmode, cond, PATTERN (next));
	  success = haifa_change_pattern (next, new_pat);
	  if (!success)
	    return HARD_DEP;
	  PREDICATED_PAT (next) = new_pat;
	}
      else if (PATTERN (next) != PREDICATED_PAT (next))
	{
	  bool success = haifa_change_pattern (next,
					       PREDICATED_PAT (next));
	  gcc_assert (success);
	}
      DEP_STATUS (modify_dep) |= DEP_CANCELLED;
      return DEP_CONTROL;
    }

  if (PREDICATED_PAT (next) != NULL_RTX)
    {
      int tick = INSN_TICK (next);
      bool success = haifa_change_pattern (next,
					   ORIG_PAT (next));
      INSN_TICK (next) = tick;
      gcc_assert (success);
    }

  /* We can't handle the case where there are both speculative and control
     dependencies, so we return HARD_DEP in such a case.  Also fail if
     we have speculative dependencies with not enough points, or more than
     one control dependency.  */
  if ((n_spec > 0 && (n_control > 0 || n_replace > 0))
      || (n_spec > 0
	  /* Too few points?  */
	  && ds_weak (new_ds) < spec_info->data_weakness_cutoff)
      || n_control > 0
      || n_replace > 0)
    return HARD_DEP;

  return new_ds;
}

/* Pointer to the last instruction scheduled.  */
static rtx_insn *last_scheduled_insn;

/* Pointer to the last nondebug instruction scheduled within the
   block, or the prev_head of the scheduling block.  Used by
   rank_for_schedule, so that insns independent of the last scheduled
   insn will be preferred over dependent instructions.  */
static rtx last_nondebug_scheduled_insn;

/* Pointer that iterates through the list of unscheduled insns if we
   have a dbg_cnt enabled.  It always points at an insn prior to the
   first unscheduled one.  */
static rtx_insn *nonscheduled_insns_begin;

/* Compute cost of executing INSN.
   This is the number of cycles between instruction issue and
   instruction results.  */
int
insn_cost (rtx_insn *insn)
{
  int cost;

  if (sched_fusion)
    return 0;

  if (sel_sched_p ())
    {
      if (recog_memoized (insn) < 0)
	return 0;

      cost = insn_default_latency (insn);
      if (cost < 0)
	cost = 0;

      return cost;
    }

  cost = INSN_COST (insn);

  if (cost < 0)
    {
      /* A USE insn, or something else we don't need to
	 understand.  We can't pass these directly to
	 result_ready_cost or insn_default_latency because it will
	 trigger a fatal error for unrecognizable insns.  */
      if (recog_memoized (insn) < 0)
	{
	  INSN_COST (insn) = 0;
	  return 0;
	}
      else
	{
	  cost = insn_default_latency (insn);
	  if (cost < 0)
	    cost = 0;

	  INSN_COST (insn) = cost;
	}
    }

  return cost;
}
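
/* For instance, on a target whose define_insn_reservation gives loads a
   latency of 3 (an illustrative figure), insn_cost of a recognized load
   returns 3 and caches it in INSN_COST; for an unrecognizable insn such
   as a USE, it returns (and caches) 0.  */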

/* Compute cost of dependence LINK.
   This is the number of cycles between instruction issue and
   instruction results.
   ??? We also use this function to call recog_memoized on all insns.  */
int
dep_cost_1 (dep_t link, dw_t dw)
{
  rtx_insn *insn = DEP_PRO (link);
  rtx_insn *used = DEP_CON (link);
  int cost;

  if (DEP_COST (link) != UNKNOWN_DEP_COST)
    return DEP_COST (link);

  if (delay_htab)
    {
      struct delay_pair *delay_entry;
      delay_entry
	= delay_htab_i2->find_with_hash (used, htab_hash_pointer (used));
      if (delay_entry)
	{
	  if (delay_entry->i1 == insn)
	    {
	      DEP_COST (link) = pair_delay (delay_entry);
	      return DEP_COST (link);
	    }
	}
    }

  /* A USE insn should never require the value used to be computed.
     This allows the computation of a function's result and parameter
     values to overlap the return and call.  We don't care about the
     dependence cost when only decreasing register pressure.  */
  if (recog_memoized (used) < 0)
    {
      cost = 0;
      recog_memoized (insn);
    }
  else
    {
      enum reg_note dep_type = DEP_TYPE (link);

      cost = insn_cost (insn);

      if (INSN_CODE (insn) >= 0)
	{
	  if (dep_type == REG_DEP_ANTI)
	    cost = 0;
	  else if (dep_type == REG_DEP_OUTPUT)
	    {
	      cost = (insn_default_latency (insn)
		      - insn_default_latency (used));
	      if (cost <= 0)
		cost = 1;
	    }
	  else if (bypass_p (insn))
	    cost = insn_latency (insn, used);
	}


      if (targetm.sched.adjust_cost_2)
	cost = targetm.sched.adjust_cost_2 (used, (int) dep_type, insn, cost,
					    dw);
      else if (targetm.sched.adjust_cost != NULL)
	{
	  /* This variable is used for backward compatibility with the
	     targets.  */
	  rtx_insn_list *dep_cost_rtx_link =
	    alloc_INSN_LIST (NULL_RTX, NULL);

	  /* Make it self-cycled, so that if someone tries to walk over this
	     incomplete list he/she will be caught in an endless loop.  */
	  XEXP (dep_cost_rtx_link, 1) = dep_cost_rtx_link;

	  /* Targets use only REG_NOTE_KIND of the link.  */
	  PUT_REG_NOTE_KIND (dep_cost_rtx_link, DEP_TYPE (link));

	  cost = targetm.sched.adjust_cost (used, dep_cost_rtx_link,
					    insn, cost);

	  free_INSN_LIST_node (dep_cost_rtx_link);
	}

      if (cost < 0)
	cost = 0;
    }

  DEP_COST (link) = cost;
  return cost;
}
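
/* A worked example of the REG_DEP_OUTPUT case above (latencies are
   illustrative): if the producer's default latency is 3 and the
   consumer's is 1, their results would arrive out of order unless the
   consumer issues at least 3 - 1 == 2 cycles later, so the dependence
   costs 2; had the difference been zero or negative, the cost would be
   clamped to 1 to keep the two writes ordered.  */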
1541
1542/* Compute cost of dependence LINK.
1543   This is the number of cycles between instruction issue and
1544   instruction results.  */
1545int
1546dep_cost (dep_t link)
1547{
1548  return dep_cost_1 (link, 0);
1549}
1550
1551/* Use this sel-sched.c friendly function in reorder2 instead of increasing
1552   INSN_PRIORITY explicitly.  */
1553void
1554increase_insn_priority (rtx_insn *insn, int amount)
1555{
1556  if (!sel_sched_p ())
1557    {
1558      /* We're dealing with haifa-sched.c INSN_PRIORITY.  */
1559      if (INSN_PRIORITY_KNOWN (insn))
1560	  INSN_PRIORITY (insn) += amount;
1561    }
1562  else
1563    {
1564      /* In sel-sched.c INSN_PRIORITY is not kept up to date.
1565	 Use EXPR_PRIORITY instead. */
1566      sel_add_to_insn_priority (insn, amount);
1567    }
1568}
1569
1570/* Return 'true' if DEP should be included in priority calculations.  */
1571static bool
1572contributes_to_priority_p (dep_t dep)
1573{
1574  if (DEBUG_INSN_P (DEP_CON (dep))
1575      || DEBUG_INSN_P (DEP_PRO (dep)))
1576    return false;
1577
1578  /* Critical path is meaningful in block boundaries only.  */
1579  if (!current_sched_info->contributes_to_priority (DEP_CON (dep),
1580						    DEP_PRO (dep)))
1581    return false;
1582
1583  if (DEP_REPLACE (dep) != NULL)
1584    return false;
1585
1586  /* If flag COUNT_SPEC_IN_CRITICAL_PATH is set,
1587     then speculative instructions will less likely be
1588     scheduled.  That is because the priority of
1589     their producers will increase, and, thus, the
1590     producers will more likely be scheduled, thus,
1591     resolving the dependence.  */
1592  if (sched_deps_info->generate_spec_deps
1593      && !(spec_info->flags & COUNT_SPEC_IN_CRITICAL_PATH)
1594      && (DEP_STATUS (dep) & SPECULATIVE))
1595    return false;
1596
1597  return true;
1598}
1599
1600/* Compute the number of nondebug deps in list LIST for INSN.  */
1601
1602static int
1603dep_list_size (rtx insn, sd_list_types_def list)
1604{
1605  sd_iterator_def sd_it;
1606  dep_t dep;
1607  int dbgcount = 0, nodbgcount = 0;
1608
1609  if (!MAY_HAVE_DEBUG_INSNS)
1610    return sd_lists_size (insn, list);
1611
1612  FOR_EACH_DEP (insn, list, sd_it, dep)
1613    {
1614      if (DEBUG_INSN_P (DEP_CON (dep)))
1615	dbgcount++;
1616      else if (!DEBUG_INSN_P (DEP_PRO (dep)))
1617	nodbgcount++;
1618    }
1619
1620  gcc_assert (dbgcount + nodbgcount == sd_lists_size (insn, list));
1621
1622  return nodbgcount;
1623}

bool sched_fusion;

/* Compute the priority number for INSN.  */
static int
priority (rtx_insn *insn)
{
  if (! INSN_P (insn))
    return 0;

  /* We should not be interested in priority of an already scheduled insn.  */
  gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);

  if (!INSN_PRIORITY_KNOWN (insn))
    {
      int this_priority = -1;

      if (sched_fusion)
	{
	  int this_fusion_priority;

	  targetm.sched.fusion_priority (insn, FUSION_MAX_PRIORITY,
					 &this_fusion_priority, &this_priority);
	  INSN_FUSION_PRIORITY (insn) = this_fusion_priority;
	}
      else if (dep_list_size (insn, SD_LIST_FORW) == 0)
	/* ??? We should set INSN_PRIORITY to insn_cost when an insn has
	   some forward deps but all of them are ignored by the
	   contributes_to_priority hook.  At the moment we set the
	   priority of such an insn to 0.  */
	this_priority = insn_cost (insn);
      else
	{
	  rtx_insn *prev_first, *twin;
	  basic_block rec;

	  /* For recovery check instructions we calculate the priority
	     slightly differently than for normal instructions.  Instead
	     of walking through the INSN_FORW_DEPS (check) list, we walk
	     through the INSN_FORW_DEPS list of each instruction in the
	     corresponding recovery block.  */

          /* Selective scheduling does not define RECOVERY_BLOCK macro.  */
	  rec = sel_sched_p () ? NULL : RECOVERY_BLOCK (insn);
	  if (!rec || rec == EXIT_BLOCK_PTR_FOR_FN (cfun))
	    {
	      prev_first = PREV_INSN (insn);
	      twin = insn;
	    }
	  else
	    {
	      prev_first = NEXT_INSN (BB_HEAD (rec));
	      twin = PREV_INSN (BB_END (rec));
	    }

	  do
	    {
	      sd_iterator_def sd_it;
	      dep_t dep;

	      FOR_EACH_DEP (twin, SD_LIST_FORW, sd_it, dep)
		{
		  rtx_insn *next;
		  int next_priority;

		  next = DEP_CON (dep);

		  if (BLOCK_FOR_INSN (next) != rec)
		    {
		      int cost;

		      if (!contributes_to_priority_p (dep))
			continue;

		      if (twin == insn)
			cost = dep_cost (dep);
		      else
			{
			  struct _dep _dep1, *dep1 = &_dep1;

			  init_dep (dep1, insn, next, REG_DEP_ANTI);

			  cost = dep_cost (dep1);
			}

		      next_priority = cost + priority (next);

		      if (next_priority > this_priority)
			this_priority = next_priority;
		    }
		}

	      twin = PREV_INSN (twin);
	    }
	  while (twin != prev_first);
	}

      if (this_priority < 0)
	{
	  gcc_assert (this_priority == -1);

	  this_priority = insn_cost (insn);
	}

      INSN_PRIORITY (insn) = this_priority;
      INSN_PRIORITY_STATUS (insn) = 1;
    }

  return INSN_PRIORITY (insn);
}
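
/* A small worked example of the recursion above (editor's note, the
   latencies are invented).  For a dependence chain A -> B -> C with
   dep_cost (A -> B) = 2, dep_cost (B -> C) = 1 and insn_cost (C) = 1:

       priority (C) = insn_cost (C)    = 1
       priority (B) = 1 + priority (C) = 2
       priority (A) = 2 + priority (B) = 4

   so each insn's priority is the length in cycles of its longest
   dependence path to the end of the region, and sorting the ready
   list by priority approximates critical-path-first scheduling.  */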

/* Macros and functions for keeping the priority queue sorted, and
   dealing with queuing and dequeuing of instructions.  */

/* For each pressure class CL, set DEATH[CL] to the number of registers
   in that class that die in INSN.  */

static void
calculate_reg_deaths (rtx_insn *insn, int *death)
{
  int i;
  struct reg_use_data *use;

  for (i = 0; i < ira_pressure_classes_num; i++)
    death[ira_pressure_classes[i]] = 0;
  for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
    if (dying_use_p (use))
      mark_regno_birth_or_death (0, death, use->regno, true);
}

/* Setup info about the current register pressure impact of scheduling
   INSN at the current scheduling point.  */
static void
setup_insn_reg_pressure_info (rtx_insn *insn)
{
  int i, change, before, after, hard_regno;
  int excess_cost_change;
  machine_mode mode;
  enum reg_class cl;
  struct reg_pressure_data *pressure_info;
  int *max_reg_pressure;
  static int death[N_REG_CLASSES];

  gcc_checking_assert (!DEBUG_INSN_P (insn));

  excess_cost_change = 0;
  calculate_reg_deaths (insn, death);
  pressure_info = INSN_REG_PRESSURE (insn);
  max_reg_pressure = INSN_MAX_REG_PRESSURE (insn);
  gcc_assert (pressure_info != NULL && max_reg_pressure != NULL);
  for (i = 0; i < ira_pressure_classes_num; i++)
    {
      cl = ira_pressure_classes[i];
      gcc_assert (curr_reg_pressure[cl] >= 0);
      change = (int) pressure_info[i].set_increase - death[cl];
      before = MAX (0, max_reg_pressure[i] - sched_class_regs_num[cl]);
      after = MAX (0, max_reg_pressure[i] + change
		   - sched_class_regs_num[cl]);
      hard_regno = ira_class_hard_regs[cl][0];
      gcc_assert (hard_regno >= 0);
      mode = reg_raw_mode[hard_regno];
      excess_cost_change += ((after - before)
			     * (ira_memory_move_cost[mode][cl][0]
				+ ira_memory_move_cost[mode][cl][1]));
    }
  INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insn) = excess_cost_change;
}
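
/* Editor's illustration with invented numbers: suppose that for class
   CL we have sched_class_regs_num[CL] = 4 and a recorded max pressure
   of 4, and that INSN sets one CL register while none die, so CHANGE
   is 1.  Then BEFORE = MAX (0, 4 - 4) = 0 and AFTER = MAX (0, 5 - 4)
   = 1, i.e. INSN pushes one register beyond what CL can hold, and that
   unit of excess is weighted by the round-trip memory-move cost for CL
   as a rough estimate of the spill it might cause.  */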

/* This is the first page of code related to SCHED_PRESSURE_MODEL.
   It tries to make the scheduler take register pressure into account
   without introducing too many unnecessary stalls.  It hooks into the
   main scheduling algorithm at several points:

    - Before scheduling starts, model_start_schedule constructs a
      "model schedule" for the current block.  This model schedule is
      chosen solely to keep register pressure down.  It does not take the
      target's pipeline or the original instruction order into account,
      except as a tie-breaker.  It also doesn't work to a particular
      pressure limit.

      This model schedule gives us an idea of what pressure can be
      achieved for the block and gives us an example of a schedule that
      keeps to that pressure.  It also makes the final schedule less
      dependent on the original instruction order.  This is important
      because the original order can either be "wide" (many values live
      at once, such as in user-scheduled code) or "narrow" (few values
      live at once, such as after loop unrolling, where several
      iterations are executed sequentially).

      We do not apply this model schedule to the rtx stream.  We simply
      record it in model_schedule.  We also compute the maximum pressure,
      MP, that was seen during this schedule.

    - Instructions are added to the ready queue even if they require
      a stall.  The length of the stall is instead computed as:

	 MAX (INSN_TICK (INSN) - clock_var, 0)

      (= insn_delay).  This allows rank_for_schedule to choose between
      introducing a deliberate stall or increasing pressure.

    - Before sorting the ready queue, model_set_excess_costs assigns
      a pressure-based cost to each ready instruction in the queue.
      This is the instruction's INSN_REG_PRESSURE_EXCESS_COST_CHANGE
      (ECC for short) and is effectively measured in cycles.

    - rank_for_schedule ranks instructions based on:

	ECC (insn) + insn_delay (insn)

      then as:

	insn_delay (insn)

      So, for example, an instruction X1 with an ECC of 1 that can issue
      now will win over an instruction X0 with an ECC of zero that would
      introduce a stall of one cycle.  However, an instruction X2 with an
      ECC of 2 that can issue now will lose to both X0 and X1.

    - When an instruction is scheduled, model_recompute updates the model
      schedule with the new pressures (some of which might now exceed the
      original maximum pressure MP).  model_update_limit_points then searches
      for the new point of maximum pressure, if not already known.  */

/* Used to separate high-verbosity debug information for SCHED_PRESSURE_MODEL
   from surrounding debug information.  */
#define MODEL_BAR \
  ";;\t\t+------------------------------------------------------\n"

/* Information about the pressure on a particular register class at a
   particular point of the model schedule.  */
struct model_pressure_data {
  /* The pressure at this point of the model schedule, or -1 if the
     point is associated with an instruction that has already been
     scheduled.  */
  int ref_pressure;

  /* The maximum pressure during or after this point of the model schedule.  */
  int max_pressure;
};

/* Per-instruction information that is used while building the model
   schedule.  Here, "schedule" refers to the model schedule rather
   than the main schedule.  */
struct model_insn_info {
  /* The instruction itself.  */
  rtx_insn *insn;

  /* If this instruction is in model_worklist, these fields link to the
     previous (higher-priority) and next (lower-priority) instructions
     in the list.  */
  struct model_insn_info *prev;
  struct model_insn_info *next;

  /* While constructing the schedule, QUEUE_INDEX describes whether an
     instruction has already been added to the schedule (QUEUE_SCHEDULED),
     is in model_worklist (QUEUE_READY), or neither (QUEUE_NOWHERE).
     old_queue records the value that QUEUE_INDEX had before scheduling
     started, so that we can restore it once the schedule is complete.  */
  int old_queue;

  /* The relative importance of an unscheduled instruction.  Higher
     values indicate greater importance.  */
  unsigned int model_priority;

  /* The length of the longest path of satisfied true dependencies
     that leads to this instruction.  */
  unsigned int depth;

  /* The length of the longest path of dependencies of any kind
     that leads from this instruction.  */
  unsigned int alap;

  /* The number of predecessor nodes that must still be scheduled.  */
  int unscheduled_preds;
};

/* Information about the pressure limit for a particular register class.
   This structure is used when applying a model schedule to the main
   schedule.  */
struct model_pressure_limit {
  /* The maximum register pressure seen in the original model schedule.  */
  int orig_pressure;

  /* The maximum register pressure seen in the current model schedule
     (which excludes instructions that have already been scheduled).  */
  int pressure;

  /* The point of the current model schedule at which PRESSURE is first
     reached.  It is set to -1 if the value needs to be recomputed.  */
  int point;
};

/* Describes a particular way of measuring register pressure.  */
struct model_pressure_group {
  /* Index PCI describes the maximum pressure on ira_pressure_classes[PCI].  */
  struct model_pressure_limit limits[N_REG_CLASSES];

  /* Index (POINT * ira_pressure_classes_num + PCI) describes the pressure
     on register class ira_pressure_classes[PCI] at point POINT of the
     current model schedule.  A POINT of model_num_insns describes the
     pressure at the end of the schedule.  */
  struct model_pressure_data *model;
};

/* Index POINT gives the instruction at point POINT of the model schedule.
   This array doesn't change during main scheduling.  */
static vec<rtx_insn *> model_schedule;

/* The list of instructions in the model worklist, sorted in order of
   decreasing priority.  */
static struct model_insn_info *model_worklist;

/* Index I describes the instruction with INSN_LUID I.  */
static struct model_insn_info *model_insns;

/* The number of instructions in the model schedule.  */
static int model_num_insns;

/* The index of the first instruction in model_schedule that hasn't yet been
   added to the main schedule, or model_num_insns if all of them have.  */
static int model_curr_point;

/* Describes the pressure before each instruction in the model schedule.  */
static struct model_pressure_group model_before_pressure;

/* The first unused model_priority value (as used in model_insn_info).  */
static unsigned int model_next_priority;


/* The model_pressure_data for ira_pressure_classes[PCI] in GROUP
   at point POINT of the model schedule.  */
#define MODEL_PRESSURE_DATA(GROUP, POINT, PCI) \
  (&(GROUP)->model[(POINT) * ira_pressure_classes_num + (PCI)])

/* The maximum pressure on ira_pressure_classes[PCI] in GROUP at or
   after point POINT of the model schedule.  */
#define MODEL_MAX_PRESSURE(GROUP, POINT, PCI) \
  (MODEL_PRESSURE_DATA (GROUP, POINT, PCI)->max_pressure)

/* The pressure on ira_pressure_classes[PCI] in GROUP at point POINT
   of the model schedule.  */
#define MODEL_REF_PRESSURE(GROUP, POINT, PCI) \
  (MODEL_PRESSURE_DATA (GROUP, POINT, PCI)->ref_pressure)

/* Information about INSN that is used when creating the model schedule.  */
#define MODEL_INSN_INFO(INSN) \
  (&model_insns[INSN_LUID (INSN)])

/* The instruction at point POINT of the model schedule.  */
#define MODEL_INSN(POINT) \
  (model_schedule[POINT])


/* Return INSN's index in the model schedule, or model_num_insns if it
   doesn't belong to that schedule.  */

static int
model_index (rtx_insn *insn)
{
  if (INSN_MODEL_INDEX (insn) == 0)
    return model_num_insns;
  return INSN_MODEL_INDEX (insn) - 1;
}
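
/* Editor's note: INSN_MODEL_INDEX stores the model index biased by 1,
   so the zero-initialized default means "not in the model schedule";
   e.g. the insn at model point 0 has INSN_MODEL_INDEX 1, while an insn
   moved in from another block keeps the value 0 and maps to
   model_num_insns here.  */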

/* Make sure that GROUP->limits is up-to-date for the current point
   of the model schedule.  */

static void
model_update_limit_points_in_group (struct model_pressure_group *group)
{
  int pci, max_pressure, point;

  for (pci = 0; pci < ira_pressure_classes_num; pci++)
    {
      /* We may have passed the final point at which the pressure in
	 group->limits[pci].pressure was reached.  Update the limit if so.  */
      max_pressure = MODEL_MAX_PRESSURE (group, model_curr_point, pci);
      group->limits[pci].pressure = max_pressure;

      /* Find the point at which MAX_PRESSURE is first reached.  We need
	 to search in three cases:

	 - We've already moved past the previous pressure point.
	   In this case we search forward from model_curr_point.

	 - We scheduled the previous point of maximum pressure ahead of
	   its position in the model schedule, but doing so didn't bring
	   the pressure point earlier.  In this case we search forward
	   from that previous pressure point.

	 - Scheduling an instruction early caused the maximum pressure
	   to decrease.  In this case we will have set the pressure
	   point to -1, and we search forward from model_curr_point.  */
      point = MAX (group->limits[pci].point, model_curr_point);
      while (point < model_num_insns
	     && MODEL_REF_PRESSURE (group, point, pci) < max_pressure)
	point++;
      group->limits[pci].point = point;

      gcc_assert (MODEL_REF_PRESSURE (group, point, pci) == max_pressure);
      gcc_assert (MODEL_MAX_PRESSURE (group, point, pci) == max_pressure);
    }
}

/* Make sure that all register-pressure limits are up-to-date for the
   current position in the model schedule.  */

static void
model_update_limit_points (void)
{
  model_update_limit_points_in_group (&model_before_pressure);
}

/* Return the model_index of the last unscheduled use in chain USE
   outside of USE's instruction.  Return -1 if there are no other uses,
   or model_num_insns if the register is live at the end of the block.  */

static int
model_last_use_except (struct reg_use_data *use)
{
  struct reg_use_data *next;
  int last, index;

  last = -1;
  for (next = use->next_regno_use; next != use; next = next->next_regno_use)
    if (NONDEBUG_INSN_P (next->insn)
	&& QUEUE_INDEX (next->insn) != QUEUE_SCHEDULED)
      {
	index = model_index (next->insn);
	if (index == model_num_insns)
	  return model_num_insns;
	if (last < index)
	  last = index;
      }
  return last;
}

/* An instruction with model_index POINT has just been scheduled, and it
   adds DELTA to the pressure on ira_pressure_classes[PCI] after POINT - 1.
   Update MODEL_REF_PRESSURE (GROUP, POINT, PCI) and
   MODEL_MAX_PRESSURE (GROUP, POINT, PCI) accordingly.  */

static void
model_start_update_pressure (struct model_pressure_group *group,
			     int point, int pci, int delta)
{
  int next_max_pressure;

  if (point == model_num_insns)
    {
      /* The instruction wasn't part of the model schedule; it was moved
	 from a different block.  Update the pressure for the end of
	 the model schedule.  */
      MODEL_REF_PRESSURE (group, point, pci) += delta;
      MODEL_MAX_PRESSURE (group, point, pci) += delta;
    }
  else
    {
      /* Record that this instruction has been scheduled.  Nothing now
	 changes between POINT and POINT + 1, so get the maximum pressure
	 from the latter.  If the maximum pressure decreases, the new
	 pressure point may be before POINT.  */
      MODEL_REF_PRESSURE (group, point, pci) = -1;
      next_max_pressure = MODEL_MAX_PRESSURE (group, point + 1, pci);
      if (MODEL_MAX_PRESSURE (group, point, pci) > next_max_pressure)
	{
	  MODEL_MAX_PRESSURE (group, point, pci) = next_max_pressure;
	  if (group->limits[pci].point == point)
	    group->limits[pci].point = -1;
	}
    }
}

/* Record that scheduling a later instruction has changed the pressure
   at point POINT of the model schedule by DELTA (which might be 0).
   Update GROUP accordingly.  Return nonzero if these changes might
   trigger changes to previous points as well.  */

static int
model_update_pressure (struct model_pressure_group *group,
		       int point, int pci, int delta)
{
  int ref_pressure, max_pressure, next_max_pressure;

  /* If POINT hasn't yet been scheduled, update its pressure.  */
  ref_pressure = MODEL_REF_PRESSURE (group, point, pci);
  if (ref_pressure >= 0 && delta != 0)
    {
      ref_pressure += delta;
      MODEL_REF_PRESSURE (group, point, pci) = ref_pressure;

      /* Check whether the maximum pressure in the overall schedule
	 has increased.  (This means that the MODEL_MAX_PRESSURE of
	 every point <= POINT will need to increase too; see below.)  */
      if (group->limits[pci].pressure < ref_pressure)
	group->limits[pci].pressure = ref_pressure;

      /* If we are at maximum pressure, and the maximum pressure
	 point was previously unknown or later than POINT,
	 bring it forward.  */
      if (group->limits[pci].pressure == ref_pressure
	  && !IN_RANGE (group->limits[pci].point, 0, point))
	group->limits[pci].point = point;

      /* If POINT used to be the point of maximum pressure, but isn't
	 any longer, we need to recalculate it using a forward walk.  */
      if (group->limits[pci].pressure > ref_pressure
	  && group->limits[pci].point == point)
	group->limits[pci].point = -1;
    }

  /* Update the maximum pressure at POINT.  Changes here might also
     affect the maximum pressure at POINT - 1.  */
  next_max_pressure = MODEL_MAX_PRESSURE (group, point + 1, pci);
  max_pressure = MAX (ref_pressure, next_max_pressure);
  if (MODEL_MAX_PRESSURE (group, point, pci) != max_pressure)
    {
      MODEL_MAX_PRESSURE (group, point, pci) = max_pressure;
      return 1;
    }
  return 0;
}
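
/* Editor's illustration with invented numbers: suppose the maximum
   pressure from POINT + 1 onwards is 2 and that DELTA = 1 raises the
   reference pressure at POINT from 2 to 3.  The maximum at POINT then
   rises from 2 to 3 and the function returns 1, telling the backward
   walk in model_recompute below that POINT - 1 may need updating too;
   once an update leaves MODEL_MAX_PRESSURE unchanged, the walk can
   stop.  */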

/* INSN has just been scheduled.  Update the model schedule accordingly.  */

static void
model_recompute (rtx_insn *insn)
{
  struct {
    int last_use;
    int regno;
  } uses[FIRST_PSEUDO_REGISTER + MAX_RECOG_OPERANDS];
  struct reg_use_data *use;
  struct reg_pressure_data *reg_pressure;
  int delta[N_REG_CLASSES];
  int pci, point, mix, new_last, cl, ref_pressure, queue;
  unsigned int i, num_uses, num_pending_births;
  bool print_p;

  /* The destinations of INSN were previously live from POINT onwards, but are
     now live from model_curr_point onwards.  Set up DELTA accordingly.  */
  point = model_index (insn);
  reg_pressure = INSN_REG_PRESSURE (insn);
  for (pci = 0; pci < ira_pressure_classes_num; pci++)
    {
      cl = ira_pressure_classes[pci];
      delta[cl] = reg_pressure[pci].set_increase;
    }

  /* Record which registers previously died at POINT, but which now die
     before POINT.  Adjust DELTA so that it represents the effect of
     this change after POINT - 1.  Set NUM_PENDING_BIRTHS to the number of
     registers that will be born in the range [model_curr_point, POINT).  */
  num_uses = 0;
  num_pending_births = 0;
  for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
    {
      new_last = model_last_use_except (use);
      if (new_last < point)
	{
	  gcc_assert (num_uses < ARRAY_SIZE (uses));
	  uses[num_uses].last_use = new_last;
	  uses[num_uses].regno = use->regno;
	  /* This register is no longer live after POINT - 1.  */
	  mark_regno_birth_or_death (NULL, delta, use->regno, false);
	  num_uses++;
	  if (new_last >= 0)
	    num_pending_births++;
	}
    }

  /* Update the MODEL_REF_PRESSURE and MODEL_MAX_PRESSURE for POINT.
     Also set each group pressure limit for POINT.  */
  for (pci = 0; pci < ira_pressure_classes_num; pci++)
    {
      cl = ira_pressure_classes[pci];
      model_start_update_pressure (&model_before_pressure,
				   point, pci, delta[cl]);
    }

  /* Walk the model schedule backwards, starting immediately before POINT.  */
  print_p = false;
  if (point != model_curr_point)
    do
      {
	point--;
	insn = MODEL_INSN (point);
	queue = QUEUE_INDEX (insn);

	if (queue != QUEUE_SCHEDULED)
	  {
	    /* DELTA describes the effect of the move on the register pressure
	       after POINT.  Make it describe the effect on the pressure
	       before POINT.  */
	    i = 0;
	    while (i < num_uses)
	      {
		if (uses[i].last_use == point)
		  {
		    /* This register is now live again.  */
		    mark_regno_birth_or_death (NULL, delta,
					       uses[i].regno, true);

		    /* Remove this use from the array.  */
		    uses[i] = uses[num_uses - 1];
		    num_uses--;
		    num_pending_births--;
		  }
		else
		  i++;
	      }

	    if (sched_verbose >= 5)
	      {
		if (!print_p)
		  {
		    fprintf (sched_dump, MODEL_BAR);
		    fprintf (sched_dump, ";;\t\t| New pressure for model"
			     " schedule\n");
		    fprintf (sched_dump, MODEL_BAR);
		    print_p = true;
		  }

		fprintf (sched_dump, ";;\t\t| %3d %4d %-30s ",
			 point, INSN_UID (insn),
			 str_pattern_slim (PATTERN (insn)));
		for (pci = 0; pci < ira_pressure_classes_num; pci++)
		  {
		    cl = ira_pressure_classes[pci];
		    ref_pressure = MODEL_REF_PRESSURE (&model_before_pressure,
						       point, pci);
		    fprintf (sched_dump, " %s:[%d->%d]",
			     reg_class_names[ira_pressure_classes[pci]],
			     ref_pressure, ref_pressure + delta[cl]);
		  }
		fprintf (sched_dump, "\n");
	      }
	  }

	/* Adjust the pressure at POINT.  Set MIX to nonzero if POINT - 1
	   might have changed as well.  */
	mix = num_pending_births;
	for (pci = 0; pci < ira_pressure_classes_num; pci++)
	  {
	    cl = ira_pressure_classes[pci];
	    mix |= delta[cl];
	    mix |= model_update_pressure (&model_before_pressure,
					  point, pci, delta[cl]);
	  }
      }
    while (mix && point > model_curr_point);

  if (print_p)
    fprintf (sched_dump, MODEL_BAR);
}

/* After DEP, which was cancelled, has been resolved for insn NEXT,
   check whether the insn's pattern needs restoring.  */
static bool
must_restore_pattern_p (rtx_insn *next, dep_t dep)
{
  if (QUEUE_INDEX (next) == QUEUE_SCHEDULED)
    return false;

  if (DEP_TYPE (dep) == REG_DEP_CONTROL)
    {
      gcc_assert (ORIG_PAT (next) != NULL_RTX);
      gcc_assert (next == DEP_CON (dep));
    }
  else
    {
      struct dep_replacement *desc = DEP_REPLACE (dep);
      if (desc->insn != next)
	{
	  gcc_assert (*desc->loc == desc->orig);
	  return false;
	}
    }
  return true;
}

/* model_spill_cost (CL, P, P') returns the cost of increasing the
   pressure on CL from P to P'.  We use this to calculate a "base ECC",
   baseECC (CL, X), for each pressure class CL and each instruction X.
   Supposing X changes the pressure on CL from P to P', and that the
   maximum pressure on CL in the current model schedule is MP', then:

   * if X occurs before or at the next point of maximum pressure in
     the model schedule and P' > MP', then:

       baseECC (CL, X) = model_spill_cost (CL, MP, P')

     The idea is that the pressure after scheduling a fixed set of
     instructions -- in this case, the set up to and including the
     next maximum pressure point -- is going to be the same regardless
     of the order; we simply want to keep the intermediate pressure
     under control.  Thus X has a cost of zero unless scheduling it
     now would exceed MP'.

     If all increases in the set are by the same amount, no zero-cost
     instruction will ever cause the pressure to exceed MP'.  However,
     if X is instead moved past an instruction X' with pressure in the
     range (MP' - (P' - P), MP'), the pressure at X' will increase
     beyond MP'.  Since baseECC is very much a heuristic anyway,
     it doesn't seem worth the overhead of tracking cases like these.

     The cost of exceeding MP' is always based on the original maximum
     pressure MP.  This is so that going 2 registers over the original
     limit has the same cost regardless of whether it comes from two
     separate +1 deltas or from a single +2 delta.

   * if X occurs after the next point of maximum pressure in the model
     schedule and P' > P, then:

       baseECC (CL, X) = model_spill_cost (CL, MP, MP' + (P' - P))

     That is, if we move X forward across a point of maximum pressure,
     and if X increases the pressure by P' - P, then we conservatively
     assume that scheduling X next would increase the maximum pressure
     by P' - P.  Again, the cost of doing this is based on the original
     maximum pressure MP, for the same reason as above.

   * if P' < P, P > MP, and X occurs at or after the next point of
     maximum pressure, then:

       baseECC (CL, X) = -model_spill_cost (CL, MAX (MP, P'), P)

     That is, if we have already exceeded the original maximum pressure MP,
     and if X might reduce the maximum pressure again -- or at least push
     it further back, and thus allow more scheduling freedom -- it is given
     a negative cost to reflect the improvement.

   * otherwise,

       baseECC (CL, X) = 0

     In this case, X is not expected to affect the maximum pressure MP',
     so it has zero cost.

   We then create a combined value baseECC (X) that is the sum of
   baseECC (CL, X) for each pressure class CL.

   baseECC (X) could itself be used as the ECC value described above.
   However, this is often too conservative, in the sense that it
   tends to make high-priority instructions that increase pressure
   wait too long in cases where introducing a spill would be better.
   For this reason the final ECC is a priority-adjusted form of
   baseECC (X).  Specifically, we calculate:

     P (X) = INSN_PRIORITY (X) - insn_delay (X) - baseECC (X)
     baseP = MAX { P (X) | baseECC (X) <= 0 }

   Then:

     ECC (X) = MAX (MIN (baseP - P (X), baseECC (X)), 0)

   Thus an instruction's effect on pressure is ignored if it has a high
   enough priority relative to the ones that don't increase pressure.
   Negative values of baseECC (X) do not increase the priority of X
   itself, but they do make it harder for other instructions to
   increase the pressure further.

   This pressure cost is deliberately timid.  The intention has been
   to choose a heuristic that rarely interferes with the normal list
   scheduler in cases where that scheduler would produce good code.
   We simply want to curb some of its worst excesses.  */

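/* A worked example of the final ECC calculation above (editor's note,
   values invented).  Suppose the ready queue holds X and Y with:

       INSN_PRIORITY (X) = 10, insn_delay (X) = 0, baseECC (X) = 3
       INSN_PRIORITY (Y) =  6, insn_delay (Y) = 0, baseECC (Y) = 0

   Then P (X) = 10 - 0 - 3 = 7, P (Y) = 6 - 0 - 0 = 6, and baseP = 6
   (the best P among instructions with baseECC <= 0).  Hence:

       ECC (X) = MAX (MIN (6 - 7, 3), 0) = 0

   i.e. X's pressure cost is forgiven entirely because its priority is
   high enough relative to the "cheap" instructions -- exactly the
   escape hatch the comment above describes.  */
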
/* Return the cost of increasing the pressure in class CL from FROM to TO.

   Here we use the very simplistic cost model that every register above
   sched_class_regs_num[CL] has a spill cost of 1.  We could use other
   measures instead, such as one based on MEMORY_MOVE_COST.  However:

      (1) In order for an instruction to be scheduled, the higher cost
	  would need to be justified in a single saving of that many stalls.
	  This is overly pessimistic, because the benefit of spilling is
	  often to avoid a sequence of several short stalls rather than
	  a single long one.

      (2) The cost is still arbitrary.  Because we are not allocating
	  registers during scheduling, we have no way of knowing for
	  sure how many memory accesses will be required by each spill,
	  where the spills will be placed within the block, or even
	  which block(s) will contain the spills.

   So a higher cost than 1 is often too conservative in practice,
   forcing blocks to contain unnecessary stalls instead of spill code.
   The simple cost below seems to be the best compromise.  It reduces
   the interference with the normal list scheduler, which helps make
   it more suitable for a default-on option.  */

static int
model_spill_cost (int cl, int from, int to)
{
  from = MAX (from, sched_class_regs_num[cl]);
  return MAX (to, from) - from;
}
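
/* For example (editor's note, numbers invented): with
   sched_class_regs_num[CL] = 4, model_spill_cost (CL, 2, 6) clamps FROM
   up to 4 and returns 6 - 4 = 2: each register beyond the four that CL
   can hold costs one notional spill, while increases that stay below
   the limit are free.  */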

/* Return baseECC (ira_pressure_classes[PCI], POINT), given that
   P = curr_reg_pressure[ira_pressure_classes[PCI]] and that
   P' = P + DELTA.  */

static int
model_excess_group_cost (struct model_pressure_group *group,
			 int point, int pci, int delta)
{
  int pressure, cl;

  cl = ira_pressure_classes[pci];
  if (delta < 0 && point >= group->limits[pci].point)
    {
      pressure = MAX (group->limits[pci].orig_pressure,
		      curr_reg_pressure[cl] + delta);
      return -model_spill_cost (cl, pressure, curr_reg_pressure[cl]);
    }

  if (delta > 0)
    {
      if (point > group->limits[pci].point)
	pressure = group->limits[pci].pressure + delta;
      else
	pressure = curr_reg_pressure[cl] + delta;

      if (pressure > group->limits[pci].pressure)
	return model_spill_cost (cl, group->limits[pci].orig_pressure,
				 pressure);
    }

  return 0;
}

/* Return baseECC (MODEL_INSN (INSN)).  Dump the costs to sched_dump
   if PRINT_P.  */

static int
model_excess_cost (rtx_insn *insn, bool print_p)
{
  int point, pci, cl, cost, this_cost, delta;
  struct reg_pressure_data *insn_reg_pressure;
  int insn_death[N_REG_CLASSES];

  calculate_reg_deaths (insn, insn_death);
  point = model_index (insn);
  insn_reg_pressure = INSN_REG_PRESSURE (insn);
  cost = 0;

  if (print_p)
    fprintf (sched_dump, ";;\t\t| %3d %4d | %4d %+3d |", point,
	     INSN_UID (insn), INSN_PRIORITY (insn), insn_delay (insn));

  /* Sum up the individual costs for each register class.  */
  for (pci = 0; pci < ira_pressure_classes_num; pci++)
    {
      cl = ira_pressure_classes[pci];
      delta = insn_reg_pressure[pci].set_increase - insn_death[cl];
      this_cost = model_excess_group_cost (&model_before_pressure,
					   point, pci, delta);
      cost += this_cost;
      if (print_p)
	fprintf (sched_dump, " %s:[%d base cost %d]",
		 reg_class_names[cl], delta, this_cost);
    }

  if (print_p)
    fprintf (sched_dump, "\n");

  return cost;
}

/* Dump the next points of maximum pressure for GROUP.  */

static void
model_dump_pressure_points (struct model_pressure_group *group)
{
  int pci, cl;

  fprintf (sched_dump, ";;\t\t|  pressure points");
  for (pci = 0; pci < ira_pressure_classes_num; pci++)
    {
      cl = ira_pressure_classes[pci];
      fprintf (sched_dump, " %s:[%d->%d at ", reg_class_names[cl],
	       curr_reg_pressure[cl], group->limits[pci].pressure);
      if (group->limits[pci].point < model_num_insns)
	fprintf (sched_dump, "%d:%d]", group->limits[pci].point,
		 INSN_UID (MODEL_INSN (group->limits[pci].point)));
      else
	fprintf (sched_dump, "end]");
    }
  fprintf (sched_dump, "\n");
}

/* Set INSN_REG_PRESSURE_EXCESS_COST_CHANGE for INSNS[0...COUNT-1].  */

static void
model_set_excess_costs (rtx_insn **insns, int count)
{
  int i, cost, priority_base, priority;
  bool print_p;

  /* Record the baseECC value for each instruction in the model schedule,
     except that negative costs are converted to zero ones now rather than
     later.  Do not assign a cost to debug instructions, since they must
     not change code-generation decisions.  Experiments suggest we also
     get better results by not assigning a cost to instructions from
     a different block.

     Set PRIORITY_BASE to baseP in the block comment above.  This is the
     maximum priority of the "cheap" instructions, which should always
     include the next model instruction.  */
  priority_base = 0;
  print_p = false;
  for (i = 0; i < count; i++)
    if (INSN_MODEL_INDEX (insns[i]))
      {
	if (sched_verbose >= 6 && !print_p)
	  {
	    fprintf (sched_dump, MODEL_BAR);
	    fprintf (sched_dump, ";;\t\t| Pressure costs for ready queue\n");
	    model_dump_pressure_points (&model_before_pressure);
	    fprintf (sched_dump, MODEL_BAR);
	    print_p = true;
	  }
	cost = model_excess_cost (insns[i], print_p);
	if (cost <= 0)
	  {
	    priority = INSN_PRIORITY (insns[i]) - insn_delay (insns[i]) - cost;
	    priority_base = MAX (priority_base, priority);
	    cost = 0;
	  }
	INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insns[i]) = cost;
      }
  if (print_p)
    fprintf (sched_dump, MODEL_BAR);

  /* Use MAX (baseECC, 0) and baseP to calculate ECC for each
     instruction.  */
  for (i = 0; i < count; i++)
    {
      cost = INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insns[i]);
      priority = INSN_PRIORITY (insns[i]) - insn_delay (insns[i]);
      if (cost > 0 && priority > priority_base)
	{
	  cost += priority_base - priority;
	  INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insns[i]) = MAX (cost, 0);
	}
    }
}


/* Enum of rank_for_schedule heuristic decisions.  */
enum rfs_decision {
  RFS_LIVE_RANGE_SHRINK1, RFS_LIVE_RANGE_SHRINK2,
  RFS_SCHED_GROUP, RFS_PRESSURE_DELAY, RFS_PRESSURE_TICK,
  RFS_FEEDS_BACKTRACK_INSN, RFS_PRIORITY, RFS_SPECULATION,
  RFS_SCHED_RANK, RFS_LAST_INSN, RFS_PRESSURE_INDEX,
  RFS_DEP_COUNT, RFS_TIE, RFS_FUSION, RFS_N };

/* Corresponding strings for print outs.  */
static const char *rfs_str[RFS_N] = {
  "RFS_LIVE_RANGE_SHRINK1", "RFS_LIVE_RANGE_SHRINK2",
  "RFS_SCHED_GROUP", "RFS_PRESSURE_DELAY", "RFS_PRESSURE_TICK",
  "RFS_FEEDS_BACKTRACK_INSN", "RFS_PRIORITY", "RFS_SPECULATION",
  "RFS_SCHED_RANK", "RFS_LAST_INSN", "RFS_PRESSURE_INDEX",
  "RFS_DEP_COUNT", "RFS_TIE", "RFS_FUSION" };

/* Statistical breakdown of rank_for_schedule decisions.  */
typedef struct { unsigned stats[RFS_N]; } rank_for_schedule_stats_t;
static rank_for_schedule_stats_t rank_for_schedule_stats;

/* Return the result of comparing insns TMP and TMP2 and update
   rank_for_schedule statistics.  */
static int
rfs_result (enum rfs_decision decision, int result, rtx tmp, rtx tmp2)
{
  ++rank_for_schedule_stats.stats[decision];
  if (result < 0)
    INSN_LAST_RFS_WIN (tmp) = decision;
  else if (result > 0)
    INSN_LAST_RFS_WIN (tmp2) = decision;
  else
    gcc_unreachable ();
  return result;
}

/* Sorting predicate to move DEBUG_INSNs to the top of ready list, while
   keeping normal insns in original order.  */

static int
rank_for_schedule_debug (const void *x, const void *y)
{
  rtx_insn *tmp = *(rtx_insn * const *) y;
  rtx_insn *tmp2 = *(rtx_insn * const *) x;

  /* Schedule debug insns as early as possible.  */
  if (DEBUG_INSN_P (tmp) && !DEBUG_INSN_P (tmp2))
    return -1;
  else if (!DEBUG_INSN_P (tmp) && DEBUG_INSN_P (tmp2))
    return 1;
  else if (DEBUG_INSN_P (tmp) && DEBUG_INSN_P (tmp2))
    return INSN_LUID (tmp) - INSN_LUID (tmp2);
  else
    return INSN_RFS_DEBUG_ORIG_ORDER (tmp2) - INSN_RFS_DEBUG_ORIG_ORDER (tmp);
}

/* Returns a positive value if x is preferred; returns a negative value if
   y is preferred.  Should never return 0, since that will make the sort
   unstable.  */

static int
rank_for_schedule (const void *x, const void *y)
{
  rtx_insn *tmp = *(rtx_insn * const *) y;
  rtx_insn *tmp2 = *(rtx_insn * const *) x;
  int tmp_class, tmp2_class;
  int val, priority_val, info_val, diff;

  if (live_range_shrinkage_p)
    {
      /* Don't use SCHED_PRESSURE_MODEL -- it results in much worse
	 code.  */
      gcc_assert (sched_pressure == SCHED_PRESSURE_WEIGHTED);
      if ((INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp) < 0
	   || INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2) < 0)
	  && (diff = (INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp)
		      - INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2))) != 0)
	return rfs_result (RFS_LIVE_RANGE_SHRINK1, diff, tmp, tmp2);
      /* Sort by INSN_LUID (original insn order), so that we make the
	 sort stable.  This minimizes instruction movement, thus
	 minimizing sched's effect on debugging and cross-jumping.  */
      return rfs_result (RFS_LIVE_RANGE_SHRINK2,
			 INSN_LUID (tmp) - INSN_LUID (tmp2), tmp, tmp2);
    }

  /* The insn in a schedule group should be issued first.  */
  if (flag_sched_group_heuristic &&
      SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
    return rfs_result (RFS_SCHED_GROUP, SCHED_GROUP_P (tmp2) ? 1 : -1,
		       tmp, tmp2);

  /* Make sure that the priorities of TMP and TMP2 are initialized.  */
  gcc_assert (INSN_PRIORITY_KNOWN (tmp) && INSN_PRIORITY_KNOWN (tmp2));

  if (sched_fusion)
    {
      /* An instruction with the same fusion priority as the last
	 scheduled instruction is the one we want to pick next.  Failing
	 that, we sort the ready list first by fusion priority, then by
	 priority, and finally by INSN_LUID.  */
      int a = INSN_FUSION_PRIORITY (tmp);
      int b = INSN_FUSION_PRIORITY (tmp2);
      int last = -1;

      if (last_nondebug_scheduled_insn
	  && !NOTE_P (last_nondebug_scheduled_insn)
	  && BLOCK_FOR_INSN (tmp)
	       == BLOCK_FOR_INSN (last_nondebug_scheduled_insn))
	last = INSN_FUSION_PRIORITY (last_nondebug_scheduled_insn);

      if (a != last && b != last)
	{
	  if (a == b)
	    {
	      a = INSN_PRIORITY (tmp);
	      b = INSN_PRIORITY (tmp2);
	    }
	  if (a != b)
	    return rfs_result (RFS_FUSION, b - a, tmp, tmp2);
	  else
	    return rfs_result (RFS_FUSION,
			       INSN_LUID (tmp) - INSN_LUID (tmp2), tmp, tmp2);
	}
      else if (a == b)
	{
	  gcc_assert (last_nondebug_scheduled_insn
		      && !NOTE_P (last_nondebug_scheduled_insn));
	  last = INSN_PRIORITY (last_nondebug_scheduled_insn);

	  a = abs (INSN_PRIORITY (tmp) - last);
	  b = abs (INSN_PRIORITY (tmp2) - last);
	  if (a != b)
	    return rfs_result (RFS_FUSION, a - b, tmp, tmp2);
	  else
	    return rfs_result (RFS_FUSION,
			       INSN_LUID (tmp) - INSN_LUID (tmp2), tmp, tmp2);
	}
      else if (a == last)
	return rfs_result (RFS_FUSION, -1, tmp, tmp2);
      else
	return rfs_result (RFS_FUSION, 1, tmp, tmp2);
    }

  if (sched_pressure != SCHED_PRESSURE_NONE)
    {
      /* Prefer insn whose scheduling results in the smallest register
	 pressure excess.  */
      if ((diff = (INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp)
		   + insn_delay (tmp)
		   - INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2)
		   - insn_delay (tmp2))))
	return rfs_result (RFS_PRESSURE_DELAY, diff, tmp, tmp2);
    }

  if (sched_pressure != SCHED_PRESSURE_NONE
      && (INSN_TICK (tmp2) > clock_var || INSN_TICK (tmp) > clock_var)
      && INSN_TICK (tmp2) != INSN_TICK (tmp))
    {
      diff = INSN_TICK (tmp) - INSN_TICK (tmp2);
      return rfs_result (RFS_PRESSURE_TICK, diff, tmp, tmp2);
    }

  /* If we are doing backtracking in this schedule, prefer insns that
     have forward dependencies with negative cost against an insn that
     was already scheduled.  */
  if (current_sched_info->flags & DO_BACKTRACKING)
    {
      priority_val = FEEDS_BACKTRACK_INSN (tmp2) - FEEDS_BACKTRACK_INSN (tmp);
      if (priority_val)
	return rfs_result (RFS_FEEDS_BACKTRACK_INSN, priority_val, tmp, tmp2);
    }

  /* Prefer insn with higher priority.  */
  priority_val = INSN_PRIORITY (tmp2) - INSN_PRIORITY (tmp);

  if (flag_sched_critical_path_heuristic && priority_val)
    return rfs_result (RFS_PRIORITY, priority_val, tmp, tmp2);

  if (PARAM_VALUE (PARAM_SCHED_AUTOPREF_QUEUE_DEPTH) >= 0)
    {
      int autopref = autopref_rank_for_schedule (tmp, tmp2);
      if (autopref != 0)
	return autopref;
    }

  /* Prefer the speculative insn whose dependencies are weaker.  */
  if (flag_sched_spec_insn_heuristic && spec_info)
    {
      ds_t ds1, ds2;
      dw_t dw1, dw2;
      int dw;

      ds1 = TODO_SPEC (tmp) & SPECULATIVE;
      if (ds1)
	dw1 = ds_weak (ds1);
      else
	dw1 = NO_DEP_WEAK;

      ds2 = TODO_SPEC (tmp2) & SPECULATIVE;
      if (ds2)
	dw2 = ds_weak (ds2);
      else
	dw2 = NO_DEP_WEAK;

      dw = dw2 - dw1;
      if (dw > (NO_DEP_WEAK / 8) || dw < -(NO_DEP_WEAK / 8))
	return rfs_result (RFS_SPECULATION, dw, tmp, tmp2);
    }

  info_val = (*current_sched_info->rank) (tmp, tmp2);
  if (flag_sched_rank_heuristic && info_val)
    return rfs_result (RFS_SCHED_RANK, info_val, tmp, tmp2);

  /* Compare insns based on their relation to the last scheduled
     non-debug insn.  */
  if (flag_sched_last_insn_heuristic && last_nondebug_scheduled_insn)
    {
      dep_t dep1;
      dep_t dep2;
      rtx last = last_nondebug_scheduled_insn;

      /* Classify the instructions into three classes:
         1) Data dependent on last scheduled insn.
         2) Anti/Output dependent on last scheduled insn.
         3) Independent of last scheduled insn, or has latency of one.
         Choose the insn from the highest numbered class if different.  */
      dep1 = sd_find_dep_between (last, tmp, true);

      if (dep1 == NULL || dep_cost (dep1) == 1)
	tmp_class = 3;
      else if (/* Data dependence.  */
	       DEP_TYPE (dep1) == REG_DEP_TRUE)
	tmp_class = 1;
      else
	tmp_class = 2;

      dep2 = sd_find_dep_between (last, tmp2, true);

      if (dep2 == NULL || dep_cost (dep2) == 1)
	tmp2_class = 3;
      else if (/* Data dependence.  */
	       DEP_TYPE (dep2) == REG_DEP_TRUE)
	tmp2_class = 1;
      else
	tmp2_class = 2;

      if ((val = tmp2_class - tmp_class))
	return rfs_result (RFS_LAST_INSN, val, tmp, tmp2);
    }

  /* Prefer instructions that occur earlier in the model schedule.  */
  if (sched_pressure == SCHED_PRESSURE_MODEL
      && INSN_BB (tmp) == target_bb && INSN_BB (tmp2) == target_bb)
    {
      diff = model_index (tmp) - model_index (tmp2);
      gcc_assert (diff != 0);
      return rfs_result (RFS_PRESSURE_INDEX, diff, tmp, tmp2);
    }

  /* Prefer the insn which has more later insns that depend on it.
     This gives the scheduler more freedom when scheduling later
     instructions at the expense of added register pressure.  */

  val = (dep_list_size (tmp2, SD_LIST_FORW)
	 - dep_list_size (tmp, SD_LIST_FORW));

  if (flag_sched_dep_count_heuristic && val != 0)
    return rfs_result (RFS_DEP_COUNT, val, tmp, tmp2);

  /* If insns are equally good, sort by INSN_LUID (original insn order),
     so that we make the sort stable.  This minimizes instruction movement,
     thus minimizing sched's effect on debugging and cross-jumping.  */
  return rfs_result (RFS_TIE, INSN_LUID (tmp) - INSN_LUID (tmp2), tmp, tmp2);
}

/* Resort the array A in which only element at index N may be out of order.  */

HAIFA_INLINE static void
swap_sort (rtx_insn **a, int n)
{
  rtx_insn *insn = a[n - 1];
  int i = n - 2;

  while (i >= 0 && rank_for_schedule (a + i, &insn) >= 0)
    {
      a[i + 1] = a[i];
      i -= 1;
    }
  a[i + 1] = insn;
}

/* Add INSN to the insn queue so that it can be executed at least
   N_CYCLES after the currently executing insn.  Preserve the insn
   chain for debugging purposes.  REASON will be printed in the
   debugging output.  */

HAIFA_INLINE static void
queue_insn (rtx_insn *insn, int n_cycles, const char *reason)
{
  int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
  rtx_insn_list *link = alloc_INSN_LIST (insn, insn_queue[next_q]);
  int new_tick;

  gcc_assert (n_cycles <= max_insn_queue_index);
  gcc_assert (!DEBUG_INSN_P (insn));

  insn_queue[next_q] = link;
  q_size += 1;

  if (sched_verbose >= 2)
    {
      fprintf (sched_dump, ";;\t\tReady-->Q: insn %s: ",
	       (*current_sched_info->print_insn) (insn, 0));

      fprintf (sched_dump, "queued for %d cycles (%s).\n", n_cycles, reason);
    }

  QUEUE_INDEX (insn) = next_q;

  if (current_sched_info->flags & DO_BACKTRACKING)
    {
      new_tick = clock_var + n_cycles;
      if (INSN_TICK (insn) == INVALID_TICK || INSN_TICK (insn) < new_tick)
	INSN_TICK (insn) = new_tick;

      if (INSN_EXACT_TICK (insn) != INVALID_TICK
	  && INSN_EXACT_TICK (insn) < clock_var + n_cycles)
	{
	  must_backtrack = true;
	  if (sched_verbose >= 2)
	    fprintf (sched_dump, ";;\t\tcausing a backtrack.\n");
	}
    }
}
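
/* Editor's note: the insn queue is a circular buffer of insn lists
   indexed by cycle, and NEXT_Q_AFTER wraps Q_PTR + N_CYCLES around the
   buffer size.  With invented numbers: if the queue has 64 slots
   (max_insn_queue_index = 63), q_ptr = 60 and n_cycles = 6, the insn
   lands in slot (60 + 6) & 63 = 2 and becomes ready once the queue
   pointer has advanced six cycles.  */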

/* Remove INSN from queue.  */
static void
queue_remove (rtx_insn *insn)
{
  gcc_assert (QUEUE_INDEX (insn) >= 0);
  remove_free_INSN_LIST_elem (insn, &insn_queue[QUEUE_INDEX (insn)]);
  q_size--;
  QUEUE_INDEX (insn) = QUEUE_NOWHERE;
}

/* Return a pointer to the bottom of the ready list, i.e. the insn
   with the lowest priority.  */

rtx_insn **
ready_lastpos (struct ready_list *ready)
{
  gcc_assert (ready->n_ready >= 1);
  return ready->vec + ready->first - ready->n_ready + 1;
}
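
/* Editor's sketch of the layout assumed by the accessors above and
   below: READY->vec holds the list in order of increasing priority
   ending at index READY->first, so the highest-priority insn is
   vec[first] and the lowest-priority one is vec[first - n_ready + 1]:

       vec[]:  ... [first - n_ready + 1] ... [first]
                    lowest priority          highest priority

   Hence ready_element (ready, 0) below returns the best insn, and the
   pointer returned by ready_lastpos can be scanned upwards to visit
   the list in order of increasing priority.  */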

/* Add an element INSN to the ready list so that it ends up with the
   lowest/highest priority depending on FIRST_P.  */

HAIFA_INLINE static void
ready_add (struct ready_list *ready, rtx_insn *insn, bool first_p)
{
  if (!first_p)
    {
      if (ready->first == ready->n_ready)
	{
	  memmove (ready->vec + ready->veclen - ready->n_ready,
		   ready_lastpos (ready),
		   ready->n_ready * sizeof (rtx));
	  ready->first = ready->veclen - 1;
	}
      ready->vec[ready->first - ready->n_ready] = insn;
    }
  else
    {
      if (ready->first == ready->veclen - 1)
	{
	  if (ready->n_ready)
	    /* ready_lastpos() fails when called with (ready->n_ready == 0).  */
	    memmove (ready->vec + ready->veclen - ready->n_ready - 1,
		     ready_lastpos (ready),
		     ready->n_ready * sizeof (rtx));
	  ready->first = ready->veclen - 2;
	}
      ready->vec[++(ready->first)] = insn;
    }

  ready->n_ready++;
  if (DEBUG_INSN_P (insn))
    ready->n_debug++;

  gcc_assert (QUEUE_INDEX (insn) != QUEUE_READY);
  QUEUE_INDEX (insn) = QUEUE_READY;

  if (INSN_EXACT_TICK (insn) != INVALID_TICK
      && INSN_EXACT_TICK (insn) < clock_var)
    {
      must_backtrack = true;
    }
}

/* Remove the element with the highest priority from the ready list and
   return it.  */

HAIFA_INLINE static rtx_insn *
ready_remove_first (struct ready_list *ready)
{
  rtx_insn *t;

  gcc_assert (ready->n_ready);
  t = ready->vec[ready->first--];
  ready->n_ready--;
  if (DEBUG_INSN_P (t))
    ready->n_debug--;
  /* If the queue becomes empty, reset it.  */
  if (ready->n_ready == 0)
    ready->first = ready->veclen - 1;

  gcc_assert (QUEUE_INDEX (t) == QUEUE_READY);
  QUEUE_INDEX (t) = QUEUE_NOWHERE;

  return t;
}

/* The following code implements multi-pass scheduling for the first
   cycle.  In other words, we try to choose the ready insn that
   permits starting the maximum number of insns on the same cycle.  */

/* Return a pointer to the element INDEX from the ready list.  INDEX
   for the insn with the highest priority is 0, and the lowest priority
   has N_READY - 1.  */

rtx_insn *
ready_element (struct ready_list *ready, int index)
{
  gcc_assert (ready->n_ready && index < ready->n_ready);

  return ready->vec[ready->first - index];
}

/* Remove the element INDEX from the ready list and return it.  INDEX
   for insn with the highest priority is 0, and the lowest priority
   has N_READY - 1.  */

HAIFA_INLINE static rtx_insn *
ready_remove (struct ready_list *ready, int index)
{
  rtx_insn *t;
  int i;

  if (index == 0)
    return ready_remove_first (ready);
  gcc_assert (ready->n_ready && index < ready->n_ready);
  t = ready->vec[ready->first - index];
  ready->n_ready--;
  if (DEBUG_INSN_P (t))
    ready->n_debug--;
  for (i = index; i < ready->n_ready; i++)
    ready->vec[ready->first - i] = ready->vec[ready->first - i - 1];
  QUEUE_INDEX (t) = QUEUE_NOWHERE;
  return t;
}

/* Remove INSN from the ready list.  */
static void
ready_remove_insn (rtx insn)
{
  int i;

  for (i = 0; i < readyp->n_ready; i++)
    if (ready_element (readyp, i) == insn)
      {
        ready_remove (readyp, i);
        return;
      }
  gcc_unreachable ();
}

/* Calculate the difference of two statistics sets WAS and NOW.
   The result is returned in WAS.  */
static void
rank_for_schedule_stats_diff (rank_for_schedule_stats_t *was,
			      const rank_for_schedule_stats_t *now)
{
  for (int i = 0; i < RFS_N; ++i)
    was->stats[i] = now->stats[i] - was->stats[i];
}

/* Print rank_for_schedule statistics.  */
static void
print_rank_for_schedule_stats (const char *prefix,
			       const rank_for_schedule_stats_t *stats,
			       struct ready_list *ready)
{
  for (int i = 0; i < RFS_N; ++i)
    if (stats->stats[i])
      {
	fprintf (sched_dump, "%s%20s: %u", prefix, rfs_str[i], stats->stats[i]);

	if (ready != NULL)
	  /* Print out insns that won due to RFS_<I>.  */
	  {
	    rtx_insn **p = ready_lastpos (ready);

	    fprintf (sched_dump, ":");
	    /* Start with 1 since least-priority insn didn't have any wins.  */
	    for (int j = 1; j < ready->n_ready; ++j)
	      if (INSN_LAST_RFS_WIN (p[j]) == i)
		fprintf (sched_dump, " %s",
			 (*current_sched_info->print_insn) (p[j], 0));
	  }
	fprintf (sched_dump, "\n");
      }
}

/* Separate DEBUG_INSNS from normal insns.  DEBUG_INSNs go to the end
   of the array.  */
static void
ready_sort_debug (struct ready_list *ready)
{
  int i;
  rtx_insn **first = ready_lastpos (ready);

  for (i = 0; i < ready->n_ready; ++i)
    if (!DEBUG_INSN_P (first[i]))
      INSN_RFS_DEBUG_ORIG_ORDER (first[i]) = i;

  qsort (first, ready->n_ready, sizeof (rtx), rank_for_schedule_debug);
}

/* Sort non-debug insns in the ready list READY by ascending priority.
   Assumes that all debug insns are separated from the real insns.  */
static void
ready_sort_real (struct ready_list *ready)
{
  int i;
  rtx_insn **first = ready_lastpos (ready);
  int n_ready_real = ready->n_ready - ready->n_debug;

  if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
    for (i = 0; i < n_ready_real; ++i)
      setup_insn_reg_pressure_info (first[i]);
  else if (sched_pressure == SCHED_PRESSURE_MODEL
	   && model_curr_point < model_num_insns)
    model_set_excess_costs (first, n_ready_real);

  rank_for_schedule_stats_t stats1;
  if (sched_verbose >= 4)
    stats1 = rank_for_schedule_stats;

  if (n_ready_real == 2)
    swap_sort (first, n_ready_real);
  else if (n_ready_real > 2)
    qsort (first, n_ready_real, sizeof (rtx), rank_for_schedule);

  if (sched_verbose >= 4)
    {
      rank_for_schedule_stats_diff (&stats1, &rank_for_schedule_stats);
      print_rank_for_schedule_stats (";;\t\t", &stats1, ready);
    }
}

/* Sort the ready list READY by ascending priority.  */
static void
ready_sort (struct ready_list *ready)
{
  if (ready->n_debug > 0)
    ready_sort_debug (ready);
  else
    ready_sort_real (ready);
}

/* PREV is an insn that is ready to execute.  Adjust its priority if that
   will help shorten or lengthen register lifetimes as appropriate.  Also
   provide a hook for the target to tweak itself.  */

HAIFA_INLINE static void
adjust_priority (rtx_insn *prev)
{
  /* ??? There used to be code here to try and estimate how an insn
     affected register lifetimes, but it did it by looking at REG_DEAD
     notes, which we removed in schedule_region.  Nor did it try to
     take into account register pressure or anything useful like that.

     Revisit when we have a machine model to work with and not before.  */

  if (targetm.sched.adjust_priority)
    INSN_PRIORITY (prev) =
      targetm.sched.adjust_priority (prev, INSN_PRIORITY (prev));
}
3162
3163/* Advance DFA state STATE on one cycle.  */
3164void
3165advance_state (state_t state)
3166{
3167  if (targetm.sched.dfa_pre_advance_cycle)
3168    targetm.sched.dfa_pre_advance_cycle ();
3169
3170  if (targetm.sched.dfa_pre_cycle_insn)
3171    state_transition (state,
3172		      targetm.sched.dfa_pre_cycle_insn ());
3173
  state_transition (state, NULL);

  if (targetm.sched.dfa_post_cycle_insn)
    state_transition (state,
		      targetm.sched.dfa_post_cycle_insn ());

  if (targetm.sched.dfa_post_advance_cycle)
    targetm.sched.dfa_post_advance_cycle ();
}

/* Advance time on one cycle.  */
HAIFA_INLINE static void
advance_one_cycle (void)
{
  advance_state (curr_state);
  if (sched_verbose >= 4)
    fprintf (sched_dump, ";;\tAdvance the current state.\n");
}

/* Update register pressure after scheduling INSN.  */
static void
update_register_pressure (rtx_insn *insn)
{
  struct reg_use_data *use;
  struct reg_set_data *set;

  gcc_checking_assert (!DEBUG_INSN_P (insn));

  for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
    if (dying_use_p (use))
      mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure,
				 use->regno, false);
  for (set = INSN_REG_SET_LIST (insn); set != NULL; set = set->next_insn_set)
    mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure,
			       set->regno, true);
}

/* Set up or update (if UPDATE_P) max register pressure (see its
   meaning in sched-int.h::_haifa_insn_data) for all current BB insns
   after insn AFTER.  */
static void
setup_insn_max_reg_pressure (rtx_insn *after, bool update_p)
{
  int i, p;
  bool eq_p;
  rtx_insn *insn;
  static int max_reg_pressure[N_REG_CLASSES];

  save_reg_pressure ();
  for (i = 0; i < ira_pressure_classes_num; i++)
    max_reg_pressure[ira_pressure_classes[i]]
      = curr_reg_pressure[ira_pressure_classes[i]];
  for (insn = NEXT_INSN (after);
       insn != NULL_RTX && ! BARRIER_P (insn)
	 && BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (after);
       insn = NEXT_INSN (insn))
    if (NONDEBUG_INSN_P (insn))
      {
	eq_p = true;
	for (i = 0; i < ira_pressure_classes_num; i++)
	  {
	    p = max_reg_pressure[ira_pressure_classes[i]];
	    if (INSN_MAX_REG_PRESSURE (insn)[i] != p)
	      {
		eq_p = false;
		INSN_MAX_REG_PRESSURE (insn)[i]
		  = max_reg_pressure[ira_pressure_classes[i]];
	      }
	  }
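	/* When merely updating, stop at the first insn whose recorded
	   maxima already match: everything from here on must be up to
	   date as well.  */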
	if (update_p && eq_p)
	  break;
	update_register_pressure (insn);
	for (i = 0; i < ira_pressure_classes_num; i++)
	  if (max_reg_pressure[ira_pressure_classes[i]]
	      < curr_reg_pressure[ira_pressure_classes[i]])
	    max_reg_pressure[ira_pressure_classes[i]]
	      = curr_reg_pressure[ira_pressure_classes[i]];
      }
  restore_reg_pressure ();
}

/* Update the current register pressure after scheduling INSN.  Also
   update the max register pressure for the unscheduled insns of the
   current BB.  */
static void
update_reg_and_insn_max_reg_pressure (rtx_insn *insn)
{
  int i;
  int before[N_REG_CLASSES];

  for (i = 0; i < ira_pressure_classes_num; i++)
    before[i] = curr_reg_pressure[ira_pressure_classes[i]];
  update_register_pressure (insn);
  for (i = 0; i < ira_pressure_classes_num; i++)
    if (curr_reg_pressure[ira_pressure_classes[i]] != before[i])
      break;
  if (i < ira_pressure_classes_num)
    setup_insn_max_reg_pressure (insn, true);
}

/* Set up register pressure at the beginning of basic block BB, whose
   insns start after insn AFTER.  Also set up the max register pressure
   for all insns of the basic block.  */
void
sched_setup_bb_reg_pressure_info (basic_block bb, rtx_insn *after)
{
  gcc_assert (sched_pressure == SCHED_PRESSURE_WEIGHTED);
  initiate_bb_reg_pressure_info (bb);
  setup_insn_max_reg_pressure (after, false);
}

/* If doing predication while scheduling, verify whether INSN, which
   has just been scheduled, clobbers the conditions of any
   instructions that must be predicated in order to break their
   dependencies.  If so, remove them from the queues so that they will
   only be scheduled once their control dependency is resolved.  */

static void
check_clobbered_conditions (rtx insn)
{
  HARD_REG_SET t;
  int i;

  if ((current_sched_info->flags & DO_PREDICATION) == 0)
    return;

  find_all_hard_reg_sets (insn, &t, true);

 restart:
  for (i = 0; i < ready.n_ready; i++)
    {
      rtx_insn *x = ready_element (&ready, i);
      if (TODO_SPEC (x) == DEP_CONTROL && cond_clobbered_p (x, t))
	{
	  ready_remove_insn (x);
	  goto restart;
	}
    }
  for (i = 0; i <= max_insn_queue_index; i++)
    {
      rtx_insn_list *link;
      int q = NEXT_Q_AFTER (q_ptr, i);

    restart_queue:
      for (link = insn_queue[q]; link; link = link->next ())
	{
	  rtx_insn *x = link->insn ();
	  if (TODO_SPEC (x) == DEP_CONTROL && cond_clobbered_p (x, t))
	    {
	      queue_remove (x);
	      goto restart_queue;
	    }
	}
    }
}

/* Return (in order):

   - positive if INSN adversely affects the pressure on one
     register class

   - negative if INSN reduces the pressure on one register class

   - 0 if INSN doesn't affect the pressure on any register class.  */

static int
model_classify_pressure (struct model_insn_info *insn)
{
  struct reg_pressure_data *reg_pressure;
  int death[N_REG_CLASSES];
  int pci, cl, sum;

  calculate_reg_deaths (insn->insn, death);
  reg_pressure = INSN_REG_PRESSURE (insn->insn);
  sum = 0;
  for (pci = 0; pci < ira_pressure_classes_num; pci++)
    {
      cl = ira_pressure_classes[pci];
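      /* INSN sets more registers of class CL than die in it, so the
	 pressure on CL necessarily rises.  */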
      if (death[cl] < reg_pressure[pci].set_increase)
	return 1;
      sum += reg_pressure[pci].set_increase - death[cl];
    }
  return sum;
}

/* Return true if INSN1 should come before INSN2 in the model schedule.  */

static int
model_order_p (struct model_insn_info *insn1, struct model_insn_info *insn2)
{
  unsigned int height1, height2;
  unsigned int priority1, priority2;

  /* Prefer instructions with a higher model priority.  */
  if (insn1->model_priority != insn2->model_priority)
    return insn1->model_priority > insn2->model_priority;

  /* Combine the length of the longest path of satisfied true dependencies
     that leads to each instruction (depth) with the length of the longest
     path of any dependencies that leads from the instruction (alap).
     Prefer instructions with the greatest combined length.  If the combined
     lengths are equal, prefer instructions with the greatest depth.

     The idea is that, if we have a set S of "equal" instructions that each
     have ALAP value X, and we pick one such instruction I, any true-dependent
     successors of I that have ALAP value X - 1 should be preferred over S.
     This encourages the schedule to be "narrow" rather than "wide".
     However, if I is a low-priority instruction that we decided to
     schedule because of its model_classify_pressure, and if there
     is a set of higher-priority instructions T, the aforementioned
     successors of I should not have the edge over T.  */
  height1 = insn1->depth + insn1->alap;
  height2 = insn2->depth + insn2->alap;
  if (height1 != height2)
    return height1 > height2;
  if (insn1->depth != insn2->depth)
    return insn1->depth > insn2->depth;

  /* We have no real preference between INSN1 and INSN2 as far as attempts
     to reduce pressure go.  Prefer instructions with higher priorities.  */
  priority1 = INSN_PRIORITY (insn1->insn);
  priority2 = INSN_PRIORITY (insn2->insn);
  if (priority1 != priority2)
    return priority1 > priority2;

  /* Use the original rtl sequence as a tie-breaker.  */
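  /* (The MODEL_INSN_INFO structures live in one array indexed by luid,
     so comparing their addresses follows the original insn order.)  */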
  return insn1 < insn2;
}

/* Add INSN to the model worklist immediately after PREV.  Add it to the
   beginning of the list if PREV is null.  */

static void
model_add_to_worklist_at (struct model_insn_info *insn,
			  struct model_insn_info *prev)
{
  gcc_assert (QUEUE_INDEX (insn->insn) == QUEUE_NOWHERE);
  QUEUE_INDEX (insn->insn) = QUEUE_READY;

  insn->prev = prev;
  if (prev)
    {
      insn->next = prev->next;
      prev->next = insn;
    }
  else
    {
      insn->next = model_worklist;
      model_worklist = insn;
    }
  if (insn->next)
    insn->next->prev = insn;
}

/* Remove INSN from the model worklist.  */

static void
model_remove_from_worklist (struct model_insn_info *insn)
{
  gcc_assert (QUEUE_INDEX (insn->insn) == QUEUE_READY);
  QUEUE_INDEX (insn->insn) = QUEUE_NOWHERE;

  if (insn->prev)
    insn->prev->next = insn->next;
  else
    model_worklist = insn->next;
  if (insn->next)
    insn->next->prev = insn->prev;
}

/* Add INSN to the model worklist.  Start looking for a suitable position
   between neighbors PREV and NEXT, testing at most MAX_SCHED_READY_INSNS
   insns on either side.  A null PREV indicates the beginning of the list
   and a null NEXT indicates the end.  */

static void
model_add_to_worklist (struct model_insn_info *insn,
		       struct model_insn_info *prev,
		       struct model_insn_info *next)
{
  int count;

  count = MAX_SCHED_READY_INSNS;
  if (count > 0 && prev && model_order_p (insn, prev))
    do
      {
	count--;
	prev = prev->prev;
      }
    while (count > 0 && prev && model_order_p (insn, prev));
  else
    while (count > 0 && next && model_order_p (next, insn))
      {
	count--;
	prev = next;
	next = next->next;
      }
  model_add_to_worklist_at (insn, prev);
}

/* INSN may now have a higher priority (in the model_order_p sense)
   than before.  Move it up the worklist if necessary.  */

static void
model_promote_insn (struct model_insn_info *insn)
{
  struct model_insn_info *prev;
  int count;

  prev = insn->prev;
  count = MAX_SCHED_READY_INSNS;
  while (count > 0 && prev && model_order_p (insn, prev))
    {
      count--;
      prev = prev->prev;
    }
  if (prev != insn->prev)
    {
      model_remove_from_worklist (insn);
      model_add_to_worklist_at (insn, prev);
    }
}

/* Add INSN to the end of the model schedule.  */

static void
model_add_to_schedule (rtx_insn *insn)
{
  unsigned int point;

  gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);
  QUEUE_INDEX (insn) = QUEUE_SCHEDULED;

  point = model_schedule.length ();
  model_schedule.quick_push (insn);
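  /* Record POINT + 1 rather than POINT, presumably so that a zero
     INSN_MODEL_INDEX can stand for "not in the model schedule".  */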
  INSN_MODEL_INDEX (insn) = point + 1;
}

/* Analyze the instructions that are to be scheduled, setting up
   MODEL_INSN_INFO (...) and model_num_insns accordingly.  Add ready
   instructions to model_worklist.  */

static void
model_analyze_insns (void)
{
  rtx_insn *start, *end, *iter;
  sd_iterator_def sd_it;
  dep_t dep;
  struct model_insn_info *insn, *con;

  model_num_insns = 0;
  start = PREV_INSN (current_sched_info->next_tail);
  end = current_sched_info->prev_head;
  for (iter = start; iter != end; iter = PREV_INSN (iter))
    if (NONDEBUG_INSN_P (iter))
      {
	insn = MODEL_INSN_INFO (iter);
	insn->insn = iter;
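	/* The walk runs from the last insn to the first, so every
	   forward dependence's consumer has already been visited and
	   CON->alap is final by the time it is read here.  */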
	FOR_EACH_DEP (iter, SD_LIST_FORW, sd_it, dep)
	  {
	    con = MODEL_INSN_INFO (DEP_CON (dep));
	    if (con->insn && insn->alap < con->alap + 1)
	      insn->alap = con->alap + 1;
	  }

	insn->old_queue = QUEUE_INDEX (iter);
	QUEUE_INDEX (iter) = QUEUE_NOWHERE;

	insn->unscheduled_preds = dep_list_size (iter, SD_LIST_HARD_BACK);
	if (insn->unscheduled_preds == 0)
	  model_add_to_worklist (insn, NULL, model_worklist);

	model_num_insns++;
      }
}

/* The global state describes the register pressure at the start of the
   model schedule.  Initialize GROUP accordingly.  */

static void
model_init_pressure_group (struct model_pressure_group *group)
{
  int pci, cl;

  for (pci = 0; pci < ira_pressure_classes_num; pci++)
    {
      cl = ira_pressure_classes[pci];
      group->limits[pci].pressure = curr_reg_pressure[cl];
      group->limits[pci].point = 0;
    }
  /* Use index model_num_insns to record the state after the last
     instruction in the model schedule.  */
  group->model = XNEWVEC (struct model_pressure_data,
			  (model_num_insns + 1) * ira_pressure_classes_num);
}

/* Record that MODEL_REF_PRESSURE (GROUP, POINT, PCI) is PRESSURE.
   Update the maximum pressure for the whole schedule.  */

static void
model_record_pressure (struct model_pressure_group *group,
		       int point, int pci, int pressure)
{
  MODEL_REF_PRESSURE (group, point, pci) = pressure;
  if (group->limits[pci].pressure < pressure)
    {
      group->limits[pci].pressure = pressure;
      group->limits[pci].point = point;
    }
}

/* INSN has just been added to the end of the model schedule.  Record its
   register-pressure information.  */

static void
model_record_pressures (struct model_insn_info *insn)
{
  struct reg_pressure_data *reg_pressure;
  int point, pci, cl, delta;
  int death[N_REG_CLASSES];

  point = model_index (insn->insn);
  if (sched_verbose >= 2)
    {
      if (point == 0)
	{
	  fprintf (sched_dump, "\n;;\tModel schedule:\n;;\n");
	  fprintf (sched_dump, ";;\t| idx insn | mpri hght dpth prio |\n");
	}
      fprintf (sched_dump, ";;\t| %3d %4d | %4d %4d %4d %4d | %-30s ",
	       point, INSN_UID (insn->insn), insn->model_priority,
	       insn->depth + insn->alap, insn->depth,
	       INSN_PRIORITY (insn->insn),
	       str_pattern_slim (PATTERN (insn->insn)));
    }
  calculate_reg_deaths (insn->insn, death);
  reg_pressure = INSN_REG_PRESSURE (insn->insn);
  for (pci = 0; pci < ira_pressure_classes_num; pci++)
    {
      cl = ira_pressure_classes[pci];
      delta = reg_pressure[pci].set_increase - death[cl];
      if (sched_verbose >= 2)
	fprintf (sched_dump, " %s:[%d,%+d]", reg_class_names[cl],
		 curr_reg_pressure[cl], delta);
      model_record_pressure (&model_before_pressure, point, pci,
			     curr_reg_pressure[cl]);
    }
  if (sched_verbose >= 2)
    fprintf (sched_dump, "\n");
}

/* All instructions have been added to the model schedule.  Record the
   final register pressure in GROUP and set up all MODEL_MAX_PRESSUREs.  */

static void
model_record_final_pressures (struct model_pressure_group *group)
{
  int point, pci, max_pressure, ref_pressure, cl;

  for (pci = 0; pci < ira_pressure_classes_num; pci++)
    {
      /* Record the final pressure for this class.  */
      cl = ira_pressure_classes[pci];
      point = model_num_insns;
      ref_pressure = curr_reg_pressure[cl];
      model_record_pressure (group, point, pci, ref_pressure);

      /* Record the original maximum pressure.  */
      group->limits[pci].orig_pressure = group->limits[pci].pressure;

      /* Update the MODEL_MAX_PRESSURE for every point of the schedule.  */
      max_pressure = ref_pressure;
      MODEL_MAX_PRESSURE (group, point, pci) = max_pressure;
      while (point > 0)
	{
	  point--;
	  ref_pressure = MODEL_REF_PRESSURE (group, point, pci);
	  max_pressure = MAX (max_pressure, ref_pressure);
	  MODEL_MAX_PRESSURE (group, point, pci) = max_pressure;
	}
    }
}

/* Update all successors of INSN, given that INSN has just been scheduled.  */

static void
model_add_successors_to_worklist (struct model_insn_info *insn)
{
  sd_iterator_def sd_it;
  struct model_insn_info *con;
  dep_t dep;

  FOR_EACH_DEP (insn->insn, SD_LIST_FORW, sd_it, dep)
    {
      con = MODEL_INSN_INFO (DEP_CON (dep));
      /* Ignore debug instructions, and instructions from other blocks.  */
      if (con->insn)
	{
	  con->unscheduled_preds--;

	  /* Update the depth field of each true-dependent successor.
	     Increasing the depth gives them a higher priority than
	     before.  */
	  if (DEP_TYPE (dep) == REG_DEP_TRUE && con->depth < insn->depth + 1)
	    {
	      con->depth = insn->depth + 1;
	      if (QUEUE_INDEX (con->insn) == QUEUE_READY)
		model_promote_insn (con);
	    }

	  /* If this is a true dependency, or if there are no remaining
	     dependencies for CON (meaning that CON only had non-true
	     dependencies), make sure that CON is on the worklist.
	     We don't bother otherwise because it would tend to fill the
	     worklist with a lot of low-priority instructions that are not
	     yet ready to issue.  */
	  if ((con->depth > 0 || con->unscheduled_preds == 0)
	      && QUEUE_INDEX (con->insn) == QUEUE_NOWHERE)
	    model_add_to_worklist (con, insn, insn->next);
	}
    }
}

/* Give INSN a higher priority than any current instruction, then give
   unscheduled predecessors of INSN a higher priority still.  If any of
   those predecessors are not on the model worklist, do the same for
   their predecessors, and so on.  */

static void
model_promote_predecessors (struct model_insn_info *insn)
{
  struct model_insn_info *pro, *first;
  sd_iterator_def sd_it;
  dep_t dep;

  if (sched_verbose >= 7)
    fprintf (sched_dump, ";;\t+--- priority of %d = %d, priority of",
	     INSN_UID (insn->insn), model_next_priority);
  insn->model_priority = model_next_priority++;
  model_remove_from_worklist (insn);
  model_add_to_worklist_at (insn, NULL);

  first = NULL;
  for (;;)
    {
      FOR_EACH_DEP (insn->insn, SD_LIST_HARD_BACK, sd_it, dep)
	{
	  pro = MODEL_INSN_INFO (DEP_PRO (dep));
	  /* The first test is to ignore debug instructions, and instructions
	     from other blocks.  */
	  if (pro->insn
	      && pro->model_priority != model_next_priority
	      && QUEUE_INDEX (pro->insn) != QUEUE_SCHEDULED)
	    {
	      pro->model_priority = model_next_priority;
	      if (sched_verbose >= 7)
		fprintf (sched_dump, " %d", INSN_UID (pro->insn));
	      if (QUEUE_INDEX (pro->insn) == QUEUE_READY)
		{
		  /* PRO is already in the worklist, but it now has
		     a higher priority than before.  Move it to the
		     appropriate place.  */
		  model_remove_from_worklist (pro);
		  model_add_to_worklist (pro, NULL, model_worklist);
		}
	      else
		{
		  /* PRO isn't in the worklist.  Recursively process
		     its predecessors until we find one that is.  */
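		  /* PRO is not on the worklist, so its NEXT field is
		     free to act as a link in this temporary chain of
		     insns still to be visited.  */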
		  pro->next = first;
		  first = pro;
		}
	    }
	}
      if (!first)
	break;
      insn = first;
      first = insn->next;
    }
  if (sched_verbose >= 7)
    fprintf (sched_dump, " = %d\n", model_next_priority);
  model_next_priority++;
}

/* Pick one instruction from model_worklist and process it.  */

static void
model_choose_insn (void)
{
  struct model_insn_info *insn, *fallback;
  int count;

  if (sched_verbose >= 7)
    {
      fprintf (sched_dump, ";;\t+--- worklist:\n");
      insn = model_worklist;
      count = MAX_SCHED_READY_INSNS;
      while (count > 0 && insn)
	{
	  fprintf (sched_dump, ";;\t+---   %d [%d, %d, %d, %d]\n",
		   INSN_UID (insn->insn), insn->model_priority,
		   insn->depth + insn->alap, insn->depth,
		   INSN_PRIORITY (insn->insn));
	  count--;
	  insn = insn->next;
	}
    }

  /* Look for a ready instruction whose model_classify_pressure is zero
     or negative, picking the highest-priority one.  Adding such an
     instruction to the schedule now should do no harm, and may actually
     do some good.

     Failing that, see whether there is an instruction with the highest
     extant model_priority that is not yet ready, but which would reduce
     pressure if it became ready.  This is designed to catch cases like:

       (set (mem (reg R1)) (reg R2))

     where the instruction is the last remaining use of R1 and where the
     value of R2 is not yet available (or vice versa).  The death of R1
     means that this instruction already reduces pressure.  It is of
     course possible that the computation of R2 involves other registers
     that are hard to kill, but such cases are rare enough for this
     heuristic to be a win in general.

     Failing that, just pick the highest-priority instruction in the
     worklist.  */
  count = MAX_SCHED_READY_INSNS;
  insn = model_worklist;
  fallback = 0;
  for (;;)
    {
      if (count == 0 || !insn)
	{
	  insn = fallback ? fallback : model_worklist;
	  break;
	}
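      /* A not-yet-ready insn can become the fallback only if it shares
	 the highest model priority on the worklist and would reduce
	 pressure once its remaining dependencies resolve.  */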
      if (insn->unscheduled_preds)
	{
	  if (model_worklist->model_priority == insn->model_priority
	      && !fallback
	      && model_classify_pressure (insn) < 0)
	    fallback = insn;
	}
      else
	{
	  if (model_classify_pressure (insn) <= 0)
	    break;
	}
      count--;
      insn = insn->next;
    }

  if (sched_verbose >= 7 && insn != model_worklist)
    {
      if (insn->unscheduled_preds)
	fprintf (sched_dump, ";;\t+--- promoting insn %d, with dependencies\n",
		 INSN_UID (insn->insn));
      else
	fprintf (sched_dump, ";;\t+--- promoting insn %d, which is ready\n",
		 INSN_UID (insn->insn));
    }
  if (insn->unscheduled_preds)
    /* INSN isn't yet ready to issue.  Give all its predecessors the
       highest priority.  */
    model_promote_predecessors (insn);
  else
    {
      /* INSN is ready.  Add it to the end of model_schedule and
	 process its successors.  */
      model_add_successors_to_worklist (insn);
      model_remove_from_worklist (insn);
      model_add_to_schedule (insn->insn);
      model_record_pressures (insn);
      update_register_pressure (insn->insn);
    }
}

/* Restore all QUEUE_INDEXs to the values that they had before
   model_start_schedule was called.  */

static void
model_reset_queue_indices (void)
{
  unsigned int i;
  rtx_insn *insn;

  FOR_EACH_VEC_ELT (model_schedule, i, insn)
    QUEUE_INDEX (insn) = MODEL_INSN_INFO (insn)->old_queue;
}

/* We have calculated the model schedule and spill costs.  Print a summary
   to sched_dump.  */

static void
model_dump_pressure_summary (void)
{
  int pci, cl;

  fprintf (sched_dump, ";; Pressure summary:");
  for (pci = 0; pci < ira_pressure_classes_num; pci++)
    {
      cl = ira_pressure_classes[pci];
      fprintf (sched_dump, " %s:%d", reg_class_names[cl],
	       model_before_pressure.limits[pci].pressure);
    }
  fprintf (sched_dump, "\n\n");
}

/* Initialize the SCHED_PRESSURE_MODEL information for the current
   scheduling region.  */

static void
model_start_schedule (basic_block bb)
{
  model_next_priority = 1;
  model_schedule.create (sched_max_luid);
  model_insns = XCNEWVEC (struct model_insn_info, sched_max_luid);

  gcc_assert (bb == BLOCK_FOR_INSN (NEXT_INSN (current_sched_info->prev_head)));
  initiate_reg_pressure_info (df_get_live_in (bb));

  model_analyze_insns ();
  model_init_pressure_group (&model_before_pressure);
  while (model_worklist)
    model_choose_insn ();
  gcc_assert (model_num_insns == (int) model_schedule.length ());
  if (sched_verbose >= 2)
    fprintf (sched_dump, "\n");

  model_record_final_pressures (&model_before_pressure);
  model_reset_queue_indices ();

  XDELETEVEC (model_insns);

  model_curr_point = 0;
  initiate_reg_pressure_info (df_get_live_in (bb));
  if (sched_verbose >= 1)
    model_dump_pressure_summary ();
}

/* Free the information associated with GROUP.  */

static void
model_finalize_pressure_group (struct model_pressure_group *group)
{
  XDELETEVEC (group->model);
}

/* Free the information created by model_start_schedule.  */

static void
model_end_schedule (void)
{
  model_finalize_pressure_group (&model_before_pressure);
  model_schedule.release ();
}

/* Prepare reg pressure scheduling for basic block BB.  */
static void
sched_pressure_start_bb (basic_block bb)
{
  /* Set the number of available registers for each class, taking into
     account the relative probability of the current basic block versus
     the function prologue and epilogue.
     * If the basic block executes much more often than the
     prologue/epilogue (e.g., inside a hot loop), then the cost of a
     spill in the prologue is close to nil, so the effective number of
     available registers is
     (ira_class_hard_regs_num[cl] - 0).
     * If the basic block executes as often as the prologue/epilogue,
     then a spill in the block is as costly as a spill in the prologue,
     so the effective number of available registers is
     (ira_class_hard_regs_num[cl] - call_used_regs_num[cl]).
     Note that, all else being equal, we prefer to spill in the prologue,
     since that leaves "extra" registers for other basic blocks of the
     function.
     * If the basic block is on a cold path of the function and executes
     rarely, then we should always prefer to spill in the block rather
     than in the prologue/epilogue.  The effective number of available
     registers is
     (ira_class_hard_regs_num[cl] - call_used_regs_num[cl]).  */
  {
    int i;
    int entry_freq = ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency;
    int bb_freq = bb->frequency;

    if (bb_freq == 0)
      {
	if (entry_freq == 0)
	  entry_freq = bb_freq = 1;
      }
    if (bb_freq < entry_freq)
      bb_freq = entry_freq;

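    /* After the clamps above, BB_FREQ >= ENTRY_FREQ and BB_FREQ >= 1,
       so the division below is safe and never subtracts more than
       call_used_regs_num[cl].  */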
    for (i = 0; i < ira_pressure_classes_num; ++i)
      {
	enum reg_class cl = ira_pressure_classes[i];
	sched_class_regs_num[cl] = ira_class_hard_regs_num[cl];
	sched_class_regs_num[cl]
	  -= (call_used_regs_num[cl] * entry_freq) / bb_freq;
      }
  }

  if (sched_pressure == SCHED_PRESSURE_MODEL)
    model_start_schedule (bb);
}

/* A structure that holds local state for the loop in schedule_block.  */
struct sched_block_state
{
  /* True if no real insns have been scheduled in the current cycle.  */
  bool first_cycle_insn_p;
  /* True if a shadow insn has been scheduled in the current cycle, which
     means that no more normal insns can be issued.  */
  bool shadows_only_p;
  /* True if we're winding down a modulo schedule, which means that we only
     issue insns with INSN_EXACT_TICK set.  */
  bool modulo_epilogue;
  /* Initialized with the machine's issue rate every cycle, and updated
     by calls to the variable_issue hook.  */
  int can_issue_more;
};

/* INSN is the "currently executing insn".  Launch each insn which was
   waiting on INSN.  Return the cycle advance required after issuing the
   insn; it is nonzero for insns in a schedule group.  */

static int
schedule_insn (rtx_insn *insn)
{
  sd_iterator_def sd_it;
  dep_t dep;
  int i;
  int advance = 0;

  if (sched_verbose >= 1)
    {
      struct reg_pressure_data *pressure_info;
      fprintf (sched_dump, ";;\t%3i--> %s %-40s:",
	       clock_var, (*current_sched_info->print_insn) (insn, 1),
	       str_pattern_slim (PATTERN (insn)));

      if (recog_memoized (insn) < 0)
	fprintf (sched_dump, "nothing");
      else
	print_reservation (sched_dump, insn);
      pressure_info = INSN_REG_PRESSURE (insn);
      if (pressure_info != NULL)
	{
	  fputc (':', sched_dump);
	  for (i = 0; i < ira_pressure_classes_num; i++)
	    fprintf (sched_dump, "%s%s%+d(%d)",
		     scheduled_insns.length () > 1
		     && INSN_LUID (insn)
		     < INSN_LUID (scheduled_insns[scheduled_insns.length () - 2]) ? "@" : "",
		     reg_class_names[ira_pressure_classes[i]],
		     pressure_info[i].set_increase, pressure_info[i].change);
	}
      if (sched_pressure == SCHED_PRESSURE_MODEL
	  && model_curr_point < model_num_insns
	  && model_index (insn) == model_curr_point)
	fprintf (sched_dump, ":model %d", model_curr_point);
      fputc ('\n', sched_dump);
    }

  if (sched_pressure == SCHED_PRESSURE_WEIGHTED && !DEBUG_INSN_P (insn))
    update_reg_and_insn_max_reg_pressure (insn);

  /* An insn being scheduled should have all of its dependencies resolved
     and should already have been removed from the ready list.  */
  gcc_assert (sd_lists_empty_p (insn, SD_LIST_HARD_BACK));

  /* Reset debug insns invalidated by moving this insn.  */
  if (MAY_HAVE_DEBUG_INSNS && !DEBUG_INSN_P (insn))
    for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
	 sd_iterator_cond (&sd_it, &dep);)
      {
	rtx_insn *dbg = DEP_PRO (dep);
	struct reg_use_data *use, *next;

	if (DEP_STATUS (dep) & DEP_CANCELLED)
	  {
	    sd_iterator_next (&sd_it);
	    continue;
	  }

	gcc_assert (DEBUG_INSN_P (dbg));

	if (sched_verbose >= 6)
	  fprintf (sched_dump, ";;\t\tresetting: debug insn %d\n",
		   INSN_UID (dbg));

	/* ??? Rather than resetting the debug insn, we might be able
	   to emit a debug temp before the just-scheduled insn, but
	   this would involve checking that the expression at the
	   point of the debug insn is equivalent to the expression
	   before the just-scheduled insn.  They might not be: the
	   expression in the debug insn may depend on other insns not
	   yet scheduled that set MEMs, REGs or even other debug
	   insns.  It's not clear that attempting to preserve debug
	   information in these cases is worth the effort, given how
	   uncommon these resets are and the likelihood that the debug
	   temps introduced won't survive the schedule change.  */
	INSN_VAR_LOCATION_LOC (dbg) = gen_rtx_UNKNOWN_VAR_LOC ();
	df_insn_rescan (dbg);

	/* Unknown location doesn't use any registers.  */
	for (use = INSN_REG_USE_LIST (dbg); use != NULL; use = next)
	  {
	    struct reg_use_data *prev = use;

	    /* Remove use from the cyclic next_regno_use chain first.  */
	    while (prev->next_regno_use != use)
	      prev = prev->next_regno_use;
	    prev->next_regno_use = use->next_regno_use;
	    next = use->next_insn_use;
	    free (use);
	  }
	INSN_REG_USE_LIST (dbg) = NULL;

	/* We delete rather than resolve these deps, otherwise we
	   crash in sched_free_deps(), because forward deps are
	   expected to be released before backward deps.  */
	sd_delete_dep (sd_it);
      }

  gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);
  QUEUE_INDEX (insn) = QUEUE_SCHEDULED;

  if (sched_pressure == SCHED_PRESSURE_MODEL
      && model_curr_point < model_num_insns
      && NONDEBUG_INSN_P (insn))
    {
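      /* If INSN is the insn at the current model point, advance the
	 point past it and past any following insns that were already
	 scheduled out of model order.  */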
      if (model_index (insn) == model_curr_point)
	do
	  model_curr_point++;
	while (model_curr_point < model_num_insns
	       && (QUEUE_INDEX (MODEL_INSN (model_curr_point))
		   == QUEUE_SCHEDULED));
      else
	model_recompute (insn);
      model_update_limit_points ();
      update_register_pressure (insn);
      if (sched_verbose >= 2)
	print_curr_reg_pressure ();
    }

  gcc_assert (INSN_TICK (insn) >= MIN_TICK);
  if (INSN_TICK (insn) > clock_var)
    /* INSN has been prematurely moved from the queue to the ready list.
       This is possible only if one of the following flags is set.  */
    gcc_assert (flag_sched_stalled_insns || sched_fusion);

  /* ??? Probably, if INSN is scheduled prematurely, we should leave
     INSN_TICK untouched.  This is a machine-dependent issue, actually.  */
  INSN_TICK (insn) = clock_var;

  check_clobbered_conditions (insn);

  /* Update dependent instructions.  First, see if by scheduling this insn
     now we broke a dependence in a way that requires us to change another
     insn.  */
  for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
       sd_iterator_cond (&sd_it, &dep); sd_iterator_next (&sd_it))
    {
      struct dep_replacement *desc = DEP_REPLACE (dep);
      rtx_insn *pro = DEP_PRO (dep);
      if (QUEUE_INDEX (pro) != QUEUE_SCHEDULED
	  && desc != NULL && desc->insn == pro)
	apply_replacement (dep, false);
    }

  /* Go through and resolve forward dependencies.  */
  for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
       sd_iterator_cond (&sd_it, &dep);)
    {
      rtx_insn *next = DEP_CON (dep);
      bool cancelled = (DEP_STATUS (dep) & DEP_CANCELLED) != 0;

      /* Resolve the dependence between INSN and NEXT.
	 sd_resolve_dep () moves current dep to another list thus
	 advancing the iterator.  */
      sd_resolve_dep (sd_it);

      if (cancelled)
	{
	  if (must_restore_pattern_p (next, dep))
	    restore_pattern (dep, false);
	  continue;
	}

      /* Don't bother trying to mark next as ready if insn is a debug
	 insn.  If insn is the last hard dependency, it will have
	 already been discounted.  */
      if (DEBUG_INSN_P (insn) && !DEBUG_INSN_P (next))
	continue;

      if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
	{
	  int effective_cost;

	  effective_cost = try_ready (next);

	  if (effective_cost >= 0
	      && SCHED_GROUP_P (next)
	      && advance < effective_cost)
	    advance = effective_cost;
	}
      else
	/* Check always has only one forward dependence (to the first insn in
	   the recovery block), therefore, this will be executed only once.  */
	{
	  gcc_assert (sd_lists_empty_p (insn, SD_LIST_FORW));
	  fix_recovery_deps (RECOVERY_BLOCK (insn));
	}
    }

  /* Annotate the instruction with issue information -- TImode
     indicates that the instruction is expected not to be able
     to issue on the same cycle as the previous insn.  A machine
     may use this information to decide how the instruction should
     be aligned.  */
  if (issue_rate > 1
      && GET_CODE (PATTERN (insn)) != USE
      && GET_CODE (PATTERN (insn)) != CLOBBER
      && !DEBUG_INSN_P (insn))
    {
      if (reload_completed)
	PUT_MODE (insn, clock_var > last_clock_var ? TImode : VOIDmode);
      last_clock_var = clock_var;
    }

  if (nonscheduled_insns_begin != NULL_RTX)
    /* Indicate to debug counters that INSN is scheduled.  */
    nonscheduled_insns_begin = insn;

  return advance;
}

/* Functions for handling of notes.  */

/* Add note list that ends on FROM_END to the end of TO_ENDP.  */
void
concat_note_lists (rtx_insn *from_end, rtx_insn **to_endp)
{
  rtx_insn *from_start;

  /* It's easy when we have nothing to concat.  */
  if (from_end == NULL)
    return;

  /* It's also easy when destination is empty.  */
  if (*to_endp == NULL)
    {
      *to_endp = from_end;
      return;
    }

  from_start = from_end;
  while (PREV_INSN (from_start) != NULL)
    from_start = PREV_INSN (from_start);

  SET_PREV_INSN (from_start) = *to_endp;
  SET_NEXT_INSN (*to_endp) = from_start;
  *to_endp = from_end;
}

/* Delete notes between HEAD and TAIL and put them in the chain
   of notes ended by NOTE_LIST.  */
void
remove_notes (rtx_insn *head, rtx_insn *tail)
{
  rtx_insn *next_tail, *insn, *next;

  note_list = 0;
  if (head == tail && !INSN_P (head))
    return;

  next_tail = NEXT_INSN (tail);
  for (insn = head; insn != next_tail; insn = next)
    {
      next = NEXT_INSN (insn);
      if (!NOTE_P (insn))
	continue;

      switch (NOTE_KIND (insn))
	{
	case NOTE_INSN_BASIC_BLOCK:
	  continue;

	case NOTE_INSN_EPILOGUE_BEG:
	  if (insn != tail)
	    {
	      remove_insn (insn);
	      add_reg_note (next, REG_SAVE_NOTE,
			    GEN_INT (NOTE_INSN_EPILOGUE_BEG));
	      break;
	    }
	  /* FALLTHRU */

	default:
	  remove_insn (insn);

	  /* Add the note to list that ends at NOTE_LIST.  */
	  SET_PREV_INSN (insn) = note_list;
	  SET_NEXT_INSN (insn) = NULL_RTX;
	  if (note_list)
	    SET_NEXT_INSN (note_list) = insn;
	  note_list = insn;
	  break;
	}

      gcc_assert ((sel_sched_p () || insn != tail) && insn != head);
    }
}

/* A structure to record enough data to allow us to backtrack the scheduler to
   a previous state.  */
struct haifa_saved_data
{
  /* Next entry on the list.  */
  struct haifa_saved_data *next;

  /* Backtracking is associated with scheduling insns that have delay slots.
     DELAY_PAIR points to the structure that contains the insns involved, and
     the number of cycles between them.  */
  struct delay_pair *delay_pair;

  /* Data used by the frontend (e.g. sched-ebb or sched-rgn).  */
  void *fe_saved_data;
  /* Data used by the backend.  */
  void *be_saved_data;

  /* Copies of global state.  */
  int clock_var, last_clock_var;
  struct ready_list ready;
  state_t curr_state;

  rtx_insn *last_scheduled_insn;
  rtx last_nondebug_scheduled_insn;
  rtx_insn *nonscheduled_insns_begin;
  int cycle_issued_insns;

  /* Copies of state used in the inner loop of schedule_block.  */
  struct sched_block_state sched_block;

  /* We don't need to save q_ptr, as its value is arbitrary and we can set it
     to 0 when restoring.  */
  int q_size;
  rtx_insn_list **insn_queue;

  /* Describe pattern replacements that occurred since this backtrack point
     was queued.  */
  vec<dep_t> replacement_deps;
  vec<int> replace_apply;

  /* A copy of the next-cycle replacement vectors at the time of the backtrack
     point.  */
  vec<dep_t> next_cycle_deps;
  vec<int> next_cycle_apply;
};

/* A record, in reverse order, of all scheduled insns which have delay slots
   and may require backtracking.  */
static struct haifa_saved_data *backtrack_queue;

/* For every dependency of INSN, set the FEEDS_BACKTRACK_INSN bit according
   to SET_P.  */
static void
mark_backtrack_feeds (rtx insn, int set_p)
{
  sd_iterator_def sd_it;
  dep_t dep;
  FOR_EACH_DEP (insn, SD_LIST_HARD_BACK, sd_it, dep)
    {
      FEEDS_BACKTRACK_INSN (DEP_PRO (dep)) = set_p;
    }
}

/* Save the current scheduler state so that we can backtrack to it
   later if necessary.  PAIR gives the insns that make it necessary to
   save this point.  SCHED_BLOCK is the local state of schedule_block
   that needs to be saved.  */
static void
save_backtrack_point (struct delay_pair *pair,
		      struct sched_block_state sched_block)
{
  int i;
  struct haifa_saved_data *save = XNEW (struct haifa_saved_data);

  save->curr_state = xmalloc (dfa_state_size);
  memcpy (save->curr_state, curr_state, dfa_state_size);

  save->ready.first = ready.first;
  save->ready.n_ready = ready.n_ready;
  save->ready.n_debug = ready.n_debug;
  save->ready.veclen = ready.veclen;
  save->ready.vec = XNEWVEC (rtx_insn *, ready.veclen);
  memcpy (save->ready.vec, ready.vec, ready.veclen * sizeof (rtx));

  save->insn_queue = XNEWVEC (rtx_insn_list *, max_insn_queue_index + 1);
  save->q_size = q_size;
  for (i = 0; i <= max_insn_queue_index; i++)
    {
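      /* Queue slot Q_PTR + I is saved at plain index I, normalizing the
	 copy to q_ptr == 0 (see the comment in struct haifa_saved_data).  */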
      int q = NEXT_Q_AFTER (q_ptr, i);
      save->insn_queue[i] = copy_INSN_LIST (insn_queue[q]);
    }

  save->clock_var = clock_var;
  save->last_clock_var = last_clock_var;
  save->cycle_issued_insns = cycle_issued_insns;
  save->last_scheduled_insn = last_scheduled_insn;
  save->last_nondebug_scheduled_insn = last_nondebug_scheduled_insn;
  save->nonscheduled_insns_begin = nonscheduled_insns_begin;

  save->sched_block = sched_block;

  save->replacement_deps.create (0);
  save->replace_apply.create (0);
  save->next_cycle_deps = next_cycle_replace_deps.copy ();
  save->next_cycle_apply = next_cycle_apply.copy ();

  if (current_sched_info->save_state)
    save->fe_saved_data = (*current_sched_info->save_state) ();

  if (targetm.sched.alloc_sched_context)
    {
      save->be_saved_data = targetm.sched.alloc_sched_context ();
      targetm.sched.init_sched_context (save->be_saved_data, false);
    }
  else
    save->be_saved_data = NULL;

  save->delay_pair = pair;

  save->next = backtrack_queue;
  backtrack_queue = save;

  while (pair)
    {
      mark_backtrack_feeds (pair->i2, 1);
      INSN_TICK (pair->i2) = INVALID_TICK;
      INSN_EXACT_TICK (pair->i2) = clock_var + pair_delay (pair);
      SHADOW_P (pair->i2) = pair->stages == 0;
      pair = pair->next_same_i1;
    }
}

/* Walk the ready list and all queues.  If any insns have unresolved backwards
   dependencies, these must be cancelled deps, broken by predication.  Set or
   clear (depending on SET) the DEP_CANCELLED bit in DEP_STATUS.  */

static void
toggle_cancelled_flags (bool set)
{
  int i;
  sd_iterator_def sd_it;
  dep_t dep;

  if (ready.n_ready > 0)
    {
      rtx_insn **first = ready_lastpos (&ready);
      for (i = 0; i < ready.n_ready; i++)
	FOR_EACH_DEP (first[i], SD_LIST_BACK, sd_it, dep)
	  if (!DEBUG_INSN_P (DEP_PRO (dep)))
	    {
	      if (set)
		DEP_STATUS (dep) |= DEP_CANCELLED;
	      else
		DEP_STATUS (dep) &= ~DEP_CANCELLED;
	    }
    }
  for (i = 0; i <= max_insn_queue_index; i++)
    {
      int q = NEXT_Q_AFTER (q_ptr, i);
      rtx_insn_list *link;
      for (link = insn_queue[q]; link; link = link->next ())
	{
	  rtx_insn *insn = link->insn ();
	  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
	    if (!DEBUG_INSN_P (DEP_PRO (dep)))
	      {
		if (set)
		  DEP_STATUS (dep) |= DEP_CANCELLED;
		else
		  DEP_STATUS (dep) &= ~DEP_CANCELLED;
	      }
	}
    }
}

/* Undo the replacements that have occurred after backtrack point SAVE
   was placed.  */
static void
undo_replacements_for_backtrack (struct haifa_saved_data *save)
{
  while (!save->replacement_deps.is_empty ())
    {
      dep_t dep = save->replacement_deps.pop ();
      int apply_p = save->replace_apply.pop ();

      if (apply_p)
	restore_pattern (dep, true);
      else
	apply_replacement (dep, true);
    }
  save->replacement_deps.release ();
  save->replace_apply.release ();
}

/* Pop entries from the SCHEDULED_INSNS vector up to and including INSN.
   Restore their dependencies to an unresolved state, and mark them as
   queued nowhere.  */

static void
unschedule_insns_until (rtx insn)
{
  auto_vec<rtx_insn *> recompute_vec;

  /* Make two passes over the insns to be unscheduled.  First, we clear out
     dependencies and other trivial bookkeeping.  */
  for (;;)
    {
      rtx_insn *last;
      sd_iterator_def sd_it;
      dep_t dep;

      last = scheduled_insns.pop ();

      /* This will be changed by restore_backtrack_point if the insn is in
	 any queue.  */
      QUEUE_INDEX (last) = QUEUE_NOWHERE;
      if (last != insn)
	INSN_TICK (last) = INVALID_TICK;

      if (modulo_ii > 0 && INSN_UID (last) < modulo_iter0_max_uid)
	modulo_insns_scheduled--;

      for (sd_it = sd_iterator_start (last, SD_LIST_RES_FORW);
	   sd_iterator_cond (&sd_it, &dep);)
	{
	  rtx_insn *con = DEP_CON (dep);
	  sd_unresolve_dep (sd_it);
	  if (!MUST_RECOMPUTE_SPEC_P (con))
	    {
	      MUST_RECOMPUTE_SPEC_P (con) = 1;
	      recompute_vec.safe_push (con);
	    }
	}

      if (last == insn)
	break;
    }

  /* A second pass, to update ready and speculation status for insns
     depending on the unscheduled ones.  The first pass must have
     popped the scheduled_insns vector up to the point where we
     restart scheduling, as recompute_todo_spec requires it to be
     up-to-date.  */
  while (!recompute_vec.is_empty ())
    {
      rtx_insn *con;

      con = recompute_vec.pop ();
      MUST_RECOMPUTE_SPEC_P (con) = 0;
      if (!sd_lists_empty_p (con, SD_LIST_HARD_BACK))
	{
	  TODO_SPEC (con) = HARD_DEP;
	  INSN_TICK (con) = INVALID_TICK;
	  if (PREDICATED_PAT (con) != NULL_RTX)
	    haifa_change_pattern (con, ORIG_PAT (con));
	}
      else if (QUEUE_INDEX (con) != QUEUE_SCHEDULED)
	TODO_SPEC (con) = recompute_todo_spec (con, true);
    }
}

/* Restore scheduler state from the topmost entry on the backtracking queue.
   PSCHED_BLOCK_P points to the local data of schedule_block that we must
   overwrite with the saved data.
   The caller must already have called unschedule_insns_until.  */

static void
restore_last_backtrack_point (struct sched_block_state *psched_block)
{
  int i;
  struct haifa_saved_data *save = backtrack_queue;

  backtrack_queue = save->next;

  if (current_sched_info->restore_state)
    (*current_sched_info->restore_state) (save->fe_saved_data);

  if (targetm.sched.alloc_sched_context)
    {
      targetm.sched.set_sched_context (save->be_saved_data);
      targetm.sched.free_sched_context (save->be_saved_data);
    }

  /* Do this first since it clobbers INSN_TICK of the involved
     instructions.  */
  undo_replacements_for_backtrack (save);

  /* Clear the QUEUE_INDEX of everything in the ready list or one
     of the queues.  */
  if (ready.n_ready > 0)
    {
      rtx_insn **first = ready_lastpos (&ready);
      for (i = 0; i < ready.n_ready; i++)
	{
	  rtx_insn *insn = first[i];
	  QUEUE_INDEX (insn) = QUEUE_NOWHERE;
	  INSN_TICK (insn) = INVALID_TICK;
	}
    }
  for (i = 0; i <= max_insn_queue_index; i++)
    {
      int q = NEXT_Q_AFTER (q_ptr, i);

      for (rtx_insn_list *link = insn_queue[q]; link; link = link->next ())
	{
	  rtx_insn *x = link->insn ();
	  QUEUE_INDEX (x) = QUEUE_NOWHERE;
	  INSN_TICK (x) = INVALID_TICK;
	}
      free_INSN_LIST_list (&insn_queue[q]);
    }

  free (ready.vec);
  ready = save->ready;

  if (ready.n_ready > 0)
    {
      rtx_insn **first = ready_lastpos (&ready);
      for (i = 0; i < ready.n_ready; i++)
	{
	  rtx_insn *insn = first[i];
	  QUEUE_INDEX (insn) = QUEUE_READY;
	  TODO_SPEC (insn) = recompute_todo_spec (insn, true);
	  INSN_TICK (insn) = save->clock_var;
	}
    }

  q_ptr = 0;
  q_size = save->q_size;
  for (i = 0; i <= max_insn_queue_index; i++)
    {
      int q = NEXT_Q_AFTER (q_ptr, i);

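      /* q_ptr was reset to zero above, so slot I both indexes the saved
	 copy and means "ready I cycles after the restored clock".  */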
      insn_queue[q] = save->insn_queue[q];

      for (rtx_insn_list *link = insn_queue[q]; link; link = link->next ())
	{
	  rtx_insn *x = link->insn ();
	  QUEUE_INDEX (x) = i;
	  TODO_SPEC (x) = recompute_todo_spec (x, true);
	  INSN_TICK (x) = save->clock_var + i;
	}
    }
  free (save->insn_queue);

  toggle_cancelled_flags (true);

  clock_var = save->clock_var;
  last_clock_var = save->last_clock_var;
  cycle_issued_insns = save->cycle_issued_insns;
  last_scheduled_insn = save->last_scheduled_insn;
  last_nondebug_scheduled_insn = save->last_nondebug_scheduled_insn;
  nonscheduled_insns_begin = save->nonscheduled_insns_begin;

  *psched_block = save->sched_block;

  memcpy (curr_state, save->curr_state, dfa_state_size);
  free (save->curr_state);

  mark_backtrack_feeds (save->delay_pair->i2, 0);

  gcc_assert (next_cycle_replace_deps.is_empty ());
  next_cycle_replace_deps = save->next_cycle_deps.copy ();
  next_cycle_apply = save->next_cycle_apply.copy ();

  free (save);

  for (save = backtrack_queue; save; save = save->next)
    {
      mark_backtrack_feeds (save->delay_pair->i2, 1);
    }
}

/* Discard all data associated with the topmost entry in the backtrack
   queue.  If RESET_TICK is false, we just want to free the data.  If true,
   we are doing this because we discovered a reason to backtrack.  In the
   latter case, also reset the INSN_TICK for the shadow insn.  */
static void
free_topmost_backtrack_point (bool reset_tick)
{
  struct haifa_saved_data *save = backtrack_queue;
  int i;

  backtrack_queue = save->next;

  if (reset_tick)
    {
      struct delay_pair *pair = save->delay_pair;
      while (pair)
	{
	  INSN_TICK (pair->i2) = INVALID_TICK;
	  INSN_EXACT_TICK (pair->i2) = INVALID_TICK;
	  pair = pair->next_same_i1;
	}
      undo_replacements_for_backtrack (save);
    }
  else
    {
      save->replacement_deps.release ();
      save->replace_apply.release ();
    }

  if (targetm.sched.free_sched_context)
    targetm.sched.free_sched_context (save->be_saved_data);
  if (current_sched_info->restore_state)
    free (save->fe_saved_data);
  for (i = 0; i <= max_insn_queue_index; i++)
    free_INSN_LIST_list (&save->insn_queue[i]);
  free (save->insn_queue);
  free (save->curr_state);
  free (save->ready.vec);
  free (save);
}

/* Free the entire backtrack queue.  */
static void
free_backtrack_queue (void)
{
  while (backtrack_queue)
    free_topmost_backtrack_point (false);
}

/* Apply the replacement described by DEP's DEP_REPLACE.  If IMMEDIATELY
   is false, we may have to postpone the replacement until the start of
   the next cycle, at which point we will be called again with
   IMMEDIATELY true.  However, this is only done for machines which have
   instruction packets with explicit parallelism.  */
static void
apply_replacement (dep_t dep, bool immediately)
{
  struct dep_replacement *desc = DEP_REPLACE (dep);
  if (!immediately && targetm.sched.exposed_pipeline && reload_completed)
    {
      next_cycle_replace_deps.safe_push (dep);
      next_cycle_apply.safe_push (1);
    }
  else
    {
      bool success;

      if (QUEUE_INDEX (desc->insn) == QUEUE_SCHEDULED)
	return;

      if (sched_verbose >= 5)
	fprintf (sched_dump, "applying replacement for insn %d\n",
		 INSN_UID (desc->insn));

      success = validate_change (desc->insn, desc->loc, desc->newval, 0);
      gcc_assert (success);

      update_insn_after_change (desc->insn);
      if ((TODO_SPEC (desc->insn) & (HARD_DEP | DEP_POSTPONED)) == 0)
	fix_tick_ready (desc->insn);

      if (backtrack_queue != NULL)
	{
	  backtrack_queue->replacement_deps.safe_push (dep);
	  backtrack_queue->replace_apply.safe_push (1);
	}
    }
}

/* We have determined that a pattern involved in DEP must be restored.
   If IMMEDIATELY is false, we may have to postpone the replacement
   until the start of the next cycle, at which point we will be called
   again with IMMEDIATELY true.  */
static void
restore_pattern (dep_t dep, bool immediately)
{
  rtx_insn *next = DEP_CON (dep);
  int tick = INSN_TICK (next);

  /* If we already scheduled the insn, the modified version is
     correct.  */
  if (QUEUE_INDEX (next) == QUEUE_SCHEDULED)
    return;

  if (!immediately && targetm.sched.exposed_pipeline && reload_completed)
    {
      next_cycle_replace_deps.safe_push (dep);
      next_cycle_apply.safe_push (0);
      return;
    }

  if (DEP_TYPE (dep) == REG_DEP_CONTROL)
    {
      if (sched_verbose >= 5)
	fprintf (sched_dump, "restoring pattern for insn %d\n",
		 INSN_UID (next));
      haifa_change_pattern (next, ORIG_PAT (next));
    }
  else
    {
      struct dep_replacement *desc = DEP_REPLACE (dep);
      bool success;

      if (sched_verbose >= 5)
	fprintf (sched_dump, "restoring pattern for insn %d\n",
		 INSN_UID (desc->insn));
      tick = INSN_TICK (desc->insn);

      success = validate_change (desc->insn, desc->loc, desc->orig, 0);
      gcc_assert (success);
      update_insn_after_change (desc->insn);
      if (backtrack_queue != NULL)
	{
	  backtrack_queue->replacement_deps.safe_push (dep);
	  backtrack_queue->replace_apply.safe_push (0);
	}
    }
  INSN_TICK (next) = tick;
  if (TODO_SPEC (next) == DEP_POSTPONED)
    return;

  if (sd_lists_empty_p (next, SD_LIST_BACK))
    TODO_SPEC (next) = 0;
  else if (!sd_lists_empty_p (next, SD_LIST_HARD_BACK))
    TODO_SPEC (next) = HARD_DEP;
}

/* Perform pattern replacements that were queued up until the next
   cycle.  */
static void
perform_replacements_new_cycle (void)
{
  int i;
  dep_t dep;
  FOR_EACH_VEC_ELT (next_cycle_replace_deps, i, dep)
    {
      int apply_p = next_cycle_apply[i];
      if (apply_p)
	apply_replacement (dep, true);
      else
	restore_pattern (dep, true);
    }
  next_cycle_replace_deps.truncate (0);
  next_cycle_apply.truncate (0);
}

/* Compute INSN_TICK_ESTIMATE for INSN.  PROCESSED is a bitmap of
   instructions we've previously encountered; a set bit prevents
   recursion.  BUDGET is a limit on how far ahead we look; it is
   reduced on recursive calls.  Return true if we produced a good
   estimate, or false if we exceeded the budget.  */
4836static bool
4837estimate_insn_tick (bitmap processed, rtx_insn *insn, int budget)
4838{
4839  sd_iterator_def sd_it;
4840  dep_t dep;
4841  int earliest = INSN_TICK (insn);
4842
4843  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
4844    {
4845      rtx_insn *pro = DEP_PRO (dep);
4846      int t;
4847
4848      if (DEP_STATUS (dep) & DEP_CANCELLED)
4849	continue;
4850
4851      if (QUEUE_INDEX (pro) == QUEUE_SCHEDULED)
4852	gcc_assert (INSN_TICK (pro) + dep_cost (dep) <= INSN_TICK (insn));
4853      else
4854	{
4855	  int cost = dep_cost (dep);
4856	  if (cost >= budget)
4857	    return false;
4858	  if (!bitmap_bit_p (processed, INSN_LUID (pro)))
4859	    {
4860	      if (!estimate_insn_tick (processed, pro, budget - cost))
4861		return false;
4862	    }
4863	  gcc_assert (INSN_TICK_ESTIMATE (pro) != INVALID_TICK);
4864	  t = INSN_TICK_ESTIMATE (pro) + cost;
4865	  if (earliest == INVALID_TICK || t > earliest)
4866	    earliest = t;
4867	}
4868    }
4869  bitmap_set_bit (processed, INSN_LUID (insn));
4870  INSN_TICK_ESTIMATE (insn) = earliest;
4871  return true;
4872}
4873
4874/* Examine the pair of insns in P, and estimate (optimistically, assuming
4875   infinite resources) the cycle in which the delayed shadow can be issued.
4876   Return the number of cycles that must pass before the real insn can be
4877   issued in order to meet this constraint.  */
4878static int
4879estimate_shadow_tick (struct delay_pair *p)
4880{
4881  bitmap_head processed;
4882  int t;
4883  bool cutoff;
4884  bitmap_initialize (&processed, 0);
4885
4886  cutoff = !estimate_insn_tick (&processed, p->i2,
4887				max_insn_queue_index + pair_delay (p));
4888  bitmap_clear (&processed);
4889  if (cutoff)
4890    return max_insn_queue_index;
4891  t = INSN_TICK_ESTIMATE (p->i2) - (clock_var + pair_delay (p) + 1);
4892  if (t > 0)
4893    return t;
4894  return 0;
4895}
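/* Worked example with invented numbers: if INSN_TICK_ESTIMATE (p->i2) is 12,
   clock_var is 6 and pair_delay (p) is 4, the shadow cannot issue before
   tick 12, so the real insn must stall 12 - (6 + 4 + 1) = 1 further cycle;
   a non-positive difference means it may issue immediately.  */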
4896
4897/* If INSN has no unresolved backwards dependencies, add it to the schedule and
4898   recursively resolve all its forward dependencies.  */
4899static void
4900resolve_dependencies (rtx_insn *insn)
4901{
4902  sd_iterator_def sd_it;
4903  dep_t dep;
4904
4905  /* Don't use sd_lists_empty_p; it ignores debug insns.  */
4906  if (DEPS_LIST_FIRST (INSN_HARD_BACK_DEPS (insn)) != NULL
4907      || DEPS_LIST_FIRST (INSN_SPEC_BACK_DEPS (insn)) != NULL)
4908    return;
4909
4910  if (sched_verbose >= 4)
4911    fprintf (sched_dump, ";;\tquickly resolving %d\n", INSN_UID (insn));
4912
4913  if (QUEUE_INDEX (insn) >= 0)
4914    queue_remove (insn);
4915
4916  scheduled_insns.safe_push (insn);
4917
4918  /* Update dependent instructions.  */
4919  for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
4920       sd_iterator_cond (&sd_it, &dep);)
4921    {
4922      rtx_insn *next = DEP_CON (dep);
4923
4924      if (sched_verbose >= 4)
4925	fprintf (sched_dump, ";;\t\tdep %d against %d\n", INSN_UID (insn),
4926		 INSN_UID (next));
4927
4928      /* Resolve the dependence between INSN and NEXT.
4929	 sd_resolve_dep () moves current dep to another list thus
4930	 advancing the iterator.  */
4931      sd_resolve_dep (sd_it);
4932
4933      if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
4934	{
4935	  resolve_dependencies (next);
4936	}
4937      else
4938	/* A check insn always has exactly one forward dependence (to the first
4939	   insn in the recovery block), so this will be executed only once.  */
4940	{
4941	  gcc_assert (sd_lists_empty_p (insn, SD_LIST_FORW));
4942	}
4943    }
4944}
4945
4946
4947/* Return the head and tail pointers of the EBB starting at BEG and ending
4948   at END.  */
4949void
4950get_ebb_head_tail (basic_block beg, basic_block end,
4951		   rtx_insn **headp, rtx_insn **tailp)
4952{
4953  rtx_insn *beg_head = BB_HEAD (beg);
4954  rtx_insn *beg_tail = BB_END (beg);
4955  rtx_insn *end_head = BB_HEAD (end);
4956  rtx_insn *end_tail = BB_END (end);
4957
4958  /* Don't include any notes or labels at the beginning of the BEG
4959     basic block, or notes at the end of the END basic block.  */
4960
4961  if (LABEL_P (beg_head))
4962    beg_head = NEXT_INSN (beg_head);
4963
4964  while (beg_head != beg_tail)
4965    if (NOTE_P (beg_head))
4966      beg_head = NEXT_INSN (beg_head);
4967    else if (DEBUG_INSN_P (beg_head))
4968      {
4969	rtx_insn *note, *next;
4970
4971	for (note = NEXT_INSN (beg_head);
4972	     note != beg_tail;
4973	     note = next)
4974	  {
4975	    next = NEXT_INSN (note);
4976	    if (NOTE_P (note))
4977	      {
4978		if (sched_verbose >= 9)
4979		  fprintf (sched_dump, "reorder %i\n", INSN_UID (note));
4980
4981		reorder_insns_nobb (note, note, PREV_INSN (beg_head));
4982
4983		if (BLOCK_FOR_INSN (note) != beg)
4984		  df_insn_change_bb (note, beg);
4985	      }
4986	    else if (!DEBUG_INSN_P (note))
4987	      break;
4988	  }
4989
4990	break;
4991      }
4992    else
4993      break;
4994
4995  *headp = beg_head;
4996
4997  if (beg == end)
4998    end_head = beg_head;
4999  else if (LABEL_P (end_head))
5000    end_head = NEXT_INSN (end_head);
5001
5002  while (end_head != end_tail)
5003    if (NOTE_P (end_tail))
5004      end_tail = PREV_INSN (end_tail);
5005    else if (DEBUG_INSN_P (end_tail))
5006      {
5007	rtx_insn *note, *prev;
5008
5009	for (note = PREV_INSN (end_tail);
5010	     note != end_head;
5011	     note = prev)
5012	  {
5013	    prev = PREV_INSN (note);
5014	    if (NOTE_P (note))
5015	      {
5016		if (sched_verbose >= 9)
5017		  fprintf (sched_dump, "reorder %i\n", INSN_UID (note));
5018
5019		reorder_insns_nobb (note, note, end_tail);
5020
5021		if (end_tail == BB_END (end))
5022		  BB_END (end) = note;
5023
5024		if (BLOCK_FOR_INSN (note) != end)
5025		  df_insn_change_bb (note, end);
5026	      }
5027	    else if (!DEBUG_INSN_P (note))
5028	      break;
5029	  }
5030
5031	break;
5032      }
5033    else
5034      break;
5035
5036  *tailp = end_tail;
5037}
5038
5039/* Return nonzero if there are no real insns in the range [ HEAD, TAIL ].  */
5040
5041int
5042no_real_insns_p (const rtx_insn *head, const rtx_insn *tail)
5043{
5044  while (head != NEXT_INSN (tail))
5045    {
5046      if (!NOTE_P (head) && !LABEL_P (head))
5047	return 0;
5048      head = NEXT_INSN (head);
5049    }
5050  return 1;
5051}
5052
5053/* Restore other notes: NOTE_LIST is the end of a chain of notes
5054   previously found among the insns.  Insert them just before HEAD.  */
5055rtx_insn *
5056restore_other_notes (rtx_insn *head, basic_block head_bb)
5057{
5058  if (note_list != 0)
5059    {
5060      rtx_insn *note_head = note_list;
5061
5062      if (head)
5063	head_bb = BLOCK_FOR_INSN (head);
5064      else
5065	head = NEXT_INSN (bb_note (head_bb));
5066
5067      while (PREV_INSN (note_head))
5068	{
5069	  set_block_for_insn (note_head, head_bb);
5070	  note_head = PREV_INSN (note_head);
5071	}
5072      /* The loop above exits before setting the block for the first note.  */
5073      set_block_for_insn (note_head, head_bb);
5074
5075      SET_PREV_INSN (note_head) = PREV_INSN (head);
5076      SET_NEXT_INSN (PREV_INSN (head)) = note_head;
5077      SET_PREV_INSN (head) = note_list;
5078      SET_NEXT_INSN (note_list) = head;
5079
5080      if (BLOCK_FOR_INSN (head) != head_bb)
5081	BB_END (head_bb) = note_list;
5082
5083      head = note_head;
5084    }
5085
5086  return head;
5087}
5088
5089/* When we know we are going to discard the schedule due to a failed attempt
5090   at modulo scheduling, undo all replacements.  */
5091static void
5092undo_all_replacements (void)
5093{
5094  rtx_insn *insn;
5095  int i;
5096
5097  FOR_EACH_VEC_ELT (scheduled_insns, i, insn)
5098    {
5099      sd_iterator_def sd_it;
5100      dep_t dep;
5101
5102      /* See if we must undo a replacement.  */
5103      for (sd_it = sd_iterator_start (insn, SD_LIST_RES_FORW);
5104	   sd_iterator_cond (&sd_it, &dep); sd_iterator_next (&sd_it))
5105	{
5106	  struct dep_replacement *desc = DEP_REPLACE (dep);
5107	  if (desc != NULL)
5108	    validate_change (desc->insn, desc->loc, desc->orig, 0);
5109	}
5110    }
5111}
5112
5113/* Return first non-scheduled insn in the current scheduling block.
5114   This is mostly used for debug-counter purposes.  */
5115static rtx_insn *
5116first_nonscheduled_insn (void)
5117{
5118  rtx_insn *insn = (nonscheduled_insns_begin != NULL_RTX
5119		    ? nonscheduled_insns_begin
5120		    : current_sched_info->prev_head);
5121
5122  do
5123    {
5124      insn = next_nonnote_nondebug_insn (insn);
5125    }
5126  while (QUEUE_INDEX (insn) == QUEUE_SCHEDULED);
5127
5128  return insn;
5129}
5130
5131/* Move insns that became ready to fire from queue to ready list.  */
5132
5133static void
5134queue_to_ready (struct ready_list *ready)
5135{
5136  rtx_insn *insn;
5137  rtx_insn_list *link;
5138  rtx skip_insn;
5139
5140  q_ptr = NEXT_Q (q_ptr);
5141
5142  if (dbg_cnt (sched_insn) == false)
5143    /* If the debug counter is activated, do not requeue the first
5144       nonscheduled insn.  */
5145    skip_insn = first_nonscheduled_insn ();
5146  else
5147    skip_insn = NULL_RTX;
5148
5149  /* Add all pending insns that can be scheduled without stalls to the
5150     ready list.  */
5151  for (link = insn_queue[q_ptr]; link; link = link->next ())
5152    {
5153      insn = link->insn ();
5154      q_size -= 1;
5155
5156      if (sched_verbose >= 2)
5157	fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
5158		 (*current_sched_info->print_insn) (insn, 0));
5159
5160      /* If the ready list is full, delay the insn for 1 cycle.
5161	 See the comment in schedule_block for the rationale.  */
5162      if (!reload_completed
5163	  && (ready->n_ready - ready->n_debug > MAX_SCHED_READY_INSNS
5164	      || (sched_pressure == SCHED_PRESSURE_MODEL
5165		  /* Limit pressure recalculations to MAX_SCHED_READY_INSNS
5166		     instructions too.  */
5167		  && model_index (insn) > (model_curr_point
5168					   + MAX_SCHED_READY_INSNS)))
5169	  && !(sched_pressure == SCHED_PRESSURE_MODEL
5170	       && model_curr_point < model_num_insns
5171	       /* Always allow the next model instruction to issue.  */
5172	       && model_index (insn) == model_curr_point)
5173	  && !SCHED_GROUP_P (insn)
5174	  && insn != skip_insn)
5175	{
5176	  if (sched_verbose >= 2)
5177	    fprintf (sched_dump, "keeping in queue, ready full\n");
5178	  queue_insn (insn, 1, "ready full");
5179	}
5180      else
5181	{
5182	  ready_add (ready, insn, false);
5183	  if (sched_verbose >= 2)
5184	    fprintf (sched_dump, "moving to ready without stalls\n");
5185        }
5186    }
5187  free_INSN_LIST_list (&insn_queue[q_ptr]);
5188
5189  /* If there are no ready insns, stall until one is ready and add all
5190     of the pending insns at that point to the ready list.  */
5191  if (ready->n_ready == 0)
5192    {
5193      int stalls;
5194
5195      for (stalls = 1; stalls <= max_insn_queue_index; stalls++)
5196	{
5197	  if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
5198	    {
5199	      for (; link; link = link->next ())
5200		{
5201		  insn = link->insn ();
5202		  q_size -= 1;
5203
5204		  if (sched_verbose >= 2)
5205		    fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
5206			     (*current_sched_info->print_insn) (insn, 0));
5207
5208		  ready_add (ready, insn, false);
5209		  if (sched_verbose >= 2)
5210		    fprintf (sched_dump, "moving to ready with %d stalls\n", stalls);
5211		}
5212	      free_INSN_LIST_list (&insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]);
5213
5214	      advance_one_cycle ();
5215
5216	      break;
5217	    }
5218
5219	  advance_one_cycle ();
5220	}
5221
5222      q_ptr = NEXT_Q_AFTER (q_ptr, stalls);
5223      clock_var += stalls;
5224      if (sched_verbose >= 2)
5225	fprintf (sched_dump, ";;\tAdvancing clock by %d cycle[s] to %d\n",
5226		 stalls, clock_var);
5227    }
5228}
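/* A hypothetical trace of the stall loop above: with the ready list empty
   and one insn queued three cycles out, the loop calls advance_one_cycle ()
   for the two empty slots, moves the queued insn to the ready list on the
   third iteration and advances the DFA once more, after which q_ptr and
   clock_var are both advanced by stalls == 3.  */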
5229
5230/* Used by early_queue_to_ready.  Determines whether it is "ok" to
5231   prematurely move INSN from the queue to the ready list.  Currently,
5232   if a target defines the hook 'is_costly_dependence', this function
5233   uses the hook to check whether there exist any dependences which are
5234   considered costly by the target, between INSN and other insns that
5235   have already been scheduled.  Dependences are checked up to Y cycles
5236   back, with a default of Y=1; the flag -fsched-stalled-insns-dep=Y
5237   controls this value.
5238   (Other considerations could be taken into account, instead or in
5239   addition, depending on user flags and target hooks.)  */
5240
5241static bool
5242ok_for_early_queue_removal (rtx insn)
5243{
5244  if (targetm.sched.is_costly_dependence)
5245    {
5246      rtx prev_insn;
5247      int n_cycles;
5248      int i = scheduled_insns.length ();
5249      for (n_cycles = flag_sched_stalled_insns_dep; n_cycles; n_cycles--)
5250	{
5251	  while (i-- > 0)
5252	    {
5253	      int cost;
5254
5255	      prev_insn = scheduled_insns[i];
5256
5257	      if (!NOTE_P (prev_insn))
5258		{
5259		  dep_t dep;
5260
5261		  dep = sd_find_dep_between (prev_insn, insn, true);
5262
5263		  if (dep != NULL)
5264		    {
5265		      cost = dep_cost (dep);
5266
5267		      if (targetm.sched.is_costly_dependence (dep, cost,
5268				flag_sched_stalled_insns_dep - n_cycles))
5269			return false;
5270		    }
5271		}
5272
5273	      if (GET_MODE (prev_insn) == TImode) /* end of dispatch group */
5274		break;
5275	    }
5276
5277	  if (i == 0)
5278	    break;
5279	}
5280    }
5281
5282  return true;
5283}
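/* For instance, under a hypothetical -fsched-stalled-insns-dep=2, the loop
   above walks the already-scheduled insns backwards across up to two TImode
   dispatch-group boundaries; a single dependence that the target's
   is_costly_dependence hook flags as costly is enough to veto the early
   removal of INSN.  */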
5284
5285
5286/* Remove insns from the queue, before they become "ready" with respect
5287   to FU latency considerations.  */
5288
5289static int
5290early_queue_to_ready (state_t state, struct ready_list *ready)
5291{
5292  rtx_insn *insn;
5293  rtx_insn_list *link;
5294  rtx_insn_list *next_link;
5295  rtx_insn_list *prev_link;
5296  bool move_to_ready;
5297  int cost;
5298  state_t temp_state = alloca (dfa_state_size);
5299  int stalls;
5300  int insns_removed = 0;
5301
5302  /*
5303     Flag '-fsched-stalled-insns=X' determines the aggressiveness of this
5304     function:
5305
5306     X == 0: There is no limit on how many queued insns can be removed
5307             prematurely.  (flag_sched_stalled_insns = -1).
5308
5309     X >= 1: Only X queued insns can be removed prematurely in each
5310	     invocation.  (flag_sched_stalled_insns = X).
5311
5312     Otherwise: Early queue removal is disabled.
5313         (flag_sched_stalled_insns = 0)
5314  */
5315
5316  if (! flag_sched_stalled_insns)
5317    return 0;
5318
5319  for (stalls = 0; stalls <= max_insn_queue_index; stalls++)
5320    {
5321      if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
5322	{
5323	  if (sched_verbose > 6)
5324	    fprintf (sched_dump, ";; look at index %d + %d\n", q_ptr, stalls);
5325
5326	  prev_link = 0;
5327	  while (link)
5328	    {
5329	      next_link = link->next ();
5330	      insn = link->insn ();
5331	      if (insn && sched_verbose > 6)
5332		print_rtl_single (sched_dump, insn);
5333
5334	      memcpy (temp_state, state, dfa_state_size);
5335	      if (recog_memoized (insn) < 0)
5336		/* Use a non-negative cost to mark the insn as not ready,
5337		   avoiding an infinite Q->R->Q->R... cycle.  */
5338		cost = 0;
5339	      else
5340		cost = state_transition (temp_state, insn);
5341
5342	      if (sched_verbose >= 6)
5343		fprintf (sched_dump, "transition cost = %d\n", cost);
5344
5345	      move_to_ready = false;
5346	      if (cost < 0)
5347		{
5348		  move_to_ready = ok_for_early_queue_removal (insn);
5349		  if (move_to_ready == true)
5350		    {
5351		      /* move from Q to R */
5352		      q_size -= 1;
5353		      ready_add (ready, insn, false);
5354
5355		      if (prev_link)
5356			XEXP (prev_link, 1) = next_link;
5357		      else
5358			insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = next_link;
5359
5360		      free_INSN_LIST_node (link);
5361
5362		      if (sched_verbose >= 2)
5363			fprintf (sched_dump, ";;\t\tEarly Q-->Ready: insn %s\n",
5364				 (*current_sched_info->print_insn) (insn, 0));
5365
5366		      insns_removed++;
5367		      if (insns_removed == flag_sched_stalled_insns)
5368			/* Remove no more than flag_sched_stalled_insns insns
5369			   from Q at a time.  */
5370			return insns_removed;
5371		    }
5372		}
5373
5374	      if (move_to_ready == false)
5375		prev_link = link;
5376
5377	      link = next_link;
5378	    } /* while link */
5379	} /* if link */
5380
5381    } /* for stalls.. */
5382
5383  return insns_removed;
5384}
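/* Usage note: this machinery is driven by the user-visible option, e.g.

     gcc -O2 -fschedule-insns -fsched-stalled-insns=4

   (an illustrative command line), which allows at most four premature
   queue removals per invocation of this function.  */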
5385
5386
5387/* Print the ready list for debugging purposes.
5388   If READY_TRY is non-null, only print insns that max_issue
5389   will consider.  */
5390static void
5391debug_ready_list_1 (struct ready_list *ready, signed char *ready_try)
5392{
5393  rtx_insn **p;
5394  int i;
5395
5396  if (ready->n_ready == 0)
5397    {
5398      fprintf (sched_dump, "\n");
5399      return;
5400    }
5401
5402  p = ready_lastpos (ready);
5403  for (i = 0; i < ready->n_ready; i++)
5404    {
5405      if (ready_try != NULL && ready_try[ready->n_ready - i - 1])
5406	continue;
5407
5408      fprintf (sched_dump, "  %s:%d",
5409	       (*current_sched_info->print_insn) (p[i], 0),
5410	       INSN_LUID (p[i]));
5411      if (sched_pressure != SCHED_PRESSURE_NONE)
5412	fprintf (sched_dump, "(cost=%d",
5413		 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (p[i]));
5414      fprintf (sched_dump, ":prio=%d", INSN_PRIORITY (p[i]));
5415      if (INSN_TICK (p[i]) > clock_var)
5416	fprintf (sched_dump, ":delay=%d", INSN_TICK (p[i]) - clock_var);
5417      if (sched_pressure == SCHED_PRESSURE_MODEL)
5418	fprintf (sched_dump, ":idx=%d",
5419		 model_index (p[i]));
5420      if (sched_pressure != SCHED_PRESSURE_NONE)
5421	fprintf (sched_dump, ")");
5422    }
5423  fprintf (sched_dump, "\n");
5424}
5425
5426/* Print the ready list.  Callable from debugger.  */
5427static void
5428debug_ready_list (struct ready_list *ready)
5429{
5430  debug_ready_list_1 (ready, NULL);
5431}
5432
5433/* Search INSN for REG_SAVE_NOTE notes and convert them back into insn
5434   NOTEs.  This is used for NOTE_INSN_EPILOGUE_BEG, so that sched-ebb
5435   replaces the epilogue note in the correct basic block.  */
5436void
5437reemit_notes (rtx_insn *insn)
5438{
5439  rtx note;
5440  rtx_insn *last = insn;
5441
5442  for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
5443    {
5444      if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
5445	{
5446	  enum insn_note note_type = (enum insn_note) INTVAL (XEXP (note, 0));
5447
5448	  last = emit_note_before (note_type, last);
5449	  remove_note (insn, note);
5450	}
5451    }
5452}
5453
5454/* Move INSN.  Reemit notes if needed.  Update CFG, if needed.  */
5455static void
5456move_insn (rtx_insn *insn, rtx_insn *last, rtx nt)
5457{
5458  if (PREV_INSN (insn) != last)
5459    {
5460      basic_block bb;
5461      rtx_insn *note;
5462      int jump_p = 0;
5463
5464      bb = BLOCK_FOR_INSN (insn);
5465
5466      /* BB_HEAD is either LABEL or NOTE.  */
5467      gcc_assert (BB_HEAD (bb) != insn);
5468
5469      if (BB_END (bb) == insn)
5470	/* If this is last instruction in BB, move end marker one
5471	   instruction up.  */
5472	{
5473	  /* Jumps are always placed at the end of basic block.  */
5474	  jump_p = control_flow_insn_p (insn);
5475
5476	  gcc_assert (!jump_p
5477		      || ((common_sched_info->sched_pass_id == SCHED_RGN_PASS)
5478			  && IS_SPECULATION_BRANCHY_CHECK_P (insn))
5479		      || (common_sched_info->sched_pass_id
5480			  == SCHED_EBB_PASS));
5481
5482	  gcc_assert (BLOCK_FOR_INSN (PREV_INSN (insn)) == bb);
5483
5484	  BB_END (bb) = PREV_INSN (insn);
5485	}
5486
5487      gcc_assert (BB_END (bb) != last);
5488
5489      if (jump_p)
5490	/* We move the block note along with jump.  */
5491	{
5492	  gcc_assert (nt);
5493
5494	  note = NEXT_INSN (insn);
5495	  while (NOTE_NOT_BB_P (note) && note != nt)
5496	    note = NEXT_INSN (note);
5497
5498	  if (note != nt
5499	      && (LABEL_P (note)
5500		  || BARRIER_P (note)))
5501	    note = NEXT_INSN (note);
5502
5503	  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
5504	}
5505      else
5506	note = insn;
5507
5508      SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (note);
5509      SET_PREV_INSN (NEXT_INSN (note)) = PREV_INSN (insn);
5510
5511      SET_NEXT_INSN (note) = NEXT_INSN (last);
5512      SET_PREV_INSN (NEXT_INSN (last)) = note;
5513
5514      SET_NEXT_INSN (last) = insn;
5515      SET_PREV_INSN (insn) = last;
5516
5517      bb = BLOCK_FOR_INSN (last);
5518
5519      if (jump_p)
5520	{
5521	  fix_jump_move (insn);
5522
5523	  if (BLOCK_FOR_INSN (insn) != bb)
5524	    move_block_after_check (insn);
5525
5526	  gcc_assert (BB_END (bb) == last);
5527	}
5528
5529      df_insn_change_bb (insn, bb);
5530
5531      /* Update BB_END, if needed.  */
5532      if (BB_END (bb) == last)
5533	BB_END (bb) = insn;
5534    }
5535
5536  SCHED_GROUP_P (insn) = 0;
5537}
5538
5539/* Return true if scheduling INSN will finish current clock cycle.  */
5540static bool
5541insn_finishes_cycle_p (rtx_insn *insn)
5542{
5543  if (SCHED_GROUP_P (insn))
5544    /* After issuing INSN, rest of the sched_group will be forced to issue
5545       in order.  Don't make any plans for the rest of cycle.  */
5546    return true;
5547
5548  /* Finishing the block will, apparently, finish the cycle.  */
5549  if (current_sched_info->insn_finishes_block_p
5550      && current_sched_info->insn_finishes_block_p (insn))
5551    return true;
5552
5553  return false;
5554}
5555
5556/* Functions to model cache auto-prefetcher.
5557
5558   Some CPUs have a cache auto-prefetcher, which /seems/ to initiate
5559   memory prefetches if it sees instructions with consecutive memory accesses
5560   in the instruction stream.  Details of such hardware units are not published,
5561   so we can only guess what exactly is going on there.
5562   In the scheduler, we model an abstract auto-prefetcher.  If there are memory
5563   insns in the ready list (or the queue) that have the same memory base, but
5564   different offsets, then we delay the insns with larger offsets until insns
5565   with smaller offsets get scheduled.  If PARAM_SCHED_AUTOPREF_QUEUE_DEPTH
5566   is "1", then we look at the ready list; if it is N>1, then we also look
5567   through N-1 queue entries.
5568   If the param is N>=0, then rank_for_schedule will consider auto-prefetching
5569   among its heuristics.
5570   Param value of "-1" disables modelling of the auto-prefetcher.  */
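/* For instance (base register and offsets invented), given two ready loads

     (set (reg r2) (mem (plus (reg r1) (const_int 8))))
     (set (reg r3) (mem (plus (reg r1) (const_int 0))))

   both decompose to base r1 with offsets 8 and 0, so the model delays the
   offset-8 load until the offset-0 load has been scheduled, keeping the
   access pattern ascending for the hardware prefetcher.  */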
5571
5572/* Initialize autoprefetcher model data for INSN.  */
5573static void
5574autopref_multipass_init (const rtx_insn *insn, int write)
5575{
5576  autopref_multipass_data_t data = &INSN_AUTOPREF_MULTIPASS_DATA (insn)[write];
5577
5578  gcc_assert (data->status == AUTOPREF_MULTIPASS_DATA_UNINITIALIZED);
5579  data->base = NULL_RTX;
5580  data->offset = 0;
5581  /* Mark the entry initialized, but not relevant for the auto-prefetcher.  */
5582  data->status = AUTOPREF_MULTIPASS_DATA_IRRELEVANT;
5583
5584  rtx set = single_set (insn);
5585  if (set == NULL_RTX)
5586    return;
5587
5588  rtx mem = write ? SET_DEST (set) : SET_SRC (set);
5589  if (!MEM_P (mem))
5590    return;
5591
5592  struct address_info info;
5593  decompose_mem_address (&info, mem);
5594
5595  /* TODO: Currently only (base+const) addressing is supported.  */
5596  if (info.base == NULL || !REG_P (*info.base)
5597      || (info.disp != NULL && !CONST_INT_P (*info.disp)))
5598    return;
5599
5600  /* This insn is relevant for auto-prefetcher.  */
5601  data->base = *info.base;
5602  data->offset = info.disp ? INTVAL (*info.disp) : 0;
5603  data->status = AUTOPREF_MULTIPASS_DATA_NORMAL;
5604}
5605
5606/* Helper function for rank_for_schedule sorting.  */
5607static int
5608autopref_rank_for_schedule (const rtx_insn *insn1, const rtx_insn *insn2)
5609{
5610  for (int write = 0; write < 2; ++write)
5611    {
5612      autopref_multipass_data_t data1
5613	= &INSN_AUTOPREF_MULTIPASS_DATA (insn1)[write];
5614      autopref_multipass_data_t data2
5615	= &INSN_AUTOPREF_MULTIPASS_DATA (insn2)[write];
5616
5617      if (data1->status == AUTOPREF_MULTIPASS_DATA_UNINITIALIZED)
5618	autopref_multipass_init (insn1, write);
5619      if (data1->status == AUTOPREF_MULTIPASS_DATA_IRRELEVANT)
5620	continue;
5621
5622      if (data2->status == AUTOPREF_MULTIPASS_DATA_UNINITIALIZED)
5623	autopref_multipass_init (insn2, write);
5624      if (data2->status == AUTOPREF_MULTIPASS_DATA_IRRELEVANT)
5625	continue;
5626
5627      if (!rtx_equal_p (data1->base, data2->base))
5628	continue;
5629
5630      return data1->offset - data2->offset;
5631    }
5632
5633  return 0;
5634}
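/* E.g. two reads from the same base r1 at offsets 0 and 16 (made-up
   values) compare as 0 - 16 = -16, ranking the offset-0 insn earlier;
   pairs with different bases, or involving an irrelevant insn, compare
   equal and fall through to the remaining rank_for_schedule heuristics.  */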
5635
5636/* True if header of debug dump was printed.  */
5637static bool autopref_multipass_dfa_lookahead_guard_started_dump_p;
5638
5639/* Helper for autopref_multipass_dfa_lookahead_guard.
5640   Return "1" if INSN1 should be delayed in favor of INSN2.  */
5641static int
5642autopref_multipass_dfa_lookahead_guard_1 (const rtx_insn *insn1,
5643					  const rtx_insn *insn2, int write)
5644{
5645  autopref_multipass_data_t data1
5646    = &INSN_AUTOPREF_MULTIPASS_DATA (insn1)[write];
5647  autopref_multipass_data_t data2
5648    = &INSN_AUTOPREF_MULTIPASS_DATA (insn2)[write];
5649
5650  if (data2->status == AUTOPREF_MULTIPASS_DATA_UNINITIALIZED)
5651    autopref_multipass_init (insn2, write);
5652  if (data2->status == AUTOPREF_MULTIPASS_DATA_IRRELEVANT)
5653    return 0;
5654
5655  if (rtx_equal_p (data1->base, data2->base)
5656      && data1->offset > data2->offset)
5657    {
5658      if (sched_verbose >= 2)
5659	{
5660          if (!autopref_multipass_dfa_lookahead_guard_started_dump_p)
5661	    {
5662	      fprintf (sched_dump,
5663		       ";;\t\tnot trying in max_issue due to autoprefetch "
5664		       "model: ");
5665	      autopref_multipass_dfa_lookahead_guard_started_dump_p = true;
5666	    }
5667
5668	  fprintf (sched_dump, " %d(%d)", INSN_UID (insn1), INSN_UID (insn2));
5669	}
5670
5671      return 1;
5672    }
5673
5674  return 0;
5675}
5676
5677/* General note:
5678
5679   We could have also hooked autoprefetcher model into
5680   first_cycle_multipass_backtrack / first_cycle_multipass_issue hooks
5681   to enable intelligent selection of "[r1+0]=r2; [r1+4]=r3" on the same cycle
5682   (e.g., once "[r1+0]=r2" is issued in max_issue(), "[r1+4]=r3" gets
5683   unblocked).  We don't bother with this yet because the target of interest
5684   (ARM Cortex-A15) can issue only one memory operation per cycle.  */
5685
5686/* Implementation of first_cycle_multipass_dfa_lookahead_guard hook.
5687   Return "1" if INSN1 should not be considered in max_issue due to
5688   auto-prefetcher considerations.  */
5689int
5690autopref_multipass_dfa_lookahead_guard (rtx_insn *insn1, int ready_index)
5691{
5692  int r = 0;
5693
5694  /* Exit early if the param forbids this or if we're not entering here through
5695     normal haifa scheduling.  This can happen if selective scheduling is
5696     explicitly enabled.  */
5697  if (!insn_queue || PARAM_VALUE (PARAM_SCHED_AUTOPREF_QUEUE_DEPTH) <= 0)
5698    return 0;
5699
5700  if (sched_verbose >= 2 && ready_index == 0)
5701    autopref_multipass_dfa_lookahead_guard_started_dump_p = false;
5702
5703  for (int write = 0; write < 2; ++write)
5704    {
5705      autopref_multipass_data_t data1
5706	= &INSN_AUTOPREF_MULTIPASS_DATA (insn1)[write];
5707
5708      if (data1->status == AUTOPREF_MULTIPASS_DATA_UNINITIALIZED)
5709	autopref_multipass_init (insn1, write);
5710      if (data1->status == AUTOPREF_MULTIPASS_DATA_IRRELEVANT)
5711	continue;
5712
5713      if (ready_index == 0
5714	  && data1->status == AUTOPREF_MULTIPASS_DATA_DONT_DELAY)
5715	/* We allow only a single delay on privileged instructions.
5716	   Doing otherwise would cause an infinite loop.  */
5717	{
5718	  if (sched_verbose >= 2)
5719	    {
5720	      if (!autopref_multipass_dfa_lookahead_guard_started_dump_p)
5721		{
5722		  fprintf (sched_dump,
5723			   ";;\t\tnot trying in max_issue due to autoprefetch "
5724			   "model: ");
5725		  autopref_multipass_dfa_lookahead_guard_started_dump_p = true;
5726		}
5727
5728	      fprintf (sched_dump, " *%d*", INSN_UID (insn1));
5729	    }
5730	  continue;
5731	}
5732
5733      for (int i2 = 0; i2 < ready.n_ready; ++i2)
5734	{
5735	  rtx_insn *insn2 = get_ready_element (i2);
5736	  if (insn1 == insn2)
5737	    continue;
5738	  r = autopref_multipass_dfa_lookahead_guard_1 (insn1, insn2, write);
5739	  if (r)
5740	    {
5741	      if (ready_index == 0)
5742		{
5743		  r = -1;
5744		  data1->status = AUTOPREF_MULTIPASS_DATA_DONT_DELAY;
5745		}
5746	      goto finish;
5747	    }
5748	}
5749
5750      if (PARAM_VALUE (PARAM_SCHED_AUTOPREF_QUEUE_DEPTH) == 1)
5751	continue;
5752
5753      /* Everything from the current queue slot should have been moved to
5754	 the ready list.  */
5755      gcc_assert (insn_queue[NEXT_Q_AFTER (q_ptr, 0)] == NULL_RTX);
5756
5757      int n_stalls = PARAM_VALUE (PARAM_SCHED_AUTOPREF_QUEUE_DEPTH) - 1;
5758      if (n_stalls > max_insn_queue_index)
5759	n_stalls = max_insn_queue_index;
5760
5761      for (int stalls = 1; stalls <= n_stalls; ++stalls)
5762	{
5763	  for (rtx_insn_list *link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)];
5764	       link != NULL_RTX;
5765	       link = link->next ())
5766	    {
5767	      rtx_insn *insn2 = link->insn ();
5768	      r = autopref_multipass_dfa_lookahead_guard_1 (insn1, insn2,
5769							    write);
5770	      if (r)
5771		{
5772		  /* Queue INSN1 until INSN2 can issue.  */
5773		  r = -stalls;
5774		  if (ready_index == 0)
5775		    data1->status = AUTOPREF_MULTIPASS_DATA_DONT_DELAY;
5776		  goto finish;
5777		}
5778	    }
5779	}
5780    }
5781
5782    finish:
5783  if (sched_verbose >= 2
5784      && autopref_multipass_dfa_lookahead_guard_started_dump_p
5785      && (ready_index == ready.n_ready - 1 || r < 0))
5786    /* This does not /always/ trigger.  We don't output EOL if the last
5787       insn is not recognized (INSN_CODE < 0) and lookahead_guard is not
5788       called.  We can live with this.  */
5789    fprintf (sched_dump, "\n");
5790
5791  return r;
5792}
5793
5794/* Define type for target data used in multipass scheduling.  */
5795#ifndef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T
5796# define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T int
5797#endif
5798typedef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T first_cycle_multipass_data_t;
5799
5800/* The following structure describes an entry of the stack of choices.  */
5801struct choice_entry
5802{
5803  /* Ordinal number of the issued insn in the ready queue.  */
5804  int index;
5805  /* The number of remaining insns whose issue we should still try.  */
5806  int rest;
5807  /* The number of issued essential insns.  */
5808  int n;
5809  /* State after issuing the insn.  */
5810  state_t state;
5811  /* Target-specific data.  */
5812  first_cycle_multipass_data_t target_data;
5813};
5814
5815/* The following array is used to implement a stack of choices used in
5816   function max_issue.  */
5817static struct choice_entry *choice_stack;
5818
5819/* This holds the value of the target dfa_lookahead hook.  */
5820int dfa_lookahead;
5821
5822/* The following variable holds the maximal number of tries of issuing
5823   insns for the first cycle multipass insn scheduling.  We define
5824   this value as constant*(DFA_LOOKAHEAD**ISSUE_RATE).  We would not
5825   need this constraint if all real insns (with non-negative codes)
5826   had reservations, because in that case the algorithm complexity is
5827   O(DFA_LOOKAHEAD**ISSUE_RATE).  Unfortunately, the dfa descriptions
5828   might be incomplete and such insns might occur.  For such
5829   descriptions, the complexity of the algorithm (without the constraint)
5830   could reach DFA_LOOKAHEAD ** N, where N is the queue length.  */
5831static int max_lookahead_tries;
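/* Numeric example with hypothetical target parameters: for
   dfa_lookahead == 4 and issue_rate == 2, max_issue initializes
   max_lookahead_tries to 100 * 4 * 4 == 1600, bounding the number of
   state transitions tried per call even when some insns lack
   reservations.  */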
5832
5833/* The following function returns the maximal (or close to maximal) number
5834   of insns which can be issued on the same cycle, one of which is the
5835   insn with the best rank (the first insn in READY).  To do this, the
5836   function tries different samples of ready insns.  READY is the
5837   current queue `ready'.  The global array READY_TRY reflects which
5838   insns have already been issued in this try.  The function stops
5839   immediately if it finds a solution in which all instructions can be
5840   issued.  INDEX will contain the index of the best insn in READY.  This
5841   function is used only for first cycle multipass scheduling.
5842
5843   PRIVILEGED_N >= 0 is the number of privileged insns at the head of READY.
5844
5845   This function expects recognized insns only.  All USEs,
5846   CLOBBERs, etc must be filtered elsewhere.  */
5847int
5848max_issue (struct ready_list *ready, int privileged_n, state_t state,
5849	   bool first_cycle_insn_p, int *index)
5850{
5851  int n, i, all, n_ready, best, delay, tries_num;
5852  int more_issue;
5853  struct choice_entry *top;
5854  rtx_insn *insn;
5855
5856  if (sched_fusion)
5857    return 0;
5858
5859  n_ready = ready->n_ready;
5860  gcc_assert (dfa_lookahead >= 1 && privileged_n >= 0
5861	      && privileged_n <= n_ready);
5862
5863  /* Init MAX_LOOKAHEAD_TRIES.  */
5864  if (max_lookahead_tries == 0)
5865    {
5866      max_lookahead_tries = 100;
5867      for (i = 0; i < issue_rate; i++)
5868	max_lookahead_tries *= dfa_lookahead;
5869    }
5870
5871  /* Init max_points.  */
5872  more_issue = issue_rate - cycle_issued_insns;
5873  gcc_assert (more_issue >= 0);
5874
5875  /* The number of the issued insns in the best solution.  */
5876  best = 0;
5877
5878  top = choice_stack;
5879
5880  /* Set initial state of the search.  */
5881  memcpy (top->state, state, dfa_state_size);
5882  top->rest = dfa_lookahead;
5883  top->n = 0;
5884  if (targetm.sched.first_cycle_multipass_begin)
5885    targetm.sched.first_cycle_multipass_begin (&top->target_data,
5886					       ready_try, n_ready,
5887					       first_cycle_insn_p);
5888
5889  /* Count the number of the insns to search among.  */
5890  for (all = i = 0; i < n_ready; i++)
5891    if (!ready_try [i])
5892      all++;
5893
5894  if (sched_verbose >= 2)
5895    {
5896      fprintf (sched_dump, ";;\t\tmax_issue among %d insns:", all);
5897      debug_ready_list_1 (ready, ready_try);
5898    }
5899
5900  /* I is the index of the insn to try next.  */
5901  i = 0;
5902  tries_num = 0;
5903  for (;;)
5904    {
5905      if (/* If we've reached a dead end or searched enough of what we have
5906	     been asked...  */
5907	  top->rest == 0
5908	  /* or have nothing else to try...  */
5909	  || i >= n_ready
5910	  /* or should not issue more.  */
5911	  || top->n >= more_issue)
5912	{
5913	  /* ??? (... || i == n_ready).  */
5914	  gcc_assert (i <= n_ready);
5915
5916	  /* We should not issue more than issue_rate instructions.  */
5917	  gcc_assert (top->n <= more_issue);
5918
5919	  if (top == choice_stack)
5920	    break;
5921
5922	  if (best < top - choice_stack)
5923	    {
5924	      if (privileged_n)
5925		{
5926		  n = privileged_n;
5927		  /* Try to find issued privileged insn.  */
5928		  while (n && !ready_try[--n])
5929		    ;
5930		}
5931
5932	      if (/* If all insns are equally good...  */
5933		  privileged_n == 0
5934		  /* Or a privileged insn will be issued.  */
5935		  || ready_try[n])
5936		/* Then we have a solution.  */
5937		{
5938		  best = top - choice_stack;
5939		  /* This is the index of the insn issued first in this
5940		     solution.  */
5941		  *index = choice_stack [1].index;
5942		  if (top->n == more_issue || best == all)
5943		    break;
5944		}
5945	    }
5946
5947	  /* Set ready-list index to point to the last insn
5948	     ('i++' below will advance it to the next insn).  */
5949	  i = top->index;
5950
5951	  /* Backtrack.  */
5952	  ready_try [i] = 0;
5953
5954	  if (targetm.sched.first_cycle_multipass_backtrack)
5955	    targetm.sched.first_cycle_multipass_backtrack (&top->target_data,
5956							   ready_try, n_ready);
5957
5958	  top--;
5959	  memcpy (state, top->state, dfa_state_size);
5960	}
5961      else if (!ready_try [i])
5962	{
5963	  tries_num++;
5964	  if (tries_num > max_lookahead_tries)
5965	    break;
5966	  insn = ready_element (ready, i);
5967	  delay = state_transition (state, insn);
5968	  if (delay < 0)
5969	    {
5970	      if (state_dead_lock_p (state)
5971		  || insn_finishes_cycle_p (insn))
5972		/* We won't issue any more instructions in the next
5973		   choice_state.  */
5974		top->rest = 0;
5975	      else
5976		top->rest--;
5977
5978	      n = top->n;
5979	      if (memcmp (top->state, state, dfa_state_size) != 0)
5980		n++;
5981
5982	      /* Advance to the next choice_entry.  */
5983	      top++;
5984	      /* Initialize it.  */
5985	      top->rest = dfa_lookahead;
5986	      top->index = i;
5987	      top->n = n;
5988	      memcpy (top->state, state, dfa_state_size);
5989	      ready_try [i] = 1;
5990
5991	      if (targetm.sched.first_cycle_multipass_issue)
5992		targetm.sched.first_cycle_multipass_issue (&top->target_data,
5993							   ready_try, n_ready,
5994							   insn,
5995							   &((top - 1)
5996							     ->target_data));
5997
5998	      i = -1;
5999	    }
6000	}
6001
6002      /* Increase ready-list index.  */
6003      i++;
6004    }
6005
6006  if (targetm.sched.first_cycle_multipass_end)
6007    targetm.sched.first_cycle_multipass_end (best != 0
6008					     ? &choice_stack[1].target_data
6009					     : NULL);
6010
6011  /* Restore the original state of the DFA.  */
6012  memcpy (state, choice_stack->state, dfa_state_size);
6013
6014  return best;
6015}
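/* The typical call, made from choose_ready below, looks like

     best = max_issue (ready, 1, curr_state, first_cycle_insn_p, &index);

   PRIVILEGED_N == 1 accepts only solutions that issue the current head of
   the ready list; a return value of 0 means the DFA could not issue any of
   the candidate insns this cycle, otherwise INDEX names the insn to
   schedule first.  */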
6016
6017/* The following function chooses an insn from READY and modifies
6018   READY.  This function is used only for first cycle multipass
6019   scheduling.
6020   Return:
6021   -1 if cycle should be advanced,
6022   0 if INSN_PTR is set to point to the desirable insn,
6023   1 if choose_ready () should be restarted without advancing the cycle.  */
6024static int
6025choose_ready (struct ready_list *ready, bool first_cycle_insn_p,
6026	      rtx_insn **insn_ptr)
6027{
6028  if (dbg_cnt (sched_insn) == false)
6029    {
6030      if (nonscheduled_insns_begin == NULL_RTX)
6031	nonscheduled_insns_begin = current_sched_info->prev_head;
6032
6033      rtx_insn *insn = first_nonscheduled_insn ();
6034
6035      if (QUEUE_INDEX (insn) == QUEUE_READY)
6036	/* INSN is in the ready_list.  */
6037	{
6038	  ready_remove_insn (insn);
6039	  *insn_ptr = insn;
6040	  return 0;
6041	}
6042
6043      /* INSN is in the queue.  Advance cycle to move it to the ready list.  */
6044      gcc_assert (QUEUE_INDEX (insn) >= 0);
6045      return -1;
6046    }
6047
6048  if (dfa_lookahead <= 0 || SCHED_GROUP_P (ready_element (ready, 0))
6049      || DEBUG_INSN_P (ready_element (ready, 0)))
6050    {
6051      if (targetm.sched.dispatch (NULL, IS_DISPATCH_ON))
6052	*insn_ptr = ready_remove_first_dispatch (ready);
6053      else
6054	*insn_ptr = ready_remove_first (ready);
6055
6056      return 0;
6057    }
6058  else
6059    {
6060      /* Try to choose the best insn.  */
6061      int index = 0, i;
6062      rtx_insn *insn;
6063
6064      insn = ready_element (ready, 0);
6065      if (INSN_CODE (insn) < 0)
6066	{
6067	  *insn_ptr = ready_remove_first (ready);
6068	  return 0;
6069	}
6070
6071      /* Filter the search space.  */
6072      for (i = 0; i < ready->n_ready; i++)
6073	{
6074	  ready_try[i] = 0;
6075
6076	  insn = ready_element (ready, i);
6077
6078	  /* If this insn is recognizable we should have already
6079	     recognized it earlier.
6080	     ??? Not very clear where this is supposed to be done.
6081	     See dep_cost_1.  */
6082	  gcc_checking_assert (INSN_CODE (insn) >= 0
6083			       || recog_memoized (insn) < 0);
6084	  if (INSN_CODE (insn) < 0)
6085	    {
6086	      /* Non-recognized insns at position 0 are handled above.  */
6087	      gcc_assert (i > 0);
6088	      ready_try[i] = 1;
6089	      continue;
6090	    }
6091
6092	  if (targetm.sched.first_cycle_multipass_dfa_lookahead_guard)
6093	    {
6094	      ready_try[i]
6095		= (targetm.sched.first_cycle_multipass_dfa_lookahead_guard
6096		    (insn, i));
6097
6098	      if (ready_try[i] < 0)
6099		/* Queue instruction for several cycles.
6100		   We need to restart choose_ready as we have changed
6101		   the ready list.  */
6102		{
6103		  change_queue_index (insn, -ready_try[i]);
6104		  return 1;
6105		}
6106
6107	      /* Make sure that we didn't end up with 0'th insn filtered out.
6108		 Don't be tempted to make life easier for backends and just
6109		 requeue 0'th insn if (ready_try[0] != 0) and restart
6110		 choose_ready.  Backends should be very considerate about
6111		 requeueing instructions -- especially the highest priority
6112		 one at position 0.  */
6113	      gcc_assert (ready_try[i] == 0 || i > 0);
6114	      if (ready_try[i])
6115		continue;
6116	    }
6117
6118	  gcc_assert (ready_try[i] == 0);
6119	  /* INSN made it through the scrutiny of filters!  */
6120	}
6121
6122      if (max_issue (ready, 1, curr_state, first_cycle_insn_p, &index) == 0)
6123	{
6124	  *insn_ptr = ready_remove_first (ready);
6125	  if (sched_verbose >= 4)
6126	    fprintf (sched_dump, ";;\t\tChosen insn (but can't issue): %s\n",
6127                     (*current_sched_info->print_insn) (*insn_ptr, 0));
6128	  return 0;
6129	}
6130      else
6131	{
6132	  if (sched_verbose >= 4)
6133	    fprintf (sched_dump, ";;\t\tChosen insn : %s\n",
6134		     (*current_sched_info->print_insn)
6135		     (ready_element (ready, index), 0));
6136
6137	  *insn_ptr = ready_remove (ready, index);
6138	  return 0;
6139	}
6140    }
6141}
6142
6143/* This function is called when we have successfully scheduled a
6144   block.  It uses the schedule stored in the scheduled_insns vector
6145   to rearrange the RTL.  PREV_HEAD is used as the anchor to which we
6146   append the scheduled insns; TAIL is the insn after the scheduled
6147   block.  TARGET_BB is the argument passed to schedule_block.  */
6148
6149static void
6150commit_schedule (rtx_insn *prev_head, rtx_insn *tail, basic_block *target_bb)
6151{
6152  unsigned int i;
6153  rtx_insn *insn;
6154
6155  last_scheduled_insn = prev_head;
6156  for (i = 0;
6157       scheduled_insns.iterate (i, &insn);
6158       i++)
6159    {
6160      if (control_flow_insn_p (last_scheduled_insn)
6161	  || current_sched_info->advance_target_bb (*target_bb, insn))
6162	{
6163	  *target_bb = current_sched_info->advance_target_bb (*target_bb, 0);
6164
6165	  if (sched_verbose)
6166	    {
6167	      rtx_insn *x;
6168
6169	      x = next_real_insn (last_scheduled_insn);
6170	      gcc_assert (x);
6171	      dump_new_block_header (1, *target_bb, x, tail);
6172	    }
6173
6174	  last_scheduled_insn = bb_note (*target_bb);
6175	}
6176
6177      if (current_sched_info->begin_move_insn)
6178	(*current_sched_info->begin_move_insn) (insn, last_scheduled_insn);
6179      move_insn (insn, last_scheduled_insn,
6180		 current_sched_info->next_tail);
6181      if (!DEBUG_INSN_P (insn))
6182	reemit_notes (insn);
6183      last_scheduled_insn = insn;
6184    }
6185
6186  scheduled_insns.truncate (0);
6187}
6188
6189/* Examine all insns on the ready list and queue those which can't be
6190   issued in this cycle.  TEMP_STATE is temporary scheduler state we
6191   can use as scratch space.  If FIRST_CYCLE_INSN_P is true, no insns
6192   have been issued for the current cycle, which means it is valid to
6193   issue an asm statement.
6194
6195   If SHADOWS_ONLY_P is true, we eliminate all real insns and only
6196   leave those for which SHADOW_P is true.  If MODULO_EPILOGUE_P is true,
6197   we only leave insns which have an INSN_EXACT_TICK.  */
6198
6199static void
6200prune_ready_list (state_t temp_state, bool first_cycle_insn_p,
6201		  bool shadows_only_p, bool modulo_epilogue_p)
6202{
6203  int i, pass;
6204  bool sched_group_found = false;
6205  int min_cost_group = 1;
6206
6207  if (sched_fusion)
6208    return;
6209
6210  for (i = 0; i < ready.n_ready; i++)
6211    {
6212      rtx_insn *insn = ready_element (&ready, i);
6213      if (SCHED_GROUP_P (insn))
6214	{
6215	  sched_group_found = true;
6216	  break;
6217	}
6218    }
6219
6220  /* Make two passes if there's a SCHED_GROUP_P insn; make sure to handle
6221     such an insn first and note its cost, then schedule all other insns
6222     for one cycle later.  */
6223  for (pass = sched_group_found ? 0 : 1; pass < 2; )
6224    {
6225      int n = ready.n_ready;
6226      for (i = 0; i < n; i++)
6227	{
6228	  rtx_insn *insn = ready_element (&ready, i);
6229	  int cost = 0;
6230	  const char *reason = "resource conflict";
6231
6232	  if (DEBUG_INSN_P (insn))
6233	    continue;
6234
6235	  if (sched_group_found && !SCHED_GROUP_P (insn))
6236	    {
6237	      if (pass == 0)
6238		continue;
6239	      cost = min_cost_group;
6240	      reason = "not in sched group";
6241	    }
6242	  else if (modulo_epilogue_p
6243		   && INSN_EXACT_TICK (insn) == INVALID_TICK)
6244	    {
6245	      cost = max_insn_queue_index;
6246	      reason = "not an epilogue insn";
6247	    }
6248	  else if (shadows_only_p && !SHADOW_P (insn))
6249	    {
6250	      cost = 1;
6251	      reason = "not a shadow";
6252	    }
6253	  else if (recog_memoized (insn) < 0)
6254	    {
6255	      if (!first_cycle_insn_p
6256		  && (GET_CODE (PATTERN (insn)) == ASM_INPUT
6257		      || asm_noperands (PATTERN (insn)) >= 0))
6258		cost = 1;
6259	      reason = "asm";
6260	    }
6261	  else if (sched_pressure != SCHED_PRESSURE_NONE)
6262	    {
6263	      if (sched_pressure == SCHED_PRESSURE_MODEL
6264		  && INSN_TICK (insn) <= clock_var)
6265		{
6266		  memcpy (temp_state, curr_state, dfa_state_size);
6267		  if (state_transition (temp_state, insn) >= 0)
6268		    INSN_TICK (insn) = clock_var + 1;
6269		}
6270	      cost = 0;
6271	    }
6272	  else
6273	    {
6274	      int delay_cost = 0;
6275
6276	      if (delay_htab)
6277		{
6278		  struct delay_pair *delay_entry;
6279		  delay_entry
6280		    = delay_htab->find_with_hash (insn,
6281						  htab_hash_pointer (insn));
6282		  while (delay_entry && delay_cost == 0)
6283		    {
6284		      delay_cost = estimate_shadow_tick (delay_entry);
6285		      if (delay_cost > max_insn_queue_index)
6286			delay_cost = max_insn_queue_index;
6287		      delay_entry = delay_entry->next_same_i1;
6288		    }
6289		}
6290
6291	      memcpy (temp_state, curr_state, dfa_state_size);
6292	      cost = state_transition (temp_state, insn);
6293	      if (cost < 0)
6294		cost = 0;
6295	      else if (cost == 0)
6296		cost = 1;
6297	      if (cost < delay_cost)
6298		{
6299		  cost = delay_cost;
6300		  reason = "shadow tick";
6301		}
6302	    }
6303	  if (cost >= 1)
6304	    {
6305	      if (SCHED_GROUP_P (insn) && cost > min_cost_group)
6306		min_cost_group = cost;
6307	      ready_remove (&ready, i);
6308	      /* Normally we'd want to queue INSN for COST cycles.  However,
6309		 if SCHED_GROUP_P is set, then we must ensure that nothing
6310		 else comes between INSN and its predecessor.  If there is
6311		 some other insn ready to fire on the next cycle, then that
6312		 invariant would be broken.
6313
6314		 So when SCHED_GROUP_P is set, just queue this insn for a
6315		 single cycle.  */
6316	      queue_insn (insn, SCHED_GROUP_P (insn) ? 1 : cost, reason);
6317	      if (i + 1 < n)
6318		break;
6319	    }
6320	}
6321      if (i == n)
6322	pass++;
6323    }
6324}
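/* Illustration with invented costs: if the DFA transition cost for an insn
   is 2 but an associated delay pair requires 3 more cycles, the insn is
   removed from the ready list and queued for max (2, 3) == 3 cycles with
   reason "shadow tick"; if it is part of a SCHED_GROUP_P chain it is
   instead queued for a single cycle, as explained above.  */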
6325
6326/* Called when we detect that the schedule is impossible.  We examine the
6327   backtrack queue to find the earliest insn that caused this condition.  */
6328
6329static struct haifa_saved_data *
6330verify_shadows (void)
6331{
6332  struct haifa_saved_data *save, *earliest_fail = NULL;
6333  for (save = backtrack_queue; save; save = save->next)
6334    {
6335      int t;
6336      struct delay_pair *pair = save->delay_pair;
6337      rtx_insn *i1 = pair->i1;
6338
6339      for (; pair; pair = pair->next_same_i1)
6340	{
6341	  rtx_insn *i2 = pair->i2;
6342
6343	  if (QUEUE_INDEX (i2) == QUEUE_SCHEDULED)
6344	    continue;
6345
6346	  t = INSN_TICK (i1) + pair_delay (pair);
6347	  if (t < clock_var)
6348	    {
6349	      if (sched_verbose >= 2)
6350		fprintf (sched_dump,
6351			 ";;\t\tfailed delay requirements for %d/%d (%d->%d)"
6352			 ", not ready\n",
6353			 INSN_UID (pair->i1), INSN_UID (pair->i2),
6354			 INSN_TICK (pair->i1), INSN_EXACT_TICK (pair->i2));
6355	      earliest_fail = save;
6356	      break;
6357	    }
6358	  if (QUEUE_INDEX (i2) >= 0)
6359	    {
6360	      int queued_for = INSN_TICK (i2);
6361
6362	      if (t < queued_for)
6363		{
6364		  if (sched_verbose >= 2)
6365		    fprintf (sched_dump,
6366			     ";;\t\tfailed delay requirements for %d/%d"
6367			     " (%d->%d), queued too late\n",
6368			     INSN_UID (pair->i1), INSN_UID (pair->i2),
6369			     INSN_TICK (pair->i1), INSN_EXACT_TICK (pair->i2));
6370		  earliest_fail = save;
6371		  break;
6372		}
6373	    }
6374	}
6375    }
6376
6377  return earliest_fail;
6378}
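/* Example of the first failure mode above, with invented ticks: if I1 was
   scheduled at tick 5 and pair_delay == 2, its shadow I2 must issue at
   tick 7; once clock_var has advanced to 8 with I2 still unscheduled and
   not ready, t == 7 < clock_var, and this save point becomes a candidate
   for the earliest failure to backtrack to.  */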
6379
6380/* Print instructions together with useful scheduling information between
6381   HEAD and TAIL (inclusive).  */
6382static void
6383dump_insn_stream (rtx_insn *head, rtx_insn *tail)
6384{
6385  fprintf (sched_dump, ";;\t| insn | prio |\n");
6386
6387  rtx_insn *next_tail = NEXT_INSN (tail);
6388  for (rtx_insn *insn = head; insn != next_tail; insn = NEXT_INSN (insn))
6389    {
6390      int priority = NOTE_P (insn) ? 0 : INSN_PRIORITY (insn);
6391      const char *pattern = (NOTE_P (insn)
6392			     ? "note"
6393			     : str_pattern_slim (PATTERN (insn)));
6394
6395      fprintf (sched_dump, ";;\t| %4d | %4d | %-30s ",
6396	       INSN_UID (insn), priority, pattern);
6397
6398      if (sched_verbose >= 4)
6399	{
6400	  if (NOTE_P (insn) || recog_memoized (insn) < 0)
6401	    fprintf (sched_dump, "nothing");
6402	  else
6403	    print_reservation (sched_dump, insn);
6404	}
6405      fprintf (sched_dump, "\n");
6406    }
6407}
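/* With sched_verbose >= 4 the resulting dump looks roughly like this
   (insns and reservations invented):

     ;;	| insn | prio |
     ;;	|   12 |    4 | (set (reg 100) (mem ...))      (load_unit)
     ;;	|   14 |    0 | note                           nothing

   where the last column is the DFA reservation, or "nothing" for notes
   and unrecognized insns.  */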
6408
6409/* Use forward list scheduling to rearrange insns of block pointed to by
6410   TARGET_BB, possibly bringing insns from subsequent blocks in the same
6411   region.  */
6412
6413bool
6414schedule_block (basic_block *target_bb, state_t init_state)
6415{
6416  int i;
6417  bool success = modulo_ii == 0;
6418  struct sched_block_state ls;
6419  state_t temp_state = NULL;  /* It is used for multipass scheduling.  */
6420  int sort_p, advance, start_clock_var;
6421
6422  /* Head/tail info for this block.  */
6423  rtx_insn *prev_head = current_sched_info->prev_head;
6424  rtx_insn *next_tail = current_sched_info->next_tail;
6425  rtx_insn *head = NEXT_INSN (prev_head);
6426  rtx_insn *tail = PREV_INSN (next_tail);
6427
6428  if ((current_sched_info->flags & DONT_BREAK_DEPENDENCIES) == 0
6429      && sched_pressure != SCHED_PRESSURE_MODEL && !sched_fusion)
6430    find_modifiable_mems (head, tail);
6431
6432  /* We used to have code to avoid getting parameters moved from hard
6433     argument registers into pseudos.
6434
6435     However, it was removed when it proved to be of marginal benefit
6436     and caused problems because schedule_block and compute_forward_dependences
6437     had different notions of what the "head" insn was.  */
6438
6439  gcc_assert (head != tail || INSN_P (head));
6440
6441  haifa_recovery_bb_recently_added_p = false;
6442
6443  backtrack_queue = NULL;
6444
6445  /* Debug info.  */
6446  if (sched_verbose)
6447    {
6448      dump_new_block_header (0, *target_bb, head, tail);
6449
6450      if (sched_verbose >= 2)
6451	{
6452	  dump_insn_stream (head, tail);
6453	  memset (&rank_for_schedule_stats, 0,
6454		  sizeof (rank_for_schedule_stats));
6455	}
6456    }
6457
6458  if (init_state == NULL)
6459    state_reset (curr_state);
6460  else
6461    memcpy (curr_state, init_state, dfa_state_size);
6462
6463  /* Clear the ready list.  */
6464  ready.first = ready.veclen - 1;
6465  ready.n_ready = 0;
6466  ready.n_debug = 0;
6467
6468  /* It is used for first cycle multipass scheduling.  */
6469  temp_state = alloca (dfa_state_size);
6470
6471  if (targetm.sched.init)
6472    targetm.sched.init (sched_dump, sched_verbose, ready.veclen);
6473
6474  /* We start inserting insns after PREV_HEAD.  */
6475  last_scheduled_insn = prev_head;
6476  last_nondebug_scheduled_insn = NULL_RTX;
6477  nonscheduled_insns_begin = NULL;
6478
6479  gcc_assert ((NOTE_P (last_scheduled_insn)
6480	       || DEBUG_INSN_P (last_scheduled_insn))
6481	      && BLOCK_FOR_INSN (last_scheduled_insn) == *target_bb);
6482
6483  /* Initialize INSN_QUEUE.  Q_SIZE is the total number of insns in the
6484     queue.  */
6485  q_ptr = 0;
6486  q_size = 0;
6487
6488  insn_queue = XALLOCAVEC (rtx_insn_list *, max_insn_queue_index + 1);
6489  memset (insn_queue, 0, (max_insn_queue_index + 1) * sizeof (rtx_insn_list *));
6490
6491  /* Start just before the beginning of time.  */
6492  clock_var = -1;
6493
6494  /* We need queue and ready lists and clock_var to be initialized
6495     in try_ready () (which is called through init_ready_list ()).  */
6496  (*current_sched_info->init_ready_list) ();
6497
6498  if (sched_pressure)
6499    sched_pressure_start_bb (*target_bb);
6500
6501  /* The algorithm is O(n^2) in the number of ready insns at any given
6502     time in the worst case.  Before reload we are more likely to have
6503     big lists so truncate them to a reasonable size.  */
6504  if (!reload_completed
6505      && ready.n_ready - ready.n_debug > MAX_SCHED_READY_INSNS)
6506    {
6507      ready_sort_debug (&ready);
6508      ready_sort_real (&ready);
6509
6510      /* Find first free-standing insn past MAX_SCHED_READY_INSNS.
6511         If there are debug insns, we know they're first.  */
6512      for (i = MAX_SCHED_READY_INSNS + ready.n_debug; i < ready.n_ready; i++)
6513	if (!SCHED_GROUP_P (ready_element (&ready, i)))
6514	  break;
6515
6516      if (sched_verbose >= 2)
6517	{
6518	  fprintf (sched_dump,
6519		   ";;\t\tReady list on entry: %d insns:  ", ready.n_ready);
6520	  debug_ready_list (&ready);
6521	  fprintf (sched_dump,
6522		   ";;\t\t before reload => truncated to %d insns\n", i);
6523	}
6524
6525      /* Delay all insns past it for 1 cycle.  If debug counter is
6526	 activated, make an exception for the insn right after
6527	 nonscheduled_insns_begin.  */
6528      {
6529	rtx_insn *skip_insn;
6530
6531	if (dbg_cnt (sched_insn) == false)
6532	  skip_insn = first_nonscheduled_insn ();
6533	else
6534	  skip_insn = NULL;
6535
6536	while (i < ready.n_ready)
6537	  {
6538	    rtx_insn *insn;
6539
6540	    insn = ready_remove (&ready, i);
6541
6542	    if (insn != skip_insn)
6543	      queue_insn (insn, 1, "list truncated");
6544	  }
6545	if (skip_insn)
6546	  ready_add (&ready, skip_insn, true);
6547      }
6548    }
6549
6550  /* Now we can restore basic block notes and maintain precise cfg.  */
6551  restore_bb_notes (*target_bb);
6552
6553  last_clock_var = -1;
6554
6555  advance = 0;
6556
6557  gcc_assert (scheduled_insns.length () == 0);
6558  sort_p = TRUE;
6559  must_backtrack = false;
6560  modulo_insns_scheduled = 0;
6561
6562  ls.modulo_epilogue = false;
6563  ls.first_cycle_insn_p = true;
6564
6565  /* Loop until all the insns in BB are scheduled.  */
6566  while ((*current_sched_info->schedule_more_p) ())
6567    {
6568      perform_replacements_new_cycle ();
6569      do
6570	{
6571	  start_clock_var = clock_var;
6572
6573	  clock_var++;
6574
6575	  advance_one_cycle ();
6576
6577	  /* Add to the ready list all pending insns that can be issued now.
6578	     If there are no ready insns, increment clock until one
6579	     is ready and add all pending insns at that point to the ready
6580	     list.  */
6581	  queue_to_ready (&ready);
6582
6583	  gcc_assert (ready.n_ready);
6584
6585	  if (sched_verbose >= 2)
6586	    {
6587	      fprintf (sched_dump, ";;\t\tReady list after queue_to_ready:");
6588	      debug_ready_list (&ready);
6589	    }
6590	  advance -= clock_var - start_clock_var;
6591	}
6592      while (advance > 0);
6593
6594      if (ls.modulo_epilogue)
6595	{
6596	  int stage = clock_var / modulo_ii;
6597	  if (stage > modulo_last_stage * 2 + 2)
6598	    {
6599	      if (sched_verbose >= 2)
6600		fprintf (sched_dump,
6601			 ";;\t\tmodulo schedule succeeded at II %d\n",
6602			 modulo_ii);
6603	      success = true;
6604	      goto end_schedule;
6605	    }
6606	}
6607      else if (modulo_ii > 0)
6608	{
6609	  int stage = clock_var / modulo_ii;
6610	  if (stage > modulo_max_stages)
6611	    {
6612	      if (sched_verbose >= 2)
6613		fprintf (sched_dump,
6614			 ";;\t\tfailing schedule due to excessive stages\n");
6615	      goto end_schedule;
6616	    }
6617	  if (modulo_n_insns == modulo_insns_scheduled
6618	      && stage > modulo_last_stage)
6619	    {
6620	      if (sched_verbose >= 2)
6621		fprintf (sched_dump,
6622			 ";;\t\tfound kernel after %d stages, II %d\n",
6623			 stage, modulo_ii);
6624	      ls.modulo_epilogue = true;
6625	    }
6626	}
6627
6628      prune_ready_list (temp_state, true, false, ls.modulo_epilogue);
6629      if (ready.n_ready == 0)
6630	continue;
6631      if (must_backtrack)
6632	goto do_backtrack;
6633
6634      ls.shadows_only_p = false;
6635      cycle_issued_insns = 0;
6636      ls.can_issue_more = issue_rate;
6637      for (;;)
6638	{
6639	  rtx_insn *insn;
6640	  int cost;
6641	  bool asm_p;
6642
6643	  if (sort_p && ready.n_ready > 0)
6644	    {
6645	      /* Sort the ready list based on priority.  This must be
6646		 done every iteration through the loop, as schedule_insn
6647		 may have readied additional insns that will not be
6648		 sorted correctly.  */
6649	      ready_sort (&ready);
6650
6651	      if (sched_verbose >= 2)
6652		{
6653		  fprintf (sched_dump,
6654			   ";;\t\tReady list after ready_sort:    ");
6655		  debug_ready_list (&ready);
6656		}
6657	    }
6658
6659	  /* We don't want md sched reorder to even see debug insns, so put
6660	     them out right away.  */
6661	  if (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0))
6662	      && (*current_sched_info->schedule_more_p) ())
6663	    {
6664	      while (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
6665		{
6666		  rtx_insn *insn = ready_remove_first (&ready);
6667		  gcc_assert (DEBUG_INSN_P (insn));
6668		  (*current_sched_info->begin_schedule_ready) (insn);
6669		  scheduled_insns.safe_push (insn);
6670		  last_scheduled_insn = insn;
6671		  advance = schedule_insn (insn);
6672		  gcc_assert (advance == 0);
6673		  if (ready.n_ready > 0)
6674		    ready_sort (&ready);
6675		}
6676	    }
6677
6678	  if (ls.first_cycle_insn_p && !ready.n_ready)
6679	    break;
6680
6681	resume_after_backtrack:
6682	  /* Allow the target to reorder the list, typically for
6683	     better instruction bundling.  */
6684	  if (sort_p
6685	      && (ready.n_ready == 0
6686		  || !SCHED_GROUP_P (ready_element (&ready, 0))))
6687	    {
6688	      if (ls.first_cycle_insn_p && targetm.sched.reorder)
6689		ls.can_issue_more
6690		  = targetm.sched.reorder (sched_dump, sched_verbose,
6691					   ready_lastpos (&ready),
6692					   &ready.n_ready, clock_var);
6693	      else if (!ls.first_cycle_insn_p && targetm.sched.reorder2)
6694		ls.can_issue_more
6695		  = targetm.sched.reorder2 (sched_dump, sched_verbose,
6696					    ready.n_ready
6697					    ? ready_lastpos (&ready) : NULL,
6698					    &ready.n_ready, clock_var);
6699	    }
6700
6701	restart_choose_ready:
6702	  if (sched_verbose >= 2)
6703	    {
6704	      fprintf (sched_dump, ";;\tReady list (t = %3d):  ",
6705		       clock_var);
6706	      debug_ready_list (&ready);
6707	      if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
6708		print_curr_reg_pressure ();
6709	    }
6710
6711	  if (ready.n_ready == 0
6712	      && ls.can_issue_more
6713	      && reload_completed)
6714	    {
6715	      /* Allow scheduling insns directly from the queue in case
6716		 there's nothing better to do (ready list is empty) but
6717		 there are still vacant dispatch slots in the current cycle.  */
6718	      if (sched_verbose >= 6)
		fprintf (sched_dump, ";;\t\tSecond chance\n");
6720	      memcpy (temp_state, curr_state, dfa_state_size);
6721	      if (early_queue_to_ready (temp_state, &ready))
6722		ready_sort (&ready);
6723	    }
6724
6725	  if (ready.n_ready == 0
6726	      || !ls.can_issue_more
6727	      || state_dead_lock_p (curr_state)
6728	      || !(*current_sched_info->schedule_more_p) ())
6729	    break;
6730
6731	  /* Select and remove the insn from the ready list.  */
6732	  if (sort_p)
6733	    {
6734	      int res;
6735
6736	      insn = NULL;
6737	      res = choose_ready (&ready, ls.first_cycle_insn_p, &insn);
6738
6739	      if (res < 0)
6740		/* Finish cycle.  */
6741		break;
6742	      if (res > 0)
6743		goto restart_choose_ready;
6744
6745	      gcc_assert (insn != NULL_RTX);
6746	    }
6747	  else
6748	    insn = ready_remove_first (&ready);
6749
6750	  if (sched_pressure != SCHED_PRESSURE_NONE
6751	      && INSN_TICK (insn) > clock_var)
6752	    {
6753	      ready_add (&ready, insn, true);
6754	      advance = 1;
6755	      break;
6756	    }
6757
6758	  if (targetm.sched.dfa_new_cycle
6759	      && targetm.sched.dfa_new_cycle (sched_dump, sched_verbose,
6760					      insn, last_clock_var,
6761					      clock_var, &sort_p))
	    /* SORT_P is used by the target to override sorting
	       of the ready list.  This is needed when the target
	       has modified its internal structures expecting that
	       the insn will be issued next.  As we need the insn
	       to have the highest priority (so it will be returned by
	       the ready_remove_first call on the next iteration), we
	       invoke ready_add (&ready, insn, true).

	       Note, however, that INSN can later be discarded by the
	       scheduler's front end through
	       current_sched_info->can_schedule_ready_p, in which case
	       it will not be issued next.  */
6773	    {
6774	      ready_add (&ready, insn, true);
	      break;
6776	    }
6777
6778	  sort_p = TRUE;
6779
6780	  if (current_sched_info->can_schedule_ready_p
6781	      && ! (*current_sched_info->can_schedule_ready_p) (insn))
6782	    /* We normally get here only if we don't want to move
6783	       insn from the split block.  */
6784	    {
6785	      TODO_SPEC (insn) = DEP_POSTPONED;
6786	      goto restart_choose_ready;
6787	    }
6788
6789	  if (delay_htab)
6790	    {
6791	      /* If this insn is the first part of a delay-slot pair, record a
6792		 backtrack point.  */
6793	      struct delay_pair *delay_entry;
6794	      delay_entry
6795		= delay_htab->find_with_hash (insn, htab_hash_pointer (insn));
6796	      if (delay_entry)
6797		{
6798		  save_backtrack_point (delay_entry, ls);
6799		  if (sched_verbose >= 2)
6800		    fprintf (sched_dump, ";;\t\tsaving backtrack point\n");
6801		}
6802	    }
6803
6804	  /* DECISION is made.  */
6805
6806	  if (modulo_ii > 0 && INSN_UID (insn) < modulo_iter0_max_uid)
6807	    {
6808	      modulo_insns_scheduled++;
6809	      modulo_last_stage = clock_var / modulo_ii;
6810	    }
6811          if (TODO_SPEC (insn) & SPECULATIVE)
6812            generate_recovery_code (insn);
6813
6814	  if (targetm.sched.dispatch (NULL, IS_DISPATCH_ON))
6815	    targetm.sched.dispatch_do (insn, ADD_TO_DISPATCH_WINDOW);
6816
6817	  /* Update counters, etc in the scheduler's front end.  */
6818	  (*current_sched_info->begin_schedule_ready) (insn);
6819	  scheduled_insns.safe_push (insn);
6820	  gcc_assert (NONDEBUG_INSN_P (insn));
6821	  last_nondebug_scheduled_insn = last_scheduled_insn = insn;
6822
6823	  if (recog_memoized (insn) >= 0)
6824	    {
6825	      memcpy (temp_state, curr_state, dfa_state_size);
6826	      cost = state_transition (curr_state, insn);
6827	      if (sched_pressure != SCHED_PRESSURE_WEIGHTED && !sched_fusion)
6828		gcc_assert (cost < 0);
6829	      if (memcmp (temp_state, curr_state, dfa_state_size) != 0)
6830		cycle_issued_insns++;
6831	      asm_p = false;
6832	    }
6833	  else
6834	    asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
6835		     || asm_noperands (PATTERN (insn)) >= 0);
6836
6837	  if (targetm.sched.variable_issue)
6838	    ls.can_issue_more =
6839	      targetm.sched.variable_issue (sched_dump, sched_verbose,
6840					    insn, ls.can_issue_more);
6841	  /* A naked CLOBBER or USE generates no instruction, so do
6842	     not count them against the issue rate.  */
6843	  else if (GET_CODE (PATTERN (insn)) != USE
6844		   && GET_CODE (PATTERN (insn)) != CLOBBER)
6845	    ls.can_issue_more--;
6846	  advance = schedule_insn (insn);
6847
6848	  if (SHADOW_P (insn))
6849	    ls.shadows_only_p = true;
6850
6851	  /* After issuing an asm insn we should start a new cycle.  */
6852	  if (advance == 0 && asm_p)
6853	    advance = 1;
6854
6855	  if (must_backtrack)
6856	    break;
6857
6858	  if (advance != 0)
6859	    break;
6860
6861	  ls.first_cycle_insn_p = false;
6862	  if (ready.n_ready > 0)
6863	    prune_ready_list (temp_state, false, ls.shadows_only_p,
6864			      ls.modulo_epilogue);
6865	}
6866
6867    do_backtrack:
6868      if (!must_backtrack)
6869	for (i = 0; i < ready.n_ready; i++)
6870	  {
6871	    rtx_insn *insn = ready_element (&ready, i);
6872	    if (INSN_EXACT_TICK (insn) == clock_var)
6873	      {
6874		must_backtrack = true;
6875		clock_var++;
6876		break;
6877	      }
6878	  }
6879      if (must_backtrack && modulo_ii > 0)
6880	{
6881	  if (modulo_backtracks_left == 0)
6882	    goto end_schedule;
6883	  modulo_backtracks_left--;
6884	}
6885      while (must_backtrack)
6886	{
6887	  struct haifa_saved_data *failed;
6888	  rtx_insn *failed_insn;
6889
6890	  must_backtrack = false;
6891	  failed = verify_shadows ();
6892	  gcc_assert (failed);
6893
6894	  failed_insn = failed->delay_pair->i1;
6895	  /* Clear these queues.  */
6896	  perform_replacements_new_cycle ();
6897	  toggle_cancelled_flags (false);
6898	  unschedule_insns_until (failed_insn);
6899	  while (failed != backtrack_queue)
6900	    free_topmost_backtrack_point (true);
6901	  restore_last_backtrack_point (&ls);
6902	  if (sched_verbose >= 2)
6903	    fprintf (sched_dump, ";;\t\trewind to cycle %d\n", clock_var);
6904	  /* Delay by at least a cycle.  This could cause additional
6905	     backtracking.  */
6906	  queue_insn (failed_insn, 1, "backtracked");
6907	  advance = 0;
6908	  if (must_backtrack)
6909	    continue;
6910	  if (ready.n_ready > 0)
6911	    goto resume_after_backtrack;
6912	  else
6913	    {
6914	      if (clock_var == 0 && ls.first_cycle_insn_p)
6915		goto end_schedule;
6916	      advance = 1;
6917	      break;
6918	    }
6919	}
6920      ls.first_cycle_insn_p = true;
6921    }
6922  if (ls.modulo_epilogue)
6923    success = true;
6924 end_schedule:
6925  if (!ls.first_cycle_insn_p || advance)
6926    advance_one_cycle ();
6927  perform_replacements_new_cycle ();
6928  if (modulo_ii > 0)
6929    {
6930      /* Once again, debug insn suckiness: they can be on the ready list
6931	 even if they have unresolved dependencies.  To make our view
6932	 of the world consistent, remove such "ready" insns.  */
6933    restart_debug_insn_loop:
6934      for (i = ready.n_ready - 1; i >= 0; i--)
6935	{
6936	  rtx_insn *x;
6937
6938	  x = ready_element (&ready, i);
6939	  if (DEPS_LIST_FIRST (INSN_HARD_BACK_DEPS (x)) != NULL
6940	      || DEPS_LIST_FIRST (INSN_SPEC_BACK_DEPS (x)) != NULL)
6941	    {
6942	      ready_remove (&ready, i);
6943	      goto restart_debug_insn_loop;
6944	    }
6945	}
6946      for (i = ready.n_ready - 1; i >= 0; i--)
6947	{
6948	  rtx_insn *x;
6949
6950	  x = ready_element (&ready, i);
6951	  resolve_dependencies (x);
6952	}
6953      for (i = 0; i <= max_insn_queue_index; i++)
6954	{
6955	  rtx_insn_list *link;
6956	  while ((link = insn_queue[i]) != NULL)
6957	    {
6958	      rtx_insn *x = link->insn ();
6959	      insn_queue[i] = link->next ();
6960	      QUEUE_INDEX (x) = QUEUE_NOWHERE;
6961	      free_INSN_LIST_node (link);
6962	      resolve_dependencies (x);
6963	    }
6964	}
6965    }
6966
6967  if (!success)
6968    undo_all_replacements ();
6969
6970  /* Debug info.  */
6971  if (sched_verbose)
6972    {
6973      fprintf (sched_dump, ";;\tReady list (final):  ");
6974      debug_ready_list (&ready);
6975    }
6976
6977  if (modulo_ii == 0 && current_sched_info->queue_must_finish_empty)
6978    /* Sanity check -- queue must be empty now.  Meaningless if region has
6979       multiple bbs.  */
6980    gcc_assert (!q_size && !ready.n_ready && !ready.n_debug);
6981  else if (modulo_ii == 0)
6982    {
6983      /* We must maintain QUEUE_INDEX between blocks in region.  */
6984      for (i = ready.n_ready - 1; i >= 0; i--)
6985	{
6986	  rtx_insn *x;
6987
6988	  x = ready_element (&ready, i);
6989	  QUEUE_INDEX (x) = QUEUE_NOWHERE;
6990	  TODO_SPEC (x) = HARD_DEP;
6991	}
6992
6993      if (q_size)
6994	for (i = 0; i <= max_insn_queue_index; i++)
6995	  {
6996	    rtx_insn_list *link;
6997	    for (link = insn_queue[i]; link; link = link->next ())
6998	      {
6999		rtx_insn *x;
7000
7001		x = link->insn ();
7002		QUEUE_INDEX (x) = QUEUE_NOWHERE;
7003		TODO_SPEC (x) = HARD_DEP;
7004	      }
7005	    free_INSN_LIST_list (&insn_queue[i]);
7006	  }
7007    }
7008
7009  if (sched_pressure == SCHED_PRESSURE_MODEL)
7010    model_end_schedule ();
7011
7012  if (success)
7013    {
7014      commit_schedule (prev_head, tail, target_bb);
7015      if (sched_verbose)
7016	fprintf (sched_dump, ";;   total time = %d\n", clock_var);
7017    }
7018  else
7019    last_scheduled_insn = tail;
7020
7021  scheduled_insns.truncate (0);
7022
7023  if (!current_sched_info->queue_must_finish_empty
7024      || haifa_recovery_bb_recently_added_p)
7025    {
      /* INSN_TICK (minimum clock tick at which the insn becomes
         ready) may not be correct for insns in the subsequent
         blocks of the region.  We should use a correct value of
         `clock_var' or modify INSN_TICK.  It is better to keep
         clock_var equal to 0 at the start of a basic block.
         Therefore we modify INSN_TICK here.  */
7032      fix_inter_tick (NEXT_INSN (prev_head), last_scheduled_insn);
7033    }
7034
7035  if (targetm.sched.finish)
7036    {
7037      targetm.sched.finish (sched_dump, sched_verbose);
      /* The target might have added some instructions to the scheduled block
	 in its md_finish () hook.  These new insns don't have any data
	 initialized, and to identify them we extend h_i_d so that they'll
	 get zero luids.  */
7042      sched_extend_luids ();
7043    }
7044
7045  /* Update head/tail boundaries.  */
7046  head = NEXT_INSN (prev_head);
7047  tail = last_scheduled_insn;
7048
7049  if (sched_verbose)
7050    {
7051      fprintf (sched_dump, ";;   new head = %d\n;;   new tail = %d\n",
7052	       INSN_UID (head), INSN_UID (tail));
7053
7054      if (sched_verbose >= 2)
7055	{
7056	  dump_insn_stream (head, tail);
7057	  print_rank_for_schedule_stats (";; TOTAL ", &rank_for_schedule_stats,
7058					 NULL);
7059	}
7060
7061      fprintf (sched_dump, "\n");
7062    }
7063
7064  head = restore_other_notes (head, NULL);
7065
7066  current_sched_info->head = head;
7067  current_sched_info->tail = tail;
7068
7069  free_backtrack_queue ();
7070
7071  return success;
7072}
7073
/* Compute the priority of each insn in the block from HEAD to TAIL;
   return the number of insns in the block.  */
7075
7076int
7077set_priorities (rtx_insn *head, rtx_insn *tail)
7078{
7079  rtx_insn *insn;
7080  int n_insn;
7081  int sched_max_insns_priority =
7082	current_sched_info->sched_max_insns_priority;
7083  rtx_insn *prev_head;
7084
7085  if (head == tail && ! INSN_P (head))
7086    gcc_unreachable ();
7087
7088  n_insn = 0;
7089
7090  prev_head = PREV_INSN (head);
7091  for (insn = tail; insn != prev_head; insn = PREV_INSN (insn))
7092    {
7093      if (!INSN_P (insn))
7094	continue;
7095
7096      n_insn++;
7097      (void) priority (insn);
7098
7099      gcc_assert (INSN_PRIORITY_KNOWN (insn));
7100
7101      sched_max_insns_priority = MAX (sched_max_insns_priority,
7102				      INSN_PRIORITY (insn));
7103    }
7104
7105  current_sched_info->sched_max_insns_priority = sched_max_insns_priority;
7106
7107  return n_insn;
7108}
7109
/* Set dump and sched_verbose for the desired debugging output.  If no
   dump file was specified, but -fsched-verbose=N (any N), print to stderr.
   For -fsched-verbose=N, N >= 10, print everything to stderr.  */
7113void
7114setup_sched_dump (void)
7115{
7116  sched_verbose = sched_verbose_param;
7117  if (sched_verbose_param == 0 && dump_file)
7118    sched_verbose = 1;
7119  sched_dump = ((sched_verbose_param >= 10 || !dump_file)
7120		? stderr : dump_file);
7121}
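
/* For example, the routing implied by the code above:

     -fsched-verbose unset, dump file given  -> sched_verbose == 1,
						output to the dump file;
     -fsched-verbose=4, no dump file         -> output to stderr;
     -fsched-verbose=12, dump file given     -> output to stderr anyway,
						since N >= 10.  */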
7122
7123/* Allocate data for register pressure sensitive scheduling.  */
7124static void
7125alloc_global_sched_pressure_data (void)
7126{
7127  if (sched_pressure != SCHED_PRESSURE_NONE)
7128    {
7129      int i, max_regno = max_reg_num ();
7130
7131      if (sched_dump != NULL)
7132	/* We need info about pseudos for rtl dumps about pseudo
7133	   classes and costs.  */
7134	regstat_init_n_sets_and_refs ();
7135      ira_set_pseudo_classes (true, sched_verbose ? sched_dump : NULL);
7136      sched_regno_pressure_class
7137	= (enum reg_class *) xmalloc (max_regno * sizeof (enum reg_class));
7138      for (i = 0; i < max_regno; i++)
7139	sched_regno_pressure_class[i]
7140	  = (i < FIRST_PSEUDO_REGISTER
7141	     ? ira_pressure_class_translate[REGNO_REG_CLASS (i)]
7142	     : ira_pressure_class_translate[reg_allocno_class (i)]);
7143      curr_reg_live = BITMAP_ALLOC (NULL);
7144      if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
7145	{
7146	  saved_reg_live = BITMAP_ALLOC (NULL);
7147	  region_ref_regs = BITMAP_ALLOC (NULL);
7148	}
7149
7150      /* Calculate number of CALL_USED_REGS in register classes that
7151	 we calculate register pressure for.  */
7152      for (int c = 0; c < ira_pressure_classes_num; ++c)
7153	{
7154	  enum reg_class cl = ira_pressure_classes[c];
7155
7156	  call_used_regs_num[cl] = 0;
7157
7158	  for (int i = 0; i < ira_class_hard_regs_num[cl]; ++i)
7159	    if (call_used_regs[ira_class_hard_regs[cl][i]])
7160	      ++call_used_regs_num[cl];
7161	}
7162    }
7163}
7164
/* Free data for register pressure sensitive scheduling.  Also called
   from schedule_region when stopping sched-pressure early.  */
7167void
7168free_global_sched_pressure_data (void)
7169{
7170  if (sched_pressure != SCHED_PRESSURE_NONE)
7171    {
7172      if (regstat_n_sets_and_refs != NULL)
7173	regstat_free_n_sets_and_refs ();
7174      if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
7175	{
7176	  BITMAP_FREE (region_ref_regs);
7177	  BITMAP_FREE (saved_reg_live);
7178	}
7179      BITMAP_FREE (curr_reg_live);
7180      free (sched_regno_pressure_class);
7181    }
7182}
7183
7184/* Initialize some global state for the scheduler.  This function works
7185   with the common data shared between all the schedulers.  It is called
7186   from the scheduler specific initialization routine.  */
7187
7188void
7189sched_init (void)
7190{
  /* If the target defines cc0, disable speculative loads.  */
7192#ifdef HAVE_cc0
7193  flag_schedule_speculative_load = 0;
7194#endif
7195
7196  if (targetm.sched.dispatch (NULL, IS_DISPATCH_ON))
7197    targetm.sched.dispatch_do (NULL, DISPATCH_INIT);
7198
7199  if (live_range_shrinkage_p)
7200    sched_pressure = SCHED_PRESSURE_WEIGHTED;
7201  else if (flag_sched_pressure
7202	   && !reload_completed
7203	   && common_sched_info->sched_pass_id == SCHED_RGN_PASS)
7204    sched_pressure = ((enum sched_pressure_algorithm)
7205		      PARAM_VALUE (PARAM_SCHED_PRESSURE_ALGORITHM));
7206  else
7207    sched_pressure = SCHED_PRESSURE_NONE;
7208
7209  if (sched_pressure != SCHED_PRESSURE_NONE)
7210    ira_setup_eliminable_regset ();
7211
7212  /* Initialize SPEC_INFO.  */
7213  if (targetm.sched.set_sched_flags)
7214    {
7215      spec_info = &spec_info_var;
7216      targetm.sched.set_sched_flags (spec_info);
7217
7218      if (spec_info->mask != 0)
7219        {
7220          spec_info->data_weakness_cutoff =
7221            (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF) * MAX_DEP_WEAK) / 100;
7222          spec_info->control_weakness_cutoff =
7223            (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF)
7224             * REG_BR_PROB_BASE) / 100;
7225        }
7226      else
7227	/* So we won't read anything accidentally.  */
7228	spec_info = NULL;
7229
7230    }
7231  else
7232    /* So we won't read anything accidentally.  */
7233    spec_info = 0;
7234
7235  /* Initialize issue_rate.  */
7236  if (targetm.sched.issue_rate)
7237    issue_rate = targetm.sched.issue_rate ();
7238  else
7239    issue_rate = 1;
7240
7241  if (targetm.sched.first_cycle_multipass_dfa_lookahead
7242      /* Don't use max_issue with reg_pressure scheduling.  Multipass
7243	 scheduling and reg_pressure scheduling undo each other's decisions.  */
7244      && sched_pressure == SCHED_PRESSURE_NONE)
7245    dfa_lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
7246  else
7247    dfa_lookahead = 0;
7248
  /* Set to zero so that max_lookahead_tries will be recalculated.  */
7250  max_lookahead_tries = 0;
7251
7252  if (targetm.sched.init_dfa_pre_cycle_insn)
7253    targetm.sched.init_dfa_pre_cycle_insn ();
7254
7255  if (targetm.sched.init_dfa_post_cycle_insn)
7256    targetm.sched.init_dfa_post_cycle_insn ();
7257
7258  dfa_start ();
7259  dfa_state_size = state_size ();
7260
7261  init_alias_analysis ();
7262
7263  if (!sched_no_dce)
7264    df_set_flags (DF_LR_RUN_DCE);
7265  df_note_add_problem ();
7266
  /* More DF problems are needed for inter-loop dependence calculation
     in SMS.  */
7268  if (common_sched_info->sched_pass_id == SCHED_SMS_PASS)
7269    {
7270      df_rd_add_problem ();
7271      df_chain_add_problem (DF_DU_CHAIN + DF_UD_CHAIN);
7272    }
7273
7274  df_analyze ();
7275
7276  /* Do not run DCE after reload, as this can kill nops inserted
7277     by bundling.  */
7278  if (reload_completed)
7279    df_clear_flags (DF_LR_RUN_DCE);
7280
7281  regstat_compute_calls_crossed ();
7282
7283  if (targetm.sched.init_global)
7284    targetm.sched.init_global (sched_dump, sched_verbose, get_max_uid () + 1);
7285
7286  alloc_global_sched_pressure_data ();
7287
7288  curr_state = xmalloc (dfa_state_size);
7289}
7290
7291static void haifa_init_only_bb (basic_block, basic_block);
7292
7293/* Initialize data structures specific to the Haifa scheduler.  */
7294void
7295haifa_sched_init (void)
7296{
7297  setup_sched_dump ();
7298  sched_init ();
7299
7300  scheduled_insns.create (0);
7301
7302  if (spec_info != NULL)
7303    {
7304      sched_deps_info->use_deps_list = 1;
7305      sched_deps_info->generate_spec_deps = 1;
7306    }
7307
7308  /* Initialize luids, dependency caches, target and h_i_d for the
7309     whole function.  */
7310  {
7311    bb_vec_t bbs;
7312    bbs.create (n_basic_blocks_for_fn (cfun));
7313    basic_block bb;
7314
7315    sched_init_bbs ();
7316
7317    FOR_EACH_BB_FN (bb, cfun)
7318      bbs.quick_push (bb);
7319    sched_init_luids (bbs);
7320    sched_deps_init (true);
7321    sched_extend_target ();
7322    haifa_init_h_i_d (bbs);
7323
7324    bbs.release ();
7325  }
7326
7327  sched_init_only_bb = haifa_init_only_bb;
7328  sched_split_block = sched_split_block_1;
7329  sched_create_empty_bb = sched_create_empty_bb_1;
7330  haifa_recovery_bb_ever_added_p = false;
7331
7332  nr_begin_data = nr_begin_control = nr_be_in_data = nr_be_in_control = 0;
7333  before_recovery = 0;
7334  after_recovery = 0;
7335
7336  modulo_ii = 0;
7337}
7338
7339/* Finish work with the data specific to the Haifa scheduler.  */
7340void
7341haifa_sched_finish (void)
7342{
7343  sched_create_empty_bb = NULL;
7344  sched_split_block = NULL;
7345  sched_init_only_bb = NULL;
7346
7347  if (spec_info && spec_info->dump)
7348    {
7349      char c = reload_completed ? 'a' : 'b';
7350
7351      fprintf (spec_info->dump,
7352	       ";; %s:\n", current_function_name ());
7353
7354      fprintf (spec_info->dump,
7355               ";; Procedure %cr-begin-data-spec motions == %d\n",
7356               c, nr_begin_data);
7357      fprintf (spec_info->dump,
7358               ";; Procedure %cr-be-in-data-spec motions == %d\n",
7359               c, nr_be_in_data);
7360      fprintf (spec_info->dump,
7361               ";; Procedure %cr-begin-control-spec motions == %d\n",
7362               c, nr_begin_control);
7363      fprintf (spec_info->dump,
7364               ";; Procedure %cr-be-in-control-spec motions == %d\n",
7365               c, nr_be_in_control);
7366    }
7367
7368  scheduled_insns.release ();
7369
7370  /* Finalize h_i_d, dependency caches, and luids for the whole
7371     function.  Target will be finalized in md_global_finish ().  */
7372  sched_deps_finish ();
7373  sched_finish_luids ();
7374  current_sched_info = NULL;
7375  insn_queue = NULL;
7376  sched_finish ();
7377}
7378
7379/* Free global data used during insn scheduling.  This function works with
7380   the common data shared between the schedulers.  */
7381
7382void
7383sched_finish (void)
7384{
7385  haifa_finish_h_i_d ();
7386  free_global_sched_pressure_data ();
7387  free (curr_state);
7388
7389  if (targetm.sched.finish_global)
7390    targetm.sched.finish_global (sched_dump, sched_verbose);
7391
7392  end_alias_analysis ();
7393
7394  regstat_free_calls_crossed ();
7395
7396  dfa_finish ();
7397}
7398
7399/* Free all delay_pair structures that were recorded.  */
7400void
7401free_delay_pairs (void)
7402{
7403  if (delay_htab)
7404    {
7405      delay_htab->empty ();
7406      delay_htab_i2->empty ();
7407    }
7408}
7409
7410/* Fix INSN_TICKs of the instructions in the current block as well as
7411   INSN_TICKs of their dependents.
7412   HEAD and TAIL are the begin and the end of the current scheduled block.  */
7413static void
7414fix_inter_tick (rtx_insn *head, rtx_insn *tail)
7415{
7416  /* Set of instructions with corrected INSN_TICK.  */
7417  bitmap_head processed;
  /* ??? It is doubtful whether we should assume that cycle advance happens
     on basic block boundaries.  Basically insns that are unconditionally
     ready at the start of the block are preferable to those which have
     a one cycle dependency on an insn from the previous block.  */
7422  int next_clock = clock_var + 1;
7423
7424  bitmap_initialize (&processed, 0);
7425
  /* Iterate over the scheduled instructions and fix their INSN_TICKs and
     the INSN_TICKs of dependent instructions, so that INSN_TICKs are
     consistent across different blocks.  */
7429  for (tail = NEXT_INSN (tail); head != tail; head = NEXT_INSN (head))
7430    {
7431      if (INSN_P (head))
7432	{
7433	  int tick;
7434	  sd_iterator_def sd_it;
7435	  dep_t dep;
7436
7437	  tick = INSN_TICK (head);
7438	  gcc_assert (tick >= MIN_TICK);
7439
7440	  /* Fix INSN_TICK of instruction from just scheduled block.  */
7441	  if (bitmap_set_bit (&processed, INSN_LUID (head)))
7442	    {
7443	      tick -= next_clock;
7444
7445	      if (tick < MIN_TICK)
7446		tick = MIN_TICK;
7447
7448	      INSN_TICK (head) = tick;
7449	    }
7450
7451	  if (DEBUG_INSN_P (head))
7452	    continue;
7453
7454	  FOR_EACH_DEP (head, SD_LIST_RES_FORW, sd_it, dep)
7455	    {
7456	      rtx_insn *next;
7457
7458	      next = DEP_CON (dep);
7459	      tick = INSN_TICK (next);
7460
7461	      if (tick != INVALID_TICK
7462		  /* If NEXT has its INSN_TICK calculated, fix it.
7463		     If not - it will be properly calculated from
7464		     scratch later in fix_tick_ready.  */
7465		  && bitmap_set_bit (&processed, INSN_LUID (next)))
7466		{
7467		  tick -= next_clock;
7468
7469		  if (tick < MIN_TICK)
7470		    tick = MIN_TICK;
7471
7472		  if (tick > INTER_TICK (next))
7473		    INTER_TICK (next) = tick;
7474		  else
7475		    tick = INTER_TICK (next);
7476
7477		  INSN_TICK (next) = tick;
7478		}
7479	    }
7480	}
7481    }
7482  bitmap_clear (&processed);
7483}
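
/* An illustration, with made-up numbers: if the block just scheduled
   finished at CLOCK_VAR == 7, NEXT_CLOCK above is 8.  A dependent insn
   in a following block whose INSN_TICK was computed as 10 is rebased to
   10 - 8 == 2: it becomes ready two cycles into that block, whose clock
   restarts at zero.  INTER_TICK remembers the largest rebased tick seen
   for such an insn across its producers, and INSN_TICK is raised to
   that value (clamped below at MIN_TICK).  */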
7484
7485/* Check if NEXT is ready to be added to the ready or queue list.
7486   If "yes", add it to the proper list.
7487   Returns:
7488      -1 - is not ready yet,
7489       0 - added to the ready list,
7490   0 < N - queued for N cycles.  */
7491int
7492try_ready (rtx_insn *next)
7493{
7494  ds_t old_ts, new_ts;
7495
7496  old_ts = TODO_SPEC (next);
7497
7498  gcc_assert (!(old_ts & ~(SPECULATIVE | HARD_DEP | DEP_CONTROL | DEP_POSTPONED))
7499	      && (old_ts == HARD_DEP
7500		  || old_ts == DEP_POSTPONED
7501		  || (old_ts & SPECULATIVE)
7502		  || old_ts == DEP_CONTROL));
7503
7504  new_ts = recompute_todo_spec (next, false);
7505
7506  if (new_ts & (HARD_DEP | DEP_POSTPONED))
7507    gcc_assert (new_ts == old_ts
7508		&& QUEUE_INDEX (next) == QUEUE_NOWHERE);
7509  else if (current_sched_info->new_ready)
7510    new_ts = current_sched_info->new_ready (next, new_ts);
7511
  /* * If !(old_ts & SPECULATIVE) (e.g. HARD_DEP or 0), then the insn
       might have either its original pattern or a changed (speculative)
       one.  This is due to changing ebbs in region scheduling.
     * But if (old_ts & SPECULATIVE), then we are pretty sure that the
       insn has a speculative pattern.

     We can't assert (!(new_ts & HARD_DEP) || new_ts == old_ts) here because
     a control-speculative NEXT could have been discarded by sched-rgn.c
     (the same case as when discarded by can_schedule_ready_p ()).  */
7521
7522  if ((new_ts & SPECULATIVE)
7523      /* If (old_ts == new_ts), then (old_ts & SPECULATIVE) and we don't
7524	 need to change anything.  */
7525      && new_ts != old_ts)
7526    {
7527      int res;
7528      rtx new_pat;
7529
7530      gcc_assert ((new_ts & SPECULATIVE) && !(new_ts & ~SPECULATIVE));
7531
7532      res = haifa_speculate_insn (next, new_ts, &new_pat);
7533
7534      switch (res)
7535	{
7536	case -1:
7537	  /* It would be nice to change DEP_STATUS of all dependences,
7538	     which have ((DEP_STATUS & SPECULATIVE) == new_ts) to HARD_DEP,
7539	     so we won't reanalyze anything.  */
7540	  new_ts = HARD_DEP;
7541	  break;
7542
7543	case 0:
	  /* We follow the rule that every speculative insn
	     has a non-null ORIG_PAT.  */
7546	  if (!ORIG_PAT (next))
7547	    ORIG_PAT (next) = PATTERN (next);
7548	  break;
7549
7550	case 1:
7551	  if (!ORIG_PAT (next))
	    /* If we are going to overwrite the original pattern of the
	       insn, save it first.  */
7554	    ORIG_PAT (next) = PATTERN (next);
7555
7556	  res = haifa_change_pattern (next, new_pat);
7557	  gcc_assert (res);
7558	  break;
7559
7560	default:
7561	  gcc_unreachable ();
7562	}
7563    }
7564
  /* We need to restore the pattern only if (new_ts == 0), because otherwise
     it is either correct (new_ts & SPECULATIVE),
     or we simply don't care (new_ts & HARD_DEP).  */
7568
7569  gcc_assert (!ORIG_PAT (next)
7570	      || !IS_SPECULATION_BRANCHY_CHECK_P (next));
7571
7572  TODO_SPEC (next) = new_ts;
7573
7574  if (new_ts & (HARD_DEP | DEP_POSTPONED))
7575    {
7576      /* We can't assert (QUEUE_INDEX (next) == QUEUE_NOWHERE) here because
7577	 control-speculative NEXT could have been discarded by sched-rgn.c
7578	 (the same case as when discarded by can_schedule_ready_p ()).  */
7579      /*gcc_assert (QUEUE_INDEX (next) == QUEUE_NOWHERE);*/
7580
7581      change_queue_index (next, QUEUE_NOWHERE);
7582
7583      return -1;
7584    }
7585  else if (!(new_ts & BEGIN_SPEC)
7586	   && ORIG_PAT (next) && PREDICATED_PAT (next) == NULL_RTX
7587	   && !IS_SPECULATION_CHECK_P (next))
    /* We should change the pattern of every previously speculative
       instruction - and we determine whether NEXT was speculative by
       using the ORIG_PAT field.  There is one exception - speculation
       checks have ORIG_PAT set too, so skip them.  */
7592    {
7593      bool success = haifa_change_pattern (next, ORIG_PAT (next));
7594      gcc_assert (success);
7595      ORIG_PAT (next) = 0;
7596    }
7597
7598  if (sched_verbose >= 2)
7599    {
7600      fprintf (sched_dump, ";;\t\tdependencies resolved: insn %s",
7601               (*current_sched_info->print_insn) (next, 0));
7602
7603      if (spec_info && spec_info->dump)
7604        {
7605          if (new_ts & BEGIN_DATA)
7606            fprintf (spec_info->dump, "; data-spec;");
7607          if (new_ts & BEGIN_CONTROL)
7608            fprintf (spec_info->dump, "; control-spec;");
7609          if (new_ts & BE_IN_CONTROL)
7610            fprintf (spec_info->dump, "; in-control-spec;");
7611        }
7612      if (TODO_SPEC (next) & DEP_CONTROL)
7613	fprintf (sched_dump, " predicated");
7614      fprintf (sched_dump, "\n");
7615    }
7616
7617  adjust_priority (next);
7618
7619  return fix_tick_ready (next);
7620}
7621
7622/* Calculate INSN_TICK of NEXT and add it to either ready or queue list.  */
7623static int
7624fix_tick_ready (rtx_insn *next)
7625{
7626  int tick, delay;
7627
7628  if (!DEBUG_INSN_P (next) && !sd_lists_empty_p (next, SD_LIST_RES_BACK))
7629    {
7630      int full_p;
7631      sd_iterator_def sd_it;
7632      dep_t dep;
7633
7634      tick = INSN_TICK (next);
      /* If TICK is not equal to INVALID_TICK, then update
	 INSN_TICK of NEXT with the cost of the most recently resolved
	 dependence.  Otherwise, recalculate it from scratch.  */
7638      full_p = (tick == INVALID_TICK);
7639
7640      FOR_EACH_DEP (next, SD_LIST_RES_BACK, sd_it, dep)
7641        {
7642          rtx_insn *pro = DEP_PRO (dep);
7643          int tick1;
7644
7645	  gcc_assert (INSN_TICK (pro) >= MIN_TICK);
7646
7647          tick1 = INSN_TICK (pro) + dep_cost (dep);
7648          if (tick1 > tick)
7649            tick = tick1;
7650
7651	  if (!full_p)
7652	    break;
7653        }
7654    }
7655  else
7656    tick = -1;
7657
7658  INSN_TICK (next) = tick;
7659
7660  delay = tick - clock_var;
7661  if (delay <= 0 || sched_pressure != SCHED_PRESSURE_NONE || sched_fusion)
7662    delay = QUEUE_READY;
7663
7664  change_queue_index (next, delay);
7665
7666  return delay;
7667}
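
/* A worked example, with illustrative numbers only: if NEXT's only
   resolved backward dependence has a producer with INSN_TICK == 3 and
   dep_cost == 2, the loop above yields INSN_TICK (NEXT) == 5.  With
   CLOCK_VAR == 4 the delay is 5 - 4 == 1, so NEXT is queued for one
   cycle; with CLOCK_VAR >= 5 (or under pressure scheduling or fusion,
   where the delay is ignored) it goes straight onto the ready list as
   QUEUE_READY.  */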
7668
/* Move NEXT to the proper queue list with (DELAY >= 1),
   or add it to the ready list (DELAY == QUEUE_READY),
   or remove it from the ready and queue lists entirely
   (DELAY == QUEUE_NOWHERE).  */
7672static void
7673change_queue_index (rtx_insn *next, int delay)
7674{
7675  int i = QUEUE_INDEX (next);
7676
7677  gcc_assert (QUEUE_NOWHERE <= delay && delay <= max_insn_queue_index
7678	      && delay != 0);
7679  gcc_assert (i != QUEUE_SCHEDULED);
7680
7681  if ((delay > 0 && NEXT_Q_AFTER (q_ptr, delay) == i)
7682      || (delay < 0 && delay == i))
7683    /* We have nothing to do.  */
7684    return;
7685
7686  /* Remove NEXT from wherever it is now.  */
7687  if (i == QUEUE_READY)
7688    ready_remove_insn (next);
7689  else if (i >= 0)
7690    queue_remove (next);
7691
7692  /* Add it to the proper place.  */
7693  if (delay == QUEUE_READY)
7694    ready_add (readyp, next, false);
7695  else if (delay >= 1)
7696    queue_insn (next, delay, "change queue index");
7697
7698  if (sched_verbose >= 2)
7699    {
7700      fprintf (sched_dump, ";;\t\ttick updated: insn %s",
7701	       (*current_sched_info->print_insn) (next, 0));
7702
7703      if (delay == QUEUE_READY)
7704	fprintf (sched_dump, " into ready\n");
7705      else if (delay >= 1)
7706	fprintf (sched_dump, " into queue with cost=%d\n", delay);
7707      else
7708	fprintf (sched_dump, " removed from ready or queue lists\n");
7709    }
7710}
7711
7712static int sched_ready_n_insns = -1;
7713
7714/* Initialize per region data structures.  */
7715void
7716sched_extend_ready_list (int new_sched_ready_n_insns)
7717{
7718  int i;
7719
7720  if (sched_ready_n_insns == -1)
7721    /* At the first call we need to initialize one more choice_stack
7722       entry.  */
7723    {
7724      i = 0;
7725      sched_ready_n_insns = 0;
7726      scheduled_insns.reserve (new_sched_ready_n_insns);
7727    }
7728  else
7729    i = sched_ready_n_insns + 1;
7730
7731  ready.veclen = new_sched_ready_n_insns + issue_rate;
7732  ready.vec = XRESIZEVEC (rtx_insn *, ready.vec, ready.veclen);
7733
7734  gcc_assert (new_sched_ready_n_insns >= sched_ready_n_insns);
7735
7736  ready_try = (signed char *) xrecalloc (ready_try, new_sched_ready_n_insns,
7737					 sched_ready_n_insns,
7738					 sizeof (*ready_try));
7739
7740  /* We allocate +1 element to save initial state in the choice_stack[0]
7741     entry.  */
7742  choice_stack = XRESIZEVEC (struct choice_entry, choice_stack,
7743			     new_sched_ready_n_insns + 1);
7744
7745  for (; i <= new_sched_ready_n_insns; i++)
7746    {
7747      choice_stack[i].state = xmalloc (dfa_state_size);
7748
7749      if (targetm.sched.first_cycle_multipass_init)
7750	targetm.sched.first_cycle_multipass_init (&(choice_stack[i]
7751						    .target_data));
7752    }
7753
7754  sched_ready_n_insns = new_sched_ready_n_insns;
7755}
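
/* Note that a caller may grow the list incrementally; e.g.
   create_check_block_twin below calls
   sched_extend_ready_list (sched_ready_n_insns + 1) when a new check
   insn is added to the current region.  */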
7756
7757/* Free per region data structures.  */
7758void
7759sched_finish_ready_list (void)
7760{
7761  int i;
7762
7763  free (ready.vec);
7764  ready.vec = NULL;
7765  ready.veclen = 0;
7766
7767  free (ready_try);
7768  ready_try = NULL;
7769
7770  for (i = 0; i <= sched_ready_n_insns; i++)
7771    {
7772      if (targetm.sched.first_cycle_multipass_fini)
7773	targetm.sched.first_cycle_multipass_fini (&(choice_stack[i]
7774						    .target_data));
7775
7776      free (choice_stack [i].state);
7777    }
7778  free (choice_stack);
7779  choice_stack = NULL;
7780
7781  sched_ready_n_insns = -1;
7782}
7783
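/* Return the luid for X, which is not an insn proper (a note or a
   label); such objects always get luid zero.  */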
7784static int
7785haifa_luid_for_non_insn (rtx x)
7786{
7787  gcc_assert (NOTE_P (x) || LABEL_P (x));
7788
7789  return 0;
7790}
7791
7792/* Generates recovery code for INSN.  */
7793static void
7794generate_recovery_code (rtx_insn *insn)
7795{
7796  if (TODO_SPEC (insn) & BEGIN_SPEC)
7797    begin_speculative_block (insn);
7798
  /* Here we have an insn with no dependencies on
     instructions other than CHECK_SPEC ones.  */
7801
7802  if (TODO_SPEC (insn) & BE_IN_SPEC)
7803    add_to_speculative_block (insn);
7804}
7805
/* Helper function.
   Try to add speculative dependencies of type FS between TWIN and the
   consumers of INSN's forward dependencies.  */
7809static void
7810process_insn_forw_deps_be_in_spec (rtx insn, rtx_insn *twin, ds_t fs)
7811{
7812  sd_iterator_def sd_it;
7813  dep_t dep;
7814
7815  FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
7816    {
7817      ds_t ds;
7818      rtx_insn *consumer;
7819
7820      consumer = DEP_CON (dep);
7821
7822      ds = DEP_STATUS (dep);
7823
7824      if (/* If we want to create speculative dep.  */
7825	  fs
7826	  /* And we can do that because this is a true dep.  */
7827	  && (ds & DEP_TYPES) == DEP_TRUE)
7828	{
7829	  gcc_assert (!(ds & BE_IN_SPEC));
7830
7831	  if (/* If this dep can be overcome with 'begin speculation'.  */
7832	      ds & BEGIN_SPEC)
7833	    /* Then we have a choice: keep the dep 'begin speculative'
7834	       or transform it into 'be in speculative'.  */
7835	    {
	      if (/* In try_ready we assert that once an insn has become
		     ready it can be removed from the ready (or queue) list
		     only due to a backend decision.  Hence we can't let the
		     probability of the speculative dep decrease.  */
		  ds_weak (ds) <= ds_weak (fs))
7841		{
7842		  ds_t new_ds;
7843
7844		  new_ds = (ds & ~BEGIN_SPEC) | fs;
7845
7846		  if (/* consumer can 'be in speculative'.  */
7847		      sched_insn_is_legitimate_for_speculation_p (consumer,
7848								  new_ds))
7849		    /* Transform it to be in speculative.  */
7850		    ds = new_ds;
7851		}
7852	    }
7853	  else
7854	    /* Mark the dep as 'be in speculative'.  */
7855	    ds |= fs;
7856	}
7857
7858      {
7859	dep_def _new_dep, *new_dep = &_new_dep;
7860
7861	init_dep_1 (new_dep, twin, consumer, DEP_TYPE (dep), ds);
7862	sd_add_dep (new_dep, false);
7863      }
7864    }
7865}
7866
7867/* Generates recovery code for BEGIN speculative INSN.  */
7868static void
7869begin_speculative_block (rtx_insn *insn)
7870{
7871  if (TODO_SPEC (insn) & BEGIN_DATA)
7872    nr_begin_data++;
7873  if (TODO_SPEC (insn) & BEGIN_CONTROL)
7874    nr_begin_control++;
7875
7876  create_check_block_twin (insn, false);
7877
7878  TODO_SPEC (insn) &= ~BEGIN_SPEC;
7879}
7880
7881static void haifa_init_insn (rtx_insn *);
7882
7883/* Generates recovery code for BE_IN speculative INSN.  */
7884static void
7885add_to_speculative_block (rtx_insn *insn)
7886{
7887  ds_t ts;
7888  sd_iterator_def sd_it;
7889  dep_t dep;
7890  rtx_insn_list *twins = NULL;
7891  rtx_vec_t priorities_roots;
7892
7893  ts = TODO_SPEC (insn);
7894  gcc_assert (!(ts & ~BE_IN_SPEC));
7895
7896  if (ts & BE_IN_DATA)
7897    nr_be_in_data++;
7898  if (ts & BE_IN_CONTROL)
7899    nr_be_in_control++;
7900
7901  TODO_SPEC (insn) &= ~BE_IN_SPEC;
7902  gcc_assert (!TODO_SPEC (insn));
7903
7904  DONE_SPEC (insn) |= ts;
7905
7906  /* First we convert all simple checks to branchy.  */
7907  for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
7908       sd_iterator_cond (&sd_it, &dep);)
7909    {
7910      rtx_insn *check = DEP_PRO (dep);
7911
7912      if (IS_SPECULATION_SIMPLE_CHECK_P (check))
7913	{
7914	  create_check_block_twin (check, true);
7915
7916	  /* Restart search.  */
7917	  sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
7918	}
7919      else
7920	/* Continue search.  */
7921	sd_iterator_next (&sd_it);
7922    }
7923
7924  priorities_roots.create (0);
7925  clear_priorities (insn, &priorities_roots);
7926
7927  while (1)
7928    {
7929      rtx_insn *check, *twin;
7930      basic_block rec;
7931
7932      /* Get the first backward dependency of INSN.  */
7933      sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
7934      if (!sd_iterator_cond (&sd_it, &dep))
7935	/* INSN has no backward dependencies left.  */
7936	break;
7937
7938      gcc_assert ((DEP_STATUS (dep) & BEGIN_SPEC) == 0
7939		  && (DEP_STATUS (dep) & BE_IN_SPEC) != 0
7940		  && (DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);
7941
7942      check = DEP_PRO (dep);
7943
7944      gcc_assert (!IS_SPECULATION_CHECK_P (check) && !ORIG_PAT (check)
7945		  && QUEUE_INDEX (check) == QUEUE_NOWHERE);
7946
7947      rec = BLOCK_FOR_INSN (check);
7948
7949      twin = emit_insn_before (copy_insn (PATTERN (insn)), BB_END (rec));
7950      haifa_init_insn (twin);
7951
7952      sd_copy_back_deps (twin, insn, true);
7953
7954      if (sched_verbose && spec_info->dump)
7955        /* INSN_BB (insn) isn't determined for twin insns yet.
7956           So we can't use current_sched_info->print_insn.  */
7957        fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
7958                 INSN_UID (twin), rec->index);
7959
7960      twins = alloc_INSN_LIST (twin, twins);
7961
7962      /* Add dependences between TWIN and all appropriate
7963	 instructions from REC.  */
7964      FOR_EACH_DEP (insn, SD_LIST_SPEC_BACK, sd_it, dep)
7965	{
7966	  rtx_insn *pro = DEP_PRO (dep);
7967
7968	  gcc_assert (DEP_TYPE (dep) == REG_DEP_TRUE);
7969
7970	  /* INSN might have dependencies from the instructions from
7971	     several recovery blocks.  At this iteration we process those
7972	     producers that reside in REC.  */
7973	  if (BLOCK_FOR_INSN (pro) == rec)
7974	    {
7975	      dep_def _new_dep, *new_dep = &_new_dep;
7976
7977	      init_dep (new_dep, pro, twin, REG_DEP_TRUE);
7978	      sd_add_dep (new_dep, false);
7979	    }
7980	}
7981
7982      process_insn_forw_deps_be_in_spec (insn, twin, ts);
7983
7984      /* Remove all dependencies between INSN and insns in REC.  */
7985      for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
7986	   sd_iterator_cond (&sd_it, &dep);)
7987	{
7988	  rtx_insn *pro = DEP_PRO (dep);
7989
7990	  if (BLOCK_FOR_INSN (pro) == rec)
7991	    sd_delete_dep (sd_it);
7992	  else
7993	    sd_iterator_next (&sd_it);
7994	}
7995    }
7996
7997  /* We couldn't have added the dependencies between INSN and TWINS earlier
7998     because that would make TWINS appear in the INSN_BACK_DEPS (INSN).  */
7999  while (twins)
8000    {
8001      rtx_insn *twin;
8002      rtx_insn_list *next_node;
8003
8004      twin = twins->insn ();
8005
8006      {
8007	dep_def _new_dep, *new_dep = &_new_dep;
8008
8009	init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
8010	sd_add_dep (new_dep, false);
8011      }
8012
8013      next_node = twins->next ();
8014      free_INSN_LIST_node (twins);
8015      twins = next_node;
8016    }
8017
8018  calc_priorities (priorities_roots);
8019  priorities_roots.release ();
8020}
8021
/* Grow the array pointed to by P from OLD_NMEMB to NEW_NMEMB elements of
   SIZE bytes each, zero-filling only the new part.  */
8023void *
8024xrecalloc (void *p, size_t new_nmemb, size_t old_nmemb, size_t size)
8025{
8026  gcc_assert (new_nmemb >= old_nmemb);
8027  p = XRESIZEVAR (void, p, new_nmemb * size);
8028  memset (((char *) p) + old_nmemb * size, 0, (new_nmemb - old_nmemb) * size);
8029  return p;
8030}
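
/* A typical use, as in sched_extend_ready_list above: grow the
   ready_try array, preserving its old contents and zeroing the new
   tail:

     ready_try = (signed char *) xrecalloc (ready_try,
					    new_sched_ready_n_insns,
					    sched_ready_n_insns,
					    sizeof (*ready_try));  */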
8031
8032/* Helper function.
8033   Find fallthru edge from PRED.  */
8034edge
8035find_fallthru_edge_from (basic_block pred)
8036{
8037  edge e;
8038  basic_block succ;
8039
8040  succ = pred->next_bb;
8041  gcc_assert (succ->prev_bb == pred);
8042
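  /* Search whichever of the two edge lists is shorter.  */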
8043  if (EDGE_COUNT (pred->succs) <= EDGE_COUNT (succ->preds))
8044    {
8045      e = find_fallthru_edge (pred->succs);
8046
8047      if (e)
8048	{
8049	  gcc_assert (e->dest == succ);
8050	  return e;
8051	}
8052    }
8053  else
8054    {
8055      e = find_fallthru_edge (succ->preds);
8056
8057      if (e)
8058	{
8059	  gcc_assert (e->src == pred);
8060	  return e;
8061	}
8062    }
8063
8064  return NULL;
8065}
8066
8067/* Extend per basic block data structures.  */
8068static void
8069sched_extend_bb (void)
8070{
8071  /* The following is done to keep current_sched_info->next_tail non null.  */
8072  rtx_insn *end = BB_END (EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);
8073  rtx_insn *insn = DEBUG_INSN_P (end) ? prev_nondebug_insn (end) : end;
8074  if (NEXT_INSN (end) == 0
8075      || (!NOTE_P (insn)
8076	  && !LABEL_P (insn)
8077	  /* Don't emit a NOTE if it would end up before a BARRIER.  */
8078	  && !BARRIER_P (NEXT_INSN (end))))
8079    {
8080      rtx_note *note = emit_note_after (NOTE_INSN_DELETED, end);
8081      /* Make note appear outside BB.  */
8082      set_block_for_insn (note, NULL);
8083      BB_END (EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb) = end;
8084    }
8085}
8086
8087/* Init per basic block data structures.  */
8088void
8089sched_init_bbs (void)
8090{
8091  sched_extend_bb ();
8092}
8093
8094/* Initialize BEFORE_RECOVERY variable.  */
8095static void
8096init_before_recovery (basic_block *before_recovery_ptr)
8097{
8098  basic_block last;
8099  edge e;
8100
8101  last = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
8102  e = find_fallthru_edge_from (last);
8103
8104  if (e)
8105    {
      /* We create two basic blocks:
	 1. A single-instruction block that is inserted right after E->SRC
	    and jumps to
	 2. An empty block placed right before EXIT_BLOCK.
	 Recovery blocks will be emitted between these two blocks.  */
8111
8112      basic_block single, empty;
8113      rtx_insn *x;
8114      rtx label;
8115
      /* If the fallthru edge to exit that we've found comes from the
	 block we created before, don't do anything more.  */
8118      if (last == after_recovery)
8119	return;
8120
8121      adding_bb_to_current_region_p = false;
8122
8123      single = sched_create_empty_bb (last);
8124      empty = sched_create_empty_bb (single);
8125
8126      /* Add new blocks to the root loop.  */
8127      if (current_loops != NULL)
8128	{
8129	  add_bb_to_loop (single, (*current_loops->larray)[0]);
8130	  add_bb_to_loop (empty, (*current_loops->larray)[0]);
8131	}
8132
8133      single->count = last->count;
8134      empty->count = last->count;
8135      single->frequency = last->frequency;
8136      empty->frequency = last->frequency;
8137      BB_COPY_PARTITION (single, last);
8138      BB_COPY_PARTITION (empty, last);
8139
8140      redirect_edge_succ (e, single);
8141      make_single_succ_edge (single, empty, 0);
8142      make_single_succ_edge (empty, EXIT_BLOCK_PTR_FOR_FN (cfun),
8143			     EDGE_FALLTHRU);
8144
8145      label = block_label (empty);
8146      x = emit_jump_insn_after (gen_jump (label), BB_END (single));
8147      JUMP_LABEL (x) = label;
8148      LABEL_NUSES (label)++;
8149      haifa_init_insn (x);
8150
8151      emit_barrier_after (x);
8152
8153      sched_init_only_bb (empty, NULL);
8154      sched_init_only_bb (single, NULL);
8155      sched_extend_bb ();
8156
8157      adding_bb_to_current_region_p = true;
8158      before_recovery = single;
8159      after_recovery = empty;
8160
8161      if (before_recovery_ptr)
8162        *before_recovery_ptr = before_recovery;
8163
8164      if (sched_verbose >= 2 && spec_info->dump)
8165        fprintf (spec_info->dump,
8166		 ";;\t\tFixed fallthru to EXIT : %d->>%d->%d->>EXIT\n",
8167                 last->index, single->index, empty->index);
8168    }
8169  else
8170    before_recovery = last;
8171}
8172
8173/* Returns new recovery block.  */
8174basic_block
8175sched_create_recovery_block (basic_block *before_recovery_ptr)
8176{
8177  rtx label;
8178  rtx_insn *barrier;
8179  basic_block rec;
8180
8181  haifa_recovery_bb_recently_added_p = true;
8182  haifa_recovery_bb_ever_added_p = true;
8183
8184  init_before_recovery (before_recovery_ptr);
8185
8186  barrier = get_last_bb_insn (before_recovery);
8187  gcc_assert (BARRIER_P (barrier));
8188
8189  label = emit_label_after (gen_label_rtx (), barrier);
8190
8191  rec = create_basic_block (label, label, before_recovery);
8192
8193  /* A recovery block always ends with an unconditional jump.  */
8194  emit_barrier_after (BB_END (rec));
8195
8196  if (BB_PARTITION (before_recovery) != BB_UNPARTITIONED)
8197    BB_SET_PARTITION (rec, BB_COLD_PARTITION);
8198
8199  if (sched_verbose && spec_info->dump)
8200    fprintf (spec_info->dump, ";;\t\tGenerated recovery block rec%d\n",
8201             rec->index);
8202
8203  return rec;
8204}
8205
8206/* Create edges: FIRST_BB -> REC; FIRST_BB -> SECOND_BB; REC -> SECOND_BB
8207   and emit necessary jumps.  */
8208void
8209sched_create_recovery_edges (basic_block first_bb, basic_block rec,
8210			     basic_block second_bb)
8211{
8212  rtx label;
8213  rtx jump;
8214  int edge_flags;
8215
  /* Fix the incoming edge.  */
8217  /* ??? Which other flags should be specified?  */
8218  if (BB_PARTITION (first_bb) != BB_PARTITION (rec))
    /* The partition types can only differ here when the blocks are
       actually partitioned; "unpartitioned" blocks always compare
       equal.  */
8220    edge_flags = EDGE_CROSSING;
8221  else
8222    edge_flags = 0;
8223
8224  make_edge (first_bb, rec, edge_flags);
8225  label = block_label (second_bb);
8226  jump = emit_jump_insn_after (gen_jump (label), BB_END (rec));
8227  JUMP_LABEL (jump) = label;
8228  LABEL_NUSES (label)++;
8229
8230  if (BB_PARTITION (second_bb) != BB_PARTITION (rec))
    /* The partition types can only differ here when the blocks are
       actually partitioned; "unpartitioned" blocks always compare
       equal.  */
8232    {
8233      /* Rewritten from cfgrtl.c.  */
8234      if (flag_reorder_blocks_and_partition
8235	  && targetm_common.have_named_sections)
8236	{
8237	  /* We don't need the same note for the check because
8238	     any_condjump_p (check) == true.  */
8239	  CROSSING_JUMP_P (jump) = 1;
8240	}
8241      edge_flags = EDGE_CROSSING;
8242    }
8243  else
8244    edge_flags = 0;
8245
8246  make_single_succ_edge (rec, second_bb, edge_flags);
8247  if (dom_info_available_p (CDI_DOMINATORS))
8248    set_immediate_dominator (CDI_DOMINATORS, rec, first_bb);
8249}
8250
/* This function creates recovery code for INSN.  If MUTATE_P is nonzero,
   INSN is a simple check that should be converted to a branchy one.  */
8253static void
8254create_check_block_twin (rtx_insn *insn, bool mutate_p)
8255{
8256  basic_block rec;
8257  rtx_insn *label, *check, *twin;
8258  rtx check_pat;
8259  ds_t fs;
8260  sd_iterator_def sd_it;
8261  dep_t dep;
8262  dep_def _new_dep, *new_dep = &_new_dep;
8263  ds_t todo_spec;
8264
8265  gcc_assert (ORIG_PAT (insn) != NULL_RTX);
8266
8267  if (!mutate_p)
8268    todo_spec = TODO_SPEC (insn);
8269  else
8270    {
8271      gcc_assert (IS_SPECULATION_SIMPLE_CHECK_P (insn)
8272		  && (TODO_SPEC (insn) & SPECULATIVE) == 0);
8273
8274      todo_spec = CHECK_SPEC (insn);
8275    }
8276
8277  todo_spec &= SPECULATIVE;
8278
8279  /* Create recovery block.  */
8280  if (mutate_p || targetm.sched.needs_block_p (todo_spec))
8281    {
8282      rec = sched_create_recovery_block (NULL);
8283      label = BB_HEAD (rec);
8284    }
8285  else
8286    {
8287      rec = EXIT_BLOCK_PTR_FOR_FN (cfun);
8288      label = NULL;
8289    }
8290
8291  /* Emit CHECK.  */
8292  check_pat = targetm.sched.gen_spec_check (insn, label, todo_spec);
8293
8294  if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
8295    {
      /* To have mem_reg alive at the beginning of second_bb,
	 we emit the check BEFORE insn, so that after splitting,
	 insn will be at the beginning of second_bb, which will
	 provide us with correct life information.  */
8300      check = emit_jump_insn_before (check_pat, insn);
8301      JUMP_LABEL (check) = label;
8302      LABEL_NUSES (label)++;
8303    }
8304  else
8305    check = emit_insn_before (check_pat, insn);
8306
8307  /* Extend data structures.  */
8308  haifa_init_insn (check);
8309
8310  /* CHECK is being added to current region.  Extend ready list.  */
8311  gcc_assert (sched_ready_n_insns != -1);
8312  sched_extend_ready_list (sched_ready_n_insns + 1);
8313
8314  if (current_sched_info->add_remove_insn)
8315    current_sched_info->add_remove_insn (insn, 0);
8316
8317  RECOVERY_BLOCK (check) = rec;
8318
8319  if (sched_verbose && spec_info->dump)
8320    fprintf (spec_info->dump, ";;\t\tGenerated check insn : %s\n",
8321             (*current_sched_info->print_insn) (check, 0));
8322
8323  gcc_assert (ORIG_PAT (insn));
8324
8325  /* Initialize TWIN (twin is a duplicate of original instruction
8326     in the recovery block).  */
8327  if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
8328    {
8329      sd_iterator_def sd_it;
8330      dep_t dep;
8331
8332      FOR_EACH_DEP (insn, SD_LIST_RES_BACK, sd_it, dep)
8333	if ((DEP_STATUS (dep) & DEP_OUTPUT) != 0)
8334	  {
8335	    struct _dep _dep2, *dep2 = &_dep2;
8336
8337	    init_dep (dep2, DEP_PRO (dep), check, REG_DEP_TRUE);
8338
8339	    sd_add_dep (dep2, true);
8340	  }
8341
8342      twin = emit_insn_after (ORIG_PAT (insn), BB_END (rec));
8343      haifa_init_insn (twin);
8344
8345      if (sched_verbose && spec_info->dump)
8346	/* INSN_BB (insn) isn't determined for twin insns yet.
8347	   So we can't use current_sched_info->print_insn.  */
8348	fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
8349		 INSN_UID (twin), rec->index);
8350    }
8351  else
8352    {
8353      ORIG_PAT (check) = ORIG_PAT (insn);
8354      HAS_INTERNAL_DEP (check) = 1;
8355      twin = check;
8356      /* ??? We probably should change all OUTPUT dependencies to
8357	 (TRUE | OUTPUT).  */
8358    }
8359
8360  /* Copy all resolved back dependencies of INSN to TWIN.  This will
8361     provide correct value for INSN_TICK (TWIN).  */
8362  sd_copy_back_deps (twin, insn, true);
8363
8364  if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
8365    /* In case of branchy check, fix CFG.  */
8366    {
8367      basic_block first_bb, second_bb;
8368      rtx_insn *jump;
8369
8370      first_bb = BLOCK_FOR_INSN (check);
8371      second_bb = sched_split_block (first_bb, check);
8372
8373      sched_create_recovery_edges (first_bb, rec, second_bb);
8374
8375      sched_init_only_bb (second_bb, first_bb);
8376      sched_init_only_bb (rec, EXIT_BLOCK_PTR_FOR_FN (cfun));
8377
8378      jump = BB_END (rec);
8379      haifa_init_insn (jump);
8380    }
8381
8382  /* Move backward dependences from INSN to CHECK and
8383     move forward dependences from INSN to TWIN.  */
8384
8385  /* First, create dependencies between INSN's producers and CHECK & TWIN.  */
8386  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
8387    {
8388      rtx_insn *pro = DEP_PRO (dep);
8389      ds_t ds;
8390
8391      /* If BEGIN_DATA: [insn ~~TRUE~~> producer]:
8392	 check --TRUE--> producer  ??? or ANTI ???
8393	 twin  --TRUE--> producer
8394	 twin  --ANTI--> check
8395
8396	 If BEGIN_CONTROL: [insn ~~ANTI~~> producer]:
8397	 check --ANTI--> producer
8398	 twin  --ANTI--> producer
8399	 twin  --ANTI--> check
8400
8401	 If BE_IN_SPEC: [insn ~~TRUE~~> producer]:
8402	 check ~~TRUE~~> producer
8403	 twin  ~~TRUE~~> producer
8404	 twin  --ANTI--> check  */
8405
8406      ds = DEP_STATUS (dep);
8407
8408      if (ds & BEGIN_SPEC)
8409	{
8410	  gcc_assert (!mutate_p);
8411	  ds &= ~BEGIN_SPEC;
8412	}
8413
8414      init_dep_1 (new_dep, pro, check, DEP_TYPE (dep), ds);
8415      sd_add_dep (new_dep, false);
8416
8417      if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
8418	{
8419	  DEP_CON (new_dep) = twin;
8420	  sd_add_dep (new_dep, false);
8421	}
8422    }
8423
8424  /* Second, remove backward dependencies of INSN.  */
8425  for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
8426       sd_iterator_cond (&sd_it, &dep);)
8427    {
8428      if ((DEP_STATUS (dep) & BEGIN_SPEC)
8429	  || mutate_p)
8430	/* We can delete this dep because we overcome it with
8431	   BEGIN_SPECULATION.  */
8432	sd_delete_dep (sd_it);
8433      else
8434	sd_iterator_next (&sd_it);
8435    }
8436
8437  /* Future Speculations.  Determine what BE_IN speculations will be like.  */
8438  fs = 0;
8439
8440  /* Fields (DONE_SPEC (x) & BEGIN_SPEC) and CHECK_SPEC (x) are set only
8441     here.  */
8442
8443  gcc_assert (!DONE_SPEC (insn));
8444
8445  if (!mutate_p)
8446    {
8447      ds_t ts = TODO_SPEC (insn);
8448
8449      DONE_SPEC (insn) = ts & BEGIN_SPEC;
8450      CHECK_SPEC (check) = ts & BEGIN_SPEC;
8451
      /* How likely future speculations are to succeed depends solely
	 upon the initial BEGIN speculation.  */
8454      if (ts & BEGIN_DATA)
8455	fs = set_dep_weak (fs, BE_IN_DATA, get_dep_weak (ts, BEGIN_DATA));
8456      if (ts & BEGIN_CONTROL)
8457	fs = set_dep_weak (fs, BE_IN_CONTROL,
8458			   get_dep_weak (ts, BEGIN_CONTROL));
8459    }
8460  else
8461    CHECK_SPEC (check) = CHECK_SPEC (insn);
8462
8463  /* Future speculations: call the helper.  */
8464  process_insn_forw_deps_be_in_spec (insn, twin, fs);
8465
8466  if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
8467    {
      /* Which types of dependencies to use here is, in general, a
	 machine-dependent question...  But, for now, it is not.  */
8471
8472      if (!mutate_p)
8473	{
8474	  init_dep (new_dep, insn, check, REG_DEP_TRUE);
8475	  sd_add_dep (new_dep, false);
8476
8477	  init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
8478	  sd_add_dep (new_dep, false);
8479	}
8480      else
8481	{
8482	  if (spec_info->dump)
8483	    fprintf (spec_info->dump, ";;\t\tRemoved simple check : %s\n",
8484		     (*current_sched_info->print_insn) (insn, 0));
8485
8486	  /* Remove all dependencies of the INSN.  */
8487	  {
8488	    sd_it = sd_iterator_start (insn, (SD_LIST_FORW
8489					      | SD_LIST_BACK
8490					      | SD_LIST_RES_BACK));
8491	    while (sd_iterator_cond (&sd_it, &dep))
8492	      sd_delete_dep (sd_it);
8493	  }
8494
8495	  /* If former check (INSN) already was moved to the ready (or queue)
8496	     list, add new check (CHECK) there too.  */
8497	  if (QUEUE_INDEX (insn) != QUEUE_NOWHERE)
8498	    try_ready (check);
8499
8500	  /* Remove old check from instruction stream and free its
8501	     data.  */
8502	  sched_remove_insn (insn);
8503	}
8504
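      /* The recovery block must be scheduled after the check: express
	 this with an anti dependence CHECK --ANTI--> TWIN.  */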
      init_dep (new_dep, check, twin, REG_DEP_ANTI);
      sd_add_dep (new_dep, false);
    }
  else
    {
      init_dep_1 (new_dep, insn, check, REG_DEP_TRUE, DEP_TRUE | DEP_OUTPUT);
      sd_add_dep (new_dep, false);
    }

  if (!mutate_p)
    /* Fix priorities.  If MUTATE_P is nonzero, this is not necessary,
       because it'll be done later in add_to_speculative_block.  */
    {
      rtx_vec_t priorities_roots = rtx_vec_t ();

      clear_priorities (twin, &priorities_roots);
      calc_priorities (priorities_roots);
      priorities_roots.release ();
    }
}

/* Remove dependencies between instructions in the recovery block REC
   and the usual region instructions.  Inner dependences are kept so
   they need not be recomputed.  */
static void
fix_recovery_deps (basic_block rec)
{
  rtx_insn *note, *insn, *jump;
  rtx_insn_list *ready_list = 0;
  bitmap_head in_ready;
  rtx_insn_list *link;

  bitmap_initialize (&in_ready, 0);

  /* NOTE - a basic block note.  */
  note = NEXT_INSN (BB_HEAD (rec));
  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
  insn = BB_END (rec);
  gcc_assert (JUMP_P (insn));
  insn = PREV_INSN (insn);

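  /* Walk the insns of the recovery block backwards, from the insn just
     before the closing jump down to the basic block note, deleting
     forward dependences that leave the block and collecting their
     consumers.  */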
  do
    {
      sd_iterator_def sd_it;
      dep_t dep;

      for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
	   sd_iterator_cond (&sd_it, &dep);)
	{
	  rtx_insn *consumer = DEP_CON (dep);

	  if (BLOCK_FOR_INSN (consumer) != rec)
	    {
	      sd_delete_dep (sd_it);

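	      /* bitmap_set_bit returns true only if the bit was
		 previously clear, so each consumer is added to
		 READY_LIST at most once.  */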
	      if (bitmap_set_bit (&in_ready, INSN_LUID (consumer)))
		ready_list = alloc_INSN_LIST (consumer, ready_list);
	    }
	  else
	    {
	      gcc_assert ((DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);

	      sd_iterator_next (&sd_it);
	    }
	}

      insn = PREV_INSN (insn);
    }
  while (insn != note);

  bitmap_clear (&in_ready);

  /* Try to add instructions to the ready or queue list.  */
  for (link = ready_list; link; link = link->next ())
    try_ready (link->insn ());
  free_INSN_LIST_list (&ready_list);

  /* Fix the jump's dependences.  */
  insn = BB_HEAD (rec);
  jump = BB_END (rec);

  gcc_assert (LABEL_P (insn));
  insn = NEXT_INSN (insn);

  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
  add_jump_dependencies (insn, jump);
}

/* Change the pattern of INSN to NEW_PAT.  Invalidate cached haifa
   instruction data.  */
static bool
haifa_change_pattern (rtx_insn *insn, rtx new_pat)
{
  int t;

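  /* With IN_GROUP == 0, validate_change attempts to apply the change
     immediately instead of queueing it up for a later apply_change_group.  */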
  t = validate_change (insn, &PATTERN (insn), new_pat, 0);
  if (!t)
    return false;

  update_insn_after_change (insn);
  return true;
}

/* Return -1 if INSN cannot be speculated;
   0 if, for speculation with REQUEST mode, it is OK to use the
   current instruction pattern;
   1 if the pattern must be changed to *NEW_PAT for INSN to be
   speculative.  */
int
sched_speculate_insn (rtx_insn *insn, ds_t request, rtx *new_pat)
{
  gcc_assert (current_sched_info->flags & DO_SPECULATION
              && (request & SPECULATIVE)
	      && sched_insn_is_legitimate_for_speculation_p (insn, request));

  if ((request & spec_info->mask) != request)
    return -1;

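  /* A pure BE_IN request needs no pattern change: only BEGIN
     speculation requires a speculative pattern.  */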
  if (request & BE_IN_SPEC
      && !(request & BEGIN_SPEC))
    return 0;

  return targetm.sched.speculate_insn (insn, request, new_pat);
}

static int
haifa_speculate_insn (rtx_insn *insn, ds_t request, rtx *new_pat)
{
  gcc_assert (sched_deps_info->generate_spec_deps
	      && !IS_SPECULATION_CHECK_P (insn));

  if (HAS_INTERNAL_DEP (insn)
      || SCHED_GROUP_P (insn))
    return -1;

  return sched_speculate_insn (insn, request, new_pat);
}

/* Print some information about block BB, which starts with HEAD and
   ends with TAIL, before scheduling it.
   I is zero if the scheduler is about to start with a fresh ebb.  */
static void
dump_new_block_header (int i, basic_block bb, rtx_insn *head, rtx_insn *tail)
{
  if (!i)
    fprintf (sched_dump,
	     ";;   ======================================================\n");
  else
    fprintf (sched_dump,
	     ";;   =====================ADVANCING TO=====================\n");
  fprintf (sched_dump,
	   ";;   -- basic block %d from %d to %d -- %s reload\n",
	   bb->index, INSN_UID (head), INSN_UID (tail),
	   (reload_completed ? "after" : "before"));
  fprintf (sched_dump,
	   ";;   ======================================================\n");
  fprintf (sched_dump, "\n");
}

/* Unlink basic block notes and labels and save them, so they
   can be easily restored.  We unlink basic block notes in the ebb to
   provide back-compatibility with the previous code, as target backends
   assume that there will be only instructions between
   current_sched_info->{head and tail}.  We restore these notes as soon
   as we can.
   FIRST (LAST) is the first (last) basic block in the ebb.
   NB: In the usual case (FIRST == LAST) nothing is really done.  */
void
unlink_bb_notes (basic_block first, basic_block last)
{
  /* We DON'T unlink basic block notes of the first block in the ebb.  */
  if (first == last)
    return;

  bb_header = XNEWVEC (rtx_insn *, last_basic_block_for_fn (cfun));

  /* Make a sentinel.  */
  if (last->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
    bb_header[last->next_bb->index] = 0;

  first = first->next_bb;
  do
    {
      rtx_insn *prev, *label, *note, *next;

      label = BB_HEAD (last);
      if (LABEL_P (label))
	note = NEXT_INSN (label);
      else
	note = label;
      gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));

      prev = PREV_INSN (label);
      next = NEXT_INSN (note);
      gcc_assert (prev && next);

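      /* Splice the label/note pair out of the insn chain.  */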
      SET_NEXT_INSN (prev) = next;
      SET_PREV_INSN (next) = prev;

      bb_header[last->index] = label;

      if (last == first)
	break;

      last = last->prev_bb;
    }
  while (1);
}

/* Restore basic block notes.
   FIRST is the first basic block in the ebb.  */
static void
restore_bb_notes (basic_block first)
{
  if (!bb_header)
    return;

  /* We DON'T unlink basic block notes of the first block in the ebb.  */
  first = first->next_bb;
  /* Remember: FIRST is actually the second basic block in the ebb.  */

  while (first != EXIT_BLOCK_PTR_FOR_FN (cfun)
	 && bb_header[first->index])
    {
      rtx_insn *prev, *label, *note, *next;

      label = bb_header[first->index];
      prev = PREV_INSN (label);
      next = NEXT_INSN (prev);

      if (LABEL_P (label))
	note = NEXT_INSN (label);
      else
	note = label;
      gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));

      bb_header[first->index] = 0;

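      /* Splice the label/note pair back into the insn chain.  */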
      SET_NEXT_INSN (prev) = label;
      SET_NEXT_INSN (note) = next;
      SET_PREV_INSN (next) = note;

      first = first->next_bb;
    }

  free (bb_header);
  bb_header = 0;
}

/* Helper function.
   Fix the CFG after both in- and inter-block movement of
   control_flow_insn_p JUMP.  */
static void
fix_jump_move (rtx_insn *jump)
{
  basic_block bb, jump_bb, jump_bb_next;

  bb = BLOCK_FOR_INSN (PREV_INSN (jump));
  jump_bb = BLOCK_FOR_INSN (jump);
  jump_bb_next = jump_bb->next_bb;

  gcc_assert (common_sched_info->sched_pass_id == SCHED_EBB_PASS
	      || IS_SPECULATION_BRANCHY_CHECK_P (jump));

  if (!NOTE_INSN_BASIC_BLOCK_P (BB_END (jump_bb_next)))
    /* If jump_bb_next is not empty.  */
    BB_END (jump_bb) = BB_END (jump_bb_next);

  if (BB_END (bb) != PREV_INSN (jump))
    /* Then there are instructions after jump that should be placed
       into jump_bb_next.  */
    BB_END (jump_bb_next) = BB_END (bb);
  else
    /* Otherwise jump_bb_next is empty.  */
    BB_END (jump_bb_next) = NEXT_INSN (BB_HEAD (jump_bb_next));

  /* To make the assertion in move_insn happy.  */
  BB_END (bb) = PREV_INSN (jump);

  update_bb_for_insn (jump_bb_next);
}

/* Fix the CFG after interblock movement of control_flow_insn_p JUMP.  */
static void
move_block_after_check (rtx_insn *jump)
{
  basic_block bb, jump_bb, jump_bb_next;
  vec<edge, va_gc> *t;

  bb = BLOCK_FOR_INSN (PREV_INSN (jump));
  jump_bb = BLOCK_FOR_INSN (jump);
  jump_bb_next = jump_bb->next_bb;

  update_bb_for_insn (jump_bb);

  gcc_assert (IS_SPECULATION_CHECK_P (jump)
	      || IS_SPECULATION_CHECK_P (BB_END (jump_bb_next)));

  unlink_block (jump_bb_next);
  link_block (jump_bb_next, bb);

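  /* Rotate the successor vectors: BB takes JUMP_BB's successors,
     JUMP_BB takes JUMP_BB_NEXT's, and JUMP_BB_NEXT takes BB's original
     successors, saved in T.  */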
  t = bb->succs;
  bb->succs = 0;
  move_succs (&(jump_bb->succs), bb);
  move_succs (&(jump_bb_next->succs), jump_bb);
  move_succs (&t, jump_bb_next);

  df_mark_solutions_dirty ();

  common_sched_info->fix_recovery_cfg
    (bb->index, jump_bb->index, jump_bb_next->index);
}

/* Helper function for move_block_after_check.
   This function attaches the edge vector pointed to by SUCCSP to
   block TO.  */
static void
move_succs (vec<edge, va_gc> **succsp, basic_block to)
{
  edge e;
  edge_iterator ei;

  gcc_assert (to->succs == 0);

  to->succs = *succsp;

  FOR_EACH_EDGE (e, ei, to->succs)
    e->src = to;

  *succsp = 0;
}

/* Remove INSN from the instruction stream.
   INSN should not have any dependencies.  */
static void
sched_remove_insn (rtx_insn *insn)
{
  sd_finish_insn (insn);

  change_queue_index (insn, QUEUE_NOWHERE);
  current_sched_info->add_remove_insn (insn, 1);
  delete_insn (insn);
}

/* Clear priorities of all instructions that are forward dependent on INSN.
   Store in the vector pointed to by ROOTS_PTR the insns on which priority ()
   should be invoked to initialize all cleared priorities.  */
static void
clear_priorities (rtx_insn *insn, rtx_vec_t *roots_ptr)
{
  sd_iterator_def sd_it;
  dep_t dep;
  bool insn_is_root_p = true;

  gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);

  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
    {
      rtx_insn *pro = DEP_PRO (dep);

      if (INSN_PRIORITY_STATUS (pro) >= 0
	  && QUEUE_INDEX (insn) != QUEUE_SCHEDULED)
	{
	  /* If DEP doesn't contribute to priority then INSN itself should
	     be added to priority roots.  */
	  if (contributes_to_priority_p (dep))
	    insn_is_root_p = false;

	  INSN_PRIORITY_STATUS (pro) = -1;
	  clear_priorities (pro, roots_ptr);
	}
    }

  if (insn_is_root_p)
    roots_ptr->safe_push (insn);
}

/* Recompute priorities of instructions whose priorities might have been
   changed.  ROOTS is a vector of instructions whose priority computation will
   trigger initialization of all cleared priorities.  */
static void
calc_priorities (rtx_vec_t roots)
{
  int i;
  rtx_insn *insn;

  FOR_EACH_VEC_ELT (roots, i, insn)
    priority (insn);
}


/* Add dependences between JUMP and other instructions in the recovery
   block.  INSN is the first insn in the recovery block.  */
static void
add_jump_dependencies (rtx_insn *insn, rtx_insn *jump)
{
  do
    {
      insn = NEXT_INSN (insn);
      if (insn == jump)
	break;

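      /* Give each insn that has no forward dependence yet an anti
	 dependence on the jump, so that it cannot be moved past the
	 end of the block.  */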
      if (dep_list_size (insn, SD_LIST_FORW) == 0)
	{
	  dep_def _new_dep, *new_dep = &_new_dep;

	  init_dep (new_dep, insn, jump, REG_DEP_ANTI);
	  sd_add_dep (new_dep, false);
	}
    }
  while (1);

  gcc_assert (!sd_lists_empty_p (jump, SD_LIST_BACK));
}

/* Extend data structures for logical insn UID.  */
void
sched_extend_luids (void)
{
  int new_luids_max_uid = get_max_uid () + 1;

  sched_luids.safe_grow_cleared (new_luids_max_uid);
}

/* Initialize LUID for INSN.  */
void
sched_init_insn_luid (rtx_insn *insn)
{
  int i = INSN_P (insn) ? 1 : common_sched_info->luid_for_non_insn (insn);
  int luid;

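  /* I is the number of luids to reserve: 1 for a real insn, whatever
     the luid_for_non_insn hook returns for notes and labels.  A
     negative value means the insn gets no luid at all.  */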
  if (i >= 0)
    {
      luid = sched_max_luid;
      sched_max_luid += i;
    }
  else
    luid = -1;

  SET_INSN_LUID (insn, luid);
}

/* Initialize luids for BBS.
   The hook common_sched_info->luid_for_non_insn () is used to determine
   if notes, labels, etc. need luids.  */
void
sched_init_luids (bb_vec_t bbs)
{
  int i;
  basic_block bb;

  sched_extend_luids ();
  FOR_EACH_VEC_ELT (bbs, i, bb)
    {
      rtx_insn *insn;

      FOR_BB_INSNS (bb, insn)
	sched_init_insn_luid (insn);
    }
}

/* Free LUIDs.  */
void
sched_finish_luids (void)
{
  sched_luids.release ();
  sched_max_luid = 1;
}

/* Return the logical uid of INSN.  Helpful while debugging.  */
int
insn_luid (rtx_insn *insn)
{
  return INSN_LUID (insn);
}

/* Extend per insn data in the target.  */
void
sched_extend_target (void)
{
  if (targetm.sched.h_i_d_extended)
    targetm.sched.h_i_d_extended ();
}

/* Extend global scheduler structures (those that live across calls to
   schedule_block) to include information about just emitted INSN.  */
static void
extend_h_i_d (void)
{
  int reserve = (get_max_uid () + 1 - h_i_d.length ());
  if (reserve > 0
      && ! h_i_d.space (reserve))
    {
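      /* Grow to 1.5 * get_max_uid () to amortize future extensions.  */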
      h_i_d.safe_grow_cleared (3 * get_max_uid () / 2);
      sched_extend_target ();
    }
}

/* Initialize the h_i_d entry of INSN with default values.
   Values that are not explicitly initialized here hold zero.  */
static void
init_h_i_d (rtx_insn *insn)
{
  if (INSN_LUID (insn) > 0)
    {
      INSN_COST (insn) = -1;
      QUEUE_INDEX (insn) = QUEUE_NOWHERE;
      INSN_TICK (insn) = INVALID_TICK;
      INSN_EXACT_TICK (insn) = INVALID_TICK;
      INTER_TICK (insn) = INVALID_TICK;
      TODO_SPEC (insn) = HARD_DEP;
      INSN_AUTOPREF_MULTIPASS_DATA (insn)[0].status
	= AUTOPREF_MULTIPASS_DATA_UNINITIALIZED;
      INSN_AUTOPREF_MULTIPASS_DATA (insn)[1].status
	= AUTOPREF_MULTIPASS_DATA_UNINITIALIZED;
    }
}

/* Initialize haifa_insn_data for BBS.  */
void
haifa_init_h_i_d (bb_vec_t bbs)
{
  int i;
  basic_block bb;

  extend_h_i_d ();
  FOR_EACH_VEC_ELT (bbs, i, bb)
    {
      rtx_insn *insn;

      FOR_BB_INSNS (bb, insn)
	init_h_i_d (insn);
    }
}

/* Finalize haifa_insn_data.  */
void
haifa_finish_h_i_d (void)
{
  int i;
  haifa_insn_data_t data;
  struct reg_use_data *use, *next;

  FOR_EACH_VEC_ELT (h_i_d, i, data)
    {
      free (data->max_reg_pressure);
      free (data->reg_pressure);
      for (use = data->reg_use_list; use != NULL; use = next)
	{
	  next = use->next_insn_use;
	  free (use);
	}
    }
  h_i_d.release ();
}

/* Init data for the new insn INSN.  */
static void
haifa_init_insn (rtx_insn *insn)
{
  gcc_assert (insn != NULL);

  sched_extend_luids ();
  sched_init_insn_luid (insn);
  sched_extend_target ();
  sched_deps_init (false);
  extend_h_i_d ();
  init_h_i_d (insn);

  if (adding_bb_to_current_region_p)
    {
      sd_init_insn (insn);

      /* Extend dependency caches by one element.  */
      extend_dependency_caches (1, false);
    }
  if (sched_pressure != SCHED_PRESSURE_NONE)
    init_insn_reg_pressure_info (insn);
}

/* Init data for the new basic block BB which comes after AFTER.  */
static void
haifa_init_only_bb (basic_block bb, basic_block after)
{
  gcc_assert (bb != NULL);

  sched_init_bbs ();

  if (common_sched_info->add_block)
    /* This changes only data structures of the front-end.  */
    common_sched_info->add_block (bb, after);
}

/* A generic version of sched_split_block ().  */
basic_block
sched_split_block_1 (basic_block first_bb, rtx after)
{
  edge e;

  e = split_block (first_bb, after);
  gcc_assert (e->src == first_bb);

  /* sched_split_block emits a note if *check == BB_END.  Probably it
     is better to rip that note off.  */

  return e->dest;
}

/* A generic version of sched_create_empty_bb ().  */
basic_block
sched_create_empty_bb_1 (basic_block after)
{
  return create_empty_bb (after);
}

/* Insert PAT as an INSN into the schedule and update the necessary data
   structures to account for it.  */
rtx_insn *
sched_emit_insn (rtx pat)
{
  rtx_insn *insn = emit_insn_before (pat, first_nonscheduled_insn ());
  haifa_init_insn (insn);

  if (current_sched_info->add_remove_insn)
    current_sched_info->add_remove_insn (insn, 0);

  (*current_sched_info->begin_schedule_ready) (insn);
  scheduled_insns.safe_push (insn);

  last_scheduled_insn = insn;
  return insn;
}

/* This function returns a candidate satisfying dispatch constraints from
   the ready list.  */

static rtx_insn *
ready_remove_first_dispatch (struct ready_list *ready)
{
  int i;
  rtx_insn *insn = ready_element (ready, 0);

  if (ready->n_ready == 1
      || !INSN_P (insn)
      || INSN_CODE (insn) < 0
      || !active_insn_p (insn)
      || targetm.sched.dispatch (insn, FITS_DISPATCH_WINDOW))
    return ready_remove_first (ready);

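  /* Scan the rest of the ready list for the first insn that fits the
     current dispatch window.  */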
  for (i = 1; i < ready->n_ready; i++)
    {
      insn = ready_element (ready, i);

      if (!INSN_P (insn)
	  || INSN_CODE (insn) < 0
	  || !active_insn_p (insn))
	continue;

      if (targetm.sched.dispatch (insn, FITS_DISPATCH_WINDOW))
	{
	  /* Return the i-th element of ready.  */
	  insn = ready_remove (ready, i);
	  return insn;
	}
    }

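  /* Nothing fits the window.  If the target reports a dispatch
     violation, just take the head of the list; otherwise look for an
     insn the target classifies as a compare (IS_CMP).  */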
  if (targetm.sched.dispatch (NULL, DISPATCH_VIOLATION))
    return ready_remove_first (ready);

  for (i = 1; i < ready->n_ready; i++)
    {
      insn = ready_element (ready, i);

      if (!INSN_P (insn)
	  || INSN_CODE (insn) < 0
	  || !active_insn_p (insn))
	continue;

      /* Return the i-th element of ready.  */
      if (targetm.sched.dispatch (insn, IS_CMP))
	return ready_remove (ready, i);
    }

  return ready_remove_first (ready);
}

/* Return the number of ready insns in the ready list.  */

int
number_in_ready (void)
{
  return ready.n_ready;
}

/* Return the I-th element of the ready list.  */

rtx_insn *
get_ready_element (int i)
{
  return ready_element (&ready, i);
}

#endif /* INSN_SCHEDULING */