1/* Instruction scheduling pass.
2   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
3   2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4   Free Software Foundation, Inc.
5   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
6   and currently maintained by, Jim Wilson (wilson@cygnus.com)
7
8This file is part of GCC.
9
10GCC is free software; you can redistribute it and/or modify it under
11the terms of the GNU General Public License as published by the Free
12Software Foundation; either version 3, or (at your option) any later
13version.
14
15GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16WARRANTY; without even the implied warranty of MERCHANTABILITY or
17FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
18for more details.
19
20You should have received a copy of the GNU General Public License
21along with GCC; see the file COPYING3.  If not see
22<http://www.gnu.org/licenses/>.  */
23
24/* Instruction scheduling pass.  This file, along with sched-deps.c,
25   contains the generic parts.  The actual entry point for the normal
26   instruction scheduling pass is found in sched-rgn.c.
27
28   We compute insn priorities based on data dependencies.  Flow
29   analysis only creates a fraction of the data-dependencies we must
30   observe: namely, only those dependencies which the combiner can be
31   expected to use.  For this pass, we must therefore create the
32   remaining dependencies we need to observe; register dependencies,
33   memory dependencies, dependencies to keep function calls in order,
34   and the dependence between a conditional branch and the setting of
35   condition codes are all dealt with here.
36
37   The scheduler first traverses the data flow graph, starting with
38   the last instruction, and proceeding to the first, assigning values
39   to insn_priority as it goes.  This sorts the instructions
40   topologically by data dependence.
41
42   Once priorities have been established, we order the insns using
43   list scheduling.  This works as follows: starting with a list of
44   all the ready insns, and sorted according to priority number, we
45   schedule the insn from the end of the list by placing its
46   predecessors in the list according to their priority order.  We
47   consider this insn scheduled by setting the pointer to the "end" of
48   the list to point to the previous insn.  When an insn has no
49   predecessors, we either queue it until sufficient time has elapsed
50   or add it to the ready list.  As the instructions are scheduled or
51   when stalls are introduced, the queue advances and dumps insns into
52   the ready list.  When all insns down to the lowest priority have
53   been scheduled, the critical path of the basic block has been made
54   as short as possible.  The remaining insns are then scheduled in the
55   remaining slots.  (An illustrative sketch of this loop follows this
   comment block.)
56
57   The following list shows the order in which we want to break ties
58   among insns in the ready list:
59
60   1.  choose insn with the longest path to end of bb, ties
61   broken by
62   2.  choose insn with least contribution to register pressure,
63   ties broken by
64   3.  prefer in-block over interblock motion, ties broken by
65   4.  prefer useful over speculative motion, ties broken by
66   5.  choose insn with largest control flow probability, ties
67   broken by
68   6.  choose insn with the least dependences upon the previously
69   scheduled insn, ties broken by
70   7.  choose the insn which has the most insns dependent on it, or finally
71   8.  choose insn with lowest UID.
72
73   Memory references complicate matters.  Only if we can be certain
74   that memory references are not part of the data dependency graph
75   (via true, anti, or output dependence), can we move operations past
76   memory references.  To first approximation, reads can be done
77   independently, while writes introduce dependencies.  Better
78   approximations will yield fewer dependencies.
79
80   Before reload, an extended analysis of interblock data dependences
81   is required for interblock scheduling.  This is performed in
82   compute_block_backward_dependences ().
83
84   Dependencies set up by memory references are treated in exactly the
85   same way as other dependencies, by using insn backward dependences
86   INSN_BACK_DEPS.  INSN_BACK_DEPS are translated into forward dependences
87   INSN_FORW_DEPS for the purpose of forward list scheduling.
88
89   Having optimized the critical path, we may have also unduly
90   extended the lifetimes of some registers.  If an operation requires
91   that constants be loaded into registers, it is certainly desirable
92   to load those constants as early as necessary, but no earlier.
93   I.e., it will not do to load up a bunch of registers at the
94   beginning of a basic block only to use them at the end, if they
95   could be loaded later, since this may result in excessive register
96   utilization.
97
98   Note that since branches are never in basic blocks, but only end
99   basic blocks, this pass will not move branches.  But that is ok,
100   since we can use GNU's delayed branch scheduling pass to take care
101   of this case.
102
103   Also note that no further optimizations based on algebraic
104   identities are performed, so this pass would be a good one to
105   perform instruction splitting, such as breaking up a multiply
106   instruction into shifts and adds where that is profitable.
107
108   Given the memory aliasing analysis that this pass should perform,
109   it should be possible to remove redundant stores to memory, and to
110   load values from registers instead of hitting memory.
111
112   Before reload, speculative insns are moved only if a 'proof' exists
113   that no exception will be caused by this, and if no live registers
114   exist that inhibit the motion (live register constraints are not
115   represented by data dependence edges).
116
117   This pass must update information that subsequent passes expect to
118   be correct.  Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
119   reg_n_calls_crossed, and reg_live_length.  Also, BB_HEAD, BB_END.
120
121   The information in the line number notes is carefully retained by
122   this pass.  Notes that refer to the starting and ending of
123   exception regions are also carefully retained by this pass.  All
124   other NOTE insns are grouped in their same relative order at the
125   beginning of basic blocks and regions that have been scheduled.  */
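
/* The following is a minimal, illustrative sketch of the list-scheduling
   loop described in the comment above.  It is excluded from compilation
   and is not the implementation used by GCC (that lives in
   schedule_block () and sched-rgn.c); the toy_insn type and the helper
   names are hypothetical stand-ins for this file's real data
   structures.  */
#if 0
struct toy_insn
{
  int priority;	/* Length of the longest path to the end of the block.  */
};

/* Repeatedly emit the highest-priority ready insn.  EMIT stands in for
   committing an insn to the schedule; resolving its dependents and
   refilling READY is omitted here.  */
static void
toy_list_schedule (struct toy_insn **ready, int n_ready,
		   void (*emit) (struct toy_insn *))
{
  while (n_ready > 0)
    {
      int i, best = 0;

      /* The real scheduler breaks priority ties with the ordered list of
	 heuristics enumerated above (see rank_for_schedule below).  */
      for (i = 1; i < n_ready; i++)
	if (ready[i]->priority > ready[best]->priority)
	  best = i;

      emit (ready[best]);
      ready[best] = ready[--n_ready];
    }
}
#endif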
126
127#include "config.h"
128#include "system.h"
129#include "coretypes.h"
130#include "tm.h"
131#include "toplev.h"
132#include "rtl.h"
133#include "tm_p.h"
134#include "hard-reg-set.h"
135#include "regs.h"
136#include "function.h"
137#include "flags.h"
138#include "insn-config.h"
139#include "insn-attr.h"
140#include "except.h"
141#include "toplev.h"
142#include "recog.h"
143#include "sched-int.h"
144#include "target.h"
145#include "output.h"
146#include "params.h"
147#include "vecprim.h"
148#include "dbgcnt.h"
149#include "cfgloop.h"
150#include "ira.h"
151
152#ifdef INSN_SCHEDULING
153
154/* issue_rate is the number of insns that can be scheduled in the same
155   machine cycle.  It can be defined in the config/mach/mach.h file,
156   otherwise we set it to 1.  */
157
158int issue_rate;
159
160/* sched-verbose controls the amount of debugging output the
161   scheduler prints.  It is controlled by -fsched-verbose=N:
162   N>0 and no -DSR : the output is directed to stderr.
163   N>=10 will direct the printouts to stderr (regardless of -dSR).
164   N=1: same as -dSR.
165   N=2: bb's probabilities, detailed ready list info, unit/insn info.
166   N=3: rtl at abort point, control-flow, regions info.
167   N=5: dependences info.  */
168
169static int sched_verbose_param = 0;
170int sched_verbose = 0;
171
172/* Debugging file.  All printouts are sent to dump, which is always set,
173   either to stderr, or to the dump listing file (-dRS).  */
174FILE *sched_dump = 0;
175
176/* fix_sched_param() is called from toplev.c upon detection
177   of the -fsched-verbose=N option.  */
178
179void
180fix_sched_param (const char *param, const char *val)
181{
182  if (!strcmp (param, "verbose"))
183    sched_verbose_param = atoi (val);
184  else
185    warning (0, "fix_sched_param: unknown param: %s", param);
186}
187
188/* This is a placeholder for the scheduler parameters common
189   to all schedulers.  */
190struct common_sched_info_def *common_sched_info;
191
192#define INSN_TICK(INSN)	(HID (INSN)->tick)
193#define INTER_TICK(INSN) (HID (INSN)->inter_tick)
194
195/* If INSN_TICK of an instruction is equal to INVALID_TICK,
196   then it should be recalculated from scratch.  */
197#define INVALID_TICK (-(max_insn_queue_index + 1))
198/* The minimal value of the INSN_TICK of an instruction.  */
199#define MIN_TICK (-max_insn_queue_index)
200
201/* Issue points are used to distinguish between instructions in max_issue ().
202   For now, all instructions are equally good.  */
203#define ISSUE_POINTS(INSN) 1
204
205/* List of important notes we must keep around.  This is a pointer to the
206   last element in the list.  */
207rtx note_list;
208
209static struct spec_info_def spec_info_var;
210/* Description of the speculative part of the scheduling.
211   If NULL - no speculation.  */
212spec_info_t spec_info = NULL;
213
214/* True if a recovery block was added during scheduling of the current
215   block.  Used to determine whether we need to fix INSN_TICKs.  */
216static bool haifa_recovery_bb_recently_added_p;
217
218/* True if a recovery block was added during this scheduling pass.
219   Used to determine whether the memory pools of dependencies should be
220   empty after finishing the current region.  */
221bool haifa_recovery_bb_ever_added_p;
222
223/* Counters of different types of speculative instructions.  */
224static int nr_begin_data, nr_be_in_data, nr_begin_control, nr_be_in_control;
225
226/* Array used in {unlink, restore}_bb_notes.  */
227static rtx *bb_header = 0;
228
229/* Basic block after which recovery blocks will be created.  */
230static basic_block before_recovery;
231
232/* Basic block just before the EXIT_BLOCK and after recovery, if we have
233   created it.  */
234basic_block after_recovery;
235
236/* FALSE if we add bb to another region, so we don't need to initialize it.  */
237bool adding_bb_to_current_region_p = true;
238
239/* Queues, etc.  */
240
241/* An instruction is ready to be scheduled when all insns preceding it
242   have already been scheduled.  It is important to ensure that all
243   insns which use its result will not be executed until its result
244   has been computed.  An insn is maintained in one of four structures:
245
246   (P) the "Pending" set of insns which cannot be scheduled until
247   their dependencies have been satisfied.
248   (Q) the "Queued" set of insns that can be scheduled when sufficient
249   time has passed.
250   (R) the "Ready" list of unscheduled, uncommitted insns.
251   (S) the "Scheduled" list of insns.
252
253   Initially, all insns are either "Pending" or "Ready" depending on
254   whether their dependencies are satisfied.
255
256   Insns move from the "Ready" list to the "Scheduled" list as they
257   are committed to the schedule.  As this occurs, the insns in the
258   "Pending" list have their dependencies satisfied and move to either
259   the "Ready" list or the "Queued" set depending on whether
260   sufficient time has passed to make them ready.  As time passes,
261   insns move from the "Queued" set to the "Ready" list.
262
263   The "Pending" list (P) consists of the insns in the INSN_FORW_DEPS of the
264   unscheduled insns, i.e., those that are ready, queued, and pending.
265   The "Queued" set (Q) is implemented by the variable `insn_queue'.
266   The "Ready" list (R) is implemented by the variables `ready' and
267   `n_ready'.
268   The "Scheduled" list (S) is the new insn chain built by this pass.
269
270   The transition (R->S) is implemented in the scheduling loop in
271   `schedule_block' when the best insn to schedule is chosen.
272   The transitions (P->R and P->Q) are implemented in `schedule_insn' as
273   insns move from the ready list to the scheduled list.
274   The transition (Q->R) is implemented in 'queue_to_ready' as time
275   passes or stalls are introduced.  */
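
/* A compact, illustrative model (excluded from compilation) of the four
   "homes" an insn can occupy and the transitions described above.  The
   enum, the DEPS_SATISFIED and STALL parameters, and the function itself
   are hypothetical; the real transitions are implemented by
   schedule_block (), schedule_insn () and queue_to_ready () in this
   file.  */
#if 0
enum toy_state { TOY_PENDING, TOY_QUEUED, TOY_READY, TOY_SCHEDULED };

static enum toy_state
toy_transition (enum toy_state s, int deps_satisfied, int stall)
{
  switch (s)
    {
    case TOY_PENDING:
      /* P->Q or P->R once the last backward dependence is resolved.  */
      if (!deps_satisfied)
	return TOY_PENDING;
      return stall ? TOY_QUEUED : TOY_READY;
    case TOY_QUEUED:
      /* Q->R after enough cycles have elapsed.  */
      return stall ? TOY_QUEUED : TOY_READY;
    case TOY_READY:
      /* R->S when the insn is chosen as the best one to schedule.  */
      return TOY_SCHEDULED;
    default:
      return s;
    }
}
#endif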
276
277/* Implement a circular buffer to delay instructions until sufficient
278   time has passed.  For the new pipeline description interface,
279   MAX_INSN_QUEUE_INDEX is a power of two minus one which is not less
280   than the maximal time of instruction execution computed by genattr.c
281   on the basis of the maximal time of functional unit reservations and
282   of getting a result.  This is the longest time an insn may be queued.  */
283
284static rtx *insn_queue;
285static int q_ptr = 0;
286static int q_size = 0;
287#define NEXT_Q(X) (((X)+1) & max_insn_queue_index)
288#define NEXT_Q_AFTER(X, C) (((X)+C) & max_insn_queue_index)
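/* For example, if max_insn_queue_index is 63 (a power of two minus one,
   as required above), then NEXT_Q (63) wraps around to 0 and
   NEXT_Q_AFTER (62, 3) is (62 + 3) & 63 == 1, so queue indices always
   stay within [0, max_insn_queue_index].  */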
289
290#define QUEUE_SCHEDULED (-3)
291#define QUEUE_NOWHERE   (-2)
292#define QUEUE_READY     (-1)
293/* QUEUE_SCHEDULED - INSN is scheduled.
294   QUEUE_NOWHERE   - INSN isn't scheduled yet and is neither in the
295   queue nor in the ready list.
296   QUEUE_READY     - INSN is in ready list.
297   N >= 0 - INSN queued for X [where NEXT_Q_AFTER (q_ptr, X) == N] cycles.  */
298
299#define QUEUE_INDEX(INSN) (HID (INSN)->queue_index)
300
301/* The following variable value describes all current and future
302   reservations of the processor units.  */
303state_t curr_state;
304
305/* The following variable value is the size of memory representing all
306   current and future reservations of the processor units.  */
307size_t dfa_state_size;
308
309/* The following array is used to find the best insn from ready when
310   the automaton pipeline interface is used.  */
311char *ready_try = NULL;
312
313/* The ready list.  */
314struct ready_list ready = {NULL, 0, 0, 0, 0};
315
316/* The pointer to the ready list (to be removed).  */
317static struct ready_list *readyp = &ready;
318
319/* Scheduling clock.  */
320static int clock_var;
321
322static int may_trap_exp (const_rtx, int);
323
324/* Nonzero iff the address is composed of at most 1 register.  */
325#define CONST_BASED_ADDRESS_P(x)			\
326  (REG_P (x)					\
327   || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS	\
328	|| (GET_CODE (x) == LO_SUM))			\
329       && (CONSTANT_P (XEXP (x, 0))			\
330	   || CONSTANT_P (XEXP (x, 1)))))
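/* For example, (reg R), (plus (reg R) (const_int 4)) and
   (lo_sum (reg R) (symbol_ref "x")) all satisfy this predicate, since
   the address uses at most one register and the remaining operand, if
   any, is a constant.  */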
331
332/* Returns a class that insn with GET_DEST(insn)=x may belong to,
333   as found by analyzing insn's expression.  */
334
335
336static int haifa_luid_for_non_insn (rtx x);
337
338/* Haifa version of sched_info hooks common to all headers.  */
339const struct common_sched_info_def haifa_common_sched_info =
340  {
341    NULL, /* fix_recovery_cfg */
342    NULL, /* add_block */
343    NULL, /* estimate_number_of_insns */
344    haifa_luid_for_non_insn, /* luid_for_non_insn */
345    SCHED_PASS_UNKNOWN /* sched_pass_id */
346  };
347
348const struct sched_scan_info_def *sched_scan_info;
349
350/* Mapping from instruction UID to its Logical UID.  */
351VEC (int, heap) *sched_luids = NULL;
352
353/* Next LUID to assign to an instruction.  */
354int sched_max_luid = 1;
355
356/* Haifa Instruction Data.  */
357VEC (haifa_insn_data_def, heap) *h_i_d = NULL;
358
359void (* sched_init_only_bb) (basic_block, basic_block);
360
361/* Split block function.  Different schedulers might use different functions
362   to keep their internal data consistent.  */
363basic_block (* sched_split_block) (basic_block, rtx);
364
365/* Create empty basic block after the specified block.  */
366basic_block (* sched_create_empty_bb) (basic_block);
367
368static int
369may_trap_exp (const_rtx x, int is_store)
370{
371  enum rtx_code code;
372
373  if (x == 0)
374    return TRAP_FREE;
375  code = GET_CODE (x);
376  if (is_store)
377    {
378      if (code == MEM && may_trap_p (x))
379	return TRAP_RISKY;
380      else
381	return TRAP_FREE;
382    }
383  if (code == MEM)
384    {
385      /* The insn uses memory:  a volatile load.  */
386      if (MEM_VOLATILE_P (x))
387	return IRISKY;
388      /* An exception-free load.  */
389      if (!may_trap_p (x))
390	return IFREE;
391      /* A load with 1 base register, to be further checked.  */
392      if (CONST_BASED_ADDRESS_P (XEXP (x, 0)))
393	return PFREE_CANDIDATE;
394      /* No info on the load, to be further checked.  */
395      return PRISKY_CANDIDATE;
396    }
397  else
398    {
399      const char *fmt;
400      int i, insn_class = TRAP_FREE;
401
402      /* Neither store nor load, check if it may cause a trap.  */
403      if (may_trap_p (x))
404	return TRAP_RISKY;
405      /* Recursive step: walk the insn...  */
406      fmt = GET_RTX_FORMAT (code);
407      for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
408	{
409	  if (fmt[i] == 'e')
410	    {
411	      int tmp_class = may_trap_exp (XEXP (x, i), is_store);
412	      insn_class = WORST_CLASS (insn_class, tmp_class);
413	    }
414	  else if (fmt[i] == 'E')
415	    {
416	      int j;
417	      for (j = 0; j < XVECLEN (x, i); j++)
418		{
419		  int tmp_class = may_trap_exp (XVECEXP (x, i, j), is_store);
420		  insn_class = WORST_CLASS (insn_class, tmp_class);
421		  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
422		    break;
423		}
424	    }
425	  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
426	    break;
427	}
428      return insn_class;
429    }
430}
431
432/* Classifies rtx X of an insn for the purpose of verifying that X can be
433   executed speculatively (and consequently the insn can be moved
434   speculatively), by examining X, returning:
435   TRAP_RISKY: store, or risky non-load insn (e.g. division by variable).
436   TRAP_FREE: non-load insn.
437   IFREE: load from a globally safe location.
438   IRISKY: volatile load.
439   PFREE_CANDIDATE, PRISKY_CANDIDATE: a load that needs to be checked for
440   being either PFREE or PRISKY.  */
441
442static int
443haifa_classify_rtx (const_rtx x)
444{
445  int tmp_class = TRAP_FREE;
446  int insn_class = TRAP_FREE;
447  enum rtx_code code;
448
449  if (GET_CODE (x) == PARALLEL)
450    {
451      int i, len = XVECLEN (x, 0);
452
453      for (i = len - 1; i >= 0; i--)
454	{
455	  tmp_class = haifa_classify_rtx (XVECEXP (x, 0, i));
456	  insn_class = WORST_CLASS (insn_class, tmp_class);
457	  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
458	    break;
459	}
460    }
461  else
462    {
463      code = GET_CODE (x);
464      switch (code)
465	{
466	case CLOBBER:
467	  /* Test if it is a 'store'.  */
468	  tmp_class = may_trap_exp (XEXP (x, 0), 1);
469	  break;
470	case SET:
471	  /* Test if it is a store.  */
472	  tmp_class = may_trap_exp (SET_DEST (x), 1);
473	  if (tmp_class == TRAP_RISKY)
474	    break;
475	  /* Test if it is a load.  */
476	  tmp_class =
477	    WORST_CLASS (tmp_class,
478			 may_trap_exp (SET_SRC (x), 0));
479	  break;
480	case COND_EXEC:
481	  tmp_class = haifa_classify_rtx (COND_EXEC_CODE (x));
482	  if (tmp_class == TRAP_RISKY)
483	    break;
484	  tmp_class = WORST_CLASS (tmp_class,
485				   may_trap_exp (COND_EXEC_TEST (x), 0));
486	  break;
487	case TRAP_IF:
488	  tmp_class = TRAP_RISKY;
489	  break;
490	default:;
491	}
492      insn_class = tmp_class;
493    }
494
495  return insn_class;
496}
497
498int
499haifa_classify_insn (const_rtx insn)
500{
501  return haifa_classify_rtx (PATTERN (insn));
502}
503
504/* Forward declarations.  */
505
506static int priority (rtx);
507static int rank_for_schedule (const void *, const void *);
508static void swap_sort (rtx *, int);
509static void queue_insn (rtx, int);
510static int schedule_insn (rtx);
511static void adjust_priority (rtx);
512static void advance_one_cycle (void);
513static void extend_h_i_d (void);
514
515
516/* Notes handling mechanism:
517   =========================
518   Generally, NOTES are saved before scheduling and restored after scheduling.
519   The scheduler distinguishes between two types of notes:
520
521   (1) LOOP_BEGIN, LOOP_END, SETJMP, EHREGION_BEG, EHREGION_END notes:
522   Before scheduling a region, a pointer to the note is added to the insn
523   that follows or precedes it.  (This happens as part of the data dependence
524   computation).  After scheduling an insn, the pointer contained in it is
525   used for regenerating the corresponding note (in reemit_notes).
526
527   (2) All other notes (e.g. INSN_DELETED):  Before scheduling a block,
528   these notes are put in a list (in rm_other_notes() and
529   unlink_other_notes ()).  After scheduling the block, these notes are
530   inserted at the beginning of the block (in schedule_block()).  */
531
532static void ready_add (struct ready_list *, rtx, bool);
533static rtx ready_remove_first (struct ready_list *);
534
535static void queue_to_ready (struct ready_list *);
536static int early_queue_to_ready (state_t, struct ready_list *);
537
538static void debug_ready_list (struct ready_list *);
539
540/* The following functions are used to implement multi-pass scheduling
541   on the first cycle.  */
542static rtx ready_remove (struct ready_list *, int);
543static void ready_remove_insn (rtx);
544
545static int choose_ready (struct ready_list *, rtx *);
546
547static void fix_inter_tick (rtx, rtx);
548static int fix_tick_ready (rtx);
549static void change_queue_index (rtx, int);
550
551/* The following functions are used to implement scheduling of data/control
552   speculative instructions.  */
553
554static void extend_h_i_d (void);
555static void init_h_i_d (rtx);
556static void generate_recovery_code (rtx);
557static void process_insn_forw_deps_be_in_spec (rtx, rtx, ds_t);
558static void begin_speculative_block (rtx);
559static void add_to_speculative_block (rtx);
560static void init_before_recovery (basic_block *);
561static void create_check_block_twin (rtx, bool);
562static void fix_recovery_deps (basic_block);
563static void haifa_change_pattern (rtx, rtx);
564static void dump_new_block_header (int, basic_block, rtx, rtx);
565static void restore_bb_notes (basic_block);
566static void fix_jump_move (rtx);
567static void move_block_after_check (rtx);
568static void move_succs (VEC(edge,gc) **, basic_block);
569static void sched_remove_insn (rtx);
570static void clear_priorities (rtx, rtx_vec_t *);
571static void calc_priorities (rtx_vec_t);
572static void add_jump_dependencies (rtx, rtx);
573#ifdef ENABLE_CHECKING
574static int has_edge_p (VEC(edge,gc) *, int);
575static void check_cfg (rtx, rtx);
576#endif
577
578#endif /* INSN_SCHEDULING */
579
580/* Point to state used for the current scheduling pass.  */
581struct haifa_sched_info *current_sched_info;
582
583#ifndef INSN_SCHEDULING
584void
585schedule_insns (void)
586{
587}
588#else
589
590/* Do register pressure sensitive insn scheduling if the flag is
591   set.  */
592bool sched_pressure_p;
593
594/* Map regno -> its cover class.  The map is defined only when
595   SCHED_PRESSURE_P is true.  */
596enum reg_class *sched_regno_cover_class;
597
598/* The current register pressure.  Only elements corresponding to cover
599   classes are defined.  */
600static int curr_reg_pressure[N_REG_CLASSES];
601
602/* Saved value of the previous array.  */
603static int saved_reg_pressure[N_REG_CLASSES];
604
605/* Registers live at the given scheduling point.  */
606static bitmap curr_reg_live;
607
608/* Saved value of the previous array.  */
609static bitmap saved_reg_live;
610
611/* Registers mentioned in the current region.  */
612static bitmap region_ref_regs;
613
614/* Initiate register pressure related info for scheduling the current
615   region.  Currently it only clears the registers mentioned in the
616   current region.  */
617void
618sched_init_region_reg_pressure_info (void)
619{
620  bitmap_clear (region_ref_regs);
621}
622
623/* Update current register pressure related info after birth (if
624   BIRTH_P) or death of register REGNO.  */
625static void
626mark_regno_birth_or_death (int regno, bool birth_p)
627{
628  enum reg_class cover_class;
629
630  cover_class = sched_regno_cover_class[regno];
631  if (regno >= FIRST_PSEUDO_REGISTER)
632    {
633      if (cover_class != NO_REGS)
634	{
635	  if (birth_p)
636	    {
637	      bitmap_set_bit (curr_reg_live, regno);
638	      curr_reg_pressure[cover_class]
639		+= ira_reg_class_nregs[cover_class][PSEUDO_REGNO_MODE (regno)];
640	    }
641	  else
642	    {
643	      bitmap_clear_bit (curr_reg_live, regno);
644	      curr_reg_pressure[cover_class]
645		-= ira_reg_class_nregs[cover_class][PSEUDO_REGNO_MODE (regno)];
646	    }
647	}
648    }
649  else if (cover_class != NO_REGS
650	   && ! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
651    {
652      if (birth_p)
653	{
654	  bitmap_set_bit (curr_reg_live, regno);
655	  curr_reg_pressure[cover_class]++;
656	}
657      else
658	{
659	  bitmap_clear_bit (curr_reg_live, regno);
660	  curr_reg_pressure[cover_class]--;
661	}
662    }
663}
664
665/* Initiate current register pressure related info from living
666   registers given by LIVE.  */
667static void
668initiate_reg_pressure_info (bitmap live)
669{
670  int i;
671  unsigned int j;
672  bitmap_iterator bi;
673
674  for (i = 0; i < ira_reg_class_cover_size; i++)
675    curr_reg_pressure[ira_reg_class_cover[i]] = 0;
676  bitmap_clear (curr_reg_live);
677  EXECUTE_IF_SET_IN_BITMAP (live, 0, j, bi)
678    if (current_nr_blocks == 1 || bitmap_bit_p (region_ref_regs, j))
679      mark_regno_birth_or_death (j, true);
680}
681
682/* Mark registers in X as mentioned in the current region.  */
683static void
684setup_ref_regs (rtx x)
685{
686  int i, j, regno;
687  const RTX_CODE code = GET_CODE (x);
688  const char *fmt;
689
690  if (REG_P (x))
691    {
692      regno = REGNO (x);
693      if (regno >= FIRST_PSEUDO_REGISTER)
694	bitmap_set_bit (region_ref_regs, REGNO (x));
695      else
696	for (i = hard_regno_nregs[regno][GET_MODE (x)] - 1; i >= 0; i--)
697	  bitmap_set_bit (region_ref_regs, regno + i);
698      return;
699    }
700  fmt = GET_RTX_FORMAT (code);
701  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
702    if (fmt[i] == 'e')
703      setup_ref_regs (XEXP (x, i));
704    else if (fmt[i] == 'E')
705      {
706	for (j = 0; j < XVECLEN (x, i); j++)
707	  setup_ref_regs (XVECEXP (x, i, j));
708      }
709}
710
711/* Initiate current register pressure related info at the start of
712   basic block BB.  */
713static void
714initiate_bb_reg_pressure_info (basic_block bb)
715{
716  unsigned int i;
717  rtx insn;
718
719  if (current_nr_blocks > 1)
720    FOR_BB_INSNS (bb, insn)
721      if (NONDEBUG_INSN_P (insn))
722	setup_ref_regs (PATTERN (insn));
723  initiate_reg_pressure_info (df_get_live_in (bb));
724#ifdef EH_RETURN_DATA_REGNO
725  if (bb_has_eh_pred (bb))
726    for (i = 0; ; ++i)
727      {
728	unsigned int regno = EH_RETURN_DATA_REGNO (i);
729
730	if (regno == INVALID_REGNUM)
731	  break;
732	if (! bitmap_bit_p (df_get_live_in (bb), regno))
733	  mark_regno_birth_or_death (regno, true);
734      }
735#endif
736}
737
738/* Save current register pressure related info.  */
739static void
740save_reg_pressure (void)
741{
742  int i;
743
744  for (i = 0; i < ira_reg_class_cover_size; i++)
745    saved_reg_pressure[ira_reg_class_cover[i]]
746      = curr_reg_pressure[ira_reg_class_cover[i]];
747  bitmap_copy (saved_reg_live, curr_reg_live);
748}
749
750/* Restore saved register pressure related info.  */
751static void
752restore_reg_pressure (void)
753{
754  int i;
755
756  for (i = 0; i < ira_reg_class_cover_size; i++)
757    curr_reg_pressure[ira_reg_class_cover[i]]
758      = saved_reg_pressure[ira_reg_class_cover[i]];
759  bitmap_copy (curr_reg_live, saved_reg_live);
760}
761
762/* Return TRUE if the register is dying after its USE.  */
763static bool
764dying_use_p (struct reg_use_data *use)
765{
766  struct reg_use_data *next;
767
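  /* USE is linked into a cyclic chain of all uses of the same register
     (via next_regno_use).  The register dies here only if every other
     use in that chain belongs to an insn that is either a debug insn or
     has already been scheduled.  */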
768  for (next = use->next_regno_use; next != use; next = next->next_regno_use)
769    if (NONDEBUG_INSN_P (next->insn)
770	&& QUEUE_INDEX (next->insn) != QUEUE_SCHEDULED)
771      return false;
772  return true;
773}
774
775/* Print info about the current register pressure and its excess for
776   each cover class.  */
777static void
778print_curr_reg_pressure (void)
779{
780  int i;
781  enum reg_class cl;
782
783  fprintf (sched_dump, ";;\t");
784  for (i = 0; i < ira_reg_class_cover_size; i++)
785    {
786      cl = ira_reg_class_cover[i];
787      gcc_assert (curr_reg_pressure[cl] >= 0);
788      fprintf (sched_dump, "  %s:%d(%d)", reg_class_names[cl],
789	       curr_reg_pressure[cl],
790	       curr_reg_pressure[cl] - ira_available_class_regs[cl]);
791    }
792  fprintf (sched_dump, "\n");
793}
794
795/* Pointer to the last instruction scheduled.  Used by rank_for_schedule,
796   so that insns independent of the last scheduled insn will be preferred
797   over dependent instructions.  */
798
799static rtx last_scheduled_insn;
800
801/* Cached cost of the instruction.  Use the function below to get the
802   cost of the insn.  -1 here means that the field is not initialized.  */
803#define INSN_COST(INSN)	(HID (INSN)->cost)
804
805/* Compute cost of executing INSN.
806   This is the number of cycles between instruction issue and
807   instruction results.  */
808int
809insn_cost (rtx insn)
810{
811  int cost;
812
813  if (sel_sched_p ())
814    {
815      if (recog_memoized (insn) < 0)
816	return 0;
817
818      cost = insn_default_latency (insn);
819      if (cost < 0)
820	cost = 0;
821
822      return cost;
823    }
824
825  cost = INSN_COST (insn);
826
827  if (cost < 0)
828    {
829      /* A USE insn, or something else we don't need to
830	 understand.  We can't pass these directly to
831	 result_ready_cost or insn_default_latency because it will
832	 trigger a fatal error for unrecognizable insns.  */
833      if (recog_memoized (insn) < 0)
834	{
835	  INSN_COST (insn) = 0;
836	  return 0;
837	}
838      else
839	{
840	  cost = insn_default_latency (insn);
841	  if (cost < 0)
842	    cost = 0;
843
844	  INSN_COST (insn) = cost;
845	}
846    }
847
848  return cost;
849}
850
851/* Compute cost of dependence LINK.
852   This is the number of cycles between instruction issue and
853   instruction results.
854   ??? We also use this function to call recog_memoized on all insns.  */
855int
856dep_cost_1 (dep_t link, dw_t dw)
857{
858  rtx insn = DEP_PRO (link);
859  rtx used = DEP_CON (link);
860  int cost;
861
862  /* A USE insn should never require the value used to be computed.
863     This allows the computation of a function's result and parameter
864     values to overlap the return and call.  We don't care about the
865     dependence cost when only decreasing register pressure.  */
866  if (recog_memoized (used) < 0)
867    {
868      cost = 0;
869      recog_memoized (insn);
870    }
871  else
872    {
873      enum reg_note dep_type = DEP_TYPE (link);
874
875      cost = insn_cost (insn);
876
877      if (INSN_CODE (insn) >= 0)
878	{
879	  if (dep_type == REG_DEP_ANTI)
880	    cost = 0;
881	  else if (dep_type == REG_DEP_OUTPUT)
882	    {
883	      cost = (insn_default_latency (insn)
884		      - insn_default_latency (used));
885	      if (cost <= 0)
886		cost = 1;
887	    }
888	  else if (bypass_p (insn))
889	    cost = insn_latency (insn, used);
890	}
891
892
893      if (targetm.sched.adjust_cost_2)
894	cost = targetm.sched.adjust_cost_2 (used, (int) dep_type, insn, cost,
895					    dw);
896      else if (targetm.sched.adjust_cost != NULL)
897	{
898	  /* This variable is used for backward compatibility with the
899	     targets.  */
900	  rtx dep_cost_rtx_link = alloc_INSN_LIST (NULL_RTX, NULL_RTX);
901
902	  /* Make it self-cycled, so that if someone tries to walk over this
903	     incomplete list he/she will be caught in an endless loop.  */
904	  XEXP (dep_cost_rtx_link, 1) = dep_cost_rtx_link;
905
906	  /* Targets use only REG_NOTE_KIND of the link.  */
907	  PUT_REG_NOTE_KIND (dep_cost_rtx_link, DEP_TYPE (link));
908
909	  cost = targetm.sched.adjust_cost (used, dep_cost_rtx_link,
910					    insn, cost);
911
912	  free_INSN_LIST_node (dep_cost_rtx_link);
913	}
914
915      if (cost < 0)
916	cost = 0;
917    }
918
919  return cost;
920}
921
922/* Compute cost of dependence LINK.
923   This is the number of cycles between instruction issue and
924   instruction results.  */
925int
926dep_cost (dep_t link)
927{
928  return dep_cost_1 (link, 0);
929}
930
931/* Use this sel-sched.c friendly function in reorder2 instead of increasing
932   INSN_PRIORITY explicitly.  */
933void
934increase_insn_priority (rtx insn, int amount)
935{
936  if (!sel_sched_p ())
937    {
938      /* We're dealing with haifa-sched.c INSN_PRIORITY.  */
939      if (INSN_PRIORITY_KNOWN (insn))
940	  INSN_PRIORITY (insn) += amount;
941    }
942  else
943    {
944      /* In sel-sched.c INSN_PRIORITY is not kept up to date.
945	 Use EXPR_PRIORITY instead. */
946      sel_add_to_insn_priority (insn, amount);
947    }
948}
949
950/* Return 'true' if DEP should be included in priority calculations.  */
951static bool
952contributes_to_priority_p (dep_t dep)
953{
954  if (DEBUG_INSN_P (DEP_CON (dep))
955      || DEBUG_INSN_P (DEP_PRO (dep)))
956    return false;
957
958  /* Critical path is meaningful in block boundaries only.  */
959  if (!current_sched_info->contributes_to_priority (DEP_CON (dep),
960						    DEP_PRO (dep)))
961    return false;
962
963  /* If flag COUNT_SPEC_IN_CRITICAL_PATH is set,
964     then speculative instructions will be less likely to be
965     scheduled.  That is because the priority of
966     their producers will increase, and, thus, the
967     producers will be more likely to be scheduled, thereby
968     resolving the dependence.  */
969  if (sched_deps_info->generate_spec_deps
970      && !(spec_info->flags & COUNT_SPEC_IN_CRITICAL_PATH)
971      && (DEP_STATUS (dep) & SPECULATIVE))
972    return false;
973
974  return true;
975}
976
977/* Compute the number of nondebug forward deps of an insn.  */
978
979static int
980dep_list_size (rtx insn)
981{
982  sd_iterator_def sd_it;
983  dep_t dep;
984  int dbgcount = 0, nodbgcount = 0;
985
986  if (!MAY_HAVE_DEBUG_INSNS)
987    return sd_lists_size (insn, SD_LIST_FORW);
988
989  FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
990    {
991      if (DEBUG_INSN_P (DEP_CON (dep)))
992	dbgcount++;
993      else if (!DEBUG_INSN_P (DEP_PRO (dep)))
994	nodbgcount++;
995    }
996
997  gcc_assert (dbgcount + nodbgcount == sd_lists_size (insn, SD_LIST_FORW));
998
999  return nodbgcount;
1000}
1001
1002/* Compute the priority number for INSN.  */
1003static int
1004priority (rtx insn)
1005{
1006  if (! INSN_P (insn))
1007    return 0;
1008
1009  /* We should not be interested in priority of an already scheduled insn.  */
1010  gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);
1011
1012  if (!INSN_PRIORITY_KNOWN (insn))
1013    {
1014      int this_priority = -1;
1015
1016      if (dep_list_size (insn) == 0)
1017	/* ??? We should set INSN_PRIORITY to insn_cost when an insn has
1018	   some forward deps but all of them are ignored by
1019	   contributes_to_priority hook.  At the moment we set priority of
1020	   such insn to 0.  */
1021	this_priority = insn_cost (insn);
1022      else
1023	{
1024	  rtx prev_first, twin;
1025	  basic_block rec;
1026
1027	  /* For recovery check instructions we calculate priority slightly
1028	     differently than for normal instructions.  Instead of walking
1029	     through INSN_FORW_DEPS (check) list, we walk through
1030	     INSN_FORW_DEPS list of each instruction in the corresponding
1031	     recovery block.  */
1032
1033          /* Selective scheduling does not define RECOVERY_BLOCK macro.  */
1034	  rec = sel_sched_p () ? NULL : RECOVERY_BLOCK (insn);
1035	  if (!rec || rec == EXIT_BLOCK_PTR)
1036	    {
1037	      prev_first = PREV_INSN (insn);
1038	      twin = insn;
1039	    }
1040	  else
1041	    {
1042	      prev_first = NEXT_INSN (BB_HEAD (rec));
1043	      twin = PREV_INSN (BB_END (rec));
1044	    }
1045
1046	  do
1047	    {
1048	      sd_iterator_def sd_it;
1049	      dep_t dep;
1050
1051	      FOR_EACH_DEP (twin, SD_LIST_FORW, sd_it, dep)
1052		{
1053		  rtx next;
1054		  int next_priority;
1055
1056		  next = DEP_CON (dep);
1057
1058		  if (BLOCK_FOR_INSN (next) != rec)
1059		    {
1060		      int cost;
1061
1062		      if (!contributes_to_priority_p (dep))
1063			continue;
1064
1065		      if (twin == insn)
1066			cost = dep_cost (dep);
1067		      else
1068			{
1069			  struct _dep _dep1, *dep1 = &_dep1;
1070
1071			  init_dep (dep1, insn, next, REG_DEP_ANTI);
1072
1073			  cost = dep_cost (dep1);
1074			}
1075
1076		      next_priority = cost + priority (next);
1077
1078		      if (next_priority > this_priority)
1079			this_priority = next_priority;
1080		    }
1081		}
1082
1083	      twin = PREV_INSN (twin);
1084	    }
1085	  while (twin != prev_first);
1086	}
1087
1088      if (this_priority < 0)
1089	{
1090	  gcc_assert (this_priority == -1);
1091
1092	  this_priority = insn_cost (insn);
1093	}
1094
1095      INSN_PRIORITY (insn) = this_priority;
1096      INSN_PRIORITY_STATUS (insn) = 1;
1097    }
1098
1099  return INSN_PRIORITY (insn);
1100}
1101
1102/* Macros and functions for keeping the priority queue sorted, and
1103   dealing with queuing and dequeuing of instructions.  */
1104
1105#define SCHED_SORT(READY, N_READY)                                   \
1106do { if ((N_READY) == 2)				             \
1107       swap_sort (READY, N_READY);			             \
1108     else if ((N_READY) > 2)                                         \
1109         qsort (READY, N_READY, sizeof (rtx), rank_for_schedule); }  \
1110while (0)
1111
1112/* Set up info about the current register pressure impact of scheduling
1113   INSN at the current scheduling point.  */
1114static void
1115setup_insn_reg_pressure_info (rtx insn)
1116{
1117  int i, change, before, after, hard_regno;
1118  int excess_cost_change;
1119  enum machine_mode mode;
1120  enum reg_class cl;
1121  struct reg_pressure_data *pressure_info;
1122  int *max_reg_pressure;
1123  struct reg_use_data *use;
1124  static int death[N_REG_CLASSES];
1125
1126  excess_cost_change = 0;
1127  for (i = 0; i < ira_reg_class_cover_size; i++)
1128    death[ira_reg_class_cover[i]] = 0;
1129  for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
1130    if (dying_use_p (use))
1131      {
1132	cl = sched_regno_cover_class[use->regno];
1133	if (use->regno < FIRST_PSEUDO_REGISTER)
1134	  death[cl]++;
1135	else
1136	  death[cl] += ira_reg_class_nregs[cl][PSEUDO_REGNO_MODE (use->regno)];
1137      }
1138  pressure_info = INSN_REG_PRESSURE (insn);
1139  max_reg_pressure = INSN_MAX_REG_PRESSURE (insn);
1140  gcc_assert (pressure_info != NULL && max_reg_pressure != NULL);
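  /* For each cover class, estimate how scheduling INSN changes the excess
     of the maximal register pressure over the number of available
     registers, and weight that change by an approximate spill cost (the
     memory load cost plus the memory store cost for the class).  As a
     hypothetical example, if the excess would grow from 2 to 3 registers
     and a spill costs 4 + 4, the class contributes (3 - 2) * 8 = 8 to
     the total.  */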
1141  for (i = 0; i < ira_reg_class_cover_size; i++)
1142    {
1143      cl = ira_reg_class_cover[i];
1144      gcc_assert (curr_reg_pressure[cl] >= 0);
1145      change = (int) pressure_info[i].set_increase - death[cl];
1146      before = MAX (0, max_reg_pressure[i] - ira_available_class_regs[cl]);
1147      after = MAX (0, max_reg_pressure[i] + change
1148		   - ira_available_class_regs[cl]);
1149      hard_regno = ira_class_hard_regs[cl][0];
1150      gcc_assert (hard_regno >= 0);
1151      mode = reg_raw_mode[hard_regno];
1152      excess_cost_change += ((after - before)
1153			     * (ira_memory_move_cost[mode][cl][0]
1154				+ ira_memory_move_cost[mode][cl][1]));
1155    }
1156  INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insn) = excess_cost_change;
1157}
1158
1159/* Returns a positive value if x is preferred; returns a negative value if
1160   y is preferred.  Should never return 0, since that will make the sort
1161   unstable.  */
1162
1163static int
1164rank_for_schedule (const void *x, const void *y)
1165{
1166  rtx tmp = *(const rtx *) y;
1167  rtx tmp2 = *(const rtx *) x;
1168  rtx last;
1169  int tmp_class, tmp2_class;
1170  int val, priority_val, info_val;
1171
1172  if (MAY_HAVE_DEBUG_INSNS)
1173    {
1174      /* Schedule debug insns as early as possible.  */
1175      if (DEBUG_INSN_P (tmp) && !DEBUG_INSN_P (tmp2))
1176	return -1;
1177      else if (DEBUG_INSN_P (tmp2))
1178	return 1;
1179    }
1180
1181  /* The insn in a schedule group should be issued first.  */
1182  if (flag_sched_group_heuristic &&
1183      SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
1184    return SCHED_GROUP_P (tmp2) ? 1 : -1;
1185
1186  /* Make sure that priority of TMP and TMP2 are initialized.  */
1187  gcc_assert (INSN_PRIORITY_KNOWN (tmp) && INSN_PRIORITY_KNOWN (tmp2));
1188
1189  if (sched_pressure_p)
1190    {
1191      int diff;
1192
1193      /* Prefer insn whose scheduling results in the smallest register
1194	 pressure excess.  */
1195      if ((diff = (INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp)
1196		   + (INSN_TICK (tmp) > clock_var
1197		      ? INSN_TICK (tmp) - clock_var : 0)
1198		   - INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2)
1199		   - (INSN_TICK (tmp2) > clock_var
1200		      ? INSN_TICK (tmp2) - clock_var : 0))) != 0)
1201	return diff;
1202    }
1203
1204
1205  if (sched_pressure_p
1206      && (INSN_TICK (tmp2) > clock_var || INSN_TICK (tmp) > clock_var))
1207    {
1208      if (INSN_TICK (tmp) <= clock_var)
1209	return -1;
1210      else if (INSN_TICK (tmp2) <= clock_var)
1211	return 1;
1212      else
1213	return INSN_TICK (tmp) - INSN_TICK (tmp2);
1214    }
1215  /* Prefer insn with higher priority.  */
1216  priority_val = INSN_PRIORITY (tmp2) - INSN_PRIORITY (tmp);
1217
1218  if (flag_sched_critical_path_heuristic && priority_val)
1219    return priority_val;
1220
1221  /* Prefer speculative insn with greater dependencies weakness.  */
1222  if (flag_sched_spec_insn_heuristic && spec_info)
1223    {
1224      ds_t ds1, ds2;
1225      dw_t dw1, dw2;
1226      int dw;
1227
1228      ds1 = TODO_SPEC (tmp) & SPECULATIVE;
1229      if (ds1)
1230	dw1 = ds_weak (ds1);
1231      else
1232	dw1 = NO_DEP_WEAK;
1233
1234      ds2 = TODO_SPEC (tmp2) & SPECULATIVE;
1235      if (ds2)
1236	dw2 = ds_weak (ds2);
1237      else
1238	dw2 = NO_DEP_WEAK;
1239
1240      dw = dw2 - dw1;
1241      if (dw > (NO_DEP_WEAK / 8) || dw < -(NO_DEP_WEAK / 8))
1242	return dw;
1243    }
1244
1245  info_val = (*current_sched_info->rank) (tmp, tmp2);
1246  if(flag_sched_rank_heuristic && info_val)
1247    return info_val;
1248
1249  if (flag_sched_last_insn_heuristic)
1250    {
1251      last = last_scheduled_insn;
1252
1253      if (DEBUG_INSN_P (last) && last != current_sched_info->prev_head)
1254	do
1255	  last = PREV_INSN (last);
1256	while (!NONDEBUG_INSN_P (last)
1257	       && last != current_sched_info->prev_head);
1258    }
1259
1260  /* Compare insns based on their relation to the last scheduled
1261     non-debug insn.  */
1262  if (flag_sched_last_insn_heuristic && NONDEBUG_INSN_P (last))
1263    {
1264      dep_t dep1;
1265      dep_t dep2;
1266
1267      /* Classify the instructions into three classes:
1268         1) Data dependent on last scheduled insn.
1269         2) Anti/Output dependent on last scheduled insn.
1270         3) Independent of last scheduled insn, or has latency of one.
1271         Choose the insn from the highest numbered class if different.  */
1272      dep1 = sd_find_dep_between (last, tmp, true);
1273
1274      if (dep1 == NULL || dep_cost (dep1) == 1)
1275	tmp_class = 3;
1276      else if (/* Data dependence.  */
1277	       DEP_TYPE (dep1) == REG_DEP_TRUE)
1278	tmp_class = 1;
1279      else
1280	tmp_class = 2;
1281
1282      dep2 = sd_find_dep_between (last, tmp2, true);
1283
1284      if (dep2 == NULL || dep_cost (dep2)  == 1)
1285	tmp2_class = 3;
1286      else if (/* Data dependence.  */
1287	       DEP_TYPE (dep2) == REG_DEP_TRUE)
1288	tmp2_class = 1;
1289      else
1290	tmp2_class = 2;
1291
1292      if ((val = tmp2_class - tmp_class))
1293	return val;
1294    }
1295
1296  /* Prefer the insn which has more later insns that depend on it.
1297     This gives the scheduler more freedom when scheduling later
1298     instructions at the expense of added register pressure.  */
1299
1300  val = (dep_list_size (tmp2) - dep_list_size (tmp));
1301
1302  if (flag_sched_dep_count_heuristic && val != 0)
1303    return val;
1304
1305  /* If insns are equally good, sort by INSN_LUID (original insn order),
1306     so that we make the sort stable.  This minimizes instruction movement,
1307     thus minimizing sched's effect on debugging and cross-jumping.  */
1308  return INSN_LUID (tmp) - INSN_LUID (tmp2);
1309}
1310
1311/* Resort the array A in which only element at index N may be out of order.  */
1312
1313HAIFA_INLINE static void
1314swap_sort (rtx *a, int n)
1315{
1316  rtx insn = a[n - 1];
1317  int i = n - 2;
1318
1319  while (i >= 0 && rank_for_schedule (a + i, &insn) >= 0)
1320    {
1321      a[i + 1] = a[i];
1322      i -= 1;
1323    }
1324  a[i + 1] = insn;
1325}
1326
1327/* Add INSN to the insn queue so that it can be executed at least
1328   N_CYCLES after the currently executing insn.  Preserve insns
1329   chain for debugging purposes.  */
1330
1331HAIFA_INLINE static void
1332queue_insn (rtx insn, int n_cycles)
1333{
1334  int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
1335  rtx link = alloc_INSN_LIST (insn, insn_queue[next_q]);
1336
1337  gcc_assert (n_cycles <= max_insn_queue_index);
1338  gcc_assert (!DEBUG_INSN_P (insn));
1339
1340  insn_queue[next_q] = link;
1341  q_size += 1;
1342
1343  if (sched_verbose >= 2)
1344    {
1345      fprintf (sched_dump, ";;\t\tReady-->Q: insn %s: ",
1346	       (*current_sched_info->print_insn) (insn, 0));
1347
1348      fprintf (sched_dump, "queued for %d cycles.\n", n_cycles);
1349    }
1350
1351  QUEUE_INDEX (insn) = next_q;
1352}
1353
1354/* Remove INSN from queue.  */
1355static void
1356queue_remove (rtx insn)
1357{
1358  gcc_assert (QUEUE_INDEX (insn) >= 0);
1359  remove_free_INSN_LIST_elem (insn, &insn_queue[QUEUE_INDEX (insn)]);
1360  q_size--;
1361  QUEUE_INDEX (insn) = QUEUE_NOWHERE;
1362}
1363
1364/* Return a pointer to the bottom of the ready list, i.e. the insn
1365   with the lowest priority.  */
1366
1367rtx *
1368ready_lastpos (struct ready_list *ready)
1369{
1370  gcc_assert (ready->n_ready >= 1);
1371  return ready->vec + ready->first - ready->n_ready + 1;
1372}
1373
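/* The ready list occupies a contiguous run of N_READY entries of the VEC
   array, ending at index FIRST: vec[first] holds the highest-priority
   insn and vec[first - n_ready + 1] the lowest-priority one.  ready_add
   below grows the run at either end and slides the whole run when the
   chosen end would overflow the vector.  */
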
1374/* Add an element INSN to the ready list so that it ends up with the
1375   lowest/highest priority depending on FIRST_P.  */
1376
1377HAIFA_INLINE static void
1378ready_add (struct ready_list *ready, rtx insn, bool first_p)
1379{
1380  if (!first_p)
1381    {
1382      if (ready->first == ready->n_ready)
1383	{
1384	  memmove (ready->vec + ready->veclen - ready->n_ready,
1385		   ready_lastpos (ready),
1386		   ready->n_ready * sizeof (rtx));
1387	  ready->first = ready->veclen - 1;
1388	}
1389      ready->vec[ready->first - ready->n_ready] = insn;
1390    }
1391  else
1392    {
1393      if (ready->first == ready->veclen - 1)
1394	{
1395	  if (ready->n_ready)
1396	    /* ready_lastpos() fails when called with (ready->n_ready == 0).  */
1397	    memmove (ready->vec + ready->veclen - ready->n_ready - 1,
1398		     ready_lastpos (ready),
1399		     ready->n_ready * sizeof (rtx));
1400	  ready->first = ready->veclen - 2;
1401	}
1402      ready->vec[++(ready->first)] = insn;
1403    }
1404
1405  ready->n_ready++;
1406  if (DEBUG_INSN_P (insn))
1407    ready->n_debug++;
1408
1409  gcc_assert (QUEUE_INDEX (insn) != QUEUE_READY);
1410  QUEUE_INDEX (insn) = QUEUE_READY;
1411}
1412
1413/* Remove the element with the highest priority from the ready list and
1414   return it.  */
1415
1416HAIFA_INLINE static rtx
1417ready_remove_first (struct ready_list *ready)
1418{
1419  rtx t;
1420
1421  gcc_assert (ready->n_ready);
1422  t = ready->vec[ready->first--];
1423  ready->n_ready--;
1424  if (DEBUG_INSN_P (t))
1425    ready->n_debug--;
1426  /* If the ready list becomes empty, reset it.  */
1427  if (ready->n_ready == 0)
1428    ready->first = ready->veclen - 1;
1429
1430  gcc_assert (QUEUE_INDEX (t) == QUEUE_READY);
1431  QUEUE_INDEX (t) = QUEUE_NOWHERE;
1432
1433  return t;
1434}
1435
1436/* The following code implements multi-pass scheduling for the first
1437   cycle.  In other words, we will try to choose the ready insn that
1438   permits starting the maximum number of insns on the same cycle.  */
1439
1440/* Return a pointer to the element INDEX from the ready list.  INDEX for
1441   insn with the highest priority is 0, and the lowest priority has
1442   N_READY - 1.  */
1443
1444rtx
1445ready_element (struct ready_list *ready, int index)
1446{
1447  gcc_assert (ready->n_ready && index < ready->n_ready);
1448
1449  return ready->vec[ready->first - index];
1450}
1451
1452/* Remove the element INDEX from the ready list and return it.  INDEX
1453   for insn with the highest priority is 0, and the lowest priority
1454   has N_READY - 1.  */
1455
1456HAIFA_INLINE static rtx
1457ready_remove (struct ready_list *ready, int index)
1458{
1459  rtx t;
1460  int i;
1461
1462  if (index == 0)
1463    return ready_remove_first (ready);
1464  gcc_assert (ready->n_ready && index < ready->n_ready);
1465  t = ready->vec[ready->first - index];
1466  ready->n_ready--;
1467  if (DEBUG_INSN_P (t))
1468    ready->n_debug--;
1469  for (i = index; i < ready->n_ready; i++)
1470    ready->vec[ready->first - i] = ready->vec[ready->first - i - 1];
1471  QUEUE_INDEX (t) = QUEUE_NOWHERE;
1472  return t;
1473}
1474
1475/* Remove INSN from the ready list.  */
1476static void
1477ready_remove_insn (rtx insn)
1478{
1479  int i;
1480
1481  for (i = 0; i < readyp->n_ready; i++)
1482    if (ready_element (readyp, i) == insn)
1483      {
1484        ready_remove (readyp, i);
1485        return;
1486      }
1487  gcc_unreachable ();
1488}
1489
1490/* Sort the ready list READY by ascending priority, using the SCHED_SORT
1491   macro.  */
1492
1493void
1494ready_sort (struct ready_list *ready)
1495{
1496  int i;
1497  rtx *first = ready_lastpos (ready);
1498
1499  if (sched_pressure_p)
1500    {
1501      for (i = 0; i < ready->n_ready; i++)
1502	setup_insn_reg_pressure_info (first[i]);
1503    }
1504  SCHED_SORT (first, ready->n_ready);
1505}
1506
1507/* PREV is an insn that is ready to execute.  Adjust its priority if that
1508   will help shorten or lengthen register lifetimes as appropriate.  Also
1509   provide a hook for the target to tweak itself.  */
1510
1511HAIFA_INLINE static void
1512adjust_priority (rtx prev)
1513{
1514  /* ??? There used to be code here to try and estimate how an insn
1515     affected register lifetimes, but it did it by looking at REG_DEAD
1516     notes, which we removed in schedule_region.  Nor did it try to
1517     take into account register pressure or anything useful like that.
1518
1519     Revisit when we have a machine model to work with and not before.  */
1520
1521  if (targetm.sched.adjust_priority)
1522    INSN_PRIORITY (prev) =
1523      targetm.sched.adjust_priority (prev, INSN_PRIORITY (prev));
1524}
1525
1526/* Advance DFA state STATE on one cycle.  */
1527void
1528advance_state (state_t state)
1529{
1530  if (targetm.sched.dfa_pre_advance_cycle)
1531    targetm.sched.dfa_pre_advance_cycle ();
1532
1533  if (targetm.sched.dfa_pre_cycle_insn)
1534    state_transition (state,
1535		      targetm.sched.dfa_pre_cycle_insn ());
1536
1537  state_transition (state, NULL);
1538
1539  if (targetm.sched.dfa_post_cycle_insn)
1540    state_transition (state,
1541		      targetm.sched.dfa_post_cycle_insn ());
1542
1543  if (targetm.sched.dfa_post_advance_cycle)
1544    targetm.sched.dfa_post_advance_cycle ();
1545}
1546
1547/* Advance time on one cycle.  */
1548HAIFA_INLINE static void
1549advance_one_cycle (void)
1550{
1551  advance_state (curr_state);
1552  if (sched_verbose >= 6)
1553    fprintf (sched_dump, ";;\tAdvanced a state.\n");
1554}
1555
1556/* Clock at which the previous instruction was issued.  */
1557static int last_clock_var;
1558
1559/* Update register pressure after scheduling INSN.  */
1560static void
1561update_register_pressure (rtx insn)
1562{
1563  struct reg_use_data *use;
1564  struct reg_set_data *set;
1565
1566  for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
1567    if (dying_use_p (use) && bitmap_bit_p (curr_reg_live, use->regno))
1568      mark_regno_birth_or_death (use->regno, false);
1569  for (set = INSN_REG_SET_LIST (insn); set != NULL; set = set->next_insn_set)
1570    mark_regno_birth_or_death (set->regno, true);
1571}
1572
1573/* Set up or update (if UPDATE_P) max register pressure (see its
1574   meaning in sched-int.h::_haifa_insn_data) for all current BB insns
1575   after insn AFTER.  */
1576static void
1577setup_insn_max_reg_pressure (rtx after, bool update_p)
1578{
1579  int i, p;
1580  bool eq_p;
1581  rtx insn;
1582  static int max_reg_pressure[N_REG_CLASSES];
1583
1584  save_reg_pressure ();
1585  for (i = 0; i < ira_reg_class_cover_size; i++)
1586    max_reg_pressure[ira_reg_class_cover[i]]
1587      = curr_reg_pressure[ira_reg_class_cover[i]];
1588  for (insn = NEXT_INSN (after);
1589       insn != NULL_RTX && ! BARRIER_P (insn)
1590	 && BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (after);
1591       insn = NEXT_INSN (insn))
1592    if (NONDEBUG_INSN_P (insn))
1593      {
1594	eq_p = true;
1595	for (i = 0; i < ira_reg_class_cover_size; i++)
1596	  {
1597	    p = max_reg_pressure[ira_reg_class_cover[i]];
1598	    if (INSN_MAX_REG_PRESSURE (insn)[i] != p)
1599	      {
1600		eq_p = false;
1601		INSN_MAX_REG_PRESSURE (insn)[i]
1602		  = max_reg_pressure[ira_reg_class_cover[i]];
1603	      }
1604	  }
1605	if (update_p && eq_p)
1606	  break;
1607	update_register_pressure (insn);
1608	for (i = 0; i < ira_reg_class_cover_size; i++)
1609	  if (max_reg_pressure[ira_reg_class_cover[i]]
1610	      < curr_reg_pressure[ira_reg_class_cover[i]])
1611	    max_reg_pressure[ira_reg_class_cover[i]]
1612	      = curr_reg_pressure[ira_reg_class_cover[i]];
1613      }
1614  restore_reg_pressure ();
1615}
1616
1617/* Update the current register pressure after scheduling INSN.  Update
1618   also max register pressure for unscheduled insns of the current
1619   BB.  */
1620static void
1621update_reg_and_insn_max_reg_pressure (rtx insn)
1622{
1623  int i;
1624  int before[N_REG_CLASSES];
1625
1626  for (i = 0; i < ira_reg_class_cover_size; i++)
1627    before[i] = curr_reg_pressure[ira_reg_class_cover[i]];
1628  update_register_pressure (insn);
1629  for (i = 0; i < ira_reg_class_cover_size; i++)
1630    if (curr_reg_pressure[ira_reg_class_cover[i]] != before[i])
1631      break;
1632  if (i < ira_reg_class_cover_size)
1633    setup_insn_max_reg_pressure (insn, true);
1634}
1635
1636/* Set up register pressure at the beginning of basic block BB whose
1637   insns start after insn AFTER.  Also set up max register pressure
1638   for all insns of the basic block.  */
1639void
1640sched_setup_bb_reg_pressure_info (basic_block bb, rtx after)
1641{
1642  gcc_assert (sched_pressure_p);
1643  initiate_bb_reg_pressure_info (bb);
1644  setup_insn_max_reg_pressure (after, false);
1645}
1646
1647/* INSN is the "currently executing insn".  Launch each insn which was
1648   waiting on INSN.  The insns that are ready to fire are kept in the
1649   global ready list, and clock_var gives the current cycle.  The
1650   function returns the necessary cycle advance after issuing the insn
1651   (it is not zero for insns in a schedule group).  */
1652
1653static int
1654schedule_insn (rtx insn)
1655{
1656  sd_iterator_def sd_it;
1657  dep_t dep;
1658  int i;
1659  int advance = 0;
1660
1661  if (sched_verbose >= 1)
1662    {
1663      struct reg_pressure_data *pressure_info;
1664      char buf[2048];
1665
1666      print_insn (buf, insn, 0);
1667      buf[40] = 0;
1668      fprintf (sched_dump, ";;\t%3i--> %-40s:", clock_var, buf);
1669
1670      if (recog_memoized (insn) < 0)
1671	fprintf (sched_dump, "nothing");
1672      else
1673	print_reservation (sched_dump, insn);
1674      pressure_info = INSN_REG_PRESSURE (insn);
1675      if (pressure_info != NULL)
1676	{
1677	  fputc (':', sched_dump);
1678	  for (i = 0; i < ira_reg_class_cover_size; i++)
1679	    fprintf (sched_dump, "%s%+d(%d)",
1680		     reg_class_names[ira_reg_class_cover[i]],
1681		     pressure_info[i].set_increase, pressure_info[i].change);
1682	}
1683      fputc ('\n', sched_dump);
1684    }
1685
1686  if (sched_pressure_p)
1687    update_reg_and_insn_max_reg_pressure (insn);
1688
1689  /* The instruction being scheduled should have all its dependencies
1690     resolved and should have been removed from the ready list.  */
1691  gcc_assert (sd_lists_empty_p (insn, SD_LIST_BACK));
1692
1693  /* Reset debug insns invalidated by moving this insn.  */
1694  if (MAY_HAVE_DEBUG_INSNS && !DEBUG_INSN_P (insn))
1695    for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
1696	 sd_iterator_cond (&sd_it, &dep);)
1697      {
1698	rtx dbg = DEP_PRO (dep);
1699	struct reg_use_data *use, *next;
1700
1701	gcc_assert (DEBUG_INSN_P (dbg));
1702
1703	if (sched_verbose >= 6)
1704	  fprintf (sched_dump, ";;\t\tresetting: debug insn %d\n",
1705		   INSN_UID (dbg));
1706
1707	/* ??? Rather than resetting the debug insn, we might be able
1708	   to emit a debug temp before the just-scheduled insn, but
1709	   this would involve checking that the expression at the
1710	   point of the debug insn is equivalent to the expression
1711	   before the just-scheduled insn.  They might not be: the
1712	   expression in the debug insn may depend on other insns not
1713	   yet scheduled that set MEMs, REGs or even other debug
1714	   insns.  It's not clear that attempting to preserve debug
1715	   information in these cases is worth the effort, given how
1716	   uncommon these resets are and the likelihood that the debug
1717	   temps introduced won't survive the schedule change.  */
1718	INSN_VAR_LOCATION_LOC (dbg) = gen_rtx_UNKNOWN_VAR_LOC ();
1719	df_insn_rescan (dbg);
1720
1721	/* Unknown location doesn't use any registers.  */
1722	for (use = INSN_REG_USE_LIST (dbg); use != NULL; use = next)
1723	  {
1724	    struct reg_use_data *prev = use;
1725
1726	    /* Remove use from the cyclic next_regno_use chain first.  */
1727	    while (prev->next_regno_use != use)
1728	      prev = prev->next_regno_use;
1729	    prev->next_regno_use = use->next_regno_use;
1730	    next = use->next_insn_use;
1731	    free (use);
1732	  }
1733	INSN_REG_USE_LIST (dbg) = NULL;
1734
1735	/* We delete rather than resolve these deps, otherwise we
1736	   crash in sched_free_deps(), because forward deps are
1737	   expected to be released before backward deps.  */
1738	sd_delete_dep (sd_it);
1739      }
1740
1741  gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);
1742  QUEUE_INDEX (insn) = QUEUE_SCHEDULED;
1743
1744  gcc_assert (INSN_TICK (insn) >= MIN_TICK);
1745  if (INSN_TICK (insn) > clock_var)
    /* INSN has been prematurely moved from the queue to the ready list.
       This is possible only if the following flag is set.  */
1748    gcc_assert (flag_sched_stalled_insns);
1749
1750  /* ??? Probably, if INSN is scheduled prematurely, we should leave
1751     INSN_TICK untouched.  This is a machine-dependent issue, actually.  */
1752  INSN_TICK (insn) = clock_var;
1753
1754  /* Update dependent instructions.  */
1755  for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
1756       sd_iterator_cond (&sd_it, &dep);)
1757    {
1758      rtx next = DEP_CON (dep);
1759
1760      /* Resolve the dependence between INSN and NEXT.
1761	 sd_resolve_dep () moves current dep to another list thus
1762	 advancing the iterator.  */
1763      sd_resolve_dep (sd_it);
1764
1765      /* Don't bother trying to mark next as ready if insn is a debug
1766	 insn.  If insn is the last hard dependency, it will have
1767	 already been discounted.  */
1768      if (DEBUG_INSN_P (insn) && !DEBUG_INSN_P (next))
1769	continue;
1770
1771      if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
1772	{
1773	  int effective_cost;
1774
1775	  effective_cost = try_ready (next);
1776
1777	  if (effective_cost >= 0
1778	      && SCHED_GROUP_P (next)
1779	      && advance < effective_cost)
1780	    advance = effective_cost;
1781	}
1782      else
	/* A speculation check always has only one forward dependence (to the
	   first insn in the recovery block); therefore, this will be executed
	   only once.  */
1785	{
1786	  gcc_assert (sd_lists_empty_p (insn, SD_LIST_FORW));
1787	  fix_recovery_deps (RECOVERY_BLOCK (insn));
1788	}
1789    }
1790
  /* This is the point at which the scheduler basically no longer needs the
     backward and forward dependencies of INSN.  Nevertheless, they are still
     used by heuristics in rank_for_schedule () and early_queue_to_ready ()
     and by some targets (e.g. rs6000).  Thus the earliest place where we
     *could* remove the dependencies is after the targetm.sched.md_finish ()
     call in schedule_block ().  On the other hand, the safest place to
     remove them is when we finish scheduling the entire region; as we don't
     generate [many] dependencies during scheduling itself, we won't need the
     memory until the beginning of the next region.
     Bottom line: dependencies are removed for all insns at the end of
     scheduling the region.  */
1802
1803  /* Annotate the instruction with issue information -- TImode
1804     indicates that the instruction is expected not to be able
1805     to issue on the same cycle as the previous insn.  A machine
1806     may use this information to decide how the instruction should
1807     be aligned.  */
1808  if (issue_rate > 1
1809      && GET_CODE (PATTERN (insn)) != USE
1810      && GET_CODE (PATTERN (insn)) != CLOBBER
1811      && !DEBUG_INSN_P (insn))
1812    {
1813      if (reload_completed)
1814	PUT_MODE (insn, clock_var > last_clock_var ? TImode : VOIDmode);
1815      last_clock_var = clock_var;
1816    }
1817
1818  return advance;
1819}
1820
1821/* Functions for handling of notes.  */
1822
/* Append the note list that ends at FROM_END to the end of the list
   ending at *TO_ENDP.  */
1824void
1825concat_note_lists (rtx from_end, rtx *to_endp)
1826{
1827  rtx from_start;
1828
  /* It's easy when we have nothing to concatenate.  */
1830  if (from_end == NULL)
1831    return;
1832
  /* It's also easy when the destination is empty.  */
1834  if (*to_endp == NULL)
1835    {
1836      *to_endp = from_end;
1837      return;
1838    }
1839
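  /* FROM_END is the last note of the source list; walk back to its first
     note and splice the whole chain onto the end of the destination.  */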
1840  from_start = from_end;
1841  while (PREV_INSN (from_start) != NULL)
1842    from_start = PREV_INSN (from_start);
1843
1844  PREV_INSN (from_start) = *to_endp;
1845  NEXT_INSN (*to_endp) = from_start;
1846  *to_endp = from_end;
1847}
1848
1849/* Delete notes between HEAD and TAIL and put them in the chain
1850   of notes ended by NOTE_LIST.  */
1851void
1852remove_notes (rtx head, rtx tail)
1853{
1854  rtx next_tail, insn, next;
1855
1856  note_list = 0;
1857  if (head == tail && !INSN_P (head))
1858    return;
1859
1860  next_tail = NEXT_INSN (tail);
1861  for (insn = head; insn != next_tail; insn = next)
1862    {
1863      next = NEXT_INSN (insn);
1864      if (!NOTE_P (insn))
1865	continue;
1866
1867      switch (NOTE_KIND (insn))
1868	{
1869	case NOTE_INSN_BASIC_BLOCK:
1870	  continue;
1871
1872	case NOTE_INSN_EPILOGUE_BEG:
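	  /* Record the epilogue-begin note as a REG_SAVE_NOTE on the next
	     insn; reemit_notes () turns it back into a real note after
	     scheduling.  */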
1873	  if (insn != tail)
1874	    {
1875	      remove_insn (insn);
1876	      add_reg_note (next, REG_SAVE_NOTE,
1877			    GEN_INT (NOTE_INSN_EPILOGUE_BEG));
1878	      break;
1879	    }
1880	  /* FALLTHRU */
1881
1882	default:
1883	  remove_insn (insn);
1884
	  /* Add the note to the list that ends at NOTE_LIST.  */
1886	  PREV_INSN (insn) = note_list;
1887	  NEXT_INSN (insn) = NULL_RTX;
1888	  if (note_list)
1889	    NEXT_INSN (note_list) = insn;
1890	  note_list = insn;
1891	  break;
1892	}
1893
1894      gcc_assert ((sel_sched_p () || insn != tail) && insn != head);
1895    }
1896}
1897
1898
/* Return the head and tail pointers of the ebb starting at BEG and ending
   at END.  */
1901void
1902get_ebb_head_tail (basic_block beg, basic_block end, rtx *headp, rtx *tailp)
1903{
1904  rtx beg_head = BB_HEAD (beg);
1905  rtx beg_tail = BB_END (beg);
1906  rtx end_head = BB_HEAD (end);
1907  rtx end_tail = BB_END (end);
1908
  /* Don't include any notes or labels at the beginning of the BEG
     basic block, or notes at the end of the END basic block.  */
1911
1912  if (LABEL_P (beg_head))
1913    beg_head = NEXT_INSN (beg_head);
1914
1915  while (beg_head != beg_tail)
1916    if (NOTE_P (beg_head) || BOUNDARY_DEBUG_INSN_P (beg_head))
1917      beg_head = NEXT_INSN (beg_head);
1918    else
1919      break;
1920
1921  *headp = beg_head;
1922
1923  if (beg == end)
1924    end_head = beg_head;
1925  else if (LABEL_P (end_head))
1926    end_head = NEXT_INSN (end_head);
1927
1928  while (end_head != end_tail)
1929    if (NOTE_P (end_tail) || BOUNDARY_DEBUG_INSN_P (end_tail))
1930      end_tail = PREV_INSN (end_tail);
1931    else
1932      break;
1933
1934  *tailp = end_tail;
1935}
1936
1937/* Return nonzero if there are no real insns in the range [ HEAD, TAIL ].  */
1938
1939int
1940no_real_insns_p (const_rtx head, const_rtx tail)
1941{
1942  while (head != NEXT_INSN (tail))
1943    {
1944      if (!NOTE_P (head) && !LABEL_P (head)
1945	  && !BOUNDARY_DEBUG_INSN_P (head))
1946	return 0;
1947      head = NEXT_INSN (head);
1948    }
1949  return 1;
1950}
1951
1952/* Restore-other-notes: NOTE_LIST is the end of a chain of notes
1953   previously found among the insns.  Insert them just before HEAD.  */
1954rtx
1955restore_other_notes (rtx head, basic_block head_bb)
1956{
1957  if (note_list != 0)
1958    {
1959      rtx note_head = note_list;
1960
1961      if (head)
1962	head_bb = BLOCK_FOR_INSN (head);
1963      else
1964	head = NEXT_INSN (bb_note (head_bb));
1965
1966      while (PREV_INSN (note_head))
1967	{
1968	  set_block_for_insn (note_head, head_bb);
1969	  note_head = PREV_INSN (note_head);
1970	}
      /* In the loop above we've missed this note.  */
1972      set_block_for_insn (note_head, head_bb);
1973
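      /* Splice the chain NOTE_HEAD ... NOTE_LIST in between
	 PREV_INSN (HEAD) and HEAD.  */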
1974      PREV_INSN (note_head) = PREV_INSN (head);
1975      NEXT_INSN (PREV_INSN (head)) = note_head;
1976      PREV_INSN (head) = note_list;
1977      NEXT_INSN (note_list) = head;
1978
1979      if (BLOCK_FOR_INSN (head) != head_bb)
1980	BB_END (head_bb) = note_list;
1981
1982      head = note_head;
1983    }
1984
1985  return head;
1986}
1987
1988/* Move insns that became ready to fire from queue to ready list.  */
1989
1990static void
1991queue_to_ready (struct ready_list *ready)
1992{
1993  rtx insn;
1994  rtx link;
1995  rtx skip_insn;
1996
1997  q_ptr = NEXT_Q (q_ptr);
1998
1999  if (dbg_cnt (sched_insn) == false)
    /* If the debug counter is activated, do not requeue the insn
       immediately following last_scheduled_insn.  */
2002    skip_insn = next_nonnote_nondebug_insn (last_scheduled_insn);
2003  else
2004    skip_insn = NULL_RTX;
2005
2006  /* Add all pending insns that can be scheduled without stalls to the
2007     ready list.  */
2008  for (link = insn_queue[q_ptr]; link; link = XEXP (link, 1))
2009    {
2010      insn = XEXP (link, 0);
2011      q_size -= 1;
2012
2013      if (sched_verbose >= 2)
2014	fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
2015		 (*current_sched_info->print_insn) (insn, 0));
2016
2017      /* If the ready list is full, delay the insn for 1 cycle.
2018	 See the comment in schedule_block for the rationale.  */
2019      if (!reload_completed
2020	  && ready->n_ready - ready->n_debug > MAX_SCHED_READY_INSNS
2021	  && !SCHED_GROUP_P (insn)
2022	  && insn != skip_insn)
2023	{
2024	  if (sched_verbose >= 2)
2025	    fprintf (sched_dump, "requeued because ready full\n");
2026	  queue_insn (insn, 1);
2027	}
2028      else
2029	{
2030	  ready_add (ready, insn, false);
2031	  if (sched_verbose >= 2)
2032	    fprintf (sched_dump, "moving to ready without stalls\n");
2033        }
2034    }
2035  free_INSN_LIST_list (&insn_queue[q_ptr]);
2036
2037  /* If there are no ready insns, stall until one is ready and add all
2038     of the pending insns at that point to the ready list.  */
2039  if (ready->n_ready == 0)
2040    {
2041      int stalls;
2042
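      /* Scan forward one queue slot (i.e. one cycle) at a time; each
	 empty slot costs a one-cycle stall before the pending insns
	 become ready.  */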
2043      for (stalls = 1; stalls <= max_insn_queue_index; stalls++)
2044	{
2045	  if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
2046	    {
2047	      for (; link; link = XEXP (link, 1))
2048		{
2049		  insn = XEXP (link, 0);
2050		  q_size -= 1;
2051
2052		  if (sched_verbose >= 2)
2053		    fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
2054			     (*current_sched_info->print_insn) (insn, 0));
2055
2056		  ready_add (ready, insn, false);
2057		  if (sched_verbose >= 2)
2058		    fprintf (sched_dump, "moving to ready with %d stalls\n", stalls);
2059		}
2060	      free_INSN_LIST_list (&insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]);
2061
2062	      advance_one_cycle ();
2063
2064	      break;
2065	    }
2066
2067	  advance_one_cycle ();
2068	}
2069
2070      q_ptr = NEXT_Q_AFTER (q_ptr, stalls);
2071      clock_var += stalls;
2072    }
2073}
2074
/* Used by early_queue_to_ready.  Determines whether it is "ok" to
   prematurely move INSN from the queue to the ready list.  Currently,
   if a target defines the hook 'is_costly_dependence', this function
   uses the hook to check whether there exist any dependences which the
   target considers costly between INSN and other insns that have
   already been scheduled.  Dependences are checked up to Y cycles
   back, with default Y=1; the flag -fsched-stalled-insns-dep=Y allows
   controlling this value.
   (Other considerations could be taken into account instead, or in
   addition, depending on user flags and target hooks.)  */
2085
2086static bool
2087ok_for_early_queue_removal (rtx insn)
2088{
2089  int n_cycles;
2090  rtx prev_insn = last_scheduled_insn;
2091
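  /* Walk backwards over the already-scheduled insns, one dispatch group
     (delimited by a TImode insn) per cycle, and ask the target whether
     any dependence found between them and INSN is too costly.  */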
2092  if (targetm.sched.is_costly_dependence)
2093    {
2094      for (n_cycles = flag_sched_stalled_insns_dep; n_cycles; n_cycles--)
2095	{
2096	  for ( ; prev_insn; prev_insn = PREV_INSN (prev_insn))
2097	    {
2098	      int cost;
2099
2100	      if (prev_insn == current_sched_info->prev_head)
2101		{
2102		  prev_insn = NULL;
2103		  break;
2104		}
2105
2106	      if (!NOTE_P (prev_insn))
2107		{
2108		  dep_t dep;
2109
2110		  dep = sd_find_dep_between (prev_insn, insn, true);
2111
2112		  if (dep != NULL)
2113		    {
2114		      cost = dep_cost (dep);
2115
2116		      if (targetm.sched.is_costly_dependence (dep, cost,
2117				flag_sched_stalled_insns_dep - n_cycles))
2118			return false;
2119		    }
2120		}
2121
2122	      if (GET_MODE (prev_insn) == TImode) /* end of dispatch group */
2123		break;
2124	    }
2125
2126	  if (!prev_insn)
2127	    break;
2128	  prev_insn = PREV_INSN (prev_insn);
2129	}
2130    }
2131
2132  return true;
2133}
2134
2135
2136/* Remove insns from the queue, before they become "ready" with respect
2137   to FU latency considerations.  */
2138
2139static int
2140early_queue_to_ready (state_t state, struct ready_list *ready)
2141{
2142  rtx insn;
2143  rtx link;
2144  rtx next_link;
2145  rtx prev_link;
2146  bool move_to_ready;
2147  int cost;
2148  state_t temp_state = alloca (dfa_state_size);
2149  int stalls;
2150  int insns_removed = 0;
2151
2152  /*
2153     Flag '-fsched-stalled-insns=X' determines the aggressiveness of this
2154     function:
2155
2156     X == 0: There is no limit on how many queued insns can be removed
2157             prematurely.  (flag_sched_stalled_insns = -1).
2158
2159     X >= 1: Only X queued insns can be removed prematurely in each
2160	     invocation.  (flag_sched_stalled_insns = X).
2161
2162     Otherwise: Early queue removal is disabled.
2163         (flag_sched_stalled_insns = 0)
2164  */
2165
2166  if (! flag_sched_stalled_insns)
2167    return 0;
2168
2169  for (stalls = 0; stalls <= max_insn_queue_index; stalls++)
2170    {
2171      if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
2172	{
2173	  if (sched_verbose > 6)
2174	    fprintf (sched_dump, ";; look at index %d + %d\n", q_ptr, stalls);
2175
2176	  prev_link = 0;
2177	  while (link)
2178	    {
2179	      next_link = XEXP (link, 1);
2180	      insn = XEXP (link, 0);
2181	      if (insn && sched_verbose > 6)
2182		print_rtl_single (sched_dump, insn);
2183
2184	      memcpy (temp_state, state, dfa_state_size);
2185	      if (recog_memoized (insn) < 0)
		/* Use a non-negative cost to mark the insn as not ready,
		   avoiding an infinite Q->R->Q->R... cycle.  */
2188		cost = 0;
2189	      else
2190		cost = state_transition (temp_state, insn);
2191
2192	      if (sched_verbose >= 6)
2193		fprintf (sched_dump, "transition cost = %d\n", cost);
2194
2195	      move_to_ready = false;
2196	      if (cost < 0)
2197		{
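		  /* A negative transition cost means the DFA could issue
		     INSN on the current cycle; check with the target
		     whether pulling it forward is acceptable.  */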
2198		  move_to_ready = ok_for_early_queue_removal (insn);
2199		  if (move_to_ready == true)
2200		    {
2201		      /* move from Q to R */
2202		      q_size -= 1;
2203		      ready_add (ready, insn, false);
2204
2205		      if (prev_link)
2206			XEXP (prev_link, 1) = next_link;
2207		      else
2208			insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = next_link;
2209
2210		      free_INSN_LIST_node (link);
2211
2212		      if (sched_verbose >= 2)
2213			fprintf (sched_dump, ";;\t\tEarly Q-->Ready: insn %s\n",
2214				 (*current_sched_info->print_insn) (insn, 0));
2215
2216		      insns_removed++;
2217		      if (insns_removed == flag_sched_stalled_insns)
2218			/* Remove no more than flag_sched_stalled_insns insns
2219			   from Q at a time.  */
2220			return insns_removed;
2221		    }
2222		}
2223
2224	      if (move_to_ready == false)
2225		prev_link = link;
2226
2227	      link = next_link;
2228	    } /* while link */
2229	} /* if link */
2230
2231    } /* for stalls.. */
2232
2233  return insns_removed;
2234}
2235
2236
2237/* Print the ready list for debugging purposes.  Callable from debugger.  */
2238
2239static void
2240debug_ready_list (struct ready_list *ready)
2241{
2242  rtx *p;
2243  int i;
2244
2245  if (ready->n_ready == 0)
2246    {
2247      fprintf (sched_dump, "\n");
2248      return;
2249    }
2250
2251  p = ready_lastpos (ready);
2252  for (i = 0; i < ready->n_ready; i++)
2253    {
2254      fprintf (sched_dump, "  %s:%d",
2255	       (*current_sched_info->print_insn) (p[i], 0),
2256	       INSN_LUID (p[i]));
2257      if (sched_pressure_p)
2258	fprintf (sched_dump, "(cost=%d",
2259		 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (p[i]));
2260      if (INSN_TICK (p[i]) > clock_var)
2261	fprintf (sched_dump, ":delay=%d", INSN_TICK (p[i]) - clock_var);
2262      if (sched_pressure_p)
2263	fprintf (sched_dump, ")");
2264    }
2265  fprintf (sched_dump, "\n");
2266}
2267
2268/* Search INSN for REG_SAVE_NOTE notes and convert them back into insn
2269   NOTEs.  This is used for NOTE_INSN_EPILOGUE_BEG, so that sched-ebb
2270   replaces the epilogue note in the correct basic block.  */
2271void
2272reemit_notes (rtx insn)
2273{
2274  rtx note, last = insn;
2275
2276  for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
2277    {
2278      if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
2279	{
2280	  enum insn_note note_type = (enum insn_note) INTVAL (XEXP (note, 0));
2281
2282	  last = emit_note_before (note_type, last);
2283	  remove_note (insn, note);
2284	}
2285    }
2286}
2287
2288/* Move INSN.  Reemit notes if needed.  Update CFG, if needed.  */
2289static void
2290move_insn (rtx insn, rtx last, rtx nt)
2291{
2292  if (PREV_INSN (insn) != last)
2293    {
2294      basic_block bb;
2295      rtx note;
2296      int jump_p = 0;
2297
2298      bb = BLOCK_FOR_INSN (insn);
2299
2300      /* BB_HEAD is either LABEL or NOTE.  */
2301      gcc_assert (BB_HEAD (bb) != insn);
2302
2303      if (BB_END (bb) == insn)
2304	/* If this is last instruction in BB, move end marker one
2305	   instruction up.  */
2306	{
2307	  /* Jumps are always placed at the end of basic block.  */
2308	  jump_p = control_flow_insn_p (insn);
2309
2310	  gcc_assert (!jump_p
2311		      || ((common_sched_info->sched_pass_id == SCHED_RGN_PASS)
2312			  && IS_SPECULATION_BRANCHY_CHECK_P (insn))
2313		      || (common_sched_info->sched_pass_id
2314			  == SCHED_EBB_PASS));
2315
2316	  gcc_assert (BLOCK_FOR_INSN (PREV_INSN (insn)) == bb);
2317
2318	  BB_END (bb) = PREV_INSN (insn);
2319	}
2320
2321      gcc_assert (BB_END (bb) != last);
2322
2323      if (jump_p)
2324	/* We move the block note along with jump.  */
2325	{
2326	  gcc_assert (nt);
2327
2328	  note = NEXT_INSN (insn);
2329	  while (NOTE_NOT_BB_P (note) && note != nt)
2330	    note = NEXT_INSN (note);
2331
2332	  if (note != nt
2333	      && (LABEL_P (note)
2334		  || BARRIER_P (note)))
2335	    note = NEXT_INSN (note);
2336
2337	  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
2338	}
2339      else
2340	note = insn;
2341
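      /* Unlink INSN (together with its block note, when NOTE != INSN) from
	 its old position and reinsert it immediately after LAST.  */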
2342      NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (note);
2343      PREV_INSN (NEXT_INSN (note)) = PREV_INSN (insn);
2344
2345      NEXT_INSN (note) = NEXT_INSN (last);
2346      PREV_INSN (NEXT_INSN (last)) = note;
2347
2348      NEXT_INSN (last) = insn;
2349      PREV_INSN (insn) = last;
2350
2351      bb = BLOCK_FOR_INSN (last);
2352
2353      if (jump_p)
2354	{
2355	  fix_jump_move (insn);
2356
2357	  if (BLOCK_FOR_INSN (insn) != bb)
2358	    move_block_after_check (insn);
2359
2360	  gcc_assert (BB_END (bb) == last);
2361	}
2362
2363      df_insn_change_bb (insn, bb);
2364
2365      /* Update BB_END, if needed.  */
2366      if (BB_END (bb) == last)
2367	BB_END (bb) = insn;
2368    }
2369
2370  SCHED_GROUP_P (insn) = 0;
2371}
2372
2373/* Return true if scheduling INSN will finish current clock cycle.  */
2374static bool
2375insn_finishes_cycle_p (rtx insn)
2376{
2377  if (SCHED_GROUP_P (insn))
    /* After issuing INSN, the rest of the sched_group will be forced to
       issue in order.  Don't make any plans for the rest of the cycle.  */
2380    return true;
2381
2382  /* Finishing the block will, apparently, finish the cycle.  */
2383  if (current_sched_info->insn_finishes_block_p
2384      && current_sched_info->insn_finishes_block_p (insn))
2385    return true;
2386
2387  return false;
2388}
2389
/* The following structure describes an entry of the stack of choices.  */
2391struct choice_entry
2392{
2393  /* Ordinal number of the issued insn in the ready queue.  */
2394  int index;
  /* The number of remaining insns whose issue we should still try.  */
2396  int rest;
2397  /* The number of issued essential insns.  */
2398  int n;
2399  /* State after issuing the insn.  */
2400  state_t state;
2401};
2402
2403/* The following array is used to implement a stack of choices used in
2404   function max_issue.  */
2405static struct choice_entry *choice_stack;
2406
/* The following variable holds the number of essential insns issued on
   the current cycle.  An insn is essential if it changes the processor's
   state.  */
2410int cycle_issued_insns;
2411
2412/* This holds the value of the target dfa_lookahead hook.  */
2413int dfa_lookahead;
2414
/* The following variable holds the maximal number of tries for issuing
   insns during the first cycle multipass insn scheduling.  We define
   this value as constant * (DFA_LOOKAHEAD ** ISSUE_RATE).  We would not
   need this constraint if all real insns (with non-negative codes) had
   reservations, because in that case the complexity of the algorithm is
   O(DFA_LOOKAHEAD ** ISSUE_RATE).  Unfortunately, the DFA descriptions
   might be incomplete and such insns might occur.  For such
   descriptions, the complexity of the algorithm (without the constraint)
   could reach DFA_LOOKAHEAD ** N, where N is the queue length.  */
2424static int max_lookahead_tries;
2425
/* The following is the value of the hook
   `first_cycle_multipass_dfa_lookahead' at the last call of
   `max_issue'.  */
2429static int cached_first_cycle_multipass_dfa_lookahead = 0;
2430
/* The following is the value of `issue_rate' at the last call of
   `sched_init'.  */
2433static int cached_issue_rate = 0;
2434
/* The following function returns the maximal (or close to maximal) number
   of insns which can be issued on the same cycle, one of which is the
   insn with the best rank (the first insn in READY).  To do this, the
   function tries different samples of ready insns.  READY is the current
   queue `ready'.  The global array READY_TRY reflects which insns are
   already issued in this try.  MAX_POINTS is the sum of points of all
   instructions in READY.  The function stops immediately if it reaches a
   solution in which all instructions can be issued.  INDEX will contain
   the index of the best insn in READY.  The function is used only for
   first cycle multipass scheduling.
2445
2446   PRIVILEGED_N >= 0
2447
2448   This function expects recognized insns only.  All USEs,
2449   CLOBBERs, etc must be filtered elsewhere.  */
2450int
2451max_issue (struct ready_list *ready, int privileged_n, state_t state,
2452	   int *index)
2453{
2454  int n, i, all, n_ready, best, delay, tries_num, max_points;
2455  int more_issue;
2456  struct choice_entry *top;
2457  rtx insn;
2458
2459  n_ready = ready->n_ready;
2460  gcc_assert (dfa_lookahead >= 1 && privileged_n >= 0
2461	      && privileged_n <= n_ready);
2462
2463  /* Init MAX_LOOKAHEAD_TRIES.  */
2464  if (cached_first_cycle_multipass_dfa_lookahead != dfa_lookahead)
2465    {
2466      cached_first_cycle_multipass_dfa_lookahead = dfa_lookahead;
2467      max_lookahead_tries = 100;
2468      for (i = 0; i < issue_rate; i++)
2469	max_lookahead_tries *= dfa_lookahead;
2470    }
2471
2472  /* Init max_points.  */
2473  max_points = 0;
2474  more_issue = issue_rate - cycle_issued_insns;
2475
2476  /* ??? We used to assert here that we never issue more insns than issue_rate.
2477     However, some targets (e.g. MIPS/SB1) claim lower issue rate than can be
2478     achieved to get better performance.  Until these targets are fixed to use
2479     scheduler hooks to manipulate insns priority instead, the assert should
2480     be disabled.
2481
2482     gcc_assert (more_issue >= 0);  */
2483
2484  for (i = 0; i < n_ready; i++)
2485    if (!ready_try [i])
2486      {
2487	if (more_issue-- > 0)
2488	  max_points += ISSUE_POINTS (ready_element (ready, i));
2489	else
2490	  break;
2491      }
2492
2493  /* The number of the issued insns in the best solution.  */
2494  best = 0;
2495
2496  top = choice_stack;
2497
2498  /* Set initial state of the search.  */
2499  memcpy (top->state, state, dfa_state_size);
2500  top->rest = dfa_lookahead;
2501  top->n = 0;
2502
2503  /* Count the number of the insns to search among.  */
2504  for (all = i = 0; i < n_ready; i++)
2505    if (!ready_try [i])
2506      all++;
2507
2508  /* I is the index of the insn to try next.  */
2509  i = 0;
2510  tries_num = 0;
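  /* This is a depth-first search over subsets of the ready insns.  Each
     CHOICE_STACK level remembers the DFA state after tentatively issuing
     one more insn; we backtrack when the lookahead budget is exhausted or
     no further insn fits, keeping the deepest solution found so far.  */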
2511  for (;;)
2512    {
2513      if (/* If we've reached a dead end or searched enough of what we have
2514	     been asked...  */
2515	  top->rest == 0
2516	  /* Or have nothing else to try.  */
2517	  || i >= n_ready)
2518	{
2519	  /* ??? (... || i == n_ready).  */
2520	  gcc_assert (i <= n_ready);
2521
2522	  if (top == choice_stack)
2523	    break;
2524
2525	  if (best < top - choice_stack)
2526	    {
2527	      if (privileged_n)
2528		{
2529		  n = privileged_n;
		  /* Try to find an issued privileged insn.  */
2531		  while (n && !ready_try[--n]);
2532		}
2533
2534	      if (/* If all insns are equally good...  */
2535		  privileged_n == 0
2536		  /* Or a privileged insn will be issued.  */
2537		  || ready_try[n])
2538		/* Then we have a solution.  */
2539		{
2540		  best = top - choice_stack;
2541		  /* This is the index of the insn issued first in this
2542		     solution.  */
2543		  *index = choice_stack [1].index;
2544		  if (top->n == max_points || best == all)
2545		    break;
2546		}
2547	    }
2548
2549	  /* Set ready-list index to point to the last insn
2550	     ('i++' below will advance it to the next insn).  */
2551	  i = top->index;
2552
2553	  /* Backtrack.  */
2554	  ready_try [i] = 0;
2555	  top--;
2556	  memcpy (state, top->state, dfa_state_size);
2557	}
2558      else if (!ready_try [i])
2559	{
2560	  tries_num++;
2561	  if (tries_num > max_lookahead_tries)
2562	    break;
2563	  insn = ready_element (ready, i);
2564	  delay = state_transition (state, insn);
2565	  if (delay < 0)
2566	    {
2567	      if (state_dead_lock_p (state)
2568		  || insn_finishes_cycle_p (insn))
2569 		/* We won't issue any more instructions in the next
2570 		   choice_state.  */
2571		top->rest = 0;
2572	      else
2573		top->rest--;
2574
2575	      n = top->n;
2576	      if (memcmp (top->state, state, dfa_state_size) != 0)
2577		n += ISSUE_POINTS (insn);
2578
2579	      /* Advance to the next choice_entry.  */
2580	      top++;
2581	      /* Initialize it.  */
2582	      top->rest = dfa_lookahead;
2583	      top->index = i;
2584	      top->n = n;
2585	      memcpy (top->state, state, dfa_state_size);
2586
2587	      ready_try [i] = 1;
2588	      i = -1;
2589	    }
2590	}
2591
2592      /* Increase ready-list index.  */
2593      i++;
2594    }
2595
2596  /* Restore the original state of the DFA.  */
2597  memcpy (state, choice_stack->state, dfa_state_size);
2598
2599  return best;
2600}
2601
/* The following function chooses an insn from READY and modifies READY.
   It is used only for first cycle multipass scheduling.
   Return:
   -1 if the cycle should be advanced,
   0 if INSN_PTR is set to point to the desirable insn,
   1 if choose_ready () should be restarted without advancing the cycle.  */
2609static int
2610choose_ready (struct ready_list *ready, rtx *insn_ptr)
2611{
2612  int lookahead;
2613
2614  if (dbg_cnt (sched_insn) == false)
2615    {
2616      rtx insn;
2617
2618      insn = next_nonnote_insn (last_scheduled_insn);
2619
2620      if (QUEUE_INDEX (insn) == QUEUE_READY)
2621	/* INSN is in the ready_list.  */
2622	{
2623	  ready_remove_insn (insn);
2624	  *insn_ptr = insn;
2625	  return 0;
2626	}
2627
2628      /* INSN is in the queue.  Advance cycle to move it to the ready list.  */
2629      return -1;
2630    }
2631
2632  lookahead = 0;
2633
2634  if (targetm.sched.first_cycle_multipass_dfa_lookahead)
2635    lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
2636  if (lookahead <= 0 || SCHED_GROUP_P (ready_element (ready, 0))
2637      || DEBUG_INSN_P (ready_element (ready, 0)))
2638    {
2639      *insn_ptr = ready_remove_first (ready);
2640      return 0;
2641    }
2642  else
2643    {
2644      /* Try to choose the better insn.  */
2645      int index = 0, i, n;
2646      rtx insn;
2647      int try_data = 1, try_control = 1;
2648      ds_t ts;
2649
2650      insn = ready_element (ready, 0);
2651      if (INSN_CODE (insn) < 0)
2652	{
2653	  *insn_ptr = ready_remove_first (ready);
2654	  return 0;
2655	}
2656
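      /* If non-speculative alternatives of a given kind (data / control)
	 exist in the ready list, prefer them: clear TRY_DATA / TRY_CONTROL
	 so that speculative insns of that kind are filtered out below.  */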
2657      if (spec_info
2658	  && spec_info->flags & (PREFER_NON_DATA_SPEC
2659				 | PREFER_NON_CONTROL_SPEC))
2660	{
2661	  for (i = 0, n = ready->n_ready; i < n; i++)
2662	    {
2663	      rtx x;
2664	      ds_t s;
2665
2666	      x = ready_element (ready, i);
2667	      s = TODO_SPEC (x);
2668
2669	      if (spec_info->flags & PREFER_NON_DATA_SPEC
2670		  && !(s & DATA_SPEC))
2671		{
2672		  try_data = 0;
2673		  if (!(spec_info->flags & PREFER_NON_CONTROL_SPEC)
2674		      || !try_control)
2675		    break;
2676		}
2677
2678	      if (spec_info->flags & PREFER_NON_CONTROL_SPEC
2679		  && !(s & CONTROL_SPEC))
2680		{
2681		  try_control = 0;
2682		  if (!(spec_info->flags & PREFER_NON_DATA_SPEC) || !try_data)
2683		    break;
2684		}
2685	    }
2686	}
2687
2688      ts = TODO_SPEC (insn);
2689      if ((ts & SPECULATIVE)
2690	  && (((!try_data && (ts & DATA_SPEC))
2691	       || (!try_control && (ts & CONTROL_SPEC)))
2692	      || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard_spec
2693		  && !targetm.sched
2694		  .first_cycle_multipass_dfa_lookahead_guard_spec (insn))))
2695	/* Discard speculative instruction that stands first in the ready
2696	   list.  */
2697	{
2698	  change_queue_index (insn, 1);
2699	  return 1;
2700	}
2701
2702      ready_try[0] = 0;
2703
2704      for (i = 1; i < ready->n_ready; i++)
2705	{
2706	  insn = ready_element (ready, i);
2707
2708	  ready_try [i]
2709	    = ((!try_data && (TODO_SPEC (insn) & DATA_SPEC))
2710               || (!try_control && (TODO_SPEC (insn) & CONTROL_SPEC)));
2711	}
2712
2713      /* Let the target filter the search space.  */
2714      for (i = 1; i < ready->n_ready; i++)
2715	if (!ready_try[i])
2716	  {
2717	    insn = ready_element (ready, i);
2718
2719#ifdef ENABLE_CHECKING
2720	    /* If this insn is recognizable we should have already
2721	       recognized it earlier.
2722	       ??? Not very clear where this is supposed to be done.
2723	       See dep_cost_1.  */
2724	    gcc_assert (INSN_CODE (insn) >= 0
2725			|| recog_memoized (insn) < 0);
2726#endif
2727
2728	    ready_try [i]
2729	      = (/* INSN_CODE check can be omitted here as it is also done later
2730		    in max_issue ().  */
2731		 INSN_CODE (insn) < 0
2732		 || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard
2733		     && !targetm.sched.first_cycle_multipass_dfa_lookahead_guard
2734		     (insn)));
2735	  }
2736
2737      if (max_issue (ready, 1, curr_state, &index) == 0)
2738	{
2739	  *insn_ptr = ready_remove_first (ready);
2740	  if (sched_verbose >= 4)
2741	    fprintf (sched_dump, ";;\t\tChosen insn (but can't issue) : %s \n",
2742                     (*current_sched_info->print_insn) (*insn_ptr, 0));
2743	  return 0;
2744	}
2745      else
2746	{
2747	  if (sched_verbose >= 4)
2748	    fprintf (sched_dump, ";;\t\tChosen insn : %s\n",
2749		     (*current_sched_info->print_insn)
2750		     (ready_element (ready, index), 0));
2751
2752	  *insn_ptr = ready_remove (ready, index);
2753	  return 0;
2754	}
2755    }
2756}
2757
2758/* Use forward list scheduling to rearrange insns of block pointed to by
2759   TARGET_BB, possibly bringing insns from subsequent blocks in the same
2760   region.  */
2761
2762void
2763schedule_block (basic_block *target_bb)
2764{
2765  int i, first_cycle_insn_p;
2766  int can_issue_more;
2767  state_t temp_state = NULL;  /* It is used for multipass scheduling.  */
2768  int sort_p, advance, start_clock_var;
2769
2770  /* Head/tail info for this block.  */
2771  rtx prev_head = current_sched_info->prev_head;
2772  rtx next_tail = current_sched_info->next_tail;
2773  rtx head = NEXT_INSN (prev_head);
2774  rtx tail = PREV_INSN (next_tail);
2775
2776  /* We used to have code to avoid getting parameters moved from hard
2777     argument registers into pseudos.
2778
2779     However, it was removed when it proved to be of marginal benefit
2780     and caused problems because schedule_block and compute_forward_dependences
2781     had different notions of what the "head" insn was.  */
2782
2783  gcc_assert (head != tail || INSN_P (head));
2784
2785  haifa_recovery_bb_recently_added_p = false;
2786
2787  /* Debug info.  */
2788  if (sched_verbose)
2789    dump_new_block_header (0, *target_bb, head, tail);
2790
2791  state_reset (curr_state);
2792
2793  /* Clear the ready list.  */
2794  ready.first = ready.veclen - 1;
2795  ready.n_ready = 0;
2796  ready.n_debug = 0;
2797
2798  /* It is used for first cycle multipass scheduling.  */
2799  temp_state = alloca (dfa_state_size);
2800
2801  if (targetm.sched.md_init)
2802    targetm.sched.md_init (sched_dump, sched_verbose, ready.veclen);
2803
2804  /* We start inserting insns after PREV_HEAD.  */
2805  last_scheduled_insn = prev_head;
2806
2807  gcc_assert ((NOTE_P (last_scheduled_insn)
2808	       || BOUNDARY_DEBUG_INSN_P (last_scheduled_insn))
2809	      && BLOCK_FOR_INSN (last_scheduled_insn) == *target_bb);
2810
2811  /* Initialize INSN_QUEUE.  Q_SIZE is the total number of insns in the
2812     queue.  */
2813  q_ptr = 0;
2814  q_size = 0;
2815
2816  insn_queue = XALLOCAVEC (rtx, max_insn_queue_index + 1);
2817  memset (insn_queue, 0, (max_insn_queue_index + 1) * sizeof (rtx));
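  /* INSN_QUEUE is a circular buffer of insn lists indexed by Q_PTR:
     queue_insn (X, N) files X in the slot that becomes current N cycles
     from now, and queue_to_ready () drains the slot for the current
     cycle into the ready list.  */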
2818
2819  /* Start just before the beginning of time.  */
2820  clock_var = -1;
2821
  /* We need the queue and ready lists and clock_var to be initialized
     for try_ready () (which is called through init_ready_list ()).  */
2824  (*current_sched_info->init_ready_list) ();
2825
2826  /* The algorithm is O(n^2) in the number of ready insns at any given
2827     time in the worst case.  Before reload we are more likely to have
2828     big lists so truncate them to a reasonable size.  */
2829  if (!reload_completed
2830      && ready.n_ready - ready.n_debug > MAX_SCHED_READY_INSNS)
2831    {
2832      ready_sort (&ready);
2833
2834      /* Find first free-standing insn past MAX_SCHED_READY_INSNS.
2835         If there are debug insns, we know they're first.  */
2836      for (i = MAX_SCHED_READY_INSNS + ready.n_debug; i < ready.n_ready; i++)
2837	if (!SCHED_GROUP_P (ready_element (&ready, i)))
2838	  break;
2839
2840      if (sched_verbose >= 2)
2841	{
2842	  fprintf (sched_dump,
2843		   ";;\t\tReady list on entry: %d insns\n", ready.n_ready);
2844	  fprintf (sched_dump,
2845		   ";;\t\t before reload => truncated to %d insns\n", i);
2846	}
2847
      /* Delay all insns past it for 1 cycle.  If the debug counter is
	 activated, make an exception for the insn right after
	 last_scheduled_insn.  */
2851      {
2852	rtx skip_insn;
2853
2854	if (dbg_cnt (sched_insn) == false)
2855	  skip_insn = next_nonnote_insn (last_scheduled_insn);
2856	else
2857	  skip_insn = NULL_RTX;
2858
2859	while (i < ready.n_ready)
2860	  {
2861	    rtx insn;
2862
2863	    insn = ready_remove (&ready, i);
2864
2865	    if (insn != skip_insn)
2866	      queue_insn (insn, 1);
2867	  }
2868      }
2869    }
2870
2871  /* Now we can restore basic block notes and maintain precise cfg.  */
2872  restore_bb_notes (*target_bb);
2873
2874  last_clock_var = -1;
2875
2876  advance = 0;
2877
2878  sort_p = TRUE;
2879  /* Loop until all the insns in BB are scheduled.  */
2880  while ((*current_sched_info->schedule_more_p) ())
2881    {
2882      do
2883	{
2884	  start_clock_var = clock_var;
2885
2886	  clock_var++;
2887
2888	  advance_one_cycle ();
2889
2890	  /* Add to the ready list all pending insns that can be issued now.
2891	     If there are no ready insns, increment clock until one
2892	     is ready and add all pending insns at that point to the ready
2893	     list.  */
2894	  queue_to_ready (&ready);
2895
2896	  gcc_assert (ready.n_ready);
2897
2898	  if (sched_verbose >= 2)
2899	    {
2900	      fprintf (sched_dump, ";;\t\tReady list after queue_to_ready:  ");
2901	      debug_ready_list (&ready);
2902	    }
2903	  advance -= clock_var - start_clock_var;
2904	}
2905      while (advance > 0);
2906
2907      if (sort_p)
2908	{
2909	  /* Sort the ready list based on priority.  */
2910	  ready_sort (&ready);
2911
2912	  if (sched_verbose >= 2)
2913	    {
2914	      fprintf (sched_dump, ";;\t\tReady list after ready_sort:  ");
2915	      debug_ready_list (&ready);
2916	    }
2917	}
2918
      /* We don't want md sched reorder to even see debug insns, so put
	 them out right away.  */
2921      if (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
2922	{
2923	  if (control_flow_insn_p (last_scheduled_insn))
2924	    {
2925	      *target_bb = current_sched_info->advance_target_bb
2926		(*target_bb, 0);
2927
2928	      if (sched_verbose)
2929		{
2930		  rtx x;
2931
2932		  x = next_real_insn (last_scheduled_insn);
2933		  gcc_assert (x);
2934		  dump_new_block_header (1, *target_bb, x, tail);
2935		}
2936
2937	      last_scheduled_insn = bb_note (*target_bb);
2938	    }
2939
2940	  while (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
2941	    {
2942	      rtx insn = ready_remove_first (&ready);
2943	      gcc_assert (DEBUG_INSN_P (insn));
2944	      (*current_sched_info->begin_schedule_ready) (insn,
2945							   last_scheduled_insn);
2946	      move_insn (insn, last_scheduled_insn,
2947			 current_sched_info->next_tail);
2948	      last_scheduled_insn = insn;
2949	      advance = schedule_insn (insn);
2950	      gcc_assert (advance == 0);
2951	      if (ready.n_ready > 0)
2952		ready_sort (&ready);
2953	    }
2954
2955	  if (!ready.n_ready)
2956	    continue;
2957	}
2958
2959      /* Allow the target to reorder the list, typically for
2960	 better instruction bundling.  */
2961      if (sort_p && targetm.sched.reorder
2962	  && (ready.n_ready == 0
2963	      || !SCHED_GROUP_P (ready_element (&ready, 0))))
2964	can_issue_more =
2965	  targetm.sched.reorder (sched_dump, sched_verbose,
2966				 ready_lastpos (&ready),
2967				 &ready.n_ready, clock_var);
2968      else
2969	can_issue_more = issue_rate;
2970
2971      first_cycle_insn_p = 1;
2972      cycle_issued_insns = 0;
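      /* Issue insns for the current cycle: keep picking from the ready
	 list until it is empty, the issue budget (CAN_ISSUE_MORE) runs
	 out, or a chosen insn forces a stall.  */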
2973      for (;;)
2974	{
2975	  rtx insn;
2976	  int cost;
2977	  bool asm_p = false;
2978
2979	  if (sched_verbose >= 2)
2980	    {
2981	      fprintf (sched_dump, ";;\tReady list (t = %3d):  ",
2982		       clock_var);
2983	      debug_ready_list (&ready);
2984	      if (sched_pressure_p)
2985		print_curr_reg_pressure ();
2986	    }
2987
2988	  if (ready.n_ready == 0
2989	      && can_issue_more
2990	      && reload_completed)
2991	    {
2992	      /* Allow scheduling insns directly from the queue in case
2993		 there's nothing better to do (ready list is empty) but
2994		 there are still vacant dispatch slots in the current cycle.  */
2995	      if (sched_verbose >= 6)
2996		fprintf (sched_dump,";;\t\tSecond chance\n");
2997	      memcpy (temp_state, curr_state, dfa_state_size);
2998	      if (early_queue_to_ready (temp_state, &ready))
2999		ready_sort (&ready);
3000	    }
3001
3002	  if (ready.n_ready == 0
3003	      || !can_issue_more
3004	      || state_dead_lock_p (curr_state)
3005	      || !(*current_sched_info->schedule_more_p) ())
3006	    break;
3007
3008	  /* Select and remove the insn from the ready list.  */
3009	  if (sort_p)
3010	    {
3011	      int res;
3012
3013	      insn = NULL_RTX;
3014	      res = choose_ready (&ready, &insn);
3015
3016	      if (res < 0)
3017		/* Finish cycle.  */
3018		break;
3019	      if (res > 0)
3020		/* Restart choose_ready ().  */
3021		continue;
3022
3023	      gcc_assert (insn != NULL_RTX);
3024	    }
3025	  else
3026	    insn = ready_remove_first (&ready);
3027
3028	  if (sched_pressure_p && INSN_TICK (insn) > clock_var)
3029	    {
3030	      ready_add (&ready, insn, true);
3031	      advance = 1;
3032	      break;
3033	    }
3034
3035	  if (targetm.sched.dfa_new_cycle
3036	      && targetm.sched.dfa_new_cycle (sched_dump, sched_verbose,
3037					      insn, last_clock_var,
3038					      clock_var, &sort_p))
	    /* SORT_P is used by the target to override sorting
	       of the ready list.  This is needed when the target
	       has modified its internal structures expecting that
	       the insn will be issued next.  As we need the insn
	       to have the highest priority (so that it will be returned
	       by the ready_remove_first call above), we invoke
	       ready_add (&ready, insn, true).
	       But there is still one issue: INSN can later be
	       discarded by the scheduler's front end through
	       current_sched_info->can_schedule_ready_p, and hence
	       won't be issued next.  */
3050	    {
3051	      ready_add (&ready, insn, true);
3052              break;
3053	    }
3054
3055	  sort_p = TRUE;
3056	  memcpy (temp_state, curr_state, dfa_state_size);
3057	  if (recog_memoized (insn) < 0)
3058	    {
3059	      asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
3060		       || asm_noperands (PATTERN (insn)) >= 0);
3061	      if (!first_cycle_insn_p && asm_p)
		/* This is an asm insn that we are trying to issue on a cycle
		   other than the first.  Issue it on the next cycle.  */
3064		cost = 1;
3065	      else
3066		/* A USE insn, or something else we don't need to
3067		   understand.  We can't pass these directly to
3068		   state_transition because it will trigger a
3069		   fatal error for unrecognizable insns.  */
3070		cost = 0;
3071	    }
3072	  else if (sched_pressure_p)
3073	    cost = 0;
3074	  else
3075	    {
3076	      cost = state_transition (temp_state, insn);
3077	      if (cost < 0)
3078		cost = 0;
3079	      else if (cost == 0)
3080		cost = 1;
3081	    }
3082
3083	  if (cost >= 1)
3084	    {
3085	      queue_insn (insn, cost);
3086 	      if (SCHED_GROUP_P (insn))
3087 		{
3088 		  advance = cost;
3089 		  break;
3090 		}
3091
3092	      continue;
3093	    }
3094
3095	  if (current_sched_info->can_schedule_ready_p
3096	      && ! (*current_sched_info->can_schedule_ready_p) (insn))
3097	    /* We normally get here only if we don't want to move
3098	       insn from the split block.  */
3099	    {
3100	      TODO_SPEC (insn) = (TODO_SPEC (insn) & ~SPECULATIVE) | HARD_DEP;
3101	      continue;
3102	    }
3103
3104	  /* DECISION is made.  */
3105
3106          if (TODO_SPEC (insn) & SPECULATIVE)
3107            generate_recovery_code (insn);
3108
3109	  if (control_flow_insn_p (last_scheduled_insn)
	      /* This is used to switch basic blocks at the request of the
		 scheduler front-end (actually, sched-ebb.c only), in order
		 to process blocks with a single fallthru edge.  If the
		 succeeding block has a jump, that jump would be moved to the
		 end of the current bb, corrupting the CFG.  */
3115	      || current_sched_info->advance_target_bb (*target_bb, insn))
3116	    {
3117	      *target_bb = current_sched_info->advance_target_bb
3118		(*target_bb, 0);
3119
3120	      if (sched_verbose)
3121		{
3122		  rtx x;
3123
3124		  x = next_real_insn (last_scheduled_insn);
3125		  gcc_assert (x);
3126		  dump_new_block_header (1, *target_bb, x, tail);
3127		}
3128
3129	      last_scheduled_insn = bb_note (*target_bb);
3130	    }
3131
3132	  /* Update counters, etc in the scheduler's front end.  */
3133	  (*current_sched_info->begin_schedule_ready) (insn,
3134						       last_scheduled_insn);
3135
3136	  move_insn (insn, last_scheduled_insn, current_sched_info->next_tail);
3137	  reemit_notes (insn);
3138	  last_scheduled_insn = insn;
3139
3140	  if (memcmp (curr_state, temp_state, dfa_state_size) != 0)
3141            {
3142              cycle_issued_insns++;
3143              memcpy (curr_state, temp_state, dfa_state_size);
3144            }
3145
3146	  if (targetm.sched.variable_issue)
3147	    can_issue_more =
3148	      targetm.sched.variable_issue (sched_dump, sched_verbose,
3149					    insn, can_issue_more);
3150	  /* A naked CLOBBER or USE generates no instruction, so do
3151	     not count them against the issue rate.  */
3152	  else if (GET_CODE (PATTERN (insn)) != USE
3153		   && GET_CODE (PATTERN (insn)) != CLOBBER)
3154	    can_issue_more--;
3155	  advance = schedule_insn (insn);
3156
3157	  /* After issuing an asm insn we should start a new cycle.  */
3158	  if (advance == 0 && asm_p)
3159	    advance = 1;
3160	  if (advance != 0)
3161	    break;
3162
3163	  first_cycle_insn_p = 0;
3164
3165	  /* Sort the ready list based on priority.  This must be
3166	     redone here, as schedule_insn may have readied additional
3167	     insns that will not be sorted correctly.  */
3168	  if (ready.n_ready > 0)
3169	    ready_sort (&ready);
3170
3171	  /* Quickly go through debug insns such that md sched
3172	     reorder2 doesn't have to deal with debug insns.  */
3173	  if (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0))
3174	      && (*current_sched_info->schedule_more_p) ())
3175	    {
3176	      if (control_flow_insn_p (last_scheduled_insn))
3177		{
3178		  *target_bb = current_sched_info->advance_target_bb
3179		    (*target_bb, 0);
3180
3181		  if (sched_verbose)
3182		    {
3183		      rtx x;
3184
3185		      x = next_real_insn (last_scheduled_insn);
3186		      gcc_assert (x);
3187		      dump_new_block_header (1, *target_bb, x, tail);
3188		    }
3189
3190		  last_scheduled_insn = bb_note (*target_bb);
3191		}
3192
3193 	      while (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
3194		{
3195		  insn = ready_remove_first (&ready);
3196		  gcc_assert (DEBUG_INSN_P (insn));
3197		  (*current_sched_info->begin_schedule_ready)
3198		    (insn, last_scheduled_insn);
3199		  move_insn (insn, last_scheduled_insn,
3200			     current_sched_info->next_tail);
3201		  advance = schedule_insn (insn);
3202		  last_scheduled_insn = insn;
3203		  gcc_assert (advance == 0);
3204		  if (ready.n_ready > 0)
3205		    ready_sort (&ready);
3206		}
3207	    }
3208
3209	  if (targetm.sched.reorder2
3210	      && (ready.n_ready == 0
3211		  || !SCHED_GROUP_P (ready_element (&ready, 0))))
3212	    {
3213	      can_issue_more =
3214		targetm.sched.reorder2 (sched_dump, sched_verbose,
3215					ready.n_ready
3216					? ready_lastpos (&ready) : NULL,
3217					&ready.n_ready, clock_var);
3218	    }
3219	}
3220    }
3221
3222  /* Debug info.  */
3223  if (sched_verbose)
3224    {
3225      fprintf (sched_dump, ";;\tReady list (final):  ");
3226      debug_ready_list (&ready);
3227    }
3228
3229  if (current_sched_info->queue_must_finish_empty)
3230    /* Sanity check -- queue must be empty now.  Meaningless if region has
3231       multiple bbs.  */
3232    gcc_assert (!q_size && !ready.n_ready && !ready.n_debug);
3233  else
3234    {
3235      /* We must maintain QUEUE_INDEX between blocks in region.  */
3236      for (i = ready.n_ready - 1; i >= 0; i--)
3237	{
3238	  rtx x;
3239
3240	  x = ready_element (&ready, i);
3241	  QUEUE_INDEX (x) = QUEUE_NOWHERE;
3242	  TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
3243	}
3244
3245      if (q_size)
3246	for (i = 0; i <= max_insn_queue_index; i++)
3247	  {
3248	    rtx link;
3249	    for (link = insn_queue[i]; link; link = XEXP (link, 1))
3250	      {
3251		rtx x;
3252
3253		x = XEXP (link, 0);
3254		QUEUE_INDEX (x) = QUEUE_NOWHERE;
3255		TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
3256	      }
3257	    free_INSN_LIST_list (&insn_queue[i]);
3258	  }
3259    }
3260
3261  if (sched_verbose)
3262    fprintf (sched_dump, ";;   total time = %d\n", clock_var);
3263
3264  if (!current_sched_info->queue_must_finish_empty
3265      || haifa_recovery_bb_recently_added_p)
3266    {
      /* INSN_TICK (the minimum clock tick at which the insn becomes
         ready) may not be correct for insns in the subsequent blocks of
         the region.  We should use a correct value of `clock_var' or
         modify INSN_TICK.  It is better to keep the clock_var value
         equal to 0 at the start of a basic block.  Therefore we modify
         INSN_TICK here.  */
3273      fix_inter_tick (NEXT_INSN (prev_head), last_scheduled_insn);
3274    }
3275
3276  if (targetm.sched.md_finish)
3277    {
3278      targetm.sched.md_finish (sched_dump, sched_verbose);
      /* The target might have added some instructions to the scheduled
	 block in its md_finish () hook.  These new insns don't have any
	 data initialized, so to identify them we extend h_i_d so that
	 they'll get zero luids.  */
3283      sched_init_luids (NULL, NULL, NULL, NULL);
3284    }
3285
3286  if (sched_verbose)
3287    fprintf (sched_dump, ";;   new head = %d\n;;   new tail = %d\n\n",
3288	     INSN_UID (head), INSN_UID (tail));
3289
3290  /* Update head/tail boundaries.  */
3291  head = NEXT_INSN (prev_head);
3292  tail = last_scheduled_insn;
3293
3294  head = restore_other_notes (head, NULL);
3295
3296  current_sched_info->head = head;
3297  current_sched_info->tail = tail;
3298}
3299
3300/* Set_priorities: compute priority of each insn in the block.  */
3301
3302int
3303set_priorities (rtx head, rtx tail)
3304{
3305  rtx insn;
3306  int n_insn;
3307  int sched_max_insns_priority =
3308	current_sched_info->sched_max_insns_priority;
3309  rtx prev_head;
3310
3311  if (head == tail && (! INSN_P (head) || BOUNDARY_DEBUG_INSN_P (head)))
3312    gcc_unreachable ();
3313
3314  n_insn = 0;
3315
3316  prev_head = PREV_INSN (head);
3317  for (insn = tail; insn != prev_head; insn = PREV_INSN (insn))
3318    {
3319      if (!INSN_P (insn))
3320	continue;
3321
3322      n_insn++;
3323      (void) priority (insn);
3324
3325      gcc_assert (INSN_PRIORITY_KNOWN (insn));
3326
3327      sched_max_insns_priority = MAX (sched_max_insns_priority,
3328				      INSN_PRIORITY (insn));
3329    }
3330
3331  current_sched_info->sched_max_insns_priority = sched_max_insns_priority;
3332
3333  return n_insn;
3334}
3335
3336/* Set dump and sched_verbose for the desired debugging output.  If no
3337   dump-file was specified, but -fsched-verbose=N (any N), print to stderr.
3338   For -fsched-verbose=N, N>=10, print everything to stderr.  */
3339void
3340setup_sched_dump (void)
3341{
3342  sched_verbose = sched_verbose_param;
3343  if (sched_verbose_param == 0 && dump_file)
3344    sched_verbose = 1;
3345  sched_dump = ((sched_verbose_param >= 10 || !dump_file)
3346		? stderr : dump_file);
3347}
3348
3349/* Initialize some global state for the scheduler.  This function works
3350   with the common data shared between all the schedulers.  It is called
3351   from the scheduler specific initialization routine.  */
3352
3353void
3354sched_init (void)
3355{
  /* Disable speculative loads if cc0 is defined.  */
3357#ifdef HAVE_cc0
3358  flag_schedule_speculative_load = 0;
3359#endif
3360
3361  sched_pressure_p = (flag_sched_pressure && ! reload_completed
3362		      && common_sched_info->sched_pass_id == SCHED_RGN_PASS);
3363  if (sched_pressure_p)
3364    ira_setup_eliminable_regset ();
3365
3366  /* Initialize SPEC_INFO.  */
3367  if (targetm.sched.set_sched_flags)
3368    {
3369      spec_info = &spec_info_var;
3370      targetm.sched.set_sched_flags (spec_info);
3371
3372      if (spec_info->mask != 0)
3373        {
3374          spec_info->data_weakness_cutoff =
3375            (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF) * MAX_DEP_WEAK) / 100;
3376          spec_info->control_weakness_cutoff =
3377            (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF)
3378             * REG_BR_PROB_BASE) / 100;
3379        }
3380      else
3381	/* So we won't read anything accidentally.  */
3382	spec_info = NULL;
3383
3384    }
3385  else
3386    /* So we won't read anything accidentally.  */
3387    spec_info = 0;
3388
3389  /* Initialize issue_rate.  */
3390  if (targetm.sched.issue_rate)
3391    issue_rate = targetm.sched.issue_rate ();
3392  else
3393    issue_rate = 1;
3394
3395  if (cached_issue_rate != issue_rate)
3396    {
3397      cached_issue_rate = issue_rate;
3398      /* To invalidate max_lookahead_tries:  */
3399      cached_first_cycle_multipass_dfa_lookahead = 0;
3400    }
3401
3402  if (targetm.sched.first_cycle_multipass_dfa_lookahead)
3403    dfa_lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
3404  else
3405    dfa_lookahead = 0;
3406
3407  if (targetm.sched.init_dfa_pre_cycle_insn)
3408    targetm.sched.init_dfa_pre_cycle_insn ();
3409
3410  if (targetm.sched.init_dfa_post_cycle_insn)
3411    targetm.sched.init_dfa_post_cycle_insn ();
3412
3413  dfa_start ();
3414  dfa_state_size = state_size ();
3415
3416  init_alias_analysis ();
3417
3418  df_set_flags (DF_LR_RUN_DCE);
3419  df_note_add_problem ();
3420
3421  /* More problems needed for interloop dep calculation in SMS.  */
3422  if (common_sched_info->sched_pass_id == SCHED_SMS_PASS)
3423    {
3424      df_rd_add_problem ();
3425      df_chain_add_problem (DF_DU_CHAIN + DF_UD_CHAIN);
3426    }
3427
3428  df_analyze ();
3429
3430  /* Do not run DCE after reload, as this can kill nops inserted
3431     by bundling.  */
3432  if (reload_completed)
3433    df_clear_flags (DF_LR_RUN_DCE);
3434
3435  regstat_compute_calls_crossed ();
3436
3437  if (targetm.sched.md_init_global)
3438    targetm.sched.md_init_global (sched_dump, sched_verbose,
3439				  get_max_uid () + 1);
3440
3441  if (sched_pressure_p)
3442    {
3443      int i, max_regno = max_reg_num ();
3444
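      /* Set up pseudo register classes and build the regno -> cover class
	 map used by the register-pressure heuristics.  */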
3445      ira_set_pseudo_classes (sched_verbose ? sched_dump : NULL);
3446      sched_regno_cover_class
3447	= (enum reg_class *) xmalloc (max_regno * sizeof (enum reg_class));
3448      for (i = 0; i < max_regno; i++)
3449	sched_regno_cover_class[i]
3450	  = (i < FIRST_PSEUDO_REGISTER
3451	     ? ira_class_translate[REGNO_REG_CLASS (i)]
3452	     : reg_cover_class (i));
3453      curr_reg_live = BITMAP_ALLOC (NULL);
3454      saved_reg_live = BITMAP_ALLOC (NULL);
3455      region_ref_regs = BITMAP_ALLOC (NULL);
3456    }
3457
3458  curr_state = xmalloc (dfa_state_size);
3459}
3460
3461static void haifa_init_only_bb (basic_block, basic_block);
3462
3463/* Initialize data structures specific to the Haifa scheduler.  */
3464void
3465haifa_sched_init (void)
3466{
3467  setup_sched_dump ();
3468  sched_init ();
3469
3470  if (spec_info != NULL)
3471    {
3472      sched_deps_info->use_deps_list = 1;
3473      sched_deps_info->generate_spec_deps = 1;
3474    }
3475
3476  /* Initialize luids, dependency caches, target and h_i_d for the
3477     whole function.  */
3478  {
3479    bb_vec_t bbs = VEC_alloc (basic_block, heap, n_basic_blocks);
3480    basic_block bb;
3481
3482    sched_init_bbs ();
3483
3484    FOR_EACH_BB (bb)
3485      VEC_quick_push (basic_block, bbs, bb);
3486    sched_init_luids (bbs, NULL, NULL, NULL);
3487    sched_deps_init (true);
3488    sched_extend_target ();
3489    haifa_init_h_i_d (bbs, NULL, NULL, NULL);
3490
3491    VEC_free (basic_block, heap, bbs);
3492  }
3493
3494  sched_init_only_bb = haifa_init_only_bb;
3495  sched_split_block = sched_split_block_1;
3496  sched_create_empty_bb = sched_create_empty_bb_1;
3497  haifa_recovery_bb_ever_added_p = false;
3498
3499#ifdef ENABLE_CHECKING
  /* This is used preferably for finding bugs in check_cfg () itself.
     We must call sched_init_bbs () before check_cfg () because check_cfg ()
     assumes that the last insn in the last bb has a non-null successor.  */
3503  check_cfg (0, 0);
3504#endif
3505
3506  nr_begin_data = nr_begin_control = nr_be_in_data = nr_be_in_control = 0;
3507  before_recovery = 0;
3508  after_recovery = 0;
3509}
3510
3511/* Finish work with the data specific to the Haifa scheduler.  */
3512void
3513haifa_sched_finish (void)
3514{
3515  sched_create_empty_bb = NULL;
3516  sched_split_block = NULL;
3517  sched_init_only_bb = NULL;
3518
3519  if (spec_info && spec_info->dump)
3520    {
3521      char c = reload_completed ? 'a' : 'b';
3522
3523      fprintf (spec_info->dump,
3524	       ";; %s:\n", current_function_name ());
3525
3526      fprintf (spec_info->dump,
3527               ";; Procedure %cr-begin-data-spec motions == %d\n",
3528               c, nr_begin_data);
3529      fprintf (spec_info->dump,
3530               ";; Procedure %cr-be-in-data-spec motions == %d\n",
3531               c, nr_be_in_data);
3532      fprintf (spec_info->dump,
3533               ";; Procedure %cr-begin-control-spec motions == %d\n",
3534               c, nr_begin_control);
3535      fprintf (spec_info->dump,
3536               ";; Procedure %cr-be-in-control-spec motions == %d\n",
3537               c, nr_be_in_control);
3538    }
3539
  /* Finalize h_i_d, dependency caches, and luids for the whole
     function.  The target will be finalized in md_finish_global ().  */
3542  sched_deps_finish ();
3543  sched_finish_luids ();
3544  current_sched_info = NULL;
3545  sched_finish ();
3546}
3547
3548/* Free global data used during insn scheduling.  This function works with
3549   the common data shared between the schedulers.  */
3550
3551void
3552sched_finish (void)
3553{
3554  haifa_finish_h_i_d ();
3555  if (sched_pressure_p)
3556    {
3557      free (sched_regno_cover_class);
3558      BITMAP_FREE (region_ref_regs);
3559      BITMAP_FREE (saved_reg_live);
3560      BITMAP_FREE (curr_reg_live);
3561    }
3562  free (curr_state);
3563
3564  if (targetm.sched.md_finish_global)
3565    targetm.sched.md_finish_global (sched_dump, sched_verbose);
3566
3567  end_alias_analysis ();
3568
3569  regstat_free_calls_crossed ();
3570
3571  dfa_finish ();
3572
3573#ifdef ENABLE_CHECKING
  /* After reload the ia64 backend clobbers the CFG, so we can't check
     anything.  */
3575  if (!reload_completed)
3576    check_cfg (0, 0);
3577#endif
3578}
3579
3580/* Fix INSN_TICKs of the instructions in the current block as well as
3581   INSN_TICKs of their dependents.
3582   HEAD and TAIL are the begin and the end of the current scheduled block.  */
3583static void
3584fix_inter_tick (rtx head, rtx tail)
3585{
3586  /* Set of instructions with corrected INSN_TICK.  */
3587  bitmap_head processed;
  /* ??? It is doubtful whether we should assume that cycle advance happens
     on basic block boundaries.  Basically insns that are unconditionally
     ready on the start of the block are more preferable than those which
     have a one cycle dependency over an insn from the previous block.  */
3592  int next_clock = clock_var + 1;
3593
3594  bitmap_initialize (&processed, 0);
3595
  /* Iterate over the scheduled instructions and fix their INSN_TICKs and
     the INSN_TICKs of their dependent instructions, so that INSN_TICKs are
     consistent across different blocks.  */
3599  for (tail = NEXT_INSN (tail); head != tail; head = NEXT_INSN (head))
3600    {
3601      if (INSN_P (head))
3602	{
3603	  int tick;
3604	  sd_iterator_def sd_it;
3605	  dep_t dep;
3606
3607	  tick = INSN_TICK (head);
3608	  gcc_assert (tick >= MIN_TICK);
3609
	  /* Fix INSN_TICK of an instruction from the just-scheduled block.  */
3611	  if (!bitmap_bit_p (&processed, INSN_LUID (head)))
3612	    {
3613	      bitmap_set_bit (&processed, INSN_LUID (head));
3614	      tick -= next_clock;
3615
3616	      if (tick < MIN_TICK)
3617		tick = MIN_TICK;
3618
3619	      INSN_TICK (head) = tick;
3620	    }
3621
3622	  FOR_EACH_DEP (head, SD_LIST_RES_FORW, sd_it, dep)
3623	    {
3624	      rtx next;
3625
3626	      next = DEP_CON (dep);
3627	      tick = INSN_TICK (next);
3628
3629	      if (tick != INVALID_TICK
3630		  /* If NEXT has its INSN_TICK calculated, fix it.
3631		     If not - it will be properly calculated from
3632		     scratch later in fix_tick_ready.  */
3633		  && !bitmap_bit_p (&processed, INSN_LUID (next)))
3634		{
3635		  bitmap_set_bit (&processed, INSN_LUID (next));
3636		  tick -= next_clock;
3637
3638		  if (tick < MIN_TICK)
3639		    tick = MIN_TICK;
3640
3641		  if (tick > INTER_TICK (next))
3642		    INTER_TICK (next) = tick;
3643		  else
3644		    tick = INTER_TICK (next);
3645
3646		  INSN_TICK (next) = tick;
3647		}
3648	    }
3649	}
3650    }
3651  bitmap_clear (&processed);
3652}
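
/* A worked example of the rebasing above, with illustrative numbers only:
   if the just-scheduled block finished with clock_var == 7, then
   next_clock == 8, and an insn whose INSN_TICK was 10 relative to that
   block is rebased to 10 - 8 == 2 for the next block, i.e. it can issue
   two cycles after the next block starts.  Any result that would drop
   below MIN_TICK is clamped to MIN_TICK.  */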
3653
3654static int haifa_speculate_insn (rtx, ds_t, rtx *);
3655
3656/* Check if NEXT is ready to be added to the ready or queue list.
3657   If "yes", add it to the proper list.
3658   Returns:
3659      -1 - is not ready yet,
3660       0 - added to the ready list,
3661   0 < N - queued for N cycles.  */
3662int
3663try_ready (rtx next)
3664{
3665  ds_t old_ts, *ts;
3666
3667  ts = &TODO_SPEC (next);
3668  old_ts = *ts;
3669
3670  gcc_assert (!(old_ts & ~(SPECULATIVE | HARD_DEP))
3671	      && ((old_ts & HARD_DEP)
3672		  || (old_ts & SPECULATIVE)));
3673
3674  if (sd_lists_empty_p (next, SD_LIST_BACK))
3675    /* NEXT has all its dependencies resolved.  */
3676    {
3677      /* Remove HARD_DEP bit from NEXT's status.  */
3678      *ts &= ~HARD_DEP;
3679
3680      if (current_sched_info->flags & DO_SPECULATION)
3681	/* Remove all speculative bits from NEXT's status.  */
3682	*ts &= ~SPECULATIVE;
3683    }
3684  else
3685    {
3686      /* One of the NEXT's dependencies has been resolved.
3687	 Recalculate NEXT's status.  */
3688
3689      *ts &= ~SPECULATIVE & ~HARD_DEP;
3690
3691      if (sd_lists_empty_p (next, SD_LIST_HARD_BACK))
3692	/* Now we've got NEXT with speculative deps only.
3693	   1. Look at the deps to see what we have to do.
3694	   2. Check if we can do 'todo'.  */
3695	{
3696	  sd_iterator_def sd_it;
3697	  dep_t dep;
3698	  bool first_p = true;
3699
3700	  FOR_EACH_DEP (next, SD_LIST_BACK, sd_it, dep)
3701	    {
3702	      ds_t ds = DEP_STATUS (dep) & SPECULATIVE;
3703
3704	      if (DEBUG_INSN_P (DEP_PRO (dep))
3705		  && !DEBUG_INSN_P (next))
3706		continue;
3707
3708	      if (first_p)
3709		{
3710		  first_p = false;
3711
3712		  *ts = ds;
3713		}
3714	      else
3715		*ts = ds_merge (*ts, ds);
3716	    }
3717
3718	  if (ds_weak (*ts) < spec_info->data_weakness_cutoff)
3719	    /* Too few points.  */
3720	    *ts = (*ts & ~SPECULATIVE) | HARD_DEP;
3721	}
3722      else
3723	*ts |= HARD_DEP;
3724    }
3725
3726  if (*ts & HARD_DEP)
3727    gcc_assert (*ts == old_ts
3728		&& QUEUE_INDEX (next) == QUEUE_NOWHERE);
3729  else if (current_sched_info->new_ready)
3730    *ts = current_sched_info->new_ready (next, *ts);
3731
  /* * If !(old_ts & SPECULATIVE) (e.g. HARD_DEP or 0), then the insn might
     have either its original pattern or a changed (speculative) one.  This
     is due to changing ebbs in region scheduling.
     * But if (old_ts & SPECULATIVE), then we are pretty sure that the insn
     has a speculative pattern.

     We can't assert (!(*ts & HARD_DEP) || *ts == old_ts) here because
     a control-speculative NEXT could have been discarded by sched-rgn.c
     (the same case as when discarded by can_schedule_ready_p ()).  */
3741
3742  if ((*ts & SPECULATIVE)
3743      /* If (old_ts == *ts), then (old_ts & SPECULATIVE) and we don't
3744	 need to change anything.  */
3745      && *ts != old_ts)
3746    {
3747      int res;
3748      rtx new_pat;
3749
3750      gcc_assert ((*ts & SPECULATIVE) && !(*ts & ~SPECULATIVE));
3751
3752      res = haifa_speculate_insn (next, *ts, &new_pat);
3753
3754      switch (res)
3755	{
3756	case -1:
3757	  /* It would be nice to change DEP_STATUS of all dependences,
3758	     which have ((DEP_STATUS & SPECULATIVE) == *ts) to HARD_DEP,
3759	     so we won't reanalyze anything.  */
3760	  *ts = (*ts & ~SPECULATIVE) | HARD_DEP;
3761	  break;
3762
3763	case 0:
	  /* We follow the rule that every speculative insn
	     has a non-null ORIG_PAT.  */
3766	  if (!ORIG_PAT (next))
3767	    ORIG_PAT (next) = PATTERN (next);
3768	  break;
3769
3770	case 1:
3771	  if (!ORIG_PAT (next))
	    /* If we are going to overwrite the original pattern of the
	       insn, save it.  */
3774	    ORIG_PAT (next) = PATTERN (next);
3775
3776	  haifa_change_pattern (next, new_pat);
3777	  break;
3778
3779	default:
3780	  gcc_unreachable ();
3781	}
3782    }
3783
3784  /* We need to restore pattern only if (*ts == 0), because otherwise it is
3785     either correct (*ts & SPECULATIVE),
3786     or we simply don't care (*ts & HARD_DEP).  */
3787
3788  gcc_assert (!ORIG_PAT (next)
3789	      || !IS_SPECULATION_BRANCHY_CHECK_P (next));
3790
3791  if (*ts & HARD_DEP)
3792    {
3793      /* We can't assert (QUEUE_INDEX (next) == QUEUE_NOWHERE) here because
3794	 control-speculative NEXT could have been discarded by sched-rgn.c
3795	 (the same case as when discarded by can_schedule_ready_p ()).  */
3796      /*gcc_assert (QUEUE_INDEX (next) == QUEUE_NOWHERE);*/
3797
3798      change_queue_index (next, QUEUE_NOWHERE);
3799      return -1;
3800    }
3801  else if (!(*ts & BEGIN_SPEC) && ORIG_PAT (next) && !IS_SPECULATION_CHECK_P (next))
    /* We should change the pattern of every previously speculative
       instruction - we determine whether NEXT was speculative by using
       the ORIG_PAT field.  There is one exception - speculation checks
       also have a non-null ORIG_PAT, so we skip them.  */
3806    {
3807      haifa_change_pattern (next, ORIG_PAT (next));
3808      ORIG_PAT (next) = 0;
3809    }
3810
3811  if (sched_verbose >= 2)
3812    {
3813      int s = TODO_SPEC (next);
3814
3815      fprintf (sched_dump, ";;\t\tdependencies resolved: insn %s",
3816               (*current_sched_info->print_insn) (next, 0));
3817
3818      if (spec_info && spec_info->dump)
3819        {
3820          if (s & BEGIN_DATA)
3821            fprintf (spec_info->dump, "; data-spec;");
3822          if (s & BEGIN_CONTROL)
3823            fprintf (spec_info->dump, "; control-spec;");
3824          if (s & BE_IN_CONTROL)
3825            fprintf (spec_info->dump, "; in-control-spec;");
3826        }
3827
3828      fprintf (sched_dump, "\n");
3829    }
3830
3831  adjust_priority (next);
3832
3833  return fix_tick_ready (next);
3834}
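
/* An illustrative (hypothetical) caller of try_ready (), not part of the
   scheduler; it only restates the return-value contract documented above:

     int delay = try_ready (next);

     if (delay < 0)
       ;   ... NEXT still has a hard dependency and was removed from the
               ready and queue lists (QUEUE_NOWHERE).
     else if (delay == 0)
       ;   ... NEXT was added to the ready list.
     else
       ;   ... NEXT was queued and will become ready after DELAY cycles.  */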
3835
3836/* Calculate INSN_TICK of NEXT and add it to either ready or queue list.  */
3837static int
3838fix_tick_ready (rtx next)
3839{
3840  int tick, delay;
3841
3842  if (!sd_lists_empty_p (next, SD_LIST_RES_BACK))
3843    {
3844      int full_p;
3845      sd_iterator_def sd_it;
3846      dep_t dep;
3847
3848      tick = INSN_TICK (next);
      /* If TICK is not equal to INVALID_TICK, then update
	 INSN_TICK of NEXT with the most recent resolved dependence
	 cost.  Otherwise, recalculate it from scratch.  */
3852      full_p = (tick == INVALID_TICK);
3853
3854      FOR_EACH_DEP (next, SD_LIST_RES_BACK, sd_it, dep)
3855        {
3856          rtx pro = DEP_PRO (dep);
3857          int tick1;
3858
3859	  gcc_assert (INSN_TICK (pro) >= MIN_TICK);
3860
3861          tick1 = INSN_TICK (pro) + dep_cost (dep);
3862          if (tick1 > tick)
3863            tick = tick1;
3864
3865	  if (!full_p)
3866	    break;
3867        }
3868    }
3869  else
3870    tick = -1;
3871
3872  INSN_TICK (next) = tick;
3873
3874  delay = tick - clock_var;
3875  if (delay <= 0 || sched_pressure_p)
3876    delay = QUEUE_READY;
3877
3878  change_queue_index (next, delay);
3879
3880  return delay;
3881}
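
/* In effect, the computation above is

     INSN_TICK (next) = max over resolved back deps PRO -> NEXT of
                        (INSN_TICK (pro) + dep_cost (dep))

   when recomputing from scratch, or a single update with the most recently
   resolved dependence otherwise.  With illustrative numbers: for
   clock_var == 4 and two producers with ticks 5 and 7 and dep costs 2 and
   1, INSN_TICK (next) == max (5 + 2, 7 + 1) == 8, so NEXT is queued for
   8 - 4 == 4 cycles (unless register-pressure scheduling is enabled, in
   which case it goes straight to the ready list).  */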
3882
/* Move NEXT to the proper queue list with (DELAY >= 1),
   or add it to the ready list (DELAY == QUEUE_READY),
   or remove it from the ready and queue lists altogether (DELAY == QUEUE_NOWHERE).  */
3886static void
3887change_queue_index (rtx next, int delay)
3888{
3889  int i = QUEUE_INDEX (next);
3890
3891  gcc_assert (QUEUE_NOWHERE <= delay && delay <= max_insn_queue_index
3892	      && delay != 0);
3893  gcc_assert (i != QUEUE_SCHEDULED);
3894
3895  if ((delay > 0 && NEXT_Q_AFTER (q_ptr, delay) == i)
3896      || (delay < 0 && delay == i))
3897    /* We have nothing to do.  */
3898    return;
3899
3900  /* Remove NEXT from wherever it is now.  */
3901  if (i == QUEUE_READY)
3902    ready_remove_insn (next);
3903  else if (i >= 0)
3904    queue_remove (next);
3905
3906  /* Add it to the proper place.  */
3907  if (delay == QUEUE_READY)
3908    ready_add (readyp, next, false);
3909  else if (delay >= 1)
3910    queue_insn (next, delay);
3911
3912  if (sched_verbose >= 2)
3913    {
3914      fprintf (sched_dump, ";;\t\ttick updated: insn %s",
3915	       (*current_sched_info->print_insn) (next, 0));
3916
3917      if (delay == QUEUE_READY)
3918	fprintf (sched_dump, " into ready\n");
3919      else if (delay >= 1)
3920	fprintf (sched_dump, " into queue with cost=%d\n", delay);
3921      else
3922	fprintf (sched_dump, " removed from ready or queue lists\n");
3923    }
3924}
3925
3926static int sched_ready_n_insns = -1;
3927
3928/* Initialize per region data structures.  */
3929void
3930sched_extend_ready_list (int new_sched_ready_n_insns)
3931{
3932  int i;
3933
3934  if (sched_ready_n_insns == -1)
3935    /* At the first call we need to initialize one more choice_stack
3936       entry.  */
3937    {
3938      i = 0;
3939      sched_ready_n_insns = 0;
3940    }
3941  else
3942    i = sched_ready_n_insns + 1;
3943
3944  ready.veclen = new_sched_ready_n_insns + issue_rate;
3945  ready.vec = XRESIZEVEC (rtx, ready.vec, ready.veclen);
3946
3947  gcc_assert (new_sched_ready_n_insns >= sched_ready_n_insns);
3948
3949  ready_try = (char *) xrecalloc (ready_try, new_sched_ready_n_insns,
3950                                  sched_ready_n_insns, sizeof (*ready_try));
3951
3952  /* We allocate +1 element to save initial state in the choice_stack[0]
3953     entry.  */
3954  choice_stack = XRESIZEVEC (struct choice_entry, choice_stack,
3955			     new_sched_ready_n_insns + 1);
3956
3957  for (; i <= new_sched_ready_n_insns; i++)
3958    choice_stack[i].state = xmalloc (dfa_state_size);
3959
3960  sched_ready_n_insns = new_sched_ready_n_insns;
3961}
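
/* A sizing sketch with illustrative numbers: for
   new_sched_ready_n_insns == 5 and issue_rate == 2 the ready vector gets
   5 + 2 == 7 slots, ready_try is (re)allocated and cleared for 5 entries,
   and choice_stack gets 5 + 1 == 6 entries so that choice_stack[0] can
   hold the initial DFA state.  */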
3962
3963/* Free per region data structures.  */
3964void
3965sched_finish_ready_list (void)
3966{
3967  int i;
3968
3969  free (ready.vec);
3970  ready.vec = NULL;
3971  ready.veclen = 0;
3972
3973  free (ready_try);
3974  ready_try = NULL;
3975
3976  for (i = 0; i <= sched_ready_n_insns; i++)
3977    free (choice_stack [i].state);
3978  free (choice_stack);
3979  choice_stack = NULL;
3980
3981  sched_ready_n_insns = -1;
3982}
3983
3984static int
3985haifa_luid_for_non_insn (rtx x)
3986{
3987  gcc_assert (NOTE_P (x) || LABEL_P (x));
3988
3989  return 0;
3990}
3991
3992/* Generates recovery code for INSN.  */
3993static void
3994generate_recovery_code (rtx insn)
3995{
3996  if (TODO_SPEC (insn) & BEGIN_SPEC)
3997    begin_speculative_block (insn);
3998
  /* Here we have an insn with no dependencies on
     instructions other than CHECK_SPEC ones.  */
4001
4002  if (TODO_SPEC (insn) & BE_IN_SPEC)
4003    add_to_speculative_block (insn);
4004}
4005
/* Helper function.
   Try to add speculative dependencies of type FS between TWIN and the
   consumers of INSN's forward dependencies.  */
4009static void
4010process_insn_forw_deps_be_in_spec (rtx insn, rtx twin, ds_t fs)
4011{
4012  sd_iterator_def sd_it;
4013  dep_t dep;
4014
4015  FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
4016    {
4017      ds_t ds;
4018      rtx consumer;
4019
4020      consumer = DEP_CON (dep);
4021
4022      ds = DEP_STATUS (dep);
4023
4024      if (/* If we want to create speculative dep.  */
4025	  fs
4026	  /* And we can do that because this is a true dep.  */
4027	  && (ds & DEP_TYPES) == DEP_TRUE)
4028	{
4029	  gcc_assert (!(ds & BE_IN_SPEC));
4030
4031	  if (/* If this dep can be overcome with 'begin speculation'.  */
4032	      ds & BEGIN_SPEC)
4033	    /* Then we have a choice: keep the dep 'begin speculative'
4034	       or transform it into 'be in speculative'.  */
4035	    {
	      if (/* In try_ready we assert that once an insn became ready
		     it can be removed from the ready (or queue) list only
		     due to a backend decision.  Hence we can't let the
		     probability of the speculative dep decrease.  */
4040		  ds_weak (ds) <= ds_weak (fs))
4041		{
4042		  ds_t new_ds;
4043
4044		  new_ds = (ds & ~BEGIN_SPEC) | fs;
4045
4046		  if (/* consumer can 'be in speculative'.  */
4047		      sched_insn_is_legitimate_for_speculation_p (consumer,
4048								  new_ds))
4049		    /* Transform it to be in speculative.  */
4050		    ds = new_ds;
4051		}
4052	    }
4053	  else
4054	    /* Mark the dep as 'be in speculative'.  */
4055	    ds |= fs;
4056	}
4057
4058      {
4059	dep_def _new_dep, *new_dep = &_new_dep;
4060
4061	init_dep_1 (new_dep, twin, consumer, DEP_TYPE (dep), ds);
4062	sd_add_dep (new_dep, false);
4063      }
4064    }
4065}
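
/* Summarizing the transformation above: when FS is nonzero and the forward
   dependence INSN -> CONSUMER is a true dependence with status DS, the
   dependence created for TWIN -> CONSUMER gets the status

     (DS & ~BEGIN_SPEC) | FS   if DS has a BEGIN_SPEC bit set,
                               ds_weak (DS) <= ds_weak (FS), and CONSUMER
                               may legitimately "be in speculative";
     DS | FS                   if DS has no BEGIN_SPEC bit set;
     DS                        otherwise.

   Non-true dependences, and all dependences when FS == 0, are copied to
   TWIN with DS unchanged.  */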
4066
4067/* Generates recovery code for BEGIN speculative INSN.  */
4068static void
4069begin_speculative_block (rtx insn)
4070{
4071  if (TODO_SPEC (insn) & BEGIN_DATA)
4072    nr_begin_data++;
4073  if (TODO_SPEC (insn) & BEGIN_CONTROL)
4074    nr_begin_control++;
4075
4076  create_check_block_twin (insn, false);
4077
4078  TODO_SPEC (insn) &= ~BEGIN_SPEC;
4079}
4080
4081static void haifa_init_insn (rtx);
4082
4083/* Generates recovery code for BE_IN speculative INSN.  */
4084static void
4085add_to_speculative_block (rtx insn)
4086{
4087  ds_t ts;
4088  sd_iterator_def sd_it;
4089  dep_t dep;
4090  rtx twins = NULL;
4091  rtx_vec_t priorities_roots;
4092
4093  ts = TODO_SPEC (insn);
4094  gcc_assert (!(ts & ~BE_IN_SPEC));
4095
4096  if (ts & BE_IN_DATA)
4097    nr_be_in_data++;
4098  if (ts & BE_IN_CONTROL)
4099    nr_be_in_control++;
4100
4101  TODO_SPEC (insn) &= ~BE_IN_SPEC;
4102  gcc_assert (!TODO_SPEC (insn));
4103
4104  DONE_SPEC (insn) |= ts;
4105
4106  /* First we convert all simple checks to branchy.  */
4107  for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
4108       sd_iterator_cond (&sd_it, &dep);)
4109    {
4110      rtx check = DEP_PRO (dep);
4111
4112      if (IS_SPECULATION_SIMPLE_CHECK_P (check))
4113	{
4114	  create_check_block_twin (check, true);
4115
4116	  /* Restart search.  */
4117	  sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
4118	}
4119      else
4120	/* Continue search.  */
4121	sd_iterator_next (&sd_it);
4122    }
4123
4124  priorities_roots = NULL;
4125  clear_priorities (insn, &priorities_roots);
4126
4127  while (1)
4128    {
4129      rtx check, twin;
4130      basic_block rec;
4131
4132      /* Get the first backward dependency of INSN.  */
4133      sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
4134      if (!sd_iterator_cond (&sd_it, &dep))
4135	/* INSN has no backward dependencies left.  */
4136	break;
4137
4138      gcc_assert ((DEP_STATUS (dep) & BEGIN_SPEC) == 0
4139		  && (DEP_STATUS (dep) & BE_IN_SPEC) != 0
4140		  && (DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);
4141
4142      check = DEP_PRO (dep);
4143
4144      gcc_assert (!IS_SPECULATION_CHECK_P (check) && !ORIG_PAT (check)
4145		  && QUEUE_INDEX (check) == QUEUE_NOWHERE);
4146
4147      rec = BLOCK_FOR_INSN (check);
4148
4149      twin = emit_insn_before (copy_insn (PATTERN (insn)), BB_END (rec));
4150      haifa_init_insn (twin);
4151
4152      sd_copy_back_deps (twin, insn, true);
4153
4154      if (sched_verbose && spec_info->dump)
4155        /* INSN_BB (insn) isn't determined for twin insns yet.
4156           So we can't use current_sched_info->print_insn.  */
4157        fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
4158                 INSN_UID (twin), rec->index);
4159
4160      twins = alloc_INSN_LIST (twin, twins);
4161
4162      /* Add dependences between TWIN and all appropriate
4163	 instructions from REC.  */
4164      FOR_EACH_DEP (insn, SD_LIST_SPEC_BACK, sd_it, dep)
4165	{
4166	  rtx pro = DEP_PRO (dep);
4167
4168	  gcc_assert (DEP_TYPE (dep) == REG_DEP_TRUE);
4169
4170	  /* INSN might have dependencies from the instructions from
4171	     several recovery blocks.  At this iteration we process those
4172	     producers that reside in REC.  */
4173	  if (BLOCK_FOR_INSN (pro) == rec)
4174	    {
4175	      dep_def _new_dep, *new_dep = &_new_dep;
4176
4177	      init_dep (new_dep, pro, twin, REG_DEP_TRUE);
4178	      sd_add_dep (new_dep, false);
4179	    }
4180	}
4181
4182      process_insn_forw_deps_be_in_spec (insn, twin, ts);
4183
4184      /* Remove all dependencies between INSN and insns in REC.  */
4185      for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
4186	   sd_iterator_cond (&sd_it, &dep);)
4187	{
4188	  rtx pro = DEP_PRO (dep);
4189
4190	  if (BLOCK_FOR_INSN (pro) == rec)
4191	    sd_delete_dep (sd_it);
4192	  else
4193	    sd_iterator_next (&sd_it);
4194	}
4195    }
4196
4197  /* We couldn't have added the dependencies between INSN and TWINS earlier
4198     because that would make TWINS appear in the INSN_BACK_DEPS (INSN).  */
4199  while (twins)
4200    {
4201      rtx twin;
4202
4203      twin = XEXP (twins, 0);
4204
4205      {
4206	dep_def _new_dep, *new_dep = &_new_dep;
4207
4208	init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
4209	sd_add_dep (new_dep, false);
4210      }
4211
4212      twin = XEXP (twins, 1);
4213      free_INSN_LIST_node (twins);
4214      twins = twin;
4215    }
4216
4217  calc_priorities (priorities_roots);
4218  VEC_free (rtx, heap, priorities_roots);
4219}
4220
/* Extend the array pointed to by P and zero-fill only the new part.  */
4222void *
4223xrecalloc (void *p, size_t new_nmemb, size_t old_nmemb, size_t size)
4224{
4225  gcc_assert (new_nmemb >= old_nmemb);
4226  p = XRESIZEVAR (void, p, new_nmemb * size);
4227  memset (((char *) p) + old_nmemb * size, 0, (new_nmemb - old_nmemb) * size);
4228  return p;
4229}
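
/* A minimal (hypothetical) usage sketch, not taken from this pass: grow a
   zero-initialized scratch buffer from 4 to 8 elements, keeping the first
   4 values and clearing the new 4:

     char *buf = (char *) xcalloc (4, sizeof (*buf));
     buf = (char *) xrecalloc (buf, 8, 4, sizeof (*buf));
     ...
     free (buf);  */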
4230
4231/* Helper function.
4232   Find fallthru edge from PRED.  */
4233edge
4234find_fallthru_edge (basic_block pred)
4235{
4236  edge e;
4237  edge_iterator ei;
4238  basic_block succ;
4239
4240  succ = pred->next_bb;
4241  gcc_assert (succ->prev_bb == pred);
4242
4243  if (EDGE_COUNT (pred->succs) <= EDGE_COUNT (succ->preds))
4244    {
4245      FOR_EACH_EDGE (e, ei, pred->succs)
4246	if (e->flags & EDGE_FALLTHRU)
4247	  {
4248	    gcc_assert (e->dest == succ);
4249	    return e;
4250	  }
4251    }
4252  else
4253    {
4254      FOR_EACH_EDGE (e, ei, succ->preds)
4255	if (e->flags & EDGE_FALLTHRU)
4256	  {
4257	    gcc_assert (e->src == pred);
4258	    return e;
4259	  }
4260    }
4261
4262  return NULL;
4263}
4264
4265/* Extend per basic block data structures.  */
4266static void
4267sched_extend_bb (void)
4268{
4269  rtx insn;
4270
4271  /* The following is done to keep current_sched_info->next_tail non null.  */
4272  insn = BB_END (EXIT_BLOCK_PTR->prev_bb);
4273  if (NEXT_INSN (insn) == 0
4274      || (!NOTE_P (insn)
4275	  && !LABEL_P (insn)
4276	  /* Don't emit a NOTE if it would end up before a BARRIER.  */
4277	  && !BARRIER_P (NEXT_INSN (insn))))
4278    {
4279      rtx note = emit_note_after (NOTE_INSN_DELETED, insn);
      /* Make the note appear outside any BB.  */
4281      set_block_for_insn (note, NULL);
4282      BB_END (EXIT_BLOCK_PTR->prev_bb) = insn;
4283    }
4284}
4285
4286/* Init per basic block data structures.  */
4287void
4288sched_init_bbs (void)
4289{
4290  sched_extend_bb ();
4291}
4292
4293/* Initialize BEFORE_RECOVERY variable.  */
4294static void
4295init_before_recovery (basic_block *before_recovery_ptr)
4296{
4297  basic_block last;
4298  edge e;
4299
4300  last = EXIT_BLOCK_PTR->prev_bb;
4301  e = find_fallthru_edge (last);
4302
4303  if (e)
4304    {
      /* We create two basic blocks:
         1. A single-instruction block, inserted right after E->SRC,
            which jumps to
         2. An empty block placed right before EXIT_BLOCK.
         Recovery blocks will be emitted between these two blocks.  */
4310
4311      basic_block single, empty;
4312      rtx x, label;
4313
4314      /* If the fallthrough edge to exit we've found is from the block we've
4315	 created before, don't do anything more.  */
4316      if (last == after_recovery)
4317	return;
4318
4319      adding_bb_to_current_region_p = false;
4320
4321      single = sched_create_empty_bb (last);
4322      empty = sched_create_empty_bb (single);
4323
4324      /* Add new blocks to the root loop.  */
4325      if (current_loops != NULL)
4326	{
4327	  add_bb_to_loop (single, VEC_index (loop_p, current_loops->larray, 0));
4328	  add_bb_to_loop (empty, VEC_index (loop_p, current_loops->larray, 0));
4329	}
4330
4331      single->count = last->count;
4332      empty->count = last->count;
4333      single->frequency = last->frequency;
4334      empty->frequency = last->frequency;
4335      BB_COPY_PARTITION (single, last);
4336      BB_COPY_PARTITION (empty, last);
4337
4338      redirect_edge_succ (e, single);
4339      make_single_succ_edge (single, empty, 0);
4340      make_single_succ_edge (empty, EXIT_BLOCK_PTR,
4341			     EDGE_FALLTHRU | EDGE_CAN_FALLTHRU);
4342
4343      label = block_label (empty);
4344      x = emit_jump_insn_after (gen_jump (label), BB_END (single));
4345      JUMP_LABEL (x) = label;
4346      LABEL_NUSES (label)++;
4347      haifa_init_insn (x);
4348
4349      emit_barrier_after (x);
4350
4351      sched_init_only_bb (empty, NULL);
4352      sched_init_only_bb (single, NULL);
4353      sched_extend_bb ();
4354
4355      adding_bb_to_current_region_p = true;
4356      before_recovery = single;
4357      after_recovery = empty;
4358
4359      if (before_recovery_ptr)
4360        *before_recovery_ptr = before_recovery;
4361
4362      if (sched_verbose >= 2 && spec_info->dump)
4363        fprintf (spec_info->dump,
4364		 ";;\t\tFixed fallthru to EXIT : %d->>%d->%d->>EXIT\n",
4365                 last->index, single->index, empty->index);
4366    }
4367  else
4368    before_recovery = last;
4369}
4370
4371/* Returns new recovery block.  */
4372basic_block
4373sched_create_recovery_block (basic_block *before_recovery_ptr)
4374{
4375  rtx label;
4376  rtx barrier;
4377  basic_block rec;
4378
4379  haifa_recovery_bb_recently_added_p = true;
4380  haifa_recovery_bb_ever_added_p = true;
4381
4382  init_before_recovery (before_recovery_ptr);
4383
4384  barrier = get_last_bb_insn (before_recovery);
4385  gcc_assert (BARRIER_P (barrier));
4386
4387  label = emit_label_after (gen_label_rtx (), barrier);
4388
4389  rec = create_basic_block (label, label, before_recovery);
4390
4391  /* A recovery block always ends with an unconditional jump.  */
4392  emit_barrier_after (BB_END (rec));
4393
4394  if (BB_PARTITION (before_recovery) != BB_UNPARTITIONED)
4395    BB_SET_PARTITION (rec, BB_COLD_PARTITION);
4396
4397  if (sched_verbose && spec_info->dump)
4398    fprintf (spec_info->dump, ";;\t\tGenerated recovery block rec%d\n",
4399             rec->index);
4400
4401  return rec;
4402}
4403
4404/* Create edges: FIRST_BB -> REC; FIRST_BB -> SECOND_BB; REC -> SECOND_BB
4405   and emit necessary jumps.  */
4406void
4407sched_create_recovery_edges (basic_block first_bb, basic_block rec,
4408			     basic_block second_bb)
4409{
4410  rtx label;
4411  rtx jump;
4412  int edge_flags;
4413
4414  /* This is fixing of incoming edge.  */
4415  /* ??? Which other flags should be specified?  */
4416  if (BB_PARTITION (first_bb) != BB_PARTITION (rec))
    /* The partition types can only be the same if both are "unpartitioned".  */
4418    edge_flags = EDGE_CROSSING;
4419  else
4420    edge_flags = 0;
4421
4422  make_edge (first_bb, rec, edge_flags);
4423  label = block_label (second_bb);
4424  jump = emit_jump_insn_after (gen_jump (label), BB_END (rec));
4425  JUMP_LABEL (jump) = label;
4426  LABEL_NUSES (label)++;
4427
4428  if (BB_PARTITION (second_bb) != BB_PARTITION (rec))
    /* The partition types can only be the same if both are "unpartitioned".  */
4430    {
4431      /* Rewritten from cfgrtl.c.  */
4432      if (flag_reorder_blocks_and_partition
4433	  && targetm.have_named_sections)
4434	{
4435	  /* We don't need the same note for the check because
4436	     any_condjump_p (check) == true.  */
4437	  add_reg_note (jump, REG_CROSSING_JUMP, NULL_RTX);
4438	}
4439      edge_flags = EDGE_CROSSING;
4440    }
4441  else
4442    edge_flags = 0;
4443
4444  make_single_succ_edge (rec, second_bb, edge_flags);
4445  if (dom_info_available_p (CDI_DOMINATORS))
4446    set_immediate_dominator (CDI_DOMINATORS, rec, first_bb);
4447}
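
/* The resulting control flow around a branchy speculation check, sketched
   for illustration only:

     first_bb ------------> rec
        |                    |
        v                    v
     second_bb <-------------+

   FIRST_BB ends in the check, which conditionally jumps to REC; REC ends
   in an unconditional jump to SECOND_BB, the block holding the code that
   followed the check.  */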
4448
/* This function creates recovery code for INSN.  If MUTATE_P is nonzero,
   INSN is a simple check that should be converted to a branchy one.  */
4451static void
4452create_check_block_twin (rtx insn, bool mutate_p)
4453{
4454  basic_block rec;
4455  rtx label, check, twin;
4456  ds_t fs;
4457  sd_iterator_def sd_it;
4458  dep_t dep;
4459  dep_def _new_dep, *new_dep = &_new_dep;
4460  ds_t todo_spec;
4461
4462  gcc_assert (ORIG_PAT (insn) != NULL_RTX);
4463
4464  if (!mutate_p)
4465    todo_spec = TODO_SPEC (insn);
4466  else
4467    {
4468      gcc_assert (IS_SPECULATION_SIMPLE_CHECK_P (insn)
4469		  && (TODO_SPEC (insn) & SPECULATIVE) == 0);
4470
4471      todo_spec = CHECK_SPEC (insn);
4472    }
4473
4474  todo_spec &= SPECULATIVE;
4475
4476  /* Create recovery block.  */
4477  if (mutate_p || targetm.sched.needs_block_p (todo_spec))
4478    {
4479      rec = sched_create_recovery_block (NULL);
4480      label = BB_HEAD (rec);
4481    }
4482  else
4483    {
4484      rec = EXIT_BLOCK_PTR;
4485      label = NULL_RTX;
4486    }
4487
4488  /* Emit CHECK.  */
4489  check = targetm.sched.gen_spec_check (insn, label, todo_spec);
4490
4491  if (rec != EXIT_BLOCK_PTR)
4492    {
      /* To have mem_reg alive at the beginning of second_bb,
	 we emit the check BEFORE insn, so that after splitting,
	 insn will be at the beginning of second_bb, which will
	 provide us with the correct liveness information.  */
4497      check = emit_jump_insn_before (check, insn);
4498      JUMP_LABEL (check) = label;
4499      LABEL_NUSES (label)++;
4500    }
4501  else
4502    check = emit_insn_before (check, insn);
4503
4504  /* Extend data structures.  */
4505  haifa_init_insn (check);
4506
4507  /* CHECK is being added to current region.  Extend ready list.  */
4508  gcc_assert (sched_ready_n_insns != -1);
4509  sched_extend_ready_list (sched_ready_n_insns + 1);
4510
4511  if (current_sched_info->add_remove_insn)
4512    current_sched_info->add_remove_insn (insn, 0);
4513
4514  RECOVERY_BLOCK (check) = rec;
4515
4516  if (sched_verbose && spec_info->dump)
4517    fprintf (spec_info->dump, ";;\t\tGenerated check insn : %s\n",
4518             (*current_sched_info->print_insn) (check, 0));
4519
4520  gcc_assert (ORIG_PAT (insn));
4521
4522  /* Initialize TWIN (twin is a duplicate of original instruction
4523     in the recovery block).  */
4524  if (rec != EXIT_BLOCK_PTR)
4525    {
4526      sd_iterator_def sd_it;
4527      dep_t dep;
4528
4529      FOR_EACH_DEP (insn, SD_LIST_RES_BACK, sd_it, dep)
4530	if ((DEP_STATUS (dep) & DEP_OUTPUT) != 0)
4531	  {
4532	    struct _dep _dep2, *dep2 = &_dep2;
4533
4534	    init_dep (dep2, DEP_PRO (dep), check, REG_DEP_TRUE);
4535
4536	    sd_add_dep (dep2, true);
4537	  }
4538
4539      twin = emit_insn_after (ORIG_PAT (insn), BB_END (rec));
4540      haifa_init_insn (twin);
4541
4542      if (sched_verbose && spec_info->dump)
4543	/* INSN_BB (insn) isn't determined for twin insns yet.
4544	   So we can't use current_sched_info->print_insn.  */
4545	fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
4546		 INSN_UID (twin), rec->index);
4547    }
4548  else
4549    {
4550      ORIG_PAT (check) = ORIG_PAT (insn);
4551      HAS_INTERNAL_DEP (check) = 1;
4552      twin = check;
4553      /* ??? We probably should change all OUTPUT dependencies to
4554	 (TRUE | OUTPUT).  */
4555    }
4556
4557  /* Copy all resolved back dependencies of INSN to TWIN.  This will
4558     provide correct value for INSN_TICK (TWIN).  */
4559  sd_copy_back_deps (twin, insn, true);
4560
4561  if (rec != EXIT_BLOCK_PTR)
4562    /* In case of branchy check, fix CFG.  */
4563    {
4564      basic_block first_bb, second_bb;
4565      rtx jump;
4566
4567      first_bb = BLOCK_FOR_INSN (check);
4568      second_bb = sched_split_block (first_bb, check);
4569
4570      sched_create_recovery_edges (first_bb, rec, second_bb);
4571
4572      sched_init_only_bb (second_bb, first_bb);
4573      sched_init_only_bb (rec, EXIT_BLOCK_PTR);
4574
4575      jump = BB_END (rec);
4576      haifa_init_insn (jump);
4577    }
4578
4579  /* Move backward dependences from INSN to CHECK and
4580     move forward dependences from INSN to TWIN.  */
4581
4582  /* First, create dependencies between INSN's producers and CHECK & TWIN.  */
4583  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
4584    {
4585      rtx pro = DEP_PRO (dep);
4586      ds_t ds;
4587
4588      /* If BEGIN_DATA: [insn ~~TRUE~~> producer]:
4589	 check --TRUE--> producer  ??? or ANTI ???
4590	 twin  --TRUE--> producer
4591	 twin  --ANTI--> check
4592
4593	 If BEGIN_CONTROL: [insn ~~ANTI~~> producer]:
4594	 check --ANTI--> producer
4595	 twin  --ANTI--> producer
4596	 twin  --ANTI--> check
4597
4598	 If BE_IN_SPEC: [insn ~~TRUE~~> producer]:
4599	 check ~~TRUE~~> producer
4600	 twin  ~~TRUE~~> producer
4601	 twin  --ANTI--> check  */
4602
4603      ds = DEP_STATUS (dep);
4604
4605      if (ds & BEGIN_SPEC)
4606	{
4607	  gcc_assert (!mutate_p);
4608	  ds &= ~BEGIN_SPEC;
4609	}
4610
4611      init_dep_1 (new_dep, pro, check, DEP_TYPE (dep), ds);
4612      sd_add_dep (new_dep, false);
4613
4614      if (rec != EXIT_BLOCK_PTR)
4615	{
4616	  DEP_CON (new_dep) = twin;
4617	  sd_add_dep (new_dep, false);
4618	}
4619    }
4620
4621  /* Second, remove backward dependencies of INSN.  */
4622  for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
4623       sd_iterator_cond (&sd_it, &dep);)
4624    {
4625      if ((DEP_STATUS (dep) & BEGIN_SPEC)
4626	  || mutate_p)
4627	/* We can delete this dep because we overcome it with
4628	   BEGIN_SPECULATION.  */
4629	sd_delete_dep (sd_it);
4630      else
4631	sd_iterator_next (&sd_it);
4632    }
4633
4634  /* Future Speculations.  Determine what BE_IN speculations will be like.  */
4635  fs = 0;
4636
4637  /* Fields (DONE_SPEC (x) & BEGIN_SPEC) and CHECK_SPEC (x) are set only
4638     here.  */
4639
4640  gcc_assert (!DONE_SPEC (insn));
4641
4642  if (!mutate_p)
4643    {
4644      ds_t ts = TODO_SPEC (insn);
4645
4646      DONE_SPEC (insn) = ts & BEGIN_SPEC;
4647      CHECK_SPEC (check) = ts & BEGIN_SPEC;
4648
4649      /* Luckiness of future speculations solely depends upon initial
4650	 BEGIN speculation.  */
4651      if (ts & BEGIN_DATA)
4652	fs = set_dep_weak (fs, BE_IN_DATA, get_dep_weak (ts, BEGIN_DATA));
4653      if (ts & BEGIN_CONTROL)
4654	fs = set_dep_weak (fs, BE_IN_CONTROL,
4655			   get_dep_weak (ts, BEGIN_CONTROL));
4656    }
4657  else
4658    CHECK_SPEC (check) = CHECK_SPEC (insn);
4659
4660  /* Future speculations: call the helper.  */
4661  process_insn_forw_deps_be_in_spec (insn, twin, fs);
4662
4663  if (rec != EXIT_BLOCK_PTR)
4664    {
      /* Which types of dependencies we should use here is,
	 generally, a machine-dependent question...  But, for now,
	 it is not.  */
4668
4669      if (!mutate_p)
4670	{
4671	  init_dep (new_dep, insn, check, REG_DEP_TRUE);
4672	  sd_add_dep (new_dep, false);
4673
4674	  init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
4675	  sd_add_dep (new_dep, false);
4676	}
4677      else
4678	{
4679	  if (spec_info->dump)
4680	    fprintf (spec_info->dump, ";;\t\tRemoved simple check : %s\n",
4681		     (*current_sched_info->print_insn) (insn, 0));
4682
4683	  /* Remove all dependencies of the INSN.  */
4684	  {
4685	    sd_it = sd_iterator_start (insn, (SD_LIST_FORW
4686					      | SD_LIST_BACK
4687					      | SD_LIST_RES_BACK));
4688	    while (sd_iterator_cond (&sd_it, &dep))
4689	      sd_delete_dep (sd_it);
4690	  }
4691
4692	  /* If former check (INSN) already was moved to the ready (or queue)
4693	     list, add new check (CHECK) there too.  */
4694	  if (QUEUE_INDEX (insn) != QUEUE_NOWHERE)
4695	    try_ready (check);
4696
4697	  /* Remove old check from instruction stream and free its
4698	     data.  */
4699	  sched_remove_insn (insn);
4700	}
4701
4702      init_dep (new_dep, check, twin, REG_DEP_ANTI);
4703      sd_add_dep (new_dep, false);
4704    }
4705  else
4706    {
4707      init_dep_1 (new_dep, insn, check, REG_DEP_TRUE, DEP_TRUE | DEP_OUTPUT);
4708      sd_add_dep (new_dep, false);
4709    }
4710
4711  if (!mutate_p)
4712    /* Fix priorities.  If MUTATE_P is nonzero, this is not necessary,
4713       because it'll be done later in add_to_speculative_block.  */
4714    {
4715      rtx_vec_t priorities_roots = NULL;
4716
4717      clear_priorities (twin, &priorities_roots);
4718      calc_priorities (priorities_roots);
4719      VEC_free (rtx, heap, priorities_roots);
4720    }
4721}
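
/* A summary sketch of the non-mutate, branchy case above (an illustration,
   not a specification): after the call, FIRST_BB ends in CHECK (a
   conditional jump to REC), SECOND_BB starts with INSN and the code after
   it, and REC holds TWIN (a copy of INSN's original pattern) followed by
   an unconditional jump to SECOND_BB.  INSN's producers now have
   dependencies to both CHECK and TWIN, INSN's consumers get dependencies
   from TWIN, and an anti dependence CHECK -> TWIN keeps TWIN from being
   scheduled before the check.  In the simple (non-branchy) case REC is
   EXIT_BLOCK_PTR and CHECK itself plays the role of TWIN.  */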
4722
/* Remove dependencies between instructions in the recovery block REC
   and the usual region instructions.  Inner dependences are kept so
   they won't need to be recomputed.  */
4726static void
4727fix_recovery_deps (basic_block rec)
4728{
4729  rtx note, insn, jump, ready_list = 0;
4730  bitmap_head in_ready;
4731  rtx link;
4732
4733  bitmap_initialize (&in_ready, 0);
4734
4735  /* NOTE - a basic block note.  */
4736  note = NEXT_INSN (BB_HEAD (rec));
4737  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4738  insn = BB_END (rec);
4739  gcc_assert (JUMP_P (insn));
4740  insn = PREV_INSN (insn);
4741
4742  do
4743    {
4744      sd_iterator_def sd_it;
4745      dep_t dep;
4746
4747      for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
4748	   sd_iterator_cond (&sd_it, &dep);)
4749	{
4750	  rtx consumer = DEP_CON (dep);
4751
4752	  if (BLOCK_FOR_INSN (consumer) != rec)
4753	    {
4754	      sd_delete_dep (sd_it);
4755
4756	      if (!bitmap_bit_p (&in_ready, INSN_LUID (consumer)))
4757		{
4758		  ready_list = alloc_INSN_LIST (consumer, ready_list);
4759		  bitmap_set_bit (&in_ready, INSN_LUID (consumer));
4760		}
4761	    }
4762	  else
4763	    {
4764	      gcc_assert ((DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);
4765
4766	      sd_iterator_next (&sd_it);
4767	    }
4768	}
4769
4770      insn = PREV_INSN (insn);
4771    }
4772  while (insn != note);
4773
4774  bitmap_clear (&in_ready);
4775
4776  /* Try to add instructions to the ready or queue list.  */
4777  for (link = ready_list; link; link = XEXP (link, 1))
4778    try_ready (XEXP (link, 0));
4779  free_INSN_LIST_list (&ready_list);
4780
  /* Fix the jump's dependences.  */
4782  insn = BB_HEAD (rec);
4783  jump = BB_END (rec);
4784
4785  gcc_assert (LABEL_P (insn));
4786  insn = NEXT_INSN (insn);
4787
4788  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
4789  add_jump_dependencies (insn, jump);
4790}
4791
4792/* Change pattern of INSN to NEW_PAT.  */
4793void
4794sched_change_pattern (rtx insn, rtx new_pat)
4795{
4796  int t;
4797
4798  t = validate_change (insn, &PATTERN (insn), new_pat, 0);
4799  gcc_assert (t);
4800  dfa_clear_single_insn_cache (insn);
4801}
4802
4803/* Change pattern of INSN to NEW_PAT.  Invalidate cached haifa
4804   instruction data.  */
4805static void
4806haifa_change_pattern (rtx insn, rtx new_pat)
4807{
4808  sched_change_pattern (insn, new_pat);
4809
4810  /* Invalidate INSN_COST, so it'll be recalculated.  */
4811  INSN_COST (insn) = -1;
4812  /* Invalidate INSN_TICK, so it'll be recalculated.  */
4813  INSN_TICK (insn) = INVALID_TICK;
4814}
4815
4816/* -1 - can't speculate,
4817   0 - for speculation with REQUEST mode it is OK to use
4818   current instruction pattern,
4819   1 - need to change pattern for *NEW_PAT to be speculative.  */
4820int
4821sched_speculate_insn (rtx insn, ds_t request, rtx *new_pat)
4822{
4823  gcc_assert (current_sched_info->flags & DO_SPECULATION
4824              && (request & SPECULATIVE)
4825	      && sched_insn_is_legitimate_for_speculation_p (insn, request));
4826
4827  if ((request & spec_info->mask) != request)
4828    return -1;
4829
4830  if (request & BE_IN_SPEC
4831      && !(request & BEGIN_SPEC))
4832    return 0;
4833
4834  return targetm.sched.speculate_insn (insn, request, new_pat);
4835}
4836
4837static int
4838haifa_speculate_insn (rtx insn, ds_t request, rtx *new_pat)
4839{
4840  gcc_assert (sched_deps_info->generate_spec_deps
4841	      && !IS_SPECULATION_CHECK_P (insn));
4842
4843  if (HAS_INTERNAL_DEP (insn)
4844      || SCHED_GROUP_P (insn))
4845    return -1;
4846
4847  return sched_speculate_insn (insn, request, new_pat);
4848}
4849
4850/* Print some information about block BB, which starts with HEAD and
4851   ends with TAIL, before scheduling it.
4852   I is zero, if scheduler is about to start with the fresh ebb.  */
4853static void
4854dump_new_block_header (int i, basic_block bb, rtx head, rtx tail)
4855{
4856  if (!i)
4857    fprintf (sched_dump,
4858	     ";;   ======================================================\n");
4859  else
4860    fprintf (sched_dump,
4861	     ";;   =====================ADVANCING TO=====================\n");
4862  fprintf (sched_dump,
4863	   ";;   -- basic block %d from %d to %d -- %s reload\n",
4864	   bb->index, INSN_UID (head), INSN_UID (tail),
4865	   (reload_completed ? "after" : "before"));
4866  fprintf (sched_dump,
4867	   ";;   ======================================================\n");
4868  fprintf (sched_dump, "\n");
4869}
4870
/* Unlink basic block notes and labels and save them, so they
   can be easily restored.  We unlink basic block notes in EBBs to
   provide back-compatibility with the previous code, as target backends
   assume that there will be only instructions between
   current_sched_info->{head and tail}.  We restore these notes as soon
   as we can.
   FIRST (LAST) is the first (last) basic block in the ebb.
   NB: In the usual case (FIRST == LAST) nothing is really done.  */
4879void
4880unlink_bb_notes (basic_block first, basic_block last)
4881{
4882  /* We DON'T unlink basic block notes of the first block in the ebb.  */
4883  if (first == last)
4884    return;
4885
4886  bb_header = XNEWVEC (rtx, last_basic_block);
4887
4888  /* Make a sentinel.  */
4889  if (last->next_bb != EXIT_BLOCK_PTR)
4890    bb_header[last->next_bb->index] = 0;
4891
4892  first = first->next_bb;
4893  do
4894    {
4895      rtx prev, label, note, next;
4896
4897      label = BB_HEAD (last);
4898      if (LABEL_P (label))
4899	note = NEXT_INSN (label);
4900      else
4901	note = label;
4902      gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4903
4904      prev = PREV_INSN (label);
4905      next = NEXT_INSN (note);
4906      gcc_assert (prev && next);
4907
4908      NEXT_INSN (prev) = next;
4909      PREV_INSN (next) = prev;
4910
4911      bb_header[last->index] = label;
4912
4913      if (last == first)
4914	break;
4915
4916      last = last->prev_bb;
4917    }
4918  while (1);
4919}
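
/* Schematically, for each unlinked block the insn chain changes from

     ... prev -> [label ->] bb note -> next ...

   to

     ... prev -> next ...

   with the unlinked label/note kept in bb_header[bb->index], so that
   restore_bb_notes () can splice it back in later.  */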
4920
4921/* Restore basic block notes.
4922   FIRST is the first basic block in the ebb.  */
4923static void
4924restore_bb_notes (basic_block first)
4925{
4926  if (!bb_header)
4927    return;
4928
4929  /* We DON'T unlink basic block notes of the first block in the ebb.  */
4930  first = first->next_bb;
  /* Remember: FIRST is actually the second basic block in the ebb.  */
4932
4933  while (first != EXIT_BLOCK_PTR
4934	 && bb_header[first->index])
4935    {
4936      rtx prev, label, note, next;
4937
4938      label = bb_header[first->index];
4939      prev = PREV_INSN (label);
4940      next = NEXT_INSN (prev);
4941
4942      if (LABEL_P (label))
4943	note = NEXT_INSN (label);
4944      else
4945	note = label;
4946      gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4947
4948      bb_header[first->index] = 0;
4949
4950      NEXT_INSN (prev) = label;
4951      NEXT_INSN (note) = next;
4952      PREV_INSN (next) = note;
4953
4954      first = first->next_bb;
4955    }
4956
4957  free (bb_header);
4958  bb_header = 0;
4959}
4960
4961/* Helper function.
4962   Fix CFG after both in- and inter-block movement of
4963   control_flow_insn_p JUMP.  */
4964static void
4965fix_jump_move (rtx jump)
4966{
4967  basic_block bb, jump_bb, jump_bb_next;
4968
4969  bb = BLOCK_FOR_INSN (PREV_INSN (jump));
4970  jump_bb = BLOCK_FOR_INSN (jump);
4971  jump_bb_next = jump_bb->next_bb;
4972
4973  gcc_assert (common_sched_info->sched_pass_id == SCHED_EBB_PASS
4974	      || IS_SPECULATION_BRANCHY_CHECK_P (jump));
4975
4976  if (!NOTE_INSN_BASIC_BLOCK_P (BB_END (jump_bb_next)))
4977    /* if jump_bb_next is not empty.  */
4978    BB_END (jump_bb) = BB_END (jump_bb_next);
4979
4980  if (BB_END (bb) != PREV_INSN (jump))
    /* Then there are instructions after the jump that should be placed
       in jump_bb_next.  */
4983    BB_END (jump_bb_next) = BB_END (bb);
4984  else
4985    /* Otherwise jump_bb_next is empty.  */
4986    BB_END (jump_bb_next) = NEXT_INSN (BB_HEAD (jump_bb_next));
4987
4988  /* To make assertion in move_insn happy.  */
4989  BB_END (bb) = PREV_INSN (jump);
4990
4991  update_bb_for_insn (jump_bb_next);
4992}
4993
4994/* Fix CFG after interblock movement of control_flow_insn_p JUMP.  */
4995static void
4996move_block_after_check (rtx jump)
4997{
4998  basic_block bb, jump_bb, jump_bb_next;
4999  VEC(edge,gc) *t;
5000
5001  bb = BLOCK_FOR_INSN (PREV_INSN (jump));
5002  jump_bb = BLOCK_FOR_INSN (jump);
5003  jump_bb_next = jump_bb->next_bb;
5004
5005  update_bb_for_insn (jump_bb);
5006
5007  gcc_assert (IS_SPECULATION_CHECK_P (jump)
5008	      || IS_SPECULATION_CHECK_P (BB_END (jump_bb_next)));
5009
5010  unlink_block (jump_bb_next);
5011  link_block (jump_bb_next, bb);
5012
5013  t = bb->succs;
5014  bb->succs = 0;
5015  move_succs (&(jump_bb->succs), bb);
5016  move_succs (&(jump_bb_next->succs), jump_bb);
5017  move_succs (&t, jump_bb_next);
5018
5019  df_mark_solutions_dirty ();
5020
5021  common_sched_info->fix_recovery_cfg
5022    (bb->index, jump_bb->index, jump_bb_next->index);
5023}
5024
/* Helper function for move_block_after_check.
   This function attaches the edge vector pointed to by SUCCSP to
   block TO.  */
5028static void
5029move_succs (VEC(edge,gc) **succsp, basic_block to)
5030{
5031  edge e;
5032  edge_iterator ei;
5033
5034  gcc_assert (to->succs == 0);
5035
5036  to->succs = *succsp;
5037
5038  FOR_EACH_EDGE (e, ei, to->succs)
5039    e->src = to;
5040
5041  *succsp = 0;
5042}
5043
/* Remove INSN from the instruction stream.
   INSN should not have any dependencies.  */
5046static void
5047sched_remove_insn (rtx insn)
5048{
5049  sd_finish_insn (insn);
5050
5051  change_queue_index (insn, QUEUE_NOWHERE);
5052  current_sched_info->add_remove_insn (insn, 1);
5053  remove_insn (insn);
5054}
5055
/* Clear priorities of all instructions that are forward dependent on INSN.
   Store in the vector pointed to by ROOTS_PTR the insns on which priority ()
   should be invoked to initialize all cleared priorities.  */
5059static void
5060clear_priorities (rtx insn, rtx_vec_t *roots_ptr)
5061{
5062  sd_iterator_def sd_it;
5063  dep_t dep;
5064  bool insn_is_root_p = true;
5065
5066  gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);
5067
5068  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
5069    {
5070      rtx pro = DEP_PRO (dep);
5071
5072      if (INSN_PRIORITY_STATUS (pro) >= 0
5073	  && QUEUE_INDEX (insn) != QUEUE_SCHEDULED)
5074	{
5075	  /* If DEP doesn't contribute to priority then INSN itself should
5076	     be added to priority roots.  */
5077	  if (contributes_to_priority_p (dep))
5078	    insn_is_root_p = false;
5079
5080	  INSN_PRIORITY_STATUS (pro) = -1;
5081	  clear_priorities (pro, roots_ptr);
5082	}
5083    }
5084
5085  if (insn_is_root_p)
5086    VEC_safe_push (rtx, heap, *roots_ptr, insn);
5087}
5088
/* Recompute priorities of instructions whose priorities might have been
5090   changed.  ROOTS is a vector of instructions whose priority computation will
5091   trigger initialization of all cleared priorities.  */
5092static void
5093calc_priorities (rtx_vec_t roots)
5094{
5095  int i;
5096  rtx insn;
5097
5098  for (i = 0; VEC_iterate (rtx, roots, i, insn); i++)
5099    priority (insn);
5100}
5101
5102
5103/* Add dependences between JUMP and other instructions in the recovery
   block.  INSN is the first insn in the recovery block.  */
5105static void
5106add_jump_dependencies (rtx insn, rtx jump)
5107{
5108  do
5109    {
5110      insn = NEXT_INSN (insn);
5111      if (insn == jump)
5112	break;
5113
5114      if (dep_list_size (insn) == 0)
5115	{
5116	  dep_def _new_dep, *new_dep = &_new_dep;
5117
5118	  init_dep (new_dep, insn, jump, REG_DEP_ANTI);
5119	  sd_add_dep (new_dep, false);
5120	}
5121    }
5122  while (1);
5123
5124  gcc_assert (!sd_lists_empty_p (jump, SD_LIST_BACK));
5125}
5126
5127/* Return the NOTE_INSN_BASIC_BLOCK of BB.  */
5128rtx
5129bb_note (basic_block bb)
5130{
5131  rtx note;
5132
5133  note = BB_HEAD (bb);
5134  if (LABEL_P (note))
5135    note = NEXT_INSN (note);
5136
5137  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
5138  return note;
5139}
5140
5141#ifdef ENABLE_CHECKING
5142/* Helper function for check_cfg.
   Return nonzero if the edge vector pointed to by EL has an edge with TYPE
   in its flags.  */
5145static int
5146has_edge_p (VEC(edge,gc) *el, int type)
5147{
5148  edge e;
5149  edge_iterator ei;
5150
5151  FOR_EACH_EDGE (e, ei, el)
5152    if (e->flags & type)
5153      return 1;
5154  return 0;
5155}
5156
5157/* Search back, starting at INSN, for an insn that is not a
5158   NOTE_INSN_VAR_LOCATION.  Don't search beyond HEAD, and return it if
5159   no such insn can be found.  */
5160static inline rtx
5161prev_non_location_insn (rtx insn, rtx head)
5162{
5163  while (insn != head && NOTE_P (insn)
5164	 && NOTE_KIND (insn) == NOTE_INSN_VAR_LOCATION)
5165    insn = PREV_INSN (insn);
5166
5167  return insn;
5168}
5169
/* Check a few properties of the CFG between HEAD and TAIL.
   If HEAD (TAIL) is NULL, check from the beginning (till the end) of the
   instruction stream.  */
5173static void
5174check_cfg (rtx head, rtx tail)
5175{
5176  rtx next_tail;
5177  basic_block bb = 0;
5178  int not_first = 0, not_last;
5179
5180  if (head == NULL)
5181    head = get_insns ();
5182  if (tail == NULL)
5183    tail = get_last_insn ();
5184  next_tail = NEXT_INSN (tail);
5185
5186  do
5187    {
5188      not_last = head != tail;
5189
5190      if (not_first)
5191	gcc_assert (NEXT_INSN (PREV_INSN (head)) == head);
5192      if (not_last)
5193	gcc_assert (PREV_INSN (NEXT_INSN (head)) == head);
5194
5195      if (LABEL_P (head)
5196	  || (NOTE_INSN_BASIC_BLOCK_P (head)
5197	      && (!not_first
5198		  || (not_first && !LABEL_P (PREV_INSN (head))))))
5199	{
5200	  gcc_assert (bb == 0);
5201	  bb = BLOCK_FOR_INSN (head);
5202	  if (bb != 0)
5203	    gcc_assert (BB_HEAD (bb) == head);
5204	  else
	    /* This is the case of a jump table.  See inside_basic_block_p ().  */
5206	    gcc_assert (LABEL_P (head) && !inside_basic_block_p (head));
5207	}
5208
5209      if (bb == 0)
5210	{
5211	  gcc_assert (!inside_basic_block_p (head));
5212	  head = NEXT_INSN (head);
5213	}
5214      else
5215	{
5216	  gcc_assert (inside_basic_block_p (head)
5217		      || NOTE_P (head));
5218	  gcc_assert (BLOCK_FOR_INSN (head) == bb);
5219
5220	  if (LABEL_P (head))
5221	    {
5222	      head = NEXT_INSN (head);
5223	      gcc_assert (NOTE_INSN_BASIC_BLOCK_P (head));
5224	    }
5225	  else
5226	    {
5227	      if (control_flow_insn_p (head))
5228		{
5229		  gcc_assert (prev_non_location_insn (BB_END (bb), head)
5230			      == head);
5231
5232		  if (any_uncondjump_p (head))
5233		    gcc_assert (EDGE_COUNT (bb->succs) == 1
5234				&& BARRIER_P (NEXT_INSN (head)));
5235		  else if (any_condjump_p (head))
5236		    gcc_assert (/* Usual case.  */
5237                                (EDGE_COUNT (bb->succs) > 1
5238                                 && !BARRIER_P (NEXT_INSN (head)))
5239                                /* Or jump to the next instruction.  */
5240                                || (EDGE_COUNT (bb->succs) == 1
5241                                    && (BB_HEAD (EDGE_I (bb->succs, 0)->dest)
5242                                        == JUMP_LABEL (head))));
5243		}
5244	      if (BB_END (bb) == head)
5245		{
5246		  if (EDGE_COUNT (bb->succs) > 1)
5247		    gcc_assert (control_flow_insn_p (prev_non_location_insn
5248						     (head, BB_HEAD (bb)))
5249				|| has_edge_p (bb->succs, EDGE_COMPLEX));
5250		  bb = 0;
5251		}
5252
5253	      head = NEXT_INSN (head);
5254	    }
5255	}
5256
5257      not_first = 1;
5258    }
5259  while (head != next_tail);
5260
5261  gcc_assert (bb == 0);
5262}
5263
5264#endif /* ENABLE_CHECKING */
5265
5266/* Extend per basic block data structures.  */
5267static void
5268extend_bb (void)
5269{
5270  if (sched_scan_info->extend_bb)
5271    sched_scan_info->extend_bb ();
5272}
5273
5274/* Init data for BB.  */
5275static void
5276init_bb (basic_block bb)
5277{
5278  if (sched_scan_info->init_bb)
5279    sched_scan_info->init_bb (bb);
5280}
5281
5282/* Extend per insn data structures.  */
5283static void
5284extend_insn (void)
5285{
5286  if (sched_scan_info->extend_insn)
5287    sched_scan_info->extend_insn ();
5288}
5289
5290/* Init data structures for INSN.  */
5291static void
5292init_insn (rtx insn)
5293{
5294  if (sched_scan_info->init_insn)
5295    sched_scan_info->init_insn (insn);
5296}
5297
5298/* Init all insns in BB.  */
5299static void
5300init_insns_in_bb (basic_block bb)
5301{
5302  rtx insn;
5303
5304  FOR_BB_INSNS (bb, insn)
5305    init_insn (insn);
5306}
5307
5308/* A driver function to add a set of basic blocks (BBS),
5309   a single basic block (BB), a set of insns (INSNS) or a single insn (INSN)
5310   to the scheduling region.  */
5311void
5312sched_scan (const struct sched_scan_info_def *ssi,
5313	    bb_vec_t bbs, basic_block bb, insn_vec_t insns, rtx insn)
5314{
5315  sched_scan_info = ssi;
5316
5317  if (bbs != NULL || bb != NULL)
5318    {
5319      extend_bb ();
5320
5321      if (bbs != NULL)
5322	{
5323	  unsigned i;
5324	  basic_block x;
5325
5326	  for (i = 0; VEC_iterate (basic_block, bbs, i, x); i++)
5327	    init_bb (x);
5328	}
5329
5330      if (bb != NULL)
5331	init_bb (bb);
5332    }
5333
5334  extend_insn ();
5335
5336  if (bbs != NULL)
5337    {
5338      unsigned i;
5339      basic_block x;
5340
5341      for (i = 0; VEC_iterate (basic_block, bbs, i, x); i++)
5342	init_insns_in_bb (x);
5343    }
5344
5345  if (bb != NULL)
5346    init_insns_in_bb (bb);
5347
5348  if (insns != NULL)
5349    {
5350      unsigned i;
5351      rtx x;
5352
5353      for (i = 0; VEC_iterate (rtx, insns, i, x); i++)
5354	init_insn (x);
5355    }
5356
5357  if (insn != NULL)
5358    init_insn (insn);
5359}
5360
5361
5362/* Extend data structures for logical insn UID.  */
5363static void
5364luids_extend_insn (void)
5365{
5366  int new_luids_max_uid = get_max_uid () + 1;
5367
5368  VEC_safe_grow_cleared (int, heap, sched_luids, new_luids_max_uid);
5369}
5370
5371/* Initialize LUID for INSN.  */
5372static void
5373luids_init_insn (rtx insn)
5374{
5375  int i = INSN_P (insn) ? 1 : common_sched_info->luid_for_non_insn (insn);
5376  int luid;
5377
5378  if (i >= 0)
5379    {
5380      luid = sched_max_luid;
5381      sched_max_luid += i;
5382    }
5383  else
5384    luid = -1;
5385
5386  SET_INSN_LUID (insn, luid);
5387}
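
/* Concretely: a real insn (INSN_P) has i == 1, so it receives the current
   sched_max_luid and advances it by one.  With the Haifa hook
   haifa_luid_for_non_insn () a note or label has i == 0, so it is tagged
   with the current sched_max_luid without advancing it.  A hook returning
   a negative value would leave the rtx with luid -1, i.e. no luid.  */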
5388
5389/* Initialize luids for BBS, BB, INSNS and INSN.
5390   The hook common_sched_info->luid_for_non_insn () is used to determine
5391   if notes, labels, etc. need luids.  */
5392void
5393sched_init_luids (bb_vec_t bbs, basic_block bb, insn_vec_t insns, rtx insn)
5394{
5395  const struct sched_scan_info_def ssi =
5396    {
5397      NULL, /* extend_bb */
5398      NULL, /* init_bb */
5399      luids_extend_insn, /* extend_insn */
5400      luids_init_insn /* init_insn */
5401    };
5402
5403  sched_scan (&ssi, bbs, bb, insns, insn);
5404}
5405
5406/* Free LUIDs.  */
5407void
5408sched_finish_luids (void)
5409{
5410  VEC_free (int, heap, sched_luids);
5411  sched_max_luid = 1;
5412}
5413
5414/* Return logical uid of INSN.  Helpful while debugging.  */
5415int
5416insn_luid (rtx insn)
5417{
5418  return INSN_LUID (insn);
5419}
5420
5421/* Extend per insn data in the target.  */
5422void
5423sched_extend_target (void)
5424{
5425  if (targetm.sched.h_i_d_extended)
5426    targetm.sched.h_i_d_extended ();
5427}
5428
/* Extend global scheduler structures (those that live across calls to
   schedule_block) to include information about the just emitted INSN.  */
5431static void
5432extend_h_i_d (void)
5433{
5434  int reserve = (get_max_uid () + 1
5435                 - VEC_length (haifa_insn_data_def, h_i_d));
5436  if (reserve > 0
5437      && ! VEC_space (haifa_insn_data_def, h_i_d, reserve))
5438    {
5439      VEC_safe_grow_cleared (haifa_insn_data_def, heap, h_i_d,
5440                             3 * get_max_uid () / 2);
5441      sched_extend_target ();
5442    }
5443}
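
/* For example (an illustration only, not a trace of a real compilation):
   if get_max_uid () returns 100 while h_i_d holds 60 entries and has no
   spare capacity, reserve is 41, so the vector is grown and cleared up
   to 3 * 100 / 2 = 150 entries.  Growing past the current maximum uid
   amortizes the cost of repeated one-insn extensions.  */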

/* Initialize the h_i_d entry of INSN with default values.
   Fields that are not explicitly initialized here hold zero,
   since the h_i_d vector is grown cleared.  */
static void
init_h_i_d (rtx insn)
{
  if (INSN_LUID (insn) > 0)
    {
      INSN_COST (insn) = -1;
      QUEUE_INDEX (insn) = QUEUE_NOWHERE;
      INSN_TICK (insn) = INVALID_TICK;
      INTER_TICK (insn) = INVALID_TICK;
      TODO_SPEC (insn) = HARD_DEP;
    }
}

/* Initialize haifa_insn_data for BBS, BB, INSNS and INSN.  */
void
haifa_init_h_i_d (bb_vec_t bbs, basic_block bb, insn_vec_t insns, rtx insn)
{
  const struct sched_scan_info_def ssi =
    {
      NULL, /* extend_bb */
      NULL, /* init_bb */
      extend_h_i_d, /* extend_insn */
      init_h_i_d /* init_insn */
    };

  sched_scan (&ssi, bbs, bb, insns, insn);
}

/* Finalize haifa_insn_data.  */
void
haifa_finish_h_i_d (void)
{
  int i;
  haifa_insn_data_t data;
  struct reg_use_data *use, *next;

  for (i = 0; VEC_iterate (haifa_insn_data_def, h_i_d, i, data); i++)
    {
      if (data->reg_pressure != NULL)
	free (data->reg_pressure);
      for (use = data->reg_use_list; use != NULL; use = next)
	{
	  next = use->next_insn_use;
	  free (use);
	}
    }
  VEC_free (haifa_insn_data_def, heap, h_i_d);
}

/* Init data for the new insn INSN.  */
static void
haifa_init_insn (rtx insn)
{
  gcc_assert (insn != NULL);

  sched_init_luids (NULL, NULL, NULL, insn);
  sched_extend_target ();
  sched_deps_init (false);
  haifa_init_h_i_d (NULL, NULL, NULL, insn);

  if (adding_bb_to_current_region_p)
    {
      sd_init_insn (insn);

      /* Extend dependency caches by one element.  */
      extend_dependency_caches (1, false);
    }
}

/* Init data for the new basic block BB which comes after AFTER.  */
static void
haifa_init_only_bb (basic_block bb, basic_block after)
{
  gcc_assert (bb != NULL);

  sched_init_bbs ();

  if (common_sched_info->add_block)
    /* This changes only data structures of the front-end.  */
    common_sched_info->add_block (bb, after);
}

/* A generic version of sched_split_block ().  */
basic_block
sched_split_block_1 (basic_block first_bb, rtx after)
{
  edge e;

  e = split_block (first_bb, after);
  gcc_assert (e->src == first_bb);

  /* sched_split_block emits a note if *check == BB_END.  It would
     probably be better to remove that note.  */

  return e->dest;
}

/* A generic version of sched_create_empty_bb ().  */
basic_block
sched_create_empty_bb_1 (basic_block after)
{
  return create_empty_bb (after);
}

/* Insert PAT as an INSN into the schedule and update the necessary data
   structures to account for it.  */
rtx
sched_emit_insn (rtx pat)
{
  rtx insn = emit_insn_after (pat, last_scheduled_insn);
  last_scheduled_insn = insn;
  haifa_init_insn (insn);
  return insn;
}
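
/* For instance, a backend that wants to materialize an extra insn during
   scheduling could (hypothetically, assuming the target provides a "nop"
   pattern) do

     rtx nop = sched_emit_insn (gen_nop ());

   after which NOP is recorded as the last scheduled insn and the per-insn
   scheduler data (luids, h_i_d, target data) has been set up for it.  */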

#endif /* INSN_SCHEDULING */
