/* Instruction scheduling pass.  Selective scheduler and pipeliner.
   Copyright (C) 2006-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl-error.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "hashtab.h"
#include "hash-set.h"
#include "vec.h"
#include "machmode.h"
#include "input.h"
#include "function.h"
#include "predict.h"
#include "dominance.h"
#include "cfg.h"
#include "cfgbuild.h"
#include "basic-block.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
#include "recog.h"
#include "params.h"
#include "target.h"
#include "output.h"
#include "sched-int.h"
#include "ggc.h"
#include "symtab.h"
#include "wide-int.h"
#include "inchash.h"
#include "tree.h"
#include "langhooks.h"
#include "rtlhooks-def.h"
#include "emit-rtl.h"
#include "ira.h"
#include "ira-int.h"
#include "rtl-iter.h"

#ifdef INSN_SCHEDULING
#include "sel-sched-ir.h"
#include "sel-sched-dump.h"
#include "sel-sched.h"
#include "dbgcnt.h"

/* Implementation of selective scheduling approach.
   The below implementation follows the original approach with the following
   changes:

   o the scheduler works after register allocation (but can also be tuned
   to work before RA);
   o some instructions are not copied or register renamed;
   o conditional jumps are not moved with code duplication;
   o several jumps in one parallel group are not supported;
   o when pipelining outer loops, code motion through inner loops
   is not supported;
   o control and data speculation are supported;
   o some improvements for better compile time/performance were made.

   Terminology
   ===========

   A vinsn, or virtual insn, is an insn with additional data characterizing
   its insn pattern, such as LHS, RHS, register sets used/set/clobbered, etc.
   Vinsns also act as smart pointers to save memory by reusing them in
   different expressions.  A vinsn is described by the vinsn_t type.

   An expression is a vinsn with additional data characterizing its properties
   at some point in the control flow graph.  The data may be its usefulness,
   priority, speculative status, whether it was renamed/substituted, etc.
   An expression is described by the expr_t type.

   An availability set (av_set) is a set of expressions at a given control
   flow point.  It is represented as av_set_t.  The expressions in av sets are
   kept sorted in terms of the expr_greater_p function.  This allows us to
   truncate the set while leaving the best expressions.
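
   As an illustration only (the expressions and priorities are made up), a
   sorted av set might look like

       { y = x + 1 (priority 5); a = b (priority 3); c = [mem] (priority 1) }

   so truncating it to the two best expressions simply drops the tail.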

   A fence is a point through which code motion is prohibited.  On each step,
   we gather a parallel group of insns at a fence.  It is possible to have
   multiple fences.  A fence is represented via fence_t.

   A boundary is the border between the fence group and the rest of the code.
   Currently, we never have more than one boundary per fence, as we finalize
   the fence group when a jump is scheduled.  A boundary is represented
   via bnd_t.

   High-level overview
   ===================

   The scheduler finds regions to schedule, schedules each one, and finalizes.
   The regions are formed starting from innermost loops, so that when the inner
   loop is pipelined, its prologue can be scheduled together with the yet
   unprocessed outer loop.  The rest of the acyclic regions are found using
   extend_rgns: the blocks that are not yet allocated to any regions are
   traversed in top-down order, and a block is added to a region to which all
   its predecessors belong; otherwise, the block starts its own region.

   The main scheduling loop (sel_sched_region_2) consists of just
   scheduling on each fence and updating fences.  For each fence,
   we fill a parallel group of insns (fill_insns) until no more insns can be
   added.  First, we compute available exprs (av-set) at the boundary of the
   current group.  Second, we choose the best expression from it.  If a stall
   is required to schedule any of the expressions, we advance the current cycle
   appropriately.  So, the final group does not exactly correspond to a VLIW
   word.  Third, we move the chosen expression to the boundary (move_op)
   and update the intermediate av sets and liveness sets.  We quit fill_insns
   when either no insns are left for scheduling or we have scheduled enough
   insns to feel like advancing a scheduling point.
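
   As a simplified sketch only (stalls, liveness updates and group
   finalization are omitted; choose_best stands in for the actual selection
   code), the per-fence work looks like:

       while (insns can still be added to the group)
         {
           av = compute_av_set (boundary);    -- step 1
           expr = choose_best (av);           -- step 2
           move_op (expr, boundary);          -- step 3
         }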

   Computing available expressions
   ===============================

   The computation (compute_av_set) is a bottom-up traversal.  At each insn,
   we're moving the union of its successors' sets through it via
   moveup_expr_set.  The dependent expressions are removed.  Local
   transformations (substitution, speculation) are applied to move more
   exprs.  Then the expr corresponding to the current insn is added.
   The result is saved on each basic block header.

   When traversing the CFG, we're moving down for no more than max_ws insns.
   Also, we do not move down to ineligible successors (is_ineligible_successor),
   which include moving along a back-edge, moving to already scheduled code,
   and moving to another fence.  The first two restrictions are lifted during
   pipelining, which allows us to move insns along a back-edge.  We always have
   an acyclic region for scheduling because we forbid motion through fences.
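
   For example (illustrative only), when the av set { z = x + 1 } is moved
   up through the copy insn "x = y", substitution rewrites the expression
   into { z = y + 1 } instead of dropping it for the true dependence on x.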

   Choosing the best expression
   ============================

   We sort the final availability set via sel_rank_for_schedule, then we remove
   expressions which are not yet ready (tick_check_p) or whose destination
   registers cannot be used.  For some of them, we choose another register via
   find_best_reg.  To do this, we run find_used_regs to calculate the set of
   registers which cannot be used.  The find_used_regs function performs
   a traversal of code motion paths for an expr.  We consider for renaming
   only registers which are from the same regclass as the original one and
   whose use does not interfere with any live ranges.  Finally, we convert
   the resulting set to the ready list format and use the max_issue and
   reorder* hooks similarly to the Haifa scheduler.
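
   For instance (a made-up example), if an expr computes into ax but ax is
   unusable on some code motion path, find_best_reg may pick another register
   of the same regclass, say cx, provided cx is neither reported as used by
   find_used_regs nor interferes with any live range.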

   Scheduling the best expression
   ==============================

   We run the move_op routine to perform the same type of code motion paths
   traversal as in find_used_regs.  (These work via the same driver,
   code_motion_path_driver.)  When moving down the CFG, we look for the
   original instruction that gave birth to a chosen expression.  We undo
   the transformations performed on an expression via the history saved in it.
   When found, we remove the instruction or leave a reg-reg copy/speculation
   check if needed.  On the way up, we insert bookkeeping copies at each join
   point.  If a copy is not needed, it will be removed later during this
   traversal.  We update the saved av sets and liveness sets on the way up, too.
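
   To illustrate bookkeeping (a made-up example), when an insn is moved up
   from below a join point along one incoming path, a copy of it is inserted
   on the other incoming edge, so that every path through the join point
   still computes the value.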

   Finalizing the schedule
   =======================

   When pipelining, we reschedule the blocks from which insns were pipelined
   to get a tighter schedule.  On Itanium, we also perform bundling via
   the same routine from ia64.c.

   Dependence analysis changes
   ===========================

   We augmented sched-deps.c with hooks that get called when a particular
   dependence is found in a particular part of an insn.  Using these hooks, we
   can do several actions such as: determine whether an insn can be moved through
   another (has_dependence_p, moveup_expr); find out whether an insn can be
   scheduled on the current cycle (tick_check_p); find out registers that
   are set/used/clobbered by an insn and find out all the strange stuff that
   restricts its movement, like SCHED_GROUP_P or CANT_MOVE (done in
   init_global_and_expr_for_insn).

   Initialization changes
   ======================

   There are parts of haifa-sched.c, sched-deps.c, and sched-rgn.c that are
   reused in all of the schedulers.  We have split up the initialization of data
   of such parts into different functions prefixed with scheduler type and
   postfixed with the type of data initialized: {,sel_,haifa_}sched_{init,finish},
   sched_rgn_init/finish, sched_deps_init/finish, sched_init_{luids/bbs}, etc.
   The same splitting is done with the current_sched_info structure:
   dependence-related parts are in sched_deps_info, the common part is in
   common_sched_info, and the haifa/sel/etc. part is in current_sched_info.

   Target contexts
   ===============

   As we now have multiple-point scheduling, the old scheme does not work for
   backends which save some of the scheduler state to use it in the target
   hooks.  For this purpose, we introduce the concept of target contexts, which
   encapsulate such information.  The backend should implement simple routines
   for allocating/freeing/setting such a context.  The scheduler calls these
   as target hooks and handles the target context as an opaque pointer (similar
   to the DFA state type, state_t).
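
   As a minimal sketch (the "foo_" names are hypothetical; see the
   alloc/init/set/clear/free sched context hooks in target.def for the
   actual interface), a backend might provide:

       static void *
       foo_alloc_sched_context (void)
       {
         return xmalloc (sizeof (struct foo_sched_context));
       }

   plus matching init/set/clear/free routines, each treating the context as
   an opaque blob of target scheduling state.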

   Various speedups
   ================

   As the correct data dependence graph is not supported during scheduling
   (which is to be changed in the mid-term), we cache as much of the dependence
   analysis results as possible to avoid reanalyzing.  This includes: bitmap
   caches on each insn in the region's stream saying yes/no for a query with
   a pair of UIDs; hashtables with the previously done transformations on each
   insn in the stream; a vector keeping a history of transformations on each
   expr.

   Also, we try to minimize the dependence context used on each fence to check
   whether the given expression is ready for scheduling by removing from it
   insns that have definitely completed execution.  The results of
   tick_check_p checks are also cached in a vector on each fence.

   We keep a valid liveness set on each insn in a region to avoid the high
   cost of recomputation on large basic blocks.

   Finally, we try to minimize the number of needed updates to the availability
   sets.  The updates happen in two cases: when fill_insns terminates,
   we advance all fences and increase the stage number to show that the region
   has changed and the sets are to be recomputed; and when the next iteration
   of a loop in fill_insns happens (but this one reuses the saved av sets
   on bb headers).  Thus, we try to break the fill_insns loop only when
   a "significant" number of insns from the current scheduling window has been
   scheduled.  This should be made a target param.


   TODO: correctly support the data dependence graph at all stages and get rid
   of all caches.  This should speed up the scheduler.
   TODO: implement moving cond jumps with bookkeeping copies on both targets.
   TODO: tune the scheduler before RA so it does not create too many pseudos.


   References:
   S.-M. Moon and K. Ebcioglu.  Parallelizing nonnumerical code with
   selective scheduling and software pipelining.
   ACM TOPLAS, Vol 19, No. 6, pages 853--898, Nov. 1997.

   Andrey Belevantsev, Maxim Kuvyrkov, Vladimir Makarov, Dmitry Melnik,
   and Dmitry Zhurikhin.  An interblock VLIW-targeted instruction scheduler
   for GCC.  In Proceedings of GCC Developers' Summit 2006.

   Arutyun Avetisyan, Andrey Belevantsev, and Dmitry Melnik.  GCC Instruction
   Scheduler and Software Pipeliner on the Itanium Platform.  EPIC-7 Workshop.
   http://rogue.colorado.edu/EPIC7/.

*/

/* True when pipelining is enabled.  */
bool pipelining_p;

/* True if bookkeeping is enabled.  */
bool bookkeeping_p;

/* Maximum number of insns that are eligible for renaming.  */
int max_insns_to_rename;


/* Definitions of local types and macros.  */

/* Represents possible outcomes of moving an expression through an insn.  */
enum MOVEUP_EXPR_CODE
  {
    /* The expression is not changed.  */
    MOVEUP_EXPR_SAME,

    /* Not changed, but requires a new destination register.  */
    MOVEUP_EXPR_AS_RHS,

    /* Cannot be moved.  */
    MOVEUP_EXPR_NULL,

    /* Changed (substituted or speculated).  */
    MOVEUP_EXPR_CHANGED
  };
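
/* For illustration only (hypothetical registers): moving "r1 = r2 + r3"
   through an unrelated insn gives MOVEUP_EXPR_SAME; through "r1 = r4",
   which overwrites only the destination, the RHS can still go up with a new
   target register, giving MOVEUP_EXPR_AS_RHS; through the copy "r2 = r5",
   substitution yields "r1 = r5 + r3", giving MOVEUP_EXPR_CHANGED; and
   through a non-copy insn setting r2 there is no valid transformation,
   giving MOVEUP_EXPR_NULL.  */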

/* The container to be passed into rtx search & replace functions.  */
struct rtx_search_arg
{
  /* What we are searching for.  */
  rtx x;

  /* The occurrence counter.  */
  int n;
};

typedef struct rtx_search_arg *rtx_search_arg_p;

/* This struct contains precomputed hard reg sets that are needed when
   computing registers available for renaming.  */
struct hard_regs_data
{
  /* For every mode, this stores registers available for use with
     that mode.  */
  HARD_REG_SET regs_for_mode[NUM_MACHINE_MODES];

  /* True when regs_for_mode[mode] is initialized.  */
  bool regs_for_mode_ok[NUM_MACHINE_MODES];

  /* For every register, the set of regs it can be renamed into.
     The register in question is always set; if it is not, the whole
     set has not been computed yet.  */
  HARD_REG_SET regs_for_rename[FIRST_PSEUDO_REGISTER];

  /* For every mode, this stores registers not available due to
     call clobbering.  */
  HARD_REG_SET regs_for_call_clobbered[NUM_MACHINE_MODES];

  /* All registers that are used or call used.  */
  HARD_REG_SET regs_ever_used;

#ifdef STACK_REGS
  /* Stack registers.  */
  HARD_REG_SET stack_regs;
#endif
};

/* Holds the results of computing which hard registers are available for
   renaming and which are unavailable.  */
struct reg_rename
{
  /* These are unavailable due to calls crossing, globalness, etc.  */
  HARD_REG_SET unavailable_hard_regs;

  /* These are *available* for renaming.  */
  HARD_REG_SET available_for_renaming;

  /* Whether this code motion path crosses a call.  */
  bool crosses_call;
};

/* A global structure that contains the needed information about hard
   regs.  */
static struct hard_regs_data sel_hrd;


/* This structure holds local data used in code_motion_path_driver hooks on
   the same or adjacent levels of recursion.  Here we keep those parameters
   that are not used in the code_motion_path_driver routine itself, but only
   in its hooks.  Moreover, all parameters that can be modified in hooks are
   in this structure, so all other parameters passed explicitly to hooks are
   read-only.  */
struct cmpd_local_params
{
  /* Local params used in move_op_* functions.  */

  /* Edges for bookkeeping generation.  */
  edge e1, e2;

  /* C_EXPR merged from all successors and locally allocated temporary C_EXPR.  */
  expr_t c_expr_merged, c_expr_local;

  /* Local params used in fur_* functions.  */
  /* Copy of the ORIGINAL_INSN list, stores the original insns already
     found before entering the current level of code_motion_path_driver.  */
  def_list_t old_original_insns;

  /* Local params used in move_op_* functions.  */
  /* True when we have removed the last insn in the block which was
     also a boundary.  Do not update anything or create bookkeeping copies.  */
  BOOL_BITFIELD removed_last_insn : 1;
};

/* Stores the static parameters for move_op_* calls.  */
struct moveop_static_params
{
  /* Destination register.  */
  rtx dest;

  /* Current C_EXPR.  */
  expr_t c_expr;

  /* The UID of expr_vliw which is to be moved up.  If we find other exprs,
     they are to be removed.  */
  int uid;

#ifdef ENABLE_CHECKING
  /* This is initialized to the insn on which the driver stopped its traversal.  */
  insn_t failed_insn;
#endif

  /* True if we scheduled an insn with a different register.  */
  bool was_renamed;
};

/* Stores the static parameters for fur_* calls.  */
struct fur_static_params
{
  /* Set of registers unavailable on the code motion path.  */
  regset used_regs;

  /* Pointer to the list of original insns definitions.  */
  def_list_t *original_insns;

  /* True if a code motion path contains a CALL insn.  */
  bool crosses_call;
};

typedef struct fur_static_params *fur_static_params_p;
typedef struct cmpd_local_params *cmpd_local_params_p;
typedef struct moveop_static_params *moveop_static_params_p;

/* Set of hooks and parameters that determine behaviour specific to
   move_op or find_used_regs functions.  */
struct code_motion_path_driver_info_def
{
  /* Called on entry to the basic block.  */
  int (*on_enter) (insn_t, cmpd_local_params_p, void *, bool);

  /* Called when the original expr is found.  */
  void (*orig_expr_found) (insn_t, expr_t, cmpd_local_params_p, void *);

  /* Called while descending the current basic block if the current insn is
     not the original EXPR we're searching for.  */
  bool (*orig_expr_not_found) (insn_t, av_set_t, void *);

  /* Function to merge C_EXPRes from different successors.  */
  void (*merge_succs) (insn_t, insn_t, int, cmpd_local_params_p, void *);

  /* Function to finalize merge from different successors and possibly
     deallocate temporary data structures used for merging.  */
  void (*after_merge_succs) (cmpd_local_params_p, void *);

  /* Called on the backward stage of recursion to do moveup_expr.
     Used only with move_op_*.  */
  void (*ascend) (insn_t, void *);

  /* Called on the ascending pass, before returning from the current basic
     block or from the whole traversal.  */
  void (*at_first_insn) (insn_t, cmpd_local_params_p, void *);

  /* When processing successors in move_op we need only descend into
     SUCCS_NORMAL successors, while in find_used_regs we need SUCCS_ALL.  */
  int succ_flags;

  /* The routine name to print in dumps ("move_op" or "find_used_regs").  */
  const char *routine_name;
};

/* Global pointer to current hooks, either points to MOVE_OP_HOOKS or
   FUR_HOOKS.  */
struct code_motion_path_driver_info_def *code_motion_path_driver_info;

/* Set of hooks for performing move_op and find_used_regs routines with
   code_motion_path_driver.  */
extern struct code_motion_path_driver_info_def move_op_hooks, fur_hooks;

/* True if/when we want to emulate the Haifa scheduler in the common code.
   This is used in sched_rgn_local_init and in various places in
   sched-deps.c.  */
int sched_emulate_haifa_p;

/* GLOBAL_LEVEL is used to discard information stored in basic block headers
   av_sets.  The av_set of a bb header is valid if the header's level is equal
   to GLOBAL_LEVEL, and invalid if it is less.  This is primarily used to
   advance the scheduling window.  */
int global_level;

/* Current fences.  */
flist_t fences;

/* True when separable insns should be scheduled as RHSes.  */
static bool enable_schedule_as_rhs_p;

/* Used in verify_target_availability to assert that the target reg is
   reported unavailable by both TARGET_UNAVAILABLE and find_used_regs only if
   we haven't scheduled anything on the previous fence.
   If scheduled_something_on_previous_fence is true, TARGET_UNAVAILABLE can
   have a more conservative value than the one returned by find_used_regs,
   thus we shouldn't assert that these values are equal.  */
static bool scheduled_something_on_previous_fence;

/* All newly emitted insns will have their uids greater than this value.  */
static int first_emitted_uid;

/* Set of basic blocks that are forced to start new ebbs.  This is a subset
   of all the ebb heads.  */
static bitmap_head _forced_ebb_heads;
bitmap_head *forced_ebb_heads = &_forced_ebb_heads;

/* Blocks that need to be rescheduled after pipelining.  */
bitmap blocks_to_reschedule = NULL;

/* True when the first lv set should be ignored when updating liveness.  */
static bool ignore_first = false;

/* Number of insns max_issue has initialized data structures for.  */
static int max_issue_size = 0;

/* Whether we can issue more instructions.  */
static int can_issue_more;

/* Maximum software lookahead window size, reduced when rescheduling after
   pipelining.  */
static int max_ws;

/* Number of insns scheduled in current region.  */
static int num_insns_scheduled;

/* A vector of expressions is used so that expressions can be sorted.  */
static vec<expr_t> vec_av_set = vNULL;

/* A vector of vinsns is used to hold temporary lists of vinsns.  */
typedef vec<vinsn_t> vinsn_vec_t;

/* This vector has the exprs which may still be present in av_sets, but
   actually can't be moved up due to bookkeeping created during code motion
   to another fence.  See the comment near the call to
   update_and_record_unavailable_insns for a detailed explanation.  */
static vinsn_vec_t vec_bookkeeping_blocked_vinsns = vinsn_vec_t ();

/* This vector has vinsns which are scheduled with renaming on the first fence
   and then seen on the second.  For expressions with such vinsns, target
   availability information may be wrong.  */
static vinsn_vec_t vec_target_unavailable_vinsns = vinsn_vec_t ();

/* Vector to store temporary nops inserted in move_op to prevent removal
   of empty bbs.  */
static vec<insn_t> vec_temp_moveop_nops = vNULL;

/* These bitmaps record original instructions scheduled on the current
   iteration and bookkeeping copies created by them.  */
static bitmap current_originators = NULL;
static bitmap current_copies = NULL;

/* This bitmap marks the blocks visited by code_motion_path_driver so we don't
   visit them afterwards.  */
static bitmap code_motion_visited_blocks = NULL;

/* Variables to accumulate different statistics.  */

/* The number of bookkeeping copies created.  */
static int stat_bookkeeping_copies;

/* The number of insns that required bookkeeping for their scheduling.  */
static int stat_insns_needed_bookkeeping;

/* The number of insns that got renamed.  */
static int stat_renamed_scheduled;

/* The number of substitutions made during scheduling.  */
static int stat_substitutions_total;


/* Forward declarations of static functions.  */
static bool rtx_ok_for_substitution_p (rtx, rtx);
static int sel_rank_for_schedule (const void *, const void *);
static av_set_t find_sequential_best_exprs (bnd_t, expr_t, bool);
static basic_block find_block_for_bookkeeping (edge e1, edge e2, bool lax);

static rtx get_dest_from_orig_ops (av_set_t);
static basic_block generate_bookkeeping_insn (expr_t, edge, edge);
static bool find_used_regs (insn_t, av_set_t, regset, struct reg_rename *,
                            def_list_t *);
static bool move_op (insn_t, av_set_t, expr_t, rtx, expr_t, bool*);
static int code_motion_path_driver (insn_t, av_set_t, ilist_t,
                                    cmpd_local_params_p, void *);
static void sel_sched_region_1 (void);
static void sel_sched_region_2 (int);
static av_set_t compute_av_set_inside_bb (insn_t, ilist_t, int, bool);

static void debug_state (state_t);


/* Functions that work with fences.  */

/* Advance one cycle on FENCE.  */
static void
advance_one_cycle (fence_t fence)
{
  unsigned i;
  int cycle;
  rtx_insn *insn;

  advance_state (FENCE_STATE (fence));
  cycle = ++FENCE_CYCLE (fence);
  FENCE_ISSUED_INSNS (fence) = 0;
  FENCE_STARTS_CYCLE_P (fence) = 1;
  can_issue_more = issue_rate;
  FENCE_ISSUE_MORE (fence) = can_issue_more;

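  /* Forget insns whose results are ready by the new cycle: drop them from
     the fence's list of executing insns and from its dependence context.  */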
  for (i = 0; vec_safe_iterate (FENCE_EXECUTING_INSNS (fence), i, &insn); )
    {
      if (INSN_READY_CYCLE (insn) < cycle)
        {
          remove_from_deps (FENCE_DC (fence), insn);
          FENCE_EXECUTING_INSNS (fence)->unordered_remove (i);
          continue;
        }
      i++;
    }
  if (sched_verbose >= 2)
    {
      sel_print ("Finished a cycle.  Current cycle = %d\n", FENCE_CYCLE (fence));
      debug_state (FENCE_STATE (fence));
    }
}

/* Returns true when SUCC is in a fallthru bb of INSN, possibly
   skipping empty basic blocks.  */
static bool
in_fallthru_bb_p (rtx insn, rtx succ)
{
  basic_block bb = BLOCK_FOR_INSN (insn);
  edge e;

  if (bb == BLOCK_FOR_INSN (succ))
    return true;

  e = find_fallthru_edge_from (bb);
  if (e)
    bb = e->dest;
  else
    return false;

  while (sel_bb_empty_p (bb))
    bb = bb->next_bb;

  return bb == BLOCK_FOR_INSN (succ);
}

/* Construct successor fences from OLD_FENCEs and put them in NEW_FENCES.
   When a successor will continue an ebb, transfer all parameters of a fence
   to the new fence.  ORIG_MAX_SEQNO is the maximal seqno before this round
   of scheduling, helping to distinguish between the old and the new code.  */
static void
extract_new_fences_from (flist_t old_fences, flist_tail_t new_fences,
			 int orig_max_seqno)
{
  bool was_here_p = false;
  insn_t insn = NULL;
  insn_t succ;
  succ_iterator si;
  ilist_iterator ii;
  fence_t fence = FLIST_FENCE (old_fences);
  basic_block bb;

  /* Get the only element of FENCE_BNDS (fence).  */
  FOR_EACH_INSN (insn, ii, FENCE_BNDS (fence))
    {
      gcc_assert (!was_here_p);
      was_here_p = true;
    }
  gcc_assert (was_here_p && insn != NULL_RTX);

  /* When in the "middle" of the block, just move this fence
     to the new list.  */
  bb = BLOCK_FOR_INSN (insn);
  if (! sel_bb_end_p (insn)
      || (single_succ_p (bb)
          && single_pred_p (single_succ (bb))))
    {
      insn_t succ;

      succ = (sel_bb_end_p (insn)
              ? sel_bb_head (single_succ (bb))
              : NEXT_INSN (insn));

      if (INSN_SEQNO (succ) > 0
          && INSN_SEQNO (succ) <= orig_max_seqno
          && INSN_SCHED_TIMES (succ) <= 0)
        {
          FENCE_INSN (fence) = succ;
          move_fence_to_fences (old_fences, new_fences);

          if (sched_verbose >= 1)
            sel_print ("Fence %d continues as %d[%d] (state continue)\n",
                       INSN_UID (insn), INSN_UID (succ), BLOCK_NUM (succ));
        }
      return;
    }

  /* Otherwise copy fence's structures to (possibly) multiple successors.  */
  FOR_EACH_SUCC_1 (succ, si, insn, SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
    {
      int seqno = INSN_SEQNO (succ);

      if (0 < seqno && seqno <= orig_max_seqno
          && (pipelining_p || INSN_SCHED_TIMES (succ) <= 0))
        {
          bool b = (in_same_ebb_p (insn, succ)
                    || in_fallthru_bb_p (insn, succ));

          if (sched_verbose >= 1)
            sel_print ("Fence %d continues as %d[%d] (state %s)\n",
                       INSN_UID (insn), INSN_UID (succ),
                       BLOCK_NUM (succ), b ? "continue" : "reset");

          if (b)
            add_dirty_fence_to_fences (new_fences, succ, fence);
          else
            {
              /* Mark block of the SUCC as head of the new ebb.  */
              bitmap_set_bit (forced_ebb_heads, BLOCK_NUM (succ));
              add_clean_fence_to_fences (new_fences, succ, fence);
            }
        }
    }
}


/* Functions to support substitution.  */

/* Returns whether INSN with dependence status DS is eligible for
   substitution, i.e. it's a copy operation x := y, and the RHS that is
   moved up through this insn should be substituted.  */
static bool
can_substitute_through_p (insn_t insn, ds_t ds)
{
  /* We can substitute only true dependencies.  */
  if ((ds & DEP_OUTPUT)
      || (ds & DEP_ANTI)
      || ! INSN_RHS (insn)
      || ! INSN_LHS (insn))
    return false;

  /* Now we just need to make sure the INSN_RHS consists of only one
     simple REG rtx.  */
  if (REG_P (INSN_LHS (insn))
      && REG_P (INSN_RHS (insn)))
    return true;
  return false;
}
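
/* For example (illustrative only), when "z = x * 2" is moved up through the
   reg-reg copy "x = y", the true dependence on x is resolved by rewriting
   the moved expression into "z = y * 2".  */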

/* Substitute all occurrences of INSN's destination in EXPR's vinsn with
   INSN's source (if INSN is eligible for substitution).  Returns TRUE if
   substitution was actually performed, FALSE otherwise.  Substitution might
   not be performed because either EXPR's vinsn doesn't contain INSN's
   destination or the resulting insn is invalid for the target machine.
   When UNDO is true, perform unsubstitution instead (the difference is in
   the part of rtx on which validate_replace_rtx is called).  */
static bool
substitute_reg_in_expr (expr_t expr, insn_t insn, bool undo)
{
  rtx *where;
  bool new_insn_valid;
  vinsn_t *vi = &EXPR_VINSN (expr);
  bool has_rhs = VINSN_RHS (*vi) != NULL;
  rtx old, new_rtx;

  /* Do not try to replace in SET_DEST.  Although we'll choose a new
     register for the RHS, we don't want to change the RHS' original reg.
     If the insn is not a SET, we may still be able to substitute something
     in it, and if we're here (don't have deps), it doesn't write INSN's
     dest.  */
  where = (has_rhs
	   ? &VINSN_RHS (*vi)
	   : &PATTERN (VINSN_INSN_RTX (*vi)));
  old = undo ? INSN_RHS (insn) : INSN_LHS (insn);

  /* Substitute if INSN has a form of x:=y and LHS(INSN) occurs in *VI.  */
  if (rtx_ok_for_substitution_p (old, *where))
    {
      rtx_insn *new_insn;
      rtx *where_replace;

      /* We should copy these rtxes before substitution.  */
      new_rtx = copy_rtx (undo ? INSN_LHS (insn) : INSN_RHS (insn));
      new_insn = create_copy_of_insn_rtx (VINSN_INSN_RTX (*vi));

      /* Where we'll replace.
         WHERE_REPLACE should point inside NEW_INSN, so INSN_RHS couldn't be
	 used instead of SET_SRC.  */
      where_replace = (has_rhs
		       ? &SET_SRC (PATTERN (new_insn))
		       : &PATTERN (new_insn));

      new_insn_valid
        = validate_replace_rtx_part_nosimplify (old, new_rtx, where_replace,
                                                new_insn);

      /* ??? Actually, the constrain_operands result depends upon the choice
         of the destination register.  E.g. if we allow a single register to
	 be an rhs, and if we try to move dx=ax (as rhs) through ax=dx, we'll
	 get the invalid insn dx=dx, so we'll lose this rhs here.
	 Just can't come up with a significant testcase for this, so just
	 leaving it for now.  */
      if (new_insn_valid)
	{
	  change_vinsn_in_expr (expr,
				create_vinsn_from_insn_rtx (new_insn, false));

	  /* Do not allow clobbering the address register of speculative
             insns.  */
	  if ((EXPR_SPEC_DONE_DS (expr) & SPECULATIVE)
              && register_unavailable_p (VINSN_REG_USES (EXPR_VINSN (expr)),
					 expr_dest_reg (expr)))
	    EXPR_TARGET_AVAILABLE (expr) = false;

	  return true;
	}
      else
        return false;
    }
  else
    return false;
}

/* Return the number of places WHAT appears within WHERE.
   Bail out when we find a reference occupying several hard registers.  */
static int
count_occurrences_equiv (const_rtx what, const_rtx where)
{
  int count = 0;
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, where, NONCONST)
    {
      const_rtx x = *iter;
      if (REG_P (x) && REGNO (x) == REGNO (what))
	{
	  /* Bail out if mode is different or more than one register is
	     used.  */
	  if (GET_MODE (x) != GET_MODE (what)
	      || (HARD_REGISTER_P (x)
		  && hard_regno_nregs[REGNO (x)][GET_MODE (x)] > 1))
	    return 0;
	  count += 1;
	}
      else if (GET_CODE (x) == SUBREG
	       && (!REG_P (SUBREG_REG (x))
		   || REGNO (SUBREG_REG (x)) == REGNO (what)))
	/* ??? Do not support substituting regs inside subregs.  In that case,
	   simplify_subreg will be called by validate_replace_rtx, and
	   unsubstitution will fail later.  */
	return 0;
    }
  return count;
}

/* Returns TRUE if WHAT is found in the WHERE rtx tree.  */
static bool
rtx_ok_for_substitution_p (rtx what, rtx where)
{
  return (count_occurrences_equiv (what, where) > 0);
}


/* Functions to support register renaming.  */

/* Create an insn with the lhs of VI and RHS_RTX as its source.  Returns
   the newly created insn rtx.  */
static rtx_insn *
create_insn_rtx_with_rhs (vinsn_t vi, rtx rhs_rtx)
{
  rtx lhs_rtx;
  rtx pattern;
  rtx_insn *insn_rtx;

  lhs_rtx = copy_rtx (VINSN_LHS (vi));

  pattern = gen_rtx_SET (VOIDmode, lhs_rtx, rhs_rtx);
  insn_rtx = create_insn_rtx_from_pattern (pattern, NULL_RTX);

  return insn_rtx;
}

/* Returns whether INSN's src can be replaced with the register NEW_SRC_REG.
   E.g. the following insn is valid for i386:

    (insn:HI 2205 6585 2207 727 ../../gcc/libiberty/regex.c:3337
      (set (mem/s:QI (plus:SI (plus:SI (reg/f:SI 7 sp)
			(reg:SI 0 ax [orig:770 c1 ] [770]))
		    (const_int 288 [0x120])) [0 str S1 A8])
	    (const_int 0 [0x0])) 43 {*movqi_1} (nil)
	(nil))

  But if we change (const_int 0 [0x0]) to (reg:QI 4 si), it will be invalid
  because of operand constraints:

    (define_insn "*movqi_1"
      [(set (match_operand:QI 0 "nonimmediate_operand" "=q,q ,q ,r,r ,?r,m")
	    (match_operand:QI 1 "general_operand"      " q,qn,qm,q,rn,qm,qn")
	    )]

  So do constrain_operands here, before choosing NEW_SRC_REG as the best
  reg for the rhs.  */

static bool
replace_src_with_reg_ok_p (insn_t insn, rtx new_src_reg)
{
  vinsn_t vi = INSN_VINSN (insn);
  machine_mode mode;
  rtx dst_loc;
  bool res;

  gcc_assert (VINSN_SEPARABLE_P (vi));

  get_dest_and_mode (insn, &dst_loc, &mode);
  gcc_assert (mode == GET_MODE (new_src_reg));

  if (REG_P (dst_loc) && REGNO (new_src_reg) == REGNO (dst_loc))
    return true;

  /* See whether SET_SRC can be replaced with this register.  */
  validate_change (insn, &SET_SRC (PATTERN (insn)), new_src_reg, 1);
  res = verify_changes (0);
  cancel_changes (0);

  return res;
}

/* Returns whether INSN will still be valid after replacing its DEST with
   the register NEW_REG.  */
static bool
replace_dest_with_reg_ok_p (insn_t insn, rtx new_reg)
{
  vinsn_t vi = INSN_VINSN (insn);
  bool res;

  /* We should deal here only with separable insns.  */
  gcc_assert (VINSN_SEPARABLE_P (vi));
  gcc_assert (GET_MODE (VINSN_LHS (vi)) == GET_MODE (new_reg));

  /* See whether SET_DEST can be replaced with this register.  */
  validate_change (insn, &SET_DEST (PATTERN (insn)), new_reg, 1);
  res = verify_changes (0);
  cancel_changes (0);

  return res;
}

/* Create a pattern with the rhs of VI and the lhs of LHS_RTX.  */
static rtx_insn *
create_insn_rtx_with_lhs (vinsn_t vi, rtx lhs_rtx)
{
  rtx rhs_rtx;
  rtx pattern;
  rtx_insn *insn_rtx;

  rhs_rtx = copy_rtx (VINSN_RHS (vi));

  pattern = gen_rtx_SET (VOIDmode, lhs_rtx, rhs_rtx);
  insn_rtx = create_insn_rtx_from_pattern (pattern, NULL_RTX);

  return insn_rtx;
}

/* Substitute the lhs in the given expression EXPR with the register NEW_REG.
   The previous SET_DEST may be an arbitrary rtx, not only a register.  */
static void
replace_dest_with_reg_in_expr (expr_t expr, rtx new_reg)
{
  rtx_insn *insn_rtx;
  vinsn_t vinsn;

  insn_rtx = create_insn_rtx_with_lhs (EXPR_VINSN (expr), new_reg);
  vinsn = create_vinsn_from_insn_rtx (insn_rtx, false);

  change_vinsn_in_expr (expr, vinsn);
  EXPR_WAS_RENAMED (expr) = 1;
  EXPR_TARGET_AVAILABLE (expr) = 1;
}

/* Returns whether VI writes either one of the USED_REGS registers or,
   if a register is a hard one, one of the UNAVAILABLE_HARD_REGS registers.  */
static bool
vinsn_writes_one_of_regs_p (vinsn_t vi, regset used_regs,
                            HARD_REG_SET unavailable_hard_regs)
{
  unsigned regno;
  reg_set_iterator rsi;

  EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_SETS (vi), 0, regno, rsi)
    {
      if (REGNO_REG_SET_P (used_regs, regno))
        return true;
      if (HARD_REGISTER_NUM_P (regno)
          && TEST_HARD_REG_BIT (unavailable_hard_regs, regno))
	return true;
    }

  EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_CLOBBERS (vi), 0, regno, rsi)
    {
      if (REGNO_REG_SET_P (used_regs, regno))
        return true;
      if (HARD_REGISTER_NUM_P (regno)
          && TEST_HARD_REG_BIT (unavailable_hard_regs, regno))
	return true;
    }

  return false;
}

/* Returns the register class of the output register in INSN.
   Returns NO_REGS for call insns because some targets have constraints on
   the destination register of a call insn.

   Code adopted from regrename.c::build_def_use.  */
static enum reg_class
get_reg_class (rtx_insn *insn)
{
  int i, n_ops;

  extract_constrain_insn (insn);
  preprocess_constraints (insn);
  n_ops = recog_data.n_operands;

  const operand_alternative *op_alt = which_op_alt ();
  if (asm_noperands (PATTERN (insn)) > 0)
    {
      for (i = 0; i < n_ops; i++)
	if (recog_data.operand_type[i] == OP_OUT)
	  {
	    rtx *loc = recog_data.operand_loc[i];
	    rtx op = *loc;
	    enum reg_class cl = alternative_class (op_alt, i);

	    if (REG_P (op)
		&& REGNO (op) == ORIGINAL_REGNO (op))
	      continue;

	    return cl;
	  }
    }
  else if (!CALL_P (insn))
    {
      for (i = 0; i < n_ops + recog_data.n_dups; i++)
       {
	 int opn = i < n_ops ? i : recog_data.dup_num[i - n_ops];
	 enum reg_class cl = alternative_class (op_alt, opn);

	 if (recog_data.operand_type[opn] == OP_OUT ||
	     recog_data.operand_type[opn] == OP_INOUT)
	   return cl;
       }
    }

/*  Insns like
    (insn (set (reg:CCZ 17 flags) (compare:CCZ ...)))
    may result in returning NO_REGS, because flags is written implicitly
    through the CMP insn, which has no OP_OUT | OP_INOUT operands.  */
  return NO_REGS;
}

#ifdef HARD_REGNO_RENAME_OK
/* Calculate HARD_REGNO_RENAME_OK data for REGNO.  */
static void
init_hard_regno_rename (int regno)
{
  int cur_reg;

  SET_HARD_REG_BIT (sel_hrd.regs_for_rename[regno], regno);

  for (cur_reg = 0; cur_reg < FIRST_PSEUDO_REGISTER; cur_reg++)
    {
      /* We are not interested in renaming into regs that are not used.  */
      if (!TEST_HARD_REG_BIT (sel_hrd.regs_ever_used, cur_reg))
        continue;

      if (HARD_REGNO_RENAME_OK (regno, cur_reg))
        SET_HARD_REG_BIT (sel_hrd.regs_for_rename[regno], cur_reg);
    }
}
#endif

/* A wrapper around HARD_REGNO_RENAME_OK that will look into the hard regs
   data first.  */
static inline bool
sel_hard_regno_rename_ok (int from ATTRIBUTE_UNUSED, int to ATTRIBUTE_UNUSED)
{
#ifdef HARD_REGNO_RENAME_OK
  /* Check whether this is all calculated.  */
  if (TEST_HARD_REG_BIT (sel_hrd.regs_for_rename[from], from))
    return TEST_HARD_REG_BIT (sel_hrd.regs_for_rename[from], to);

  init_hard_regno_rename (from);

  return TEST_HARD_REG_BIT (sel_hrd.regs_for_rename[from], to);
#else
  return true;
#endif
}

/* Calculate the set of registers that are capable of holding MODE.  */
static void
init_regs_for_mode (machine_mode mode)
{
  int cur_reg;

  CLEAR_HARD_REG_SET (sel_hrd.regs_for_mode[mode]);
  CLEAR_HARD_REG_SET (sel_hrd.regs_for_call_clobbered[mode]);

  for (cur_reg = 0; cur_reg < FIRST_PSEUDO_REGISTER; cur_reg++)
    {
      int nregs;
      int i;

      /* See whether it accepts all modes that occur in
         original insns.  */
      if (! HARD_REGNO_MODE_OK (cur_reg, mode))
        continue;

      nregs = hard_regno_nregs[cur_reg][mode];

      for (i = nregs - 1; i >= 0; --i)
        if (fixed_regs[cur_reg + i]
                || global_regs[cur_reg + i]
            /* Can't use regs which aren't saved by
               the prologue.  */
            || !TEST_HARD_REG_BIT (sel_hrd.regs_ever_used, cur_reg + i)
	    /* Can't use regs with non-null REG_BASE_VALUE, because adjusting
	       it affects aliasing globally and invalidates all AV sets.  */
	    || get_reg_base_value (cur_reg + i)
#ifdef LEAF_REGISTERS
            /* We can't use a non-leaf register if we're in a
               leaf function.  */
            || (crtl->is_leaf
                && !LEAF_REGISTERS[cur_reg + i])
#endif
            )
          break;

      if (i >= 0)
        continue;

      if (HARD_REGNO_CALL_PART_CLOBBERED (cur_reg, mode))
        SET_HARD_REG_BIT (sel_hrd.regs_for_call_clobbered[mode],
                          cur_reg);

      /* If the CUR_REG passed all the checks above,
         then it's ok.  */
      SET_HARD_REG_BIT (sel_hrd.regs_for_mode[mode], cur_reg);
    }

  sel_hrd.regs_for_mode_ok[mode] = true;
}

/* Init all register sets gathered in HRD.  */
static void
init_hard_regs_data (void)
{
  int cur_reg = 0;
  int cur_mode = 0;

  CLEAR_HARD_REG_SET (sel_hrd.regs_ever_used);
  for (cur_reg = 0; cur_reg < FIRST_PSEUDO_REGISTER; cur_reg++)
    if (df_regs_ever_live_p (cur_reg) || call_used_regs[cur_reg])
      SET_HARD_REG_BIT (sel_hrd.regs_ever_used, cur_reg);

  /* Initialize registers that are valid based on mode when this is
     really needed.  */
  for (cur_mode = 0; cur_mode < NUM_MACHINE_MODES; cur_mode++)
    sel_hrd.regs_for_mode_ok[cur_mode] = false;

  /* Mark that the HARD_REGNO_RENAME_OK data is not yet calculated for any
     register.  */
  for (cur_reg = 0; cur_reg < FIRST_PSEUDO_REGISTER; cur_reg++)
    CLEAR_HARD_REG_SET (sel_hrd.regs_for_rename[cur_reg]);

#ifdef STACK_REGS
  CLEAR_HARD_REG_SET (sel_hrd.stack_regs);

  for (cur_reg = FIRST_STACK_REG; cur_reg <= LAST_STACK_REG; cur_reg++)
    SET_HARD_REG_BIT (sel_hrd.stack_regs, cur_reg);
#endif
}

/* Mark hardware regs in REG_RENAME_P that are not suitable
   for renaming the rhs in INSN due to hardware restrictions (register class,
   modes compatibility etc).  This doesn't affect the original insn's dest
   reg, if it isn't in USED_REGS.  DEF is a definition insn of the rhs for
   which the destination register is sought.  LHS (DEF->ORIG_INSN) may be a
   REG or a MEM.  Registers that are in used_regs are always marked in
   unavailable_hard_regs as well.  */

static void
mark_unavailable_hard_regs (def_t def, struct reg_rename *reg_rename_p,
                            regset used_regs ATTRIBUTE_UNUSED)
{
  machine_mode mode;
  enum reg_class cl = NO_REGS;
  rtx orig_dest;
  unsigned cur_reg, regno;
  hard_reg_set_iterator hrsi;

  gcc_assert (GET_CODE (PATTERN (def->orig_insn)) == SET);
  gcc_assert (reg_rename_p);

  orig_dest = SET_DEST (PATTERN (def->orig_insn));

  /* We have decided not to rename 'mem = something;' insns, as 'something'
     is usually a register.  */
  if (!REG_P (orig_dest))
    return;

  regno = REGNO (orig_dest);

  /* If before reload, don't try to work with pseudos.  */
  if (!reload_completed && !HARD_REGISTER_NUM_P (regno))
    return;

  if (reload_completed)
    cl = get_reg_class (def->orig_insn);

  /* Stop if the original register is one of the fixed_regs, global_regs or
     frame pointer, or we could not discover its class.  */
  if (fixed_regs[regno]
      || global_regs[regno]
#if !HARD_FRAME_POINTER_IS_FRAME_POINTER
      || (frame_pointer_needed && regno == HARD_FRAME_POINTER_REGNUM)
#else
      || (frame_pointer_needed && regno == FRAME_POINTER_REGNUM)
#endif
      || (reload_completed && cl == NO_REGS))
    {
      SET_HARD_REG_SET (reg_rename_p->unavailable_hard_regs);

      /* Give a chance for the original register, if it isn't in used_regs.  */
      if (!def->crosses_call)
        CLEAR_HARD_REG_BIT (reg_rename_p->unavailable_hard_regs, regno);

      return;
    }

  /* If something is allocated on the stack in this function, mark the frame
     pointer register as unavailable, considering also modes.
     FIXME: it is enough to do this once for all original defs.  */
  if (frame_pointer_needed)
    {
      add_to_hard_reg_set (&reg_rename_p->unavailable_hard_regs,
			   Pmode, FRAME_POINTER_REGNUM);

      if (!HARD_FRAME_POINTER_IS_FRAME_POINTER)
        add_to_hard_reg_set (&reg_rename_p->unavailable_hard_regs,
			     Pmode, HARD_FRAME_POINTER_REGNUM);
    }

#ifdef STACK_REGS
  /* For the stack registers, the presence of FIRST_STACK_REG in USED_REGS
     is equivalent to all stack regs being in this set.
     I.e. no stack register can be renamed, and even if it's an original
     register here, we make sure it won't be lifted over its previous def
     (its previous def will appear as if it were a FIRST_STACK_REG def).
     The HARD_REGNO_RENAME_OK covers other cases in the condition below.  */
  if (IN_RANGE (REGNO (orig_dest), FIRST_STACK_REG, LAST_STACK_REG)
      && REGNO_REG_SET_P (used_regs, FIRST_STACK_REG))
    IOR_HARD_REG_SET (reg_rename_p->unavailable_hard_regs,
                      sel_hrd.stack_regs);
#endif

  /* If there's a call on this path, make regs from call_used_reg_set
     unavailable.  */
  if (def->crosses_call)
    IOR_HARD_REG_SET (reg_rename_p->unavailable_hard_regs,
                      call_used_reg_set);

  /* Stop here before reload: we need FRAME_REGS, STACK_REGS, and crosses_call,
     but not register classes.  */
  if (!reload_completed)
    return;

  /* Leave regs as 'available' only from the current
     register class.  */
  COPY_HARD_REG_SET (reg_rename_p->available_for_renaming,
                     reg_class_contents[cl]);

  mode = GET_MODE (orig_dest);

  /* Leave only registers available for this mode.  */
  if (!sel_hrd.regs_for_mode_ok[mode])
    init_regs_for_mode (mode);
  AND_HARD_REG_SET (reg_rename_p->available_for_renaming,
                    sel_hrd.regs_for_mode[mode]);

  /* Exclude registers that are partially call clobbered.  */
  if (def->crosses_call
      && ! HARD_REGNO_CALL_PART_CLOBBERED (regno, mode))
    AND_COMPL_HARD_REG_SET (reg_rename_p->available_for_renaming,
                            sel_hrd.regs_for_call_clobbered[mode]);

  /* Leave only those that are ok to rename.  */
  EXECUTE_IF_SET_IN_HARD_REG_SET (reg_rename_p->available_for_renaming,
                                  0, cur_reg, hrsi)
    {
      int nregs;
      int i;

      nregs = hard_regno_nregs[cur_reg][mode];
      gcc_assert (nregs > 0);

      for (i = nregs - 1; i >= 0; --i)
        if (! sel_hard_regno_rename_ok (regno + i, cur_reg + i))
          break;

      if (i >= 0)
        CLEAR_HARD_REG_BIT (reg_rename_p->available_for_renaming,
                            cur_reg);
    }

  AND_COMPL_HARD_REG_SET (reg_rename_p->available_for_renaming,
                          reg_rename_p->unavailable_hard_regs);

  /* Regno is always ok from the renaming point of view, but it really
     could be in *unavailable_hard_regs already, so set it here instead
     of there.  */
  SET_HARD_REG_BIT (reg_rename_p->available_for_renaming, regno);
}

/* reg_rename_tick[REG1] > reg_rename_tick[REG2] if REG1 was chosen as the
   best register more recently than REG2.  */
static int reg_rename_tick[FIRST_PSEUDO_REGISTER];

/* Indicates the number of times renaming happened before the current one.  */
static int reg_rename_this_tick;
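
/* For illustration only: if two registers are both free but one of them has
   the smaller tick, it was chosen as the best register less recently and is
   preferred, which spreads renaming more evenly over the register file.  */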

/* Choose the register among free ones that is suitable for storing
   the rhs value.

   ORIGINAL_INSNS is the list of insns where the operation (rhs)
   originally appears.  There could be multiple original operations
   for a single rhs since we move it up and merge along different
   paths.

   Some code is adapted from regrename.c (regrename_optimize).
   If the original register is available, the function returns it.
   Otherwise it performs the checks, so the new register should
   comply with the following:
    - it should not violate any live ranges (such registers are in
      the REG_RENAME_P->available_for_renaming set);
    - it should not be in the HARD_REGS_USED regset;
    - it should be in the class compatible with original uses;
    - it should not be clobbered through a reference with a different mode;
    - if we're in a leaf function, then the new register should
      not be in the LEAF_REGISTERS;
    - etc.

   If several registers meet the conditions, the register with the smallest
   tick is returned to achieve more even register allocation.

   If the original register seems to be ok, we set *IS_ORIG_REG_P_PTR to true.

   If no register satisfies the above conditions, NULL_RTX is returned.  */
static rtx
choose_best_reg_1 (HARD_REG_SET hard_regs_used,
                   struct reg_rename *reg_rename_p,
                   def_list_t original_insns, bool *is_orig_reg_p_ptr)
{
  int best_new_reg;
  unsigned cur_reg;
  machine_mode mode = VOIDmode;
  unsigned regno, i, n;
  hard_reg_set_iterator hrsi;
  def_list_iterator di;
  def_t def;

  /* If the original register is available, return it.  */
  *is_orig_reg_p_ptr = true;

  FOR_EACH_DEF (def, di, original_insns)
    {
      rtx orig_dest = SET_DEST (PATTERN (def->orig_insn));

      gcc_assert (REG_P (orig_dest));

      /* Check that all original operations have the same mode.
         This is done for the next loop; if we'd return from this
         loop, we'd check only part of them, but in this case
         it doesn't matter.  */
      if (mode == VOIDmode)
        mode = GET_MODE (orig_dest);
      gcc_assert (mode == GET_MODE (orig_dest));

      regno = REGNO (orig_dest);
      for (i = 0, n = hard_regno_nregs[regno][mode]; i < n; i++)
        if (TEST_HARD_REG_BIT (hard_regs_used, regno + i))
          break;

      /* All hard registers are available.  */
      if (i == n)
        {
          gcc_assert (mode != VOIDmode);

          /* Hard registers should not be shared.  */
          return gen_rtx_REG (mode, regno);
        }
    }

  *is_orig_reg_p_ptr = false;
  best_new_reg = -1;

  /* Among all available regs choose the register that was
     allocated earliest.  */
  EXECUTE_IF_SET_IN_HARD_REG_SET (reg_rename_p->available_for_renaming,
                                  0, cur_reg, hrsi)
    if (! TEST_HARD_REG_BIT (hard_regs_used, cur_reg))
      {
	/* Check that all hard regs for mode are available.  */
	for (i = 1, n = hard_regno_nregs[cur_reg][mode]; i < n; i++)
	  if (TEST_HARD_REG_BIT (hard_regs_used, cur_reg + i)
	      || !TEST_HARD_REG_BIT (reg_rename_p->available_for_renaming,
				     cur_reg + i))
	    break;

	if (i < n)
	  continue;

        /* All hard registers are available.  */
        if (best_new_reg < 0
            || reg_rename_tick[cur_reg] < reg_rename_tick[best_new_reg])
          {
            best_new_reg = cur_reg;

            /* Return immediately when we know there's no better reg.  */
            if (! reg_rename_tick[best_new_reg])
              break;
          }
      }

  if (best_new_reg >= 0)
    {
      /* Use the check from the above loop.  */
      gcc_assert (mode != VOIDmode);
      return gen_rtx_REG (mode, best_new_reg);
    }

  return NULL_RTX;
}

/* A wrapper around choose_best_reg_1 () to verify that we make correct
   assumptions about available registers in the function.  */
static rtx
choose_best_reg (HARD_REG_SET hard_regs_used, struct reg_rename *reg_rename_p,
                 def_list_t original_insns, bool *is_orig_reg_p_ptr)
{
  rtx best_reg = choose_best_reg_1 (hard_regs_used, reg_rename_p,
                                    original_insns, is_orig_reg_p_ptr);

  /* FIXME loop over hard_regno_nregs here.  */
  gcc_assert (best_reg == NULL_RTX
	      || TEST_HARD_REG_BIT (sel_hrd.regs_ever_used, REGNO (best_reg)));

  return best_reg;
}
1449
1450/* Choose the pseudo register for storing rhs value.  As this is supposed
1451   to work before reload, we return either the original register or make
1452   the new one.  The parameters are the same that in choose_nest_reg_1
1453   functions, except that USED_REGS may contain pseudos.
1454   If we work with hard regs, check also REG_RENAME_P->UNAVAILABLE_HARD_REGS.
1455
1456   TODO: take into account register pressure while doing this.  Up to this
1457   moment, this function would never return NULL for pseudos, but we should
1458   not rely on this.  */
1459static rtx
1460choose_best_pseudo_reg (regset used_regs,
1461                        struct reg_rename *reg_rename_p,
1462                        def_list_t original_insns, bool *is_orig_reg_p_ptr)
1463{
1464  def_list_iterator i;
1465  def_t def;
1466  machine_mode mode = VOIDmode;
1467  bool bad_hard_regs = false;
1468
1469  /* We should not use this after reload.  */
1470  gcc_assert (!reload_completed);
1471
1472  /* If original register is available, return it.  */
1473  *is_orig_reg_p_ptr = true;
1474
1475  FOR_EACH_DEF (def, i, original_insns)
1476    {
1477      rtx dest = SET_DEST (PATTERN (def->orig_insn));
1478      int orig_regno;
1479
1480      gcc_assert (REG_P (dest));
1481
1482      /* Check that all original operations have the same mode.  */
1483      if (mode == VOIDmode)
1484        mode = GET_MODE (dest);
1485      else
1486        gcc_assert (mode == GET_MODE (dest));
1487      orig_regno = REGNO (dest);
1488
1489      /* Check that nothing in used_regs intersects with orig_regno.  When
1490	 we have a hard reg here, still loop over hard_regno_nregs.  */
1491      if (HARD_REGISTER_NUM_P (orig_regno))
1492	{
1493	  int j, n;
1494	  for (j = 0, n = hard_regno_nregs[orig_regno][mode]; j < n; j++)
1495	    if (REGNO_REG_SET_P (used_regs, orig_regno + j))
1496	      break;
1497	  if (j < n)
1498	    continue;
1499	}
1500      else
1501	{
1502	  if (REGNO_REG_SET_P (used_regs, orig_regno))
1503	    continue;
1504	}
1505      if (HARD_REGISTER_NUM_P (orig_regno))
1506	{
1507	  gcc_assert (df_regs_ever_live_p (orig_regno));
1508
	  /* For hard registers, we have to check hardware-imposed
	     limitations (frame/stack registers, calls crossed).  */
1511	  if (!TEST_HARD_REG_BIT (reg_rename_p->unavailable_hard_regs,
1512				  orig_regno))
1513	    {
	      /* Don't let the register cross a call if it doesn't already
		 cross one.  This condition mirrors the one used in
		 sched_analyze_reg () in sched-deps.c.  */
1517	      if (!reg_rename_p->crosses_call
1518		  || REG_N_CALLS_CROSSED (orig_regno) > 0)
1519		return gen_rtx_REG (mode, orig_regno);
1520	    }
1521
1522	  bad_hard_regs = true;
1523	}
1524      else
1525	return dest;
1526    }
1527
1528  *is_orig_reg_p_ptr = false;
1529
1530  /* We had some original hard registers that couldn't be used.
1531     Those were likely special.  Don't try to create a pseudo.  */
1532  if (bad_hard_regs)
1533    return NULL_RTX;
1534
1535  /* We haven't found a register from original operations.  Get a new one.
1536     FIXME: control register pressure somehow.  */
1537  {
1538    rtx new_reg = gen_reg_rtx (mode);
1539
1540    gcc_assert (mode != VOIDmode);
1541
1542    max_regno = max_reg_num ();
1543    maybe_extend_reg_info_p ();
1544    REG_N_CALLS_CROSSED (REGNO (new_reg)) = reg_rename_p->crosses_call ? 1 : 0;
1545
1546    return new_reg;
1547  }
1548}
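
/* A hypothetical example for the function above: when moving
   "r100 = r101 + r102" up through code where r100 is live (so r100 is in
   USED_REGS), none of the original registers can be reused, and a fresh
   pseudo is created with gen_reg_rtx; its REG_N_CALLS_CROSSED count is
   seeded from REG_RENAME_P->CROSSES_CALL so that later availability
   checks stay consistent.  */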
1549
/* Verify that the target of EXPR is marked available consistently with
   USED_REGS and REG_RENAME_P->UNAVAILABLE_HARD_REGS (see
   EXPR_TARGET_AVAILABLE).  */
1552static void
1553verify_target_availability (expr_t expr, regset used_regs,
1554			    struct reg_rename *reg_rename_p)
1555{
1556  unsigned n, i, regno;
1557  machine_mode mode;
1558  bool target_available, live_available, hard_available;
1559
1560  if (!REG_P (EXPR_LHS (expr)) || EXPR_TARGET_AVAILABLE (expr) < 0)
1561    return;
1562
1563  regno = expr_dest_regno (expr);
1564  mode = GET_MODE (EXPR_LHS (expr));
1565  target_available = EXPR_TARGET_AVAILABLE (expr) == 1;
1566  n = HARD_REGISTER_NUM_P (regno) ? hard_regno_nregs[regno][mode] : 1;
1567
1568  live_available = hard_available = true;
1569  for (i = 0; i < n; i++)
1570    {
1571      if (bitmap_bit_p (used_regs, regno + i))
1572        live_available = false;
1573      if (TEST_HARD_REG_BIT (reg_rename_p->unavailable_hard_regs, regno + i))
1574        hard_available = false;
1575    }
1576
1577  /* When target is not available, it may be due to hard register
1578     restrictions, e.g. crosses calls, so we check hard_available too.  */
1579  if (target_available)
1580    gcc_assert (live_available);
1581  else
    /* Check this only if we haven't scheduled something on the previous
       fence, because due to MAX_SOFTWARE_LOOKAHEAD_WINDOW_SIZE issues
       and having more than one fence, we may end up with targ_un in a block
       in which the successor's target register is actually available.

       The last condition handles the case when a dependence from a call insn
       was created in sched-deps.c for insns with destination registers that
       never crossed a call before, but do cross one after our code motion.

       FIXME: in the latter case, we just uselessly called find_used_regs,
       because we can't move this expression with any other register
       as well.  */
1594    gcc_assert (scheduled_something_on_previous_fence || !live_available
1595		|| !hard_available
1596		|| (!reload_completed && reg_rename_p->crosses_call
1597		    && REG_N_CALLS_CROSSED (regno) == 0));
1598}
1599
/* Collect registers unavailable for EXPR due to liveness on BNDS
   into USED_REGS.  Save additional information about registers that are
   available and registers that are unavailable due to hardware restrictions
   into the REG_RENAME_P structure.  Save original insns into the
   ORIGINAL_INSNS list.  */
1605static void
1606collect_unavailable_regs_from_bnds (expr_t expr, blist_t bnds, regset used_regs,
1607				    struct reg_rename *reg_rename_p,
1608				    def_list_t *original_insns)
1609{
1610  for (; bnds; bnds = BLIST_NEXT (bnds))
1611    {
1612      bool res;
1613      av_set_t orig_ops = NULL;
1614      bnd_t bnd = BLIST_BND (bnds);
1615
1616      /* If the chosen best expr doesn't belong to current boundary,
1617	 skip it.  */
1618      if (!av_set_is_in_p (BND_AV1 (bnd), EXPR_VINSN (expr)))
1619	continue;
1620
      /* Put in ORIG_OPS all exprs from this boundary that became
	 EXPR on top.  */
      orig_ops = find_sequential_best_exprs (bnd, expr, false);

      /* Compute used regs and OR them into USED_REGS.  */
      res = find_used_regs (BND_TO (bnd), orig_ops, used_regs,
			    reg_rename_p, original_insns);
1628
      /* FIXME: the assert holds until we have several boundaries.  */
1630      gcc_assert (res);
1631      av_set_clear (&orig_ops);
1632    }
1633}
1634
1635/* Return TRUE if it is possible to replace LHSes of ORIG_INSNS with BEST_REG.
1636   If BEST_REG is valid, replace LHS of EXPR with it.  */
1637static bool
1638try_replace_dest_reg (ilist_t orig_insns, rtx best_reg, expr_t expr)
1639{
  /* Check whether we'll be able to generate the insn
     'dest := best_reg' at the place of the original operation.  */
1642  for (; orig_insns; orig_insns = ILIST_NEXT (orig_insns))
1643    {
1644      insn_t orig_insn = DEF_LIST_DEF (orig_insns)->orig_insn;
1645
1646      gcc_assert (EXPR_SEPARABLE_P (INSN_EXPR (orig_insn)));
1647
1648      if (REGNO (best_reg) != REGNO (INSN_LHS (orig_insn))
1649	  && (! replace_src_with_reg_ok_p (orig_insn, best_reg)
1650	      || ! replace_dest_with_reg_ok_p (orig_insn, best_reg)))
1651	return false;
1652    }
1653
1654  /* Make sure that EXPR has the right destination
1655     register.  */
1656  if (expr_dest_regno (expr) != REGNO (best_reg))
1657    replace_dest_with_reg_in_expr (expr, best_reg);
1658  else
1659    EXPR_TARGET_AVAILABLE (expr) = 1;
1660
1661  return true;
1662}
1663
1664/* Select and assign best register to EXPR searching from BNDS.
1665   Set *IS_ORIG_REG_P to TRUE if original register was selected.
   Return FALSE if no register can be chosen, which could happen when:
   * EXPR_SEPARABLE_P is true but we were unable to find a suitable register;
1668   * EXPR_SEPARABLE_P is false but the insn sets/clobbers one of the registers
1669     that are used on the moving path.  */
1670static bool
1671find_best_reg_for_expr (expr_t expr, blist_t bnds, bool *is_orig_reg_p)
1672{
1673  static struct reg_rename reg_rename_data;
1674
1675  regset used_regs;
1676  def_list_t original_insns = NULL;
1677  bool reg_ok;
1678
1679  *is_orig_reg_p = false;
1680
1681  /* Don't bother to do anything if this insn doesn't set any registers.  */
1682  if (bitmap_empty_p (VINSN_REG_SETS (EXPR_VINSN (expr)))
1683      && bitmap_empty_p (VINSN_REG_CLOBBERS (EXPR_VINSN (expr))))
1684    return true;
1685
1686  used_regs = get_clear_regset_from_pool ();
1687  CLEAR_HARD_REG_SET (reg_rename_data.unavailable_hard_regs);
1688
1689  collect_unavailable_regs_from_bnds (expr, bnds, used_regs, &reg_rename_data,
1690				      &original_insns);
1691
1692#ifdef ENABLE_CHECKING
1693  /* If after reload, make sure we're working with hard regs here.  */
1694  if (reload_completed)
1695    {
1696      reg_set_iterator rsi;
1697      unsigned i;
1698
1699      EXECUTE_IF_SET_IN_REG_SET (used_regs, FIRST_PSEUDO_REGISTER, i, rsi)
1700        gcc_unreachable ();
1701    }
1702#endif
1703
1704  if (EXPR_SEPARABLE_P (expr))
1705    {
1706      rtx best_reg = NULL_RTX;
1707      /* Check that we have computed availability of a target register
1708	 correctly.  */
1709      verify_target_availability (expr, used_regs, &reg_rename_data);
1710
      /* Turn everything into hard regs after reload.  */
1712      if (reload_completed)
1713	{
1714	  HARD_REG_SET hard_regs_used;
1715	  REG_SET_TO_HARD_REG_SET (hard_regs_used, used_regs);
1716
1717	  /* Join hard registers unavailable due to register class
1718	     restrictions and live range intersection.  */
1719	  IOR_HARD_REG_SET (hard_regs_used,
1720			    reg_rename_data.unavailable_hard_regs);
1721
1722	  best_reg = choose_best_reg (hard_regs_used, &reg_rename_data,
1723				      original_insns, is_orig_reg_p);
1724	}
1725      else
1726	best_reg = choose_best_pseudo_reg (used_regs, &reg_rename_data,
1727					   original_insns, is_orig_reg_p);
1728
1729      if (!best_reg)
1730	reg_ok = false;
1731      else if (*is_orig_reg_p)
1732	{
1733	  /* In case of unification BEST_REG may be different from EXPR's LHS
1734	     when EXPR's LHS is unavailable, and there is another LHS among
1735	     ORIGINAL_INSNS.  */
1736	  reg_ok = try_replace_dest_reg (original_insns, best_reg, expr);
1737	}
1738      else
1739	{
1740	  /* Forbid renaming of low-cost insns.  */
1741	  if (sel_vinsn_cost (EXPR_VINSN (expr)) < 2)
1742	    reg_ok = false;
1743	  else
1744	    reg_ok = try_replace_dest_reg (original_insns, best_reg, expr);
1745	}
1746    }
1747  else
1748    {
1749      /* If !EXPR_SCHEDULE_AS_RHS (EXPR), just make sure INSN doesn't set
1750	 any of the HARD_REGS_USED set.  */
1751      if (vinsn_writes_one_of_regs_p (EXPR_VINSN (expr), used_regs,
1752				      reg_rename_data.unavailable_hard_regs))
1753	{
1754	  reg_ok = false;
1755	  gcc_assert (EXPR_TARGET_AVAILABLE (expr) <= 0);
1756	}
1757      else
1758	{
1759	  reg_ok = true;
1760	  gcc_assert (EXPR_TARGET_AVAILABLE (expr) != 0);
1761	}
1762    }
1763
1764  ilist_clear (&original_insns);
1765  return_regset_to_pool (used_regs);
1766
1767  return reg_ok;
1768}
1769
1770
/* Return true if the dependence described by DS can be overcome.  */
1772static bool
1773can_speculate_dep_p (ds_t ds)
1774{
1775  if (spec_info == NULL)
1776    return false;
1777
1778  /* Leave only speculative data.  */
1779  ds &= SPECULATIVE;
1780
1781  if (ds == 0)
1782    return false;
1783
1784  {
    /* FIXME: make sched-deps.c produce only those non-hard dependencies
       that we can overcome.  */
1787    ds_t spec_mask = spec_info->mask;
1788
1789    if ((ds & spec_mask) != ds)
1790      return false;
1791  }
1792
1793  if (ds_weak (ds) < spec_info->data_weakness_cutoff)
1794    return false;
1795
1796  return true;
1797}
1798
1799/* Get a speculation check instruction.
1800   C_EXPR is a speculative expression,
1801   CHECK_DS describes speculations that should be checked,
1802   ORIG_INSN is the original non-speculative insn in the stream.  */
1803static insn_t
1804create_speculation_check (expr_t c_expr, ds_t check_ds, insn_t orig_insn)
1805{
1806  rtx check_pattern;
1807  rtx_insn *insn_rtx;
1808  insn_t insn;
1809  basic_block recovery_block;
1810  rtx_insn *label;
1811
  /* Create a recovery block if the target is going to emit a branchy
     check, or if ORIG_INSN was speculative already.  */
1814  if (targetm.sched.needs_block_p (check_ds)
1815      || EXPR_SPEC_DONE_DS (INSN_EXPR (orig_insn)) != 0)
1816    {
1817      recovery_block = sel_create_recovery_block (orig_insn);
1818      label = BB_HEAD (recovery_block);
1819    }
1820  else
1821    {
1822      recovery_block = NULL;
1823      label = NULL;
1824    }
1825
1826  /* Get pattern of the check.  */
1827  check_pattern = targetm.sched.gen_spec_check (EXPR_INSN_RTX (c_expr), label,
1828						check_ds);
1829
1830  gcc_assert (check_pattern != NULL);
1831
1832  /* Emit check.  */
1833  insn_rtx = create_insn_rtx_from_pattern (check_pattern, label);
1834
1835  insn = sel_gen_insn_from_rtx_after (insn_rtx, INSN_EXPR (orig_insn),
1836				      INSN_SEQNO (orig_insn), orig_insn);
1837
  /* Make the check non-speculative.  */
1839  EXPR_SPEC_DONE_DS (INSN_EXPR (insn)) = 0;
1840  INSN_SPEC_CHECKED_DS (insn) = check_ds;
1841
  /* Decrease the priority of the check by the difference of the load/check
     instruction latencies.  */
1844  EXPR_PRIORITY (INSN_EXPR (insn)) -= (sel_vinsn_cost (INSN_VINSN (orig_insn))
1845				       - sel_vinsn_cost (INSN_VINSN (insn)));
1846
  /* Emit a copy of the original insn (though with a replaced target
     register, if needed) into the recovery block.  */
1849  if (recovery_block != NULL)
1850    {
1851      rtx twin_rtx;
1852
1853      twin_rtx = copy_rtx (PATTERN (EXPR_INSN_RTX (c_expr)));
1854      twin_rtx = create_insn_rtx_from_pattern (twin_rtx, NULL_RTX);
1855      sel_gen_recovery_insn_from_rtx_after (twin_rtx,
1856					    INSN_EXPR (orig_insn),
1857					    INSN_SEQNO (insn),
1858					    bb_note (recovery_block));
1859    }
1860
  /* If we've generated a data speculation check, make sure
     that all the bookkeeping instructions we'll create during
     this move_op () will allocate an ALAT entry so that the
     check won't fail.
     In the case of control speculation we must convert C_EXPR to
     control-speculative mode, because failing to do so will leave us
     with an exception thrown by the non-control-speculative load.  */
1868  check_ds = ds_get_max_dep_weak (check_ds);
1869  speculate_expr (c_expr, check_ds);
1870
1871  return insn;
1872}
1873
1874/* True when INSN is a "regN = regN" copy.  */
1875static bool
1876identical_copy_p (rtx insn)
1877{
1878  rtx lhs, rhs, pat;
1879
1880  pat = PATTERN (insn);
1881
1882  if (GET_CODE (pat) != SET)
1883    return false;
1884
1885  lhs = SET_DEST (pat);
1886  if (!REG_P (lhs))
1887    return false;
1888
1889  rhs = SET_SRC (pat);
1890  if (!REG_P (rhs))
1891    return false;
1892
1893  return REGNO (lhs) == REGNO (rhs);
1894}
1895
1896/* Undo all transformations on *AV_PTR that were done when
1897   moving through INSN.  */
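/* E.g. (a hypothetical case): if "z = y*2" became "z = x*2" while being
   moved up through the copy "y = x", undoing the TRANS_SUBSTITUTION
   entry recorded in the history vector restores the original "z = y*2"
   expression in the set.  */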
1898static void
1899undo_transformations (av_set_t *av_ptr, rtx_insn *insn)
1900{
1901  av_set_iterator av_iter;
1902  expr_t expr;
1903  av_set_t new_set = NULL;
1904
  /* First, kill any EXPR that uses registers set by the insn.  This is
     required for correctness.  */
1907  FOR_EACH_EXPR_1 (expr, av_iter, av_ptr)
1908    if (!sched_insns_conditions_mutex_p (insn, EXPR_INSN_RTX (expr))
1909        && bitmap_intersect_p (INSN_REG_SETS (insn),
1910                               VINSN_REG_USES (EXPR_VINSN (expr)))
1911        /* When an insn looks like 'r1 = r1', we could substitute through
1912           it, but the above condition will still hold.  This happened with
1913           gcc.c-torture/execute/961125-1.c.  */
1914        && !identical_copy_p (insn))
1915      {
1916        if (sched_verbose >= 6)
1917          sel_print ("Expr %d removed due to use/set conflict\n",
1918                     INSN_UID (EXPR_INSN_RTX (expr)));
1919        av_set_iter_remove (&av_iter);
1920      }
1921
1922  /* Undo transformations looking at the history vector.  */
1923  FOR_EACH_EXPR (expr, av_iter, *av_ptr)
1924    {
1925      int index = find_in_history_vect (EXPR_HISTORY_OF_CHANGES (expr),
1926                                        insn, EXPR_VINSN (expr), true);
1927
1928      if (index >= 0)
1929        {
1930          expr_history_def *phist;
1931
1932          phist = &EXPR_HISTORY_OF_CHANGES (expr)[index];
1933
1934          switch (phist->type)
1935            {
1936            case TRANS_SPECULATION:
1937              {
1938                ds_t old_ds, new_ds;
1939
1940                /* Compute the difference between old and new speculative
1941                   statuses: that's what we need to check.
1942                   Earlier we used to assert that the status will really
1943                   change.  This no longer works because only the probability
1944                   bits in the status may have changed during compute_av_set,
1945                   and in the case of merging different probabilities of the
1946                   same speculative status along different paths we do not
1947                   record this in the history vector.  */
1948                old_ds = phist->spec_ds;
1949                new_ds = EXPR_SPEC_DONE_DS (expr);
1950
1951                old_ds &= SPECULATIVE;
1952                new_ds &= SPECULATIVE;
1953                new_ds &= ~old_ds;
1954
1955                EXPR_SPEC_TO_CHECK_DS (expr) |= new_ds;
1956                break;
1957              }
1958            case TRANS_SUBSTITUTION:
1959              {
1960                expr_def _tmp_expr, *tmp_expr = &_tmp_expr;
1961                vinsn_t new_vi;
1962                bool add = true;
1963
1964                new_vi = phist->old_expr_vinsn;
1965
1966                gcc_assert (VINSN_SEPARABLE_P (new_vi)
1967                            == EXPR_SEPARABLE_P (expr));
1968                copy_expr (tmp_expr, expr);
1969
1970                if (vinsn_equal_p (phist->new_expr_vinsn,
1971                                   EXPR_VINSN (tmp_expr)))
1972                  change_vinsn_in_expr (tmp_expr, new_vi);
1973                else
1974                  /* This happens when we're unsubstituting on a bookkeeping
1975                     copy, which was in turn substituted.  The history is wrong
1976                     in this case.  Do it the hard way.  */
1977                  add = substitute_reg_in_expr (tmp_expr, insn, true);
1978                if (add)
1979                  av_set_add (&new_set, tmp_expr);
1980                clear_expr (tmp_expr);
1981                break;
1982              }
1983            default:
1984              gcc_unreachable ();
1985            }
1986        }
1987
1988    }
1989
1990  av_set_union_and_clear (av_ptr, &new_set, NULL);
1991}
1992
1993
1994/* Moveup_* helpers for code motion and computing av sets.  */
1995
/* Propagates EXPR inside an insn group through THROUGH_INSN.
   The difference from the function below is that only substitution is
   performed.  */
1999static enum MOVEUP_EXPR_CODE
2000moveup_expr_inside_insn_group (expr_t expr, insn_t through_insn)
2001{
2002  vinsn_t vi = EXPR_VINSN (expr);
2003  ds_t *has_dep_p;
2004  ds_t full_ds;
2005
2006  /* Do this only inside insn group.  */
2007  gcc_assert (INSN_SCHED_CYCLE (through_insn) > 0);
2008
2009  full_ds = has_dependence_p (expr, through_insn, &has_dep_p);
2010  if (full_ds == 0)
2011    return MOVEUP_EXPR_SAME;
2012
  /* Substitution is the only possible choice in this case.  */
2014  if (has_dep_p[DEPS_IN_RHS])
2015    {
2016      /* Can't substitute UNIQUE VINSNs.  */
2017      gcc_assert (!VINSN_UNIQUE_P (vi));
2018
2019      if (can_substitute_through_p (through_insn,
2020                                    has_dep_p[DEPS_IN_RHS])
2021          && substitute_reg_in_expr (expr, through_insn, false))
2022        {
2023          EXPR_WAS_SUBSTITUTED (expr) = true;
2024          return MOVEUP_EXPR_CHANGED;
2025        }
2026
2027      /* Don't care about this, as even true dependencies may be allowed
2028         in an insn group.  */
2029      return MOVEUP_EXPR_SAME;
2030    }
2031
2032  /* This can catch output dependencies in COND_EXECs.  */
2033  if (has_dep_p[DEPS_IN_INSN])
2034    return MOVEUP_EXPR_NULL;
2035
  /* This is either an output or an anti dependence, which usually has
     zero latency.  Allow it here; if we're wrong, tick_check_p
     will fix this.  */
2039  gcc_assert (has_dep_p[DEPS_IN_LHS]);
2040  return MOVEUP_EXPR_AS_RHS;
2041}
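
/* To illustrate the DEPS_IN_LHS case above (a hypothetical example):
   inside one parallel group, "r1 = r2 + 1" may be grouped with an earlier
   "r1 = r3" despite the output dependence on r1, and is then returned as
   MOVEUP_EXPR_AS_RHS; if the zero-latency assumption turns out to be
   wrong, tick_check_p will reject the combination later.  */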
2042
2043/* True when a trapping EXPR cannot be moved through THROUGH_INSN.  */
2044#define CANT_MOVE_TRAPPING(expr, through_insn)                \
2045  (VINSN_MAY_TRAP_P (EXPR_VINSN (expr))                       \
2046   && !sel_insn_has_single_succ_p ((through_insn), SUCCS_ALL) \
2047   && !sel_insn_is_speculation_check (through_insn))
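
/* For example (hypothetical): a possibly-faulting load "r1 = [r2]" must
   not be moved up through a jump with more than one successor, since on
   the paths where the load was not originally executed such motion could
   introduce a spurious fault.  An exception is made when THROUGH_INSN is
   itself a speculation check, which already guards this kind of motion.  */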
2048
2049/* True when a conflict on a target register was found during moveup_expr.  */
2050static bool was_target_conflict = false;
2051
2052/* Return true when moving a debug INSN across THROUGH_INSN will
2053   create a bookkeeping block.  We don't want to create such blocks,
2054   for they would cause codegen differences between compilations with
2055   and without debug info.  */
2056
2057static bool
2058moving_insn_creates_bookkeeping_block_p (insn_t insn,
2059					 insn_t through_insn)
2060{
2061  basic_block bbi, bbt;
2062  edge e1, e2;
2063  edge_iterator ei1, ei2;
2064
2065  if (!bookkeeping_can_be_created_if_moved_through_p (through_insn))
2066    {
2067      if (sched_verbose >= 9)
2068	sel_print ("no bookkeeping required: ");
2069      return FALSE;
2070    }
2071
2072  bbi = BLOCK_FOR_INSN (insn);
2073
2074  if (EDGE_COUNT (bbi->preds) == 1)
2075    {
2076      if (sched_verbose >= 9)
2077	sel_print ("only one pred edge: ");
2078      return TRUE;
2079    }
2080
2081  bbt = BLOCK_FOR_INSN (through_insn);
2082
2083  FOR_EACH_EDGE (e1, ei1, bbt->succs)
2084    {
2085      FOR_EACH_EDGE (e2, ei2, bbi->preds)
2086	{
2087	  if (find_block_for_bookkeeping (e1, e2, TRUE))
2088	    {
2089	      if (sched_verbose >= 9)
2090		sel_print ("found existing block: ");
2091	      return FALSE;
2092	    }
2093	}
2094    }
2095
2096  if (sched_verbose >= 9)
2097    sel_print ("would create bookkeeping block: ");
2098
2099  return TRUE;
2100}
2101
/* Return true when a conflict between EXPR and THROUGH_INSN is found
   because of implicit clobbers newly created by renaming.  */
2104static bool
2105implicit_clobber_conflict_p (insn_t through_insn, expr_t expr)
2106{
2107  HARD_REG_SET temp;
2108  rtx_insn *insn;
2109  rtx reg, rhs, pat;
2110  hard_reg_set_iterator hrsi;
2111  unsigned regno;
2112  bool valid;
2113
2114  /* Make a new pseudo register.  */
2115  reg = gen_reg_rtx (GET_MODE (EXPR_LHS (expr)));
2116  max_regno = max_reg_num ();
2117  maybe_extend_reg_info_p ();
2118
2119  /* Validate a change and bail out early.  */
2120  insn = EXPR_INSN_RTX (expr);
2121  validate_change (insn, &SET_DEST (PATTERN (insn)), reg, true);
2122  valid = verify_changes (0);
2123  cancel_changes (0);
2124  if (!valid)
2125    {
2126      if (sched_verbose >= 6)
2127	sel_print ("implicit clobbers failed validation, ");
2128      return true;
2129    }
2130
2131  /* Make a new insn with it.  */
2132  rhs = copy_rtx (VINSN_RHS (EXPR_VINSN (expr)));
2133  pat = gen_rtx_SET (VOIDmode, reg, rhs);
2134  start_sequence ();
2135  insn = emit_insn (pat);
2136  end_sequence ();
2137
2138  /* Calculate implicit clobbers.  */
2139  extract_insn (insn);
2140  preprocess_constraints (insn);
  alternative_mask preferred = get_preferred_alternatives (insn);
  ira_implicitly_set_insn_hard_regs (&temp, preferred);
2143  AND_COMPL_HARD_REG_SET (temp, ira_no_alloc_regs);
2144
2145  /* If any implicit clobber registers intersect with regular ones in
2146     through_insn, we have a dependency and thus bail out.  */
2147  EXECUTE_IF_SET_IN_HARD_REG_SET (temp, 0, regno, hrsi)
2148    {
2149      vinsn_t vi = INSN_VINSN (through_insn);
2150      if (bitmap_bit_p (VINSN_REG_SETS (vi), regno)
2151	  || bitmap_bit_p (VINSN_REG_CLOBBERS (vi), regno)
2152	  || bitmap_bit_p (VINSN_REG_USES (vi), regno))
2153	return true;
2154    }
2155
2156  return false;
2157}
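
/* A hypothetical scenario for the check above: renaming the hard-register
   destination of an insn to a fresh pseudo may make the insn match a
   different alternative, one that implicitly clobbers some fixed hard
   register (this is what ira_implicitly_set_insn_hard_regs computes);
   if THROUGH_INSN uses or sets that register, the renamed insn must not
   be moved through it.  */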
2158
2159/* Modifies EXPR so it can be moved through the THROUGH_INSN,
2160   performing necessary transformations.  Record the type of transformation
2161   made in PTRANS_TYPE, when it is not NULL.  When INSIDE_INSN_GROUP,
2162   permit all dependencies except true ones, and try to remove those
2163   too via forward substitution.  All cases when a non-eliminable
2164   non-zero cost dependency exists inside an insn group will be fixed
2165   in tick_check_p instead.  */
2166static enum MOVEUP_EXPR_CODE
2167moveup_expr (expr_t expr, insn_t through_insn, bool inside_insn_group,
2168            enum local_trans_type *ptrans_type)
2169{
2170  vinsn_t vi = EXPR_VINSN (expr);
2171  insn_t insn = VINSN_INSN_RTX (vi);
2172  bool was_changed = false;
2173  bool as_rhs = false;
2174  ds_t *has_dep_p;
2175  ds_t full_ds;
2176
  /* ??? We use dependencies of non-debug insns on debug insns to
     indicate that the debug insns need to be reset if the non-debug
     insn is pulled ahead of it.  It's hard to figure out how to
     introduce such a notion in sel-sched, but it already fails to
     support debug insns in other ways, so we just go ahead and
     let the debug insns go corrupt for now.  */
2183  if (DEBUG_INSN_P (through_insn) && !DEBUG_INSN_P (insn))
2184    return MOVEUP_EXPR_SAME;
2185
2186  /* When inside_insn_group, delegate to the helper.  */
2187  if (inside_insn_group)
2188    return moveup_expr_inside_insn_group (expr, through_insn);
2189
2190  /* Deal with unique insns and control dependencies.  */
2191  if (VINSN_UNIQUE_P (vi))
2192    {
      /* We can move jumps without side effects or jumps that are
	 mutually exclusive with instruction THROUGH_INSN (in all cases
	 where dependencies allow us to do so and the jump is not
	 speculative).  */
2196      if (control_flow_insn_p (insn))
2197        {
2198          basic_block fallthru_bb;
2199
2200          /* Do not move checks and do not move jumps through other
2201             jumps.  */
2202          if (control_flow_insn_p (through_insn)
2203              || sel_insn_is_speculation_check (insn))
2204            return MOVEUP_EXPR_NULL;
2205
2206          /* Don't move jumps through CFG joins.  */
2207          if (bookkeeping_can_be_created_if_moved_through_p (through_insn))
2208            return MOVEUP_EXPR_NULL;
2209
2210          /* The jump should have a clear fallthru block, and
2211             this block should be in the current region.  */
2212          if ((fallthru_bb = fallthru_bb_of_jump (insn)) == NULL
2213              || ! in_current_region_p (fallthru_bb))
2214            return MOVEUP_EXPR_NULL;
2215
2216          /* And it should be mutually exclusive with through_insn.  */
2217          if (! sched_insns_conditions_mutex_p (insn, through_insn)
2218	      && ! DEBUG_INSN_P (through_insn))
2219            return MOVEUP_EXPR_NULL;
2220        }
2221
2222      /* Don't move what we can't move.  */
2223      if (EXPR_CANT_MOVE (expr)
2224	  && BLOCK_FOR_INSN (through_insn) != BLOCK_FOR_INSN (insn))
2225	return MOVEUP_EXPR_NULL;
2226
2227      /* Don't move SCHED_GROUP instruction through anything.
2228         If we don't force this, then it will be possible to start
2229         scheduling a sched_group before all its dependencies are
2230         resolved.
2231         ??? Haifa deals with this issue by delaying the SCHED_GROUP
2232         as late as possible through rank_for_schedule.  */
2233      if (SCHED_GROUP_P (insn))
2234	return MOVEUP_EXPR_NULL;
2235    }
2236  else
2237    gcc_assert (!control_flow_insn_p (insn));
2238
2239  /* Don't move debug insns if this would require bookkeeping.  */
2240  if (DEBUG_INSN_P (insn)
2241      && BLOCK_FOR_INSN (through_insn) != BLOCK_FOR_INSN (insn)
2242      && moving_insn_creates_bookkeeping_block_p (insn, through_insn))
2243    return MOVEUP_EXPR_NULL;
2244
2245  /* Deal with data dependencies.  */
2246  was_target_conflict = false;
2247  full_ds = has_dependence_p (expr, through_insn, &has_dep_p);
2248  if (full_ds == 0)
2249    {
2250      if (!CANT_MOVE_TRAPPING (expr, through_insn))
2251	return MOVEUP_EXPR_SAME;
2252    }
2253  else
2254    {
2255      /* We can move UNIQUE insn up only as a whole and unchanged,
2256         so it shouldn't have any dependencies.  */
2257      if (VINSN_UNIQUE_P (vi))
2258	return MOVEUP_EXPR_NULL;
2259    }
2260
2261  if (full_ds != 0 && can_speculate_dep_p (full_ds))
2262    {
2263      int res;
2264
2265      res = speculate_expr (expr, full_ds);
2266      if (res >= 0)
2267	{
2268          /* Speculation was successful.  */
2269          full_ds = 0;
2270          was_changed = (res > 0);
2271          if (res == 2)
2272            was_target_conflict = true;
2273          if (ptrans_type)
2274            *ptrans_type = TRANS_SPECULATION;
2275	  sel_clear_has_dependence ();
2276	}
2277    }
2278
2279  if (has_dep_p[DEPS_IN_INSN])
2280    /* We have some dependency that cannot be discarded.  */
2281    return MOVEUP_EXPR_NULL;
2282
2283  if (has_dep_p[DEPS_IN_LHS])
2284    {
      /* Only separable insns can be moved up with the new register.
         In any case, we should mark that the original register is
         unavailable.  */
2288      if (!enable_schedule_as_rhs_p || !EXPR_SEPARABLE_P (expr))
2289        return MOVEUP_EXPR_NULL;
2290
2291      /* When renaming a hard register to a pseudo before reload, extra
2292	 dependencies can occur from the implicit clobbers of the insn.
2293	 Filter out such cases here.  */
2294      if (!reload_completed && REG_P (EXPR_LHS (expr))
2295	  && HARD_REGISTER_P (EXPR_LHS (expr))
2296	  && implicit_clobber_conflict_p (through_insn, expr))
2297	{
2298	  if (sched_verbose >= 6)
2299	    sel_print ("implicit clobbers conflict detected, ");
2300	  return MOVEUP_EXPR_NULL;
2301	}
2302      EXPR_TARGET_AVAILABLE (expr) = false;
2303      was_target_conflict = true;
2304      as_rhs = true;
2305    }
2306
  /* At this point we have either separable insns, which will be lifted
     up only as RHSes, or non-separable insns with no dependency on the LHS.
     If the dependency is in the RHS, then try to perform substitution and
     move up the substituted RHS:

      Ex. 1:				  Ex. 2:
	y = x;				    y = x;
	z = y*2;			    y = y*2;

    In Ex. 1, y*2 can be substituted with x*2, and the whole operation can
    be moved above the y=x assignment as z=x*2.

    In Ex. 2, y*2 can also be substituted with x*2, but only the right-hand
    side can be moved because of the output dependency.  The operation was
    cropped to its RHS above.  */
2322  if (has_dep_p[DEPS_IN_RHS])
2323    {
2324      ds_t *rhs_dsp = &has_dep_p[DEPS_IN_RHS];
2325
2326      /* Can't substitute UNIQUE VINSNs.  */
2327      gcc_assert (!VINSN_UNIQUE_P (vi));
2328
2329      if (can_speculate_dep_p (*rhs_dsp))
2330	{
2331          int res;
2332
2333          res = speculate_expr (expr, *rhs_dsp);
2334          if (res >= 0)
2335            {
2336              /* Speculation was successful.  */
2337              *rhs_dsp = 0;
2338              was_changed = (res > 0);
2339              if (res == 2)
2340                was_target_conflict = true;
2341              if (ptrans_type)
2342                *ptrans_type = TRANS_SPECULATION;
2343            }
2344	  else
2345	    return MOVEUP_EXPR_NULL;
2346	}
2347      else if (can_substitute_through_p (through_insn,
2348                                         *rhs_dsp)
2349               && substitute_reg_in_expr (expr, through_insn, false))
2350	{
2351          /* ??? We cannot perform substitution AND speculation on the same
2352             insn.  */
2353          gcc_assert (!was_changed);
2354          was_changed = true;
2355          if (ptrans_type)
2356            *ptrans_type = TRANS_SUBSTITUTION;
2357          EXPR_WAS_SUBSTITUTED (expr) = true;
2358	}
2359      else
2360	return MOVEUP_EXPR_NULL;
2361    }
2362
2363  /* Don't move trapping insns through jumps.
2364     This check should be at the end to give a chance to control speculation
2365     to perform its duties.  */
2366  if (CANT_MOVE_TRAPPING (expr, through_insn))
2367    return MOVEUP_EXPR_NULL;
2368
2369  return (was_changed
2370          ? MOVEUP_EXPR_CHANGED
2371          : (as_rhs
2372             ? MOVEUP_EXPR_AS_RHS
2373             : MOVEUP_EXPR_SAME));
2374}
2375
/* Try to look at bitmap caches for the EXPR and INSN pair, return true
   if successful.  When INSIDE_INSN_GROUP, also try to ignore dependencies
   that can exist within a parallel group.  Write to RES the resulting
   code for moveup_expr.  */
2380static bool
2381try_bitmap_cache (expr_t expr, insn_t insn,
2382                  bool inside_insn_group,
2383                  enum MOVEUP_EXPR_CODE *res)
2384{
2385  int expr_uid = INSN_UID (EXPR_INSN_RTX (expr));
2386
2387  /* First check whether we've analyzed this situation already.  */
2388  if (bitmap_bit_p (INSN_ANALYZED_DEPS (insn), expr_uid))
2389    {
2390      if (bitmap_bit_p (INSN_FOUND_DEPS (insn), expr_uid))
2391        {
2392          if (sched_verbose >= 6)
2393            sel_print ("removed (cached)\n");
2394          *res = MOVEUP_EXPR_NULL;
2395          return true;
2396        }
2397      else
2398        {
2399          if (sched_verbose >= 6)
2400            sel_print ("unchanged (cached)\n");
2401          *res = MOVEUP_EXPR_SAME;
2402          return true;
2403        }
2404    }
2405  else if (bitmap_bit_p (INSN_FOUND_DEPS (insn), expr_uid))
2406    {
2407      if (inside_insn_group)
2408        {
2409          if (sched_verbose >= 6)
2410            sel_print ("unchanged (as RHS, cached, inside insn group)\n");
2411          *res = MOVEUP_EXPR_SAME;
          return true;
        }
2415      else
2416        EXPR_TARGET_AVAILABLE (expr) = false;
2417
2418      /* This is the only case when propagation result can change over time,
2419         as we can dynamically switch off scheduling as RHS.  In this case,
2420         just check the flag to reach the correct decision.  */
2421      if (enable_schedule_as_rhs_p)
2422        {
2423          if (sched_verbose >= 6)
2424            sel_print ("unchanged (as RHS, cached)\n");
2425          *res = MOVEUP_EXPR_AS_RHS;
2426          return true;
2427        }
2428      else
2429        {
2430          if (sched_verbose >= 6)
2431            sel_print ("removed (cached as RHS, but renaming"
2432                       " is now disabled)\n");
2433          *res = MOVEUP_EXPR_NULL;
2434          return true;
2435        }
2436    }
2437
2438  return false;
2439}
2440
2441/* Try to look at bitmap caches for EXPR and INSN pair, return true
2442   if successful.  Write to RES the resulting code for moveup_expr.  */
2443static bool
2444try_transformation_cache (expr_t expr, insn_t insn,
2445                          enum MOVEUP_EXPR_CODE *res)
2446{
2447  struct transformed_insns *pti
2448    = (struct transformed_insns *)
2449    htab_find_with_hash (INSN_TRANSFORMED_INSNS (insn),
2450                         &EXPR_VINSN (expr),
2451                         VINSN_HASH_RTX (EXPR_VINSN (expr)));
2452  if (pti)
2453    {
2454      /* This EXPR was already moved through this insn and was
2455         changed as a result.  Fetch the proper data from
2456         the hashtable.  */
2457      insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (expr),
2458                              INSN_UID (insn), pti->type,
2459                              pti->vinsn_old, pti->vinsn_new,
2460                              EXPR_SPEC_DONE_DS (expr));
2461
2462      if (INSN_IN_STREAM_P (VINSN_INSN_RTX (pti->vinsn_new)))
2463        pti->vinsn_new = vinsn_copy (pti->vinsn_new, true);
2464      change_vinsn_in_expr (expr, pti->vinsn_new);
2465      if (pti->was_target_conflict)
2466        EXPR_TARGET_AVAILABLE (expr) = false;
2467      if (pti->type == TRANS_SPECULATION)
2468        {
2469          EXPR_SPEC_DONE_DS (expr) = pti->ds;
2470          EXPR_NEEDS_SPEC_CHECK_P (expr) |= pti->needs_check;
2471        }
2472
2473      if (sched_verbose >= 6)
2474        {
2475          sel_print ("changed (cached): ");
2476          dump_expr (expr);
2477          sel_print ("\n");
2478        }
2479
2480      *res = MOVEUP_EXPR_CHANGED;
2481      return true;
2482    }
2483
2484  return false;
2485}
2486
2487/* Update bitmap caches on INSN with result RES of propagating EXPR.  */
2488static void
2489update_bitmap_cache (expr_t expr, insn_t insn, bool inside_insn_group,
2490                     enum MOVEUP_EXPR_CODE res)
2491{
2492  int expr_uid = INSN_UID (EXPR_INSN_RTX (expr));
2493
  /* Do not cache the result of propagating through an insn group,
     as such a result holds only within the group and is not useful
     outside it.  */
2496  if (inside_insn_group)
2497    return;
2498
2499  if (res == MOVEUP_EXPR_NULL)
2500    {
2501      bitmap_set_bit (INSN_ANALYZED_DEPS (insn), expr_uid);
2502      bitmap_set_bit (INSN_FOUND_DEPS (insn), expr_uid);
2503    }
2504  else if (res == MOVEUP_EXPR_SAME)
2505    {
2506      bitmap_set_bit (INSN_ANALYZED_DEPS (insn), expr_uid);
2507      bitmap_clear_bit (INSN_FOUND_DEPS (insn), expr_uid);
2508    }
2509  else if (res == MOVEUP_EXPR_AS_RHS)
2510    {
2511      bitmap_clear_bit (INSN_ANALYZED_DEPS (insn), expr_uid);
2512      bitmap_set_bit (INSN_FOUND_DEPS (insn), expr_uid);
2513    }
2514  else
2515    gcc_unreachable ();
2516}
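
/* Taken together, the two bitmaps above encode three cacheable outcomes:
   ANALYZED_DEPS set,   FOUND_DEPS set   -> MOVEUP_EXPR_NULL;
   ANALYZED_DEPS set,   FOUND_DEPS clear -> MOVEUP_EXPR_SAME;
   ANALYZED_DEPS clear, FOUND_DEPS set   -> MOVEUP_EXPR_AS_RHS.
   MOVEUP_EXPR_CHANGED results are kept instead in the per-insn hashtable
   maintained by update_transformation_cache below.  */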
2517
2518/* Update hashtable on INSN with changed EXPR, old EXPR_OLD_VINSN
2519   and transformation type TRANS_TYPE.  */
2520static void
2521update_transformation_cache (expr_t expr, insn_t insn,
2522                             bool inside_insn_group,
2523                             enum local_trans_type trans_type,
2524                             vinsn_t expr_old_vinsn)
2525{
2526  struct transformed_insns *pti;
2527
2528  if (inside_insn_group)
2529    return;
2530
2531  pti = XNEW (struct transformed_insns);
2532  pti->vinsn_old = expr_old_vinsn;
2533  pti->vinsn_new = EXPR_VINSN (expr);
2534  pti->type = trans_type;
2535  pti->was_target_conflict = was_target_conflict;
2536  pti->ds = EXPR_SPEC_DONE_DS (expr);
2537  pti->needs_check = EXPR_NEEDS_SPEC_CHECK_P (expr);
2538  vinsn_attach (pti->vinsn_old);
2539  vinsn_attach (pti->vinsn_new);
2540  *((struct transformed_insns **)
2541    htab_find_slot_with_hash (INSN_TRANSFORMED_INSNS (insn),
2542                              pti, VINSN_HASH_RTX (expr_old_vinsn),
2543                              INSERT)) = pti;
2544}
2545
2546/* Same as moveup_expr, but first looks up the result of
2547   transformation in caches.  */
2548static enum MOVEUP_EXPR_CODE
2549moveup_expr_cached (expr_t expr, insn_t insn, bool inside_insn_group)
2550{
2551  enum MOVEUP_EXPR_CODE res;
2552  bool got_answer = false;
2553
2554  if (sched_verbose >= 6)
2555    {
2556      sel_print ("Moving ");
2557      dump_expr (expr);
2558      sel_print (" through %d: ", INSN_UID (insn));
2559    }
2560
2561  if (DEBUG_INSN_P (EXPR_INSN_RTX (expr))
2562      && (sel_bb_head (BLOCK_FOR_INSN (EXPR_INSN_RTX (expr)))
2563	  == EXPR_INSN_RTX (expr)))
2564    /* Don't use cached information for debug insns that are heads of
2565       basic blocks.  */;
2566  else if (try_bitmap_cache (expr, insn, inside_insn_group, &res))
    /* When inside an insn group, we do not want to remove stores conflicting
       with previously issued loads.  */
2569    got_answer = ! inside_insn_group || res != MOVEUP_EXPR_NULL;
2570  else if (try_transformation_cache (expr, insn, &res))
2571    got_answer = true;
2572
2573  if (! got_answer)
2574    {
2575      /* Invoke moveup_expr and record the results.  */
2576      vinsn_t expr_old_vinsn = EXPR_VINSN (expr);
2577      ds_t expr_old_spec_ds = EXPR_SPEC_DONE_DS (expr);
2578      int expr_uid = INSN_UID (VINSN_INSN_RTX (expr_old_vinsn));
2579      bool unique_p = VINSN_UNIQUE_P (expr_old_vinsn);
2580      enum local_trans_type trans_type = TRANS_SUBSTITUTION;
2581
2582      /* ??? Invent something better than this.  We can't allow old_vinsn
2583         to go, we need it for the history vector.  */
2584      vinsn_attach (expr_old_vinsn);
2585
2586      res = moveup_expr (expr, insn, inside_insn_group,
2587                         &trans_type);
2588      switch (res)
2589        {
2590        case MOVEUP_EXPR_NULL:
2591          update_bitmap_cache (expr, insn, inside_insn_group, res);
2592	  if (sched_verbose >= 6)
2593            sel_print ("removed\n");
2594	  break;
2595
2596	case MOVEUP_EXPR_SAME:
2597          update_bitmap_cache (expr, insn, inside_insn_group, res);
2598          if (sched_verbose >= 6)
2599            sel_print ("unchanged\n");
2600	  break;
2601
2602        case MOVEUP_EXPR_AS_RHS:
2603          gcc_assert (!unique_p || inside_insn_group);
2604          update_bitmap_cache (expr, insn, inside_insn_group, res);
2605	  if (sched_verbose >= 6)
2606            sel_print ("unchanged (as RHS)\n");
2607	  break;
2608
2609	case MOVEUP_EXPR_CHANGED:
2610          gcc_assert (INSN_UID (EXPR_INSN_RTX (expr)) != expr_uid
2611                      || EXPR_SPEC_DONE_DS (expr) != expr_old_spec_ds);
2612          insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (expr),
2613                                  INSN_UID (insn), trans_type,
2614                                  expr_old_vinsn, EXPR_VINSN (expr),
2615                                  expr_old_spec_ds);
2616          update_transformation_cache (expr, insn, inside_insn_group,
2617                                       trans_type, expr_old_vinsn);
2618          if (sched_verbose >= 6)
2619            {
2620              sel_print ("changed: ");
2621              dump_expr (expr);
2622              sel_print ("\n");
2623            }
2624	  break;
2625	default:
2626	  gcc_unreachable ();
2627        }
2628
2629      vinsn_detach (expr_old_vinsn);
2630    }
2631
2632  return res;
2633}
2634
2635/* Moves an av set AVP up through INSN, performing necessary
2636   transformations.  */
2637static void
2638moveup_set_expr (av_set_t *avp, insn_t insn, bool inside_insn_group)
2639{
2640  av_set_iterator i;
2641  expr_t expr;
2642
2643  FOR_EACH_EXPR_1 (expr, i, avp)
2644    {
      switch (moveup_expr_cached (expr, insn, inside_insn_group))
2647	{
2648	case MOVEUP_EXPR_SAME:
2649        case MOVEUP_EXPR_AS_RHS:
2650	  break;
2651
2652	case MOVEUP_EXPR_NULL:
2653	  av_set_iter_remove (&i);
2654	  break;
2655
2656	case MOVEUP_EXPR_CHANGED:
2657          expr = merge_with_other_exprs (avp, &i, expr);
2658	  break;
2659
2660	default:
2661	  gcc_unreachable ();
2662	}
2663    }
2664}
2665
2666/* Moves AVP set along PATH.  */
2667static void
2668moveup_set_inside_insn_group (av_set_t *avp, ilist_t path)
2669{
2670  int last_cycle;
2671
2672  if (sched_verbose >= 6)
2673    sel_print ("Moving expressions up in the insn group...\n");
2674  if (! path)
2675    return;
2676  last_cycle = INSN_SCHED_CYCLE (ILIST_INSN (path));
2677  while (path
2678         && INSN_SCHED_CYCLE (ILIST_INSN (path)) == last_cycle)
2679    {
2680      moveup_set_expr (avp, ILIST_INSN (path), true);
2681      path = ILIST_NEXT (path);
2682    }
2683}
2684
/* Returns true if after moving EXPR along PATH it equals EXPR_VLIW.  */
2686static bool
2687equal_after_moveup_path_p (expr_t expr, ilist_t path, expr_t expr_vliw)
2688{
2689  expr_def _tmp, *tmp = &_tmp;
2690  int last_cycle;
2691  bool res = true;
2692
2693  copy_expr_onside (tmp, expr);
2694  last_cycle = path ? INSN_SCHED_CYCLE (ILIST_INSN (path)) : 0;
2695  while (path
2696         && res
2697         && INSN_SCHED_CYCLE (ILIST_INSN (path)) == last_cycle)
2698    {
2699      res = (moveup_expr_cached (tmp, ILIST_INSN (path), true)
2700             != MOVEUP_EXPR_NULL);
2701      path = ILIST_NEXT (path);
2702    }
2703
2704  if (res)
2705    {
2706      vinsn_t tmp_vinsn = EXPR_VINSN (tmp);
2707      vinsn_t expr_vliw_vinsn = EXPR_VINSN (expr_vliw);
2708
2709      if (tmp_vinsn != expr_vliw_vinsn)
2710	res = vinsn_equal_p (tmp_vinsn, expr_vliw_vinsn);
2711    }
2712
2713  clear_expr (tmp);
2714  return res;
2715}
2716
2717
2718/* Functions that compute av and lv sets.  */
2719
2720/* Returns true if INSN is not a downward continuation of the given path P in
2721   the current stage.  */
2722static bool
2723is_ineligible_successor (insn_t insn, ilist_t p)
2724{
2725  insn_t prev_insn;
2726
  /* Check that INSN is not deleted.  */
2728  if (PREV_INSN (insn) && NEXT_INSN (PREV_INSN (insn)) != insn)
2729    gcc_unreachable ();
2730  else if (NEXT_INSN (insn) && PREV_INSN (NEXT_INSN (insn)) != insn)
2731    gcc_unreachable ();
2732
2733  /* If it's the first insn visited, then the successor is ok.  */
2734  if (!p)
2735    return false;
2736
2737  prev_insn = ILIST_INSN (p);
2738
2739  if (/* a backward edge.  */
2740      INSN_SEQNO (insn) < INSN_SEQNO (prev_insn)
2741      /* is already visited.  */
2742      || (INSN_SEQNO (insn) == INSN_SEQNO (prev_insn)
2743	  && (ilist_is_in_p (p, insn)
2744              /* We can reach another fence here and still seqno of insn
2745                 would be equal to seqno of prev_insn.  This is possible
2746                 when prev_insn is a previously created bookkeeping copy.
2747                 In that case it'd get a seqno of insn.  Thus, check here
2748                 whether insn is in current fence too.  */
2749              || IN_CURRENT_FENCE_P (insn)))
2750      /* Was already scheduled on this round.  */
2751      || (INSN_SEQNO (insn) > INSN_SEQNO (prev_insn)
2752	  && IN_CURRENT_FENCE_P (insn))
2753      /* An insn from another fence could also be
2754	 scheduled earlier even if this insn is not in
2755	 a fence list right now.  Check INSN_SCHED_CYCLE instead.  */
2756      || (!pipelining_p
2757          && INSN_SCHED_TIMES (insn) > 0))
2758    return true;
2759  else
2760    return false;
2761}
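
/* E.g. (a hypothetical picture): when walking downward from a fence,
   seqnos grow along the path, so meeting an insn whose seqno is smaller
   than that of the previously visited insn means we have followed a back
   edge and the walk must stop there.  */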
2762
2763/* Computes the av_set below the last bb insn INSN, doing all the 'dirty work'
2764   of handling multiple successors and properly merging its av_sets.  P is
2765   the current path traversed.  WS is the size of lookahead window.
2766   Return the av set computed.  */
2767static av_set_t
2768compute_av_set_at_bb_end (insn_t insn, ilist_t p, int ws)
2769{
2770  struct succs_info *sinfo;
2771  av_set_t expr_in_all_succ_branches = NULL;
2772  int is;
2773  insn_t succ, zero_succ = NULL;
2774  av_set_t av1 = NULL;
2775
2776  gcc_assert (sel_bb_end_p (insn));
2777
  /* Find the different kinds of successors needed for correct computation
     of the SPEC and TARGET_AVAILABLE attributes.  */
2780  sinfo = compute_succs_info (insn, SUCCS_NORMAL);
2781
2782  /* Debug output.  */
2783  if (sched_verbose >= 6)
2784    {
2785      sel_print ("successors of bb end (%d): ", INSN_UID (insn));
2786      dump_insn_vector (sinfo->succs_ok);
2787      sel_print ("\n");
2788      if (sinfo->succs_ok_n != sinfo->all_succs_n)
2789        sel_print ("real successors num: %d\n", sinfo->all_succs_n);
2790    }
2791
2792  /* Add insn to the tail of current path.  */
2793  ilist_add (&p, insn);
2794
2795  FOR_EACH_VEC_ELT (sinfo->succs_ok, is, succ)
2796    {
2797      av_set_t succ_set;
2798
2799      /* We will edit SUCC_SET and EXPR_SPEC field of its elements.  */
2800      succ_set = compute_av_set_inside_bb (succ, p, ws, true);
2801
2802      av_set_split_usefulness (succ_set,
2803                               sinfo->probs_ok[is],
2804                               sinfo->all_prob);
2805
2806      if (sinfo->all_succs_n > 1)
2807	{
2808          /* Find EXPR'es that came from *all* successors and save them
2809             into expr_in_all_succ_branches.  This set will be used later
2810             for calculating speculation attributes of EXPR'es.  */
2811          if (is == 0)
2812            {
2813              expr_in_all_succ_branches = av_set_copy (succ_set);
2814
2815              /* Remember the first successor for later. */
2816              zero_succ = succ;
2817            }
2818          else
2819            {
2820              av_set_iterator i;
2821              expr_t expr;
2822
2823              FOR_EACH_EXPR_1 (expr, i, &expr_in_all_succ_branches)
2824                if (!av_set_is_in_p (succ_set, EXPR_VINSN (expr)))
2825                  av_set_iter_remove (&i);
2826            }
2827	}
2828
2829      /* Union the av_sets.  Check liveness restrictions on target registers
2830         in special case of two successors.  */
2831      if (sinfo->succs_ok_n == 2 && is == 1)
2832        {
2833          basic_block bb0 = BLOCK_FOR_INSN (zero_succ);
2834          basic_block bb1 = BLOCK_FOR_INSN (succ);
2835
2836          gcc_assert (BB_LV_SET_VALID_P (bb0) && BB_LV_SET_VALID_P (bb1));
2837          av_set_union_and_live (&av1, &succ_set,
2838                                 BB_LV_SET (bb0),
2839                                 BB_LV_SET (bb1),
2840                                 insn);
2841        }
2842      else
2843        av_set_union_and_clear (&av1, &succ_set, insn);
2844    }
2845
2846  /* Check liveness restrictions via hard way when there are more than
2847     two successors.  */
2848  if (sinfo->succs_ok_n > 2)
2849    FOR_EACH_VEC_ELT (sinfo->succs_ok, is, succ)
2850      {
2851        basic_block succ_bb = BLOCK_FOR_INSN (succ);
2852
2853        gcc_assert (BB_LV_SET_VALID_P (succ_bb));
2854        mark_unavailable_targets (av1, BB_AV_SET (succ_bb),
2855                                  BB_LV_SET (succ_bb));
2856      }
2857
2858  /* Finally, check liveness restrictions on paths leaving the region.  */
2859  if (sinfo->all_succs_n > sinfo->succs_ok_n)
2860    FOR_EACH_VEC_ELT (sinfo->succs_other, is, succ)
2861      mark_unavailable_targets
2862        (av1, NULL, BB_LV_SET (BLOCK_FOR_INSN (succ)));
2863
2864  if (sinfo->all_succs_n > 1)
2865    {
2866      av_set_iterator i;
2867      expr_t expr;
2868
2869      /* Increase the spec attribute of all EXPR'es that didn't come
2870	 from all successors.  */
2871      FOR_EACH_EXPR (expr, i, av1)
2872	if (!av_set_is_in_p (expr_in_all_succ_branches, EXPR_VINSN (expr)))
2873	  EXPR_SPEC (expr)++;
2874
2875      av_set_clear (&expr_in_all_succ_branches);
2876
2877      /* Do not move conditional branches through other
2878	 conditional branches.  So, remove all conditional
2879	 branches from av_set if current operator is a conditional
2880	 branch.  */
2881      av_set_substract_cond_branches (&av1);
2882    }
2883
2884  ilist_remove (&p);
2885  free_succs_info (sinfo);
2886
2887  if (sched_verbose >= 6)
2888    {
2889      sel_print ("av_succs (%d): ", INSN_UID (insn));
2890      dump_av_set (av1);
2891      sel_print ("\n");
2892    }
2893
2894  return av1;
2895}
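
/* A hypothetical illustration of the EXPR_SPEC bookkeeping above: if
   "r1 = r2 + r3" is available on only one of the two successors of a
   conditional jump, it does not make it into expr_in_all_succ_branches,
   so its EXPR_SPEC counter is bumped, recording that scheduling it at
   the jump would be control speculation.  */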
2896
/* This function computes av_set for FIRST_INSN by dragging a valid
   av_set through all basic block insns either from the end of the basic
   block (computed using compute_av_set_at_bb_end) or from the insn on
   which MAX_WS was exceeded.  It uses compute_av_set_at_bb_end to compute
   the av_set below the basic block and to handle conditional branches.
2902   FIRST_INSN - the basic block head, P - path consisting of the insns
2903   traversed on the way to the FIRST_INSN (the path is sparse, only bb heads
2904   and bb ends are added to the path), WS - current window size,
2905   NEED_COPY_P - true if we'll make a copy of av_set before returning it.  */
2906static av_set_t
2907compute_av_set_inside_bb (insn_t first_insn, ilist_t p, int ws,
2908			  bool need_copy_p)
2909{
2910  insn_t cur_insn;
2911  int end_ws = ws;
2912  insn_t bb_end = sel_bb_end (BLOCK_FOR_INSN (first_insn));
2913  insn_t after_bb_end = NEXT_INSN (bb_end);
2914  insn_t last_insn;
2915  av_set_t av = NULL;
2916  basic_block cur_bb = BLOCK_FOR_INSN (first_insn);
2917
2918  /* Return NULL if insn is not on the legitimate downward path.  */
2919  if (is_ineligible_successor (first_insn, p))
2920    {
2921      if (sched_verbose >= 6)
2922        sel_print ("Insn %d is ineligible_successor\n", INSN_UID (first_insn));
2923
2924      return NULL;
2925    }
2926
2927  /* If insn already has valid av(insn) computed, just return it.  */
2928  if (AV_SET_VALID_P (first_insn))
2929    {
2930      av_set_t av_set;
2931
2932      if (sel_bb_head_p (first_insn))
2933	av_set = BB_AV_SET (BLOCK_FOR_INSN (first_insn));
2934      else
2935	av_set = NULL;
2936
2937      if (sched_verbose >= 6)
2938        {
2939          sel_print ("Insn %d has a valid av set: ", INSN_UID (first_insn));
2940          dump_av_set (av_set);
2941          sel_print ("\n");
2942        }
2943
2944      return need_copy_p ? av_set_copy (av_set) : av_set;
2945    }
2946
2947  ilist_add (&p, first_insn);
2948
  /* After this loop completes, LAST_INSN will be the insn at which we
     have a valid av_set to start the backward computation from: the set
     either will be NULL, because the window size was exceeded at that
     insn, or a valid av_set as returned by compute_av_set for the last
     insn of the basic block.  */
2954  for (last_insn = first_insn; last_insn != after_bb_end;
2955       last_insn = NEXT_INSN (last_insn))
2956    {
2957      /* We may encounter valid av_set not only on bb_head, but also on
2958	 those insns on which previously MAX_WS was exceeded.  */
2959      if (AV_SET_VALID_P (last_insn))
2960	{
2961          if (sched_verbose >= 6)
2962            sel_print ("Insn %d has a valid empty av set\n", INSN_UID (last_insn));
2963	  break;
2964	}
2965
      /* The special case: the last insn of the BB may be an
         ineligible_successor due to the seqno that was set on it
	 when it was created as a bookkeeping copy.  */
2969      if (last_insn != first_insn
2970          && is_ineligible_successor (last_insn, p))
2971	{
2972          if (sched_verbose >= 6)
2973            sel_print ("Insn %d is ineligible_successor\n", INSN_UID (last_insn));
2974	  break;
2975	}
2976
2977      if (DEBUG_INSN_P (last_insn))
2978	continue;
2979
2980      if (end_ws > max_ws)
2981	{
2982	  /* We can reach max lookahead size at bb_header, so clean av_set
2983	     first.  */
2984	  INSN_WS_LEVEL (last_insn) = global_level;
2985
2986	  if (sched_verbose >= 6)
2987            sel_print ("Insn %d is beyond the software lookahead window size\n",
2988                       INSN_UID (last_insn));
2989	  break;
2990	}
2991
2992      end_ws++;
2993    }
2994
  /* Get the valid av_set into AV above the LAST_INSN to start the backward
     computation from.  It will either be an empty av_set or the av_set
     computed from the successors of the last insn of the current bb.  */
2998  if (last_insn != after_bb_end)
2999    {
3000      av = NULL;
3001
3002      /* This is needed only to obtain av_sets that are identical to
3003         those computed by the old compute_av_set version.  */
3004      if (last_insn == first_insn && !INSN_NOP_P (last_insn))
3005        av_set_add (&av, INSN_EXPR (last_insn));
3006    }
3007  else
3008    /* END_WS is always already increased by 1 if LAST_INSN == AFTER_BB_END.  */
3009    av = compute_av_set_at_bb_end (bb_end, p, end_ws);
3010
3011  /* Compute av_set in AV starting from below the LAST_INSN up to
3012     location above the FIRST_INSN.  */
3013  for (cur_insn = PREV_INSN (last_insn); cur_insn != PREV_INSN (first_insn);
3014       cur_insn = PREV_INSN (cur_insn))
3015    if (!INSN_NOP_P (cur_insn))
3016      {
3017        expr_t expr;
3018
3019        moveup_set_expr (&av, cur_insn, false);
3020
3021        /* If the expression for CUR_INSN is already in the set,
3022           replace it by the new one.  */
3023        expr = av_set_lookup (av, INSN_VINSN (cur_insn));
3024        if (expr != NULL)
3025          {
3026            clear_expr (expr);
3027            copy_expr (expr, INSN_EXPR (cur_insn));
3028          }
3029        else
3030          av_set_add (&av, INSN_EXPR (cur_insn));
3031      }
3032
3033  /* Clear stale bb_av_set.  */
3034  if (sel_bb_head_p (first_insn))
3035    {
3036      av_set_clear (&BB_AV_SET (cur_bb));
3037      BB_AV_SET (cur_bb) = need_copy_p ? av_set_copy (av) : av;
3038      BB_AV_LEVEL (cur_bb) = global_level;
3039    }
3040
3041  if (sched_verbose >= 6)
3042    {
3043      sel_print ("Computed av set for insn %d: ", INSN_UID (first_insn));
3044      dump_av_set (av);
3045      sel_print ("\n");
3046    }
3047
3048  ilist_remove (&p);
3049  return av;
3050}
3051
3052/* Compute av set before INSN.
3053   INSN - the current operation (actual rtx INSN)
3054   P - the current path, which is list of insns visited so far
3055   WS - software lookahead window size.
   UNIQUE_P - TRUE if the returned av_set will be changed; hence,
   if we want to save the computed av_set in s_i_d, we should make
   a copy of it.

   In the resulting set we will have only expressions that don't have
   delay stalls and nonsubstitutable dependences.  */
3061static av_set_t
3062compute_av_set (insn_t insn, ilist_t p, int ws, bool unique_p)
3063{
3064  return compute_av_set_inside_bb (insn, p, ws, unique_p);
3065}
3066
3067/* Propagate a liveness set LV through INSN.  */
3068static void
3069propagate_lv_set (regset lv, insn_t insn)
3070{
3071  gcc_assert (INSN_P (insn));
3072
3073  if (INSN_NOP_P (insn))
3074    return;
3075
3076  df_simulate_one_insn_backwards (BLOCK_FOR_INSN (insn), insn, lv);
3077}
3078
/* Return the liveness set at the end of BB.  */
3080static regset
3081compute_live_after_bb (basic_block bb)
3082{
3083  edge e;
3084  edge_iterator ei;
3085  regset lv = get_clear_regset_from_pool ();
3086
3087  gcc_assert (!ignore_first);
3088
3089  FOR_EACH_EDGE (e, ei, bb->succs)
3090    if (sel_bb_empty_p (e->dest))
3091      {
3092        if (! BB_LV_SET_VALID_P (e->dest))
3093          {
3094            gcc_unreachable ();
3095            gcc_assert (BB_LV_SET (e->dest) == NULL);
3096            BB_LV_SET (e->dest) = compute_live_after_bb (e->dest);
3097            BB_LV_SET_VALID_P (e->dest) = true;
3098          }
3099        IOR_REG_SET (lv, BB_LV_SET (e->dest));
3100      }
3101    else
3102      IOR_REG_SET (lv, compute_live (sel_bb_head (e->dest)));
3103
3104  return lv;
3105}
3106
3107/* Compute the set of all live registers at the point before INSN and save
3108   it at INSN if INSN is bb header.  */
3109regset
3110compute_live (insn_t insn)
3111{
3112  basic_block bb = BLOCK_FOR_INSN (insn);
3113  insn_t final, temp;
3114  regset lv;
3115
3116  /* Return the valid set if we're already on it.  */
3117  if (!ignore_first)
3118    {
3119      regset src = NULL;
3120
3121      if (sel_bb_head_p (insn) && BB_LV_SET_VALID_P (bb))
3122        src = BB_LV_SET (bb);
3123      else
3124        {
3125          gcc_assert (in_current_region_p (bb));
3126          if (INSN_LIVE_VALID_P (insn))
3127            src = INSN_LIVE (insn);
3128        }
3129
3130      if (src)
3131	{
3132	  lv = get_regset_from_pool ();
3133	  COPY_REG_SET (lv, src);
3134
3135          if (sel_bb_head_p (insn) && ! BB_LV_SET_VALID_P (bb))
3136            {
3137              COPY_REG_SET (BB_LV_SET (bb), lv);
3138              BB_LV_SET_VALID_P (bb) = true;
3139            }
3140
3141	  return_regset_to_pool (lv);
3142	  return lv;
3143	}
3144    }
3145
3146  /* We've skipped the wrong lv_set.  Don't skip the right one.  */
3147  ignore_first = false;
3148  gcc_assert (in_current_region_p (bb));
3149
3150  /* Find a valid LV set in this block or below, if needed.
3151     Start searching from the next insn: either ignore_first is true, or
3152     INSN doesn't have a correct live set.  */
3153  temp = NEXT_INSN (insn);
3154  final = NEXT_INSN (BB_END (bb));
3155  while (temp != final && ! INSN_LIVE_VALID_P (temp))
3156    temp = NEXT_INSN (temp);
3157  if (temp == final)
3158    {
3159      lv = compute_live_after_bb (bb);
3160      temp = PREV_INSN (temp);
3161    }
3162  else
3163    {
3164      lv = get_regset_from_pool ();
3165      COPY_REG_SET (lv, INSN_LIVE (temp));
3166    }
3167
3168  /* Put correct lv sets on the insns which have bad sets.  */
3169  final = PREV_INSN (insn);
3170  while (temp != final)
3171    {
3172      propagate_lv_set (lv, temp);
3173      COPY_REG_SET (INSN_LIVE (temp), lv);
3174      INSN_LIVE_VALID_P (temp) = true;
3175      temp = PREV_INSN (temp);
3176    }
3177
  /* Also store the computed set in the basic block, if INSN is its head.  */
3179  if (sel_bb_head_p (insn))
3180    {
3181      basic_block bb = BLOCK_FOR_INSN (insn);
3182
3183      COPY_REG_SET (BB_LV_SET (bb), lv);
3184      BB_LV_SET_VALID_P (bb) = true;
3185    }
3186
  /* We return LV to the pool, but will not clear it there.  Thus we can
     legitimately use LV until the next use of regset_pool_get ().  */
3189  return_regset_to_pool (lv);
3190  return lv;
3191}
3192
3193/* Update liveness sets for INSN.  */
3194static inline void
3195update_liveness_on_insn (rtx_insn *insn)
3196{
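  /* Make compute_live bypass the cached live set at INSN itself (that is
     the very set being refreshed) and recompute it from the insns that
     follow; see the ignore_first handling in compute_live.  */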
3197  ignore_first = true;
3198  compute_live (insn);
3199}
3200
3201/* Compute liveness below INSN and write it into REGS.  */
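/* The live sets of all successors are IORed into REGS; SUCCS_ALL makes
   the iterator visit every kind of successor edge.  */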
3202static inline void
3203compute_live_below_insn (rtx_insn *insn, regset regs)
3204{
3205  rtx_insn *succ;
3206  succ_iterator si;
3207
3208  FOR_EACH_SUCC_1 (succ, si, insn, SUCCS_ALL)
3209    IOR_REG_SET (regs, compute_live (succ));
3210}
3211
3212/* Update the data gathered in av and lv sets starting from INSN.  */
3213static void
3214update_data_sets (rtx_insn *insn)
3215{
3216  update_liveness_on_insn (insn);
3217  if (sel_bb_head_p (insn))
3218    {
3219      gcc_assert (AV_LEVEL (insn) != 0);
3220      BB_AV_LEVEL (BLOCK_FOR_INSN (insn)) = -1;
      compute_av_set (insn, NULL, 0, false);
3222    }
3223}
3224
3225
/* Helper for move_op () and find_used_regs ().
   Return the speculation type for which a check should be created at the
   place of INSN.  EXPR is one of the original ops we are searching for.  */
3229static ds_t
3230get_spec_check_type_for_insn (insn_t insn, expr_t expr)
3231{
3232  ds_t to_check_ds;
3233  ds_t already_checked_ds = EXPR_SPEC_DONE_DS (INSN_EXPR (insn));
3234
3235  to_check_ds = EXPR_SPEC_TO_CHECK_DS (expr);
3236
3237  if (targetm.sched.get_insn_checked_ds)
3238    already_checked_ds |= targetm.sched.get_insn_checked_ds (insn);
3239
3240  if (spec_info != NULL
3241      && (spec_info->flags & SEL_SCHED_SPEC_DONT_CHECK_CONTROL))
3242    already_checked_ds |= BEGIN_CONTROL;
3243
3244  already_checked_ds = ds_get_speculation_types (already_checked_ds);
3245
3246  to_check_ds &= ~already_checked_ds;
3247
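  /* What remains are the speculation types for which EXPR still needs
     a check at this point.  */
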
3248  return to_check_ds;
3249}
3250
/* Find the set of registers that are unavailable for storing exprs
3252   while moving ORIG_OPS up on the path starting from INSN due to
3253   liveness (USED_REGS) or hardware restrictions (REG_RENAME_P).
3254
3255   All the original operations found during the traversal are saved in the
3256   ORIGINAL_INSNS list.
3257
   REG_RENAME_P denotes the set of hardware registers that
   cannot be used for renaming due to register class restrictions,
   mode restrictions and the like (the register we choose should be of a
   class compatible with the original uses, shouldn't be in call_used_regs,
   should satisfy HARD_REGNO_RENAME_OK, etc.).
3263
   Returns TRUE if we've found all original insns; failure to do so is
   asserted out in the function body, so FALSE is never actually returned.
3265
   This function uses code_motion_path_driver (formerly find_used_regs_1)
   to traverse the code motion paths.  It finds registers
   that are not available for storing exprs while moving ORIG_OPS up on the
   path starting from INSN.  A register is considered used on the moving
   path if one of the following conditions is not satisfied:
3271
3272      (1) a register not set or read on any path from xi to an instance of
3273	  the original operation,
3274      (2) not among the live registers of the point immediately following the
3275          first original operation on a given downward path, except for the
3276	  original target register of the operation,
3277      (3) not live on the other path of any conditional branch that is passed
3278	  by the operation, in case original operations are not present on
3279	  both paths of the conditional branch.
3280
   REG_RENAME_P->CROSSES_CALL is true if there is a call insn on the path
   from INSN to the original insn.  In this case CALL_USED_REG_SET will be
   added to unavailable hard regs at the point the original operation is
   found.  */
3287
3288static bool
3289find_used_regs (insn_t insn, av_set_t orig_ops, regset used_regs,
3290		struct reg_rename  *reg_rename_p, def_list_t *original_insns)
3291{
3292  def_list_iterator i;
3293  def_t def;
3294  int res;
3295  bool needs_spec_check_p = false;
3296  expr_t expr;
3297  av_set_iterator expr_iter;
3298  struct fur_static_params sparams;
3299  struct cmpd_local_params lparams;
3300
3301  /* We haven't visited any blocks yet.  */
3302  bitmap_clear (code_motion_visited_blocks);
3303
3304  /* Init parameters for code_motion_path_driver.  */
3305  sparams.crosses_call = false;
3306  sparams.original_insns = original_insns;
3307  sparams.used_regs = used_regs;
3308
3309  /* Set the appropriate hooks and data.  */
3310  code_motion_path_driver_info = &fur_hooks;
3311
3312  res = code_motion_path_driver (insn, orig_ops, NULL, &lparams, &sparams);
3313
3314  reg_rename_p->crosses_call |= sparams.crosses_call;
3315
3316  gcc_assert (res == 1);
3317  gcc_assert (original_insns && *original_insns);
3318
3319  /* ??? We calculate whether an expression needs a check when computing
3320     av sets.  This information is not as precise as it could be due to
3321     merging this bit in merge_expr.  We can do better in find_used_regs,
3322     but we want to avoid multiple traversals of the same code motion
3323     paths.  */
3324  FOR_EACH_EXPR (expr, expr_iter, orig_ops)
3325    needs_spec_check_p |= EXPR_NEEDS_SPEC_CHECK_P (expr);
3326
3327  /* Mark hardware regs in REG_RENAME_P that are not suitable
3328     for renaming expr in INSN due to hardware restrictions (register class,
3329     modes compatibility etc).  */
3330  FOR_EACH_DEF (def, i, *original_insns)
3331    {
3332      vinsn_t vinsn = INSN_VINSN (def->orig_insn);
3333
3334      if (VINSN_SEPARABLE_P (vinsn))
3335	mark_unavailable_hard_regs (def, reg_rename_p, used_regs);
3336
3337      /* Do not allow clobbering of ld.[sa] address in case some of the
3338         original operations need a check.  */
3339      if (needs_spec_check_p)
3340	IOR_REG_SET (used_regs, VINSN_REG_USES (vinsn));
3341    }
3342
3343  return true;
3344}
3345
3346
3347/* Functions to choose the best insn from available ones.  */
3348
3349/* Adjusts the priority for EXPR using the backend *_adjust_priority hook.  */
3350static int
3351sel_target_adjust_priority (expr_t expr)
3352{
3353  int priority = EXPR_PRIORITY (expr);
3354  int new_priority;
3355
3356  if (targetm.sched.adjust_priority)
3357    new_priority = targetm.sched.adjust_priority (EXPR_INSN_RTX (expr), priority);
3358  else
3359    new_priority = priority;
3360
3361  /* If the priority has changed, adjust EXPR_PRIORITY_ADJ accordingly.  */
3362  EXPR_PRIORITY_ADJ (expr) = new_priority - EXPR_PRIORITY (expr);
3363
3364  gcc_assert (EXPR_PRIORITY_ADJ (expr) >= 0);
3365
3366  if (sched_verbose >= 4)
3367    sel_print ("sel_target_adjust_priority: insn %d,  %d+%d = %d.\n",
3368	       INSN_UID (EXPR_INSN_RTX (expr)), EXPR_PRIORITY (expr),
3369	       EXPR_PRIORITY_ADJ (expr), new_priority);
3370
3371  return new_priority;
3372}
3373
/* Rank two available exprs for scheduling.  Never return 0 here.  */
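/* Note that X and Y are deliberately swapped into TMP and TMP2 below, so
   after vec::qsort the best expression ends up at the end of the vector;
   fill_vec_av_set then walks the vector in reverse to visit it first.  */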
3375static int
3376sel_rank_for_schedule (const void *x, const void *y)
3377{
3378  expr_t tmp = *(const expr_t *) y;
3379  expr_t tmp2 = *(const expr_t *) x;
3380  insn_t tmp_insn, tmp2_insn;
3381  vinsn_t tmp_vinsn, tmp2_vinsn;
3382  int val;
3383
3384  tmp_vinsn = EXPR_VINSN (tmp);
3385  tmp2_vinsn = EXPR_VINSN (tmp2);
3386  tmp_insn = EXPR_INSN_RTX (tmp);
3387  tmp2_insn = EXPR_INSN_RTX (tmp2);
3388
3389  /* Schedule debug insns as early as possible.  */
3390  if (DEBUG_INSN_P (tmp_insn) && !DEBUG_INSN_P (tmp2_insn))
3391    return -1;
3392  else if (DEBUG_INSN_P (tmp2_insn))
3393    return 1;
3394
3395  /* Prefer SCHED_GROUP_P insns to any others.  */
3396  if (SCHED_GROUP_P (tmp_insn) != SCHED_GROUP_P (tmp2_insn))
3397    {
3398      if (VINSN_UNIQUE_P (tmp_vinsn) && VINSN_UNIQUE_P (tmp2_vinsn))
3399        return SCHED_GROUP_P (tmp2_insn) ? 1 : -1;
3400
3401      /* Now uniqueness means SCHED_GROUP_P is set, because schedule groups
3402         cannot be cloned.  */
3403      if (VINSN_UNIQUE_P (tmp2_vinsn))
3404        return 1;
3405      return -1;
3406    }
3407
3408  /* Discourage scheduling of speculative checks.  */
3409  val = (sel_insn_is_speculation_check (tmp_insn)
3410	 - sel_insn_is_speculation_check (tmp2_insn));
3411  if (val)
3412    return val;
3413
  /* Prefer a not-yet-scheduled insn over a scheduled one.  */
3415  if (EXPR_SCHED_TIMES (tmp) > 0 || EXPR_SCHED_TIMES (tmp2) > 0)
3416    {
3417      val = EXPR_SCHED_TIMES (tmp) - EXPR_SCHED_TIMES (tmp2);
3418      if (val)
3419	return val;
3420    }
3421
3422  /* Prefer jump over non-jump instruction.  */
3423  if (control_flow_insn_p (tmp_insn) && !control_flow_insn_p (tmp2_insn))
3424    return -1;
3425  else if (control_flow_insn_p (tmp2_insn) && !control_flow_insn_p (tmp_insn))
3426    return 1;
3427
3428  /* Prefer an expr with greater priority.  */
3429  if (EXPR_USEFULNESS (tmp) != 0 && EXPR_USEFULNESS (tmp2) != 0)
3430    {
3431      int p2 = EXPR_PRIORITY (tmp2) + EXPR_PRIORITY_ADJ (tmp2),
3432          p1 = EXPR_PRIORITY (tmp) + EXPR_PRIORITY_ADJ (tmp);
3433
3434      val = p2 * EXPR_USEFULNESS (tmp2) - p1 * EXPR_USEFULNESS (tmp);
3435    }
3436  else
3437    val = EXPR_PRIORITY (tmp2) - EXPR_PRIORITY (tmp)
3438	  + EXPR_PRIORITY_ADJ (tmp2) - EXPR_PRIORITY_ADJ (tmp);
3439  if (val)
3440    return val;
3441
3442  if (spec_info != NULL && spec_info->mask != 0)
3443    /* This code was taken from haifa-sched.c: rank_for_schedule ().  */
3444    {
3445      ds_t ds1, ds2;
3446      dw_t dw1, dw2;
3447      int dw;
3448
3449      ds1 = EXPR_SPEC_DONE_DS (tmp);
3450      if (ds1)
3451	dw1 = ds_weak (ds1);
3452      else
3453	dw1 = NO_DEP_WEAK;
3454
3455      ds2 = EXPR_SPEC_DONE_DS (tmp2);
3456      if (ds2)
3457	dw2 = ds_weak (ds2);
3458      else
3459	dw2 = NO_DEP_WEAK;
3460
3461      dw = dw2 - dw1;
3462      if (dw > (NO_DEP_WEAK / 8) || dw < -(NO_DEP_WEAK / 8))
3463	return dw;
3464    }
3465
3466  /* Prefer an old insn to a bookkeeping insn.  */
3467  if (INSN_UID (tmp_insn) < first_emitted_uid
3468      && INSN_UID (tmp2_insn) >= first_emitted_uid)
3469    return -1;
3470  if (INSN_UID (tmp_insn) >= first_emitted_uid
3471      && INSN_UID (tmp2_insn) < first_emitted_uid)
3472    return 1;
3473
3474  /* Prefer an insn with smaller UID, as a last resort.
3475     We can't safely use INSN_LUID as it is defined only for those insns
3476     that are in the stream.  */
3477  return INSN_UID (tmp_insn) - INSN_UID (tmp2_insn);
3478}
3479
3480/* Filter out expressions from av set pointed to by AV_PTR
3481   that are pipelined too many times.  */
3482static void
3483process_pipelined_exprs (av_set_t *av_ptr)
3484{
3485  expr_t expr;
3486  av_set_iterator si;
3487
  /* Don't pipeline already pipelined code, as that would increase the
     number of unnecessary register moves.  */
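  /* The cap on EXPR_SCHED_TIMES checked below comes from the
     selsched-max-sched-times --param.  */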
3490  FOR_EACH_EXPR_1 (expr, si, av_ptr)
3491    {
3492      if (EXPR_SCHED_TIMES (expr)
3493	  >= PARAM_VALUE (PARAM_SELSCHED_MAX_SCHED_TIMES))
3494	av_set_iter_remove (&si);
3495    }
3496}
3497
3498/* Filter speculative insns from AV_PTR if we don't want them.  */
3499static void
3500process_spec_exprs (av_set_t *av_ptr)
3501{
3502  expr_t expr;
3503  av_set_iterator si;
3504
3505  if (spec_info == NULL)
3506    return;
3507
3508  /* Scan *AV_PTR to find out if we want to consider speculative
3509     instructions for scheduling.  */
3510  FOR_EACH_EXPR_1 (expr, si, av_ptr)
3511    {
3512      ds_t ds;
3513
3514      ds = EXPR_SPEC_DONE_DS (expr);
3515
      /* The probability of success is too low - don't speculate.  */
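      /* NB: the pipelining clause below can never trigger because of the
         explicit "&& false".  */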
3517      if ((ds & SPECULATIVE)
3518          && (ds_weak (ds) < spec_info->data_weakness_cutoff
3519              || EXPR_USEFULNESS (expr) < spec_info->control_weakness_cutoff
3520	      || (pipelining_p && false
3521		  && (ds & DATA_SPEC)
3522		  && (ds & CONTROL_SPEC))))
3523        {
3524          av_set_iter_remove (&si);
3525          continue;
3526        }
3527    }
3528}
3529
3530/* Search for any use-like insns in AV_PTR and decide on scheduling
3531   them.  Return one when found, and NULL otherwise.
3532   Note that we check here whether a USE could be scheduled to avoid
3533   an infinite loop later.  */
3534static expr_t
3535process_use_exprs (av_set_t *av_ptr)
3536{
3537  expr_t expr;
3538  av_set_iterator si;
3539  bool uses_present_p = false;
3540  bool try_uses_p = true;
3541
3542  FOR_EACH_EXPR_1 (expr, si, av_ptr)
3543    {
3544      /* This will also initialize INSN_CODE for later use.  */
3545      if (recog_memoized (EXPR_INSN_RTX (expr)) < 0)
3546        {
          /* If we have a USE in *AV_PTR that was not scheduled yet,
             schedule it, as doing so can only be beneficial.  */
3549          if (EXPR_SCHED_TIMES (expr) <= 0)
3550            {
3551              if (EXPR_TARGET_AVAILABLE (expr) == 1)
3552                return expr;
3553
3554              av_set_iter_remove (&si);
3555            }
3556          else
3557            {
3558              gcc_assert (pipelining_p);
3559
3560              uses_present_p = true;
3561            }
3562        }
3563      else
3564        try_uses_p = false;
3565    }
3566
3567  if (uses_present_p)
3568    {
3569      /* If we don't want to schedule any USEs right now and we have some
3570           in *AV_PTR, remove them, else just return the first one found.  */
3571      if (!try_uses_p)
3572        {
3573          FOR_EACH_EXPR_1 (expr, si, av_ptr)
3574            if (INSN_CODE (EXPR_INSN_RTX (expr)) < 0)
3575              av_set_iter_remove (&si);
3576        }
3577      else
3578        {
3579          FOR_EACH_EXPR_1 (expr, si, av_ptr)
3580            {
3581              gcc_assert (INSN_CODE (EXPR_INSN_RTX (expr)) < 0);
3582
3583              if (EXPR_TARGET_AVAILABLE (expr) == 1)
3584                return expr;
3585
3586              av_set_iter_remove (&si);
3587            }
3588        }
3589    }
3590
3591  return NULL;
3592}
3593
3594/* Lookup EXPR in VINSN_VEC and return TRUE if found.  Also check patterns from
3595   EXPR's history of changes.  */
3596static bool
3597vinsn_vec_has_expr_p (vinsn_vec_t vinsn_vec, expr_t expr)
3598{
3599  vinsn_t vinsn, expr_vinsn;
3600  int n;
3601  unsigned i;
3602
3603  /* Start with checking expr itself and then proceed with all the old forms
3604     of expr taken from its history vector.  */
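  /* The loop header below implements this: EXPR_VINSN (expr) is visited
     first, and then I walks EXPR_HISTORY_OF_CHANGES, yielding each
     old_expr_vinsn in turn until the vector is exhausted.  */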
3605  for (i = 0, expr_vinsn = EXPR_VINSN (expr);
3606       expr_vinsn;
3607       expr_vinsn = (i < EXPR_HISTORY_OF_CHANGES (expr).length ()
3608		     ? EXPR_HISTORY_OF_CHANGES (expr)[i++].old_expr_vinsn
3609		     : NULL))
3610    FOR_EACH_VEC_ELT (vinsn_vec, n, vinsn)
3611      if (VINSN_SEPARABLE_P (vinsn))
3612	{
3613	  if (vinsn_equal_p (vinsn, expr_vinsn))
3614	    return true;
3615	}
3616      else
3617	{
3618	  /* For non-separable instructions, the blocking insn can have
3619	     another pattern due to substitution, and we can't choose
3620	     different register as in the above case.  Check all registers
3621	     being written instead.  */
3622	  if (bitmap_intersect_p (VINSN_REG_SETS (vinsn),
3623				  VINSN_REG_SETS (expr_vinsn)))
3624	    return true;
3625	}
3626
3627  return false;
3628}
3629
3630#ifdef ENABLE_CHECKING
3631/* Return true if either of expressions from ORIG_OPS can be blocked
3632   by previously created bookkeeping code.  STATIC_PARAMS points to static
3633   parameters of move_op.  */
3634static bool
3635av_set_could_be_blocked_by_bookkeeping_p (av_set_t orig_ops, void *static_params)
3636{
3637  expr_t expr;
3638  av_set_iterator iter;
3639  moveop_static_params_p sparams;
3640
3641  /* This checks that expressions in ORIG_OPS are not blocked by bookkeeping
3642     created while scheduling on another fence.  */
3643  FOR_EACH_EXPR (expr, iter, orig_ops)
3644    if (vinsn_vec_has_expr_p (vec_bookkeeping_blocked_vinsns, expr))
3645      return true;
3646
3647  gcc_assert (code_motion_path_driver_info == &move_op_hooks);
3648  sparams = (moveop_static_params_p) static_params;
3649
3650  /* Expressions can be also blocked by bookkeeping created during current
3651     move_op.  */
3652  if (bitmap_bit_p (current_copies, INSN_UID (sparams->failed_insn)))
3653    FOR_EACH_EXPR (expr, iter, orig_ops)
3654      if (moveup_expr_cached (expr, sparams->failed_insn, false) != MOVEUP_EXPR_NULL)
3655        return true;
3656
3657  /* Expressions in ORIG_OPS may have wrong destination register due to
3658     renaming.  Check with the right register instead.  */
3659  if (sparams->dest && REG_P (sparams->dest))
3660    {
3661      rtx reg = sparams->dest;
3662      vinsn_t failed_vinsn = INSN_VINSN (sparams->failed_insn);
3663
3664      if (register_unavailable_p (VINSN_REG_SETS (failed_vinsn), reg)
3665	  || register_unavailable_p (VINSN_REG_USES (failed_vinsn), reg)
3666	  || register_unavailable_p (VINSN_REG_CLOBBERS (failed_vinsn), reg))
3667	return true;
3668    }
3669
3670  return false;
3671}
3672#endif
3673
3674/* Clear VINSN_VEC and detach vinsns.  */
3675static void
3676vinsn_vec_clear (vinsn_vec_t *vinsn_vec)
3677{
3678  unsigned len = vinsn_vec->length ();
3679  if (len > 0)
3680    {
3681      vinsn_t vinsn;
3682      int n;
3683
3684      FOR_EACH_VEC_ELT (*vinsn_vec, n, vinsn)
3685        vinsn_detach (vinsn);
3686      vinsn_vec->block_remove (0, len);
3687    }
3688}
3689
3690/* Add the vinsn of EXPR to the VINSN_VEC.  */
3691static void
3692vinsn_vec_add (vinsn_vec_t *vinsn_vec, expr_t expr)
3693{
3694  vinsn_attach (EXPR_VINSN (expr));
3695  vinsn_vec->safe_push (EXPR_VINSN (expr));
3696}
3697
3698/* Free the vector representing blocked expressions.  */
3699static void
3700vinsn_vec_free (vinsn_vec_t &vinsn_vec)
3701{
3702  vinsn_vec.release ();
3703}
3704
3705/* Increase EXPR_PRIORITY_ADJ for INSN by AMOUNT.  */
3706
void
sel_add_to_insn_priority (rtx insn, int amount)
3708{
3709  EXPR_PRIORITY_ADJ (INSN_EXPR (insn)) += amount;
3710
3711  if (sched_verbose >= 2)
3712    sel_print ("sel_add_to_insn_priority: insn %d, by %d (now %d+%d).\n",
3713	       INSN_UID (insn), amount, EXPR_PRIORITY (INSN_EXPR (insn)),
3714	       EXPR_PRIORITY_ADJ (INSN_EXPR (insn)));
3715}
3716
3717/* Turn AV into a vector, filter inappropriate insns and sort it.  Return
3718   true if there is something to schedule.  BNDS and FENCE are current
3719   boundaries and fence, respectively.  If we need to stall for some cycles
3720   before an expr from AV would become available, write this number to
3721   *PNEED_STALL.  */
3722static bool
3723fill_vec_av_set (av_set_t av, blist_t bnds, fence_t fence,
3724                 int *pneed_stall)
3725{
3726  av_set_iterator si;
3727  expr_t expr;
3728  int sched_next_worked = 0, stalled, n;
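  /* Note: these are static so their values survive across calls while a
     single instruction group is being filled; they are re-initialized
     below whenever the fence starts a new cycle.  */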
3729  static int av_max_prio, est_ticks_till_branch;
3730  int min_need_stall = -1;
3731  deps_t dc = BND_DC (BLIST_BND (bnds));
3732
3733  /* Bail out early when the ready list contained only USEs/CLOBBERs that are
3734     already scheduled.  */
3735  if (av == NULL)
3736    return false;
3737
  /* Empty the vector left over from the previous scheduling step.  */
3739  if (vec_av_set.length () > 0)
3740    vec_av_set.block_remove (0, vec_av_set.length ());
3741
3742  /* Turn the set into a vector for sorting and call sel_target_adjust_priority
3743     for each insn.  */
3744  gcc_assert (vec_av_set.is_empty ());
3745  FOR_EACH_EXPR (expr, si, av)
3746    {
3747      vec_av_set.safe_push (expr);
3748
3749      gcc_assert (EXPR_PRIORITY_ADJ (expr) == 0 || *pneed_stall);
3750
3751      /* Adjust priority using target backend hook.  */
3752      sel_target_adjust_priority (expr);
3753    }
3754
3755  /* Sort the vector.  */
3756  vec_av_set.qsort (sel_rank_for_schedule);
3757
  /* We record the maximal priority of insns in the av set for the current
     instruction group.  */
3760  if (FENCE_STARTS_CYCLE_P (fence))
3761    av_max_prio = est_ticks_till_branch = INT_MIN;
3762
  /* Filter out inappropriate expressions.  The loop's direction is
     reversed to visit the "best" instructions first.  We assume that
     vec::unordered_remove moves the last element into the place of the
     one being deleted.  */
3766  for (n = vec_av_set.length () - 1, stalled = 0; n >= 0; n--)
3767    {
3768      expr_t expr = vec_av_set[n];
3769      insn_t insn = EXPR_INSN_RTX (expr);
3770      signed char target_available;
3771      bool is_orig_reg_p = true;
3772      int need_cycles, new_prio;
3773      bool fence_insn_p = INSN_UID (insn) == INSN_UID (FENCE_INSN (fence));
3774
3775      /* Don't allow any insns other than from SCHED_GROUP if we have one.  */
3776      if (FENCE_SCHED_NEXT (fence) && insn != FENCE_SCHED_NEXT (fence))
3777        {
3778          vec_av_set.unordered_remove (n);
3779          continue;
3780        }
3781
      /* Count the sched_next insns (just in case there
         could be several).  */
3784      if (FENCE_SCHED_NEXT (fence))
3785        sched_next_worked++;
3786
3787      /* Check all liveness requirements and try renaming.
3788         FIXME: try to minimize calls to this.  */
3789      target_available = EXPR_TARGET_AVAILABLE (expr);
3790
3791      /* If insn was already scheduled on the current fence,
3792	 set TARGET_AVAILABLE to -1 no matter what expr's attribute says.  */
3793      if (vinsn_vec_has_expr_p (vec_target_unavailable_vinsns, expr)
3794	  && !fence_insn_p)
3795	target_available = -1;
3796
3797      /* If the availability of the EXPR is invalidated by the insertion of
3798	 bookkeeping earlier, make sure that we won't choose this expr for
3799	 scheduling if it's not separable, and if it is separable, then
3800	 we have to recompute the set of available registers for it.  */
3801      if (vinsn_vec_has_expr_p (vec_bookkeeping_blocked_vinsns, expr))
3802	{
3803          vec_av_set.unordered_remove (n);
3804          if (sched_verbose >= 4)
3805            sel_print ("Expr %d is blocked by bookkeeping inserted earlier\n",
3806                       INSN_UID (insn));
3807          continue;
3808        }
3809
3810      if (target_available == true)
3811	{
3812          /* Do nothing -- we can use an existing register.  */
3813	  is_orig_reg_p = EXPR_SEPARABLE_P (expr);
3814        }
3815      else if (/* Non-separable instruction will never
3816                  get another register. */
3817               (target_available == false
3818                && !EXPR_SEPARABLE_P (expr))
3819               /* Don't try to find a register for low-priority expression.  */
3820               || (int) vec_av_set.length () - 1 - n >= max_insns_to_rename
3821               /* ??? FIXME: Don't try to rename data speculation.  */
3822               || (EXPR_SPEC_DONE_DS (expr) & BEGIN_DATA)
3823               || ! find_best_reg_for_expr (expr, bnds, &is_orig_reg_p))
3824        {
3825          vec_av_set.unordered_remove (n);
3826          if (sched_verbose >= 4)
3827            sel_print ("Expr %d has no suitable target register\n",
3828                       INSN_UID (insn));
3829
3830	  /* A fence insn should not get here.  */
3831	  gcc_assert (!fence_insn_p);
3832	  continue;
3833        }
3834
3835      /* At this point a fence insn should always be available.  */
3836      gcc_assert (!fence_insn_p
3837		  || INSN_UID (FENCE_INSN (fence)) == INSN_UID (EXPR_INSN_RTX (expr)));
3838
3839      /* Filter expressions that need to be renamed or speculated when
3840	 pipelining, because compensating register copies or speculation
3841	 checks are likely to be placed near the beginning of the loop,
3842	 causing a stall.  */
3843      if (pipelining_p && EXPR_ORIG_SCHED_CYCLE (expr) > 0
3844	  && (!is_orig_reg_p || EXPR_SPEC_DONE_DS (expr) != 0))
3845	{
	  /* Estimate of the number of cycles until the loop branch that
	     renaming/speculation needs in order to be successful.  */
3848	  int need_n_ticks_till_branch = sel_vinsn_cost (EXPR_VINSN (expr));
3849
3850	  if ((int) current_loop_nest->ninsns < 9)
3851	    {
3852	      vec_av_set.unordered_remove (n);
3853	      if (sched_verbose >= 4)
3854		sel_print ("Pipelining expr %d will likely cause stall\n",
3855			   INSN_UID (insn));
3856	      continue;
3857	    }
3858
3859	  if ((int) current_loop_nest->ninsns - num_insns_scheduled
3860	      < need_n_ticks_till_branch * issue_rate / 2
3861	      && est_ticks_till_branch < need_n_ticks_till_branch)
3862	     {
3863	       vec_av_set.unordered_remove (n);
3864	       if (sched_verbose >= 4)
3865		 sel_print ("Pipelining expr %d will likely cause stall\n",
3866			    INSN_UID (insn));
3867	       continue;
3868	     }
3869	}
3870
3871      /* We want to schedule speculation checks as late as possible.  Discard
3872	 them from av set if there are instructions with higher priority.  */
3873      if (sel_insn_is_speculation_check (insn)
3874	  && EXPR_PRIORITY (expr) < av_max_prio)
3875	{
3876          stalled++;
3877          min_need_stall = min_need_stall < 0 ? 1 : MIN (min_need_stall, 1);
3878          vec_av_set.unordered_remove (n);
3879	  if (sched_verbose >= 4)
3880	    sel_print ("Delaying speculation check %d until its first use\n",
3881		       INSN_UID (insn));
3882	  continue;
3883	}
3884
3885      /* Ignore EXPRs available from pipelining to update AV_MAX_PRIO.  */
3886      if (EXPR_ORIG_SCHED_CYCLE (expr) <= 0)
3887	av_max_prio = MAX (av_max_prio, EXPR_PRIORITY (expr));
3888
3889      /* Don't allow any insns whose data is not yet ready.
3890         Check first whether we've already tried them and failed.  */
3891      if (INSN_UID (insn) < FENCE_READY_TICKS_SIZE (fence))
3892	{
3893          need_cycles = (FENCE_READY_TICKS (fence)[INSN_UID (insn)]
3894			 - FENCE_CYCLE (fence));
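          /* For example, a cached ready tick of 14 with the fence at cycle
             11 gives NEED_CYCLES == 3: the insn cannot issue for another
             three cycles.  */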
3895	  if (EXPR_ORIG_SCHED_CYCLE (expr) <= 0)
3896	    est_ticks_till_branch = MAX (est_ticks_till_branch,
3897					 EXPR_PRIORITY (expr) + need_cycles);
3898
3899	  if (need_cycles > 0)
3900	    {
3901	      stalled++;
3902	      min_need_stall = (min_need_stall < 0
3903				? need_cycles
3904				: MIN (min_need_stall, need_cycles));
3905	      vec_av_set.unordered_remove (n);
3906
3907	      if (sched_verbose >= 4)
3908		sel_print ("Expr %d is not ready until cycle %d (cached)\n",
3909			   INSN_UID (insn),
3910			   FENCE_READY_TICKS (fence)[INSN_UID (insn)]);
3911	      continue;
3912	    }
3913	}
3914
3915      /* Now resort to dependence analysis to find whether EXPR might be
3916         stalled due to dependencies from FENCE's context.  */
3917      need_cycles = tick_check_p (expr, dc, fence);
3918      new_prio = EXPR_PRIORITY (expr) + EXPR_PRIORITY_ADJ (expr) + need_cycles;
3919
3920      if (EXPR_ORIG_SCHED_CYCLE (expr) <= 0)
3921	est_ticks_till_branch = MAX (est_ticks_till_branch,
3922				     new_prio);
3923
3924      if (need_cycles > 0)
3925        {
3926          if (INSN_UID (insn) >= FENCE_READY_TICKS_SIZE (fence))
3927            {
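              /* Grow the array to 1.5x the offending UID so that nearby
                 UIDs also fit without an immediate reallocation.  */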
3928              int new_size = INSN_UID (insn) * 3 / 2;
3929
3930              FENCE_READY_TICKS (fence)
3931                = (int *) xrecalloc (FENCE_READY_TICKS (fence),
3932                                     new_size, FENCE_READY_TICKS_SIZE (fence),
3933                                     sizeof (int));
3934            }
3935          FENCE_READY_TICKS (fence)[INSN_UID (insn)]
3936            = FENCE_CYCLE (fence) + need_cycles;
3937
3938          stalled++;
3939          min_need_stall = (min_need_stall < 0
3940                            ? need_cycles
3941                            : MIN (min_need_stall, need_cycles));
3942
3943          vec_av_set.unordered_remove (n);
3944
3945          if (sched_verbose >= 4)
3946            sel_print ("Expr %d is not ready yet until cycle %d\n",
3947                       INSN_UID (insn),
3948                       FENCE_READY_TICKS (fence)[INSN_UID (insn)]);
3949          continue;
3950        }
3951
3952      if (sched_verbose >= 4)
3953        sel_print ("Expr %d is ok\n", INSN_UID (insn));
3954      min_need_stall = 0;
3955    }
3956
3957  /* Clear SCHED_NEXT.  */
3958  if (FENCE_SCHED_NEXT (fence))
3959    {
3960      gcc_assert (sched_next_worked == 1);
3961      FENCE_SCHED_NEXT (fence) = NULL;
3962    }
3963
3964  /* No need to stall if this variable was not initialized.  */
3965  if (min_need_stall < 0)
3966    min_need_stall = 0;
3967
3968  if (vec_av_set.is_empty ())
3969    {
3970      /* We need to set *pneed_stall here, because later we skip this code
3971         when ready list is empty.  */
3972      *pneed_stall = min_need_stall;
3973      return false;
3974    }
3975  else
3976    gcc_assert (min_need_stall == 0);
3977
3978  /* Sort the vector.  */
3979  vec_av_set.qsort (sel_rank_for_schedule);
3980
3981  if (sched_verbose >= 4)
3982    {
3983      sel_print ("Total ready exprs: %d, stalled: %d\n",
3984                 vec_av_set.length (), stalled);
3985      sel_print ("Sorted av set (%d): ", vec_av_set.length ());
3986      FOR_EACH_VEC_ELT (vec_av_set, n, expr)
3987        dump_expr (expr);
3988      sel_print ("\n");
3989    }
3990
3991  *pneed_stall = 0;
3992  return true;
3993}
3994
3995/* Convert a vectored and sorted av set to the ready list that
3996   the rest of the backend wants to see.  */
3997static void
3998convert_vec_av_set_to_ready (void)
3999{
4000  int n;
4001  expr_t expr;
4002
4003  /* Allocate and fill the ready list from the sorted vector.  */
4004  ready.n_ready = vec_av_set.length ();
4005  ready.first = ready.n_ready - 1;
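  /* Since the vector is sorted with the best expression last,
     ready_element (&ready, 0) will denote exactly that element; see
     find_expr_for_ready.  */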
4006
4007  gcc_assert (ready.n_ready > 0);
4008
4009  if (ready.n_ready > max_issue_size)
4010    {
4011      max_issue_size = ready.n_ready;
4012      sched_extend_ready_list (ready.n_ready);
4013    }
4014
4015  FOR_EACH_VEC_ELT (vec_av_set, n, expr)
4016    {
4017      vinsn_t vi = EXPR_VINSN (expr);
4018      insn_t insn = VINSN_INSN_RTX (vi);
4019
4020      ready_try[n] = 0;
4021      ready.vec[n] = insn;
4022    }
4023}
4024
4025/* Initialize ready list from *AV_PTR for the max_issue () call.
4026   If any unrecognizable insn found in *AV_PTR, return it (and skip
4027   max_issue).  BND and FENCE are current boundary and fence,
4028   respectively.  If we need to stall for some cycles before an expr
4029   from *AV_PTR would become available, write this number to *PNEED_STALL.  */
4030static expr_t
4031fill_ready_list (av_set_t *av_ptr, blist_t bnds, fence_t fence,
4032                 int *pneed_stall)
4033{
4034  expr_t expr;
4035
4036  /* We do not support multiple boundaries per fence.  */
4037  gcc_assert (BLIST_NEXT (bnds) == NULL);
4038
  /* Process expressions requiring special handling first, i.e. pipelined,
     speculative and recog() < 0 expressions.  */
4041  process_pipelined_exprs (av_ptr);
4042  process_spec_exprs (av_ptr);
4043
4044  /* A USE could be scheduled immediately.  */
4045  expr = process_use_exprs (av_ptr);
4046  if (expr)
4047    {
4048      *pneed_stall = 0;
4049      return expr;
4050    }
4051
4052  /* Turn the av set to a vector for sorting.  */
4053  if (! fill_vec_av_set (*av_ptr, bnds, fence, pneed_stall))
4054    {
4055      ready.n_ready = 0;
4056      return NULL;
4057    }
4058
4059  /* Build the final ready list.  */
4060  convert_vec_av_set_to_ready ();
4061  return NULL;
4062}
4063
4064/* Wrapper for dfa_new_cycle ().  Returns TRUE if cycle was advanced.  */
4065static bool
4066sel_dfa_new_cycle (insn_t insn, fence_t fence)
4067{
4068  int last_scheduled_cycle = FENCE_LAST_SCHEDULED_INSN (fence)
4069                             ? INSN_SCHED_CYCLE (FENCE_LAST_SCHEDULED_INSN (fence))
4070                             : FENCE_CYCLE (fence) - 1;
4071  bool res = false;
4072  int sort_p = 0;
4073
4074  if (!targetm.sched.dfa_new_cycle)
4075    return false;
4076
4077  memcpy (curr_state, FENCE_STATE (fence), dfa_state_size);
4078
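  /* Keep advancing cycles for as long as the target hook asks us to,
     stopping early if the hook requests a re-sort of the ready list
     (SORT_P).  */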
4079  while (!sort_p && targetm.sched.dfa_new_cycle (sched_dump, sched_verbose,
4080                                                 insn, last_scheduled_cycle,
4081                                                 FENCE_CYCLE (fence), &sort_p))
4082    {
4083      memcpy (FENCE_STATE (fence), curr_state, dfa_state_size);
4084      advance_one_cycle (fence);
4085      memcpy (curr_state, FENCE_STATE (fence), dfa_state_size);
4086      res = true;
4087    }
4088
4089  return res;
4090}
4091
4092/* Invoke reorder* target hooks on the ready list.  Return the number of insns
4093   we can issue.  FENCE is the current fence.  */
4094static int
4095invoke_reorder_hooks (fence_t fence)
4096{
4097  int issue_more;
4098  bool ran_hook = false;
4099
4100  /* Call the reorder hook at the beginning of the cycle, and call
4101     the reorder2 hook in the middle of the cycle.  */
4102  if (FENCE_ISSUED_INSNS (fence) == 0)
4103    {
4104      if (targetm.sched.reorder
4105          && !SCHED_GROUP_P (ready_element (&ready, 0))
4106          && ready.n_ready > 1)
4107        {
4108          /* Don't give reorder the most prioritized insn as it can break
4109             pipelining.  */
4110          if (pipelining_p)
4111            --ready.n_ready;
4112
4113          issue_more
4114            = targetm.sched.reorder (sched_dump, sched_verbose,
4115                                     ready_lastpos (&ready),
4116                                     &ready.n_ready, FENCE_CYCLE (fence));
4117
4118          if (pipelining_p)
4119            ++ready.n_ready;
4120
4121          ran_hook = true;
4122        }
4123      else
4124        /* Initialize can_issue_more for variable_issue.  */
4125        issue_more = issue_rate;
4126    }
4127  else if (targetm.sched.reorder2
4128           && !SCHED_GROUP_P (ready_element (&ready, 0)))
4129    {
4130      if (ready.n_ready == 1)
4131        issue_more =
4132          targetm.sched.reorder2 (sched_dump, sched_verbose,
4133                                  ready_lastpos (&ready),
4134                                  &ready.n_ready, FENCE_CYCLE (fence));
4135      else
4136        {
4137          if (pipelining_p)
4138            --ready.n_ready;
4139
4140          issue_more =
4141            targetm.sched.reorder2 (sched_dump, sched_verbose,
4142                                    ready.n_ready
4143                                    ? ready_lastpos (&ready) : NULL,
4144                                    &ready.n_ready, FENCE_CYCLE (fence));
4145
4146          if (pipelining_p)
4147            ++ready.n_ready;
4148        }
4149
4150      ran_hook = true;
4151    }
4152  else
4153    issue_more = FENCE_ISSUE_MORE (fence);
4154
4155  /* Ensure that ready list and vec_av_set are in line with each other,
4156     i.e. vec_av_set[i] == ready_element (&ready, i).  */
4157  if (issue_more && ran_hook)
4158    {
4159      int i, j, n;
4160      rtx_insn **arr = ready.vec;
4161      expr_t *vec = vec_av_set.address ();
4162
4163      for (i = 0, n = ready.n_ready; i < n; i++)
4164        if (EXPR_INSN_RTX (vec[i]) != arr[i])
4165          {
4166            expr_t tmp;
4167
4168            for (j = i; j < n; j++)
4169              if (EXPR_INSN_RTX (vec[j]) == arr[i])
4170                break;
4171            gcc_assert (j < n);
4172
4173            tmp = vec[i];
4174            vec[i] = vec[j];
4175            vec[j] = tmp;
4176          }
4177    }
4178
4179  return issue_more;
4180}
4181
4182/* Return an EXPR corresponding to INDEX element of ready list, if
4183   FOLLOW_READY_ELEMENT is true (i.e., an expr of
4184   ready_element (&ready, INDEX) will be returned), and to INDEX element of
4185   ready.vec otherwise.  */
4186static inline expr_t
4187find_expr_for_ready (int index, bool follow_ready_element)
4188{
4189  expr_t expr;
4190  int real_index;
4191
4192  real_index = follow_ready_element ? ready.first - index : index;
4193
4194  expr = vec_av_set[real_index];
4195  gcc_assert (ready.vec[real_index] == EXPR_INSN_RTX (expr));
4196
4197  return expr;
4198}
4199
/* Calculate insns worth trying via the lookahead_guard hook.  Return the
   number of such insns found.  */
4202static int
4203invoke_dfa_lookahead_guard (void)
4204{
4205  int i, n;
4206  bool have_hook
4207    = targetm.sched.first_cycle_multipass_dfa_lookahead_guard != NULL;
4208
4209  if (sched_verbose >= 2)
4210    sel_print ("ready after reorder: ");
4211
4212  for (i = 0, n = 0; i < ready.n_ready; i++)
4213    {
4214      expr_t expr;
4215      insn_t insn;
4216      int r;
4217
      /* In this loop insn is the Ith element of the ready list given by
         ready_element, not the Ith element of ready.vec.  */
4220      insn = ready_element (&ready, i);
4221
4222      if (! have_hook || i == 0)
4223        r = 0;
4224      else
4225        r = targetm.sched.first_cycle_multipass_dfa_lookahead_guard (insn, i);
4226
4227      gcc_assert (INSN_CODE (insn) >= 0);
4228
4229      /* Only insns with ready_try = 0 can get here
4230         from fill_ready_list.  */
4231      gcc_assert (ready_try [i] == 0);
4232      ready_try[i] = r;
4233      if (!r)
4234        n++;
4235
4236      expr = find_expr_for_ready (i, true);
4237
4238      if (sched_verbose >= 2)
4239        {
4240          dump_vinsn (EXPR_VINSN (expr));
4241          sel_print (":%d; ", ready_try[i]);
4242        }
4243    }
4244
4245  if (sched_verbose >= 2)
4246    sel_print ("\n");
4247  return n;
4248}
4249
/* Calculate the number of privileged insns, i.e. those at the head of the
   ready list whose speculativeness (EXPR_SPEC) does not exceed that of the
   first ready insn, and return it.  */
4252calculate_privileged_insns (void)
4253{
4254  expr_t cur_expr, min_spec_expr = NULL;
4255  int privileged_n = 0, i;
4256
4257  for (i = 0; i < ready.n_ready; i++)
4258    {
4259      if (ready_try[i])
4260        continue;
4261
4262      if (! min_spec_expr)
4263	min_spec_expr = find_expr_for_ready (i, true);
4264
4265      cur_expr = find_expr_for_ready (i, true);
4266
4267      if (EXPR_SPEC (cur_expr) > EXPR_SPEC (min_spec_expr))
4268        break;
4269
4270      ++privileged_n;
4271    }
4272
4273  if (i == ready.n_ready)
4274    privileged_n = 0;
4275
4276  if (sched_verbose >= 2)
4277    sel_print ("privileged_n: %d insns with SPEC %d\n",
4278               privileged_n, privileged_n ? EXPR_SPEC (min_spec_expr) : -1);
4279  return privileged_n;
4280}
4281
4282/* Call the rest of the hooks after the choice was made.  Return
4283   the number of insns that still can be issued given that the current
4284   number is ISSUE_MORE.  FENCE and BEST_INSN are the current fence
4285   and the insn chosen for scheduling, respectively.  */
4286static int
4287invoke_aftermath_hooks (fence_t fence, rtx_insn *best_insn, int issue_more)
4288{
4289  gcc_assert (INSN_P (best_insn));
4290
4291  /* First, call dfa_new_cycle, and then variable_issue, if available.  */
4292  sel_dfa_new_cycle (best_insn, fence);
4293
4294  if (targetm.sched.variable_issue)
4295    {
4296      memcpy (curr_state, FENCE_STATE (fence), dfa_state_size);
4297      issue_more =
4298        targetm.sched.variable_issue (sched_dump, sched_verbose, best_insn,
4299                                      issue_more);
4300      memcpy (FENCE_STATE (fence), curr_state, dfa_state_size);
4301    }
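  /* Without a variable_issue hook, each real insn consumes one issue slot,
     while USEs and CLOBBERs are issued for free.  */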
4302  else if (GET_CODE (PATTERN (best_insn)) != USE
4303           && GET_CODE (PATTERN (best_insn)) != CLOBBER)
4304    issue_more--;
4305
4306  return issue_more;
4307}
4308
4309/* Estimate the cost of issuing INSN on DFA state STATE.  */
4310static int
4311estimate_insn_cost (rtx_insn *insn, state_t state)
4312{
4313  static state_t temp = NULL;
4314  int cost;
4315
4316  if (!temp)
4317    temp = xmalloc (dfa_state_size);
4318
4319  memcpy (temp, state, dfa_state_size);
4320  cost = state_transition (temp, insn);
4321
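  /* state_transition returns a negative value when INSN fits into the
     current cycle; otherwise the insn cannot be issued yet, and we charge
     at least one full cycle.  */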
4322  if (cost < 0)
4323    return 0;
4324  else if (cost == 0)
4325    return 1;
4326  return cost;
4327}
4328
4329/* Return the cost of issuing EXPR on the FENCE as estimated by DFA.
4330   This function properly handles ASMs, USEs etc.  */
4331static int
4332get_expr_cost (expr_t expr, fence_t fence)
4333{
4334  rtx_insn *insn = EXPR_INSN_RTX (expr);
4335
4336  if (recog_memoized (insn) < 0)
4337    {
4338      if (!FENCE_STARTS_CYCLE_P (fence)
4339	  && INSN_ASM_P (insn))
	/* This is an asm insn that we are trying to issue on a cycle
	   other than the first one.  Issue it on the next cycle.  */
4342	return 1;
4343      else
4344	/* A USE insn, or something else we don't need to
4345	   understand.  We can't pass these directly to
4346	   state_transition because it will trigger a
4347	   fatal error for unrecognizable insns.  */
4348	return 0;
4349    }
4350  else
4351    return estimate_insn_cost (insn, FENCE_STATE (fence));
4352}
4353
/* Find the best insn for scheduling, either via max_issue or by just
   taking the most prioritized available insn.  */
4356static int
4357choose_best_insn (fence_t fence, int privileged_n, int *index)
4358{
4359  int can_issue = 0;
4360
4361  if (dfa_lookahead > 0)
4362    {
4363      cycle_issued_insns = FENCE_ISSUED_INSNS (fence);
4364      /* TODO: pass equivalent of first_cycle_insn_p to max_issue ().  */
4365      can_issue = max_issue (&ready, privileged_n,
4366                             FENCE_STATE (fence), true, index);
4367      if (sched_verbose >= 2)
4368        sel_print ("max_issue: we can issue %d insns, already did %d insns\n",
4369                   can_issue, FENCE_ISSUED_INSNS (fence));
4370    }
4371  else
4372    {
4373      /* We can't use max_issue; just return the first available element.  */
4374      int i;
4375
4376      for (i = 0; i < ready.n_ready; i++)
4377	{
4378	  expr_t expr = find_expr_for_ready (i, true);
4379
4380	  if (get_expr_cost (expr, fence) < 1)
4381	    {
4382	      can_issue = can_issue_more;
4383	      *index = i;
4384
4385	      if (sched_verbose >= 2)
4386		sel_print ("using %dth insn from the ready list\n", i + 1);
4387
4388	      break;
4389	    }
4390	}
4391
4392      if (i == ready.n_ready)
4393	{
4394	  can_issue = 0;
4395	  *index = -1;
4396	}
4397    }
4398
4399  return can_issue;
4400}
4401
4402/* Choose the best expr from *AV_VLIW_PTR and a suitable register for it.
4403   BNDS and FENCE are current boundaries and scheduling fence respectively.
   Return the expr found, or NULL if nothing can be issued at the moment.
4405   Write to PNEED_STALL the number of cycles to stall if no expr was found.  */
4406static expr_t
4407find_best_expr (av_set_t *av_vliw_ptr, blist_t bnds, fence_t fence,
4408                int *pneed_stall)
4409{
4410  expr_t best;
4411
4412  /* Choose the best insn for scheduling via:
4413     1) sorting the ready list based on priority;
4414     2) calling the reorder hook;
4415     3) calling max_issue.  */
4416  best = fill_ready_list (av_vliw_ptr, bnds, fence, pneed_stall);
4417  if (best == NULL && ready.n_ready > 0)
4418    {
4419      int privileged_n, index;
4420
4421      can_issue_more = invoke_reorder_hooks (fence);
4422      if (can_issue_more > 0)
4423        {
          /* Try choosing the best insn until we find one that could be
             scheduled given the liveness restrictions on its destination
             register.
4426             In the future, we'd like to choose once and then just probe insns
4427             in the order of their priority.  */
4428          invoke_dfa_lookahead_guard ();
4429          privileged_n = calculate_privileged_insns ();
4430          can_issue_more = choose_best_insn (fence, privileged_n, &index);
4431          if (can_issue_more)
4432            best = find_expr_for_ready (index, true);
4433        }
4434      /* We had some available insns, so if we can't issue them,
4435         we have a stall.  */
4436      if (can_issue_more == 0)
4437        {
4438          best = NULL;
4439          *pneed_stall = 1;
4440        }
4441    }
4442
4443  if (best != NULL)
4444    {
4445      can_issue_more = invoke_aftermath_hooks (fence, EXPR_INSN_RTX (best),
4446                                               can_issue_more);
4447      if (targetm.sched.variable_issue
4448	  && can_issue_more == 0)
4449        *pneed_stall = 1;
4450    }
4451
4452  if (sched_verbose >= 2)
4453    {
4454      if (best != NULL)
4455        {
4456          sel_print ("Best expression (vliw form): ");
4457          dump_expr (best);
4458          sel_print ("; cycle %d\n", FENCE_CYCLE (fence));
4459        }
4460      else
4461        sel_print ("No best expr found!\n");
4462    }
4463
4464  return best;
4465}
4466
4467
4468/* Functions that implement the core of the scheduler.  */
4469
4470
4471/* Emit an instruction from EXPR with SEQNO and VINSN after
4472   PLACE_TO_INSERT.  */
4473static insn_t
4474emit_insn_from_expr_after (expr_t expr, vinsn_t vinsn, int seqno,
4475                           insn_t place_to_insert)
4476{
  /* This assert fails when we have identical instructions
     one of which dominates the other.  In this case move_op ()
     finds the first instruction and doesn't search for the second one.
     The solution would be to compute av_set after the first found
     insn and, if the insn is present in that set, continue searching.
     For now we work around this issue in move_op.  */
4483  gcc_assert (!INSN_IN_STREAM_P (EXPR_INSN_RTX (expr)));
4484
4485  if (EXPR_WAS_RENAMED (expr))
4486    {
4487      unsigned regno = expr_dest_regno (expr);
4488
4489      if (HARD_REGISTER_NUM_P (regno))
4490	{
4491	  df_set_regs_ever_live (regno, true);
4492	  reg_rename_tick[regno] = ++reg_rename_this_tick;
4493	}
4494    }
4495
4496  return sel_gen_insn_from_expr_after (expr, vinsn, seqno,
4497                                       place_to_insert);
4498}
4499
4500/* Return TRUE if BB can hold bookkeeping code.  */
4501static bool
4502block_valid_for_bookkeeping_p (basic_block bb)
4503{
4504  insn_t bb_end = BB_END (bb);
4505
4506  if (!in_current_region_p (bb) || EDGE_COUNT (bb->succs) > 1)
4507    return false;
4508
4509  if (INSN_P (bb_end))
4510    {
4511      if (INSN_SCHED_TIMES (bb_end) > 0)
4512	return false;
4513    }
4514  else
4515    gcc_assert (NOTE_INSN_BASIC_BLOCK_P (bb_end));
4516
4517  return true;
4518}
4519
4520/* Attempt to find a block that can hold bookkeeping code for path(s) incoming
4521   into E2->dest, except from E1->src (there may be a sequence of empty basic
4522   blocks between E1->src and E2->dest).  Return found block, or NULL if new
4523   one must be created.  If LAX holds, don't assume there is a simple path
4524   from E1->src to E2->dest.  */
4525static basic_block
4526find_block_for_bookkeeping (edge e1, edge e2, bool lax)
4527{
4528  basic_block candidate_block = NULL;
4529  edge e;
4530
4531  /* Loop over edges from E1 to E2, inclusive.  */
4532  for (e = e1; !lax || e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun); e =
4533       EDGE_SUCC (e->dest, 0))
4534    {
4535      if (EDGE_COUNT (e->dest->preds) == 2)
4536	{
4537	  if (candidate_block == NULL)
4538	    candidate_block = (EDGE_PRED (e->dest, 0) == e
4539			       ? EDGE_PRED (e->dest, 1)->src
4540			       : EDGE_PRED (e->dest, 0)->src);
4541	  else
	    /* Found an additional edge leading into the path from e1 to e2
	       from the side.  */
4544	    return NULL;
4545	}
4546      else if (EDGE_COUNT (e->dest->preds) > 2)
	/* Several edges lead into the path from e1 to e2 from the side.  */
4548	return NULL;
4549
4550      if (e == e2)
4551	return ((!lax || candidate_block)
4552		&& block_valid_for_bookkeeping_p (candidate_block)
4553		? candidate_block
4554		: NULL);
4555
4556      if (lax && EDGE_COUNT (e->dest->succs) != 1)
4557	return NULL;
4558    }
4559
4560  if (lax)
4561    return NULL;
4562
4563  gcc_unreachable ();
4564}
4565
4566/* Create new basic block for bookkeeping code for path(s) incoming into
4567   E2->dest, except from E1->src.  Return created block.  */
4568static basic_block
4569create_block_for_bookkeeping (edge e1, edge e2)
4570{
4571  basic_block new_bb, bb = e2->dest;
4572
4573  /* Check that we don't spoil the loop structure.  */
4574  if (current_loop_nest)
4575    {
4576      basic_block latch = current_loop_nest->latch;
4577
4578      /* We do not split header.  */
4579      gcc_assert (e2->dest != current_loop_nest->header);
4580
4581      /* We do not redirect the only edge to the latch block.  */
4582      gcc_assert (e1->dest != latch
4583		  || !single_pred_p (latch)
4584		  || e1 != single_pred_edge (latch));
4585    }
4586
4587  /* Split BB to insert BOOK_INSN there.  */
4588  new_bb = sched_split_block (bb, NULL);
4589
4590  /* Move note_list from the upper bb.  */
4591  gcc_assert (BB_NOTE_LIST (new_bb) == NULL_RTX);
4592  BB_NOTE_LIST (new_bb) = BB_NOTE_LIST (bb);
4593  BB_NOTE_LIST (bb) = NULL;
4594
4595  gcc_assert (e2->dest == bb);
4596
4597  /* Skip block for bookkeeping copy when leaving E1->src.  */
4598  if (e1->flags & EDGE_FALLTHRU)
4599    sel_redirect_edge_and_branch_force (e1, new_bb);
4600  else
4601    sel_redirect_edge_and_branch (e1, new_bb);
4602
4603  gcc_assert (e1->dest == new_bb);
4604  gcc_assert (sel_bb_empty_p (bb));
4605
4606  /* To keep basic block numbers in sync between debug and non-debug
4607     compilations, we have to rotate blocks here.  Consider that we
4608     started from (a,b)->d, (c,d)->e, and d contained only debug
4609     insns.  It would have been removed before if the debug insns
4610     weren't there, so we'd have split e rather than d.  So what we do
4611     now is to swap the block numbers of new_bb and
4612     single_succ(new_bb) == e, so that the insns that were in e before
4613     get the new block number.  */
4614
4615  if (MAY_HAVE_DEBUG_INSNS)
4616    {
4617      basic_block succ;
4618      insn_t insn = sel_bb_head (new_bb);
4619      insn_t last;
4620
4621      if (DEBUG_INSN_P (insn)
4622	  && single_succ_p (new_bb)
4623	  && (succ = single_succ (new_bb))
4624	  && succ != EXIT_BLOCK_PTR_FOR_FN (cfun)
4625	  && DEBUG_INSN_P ((last = sel_bb_end (new_bb))))
4626	{
4627	  while (insn != last && (DEBUG_INSN_P (insn) || NOTE_P (insn)))
4628	    insn = NEXT_INSN (insn);
4629
4630	  if (insn == last)
4631	    {
4632	      sel_global_bb_info_def gbi;
4633	      sel_region_bb_info_def rbi;
4634	      int i;
4635
4636	      if (sched_verbose >= 2)
4637		sel_print ("Swapping block ids %i and %i\n",
4638			   new_bb->index, succ->index);
4639
4640	      i = new_bb->index;
4641	      new_bb->index = succ->index;
4642	      succ->index = i;
4643
4644	      SET_BASIC_BLOCK_FOR_FN (cfun, new_bb->index, new_bb);
4645	      SET_BASIC_BLOCK_FOR_FN (cfun, succ->index, succ);
4646
4647	      memcpy (&gbi, SEL_GLOBAL_BB_INFO (new_bb), sizeof (gbi));
4648	      memcpy (SEL_GLOBAL_BB_INFO (new_bb), SEL_GLOBAL_BB_INFO (succ),
4649		      sizeof (gbi));
4650	      memcpy (SEL_GLOBAL_BB_INFO (succ), &gbi, sizeof (gbi));
4651
4652	      memcpy (&rbi, SEL_REGION_BB_INFO (new_bb), sizeof (rbi));
4653	      memcpy (SEL_REGION_BB_INFO (new_bb), SEL_REGION_BB_INFO (succ),
4654		      sizeof (rbi));
4655	      memcpy (SEL_REGION_BB_INFO (succ), &rbi, sizeof (rbi));
4656
4657	      i = BLOCK_TO_BB (new_bb->index);
4658	      BLOCK_TO_BB (new_bb->index) = BLOCK_TO_BB (succ->index);
4659	      BLOCK_TO_BB (succ->index) = i;
4660
4661	      i = CONTAINING_RGN (new_bb->index);
4662	      CONTAINING_RGN (new_bb->index) = CONTAINING_RGN (succ->index);
4663	      CONTAINING_RGN (succ->index) = i;
4664
4665	      for (i = 0; i < current_nr_blocks; i++)
4666		if (BB_TO_BLOCK (i) == succ->index)
4667		  BB_TO_BLOCK (i) = new_bb->index;
4668		else if (BB_TO_BLOCK (i) == new_bb->index)
4669		  BB_TO_BLOCK (i) = succ->index;
4670
4671	      FOR_BB_INSNS (new_bb, insn)
4672		if (INSN_P (insn))
4673		  EXPR_ORIG_BB_INDEX (INSN_EXPR (insn)) = new_bb->index;
4674
4675	      FOR_BB_INSNS (succ, insn)
4676		if (INSN_P (insn))
4677		  EXPR_ORIG_BB_INDEX (INSN_EXPR (insn)) = succ->index;
4678
4679	      if (bitmap_clear_bit (code_motion_visited_blocks, new_bb->index))
4680		bitmap_set_bit (code_motion_visited_blocks, succ->index);
4681
4682	      gcc_assert (LABEL_P (BB_HEAD (new_bb))
4683			  && LABEL_P (BB_HEAD (succ)));
4684
4685	      if (sched_verbose >= 4)
4686		sel_print ("Swapping code labels %i and %i\n",
4687			   CODE_LABEL_NUMBER (BB_HEAD (new_bb)),
4688			   CODE_LABEL_NUMBER (BB_HEAD (succ)));
4689
4690	      i = CODE_LABEL_NUMBER (BB_HEAD (new_bb));
4691	      CODE_LABEL_NUMBER (BB_HEAD (new_bb))
4692		= CODE_LABEL_NUMBER (BB_HEAD (succ));
4693	      CODE_LABEL_NUMBER (BB_HEAD (succ)) = i;
4694	    }
4695	}
4696    }
4697
4698  return bb;
4699}
4700
4701/* Return insn after which we must insert bookkeeping code for path(s) incoming
4702   into E2->dest, except from E1->src.  If the returned insn immediately
4703   precedes a fence, assign that fence to *FENCE_TO_REWIND.  */
4704static insn_t
4705find_place_for_bookkeeping (edge e1, edge e2, fence_t *fence_to_rewind)
4706{
4707  insn_t place_to_insert;
  /* Find a basic block that can hold bookkeeping.  If one can be found, do
     not create a new basic block, but insert bookkeeping there.  */
4710  basic_block book_block = find_block_for_bookkeeping (e1, e2, FALSE);
4711
4712  if (book_block)
4713    {
4714      place_to_insert = BB_END (book_block);
4715
      /* Don't use a block containing only debug insns for
	 bookkeeping: this causes scheduling differences between debug
	 and non-debug compilations, for the block would have been
	 removed already.  */
4720      if (DEBUG_INSN_P (place_to_insert))
4721	{
4722	  rtx_insn *insn = sel_bb_head (book_block);
4723
4724	  while (insn != place_to_insert &&
4725		 (DEBUG_INSN_P (insn) || NOTE_P (insn)))
4726	    insn = NEXT_INSN (insn);
4727
4728	  if (insn == place_to_insert)
4729	    book_block = NULL;
4730	}
4731    }
4732
4733  if (!book_block)
4734    {
4735      book_block = create_block_for_bookkeeping (e1, e2);
4736      place_to_insert = BB_END (book_block);
4737      if (sched_verbose >= 9)
4738	sel_print ("New block is %i, split from bookkeeping block %i\n",
4739		   EDGE_SUCC (book_block, 0)->dest->index, book_block->index);
4740    }
4741  else
4742    {
4743      if (sched_verbose >= 9)
4744	sel_print ("Pre-existing bookkeeping block is %i\n", book_block->index);
4745    }
4746
4747  *fence_to_rewind = NULL;
4748  /* If basic block ends with a jump, insert bookkeeping code right before it.
4749     Notice if we are crossing a fence when taking PREV_INSN.  */
4750  if (INSN_P (place_to_insert) && control_flow_insn_p (place_to_insert))
4751    {
4752      *fence_to_rewind = flist_lookup (fences, place_to_insert);
4753      place_to_insert = PREV_INSN (place_to_insert);
4754    }
4755
4756  return place_to_insert;
4757}
4758
/* Find a proper seqno for a bookkeeping insn inserted at PLACE_TO_INSERT
   for JOIN_POINT.  */
4761static int
4762find_seqno_for_bookkeeping (insn_t place_to_insert, insn_t join_point)
4763{
4764  int seqno;
4765  rtx next;
4766
4767  /* Check if we are about to insert bookkeeping copy before a jump, and use
4768     jump's seqno for the copy; otherwise, use JOIN_POINT's seqno.  */
4769  next = NEXT_INSN (place_to_insert);
4770  if (INSN_P (next)
4771      && JUMP_P (next)
4772      && BLOCK_FOR_INSN (next) == BLOCK_FOR_INSN (place_to_insert))
4773    {
4774      gcc_assert (INSN_SCHED_TIMES (next) == 0);
4775      seqno = INSN_SEQNO (next);
4776    }
4777  else if (INSN_SEQNO (join_point) > 0)
4778    seqno = INSN_SEQNO (join_point);
4779  else
4780    {
4781      seqno = get_seqno_by_preds (place_to_insert);
4782
      /* Sometimes the fences can move in such a way that there will be
         no instructions with positive seqno around this bookkeeping.
         This means that there will be no way to get to it by a regular
         fence movement.  Never mind, because we pick up such pieces for
         rescheduling anyway, so any positive value will do for now.  */
4788      if (seqno < 0)
4789        {
4790          gcc_assert (pipelining_p);
4791          seqno = 1;
4792        }
4793    }
4794
4795  gcc_assert (seqno > 0);
4796  return seqno;
4797}
4798
/* Insert a bookkeeping copy of C_EXPR's insn after PLACE_TO_INSERT, assigning
4800   NEW_SEQNO to it.  Return created insn.  */
4801static insn_t
4802emit_bookkeeping_insn (insn_t place_to_insert, expr_t c_expr, int new_seqno)
4803{
4804  rtx_insn *new_insn_rtx = create_copy_of_insn_rtx (EXPR_INSN_RTX (c_expr));
4805
4806  vinsn_t new_vinsn
4807    = create_vinsn_from_insn_rtx (new_insn_rtx,
4808				  VINSN_UNIQUE_P (EXPR_VINSN (c_expr)));
4809
4810  insn_t new_insn = emit_insn_from_expr_after (c_expr, new_vinsn, new_seqno,
4811					       place_to_insert);
4812
4813  INSN_SCHED_TIMES (new_insn) = 0;
4814  bitmap_set_bit (current_copies, INSN_UID (new_insn));
4815
4816  return new_insn;
4817}
4818
/* Generate a bookkeeping copy of C_EXPR's insn for path(s) incoming into
   E2->dest, except from E1->src (there may be a sequence of empty blocks
   between E1->src and E2->dest).  Return the block containing the copy.
   All scheduler data is initialized for the newly created insn.  */
4823static basic_block
4824generate_bookkeeping_insn (expr_t c_expr, edge e1, edge e2)
4825{
4826  insn_t join_point, place_to_insert, new_insn;
4827  int new_seqno;
4828  bool need_to_exchange_data_sets;
4829  fence_t fence_to_rewind;
4830
4831  if (sched_verbose >= 4)
4832    sel_print ("Generating bookkeeping insn (%d->%d)\n", e1->src->index,
4833	       e2->dest->index);
4834
4835  join_point = sel_bb_head (e2->dest);
4836  place_to_insert = find_place_for_bookkeeping (e1, e2, &fence_to_rewind);
4837  new_seqno = find_seqno_for_bookkeeping (place_to_insert, join_point);
4838  need_to_exchange_data_sets
4839    = sel_bb_empty_p (BLOCK_FOR_INSN (place_to_insert));
4840
4841  new_insn = emit_bookkeeping_insn (place_to_insert, c_expr, new_seqno);
4842
4843  if (fence_to_rewind)
4844    FENCE_INSN (fence_to_rewind) = new_insn;
4845
  /* When inserting a bookkeeping insn in a new block, the data sets should
     be as follows: the old basic block (that now holds the bookkeeping)
     keeps the data sets it had before the bookkeeping was generated, and
     the new basic block (that now holds all other insns of the old basic
     block) has invalid data sets.  So exchange the data sets for these
     basic blocks, as sel_split_block mistakenly exchanges them in this
     case.  We cannot do it earlier, because when a single instruction is
     added to a new basic block, it should hold a NULL lv_set.  */
4854  if (need_to_exchange_data_sets)
4855    exchange_data_sets (BLOCK_FOR_INSN (new_insn),
4856			BLOCK_FOR_INSN (join_point));
4857
4858  stat_bookkeeping_copies++;
4859  return BLOCK_FOR_INSN (new_insn);
4860}
4861
4862/* Remove from AV_PTR all insns that may need bookkeeping when scheduling
4863   on FENCE, but we are unable to copy them.  */
4864static void
4865remove_insns_that_need_bookkeeping (fence_t fence, av_set_t *av_ptr)
4866{
4867  expr_t expr;
4868  av_set_iterator i;
4869
  /* An expression does not need bookkeeping if it is available on all paths
     from the current block to the original block, and the current block
     dominates the original block.  We check availability on all paths by
     examining EXPR_SPEC; this is not an exact test, because EXPR_SPEC may
     be positive even when the expr is available on all paths (but whenever
     the expr is unavailable on some path, EXPR_SPEC is guaranteed to be
     positive).  */
4876
4877  FOR_EACH_EXPR_1 (expr, i, av_ptr)
4878    {
4879      if (!control_flow_insn_p (EXPR_INSN_RTX (expr))
4880	  && (!bookkeeping_p || VINSN_UNIQUE_P (EXPR_VINSN (expr)))
4881	  && (EXPR_SPEC (expr)
4882	      || !EXPR_ORIG_BB_INDEX (expr)
4883	      || !dominated_by_p (CDI_DOMINATORS,
4884				  BASIC_BLOCK_FOR_FN (cfun,
4885						      EXPR_ORIG_BB_INDEX (expr)),
4886				  BLOCK_FOR_INSN (FENCE_INSN (fence)))))
4887	{
4888          if (sched_verbose >= 4)
4889            sel_print ("Expr %d removed because it would need bookkeeping, which "
4890                       "cannot be created\n", INSN_UID (EXPR_INSN_RTX (expr)));
4891	  av_set_iter_remove (&i);
4892	}
4893    }
4894}
4895
4896/* Moving conditional jump through some instructions.
4897
4898   Consider example:
4899
4900       ...                     <- current scheduling point
4901       NOTE BASIC BLOCK:       <- bb header
4902       (p8)  add r14=r14+0x9;;
4903       (p8)  mov [r14]=r23
4904       (!p8) jump L1;;
4905       NOTE BASIC BLOCK:
4906       ...
4907
   We can schedule the jump one cycle earlier than the mov, because they
   cannot be executed together, as their predicates are mutually exclusive.

   This is done as follows: first, a new fallthrough basic block is created
   after the jump (this can always be done, because there must already be a
   fallthrough block where control flow goes when the predicate is true, as
   in our example; otherwise there would be a dependence between those
   instructions and the jump, and we could not schedule the jump right now);
   next, all instructions between the jump and the current scheduling point
   are moved to this new block.  The result is:
4918
4919      NOTE BASIC BLOCK:
4920      (!p8) jump L1           <- current scheduling point
4921      NOTE BASIC BLOCK:       <- bb header
4922      (p8)  add r14=r14+0x9;;
4923      (p8)  mov [r14]=r23
4924      NOTE BASIC BLOCK:
4925      ...
4926*/
4927static void
4928move_cond_jump (rtx_insn *insn, bnd_t bnd)
4929{
4930  edge ft_edge;
4931  basic_block block_from, block_next, block_new, block_bnd, bb;
4932  rtx_insn *next, *prev, *link, *head;
4933
4934  block_from = BLOCK_FOR_INSN (insn);
4935  block_bnd = BLOCK_FOR_INSN (BND_TO (bnd));
4936  prev = BND_TO (bnd);
4937
4938#ifdef ENABLE_CHECKING
  /* The jump being moved should not cross any other jumps or beginnings of
     new basic blocks.  The only exception is when we move a jump through
     mutually exclusive insns along fallthru edges.  */
4942  if (block_from != block_bnd)
4943    {
4944      bb = block_from;
4945      for (link = PREV_INSN (insn); link != PREV_INSN (prev);
4946           link = PREV_INSN (link))
4947        {
4948          if (INSN_P (link))
4949            gcc_assert (sched_insns_conditions_mutex_p (insn, link));
4950          if (BLOCK_FOR_INSN (link) && BLOCK_FOR_INSN (link) != bb)
4951            {
4952              gcc_assert (single_pred (bb) == BLOCK_FOR_INSN (link));
4953              bb = BLOCK_FOR_INSN (link);
4954            }
4955        }
4956    }
4957#endif
4958
4959  /* Jump is moved to the boundary.  */
4960  next = PREV_INSN (insn);
4961  BND_TO (bnd) = insn;
4962
4963  ft_edge = find_fallthru_edge_from (block_from);
4964  block_next = ft_edge->dest;
  /* There must be a fallthrough block (otherwise, where would control
     flow go when the jump predicate is false?).  */
4967  gcc_assert (block_next);
4968
4969  /* Create new empty basic block after source block.  */
4970  block_new = sel_split_edge (ft_edge);
4971  gcc_assert (block_new->next_bb == block_next
4972              && block_from->next_bb == block_new);
4973
4974  /* Move all instructions except INSN to BLOCK_NEW.  */
4975  bb = block_bnd;
4976  head = BB_HEAD (block_new);
4977  while (bb != block_from->next_bb)
4978    {
4979      rtx_insn *from, *to;
4980      from = bb == block_bnd ? prev : sel_bb_head (bb);
4981      to = bb == block_from ? next : sel_bb_end (bb);
4982
4983      /* The jump being moved can be the first insn in the block.
4984         In this case we don't have to move anything in this block.  */
4985      if (NEXT_INSN (to) != from)
4986        {
4987          reorder_insns (from, to, head);
4988
4989          for (link = to; link != head; link = PREV_INSN (link))
4990            EXPR_ORIG_BB_INDEX (INSN_EXPR (link)) = block_new->index;
4991          head = to;
4992        }
4993
      /* Clean up possibly empty blocks left.  */
4995      block_next = bb->next_bb;
4996      if (bb != block_from)
4997	tidy_control_flow (bb, false);
4998      bb = block_next;
4999    }
5000
5001  /* Assert there is no jump to BLOCK_NEW, only fallthrough edge.  */
5002  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (BB_HEAD (block_new)));
5003
5004  gcc_assert (!sel_bb_empty_p (block_from)
5005              && !sel_bb_empty_p (block_new));
5006
  /* Update data sets for BLOCK_NEW to represent that INSN and
     the instructions from the other branch of INSN are no longer
     available at BLOCK_NEW.  */
5010  BB_AV_LEVEL (block_new) = global_level;
5011  gcc_assert (BB_LV_SET (block_new) == NULL);
5012  BB_LV_SET (block_new) = get_clear_regset_from_pool ();
5013  update_data_sets (sel_bb_head (block_new));
5014
5015  /* INSN is a new basic block header - so prepare its data
5016     structures and update availability and liveness sets.  */
5017  update_data_sets (insn);
5018
5019  if (sched_verbose >= 4)
5020    sel_print ("Moving jump %d\n", INSN_UID (insn));
5021}
5022
/* Remove nops generated during move_op to prevent removal of empty
   basic blocks.  */
5025static void
5026remove_temp_moveop_nops (bool full_tidying)
5027{
5028  int i;
5029  insn_t insn;
5030
5031  FOR_EACH_VEC_ELT (vec_temp_moveop_nops, i, insn)
5032    {
5033      gcc_assert (INSN_NOP_P (insn));
5034      return_nop_to_pool (insn, full_tidying);
5035    }
5036
5037  /* Empty the vector.  */
5038  if (vec_temp_moveop_nops.length () > 0)
5039    vec_temp_moveop_nops.block_remove (0, vec_temp_moveop_nops.length ());
5040}
5041
5042/* Records the maximal UID before moving up an instruction.  Used for
5043   distinguishing between bookkeeping copies and original insns.  */
5044static int max_uid_before_move_op = 0;
5045
/* Remove from AV_VLIW_P all instructions but the next one when the debug
   counter tells us so.  The next instruction is fetched from BNDS.  */
5048static void
5049remove_insns_for_debug (blist_t bnds, av_set_t *av_vliw_p)
5050{
5051  if (! dbg_cnt (sel_sched_insn_cnt))
5052    /* Leave only the next insn in av_vliw.  */
5053    {
5054      av_set_iterator av_it;
5055      expr_t expr;
5056      bnd_t bnd = BLIST_BND (bnds);
5057      insn_t next = BND_TO (bnd);
5058
5059      gcc_assert (BLIST_NEXT (bnds) == NULL);
5060
5061      FOR_EACH_EXPR_1 (expr, av_it, av_vliw_p)
5062        if (EXPR_INSN_RTX (expr) != next)
5063          av_set_iter_remove (&av_it);
5064    }
5065}
5066
5067/* Compute available instructions on BNDS.  FENCE is the current fence.  Write
5068   the computed set to *AV_VLIW_P.  */
5069static void
5070compute_av_set_on_boundaries (fence_t fence, blist_t bnds, av_set_t *av_vliw_p)
5071{
5072  if (sched_verbose >= 2)
5073    {
5074      sel_print ("Boundaries: ");
5075      dump_blist (bnds);
5076      sel_print ("\n");
5077    }
5078
5079  for (; bnds; bnds = BLIST_NEXT (bnds))
5080    {
5081      bnd_t bnd = BLIST_BND (bnds);
5082      av_set_t av1_copy;
5083      insn_t bnd_to = BND_TO (bnd);
5084
5085      /* Rewind BND->TO to the basic block header in case some bookkeeping
5086         instructions were inserted before BND->TO and it needs to be
5087         adjusted.  */
5088      if (sel_bb_head_p (bnd_to))
5089        gcc_assert (INSN_SCHED_TIMES (bnd_to) == 0);
5090      else
5091        while (INSN_SCHED_TIMES (PREV_INSN (bnd_to)) == 0)
5092          {
5093            bnd_to = PREV_INSN (bnd_to);
5094            if (sel_bb_head_p (bnd_to))
5095              break;
5096          }
5097
5098      if (BND_TO (bnd) != bnd_to)
5099	{
	  gcc_assert (FENCE_INSN (fence) == BND_TO (bnd));
5101	  FENCE_INSN (fence) = bnd_to;
5102	  BND_TO (bnd) = bnd_to;
5103	}
5104
5105      av_set_clear (&BND_AV (bnd));
5106      BND_AV (bnd) = compute_av_set (BND_TO (bnd), NULL, 0, true);
5107
5108      av_set_clear (&BND_AV1 (bnd));
5109      BND_AV1 (bnd) = av_set_copy (BND_AV (bnd));
5110
5111      moveup_set_inside_insn_group (&BND_AV1 (bnd), NULL);
5112
5113      av1_copy = av_set_copy (BND_AV1 (bnd));
5114      av_set_union_and_clear (av_vliw_p, &av1_copy, NULL);
5115    }
5116
5117  if (sched_verbose >= 2)
5118    {
5119      sel_print ("Available exprs (vliw form): ");
5120      dump_av_set (*av_vliw_p);
5121      sel_print ("\n");
5122    }
5123}
5124
5125/* Calculate the sequential av set on BND corresponding to the EXPR_VLIW
5126   expression.  When FOR_MOVEOP is true, also replace the register of
5127   expressions found with the register from EXPR_VLIW.  */
5128static av_set_t
5129find_sequential_best_exprs (bnd_t bnd, expr_t expr_vliw, bool for_moveop)
5130{
5131  av_set_t expr_seq = NULL;
5132  expr_t expr;
5133  av_set_iterator i;
5134
5135  FOR_EACH_EXPR (expr, i, BND_AV (bnd))
5136    {
5137      if (equal_after_moveup_path_p (expr, NULL, expr_vliw))
5138        {
5139          if (for_moveop)
5140            {
5141              /* The sequential expression has the right form to pass
5142                 to move_op except when renaming happened.  Put the
5143                 correct register in EXPR then.  */
5144              if (EXPR_SEPARABLE_P (expr) && REG_P (EXPR_LHS (expr)))
5145		{
5146                  if (expr_dest_regno (expr) != expr_dest_regno (expr_vliw))
5147		    {
5148		      replace_dest_with_reg_in_expr (expr, EXPR_LHS (expr_vliw));
5149		      stat_renamed_scheduled++;
5150		    }
		  /* Also put the correct TARGET_AVAILABLE bit on the expr.
                     This is needed when renaming came up with the original
                     register.  */
5154                  else if (EXPR_TARGET_AVAILABLE (expr)
5155                           != EXPR_TARGET_AVAILABLE (expr_vliw))
5156		    {
5157		      gcc_assert (EXPR_TARGET_AVAILABLE (expr_vliw) == 1);
5158		      EXPR_TARGET_AVAILABLE (expr) = 1;
5159		    }
5160		}
5161              if (EXPR_WAS_SUBSTITUTED (expr))
5162                stat_substitutions_total++;
5163            }
5164
5165          av_set_add (&expr_seq, expr);
5166
          /* With substitution inside an insn group, it is possible
             that more than one expression in expr_seq will correspond
             to expr_vliw.  In this case, choose one, as attempting to
             move both leads to miscompiles.  */
5171          break;
5172        }
5173    }
5174
5175  if (for_moveop && sched_verbose >= 2)
5176    {
5177      sel_print ("Best expression(s) (sequential form): ");
5178      dump_av_set (expr_seq);
5179      sel_print ("\n");
5180    }
5181
5182  return expr_seq;
5183}
5184
5185
/* Move NOP to the previous block.  */
5187static void ATTRIBUTE_UNUSED
5188move_nop_to_previous_block (insn_t nop, basic_block prev_bb)
5189{
5190  insn_t prev_insn, next_insn, note;
5191
5192  gcc_assert (sel_bb_head_p (nop)
5193              && prev_bb == BLOCK_FOR_INSN (nop)->prev_bb);
5194  note = bb_note (BLOCK_FOR_INSN (nop));
5195  prev_insn = sel_bb_end (prev_bb);
5196  next_insn = NEXT_INSN (nop);
5197  gcc_assert (prev_insn != NULL_RTX
5198              && PREV_INSN (note) == prev_insn);
5199
5200  SET_NEXT_INSN (prev_insn) = nop;
5201  SET_PREV_INSN (nop) = prev_insn;
5202
5203  SET_PREV_INSN (note) = nop;
5204  SET_NEXT_INSN (note) = next_insn;
5205
5206  SET_NEXT_INSN (nop) = note;
5207  SET_PREV_INSN (next_insn) = note;
5208
5209  BB_END (prev_bb) = nop;
5210  BLOCK_FOR_INSN (nop) = prev_bb;
5211}
5212
5213/* Prepare a place to insert the chosen expression on BND.  */
5214static insn_t
5215prepare_place_to_insert (bnd_t bnd)
5216{
5217  insn_t place_to_insert;
5218
  /* Init place_to_insert before calling move_op, as the latter
     can possibly remove BND_TO (bnd).  */
5221  if (/* If this is not the first insn scheduled.  */
5222      BND_PTR (bnd))
5223    {
5224      /* Add it after last scheduled.  */
5225      place_to_insert = ILIST_INSN (BND_PTR (bnd));
5226      if (DEBUG_INSN_P (place_to_insert))
5227	{
5228	  ilist_t l = BND_PTR (bnd);
	  while ((l = ILIST_NEXT (l))
		 && DEBUG_INSN_P (ILIST_INSN (l)))
5231	    ;
5232	  if (!l)
5233	    place_to_insert = NULL;
5234	}
5235    }
5236  else
5237    place_to_insert = NULL;
5238
5239  if (!place_to_insert)
5240    {
5241      /* Add it before BND_TO.  The difference is in the
5242         basic block, where INSN will be added.  */
5243      place_to_insert = get_nop_from_pool (BND_TO (bnd));
5244      gcc_assert (BLOCK_FOR_INSN (place_to_insert)
5245                  == BLOCK_FOR_INSN (BND_TO (bnd)));
5246    }
5247
5248  return place_to_insert;
5249}
5250
/* Find original instructions for EXPR_SEQ and move them to the boundary BND.
   Return the expression to emit in C_EXPR.  */
5253static bool
5254move_exprs_to_boundary (bnd_t bnd, expr_t expr_vliw,
5255                        av_set_t expr_seq, expr_t c_expr)
5256{
5257  bool b, should_move;
5258  unsigned book_uid;
5259  bitmap_iterator bi;
5260  int n_bookkeeping_copies_before_moveop;
5261
  /* Make a move.  This call will remove the original operation,
     insert all necessary bookkeeping instructions and update the
     data sets.  After that all we have to do is add the operation
     right before BND_TO (BND).  */
5266  n_bookkeeping_copies_before_moveop = stat_bookkeeping_copies;
5267  max_uid_before_move_op = get_max_uid ();
5268  bitmap_clear (current_copies);
5269  bitmap_clear (current_originators);
5270
5271  b = move_op (BND_TO (bnd), expr_seq, expr_vliw,
5272               get_dest_from_orig_ops (expr_seq), c_expr, &should_move);
5273
5274  /* We should be able to find the expression we've chosen for
5275     scheduling.  */
5276  gcc_assert (b);
5277
5278  if (stat_bookkeeping_copies > n_bookkeeping_copies_before_moveop)
5279    stat_insns_needed_bookkeeping++;
5280
5281  EXECUTE_IF_SET_IN_BITMAP (current_copies, 0, book_uid, bi)
5282    {
5283      unsigned uid;
5284      bitmap_iterator bi;
5285
5286      /* We allocate these bitmaps lazily.  */
5287      if (! INSN_ORIGINATORS_BY_UID (book_uid))
5288        INSN_ORIGINATORS_BY_UID (book_uid) = BITMAP_ALLOC (NULL);
5289
5290      bitmap_copy (INSN_ORIGINATORS_BY_UID (book_uid),
5291                   current_originators);
5292
5293      /* Transitively add all originators' originators.  */
5294      EXECUTE_IF_SET_IN_BITMAP (current_originators, 0, uid, bi)
5295       if (INSN_ORIGINATORS_BY_UID (uid))
5296	 bitmap_ior_into (INSN_ORIGINATORS_BY_UID (book_uid),
5297			  INSN_ORIGINATORS_BY_UID (uid));
5298    }
5299
5300  return should_move;
5301}
5302
5303
5304/* Debug a DFA state as an array of bytes.  */
5305static void
5306debug_state (state_t state)
5307{
5308  unsigned char *p;
5309  unsigned int i, size = dfa_state_size;
5310
5311  sel_print ("state (%u):", size);
5312  for (i = 0, p = (unsigned char *) state; i < size; i++)
5313    sel_print (" %d", p[i]);
5314  sel_print ("\n");
5315}
5316
5317/* Advance state on FENCE with INSN.  Return true if INSN is
5318   an ASM, and we should advance state once more.  */
5319static bool
5320advance_state_on_fence (fence_t fence, insn_t insn)
5321{
5322  bool asm_p;
5323
5324  if (recog_memoized (insn) >= 0)
5325    {
5326      int res;
5327      state_t temp_state = alloca (dfa_state_size);
5328
5329      gcc_assert (!INSN_ASM_P (insn));
5330      asm_p = false;
5331
5332      memcpy (temp_state, FENCE_STATE (fence), dfa_state_size);
5333      res = state_transition (FENCE_STATE (fence), insn);
5334      gcc_assert (res < 0);
5335
5336      if (memcmp (temp_state, FENCE_STATE (fence), dfa_state_size))
5337        {
5338          FENCE_ISSUED_INSNS (fence)++;
5339
5340          /* We should never issue more than issue_rate insns.  */
5341          if (FENCE_ISSUED_INSNS (fence) > issue_rate)
5342            gcc_unreachable ();
5343        }
5344    }
5345  else
5346    {
5347      /* This could be an ASM insn which we'd like to schedule
5348         on the next cycle.  */
5349      asm_p = INSN_ASM_P (insn);
5350      if (!FENCE_STARTS_CYCLE_P (fence) && asm_p)
5351        advance_one_cycle (fence);
5352    }
5353
5354  if (sched_verbose >= 2)
5355    debug_state (FENCE_STATE (fence));
5356  if (!DEBUG_INSN_P (insn))
5357    FENCE_STARTS_CYCLE_P (fence) = 0;
5358  FENCE_ISSUE_MORE (fence) = can_issue_more;
5359  return asm_p;
5360}
5361
5362/* Update FENCE on which INSN was scheduled and this INSN, too.  NEED_STALL
5363   is nonzero if we need to stall after issuing INSN.  */
5364static void
5365update_fence_and_insn (fence_t fence, insn_t insn, int need_stall)
5366{
5367  bool asm_p;
5368
5369  /* First, reflect that something is scheduled on this fence.  */
5370  asm_p = advance_state_on_fence (fence, insn);
5371  FENCE_LAST_SCHEDULED_INSN (fence) = insn;
5372  vec_safe_push (FENCE_EXECUTING_INSNS (fence), insn);
5373  if (SCHED_GROUP_P (insn))
5374    {
5375      FENCE_SCHED_NEXT (fence) = INSN_SCHED_NEXT (insn);
5376      SCHED_GROUP_P (insn) = 0;
5377    }
5378  else
5379    FENCE_SCHED_NEXT (fence) = NULL;
5380  if (INSN_UID (insn) < FENCE_READY_TICKS_SIZE (fence))
5381    FENCE_READY_TICKS (fence) [INSN_UID (insn)] = 0;
5382
5383  /* Set instruction scheduling info.  This will be used in bundling,
5384     pipelining, tick computations etc.  */
5385  ++INSN_SCHED_TIMES (insn);
5386  EXPR_TARGET_AVAILABLE (INSN_EXPR (insn)) = true;
5387  EXPR_ORIG_SCHED_CYCLE (INSN_EXPR (insn)) = FENCE_CYCLE (fence);
5388  INSN_AFTER_STALL_P (insn) = FENCE_AFTER_STALL_P (fence);
5389  INSN_SCHED_CYCLE (insn) = FENCE_CYCLE (fence);
5390
  /* This does not account for adjust_cost hooks; just add the biggest
     constant the hook may add to the latency.  TODO: make this
     a target dependent constant.  */
5394  INSN_READY_CYCLE (insn)
5395    = INSN_SCHED_CYCLE (insn) + (INSN_CODE (insn) < 0
5396                                 ? 1
5397                                 : maximal_insn_latency (insn) + 1);
5398
5399  /* Change these fields last, as they're used above.  */
5400  FENCE_AFTER_STALL_P (fence) = 0;
5401  if (asm_p || need_stall)
5402    advance_one_cycle (fence);
5403
5404  /* Indicate that we've scheduled something on this fence.  */
5405  FENCE_SCHEDULED_P (fence) = true;
5406  scheduled_something_on_previous_fence = true;
5407
5408  /* Print debug information when insn's fields are updated.  */
5409  if (sched_verbose >= 2)
5410    {
5411      sel_print ("Scheduling insn: ");
5412      dump_insn_1 (insn, 1);
5413      sel_print ("\n");
5414    }
5415}
5416
/* Update boundary BND (and, if needed, FENCE) with INSN, remove the
   old boundary from BNDSP, add new boundaries to BNDS_TAILP and
   return it.  */
5420static blist_t *
5421update_boundaries (fence_t fence, bnd_t bnd, insn_t insn, blist_t *bndsp,
5422                   blist_t *bnds_tailp)
5423{
5424  succ_iterator si;
5425  insn_t succ;
5426
5427  advance_deps_context (BND_DC (bnd), insn);
5428  FOR_EACH_SUCC_1 (succ, si, insn,
5429                   SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
5430    {
5431      ilist_t ptr = ilist_copy (BND_PTR (bnd));
5432
5433      ilist_add (&ptr, insn);
5434
5435      if (DEBUG_INSN_P (insn) && sel_bb_end_p (insn)
5436	  && is_ineligible_successor (succ, ptr))
5437	{
5438	  ilist_clear (&ptr);
5439	  continue;
5440	}
5441
5442      if (FENCE_INSN (fence) == insn && !sel_bb_end_p (insn))
5443	{
5444	  if (sched_verbose >= 9)
5445	    sel_print ("Updating fence insn from %i to %i\n",
5446		       INSN_UID (insn), INSN_UID (succ));
5447	  FENCE_INSN (fence) = succ;
5448	}
5449      blist_add (bnds_tailp, succ, ptr, BND_DC (bnd));
5450      bnds_tailp = &BLIST_NEXT (*bnds_tailp);
5451    }
5452
5453  blist_remove (bndsp);
5454  return bnds_tailp;
5455}
5456
5457/* Schedule EXPR_VLIW on BND.  Return the insn emitted.  */
5458static insn_t
5459schedule_expr_on_boundary (bnd_t bnd, expr_t expr_vliw, int seqno)
5460{
5461  av_set_t expr_seq;
5462  expr_t c_expr = XALLOCA (expr_def);
5463  insn_t place_to_insert;
5464  insn_t insn;
5465  bool should_move;
5466
5467  expr_seq = find_sequential_best_exprs (bnd, expr_vliw, true);
5468
  /* In case of scheduling a jump skipping some other instructions,
     prepare the CFG.  After this, the jump is at the boundary and can
     be scheduled like a usual insn by MOVE_OP.  */
5472  if (vinsn_cond_branch_p (EXPR_VINSN (expr_vliw)))
5473    {
5474      insn = EXPR_INSN_RTX (expr_vliw);
5475
5476      /* Speculative jumps are not handled.  */
5477      if (insn != BND_TO (bnd)
5478          && !sel_insn_is_speculation_check (insn))
5479        move_cond_jump (insn, bnd);
5480    }
5481
5482  /* Find a place for C_EXPR to schedule.  */
5483  place_to_insert = prepare_place_to_insert (bnd);
5484  should_move = move_exprs_to_boundary (bnd, expr_vliw, expr_seq, c_expr);
5485  clear_expr (c_expr);
5486
  /* Add the instruction.  The corner case to care about is when
     the expr_seq set has more than one expr, and we chose the one that
     is not equal to expr_vliw.  Then expr_vliw may be an insn already in
     the stream, and we can't use it.  Generate a new vinsn.  */
5491  if (INSN_IN_STREAM_P (EXPR_INSN_RTX (expr_vliw)))
5492    {
5493      vinsn_t vinsn_new;
5494
5495      vinsn_new = vinsn_copy (EXPR_VINSN (expr_vliw), false);
5496      change_vinsn_in_expr (expr_vliw, vinsn_new);
5497      should_move = false;
5498    }
5499  if (should_move)
5500    insn = sel_move_insn (expr_vliw, seqno, place_to_insert);
5501  else
5502    insn = emit_insn_from_expr_after (expr_vliw, NULL, seqno,
5503                                      place_to_insert);
5504
  /* Return the nops generated to preserve data sets back
     into the pool.  */
5507  if (INSN_NOP_P (place_to_insert))
5508    return_nop_to_pool (place_to_insert, !DEBUG_INSN_P (insn));
5509  remove_temp_moveop_nops (!DEBUG_INSN_P (insn));
5510
5511  av_set_clear (&expr_seq);
5512
  /* Save the scheduled expression so that we can reset its target
     availability if we meet it later on the same fence.  */
5515  if (EXPR_WAS_RENAMED (expr_vliw))
5516    vinsn_vec_add (&vec_target_unavailable_vinsns, INSN_EXPR (insn));
5517
  /* Check that the recent movement didn't destroy the loop
     structure.  */
5520  gcc_assert (!pipelining_p
5521              || current_loop_nest == NULL
5522              || loop_latch_edge (current_loop_nest));
5523  return insn;
5524}
5525
5526/* Stall for N cycles on FENCE.  */
5527static void
5528stall_for_cycles (fence_t fence, int n)
5529{
5530  int could_more;
5531
5532  could_more = n > 1 || FENCE_ISSUED_INSNS (fence) < issue_rate;
5533  while (n--)
5534    advance_one_cycle (fence);
5535  if (could_more)
5536    FENCE_AFTER_STALL_P (fence) = 1;
5537}
5538
/* Gather a parallel group of insns at FENCE and assign their seqno
   to SEQNO.  All scheduled insns are gathered in the SCHEDULED_INSNS_TAILPP
   list for later recalculation of seqnos.  */
5542static void
5543fill_insns (fence_t fence, int seqno, ilist_t **scheduled_insns_tailpp)
5544{
5545  blist_t bnds = NULL, *bnds_tailp;
5546  av_set_t av_vliw = NULL;
5547  insn_t insn = FENCE_INSN (fence);
5548
5549  if (sched_verbose >= 2)
5550    sel_print ("Starting fill_insns for insn %d, cycle %d\n",
5551               INSN_UID (insn), FENCE_CYCLE (fence));
5552
5553  blist_add (&bnds, insn, NULL, FENCE_DC (fence));
5554  bnds_tailp = &BLIST_NEXT (bnds);
5555  set_target_context (FENCE_TC (fence));
5556  can_issue_more = FENCE_ISSUE_MORE (fence);
5557  target_bb = INSN_BB (insn);
5558
5559  /* Do while we can add any operation to the current group.  */
5560  do
5561    {
5562      blist_t *bnds_tailp1, *bndsp;
5563      expr_t expr_vliw;
5564      int need_stall = false;
5565      int was_stall = 0, scheduled_insns = 0;
5566      int max_insns = pipelining_p ? issue_rate : 2 * issue_rate;
5567      int max_stall = pipelining_p ? 1 : 3;
5568      bool last_insn_was_debug = false;
5569      bool was_debug_bb_end_p = false;
5570
5571      compute_av_set_on_boundaries (fence, bnds, &av_vliw);
5572      remove_insns_that_need_bookkeeping (fence, &av_vliw);
5573      remove_insns_for_debug (bnds, &av_vliw);
5574
5575      /* Return early if we have nothing to schedule.  */
5576      if (av_vliw == NULL)
5577        break;
5578
5579      /* Choose the best expression and, if needed, destination register
5580	 for it.  */
5581      do
5582        {
5583          expr_vliw = find_best_expr (&av_vliw, bnds, fence, &need_stall);
5584          if (! expr_vliw && need_stall)
5585            {
5586              /* All expressions required a stall.  Do not recompute av sets
5587                 as we'll get the same answer (modulo the insns between
5588                 the fence and its boundary, which will not be available for
5589                 pipelining).
5590		 If we are going to stall for too long, break to recompute av
5591		 sets and bring more insns for pipelining.  */
5592              was_stall++;
5593	      if (need_stall <= 3)
5594		stall_for_cycles (fence, need_stall);
5595	      else
5596		{
5597		  stall_for_cycles (fence, 1);
5598		  break;
5599		}
5600            }
5601        }
5602      while (! expr_vliw && need_stall);
5603
5604      /* Now either we've selected expr_vliw or we have nothing to schedule.  */
5605      if (!expr_vliw)
5606        {
5607	  av_set_clear (&av_vliw);
5608          break;
5609        }
5610
5611      bndsp = &bnds;
5612      bnds_tailp1 = bnds_tailp;
5613
5614      do
	/* This code will be executed only once until we support several
           boundaries per fence.  */
5617        {
5618	  bnd_t bnd = BLIST_BND (*bndsp);
5619
5620	  if (!av_set_is_in_p (BND_AV1 (bnd), EXPR_VINSN (expr_vliw)))
5621	    {
5622	      bndsp = &BLIST_NEXT (*bndsp);
5623	      continue;
5624	    }
5625
5626          insn = schedule_expr_on_boundary (bnd, expr_vliw, seqno);
5627	  last_insn_was_debug = DEBUG_INSN_P (insn);
5628	  if (last_insn_was_debug)
5629	    was_debug_bb_end_p = (insn == BND_TO (bnd) && sel_bb_end_p (insn));
5630          update_fence_and_insn (fence, insn, need_stall);
5631          bnds_tailp = update_boundaries (fence, bnd, insn, bndsp, bnds_tailp);
5632
5633	  /* Add insn to the list of scheduled on this cycle instructions.  */
5634	  ilist_add (*scheduled_insns_tailpp, insn);
5635	  *scheduled_insns_tailpp = &ILIST_NEXT (**scheduled_insns_tailpp);
5636        }
5637      while (*bndsp != *bnds_tailp1);
5638
5639      av_set_clear (&av_vliw);
5640      if (!last_insn_was_debug)
5641	scheduled_insns++;
5642
      /* We currently support information about candidate blocks only for
	 one 'target_bb' block.  Hence we can't schedule after a jump insn,
	 as this will bring two boundaries and, hence, the necessity to handle
	 information for two or more blocks concurrently.  */
5647      if ((last_insn_was_debug ? was_debug_bb_end_p : sel_bb_end_p (insn))
5648          || (was_stall
5649              && (was_stall >= max_stall
5650                  || scheduled_insns >= max_insns)))
5651        break;
5652    }
5653  while (bnds);
5654
5655  gcc_assert (!FENCE_BNDS (fence));
5656
5657  /* Update boundaries of the FENCE.  */
5658  while (bnds)
5659    {
5660      ilist_t ptr = BND_PTR (BLIST_BND (bnds));
5661
5662      if (ptr)
5663	{
5664	  insn = ILIST_INSN (ptr);
5665
5666	  if (!ilist_is_in_p (FENCE_BNDS (fence), insn))
5667	    ilist_add (&FENCE_BNDS (fence), insn);
5668	}
5669
5670      blist_remove (&bnds);
5671    }
5672
5673  /* Update target context on the fence.  */
5674  reset_target_context (FENCE_TC (fence), false);
5675}
5676
5677/* All exprs in ORIG_OPS must have the same destination register or memory.
5678   Return that destination.  */
5679static rtx
5680get_dest_from_orig_ops (av_set_t orig_ops)
5681{
5682  rtx dest = NULL_RTX;
5683  av_set_iterator av_it;
5684  expr_t expr;
5685  bool first_p = true;
5686
5687  FOR_EACH_EXPR (expr, av_it, orig_ops)
5688    {
5689      rtx x = EXPR_LHS (expr);
5690
5691      if (first_p)
5692	{
5693	  first_p = false;
5694	  dest = x;
5695	}
5696      else
5697	gcc_assert (dest == x
5698		    || (dest != NULL_RTX && x != NULL_RTX
5699			&& rtx_equal_p (dest, x)));
5700    }
5701
5702  return dest;
5703}
5704
5705/* Update data sets for the bookkeeping block and record those expressions
5706   which become no longer available after inserting this bookkeeping.  */
5707static void
5708update_and_record_unavailable_insns (basic_block book_block)
5709{
5710  av_set_iterator i;
5711  av_set_t old_av_set = NULL;
5712  expr_t cur_expr;
5713  rtx_insn *bb_end = sel_bb_end (book_block);
5714
  /* First, get correct liveness in the bookkeeping block.  The problem is
     the range between the bookkeeping insn and the end of the block.  */
5717  update_liveness_on_insn (bb_end);
5718  if (control_flow_insn_p (bb_end))
5719    update_liveness_on_insn (PREV_INSN (bb_end));
5720
5721  /* If there's valid av_set on BOOK_BLOCK, then there might exist another
5722     fence above, where we may choose to schedule an insn which is
5723     actually blocked from moving up with the bookkeeping we create here.  */
5724  if (AV_SET_VALID_P (sel_bb_head (book_block)))
5725    {
5726      old_av_set = av_set_copy (BB_AV_SET (book_block));
5727      update_data_sets (sel_bb_head (book_block));
5728
5729      /* Traverse all the expressions in the old av_set and check whether
5730	 CUR_EXPR is in new AV_SET.  */
5731      FOR_EACH_EXPR (cur_expr, i, old_av_set)
5732        {
5733          expr_t new_expr = av_set_lookup (BB_AV_SET (book_block),
5734					   EXPR_VINSN (cur_expr));
5735
5736          if (! new_expr
5737              /* In this case, we can just turn off the E_T_A bit, but we can't
5738                 represent this information with the current vector.  */
5739              || EXPR_TARGET_AVAILABLE (new_expr)
5740		 != EXPR_TARGET_AVAILABLE (cur_expr))
	    /* Unfortunately, the code below could also be triggered on
	       separable insns, e.g. when moving insns through the new
	       speculation check as in PR 53701.  */
5744            vinsn_vec_add (&vec_bookkeeping_blocked_vinsns, cur_expr);
5745        }
5746
5747      av_set_clear (&old_av_set);
5748    }
5749}
5750
/* The main effect of this function is that sparams->c_expr is merged
   with (or copied to) lparams->c_expr_merged.  If there's only one successor,
   we avoid merging anything by copying sparams->c_expr to lparams->c_expr_merged.
   lparams->c_expr_merged is copied back to sparams->c_expr after all
   successors have been traversed.  lparams->c_expr_local is an expr allocated
   on the stack in the caller function, and is used if there is more than one
   successor.

   SUCC is one of the SUCCS_NORMAL successors of INSN,
   MOVEOP_DRV_CALL_RES is the result of calling code_motion_path_driver on SUCC,
   LPARAMS and STATIC_PARAMS contain the parameters described above.  */
5762static void
5763move_op_merge_succs (insn_t insn ATTRIBUTE_UNUSED,
5764                     insn_t succ ATTRIBUTE_UNUSED,
5765		     int moveop_drv_call_res,
5766		     cmpd_local_params_p lparams, void *static_params)
5767{
5768  moveop_static_params_p sparams = (moveop_static_params_p) static_params;
5769
5770  /* Nothing to do, if original expr wasn't found below.  */
5771  if (moveop_drv_call_res != 1)
5772    return;
5773
5774  /* If this is a first successor.  */
5775  if (!lparams->c_expr_merged)
5776    {
5777      lparams->c_expr_merged = sparams->c_expr;
5778      sparams->c_expr = lparams->c_expr_local;
5779    }
5780  else
5781    {
      /* We must merge all found expressions to get reasonable
	 EXPR_SPEC_DONE_DS for the resulting insn.  If we don't
	 do so, then we might first find the expr with an epsilon
	 speculation success probability and only then the one with
	 a good probability.  As a result the insn will get an epsilon
	 probability and will never be scheduled because of
	 weakness_cutoff in find_best_expr.

	 We call merge_expr_data here instead of merge_expr
	 because due to speculation C_EXPR and X may have the
	 same insns with different speculation types.  And as of
	 now such insns are considered non-equal.

	 However, EXPR_SCHED_TIMES is different -- we must get
	 SCHED_TIMES from a real insn, not a bookkeeping copy.
	 We force this here.  Instead, we may consider merging
	 SCHED_TIMES to the maximum instead of the minimum in the
	 function below.  */
5800      int old_times = EXPR_SCHED_TIMES (lparams->c_expr_merged);
5801
5802      merge_expr_data (lparams->c_expr_merged, sparams->c_expr, NULL);
5803      if (EXPR_SCHED_TIMES (sparams->c_expr) == 0)
5804	EXPR_SCHED_TIMES (lparams->c_expr_merged) = old_times;
5805
5806      clear_expr (sparams->c_expr);
5807    }
5808}
5809
/* Add used regs for the successor SUCC into SPARAMS->USED_REGS.

   SUCC is one of the SUCCS_NORMAL successors of INSN,
   MOVEOP_DRV_CALL_RES is the result of calling code_motion_path_driver on
     SUCC, or 0 if SUCC is one of SUCCS_BACK or SUCCS_OUT.
   STATIC_PARAMS contains the USED_REGS set.  */
5816static void
5817fur_merge_succs (insn_t insn ATTRIBUTE_UNUSED, insn_t succ,
5818		 int moveop_drv_call_res,
5819		 cmpd_local_params_p lparams ATTRIBUTE_UNUSED,
5820		 void *static_params)
5821{
5822  regset succ_live;
5823  fur_static_params_p sparams = (fur_static_params_p) static_params;
5824
5825  /* Here we compute live regsets only for branches that do not lie
5826     on the code motion paths.  These branches correspond to value
5827     MOVEOP_DRV_CALL_RES==0 and include SUCCS_BACK and SUCCS_OUT, though
5828     for such branches code_motion_path_driver is not called.  */
5829  if (moveop_drv_call_res != 0)
5830    return;
5831
5832  /* Mark all registers that do not meet the following condition:
5833     (3) not live on the other path of any conditional branch
5834     that is passed by the operation, in case original
5835     operations are not present on both paths of the
5836     conditional branch.  */
5837  succ_live = compute_live (succ);
5838  IOR_REG_SET (sparams->used_regs, succ_live);
5839}
5840
/* This function is called after the last successor.  Copies LP->C_EXPR_MERGED
   into SP->C_EXPR.  */
5843static void
5844move_op_after_merge_succs (cmpd_local_params_p lp, void *sparams)
5845{
5846  moveop_static_params_p sp = (moveop_static_params_p) sparams;
5847
5848  sp->c_expr = lp->c_expr_merged;
5849}
5850
5851/* Track bookkeeping copies created, insns scheduled, and blocks for
5852   rescheduling when INSN is found by move_op.  */
5853static void
5854track_scheduled_insns_and_blocks (rtx insn)
5855{
  /* Even if this insn is a copy that will be removed during the current
     move_op, we still need to count it as an originator.  */
5858  bitmap_set_bit (current_originators, INSN_UID (insn));
5859
5860  if (!bitmap_clear_bit (current_copies, INSN_UID (insn)))
5861    {
5862      /* Note that original block needs to be rescheduled, as we pulled an
5863	 instruction out of it.  */
5864      if (INSN_SCHED_TIMES (insn) > 0)
5865	bitmap_set_bit (blocks_to_reschedule, BLOCK_FOR_INSN (insn)->index);
5866      else if (INSN_UID (insn) < first_emitted_uid && !DEBUG_INSN_P (insn))
5867	num_insns_scheduled++;
5868    }
5869
  /* For instructions we must immediately remove the insn from the
     stream, so that a subsequent update_data_sets () won't include this
     insn into av_set.
     For exprs we must make the insn look like "INSN_REG (insn) := c_expr".  */
5874  if (INSN_UID (insn) > max_uid_before_move_op)
5875    stat_bookkeeping_copies--;
5876}
5877
5878/* Emit a register-register copy for INSN if needed.  Return true if
5879   emitted one.  PARAMS is the move_op static parameters.  */
5880static bool
5881maybe_emit_renaming_copy (rtx_insn *insn,
5882                          moveop_static_params_p params)
5883{
5884  bool insn_emitted  = false;
5885  rtx cur_reg;
5886
  /* Bail out early when the expression cannot be renamed at all.  */
5888  if (!EXPR_SEPARABLE_P (params->c_expr))
5889    return false;
5890
5891  cur_reg = expr_dest_reg (params->c_expr);
5892  gcc_assert (cur_reg && params->dest && REG_P (params->dest));
5893
  /* If the original operation has an expr and the register chosen for
     that expr is not the original operation's dest reg, substitute the
     operation's right hand side with the register chosen.  */
5897  if (REGNO (params->dest) != REGNO (cur_reg))
5898    {
5899      insn_t reg_move_insn, reg_move_insn_rtx;
5900
5901      reg_move_insn_rtx = create_insn_rtx_with_rhs (INSN_VINSN (insn),
5902                                                    params->dest);
5903      reg_move_insn = sel_gen_insn_from_rtx_after (reg_move_insn_rtx,
5904                                                   INSN_EXPR (insn),
5905                                                   INSN_SEQNO (insn),
5906                                                   insn);
5907      EXPR_SPEC_DONE_DS (INSN_EXPR (reg_move_insn)) = 0;
5908      replace_dest_with_reg_in_expr (params->c_expr, params->dest);
5909
5910      insn_emitted = true;
5911      params->was_renamed = true;
5912    }
5913
5914  return insn_emitted;
5915}
5916
/* Emit a speculative check for INSN speculated as EXPR if needed.
   Return true if we've emitted one.  PARAMS is the move_op static
   parameters.  */
5920static bool
5921maybe_emit_speculative_check (rtx_insn *insn, expr_t expr,
5922                              moveop_static_params_p params)
5923{
5924  bool insn_emitted = false;
5925  insn_t x;
5926  ds_t check_ds;
5927
5928  check_ds = get_spec_check_type_for_insn (insn, expr);
5929  if (check_ds != 0)
5930    {
5931      /* A speculation check should be inserted.  */
5932      x = create_speculation_check (params->c_expr, check_ds, insn);
5933      insn_emitted = true;
5934    }
5935  else
5936    {
5937      EXPR_SPEC_DONE_DS (INSN_EXPR (insn)) = 0;
5938      x = insn;
5939    }
5940
5941  gcc_assert (EXPR_SPEC_DONE_DS (INSN_EXPR (x)) == 0
5942              && EXPR_SPEC_TO_CHECK_DS (INSN_EXPR (x)) == 0);
5943  return insn_emitted;
5944}
5945
/* Handle transformations that leave an insn in place of the original
   insn, such as renaming/speculation.  Return true if one of such
   transformations actually happened, and we have emitted this insn.  */
5949static bool
5950handle_emitting_transformations (rtx_insn *insn, expr_t expr,
5951                                 moveop_static_params_p params)
5952{
5953  bool insn_emitted = false;
5954
5955  insn_emitted = maybe_emit_renaming_copy (insn, params);
5956  insn_emitted |= maybe_emit_speculative_check (insn, expr, params);
5957
5958  return insn_emitted;
5959}
5960
/* If INSN is the only insn in the basic block (not counting JUMP,
   which may be a jump to the next insn, and DEBUG_INSNs), we want to
   leave a NOP there till the return to fill_insns.  */
5964
5965static bool
5966need_nop_to_preserve_insn_bb (rtx_insn *insn)
5967{
5968  insn_t bb_head, bb_end, bb_next, in_next;
5969  basic_block bb = BLOCK_FOR_INSN (insn);
5970
5971  bb_head = sel_bb_head (bb);
5972  bb_end = sel_bb_end (bb);
5973
5974  if (bb_head == bb_end)
5975    return true;
5976
5977  while (bb_head != bb_end && DEBUG_INSN_P (bb_head))
5978    bb_head = NEXT_INSN (bb_head);
5979
5980  if (bb_head == bb_end)
5981    return true;
5982
5983  while (bb_head != bb_end && DEBUG_INSN_P (bb_end))
5984    bb_end = PREV_INSN (bb_end);
5985
5986  if (bb_head == bb_end)
5987    return true;
5988
5989  bb_next = NEXT_INSN (bb_head);
5990  while (bb_next != bb_end && DEBUG_INSN_P (bb_next))
5991    bb_next = NEXT_INSN (bb_next);
5992
5993  if (bb_next == bb_end && JUMP_P (bb_end))
5994    return true;
5995
5996  in_next = NEXT_INSN (insn);
5997  while (DEBUG_INSN_P (in_next))
5998    in_next = NEXT_INSN (in_next);
5999
6000  if (IN_CURRENT_FENCE_P (in_next))
6001    return true;
6002
6003  return false;
6004}
6005
6006/* Remove INSN from stream.  When ONLY_DISCONNECT is true, its data
6007   is not removed but reused when INSN is re-emitted.  */
6008static void
6009remove_insn_from_stream (rtx_insn *insn, bool only_disconnect)
6010{
  /* If there's only one insn in the BB, make sure that a nop is
     inserted into it, so the basic block won't disappear when we
     delete INSN below with sel_remove_insn.  It should also survive
     till the return to fill_insns.  */
6015  if (need_nop_to_preserve_insn_bb (insn))
6016    {
6017      insn_t nop = get_nop_from_pool (insn);
6018      gcc_assert (INSN_NOP_P (nop));
6019      vec_temp_moveop_nops.safe_push (nop);
6020    }
6021
6022  sel_remove_insn (insn, only_disconnect, false);
6023}
6024
/* This function is called when the original expr is found.
   INSN - the current insn traversed, EXPR - the corresponding expr found.
   LPARAMS is the local parameters of the code motion driver, STATIC_PARAMS
   are the static parameters of move_op.  */
6029static void
6030move_op_orig_expr_found (insn_t insn, expr_t expr,
6031                         cmpd_local_params_p lparams ATTRIBUTE_UNUSED,
6032                         void *static_params)
6033{
6034  bool only_disconnect;
6035  moveop_static_params_p params = (moveop_static_params_p) static_params;
6036
6037  copy_expr_onside (params->c_expr, INSN_EXPR (insn));
6038  track_scheduled_insns_and_blocks (insn);
6039  handle_emitting_transformations (insn, expr, params);
6040  only_disconnect = params->uid == INSN_UID (insn);
6041
6042  /* Mark that we've disconnected an insn.  */
6043  if (only_disconnect)
6044    params->uid = -1;
6045  remove_insn_from_stream (insn, only_disconnect);
6046}
6047
/* The function is called when the original expr is found.
   INSN - the current insn traversed, EXPR - the corresponding expr found;
   crosses_call and original_insns in STATIC_PARAMS are updated.  */
6051static void
6052fur_orig_expr_found (insn_t insn, expr_t expr ATTRIBUTE_UNUSED,
6053                     cmpd_local_params_p lparams ATTRIBUTE_UNUSED,
6054                     void *static_params)
6055{
6056  fur_static_params_p params = (fur_static_params_p) static_params;
6057  regset tmp;
6058
6059  if (CALL_P (insn))
6060    params->crosses_call = true;
6061
6062  def_list_add (params->original_insns, insn, params->crosses_call);
6063
6064  /* Mark the registers that do not meet the following condition:
6065    (2) not among the live registers of the point
6066	immediately following the first original operation on
6067	a given downward path, except for the original target
6068	register of the operation.  */
6069  tmp = get_clear_regset_from_pool ();
6070  compute_live_below_insn (insn, tmp);
6071  AND_COMPL_REG_SET (tmp, INSN_REG_SETS (insn));
6072  AND_COMPL_REG_SET (tmp, INSN_REG_CLOBBERS (insn));
6073  IOR_REG_SET (params->used_regs, tmp);
6074  return_regset_to_pool (tmp);
6075
  /* (*1) We need to add to USED_REGS registers that are read by
     INSN's lhs.  This may lead to choosing a wrong src register.
     E.g. (scheduling const expr enabled):
6079
6080	429: ax=0x0	<- Can't use AX for this expr (0x0)
6081	433: dx=[bp-0x18]
6082	427: [ax+dx+0x1]=ax
6083	  REG_DEAD: ax
6084	168: di=dx
6085	  REG_DEAD: dx
6086     */
6087  /* FIXME: see comment above and enable MEM_P
6088     in vinsn_separable_p.  */
6089  gcc_assert (!VINSN_SEPARABLE_P (INSN_VINSN (insn))
6090	      || !MEM_P (INSN_LHS (insn)));
6091}
6092
/* This function is called on the ascending pass, before returning from
   the current basic block.  */
6095static void
6096move_op_at_first_insn (insn_t insn, cmpd_local_params_p lparams,
6097                       void *static_params)
6098{
6099  moveop_static_params_p sparams = (moveop_static_params_p) static_params;
6100  basic_block book_block = NULL;
6101
6102  /* When we have removed the boundary insn for scheduling, which also
6103     happened to be the end insn in its bb, we don't need to update sets.  */
6104  if (!lparams->removed_last_insn
6105      && lparams->e1
6106      && sel_bb_head_p (insn))
6107    {
6108      /* We should generate bookkeeping code only if we are not at the
6109         top level of the move_op.  */
6110      if (sel_num_cfg_preds_gt_1 (insn))
6111        book_block = generate_bookkeeping_insn (sparams->c_expr,
6112                                                lparams->e1, lparams->e2);
6113      /* Update data sets for the current insn.  */
6114      update_data_sets (insn);
6115    }
6116
  /* If bookkeeping code was inserted, we need to update the av sets of the
     basic block that received the bookkeeping.  After generation of the
     bookkeeping insn, the bookkeeping block does not contain a valid av set,
     because we are not following the original algorithm in every detail with
     regard to e.g. renaming simple reg-reg copies.  Consider an example:
6122
6123     bookkeeping block           scheduling fence
6124     \            /
6125      \    join  /
6126       ----------
6127       |        |
6128       ----------
6129      /           \
6130     /             \
6131     r1 := r2          r1 := r3
6132
     We try to schedule insn "r1 := r3" on the current
     scheduling fence.  Also, note that the av set of the bookkeeping block
     contains both insns "r1 := r2" and "r1 := r3".  When the insn has
     been scheduled, the CFG is as follows:
6137
6138     r1 := r3               r1 := r3
6139     bookkeeping block           scheduling fence
6140     \            /
6141      \    join  /
6142       ----------
6143       |        |
6144       ----------
6145      /          \
6146     /            \
6147     r1 := r2
6148
     Here, insn "r1 := r3" was scheduled at the current scheduling point
     and bookkeeping code was generated at the bookkeeping block.  This
     way insn "r1 := r2" is no longer available as a whole instruction
     (but only as an expr) ahead of insn "r1 := r3" in the bookkeeping block.
     This situation is handled by calling update_data_sets.
6154
6155     Since update_data_sets is called only on the bookkeeping block, and
6156     it also may have predecessors with av_sets, containing instructions that
6157     are no longer available, we save all such expressions that become
6158     unavailable during data sets update on the bookkeeping block in
6159     VEC_BOOKKEEPING_BLOCKED_VINSNS.  Later we avoid selecting such
6160     expressions for scheduling.  This allows us to avoid recomputation of
6161     av_sets outside the code motion path.  */
6162
6163  if (book_block)
6164    update_and_record_unavailable_insns (book_block);
6165
6166  /* If INSN was previously marked for deletion, it's time to do it.  */
6167  if (lparams->removed_last_insn)
6168    insn = PREV_INSN (insn);
6169
6170  /* Do not tidy control flow at the topmost moveop, as we can erroneously
6171     kill a block with a single nop in which the insn should be emitted.  */
6172  if (lparams->e1)
6173    tidy_control_flow (BLOCK_FOR_INSN (insn), true);
6174}
6175
6176/* This function is called on the ascending pass, before returning from the
6177   current basic block.  */
6178static void
6179fur_at_first_insn (insn_t insn,
6180                   cmpd_local_params_p lparams ATTRIBUTE_UNUSED,
6181                   void *static_params ATTRIBUTE_UNUSED)
6182{
6183  gcc_assert (!sel_bb_head_p (insn) || AV_SET_VALID_P (insn)
6184	      || AV_LEVEL (insn) == -1);
6185}
6186
6187/* Called on the backward stage of recursion to call moveup_expr for insn
6188   and sparams->c_expr.  */
6189static void
6190move_op_ascend (insn_t insn, void *static_params)
6191{
6192  enum MOVEUP_EXPR_CODE res;
6193  moveop_static_params_p sparams = (moveop_static_params_p) static_params;
6194
6195  if (! INSN_NOP_P (insn))
6196    {
6197      res = moveup_expr_cached (sparams->c_expr, insn, false);
6198      gcc_assert (res != MOVEUP_EXPR_NULL);
6199    }
6200
6201  /* Update liveness for this insn as it was invalidated.  */
6202  update_liveness_on_insn (insn);
6203}
6204
/* This function is called on entry to the basic block.
   Returns TRUE if this block has already been visited and
   code_motion_path_driver should return 1, FALSE otherwise.  */
6208static int
6209fur_on_enter (insn_t insn ATTRIBUTE_UNUSED, cmpd_local_params_p local_params,
6210	      void *static_params, bool visited_p)
6211{
6212  fur_static_params_p sparams = (fur_static_params_p) static_params;
6213
6214  if (visited_p)
6215    {
6216      /* If we have found something below this block, there should be at
6217	 least one insn in ORIGINAL_INSNS.  */
6218      gcc_assert (*sparams->original_insns);
6219
      /* Adjust CROSSES_CALL, since we may have come to this block along
	 a different path.  */
6222      DEF_LIST_DEF (*sparams->original_insns)->crosses_call
6223	  |= sparams->crosses_call;
6224    }
6225  else
6226    local_params->old_original_insns = *sparams->original_insns;
6227
6228  return 1;
6229}
6230
6231/* Same as above but for move_op.   */
6232static int
6233move_op_on_enter (insn_t insn ATTRIBUTE_UNUSED,
6234                  cmpd_local_params_p local_params ATTRIBUTE_UNUSED,
6235                  void *static_params ATTRIBUTE_UNUSED, bool visited_p)
6236{
6237  if (visited_p)
6238    return -1;
6239  return 1;
6240}
6241
/* This function is called while descending the current basic block if the
   current insn is not the original EXPR we're searching for.
6244
6245   Return value: FALSE, if code_motion_path_driver should perform a local
6246			cleanup and return 0 itself;
6247		 TRUE, if code_motion_path_driver should continue.  */
6248static bool
6249move_op_orig_expr_not_found (insn_t insn, av_set_t orig_ops ATTRIBUTE_UNUSED,
6250			    void *static_params)
6251{
6252  moveop_static_params_p sparams = (moveop_static_params_p) static_params;
6253
6254#ifdef ENABLE_CHECKING
6255  sparams->failed_insn = insn;
6256#endif
6257
  /* If we're scheduling a separable expr, in order to generate correct code
     we need to stop the search at bookkeeping code generated with the
     same destination register or memory.  */
6261  if (lhs_of_insn_equals_to_dest_p (insn, sparams->dest))
6262    return false;
6263  return true;
6264}
6265
/* This function is called while descending the current basic block if the
   current insn is not the original EXPR we're searching for.
6268
6269   Return value: TRUE (code_motion_path_driver should continue).  */
6270static bool
6271fur_orig_expr_not_found (insn_t insn, av_set_t orig_ops, void *static_params)
6272{
6273  bool mutexed;
6274  expr_t r;
6275  av_set_iterator avi;
6276  fur_static_params_p sparams = (fur_static_params_p) static_params;
6277
6278  if (CALL_P (insn))
6279    sparams->crosses_call = true;
6280  else if (DEBUG_INSN_P (insn))
6281    return true;
6282
  /* If the current insn we are looking at cannot be executed together
     with the original insn, then we can skip it safely.

     Example: ORIG_OPS = { (p6) r14 = sign_extend (r15); }
	      INSN = (!p6) r14 = r14 + 1;

     Here we can schedule ORIG_OP with lhs = r14, though judging
     only by the sets of used and set registers of INSN we would have
     to forbid it.  So, add the set/used registers of INSN to the
     untouchable set only if there is an insn in ORIG_OPS that can
     affect INSN.  */
6294  mutexed = true;
6295  FOR_EACH_EXPR (r, avi, orig_ops)
6296    if (!sched_insns_conditions_mutex_p (insn, EXPR_INSN_RTX (r)))
6297      {
6298	mutexed = false;
6299	break;
6300      }
6301
6302  /* Mark all registers that do not meet the following condition:
6303     (1) Not set or read on any path from xi to an instance of the
6304	 original operation.  */
6305  if (!mutexed)
6306    {
6307      IOR_REG_SET (sparams->used_regs, INSN_REG_SETS (insn));
6308      IOR_REG_SET (sparams->used_regs, INSN_REG_USES (insn));
6309      IOR_REG_SET (sparams->used_regs, INSN_REG_CLOBBERS (insn));
6310    }
6311
6312  return true;
6313}
6314
6315/* Hooks and data to perform move_op operations with code_motion_path_driver.  */
6316struct code_motion_path_driver_info_def move_op_hooks = {
6317  move_op_on_enter,
6318  move_op_orig_expr_found,
6319  move_op_orig_expr_not_found,
6320  move_op_merge_succs,
6321  move_op_after_merge_succs,
6322  move_op_ascend,
6323  move_op_at_first_insn,
6324  SUCCS_NORMAL,
6325  "move_op"
6326};
6327
6328/* Hooks and data to perform find_used_regs operations
6329   with code_motion_path_driver.  */
6330struct code_motion_path_driver_info_def fur_hooks = {
6331  fur_on_enter,
6332  fur_orig_expr_found,
6333  fur_orig_expr_not_found,
6334  fur_merge_succs,
6335  NULL, /* fur_after_merge_succs */
6336  NULL, /* fur_ascend */
6337  fur_at_first_insn,
6338  SUCCS_ALL,
6339  "find_used_regs"
6340};
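
/* Both tables parameterize the same traversal; a caller installs one of
   them and invokes the driver.  A minimal sketch of the protocol (mirroring
   the code visible in move_op () below; find_used_regs uses fur_hooks the
   same way):

     code_motion_path_driver_info = &move_op_hooks;
     bitmap_clear (code_motion_visited_blocks);
     res = code_motion_path_driver (insn, orig_ops, NULL, &lparams, &sparams);

   where LPARAMS and SPARAMS are of the local/static-params flavour
   matching the installed hooks.  */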
6341
/* Traverse all successors of INSN.  For each successor that is SUCCS_NORMAL,
   code_motion_path_driver is called recursively.  The original operation
   was found on at least one path starting with one of INSN's successors
   (this fact is asserted).  ORIG_OPS is the set of expressions we're
   looking for, PATH is the path we've traversed, STATIC_PARAMS is the
   parameters of either move_op or find_used_regs depending on the caller.

   Return 0 if we haven't found the expression, 1 if we found it, and -1 if
   we don't know for sure at this point.  */
6351static int
6352code_motion_process_successors (insn_t insn, av_set_t orig_ops,
6353                                ilist_t path, void *static_params)
6354{
6355  int res = 0;
6356  succ_iterator succ_i;
6357  insn_t succ;
6358  basic_block bb;
6359  int old_index;
6360  unsigned old_succs;
6361
6362  struct cmpd_local_params lparams;
6363  expr_def _x;
6364
6365  lparams.c_expr_local = &_x;
6366  lparams.c_expr_merged = NULL;
6367
6368  /* We need to process only NORMAL succs for move_op, and collect live
6369     registers from ALL branches (including those leading out of the
6370     region) for find_used_regs.
6371
     In move_op, there can be a case when an insn's bb number has changed
     due to created bookkeeping.  This happens very rarely, since it
     requires moving an expression from the beginning to the end of the
     same block.  Rescan successors in this case.  */
6376
6377 rescan:
6378  bb = BLOCK_FOR_INSN (insn);
6379  old_index = bb->index;
6380  old_succs = EDGE_COUNT (bb->succs);
6381
6382  FOR_EACH_SUCC_1 (succ, succ_i, insn, code_motion_path_driver_info->succ_flags)
6383    {
6384      int b;
6385
6386      lparams.e1 = succ_i.e1;
6387      lparams.e2 = succ_i.e2;
6388
6389      /* Go deep into recursion only for NORMAL edges (non-backedges within the
6390	 current region).  */
6391      if (succ_i.current_flags == SUCCS_NORMAL)
6392	b = code_motion_path_driver (succ, orig_ops, path, &lparams,
6393				     static_params);
6394      else
6395	b = 0;
6396
6397      /* Merge c_expres found or unify live register sets from different
6398	 successors.  */
6399      code_motion_path_driver_info->merge_succs (insn, succ, b, &lparams,
6400						 static_params);
6401      if (b == 1)
6402        res = b;
6403      else if (b == -1 && res != 1)
6404        res = b;
6405
      /* We have simplified the control flow below this point.  In this case,
         the iterator becomes invalid.  We need to try again.  If we have
         removed the insn itself, it could only have been an unconditional
         jump.  Thus, do not rescan but break immediately -- we have already
         visited the only successor block.  */
6411      if (!BLOCK_FOR_INSN (insn))
6412	{
6413	  if (sched_verbose >= 6)
6414	    sel_print ("Not doing rescan: already visited the only successor"
6415		       " of block %d\n", old_index);
6416	  break;
6417	}
6418      if (BLOCK_FOR_INSN (insn)->index != old_index
6419          || EDGE_COUNT (bb->succs) != old_succs)
6420        {
6421	  if (sched_verbose >= 6)
6422	    sel_print ("Rescan: CFG was simplified below insn %d, block %d\n",
6423		       INSN_UID (insn), BLOCK_FOR_INSN (insn)->index);
6424          insn = sel_bb_end (BLOCK_FOR_INSN (insn));
6425          goto rescan;
6426        }
6427    }
6428
6429#ifdef ENABLE_CHECKING
  /* Here, RES==1 if the original expr was found at least for one of the
     successors.  After the loop, RES can be zero only if at some point the
     expr searched for is present in an av_set, but is not found below.
     In most cases, this situation is an error.  The exception is when the
     original operation is blocked by bookkeeping generated for another
     fence or for another path in the current move_op.  */
6437  gcc_assert (res == 1
6438	      || (res == 0
6439		  && av_set_could_be_blocked_by_bookkeeping_p (orig_ops,
6440							       static_params))
6441	      || res == -1);
6442#endif
6443
6444  /* Merge data, clean up, etc.  */
6445  if (res != -1 && code_motion_path_driver_info->after_merge_succs)
6446    code_motion_path_driver_info->after_merge_succs (&lparams, static_params);
6447
6448  return res;
6449}
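
/* A worked example of the RES merging above: if three successors return
   B = 0, -1 and 1 in that order, RES evolves 0 -> -1 -> 1, since B == 1
   overrides any earlier value, while B == -1 only sticks as long as no
   successor has returned 1.  */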
6450
6451
6452/* Perform a cleanup when the driver is about to terminate.  ORIG_OPS_P
6453   is the pointer to the av set with expressions we were looking for,
6454   PATH_P is the pointer to the traversed path.  */
6455static inline void
6456code_motion_path_driver_cleanup (av_set_t *orig_ops_p, ilist_t *path_p)
6457{
6458  ilist_remove (path_p);
6459  av_set_clear (orig_ops_p);
6460}
6461
/* The driver function that implements move_op or find_used_regs
   functionality, depending on whether code_motion_path_driver_info is set
   to &MOVE_OP_HOOKS or &FUR_HOOKS.  This function implements the common
   parts of the code (CFG traversal etc.) that are shared between both
   functions.  INSN is the insn we're starting the search from, ORIG_OPS
   are the expressions we're searching for, PATH is the traversed path,
   LOCAL_PARAMS_IN are the local parameters of the driver, and
   STATIC_PARAMS are the static parameters of the caller.

   Returns whether original instructions were found.  Note that the
   top-level code_motion_path_driver always returns true.  */
6473static int
6474code_motion_path_driver (insn_t insn, av_set_t orig_ops, ilist_t path,
6475			 cmpd_local_params_p local_params_in,
6476			 void *static_params)
6477{
6478  expr_t expr = NULL;
6479  basic_block bb = BLOCK_FOR_INSN (insn);
6480  insn_t first_insn, bb_tail, before_first;
6481  bool removed_last_insn = false;
6482
6483  if (sched_verbose >= 6)
6484    {
6485      sel_print ("%s (", code_motion_path_driver_info->routine_name);
6486      dump_insn (insn);
6487      sel_print (",");
6488      dump_av_set (orig_ops);
6489      sel_print (")\n");
6490    }
6491
6492  gcc_assert (orig_ops);
6493
6494  /* If no original operations exist below this insn, return immediately.  */
6495  if (is_ineligible_successor (insn, path))
6496    {
6497      if (sched_verbose >= 6)
6498        sel_print ("Insn %d is ineligible successor\n", INSN_UID (insn));
6499      return false;
6500    }
6501
  /* The block can have an invalid av set, which means the block was
     created earlier during move_op.  Return immediately.  */
6504  if (sel_bb_head_p (insn))
6505    {
6506      if (! AV_SET_VALID_P (insn))
6507        {
6508          if (sched_verbose >= 6)
6509            sel_print ("Returned from block %d as it had invalid av set\n",
6510                       bb->index);
6511          return false;
6512        }
6513
6514      if (bitmap_bit_p (code_motion_visited_blocks, bb->index))
6515        {
          /* We have already found an original operation on this branch, do
             not go any further and just return TRUE here.  If we don't stop
             here, the function can show exponential behaviour even on small
             code with many different paths (e.g. with data speculation and
             recovery blocks).  */
6521          if (sched_verbose >= 6)
6522            sel_print ("Block %d already visited in this traversal\n", bb->index);
6523          if (code_motion_path_driver_info->on_enter)
6524            return code_motion_path_driver_info->on_enter (insn,
6525                                                           local_params_in,
6526                                                           static_params,
6527                                                           true);
6528        }
6529    }
6530
6531  if (code_motion_path_driver_info->on_enter)
6532    code_motion_path_driver_info->on_enter (insn, local_params_in,
6533                                            static_params, false);
6534  orig_ops = av_set_copy (orig_ops);
6535
6536  /* Filter the orig_ops set.  */
6537  if (AV_SET_VALID_P (insn))
6538    av_set_code_motion_filter (&orig_ops, AV_SET (insn));
6539
6540  /* If no more original ops, return immediately.  */
6541  if (!orig_ops)
6542    {
6543      if (sched_verbose >= 6)
6544        sel_print ("No intersection with av set of block %d\n", bb->index);
6545      return false;
6546    }
6547
  /* For non-speculative insns we have to leave only one form of the
     original operation, because if we don't, we may end up with
     different C_EXPRes and, consequently, with bookkeepings for different
     expression forms along the same code motion path.  That may lead to
     generation of incorrect code.  So for each code motion we stick to
     a single form of the instruction, except for speculative insns
     which we need to keep in different forms with all speculation
     types.  */
6556  av_set_leave_one_nonspec (&orig_ops);
6557
6558  /* It is not possible that all ORIG_OPS are filtered out.  */
6559  gcc_assert (orig_ops);
6560
6561  /* It is enough to place only heads and tails of visited basic blocks into
6562     the PATH.  */
6563  ilist_add (&path, insn);
6564  first_insn = insn;
6565  bb_tail = sel_bb_end (bb);
6566
6567  /* Descend the basic block in search of the original expr; this part
6568     corresponds to the part of the original move_op procedure executed
6569     before the recursive call.  */
6570  for (;;)
6571    {
      /* Look at the insn and decide if it could be an ancestor of the
         operation currently being scheduled.  If so, then the insn
         "dest = op" could either be replaced with "dest = reg", because REG
         now holds the result of OP, or just removed, if we've scheduled
         the insn as a whole.

         If this insn doesn't contain the OP currently being scheduled,
         then proceed with searching and look at its successors.  The
         operations we're searching for could have changed when moving up
         through this insn via substituting.  In this case, perform
         unsubstitution on them first.

         When traversing the DAG below this insn is finished, insert
         bookkeeping code, if the insn is a join point, and remove
         leftovers.  */
6585
6586      expr = av_set_lookup (orig_ops, INSN_VINSN (insn));
6587      if (expr)
6588	{
6589	  insn_t last_insn = PREV_INSN (insn);
6590
6591	  /* We have found the original operation.   */
6592          if (sched_verbose >= 6)
6593            sel_print ("Found original operation at insn %d\n", INSN_UID (insn));
6594
6595	  code_motion_path_driver_info->orig_expr_found
6596            (insn, expr, local_params_in, static_params);
6597
6598	  /* Step back, so on the way back we'll start traversing from the
6599	     previous insn (or we'll see that it's bb_note and skip that
6600	     loop).  */
6601          if (insn == first_insn)
6602            {
6603              first_insn = NEXT_INSN (last_insn);
6604              removed_last_insn = sel_bb_end_p (last_insn);
6605            }
6606	  insn = last_insn;
6607	  break;
6608	}
6609      else
6610	{
6611	  /* We haven't found the original expr, continue descending the basic
6612	     block.  */
6613	  if (code_motion_path_driver_info->orig_expr_not_found
6614              (insn, orig_ops, static_params))
6615	    {
6616	      /* Av set ops could have been changed when moving through this
6617	         insn.  To find them below it, we have to un-substitute them.  */
6618	      undo_transformations (&orig_ops, insn);
6619	    }
6620	  else
6621	    {
6622	      /* Clean up and return, if the hook tells us to do so.  It may
6623		 happen if we've encountered the previously created
6624		 bookkeeping.  */
6625	      code_motion_path_driver_cleanup (&orig_ops, &path);
6626	      return -1;
6627	    }
6628
6629	  gcc_assert (orig_ops);
6630        }
6631
6632      /* Stop at insn if we got to the end of BB.  */
6633      if (insn == bb_tail)
6634	break;
6635
6636      insn = NEXT_INSN (insn);
6637    }
6638
6639  /* Here INSN either points to the insn before the original insn (may be
6640     bb_note, if original insn was a bb_head) or to the bb_end.  */
6641  if (!expr)
6642    {
6643      int res;
6644      rtx_insn *last_insn = PREV_INSN (insn);
6645      bool added_to_path;
6646
6647      gcc_assert (insn == sel_bb_end (bb));
6648
      /* Add the bb tail to PATH (but this makes no sense if it's a
         bb_head -- it's already in PATH then).  */
6651      if (insn != first_insn)
6652	{
6653	  ilist_add (&path, insn);
6654	  added_to_path = true;
6655	}
6656      else
6657        added_to_path = false;
6658
6659      /* Process_successors should be able to find at least one
6660	 successor for which code_motion_path_driver returns TRUE.  */
6661      res = code_motion_process_successors (insn, orig_ops,
6662                                            path, static_params);
6663
      /* The jump at the end of the basic block could have been removed or
         replaced during code_motion_process_successors, so recompute insn
         as the last insn in bb.  */
6667      if (NEXT_INSN (last_insn) != insn)
6668        {
6669          insn = sel_bb_end (bb);
6670          first_insn = sel_bb_head (bb);
6671        }
6672
6673      /* Remove bb tail from path.  */
6674      if (added_to_path)
6675	ilist_remove (&path);
6676
6677      if (res != 1)
6678	{
          /* This is the case when one of the original exprs is no longer
             available due to bookkeeping created on this branch with the
             same register.  In the original algorithm, which doesn't have
             the update_data_sets call on a bookkeeping block, it would
             simply result in returning FALSE when we've encountered a
             previously generated bookkeeping insn in
             move_op_orig_expr_not_found.  */
6685	  code_motion_path_driver_cleanup (&orig_ops, &path);
6686	  return res;
6687	}
6688    }
6689
6690  /* Don't need it any more.  */
6691  av_set_clear (&orig_ops);
6692
6693  /* Backward pass: now, when we have C_EXPR computed, we'll drag it to
6694     the beginning of the basic block.  */
6695  before_first = PREV_INSN (first_insn);
6696  while (insn != before_first)
6697    {
6698      if (code_motion_path_driver_info->ascend)
6699	code_motion_path_driver_info->ascend (insn, static_params);
6700
6701      insn = PREV_INSN (insn);
6702    }
6703
6704  /* Now we're at the bb head.  */
6705  insn = first_insn;
6706  ilist_remove (&path);
6707  local_params_in->removed_last_insn = removed_last_insn;
6708  code_motion_path_driver_info->at_first_insn (insn, local_params_in, static_params);
6709
6710  /* This should be the very last operation as at bb head we could change
6711     the numbering by creating bookkeeping blocks.  */
6712  if (removed_last_insn)
6713    insn = PREV_INSN (insn);
6714
6715  /* If we have simplified the control flow and removed the first jump insn,
6716     there's no point in marking this block in the visited blocks bitmap.  */
6717  if (BLOCK_FOR_INSN (insn))
6718    bitmap_set_bit (code_motion_visited_blocks, BLOCK_FOR_INSN (insn)->index);
6719  return true;
6720}
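
/* In outline, the driver above does the following (a simplified sketch;
   HOOKS stands for code_motion_path_driver_info, and dumping and error
   paths are omitted):

     filter ORIG_OPS against AV_SET (insn);
     for (insn down to sel_bb_end (bb))             // forward pass
       if (av_set_lookup (orig_ops, INSN_VINSN (insn)))
         { HOOKS->orig_expr_found (...); break; }
       else if (!HOOKS->orig_expr_not_found (...))
         return -1;
     if (!expr)
       res = code_motion_process_successors (...);
     for (insn back up to the bb head)              // backward pass
       HOOKS->ascend (...);
     HOOKS->at_first_insn (...);  */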
6721
/* Move up the operations from the ORIG_OPS set traversing the DAG starting
   from INSN.  PATH represents the edges traversed so far.
   DEST is the register chosen for scheduling the current expr.  Insert
   bookkeeping code in the join points.  EXPR_VLIW is the chosen expression,
   C_EXPR is how it looks at the given cfg point.
   Set *SHOULD_MOVE to indicate whether we have only disconnected
   one of the insns found.

   Returns whether original instructions were found, which is asserted
   to be true in the caller.  */
6732static bool
6733move_op (insn_t insn, av_set_t orig_ops, expr_t expr_vliw,
6734         rtx dest, expr_t c_expr, bool *should_move)
6735{
6736  struct moveop_static_params sparams;
6737  struct cmpd_local_params lparams;
6738  int res;
6739
6740  /* Init params for code_motion_path_driver.  */
6741  sparams.dest = dest;
6742  sparams.c_expr = c_expr;
6743  sparams.uid = INSN_UID (EXPR_INSN_RTX (expr_vliw));
6744#ifdef ENABLE_CHECKING
6745  sparams.failed_insn = NULL;
6746#endif
6747  sparams.was_renamed = false;
6748  lparams.e1 = NULL;
6749
6750  /* We haven't visited any blocks yet.  */
6751  bitmap_clear (code_motion_visited_blocks);
6752
6753  /* Set appropriate hooks and data.  */
6754  code_motion_path_driver_info = &move_op_hooks;
6755  res = code_motion_path_driver (insn, orig_ops, NULL, &lparams, &sparams);
6756
6757  gcc_assert (res != -1);
6758
6759  if (sparams.was_renamed)
6760    EXPR_WAS_RENAMED (expr_vliw) = true;
6761
6762  *should_move = (sparams.uid == -1);
6763
6764  return res;
6765}
6766
6767
6768/* Functions that work with regions.  */
6769
/* The current seqno used by init_seqno and init_seqno_1.  */
6771static int cur_seqno;
6772
6773/* A helper for init_seqno.  Traverse the region starting from BB and
6774   compute seqnos for visited insns, marking visited bbs in VISITED_BBS.
6775   Clear visited blocks from BLOCKS_TO_RESCHEDULE.  */
6776static void
6777init_seqno_1 (basic_block bb, sbitmap visited_bbs, bitmap blocks_to_reschedule)
6778{
6779  int bbi = BLOCK_TO_BB (bb->index);
6780  insn_t insn, note = bb_note (bb);
6781  insn_t succ_insn;
6782  succ_iterator si;
6783
6784  bitmap_set_bit (visited_bbs, bbi);
6785  if (blocks_to_reschedule)
6786    bitmap_clear_bit (blocks_to_reschedule, bb->index);
6787
6788  FOR_EACH_SUCC_1 (succ_insn, si, BB_END (bb),
6789		   SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
6790    {
6791      basic_block succ = BLOCK_FOR_INSN (succ_insn);
6792      int succ_bbi = BLOCK_TO_BB (succ->index);
6793
6794      gcc_assert (in_current_region_p (succ));
6795
6796      if (!bitmap_bit_p (visited_bbs, succ_bbi))
6797	{
6798	  gcc_assert (succ_bbi > bbi);
6799
6800	  init_seqno_1 (succ, visited_bbs, blocks_to_reschedule);
6801	}
6802      else if (blocks_to_reschedule)
6803        bitmap_set_bit (forced_ebb_heads, succ->index);
6804    }
6805
6806  for (insn = BB_END (bb); insn != note; insn = PREV_INSN (insn))
6807    INSN_SEQNO (insn) = cur_seqno--;
6808}
6809
/* Initialize seqnos for the current region.  BLOCKS_TO_RESCHEDULE contains
   blocks on which we're rescheduling when pipelining, FROM is the block
   where traversal of the region begins (when pipelining, it may be the
   head of the loop instead of the head of the region).

   Returns the maximal seqno found.  */
6816static int
6817init_seqno (bitmap blocks_to_reschedule, basic_block from)
6818{
6819  sbitmap visited_bbs;
6820  bitmap_iterator bi;
6821  unsigned bbi;
6822
6823  visited_bbs = sbitmap_alloc (current_nr_blocks);
6824
6825  if (blocks_to_reschedule)
6826    {
6827      bitmap_ones (visited_bbs);
6828      EXECUTE_IF_SET_IN_BITMAP (blocks_to_reschedule, 0, bbi, bi)
6829        {
6830	  gcc_assert (BLOCK_TO_BB (bbi) < current_nr_blocks);
6831          bitmap_clear_bit (visited_bbs, BLOCK_TO_BB (bbi));
6832	}
6833    }
6834  else
6835    {
6836      bitmap_clear (visited_bbs);
6837      from = EBB_FIRST_BB (0);
6838    }
6839
6840  cur_seqno = sched_max_luid - 1;
6841  init_seqno_1 (from, visited_bbs, blocks_to_reschedule);
6842
6843  /* cur_seqno may be positive if the number of instructions is less than
6844     sched_max_luid - 1 (when rescheduling or if some instructions have been
6845     removed by the call to purge_empty_blocks in sel_sched_region_1).  */
6846  gcc_assert (cur_seqno >= 0);
6847
6848  sbitmap_free (visited_bbs);
6849  return sched_max_luid - 1;
6850}
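
/* A worked example: in a region with blocks A -> B -> C holding 2, 1 and 3
   insns respectively and sched_max_luid == 7, init_seqno_1 recurses into C
   first, so C's insns receive seqnos 6, 5, 4 (assigned from BB_END
   backwards), B's insn receives 3, and A's insns receive 2, 1.  Seqnos thus
   grow in the topological order of the blocks, and the returned maximum is
   sched_max_luid - 1 == 6.  */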
6851
6852/* Initialize scheduling parameters for current region.  */
6853static void
6854sel_setup_region_sched_flags (void)
6855{
6856  enable_schedule_as_rhs_p = 1;
6857  bookkeeping_p = 1;
6858  pipelining_p = (bookkeeping_p
6859                  && (flag_sel_sched_pipelining != 0)
6860		  && current_loop_nest != NULL
6861		  && loop_has_exit_edges (current_loop_nest));
6862  max_insns_to_rename = PARAM_VALUE (PARAM_SELSCHED_INSNS_TO_RENAME);
6863  max_ws = MAX_WS;
6864}
6865
6866/* Return true if all basic blocks of current region are empty.  */
6867static bool
6868current_region_empty_p (void)
6869{
6870  int i;
6871  for (i = 0; i < current_nr_blocks; i++)
6872    if (! sel_bb_empty_p (BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (i))))
6873      return false;
6874
6875  return true;
6876}
6877
6878/* Prepare and verify loop nest for pipelining.  */
6879static void
6880setup_current_loop_nest (int rgn, bb_vec_t *bbs)
6881{
6882  current_loop_nest = get_loop_nest_for_rgn (rgn);
6883
6884  if (!current_loop_nest)
6885    return;
6886
6887  /* If this loop has any saved loop preheaders from nested loops,
6888     add these basic blocks to the current region.  */
6889  sel_add_loop_preheaders (bbs);
6890
  /* Check that we're starting with valid information.  */
6892  gcc_assert (loop_latch_edge (current_loop_nest));
6893  gcc_assert (LOOP_MARKED_FOR_PIPELINING_P (current_loop_nest));
6894}
6895
6896/* Compute instruction priorities for current region.  */
6897static void
6898sel_compute_priorities (int rgn)
6899{
6900  sched_rgn_compute_dependencies (rgn);
6901
6902  /* Compute insn priorities in haifa style.  Then free haifa style
6903     dependencies that we've calculated for this.  */
6904  compute_priorities ();
6905
6906  if (sched_verbose >= 5)
6907    debug_rgn_dependencies (0);
6908
6909  free_rgn_deps ();
6910}
6911
6912/* Init scheduling data for RGN.  Returns true when this region should not
6913   be scheduled.  */
6914static bool
6915sel_region_init (int rgn)
6916{
6917  int i;
6918  bb_vec_t bbs;
6919
6920  rgn_setup_region (rgn);
6921
6922  /* Even if sched_is_disabled_for_current_region_p() is true, we still
6923     do region initialization here so the region can be bundled correctly,
6924     but we'll skip the scheduling in sel_sched_region ().  */
6925  if (current_region_empty_p ())
6926    return true;
6927
6928  bbs.create (current_nr_blocks);
6929
6930  for (i = 0; i < current_nr_blocks; i++)
6931    bbs.quick_push (BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (i)));
6932
6933  sel_init_bbs (bbs);
6934
6935  if (flag_sel_sched_pipelining)
6936    setup_current_loop_nest (rgn, &bbs);
6937
6938  sel_setup_region_sched_flags ();
6939
6940  /* Initialize luids and dependence analysis which both sel-sched and haifa
6941     need.  */
6942  sched_init_luids (bbs);
6943  sched_deps_init (false);
6944
6945  /* Initialize haifa data.  */
6946  rgn_setup_sched_infos ();
6947  sel_set_sched_flags ();
6948  haifa_init_h_i_d (bbs);
6949
6950  sel_compute_priorities (rgn);
6951  init_deps_global ();
6952
6953  /* Main initialization.  */
6954  sel_setup_sched_infos ();
6955  sel_init_global_and_expr (bbs);
6956
6957  bbs.release ();
6958
6959  blocks_to_reschedule = BITMAP_ALLOC (NULL);
6960
6961  /* Init correct liveness sets on each instruction of a single-block loop.
6962     This is the only situation when we can't update liveness when calling
6963     compute_live for the first insn of the loop.  */
6964  if (current_loop_nest)
6965    {
6966      int header =
6967	(sel_is_loop_preheader_p (BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (0)))
6968	 ? 1
6969	 : 0);
6970
6971      if (current_nr_blocks == header + 1)
6972        update_liveness_on_insn
6973          (sel_bb_head (BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (header))));
6974    }
6975
6976  /* Set hooks so that no newly generated insn will go out unnoticed.  */
6977  sel_register_cfg_hooks ();
6978
  /* !!! We call targetm.sched.init () for the whole region, but we invoke
     targetm.sched.finish () for every ebb.  */
6981  if (targetm.sched.init)
6982    /* None of the arguments are actually used in any target.  */
6983    targetm.sched.init (sched_dump, sched_verbose, -1);
6984
6985  first_emitted_uid = get_max_uid () + 1;
6986  preheader_removed = false;
6987
6988  /* Reset register allocation ticks array.  */
6989  memset (reg_rename_tick, 0, sizeof reg_rename_tick);
6990  reg_rename_this_tick = 0;
6991
6992  bitmap_initialize (forced_ebb_heads, 0);
6993  bitmap_clear (forced_ebb_heads);
6994
6995  setup_nop_vinsn ();
6996  current_copies = BITMAP_ALLOC (NULL);
6997  current_originators = BITMAP_ALLOC (NULL);
6998  code_motion_visited_blocks = BITMAP_ALLOC (NULL);
6999
7000  return false;
7001}
7002
7003/* Simplify insns after the scheduling.  */
7004static void
7005simplify_changed_insns (void)
7006{
7007  int i;
7008
7009  for (i = 0; i < current_nr_blocks; i++)
7010    {
7011      basic_block bb = BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (i));
7012      rtx_insn *insn;
7013
7014      FOR_BB_INSNS (bb, insn)
7015	if (INSN_P (insn))
7016	  {
7017	    expr_t expr = INSN_EXPR (insn);
7018
7019	    if (EXPR_WAS_SUBSTITUTED (expr))
7020	      validate_simplify_insn (insn);
7021	  }
7022    }
7023}
7024
/* Find boundaries of the EBB starting from basic block BB, marking blocks of
   this EBB in SCHEDULED_BLOCKS and appropriately filling in the HEAD, TAIL,
   PREV_HEAD, and NEXT_TAIL fields of the CURRENT_SCHED_INFO structure.  */
7028static void
7029find_ebb_boundaries (basic_block bb, bitmap scheduled_blocks)
7030{
7031  rtx_insn *head, *tail;
7032  basic_block bb1 = bb;
7033  if (sched_verbose >= 2)
7034    sel_print ("Finishing schedule in bbs: ");
7035
7036  do
7037    {
7038      bitmap_set_bit (scheduled_blocks, BLOCK_TO_BB (bb1->index));
7039
7040      if (sched_verbose >= 2)
7041	sel_print ("%d; ", bb1->index);
7042    }
7043  while (!bb_ends_ebb_p (bb1) && (bb1 = bb_next_bb (bb1)));
7044
7045  if (sched_verbose >= 2)
7046    sel_print ("\n");
7047
7048  get_ebb_head_tail (bb, bb1, &head, &tail);
7049
7050  current_sched_info->head = head;
7051  current_sched_info->tail = tail;
7052  current_sched_info->prev_head = PREV_INSN (head);
7053  current_sched_info->next_tail = NEXT_INSN (tail);
7054}
7055
7056/* Regenerate INSN_SCHED_CYCLEs for insns of current EBB.  */
7057static void
7058reset_sched_cycles_in_current_ebb (void)
7059{
7060  int last_clock = 0;
7061  int haifa_last_clock = -1;
7062  int haifa_clock = 0;
7063  int issued_insns = 0;
7064  insn_t insn;
7065
7066  if (targetm.sched.init)
7067    {
      /* None of the arguments are actually used in any target.
         NB: We should have an md_reset () hook for cases like this.  */
7070      targetm.sched.init (sched_dump, sched_verbose, -1);
7071    }
7072
7073  state_reset (curr_state);
7074  advance_state (curr_state);
7075
7076  for (insn = current_sched_info->head;
7077       insn != current_sched_info->next_tail;
7078       insn = NEXT_INSN (insn))
7079    {
7080      int cost, haifa_cost;
7081      int sort_p;
7082      bool asm_p, real_insn, after_stall, all_issued;
7083      int clock;
7084
7085      if (!INSN_P (insn))
7086	continue;
7087
7088      asm_p = false;
7089      real_insn = recog_memoized (insn) >= 0;
7090      clock = INSN_SCHED_CYCLE (insn);
7091
7092      cost = clock - last_clock;
7093
7094      /* Initialize HAIFA_COST.  */
7095      if (! real_insn)
7096	{
7097	  asm_p = INSN_ASM_P (insn);
7098
7099	  if (asm_p)
            /* This is an asm insn which *had* to be scheduled first
               on the cycle.  */
7102	    haifa_cost = 1;
7103	  else
7104	    /* This is a use/clobber insn.  It should not change
7105	       cost.  */
7106	    haifa_cost = 0;
7107	}
7108      else
7109        haifa_cost = estimate_insn_cost (insn, curr_state);
7110
7111      /* Stall for whatever cycles we've stalled before.  */
7112      after_stall = 0;
7113      if (INSN_AFTER_STALL_P (insn) && cost > haifa_cost)
7114        {
7115          haifa_cost = cost;
7116          after_stall = 1;
7117        }
7118      all_issued = issued_insns == issue_rate;
7119      if (haifa_cost == 0 && all_issued)
7120	haifa_cost = 1;
7121      if (haifa_cost > 0)
7122	{
7123	  int i = 0;
7124
7125	  while (haifa_cost--)
7126	    {
7127	      advance_state (curr_state);
7128	      issued_insns = 0;
7129              i++;
7130
7131	      if (sched_verbose >= 2)
7132                {
7133                  sel_print ("advance_state (state_transition)\n");
7134                  debug_state (curr_state);
7135                }
7136
              /* The DFA may report that e.g. an insn requires 2 cycles to
                 be issued, but on the next cycle it says that the insn is
                 ready to go.  Check this here.  */
7140              if (!after_stall
7141                  && real_insn
7142                  && haifa_cost > 0
7143                  && estimate_insn_cost (insn, curr_state) == 0)
7144                break;
7145
              /* When the data dependency stall is longer than the DFA stall,
                 and when we have issued exactly issue_rate insns and stalled,
                 it could be that after this longer stall the insn will again
                 become unavailable to the DFA restrictions.  This looks
                 strange but happens e.g. on x86-64.  So recheck the DFA on
                 the last iteration.  */
7152              if ((after_stall || all_issued)
7153                  && real_insn
7154                  && haifa_cost == 0)
7155                haifa_cost = estimate_insn_cost (insn, curr_state);
7156            }
7157
7158	  haifa_clock += i;
7159          if (sched_verbose >= 2)
7160            sel_print ("haifa clock: %d\n", haifa_clock);
7161	}
7162      else
7163	gcc_assert (haifa_cost == 0);
7164
7165      if (sched_verbose >= 2)
7166	sel_print ("Haifa cost for insn %d: %d\n", INSN_UID (insn), haifa_cost);
7167
7168      if (targetm.sched.dfa_new_cycle)
7169	while (targetm.sched.dfa_new_cycle (sched_dump, sched_verbose, insn,
7170					    haifa_last_clock, haifa_clock,
7171					    &sort_p))
7172	  {
7173	    advance_state (curr_state);
7174	    issued_insns = 0;
7175	    haifa_clock++;
7176	    if (sched_verbose >= 2)
7177              {
7178                sel_print ("advance_state (dfa_new_cycle)\n");
7179                debug_state (curr_state);
7180		sel_print ("haifa clock: %d\n", haifa_clock + 1);
7181              }
7182          }
7183
7184      if (real_insn)
7185	{
7186	  static state_t temp = NULL;
7187
7188	  if (!temp)
7189	    temp = xmalloc (dfa_state_size);
7190	  memcpy (temp, curr_state, dfa_state_size);
7191
7192	  cost = state_transition (curr_state, insn);
7193	  if (memcmp (temp, curr_state, dfa_state_size))
7194	    issued_insns++;
7195
7196          if (sched_verbose >= 2)
7197	    {
7198	      sel_print ("scheduled insn %d, clock %d\n", INSN_UID (insn),
7199			 haifa_clock + 1);
7200              debug_state (curr_state);
7201	    }
7202	  gcc_assert (cost < 0);
7203	}
7204
7205      if (targetm.sched.variable_issue)
7206	targetm.sched.variable_issue (sched_dump, sched_verbose, insn, 0);
7207
7208      INSN_SCHED_CYCLE (insn) = haifa_clock;
7209
7210      last_clock = clock;
7211      haifa_last_clock = haifa_clock;
7212    }
7213}
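
/* For example (a sketch): if two insns were recorded with INSN_SCHED_CYCLEs
   1 and 3 (a two-cycle gap), but the emulated DFA reports that the second
   one is ready after a single advance_state (estimate_insn_cost == 1), the
   insns are re-stamped with consecutive haifa clocks and the gap shrinks
   to one cycle -- unless INSN_AFTER_STALL_P forces the original stall to
   be kept.  */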
7214
7215/* Put TImode markers on insns starting a new issue group.  */
7216static void
7217put_TImodes (void)
7218{
7219  int last_clock = -1;
7220  insn_t insn;
7221
7222  for (insn = current_sched_info->head; insn != current_sched_info->next_tail;
7223       insn = NEXT_INSN (insn))
7224    {
7225      int cost, clock;
7226
7227      if (!INSN_P (insn))
7228	continue;
7229
7230      clock = INSN_SCHED_CYCLE (insn);
7231      cost = (last_clock == -1) ? 1 : clock - last_clock;
7232
7233      gcc_assert (cost >= 0);
7234
7235      if (issue_rate > 1
7236	  && GET_CODE (PATTERN (insn)) != USE
7237	  && GET_CODE (PATTERN (insn)) != CLOBBER)
7238	{
7239	  if (reload_completed && cost > 0)
7240	    PUT_MODE (insn, TImode);
7241
7242	  last_clock = clock;
7243	}
7244
7245      if (sched_verbose >= 2)
7246	sel_print ("Cost for insn %d is %d\n", INSN_UID (insn), cost);
7247    }
7248}
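
/* A worked example, assuming issue_rate > 1, reload_completed, and no
   USE/CLOBBER insns: insns whose INSN_SCHED_CYCLEs are 1, 1, 2, 2, 3 get
   costs 1, 0, 1, 0, 1, so the first insn of every cycle (cost > 0) is
   given TImode and thereby marks the start of a new issue group.  */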
7249
/* Perform MD_FINISH on the EBBs comprising the current region.  When
   RESET_SCHED_CYCLES_P is true, run a pass emulating the scheduler to
   produce correct sched cycles on insns.  */
7253static void
7254sel_region_target_finish (bool reset_sched_cycles_p)
7255{
7256  int i;
7257  bitmap scheduled_blocks = BITMAP_ALLOC (NULL);
7258
7259  for (i = 0; i < current_nr_blocks; i++)
7260    {
7261      if (bitmap_bit_p (scheduled_blocks, i))
7262	continue;
7263
7264      /* While pipelining outer loops, skip bundling for loop
7265	 preheaders.  Those will be rescheduled in the outer loop.  */
7266      if (sel_is_loop_preheader_p (EBB_FIRST_BB (i)))
7267	continue;
7268
7269      find_ebb_boundaries (EBB_FIRST_BB (i), scheduled_blocks);
7270
7271      if (no_real_insns_p (current_sched_info->head, current_sched_info->tail))
7272	continue;
7273
7274      if (reset_sched_cycles_p)
7275	reset_sched_cycles_in_current_ebb ();
7276
7277      if (targetm.sched.init)
7278	targetm.sched.init (sched_dump, sched_verbose, -1);
7279
7280      put_TImodes ();
7281
7282      if (targetm.sched.finish)
7283	{
7284	  targetm.sched.finish (sched_dump, sched_verbose);
7285
7286	  /* Extend luids so that insns generated by the target will
7287	     get zero luid.  */
7288	  sched_extend_luids ();
7289	}
7290    }
7291
7292  BITMAP_FREE (scheduled_blocks);
7293}
7294
/* Free the scheduling data for the current region.  When RESET_SCHED_CYCLES_P
   is true, make an additional pass emulating the scheduler to get correct
   insn cycles for the md_finish calls.  */
7298static void
7299sel_region_finish (bool reset_sched_cycles_p)
7300{
7301  simplify_changed_insns ();
7302  sched_finish_ready_list ();
7303  free_nop_pool ();
7304
7305  /* Free the vectors.  */
7306  vec_av_set.release ();
7307  BITMAP_FREE (current_copies);
7308  BITMAP_FREE (current_originators);
7309  BITMAP_FREE (code_motion_visited_blocks);
7310  vinsn_vec_free (vec_bookkeeping_blocked_vinsns);
7311  vinsn_vec_free (vec_target_unavailable_vinsns);
7312
7313  /* If LV_SET of the region head should be updated, do it now because
7314     there will be no other chance.  */
7315  {
7316    succ_iterator si;
7317    insn_t insn;
7318
7319    FOR_EACH_SUCC_1 (insn, si, bb_note (EBB_FIRST_BB (0)),
7320                     SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
7321      {
7322	basic_block bb = BLOCK_FOR_INSN (insn);
7323
7324	if (!BB_LV_SET_VALID_P (bb))
7325	  compute_live (insn);
7326      }
7327  }
7328
7329  /* Emulate the Haifa scheduler for bundling.  */
7330  if (reload_completed)
7331    sel_region_target_finish (reset_sched_cycles_p);
7332
7333  sel_finish_global_and_expr ();
7334
7335  bitmap_clear (forced_ebb_heads);
7336
7337  free_nop_vinsn ();
7338
7339  finish_deps_global ();
7340  sched_finish_luids ();
7341  h_d_i_d.release ();
7342
7343  sel_finish_bbs ();
7344  BITMAP_FREE (blocks_to_reschedule);
7345
7346  sel_unregister_cfg_hooks ();
7347
7348  max_issue_size = 0;
7349}
7350
7351
7352/* Functions that implement the scheduler driver.  */
7353
/* Schedule a parallel instruction group on each of FENCES.  MAX_SEQNO
   is the current maximum seqno.  SCHEDULED_INSNS_TAILPP points to the tail
   of the list of scheduled insns; these will be postprocessed later.  */
7357static void
7358schedule_on_fences (flist_t fences, int max_seqno,
7359                    ilist_t **scheduled_insns_tailpp)
7360{
7361  flist_t old_fences = fences;
7362
7363  if (sched_verbose >= 1)
7364    {
7365      sel_print ("\nScheduling on fences: ");
7366      dump_flist (fences);
7367      sel_print ("\n");
7368    }
7369
7370  scheduled_something_on_previous_fence = false;
7371  for (; fences; fences = FLIST_NEXT (fences))
7372    {
7373      fence_t fence = NULL;
7374      int seqno = 0;
7375      flist_t fences2;
7376      bool first_p = true;
7377
      /* Choose the next fence group to schedule.
         The fact that an insn can be scheduled only once
         per cycle is guaranteed by two properties:
         1. seqnos of parallel groups decrease with each iteration.
         2. If is_ineligible_successor () sees the larger seqno, it
         checks whether the candidate insn is_in_current_fence_p ().  */
7384      for (fences2 = old_fences; fences2; fences2 = FLIST_NEXT (fences2))
7385        {
7386          fence_t f = FLIST_FENCE (fences2);
7387
7388          if (!FENCE_PROCESSED_P (f))
7389            {
7390              int i = INSN_SEQNO (FENCE_INSN (f));
7391
7392              if (first_p || i > seqno)
7393                {
7394                  seqno = i;
7395                  fence = f;
7396                  first_p = false;
7397                }
7398              else
7399                /* ??? Seqnos of different groups should be different.  */
7400                gcc_assert (1 || i != seqno);
7401            }
7402        }
7403
7404      gcc_assert (fence);
7405
7406      /* As FENCE is nonnull, SEQNO is initialized.  */
7407      seqno -= max_seqno + 1;
7408      fill_insns (fence, seqno, scheduled_insns_tailpp);
7409      FENCE_PROCESSED_P (fence) = true;
7410    }
7411
  /* All av_sets are invalidated by the GLOBAL_LEVEL increase, so we don't
     need to keep the bookkeeping-invalidated and target-unavailable vinsns
     any more.  */
7415  vinsn_vec_clear (&vec_bookkeeping_blocked_vinsns);
7416  vinsn_vec_clear (&vec_target_unavailable_vinsns);
7417}
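
/* For example (a sketch): if the unprocessed fences carry FENCE_INSNs with
   seqnos 3, 7 and 5, the inner loop above picks the fence with seqno 7
   first, then 5, then 3; each is passed to fill_insns with the negative
   value SEQNO - (MAX_SEQNO + 1), so insns scheduled in this pass get
   negative seqnos (cf. the assert in update_seqnos_and_stage below).  */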
7418
7419/* Calculate MIN_SEQNO and MAX_SEQNO.  */
7420static void
7421find_min_max_seqno (flist_t fences, int *min_seqno, int *max_seqno)
7422{
7423  *min_seqno = *max_seqno = INSN_SEQNO (FENCE_INSN (FLIST_FENCE (fences)));
7424
7425  /* The first element is already processed.  */
7426  while ((fences = FLIST_NEXT (fences)))
7427    {
7428      int seqno = INSN_SEQNO (FENCE_INSN (FLIST_FENCE (fences)));
7429
7430      if (*min_seqno > seqno)
7431        *min_seqno = seqno;
7432      else if (*max_seqno < seqno)
7433        *max_seqno = seqno;
7434    }
7435}
7436
7437/* Calculate new fences from FENCES.  Write the current time to PTIME.  */
7438static flist_t
7439calculate_new_fences (flist_t fences, int orig_max_seqno, int *ptime)
7440{
7441  flist_t old_fences = fences;
7442  struct flist_tail_def _new_fences, *new_fences = &_new_fences;
7443  int max_time = 0;
7444
7445  flist_tail_init (new_fences);
7446  for (; fences; fences = FLIST_NEXT (fences))
7447    {
7448      fence_t fence = FLIST_FENCE (fences);
7449      insn_t insn;
7450
7451      if (!FENCE_BNDS (fence))
7452        {
7453          /* This fence doesn't have any successors.  */
7454          if (!FENCE_SCHEDULED_P (fence))
7455            {
7456              /* Nothing was scheduled on this fence.  */
7457              int seqno;
7458
7459              insn = FENCE_INSN (fence);
7460              seqno = INSN_SEQNO (insn);
7461              gcc_assert (seqno > 0 && seqno <= orig_max_seqno);
7462
7463              if (sched_verbose >= 1)
7464                sel_print ("Fence %d[%d] has not changed\n",
7465                           INSN_UID (insn),
7466                           BLOCK_NUM (insn));
7467              move_fence_to_fences (fences, new_fences);
7468            }
7469        }
7470      else
7471        extract_new_fences_from (fences, new_fences, orig_max_seqno);
7472      max_time = MAX (max_time, FENCE_CYCLE (fence));
7473    }
7474
7475  flist_clear (&old_fences);
7476  *ptime = max_time;
7477  return FLIST_TAIL_HEAD (new_fences);
7478}
7479
/* Update seqnos of insns given by PSCHEDULED_INSNS.  MIN_SEQNO and MAX_SEQNO
   are the minimum and maximum seqnos of the group, HIGHEST_SEQNO_IN_USE is
   the highest seqno used in a region.  Return the updated highest seqno.  */
7483static int
7484update_seqnos_and_stage (int min_seqno, int max_seqno,
7485                         int highest_seqno_in_use,
7486                         ilist_t *pscheduled_insns)
7487{
7488  int new_hs;
7489  ilist_iterator ii;
7490  insn_t insn;
7491
  /* Actually, new_hs is the updated seqno of the instruction that was
     scheduled first (i.e. it is the first one in SCHEDULED_INSNS).  */
7494  if (*pscheduled_insns)
7495    {
7496      new_hs = (INSN_SEQNO (ILIST_INSN (*pscheduled_insns))
7497                + highest_seqno_in_use + max_seqno - min_seqno + 2);
7498      gcc_assert (new_hs > highest_seqno_in_use);
7499    }
7500  else
7501    new_hs = highest_seqno_in_use;
7502
7503  FOR_EACH_INSN (insn, ii, *pscheduled_insns)
7504    {
7505      gcc_assert (INSN_SEQNO (insn) < 0);
7506      INSN_SEQNO (insn) += highest_seqno_in_use + max_seqno - min_seqno + 2;
7507      gcc_assert (INSN_SEQNO (insn) <= new_hs);
7508
      /* When not pipelining, purge unneeded insn info on the scheduled insns.
         For example, keeping the reg_last array of INSN_DEPS_CONTEXT in
         memory may require more than 1GB e.g. on limit-fnargs.c.  */
7512      if (! pipelining_p)
7513        free_data_for_scheduled_insn (insn);
7514    }
7515
7516  ilist_clear (pscheduled_insns);
7517  global_level++;
7518
7519  return new_hs;
7520}
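
/* A worked example of the renumbering above: with MIN_SEQNO == 3,
   MAX_SEQNO == 7 and HIGHEST_SEQNO_IN_USE == 10, the offset is
   10 + 7 - 3 + 2 == 16, so a scheduled insn holding the temporary
   (negative) seqno -5 ends up with -5 + 16 == 11, strictly above the old
   highest seqno as the asserts above require.  */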
7521
7522/* The main driver for scheduling a region.  This function is responsible
7523   for correct propagation of fences (i.e. scheduling points) and creating
7524   a group of parallel insns at each of them.  It also supports
7525   pipelining.  ORIG_MAX_SEQNO is the maximal seqno before this pass
7526   of scheduling.  */
7527static void
7528sel_sched_region_2 (int orig_max_seqno)
7529{
7530  int highest_seqno_in_use = orig_max_seqno;
7531  int max_time = 0;
7532
7533  stat_bookkeeping_copies = 0;
7534  stat_insns_needed_bookkeeping = 0;
7535  stat_renamed_scheduled = 0;
7536  stat_substitutions_total = 0;
7537  num_insns_scheduled = 0;
7538
7539  while (fences)
7540    {
7541      int min_seqno, max_seqno;
7542      ilist_t scheduled_insns = NULL;
7543      ilist_t *scheduled_insns_tailp = &scheduled_insns;
7544
7545      find_min_max_seqno (fences, &min_seqno, &max_seqno);
7546      schedule_on_fences (fences, max_seqno, &scheduled_insns_tailp);
7547      fences = calculate_new_fences (fences, orig_max_seqno, &max_time);
7548      highest_seqno_in_use = update_seqnos_and_stage (min_seqno, max_seqno,
7549                                                      highest_seqno_in_use,
7550                                                      &scheduled_insns);
7551    }
7552
7553  if (sched_verbose >= 1)
7554    {
7555      sel_print ("Total scheduling time: %d cycles\n", max_time);
7556      sel_print ("Scheduled %d bookkeeping copies, %d insns needed "
7557		 "bookkeeping, %d insns renamed, %d insns substituted\n",
7558		 stat_bookkeeping_copies,
7559		 stat_insns_needed_bookkeeping,
7560		 stat_renamed_scheduled,
7561		 stat_substitutions_total);
7562    }
7563}
7564
/* Schedule a region.  When pipelining, search for possibly-never-scheduled
   bookkeeping code and schedule it.  Afterwards, reschedule the pipelined
   code without pipelining.  */
7568static void
7569sel_sched_region_1 (void)
7570{
7571  int orig_max_seqno;
7572
7573  /* Remove empty blocks that might be in the region from the beginning.  */
7574  purge_empty_blocks ();
7575
7576  orig_max_seqno = init_seqno (NULL, NULL);
7577  gcc_assert (orig_max_seqno >= 1);
7578
7579  /* When pipelining outer loops, create fences on the loop header,
7580     not preheader.  */
7581  fences = NULL;
7582  if (current_loop_nest)
7583    init_fences (BB_END (EBB_FIRST_BB (0)));
7584  else
7585    init_fences (bb_note (EBB_FIRST_BB (0)));
7586  global_level = 1;
7587
7588  sel_sched_region_2 (orig_max_seqno);
7589
7590  gcc_assert (fences == NULL);
7591
7592  if (pipelining_p)
7593    {
7594      int i;
7595      basic_block bb;
7596      struct flist_tail_def _new_fences;
7597      flist_tail_t new_fences = &_new_fences;
7598      bool do_p = true;
7599
7600      pipelining_p = false;
7601      max_ws = MIN (max_ws, issue_rate * 3 / 2);
7602      bookkeeping_p = false;
7603      enable_schedule_as_rhs_p = false;
7604
      /* Schedule newly created code that has not been scheduled yet.  */
7606      do_p = true;
7607
7608      while (do_p)
7609        {
7610          do_p = false;
7611
7612          for (i = 0; i < current_nr_blocks; i++)
7613            {
7614              basic_block bb = EBB_FIRST_BB (i);
7615
7616              if (bitmap_bit_p (blocks_to_reschedule, bb->index))
7617                {
7618                  if (! bb_ends_ebb_p (bb))
7619                    bitmap_set_bit (blocks_to_reschedule, bb_next_bb (bb)->index);
7620                  if (sel_bb_empty_p (bb))
7621                    {
7622                      bitmap_clear_bit (blocks_to_reschedule, bb->index);
7623                      continue;
7624                    }
7625                  clear_outdated_rtx_info (bb);
7626                  if (sel_insn_is_speculation_check (BB_END (bb))
7627                      && JUMP_P (BB_END (bb)))
7628                    bitmap_set_bit (blocks_to_reschedule,
7629                                    BRANCH_EDGE (bb)->dest->index);
7630                }
7631              else if (! sel_bb_empty_p (bb)
7632                       && INSN_SCHED_TIMES (sel_bb_head (bb)) <= 0)
7633                bitmap_set_bit (blocks_to_reschedule, bb->index);
7634            }
7635
7636          for (i = 0; i < current_nr_blocks; i++)
7637            {
7638              bb = EBB_FIRST_BB (i);
7639
7640              /* While pipelining outer loops, skip bundling for loop
7641                 preheaders.  Those will be rescheduled in the outer
7642                 loop.  */
7643              if (sel_is_loop_preheader_p (bb))
7644                {
7645                  clear_outdated_rtx_info (bb);
7646                  continue;
7647                }
7648
7649              if (bitmap_bit_p (blocks_to_reschedule, bb->index))
7650                {
7651                  flist_tail_init (new_fences);
7652
7653                  orig_max_seqno = init_seqno (blocks_to_reschedule, bb);
7654
7655                  /* Mark BB as head of the new ebb.  */
7656                  bitmap_set_bit (forced_ebb_heads, bb->index);
7657
7658                  gcc_assert (fences == NULL);
7659
7660                  init_fences (bb_note (bb));
7661
7662                  sel_sched_region_2 (orig_max_seqno);
7663
7664                  do_p = true;
7665                  break;
7666                }
7667            }
7668        }
7669    }
7670}
7671
7672/* Schedule the RGN region.  */
7673void
7674sel_sched_region (int rgn)
7675{
7676  bool schedule_p;
7677  bool reset_sched_cycles_p;
7678
7679  if (sel_region_init (rgn))
7680    return;
7681
7682  if (sched_verbose >= 1)
7683    sel_print ("Scheduling region %d\n", rgn);
7684
7685  schedule_p = (!sched_is_disabled_for_current_region_p ()
7686                && dbg_cnt (sel_sched_region_cnt));
7687  reset_sched_cycles_p = pipelining_p;
7688  if (schedule_p)
7689    sel_sched_region_1 ();
7690  else
7691    /* Force initialization of INSN_SCHED_CYCLEs for correct bundling.  */
7692    reset_sched_cycles_p = true;
7693
7694  sel_region_finish (reset_sched_cycles_p);
7695}
7696
7697/* Perform global init for the scheduler.  */
7698static void
7699sel_global_init (void)
7700{
7701  calculate_dominance_info (CDI_DOMINATORS);
7702  alloc_sched_pools ();
7703
7704  /* Setup the infos for sched_init.  */
7705  sel_setup_sched_infos ();
7706  setup_sched_dump ();
7707
7708  sched_rgn_init (false);
7709  sched_init ();
7710
7711  sched_init_bbs ();
7712  /* Reset AFTER_RECOVERY if it has been set by the 1st scheduler pass.  */
7713  after_recovery = 0;
7714  can_issue_more = issue_rate;
7715
7716  sched_extend_target ();
7717  sched_deps_init (true);
7718  setup_nop_and_exit_insns ();
7719  sel_extend_global_bb_info ();
7720  init_lv_sets ();
7721  init_hard_regs_data ();
7722}
7723
7724/* Free the global data of the scheduler.  */
7725static void
7726sel_global_finish (void)
7727{
7728  free_bb_note_pool ();
7729  free_lv_sets ();
7730  sel_finish_global_bb_info ();
7731
7732  free_regset_pool ();
7733  free_nop_and_exit_insns ();
7734
7735  sched_rgn_finish ();
7736  sched_deps_finish ();
7737  sched_finish ();
7738
7739  if (current_loops)
7740    sel_finish_pipelining ();
7741
7742  free_sched_pools ();
7743  free_dominance_info (CDI_DOMINATORS);
7744}
7745
7746/* Return true when we need to skip selective scheduling.  Used for debugging.  */
7747bool
7748maybe_skip_selective_scheduling (void)
7749{
7750  return ! dbg_cnt (sel_sched_cnt);
7751}
7752
7753/* The entry point.  */
7754void
7755run_selective_scheduling (void)
7756{
7757  int rgn;
7758
7759  if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
7760    return;
7761
7762  sel_global_init ();
7763
7764  for (rgn = 0; rgn < nr_regions; rgn++)
7765    sel_sched_region (rgn);
7766
7767  sel_global_finish ();
7768}
7769
7770#endif
7771