1/* Instruction scheduling pass.  This file computes dependencies between
2   instructions.
3   Copyright (C) 1992-2015 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com).  Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com).
6
7This file is part of GCC.
8
9GCC is free software; you can redistribute it and/or modify it under
10the terms of the GNU General Public License as published by the Free
11Software Foundation; either version 3, or (at your option) any later
12version.
13
14GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15WARRANTY; without even the implied warranty of MERCHANTABILITY or
16FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
17for more details.
18
19You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not, see
21<http://www.gnu.org/licenses/>.  */
22
23#include "config.h"
24#include "system.h"
25#include "coretypes.h"
26#include "tm.h"
27#include "diagnostic-core.h"
28#include "rtl.h"
29#include "hash-set.h"
30#include "machmode.h"
31#include "vec.h"
32#include "double-int.h"
33#include "input.h"
34#include "alias.h"
35#include "symtab.h"
36#include "wide-int.h"
37#include "inchash.h"
38#include "tree.h"		/* FIXME: Used by call_may_noreturn_p.  */
39#include "tm_p.h"
40#include "hard-reg-set.h"
41#include "regs.h"
42#include "input.h"
43#include "function.h"
44#include "flags.h"
45#include "insn-config.h"
46#include "insn-attr.h"
47#include "except.h"
48#include "recog.h"
49#include "emit-rtl.h"
50#include "dominance.h"
51#include "cfg.h"
52#include "cfgbuild.h"
53#include "predict.h"
54#include "basic-block.h"
55#include "sched-int.h"
56#include "params.h"
57#include "cselib.h"
58#include "ira.h"
59#include "ira-int.h"
60#include "target.h"
61
62#ifdef INSN_SCHEDULING
63
64#ifdef ENABLE_CHECKING
65#define CHECK (true)
66#else
67#define CHECK (false)
68#endif
69
70/* Holds current parameters for the dependency analyzer.  */
71struct sched_deps_info_def *sched_deps_info;
72
73/* The data is specific to the Haifa scheduler.  */
74vec<haifa_deps_insn_data_def>
75    h_d_i_d = vNULL;
76
77/* Return the major type present in the DS.  */
78enum reg_note
79ds_to_dk (ds_t ds)
80{
81  if (ds & DEP_TRUE)
82    return REG_DEP_TRUE;
83
84  if (ds & DEP_OUTPUT)
85    return REG_DEP_OUTPUT;
86
87  if (ds & DEP_CONTROL)
88    return REG_DEP_CONTROL;
89
90  gcc_assert (ds & DEP_ANTI);
91
92  return REG_DEP_ANTI;
93}
94
95/* Return equivalent dep_status.  */
96ds_t
97dk_to_ds (enum reg_note dk)
98{
99  switch (dk)
100    {
101    case REG_DEP_TRUE:
102      return DEP_TRUE;
103
104    case REG_DEP_OUTPUT:
105      return DEP_OUTPUT;
106
107    case REG_DEP_CONTROL:
108      return DEP_CONTROL;
109
110    default:
111      gcc_assert (dk == REG_DEP_ANTI);
112      return DEP_ANTI;
113    }
114}
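
/* Note (illustrative): for the four plain dependence kinds these two
   functions invert each other, e.g. ds_to_dk (dk_to_ds (REG_DEP_OUTPUT))
   yields REG_DEP_OUTPUT again.  */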
115
116/* Functions to operate with dependence information container - dep_t.  */
117
118/* Init DEP with the arguments.  */
119void
120init_dep_1 (dep_t dep, rtx_insn *pro, rtx_insn *con, enum reg_note type, ds_t ds)
121{
122  DEP_PRO (dep) = pro;
123  DEP_CON (dep) = con;
124  DEP_TYPE (dep) = type;
125  DEP_STATUS (dep) = ds;
126  DEP_COST (dep) = UNKNOWN_DEP_COST;
127  DEP_NONREG (dep) = 0;
128  DEP_MULTIPLE (dep) = 0;
129  DEP_REPLACE (dep) = NULL;
130}
131
132/* Init DEP with the arguments.
133   While most of the scheduler (including targets) only need the major type
134   of the dependency, it is convenient to hide full dep_status from them.  */
135void
136init_dep (dep_t dep, rtx_insn *pro, rtx_insn *con, enum reg_note kind)
137{
138  ds_t ds;
139
140  if ((current_sched_info->flags & USE_DEPS_LIST))
141    ds = dk_to_ds (kind);
142  else
143    ds = 0;
144
145  init_dep_1 (dep, pro, con, kind, ds);
146}
147
148/* Make a copy of FROM in TO.  */
149static void
150copy_dep (dep_t to, dep_t from)
151{
152  memcpy (to, from, sizeof (*to));
153}
154
155static void dump_ds (FILE *, ds_t);
156
157/* Define flags for dump_dep ().  */
158
159/* Dump producer of the dependence.  */
160#define DUMP_DEP_PRO (2)
161
162/* Dump consumer of the dependence.  */
163#define DUMP_DEP_CON (4)
164
165/* Dump type of the dependence.  */
166#define DUMP_DEP_TYPE (8)
167
168/* Dump status of the dependence.  */
169#define DUMP_DEP_STATUS (16)
170
171/* Dump all information about the dependence.  */
172#define DUMP_DEP_ALL (DUMP_DEP_PRO | DUMP_DEP_CON | DUMP_DEP_TYPE	\
173		      |DUMP_DEP_STATUS)
174
175/* Dump DEP to DUMP.
176   FLAGS is a bit mask specifying what information about DEP needs
177   to be printed.
178   If FLAGS has the very first bit set, then dump all information about DEP
179   and propagate this bit into the callee dump functions.  */
180static void
181dump_dep (FILE *dump, dep_t dep, int flags)
182{
183  if (flags & 1)
184    flags |= DUMP_DEP_ALL;
185
186  fprintf (dump, "<");
187
188  if (flags & DUMP_DEP_PRO)
189    fprintf (dump, "%d; ", INSN_UID (DEP_PRO (dep)));
190
191  if (flags & DUMP_DEP_CON)
192    fprintf (dump, "%d; ", INSN_UID (DEP_CON (dep)));
193
194  if (flags & DUMP_DEP_TYPE)
195    {
196      char t;
197      enum reg_note type = DEP_TYPE (dep);
198
199      switch (type)
200	{
201	case REG_DEP_TRUE:
202	  t = 't';
203	  break;
204
205	case REG_DEP_OUTPUT:
206	  t = 'o';
207	  break;
208
209	case REG_DEP_CONTROL:
210	  t = 'c';
211	  break;
212
213	case REG_DEP_ANTI:
214	  t = 'a';
215	  break;
216
217	default:
218	  gcc_unreachable ();
219	  break;
220	}
221
222      fprintf (dump, "%c; ", t);
223    }
224
225  if (flags & DUMP_DEP_STATUS)
226    {
227      if (current_sched_info->flags & USE_DEPS_LIST)
228	dump_ds (dump, DEP_STATUS (dep));
229    }
230
231  fprintf (dump, ">");
232}
233
234/* Default flags for dump_dep ().  */
235static int dump_dep_flags = (DUMP_DEP_PRO | DUMP_DEP_CON);
236
237/* Dump all fields of DEP to STDERR.  */
238void
239sd_debug_dep (dep_t dep)
240{
241  dump_dep (stderr, dep, 1);
242  fprintf (stderr, "\n");
243}
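
/* For illustration only: with the default dump_dep_flags the producer and
   consumer UIDs are printed, and with full flags the type letter as well,
   so a true dependence of insn 25 on insn 23 would be dumped roughly as
   "<23; 25; t; >" (the status field is added only when USE_DEPS_LIST is
   set).  */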
244
245/* Determine whether DEP is a dependency link of a non-debug insn on a
246   debug insn.  */
247
248static inline bool
249depl_on_debug_p (dep_link_t dep)
250{
251  return (DEBUG_INSN_P (DEP_LINK_PRO (dep))
252	  && !DEBUG_INSN_P (DEP_LINK_CON (dep)));
253}
254
255/* Functions to operate with a single link from the dependencies lists -
256   dep_link_t.  */
257
258/* Attach L to appear after link X whose &DEP_LINK_NEXT (X) is given by
   PREV_NEXTP.  */
260static void
261attach_dep_link (dep_link_t l, dep_link_t *prev_nextp)
262{
263  dep_link_t next = *prev_nextp;
264
265  gcc_assert (DEP_LINK_PREV_NEXTP (l) == NULL
266	      && DEP_LINK_NEXT (l) == NULL);
267
268  /* Init node being inserted.  */
269  DEP_LINK_PREV_NEXTP (l) = prev_nextp;
270  DEP_LINK_NEXT (l) = next;
271
272  /* Fix next node.  */
273  if (next != NULL)
274    {
275      gcc_assert (DEP_LINK_PREV_NEXTP (next) == prev_nextp);
276
277      DEP_LINK_PREV_NEXTP (next) = &DEP_LINK_NEXT (l);
278    }
279
280  /* Fix prev node.  */
281  *prev_nextp = l;
282}
283
284/* Add dep_link LINK to deps_list L.  */
285static void
286add_to_deps_list (dep_link_t link, deps_list_t l)
287{
288  attach_dep_link (link, &DEPS_LIST_FIRST (l));
289
290  /* Don't count debug deps.  */
291  if (!depl_on_debug_p (link))
292    ++DEPS_LIST_N_LINKS (l);
293}
294
295/* Detach dep_link L from the list.  */
296static void
297detach_dep_link (dep_link_t l)
298{
299  dep_link_t *prev_nextp = DEP_LINK_PREV_NEXTP (l);
300  dep_link_t next = DEP_LINK_NEXT (l);
301
302  *prev_nextp = next;
303
304  if (next != NULL)
305    DEP_LINK_PREV_NEXTP (next) = prev_nextp;
306
307  DEP_LINK_PREV_NEXTP (l) = NULL;
308  DEP_LINK_NEXT (l) = NULL;
309}
310
311/* Remove link LINK from list LIST.  */
312static void
313remove_from_deps_list (dep_link_t link, deps_list_t list)
314{
315  detach_dep_link (link);
316
317  /* Don't count debug deps.  */
318  if (!depl_on_debug_p (link))
319    --DEPS_LIST_N_LINKS (list);
320}
321
322/* Move link LINK from list FROM to list TO.  */
323static void
324move_dep_link (dep_link_t link, deps_list_t from, deps_list_t to)
325{
326  remove_from_deps_list (link, from);
327  add_to_deps_list (link, to);
328}
329
/* Return true if LINK is not attached to any list.  */
331static bool
332dep_link_is_detached_p (dep_link_t link)
333{
334  return DEP_LINK_PREV_NEXTP (link) == NULL;
335}
336
337/* Pool to hold all dependency nodes (dep_node_t).  */
338static alloc_pool dn_pool;
339
340/* Number of dep_nodes out there.  */
341static int dn_pool_diff = 0;
342
343/* Create a dep_node.  */
344static dep_node_t
345create_dep_node (void)
346{
347  dep_node_t n = (dep_node_t) pool_alloc (dn_pool);
348  dep_link_t back = DEP_NODE_BACK (n);
349  dep_link_t forw = DEP_NODE_FORW (n);
350
351  DEP_LINK_NODE (back) = n;
352  DEP_LINK_NEXT (back) = NULL;
353  DEP_LINK_PREV_NEXTP (back) = NULL;
354
355  DEP_LINK_NODE (forw) = n;
356  DEP_LINK_NEXT (forw) = NULL;
357  DEP_LINK_PREV_NEXTP (forw) = NULL;
358
359  ++dn_pool_diff;
360
361  return n;
362}
363
364/* Delete dep_node N.  N must not be connected to any deps_list.  */
365static void
366delete_dep_node (dep_node_t n)
367{
368  gcc_assert (dep_link_is_detached_p (DEP_NODE_BACK (n))
369	      && dep_link_is_detached_p (DEP_NODE_FORW (n)));
370
371  XDELETE (DEP_REPLACE (DEP_NODE_DEP (n)));
372
373  --dn_pool_diff;
374
375  pool_free (dn_pool, n);
376}
377
378/* Pool to hold dependencies lists (deps_list_t).  */
379static alloc_pool dl_pool;
380
381/* Number of deps_lists out there.  */
382static int dl_pool_diff = 0;
383
384/* Functions to operate with dependences lists - deps_list_t.  */
385
386/* Return true if list L is empty.  */
387static bool
388deps_list_empty_p (deps_list_t l)
389{
390  return DEPS_LIST_N_LINKS (l) == 0;
391}
392
393/* Create a new deps_list.  */
394static deps_list_t
395create_deps_list (void)
396{
397  deps_list_t l = (deps_list_t) pool_alloc (dl_pool);
398
399  DEPS_LIST_FIRST (l) = NULL;
400  DEPS_LIST_N_LINKS (l) = 0;
401
402  ++dl_pool_diff;
403  return l;
404}
405
406/* Free deps_list L.  */
407static void
408free_deps_list (deps_list_t l)
409{
410  gcc_assert (deps_list_empty_p (l));
411
412  --dl_pool_diff;
413
414  pool_free (dl_pool, l);
415}
416
/* Return true if there are no dep_nodes or deps_lists out there.
   After the region is scheduled, all the dependency nodes and lists
   should [generally] be returned to the pool.  */
420bool
421deps_pools_are_empty_p (void)
422{
423  return dn_pool_diff == 0 && dl_pool_diff == 0;
424}
425
426/* Remove all elements from L.  */
427static void
428clear_deps_list (deps_list_t l)
429{
430  do
431    {
432      dep_link_t link = DEPS_LIST_FIRST (l);
433
434      if (link == NULL)
435	break;
436
437      remove_from_deps_list (link, l);
438    }
439  while (1);
440}
441
442/* Decide whether a dependency should be treated as a hard or a speculative
443   dependency.  */
444static bool
445dep_spec_p (dep_t dep)
446{
447  if (current_sched_info->flags & DO_SPECULATION)
448    {
449      if (DEP_STATUS (dep) & SPECULATIVE)
450	return true;
451    }
452  if (current_sched_info->flags & DO_PREDICATION)
453    {
454      if (DEP_TYPE (dep) == REG_DEP_CONTROL)
455	return true;
456    }
457  if (DEP_REPLACE (dep) != NULL)
458    return true;
459  return false;
460}
461
462static regset reg_pending_sets;
463static regset reg_pending_clobbers;
464static regset reg_pending_uses;
465static regset reg_pending_control_uses;
466static enum reg_pending_barrier_mode reg_pending_barrier;
467
/* Hard registers implicitly clobbered or used (or that may be implicitly
   clobbered or used) by the currently analyzed insn.  For example, an
   insn may have a constraint that accepts only a single-register class.
   Even if no hard register currently appears in the insn, the particular
   hard register of that class will appear in it after the reload pass,
   because the constraint requires it.  */
474static HARD_REG_SET implicit_reg_pending_clobbers;
475static HARD_REG_SET implicit_reg_pending_uses;
476
477/* To speed up the test for duplicate dependency links we keep a
478   record of dependencies created by add_dependence when the average
479   number of instructions in a basic block is very large.
480
   Studies have shown that there are typically around 5 instructions between
482   branches for typical C code.  So we can make a guess that the average
483   basic block is approximately 5 instructions long; we will choose 100X
484   the average size as a very large basic block.
485
486   Each insn has associated bitmaps for its dependencies.  Each bitmap
487   has enough entries to represent a dependency on any other insn in
   the insn chain.  All the caches are allocated together: whenever the
   cache for true dependencies is allocated, the remaining ones are
   allocated as well.  */
490static bitmap_head *true_dependency_cache = NULL;
491static bitmap_head *output_dependency_cache = NULL;
492static bitmap_head *anti_dependency_cache = NULL;
493static bitmap_head *control_dependency_cache = NULL;
494static bitmap_head *spec_dependency_cache = NULL;
495static int cache_size;
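
/* A sketch of how the caches are consulted; `pro' and `con' stand for
   hypothetical producer and consumer insns:

     if (bitmap_bit_p (&true_dependency_cache[INSN_LUID (con)],
                       INSN_LUID (pro)))
       ...a true dependence of CON on PRO has already been recorded...

   ask_dependency_caches and sd_find_dep_between below use exactly this
   indexing: the array is indexed by the consumer's luid and the bit
   index is the producer's luid.  */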
496
/* True if we should mark added dependencies as non-register deps.  */
498static bool mark_as_hard;
499
500static int deps_may_trap_p (const_rtx);
501static void add_dependence_1 (rtx_insn *, rtx_insn *, enum reg_note);
502static void add_dependence_list (rtx_insn *, rtx_insn_list *, int,
503				 enum reg_note, bool);
504static void add_dependence_list_and_free (struct deps_desc *, rtx_insn *,
505					  rtx_insn_list **, int, enum reg_note,
506					  bool);
507static void delete_all_dependences (rtx);
508static void chain_to_prev_insn (rtx_insn *);
509
510static void flush_pending_lists (struct deps_desc *, rtx_insn *, int, int);
511static void sched_analyze_1 (struct deps_desc *, rtx, rtx_insn *);
512static void sched_analyze_2 (struct deps_desc *, rtx, rtx_insn *);
513static void sched_analyze_insn (struct deps_desc *, rtx, rtx_insn *);
514
515static bool sched_has_condition_p (const rtx_insn *);
516static int conditions_mutex_p (const_rtx, const_rtx, bool, bool);
517
518static enum DEPS_ADJUST_RESULT maybe_add_or_update_dep_1 (dep_t, bool,
519							  rtx, rtx);
520static enum DEPS_ADJUST_RESULT add_or_update_dep_1 (dep_t, bool, rtx, rtx);
521
522#ifdef ENABLE_CHECKING
523static void check_dep (dep_t, bool);
524#endif
525
526/* Return nonzero if a load of the memory reference MEM can cause a trap.  */
527
528static int
529deps_may_trap_p (const_rtx mem)
530{
531  const_rtx addr = XEXP (mem, 0);
532
533  if (REG_P (addr) && REGNO (addr) >= FIRST_PSEUDO_REGISTER)
534    {
535      const_rtx t = get_reg_known_value (REGNO (addr));
536      if (t)
537	addr = t;
538    }
539  return rtx_addr_can_trap_p (addr);
540}
541
542
543/* Find the condition under which INSN is executed.  If REV is not NULL,
544   it is set to TRUE when the returned comparison should be reversed
545   to get the actual condition.  */
546static rtx
547sched_get_condition_with_rev_uncached (const rtx_insn *insn, bool *rev)
548{
549  rtx pat = PATTERN (insn);
550  rtx src;
551
552  if (rev)
553    *rev = false;
554
555  if (GET_CODE (pat) == COND_EXEC)
556    return COND_EXEC_TEST (pat);
557
558  if (!any_condjump_p (insn) || !onlyjump_p (insn))
559    return 0;
560
561  src = SET_SRC (pc_set (insn));
562
563  if (XEXP (src, 2) == pc_rtx)
564    return XEXP (src, 0);
565  else if (XEXP (src, 1) == pc_rtx)
566    {
567      rtx cond = XEXP (src, 0);
568      enum rtx_code revcode = reversed_comparison_code (cond, insn);
569
570      if (revcode == UNKNOWN)
571	return 0;
572
573      if (rev)
574	*rev = true;
575      return cond;
576    }
577
578  return 0;
579}
580
581/* Return the condition under which INSN does not execute (i.e.  the
582   not-taken condition for a conditional branch), or NULL if we cannot
583   find such a condition.  The caller should make a copy of the condition
584   before using it.  */
585rtx
586sched_get_reverse_condition_uncached (const rtx_insn *insn)
587{
588  bool rev;
589  rtx cond = sched_get_condition_with_rev_uncached (insn, &rev);
590  if (cond == NULL_RTX)
591    return cond;
592  if (!rev)
593    {
594      enum rtx_code revcode = reversed_comparison_code (cond, insn);
595      cond = gen_rtx_fmt_ee (revcode, GET_MODE (cond),
596			     XEXP (cond, 0),
597			     XEXP (cond, 1));
598    }
599  return cond;
600}
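
/* Illustrative example (the register number and mode are made up): if the
   pc_set source of a conditional jump is
     (if_then_else (ne (reg:CC 17) (const_int 0)) (label_ref 23) (pc))
   then the taken condition is (ne (reg:CC 17) (const_int 0)), and
   sched_get_reverse_condition_uncached returns the reversed comparison
   (eq (reg:CC 17) (const_int 0)).  */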
601
602/* Caching variant of sched_get_condition_with_rev_uncached.
603   We only do actual work the first time we come here for an insn; the
604   results are cached in INSN_CACHED_COND and INSN_REVERSE_COND.  */
605static rtx
606sched_get_condition_with_rev (const rtx_insn *insn, bool *rev)
607{
608  bool tmp;
609
610  if (INSN_LUID (insn) == 0)
611    return sched_get_condition_with_rev_uncached (insn, rev);
612
613  if (INSN_CACHED_COND (insn) == const_true_rtx)
614    return NULL_RTX;
615
616  if (INSN_CACHED_COND (insn) != NULL_RTX)
617    {
618      if (rev)
619	*rev = INSN_REVERSE_COND (insn);
620      return INSN_CACHED_COND (insn);
621    }
622
623  INSN_CACHED_COND (insn) = sched_get_condition_with_rev_uncached (insn, &tmp);
624  INSN_REVERSE_COND (insn) = tmp;
625
626  if (INSN_CACHED_COND (insn) == NULL_RTX)
627    {
628      INSN_CACHED_COND (insn) = const_true_rtx;
629      return NULL_RTX;
630    }
631
632  if (rev)
633    *rev = INSN_REVERSE_COND (insn);
634  return INSN_CACHED_COND (insn);
635}
636
637/* True when we can find a condition under which INSN is executed.  */
638static bool
639sched_has_condition_p (const rtx_insn *insn)
640{
641  return !! sched_get_condition_with_rev (insn, NULL);
642}
643
644
645
646/* Return nonzero if conditions COND1 and COND2 can never be both true.  */
647static int
648conditions_mutex_p (const_rtx cond1, const_rtx cond2, bool rev1, bool rev2)
649{
650  if (COMPARISON_P (cond1)
651      && COMPARISON_P (cond2)
652      && GET_CODE (cond1) ==
653	  (rev1==rev2
654	  ? reversed_comparison_code (cond2, NULL)
655	  : GET_CODE (cond2))
656      && rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
657      && XEXP (cond1, 1) == XEXP (cond2, 1))
658    return 1;
659  return 0;
660}
661
/* Return true if INSN1 and INSN2 can never depend on one another because
   the conditions under which they are executed are mutually exclusive.  */
664bool
665sched_insns_conditions_mutex_p (const rtx_insn *insn1, const rtx_insn *insn2)
666{
667  rtx cond1, cond2;
668  bool rev1 = false, rev2 = false;
669
670  /* df doesn't handle conditional lifetimes entirely correctly;
671     calls mess up the conditional lifetimes.  */
672  if (!CALL_P (insn1) && !CALL_P (insn2))
673    {
674      cond1 = sched_get_condition_with_rev (insn1, &rev1);
675      cond2 = sched_get_condition_with_rev (insn2, &rev2);
676      if (cond1 && cond2
677	  && conditions_mutex_p (cond1, cond2, rev1, rev2)
678	  /* Make sure first instruction doesn't affect condition of second
679	     instruction if switched.  */
680	  && !modified_in_p (cond1, insn2)
681	  /* Make sure second instruction doesn't affect condition of first
682	     instruction if switched.  */
683	  && !modified_in_p (cond2, insn1))
684	return true;
685    }
686  return false;
687}
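
/* For example (a hypothetical pair of insns): two COND_EXEC insns guarded
   by (eq (reg 100) (const_int 0)) and (ne (reg 100) (const_int 0)) can
   never execute together, so no dependence needs to be recorded between
   them, provided neither insn modifies reg 100.  */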
688
689
690/* Return true if INSN can potentially be speculated with type DS.  */
691bool
692sched_insn_is_legitimate_for_speculation_p (const rtx_insn *insn, ds_t ds)
693{
694  if (HAS_INTERNAL_DEP (insn))
695    return false;
696
697  if (!NONJUMP_INSN_P (insn))
698    return false;
699
700  if (SCHED_GROUP_P (insn))
701    return false;
702
703  if (IS_SPECULATION_CHECK_P (CONST_CAST_RTX_INSN (insn)))
704    return false;
705
706  if (side_effects_p (PATTERN (insn)))
707    return false;
708
709  if (ds & BE_IN_SPEC)
    /* Instructions that depend on a speculatively scheduled instruction
       cannot themselves be speculatively scheduled.  */
712    {
713      if (may_trap_or_fault_p (PATTERN (insn)))
714	/* If instruction might fault, it cannot be speculatively scheduled.
715	   For control speculation it's obvious why and for data speculation
716	   it's because the insn might get wrong input if speculation
717	   wasn't successful.  */
718	return false;
719
720      if ((ds & BE_IN_DATA)
721	  && sched_has_condition_p (insn))
722	/* If this is a predicated instruction, then it cannot be
723	   speculatively scheduled.  See PR35659.  */
724	return false;
725    }
726
727  return true;
728}
729
730/* Initialize LIST_PTR to point to one of the lists present in TYPES_PTR,
731   initialize RESOLVED_P_PTR with true if that list consists of resolved deps,
732   and remove the type of returned [through LIST_PTR] list from TYPES_PTR.
733   This function is used to switch sd_iterator to the next list.
734   !!! For internal use only.  Might consider moving it to sched-int.h.  */
735void
736sd_next_list (const_rtx insn, sd_list_types_def *types_ptr,
737	      deps_list_t *list_ptr, bool *resolved_p_ptr)
738{
739  sd_list_types_def types = *types_ptr;
740
741  if (types & SD_LIST_HARD_BACK)
742    {
743      *list_ptr = INSN_HARD_BACK_DEPS (insn);
744      *resolved_p_ptr = false;
745      *types_ptr = types & ~SD_LIST_HARD_BACK;
746    }
747  else if (types & SD_LIST_SPEC_BACK)
748    {
749      *list_ptr = INSN_SPEC_BACK_DEPS (insn);
750      *resolved_p_ptr = false;
751      *types_ptr = types & ~SD_LIST_SPEC_BACK;
752    }
753  else if (types & SD_LIST_FORW)
754    {
755      *list_ptr = INSN_FORW_DEPS (insn);
756      *resolved_p_ptr = false;
757      *types_ptr = types & ~SD_LIST_FORW;
758    }
759  else if (types & SD_LIST_RES_BACK)
760    {
761      *list_ptr = INSN_RESOLVED_BACK_DEPS (insn);
762      *resolved_p_ptr = true;
763      *types_ptr = types & ~SD_LIST_RES_BACK;
764    }
765  else if (types & SD_LIST_RES_FORW)
766    {
767      *list_ptr = INSN_RESOLVED_FORW_DEPS (insn);
768      *resolved_p_ptr = true;
769      *types_ptr = types & ~SD_LIST_RES_FORW;
770    }
771  else
772    {
773      *list_ptr = NULL;
774      *resolved_p_ptr = false;
775      *types_ptr = SD_LIST_NONE;
776    }
777}
778
779/* Return the summary size of INSN's lists defined by LIST_TYPES.  */
780int
781sd_lists_size (const_rtx insn, sd_list_types_def list_types)
782{
783  int size = 0;
784
785  while (list_types != SD_LIST_NONE)
786    {
787      deps_list_t list;
788      bool resolved_p;
789
790      sd_next_list (insn, &list_types, &list, &resolved_p);
791      if (list)
792	size += DEPS_LIST_N_LINKS (list);
793    }
794
795  return size;
796}
797
798/* Return true if INSN's lists defined by LIST_TYPES are all empty.  */
799
800bool
801sd_lists_empty_p (const_rtx insn, sd_list_types_def list_types)
802{
803  while (list_types != SD_LIST_NONE)
804    {
805      deps_list_t list;
806      bool resolved_p;
807
808      sd_next_list (insn, &list_types, &list, &resolved_p);
809      if (!deps_list_empty_p (list))
810	return false;
811    }
812
813  return true;
814}
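
/* A minimal usage sketch of the list-walking interface defined above
   (INSN here stands for any insn that has been through sd_init_insn;
   the loop body is hypothetical):

     sd_iterator_def sd_it;
     dep_t dep;

     FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
       if (DEP_TYPE (dep) == REG_DEP_TRUE)
         ...;

   This is the same pattern used throughout this file, e.g. in
   sd_copy_back_deps and chain_to_prev_insn below.  */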
815
816/* Initialize data for INSN.  */
817void
818sd_init_insn (rtx insn)
819{
820  INSN_HARD_BACK_DEPS (insn) = create_deps_list ();
821  INSN_SPEC_BACK_DEPS (insn) = create_deps_list ();
822  INSN_RESOLVED_BACK_DEPS (insn) = create_deps_list ();
823  INSN_FORW_DEPS (insn) = create_deps_list ();
824  INSN_RESOLVED_FORW_DEPS (insn) = create_deps_list ();
825
826  /* ??? It would be nice to allocate dependency caches here.  */
827}
828
829/* Free data for INSN.  */
830void
831sd_finish_insn (rtx insn)
832{
833  /* ??? It would be nice to deallocate dependency caches here.  */
834
835  free_deps_list (INSN_HARD_BACK_DEPS (insn));
836  INSN_HARD_BACK_DEPS (insn) = NULL;
837
838  free_deps_list (INSN_SPEC_BACK_DEPS (insn));
839  INSN_SPEC_BACK_DEPS (insn) = NULL;
840
841  free_deps_list (INSN_RESOLVED_BACK_DEPS (insn));
842  INSN_RESOLVED_BACK_DEPS (insn) = NULL;
843
844  free_deps_list (INSN_FORW_DEPS (insn));
845  INSN_FORW_DEPS (insn) = NULL;
846
847  free_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
848  INSN_RESOLVED_FORW_DEPS (insn) = NULL;
849}
850
851/* Find a dependency between producer PRO and consumer CON.
852   Search through resolved dependency lists if RESOLVED_P is true.
853   If no such dependency is found return NULL,
854   otherwise return the dependency and initialize SD_IT_PTR [if it is nonnull]
855   with an iterator pointing to it.  */
856static dep_t
857sd_find_dep_between_no_cache (rtx pro, rtx con, bool resolved_p,
858			      sd_iterator_def *sd_it_ptr)
859{
860  sd_list_types_def pro_list_type;
861  sd_list_types_def con_list_type;
862  sd_iterator_def sd_it;
863  dep_t dep;
864  bool found_p = false;
865
866  if (resolved_p)
867    {
868      pro_list_type = SD_LIST_RES_FORW;
869      con_list_type = SD_LIST_RES_BACK;
870    }
871  else
872    {
873      pro_list_type = SD_LIST_FORW;
874      con_list_type = SD_LIST_BACK;
875    }
876
  /* Walk through either the back list of CON or the forw list of PRO,
     depending on which one is shorter.  */
879  if (sd_lists_size (con, con_list_type) < sd_lists_size (pro, pro_list_type))
880    {
881      /* Find the dep_link with producer PRO in consumer's back_deps.  */
882      FOR_EACH_DEP (con, con_list_type, sd_it, dep)
883	if (DEP_PRO (dep) == pro)
884	  {
885	    found_p = true;
886	    break;
887	  }
888    }
889  else
890    {
891      /* Find the dep_link with consumer CON in producer's forw_deps.  */
892      FOR_EACH_DEP (pro, pro_list_type, sd_it, dep)
893	if (DEP_CON (dep) == con)
894	  {
895	    found_p = true;
896	    break;
897	  }
898    }
899
900  if (found_p)
901    {
902      if (sd_it_ptr != NULL)
903	*sd_it_ptr = sd_it;
904
905      return dep;
906    }
907
908  return NULL;
909}
910
/* Find a dependency between producer PRO and consumer CON.
   Use the dependency caches [if available] to check whether the
   dependency is present at all.
   Search through resolved dependency lists if RESOLVED_P is true.
   Return the dependency, or NULL if none was found.  */
915dep_t
916sd_find_dep_between (rtx pro, rtx con, bool resolved_p)
917{
918  if (true_dependency_cache != NULL)
919    /* Avoiding the list walk below can cut compile times dramatically
920       for some code.  */
921    {
922      int elem_luid = INSN_LUID (pro);
923      int insn_luid = INSN_LUID (con);
924
925      if (!bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid)
926	  && !bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid)
927	  && !bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid)
928	  && !bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
929	return NULL;
930    }
931
932  return sd_find_dep_between_no_cache (pro, con, resolved_p, NULL);
933}
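
/* Usage sketch; `producer' and `consumer' are hypothetical insns:

     dep_t dep = sd_find_dep_between (producer, consumer, false);

     if (dep != NULL && DEP_TYPE (dep) == REG_DEP_TRUE)
       ...;

   When the dependency caches are in use, the bitmap test above makes this
   cheap whenever no dependence exists.  */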
934
/* Add or update a dependence described by DEP.
936   MEM1 and MEM2, if non-null, correspond to memory locations in case of
937   data speculation.
938
939   The function returns a value indicating if an old entry has been changed
940   or a new entry has been added to insn's backward deps.
941
   This function merely checks whether producer and consumer are the same insn
943   and doesn't create a dep in this case.  Actual manipulation of
944   dependence data structures is performed in add_or_update_dep_1.  */
945static enum DEPS_ADJUST_RESULT
946maybe_add_or_update_dep_1 (dep_t dep, bool resolved_p, rtx mem1, rtx mem2)
947{
948  rtx_insn *elem = DEP_PRO (dep);
949  rtx_insn *insn = DEP_CON (dep);
950
951  gcc_assert (INSN_P (insn) && INSN_P (elem));
952
953  /* Don't depend an insn on itself.  */
954  if (insn == elem)
955    {
956      if (sched_deps_info->generate_spec_deps)
957        /* INSN has an internal dependence, which we can't overcome.  */
958        HAS_INTERNAL_DEP (insn) = 1;
959
960      return DEP_NODEP;
961    }
962
963  return add_or_update_dep_1 (dep, resolved_p, mem1, mem2);
964}
965
966/* Ask dependency caches what needs to be done for dependence DEP.
967   Return DEP_CREATED if new dependence should be created and there is no
968   need to try to find one searching the dependencies lists.
969   Return DEP_PRESENT if there already is a dependence described by DEP and
970   hence nothing is to be done.
971   Return DEP_CHANGED if there already is a dependence, but it should be
972   updated to incorporate additional information from DEP.  */
973static enum DEPS_ADJUST_RESULT
974ask_dependency_caches (dep_t dep)
975{
976  int elem_luid = INSN_LUID (DEP_PRO (dep));
977  int insn_luid = INSN_LUID (DEP_CON (dep));
978
979  gcc_assert (true_dependency_cache != NULL
980	      && output_dependency_cache != NULL
981	      && anti_dependency_cache != NULL
982	      && control_dependency_cache != NULL);
983
984  if (!(current_sched_info->flags & USE_DEPS_LIST))
985    {
986      enum reg_note present_dep_type;
987
988      if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
989	present_dep_type = REG_DEP_TRUE;
990      else if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
991	present_dep_type = REG_DEP_OUTPUT;
992      else if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
993	present_dep_type = REG_DEP_ANTI;
994      else if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
995	present_dep_type = REG_DEP_CONTROL;
996      else
997	/* There is no existing dep so it should be created.  */
998	return DEP_CREATED;
999
1000      if ((int) DEP_TYPE (dep) >= (int) present_dep_type)
1001	/* DEP does not add anything to the existing dependence.  */
1002	return DEP_PRESENT;
1003    }
1004  else
1005    {
1006      ds_t present_dep_types = 0;
1007
1008      if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
1009	present_dep_types |= DEP_TRUE;
1010      if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
1011	present_dep_types |= DEP_OUTPUT;
1012      if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
1013	present_dep_types |= DEP_ANTI;
1014      if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
1015	present_dep_types |= DEP_CONTROL;
1016
1017      if (present_dep_types == 0)
1018	/* There is no existing dep so it should be created.  */
1019	return DEP_CREATED;
1020
1021      if (!(current_sched_info->flags & DO_SPECULATION)
1022	  || !bitmap_bit_p (&spec_dependency_cache[insn_luid], elem_luid))
1023	{
1024	  if ((present_dep_types | (DEP_STATUS (dep) & DEP_TYPES))
1025	      == present_dep_types)
1026	    /* DEP does not add anything to the existing dependence.  */
1027	    return DEP_PRESENT;
1028	}
1029      else
1030	{
1031	  /* Only true dependencies can be data speculative and
1032	     only anti dependencies can be control speculative.  */
1033	  gcc_assert ((present_dep_types & (DEP_TRUE | DEP_ANTI))
1034		      == present_dep_types);
1035
1036	  /* if (DEP is SPECULATIVE) then
1037	     ..we should update DEP_STATUS
1038	     else
1039	     ..we should reset existing dep to non-speculative.  */
1040	}
1041    }
1042
1043  return DEP_CHANGED;
1044}
1045
1046/* Set dependency caches according to DEP.  */
1047static void
1048set_dependency_caches (dep_t dep)
1049{
1050  int elem_luid = INSN_LUID (DEP_PRO (dep));
1051  int insn_luid = INSN_LUID (DEP_CON (dep));
1052
1053  if (!(current_sched_info->flags & USE_DEPS_LIST))
1054    {
1055      switch (DEP_TYPE (dep))
1056	{
1057	case REG_DEP_TRUE:
1058	  bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
1059	  break;
1060
1061	case REG_DEP_OUTPUT:
1062	  bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
1063	  break;
1064
1065	case REG_DEP_ANTI:
1066	  bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
1067	  break;
1068
1069	case REG_DEP_CONTROL:
1070	  bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);
1071	  break;
1072
1073	default:
1074	  gcc_unreachable ();
1075	}
1076    }
1077  else
1078    {
1079      ds_t ds = DEP_STATUS (dep);
1080
1081      if (ds & DEP_TRUE)
1082	bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
1083      if (ds & DEP_OUTPUT)
1084	bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
1085      if (ds & DEP_ANTI)
1086	bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
1087      if (ds & DEP_CONTROL)
1088	bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);
1089
1090      if (ds & SPECULATIVE)
1091	{
1092	  gcc_assert (current_sched_info->flags & DO_SPECULATION);
1093	  bitmap_set_bit (&spec_dependency_cache[insn_luid], elem_luid);
1094	}
1095    }
1096}
1097
/* The type of dependence DEP has changed from OLD_TYPE.  Update dependency
1099   caches accordingly.  */
1100static void
1101update_dependency_caches (dep_t dep, enum reg_note old_type)
1102{
1103  int elem_luid = INSN_LUID (DEP_PRO (dep));
1104  int insn_luid = INSN_LUID (DEP_CON (dep));
1105
  /* Clear the corresponding cache entry because the type of the link
     may have changed.  Keep the entries if USE_DEPS_LIST is set.  */
1108  if (!(current_sched_info->flags & USE_DEPS_LIST))
1109    {
1110      switch (old_type)
1111	{
1112	case REG_DEP_OUTPUT:
1113	  bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
1114	  break;
1115
1116	case REG_DEP_ANTI:
1117	  bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
1118	  break;
1119
1120	case REG_DEP_CONTROL:
1121	  bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
1122	  break;
1123
1124	default:
1125	  gcc_unreachable ();
1126	}
1127    }
1128
1129  set_dependency_caches (dep);
1130}
1131
1132/* Convert a dependence pointed to by SD_IT to be non-speculative.  */
1133static void
1134change_spec_dep_to_hard (sd_iterator_def sd_it)
1135{
1136  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
1137  dep_link_t link = DEP_NODE_BACK (node);
1138  dep_t dep = DEP_NODE_DEP (node);
1139  rtx_insn *elem = DEP_PRO (dep);
1140  rtx_insn *insn = DEP_CON (dep);
1141
1142  move_dep_link (link, INSN_SPEC_BACK_DEPS (insn), INSN_HARD_BACK_DEPS (insn));
1143
1144  DEP_STATUS (dep) &= ~SPECULATIVE;
1145
1146  if (true_dependency_cache != NULL)
1147    /* Clear the cache entry.  */
1148    bitmap_clear_bit (&spec_dependency_cache[INSN_LUID (insn)],
1149		      INSN_LUID (elem));
1150}
1151
1152/* Update DEP to incorporate information from NEW_DEP.
1153   SD_IT points to DEP in case it should be moved to another list.
   MEM1 and MEM2, if nonnull, correspond to memory locations in case a
   data-speculative dependence should be updated.  */
1156static enum DEPS_ADJUST_RESULT
1157update_dep (dep_t dep, dep_t new_dep,
1158	    sd_iterator_def sd_it ATTRIBUTE_UNUSED,
1159	    rtx mem1 ATTRIBUTE_UNUSED,
1160	    rtx mem2 ATTRIBUTE_UNUSED)
1161{
1162  enum DEPS_ADJUST_RESULT res = DEP_PRESENT;
1163  enum reg_note old_type = DEP_TYPE (dep);
1164  bool was_spec = dep_spec_p (dep);
1165
1166  DEP_NONREG (dep) |= DEP_NONREG (new_dep);
1167  DEP_MULTIPLE (dep) = 1;
1168
1169  /* If this is a more restrictive type of dependence than the
1170     existing one, then change the existing dependence to this
1171     type.  */
1172  if ((int) DEP_TYPE (new_dep) < (int) old_type)
1173    {
1174      DEP_TYPE (dep) = DEP_TYPE (new_dep);
1175      res = DEP_CHANGED;
1176    }
1177
1178  if (current_sched_info->flags & USE_DEPS_LIST)
1179    /* Update DEP_STATUS.  */
1180    {
1181      ds_t dep_status = DEP_STATUS (dep);
1182      ds_t ds = DEP_STATUS (new_dep);
1183      ds_t new_status = ds | dep_status;
1184
1185      if (new_status & SPECULATIVE)
1186	{
1187	  /* Either existing dep or a dep we're adding or both are
1188	     speculative.  */
1189	  if (!(ds & SPECULATIVE)
1190	      || !(dep_status & SPECULATIVE))
1191	    /* The new dep can't be speculative.  */
1192	    new_status &= ~SPECULATIVE;
1193	  else
1194	    {
1195	      /* Both are speculative.  Merge probabilities.  */
1196	      if (mem1 != NULL)
1197		{
1198		  dw_t dw;
1199
1200		  dw = estimate_dep_weak (mem1, mem2);
1201		  ds = set_dep_weak (ds, BEGIN_DATA, dw);
1202		}
1203
1204	      new_status = ds_merge (dep_status, ds);
1205	    }
1206	}
1207
1208      ds = new_status;
1209
1210      if (dep_status != ds)
1211	{
1212	  DEP_STATUS (dep) = ds;
1213	  res = DEP_CHANGED;
1214	}
1215    }
1216
1217  if (was_spec && !dep_spec_p (dep))
1218    /* The old dep was speculative, but now it isn't.  */
1219    change_spec_dep_to_hard (sd_it);
1220
1221  if (true_dependency_cache != NULL
1222      && res == DEP_CHANGED)
1223    update_dependency_caches (dep, old_type);
1224
1225  return res;
1226}
1227
/* Add or update a dependence described by DEP.
1229   MEM1 and MEM2, if non-null, correspond to memory locations in case of
1230   data speculation.
1231
1232   The function returns a value indicating if an old entry has been changed
1233   or a new entry has been added to insn's backward deps or nothing has
1234   been updated at all.  */
1235static enum DEPS_ADJUST_RESULT
1236add_or_update_dep_1 (dep_t new_dep, bool resolved_p,
1237		     rtx mem1 ATTRIBUTE_UNUSED, rtx mem2 ATTRIBUTE_UNUSED)
1238{
1239  bool maybe_present_p = true;
1240  bool present_p = false;
1241
1242  gcc_assert (INSN_P (DEP_PRO (new_dep)) && INSN_P (DEP_CON (new_dep))
1243	      && DEP_PRO (new_dep) != DEP_CON (new_dep));
1244
1245#ifdef ENABLE_CHECKING
1246  check_dep (new_dep, mem1 != NULL);
1247#endif
1248
1249  if (true_dependency_cache != NULL)
1250    {
1251      switch (ask_dependency_caches (new_dep))
1252	{
1253	case DEP_PRESENT:
1254	  dep_t present_dep;
1255	  sd_iterator_def sd_it;
1256
1257	  present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
1258						      DEP_CON (new_dep),
1259						      resolved_p, &sd_it);
1260	  DEP_MULTIPLE (present_dep) = 1;
1261	  return DEP_PRESENT;
1262
1263	case DEP_CHANGED:
1264	  maybe_present_p = true;
1265	  present_p = true;
1266	  break;
1267
1268	case DEP_CREATED:
1269	  maybe_present_p = false;
1270	  present_p = false;
1271	  break;
1272
1273	default:
1274	  gcc_unreachable ();
1275	  break;
1276	}
1277    }
1278
1279  /* Check that we don't already have this dependence.  */
1280  if (maybe_present_p)
1281    {
1282      dep_t present_dep;
1283      sd_iterator_def sd_it;
1284
1285      gcc_assert (true_dependency_cache == NULL || present_p);
1286
1287      present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
1288						  DEP_CON (new_dep),
1289						  resolved_p, &sd_it);
1290
1291      if (present_dep != NULL)
1292	/* We found an existing dependency between ELEM and INSN.  */
1293	return update_dep (present_dep, new_dep, sd_it, mem1, mem2);
1294      else
	/* We didn't find a dep; it shouldn't be present in the cache.  */
1296	gcc_assert (!present_p);
1297    }
1298
1299  /* Might want to check one level of transitivity to save conses.
1300     This check should be done in maybe_add_or_update_dep_1.
1301     Since we made it to add_or_update_dep_1, we must create
1302     (or update) a link.  */
1303
1304  if (mem1 != NULL_RTX)
1305    {
1306      gcc_assert (sched_deps_info->generate_spec_deps);
1307      DEP_STATUS (new_dep) = set_dep_weak (DEP_STATUS (new_dep), BEGIN_DATA,
1308					   estimate_dep_weak (mem1, mem2));
1309    }
1310
1311  sd_add_dep (new_dep, resolved_p);
1312
1313  return DEP_CREATED;
1314}
1315
1316/* Initialize BACK_LIST_PTR with consumer's backward list and
1317   FORW_LIST_PTR with producer's forward list.  If RESOLVED_P is true
1318   initialize with lists that hold resolved deps.  */
1319static void
1320get_back_and_forw_lists (dep_t dep, bool resolved_p,
1321			 deps_list_t *back_list_ptr,
1322			 deps_list_t *forw_list_ptr)
1323{
1324  rtx_insn *con = DEP_CON (dep);
1325
1326  if (!resolved_p)
1327    {
1328      if (dep_spec_p (dep))
1329	*back_list_ptr = INSN_SPEC_BACK_DEPS (con);
1330      else
1331	*back_list_ptr = INSN_HARD_BACK_DEPS (con);
1332
1333      *forw_list_ptr = INSN_FORW_DEPS (DEP_PRO (dep));
1334    }
1335  else
1336    {
1337      *back_list_ptr = INSN_RESOLVED_BACK_DEPS (con);
1338      *forw_list_ptr = INSN_RESOLVED_FORW_DEPS (DEP_PRO (dep));
1339    }
1340}
1341
1342/* Add dependence described by DEP.
1343   If RESOLVED_P is true treat the dependence as a resolved one.  */
1344void
1345sd_add_dep (dep_t dep, bool resolved_p)
1346{
1347  dep_node_t n = create_dep_node ();
1348  deps_list_t con_back_deps;
1349  deps_list_t pro_forw_deps;
1350  rtx_insn *elem = DEP_PRO (dep);
1351  rtx_insn *insn = DEP_CON (dep);
1352
1353  gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);
1354
1355  if ((current_sched_info->flags & DO_SPECULATION) == 0
1356      || !sched_insn_is_legitimate_for_speculation_p (insn, DEP_STATUS (dep)))
1357    DEP_STATUS (dep) &= ~SPECULATIVE;
1358
1359  copy_dep (DEP_NODE_DEP (n), dep);
1360
1361  get_back_and_forw_lists (dep, resolved_p, &con_back_deps, &pro_forw_deps);
1362
1363  add_to_deps_list (DEP_NODE_BACK (n), con_back_deps);
1364
1365#ifdef ENABLE_CHECKING
1366  check_dep (dep, false);
1367#endif
1368
1369  add_to_deps_list (DEP_NODE_FORW (n), pro_forw_deps);
1370
  /* If we are adding a dependency to INSN's backward dependencies, then
     note that in the bitmap caches of dependency information.  */
1373  if (true_dependency_cache != NULL)
1374    set_dependency_caches (dep);
1375}
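
/* A sketch (not taken verbatim from a caller) of how a new dependence is
   typically built and registered; ELEM and INSN stand for hypothetical
   producer and consumer insns:

     dep_def _dep, *dep = &_dep;

     init_dep (dep, elem, insn, REG_DEP_ANTI);
     sd_add_dep (dep, false);

   sd_copy_back_deps below uses the same stack-allocated dep_def pattern
   (via copy_dep), while haifa_note_dep builds the dep with init_dep and
   routes it through maybe_add_or_update_dep_1 so that it can be merged
   with an already existing dependence.  */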
1376
/* Add or update a backward dependence described by DEP, treating it as
   resolved if RESOLVED_P is true.  This function is a convenience wrapper
   around add_or_update_dep_1.  */
1380enum DEPS_ADJUST_RESULT
1381sd_add_or_update_dep (dep_t dep, bool resolved_p)
1382{
1383  return add_or_update_dep_1 (dep, resolved_p, NULL_RTX, NULL_RTX);
1384}
1385
/* Resolve the dependence pointed to by SD_IT.
1387   SD_IT will advance to the next element.  */
1388void
1389sd_resolve_dep (sd_iterator_def sd_it)
1390{
1391  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
1392  dep_t dep = DEP_NODE_DEP (node);
1393  rtx_insn *pro = DEP_PRO (dep);
1394  rtx_insn *con = DEP_CON (dep);
1395
1396  if (dep_spec_p (dep))
1397    move_dep_link (DEP_NODE_BACK (node), INSN_SPEC_BACK_DEPS (con),
1398		   INSN_RESOLVED_BACK_DEPS (con));
1399  else
1400    move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
1401		   INSN_RESOLVED_BACK_DEPS (con));
1402
1403  move_dep_link (DEP_NODE_FORW (node), INSN_FORW_DEPS (pro),
1404		 INSN_RESOLVED_FORW_DEPS (pro));
1405}
1406
1407/* Perform the inverse operation of sd_resolve_dep.  Restore the dependence
1408   pointed to by SD_IT to unresolved state.  */
1409void
1410sd_unresolve_dep (sd_iterator_def sd_it)
1411{
1412  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
1413  dep_t dep = DEP_NODE_DEP (node);
1414  rtx_insn *pro = DEP_PRO (dep);
1415  rtx_insn *con = DEP_CON (dep);
1416
1417  if (dep_spec_p (dep))
1418    move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
1419		   INSN_SPEC_BACK_DEPS (con));
1420  else
1421    move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
1422		   INSN_HARD_BACK_DEPS (con));
1423
1424  move_dep_link (DEP_NODE_FORW (node), INSN_RESOLVED_FORW_DEPS (pro),
1425		 INSN_FORW_DEPS (pro));
1426}
1427
1428/* Make TO depend on all the FROM's producers.
1429   If RESOLVED_P is true add dependencies to the resolved lists.  */
1430void
1431sd_copy_back_deps (rtx_insn *to, rtx_insn *from, bool resolved_p)
1432{
1433  sd_list_types_def list_type;
1434  sd_iterator_def sd_it;
1435  dep_t dep;
1436
1437  list_type = resolved_p ? SD_LIST_RES_BACK : SD_LIST_BACK;
1438
1439  FOR_EACH_DEP (from, list_type, sd_it, dep)
1440    {
1441      dep_def _new_dep, *new_dep = &_new_dep;
1442
1443      copy_dep (new_dep, dep);
1444      DEP_CON (new_dep) = to;
1445      sd_add_dep (new_dep, resolved_p);
1446    }
1447}
1448
1449/* Remove a dependency referred to by SD_IT.
1450   SD_IT will point to the next dependence after removal.  */
1451void
1452sd_delete_dep (sd_iterator_def sd_it)
1453{
1454  dep_node_t n = DEP_LINK_NODE (*sd_it.linkp);
1455  dep_t dep = DEP_NODE_DEP (n);
1456  rtx_insn *pro = DEP_PRO (dep);
1457  rtx_insn *con = DEP_CON (dep);
1458  deps_list_t con_back_deps;
1459  deps_list_t pro_forw_deps;
1460
1461  if (true_dependency_cache != NULL)
1462    {
1463      int elem_luid = INSN_LUID (pro);
1464      int insn_luid = INSN_LUID (con);
1465
1466      bitmap_clear_bit (&true_dependency_cache[insn_luid], elem_luid);
1467      bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
1468      bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
1469      bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
1470
1471      if (current_sched_info->flags & DO_SPECULATION)
1472	bitmap_clear_bit (&spec_dependency_cache[insn_luid], elem_luid);
1473    }
1474
1475  get_back_and_forw_lists (dep, sd_it.resolved_p,
1476			   &con_back_deps, &pro_forw_deps);
1477
1478  remove_from_deps_list (DEP_NODE_BACK (n), con_back_deps);
1479  remove_from_deps_list (DEP_NODE_FORW (n), pro_forw_deps);
1480
1481  delete_dep_node (n);
1482}
1483
1484/* Dump size of the lists.  */
1485#define DUMP_LISTS_SIZE (2)
1486
1487/* Dump dependencies of the lists.  */
1488#define DUMP_LISTS_DEPS (4)
1489
1490/* Dump all information about the lists.  */
1491#define DUMP_LISTS_ALL (DUMP_LISTS_SIZE | DUMP_LISTS_DEPS)
1492
1493/* Dump deps_lists of INSN specified by TYPES to DUMP.
1494   FLAGS is a bit mask specifying what information about the lists needs
1495   to be printed.
1496   If FLAGS has the very first bit set, then dump all information about
1497   the lists and propagate this bit into the callee dump functions.  */
1498static void
1499dump_lists (FILE *dump, rtx insn, sd_list_types_def types, int flags)
1500{
1501  sd_iterator_def sd_it;
1502  dep_t dep;
1503  int all;
1504
1505  all = (flags & 1);
1506
1507  if (all)
1508    flags |= DUMP_LISTS_ALL;
1509
1510  fprintf (dump, "[");
1511
1512  if (flags & DUMP_LISTS_SIZE)
1513    fprintf (dump, "%d; ", sd_lists_size (insn, types));
1514
1515  if (flags & DUMP_LISTS_DEPS)
1516    {
1517      FOR_EACH_DEP (insn, types, sd_it, dep)
1518	{
1519	  dump_dep (dump, dep, dump_dep_flags | all);
1520	  fprintf (dump, " ");
1521	}
1522    }
1523}
1524
1525/* Dump all information about deps_lists of INSN specified by TYPES
1526   to STDERR.  */
1527void
1528sd_debug_lists (rtx insn, sd_list_types_def types)
1529{
1530  dump_lists (stderr, insn, types, 1);
1531  fprintf (stderr, "\n");
1532}
1533
1534/* A wrapper around add_dependence_1, to add a dependence of CON on
1535   PRO, with type DEP_TYPE.  This function implements special handling
1536   for REG_DEP_CONTROL dependencies.  For these, we optionally promote
1537   the type to REG_DEP_ANTI if we can determine that predication is
1538   impossible; otherwise we add additional true dependencies on the
1539   INSN_COND_DEPS list of the jump (which PRO must be).  */
1540void
1541add_dependence (rtx_insn *con, rtx_insn *pro, enum reg_note dep_type)
1542{
1543  if (dep_type == REG_DEP_CONTROL
1544      && !(current_sched_info->flags & DO_PREDICATION))
1545    dep_type = REG_DEP_ANTI;
1546
1547  /* A REG_DEP_CONTROL dependence may be eliminated through predication,
1548     so we must also make the insn dependent on the setter of the
1549     condition.  */
1550  if (dep_type == REG_DEP_CONTROL)
1551    {
1552      rtx_insn *real_pro = pro;
1553      rtx_insn *other = real_insn_for_shadow (real_pro);
1554      rtx cond;
1555
1556      if (other != NULL_RTX)
1557	real_pro = other;
1558      cond = sched_get_reverse_condition_uncached (real_pro);
1559      /* Verify that the insn does not use a different value in
1560	 the condition register than the one that was present at
1561	 the jump.  */
1562      if (cond == NULL_RTX)
1563	dep_type = REG_DEP_ANTI;
1564      else if (INSN_CACHED_COND (real_pro) == const_true_rtx)
1565	{
1566	  HARD_REG_SET uses;
1567	  CLEAR_HARD_REG_SET (uses);
1568	  note_uses (&PATTERN (con), record_hard_reg_uses, &uses);
1569	  if (TEST_HARD_REG_BIT (uses, REGNO (XEXP (cond, 0))))
1570	    dep_type = REG_DEP_ANTI;
1571	}
1572      if (dep_type == REG_DEP_CONTROL)
1573	{
1574	  if (sched_verbose >= 5)
1575	    fprintf (sched_dump, "making DEP_CONTROL for %d\n",
1576		     INSN_UID (real_pro));
1577	  add_dependence_list (con, INSN_COND_DEPS (real_pro), 0,
1578			       REG_DEP_TRUE, false);
1579	}
1580    }
1581
1582  add_dependence_1 (con, pro, dep_type);
1583}
1584
1585/* A convenience wrapper to operate on an entire list.  HARD should be
1586   true if DEP_NONREG should be set on newly created dependencies.  */
1587
1588static void
1589add_dependence_list (rtx_insn *insn, rtx_insn_list *list, int uncond,
1590		     enum reg_note dep_type, bool hard)
1591{
1592  mark_as_hard = hard;
1593  for (; list; list = list->next ())
1594    {
1595      if (uncond || ! sched_insns_conditions_mutex_p (insn, list->insn ()))
1596	add_dependence (insn, list->insn (), dep_type);
1597    }
1598  mark_as_hard = false;
1599}
1600
1601/* Similar, but free *LISTP at the same time, when the context
1602   is not readonly.  HARD should be true if DEP_NONREG should be set on
1603   newly created dependencies.  */
1604
1605static void
1606add_dependence_list_and_free (struct deps_desc *deps, rtx_insn *insn,
1607			      rtx_insn_list **listp,
1608                              int uncond, enum reg_note dep_type, bool hard)
1609{
1610  add_dependence_list (insn, *listp, uncond, dep_type, hard);
1611
1612  /* We don't want to short-circuit dependencies involving debug
1613     insns, because they may cause actual dependencies to be
1614     disregarded.  */
1615  if (deps->readonly || DEBUG_INSN_P (insn))
1616    return;
1617
1618  free_INSN_LIST_list (listp);
1619}
1620
1621/* Remove all occurrences of INSN from LIST.  Return the number of
1622   occurrences removed.  */
1623
1624static int
1625remove_from_dependence_list (rtx insn, rtx_insn_list **listp)
1626{
1627  int removed = 0;
1628
1629  while (*listp)
1630    {
1631      if ((*listp)->insn () == insn)
1632        {
1633          remove_free_INSN_LIST_node (listp);
1634          removed++;
1635          continue;
1636        }
1637
1638      listp = (rtx_insn_list **)&XEXP (*listp, 1);
1639    }
1640
1641  return removed;
1642}
1643
1644/* Same as above, but process two lists at once.  */
1645static int
1646remove_from_both_dependence_lists (rtx insn,
1647				   rtx_insn_list **listp,
1648				   rtx_expr_list **exprp)
1649{
1650  int removed = 0;
1651
1652  while (*listp)
1653    {
1654      if (XEXP (*listp, 0) == insn)
1655        {
1656          remove_free_INSN_LIST_node (listp);
1657          remove_free_EXPR_LIST_node (exprp);
1658          removed++;
1659          continue;
1660        }
1661
1662      listp = (rtx_insn_list **)&XEXP (*listp, 1);
1663      exprp = (rtx_expr_list **)&XEXP (*exprp, 1);
1664    }
1665
1666  return removed;
1667}
1668
1669/* Clear all dependencies for an insn.  */
1670static void
1671delete_all_dependences (rtx insn)
1672{
1673  sd_iterator_def sd_it;
1674  dep_t dep;
1675
  /* The loop below could be optimized to clear the caches and back_deps
     in one call, but that would duplicate code from sd_delete_dep ().  */
1679
1680  for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
1681       sd_iterator_cond (&sd_it, &dep);)
1682    sd_delete_dep (sd_it);
1683}
1684
1685/* All insns in a scheduling group except the first should only have
1686   dependencies on the previous insn in the group.  So we find the
1687   first instruction in the scheduling group by walking the dependence
1688   chains backwards. Then we add the dependencies for the group to
1689   the previous nonnote insn.  */
1690
1691static void
1692chain_to_prev_insn (rtx_insn *insn)
1693{
1694  sd_iterator_def sd_it;
1695  dep_t dep;
1696  rtx_insn *prev_nonnote;
1697
1698  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
1699    {
1700      rtx_insn *i = insn;
1701      rtx_insn *pro = DEP_PRO (dep);
1702
1703      do
1704	{
1705	  i = prev_nonnote_insn (i);
1706
1707	  if (pro == i)
1708	    goto next_link;
1709	} while (SCHED_GROUP_P (i) || DEBUG_INSN_P (i));
1710
1711      if (! sched_insns_conditions_mutex_p (i, pro))
1712	add_dependence (i, pro, DEP_TYPE (dep));
1713    next_link:;
1714    }
1715
1716  delete_all_dependences (insn);
1717
1718  prev_nonnote = prev_nonnote_nondebug_insn (insn);
1719  if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote)
1720      && ! sched_insns_conditions_mutex_p (insn, prev_nonnote))
1721    add_dependence (insn, prev_nonnote, REG_DEP_ANTI);
1722}
1723
1724/* Process an insn's memory dependencies.  There are four kinds of
1725   dependencies:
1726
1727   (0) read dependence: read follows read
1728   (1) true dependence: read follows write
1729   (2) output dependence: write follows write
1730   (3) anti dependence: write follows read
1731
1732   We are careful to build only dependencies which actually exist, and
1733   use transitivity to avoid building too many links.  */
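
/* Illustrative pseudo-code for the four kinds, with A standing for one
   memory location (not actual RTL):

     read dependence:    r1 = MEM[A];  ...  r2 = MEM[A];
     true dependence:    MEM[A] = r0;  ...  r1 = MEM[A];
     output dependence:  MEM[A] = r0;  ...  MEM[A] = r2;
     anti dependence:    r1 = MEM[A];  ...  MEM[A] = r2;  */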
1734
1735/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
1736   The MEM is a memory reference contained within INSN, which we are saving
1737   so that we can do memory aliasing on it.  */
1738
1739static void
1740add_insn_mem_dependence (struct deps_desc *deps, bool read_p,
1741			 rtx_insn *insn, rtx mem)
1742{
1743  rtx_insn_list **insn_list;
1744  rtx_insn_list *insn_node;
1745  rtx_expr_list **mem_list;
1746  rtx_expr_list *mem_node;
1747
1748  gcc_assert (!deps->readonly);
1749  if (read_p)
1750    {
1751      insn_list = &deps->pending_read_insns;
1752      mem_list = &deps->pending_read_mems;
1753      if (!DEBUG_INSN_P (insn))
1754	deps->pending_read_list_length++;
1755    }
1756  else
1757    {
1758      insn_list = &deps->pending_write_insns;
1759      mem_list = &deps->pending_write_mems;
1760      deps->pending_write_list_length++;
1761    }
1762
1763  insn_node = alloc_INSN_LIST (insn, *insn_list);
1764  *insn_list = insn_node;
1765
1766  if (sched_deps_info->use_cselib)
1767    {
1768      mem = shallow_copy_rtx (mem);
1769      XEXP (mem, 0) = cselib_subst_to_values_from_insn (XEXP (mem, 0),
1770							GET_MODE (mem), insn);
1771    }
1772  mem_node = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);
1773  *mem_list = mem_node;
1774}
1775
1776/* Make a dependency between every memory reference on the pending lists
1777   and INSN, thus flushing the pending lists.  FOR_READ is true if emitting
1778   dependencies for a read operation, similarly with FOR_WRITE.  */
1779
1780static void
1781flush_pending_lists (struct deps_desc *deps, rtx_insn *insn, int for_read,
1782		     int for_write)
1783{
1784  if (for_write)
1785    {
1786      add_dependence_list_and_free (deps, insn, &deps->pending_read_insns,
1787                                    1, REG_DEP_ANTI, true);
1788      if (!deps->readonly)
1789        {
1790          free_EXPR_LIST_list (&deps->pending_read_mems);
1791          deps->pending_read_list_length = 0;
1792        }
1793    }
1794
1795  add_dependence_list_and_free (deps, insn, &deps->pending_write_insns, 1,
1796				for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT,
1797				true);
1798
1799  add_dependence_list_and_free (deps, insn,
1800                                &deps->last_pending_memory_flush, 1,
1801                                for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT,
1802				true);
1803
1804  add_dependence_list_and_free (deps, insn, &deps->pending_jump_insns, 1,
1805				REG_DEP_ANTI, true);
1806
1807  if (DEBUG_INSN_P (insn))
1808    {
1809      if (for_write)
1810	free_INSN_LIST_list (&deps->pending_read_insns);
1811      free_INSN_LIST_list (&deps->pending_write_insns);
1812      free_INSN_LIST_list (&deps->last_pending_memory_flush);
1813      free_INSN_LIST_list (&deps->pending_jump_insns);
1814    }
1815
1816  if (!deps->readonly)
1817    {
1818      free_EXPR_LIST_list (&deps->pending_write_mems);
1819      deps->pending_write_list_length = 0;
1820
1821      deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
1822      deps->pending_flush_length = 1;
1823    }
1824  mark_as_hard = false;
1825}
1826
/* The instruction whose dependencies we are analyzing.  */
1828static rtx_insn *cur_insn = NULL;
1829
1830/* Implement hooks for haifa scheduler.  */
1831
1832static void
1833haifa_start_insn (rtx_insn *insn)
1834{
1835  gcc_assert (insn && !cur_insn);
1836
1837  cur_insn = insn;
1838}
1839
1840static void
1841haifa_finish_insn (void)
1842{
1843  cur_insn = NULL;
1844}
1845
1846void
1847haifa_note_reg_set (int regno)
1848{
1849  SET_REGNO_REG_SET (reg_pending_sets, regno);
1850}
1851
1852void
1853haifa_note_reg_clobber (int regno)
1854{
1855  SET_REGNO_REG_SET (reg_pending_clobbers, regno);
1856}
1857
1858void
1859haifa_note_reg_use (int regno)
1860{
1861  SET_REGNO_REG_SET (reg_pending_uses, regno);
1862}
1863
1864static void
1865haifa_note_mem_dep (rtx mem, rtx pending_mem, rtx_insn *pending_insn, ds_t ds)
1866{
1867  if (!(ds & SPECULATIVE))
1868    {
1869      mem = NULL_RTX;
1870      pending_mem = NULL_RTX;
1871    }
1872  else
1873    gcc_assert (ds & BEGIN_DATA);
1874
1875  {
1876    dep_def _dep, *dep = &_dep;
1877
1878    init_dep_1 (dep, pending_insn, cur_insn, ds_to_dt (ds),
1879                current_sched_info->flags & USE_DEPS_LIST ? ds : 0);
1880    DEP_NONREG (dep) = 1;
1881    maybe_add_or_update_dep_1 (dep, false, pending_mem, mem);
1882  }
1883
1884}
1885
1886static void
1887haifa_note_dep (rtx_insn *elem, ds_t ds)
1888{
1889  dep_def _dep;
1890  dep_t dep = &_dep;
1891
1892  init_dep (dep, elem, cur_insn, ds_to_dt (ds));
1893  if (mark_as_hard)
1894    DEP_NONREG (dep) = 1;
1895  maybe_add_or_update_dep_1 (dep, false, NULL_RTX, NULL_RTX);
1896}
1897
1898static void
1899note_reg_use (int r)
1900{
1901  if (sched_deps_info->note_reg_use)
1902    sched_deps_info->note_reg_use (r);
1903}
1904
1905static void
1906note_reg_set (int r)
1907{
1908  if (sched_deps_info->note_reg_set)
1909    sched_deps_info->note_reg_set (r);
1910}
1911
1912static void
1913note_reg_clobber (int r)
1914{
1915  if (sched_deps_info->note_reg_clobber)
1916    sched_deps_info->note_reg_clobber (r);
1917}
1918
1919static void
1920note_mem_dep (rtx m1, rtx m2, rtx_insn *e, ds_t ds)
1921{
1922  if (sched_deps_info->note_mem_dep)
1923    sched_deps_info->note_mem_dep (m1, m2, e, ds);
1924}
1925
1926static void
1927note_dep (rtx_insn *e, ds_t ds)
1928{
1929  if (sched_deps_info->note_dep)
1930    sched_deps_info->note_dep (e, ds);
1931}
1932
/* Return the reg_note corresponding to DS.  */
1934enum reg_note
1935ds_to_dt (ds_t ds)
1936{
1937  if (ds & DEP_TRUE)
1938    return REG_DEP_TRUE;
1939  else if (ds & DEP_OUTPUT)
1940    return REG_DEP_OUTPUT;
1941  else if (ds & DEP_ANTI)
1942    return REG_DEP_ANTI;
1943  else
1944    {
1945      gcc_assert (ds & DEP_CONTROL);
1946      return REG_DEP_CONTROL;
1947    }
1948}
1949
1950
1951
1952/* Functions for computation of info needed for register pressure
1953   sensitive insn scheduling.  */
1954
1955
1956/* Allocate and return reg_use_data structure for REGNO and INSN.  */
1957static struct reg_use_data *
1958create_insn_reg_use (int regno, rtx_insn *insn)
1959{
1960  struct reg_use_data *use;
1961
1962  use = (struct reg_use_data *) xmalloc (sizeof (struct reg_use_data));
1963  use->regno = regno;
1964  use->insn = insn;
1965  use->next_insn_use = INSN_REG_USE_LIST (insn);
1966  INSN_REG_USE_LIST (insn) = use;
1967  return use;
1968}
1969
1970/* Allocate reg_set_data structure for REGNO and INSN.  */
1971static void
1972create_insn_reg_set (int regno, rtx insn)
1973{
1974  struct reg_set_data *set;
1975
1976  set = (struct reg_set_data *) xmalloc (sizeof (struct reg_set_data));
1977  set->regno = regno;
1978  set->insn = insn;
1979  set->next_insn_set = INSN_REG_SET_LIST (insn);
1980  INSN_REG_SET_LIST (insn) = set;
1981}
1982
1983/* Set up insn register uses for INSN and dependency context DEPS.  */
1984static void
1985setup_insn_reg_uses (struct deps_desc *deps, rtx_insn *insn)
1986{
1987  unsigned i;
1988  reg_set_iterator rsi;
1989  struct reg_use_data *use, *use2, *next;
1990  struct deps_reg *reg_last;
1991
1992  EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
1993    {
1994      if (i < FIRST_PSEUDO_REGISTER
1995	  && TEST_HARD_REG_BIT (ira_no_alloc_regs, i))
1996	continue;
1997
1998      if (find_regno_note (insn, REG_DEAD, i) == NULL_RTX
1999	  && ! REGNO_REG_SET_P (reg_pending_sets, i)
2000	  && ! REGNO_REG_SET_P (reg_pending_clobbers, i))
2001	/* Ignore use which is not dying.  */
2002	continue;
2003
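      /* Start the cycle of uses of register I with this insn's use alone.  */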
2004      use = create_insn_reg_use (i, insn);
2005      use->next_regno_use = use;
2006      reg_last = &deps->reg_last[i];
2007
2008      /* Create the cycle list of uses.  */
2009      for (rtx_insn_list *list = reg_last->uses; list; list = list->next ())
2010	{
2011	  use2 = create_insn_reg_use (i, list->insn ());
2012	  next = use->next_regno_use;
2013	  use->next_regno_use = use2;
2014	  use2->next_regno_use = next;
2015	}
2016    }
2017}
2018
2019/* Register pressure info for the currently processed insn.  */
2020static struct reg_pressure_data reg_pressure_info[N_REG_CLASSES];
2021
2022/* Return TRUE if INSN has the use structure for REGNO.  */
2023static bool
2024insn_use_p (rtx insn, int regno)
2025{
2026  struct reg_use_data *use;
2027
2028  for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
2029    if (use->regno == regno)
2030      return true;
2031  return false;
2032}
2033
/* Update the register pressure info after the birth of pseudo register
   REGNO in INSN.  CLOBBER_P and UNUSED_P say, respectively, whether the
   register is set by a CLOBBER or is unused after the insn.  */
2037static void
2038mark_insn_pseudo_birth (rtx insn, int regno, bool clobber_p, bool unused_p)
2039{
2040  int incr, new_incr;
2041  enum reg_class cl;
2042
2043  gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
2044  cl = sched_regno_pressure_class[regno];
2045  if (cl != NO_REGS)
2046    {
2047      incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
2048      if (clobber_p)
2049	{
2050	  new_incr = reg_pressure_info[cl].clobber_increase + incr;
2051	  reg_pressure_info[cl].clobber_increase = new_incr;
2052	}
2053      else if (unused_p)
2054	{
2055	  new_incr = reg_pressure_info[cl].unused_set_increase + incr;
2056	  reg_pressure_info[cl].unused_set_increase = new_incr;
2057	}
2058      else
2059	{
2060	  new_incr = reg_pressure_info[cl].set_increase + incr;
2061	  reg_pressure_info[cl].set_increase = new_incr;
2062	  if (! insn_use_p (insn, regno))
2063	    reg_pressure_info[cl].change += incr;
2064	  create_insn_reg_set (regno, insn);
2065	}
2066      gcc_assert (new_incr < (1 << INCREASE_BITS));
2067    }
2068}
2069
/* Like mark_insn_pseudo_birth except that NREGS says how many hard
   registers are involved in the birth.  */
2072static void
2073mark_insn_hard_regno_birth (rtx insn, int regno, int nregs,
2074			    bool clobber_p, bool unused_p)
2075{
2076  enum reg_class cl;
2077  int new_incr, last = regno + nregs;
2078
2079  while (regno < last)
2080    {
2081      gcc_assert (regno < FIRST_PSEUDO_REGISTER);
2082      if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
2083	{
2084	  cl = sched_regno_pressure_class[regno];
2085	  if (cl != NO_REGS)
2086	    {
2087	      if (clobber_p)
2088		{
2089		  new_incr = reg_pressure_info[cl].clobber_increase + 1;
2090		  reg_pressure_info[cl].clobber_increase = new_incr;
2091		}
2092	      else if (unused_p)
2093		{
2094		  new_incr = reg_pressure_info[cl].unused_set_increase + 1;
2095		  reg_pressure_info[cl].unused_set_increase = new_incr;
2096		}
2097	      else
2098		{
2099		  new_incr = reg_pressure_info[cl].set_increase + 1;
2100		  reg_pressure_info[cl].set_increase = new_incr;
2101		  if (! insn_use_p (insn, regno))
2102		    reg_pressure_info[cl].change += 1;
2103		  create_insn_reg_set (regno, insn);
2104		}
2105	      gcc_assert (new_incr < (1 << INCREASE_BITS));
2106	    }
2107	}
2108      regno++;
2109    }
2110}
2111
/* Update the register pressure info after the birth of pseudo or hard
   register REG in INSN.  CLOBBER_P and UNUSED_P say, respectively,
   whether the register is set by a CLOBBER or is unused after the
   insn.  */
2116static void
2117mark_insn_reg_birth (rtx insn, rtx reg, bool clobber_p, bool unused_p)
2118{
2119  int regno;
2120
2121  if (GET_CODE (reg) == SUBREG)
2122    reg = SUBREG_REG (reg);
2123
2124  if (! REG_P (reg))
2125    return;
2126
2127  regno = REGNO (reg);
2128  if (regno < FIRST_PSEUDO_REGISTER)
2129    mark_insn_hard_regno_birth (insn, regno,
2130				hard_regno_nregs[regno][GET_MODE (reg)],
2131				clobber_p, unused_p);
2132  else
2133    mark_insn_pseudo_birth (insn, regno, clobber_p, unused_p);
2134}
2135
2136/* Update the register pressure info after death of pseudo register
2137   REGNO.  */
2138static void
2139mark_pseudo_death (int regno)
2140{
2141  int incr;
2142  enum reg_class cl;
2143
2144  gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
2145  cl = sched_regno_pressure_class[regno];
2146  if (cl != NO_REGS)
2147    {
2148      incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
2149      reg_pressure_info[cl].change -= incr;
2150    }
2151}
2152
/* Like mark_pseudo_death except that NREGS says how many hard
   registers are involved in the death.  */
2155static void
2156mark_hard_regno_death (int regno, int nregs)
2157{
2158  enum reg_class cl;
2159  int last = regno + nregs;
2160
2161  while (regno < last)
2162    {
2163      gcc_assert (regno < FIRST_PSEUDO_REGISTER);
2164      if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
2165	{
2166	  cl = sched_regno_pressure_class[regno];
2167	  if (cl != NO_REGS)
2168	    reg_pressure_info[cl].change -= 1;
2169	}
2170      regno++;
2171    }
2172}
2173
2174/* Update the register pressure info after death of pseudo or hard
2175   register REG.  */
2176static void
2177mark_reg_death (rtx reg)
2178{
2179  int regno;
2180
2181  if (GET_CODE (reg) == SUBREG)
2182    reg = SUBREG_REG (reg);
2183
2184  if (! REG_P (reg))
2185    return;
2186
2187  regno = REGNO (reg);
2188  if (regno < FIRST_PSEUDO_REGISTER)
2189    mark_hard_regno_death (regno, hard_regno_nregs[regno][GET_MODE (reg)]);
2190  else
2191    mark_pseudo_death (regno);
2192}
2193
2194/* Process SETTER of REG.  DATA is an insn containing the setter.  */
2195static void
2196mark_insn_reg_store (rtx reg, const_rtx setter, void *data)
2197{
2198  if (setter != NULL_RTX && GET_CODE (setter) != SET)
2199    return;
2200  mark_insn_reg_birth
2201    ((rtx) data, reg, false,
2202     find_reg_note ((const_rtx) data, REG_UNUSED, reg) != NULL_RTX);
2203}
2204
2205/* Like mark_insn_reg_store except notice just CLOBBERs; ignore SETs.  */
2206static void
2207mark_insn_reg_clobber (rtx reg, const_rtx setter, void *data)
2208{
2209  if (GET_CODE (setter) == CLOBBER)
2210    mark_insn_reg_birth ((rtx) data, reg, true, false);
2211}
2212
2213/* Set up reg pressure info related to INSN.  */
2214void
2215init_insn_reg_pressure_info (rtx insn)
2216{
2217  int i, len;
2218  enum reg_class cl;
2219  static struct reg_pressure_data *pressure_info;
2220  rtx link;
2221
2222  gcc_assert (sched_pressure != SCHED_PRESSURE_NONE);
2223
2224  if (! INSN_P (insn))
2225    return;
2226
2227  for (i = 0; i < ira_pressure_classes_num; i++)
2228    {
2229      cl = ira_pressure_classes[i];
2230      reg_pressure_info[cl].clobber_increase = 0;
2231      reg_pressure_info[cl].set_increase = 0;
2232      reg_pressure_info[cl].unused_set_increase = 0;
2233      reg_pressure_info[cl].change = 0;
2234    }
2235
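  /* Account for registers clobbered by the insn pattern.  */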
2236  note_stores (PATTERN (insn), mark_insn_reg_clobber, insn);
2237
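  /* Account for registers set by the insn pattern; a REG_UNUSED note
     marks a set whose result is not used.  */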
2238  note_stores (PATTERN (insn), mark_insn_reg_store, insn);
2239
2240#ifdef AUTO_INC_DEC
2241  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2242    if (REG_NOTE_KIND (link) == REG_INC)
2243      mark_insn_reg_store (XEXP (link, 0), NULL_RTX, insn);
2244#endif
2245
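  /* Account for registers that die in this insn.  */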
2246  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2247    if (REG_NOTE_KIND (link) == REG_DEAD)
2248      mark_reg_death (XEXP (link, 0));
2249
2250  len = sizeof (struct reg_pressure_data) * ira_pressure_classes_num;
2251  pressure_info
2252    = INSN_REG_PRESSURE (insn) = (struct reg_pressure_data *) xmalloc (len);
2253  if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
2254    INSN_MAX_REG_PRESSURE (insn) = (int *) xcalloc (ira_pressure_classes_num
2255						    * sizeof (int), 1);
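  /* Copy the accumulated per-class data into INSN's pressure record.  */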
2256  for (i = 0; i < ira_pressure_classes_num; i++)
2257    {
2258      cl = ira_pressure_classes[i];
2259      pressure_info[i].clobber_increase
2260	= reg_pressure_info[cl].clobber_increase;
2261      pressure_info[i].set_increase = reg_pressure_info[cl].set_increase;
2262      pressure_info[i].unused_set_increase
2263	= reg_pressure_info[cl].unused_set_increase;
2264      pressure_info[i].change = reg_pressure_info[cl].change;
2265    }
2266}
2267
2268
2269
2270
/* Internal variable for the sched_analyze_[12] () functions.
   If it is true, sched_analyze_[12] is looking at the
   topmost SET.  */
2274static bool can_start_lhs_rhs_p;
2275
2276/* Extend reg info for the deps context DEPS given that
2277   we have just generated a register numbered REGNO.  */
2278static void
2279extend_deps_reg_info (struct deps_desc *deps, int regno)
2280{
2281  int max_regno = regno + 1;
2282
2283  gcc_assert (!reload_completed);
2284
2285  /* In a readonly context, it would not hurt to extend info,
2286     but it should not be needed.  */
2287  if (reload_completed && deps->readonly)
2288    {
2289      deps->max_reg = max_regno;
2290      return;
2291    }
2292
2293  if (max_regno > deps->max_reg)
2294    {
2295      deps->reg_last = XRESIZEVEC (struct deps_reg, deps->reg_last,
2296                                   max_regno);
2297      memset (&deps->reg_last[deps->max_reg],
2298              0, (max_regno - deps->max_reg)
2299              * sizeof (struct deps_reg));
2300      deps->max_reg = max_regno;
2301    }
2302}
2303
2304/* Extends REG_INFO_P if needed.  */
2305void
2306maybe_extend_reg_info_p (void)
2307{
2308  /* Extend REG_INFO_P, if needed.  */
2309  if ((unsigned int)max_regno - 1 >= reg_info_p_size)
2310    {
2311      size_t new_reg_info_p_size = max_regno + 128;
2312
2313      gcc_assert (!reload_completed && sel_sched_p ());
2314
2315      reg_info_p = (struct reg_info_t *) xrecalloc (reg_info_p,
2316                                                    new_reg_info_p_size,
2317                                                    reg_info_p_size,
2318                                                    sizeof (*reg_info_p));
2319      reg_info_p_size = new_reg_info_p_size;
2320    }
2321}
2322
2323/* Analyze a single reference to register (reg:MODE REGNO) in INSN.
2324   The type of the reference is specified by REF and can be SET,
2325   CLOBBER, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE.  */
2326
2327static void
2328sched_analyze_reg (struct deps_desc *deps, int regno, machine_mode mode,
2329		   enum rtx_code ref, rtx_insn *insn)
2330{
2331  /* We could emit new pseudos in renaming.  Extend the reg structures.  */
2332  if (!reload_completed && sel_sched_p ()
2333      && (regno >= max_reg_num () - 1 || regno >= deps->max_reg))
2334    extend_deps_reg_info (deps, regno);
2335
2336  maybe_extend_reg_info_p ();
2337
2338  /* A hard reg in a wide mode may really be multiple registers.
2339     If so, mark all of them just like the first.  */
2340  if (regno < FIRST_PSEUDO_REGISTER)
2341    {
2342      int i = hard_regno_nregs[regno][mode];
2343      if (ref == SET)
2344	{
2345	  while (--i >= 0)
2346	    note_reg_set (regno + i);
2347	}
2348      else if (ref == USE)
2349	{
2350	  while (--i >= 0)
2351	    note_reg_use (regno + i);
2352	}
2353      else
2354	{
2355	  while (--i >= 0)
2356	    note_reg_clobber (regno + i);
2357	}
2358    }
2359
2360  /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
2361     it does not reload.  Ignore these as they have served their
2362     purpose already.  */
2363  else if (regno >= deps->max_reg)
2364    {
2365      enum rtx_code code = GET_CODE (PATTERN (insn));
2366      gcc_assert (code == USE || code == CLOBBER);
2367    }
2368
2369  else
2370    {
2371      if (ref == SET)
2372	note_reg_set (regno);
2373      else if (ref == USE)
2374	note_reg_use (regno);
2375      else
2376	note_reg_clobber (regno);
2377
2378      /* Pseudos that are REG_EQUIV to something may be replaced
2379	 by that during reloading.  We need only add dependencies for
2380	the address in the REG_EQUIV note.  */
2381      if (!reload_completed && get_reg_known_equiv_p (regno))
2382	{
2383	  rtx t = get_reg_known_value (regno);
2384	  if (MEM_P (t))
2385	    sched_analyze_2 (deps, XEXP (t, 0), insn);
2386	}
2387
2388      /* Don't let it cross a call after scheduling if it doesn't
2389	 already cross one.  */
2390      if (REG_N_CALLS_CROSSED (regno) == 0)
2391	{
2392	  if (!deps->readonly && ref == USE && !DEBUG_INSN_P (insn))
2393	    deps->sched_before_next_call
2394	      = alloc_INSN_LIST (insn, deps->sched_before_next_call);
2395	  else
2396	    add_dependence_list (insn, deps->last_function_call, 1,
2397				 REG_DEP_ANTI, false);
2398	}
2399    }
2400}
2401
2402/* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
2403   rtx, X, creating all dependencies generated by the write to the
2404   destination of X, and reads of everything mentioned.  */
2405
2406static void
2407sched_analyze_1 (struct deps_desc *deps, rtx x, rtx_insn *insn)
2408{
2409  rtx dest = XEXP (x, 0);
2410  enum rtx_code code = GET_CODE (x);
2411  bool cslr_p = can_start_lhs_rhs_p;
2412
2413  can_start_lhs_rhs_p = false;
2414
2415  gcc_assert (dest);
2416  if (dest == 0)
2417    return;
2418
2419  if (cslr_p && sched_deps_info->start_lhs)
2420    sched_deps_info->start_lhs (dest);
2421
2422  if (GET_CODE (dest) == PARALLEL)
2423    {
2424      int i;
2425
2426      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
2427	if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
2428	  sched_analyze_1 (deps,
2429			   gen_rtx_CLOBBER (VOIDmode,
2430					    XEXP (XVECEXP (dest, 0, i), 0)),
2431			   insn);
2432
2433      if (cslr_p && sched_deps_info->finish_lhs)
2434	sched_deps_info->finish_lhs ();
2435
2436      if (code == SET)
2437	{
2438	  can_start_lhs_rhs_p = cslr_p;
2439
2440	  sched_analyze_2 (deps, SET_SRC (x), insn);
2441
2442	  can_start_lhs_rhs_p = false;
2443	}
2444
2445      return;
2446    }
2447
2448  while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
2449	 || GET_CODE (dest) == ZERO_EXTRACT)
2450    {
2451      if (GET_CODE (dest) == STRICT_LOW_PART
2452	 || GET_CODE (dest) == ZERO_EXTRACT
2453	 || df_read_modify_subreg_p (dest))
2454        {
2455	  /* These both read and modify the result.  We must handle
2456             them as writes to get proper dependencies for following
2457             instructions.  We must handle them as reads to get proper
2458             dependencies from this to previous instructions.
2459             Thus we need to call sched_analyze_2.  */
2460
2461	  sched_analyze_2 (deps, XEXP (dest, 0), insn);
2462	}
2463      if (GET_CODE (dest) == ZERO_EXTRACT)
2464	{
2465	  /* The second and third arguments are values read by this insn.  */
2466	  sched_analyze_2 (deps, XEXP (dest, 1), insn);
2467	  sched_analyze_2 (deps, XEXP (dest, 2), insn);
2468	}
2469      dest = XEXP (dest, 0);
2470    }
2471
2472  if (REG_P (dest))
2473    {
2474      int regno = REGNO (dest);
2475      machine_mode mode = GET_MODE (dest);
2476
2477      sched_analyze_reg (deps, regno, mode, code, insn);
2478
2479#ifdef STACK_REGS
2480      /* Treat all writes to a stack register as modifying the TOS.  */
2481      if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2482	{
2483	  /* Avoid analyzing the same register twice.  */
2484	  if (regno != FIRST_STACK_REG)
2485	    sched_analyze_reg (deps, FIRST_STACK_REG, mode, code, insn);
2486
2487	  add_to_hard_reg_set (&implicit_reg_pending_uses, mode,
2488			       FIRST_STACK_REG);
2489	}
2490#endif
2491    }
2492  else if (MEM_P (dest))
2493    {
2494      /* Writing memory.  */
2495      rtx t = dest;
2496
2497      if (sched_deps_info->use_cselib)
2498	{
2499	  machine_mode address_mode = get_address_mode (dest);
2500
2501	  t = shallow_copy_rtx (dest);
2502	  cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
2503				   GET_MODE (t), insn);
2504	  XEXP (t, 0)
2505	    = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t),
2506						insn);
2507	}
2508      t = canon_rtx (t);
2509
2510      /* Pending lists can't get larger with a readonly context.  */
2511      if (!deps->readonly
2512          && ((deps->pending_read_list_length + deps->pending_write_list_length)
2513              >= MAX_PENDING_LIST_LENGTH))
2514	{
2515	  /* Flush all pending reads and writes to prevent the pending lists
2516	     from getting any larger.  Insn scheduling runs too slowly when
2517	     these lists get long.  When compiling GCC with itself,
2518	     this flush occurs 8 times for sparc, and 10 times for m88k using
2519	     the default value of 32.  */
2520	  flush_pending_lists (deps, insn, false, true);
2521	}
2522      else
2523	{
2524	  rtx_insn_list *pending;
2525	  rtx_expr_list *pending_mem;
2526
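          /* A store after a pending load is an anti-dependence; a store
             after a pending store is an output dependence.  */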
2527	  pending = deps->pending_read_insns;
2528	  pending_mem = deps->pending_read_mems;
2529	  while (pending)
2530	    {
2531	      if (anti_dependence (pending_mem->element (), t)
2532		  && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
2533		note_mem_dep (t, pending_mem->element (), pending->insn (),
2534			      DEP_ANTI);
2535
2536	      pending = pending->next ();
2537	      pending_mem = pending_mem->next ();
2538	    }
2539
2540	  pending = deps->pending_write_insns;
2541	  pending_mem = deps->pending_write_mems;
2542	  while (pending)
2543	    {
2544	      if (output_dependence (pending_mem->element (), t)
2545		  && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
2546		note_mem_dep (t, pending_mem->element (),
2547			      pending->insn (),
2548			      DEP_OUTPUT);
2549
2550	      pending = pending->next ();
	      pending_mem = pending_mem->next ();
2552	    }
2553
2554	  add_dependence_list (insn, deps->last_pending_memory_flush, 1,
2555			       REG_DEP_ANTI, true);
2556	  add_dependence_list (insn, deps->pending_jump_insns, 1,
2557			       REG_DEP_CONTROL, true);
2558
2559          if (!deps->readonly)
2560            add_insn_mem_dependence (deps, false, insn, dest);
2561	}
2562      sched_analyze_2 (deps, XEXP (dest, 0), insn);
2563    }
2564
2565  if (cslr_p && sched_deps_info->finish_lhs)
2566    sched_deps_info->finish_lhs ();
2567
2568  /* Analyze reads.  */
2569  if (GET_CODE (x) == SET)
2570    {
2571      can_start_lhs_rhs_p = cslr_p;
2572
2573      sched_analyze_2 (deps, SET_SRC (x), insn);
2574
2575      can_start_lhs_rhs_p = false;
2576    }
2577}
2578
2579/* Analyze the uses of memory and registers in rtx X in INSN.  */
2580static void
2581sched_analyze_2 (struct deps_desc *deps, rtx x, rtx_insn *insn)
2582{
2583  int i;
2584  int j;
2585  enum rtx_code code;
2586  const char *fmt;
2587  bool cslr_p = can_start_lhs_rhs_p;
2588
2589  can_start_lhs_rhs_p = false;
2590
2591  gcc_assert (x);
2592  if (x == 0)
2593    return;
2594
2595  if (cslr_p && sched_deps_info->start_rhs)
2596    sched_deps_info->start_rhs (x);
2597
2598  code = GET_CODE (x);
2599
2600  switch (code)
2601    {
2602    CASE_CONST_ANY:
2603    case SYMBOL_REF:
2604    case CONST:
2605    case LABEL_REF:
2606      /* Ignore constants.  */
2607      if (cslr_p && sched_deps_info->finish_rhs)
2608	sched_deps_info->finish_rhs ();
2609
2610      return;
2611
2612#ifdef HAVE_cc0
2613    case CC0:
2614      /* User of CC0 depends on immediately preceding insn.  */
2615      SCHED_GROUP_P (insn) = 1;
      /* Don't move the CC0 setter to another block (it can set up the
	 same flag for previous CC0 users, which is safe).  */
2618      CANT_MOVE (prev_nonnote_insn (insn)) = 1;
2619
2620      if (cslr_p && sched_deps_info->finish_rhs)
2621	sched_deps_info->finish_rhs ();
2622
2623      return;
2624#endif
2625
2626    case REG:
2627      {
2628	int regno = REGNO (x);
2629	machine_mode mode = GET_MODE (x);
2630
2631	sched_analyze_reg (deps, regno, mode, USE, insn);
2632
2633#ifdef STACK_REGS
2634      /* Treat all reads of a stack register as modifying the TOS.  */
2635      if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2636	{
2637	  /* Avoid analyzing the same register twice.  */
2638	  if (regno != FIRST_STACK_REG)
2639	    sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
2640	  sched_analyze_reg (deps, FIRST_STACK_REG, mode, SET, insn);
2641	}
2642#endif
2643
2644	if (cslr_p && sched_deps_info->finish_rhs)
2645	  sched_deps_info->finish_rhs ();
2646
2647	return;
2648      }
2649
2650    case MEM:
2651      {
2652	/* Reading memory.  */
2653	rtx u;
2654	rtx_insn_list *pending;
2655	rtx_expr_list *pending_mem;
2656	rtx t = x;
2657
2658	if (sched_deps_info->use_cselib)
2659	  {
2660	    machine_mode address_mode = get_address_mode (t);
2661
2662	    t = shallow_copy_rtx (t);
2663	    cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
2664				     GET_MODE (t), insn);
2665	    XEXP (t, 0)
2666	      = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t),
2667						  insn);
2668	  }
2669
2670	if (!DEBUG_INSN_P (insn))
2671	  {
2672	    t = canon_rtx (t);
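	    /* Record anti-dependences against conflicting pending reads
	       and true dependences against pending writes.  */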
2673	    pending = deps->pending_read_insns;
2674	    pending_mem = deps->pending_read_mems;
2675	    while (pending)
2676	      {
2677		if (read_dependence (pending_mem->element (), t)
2678		    && ! sched_insns_conditions_mutex_p (insn,
2679							 pending->insn ()))
2680		  note_mem_dep (t, pending_mem->element (),
2681				pending->insn (),
2682				DEP_ANTI);
2683
2684		pending = pending->next ();
2685		pending_mem = pending_mem->next ();
2686	      }
2687
2688	    pending = deps->pending_write_insns;
2689	    pending_mem = deps->pending_write_mems;
2690	    while (pending)
2691	      {
2692		if (true_dependence (pending_mem->element (), VOIDmode, t)
2693		    && ! sched_insns_conditions_mutex_p (insn,
2694							 pending->insn ()))
2695		  note_mem_dep (t, pending_mem->element (),
2696				pending->insn (),
2697				sched_deps_info->generate_spec_deps
2698				? BEGIN_DATA | DEP_TRUE : DEP_TRUE);
2699
2700		pending = pending->next ();
2701		pending_mem = pending_mem->next ();
2702	      }
2703
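	    /* Depend on insns after which the pending memory lists were
	       flushed.  */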
2704	    for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
2705	      add_dependence (insn, as_a <rtx_insn *> (XEXP (u, 0)),
2706			      REG_DEP_ANTI);
2707
2708	    for (u = deps->pending_jump_insns; u; u = XEXP (u, 1))
2709	      if (deps_may_trap_p (x))
2710		{
2711		  if ((sched_deps_info->generate_spec_deps)
2712		      && sel_sched_p () && (spec_info->mask & BEGIN_CONTROL))
2713		    {
2714		      ds_t ds = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
2715					      MAX_DEP_WEAK);
2716
2717		      note_dep (as_a <rtx_insn *> (XEXP (u, 0)), ds);
2718		    }
2719		  else
2720		    add_dependence (insn, as_a <rtx_insn *> (XEXP (u, 0)),
2721				    REG_DEP_CONTROL);
2722		}
2723	  }
2724
2725	/* Always add these dependencies to pending_reads, since
2726	   this insn may be followed by a write.  */
2727	if (!deps->readonly)
2728	  {
2729	    if ((deps->pending_read_list_length
2730		 + deps->pending_write_list_length)
2731		>= MAX_PENDING_LIST_LENGTH
2732		&& !DEBUG_INSN_P (insn))
2733	      flush_pending_lists (deps, insn, true, true);
2734	    add_insn_mem_dependence (deps, true, insn, x);
2735	  }
2736
2737	sched_analyze_2 (deps, XEXP (x, 0), insn);
2738
2739	if (cslr_p && sched_deps_info->finish_rhs)
2740	  sched_deps_info->finish_rhs ();
2741
2742	return;
2743      }
2744
    /* Force pending stores to memory in case a trap handler needs them.
       Also force pending loads from memory; loads and stores can segfault,
       and the signal handler won't be triggered if the trap insn was moved
       above a load or store insn.  */
2749    case TRAP_IF:
2750      flush_pending_lists (deps, insn, true, true);
2751      break;
2752
2753    case PREFETCH:
2754      if (PREFETCH_SCHEDULE_BARRIER_P (x))
2755	reg_pending_barrier = TRUE_BARRIER;
      /* A prefetch insn contains addresses only.  So if the prefetch
	 address has no registers, there will be no dependencies on
	 the prefetch insn.  This is wrong from the code correctness
	 point of view, because such a prefetch can be moved below a
	 jump insn, which usually generates a MOVE_BARRIER that prevents
	 insns containing registers or memories from moving through the
	 barrier.  It is also wrong from the generated code performance
	 point of view, because a prefetch without dependencies will tend
	 to be issued later instead of earlier.  It is hard to generate
	 accurate dependencies for prefetch insns, as a prefetch has only
	 the start address, but it is better to have something than
	 nothing.  */
2768      if (!deps->readonly)
2769	{
2770	  rtx x = gen_rtx_MEM (Pmode, XEXP (PATTERN (insn), 0));
2771	  if (sched_deps_info->use_cselib)
2772	    cselib_lookup_from_insn (x, Pmode, true, VOIDmode, insn);
2773	  add_insn_mem_dependence (deps, true, insn, x);
2774	}
2775      break;
2776
2777    case UNSPEC_VOLATILE:
2778      flush_pending_lists (deps, insn, true, true);
2779      /* FALLTHRU */
2780
2781    case ASM_OPERANDS:
2782    case ASM_INPUT:
2783      {
2784	/* Traditional and volatile asm instructions must be considered to use
2785	   and clobber all hard registers, all pseudo-registers and all of
2786	   memory.  So must TRAP_IF and UNSPEC_VOLATILE operations.
2787
2788	   Consider for instance a volatile asm that changes the fpu rounding
2789	   mode.  An insn should not be moved across this even if it only uses
2790	   pseudo-regs because it might give an incorrectly rounded result.  */
2791	if ((code != ASM_OPERANDS || MEM_VOLATILE_P (x))
2792	    && !DEBUG_INSN_P (insn))
2793	  reg_pending_barrier = TRUE_BARRIER;
2794
2795	/* For all ASM_OPERANDS, we must traverse the vector of input operands.
	   We cannot just fall through here, since then we would be confused
	   by the ASM_INPUT rtxs inside ASM_OPERANDS, which do not indicate
	   traditional asms, unlike their normal usage.  */
2799
2800	if (code == ASM_OPERANDS)
2801	  {
2802	    for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
2803	      sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
2804
2805	    if (cslr_p && sched_deps_info->finish_rhs)
2806	      sched_deps_info->finish_rhs ();
2807
2808	    return;
2809	  }
2810	break;
2811      }
2812
2813    case PRE_DEC:
2814    case POST_DEC:
2815    case PRE_INC:
2816    case POST_INC:
2817      /* These both read and modify the result.  We must handle them as writes
2818         to get proper dependencies for following instructions.  We must handle
2819         them as reads to get proper dependencies from this to previous
2820         instructions.  Thus we need to pass them to both sched_analyze_1
2821         and sched_analyze_2.  We must call sched_analyze_2 first in order
2822         to get the proper antecedent for the read.  */
2823      sched_analyze_2 (deps, XEXP (x, 0), insn);
2824      sched_analyze_1 (deps, x, insn);
2825
2826      if (cslr_p && sched_deps_info->finish_rhs)
2827	sched_deps_info->finish_rhs ();
2828
2829      return;
2830
2831    case POST_MODIFY:
2832    case PRE_MODIFY:
2833      /* op0 = op0 + op1 */
2834      sched_analyze_2 (deps, XEXP (x, 0), insn);
2835      sched_analyze_2 (deps, XEXP (x, 1), insn);
2836      sched_analyze_1 (deps, x, insn);
2837
2838      if (cslr_p && sched_deps_info->finish_rhs)
2839	sched_deps_info->finish_rhs ();
2840
2841      return;
2842
2843    default:
2844      break;
2845    }
2846
2847  /* Other cases: walk the insn.  */
2848  fmt = GET_RTX_FORMAT (code);
2849  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2850    {
2851      if (fmt[i] == 'e')
2852	sched_analyze_2 (deps, XEXP (x, i), insn);
2853      else if (fmt[i] == 'E')
2854	for (j = 0; j < XVECLEN (x, i); j++)
2855	  sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
2856    }
2857
2858  if (cslr_p && sched_deps_info->finish_rhs)
2859    sched_deps_info->finish_rhs ();
2860}
2861
/* Try to group two fusible insns together to prevent the scheduler
   from scheduling them apart.  */
2864
2865static void
2866sched_macro_fuse_insns (rtx_insn *insn)
2867{
2868  rtx_insn *prev;
2869
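  /* For a conditional jump, fuse only if the previous insn modifies the
     fixed condition-code register that the jump reads.  */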
2870  if (any_condjump_p (insn))
2871    {
2872      unsigned int condreg1, condreg2;
2873      rtx cc_reg_1;
2874      targetm.fixed_condition_code_regs (&condreg1, &condreg2);
2875      cc_reg_1 = gen_rtx_REG (CCmode, condreg1);
2876      prev = prev_nonnote_nondebug_insn (insn);
2877      if (!reg_referenced_p (cc_reg_1, PATTERN (insn))
2878          || !prev
2879          || !modified_in_p (cc_reg_1, prev))
2880        return;
2881    }
2882  else
2883    {
2884      rtx insn_set = single_set (insn);
2885
2886      prev = prev_nonnote_nondebug_insn (insn);
2887      if (!prev
2888          || !insn_set
2889          || !single_set (prev))
2890        return;
2891
2892    }
2893
2894  if (targetm.sched.macro_fusion_pair_p (prev, insn))
2895    SCHED_GROUP_P (insn) = 1;
2896
2897}
2898
2899/* Get the implicit reg pending clobbers for INSN and save them in TEMP.  */
2900void
2901get_implicit_reg_pending_clobbers (HARD_REG_SET *temp, rtx_insn *insn)
2902{
2903  extract_insn (insn);
2904  preprocess_constraints (insn);
2905  alternative_mask preferred = get_preferred_alternatives (insn);
2906  ira_implicitly_set_insn_hard_regs (temp, preferred);
2907  AND_COMPL_HARD_REG_SET (*temp, ira_no_alloc_regs);
2908}
2909
2910/* Analyze an INSN with pattern X to find all dependencies.  */
2911static void
2912sched_analyze_insn (struct deps_desc *deps, rtx x, rtx_insn *insn)
2913{
2914  RTX_CODE code = GET_CODE (x);
2915  rtx link;
2916  unsigned i;
2917  reg_set_iterator rsi;
2918
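  /* Before reload, collect the hard registers implicitly clobbered by
     INSN's constraints.  */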
2919  if (! reload_completed)
2920    {
2921      HARD_REG_SET temp;
2922      get_implicit_reg_pending_clobbers (&temp, insn);
2923      IOR_HARD_REG_SET (implicit_reg_pending_clobbers, temp);
2924    }
2925
2926  can_start_lhs_rhs_p = (NONJUMP_INSN_P (insn)
2927			 && code == SET);
2928
2929  /* Group compare and branch insns for macro-fusion.  */
2930  if (targetm.sched.macro_fusion_p
2931      && targetm.sched.macro_fusion_p ())
2932    sched_macro_fuse_insns (insn);
2933
2934  if (may_trap_p (x))
2935    /* Avoid moving trapping instructions across function calls that might
2936       not always return.  */
2937    add_dependence_list (insn, deps->last_function_call_may_noreturn,
2938			 1, REG_DEP_ANTI, true);
2939
2940  /* We must avoid creating a situation in which two successors of the
2941     current block have different unwind info after scheduling.  If at any
2942     point the two paths re-join this leads to incorrect unwind info.  */
2943  /* ??? There are certain situations involving a forced frame pointer in
2944     which, with extra effort, we could fix up the unwind info at a later
2945     CFG join.  However, it seems better to notice these cases earlier
2946     during prologue generation and avoid marking the frame pointer setup
2947     as frame-related at all.  */
2948  if (RTX_FRAME_RELATED_P (insn))
2949    {
2950      /* Make sure prologue insn is scheduled before next jump.  */
2951      deps->sched_before_next_jump
2952	= alloc_INSN_LIST (insn, deps->sched_before_next_jump);
2953
2954      /* Make sure epilogue insn is scheduled after preceding jumps.  */
2955      add_dependence_list (insn, deps->pending_jump_insns, 1, REG_DEP_ANTI,
2956			   true);
2957    }
2958
2959  if (code == COND_EXEC)
2960    {
2961      sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
2962
2963      /* ??? Should be recording conditions so we reduce the number of
2964	 false dependencies.  */
2965      x = COND_EXEC_CODE (x);
2966      code = GET_CODE (x);
2967    }
2968  if (code == SET || code == CLOBBER)
2969    {
2970      sched_analyze_1 (deps, x, insn);
2971
2972      /* Bare clobber insns are used for letting life analysis, reg-stack
2973	 and others know that a value is dead.  Depend on the last call
2974	 instruction so that reg-stack won't get confused.  */
2975      if (code == CLOBBER)
2976	add_dependence_list (insn, deps->last_function_call, 1,
2977			     REG_DEP_OUTPUT, true);
2978    }
2979  else if (code == PARALLEL)
2980    {
2981      for (i = XVECLEN (x, 0); i--;)
2982	{
2983	  rtx sub = XVECEXP (x, 0, i);
2984	  code = GET_CODE (sub);
2985
2986	  if (code == COND_EXEC)
2987	    {
2988	      sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
2989	      sub = COND_EXEC_CODE (sub);
2990	      code = GET_CODE (sub);
2991	    }
2992	  if (code == SET || code == CLOBBER)
2993	    sched_analyze_1 (deps, sub, insn);
2994	  else
2995	    sched_analyze_2 (deps, sub, insn);
2996	}
2997    }
2998  else
2999    sched_analyze_2 (deps, x, insn);
3000
3001  /* Mark registers CLOBBERED or used by called function.  */
3002  if (CALL_P (insn))
3003    {
3004      for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
3005	{
3006	  if (GET_CODE (XEXP (link, 0)) == CLOBBER)
3007	    sched_analyze_1 (deps, XEXP (link, 0), insn);
3008	  else if (GET_CODE (XEXP (link, 0)) != SET)
3009	    sched_analyze_2 (deps, XEXP (link, 0), insn);
3010	}
      /* Don't schedule anything after a tail call; the tail call needs
	 to use at least all call-saved registers.  */
3013      if (SIBLING_CALL_P (insn))
3014	reg_pending_barrier = TRUE_BARRIER;
3015      else if (find_reg_note (insn, REG_SETJMP, NULL))
3016	reg_pending_barrier = MOVE_BARRIER;
3017    }
3018
3019  if (JUMP_P (insn))
3020    {
3021      rtx next;
3022      next = next_nonnote_nondebug_insn (insn);
3023      if (next && BARRIER_P (next))
3024	reg_pending_barrier = MOVE_BARRIER;
3025      else
3026	{
3027	  rtx_insn_list *pending;
3028	  rtx_expr_list *pending_mem;
3029
3030          if (sched_deps_info->compute_jump_reg_dependencies)
3031            {
3032              (*sched_deps_info->compute_jump_reg_dependencies)
3033		(insn, reg_pending_control_uses);
3034
3035              /* Make latency of jump equal to 0 by using anti-dependence.  */
3036              EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
3037                {
3038                  struct deps_reg *reg_last = &deps->reg_last[i];
3039                  add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI,
3040				       false);
3041                  add_dependence_list (insn, reg_last->implicit_sets,
3042				       0, REG_DEP_ANTI, false);
3043                  add_dependence_list (insn, reg_last->clobbers, 0,
3044				       REG_DEP_ANTI, false);
3045                }
3046            }
3047
	  /* All memory writes and volatile reads must happen before the
	     jump.  Non-volatile reads must happen before the jump iff
	     the result is needed by the register use mask computed above.  */
3051
3052	  pending = deps->pending_write_insns;
3053	  pending_mem = deps->pending_write_mems;
3054	  while (pending)
3055	    {
3056	      if (! sched_insns_conditions_mutex_p (insn, pending->insn ()))
3057		add_dependence (insn, pending->insn (),
3058				REG_DEP_OUTPUT);
3059	      pending = pending->next ();
3060	      pending_mem = pending_mem->next ();
3061	    }
3062
3063	  pending = deps->pending_read_insns;
3064	  pending_mem = deps->pending_read_mems;
3065	  while (pending)
3066	    {
3067	      if (MEM_VOLATILE_P (pending_mem->element ())
3068		  && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
3069		add_dependence (insn, pending->insn (),
3070				REG_DEP_OUTPUT);
3071	      pending = pending->next ();
3072	      pending_mem = pending_mem->next ();
3073	    }
3074
3075	  add_dependence_list (insn, deps->last_pending_memory_flush, 1,
3076			       REG_DEP_ANTI, true);
3077	  add_dependence_list (insn, deps->pending_jump_insns, 1,
3078			       REG_DEP_ANTI, true);
3079	}
3080    }
3081
3082  /* If this instruction can throw an exception, then moving it changes
3083     where block boundaries fall.  This is mighty confusing elsewhere.
3084     Therefore, prevent such an instruction from being moved.  Same for
3085     non-jump instructions that define block boundaries.
3086     ??? Unclear whether this is still necessary in EBB mode.  If not,
3087     add_branch_dependences should be adjusted for RGN mode instead.  */
3088  if (((CALL_P (insn) || JUMP_P (insn)) && can_throw_internal (insn))
3089      || (NONJUMP_INSN_P (insn) && control_flow_insn_p (insn)))
3090    reg_pending_barrier = MOVE_BARRIER;
3091
3092  if (sched_pressure != SCHED_PRESSURE_NONE)
3093    {
3094      setup_insn_reg_uses (deps, insn);
3095      init_insn_reg_pressure_info (insn);
3096    }
3097
3098  /* Add register dependencies for insn.  */
3099  if (DEBUG_INSN_P (insn))
3100    {
3101      rtx_insn *prev = deps->last_debug_insn;
3102      rtx u;
3103
3104      if (!deps->readonly)
3105	deps->last_debug_insn = insn;
3106
3107      if (prev)
3108	add_dependence (insn, prev, REG_DEP_ANTI);
3109
3110      add_dependence_list (insn, deps->last_function_call, 1,
3111			   REG_DEP_ANTI, false);
3112
3113      if (!sel_sched_p ())
3114	for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
3115	  add_dependence (insn, as_a <rtx_insn *> (XEXP (u, 0)), REG_DEP_ANTI);
3116
3117      EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
3118	{
3119	  struct deps_reg *reg_last = &deps->reg_last[i];
3120	  add_dependence_list (insn, reg_last->sets, 1, REG_DEP_ANTI, false);
3121	  /* There's no point in making REG_DEP_CONTROL dependencies for
3122	     debug insns.  */
3123	  add_dependence_list (insn, reg_last->clobbers, 1, REG_DEP_ANTI,
3124			       false);
3125
3126	  if (!deps->readonly)
3127	    reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3128	}
3129      CLEAR_REG_SET (reg_pending_uses);
3130
3131      /* Quite often, a debug insn will refer to stuff in the
3132	 previous instruction, but the reason we want this
3133	 dependency here is to make sure the scheduler doesn't
3134	 gratuitously move a debug insn ahead.  This could dirty
3135	 DF flags and cause additional analysis that wouldn't have
3136	 occurred in compilation without debug insns, and such
3137	 additional analysis can modify the generated code.  */
3138      prev = PREV_INSN (insn);
3139
3140      if (prev && NONDEBUG_INSN_P (prev))
3141	add_dependence (insn, prev, REG_DEP_ANTI);
3142    }
3143  else
3144    {
3145      regset_head set_or_clobbered;
3146
3147      EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
3148	{
3149	  struct deps_reg *reg_last = &deps->reg_last[i];
3150	  add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false);
3151	  add_dependence_list (insn, reg_last->implicit_sets, 0, REG_DEP_ANTI,
3152			       false);
3153	  add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE,
3154			       false);
3155
3156	  if (!deps->readonly)
3157	    {
3158	      reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3159	      reg_last->uses_length++;
3160	    }
3161	}
3162
3163      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3164	if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i))
3165	  {
3166	    struct deps_reg *reg_last = &deps->reg_last[i];
3167	    add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false);
3168	    add_dependence_list (insn, reg_last->implicit_sets, 0,
3169				 REG_DEP_ANTI, false);
3170	    add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE,
3171				 false);
3172
3173	    if (!deps->readonly)
3174	      {
3175		reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3176		reg_last->uses_length++;
3177	      }
3178	  }
3179
3180      if (targetm.sched.exposed_pipeline)
3181	{
3182	  INIT_REG_SET (&set_or_clobbered);
3183	  bitmap_ior (&set_or_clobbered, reg_pending_clobbers,
3184		      reg_pending_sets);
3185	  EXECUTE_IF_SET_IN_REG_SET (&set_or_clobbered, 0, i, rsi)
3186	    {
3187	      struct deps_reg *reg_last = &deps->reg_last[i];
3188	      rtx list;
3189	      for (list = reg_last->uses; list; list = XEXP (list, 1))
3190		{
3191		  rtx other = XEXP (list, 0);
3192		  if (INSN_CACHED_COND (other) != const_true_rtx
3193		      && refers_to_regno_p (i, INSN_CACHED_COND (other)))
3194		    INSN_CACHED_COND (other) = const_true_rtx;
3195		}
3196	    }
3197	}
3198
3199      /* If the current insn is conditional, we can't free any
3200	 of the lists.  */
3201      if (sched_has_condition_p (insn))
3202	{
3203	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
3204	    {
3205	      struct deps_reg *reg_last = &deps->reg_last[i];
3206	      add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3207				   false);
3208	      add_dependence_list (insn, reg_last->implicit_sets, 0,
3209				   REG_DEP_ANTI, false);
3210	      add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3211				   false);
3212	      add_dependence_list (insn, reg_last->control_uses, 0,
3213				   REG_DEP_CONTROL, false);
3214
3215	      if (!deps->readonly)
3216		{
3217		  reg_last->clobbers
3218		    = alloc_INSN_LIST (insn, reg_last->clobbers);
3219		  reg_last->clobbers_length++;
3220		}
3221	    }
3222	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
3223	    {
3224	      struct deps_reg *reg_last = &deps->reg_last[i];
3225	      add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3226				   false);
3227	      add_dependence_list (insn, reg_last->implicit_sets, 0,
3228				   REG_DEP_ANTI, false);
3229	      add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT,
3230				   false);
3231	      add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3232				   false);
3233	      add_dependence_list (insn, reg_last->control_uses, 0,
3234				   REG_DEP_CONTROL, false);
3235
3236	      if (!deps->readonly)
3237		reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3238	    }
3239	}
3240      else
3241	{
3242	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
3243	    {
3244	      struct deps_reg *reg_last = &deps->reg_last[i];
3245	      if (reg_last->uses_length >= MAX_PENDING_LIST_LENGTH
3246		  || reg_last->clobbers_length >= MAX_PENDING_LIST_LENGTH)
3247		{
3248		  add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3249						REG_DEP_OUTPUT, false);
3250		  add_dependence_list_and_free (deps, insn,
3251						&reg_last->implicit_sets, 0,
3252						REG_DEP_ANTI, false);
3253		  add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3254						REG_DEP_ANTI, false);
3255		  add_dependence_list_and_free (deps, insn,
3256						&reg_last->control_uses, 0,
3257						REG_DEP_ANTI, false);
3258		  add_dependence_list_and_free (deps, insn,
3259						&reg_last->clobbers, 0,
3260						REG_DEP_OUTPUT, false);
3261
3262		  if (!deps->readonly)
3263		    {
3264		      reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3265		      reg_last->clobbers_length = 0;
3266		      reg_last->uses_length = 0;
3267		    }
3268		}
3269	      else
3270		{
3271		  add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3272				       false);
3273		  add_dependence_list (insn, reg_last->implicit_sets, 0,
3274				       REG_DEP_ANTI, false);
3275		  add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3276				       false);
3277		  add_dependence_list (insn, reg_last->control_uses, 0,
3278				       REG_DEP_CONTROL, false);
3279		}
3280
3281	      if (!deps->readonly)
3282		{
3283		  reg_last->clobbers_length++;
3284		  reg_last->clobbers
3285		    = alloc_INSN_LIST (insn, reg_last->clobbers);
3286		}
3287	    }
3288	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
3289	    {
3290	      struct deps_reg *reg_last = &deps->reg_last[i];
3291
3292	      add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3293					    REG_DEP_OUTPUT, false);
3294	      add_dependence_list_and_free (deps, insn,
3295					    &reg_last->implicit_sets,
3296					    0, REG_DEP_ANTI, false);
3297	      add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
3298					    REG_DEP_OUTPUT, false);
3299	      add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3300					    REG_DEP_ANTI, false);
3301	      add_dependence_list (insn, reg_last->control_uses, 0,
3302				   REG_DEP_CONTROL, false);
3303
3304	      if (!deps->readonly)
3305		{
3306		  reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3307		  reg_last->uses_length = 0;
3308		  reg_last->clobbers_length = 0;
3309		}
3310	    }
3311	}
3312      if (!deps->readonly)
3313	{
3314	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
3315	    {
3316	      struct deps_reg *reg_last = &deps->reg_last[i];
3317	      reg_last->control_uses
3318		= alloc_INSN_LIST (insn, reg_last->control_uses);
3319	    }
3320	}
3321    }
3322
3323  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3324    if (TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
3325      {
3326	struct deps_reg *reg_last = &deps->reg_last[i];
3327	add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI, false);
3328	add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI, false);
3329	add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI, false);
3330	add_dependence_list (insn, reg_last->control_uses, 0, REG_DEP_ANTI,
3331			     false);
3332
3333	if (!deps->readonly)
3334	  reg_last->implicit_sets
3335	    = alloc_INSN_LIST (insn, reg_last->implicit_sets);
3336      }
3337
3338  if (!deps->readonly)
3339    {
3340      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
3341      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
3342      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
3343      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3344	if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i)
3345	    || TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
3346	  SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
3347
3348      /* Set up the pending barrier found.  */
3349      deps->last_reg_pending_barrier = reg_pending_barrier;
3350    }
3351
3352  CLEAR_REG_SET (reg_pending_uses);
3353  CLEAR_REG_SET (reg_pending_clobbers);
3354  CLEAR_REG_SET (reg_pending_sets);
3355  CLEAR_REG_SET (reg_pending_control_uses);
3356  CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
3357  CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
3358
3359  /* Add dependencies if a scheduling barrier was found.  */
3360  if (reg_pending_barrier)
3361    {
      /* In the case of a barrier, most of the added dependencies are not
	 real, so we use anti-dependences here.  */
3364      if (sched_has_condition_p (insn))
3365	{
3366	  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3367	    {
3368	      struct deps_reg *reg_last = &deps->reg_last[i];
3369	      add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3370				   true);
3371	      add_dependence_list (insn, reg_last->sets, 0,
3372				   reg_pending_barrier == TRUE_BARRIER
3373				   ? REG_DEP_TRUE : REG_DEP_ANTI, true);
3374	      add_dependence_list (insn, reg_last->implicit_sets, 0,
3375				   REG_DEP_ANTI, true);
3376	      add_dependence_list (insn, reg_last->clobbers, 0,
3377				   reg_pending_barrier == TRUE_BARRIER
3378				   ? REG_DEP_TRUE : REG_DEP_ANTI, true);
3379	    }
3380	}
3381      else
3382	{
3383	  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3384	    {
3385	      struct deps_reg *reg_last = &deps->reg_last[i];
3386	      add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3387					    REG_DEP_ANTI, true);
3388	      add_dependence_list_and_free (deps, insn,
3389					    &reg_last->control_uses, 0,
3390					    REG_DEP_CONTROL, true);
3391	      add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3392					    reg_pending_barrier == TRUE_BARRIER
3393					    ? REG_DEP_TRUE : REG_DEP_ANTI,
3394					    true);
3395	      add_dependence_list_and_free (deps, insn,
3396					    &reg_last->implicit_sets, 0,
3397					    REG_DEP_ANTI, true);
3398	      add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
3399					    reg_pending_barrier == TRUE_BARRIER
3400					    ? REG_DEP_TRUE : REG_DEP_ANTI,
3401					    true);
3402
3403              if (!deps->readonly)
3404                {
3405                  reg_last->uses_length = 0;
3406                  reg_last->clobbers_length = 0;
3407                }
3408	    }
3409	}
3410
3411      if (!deps->readonly)
3412        for (i = 0; i < (unsigned)deps->max_reg; i++)
3413          {
3414            struct deps_reg *reg_last = &deps->reg_last[i];
3415            reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3416            SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
3417          }
3418
3419      /* Don't flush pending lists on speculative checks for
3420	 selective scheduling.  */
3421      if (!sel_sched_p () || !sel_insn_is_speculation_check (insn))
3422	flush_pending_lists (deps, insn, true, true);
3423
3424      reg_pending_barrier = NOT_A_BARRIER;
3425    }
3426
3427  /* If a post-call group is still open, see if it should remain so.
3428     This insn must be a simple move of a hard reg to a pseudo or
3429     vice-versa.
3430
3431     We must avoid moving these insns for correctness on targets
3432     with small register classes, and for special registers like
3433     PIC_OFFSET_TABLE_REGNUM.  For simplicity, extend this to all
3434     hard regs for all targets.  */
3435
3436  if (deps->in_post_call_group_p)
3437    {
3438      rtx tmp, set = single_set (insn);
3439      int src_regno, dest_regno;
3440
3441      if (set == NULL)
3442	{
3443	  if (DEBUG_INSN_P (insn))
3444	    /* We don't want to mark debug insns as part of the same
3445	       sched group.  We know they really aren't, but if we use
3446	       debug insns to tell that a call group is over, we'll
3447	       get different code if debug insns are not there and
3448	       instructions that follow seem like they should be part
3449	       of the call group.
3450
3451	       Also, if we did, chain_to_prev_insn would move the
3452	       deps of the debug insn to the call insn, modifying
3453	       non-debug post-dependency counts of the debug insn
3454	       dependencies and otherwise messing with the scheduling
3455	       order.
3456
3457	       Instead, let such debug insns be scheduled freely, but
3458	       keep the call group open in case there are insns that
3459	       should be part of it afterwards.  Since we grant debug
3460	       insns higher priority than even sched group insns, it
3461	       will all turn out all right.  */
3462	    goto debug_dont_end_call_group;
3463	  else
3464	    goto end_call_group;
3465	}
3466
3467      tmp = SET_DEST (set);
3468      if (GET_CODE (tmp) == SUBREG)
3469	tmp = SUBREG_REG (tmp);
3470      if (REG_P (tmp))
3471	dest_regno = REGNO (tmp);
3472      else
3473	goto end_call_group;
3474
3475      tmp = SET_SRC (set);
3476      if (GET_CODE (tmp) == SUBREG)
3477	tmp = SUBREG_REG (tmp);
3478      if ((GET_CODE (tmp) == PLUS
3479	   || GET_CODE (tmp) == MINUS)
3480	  && REG_P (XEXP (tmp, 0))
3481	  && REGNO (XEXP (tmp, 0)) == STACK_POINTER_REGNUM
3482	  && dest_regno == STACK_POINTER_REGNUM)
3483	src_regno = STACK_POINTER_REGNUM;
3484      else if (REG_P (tmp))
3485	src_regno = REGNO (tmp);
3486      else
3487	goto end_call_group;
3488
3489      if (src_regno < FIRST_PSEUDO_REGISTER
3490	  || dest_regno < FIRST_PSEUDO_REGISTER)
3491	{
3492	  if (!deps->readonly
3493              && deps->in_post_call_group_p == post_call_initial)
3494	    deps->in_post_call_group_p = post_call;
3495
3496          if (!sel_sched_p () || sched_emulate_haifa_p)
3497            {
3498              SCHED_GROUP_P (insn) = 1;
3499              CANT_MOVE (insn) = 1;
3500            }
3501	}
3502      else
3503	{
3504	end_call_group:
3505          if (!deps->readonly)
3506            deps->in_post_call_group_p = not_post_call;
3507	}
3508    }
3509
3510 debug_dont_end_call_group:
3511  if ((current_sched_info->flags & DO_SPECULATION)
3512      && !sched_insn_is_legitimate_for_speculation_p (insn, 0))
3513    /* INSN has an internal dependency (e.g. r14 = [r14]) and thus cannot
3514       be speculated.  */
3515    {
3516      if (sel_sched_p ())
3517        sel_mark_hard_insn (insn);
3518      else
3519        {
3520          sd_iterator_def sd_it;
3521          dep_t dep;
3522
3523          for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
3524               sd_iterator_cond (&sd_it, &dep);)
3525            change_spec_dep_to_hard (sd_it);
3526        }
3527    }
3528
  /* We do not yet have code to adjust REG_ARGS_SIZE, therefore we must
     honor the original ordering of these insns.  */
3531  if (find_reg_note (insn, REG_ARGS_SIZE, NULL))
3532    {
3533      if (deps->last_args_size)
3534	add_dependence (insn, deps->last_args_size, REG_DEP_OUTPUT);
3535      if (!deps->readonly)
3536	deps->last_args_size = insn;
3537    }
3538}
3539
3540/* Return TRUE if INSN might not always return normally (e.g. call exit,
3541   longjmp, loop forever, ...).  */
3542/* FIXME: Why can't this function just use flags_from_decl_or_type and
3543   test for ECF_NORETURN?  */
3544static bool
3545call_may_noreturn_p (rtx insn)
3546{
3547  rtx call;
3548
3549  /* const or pure calls that aren't looping will always return.  */
3550  if (RTL_CONST_OR_PURE_CALL_P (insn)
3551      && !RTL_LOOPING_CONST_OR_PURE_CALL_P (insn))
3552    return false;
3553
3554  call = get_call_rtx_from (insn);
3555  if (call && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
3556    {
3557      rtx symbol = XEXP (XEXP (call, 0), 0);
3558      if (SYMBOL_REF_DECL (symbol)
3559	  && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL)
3560	{
3561	  if (DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol))
3562	      == BUILT_IN_NORMAL)
3563	    switch (DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol)))
3564	      {
3565	      case BUILT_IN_BCMP:
3566	      case BUILT_IN_BCOPY:
3567	      case BUILT_IN_BZERO:
3568	      case BUILT_IN_INDEX:
3569	      case BUILT_IN_MEMCHR:
3570	      case BUILT_IN_MEMCMP:
3571	      case BUILT_IN_MEMCPY:
3572	      case BUILT_IN_MEMMOVE:
3573	      case BUILT_IN_MEMPCPY:
3574	      case BUILT_IN_MEMSET:
3575	      case BUILT_IN_RINDEX:
3576	      case BUILT_IN_STPCPY:
3577	      case BUILT_IN_STPNCPY:
3578	      case BUILT_IN_STRCAT:
3579	      case BUILT_IN_STRCHR:
3580	      case BUILT_IN_STRCMP:
3581	      case BUILT_IN_STRCPY:
3582	      case BUILT_IN_STRCSPN:
3583	      case BUILT_IN_STRLEN:
3584	      case BUILT_IN_STRNCAT:
3585	      case BUILT_IN_STRNCMP:
3586	      case BUILT_IN_STRNCPY:
3587	      case BUILT_IN_STRPBRK:
3588	      case BUILT_IN_STRRCHR:
3589	      case BUILT_IN_STRSPN:
3590	      case BUILT_IN_STRSTR:
3591		/* Assume certain string/memory builtins always return.  */
3592		return false;
3593	      default:
3594		break;
3595	      }
3596	}
3597    }
3598
3599  /* For all other calls assume that they might not always return.  */
3600  return true;
3601}
3602
3603/* Return true if INSN should be made dependent on the previous instruction
3604   group, and if all INSN's dependencies should be moved to the first
3605   instruction of that group.  */
3606
3607static bool
3608chain_to_prev_insn_p (rtx insn)
3609{
3610  rtx prev, x;
3611
3612  /* INSN forms a group with the previous instruction.  */
3613  if (SCHED_GROUP_P (insn))
3614    return true;
3615
3616  /* If the previous instruction clobbers a register R and this one sets
3617     part of R, the clobber was added specifically to help us track the
3618     liveness of R.  There's no point scheduling the clobber and leaving
3619     INSN behind, especially if we move the clobber to another block.  */
3620  prev = prev_nonnote_nondebug_insn (insn);
3621  if (prev
3622      && INSN_P (prev)
3623      && BLOCK_FOR_INSN (prev) == BLOCK_FOR_INSN (insn)
3624      && GET_CODE (PATTERN (prev)) == CLOBBER)
3625    {
3626      x = XEXP (PATTERN (prev), 0);
3627      if (set_of (x, insn))
3628	return true;
3629    }
3630
3631  return false;
3632}
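
/* For illustration, a made-up (target-independent) instance of the second
   case above, not taken from this file:

     insn P:  (clobber (reg:DI 100))
     insn I:  (set (subreg:SI (reg:DI 100) 0) (reg:SI 101))

   The clobber in P exists only so that dataflow sees the whole of reg 100
   as defined before the partial store in I; chain_to_prev_insn_p returns
   true for I, so P and I are kept together.  */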
3633
3634/* Analyze INSN with DEPS as a context.  */
3635void
3636deps_analyze_insn (struct deps_desc *deps, rtx_insn *insn)
3637{
3638  if (sched_deps_info->start_insn)
3639    sched_deps_info->start_insn (insn);
3640
3641  /* Record the condition for this insn.  */
3642  if (NONDEBUG_INSN_P (insn))
3643    {
3644      rtx t;
3645      sched_get_condition_with_rev (insn, NULL);
3646      t = INSN_CACHED_COND (insn);
3647      INSN_COND_DEPS (insn) = NULL;
3648      if (reload_completed
3649	  && (current_sched_info->flags & DO_PREDICATION)
3650	  && COMPARISON_P (t)
3651	  && REG_P (XEXP (t, 0))
3652	  && CONSTANT_P (XEXP (t, 1)))
3653	{
3654	  unsigned int regno;
3655	  int nregs;
3656	  rtx_insn_list *cond_deps = NULL;
3657	  t = XEXP (t, 0);
3658	  regno = REGNO (t);
3659	  nregs = hard_regno_nregs[regno][GET_MODE (t)];
3660	  while (nregs-- > 0)
3661	    {
3662	      struct deps_reg *reg_last = &deps->reg_last[regno + nregs];
3663	      cond_deps = concat_INSN_LIST (reg_last->sets, cond_deps);
3664	      cond_deps = concat_INSN_LIST (reg_last->clobbers, cond_deps);
3665	      cond_deps = concat_INSN_LIST (reg_last->implicit_sets, cond_deps);
3666	    }
3667	  INSN_COND_DEPS (insn) = cond_deps;
3668	}
3669    }
3670
3671  if (JUMP_P (insn))
3672    {
3673      /* Make each JUMP_INSN (but not a speculative check)
3674         a scheduling barrier for memory references.  */
3675      if (!deps->readonly
3676          && !(sel_sched_p ()
3677               && sel_insn_is_speculation_check (insn)))
3678        {
3679          /* Keep the list a reasonable size.  */
3680          if (deps->pending_flush_length++ >= MAX_PENDING_LIST_LENGTH)
3681            flush_pending_lists (deps, insn, true, true);
3682          else
3683	    deps->pending_jump_insns
3684              = alloc_INSN_LIST (insn, deps->pending_jump_insns);
3685        }
3686
3687      /* For each insn which shouldn't cross a jump, add a dependence.  */
3688      add_dependence_list_and_free (deps, insn,
3689				    &deps->sched_before_next_jump, 1,
3690				    REG_DEP_ANTI, true);
3691
3692      sched_analyze_insn (deps, PATTERN (insn), insn);
3693    }
3694  else if (NONJUMP_INSN_P (insn) || DEBUG_INSN_P (insn))
3695    {
3696      sched_analyze_insn (deps, PATTERN (insn), insn);
3697    }
3698  else if (CALL_P (insn))
3699    {
3700      int i;
3701
3702      CANT_MOVE (insn) = 1;
3703
3704      if (find_reg_note (insn, REG_SETJMP, NULL))
3705        {
3706          /* This is setjmp.  Assume that all registers, not just
3707             hard registers, may be clobbered by this call.  */
3708          reg_pending_barrier = MOVE_BARRIER;
3709        }
3710      else
3711        {
3712          for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3713            /* A call may read and modify global register variables.  */
3714            if (global_regs[i])
3715              {
3716                SET_REGNO_REG_SET (reg_pending_sets, i);
3717                SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3718              }
3719          /* Other call-clobbered hard regs may be clobbered.
3720             Since we only have a choice between 'might be clobbered'
3721             and 'definitely not clobbered', we must include all
3722             partly call-clobbered registers here.  */
3723            else if (HARD_REGNO_CALL_PART_CLOBBERED (i, reg_raw_mode[i])
3724                     || TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
3725              SET_REGNO_REG_SET (reg_pending_clobbers, i);
          /* We don't know what set of fixed registers might be used
             by the function, but it is certain that the stack pointer
             is among them, so be conservative.  */
3729            else if (fixed_regs[i])
3730	      SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3731          /* The frame pointer is normally not used by the function
3732             itself, but by the debugger.  */
3733          /* ??? MIPS o32 is an exception.  It uses the frame pointer
3734             in the macro expansion of jal but does not represent this
3735             fact in the call_insn rtl.  */
3736            else if (i == FRAME_POINTER_REGNUM
3737                     || (i == HARD_FRAME_POINTER_REGNUM
3738                         && (! reload_completed || frame_pointer_needed)))
3739	      SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3740        }
3741
3742      /* For each insn which shouldn't cross a call, add a dependence
3743         between that insn and this call insn.  */
3744      add_dependence_list_and_free (deps, insn,
3745                                    &deps->sched_before_next_call, 1,
3746                                    REG_DEP_ANTI, true);
3747
3748      sched_analyze_insn (deps, PATTERN (insn), insn);
3749
      /* If CALL would be in a sched group, then this would violate the
	 convention that sched group insns have dependencies only on the
	 previous instruction.

	 Of course one can say: "Hey!  What about the head of the sched
	 group?"  And I will answer: "Basic principles (one dep per insn)
	 are always the same."  */
3757      gcc_assert (!SCHED_GROUP_P (insn));
3758
      /* In the absence of interprocedural alias analysis, we must flush
         all pending reads and writes, and start new dependencies from
         here.  But only flush writes for constant calls (which may
         be passed a pointer to something we haven't written yet).  */
3763      flush_pending_lists (deps, insn, true, ! RTL_CONST_OR_PURE_CALL_P (insn));
3764
3765      if (!deps->readonly)
3766        {
3767          /* Remember the last function call for limiting lifetimes.  */
3768          free_INSN_LIST_list (&deps->last_function_call);
3769          deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);
3770
3771	  if (call_may_noreturn_p (insn))
3772	    {
3773	      /* Remember the last function call that might not always return
3774		 normally for limiting moves of trapping insns.  */
3775	      free_INSN_LIST_list (&deps->last_function_call_may_noreturn);
3776	      deps->last_function_call_may_noreturn
3777		= alloc_INSN_LIST (insn, NULL_RTX);
3778	    }
3779
3780          /* Before reload, begin a post-call group, so as to keep the
3781             lifetimes of hard registers correct.  */
3782          if (! reload_completed)
3783            deps->in_post_call_group_p = post_call;
3784        }
3785    }
3786
3787  if (sched_deps_info->use_cselib)
3788    cselib_process_insn (insn);
3789
3790  if (sched_deps_info->finish_insn)
3791    sched_deps_info->finish_insn ();
3792
3793  /* Fixup the dependencies in the sched group.  */
3794  if ((NONJUMP_INSN_P (insn) || JUMP_P (insn))
3795      && chain_to_prev_insn_p (insn)
3796      && !sel_sched_p ())
3797    chain_to_prev_insn (insn);
3798}
3799
3800/* Initialize DEPS for the new block beginning with HEAD.  */
3801void
3802deps_start_bb (struct deps_desc *deps, rtx_insn *head)
3803{
3804  gcc_assert (!deps->readonly);
3805
3806  /* Before reload, if the previous block ended in a call, show that
3807     we are inside a post-call group, so as to keep the lifetimes of
3808     hard registers correct.  */
3809  if (! reload_completed && !LABEL_P (head))
3810    {
3811      rtx_insn *insn = prev_nonnote_nondebug_insn (head);
3812
3813      if (insn && CALL_P (insn))
3814	deps->in_post_call_group_p = post_call_initial;
3815    }
3816}
3817
3818/* Analyze every insn between HEAD and TAIL inclusive, creating backward
3819   dependencies for each insn.  */
3820void
3821sched_analyze (struct deps_desc *deps, rtx_insn *head, rtx_insn *tail)
3822{
3823  rtx_insn *insn;
3824
3825  if (sched_deps_info->use_cselib)
3826    cselib_init (CSELIB_RECORD_MEMORY);
3827
3828  deps_start_bb (deps, head);
3829
3830  for (insn = head;; insn = NEXT_INSN (insn))
3831    {
3832
3833      if (INSN_P (insn))
3834	{
3835	  /* And initialize deps_lists.  */
3836	  sd_init_insn (insn);
	  /* Clean up SCHED_GROUP_P which may have been set by the last
	     scheduler pass.  */
3839	  if (SCHED_GROUP_P (insn))
3840	    SCHED_GROUP_P (insn) = 0;
3841	}
3842
3843      deps_analyze_insn (deps, insn);
3844
3845      if (insn == tail)
3846	{
3847	  if (sched_deps_info->use_cselib)
3848	    cselib_finish ();
3849	  return;
3850	}
3851    }
3852  gcc_unreachable ();
3853}
3854
3855/* Helper for sched_free_deps ().
3856   Delete INSN's (RESOLVED_P) backward dependencies.  */
3857static void
3858delete_dep_nodes_in_back_deps (rtx insn, bool resolved_p)
3859{
3860  sd_iterator_def sd_it;
3861  dep_t dep;
3862  sd_list_types_def types;
3863
3864  if (resolved_p)
3865    types = SD_LIST_RES_BACK;
3866  else
3867    types = SD_LIST_BACK;
3868
3869  for (sd_it = sd_iterator_start (insn, types);
3870       sd_iterator_cond (&sd_it, &dep);)
3871    {
3872      dep_link_t link = *sd_it.linkp;
3873      dep_node_t node = DEP_LINK_NODE (link);
3874      deps_list_t back_list;
3875      deps_list_t forw_list;
3876
3877      get_back_and_forw_lists (dep, resolved_p, &back_list, &forw_list);
3878      remove_from_deps_list (link, back_list);
3879      delete_dep_node (node);
3880    }
3881}
3882
3883/* Delete (RESOLVED_P) dependencies between HEAD and TAIL together with
3884   deps_lists.  */
3885void
3886sched_free_deps (rtx_insn *head, rtx_insn *tail, bool resolved_p)
3887{
3888  rtx_insn *insn;
3889  rtx_insn *next_tail = NEXT_INSN (tail);
3890
3891  /* We make two passes since some insns may be scheduled before their
3892     dependencies are resolved.  */
3893  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3894    if (INSN_P (insn) && INSN_LUID (insn) > 0)
3895      {
3896	/* Clear forward deps and leave the dep_nodes to the
3897	   corresponding back_deps list.  */
3898	if (resolved_p)
3899	  clear_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
3900	else
3901	  clear_deps_list (INSN_FORW_DEPS (insn));
3902      }
3903  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3904    if (INSN_P (insn) && INSN_LUID (insn) > 0)
3905      {
3906	/* Clear resolved back deps together with its dep_nodes.  */
3907	delete_dep_nodes_in_back_deps (insn, resolved_p);
3908
3909	sd_finish_insn (insn);
3910      }
3911}
3912
3913/* Initialize variables for region data dependence analysis.
3914   When LAZY_REG_LAST is true, do not allocate reg_last array
3915   of struct deps_desc immediately.  */
3916
3917void
3918init_deps (struct deps_desc *deps, bool lazy_reg_last)
3919{
3920  int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
3921
3922  deps->max_reg = max_reg;
3923  if (lazy_reg_last)
3924    deps->reg_last = NULL;
3925  else
3926    deps->reg_last = XCNEWVEC (struct deps_reg, max_reg);
3927  INIT_REG_SET (&deps->reg_last_in_use);
3928
3929  deps->pending_read_insns = 0;
3930  deps->pending_read_mems = 0;
3931  deps->pending_write_insns = 0;
3932  deps->pending_write_mems = 0;
3933  deps->pending_jump_insns = 0;
3934  deps->pending_read_list_length = 0;
3935  deps->pending_write_list_length = 0;
3936  deps->pending_flush_length = 0;
3937  deps->last_pending_memory_flush = 0;
3938  deps->last_function_call = 0;
3939  deps->last_function_call_may_noreturn = 0;
3940  deps->sched_before_next_call = 0;
3941  deps->sched_before_next_jump = 0;
3942  deps->in_post_call_group_p = not_post_call;
3943  deps->last_debug_insn = 0;
3944  deps->last_args_size = 0;
3945  deps->last_reg_pending_barrier = NOT_A_BARRIER;
3946  deps->readonly = 0;
3947}
3948
/* Initialize only the reg_last field of DEPS, which was not allocated
   earlier because we initialized DEPS lazily.  */
3951void
3952init_deps_reg_last (struct deps_desc *deps)
3953{
3954  gcc_assert (deps && deps->max_reg > 0);
3955  gcc_assert (deps->reg_last == NULL);
3956
3957  deps->reg_last = XCNEWVEC (struct deps_reg, deps->max_reg);
3958}
3959
3960
3961/* Free insn lists found in DEPS.  */
3962
3963void
3964free_deps (struct deps_desc *deps)
3965{
3966  unsigned i;
3967  reg_set_iterator rsi;
3968
3969  /* We set max_reg to 0 when this context was already freed.  */
3970  if (deps->max_reg == 0)
3971    {
3972      gcc_assert (deps->reg_last == NULL);
3973      return;
3974    }
3975  deps->max_reg = 0;
3976
3977  free_INSN_LIST_list (&deps->pending_read_insns);
3978  free_EXPR_LIST_list (&deps->pending_read_mems);
3979  free_INSN_LIST_list (&deps->pending_write_insns);
3980  free_EXPR_LIST_list (&deps->pending_write_mems);
3981  free_INSN_LIST_list (&deps->last_pending_memory_flush);
3982
3983  /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
3984     times.  For a testcase with 42000 regs and 8000 small basic blocks,
3985     this loop accounted for nearly 60% (84 sec) of the total -O2 runtime.  */
3986  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3987    {
3988      struct deps_reg *reg_last = &deps->reg_last[i];
3989      if (reg_last->uses)
3990	free_INSN_LIST_list (&reg_last->uses);
3991      if (reg_last->sets)
3992	free_INSN_LIST_list (&reg_last->sets);
3993      if (reg_last->implicit_sets)
3994	free_INSN_LIST_list (&reg_last->implicit_sets);
3995      if (reg_last->control_uses)
3996	free_INSN_LIST_list (&reg_last->control_uses);
3997      if (reg_last->clobbers)
3998	free_INSN_LIST_list (&reg_last->clobbers);
3999    }
4000  CLEAR_REG_SET (&deps->reg_last_in_use);
4001
4002  /* As we initialize reg_last lazily, it is possible that we didn't allocate
4003     it at all.  */
4004  free (deps->reg_last);
4005  deps->reg_last = NULL;
4006
4007  deps = NULL;
4008}
4009
/* Remove INSN from the dependence context DEPS.  */
4011void
4012remove_from_deps (struct deps_desc *deps, rtx_insn *insn)
4013{
4014  int removed;
4015  unsigned i;
4016  reg_set_iterator rsi;
4017
4018  removed = remove_from_both_dependence_lists (insn, &deps->pending_read_insns,
4019                                               &deps->pending_read_mems);
4020  if (!DEBUG_INSN_P (insn))
4021    deps->pending_read_list_length -= removed;
4022  removed = remove_from_both_dependence_lists (insn, &deps->pending_write_insns,
4023                                               &deps->pending_write_mems);
4024  deps->pending_write_list_length -= removed;
4025
4026  removed = remove_from_dependence_list (insn, &deps->pending_jump_insns);
4027  deps->pending_flush_length -= removed;
4028  removed = remove_from_dependence_list (insn, &deps->last_pending_memory_flush);
4029  deps->pending_flush_length -= removed;
4030
4031  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
4032    {
4033      struct deps_reg *reg_last = &deps->reg_last[i];
4034      if (reg_last->uses)
4035	remove_from_dependence_list (insn, &reg_last->uses);
4036      if (reg_last->sets)
4037	remove_from_dependence_list (insn, &reg_last->sets);
4038      if (reg_last->implicit_sets)
4039	remove_from_dependence_list (insn, &reg_last->implicit_sets);
4040      if (reg_last->clobbers)
4041	remove_from_dependence_list (insn, &reg_last->clobbers);
4042      if (!reg_last->uses && !reg_last->sets && !reg_last->implicit_sets
4043	  && !reg_last->clobbers)
4044        CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, i);
4045    }
4046
4047  if (CALL_P (insn))
4048    {
4049      remove_from_dependence_list (insn, &deps->last_function_call);
4050      remove_from_dependence_list (insn,
4051				   &deps->last_function_call_may_noreturn);
4052    }
4053  remove_from_dependence_list (insn, &deps->sched_before_next_call);
4054}
4055
4056/* Init deps data vector.  */
4057static void
4058init_deps_data_vector (void)
4059{
4060  int reserve = (sched_max_luid + 1 - h_d_i_d.length ());
4061  if (reserve > 0 && ! h_d_i_d.space (reserve))
4062    h_d_i_d.safe_grow_cleared (3 * sched_max_luid / 2);
4063}
4064
4065/* If it is profitable to use them, initialize or extend (depending on
4066   GLOBAL_P) dependency data.  */
4067void
4068sched_deps_init (bool global_p)
4069{
4070  /* Average number of insns in the basic block.
4071     '+ 1' is used to make it nonzero.  */
4072  int insns_in_block = sched_max_luid / n_basic_blocks_for_fn (cfun) + 1;
4073
4074  init_deps_data_vector ();
4075
4076  /* We use another caching mechanism for selective scheduling, so
4077     we don't use this one.  */
4078  if (!sel_sched_p () && global_p && insns_in_block > 100 * 5)
4079    {
4080      /* ?!? We could save some memory by computing a per-region luid mapping
4081         which could reduce both the number of vectors in the cache and the
4082         size of each vector.  Instead we just avoid the cache entirely unless
4083         the average number of instructions in a basic block is very high.  See
4084         the comment before the declaration of true_dependency_cache for
4085         what we consider "very high".  */
4086      cache_size = 0;
4087      extend_dependency_caches (sched_max_luid, true);
4088    }
4089
4090  if (global_p)
4091    {
4092      dl_pool = create_alloc_pool ("deps_list", sizeof (struct _deps_list),
4093                                   /* Allocate lists for one block at a time.  */
4094                                   insns_in_block);
4095      dn_pool = create_alloc_pool ("dep_node", sizeof (struct _dep_node),
4096                                   /* Allocate nodes for one block at a time.
4097                                      We assume that average insn has
4098                                      5 producers.  */
4099                                   5 * insns_in_block);
4100    }
4101}
4102
4103
4104/* Create or extend (depending on CREATE_P) dependency caches to
4105   size N.  */
4106void
4107extend_dependency_caches (int n, bool create_p)
4108{
4109  if (create_p || true_dependency_cache)
4110    {
4111      int i, luid = cache_size + n;
4112
4113      true_dependency_cache = XRESIZEVEC (bitmap_head, true_dependency_cache,
4114					  luid);
4115      output_dependency_cache = XRESIZEVEC (bitmap_head,
4116					    output_dependency_cache, luid);
4117      anti_dependency_cache = XRESIZEVEC (bitmap_head, anti_dependency_cache,
4118					  luid);
4119      control_dependency_cache = XRESIZEVEC (bitmap_head, control_dependency_cache,
4120					  luid);
4121
4122      if (current_sched_info->flags & DO_SPECULATION)
4123        spec_dependency_cache = XRESIZEVEC (bitmap_head, spec_dependency_cache,
4124					    luid);
4125
4126      for (i = cache_size; i < luid; i++)
4127	{
4128	  bitmap_initialize (&true_dependency_cache[i], 0);
4129	  bitmap_initialize (&output_dependency_cache[i], 0);
4130	  bitmap_initialize (&anti_dependency_cache[i], 0);
4131	  bitmap_initialize (&control_dependency_cache[i], 0);
4132
4133          if (current_sched_info->flags & DO_SPECULATION)
4134            bitmap_initialize (&spec_dependency_cache[i], 0);
4135	}
4136      cache_size = luid;
4137    }
4138}
4139
4140/* Finalize dependency information for the whole function.  */
4141void
4142sched_deps_finish (void)
4143{
4144  gcc_assert (deps_pools_are_empty_p ());
4145  free_alloc_pool_if_empty (&dn_pool);
4146  free_alloc_pool_if_empty (&dl_pool);
4147  gcc_assert (dn_pool == NULL && dl_pool == NULL);
4148
  h_d_i_d.release ();
4151
4152  if (true_dependency_cache)
4153    {
4154      int i;
4155
4156      for (i = 0; i < cache_size; i++)
4157	{
4158	  bitmap_clear (&true_dependency_cache[i]);
4159	  bitmap_clear (&output_dependency_cache[i]);
4160	  bitmap_clear (&anti_dependency_cache[i]);
4161	  bitmap_clear (&control_dependency_cache[i]);
4162
4163          if (sched_deps_info->generate_spec_deps)
4164            bitmap_clear (&spec_dependency_cache[i]);
4165	}
4166      free (true_dependency_cache);
4167      true_dependency_cache = NULL;
4168      free (output_dependency_cache);
4169      output_dependency_cache = NULL;
4170      free (anti_dependency_cache);
4171      anti_dependency_cache = NULL;
4172      free (control_dependency_cache);
4173      control_dependency_cache = NULL;
4174
4175      if (sched_deps_info->generate_spec_deps)
4176        {
4177          free (spec_dependency_cache);
4178          spec_dependency_cache = NULL;
4179        }
    }

  /* Only reset the cache size after the caches have been cleared and
     freed above.  */
  cache_size = 0;
}
4183
4184/* Initialize some global variables needed by the dependency analysis
4185   code.  */
4186
4187void
4188init_deps_global (void)
4189{
4190  CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
4191  CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
4192  reg_pending_sets = ALLOC_REG_SET (&reg_obstack);
4193  reg_pending_clobbers = ALLOC_REG_SET (&reg_obstack);
4194  reg_pending_uses = ALLOC_REG_SET (&reg_obstack);
4195  reg_pending_control_uses = ALLOC_REG_SET (&reg_obstack);
4196  reg_pending_barrier = NOT_A_BARRIER;
4197
4198  if (!sel_sched_p () || sched_emulate_haifa_p)
4199    {
4200      sched_deps_info->start_insn = haifa_start_insn;
4201      sched_deps_info->finish_insn = haifa_finish_insn;
4202
4203      sched_deps_info->note_reg_set = haifa_note_reg_set;
4204      sched_deps_info->note_reg_clobber = haifa_note_reg_clobber;
4205      sched_deps_info->note_reg_use = haifa_note_reg_use;
4206
4207      sched_deps_info->note_mem_dep = haifa_note_mem_dep;
4208      sched_deps_info->note_dep = haifa_note_dep;
4209   }
4210}
4211
4212/* Free everything used by the dependency analysis code.  */
4213
4214void
4215finish_deps_global (void)
4216{
4217  FREE_REG_SET (reg_pending_sets);
4218  FREE_REG_SET (reg_pending_clobbers);
4219  FREE_REG_SET (reg_pending_uses);
4220  FREE_REG_SET (reg_pending_control_uses);
4221}
4222
4223/* Estimate the weakness of dependence between MEM1 and MEM2.  */
4224dw_t
4225estimate_dep_weak (rtx mem1, rtx mem2)
4226{
4227  rtx r1, r2;
4228
4229  if (mem1 == mem2)
4230    /* MEMs are the same - don't speculate.  */
4231    return MIN_DEP_WEAK;
4232
4233  r1 = XEXP (mem1, 0);
4234  r2 = XEXP (mem2, 0);
4235
4236  if (r1 == r2
4237      || (REG_P (r1) && REG_P (r2)
4238	  && REGNO (r1) == REGNO (r2)))
4239    /* Again, MEMs are the same.  */
4240    return MIN_DEP_WEAK;
4241  else if ((REG_P (r1) && !REG_P (r2))
4242	   || (!REG_P (r1) && REG_P (r2)))
    /* Different addressing modes - reason to be more speculative
       than usual.  */
4245    return NO_DEP_WEAK - (NO_DEP_WEAK - UNCERTAIN_DEP_WEAK) / 2;
4246  else
4247    /* We can't say anything about the dependence.  */
4248    return UNCERTAIN_DEP_WEAK;
4249}
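
/* Two illustrative (hypothetical) cases for the function above:

     mem1 = (mem:SI (reg:SI 100))
     mem2 = (mem:SI (reg:SI 100))
       => the addresses use the same register, so MIN_DEP_WEAK is returned
	  and the dependence is not speculated;

     mem1 = (mem:SI (reg:SI 100))
     mem2 = (mem:SI (plus:SI (reg:SI 101) (const_int 8)))
       => the addressing modes differ, so the value halfway between
	  UNCERTAIN_DEP_WEAK and NO_DEP_WEAK is returned.  */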
4250
4251/* Add or update backward dependence between INSN and ELEM with type DEP_TYPE.
4252   This function can handle same INSN and ELEM (INSN == ELEM).
4253   It is a convenience wrapper.  */
4254static void
4255add_dependence_1 (rtx_insn *insn, rtx_insn *elem, enum reg_note dep_type)
4256{
4257  ds_t ds;
4258  bool internal;
4259
4260  if (dep_type == REG_DEP_TRUE)
4261    ds = DEP_TRUE;
4262  else if (dep_type == REG_DEP_OUTPUT)
4263    ds = DEP_OUTPUT;
4264  else if (dep_type == REG_DEP_CONTROL)
4265    ds = DEP_CONTROL;
4266  else
4267    {
4268      gcc_assert (dep_type == REG_DEP_ANTI);
4269      ds = DEP_ANTI;
4270    }
4271
4272  /* When add_dependence is called from inside sched-deps.c, we expect
4273     cur_insn to be non-null.  */
4274  internal = cur_insn != NULL;
4275  if (internal)
4276    gcc_assert (insn == cur_insn);
4277  else
4278    cur_insn = insn;
4279
4280  note_dep (elem, ds);
4281  if (!internal)
4282    cur_insn = NULL;
4283}
4284
/* Return the weakness of speculative type TYPE in the dep_status DS,
   without the range check, so that malformed input does not ICE.  */
4287static dw_t
4288get_dep_weak_1 (ds_t ds, ds_t type)
4289{
4290  ds = ds & type;
4291
4292  switch (type)
4293    {
4294    case BEGIN_DATA: ds >>= BEGIN_DATA_BITS_OFFSET; break;
4295    case BE_IN_DATA: ds >>= BE_IN_DATA_BITS_OFFSET; break;
4296    case BEGIN_CONTROL: ds >>= BEGIN_CONTROL_BITS_OFFSET; break;
4297    case BE_IN_CONTROL: ds >>= BE_IN_CONTROL_BITS_OFFSET; break;
4298    default: gcc_unreachable ();
4299    }
4300
4301  return (dw_t) ds;
4302}
4303
4304/* Return weakness of speculative type TYPE in the dep_status DS.  */
4305dw_t
4306get_dep_weak (ds_t ds, ds_t type)
4307{
4308  dw_t dw = get_dep_weak_1 (ds, type);
4309
4310  gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
4311  return dw;
4312}
4313
4314/* Return the dep_status, which has the same parameters as DS, except for
4315   speculative type TYPE, that will have weakness DW.  */
4316ds_t
4317set_dep_weak (ds_t ds, ds_t type, dw_t dw)
4318{
4319  gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
4320
4321  ds &= ~type;
4322  switch (type)
4323    {
4324    case BEGIN_DATA: ds |= ((ds_t) dw) << BEGIN_DATA_BITS_OFFSET; break;
4325    case BE_IN_DATA: ds |= ((ds_t) dw) << BE_IN_DATA_BITS_OFFSET; break;
4326    case BEGIN_CONTROL: ds |= ((ds_t) dw) << BEGIN_CONTROL_BITS_OFFSET; break;
4327    case BE_IN_CONTROL: ds |= ((ds_t) dw) << BE_IN_CONTROL_BITS_OFFSET; break;
4328    default: gcc_unreachable ();
4329    }
4330  return ds;
4331}
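
/* A minimal sketch (not built; for illustration only) of how the two
   accessors above compose.  It relies only on the ds_t bits and limits
   already used in this file (DEP_TRUE, BEGIN_DATA, MAX_DEP_WEAK).  */
#if 0
static void
dep_weak_accessors_example (void)
{
  dw_t w = (dw_t) (MAX_DEP_WEAK / 2);
  ds_t ds = DEP_TRUE;

  /* Record a data-speculative weakness and read it back unchanged.  */
  ds = set_dep_weak (ds, BEGIN_DATA, w);
  gcc_assert (get_dep_weak (ds, BEGIN_DATA) == w);
}
#endif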
4332
4333/* Return the join of two dep_statuses DS1 and DS2.
4334   If MAX_P is true then choose the greater probability,
4335   otherwise multiply probabilities.
4336   This function assumes that both DS1 and DS2 contain speculative bits.  */
4337static ds_t
4338ds_merge_1 (ds_t ds1, ds_t ds2, bool max_p)
4339{
4340  ds_t ds, t;
4341
4342  gcc_assert ((ds1 & SPECULATIVE) && (ds2 & SPECULATIVE));
4343
4344  ds = (ds1 & DEP_TYPES) | (ds2 & DEP_TYPES);
4345
4346  t = FIRST_SPEC_TYPE;
4347  do
4348    {
4349      if ((ds1 & t) && !(ds2 & t))
4350	ds |= ds1 & t;
4351      else if (!(ds1 & t) && (ds2 & t))
4352	ds |= ds2 & t;
4353      else if ((ds1 & t) && (ds2 & t))
4354	{
4355	  dw_t dw1 = get_dep_weak (ds1, t);
4356	  dw_t dw2 = get_dep_weak (ds2, t);
4357	  ds_t dw;
4358
4359	  if (!max_p)
4360	    {
4361	      dw = ((ds_t) dw1) * ((ds_t) dw2);
4362	      dw /= MAX_DEP_WEAK;
4363	      if (dw < MIN_DEP_WEAK)
4364		dw = MIN_DEP_WEAK;
4365	    }
4366	  else
4367	    {
4368	      if (dw1 >= dw2)
4369		dw = dw1;
4370	      else
4371		dw = dw2;
4372	    }
4373
4374	  ds = set_dep_weak (ds, t, (dw_t) dw);
4375	}
4376
4377      if (t == LAST_SPEC_TYPE)
4378	break;
4379      t <<= SPEC_TYPE_SHIFT;
4380    }
4381  while (1);
4382
4383  return ds;
4384}
4385
4386/* Return the join of two dep_statuses DS1 and DS2.
4387   This function assumes that both DS1 and DS2 contain speculative bits.  */
4388ds_t
4389ds_merge (ds_t ds1, ds_t ds2)
4390{
4391  return ds_merge_1 (ds1, ds2, false);
4392}
4393
/* Return the join of dep_statuses DS and DS2.  If both are speculative and
   MEM1 is nonnull, use MEM1 and MEM2 to re-estimate the data-speculative
   weakness.  */
4395ds_t
4396ds_full_merge (ds_t ds, ds_t ds2, rtx mem1, rtx mem2)
4397{
4398  ds_t new_status = ds | ds2;
4399
4400  if (new_status & SPECULATIVE)
4401    {
4402      if ((ds && !(ds & SPECULATIVE))
4403	  || (ds2 && !(ds2 & SPECULATIVE)))
4404	/* Then this dep can't be speculative.  */
4405	new_status &= ~SPECULATIVE;
4406      else
4407	{
	  /* Both are speculative.  Merge the probabilities.  */
4409	  if (mem1)
4410	    {
4411	      dw_t dw;
4412
4413	      dw = estimate_dep_weak (mem1, mem2);
4414	      ds = set_dep_weak (ds, BEGIN_DATA, dw);
4415	    }
4416
4417	  if (!ds)
4418	    new_status = ds2;
4419	  else if (!ds2)
4420	    new_status = ds;
4421	  else
4422	    new_status = ds_merge (ds2, ds);
4423	}
4424    }
4425
4426  return new_status;
4427}
4428
4429/* Return the join of DS1 and DS2.  Use maximum instead of multiplying
4430   probabilities.  */
4431ds_t
4432ds_max_merge (ds_t ds1, ds_t ds2)
4433{
4434  if (ds1 == 0 && ds2 == 0)
4435    return 0;
4436
4437  if (ds1 == 0 && ds2 != 0)
4438    return ds2;
4439
4440  if (ds1 != 0 && ds2 == 0)
4441    return ds1;
4442
4443  return ds_merge_1 (ds1, ds2, true);
4444}
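
/* A small sketch (not built; hypothetical values) of the two merge
   policies above, using only helpers defined earlier in this file.  */
#if 0
static void
ds_merge_example (void)
{
  ds_t a = set_dep_weak (DEP_TRUE, BEGIN_DATA, MAX_DEP_WEAK / 2);
  ds_t b = set_dep_weak (DEP_TRUE, BEGIN_DATA, MAX_DEP_WEAK / 4);

  /* ds_merge treats the weaknesses as independent probabilities scaled
     by MAX_DEP_WEAK, giving roughly MAX_DEP_WEAK / 8 here.  */
  ds_t mul = ds_merge (a, b);

  /* ds_max_merge keeps the larger weakness, MAX_DEP_WEAK / 2 here.  */
  gcc_assert (get_dep_weak (ds_max_merge (a, b), BEGIN_DATA)
	      == MAX_DEP_WEAK / 2);
  gcc_assert (get_dep_weak (mul, BEGIN_DATA)
	      <= get_dep_weak (ds_max_merge (a, b), BEGIN_DATA));
}
#endif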
4445
4446/* Return the probability of speculation success for the speculation
4447   status DS.  */
4448dw_t
4449ds_weak (ds_t ds)
4450{
4451  ds_t res = 1, dt;
4452  int n = 0;
4453
4454  dt = FIRST_SPEC_TYPE;
4455  do
4456    {
4457      if (ds & dt)
4458	{
4459	  res *= (ds_t) get_dep_weak (ds, dt);
4460	  n++;
4461	}
4462
4463      if (dt == LAST_SPEC_TYPE)
4464	break;
4465      dt <<= SPEC_TYPE_SHIFT;
4466    }
4467  while (1);
4468
4469  gcc_assert (n);
4470  while (--n)
4471    res /= MAX_DEP_WEAK;
4472
4473  if (res < MIN_DEP_WEAK)
4474    res = MIN_DEP_WEAK;
4475
4476  gcc_assert (res <= MAX_DEP_WEAK);
4477
4478  return (dw_t) res;
4479}
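
/* A tiny illustration (not built): with a single speculative bit set no
   rescaling takes place, so the status's own weakness is returned.  */
#if 0
static void
ds_weak_example (void)
{
  ds_t ds = set_dep_weak (DEP_TRUE, BEGIN_DATA, UNCERTAIN_DEP_WEAK);

  gcc_assert (ds_weak (ds) == UNCERTAIN_DEP_WEAK);
}
#endif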
4480
4481/* Return a dep status that contains all speculation types of DS.  */
4482ds_t
4483ds_get_speculation_types (ds_t ds)
4484{
4485  if (ds & BEGIN_DATA)
4486    ds |= BEGIN_DATA;
4487  if (ds & BE_IN_DATA)
4488    ds |= BE_IN_DATA;
4489  if (ds & BEGIN_CONTROL)
4490    ds |= BEGIN_CONTROL;
4491  if (ds & BE_IN_CONTROL)
4492    ds |= BE_IN_CONTROL;
4493
4494  return ds & SPECULATIVE;
4495}
4496
4497/* Return a dep status that contains maximal weakness for each speculation
4498   type present in DS.  */
4499ds_t
4500ds_get_max_dep_weak (ds_t ds)
4501{
4502  if (ds & BEGIN_DATA)
4503    ds = set_dep_weak (ds, BEGIN_DATA, MAX_DEP_WEAK);
4504  if (ds & BE_IN_DATA)
4505    ds = set_dep_weak (ds, BE_IN_DATA, MAX_DEP_WEAK);
4506  if (ds & BEGIN_CONTROL)
4507    ds = set_dep_weak (ds, BEGIN_CONTROL, MAX_DEP_WEAK);
4508  if (ds & BE_IN_CONTROL)
4509    ds = set_dep_weak (ds, BE_IN_CONTROL, MAX_DEP_WEAK);
4510
4511  return ds;
4512}
4513
4514/* Dump information about the dependence status S.  */
4515static void
4516dump_ds (FILE *f, ds_t s)
4517{
4518  fprintf (f, "{");
4519
4520  if (s & BEGIN_DATA)
4521    fprintf (f, "BEGIN_DATA: %d; ", get_dep_weak_1 (s, BEGIN_DATA));
4522  if (s & BE_IN_DATA)
4523    fprintf (f, "BE_IN_DATA: %d; ", get_dep_weak_1 (s, BE_IN_DATA));
4524  if (s & BEGIN_CONTROL)
4525    fprintf (f, "BEGIN_CONTROL: %d; ", get_dep_weak_1 (s, BEGIN_CONTROL));
4526  if (s & BE_IN_CONTROL)
4527    fprintf (f, "BE_IN_CONTROL: %d; ", get_dep_weak_1 (s, BE_IN_CONTROL));
4528
4529  if (s & HARD_DEP)
4530    fprintf (f, "HARD_DEP; ");
4531
4532  if (s & DEP_TRUE)
4533    fprintf (f, "DEP_TRUE; ");
4534  if (s & DEP_OUTPUT)
4535    fprintf (f, "DEP_OUTPUT; ");
4536  if (s & DEP_ANTI)
4537    fprintf (f, "DEP_ANTI; ");
4538  if (s & DEP_CONTROL)
4539    fprintf (f, "DEP_CONTROL; ");
4540
4541  fprintf (f, "}");
4542}
4543
4544DEBUG_FUNCTION void
4545debug_ds (ds_t s)
4546{
4547  dump_ds (stderr, s);
4548  fprintf (stderr, "\n");
4549}
4550
4551#ifdef ENABLE_CHECKING
4552/* Verify that dependence type and status are consistent.
4553   If RELAXED_P is true, then skip dep_weakness checks.  */
4554static void
4555check_dep (dep_t dep, bool relaxed_p)
4556{
4557  enum reg_note dt = DEP_TYPE (dep);
4558  ds_t ds = DEP_STATUS (dep);
4559
4560  gcc_assert (DEP_PRO (dep) != DEP_CON (dep));
4561
4562  if (!(current_sched_info->flags & USE_DEPS_LIST))
4563    {
4564      gcc_assert (ds == 0);
4565      return;
4566    }
4567
4568  /* Check that dependence type contains the same bits as the status.  */
4569  if (dt == REG_DEP_TRUE)
4570    gcc_assert (ds & DEP_TRUE);
4571  else if (dt == REG_DEP_OUTPUT)
4572    gcc_assert ((ds & DEP_OUTPUT)
4573		&& !(ds & DEP_TRUE));
4574  else if (dt == REG_DEP_ANTI)
4575    gcc_assert ((ds & DEP_ANTI)
4576		&& !(ds & (DEP_OUTPUT | DEP_TRUE)));
4577  else
4578    gcc_assert (dt == REG_DEP_CONTROL
4579		&& (ds & DEP_CONTROL)
4580		&& !(ds & (DEP_OUTPUT | DEP_ANTI | DEP_TRUE)));
4581
  /* HARD_DEP cannot appear in the dep_status of a link.  */
4583  gcc_assert (!(ds & HARD_DEP));
4584
4585  /* Check that dependence status is set correctly when speculation is not
4586     supported.  */
4587  if (!sched_deps_info->generate_spec_deps)
4588    gcc_assert (!(ds & SPECULATIVE));
4589  else if (ds & SPECULATIVE)
4590    {
4591      if (!relaxed_p)
4592	{
4593	  ds_t type = FIRST_SPEC_TYPE;
4594
4595	  /* Check that dependence weakness is in proper range.  */
4596	  do
4597	    {
4598	      if (ds & type)
4599		get_dep_weak (ds, type);
4600
4601	      if (type == LAST_SPEC_TYPE)
4602		break;
4603	      type <<= SPEC_TYPE_SHIFT;
4604	    }
4605	  while (1);
4606	}
4607
4608      if (ds & BEGIN_SPEC)
4609	{
4610	  /* Only true dependence can be data speculative.  */
4611	  if (ds & BEGIN_DATA)
4612	    gcc_assert (ds & DEP_TRUE);
4613
4614	  /* Control dependencies in the insn scheduler are represented by
4615	     anti-dependencies, therefore only anti dependence can be
4616	     control speculative.  */
4617	  if (ds & BEGIN_CONTROL)
4618	    gcc_assert (ds & DEP_ANTI);
4619	}
4620      else
4621	{
4622	  /* Subsequent speculations should resolve true dependencies.  */
4623	  gcc_assert ((ds & DEP_TYPES) == DEP_TRUE);
4624	}
4625
4626      /* Check that true and anti dependencies can't have other speculative
4627	 statuses.  */
4628      if (ds & DEP_TRUE)
4629	gcc_assert (ds & (BEGIN_DATA | BE_IN_SPEC));
4630      /* An output dependence can't be speculative at all.  */
4631      gcc_assert (!(ds & DEP_OUTPUT));
4632      if (ds & DEP_ANTI)
4633	gcc_assert (ds & BEGIN_CONTROL);
4634    }
4635}
4636#endif /* ENABLE_CHECKING */
4637
/* The following code discovers opportunities to switch a memory reference
   and an increment by modifying the address.  We ensure that this is done
   only for dependencies whose sole purpose is to express a single register
   dependence (tracked with DEP_NONREG and DEP_MULTIPLE), and that every
   memory instruction involved is subject to only one dep that can cause a
   pattern change.
4644
4645   When we discover a suitable dependency, we fill in the dep_replacement
4646   structure to show how to modify the memory reference.  */
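
/* As an illustration (a hypothetical, target-independent pair of insns,
   not code from this file), consider a store followed by an add of the
   same base register:

     insn A:  (set (mem:SI (reg:SI 100)) (reg:SI 101))
     insn B:  (set (reg:SI 100) (plus:SI (reg:SI 100) (const_int 4)))

   The anti-dependence A -> B normally keeps B after A.  Recording a
   dep_replacement that rewrites A's address to
   (plus:SI (reg:SI 100) (const_int -4)) allows B to be scheduled before A:
   once B has run, reg 100 has advanced by 4, and A addressing
   "reg 100 - 4" still refers to the original location.  */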
4647
4648/* Holds information about a pair of memory reference and register increment
4649   insns which depend on each other, but could possibly be interchanged.  */
4650struct mem_inc_info
4651{
4652  rtx_insn *inc_insn;
4653  rtx_insn *mem_insn;
4654
4655  rtx *mem_loc;
4656  /* A register occurring in the memory address for which we wish to break
4657     the dependence.  This must be identical to the destination register of
4658     the increment.  */
4659  rtx mem_reg0;
4660  /* Any kind of index that is added to that register.  */
4661  rtx mem_index;
4662  /* The constant offset used in the memory address.  */
4663  HOST_WIDE_INT mem_constant;
  /* The constant added in the increment insn.  Negated if the increment
     comes after the memory reference.  */
  HOST_WIDE_INT inc_constant;
  /* The source register used in the increment.  May differ from mem_reg0
     if the increment comes before the memory reference.  */
4669  rtx inc_input;
4670};
4671
4672/* Verify that the memory location described in MII can be replaced with
4673   one using NEW_ADDR.  Return the new memory reference or NULL_RTX.  The
4674   insn remains unchanged by this function.  */
4675
4676static rtx
4677attempt_change (struct mem_inc_info *mii, rtx new_addr)
4678{
4679  rtx mem = *mii->mem_loc;
4680  rtx new_mem;
4681
4682  /* Jump through a lot of hoops to keep the attributes up to date.  We
4683     do not want to call one of the change address variants that take
4684     an offset even though we know the offset in many cases.  These
4685     assume you are changing where the address is pointing by the
4686     offset.  */
4687  new_mem = replace_equiv_address_nv (mem, new_addr);
4688  if (! validate_change (mii->mem_insn, mii->mem_loc, new_mem, 0))
4689    {
4690      if (sched_verbose >= 5)
4691	fprintf (sched_dump, "validation failure\n");
4692      return NULL_RTX;
4693    }
4694
4695  /* Put back the old one.  */
4696  validate_change (mii->mem_insn, mii->mem_loc, mem, 0);
4697
4698  return new_mem;
4699}
4700
/* Return true if INSN has the form "a = b + c" where a and b are regs
   and c is a constant integer (possibly negative).  Fill in information
   in MII about what is found.
   BEFORE_MEM indicates whether the increment is found before or after
   a corresponding memory reference.  */
4706
4707static bool
4708parse_add_or_inc (struct mem_inc_info *mii, rtx_insn *insn, bool before_mem)
4709{
4710  rtx pat = single_set (insn);
4711  rtx src, cst;
4712  bool regs_equal;
4713
4714  if (RTX_FRAME_RELATED_P (insn) || !pat)
4715    return false;
4716
4717  /* Result must be single reg.  */
4718  if (!REG_P (SET_DEST (pat)))
4719    return false;
4720
4721  if (GET_CODE (SET_SRC (pat)) != PLUS)
4722    return false;
4723
4724  mii->inc_insn = insn;
4725  src = SET_SRC (pat);
4726  mii->inc_input = XEXP (src, 0);
4727
4728  if (!REG_P (XEXP (src, 0)))
4729    return false;
4730
4731  if (!rtx_equal_p (SET_DEST (pat), mii->mem_reg0))
4732    return false;
4733
4734  cst = XEXP (src, 1);
4735  if (!CONST_INT_P (cst))
4736    return false;
4737  mii->inc_constant = INTVAL (cst);
4738
4739  regs_equal = rtx_equal_p (mii->inc_input, mii->mem_reg0);
4740
4741  if (!before_mem)
4742    {
4743      mii->inc_constant = -mii->inc_constant;
4744      if (!regs_equal)
4745	return false;
4746    }
4747
4748  if (regs_equal && REGNO (SET_DEST (pat)) == STACK_POINTER_REGNUM)
4749    {
4750      /* Note that the sign has already been reversed for !before_mem.  */
4751#ifdef STACK_GROWS_DOWNWARD
4752      return mii->inc_constant > 0;
4753#else
4754      return mii->inc_constant < 0;
4755#endif
4756    }
4757  return true;
4758}
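
/* For instance (hypothetical insn, not from this file), with
   mii->mem_reg0 == (reg:SI 100) the insn

     (set (reg:SI 100) (plus:SI (reg:SI 100) (const_int 8)))

   is accepted: inc_input becomes (reg:SI 100), and inc_constant becomes 8
   when the increment precedes the memory reference (BEFORE_MEM true) or
   -8 when it follows it.  */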
4759
4760/* Once a suitable mem reference has been found and the corresponding data
4761   in MII has been filled in, this function is called to find a suitable
4762   add or inc insn involving the register we found in the memory
4763   reference.  */
4764
4765static bool
4766find_inc (struct mem_inc_info *mii, bool backwards)
4767{
4768  sd_iterator_def sd_it;
4769  dep_t dep;
4770
4771  sd_it = sd_iterator_start (mii->mem_insn,
4772			     backwards ? SD_LIST_HARD_BACK : SD_LIST_FORW);
4773  while (sd_iterator_cond (&sd_it, &dep))
4774    {
4775      dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
4776      rtx_insn *pro = DEP_PRO (dep);
4777      rtx_insn *con = DEP_CON (dep);
4778      rtx_insn *inc_cand = backwards ? pro : con;
4779      if (DEP_NONREG (dep) || DEP_MULTIPLE (dep))
4780	goto next;
4781      if (parse_add_or_inc (mii, inc_cand, backwards))
4782	{
4783	  struct dep_replacement *desc;
4784	  df_ref def;
4785	  rtx newaddr, newmem;
4786
4787	  if (sched_verbose >= 5)
4788	    fprintf (sched_dump, "candidate mem/inc pair: %d %d\n",
4789		     INSN_UID (mii->mem_insn), INSN_UID (inc_cand));
4790
	  /* Need to ensure that none of the operands of the inc
	     instruction are assigned to by the mem insn.  */
4793	  FOR_EACH_INSN_DEF (def, mii->mem_insn)
4794	    if (reg_overlap_mentioned_p (DF_REF_REG (def), mii->inc_input)
4795		|| reg_overlap_mentioned_p (DF_REF_REG (def), mii->mem_reg0))
4796	      {
4797		if (sched_verbose >= 5)
4798		  fprintf (sched_dump,
4799			   "inc conflicts with store failure.\n");
4800		goto next;
4801	      }
4802
4803	  newaddr = mii->inc_input;
4804	  if (mii->mem_index != NULL_RTX)
4805	    newaddr = gen_rtx_PLUS (GET_MODE (newaddr), newaddr,
4806				    mii->mem_index);
4807	  newaddr = plus_constant (GET_MODE (newaddr), newaddr,
4808				   mii->mem_constant + mii->inc_constant);
4809	  newmem = attempt_change (mii, newaddr);
4810	  if (newmem == NULL_RTX)
4811	    goto next;
4812	  if (sched_verbose >= 5)
4813	    fprintf (sched_dump, "successful address replacement\n");
4814	  desc = XCNEW (struct dep_replacement);
4815	  DEP_REPLACE (dep) = desc;
4816	  desc->loc = mii->mem_loc;
4817	  desc->newval = newmem;
4818	  desc->orig = *desc->loc;
4819	  desc->insn = mii->mem_insn;
4820	  move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
4821			 INSN_SPEC_BACK_DEPS (con));
4822	  if (backwards)
4823	    {
4824	      FOR_EACH_DEP (mii->inc_insn, SD_LIST_BACK, sd_it, dep)
4825		add_dependence_1 (mii->mem_insn, DEP_PRO (dep),
4826				  REG_DEP_TRUE);
4827	    }
4828	  else
4829	    {
4830	      FOR_EACH_DEP (mii->inc_insn, SD_LIST_FORW, sd_it, dep)
4831		add_dependence_1 (DEP_CON (dep), mii->mem_insn,
4832				  REG_DEP_ANTI);
4833	    }
4834	  return true;
4835	}
4836    next:
4837      sd_iterator_next (&sd_it);
4838    }
4839  return false;
4840}
4841
4842/* A recursive function that walks ADDRESS_OF_X to find memory references
4843   which could be modified during scheduling.  We call find_inc for each
4844   one we find that has a recognizable form.  MII holds information about
4845   the pair of memory/increment instructions.
4846   We ensure that every instruction with a memory reference (which will be
4847   the location of the replacement) is assigned at most one breakable
4848   dependency.  */
4849
4850static bool
4851find_mem (struct mem_inc_info *mii, rtx *address_of_x)
4852{
4853  rtx x = *address_of_x;
4854  enum rtx_code code = GET_CODE (x);
4855  const char *const fmt = GET_RTX_FORMAT (code);
4856  int i;
4857
4858  if (code == MEM)
4859    {
4860      rtx reg0 = XEXP (x, 0);
4861
4862      mii->mem_loc = address_of_x;
4863      mii->mem_index = NULL_RTX;
4864      mii->mem_constant = 0;
4865      if (GET_CODE (reg0) == PLUS && CONST_INT_P (XEXP (reg0, 1)))
4866	{
4867	  mii->mem_constant = INTVAL (XEXP (reg0, 1));
4868	  reg0 = XEXP (reg0, 0);
4869	}
4870      if (GET_CODE (reg0) == PLUS)
4871	{
4872	  mii->mem_index = XEXP (reg0, 1);
4873	  reg0 = XEXP (reg0, 0);
4874	}
4875      if (REG_P (reg0))
4876	{
4877	  df_ref use;
4878	  int occurrences = 0;
4879
4880	  /* Make sure this reg appears only once in this insn.  Can't use
4881	     count_occurrences since that only works for pseudos.  */
4882	  FOR_EACH_INSN_USE (use, mii->mem_insn)
4883	    if (reg_overlap_mentioned_p (reg0, DF_REF_REG (use)))
4884	      if (++occurrences > 1)
4885		{
4886		  if (sched_verbose >= 5)
4887		    fprintf (sched_dump, "mem count failure\n");
4888		  return false;
4889		}
4890
4891	  mii->mem_reg0 = reg0;
4892	  return find_inc (mii, true) || find_inc (mii, false);
4893	}
4894      return false;
4895    }
4896
4897  if (code == SIGN_EXTRACT || code == ZERO_EXTRACT)
4898    {
4899      /* If REG occurs inside a MEM used in a bit-field reference,
4900	 that is unacceptable.  */
4901      return false;
4902    }
4903
4904  /* Time for some deep diving.  */
4905  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4906    {
4907      if (fmt[i] == 'e')
4908	{
4909	  if (find_mem (mii, &XEXP (x, i)))
4910	    return true;
4911	}
4912      else if (fmt[i] == 'E')
4913	{
4914	  int j;
4915	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4916	    if (find_mem (mii, &XVECEXP (x, i, j)))
4917	      return true;
4918	}
4919    }
4920  return false;
4921}
4922
4923
4924/* Examine the instructions between HEAD and TAIL and try to find
4925   dependencies that can be broken by modifying one of the patterns.  */
4926
4927void
4928find_modifiable_mems (rtx_insn *head, rtx_insn *tail)
4929{
4930  rtx_insn *insn, *next_tail = NEXT_INSN (tail);
4931  int success_in_block = 0;
4932
4933  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
4934    {
4935      struct mem_inc_info mii;
4936
4937      if (!NONDEBUG_INSN_P (insn) || RTX_FRAME_RELATED_P (insn))
4938	continue;
4939
4940      mii.mem_insn = insn;
4941      if (find_mem (&mii, &PATTERN (insn)))
4942	success_in_block++;
4943    }
4944  if (success_in_block && sched_verbose >= 5)
4945    fprintf (sched_dump, "%d candidates for address modification found.\n",
4946	     success_in_block);
4947}
4948
4949#endif /* INSN_SCHEDULING */
4950