1/* Assign reload pseudos.
2   Copyright (C) 2010-2015 Free Software Foundation, Inc.
3   Contributed by Vladimir Makarov <vmakarov@redhat.com>.
4
5This file is part of GCC.
6
7GCC is free software; you can redistribute it and/or modify it under
8the terms of the GNU General Public License as published by the Free
9Software Foundation; either version 3, or (at your option) any later
10version.
11
12GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13WARRANTY; without even the implied warranty of MERCHANTABILITY or
14FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
15for more details.
16
17You should have received a copy of the GNU General Public License
18along with GCC; see the file COPYING3.	If not see
19<http://www.gnu.org/licenses/>.	 */
20
21
22/* This file's main objective is to assign hard registers to reload
23   pseudos.  It also tries to allocate hard registers to other
24   pseudos, but at a lower priority than the reload pseudos.  The pass
25   does not transform the RTL.
26
27   We must allocate a hard register to every reload pseudo.  We try to
28   increase the chances of finding a viable allocation by assigning
29   the pseudos in order of fewest available hard registers first.  If
30   we still fail to find a hard register, we spill other (non-reload)
31   pseudos in order to make room.
32
33   find_hard_regno_for finds hard registers for allocation without
34   spilling.  spill_for does the same with spilling.  Both functions
35   use a cost model to determine the most profitable choice of hard
36   and spill registers.
37
38   Once we have finished allocating reload pseudos, we also try to
39   assign registers to other (non-reload) pseudos.  This is useful if
40   hard registers were freed up by the spilling just described.
41
42   We try to assign hard registers by collecting pseudos into threads.
43   These threads contain reload and inheritance pseudos that are
44   connected by copies (move insns).  Doing this improves the chances
45   of pseudos in the thread getting the same hard register and, as a
46   result, of allowing some move insns to be deleted.
47
48   When we assign a hard register to a pseudo, we decrease the cost of
49   using the same hard register for pseudos that are connected by
50   copies.
51
52   If two hard registers have the same frequency-derived cost, we
53   prefer hard registers with higher priorities.  The mapping of
54   registers to priorities is controlled by the register_priority
55   target hook.  For example, x86-64 has a few register priorities:
56   hard registers with and without REX prefixes have different
57   priorities.  This permits us to generate smaller code as insns
58   without REX prefixes are shorter.
59
60   If a few hard registers are still equally good for the assignment,
61   we choose the least used hard register.  It is called leveling and
62   may be profitable for some targets.
63
64   Only insns with changed allocation pseudos are processed on the
65   next constraint pass.
66
67   The pseudo live-ranges are used to find conflicting pseudos.
68
69   For understanding the code, it is important to keep in mind that
70   inheritance, split, and reload pseudos created since last
71   constraint pass have regno >= lra_constraint_new_regno_start.
   Inheritance and split pseudos created on any pass are in the
   corresponding bitmaps.  Inheritance and split pseudos created since
   the last constraint pass also have a corresponding non-negative
   restore_regno.  */
76
77#include "config.h"
78#include "system.h"
79#include "coretypes.h"
80#include "tm.h"
81#include "hard-reg-set.h"
82#include "rtl.h"
83#include "rtl-error.h"
84#include "tm_p.h"
85#include "target.h"
86#include "insn-config.h"
87#include "recog.h"
88#include "output.h"
89#include "regs.h"
90#include "hashtab.h"
91#include "hash-set.h"
92#include "vec.h"
93#include "machmode.h"
94#include "input.h"
95#include "function.h"
96#include "symtab.h"
97#include "flags.h"
98#include "statistics.h"
99#include "double-int.h"
100#include "real.h"
101#include "fixed-value.h"
102#include "alias.h"
103#include "wide-int.h"
104#include "inchash.h"
105#include "tree.h"
106#include "expmed.h"
107#include "dojump.h"
108#include "explow.h"
109#include "calls.h"
110#include "emit-rtl.h"
111#include "varasm.h"
112#include "stmt.h"
113#include "expr.h"
114#include "predict.h"
115#include "dominance.h"
116#include "cfg.h"
117#include "basic-block.h"
118#include "except.h"
119#include "df.h"
120#include "ira.h"
121#include "sparseset.h"
122#include "params.h"
123#include "lra-int.h"
124
/* Current iteration number of the pass and current iteration number
   of the pass after the latest spill pass when any former reload
   pseudo was spilled.  Used by the driver to decide when to give up
   and by heuristics below.  */
int lra_assignment_iter;
int lra_assignment_iter_after_spill;

/* True if any former reload pseudo was spilled during the current
   pass.  */
static bool former_reload_pseudo_spill_p;

/* Array containing corresponding values of function
   lra_get_allocno_class.  It is used to speed up the code (caching
   the class per regno instead of recomputing it).  */
static enum reg_class *regno_allocno_class_array;

/* Information about the thread to which a pseudo belongs.  Threads are
   a set of connected reload and inheritance pseudos with the same set of
   available hard registers.  Lone registers belong to their own threads.  */
struct regno_assign_info
{
  /* First/next pseudo of the same thread.  FIRST identifies the
     thread; NEXT links the members, -1 terminating the chain.  */
  int first, next;
  /* Frequency of the thread (execution frequency of only reload
     pseudos in the thread when the thread contains a reload pseudo).
     Defined only for the first thread pseudo.	*/
  int freq;
};

/* Map regno to the corresponding regno assignment info.  */
static struct regno_assign_info *regno_assign_info;

/* All inherited, subreg or optional pseudos created before last spill
   sub-pass.  Such pseudos are permitted to get memory instead of hard
   regs.  */
static bitmap_head non_reload_pseudos;
158
/* Process a pseudo copy with execution frequency COPY_FREQ connecting
   REGNO1 and REGNO2 to form threads.  Merges the two pseudos' threads
   (if distinct) and charges the copy against the merged thread's
   frequency.  */
static void
process_copy_to_form_thread (int regno1, int regno2, int copy_freq)
{
  int last, regno1_first, regno2_first;

  lra_assert (regno1 >= lra_constraint_new_regno_start
	      && regno2 >= lra_constraint_new_regno_start);
  regno1_first = regno_assign_info[regno1].first;
  regno2_first = regno_assign_info[regno2].first;
  if (regno1_first != regno2_first)
    {
      /* Distinct threads: splice REGNO2's thread into REGNO1's.
	 Redirect every member of REGNO2's thread to the new head.  */
      for (last = regno2_first;
	   regno_assign_info[last].next >= 0;
	   last = regno_assign_info[last].next)
	regno_assign_info[last].first = regno1_first;
      /* The loop above exits before processing the final member;
	 redirect it too.  */
      regno_assign_info[last].first = regno1_first;
      /* Link REGNO2's whole chain right after the head of REGNO1's
	 thread.  */
      regno_assign_info[last].next = regno_assign_info[regno1_first].next;
      regno_assign_info[regno1_first].next = regno2_first;
      /* The thread frequency lives on the head entry only.  */
      regno_assign_info[regno1_first].freq
	+= regno_assign_info[regno2_first].freq;
    }
  /* Discount the copy itself: if both sides get the same hard
     register the copy insn can be removed, so each member's freq was
     counted once too often on each side.  */
  regno_assign_info[regno1_first].freq -= 2 * copy_freq;
  lra_assert (regno_assign_info[regno1_first].freq >= 0);
}
185
186/* Initialize REGNO_ASSIGN_INFO and form threads.  */
187static void
188init_regno_assign_info (void)
189{
190  int i, regno1, regno2, max_regno = max_reg_num ();
191  lra_copy_t cp;
192
193  regno_assign_info = XNEWVEC (struct regno_assign_info, max_regno);
194  for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
195    {
196      regno_assign_info[i].first = i;
197      regno_assign_info[i].next = -1;
198      regno_assign_info[i].freq = lra_reg_info[i].freq;
199    }
200  /* Form the threads.	*/
201  for (i = 0; (cp = lra_get_copy (i)) != NULL; i++)
202    if ((regno1 = cp->regno1) >= lra_constraint_new_regno_start
203	&& (regno2 = cp->regno2) >= lra_constraint_new_regno_start
204	&& reg_renumber[regno1] < 0 && lra_reg_info[regno1].nrefs != 0
205	&& reg_renumber[regno2] < 0 && lra_reg_info[regno2].nrefs != 0
206	&& (ira_class_hard_regs_num[regno_allocno_class_array[regno1]]
207	    == ira_class_hard_regs_num[regno_allocno_class_array[regno2]]))
208      process_copy_to_form_thread (regno1, regno2, cp->freq);
209}
210
/* Free REGNO_ASSIGN_INFO allocated by init_regno_assign_info.  */
static void
finish_regno_assign_info (void)
{
  free (regno_assign_info);
}
217
/* The function is used to sort *reload* and *inheritance* pseudos to
   try to assign them hard registers.  We put pseudos from the same
   thread always nearby.  Criteria are applied in decreasing order of
   importance; the order of the tie-breakers below is significant.  */
static int
reload_pseudo_compare_func (const void *v1p, const void *v2p)
{
  int r1 = *(const int *) v1p, r2 = *(const int *) v2p;
  enum reg_class cl1 = regno_allocno_class_array[r1];
  enum reg_class cl2 = regno_allocno_class_array[r2];
  int diff;

  lra_assert (r1 >= lra_constraint_new_regno_start
	      && r2 >= lra_constraint_new_regno_start);

  /* Prefer to assign reload registers with smaller classes first to
     guarantee assignment to all reload registers.  */
  if ((diff = (ira_class_hard_regs_num[cl1]
	       - ira_class_hard_regs_num[cl2])) != 0)
    return diff;
  /* Allocate bigger pseudos first, but only when both are genuine
     reload pseudos (pseudos recorded in NON_RELOAD_PSEUDOS may be
     spilled to memory, so this criterion is deliberately skipped for
     them here; the same comparison is reapplied unconditionally as a
     later tie-breaker below).  */
  if ((diff
       = (ira_reg_class_max_nregs[cl2][lra_reg_info[r2].biggest_mode]
	  - ira_reg_class_max_nregs[cl1][lra_reg_info[r1].biggest_mode])) != 0
      /* The code below executes rarely as nregs == 1 in most cases.
	 So we should not worry about using faster data structures to
	 check reload pseudos.  */
      && ! bitmap_bit_p (&non_reload_pseudos, r1)
      && ! bitmap_bit_p (&non_reload_pseudos, r2))
    return diff;
  /* Prefer members of higher-frequency threads.  */
  if ((diff = (regno_assign_info[regno_assign_info[r2].first].freq
	       - regno_assign_info[regno_assign_info[r1].first].freq)) != 0)
    return diff;
  /* Allocate bigger pseudos first to avoid register file
     fragmentation.  */
  if ((diff
       = (ira_reg_class_max_nregs[cl2][lra_reg_info[r2].biggest_mode]
	  - ira_reg_class_max_nregs[cl1][lra_reg_info[r1].biggest_mode])) != 0)
    return diff;
  /* Put pseudos from the thread nearby.  */
  if ((diff = regno_assign_info[r1].first - regno_assign_info[r2].first) != 0)
    return diff;
  /* If regs are equally good, sort by their numbers, so that the
     results of qsort leave nothing to chance.	*/
  return r1 - r2;
}
262
263/* The function is used to sort *non-reload* pseudos to try to assign
264   them hard registers.	 The order calculation is simpler than in the
265   previous function and based on the pseudo frequency usage.  */
266static int
267pseudo_compare_func (const void *v1p, const void *v2p)
268{
269  int r1 = *(const int *) v1p, r2 = *(const int *) v2p;
270  int diff;
271
272  /* Prefer to assign more frequently used registers first.  */
273  if ((diff = lra_reg_info[r2].freq - lra_reg_info[r1].freq) != 0)
274    return diff;
275
276  /* If regs are equally good, sort by their numbers, so that the
277     results of qsort leave nothing to chance.	*/
278  return r1 - r2;
279}
280
/* Arrays of size LRA_LIVE_MAX_POINT mapping a program point to the
   pseudo live ranges with given start point.  We insert only live
   ranges of pseudos interesting for assignment purposes.  They are
   reload pseudos and pseudos assigned to hard registers.  */
static lra_live_range_t *start_point_ranges;

/* Used as a flag that a live range is not inserted in the start point
   chain.  Ranges of uninteresting pseudos point here via start_next
   so membership can be tested cheaply.  */
static struct lra_live_range not_in_chain_mark;

/* Create and set up START_POINT_RANGES.  Interesting pseudos (new
   reload/inheritance pseudos, and pseudos already assigned a hard
   register) are chained by their range start points; all other
   pseudos have their ranges marked with NOT_IN_CHAIN_MARK.  */
static void
create_live_range_start_chains (void)
{
  int i, max_regno;
  lra_live_range_t r;

  start_point_ranges = XCNEWVEC (lra_live_range_t, lra_live_max_point);
  max_regno = max_reg_num ();
  for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
    if (i >= lra_constraint_new_regno_start || reg_renumber[i] >= 0)
      {
	/* Interesting pseudo: push each of its ranges onto the chain
	   for the range's start point.  */
	for (r = lra_reg_info[i].live_ranges; r != NULL; r = r->next)
	  {
	    r->start_next = start_point_ranges[r->start];
	    start_point_ranges[r->start] = r;
	  }
      }
    else
      {
	/* Not interesting (yet): mark the ranges as out of the
	   chains so insert_in_live_range_start_chain can detect
	   this later.  */
	for (r = lra_reg_info[i].live_ranges; r != NULL; r = r->next)
	  r->start_next = &not_in_chain_mark;
      }
}
315
/* Insert live ranges of pseudo REGNO into start chains if they are
   not there yet.  Checking only the first range suffices because all
   of a pseudo's ranges are either all in the chains or all marked
   with NOT_IN_CHAIN_MARK (see create_live_range_start_chains).  */
static void
insert_in_live_range_start_chain (int regno)
{
  lra_live_range_t r = lra_reg_info[regno].live_ranges;

  if (r->start_next != &not_in_chain_mark)
    return;
  for (; r != NULL; r = r->next)
    {
      r->start_next = start_point_ranges[r->start];
      start_point_ranges[r->start] = r;
    }
}
331
/* Free START_POINT_RANGES and clear the pointer so double
   finalization is caught by the assert.  */
static void
finish_live_range_start_chains (void)
{
  gcc_assert (start_point_ranges != NULL);
  free (start_point_ranges);
  start_point_ranges = NULL;
}
340
/* Map: program point -> bitmap of all pseudos living at the point and
   assigned to hard registers.	*/
static bitmap_head *live_hard_reg_pseudos;
static bitmap_obstack live_hard_reg_pseudos_bitmap_obstack;

/* reg_renumber corresponding to pseudos marked in
   live_hard_reg_pseudos.  reg_renumber might be not matched to
   live_hard_reg_pseudos but live_pseudos_reg_renumber always reflects
   live_hard_reg_pseudos.  */
static int *live_pseudos_reg_renumber;

/* Sparseset used to calculate living hard reg pseudos for some program
   point range.	 */
static sparseset live_range_hard_reg_pseudos;

/* Sparseset used to calculate living reload/inheritance pseudos for
   some program point range.  */
static sparseset live_range_reload_inheritance_pseudos;

/* Allocate and initialize the data about living pseudos at program
   points: the two sparsesets above, one bitmap per program point in
   LIVE_HARD_REG_PSEUDOS, and LIVE_PSEUDOS_REG_RENUMBER (all entries
   start as -1, i.e. unassigned).  */
static void
init_lives (void)
{
  int i, max_regno = max_reg_num ();

  live_range_hard_reg_pseudos = sparseset_alloc (max_regno);
  live_range_reload_inheritance_pseudos = sparseset_alloc (max_regno);
  live_hard_reg_pseudos = XNEWVEC (bitmap_head, lra_live_max_point);
  bitmap_obstack_initialize (&live_hard_reg_pseudos_bitmap_obstack);
  for (i = 0; i < lra_live_max_point; i++)
    bitmap_initialize (&live_hard_reg_pseudos[i],
		       &live_hard_reg_pseudos_bitmap_obstack);
  live_pseudos_reg_renumber = XNEWVEC (int, max_regno);
  for (i = 0; i < max_regno; i++)
    live_pseudos_reg_renumber[i] = -1;
}
378
/* Free the data about living pseudos at program points.  Releasing
   the obstack frees all the per-point bitmaps at once.  */
static void
finish_lives (void)
{
  sparseset_free (live_range_hard_reg_pseudos);
  sparseset_free (live_range_reload_inheritance_pseudos);
  free (live_hard_reg_pseudos);
  bitmap_obstack_release (&live_hard_reg_pseudos_bitmap_obstack);
  free (live_pseudos_reg_renumber);
}
389
/* Update the LIVE_HARD_REG_PSEUDOS and LIVE_PSEUDOS_REG_RENUMBER
   entries for pseudo REGNO.  Assume that the register has been
   spilled if FREE_P, otherwise assume that it has been assigned
   reg_renumber[REGNO] (if >= 0).  We also insert the pseudo live
   ranges in the start chains when it is assumed to be assigned to a
   hard register because we use the chains of pseudos assigned to hard
   registers during allocation.  */
static void
update_lives (int regno, bool free_p)
{
  int p;
  lra_live_range_t r;

  /* Nothing to do for a pseudo without a hard register.  */
  if (reg_renumber[regno] < 0)
    return;
  live_pseudos_reg_renumber[regno] = free_p ? -1 : reg_renumber[regno];
  /* Walk every program point of every live range and update the
     per-point bitmap of hard-register pseudos.  */
  for (r = lra_reg_info[regno].live_ranges; r != NULL; r = r->next)
    {
      for (p = r->start; p <= r->finish; p++)
	if (free_p)
	  bitmap_clear_bit (&live_hard_reg_pseudos[p], regno);
	else
	  {
	    bitmap_set_bit (&live_hard_reg_pseudos[p], regno);
	    insert_in_live_range_start_chain (regno);
	  }
    }
}
418
/* Sparseset used to calculate reload pseudos conflicting with a given
   pseudo when we are trying to find a hard register for the given
   pseudo.  */
static sparseset conflict_reload_and_inheritance_pseudos;

/* Map: program point -> bitmap of all reload and inheritance pseudos
   living at the point.	 */
static bitmap_head *live_reload_and_inheritance_pseudos;
static bitmap_obstack live_reload_and_inheritance_pseudos_bitmap_obstack;

/* Allocate and initialize data about living reload pseudos at any
   given program point: one bitmap per point, populated from the
   live ranges of all new (>= lra_constraint_new_regno_start)
   pseudos.  */
static void
init_live_reload_and_inheritance_pseudos (void)
{
  int i, p, max_regno = max_reg_num ();
  lra_live_range_t r;

  conflict_reload_and_inheritance_pseudos = sparseset_alloc (max_regno);
  live_reload_and_inheritance_pseudos = XNEWVEC (bitmap_head, lra_live_max_point);
  bitmap_obstack_initialize (&live_reload_and_inheritance_pseudos_bitmap_obstack);
  for (p = 0; p < lra_live_max_point; p++)
    bitmap_initialize (&live_reload_and_inheritance_pseudos[p],
		       &live_reload_and_inheritance_pseudos_bitmap_obstack);
  for (i = lra_constraint_new_regno_start; i < max_regno; i++)
    {
      /* Mark pseudo I live at every point inside each of its
	 ranges.  */
      for (r = lra_reg_info[i].live_ranges; r != NULL; r = r->next)
	for (p = r->start; p <= r->finish; p++)
	  bitmap_set_bit (&live_reload_and_inheritance_pseudos[p], i);
    }
}
450
/* Finalize data about living reload pseudos at any given program
   point.  Releasing the obstack frees all per-point bitmaps.  */
static void
finish_live_reload_and_inheritance_pseudos (void)
{
  sparseset_free (conflict_reload_and_inheritance_pseudos);
  free (live_reload_and_inheritance_pseudos);
  bitmap_obstack_release (&live_reload_and_inheritance_pseudos_bitmap_obstack);
}
460
/* The value used to check that cost of given hard reg is really
   defined currently.  Incremented once per find_hard_regno_for_1
   call, which lazily invalidates the whole cost array without
   clearing it.  */
static int curr_hard_regno_costs_check = 0;
/* Array used to check that cost of the corresponding hard reg (the
   array element index) is really defined currently.  */
static int hard_regno_costs_check[FIRST_PSEUDO_REGISTER];
/* The current costs of allocation of hard regs.  Defined only if the
   value of the corresponding element of the previous array is equal to
   CURR_HARD_REGNO_COSTS_CHECK.	 */
static int hard_regno_costs[FIRST_PSEUDO_REGISTER];
471
472/* Adjust cost of HARD_REGNO by INCR.  Reset the cost first if it is
473   not defined yet.  */
474static inline void
475adjust_hard_regno_cost (int hard_regno, int incr)
476{
477  if (hard_regno_costs_check[hard_regno] != curr_hard_regno_costs_check)
478    hard_regno_costs[hard_regno] = 0;
479  hard_regno_costs_check[hard_regno] = curr_hard_regno_costs_check;
480  hard_regno_costs[hard_regno] += incr;
481}
482
/* Try to find a free hard register for pseudo REGNO.  Return the
   hard register on success and set *COST to the cost of using
   that register.  (If several registers have equal cost, the one with
   the highest priority wins.)  Return -1 on failure.

   If FIRST_P, return the first available hard reg ignoring other
   criteria, e.g. allocation cost.  This approach results in less hard
   reg pool fragmentation and permits allocation of hard regs to
   reload pseudos in complicated situations where pseudo sizes are
   different.

   If TRY_ONLY_HARD_REGNO >= 0, consider only that hard register,
   otherwise consider all hard registers in REGNO's class.

   If REGNO_SET is not empty, only hard registers from the set are
   considered.  */
static int
find_hard_regno_for_1 (int regno, int *cost, int try_only_hard_regno,
		       bool first_p, HARD_REG_SET regno_set)
{
  HARD_REG_SET conflict_set;
  int best_cost = INT_MAX, best_priority = INT_MIN, best_usage = INT_MAX;
  lra_live_range_t r;
  int p, i, j, rclass_size, best_hard_regno, priority, hard_regno;
  int hr, conflict_hr, nregs;
  machine_mode biggest_mode;
  unsigned int k, conflict_regno;
  int offset, val, biggest_nregs, nregs_diff;
  enum reg_class rclass;
  bitmap_iterator bi;
  bool *rclass_intersect_p;
  HARD_REG_SET impossible_start_hard_regs, available_regs;

  /* Start CONFLICT_SET with the registers that can never be used:
     everything outside REGNO_SET (when it is given) plus the
     unallocatable registers.  */
  if (hard_reg_set_empty_p (regno_set))
    COPY_HARD_REG_SET (conflict_set, lra_no_alloc_regs);
  else
    {
      COMPL_HARD_REG_SET (conflict_set, regno_set);
      IOR_HARD_REG_SET (conflict_set, lra_no_alloc_regs);
    }
  rclass = regno_allocno_class_array[regno];
  rclass_intersect_p = ira_reg_classes_intersect_p[rclass];
  /* Lazily invalidate all entries of the hard-regno cost cache.  */
  curr_hard_regno_costs_check++;
  sparseset_clear (conflict_reload_and_inheritance_pseudos);
  sparseset_clear (live_range_hard_reg_pseudos);
  IOR_HARD_REG_SET (conflict_set, lra_reg_info[regno].conflict_hard_regs);
  biggest_mode = lra_reg_info[regno].biggest_mode;
  /* Scan REGNO's live ranges and collect (a) pseudos already assigned
     to hard registers that are live somewhere in the ranges and whose
     class intersects RCLASS, and (b) unassigned reload/inheritance
     pseudos with a hard-register preference that overlap REGNO.  */
  for (r = lra_reg_info[regno].live_ranges; r != NULL; r = r->next)
    {
      EXECUTE_IF_SET_IN_BITMAP (&live_hard_reg_pseudos[r->start], 0, k, bi)
	if (rclass_intersect_p[regno_allocno_class_array[k]])
	  sparseset_set_bit (live_range_hard_reg_pseudos, k);
      EXECUTE_IF_SET_IN_BITMAP (&live_reload_and_inheritance_pseudos[r->start],
				0, k, bi)
	if (lra_reg_info[k].preferred_hard_regno1 >= 0
	    && live_pseudos_reg_renumber[k] < 0
	    && rclass_intersect_p[regno_allocno_class_array[k]])
	  sparseset_set_bit (conflict_reload_and_inheritance_pseudos, k);
      /* Pseudos live at R->START were handled above; pick up pseudos
	 whose ranges *begin* strictly inside this range via the start
	 point chains.  */
      for (p = r->start + 1; p <= r->finish; p++)
	{
	  lra_live_range_t r2;

	  for (r2 = start_point_ranges[p];
	       r2 != NULL;
	       r2 = r2->start_next)
	    {
	      if (r2->regno >= lra_constraint_new_regno_start
		  && lra_reg_info[r2->regno].preferred_hard_regno1 >= 0
		  && live_pseudos_reg_renumber[r2->regno] < 0
		  && rclass_intersect_p[regno_allocno_class_array[r2->regno]])
		sparseset_set_bit (conflict_reload_and_inheritance_pseudos,
				   r2->regno);
	      if (live_pseudos_reg_renumber[r2->regno] >= 0
		  && rclass_intersect_p[regno_allocno_class_array[r2->regno]])
		sparseset_set_bit (live_range_hard_reg_pseudos, r2->regno);
	    }
	}
    }
  /* Make REGNO's own preferred hard registers cheaper by their
     recorded profit.  */
  if ((hard_regno = lra_reg_info[regno].preferred_hard_regno1) >= 0)
    {
      adjust_hard_regno_cost
	(hard_regno, -lra_reg_info[regno].preferred_hard_regno_profit1);
      if ((hard_regno = lra_reg_info[regno].preferred_hard_regno2) >= 0)
	adjust_hard_regno_cost
	  (hard_regno, -lra_reg_info[regno].preferred_hard_regno_profit2);
    }
#ifdef STACK_REGS
  /* Exclude the x87-style stack registers when the pseudo must not
     live in them.  */
  if (lra_reg_info[regno].no_stack_p)
    for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
      SET_HARD_REG_BIT (conflict_set, i);
#endif
  sparseset_clear_bit (conflict_reload_and_inheritance_pseudos, regno);
  val = lra_reg_info[regno].val;
  offset = lra_reg_info[regno].offset;
  CLEAR_HARD_REG_SET (impossible_start_hard_regs);
  EXECUTE_IF_SET_IN_SPARSESET (live_range_hard_reg_pseudos, conflict_regno)
    if (lra_reg_val_equal_p (conflict_regno, val, offset))
      {
	/* CONFLICT_REGNO holds the same value as REGNO, so sharing
	   its hard register is fine -- but only if both start on the
	   same hard register.  */
	conflict_hr = live_pseudos_reg_renumber[conflict_regno];
	nregs = (hard_regno_nregs[conflict_hr]
		 [lra_reg_info[conflict_regno].biggest_mode]);
	/* Remember about multi-register pseudos.  For example, 2 hard
	   register pseudos can start on the same hard register but can
	   not start on HR and HR+1/HR-1.  */
	for (hr = conflict_hr + 1;
	     hr < FIRST_PSEUDO_REGISTER && hr < conflict_hr + nregs;
	     hr++)
	  SET_HARD_REG_BIT (impossible_start_hard_regs, hr);
	for (hr = conflict_hr - 1;
	     hr >= 0 && hr + hard_regno_nregs[hr][biggest_mode] > conflict_hr;
	     hr--)
	  SET_HARD_REG_BIT (impossible_start_hard_regs, hr);
      }
    else
      {
	/* A genuine conflict: every hard register the conflicting
	   pseudo occupies is unusable.  */
	add_to_hard_reg_set (&conflict_set,
			     lra_reg_info[conflict_regno].biggest_mode,
			     live_pseudos_reg_renumber[conflict_regno]);
	/* Bail out early once the whole class is blocked.  */
	if (hard_reg_set_subset_p (reg_class_contents[rclass],
				   conflict_set))
	  return -1;
      }
  /* Penalize hard registers preferred by conflicting, still
     unassigned reload/inheritance pseudos -- taking such a register
     would rob them of their preference.  */
  EXECUTE_IF_SET_IN_SPARSESET (conflict_reload_and_inheritance_pseudos,
			       conflict_regno)
    if (!lra_reg_val_equal_p (conflict_regno, val, offset))
      {
	lra_assert (live_pseudos_reg_renumber[conflict_regno] < 0);
	if ((hard_regno
	     = lra_reg_info[conflict_regno].preferred_hard_regno1) >= 0)
	  {
	    adjust_hard_regno_cost
	      (hard_regno,
	       lra_reg_info[conflict_regno].preferred_hard_regno_profit1);
	    if ((hard_regno
		 = lra_reg_info[conflict_regno].preferred_hard_regno2) >= 0)
	      adjust_hard_regno_cost
		(hard_regno,
		 lra_reg_info[conflict_regno].preferred_hard_regno_profit2);
	  }
      }
  /* Make sure that all registers in a multi-word pseudo belong to the
     required class.  */
  IOR_COMPL_HARD_REG_SET (conflict_set, reg_class_contents[rclass]);
  lra_assert (rclass != NO_REGS);
  rclass_size = ira_class_hard_regs_num[rclass];
  best_hard_regno = -1;
  hard_regno = ira_class_hard_regs[rclass][0];
  /* Difference between the number of regs the pseudo occupies in its
     biggest mode and in its natural mode; used below to make sure the
     extra registers of the biggest mode are also available.  */
  biggest_nregs = hard_regno_nregs[hard_regno][biggest_mode];
  nregs_diff = (biggest_nregs
		- hard_regno_nregs[hard_regno][PSEUDO_REGNO_MODE (regno)]);
  COPY_HARD_REG_SET (available_regs, reg_class_contents[rclass]);
  AND_COMPL_HARD_REG_SET (available_regs, lra_no_alloc_regs);
  /* Walk the class's hard registers in allocation order and pick the
     cheapest acceptable one (preferring priority, then leveling, as
     tie-breakers).  */
  for (i = 0; i < rclass_size; i++)
    {
      if (try_only_hard_regno >= 0)
	hard_regno = try_only_hard_regno;
      else
	hard_regno = ira_class_hard_regs[rclass][i];
      if (! overlaps_hard_reg_set_p (conflict_set,
				     PSEUDO_REGNO_MODE (regno), hard_regno)
	  /* We can not use prohibited_class_mode_regs because it is
	     not defined for all classes.  */
	  && HARD_REGNO_MODE_OK (hard_regno, PSEUDO_REGNO_MODE (regno))
	  && ! TEST_HARD_REG_BIT (impossible_start_hard_regs, hard_regno)
	  && (nregs_diff == 0
	      || (WORDS_BIG_ENDIAN
		  ? (hard_regno - nregs_diff >= 0
		     && TEST_HARD_REG_BIT (available_regs,
					   hard_regno - nregs_diff))
		  : TEST_HARD_REG_BIT (available_regs,
				       hard_regno + nregs_diff))))
	{
	  if (hard_regno_costs_check[hard_regno]
	      != curr_hard_regno_costs_check)
	    {
	      hard_regno_costs_check[hard_regno] = curr_hard_regno_costs_check;
	      hard_regno_costs[hard_regno] = 0;
	    }
	  /* Charge for saving/restoring call-used registers that are
	     not yet used anywhere in the function.  */
	  for (j = 0;
	       j < hard_regno_nregs[hard_regno][PSEUDO_REGNO_MODE (regno)];
	       j++)
	    if (! TEST_HARD_REG_BIT (call_used_reg_set, hard_regno + j)
		&& ! df_regs_ever_live_p (hard_regno + j))
	      /* It needs save restore.	 */
	      hard_regno_costs[hard_regno]
		+= (2
		    * REG_FREQ_FROM_BB (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb)
		    + 1);
	  priority = targetm.register_priority (hard_regno);
	  if (best_hard_regno < 0 || hard_regno_costs[hard_regno] < best_cost
	      || (hard_regno_costs[hard_regno] == best_cost
		  && (priority > best_priority
		      || (targetm.register_usage_leveling_p ()
			  && priority == best_priority
			  && best_usage > lra_hard_reg_usage[hard_regno]))))
	    {
	      best_hard_regno = hard_regno;
	      best_cost = hard_regno_costs[hard_regno];
	      best_priority = priority;
	      best_usage = lra_hard_reg_usage[hard_regno];
	    }
	}
      /* With TRY_ONLY_HARD_REGNO there is nothing else to try; with
	 FIRST_P the first acceptable register wins.  */
      if (try_only_hard_regno >= 0 || (first_p && best_hard_regno >= 0))
	break;
    }
  if (best_hard_regno >= 0)
    *cost = best_cost - lra_reg_info[regno].freq;
  return best_hard_regno;
}
691
/* A wrapper for find_hard_regno_for_1 (see comments for that function
   description).  This function first tries to find a hard register in
   the pseudo's preferred class (when it differs from its allocno
   class) and falls back to the full class on failure.  */
static int
find_hard_regno_for (int regno, int *cost, int try_only_hard_regno, bool first_p)
{
  int hard_regno;
  HARD_REG_SET regno_set;

  /* Only original pseudos can have a different preferred class.  */
  if (try_only_hard_regno < 0 && regno < lra_new_regno_start)
    {
      enum reg_class pref_class = reg_preferred_class (regno);

      if (regno_allocno_class_array[regno] != pref_class)
	{
	  /* Restrict the search to the preferred class first.  */
	  hard_regno = find_hard_regno_for_1 (regno, cost, -1, first_p,
					      reg_class_contents[pref_class]);
	  if (hard_regno >= 0)
	    return hard_regno;
	}
    }
  /* An empty REGNO_SET means "no restriction" to
     find_hard_regno_for_1.  */
  CLEAR_HARD_REG_SET (regno_set);
  return find_hard_regno_for_1 (regno, cost, try_only_hard_regno, first_p,
				regno_set);
}
718
/* Current value used for checking elements in
   update_hard_regno_preference_check.	*/
static int curr_update_hard_regno_preference_check;
/* If an element value is equal to the above variable value, then the
   corresponding regno has been processed for preference
   propagation.	 */
static int *update_hard_regno_preference_check;

/* Update the preference for using HARD_REGNO for pseudos that are
   connected directly or indirectly with REGNO.  Apply divisor DIV
   to any preference adjustments.

   The more indirectly a pseudo is connected, the smaller its effect
   should be.  We therefore increase DIV on each "hop".  */
static void
update_hard_regno_preference (int regno, int hard_regno, int div)
{
  int another_regno, cost;
  lra_copy_t cp, next_cp;

  /* Search depth 5 seems to be enough.	 */
  if (div > (1 << 5))
    return;
  /* Walk all copies involving REGNO; the copy list is shared, so the
     next pointer depends on which side REGNO is on.  */
  for (cp = lra_reg_info[regno].copies; cp != NULL; cp = next_cp)
    {
      if (cp->regno1 == regno)
	{
	  next_cp = cp->regno1_next;
	  another_regno = cp->regno2;
	}
      else if (cp->regno2 == regno)
	{
	  next_cp = cp->regno2_next;
	  another_regno = cp->regno1;
	}
      else
	gcc_unreachable ();
      /* Propagate only to still-unassigned pseudos not yet visited in
	 this propagation round (check-counter protocol).  */
      if (reg_renumber[another_regno] < 0
	  && (update_hard_regno_preference_check[another_regno]
	      != curr_update_hard_regno_preference_check))
	{
	  update_hard_regno_preference_check[another_regno]
	    = curr_update_hard_regno_preference_check;
	  /* Scale the copy frequency down by DIV, never below 1.  */
	  cost = cp->freq < div ? 1 : cp->freq / div;
	  lra_setup_reload_pseudo_preferenced_hard_reg
	    (another_regno, hard_regno, cost);
	  /* Recurse one hop further with a halved effect.  */
	  update_hard_regno_preference (another_regno, hard_regno, div * 2);
	}
    }
}
769
770/* Return prefix title for pseudo REGNO.  */
771static const char *
772pseudo_prefix_title (int regno)
773{
774  return
775    (regno < lra_constraint_new_regno_start ? ""
776     : bitmap_bit_p (&lra_inheritance_pseudos, regno) ? "inheritance "
777     : bitmap_bit_p (&lra_split_regs, regno) ? "split "
778     : bitmap_bit_p (&lra_optional_reload_pseudos, regno) ? "optional reload "
779     : bitmap_bit_p (&lra_subreg_reload_pseudos, regno) ? "subreg reload "
780     : "reload ");
781}
782
/* Update REG_RENUMBER and other pseudo preferences by assignment of
   HARD_REGNO to pseudo REGNO and print about it if PRINT_P.
   HARD_REGNO < 0 means the pseudo is being spilled.  */
void
lra_setup_reg_renumber (int regno, int hard_regno, bool print_p)
{
  int i, hr;

  /* We can not just reassign hard register.  */
  lra_assert (hard_regno < 0 || reg_renumber[regno] < 0);
  /* HR is the hard register whose usage statistics must change: the
     new register on assignment, the old one on spilling.  */
  if ((hr = hard_regno) < 0)
    hr = reg_renumber[regno];
  reg_renumber[regno] = hard_regno;
  lra_assert (hr >= 0);
  /* Adjust the accumulated usage frequency of every hard register the
     pseudo's mode covers.  */
  for (i = 0; i < hard_regno_nregs[hr][PSEUDO_REGNO_MODE (regno)]; i++)
    if (hard_regno < 0)
      lra_hard_reg_usage[hr + i] -= lra_reg_info[regno].freq;
    else
      lra_hard_reg_usage[hr + i] += lra_reg_info[regno].freq;
  if (print_p && lra_dump_file != NULL)
    fprintf (lra_dump_file, "	   Assign %d to %sr%d (freq=%d)\n",
	     reg_renumber[regno], pseudo_prefix_title (regno),
	     regno, lra_reg_info[regno].freq);
  if (hard_regno >= 0)
    {
      /* Propagate the HARD_REGNO preference to pseudos connected to
	 REGNO by copies (new check-counter round).  */
      curr_update_hard_regno_preference_check++;
      update_hard_regno_preference (regno, hard_regno, 1);
    }
}
811
/* Pseudos which occur in insns containing a particular pseudo.  */
static bitmap_head insn_conflict_pseudos;

/* Bitmaps used to contain spill pseudos for given pseudo hard regno
   and best spill pseudos for given pseudo (and best hard regno).  */
static bitmap_head spill_pseudos_bitmap, best_spill_pseudos_bitmap;

/* Current pseudo check for validity of elements in
   TRY_HARD_REG_PSEUDOS (lazy-invalidation counter, same protocol as
   curr_hard_regno_costs_check).  */
static int curr_pseudo_check;
/* Array used for validity of elements in TRY_HARD_REG_PSEUDOS.	 */
static int try_hard_reg_pseudos_check[FIRST_PSEUDO_REGISTER];
/* Pseudos who hold given hard register at the considered points.  */
static bitmap_head try_hard_reg_pseudos[FIRST_PSEUDO_REGISTER];
826
827/* Set up try_hard_reg_pseudos for given program point P and class
828   RCLASS.  Those are pseudos living at P and assigned to a hard
829   register of RCLASS.	In other words, those are pseudos which can be
830   spilled to assign a hard register of RCLASS to a pseudo living at
831   P.  */
832static void
833setup_try_hard_regno_pseudos (int p, enum reg_class rclass)
834{
835  int i, hard_regno;
836  machine_mode mode;
837  unsigned int spill_regno;
838  bitmap_iterator bi;
839
840  /* Find what pseudos could be spilled.  */
841  EXECUTE_IF_SET_IN_BITMAP (&live_hard_reg_pseudos[p], 0, spill_regno, bi)
842    {
843      mode = PSEUDO_REGNO_MODE (spill_regno);
844      hard_regno = live_pseudos_reg_renumber[spill_regno];
845      if (overlaps_hard_reg_set_p (reg_class_contents[rclass],
846				   mode, hard_regno))
847	{
848	  for (i = hard_regno_nregs[hard_regno][mode] - 1; i >= 0; i--)
849	    {
850	      if (try_hard_reg_pseudos_check[hard_regno + i]
851		  != curr_pseudo_check)
852		{
853		  try_hard_reg_pseudos_check[hard_regno + i]
854		    = curr_pseudo_check;
855		  bitmap_clear (&try_hard_reg_pseudos[hard_regno + i]);
856		}
857	      bitmap_set_bit (&try_hard_reg_pseudos[hard_regno + i],
858			      spill_regno);
859	    }
860	}
861    }
862}
863
864/* Assign temporarily HARD_REGNO to pseudo REGNO.  Temporary
865   assignment means that we might undo the data change.	 */
866static void
867assign_temporarily (int regno, int hard_regno)
868{
869  int p;
870  lra_live_range_t r;
871
872  for (r = lra_reg_info[regno].live_ranges; r != NULL; r = r->next)
873    {
874      for (p = r->start; p <= r->finish; p++)
875	if (hard_regno < 0)
876	  bitmap_clear_bit (&live_hard_reg_pseudos[p], regno);
877	else
878	  {
879	    bitmap_set_bit (&live_hard_reg_pseudos[p], regno);
880	    insert_in_live_range_start_chain (regno);
881	  }
882    }
883  live_pseudos_reg_renumber[regno] = hard_regno;
884}
885
/* Array used for sorting reload pseudos for subsequent allocation
   after spilling some pseudo.	*/
static int *sorted_reload_pseudos;

/* Spill some pseudos for a reload pseudo REGNO and return hard
   register which should be used for pseudo after spilling.  The
   function adds spilled pseudos to SPILLED_PSEUDO_BITMAP.  When we
   choose hard register (and pseudos occupying the hard registers and
   to be spilled), we take into account not only how REGNO will
   benefit from the spills but also how other reload pseudos not yet
   assigned to hard registers benefit from the spills too.  In very
   rare cases, the function can fail and return -1.

   If FIRST_P, return the first available hard reg ignoring other
   criteria, e.g. allocation cost and cost of spilling non-reload
   pseudos.  This approach results in less hard reg pool fragmentation
   and permit to allocate hard regs to reload pseudos in complicated
   situations where pseudo sizes are different.  */
static int
spill_for (int regno, bitmap spilled_pseudo_bitmap, bool first_p)
{
  int i, j, n, p, hard_regno, best_hard_regno, cost, best_cost, rclass_size;
  int reload_hard_regno, reload_cost;
  machine_mode mode;
  enum reg_class rclass;
  unsigned int spill_regno, reload_regno, uid;
  int insn_pseudos_num, best_insn_pseudos_num;
  int bad_spills_num, smallest_bad_spills_num;
  lra_live_range_t r;
  bitmap_iterator bi;

  rclass = regno_allocno_class_array[regno];
  lra_assert (reg_renumber[regno] < 0 && rclass != NO_REGS);
  bitmap_clear (&insn_conflict_pseudos);
  bitmap_clear (&best_spill_pseudos_bitmap);
  /* Collect pseudos occurring in the insns which also mention REGNO;
     spilling any of these is penalized through INSN_PSEUDOS_NUM
     below.  */
  EXECUTE_IF_SET_IN_BITMAP (&lra_reg_info[regno].insn_bitmap, 0, uid, bi)
    {
      struct lra_insn_reg *ir;

      for (ir = lra_get_insn_regs (uid); ir != NULL; ir = ir->next)
	if (ir->regno >= FIRST_PSEUDO_REGISTER)
	  bitmap_set_bit (&insn_conflict_pseudos, ir->regno);
    }
  /* Initialize the cost-model state; for every criterion smaller is
     better.  */
  best_hard_regno = -1;
  best_cost = INT_MAX;
  best_insn_pseudos_num = INT_MAX;
  smallest_bad_spills_num = INT_MAX;
  rclass_size = ira_class_hard_regs_num[rclass];
  mode = PSEUDO_REGNO_MODE (regno);
  /* Invalidate try_hard_reg_pseudos elements.  */
  curr_pseudo_check++;
  for (r = lra_reg_info[regno].live_ranges; r != NULL; r = r->next)
    for (p = r->start; p <= r->finish; p++)
      setup_try_hard_regno_pseudos (p, rclass);
  /* Consider each hard register of RCLASS in turn.  */
  for (i = 0; i < rclass_size; i++)
    {
      hard_regno = ira_class_hard_regs[rclass][i];
      bitmap_clear (&spill_pseudos_bitmap);
      /* Collect the pseudos occupying any of the hard registers
	 needed for MODE starting at HARD_REGNO.  */
      for (j = hard_regno_nregs[hard_regno][mode] - 1; j >= 0; j--)
	{
	  if (try_hard_reg_pseudos_check[hard_regno + j] != curr_pseudo_check)
	    continue;
	  lra_assert (!bitmap_empty_p (&try_hard_reg_pseudos[hard_regno + j]));
	  bitmap_ior_into (&spill_pseudos_bitmap,
			   &try_hard_reg_pseudos[hard_regno + j]);
	}
      /* Spill pseudos.	 Reject this hard register if freeing it would
	 require spilling the PIC pseudo or a reload pseudo that is
	 not an inheritance, split, subreg reload, or optional reload
	 pseudo.  */
      EXECUTE_IF_SET_IN_BITMAP (&spill_pseudos_bitmap, 0, spill_regno, bi)
	if ((pic_offset_table_rtx != NULL
	     && spill_regno == REGNO (pic_offset_table_rtx))
	    || ((int) spill_regno >= lra_constraint_new_regno_start
		&& ! bitmap_bit_p (&lra_inheritance_pseudos, spill_regno)
		&& ! bitmap_bit_p (&lra_split_regs, spill_regno)
		&& ! bitmap_bit_p (&lra_subreg_reload_pseudos, spill_regno)
		&& ! bitmap_bit_p (&lra_optional_reload_pseudos, spill_regno)))
	  goto fail;
      insn_pseudos_num = 0;
      bad_spills_num = 0;
      if (lra_dump_file != NULL)
	fprintf (lra_dump_file, "	 Trying %d:", hard_regno);
      sparseset_clear (live_range_reload_inheritance_pseudos);
      /* Count how many spill candidates occur in REGNO's own insns
	 and how many are "bad" spills, and collect the reload and
	 inheritance pseudos overlapping the candidates' live
	 ranges.  */
      EXECUTE_IF_SET_IN_BITMAP (&spill_pseudos_bitmap, 0, spill_regno, bi)
	{
	  if (bitmap_bit_p (&insn_conflict_pseudos, spill_regno))
	    insn_pseudos_num++;
	  if (spill_regno >= (unsigned int) lra_bad_spill_regno_start)
	    bad_spills_num++;
	  for (r = lra_reg_info[spill_regno].live_ranges;
	       r != NULL;
	       r = r->next)
	    {
	      for (p = r->start; p <= r->finish; p++)
		{
		  lra_live_range_t r2;

		  for (r2 = start_point_ranges[p];
		       r2 != NULL;
		       r2 = r2->start_next)
		    if (r2->regno >= lra_constraint_new_regno_start)
		      sparseset_set_bit (live_range_reload_inheritance_pseudos,
					 r2->regno);
		}
	    }
	}
      /* Find the other still-unassigned reload pseudos that could
	 also benefit from this spill, unless there are too many of
	 them to consider cheaply.  */
      n = 0;
      if (sparseset_cardinality (live_range_reload_inheritance_pseudos)
	  <= (unsigned)LRA_MAX_CONSIDERED_RELOAD_PSEUDOS)
	EXECUTE_IF_SET_IN_SPARSESET (live_range_reload_inheritance_pseudos,
				     reload_regno)
	  if ((int) reload_regno != regno
	      && (ira_reg_classes_intersect_p
		  [rclass][regno_allocno_class_array[reload_regno]])
	      && live_pseudos_reg_renumber[reload_regno] < 0
	      && find_hard_regno_for (reload_regno, &cost, -1, first_p) < 0)
	    sorted_reload_pseudos[n++] = reload_regno;
      /* Temporarily remove the spill candidates from the live info
	 and see what REGNO (and the other reload pseudos) would then
	 get.  */
      EXECUTE_IF_SET_IN_BITMAP (&spill_pseudos_bitmap, 0, spill_regno, bi)
	{
	  update_lives (spill_regno, true);
	  if (lra_dump_file != NULL)
	    fprintf (lra_dump_file, " spill %d(freq=%d)",
		     spill_regno, lra_reg_info[spill_regno].freq);
	}
      hard_regno = find_hard_regno_for (regno, &cost, -1, first_p);
      if (hard_regno >= 0)
	{
	  assign_temporarily (regno, hard_regno);
	  qsort (sorted_reload_pseudos, n, sizeof (int),
		 reload_pseudo_compare_func);
	  /* Tentatively assign the other reload pseudos too,
	     accumulating their allocation costs.  */
	  for (j = 0; j < n; j++)
	    {
	      reload_regno = sorted_reload_pseudos[j];
	      lra_assert (live_pseudos_reg_renumber[reload_regno] < 0);
	      if ((reload_hard_regno
		   = find_hard_regno_for (reload_regno,
					  &reload_cost, -1, first_p)) >= 0)
		{
		  if (lra_dump_file != NULL)
		    fprintf (lra_dump_file, " assign %d(cost=%d)",
			     reload_regno, reload_cost);
		  assign_temporarily (reload_regno, reload_hard_regno);
		  cost += reload_cost;
		}
	    }
	  /* Charge each spilled pseudo's frequency to the cost; a
	     pseudo with a memory or constant equivalence is counted
	     cheaper by the frequency of its equivalence init
	     insns.  */
	  EXECUTE_IF_SET_IN_BITMAP (&spill_pseudos_bitmap, 0, spill_regno, bi)
	    {
	      rtx_insn_list *x;

	      cost += lra_reg_info[spill_regno].freq;
	      if (ira_reg_equiv[spill_regno].memory != NULL
		  || ira_reg_equiv[spill_regno].constant != NULL)
		for (x = ira_reg_equiv[spill_regno].init_insns;
		     x != NULL;
		     x = x->next ())
		  cost -= REG_FREQ_FROM_BB (BLOCK_FOR_INSN (x->insn ()));
	    }
	  /* Prefer fewer conflicts in REGNO's own insns, then fewer
	     bad spills, then the smaller total cost.  */
	  if (best_insn_pseudos_num > insn_pseudos_num
	      || (best_insn_pseudos_num == insn_pseudos_num
		  && (bad_spills_num < smallest_bad_spills_num
		      || (bad_spills_num == smallest_bad_spills_num
			  && best_cost > cost))))
	    {
	      best_insn_pseudos_num = insn_pseudos_num;
	      smallest_bad_spills_num = bad_spills_num;
	      best_cost = cost;
	      best_hard_regno = hard_regno;
	      bitmap_copy (&best_spill_pseudos_bitmap, &spill_pseudos_bitmap);
	      if (lra_dump_file != NULL)
		fprintf (lra_dump_file,
			 "	 Now best %d(cost=%d, bad_spills=%d, insn_pseudos=%d)\n",
			 hard_regno, cost, bad_spills_num, insn_pseudos_num);
	    }
	  /* Undo the tentative assignments made above.  */
	  assign_temporarily (regno, -1);
	  for (j = 0; j < n; j++)
	    {
	      reload_regno = sorted_reload_pseudos[j];
	      if (live_pseudos_reg_renumber[reload_regno] >= 0)
		assign_temporarily (reload_regno, -1);
	    }
	}
      if (lra_dump_file != NULL)
	fprintf (lra_dump_file, "\n");
      /* Restore the live hard reg pseudo info for spilled pseudos.  */
      EXECUTE_IF_SET_IN_BITMAP (&spill_pseudos_bitmap, 0, spill_regno, bi)
	update_lives (spill_regno, false);
    fail:
      ;
    }
  /* Spill: commit the best choice found above (if any).  */
  EXECUTE_IF_SET_IN_BITMAP (&best_spill_pseudos_bitmap, 0, spill_regno, bi)
    {
      if ((int) spill_regno >= lra_constraint_new_regno_start)
	former_reload_pseudo_spill_p = true;
      if (lra_dump_file != NULL)
	fprintf (lra_dump_file, "      Spill %sr%d(hr=%d, freq=%d) for r%d\n",
		 pseudo_prefix_title (spill_regno),
		 spill_regno, reg_renumber[spill_regno],
		 lra_reg_info[spill_regno].freq, regno);
      update_lives (spill_regno, true);
      lra_setup_reg_renumber (spill_regno, -1, false);
    }
  bitmap_ior_into (spilled_pseudo_bitmap, &best_spill_pseudos_bitmap);
  return best_hard_regno;
}
1089
1090/* Assign HARD_REGNO to REGNO.	*/
1091static void
1092assign_hard_regno (int hard_regno, int regno)
1093{
1094  int i;
1095
1096  lra_assert (hard_regno >= 0);
1097  lra_setup_reg_renumber (regno, hard_regno, true);
1098  update_lives (regno, false);
1099  for (i = 0;
1100       i < hard_regno_nregs[hard_regno][lra_reg_info[regno].biggest_mode];
1101       i++)
1102    df_set_regs_ever_live (hard_regno + i, true);
1103}
1104
/* Array used for sorting different pseudos.  */
static int *sorted_pseudos;

/* The constraints pass is allowed to create equivalences between
   pseudos that make the current allocation "incorrect" (in the sense
   that pseudos are assigned to hard registers from their own conflict
   sets).  The global variable lra_risky_transformations_p says
   whether this might have happened.

   Process pseudos assigned to hard registers (less frequently used
   first), spill if a conflict is found, and mark the spilled pseudos
   in SPILLED_PSEUDO_BITMAP.  Set up LIVE_HARD_REG_PSEUDOS from
   pseudos, assigned to hard registers.	 */
static void
setup_live_pseudos_and_spill_after_risky_transforms (bitmap
						     spilled_pseudo_bitmap)
{
  int p, i, j, n, regno, hard_regno;
  unsigned int k, conflict_regno;
  int val, offset;
  HARD_REG_SET conflict_set;
  machine_mode mode;
  lra_live_range_t r;
  bitmap_iterator bi;
  int max_regno = max_reg_num ();

  if (! lra_risky_transformations_p)
    {
      /* Nothing risky happened: just rebuild LIVE_HARD_REG_PSEUDOS
	 from the current allocation.  */
      for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
	if (reg_renumber[i] >= 0 && lra_reg_info[i].nrefs > 0)
	  update_lives (i, false);
      return;
    }
  /* Collect all allocated pseudos, keeping the PIC pseudo out of the
     sort and appending it afterwards.	*/
  for (n = 0, i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
    if ((pic_offset_table_rtx == NULL_RTX
	 || i != (int) REGNO (pic_offset_table_rtx))
	&& reg_renumber[i] >= 0 && lra_reg_info[i].nrefs > 0)
      sorted_pseudos[n++] = i;
  qsort (sorted_pseudos, n, sizeof (int), pseudo_compare_func);
  if (pic_offset_table_rtx != NULL_RTX
      && (regno = REGNO (pic_offset_table_rtx)) >= FIRST_PSEUDO_REGISTER
      && reg_renumber[regno] >= 0 && lra_reg_info[regno].nrefs > 0)
    sorted_pseudos[n++] = regno;
  for (i = n - 1; i >= 0; i--)
    {
      regno = sorted_pseudos[i];
      hard_regno = reg_renumber[regno];
      lra_assert (hard_regno >= 0);
      mode = lra_reg_info[regno].biggest_mode;
      /* Collect in LIVE_RANGE_HARD_REG_PSEUDOS all already-processed
	 allocated pseudos whose live ranges overlap REGNO's.  */
      sparseset_clear (live_range_hard_reg_pseudos);
      for (r = lra_reg_info[regno].live_ranges; r != NULL; r = r->next)
	{
	  EXECUTE_IF_SET_IN_BITMAP (&live_hard_reg_pseudos[r->start], 0, k, bi)
	    sparseset_set_bit (live_range_hard_reg_pseudos, k);
	  for (p = r->start + 1; p <= r->finish; p++)
	    {
	      lra_live_range_t r2;

	      for (r2 = start_point_ranges[p];
		   r2 != NULL;
		   r2 = r2->start_next)
		if (live_pseudos_reg_renumber[r2->regno] >= 0)
		  sparseset_set_bit (live_range_hard_reg_pseudos, r2->regno);
	    }
	}
      COPY_HARD_REG_SET (conflict_set, lra_no_alloc_regs);
      IOR_HARD_REG_SET (conflict_set, lra_reg_info[regno].conflict_hard_regs);
      val = lra_reg_info[regno].val;
      offset = lra_reg_info[regno].offset;
      /* Hard registers of overlapping pseudos conflict with REGNO
	 unless the pseudos are known to hold the same value (same
	 val/offset) in the same hard register.	 */
      EXECUTE_IF_SET_IN_SPARSESET (live_range_hard_reg_pseudos, conflict_regno)
	if (!lra_reg_val_equal_p (conflict_regno, val, offset)
	    /* If it is multi-register pseudos they should start on
	       the same hard register.	*/
	    || hard_regno != reg_renumber[conflict_regno])
	  add_to_hard_reg_set (&conflict_set,
			       lra_reg_info[conflict_regno].biggest_mode,
			       reg_renumber[conflict_regno]);
      if (! overlaps_hard_reg_set_p (conflict_set, mode, hard_regno))
	{
	  /* No conflict: keep the assignment and make REGNO live.  */
	  update_lives (regno, false);
	  continue;
	}
      /* Conflict found: spill REGNO and report it to the caller.  */
      bitmap_set_bit (spilled_pseudo_bitmap, regno);
      for (j = 0;
	   j < hard_regno_nregs[hard_regno][PSEUDO_REGNO_MODE (regno)];
	   j++)
	lra_hard_reg_usage[hard_regno + j] -= lra_reg_info[regno].freq;
      reg_renumber[regno] = -1;
      if (regno >= lra_constraint_new_regno_start)
	former_reload_pseudo_spill_p = true;
      if (lra_dump_file != NULL)
	fprintf (lra_dump_file, "    Spill r%d after risky transformations\n",
		 regno);
    }
}
1200
/* Improve allocation by assigning the same hard regno of inheritance
   pseudos to the connected pseudos.  We need this because inheritance
   pseudos are allocated after reload pseudos in the thread and when
   we assign a hard register to a reload pseudo we don't know yet that
   the connected inheritance pseudos can get the same hard register.
   Add pseudos with changed allocation to bitmap CHANGED_PSEUDOS.  */
static void
improve_inheritance (bitmap changed_pseudos)
{
  unsigned int k;
  int regno, another_regno, hard_regno, another_hard_regno, cost, i, n;
  lra_copy_t cp, next_cp;
  bitmap_iterator bi;

  if (lra_inheritance_iter > LRA_MAX_INHERITANCE_PASSES)
    return;
  /* Collect the allocated inheritance pseudos.	 */
  n = 0;
  EXECUTE_IF_SET_IN_BITMAP (&lra_inheritance_pseudos, 0, k, bi)
    if (reg_renumber[k] >= 0 && lra_reg_info[k].nrefs != 0)
      sorted_pseudos[n++] = k;
  qsort (sorted_pseudos, n, sizeof (int), pseudo_compare_func);
  for (i = 0; i < n; i++)
    {
      regno = sorted_pseudos[i];
      hard_regno = reg_renumber[regno];
      lra_assert (hard_regno >= 0);
      /* Walk REGNO's copy list; each copy connects REGNO with
	 ANOTHER_REGNO (NEXT_CP must be picked from the side of the
	 copy that mentions REGNO).  */
      for (cp = lra_reg_info[regno].copies; cp != NULL; cp = next_cp)
	{
	  if (cp->regno1 == regno)
	    {
	      next_cp = cp->regno1_next;
	      another_regno = cp->regno2;
	    }
	  else if (cp->regno2 == regno)
	    {
	      next_cp = cp->regno2_next;
	      another_regno = cp->regno1;
	    }
	  else
	    gcc_unreachable ();
	  /* Don't change reload pseudo allocation.  It might have
	     this allocation for a purpose and changing it can result
	     in LRA cycling.  */
	  if ((another_regno < lra_constraint_new_regno_start
	       || bitmap_bit_p (&lra_inheritance_pseudos, another_regno))
	      && (another_hard_regno = reg_renumber[another_regno]) >= 0
	      && another_hard_regno != hard_regno)
	    {
	      if (lra_dump_file != NULL)
		fprintf
		  (lra_dump_file,
		   "	Improving inheritance for %d(%d) and %d(%d)...\n",
		   regno, hard_regno, another_regno, another_hard_regno);
	      /* Try to move ANOTHER_REGNO to HARD_REGNO; if that is
		 impossible, restore its previous hard register.  */
	      update_lives (another_regno, true);
	      lra_setup_reg_renumber (another_regno, -1, false);
	      if (hard_regno == find_hard_regno_for (another_regno, &cost,
						     hard_regno, false))
		assign_hard_regno (hard_regno, another_regno);
	      else
		assign_hard_regno (another_hard_regno, another_regno);
	      bitmap_set_bit (changed_pseudos, another_regno);
	    }
	}
    }
}
1266
1267
/* Bitmap finally containing all pseudos spilled on this assignment
   pass.  */
static bitmap_head all_spilled_pseudos;
/* All pseudos whose allocation was changed.  */
static bitmap_head changed_pseudo_bitmap;
1273
1274
1275/* Add to LIVE_RANGE_HARD_REG_PSEUDOS all pseudos conflicting with
1276   REGNO and whose hard regs can be assigned to REGNO.  */
1277static void
1278find_all_spills_for (int regno)
1279{
1280  int p;
1281  lra_live_range_t r;
1282  unsigned int k;
1283  bitmap_iterator bi;
1284  enum reg_class rclass;
1285  bool *rclass_intersect_p;
1286
1287  rclass = regno_allocno_class_array[regno];
1288  rclass_intersect_p = ira_reg_classes_intersect_p[rclass];
1289  for (r = lra_reg_info[regno].live_ranges; r != NULL; r = r->next)
1290    {
1291      EXECUTE_IF_SET_IN_BITMAP (&live_hard_reg_pseudos[r->start], 0, k, bi)
1292	if (rclass_intersect_p[regno_allocno_class_array[k]])
1293	  sparseset_set_bit (live_range_hard_reg_pseudos, k);
1294      for (p = r->start + 1; p <= r->finish; p++)
1295	{
1296	  lra_live_range_t r2;
1297
1298	  for (r2 = start_point_ranges[p];
1299	       r2 != NULL;
1300	       r2 = r2->start_next)
1301	    {
1302	      if (live_pseudos_reg_renumber[r2->regno] >= 0
1303		  && rclass_intersect_p[regno_allocno_class_array[r2->regno]])
1304		sparseset_set_bit (live_range_hard_reg_pseudos, r2->regno);
1305	    }
1306	}
1307    }
1308}
1309
/* Assign hard registers to reload pseudos and other pseudos.  Reload
   pseudos get up to two attempts (the second ignoring allocation
   cost); if both fail, all conflicting pseudos are spilled and the
   attempt is repeated.	 Afterwards non-reload pseudos may be
   reassigned too.  */
static void
assign_by_spills (void)
{
  int i, n, nfails, iter, regno, hard_regno, cost, restore_regno;
  rtx_insn *insn;
  bitmap_head changed_insns, do_not_assign_nonreload_pseudos;
  unsigned int u, conflict_regno;
  bitmap_iterator bi;
  bool reload_p;
  int max_regno = max_reg_num ();

  /* Collect all unassigned pseudos created by the constraint pass.  */
  for (n = 0, i = lra_constraint_new_regno_start; i < max_regno; i++)
    if (reg_renumber[i] < 0 && lra_reg_info[i].nrefs != 0
	&& regno_allocno_class_array[i] != NO_REGS)
      sorted_pseudos[n++] = i;
  bitmap_initialize (&insn_conflict_pseudos, &reg_obstack);
  bitmap_initialize (&spill_pseudos_bitmap, &reg_obstack);
  bitmap_initialize (&best_spill_pseudos_bitmap, &reg_obstack);
  update_hard_regno_preference_check = XCNEWVEC (int, max_regno);
  curr_update_hard_regno_preference_check = 0;
  memset (try_hard_reg_pseudos_check, 0, sizeof (try_hard_reg_pseudos_check));
  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    bitmap_initialize (&try_hard_reg_pseudos[i], &reg_obstack);
  curr_pseudo_check = 0;
  bitmap_initialize (&changed_insns, &reg_obstack);
  bitmap_initialize (&non_reload_pseudos, &reg_obstack);
  /* NON_RELOAD_PSEUDOS = inheritance, split, subreg reload, and
     optional reload pseudos; only true reload pseudos may trigger
     spilling below.  */
  bitmap_ior (&non_reload_pseudos, &lra_inheritance_pseudos, &lra_split_regs);
  bitmap_ior_into (&non_reload_pseudos, &lra_subreg_reload_pseudos);
  bitmap_ior_into (&non_reload_pseudos, &lra_optional_reload_pseudos);
  /* Iteration 0 uses the full cost model; iteration 1 (first_p) takes
     the first available hard register.	 */
  for (iter = 0; iter <= 1; iter++)
    {
      qsort (sorted_pseudos, n, sizeof (int), reload_pseudo_compare_func);
      nfails = 0;
      for (i = 0; i < n; i++)
	{
	  regno = sorted_pseudos[i];
	  if (lra_dump_file != NULL)
	    fprintf (lra_dump_file, "	 Assigning to %d "
		     "(cl=%s, orig=%d, freq=%d, tfirst=%d, tfreq=%d)...\n",
		     regno, reg_class_names[regno_allocno_class_array[regno]],
		     ORIGINAL_REGNO (regno_reg_rtx[regno]),
		     lra_reg_info[regno].freq, regno_assign_info[regno].first,
		     regno_assign_info[regno_assign_info[regno].first].freq);
	  hard_regno = find_hard_regno_for (regno, &cost, -1, iter == 1);
	  reload_p = ! bitmap_bit_p (&non_reload_pseudos, regno);
	  /* Only spill other pseudos for a true reload pseudo.	 */
	  if (hard_regno < 0 && reload_p)
	    hard_regno = spill_for (regno, &all_spilled_pseudos, iter == 1);
	  if (hard_regno < 0)
	    {
	      if (reload_p)
		sorted_pseudos[nfails++] = regno;
	    }
	  else
	    {
	      /* This register might have been spilled by the previous
		 pass.  Indicate that it is no longer spilled.  */
	      bitmap_clear_bit (&all_spilled_pseudos, regno);
	      assign_hard_regno (hard_regno, regno);
	      if (! reload_p)
		/* As non-reload pseudo assignment is changed we
		   should reconsider insns referring for the
		   pseudo.  */
		bitmap_set_bit (&changed_pseudo_bitmap, regno);
	    }
	}
      if (nfails == 0)
	break;
      if (iter > 0)
	{
	  /* We did not assign hard regs to reload pseudos after two iterations.
	     Either it's an asm and something is wrong with the constraints, or
	     we have run out of spill registers; error out in either case.  */
	  bool asm_p = false;
	  bitmap_head failed_reload_insns;

	  bitmap_initialize (&failed_reload_insns, &reg_obstack);
	  for (i = 0; i < nfails; i++)
	    {
	      regno = sorted_pseudos[i];
	      bitmap_ior_into (&failed_reload_insns,
			       &lra_reg_info[regno].insn_bitmap);
	      /* Assign an arbitrary hard register of regno class to
		 avoid further trouble with this insn.  */
	      bitmap_clear_bit (&all_spilled_pseudos, regno);
	      assign_hard_regno
		(ira_class_hard_regs[regno_allocno_class_array[regno]][0],
		 regno);
	    }
	  EXECUTE_IF_SET_IN_BITMAP (&failed_reload_insns, 0, u, bi)
	    {
	      insn = lra_insn_recog_data[u]->insn;
	      if (asm_noperands (PATTERN (insn)) >= 0)
		{
		  asm_p = true;
		  error_for_asm (insn,
				 "%<asm%> operand has impossible constraints");
		  /* Avoid further trouble with this insn.
		     For asm goto, instead of fixing up all the edges
		     just clear the template and clear input operands
		     (asm goto doesn't have any output operands).  */
		  if (JUMP_P (insn))
		    {
		      rtx asm_op = extract_asm_operands (PATTERN (insn));
		      ASM_OPERANDS_TEMPLATE (asm_op) = ggc_strdup ("");
		      ASM_OPERANDS_INPUT_VEC (asm_op) = rtvec_alloc (0);
		      ASM_OPERANDS_INPUT_CONSTRAINT_VEC (asm_op) = rtvec_alloc (0);
		      lra_update_insn_regno_info (insn);
		    }
		  else
		    {
		      PATTERN (insn) = gen_rtx_USE (VOIDmode, const0_rtx);
		      lra_set_insn_deleted (insn);
		    }
		}
	      else if (!asm_p)
		{
		  error ("unable to find a register to spill");
		  fatal_insn ("this is the insn:", insn);
		}
	    }
	  break;
	}
      /* This is a very rare event.  We can not assign a hard register
	 to reload pseudo because the hard register was assigned to
	 another reload pseudo on a previous assignment pass.  For x86
	 example, on the 1st pass we assigned CX (although another
	 hard register could be used for this) to reload pseudo in an
	 insn, on the 2nd pass we need CX (and only this) hard
	 register for a new reload pseudo in the same insn.  Another
	 possible situation may occur in assigning to multi-regs
	 reload pseudos when hard regs pool is too fragmented even
	 after spilling non-reload pseudos.

	 We should do something radical here to succeed.  Here we
	 spill *all* conflicting pseudos and reassign them.  */
      if (lra_dump_file != NULL)
	fprintf (lra_dump_file, "  2nd iter for reload pseudo assignments:\n");
      sparseset_clear (live_range_hard_reg_pseudos);
      for (i = 0; i < nfails; i++)
	{
	  if (lra_dump_file != NULL)
	    fprintf (lra_dump_file, "	 Reload r%d assignment failure\n",
		     sorted_pseudos[i]);
	  find_all_spills_for (sorted_pseudos[i]);
	}
      EXECUTE_IF_SET_IN_SPARSESET (live_range_hard_reg_pseudos, conflict_regno)
	{
	  if ((int) conflict_regno >= lra_constraint_new_regno_start)
	    {
	      /* Re-queue spilled reload pseudos for the next
		 iteration.  */
	      sorted_pseudos[nfails++] = conflict_regno;
	      former_reload_pseudo_spill_p = true;
	    }
	  if (lra_dump_file != NULL)
	    fprintf (lra_dump_file, "	  Spill %s r%d(hr=%d, freq=%d)\n",
		     pseudo_prefix_title (conflict_regno), conflict_regno,
		     reg_renumber[conflict_regno],
		     lra_reg_info[conflict_regno].freq);
	  update_lives (conflict_regno, true);
	  lra_setup_reg_renumber (conflict_regno, -1, false);
	}
      n = nfails;
    }
  improve_inheritance (&changed_pseudo_bitmap);
  bitmap_clear (&non_reload_pseudos);
  bitmap_clear (&changed_insns);
  if (! lra_simple_p)
    {
      /* We should not assign to original pseudos of inheritance
	 pseudos or split pseudos if any its inheritance pseudo did
	 not get hard register or any its split pseudo was not split
	 because undo inheritance/split pass will extend live range of
	 such inheritance or split pseudos.  */
      bitmap_initialize (&do_not_assign_nonreload_pseudos, &reg_obstack);
      /* NOTE(review): the bitmap_bit_p test below looks redundant
	 since U is drawn from lra_inheritance_pseudos — confirm.  */
      EXECUTE_IF_SET_IN_BITMAP (&lra_inheritance_pseudos, 0, u, bi)
	if ((restore_regno = lra_reg_info[u].restore_regno) >= 0
	    && reg_renumber[u] < 0
	    && bitmap_bit_p (&lra_inheritance_pseudos, u))
	  bitmap_set_bit (&do_not_assign_nonreload_pseudos, restore_regno);
      EXECUTE_IF_SET_IN_BITMAP (&lra_split_regs, 0, u, bi)
	if ((restore_regno = lra_reg_info[u].restore_regno) >= 0
	    && reg_renumber[u] >= 0)
	  bitmap_set_bit (&do_not_assign_nonreload_pseudos, restore_regno);
      /* Collect the remaining unassigned pseudos eligible for a hard
	 register.  */
      for (n = 0, i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
	if (((i < lra_constraint_new_regno_start
	      && ! bitmap_bit_p (&do_not_assign_nonreload_pseudos, i))
	     || (bitmap_bit_p (&lra_inheritance_pseudos, i)
		 && lra_reg_info[i].restore_regno >= 0)
	     || (bitmap_bit_p (&lra_split_regs, i)
		 && lra_reg_info[i].restore_regno >= 0)
	     || bitmap_bit_p (&lra_subreg_reload_pseudos, i)
	     || bitmap_bit_p (&lra_optional_reload_pseudos, i))
	    && reg_renumber[i] < 0 && lra_reg_info[i].nrefs != 0
	    && regno_allocno_class_array[i] != NO_REGS)
	  sorted_pseudos[n++] = i;
      bitmap_clear (&do_not_assign_nonreload_pseudos);
      if (n != 0 && lra_dump_file != NULL)
	fprintf (lra_dump_file, "  Reassigning non-reload pseudos\n");
      qsort (sorted_pseudos, n, sizeof (int), pseudo_compare_func);
      for (i = 0; i < n; i++)
	{
	  regno = sorted_pseudos[i];
	  hard_regno = find_hard_regno_for (regno, &cost, -1, false);
	  if (hard_regno >= 0)
	    {
	      assign_hard_regno (hard_regno, regno);
	      /* We change allocation for non-reload pseudo on this
		 iteration -- mark the pseudo for invalidation of used
		 alternatives of insns containing the pseudo.  */
	      bitmap_set_bit (&changed_pseudo_bitmap, regno);
	    }
	  else
	    {
	      enum reg_class rclass = lra_get_allocno_class (regno);
	      enum reg_class spill_class;

	      /* For an unassigned inheritance pseudo, try again in
		 the target's spill class (if any), e.g. to keep the
		 value in a different register file instead of
		 memory.  */
	      if (targetm.spill_class == NULL
		  || lra_reg_info[regno].restore_regno < 0
		  || ! bitmap_bit_p (&lra_inheritance_pseudos, regno)
		  || (spill_class
		      = ((enum reg_class)
			 targetm.spill_class
			 ((reg_class_t) rclass,
			  PSEUDO_REGNO_MODE (regno)))) == NO_REGS)
		continue;
	      regno_allocno_class_array[regno] = spill_class;
	      hard_regno = find_hard_regno_for (regno, &cost, -1, false);
	      if (hard_regno < 0)
		regno_allocno_class_array[regno] = rclass;
	      else
		{
		  setup_reg_classes
		    (regno, spill_class, spill_class, spill_class);
		  assign_hard_regno (hard_regno, regno);
		  bitmap_set_bit (&changed_pseudo_bitmap, regno);
		}
	    }
	}
    }
  free (update_hard_regno_preference_check);
  bitmap_clear (&best_spill_pseudos_bitmap);
  bitmap_clear (&spill_pseudos_bitmap);
  bitmap_clear (&insn_conflict_pseudos);
}
1554
1555
/* Entry function to assign hard registers to new reload pseudos
   starting with LRA_CONSTRAINT_NEW_REGNO_START (by possible spilling
   of old pseudos) and possibly to the old pseudos.  The function adds
   what insns to process for the next constraint pass.	Those are all
   insns who contains non-reload and non-inheritance pseudos with
   changed allocation.

   Return true if we did not spill any non-reload and non-inheritance
   pseudos.  */
bool
lra_assign (void)
{
  int i;
  unsigned int u;
  bitmap_iterator bi;
  bitmap_head insns_to_process;
  bool no_spills_p;
  int max_regno = max_reg_num ();

  timevar_push (TV_LRA_ASSIGN);
  lra_assignment_iter++;
  if (lra_dump_file != NULL)
    fprintf (lra_dump_file, "\n********** Assignment #%d: **********\n\n",
	     lra_assignment_iter);
  /* Set up the pass-local state used by the helpers above.  */
  init_lives ();
  sorted_pseudos = XNEWVEC (int, max_regno);
  sorted_reload_pseudos = XNEWVEC (int, max_regno);
  regno_allocno_class_array = XNEWVEC (enum reg_class, max_regno);
  for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
    regno_allocno_class_array[i] = lra_get_allocno_class (i);
  former_reload_pseudo_spill_p = false;
  init_regno_assign_info ();
  bitmap_initialize (&all_spilled_pseudos, &reg_obstack);
  create_live_range_start_chains ();
  setup_live_pseudos_and_spill_after_risky_transforms (&all_spilled_pseudos);
#ifdef ENABLE_CHECKING
  /* Sanity check: without IPA RA, no pseudo crossing a call may sit
     in a call-used hard register.  */
  if (!flag_ipa_ra)
    for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
      if (lra_reg_info[i].nrefs != 0 && reg_renumber[i] >= 0
	  && lra_reg_info[i].call_p
	  && overlaps_hard_reg_set_p (call_used_reg_set,
				      PSEUDO_REGNO_MODE (i), reg_renumber[i]))
	gcc_unreachable ();
#endif
  /* Setup insns to process on the next constraint pass.  */
  bitmap_initialize (&changed_pseudo_bitmap, &reg_obstack);
  init_live_reload_and_inheritance_pseudos ();
  assign_by_spills ();
  finish_live_reload_and_inheritance_pseudos ();
  bitmap_ior_into (&changed_pseudo_bitmap, &all_spilled_pseudos);
  no_spills_p = true;
  EXECUTE_IF_SET_IN_BITMAP (&all_spilled_pseudos, 0, u, bi)
    /* We ignore spilled pseudos created on last inheritance pass
       because they will be removed.  */
    if (lra_reg_info[u].restore_regno < 0)
      {
	no_spills_p = false;
	break;
      }
  finish_live_range_start_chains ();
  bitmap_clear (&all_spilled_pseudos);
  /* Queue every insn mentioning a pseudo with changed allocation for
     re-processing by the constraint pass.  */
  bitmap_initialize (&insns_to_process, &reg_obstack);
  EXECUTE_IF_SET_IN_BITMAP (&changed_pseudo_bitmap, 0, u, bi)
    bitmap_ior_into (&insns_to_process, &lra_reg_info[u].insn_bitmap);
  bitmap_clear (&changed_pseudo_bitmap);
  EXECUTE_IF_SET_IN_BITMAP (&insns_to_process, 0, u, bi)
    {
      lra_push_insn_by_uid (u);
      /* Invalidate alternatives for insn should be processed.	*/
      lra_set_used_insn_alternative_by_uid (u, -1);
    }
  bitmap_clear (&insns_to_process);
  finish_regno_assign_info ();
  free (regno_allocno_class_array);
  free (sorted_pseudos);
  free (sorted_reload_pseudos);
  finish_lives ();
  timevar_pop (TV_LRA_ASSIGN);
  if (former_reload_pseudo_spill_p)
    lra_assignment_iter_after_spill++;
  if (lra_assignment_iter_after_spill > LRA_MAX_ASSIGNMENT_ITERATION_NUMBER)
    internal_error
      ("Maximum number of LRA assignment passes is achieved (%d)\n",
       LRA_MAX_ASSIGNMENT_ITERATION_NUMBER);
  return no_spills_p;
}
1642