1/* Integrated Register Allocator.  Changing code and generating moves.
2   Copyright (C) 2006-2015 Free Software Foundation, Inc.
3   Contributed by Vladimir Makarov <vmakarov@redhat.com>.
4
5This file is part of GCC.
6
7GCC is free software; you can redistribute it and/or modify it under
8the terms of the GNU General Public License as published by the Free
9Software Foundation; either version 3, or (at your option) any later
10version.
11
12GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13WARRANTY; without even the implied warranty of MERCHANTABILITY or
14FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
15for more details.
16
17You should have received a copy of the GNU General Public License
18along with GCC; see the file COPYING3.  If not see
19<http://www.gnu.org/licenses/>.  */
20
/* When we have more than one region, we need to change the original RTL
22   code after coloring.  Let us consider two allocnos representing the
23   same pseudo-register outside and inside a region respectively.
   They can get different hard-registers.  The reload pass works on a
   pseudo-register basis and there is no way to tell reload that a
   pseudo could be in different registers and it is even more
27   difficult to say in what places of the code the pseudo should have
28   particular hard-registers.  So in this case IRA has to create and
29   use a new pseudo-register inside the region and adds code to move
30   allocno values on the region's borders.  This is done by the code
31   in this file.
32
   The code makes a top-down traversal of the regions and generates new
34   pseudos and the move code on the region borders.  In some
35   complicated cases IRA can create a new pseudo used temporarily to
36   move allocno values when a swap of values stored in two
37   hard-registers is needed (e.g. two allocnos representing different
38   pseudos outside region got respectively hard registers 1 and 2 and
39   the corresponding allocnos inside the region got respectively hard
40   registers 2 and 1).  At this stage, the new pseudo is marked as
41   spilled.
42
43   IRA still creates the pseudo-register and the moves on the region
44   borders even when the both corresponding allocnos were assigned to
45   the same hard-register.  It is done because, if the reload pass for
46   some reason spills a pseudo-register representing the original
47   pseudo outside or inside the region, the effect will be smaller
48   because another pseudo will still be in the hard-register.  In most
   cases, this is better than spilling the original pseudo in its
50   whole live-range.  If reload does not change the allocation for the
51   two pseudo-registers, the trivial move will be removed by
52   post-reload optimizations.
53
54   IRA does not generate a new pseudo and moves for the allocno values
55   if the both allocnos representing an original pseudo inside and
56   outside region assigned to the same hard register when the register
57   pressure in the region for the corresponding pressure class is less
58   than number of available hard registers for given pressure class.
59
60   IRA also does some optimizations to remove redundant moves which is
61   transformed into stores by the reload pass on CFG edges
62   representing exits from the region.
63
64   IRA tries to reduce duplication of code generated on CFG edges
65   which are enters and exits to/from regions by moving some code to
66   the edge sources or destinations when it is possible.  */
67
68#include "config.h"
69#include "system.h"
70#include "coretypes.h"
71#include "tm.h"
72#include "regs.h"
73#include "rtl.h"
74#include "tm_p.h"
75#include "target.h"
76#include "flags.h"
77#include "obstack.h"
78#include "bitmap.h"
79#include "hard-reg-set.h"
80#include "predict.h"
81#include "vec.h"
82#include "hashtab.h"
83#include "hash-set.h"
84#include "machmode.h"
85#include "input.h"
86#include "function.h"
87#include "dominance.h"
88#include "cfg.h"
89#include "cfgrtl.h"
90#include "cfgbuild.h"
91#include "basic-block.h"
92#include "symtab.h"
93#include "statistics.h"
94#include "double-int.h"
95#include "real.h"
96#include "fixed-value.h"
97#include "alias.h"
98#include "wide-int.h"
99#include "inchash.h"
100#include "tree.h"
101#include "insn-config.h"
102#include "expmed.h"
103#include "dojump.h"
104#include "explow.h"
105#include "calls.h"
106#include "emit-rtl.h"
107#include "varasm.h"
108#include "stmt.h"
109#include "expr.h"
110#include "recog.h"
111#include "params.h"
112#include "reload.h"
113#include "df.h"
114#include "ira-int.h"
115
116
/* Data used to emit live range split insns and to flatten the IR.  */
118ira_emit_data_t ira_allocno_emit_data;
119
120/* Definitions for vectors of pointers.  */
121typedef void *void_p;
122
123/* Pointers to data allocated for allocnos being created during
124   emitting.  Usually there are quite few such allocnos because they
125   are created only for resolving loop in register shuffling.  */
126static vec<void_p> new_allocno_emit_data_vec;
127
128/* Allocate and initiate the emit data.  */
129void
130ira_initiate_emit_data (void)
131{
132  ira_allocno_t a;
133  ira_allocno_iterator ai;
134
135  ira_allocno_emit_data
136    = (ira_emit_data_t) ira_allocate (ira_allocnos_num
137				      * sizeof (struct ira_emit_data));
138  memset (ira_allocno_emit_data, 0,
139	  ira_allocnos_num * sizeof (struct ira_emit_data));
140  FOR_EACH_ALLOCNO (a, ai)
141    ALLOCNO_ADD_DATA (a) = ira_allocno_emit_data + ALLOCNO_NUM (a);
142  new_allocno_emit_data_vec.create (50);
143
144}
145
146/* Free the emit data.  */
147void
148ira_finish_emit_data (void)
149{
150  void_p p;
151  ira_allocno_t a;
152  ira_allocno_iterator ai;
153
154  ira_free (ira_allocno_emit_data);
155  FOR_EACH_ALLOCNO (a, ai)
156    ALLOCNO_ADD_DATA (a) = NULL;
157  for (;new_allocno_emit_data_vec.length () != 0;)
158    {
159      p = new_allocno_emit_data_vec.pop ();
160      ira_free (p);
161    }
162  new_allocno_emit_data_vec.release ();
163}
164
165/* Create and return a new allocno with given REGNO and
166   LOOP_TREE_NODE.  Allocate emit data for it.  */
167static ira_allocno_t
168create_new_allocno (int regno, ira_loop_tree_node_t loop_tree_node)
169{
170  ira_allocno_t a;
171
172  a = ira_create_allocno (regno, false, loop_tree_node);
173  ALLOCNO_ADD_DATA (a) = ira_allocate (sizeof (struct ira_emit_data));
174  memset (ALLOCNO_ADD_DATA (a), 0, sizeof (struct ira_emit_data));
175  new_allocno_emit_data_vec.safe_push (ALLOCNO_ADD_DATA (a));
176  return a;
177}
178
179
180
/* See comments below.  */
typedef struct move *move_t;

/* The structure represents an allocno move.  Both allocnos have the
   same original regno but different allocation.  */
struct move
{
  /* The allocnos involved in the move.  */
  ira_allocno_t from, to;
  /* The next move in the move sequence.  */
  move_t next;
  /* Used for finding dependencies.  */
  bool visited_p;
  /* The size of the following array. */
  int deps_num;
  /* Moves on which the given move depends.  A dependency can be
     cyclic; in that case we need a temporary to generate the moves.
     Sequence A1->A2, B1->B2 where A1 and B2 are assigned to reg R1
     and A2 and B1 are assigned to reg R2 is an example of cyclic
     dependencies.  */
  move_t *deps;
  /* First insn generated for the move.  */
  rtx_insn *insn;
};
205
206/* Array of moves (indexed by BB index) which should be put at the
207   start/end of the corresponding basic blocks.  */
208static move_t *at_bb_start, *at_bb_end;
209
210/* Max regno before renaming some pseudo-registers.  For example, the
211   same pseudo-register can be renamed in a loop if its allocation is
212   different outside the loop.  */
213static int max_regno_before_changing;
214
215/* Return new move of allocnos TO and FROM.  */
216static move_t
217create_move (ira_allocno_t to, ira_allocno_t from)
218{
219  move_t move;
220
221  move = (move_t) ira_allocate (sizeof (struct move));
222  move->deps = NULL;
223  move->deps_num = 0;
224  move->to = to;
225  move->from = from;
226  move->next = NULL;
227  move->insn = NULL;
228  move->visited_p = false;
229  return move;
230}
231
232/* Free memory for MOVE and its dependencies.  */
233static void
234free_move (move_t move)
235{
236  if (move->deps != NULL)
237    ira_free (move->deps);
238  ira_free (move);
239}
240
241/* Free memory for list of the moves given by its HEAD.  */
242static void
243free_move_list (move_t head)
244{
245  move_t next;
246
247  for (; head != NULL; head = next)
248    {
249      next = head->next;
250      free_move (head);
251    }
252}
253
254/* Return TRUE if the move list LIST1 and LIST2 are equal (two
255   moves are equal if they involve the same allocnos).  */
256static bool
257eq_move_lists_p (move_t list1, move_t list2)
258{
259  for (; list1 != NULL && list2 != NULL;
260       list1 = list1->next, list2 = list2->next)
261    if (list1->from != list2->from || list1->to != list2->to)
262      return false;
263  return list1 == list2;
264}
265
266/* Print move list LIST into file F.  */
267static void
268print_move_list (FILE *f, move_t list)
269{
270  for (; list != NULL; list = list->next)
271    fprintf (f, " a%dr%d->a%dr%d",
272	     ALLOCNO_NUM (list->from), ALLOCNO_REGNO (list->from),
273	     ALLOCNO_NUM (list->to), ALLOCNO_REGNO (list->to));
274  fprintf (f, "\n");
275}
276
277extern void ira_debug_move_list (move_t list);
278
/* Print move list LIST into stderr.  Declared extern just above so it
   can be called from a debugger.  */
void
ira_debug_move_list (move_t list)
{
  print_move_list (stderr, list);
}
285
286/* This recursive function changes pseudo-registers in *LOC if it is
287   necessary.  The function returns TRUE if a change was done.  */
288static bool
289change_regs (rtx *loc)
290{
291  int i, regno, result = false;
292  const char *fmt;
293  enum rtx_code code;
294  rtx reg;
295
296  if (*loc == NULL_RTX)
297    return false;
298  code = GET_CODE (*loc);
299  if (code == REG)
300    {
301      regno = REGNO (*loc);
302      if (regno < FIRST_PSEUDO_REGISTER)
303	return false;
304      if (regno >= max_regno_before_changing)
305	/* It is a shared register which was changed already.  */
306	return false;
307      if (ira_curr_regno_allocno_map[regno] == NULL)
308	return false;
309      reg = allocno_emit_reg (ira_curr_regno_allocno_map[regno]);
310      if (reg == *loc)
311	return false;
312      *loc = reg;
313      return true;
314    }
315
316  fmt = GET_RTX_FORMAT (code);
317  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
318    {
319      if (fmt[i] == 'e')
320	result = change_regs (&XEXP (*loc, i)) || result;
321      else if (fmt[i] == 'E')
322	{
323	  int j;
324
325	  for (j = XVECLEN (*loc, i) - 1; j >= 0; j--)
326	    result = change_regs (&XVECEXP (*loc, i, j)) || result;
327	}
328    }
329  return result;
330}
331
332static bool
333change_regs_in_insn (rtx_insn **insn_ptr)
334{
335  rtx rtx = *insn_ptr;
336  bool result = change_regs (&rtx);
337  *insn_ptr = as_a <rtx_insn *> (rtx);
338  return result;
339}
340
341/* Attach MOVE to the edge E.  The move is attached to the head of the
342   list if HEAD_P is TRUE.  */
343static void
344add_to_edge_list (edge e, move_t move, bool head_p)
345{
346  move_t last;
347
348  if (head_p || e->aux == NULL)
349    {
350      move->next = (move_t) e->aux;
351      e->aux = move;
352    }
353  else
354    {
355      for (last = (move_t) e->aux; last->next != NULL; last = last->next)
356	;
357      last->next = move;
358      move->next = NULL;
359    }
360}
361
362/* Create and return new pseudo-register with the same attributes as
363   ORIGINAL_REG.  */
364rtx
365ira_create_new_reg (rtx original_reg)
366{
367  rtx new_reg;
368
369  new_reg = gen_reg_rtx (GET_MODE (original_reg));
370  ORIGINAL_REGNO (new_reg) = ORIGINAL_REGNO (original_reg);
371  REG_USERVAR_P (new_reg) = REG_USERVAR_P (original_reg);
372  REG_POINTER (new_reg) = REG_POINTER (original_reg);
373  REG_ATTRS (new_reg) = REG_ATTRS (original_reg);
374  if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
375    fprintf (ira_dump_file, "      Creating newreg=%i from oldreg=%i\n",
376	     REGNO (new_reg), REGNO (original_reg));
377  ira_expand_reg_equiv ();
378  return new_reg;
379}
380
381/* Return TRUE if loop given by SUBNODE inside the loop given by
382   NODE.  */
383static bool
384subloop_tree_node_p (ira_loop_tree_node_t subnode, ira_loop_tree_node_t node)
385{
386  for (; subnode != NULL; subnode = subnode->parent)
387    if (subnode == node)
388      return true;
389  return false;
390}
391
/* Set up member `reg' to REG for allocnos which has the same regno as
   ALLOCNO and which are inside the loop corresponding to ALLOCNO. */
static void
set_allocno_reg (ira_allocno_t allocno, rtx reg)
{
  int regno;
  ira_allocno_t a;
  ira_loop_tree_node_t node;

  node = ALLOCNO_LOOP_TREE_NODE (allocno);
  /* Give REG to every allocno of the same original regno living in
     ALLOCNO's loop or in a loop nested inside it.  */
  for (a = ira_regno_allocno_map[ALLOCNO_REGNO (allocno)];
       a != NULL;
       a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
    if (subloop_tree_node_p (ALLOCNO_LOOP_TREE_NODE (a), node))
      ALLOCNO_EMIT_DATA (a)->reg = reg;
  /* Caps representing ALLOCNO in enclosing loops get the same reg.  */
  for (a = ALLOCNO_CAP (allocno); a != NULL; a = ALLOCNO_CAP (a))
    ALLOCNO_EMIT_DATA (a)->reg = reg;
  regno = ALLOCNO_REGNO (allocno);
  /* Walk up through caps and parent loop nodes setting
     child_renamed_p on each ancestor allocno; stop at the first
     ancestor already marked (everything above it is marked too).  */
  for (a = allocno;;)
    {
      if (a == NULL || (a = ALLOCNO_CAP (a)) == NULL)
	{
	  node = node->parent;
	  if (node == NULL)
	    break;
	  a = node->regno_allocno_map[regno];
	}
      if (a == NULL)
	continue;
      if (ALLOCNO_EMIT_DATA (a)->child_renamed_p)
	break;
      ALLOCNO_EMIT_DATA (a)->child_renamed_p = true;
    }
}
426
/* Return true if there is an entry to given loop not from its parent
   (or grandparent) block.  For example, it is possible for two
   adjacent loops inside another loop.  */
static bool
entered_from_non_parent_p (ira_loop_tree_node_t loop_node)
{
  ira_loop_tree_node_t bb_node, src_loop_node, parent;
  edge e;
  edge_iterator ei;

  /* Examine the predecessors of every basic block directly inside
     LOOP_NODE.  */
  for (bb_node = loop_node->children;
       bb_node != NULL;
       bb_node = bb_node->next)
    if (bb_node->bb != NULL)
      {
	FOR_EACH_EDGE (e, ei, bb_node->bb->preds)
	  if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
	      && (src_loop_node = IRA_BB_NODE (e->src)->parent) != loop_node)
	    {
	      /* Is the edge source inside a loop nested in
		 LOOP_NODE?  */
	      for (parent = src_loop_node->parent;
		   parent != NULL;
		   parent = parent->parent)
		if (parent == loop_node)
		  break;
	      if (parent != NULL)
		/* That is an exit from a nested loop -- skip it.  */
		continue;
	      /* Is the edge source an ancestor (parent, grandparent,
		 ...) of LOOP_NODE?  If not, this is an entry from a
		 non-parent block.  */
	      for (parent = loop_node->parent;
		   parent != NULL;
		   parent = parent->parent)
		if (src_loop_node == parent)
		  break;
	      if (parent == NULL)
		return true;
	    }
      }
  return false;
}
465
466/* Set up ENTERED_FROM_NON_PARENT_P for each loop region.  */
467static void
468setup_entered_from_non_parent_p (void)
469{
470  unsigned int i;
471  loop_p loop;
472
473  ira_assert (current_loops != NULL);
474  FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), i, loop)
475    if (ira_loop_nodes[i].regno_allocno_map != NULL)
476      ira_loop_nodes[i].entered_from_non_parent_p
477	= entered_from_non_parent_p (&ira_loop_nodes[i]);
478}
479
/* Return TRUE if move of SRC_ALLOCNO (assigned to hard register) to
   DEST_ALLOCNO (assigned to memory) can be removed because it does
   not change value of the destination.  One possible reason for this
   is the situation when SRC_ALLOCNO is not modified in the
   corresponding loop.  */
static bool
store_can_be_removed_p (ira_allocno_t src_allocno, ira_allocno_t dest_allocno)
{
  int regno, orig_regno;
  ira_allocno_t a;
  ira_loop_tree_node_t node;

  ira_assert (ALLOCNO_CAP_MEMBER (src_allocno) == NULL
	      && ALLOCNO_CAP_MEMBER (dest_allocno) == NULL);
  orig_regno = ALLOCNO_REGNO (src_allocno);
  regno = REGNO (allocno_emit_reg (dest_allocno));
  /* Walk from the source loop up towards the root looking for the
     allocno that was given DEST_ALLOCNO's pseudo.  */
  for (node = ALLOCNO_LOOP_TREE_NODE (src_allocno);
       node != NULL;
       node = node->parent)
    {
      a = node->regno_allocno_map[orig_regno];
      ira_assert (a != NULL);
      if (REGNO (allocno_emit_reg (a)) == (unsigned) regno)
	/* We achieved the destination and everything is ok.  */
	return true;
      else if (bitmap_bit_p (node->modified_regnos, orig_regno))
	/* The pseudo can be modified on the way -- the store is
	   needed.  */
	return false;
      else if (node->entered_from_non_parent_p)
	/* If there is a path from a destination loop block to the
	   source loop header containing basic blocks of non-parents
	   (grandparents) of the source loop, we should have checked
	   modifications of the pseudo on this path too to decide
	   about possibility to remove the store.  It could be done by
	   solving a data-flow problem.  Unfortunately such global
	   solution would complicate IR flattening.  Therefore we just
	   prohibit removal of the store in such complicated case.  */
	return false;
    }
  /* It is actually a loop entry -- do not remove the store.  */
  return false;
}
521
/* Generate and attach moves to the edge E.  This looks at the final
   regnos of allocnos living on the edge with the same original regno
   to figure out when moves should be generated.  */
static void
generate_edge_moves (edge e)
{
  ira_loop_tree_node_t src_loop_node, dest_loop_node;
  unsigned int regno;
  bitmap_iterator bi;
  ira_allocno_t src_allocno, dest_allocno, *src_map, *dest_map;
  move_t move;
  bitmap regs_live_in_dest, regs_live_out_src;

  src_loop_node = IRA_BB_NODE (e->src)->parent;
  dest_loop_node = IRA_BB_NODE (e->dest)->parent;
  e->aux = NULL;
  /* Moves are only needed on region (loop) borders.  */
  if (src_loop_node == dest_loop_node)
    return;
  src_map = src_loop_node->regno_allocno_map;
  dest_map = dest_loop_node->regno_allocno_map;
  regs_live_in_dest = df_get_live_in (e->dest);
  regs_live_out_src = df_get_live_out (e->src);
  /* Consider every pseudo live across the edge.  */
  EXECUTE_IF_SET_IN_REG_SET (regs_live_in_dest,
			     FIRST_PSEUDO_REGISTER, regno, bi)
    if (bitmap_bit_p (regs_live_out_src, regno))
      {
	src_allocno = src_map[regno];
	dest_allocno = dest_map[regno];
	/* No move is needed if the pseudo was not renamed across the
	   border.  */
	if (REGNO (allocno_emit_reg (src_allocno))
	    == REGNO (allocno_emit_reg (dest_allocno)))
	  continue;
	/* Remove unnecessary stores at the region exit.  We should do
	   this for readonly memory for sure and this is guaranteed by
	   that we never generate moves on region borders (see
	   checking in function change_loop).  */
 	if (ALLOCNO_HARD_REGNO (dest_allocno) < 0
	    && ALLOCNO_HARD_REGNO (src_allocno) >= 0
	    && store_can_be_removed_p (src_allocno, dest_allocno))
	  {
	    ALLOCNO_EMIT_DATA (src_allocno)->mem_optimized_dest = dest_allocno;
	    ALLOCNO_EMIT_DATA (dest_allocno)->mem_optimized_dest_p = true;
	    if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
	      fprintf (ira_dump_file, "      Remove r%d:a%d->a%d(mem)\n",
		       regno, ALLOCNO_NUM (src_allocno),
		       ALLOCNO_NUM (dest_allocno));
	    continue;
	  }
	move = create_move (dest_allocno, src_allocno);
	add_to_edge_list (e, move, true);
    }
}
573
574/* Bitmap of allocnos local for the current loop.  */
575static bitmap local_allocno_bitmap;
576
577/* This bitmap is used to find that we need to generate and to use a
578   new pseudo-register when processing allocnos with the same original
579   regno.  */
580static bitmap used_regno_bitmap;
581
582/* This bitmap contains regnos of allocnos which were renamed locally
583   because the allocnos correspond to disjoint live ranges in loops
584   with a common parent.  */
585static bitmap renamed_regno_bitmap;
586
587/* Change (if necessary) pseudo-registers inside loop given by loop
588   tree node NODE.  */
589static void
590change_loop (ira_loop_tree_node_t node)
591{
592  bitmap_iterator bi;
593  unsigned int i;
594  int regno;
595  bool used_p;
596  ira_allocno_t allocno, parent_allocno, *map;
597  rtx_insn *insn;
598  rtx original_reg;
599  enum reg_class aclass, pclass;
600  ira_loop_tree_node_t parent;
601
602  if (node != ira_loop_tree_root)
603    {
604      ira_assert (current_loops != NULL);
605
606      if (node->bb != NULL)
607	{
608	  FOR_BB_INSNS (node->bb, insn)
609	    if (INSN_P (insn) && change_regs_in_insn (&insn))
610	      {
611		df_insn_rescan (insn);
612		df_notes_rescan (insn);
613	      }
614	  return;
615	}
616
617      if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
618	fprintf (ira_dump_file,
619		 "      Changing RTL for loop %d (header bb%d)\n",
620		 node->loop_num, node->loop->header->index);
621
622      parent = ira_curr_loop_tree_node->parent;
623      map = parent->regno_allocno_map;
624      EXECUTE_IF_SET_IN_REG_SET (ira_curr_loop_tree_node->border_allocnos,
625				 0, i, bi)
626	{
627	  allocno = ira_allocnos[i];
628	  regno = ALLOCNO_REGNO (allocno);
629	  aclass = ALLOCNO_CLASS (allocno);
630	  pclass = ira_pressure_class_translate[aclass];
631	  parent_allocno = map[regno];
632	  ira_assert (regno < ira_reg_equiv_len);
633	  /* We generate the same hard register move because the
634	     reload pass can put an allocno into memory in this case
635	     we will have live range splitting.  If it does not happen
636	     such the same hard register moves will be removed.  The
637	     worst case when the both allocnos are put into memory by
638	     the reload is very rare.  */
639	  if (parent_allocno != NULL
640	      && (ALLOCNO_HARD_REGNO (allocno)
641		  == ALLOCNO_HARD_REGNO (parent_allocno))
642	      && (ALLOCNO_HARD_REGNO (allocno) < 0
643		  || (parent->reg_pressure[pclass] + 1
644		      <= ira_class_hard_regs_num[pclass])
645		  || TEST_HARD_REG_BIT (ira_prohibited_mode_move_regs
646					[ALLOCNO_MODE (allocno)],
647					ALLOCNO_HARD_REGNO (allocno))
648		  /* don't create copies because reload can spill an
649		     allocno set by copy although the allocno will not
650		     get memory slot.  */
651		  || ira_equiv_no_lvalue_p (regno)
652		  || (pic_offset_table_rtx != NULL
653		      && (ALLOCNO_REGNO (allocno)
654			  == (int) REGNO (pic_offset_table_rtx)))))
655	    continue;
656	  original_reg = allocno_emit_reg (allocno);
657	  if (parent_allocno == NULL
658	      || (REGNO (allocno_emit_reg (parent_allocno))
659		  == REGNO (original_reg)))
660	    {
661	      if (internal_flag_ira_verbose > 3 && ira_dump_file)
662		fprintf (ira_dump_file, "  %i vs parent %i:",
663			 ALLOCNO_HARD_REGNO (allocno),
664			 ALLOCNO_HARD_REGNO (parent_allocno));
665	      set_allocno_reg (allocno, ira_create_new_reg (original_reg));
666	    }
667	}
668    }
669  /* Rename locals: Local allocnos with same regno in different loops
670     might get the different hard register.  So we need to change
671     ALLOCNO_REG.  */
672  bitmap_and_compl (local_allocno_bitmap,
673		    ira_curr_loop_tree_node->all_allocnos,
674		    ira_curr_loop_tree_node->border_allocnos);
675  EXECUTE_IF_SET_IN_REG_SET (local_allocno_bitmap, 0, i, bi)
676    {
677      allocno = ira_allocnos[i];
678      regno = ALLOCNO_REGNO (allocno);
679      if (ALLOCNO_CAP_MEMBER (allocno) != NULL)
680	continue;
681      used_p = !bitmap_set_bit (used_regno_bitmap, regno);
682      ALLOCNO_EMIT_DATA (allocno)->somewhere_renamed_p = true;
683      if (! used_p)
684	continue;
685      bitmap_set_bit (renamed_regno_bitmap, regno);
686      set_allocno_reg (allocno, ira_create_new_reg (allocno_emit_reg (allocno)));
687    }
688}
689
690/* Process to set up flag somewhere_renamed_p.  */
691static void
692set_allocno_somewhere_renamed_p (void)
693{
694  unsigned int regno;
695  ira_allocno_t allocno;
696  ira_allocno_iterator ai;
697
698  FOR_EACH_ALLOCNO (allocno, ai)
699    {
700      regno = ALLOCNO_REGNO (allocno);
701      if (bitmap_bit_p (renamed_regno_bitmap, regno)
702	  && REGNO (allocno_emit_reg (allocno)) == regno)
703	ALLOCNO_EMIT_DATA (allocno)->somewhere_renamed_p = true;
704    }
705}
706
707/* Return TRUE if move lists on all edges given in vector VEC are
708   equal.  */
709static bool
710eq_edge_move_lists_p (vec<edge, va_gc> *vec)
711{
712  move_t list;
713  int i;
714
715  list = (move_t) EDGE_I (vec, 0)->aux;
716  for (i = EDGE_COUNT (vec) - 1; i > 0; i--)
717    if (! eq_move_lists_p (list, (move_t) EDGE_I (vec, i)->aux))
718      return false;
719  return true;
720}
721
722/* Look at all entry edges (if START_P) or exit edges of basic block
723   BB and put move lists at the BB start or end if it is possible.  In
724   other words, this decreases code duplication of allocno moves.  */
725static void
726unify_moves (basic_block bb, bool start_p)
727{
728  int i;
729  edge e;
730  move_t list;
731  vec<edge, va_gc> *vec;
732
733  vec = (start_p ? bb->preds : bb->succs);
734  if (EDGE_COUNT (vec) == 0 || ! eq_edge_move_lists_p (vec))
735    return;
736  e = EDGE_I (vec, 0);
737  list = (move_t) e->aux;
738  if (! start_p && control_flow_insn_p (BB_END (bb)))
739    return;
740  e->aux = NULL;
741  for (i = EDGE_COUNT (vec) - 1; i > 0; i--)
742    {
743      e = EDGE_I (vec, i);
744      free_move_list ((move_t) e->aux);
745      e->aux = NULL;
746    }
747  if (start_p)
748    at_bb_start[bb->index] = list;
749  else
750    at_bb_end[bb->index] = list;
751}
752
753/* Last move (in move sequence being processed) setting up the
754   corresponding hard register.  */
755static move_t hard_regno_last_set[FIRST_PSEUDO_REGISTER];
756
757/* If the element value is equal to CURR_TICK then the corresponding
758   element in `hard_regno_last_set' is defined and correct.  */
759static int hard_regno_last_set_check[FIRST_PSEUDO_REGISTER];
760
761/* Last move (in move sequence being processed) setting up the
762   corresponding allocno.  */
763static move_t *allocno_last_set;
764
/* If the element value is equal to CURR_TICK then the corresponding
   element in `allocno_last_set' is defined and correct.  */
767static int *allocno_last_set_check;
768
769/* Definition of vector of moves.  */
770
771/* This vec contains moves sorted topologically (depth-first) on their
772   dependency graph.  */
773static vec<move_t> move_vec;
774
/* The variable value is used to check correctness of values of
   elements of arrays `hard_regno_last_set' and
   `allocno_last_set'.  */
778static int curr_tick;
779
780/* This recursive function traverses dependencies of MOVE and produces
781   topological sorting (in depth-first order).  */
782static void
783traverse_moves (move_t move)
784{
785  int i;
786
787  if (move->visited_p)
788    return;
789  move->visited_p = true;
790  for (i = move->deps_num - 1; i >= 0; i--)
791    traverse_moves (move->deps[i]);
792  move_vec.safe_push (move);
793}
794
/* Remove unnecessary moves in the LIST, makes topological sorting,
   and removes cycles on hard reg dependencies by introducing new
   allocnos assigned to memory and additional moves.  It returns the
   result move list.  */
static move_t
modify_move_list (move_t list)
{
  int i, n, nregs, hard_regno;
  ira_allocno_t to, from;
  move_t move, new_move, set_move, first, last;

  if (list == NULL)
    return NULL;
  /* Create move deps.  */
  curr_tick++;
  /* First pass: record, for each hard register, the last move in LIST
     writing it.  */
  for (move = list; move != NULL; move = move->next)
    {
      to = move->to;
      if ((hard_regno = ALLOCNO_HARD_REGNO (to)) < 0)
	continue;
      nregs = hard_regno_nregs[hard_regno][ALLOCNO_MODE (to)];
      for (i = 0; i < nregs; i++)
	{
	  hard_regno_last_set[hard_regno + i] = move;
	  hard_regno_last_set_check[hard_regno + i] = curr_tick;
	}
    }
  /* Second pass: a move reading a hard register depends on the moves
     writing that register (moves of the same original regno are not
     dependencies).  */
  for (move = list; move != NULL; move = move->next)
    {
      from = move->from;
      to = move->to;
      if ((hard_regno = ALLOCNO_HARD_REGNO (from)) >= 0)
	{
	  nregs = hard_regno_nregs[hard_regno][ALLOCNO_MODE (from)];
	  /* Count the dependencies to size the array...  */
	  for (n = i = 0; i < nregs; i++)
	    if (hard_regno_last_set_check[hard_regno + i] == curr_tick
		&& (ALLOCNO_REGNO (hard_regno_last_set[hard_regno + i]->to)
		    != ALLOCNO_REGNO (from)))
	      n++;
	  move->deps = (move_t *) ira_allocate (n * sizeof (move_t));
	  /* ...then fill it with the same filter.  */
	  for (n = i = 0; i < nregs; i++)
	    if (hard_regno_last_set_check[hard_regno + i] == curr_tick
		&& (ALLOCNO_REGNO (hard_regno_last_set[hard_regno + i]->to)
		    != ALLOCNO_REGNO (from)))
	      move->deps[n++] = hard_regno_last_set[hard_regno + i];
	  move->deps_num = n;
	}
    }
  /* Topological sorting:  */
  move_vec.truncate (0);
  for (move = list; move != NULL; move = move->next)
    traverse_moves (move);
  /* Relink the list in the order produced above; the vector holds the
     moves in reverse of the desired order.  */
  last = NULL;
  for (i = (int) move_vec.length () - 1; i >= 0; i--)
    {
      move = move_vec[i];
      move->next = NULL;
      if (last != NULL)
	last->next = move;
      last = move;
    }
  first = move_vec.last ();
  /* Removing cycles:  */
  curr_tick++;
  move_vec.truncate (0);
  for (move = first; move != NULL; move = move->next)
    {
      from = move->from;
      to = move->to;
      if ((hard_regno = ALLOCNO_HARD_REGNO (from)) >= 0)
	{
	  nregs = hard_regno_nregs[hard_regno][ALLOCNO_MODE (from)];
	  for (i = 0; i < nregs; i++)
	    if (hard_regno_last_set_check[hard_regno + i] == curr_tick
		&& ALLOCNO_HARD_REGNO
		   (hard_regno_last_set[hard_regno + i]->to) >= 0)
	      {
		int n, j;
		ira_allocno_t new_allocno;

		/* MOVE reads a hard register an earlier move in the
		   sorted sequence also writes: break the cycle by
		   first saving that move's value into a new
		   memory-assigned allocno.  */
		set_move = hard_regno_last_set[hard_regno + i];
		/* It does not matter what loop_tree_node (of TO or
		   FROM) to use for the new allocno because of
		   subsequent IRA internal representation
		   flattening.  */
		new_allocno
		  = create_new_allocno (ALLOCNO_REGNO (set_move->to),
					ALLOCNO_LOOP_TREE_NODE (set_move->to));
		ALLOCNO_MODE (new_allocno) = ALLOCNO_MODE (set_move->to);
		ira_set_allocno_class (new_allocno,
				       ALLOCNO_CLASS (set_move->to));
		ira_create_allocno_objects (new_allocno);
		ALLOCNO_ASSIGNED_P (new_allocno) = true;
		/* The temporary is marked as spilled (no hard
		   register).  */
		ALLOCNO_HARD_REGNO (new_allocno) = -1;
		ALLOCNO_EMIT_DATA (new_allocno)->reg
		  = ira_create_new_reg (allocno_emit_reg (set_move->to));

		/* Make it possibly conflicting with all earlier
		   created allocnos.  Cases where temporary allocnos
		   created to remove the cycles are quite rare.  */
		n = ALLOCNO_NUM_OBJECTS (new_allocno);
		gcc_assert (n == ALLOCNO_NUM_OBJECTS (set_move->to));
		for (j = 0; j < n; j++)
		  {
		    ira_object_t new_obj = ALLOCNO_OBJECT (new_allocno, j);

		    OBJECT_MIN (new_obj) = 0;
		    OBJECT_MAX (new_obj) = ira_objects_num - 1;
		  }

		new_move = create_move (set_move->to, new_allocno);
		set_move->to = new_allocno;
		move_vec.safe_push (new_move);
		ira_move_loops_num++;
		if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
		  fprintf (ira_dump_file,
			   "    Creating temporary allocno a%dr%d\n",
			   ALLOCNO_NUM (new_allocno),
			   REGNO (allocno_emit_reg (new_allocno)));
	      }
	}
      if ((hard_regno = ALLOCNO_HARD_REGNO (to)) < 0)
	continue;
      nregs = hard_regno_nregs[hard_regno][ALLOCNO_MODE (to)];
      for (i = 0; i < nregs; i++)
	{
	  hard_regno_last_set[hard_regno + i] = move;
	  hard_regno_last_set_check[hard_regno + i] = curr_tick;
	}
    }
  /* Append the moves created for cycle removal to the end of the
     resulting list.  */
  for (i = (int) move_vec.length () - 1; i >= 0; i--)
    {
      move = move_vec[i];
      move->next = NULL;
      last->next = move;
      last = move;
    }
  return first;
}
934
935/* Generate RTX move insns from the move list LIST.  This updates
936   allocation cost using move execution frequency FREQ.  */
937static rtx_insn *
938emit_move_list (move_t list, int freq)
939{
940  rtx to, from, dest;
941  int to_regno, from_regno, cost, regno;
942  rtx_insn *result, *insn;
943  rtx set;
944  machine_mode mode;
945  enum reg_class aclass;
946
947  grow_reg_equivs ();
948  start_sequence ();
949  for (; list != NULL; list = list->next)
950    {
951      start_sequence ();
952      to = allocno_emit_reg (list->to);
953      to_regno = REGNO (to);
954      from = allocno_emit_reg (list->from);
955      from_regno = REGNO (from);
956      emit_move_insn (to, from);
957      list->insn = get_insns ();
958      end_sequence ();
959      for (insn = list->insn; insn != NULL_RTX; insn = NEXT_INSN (insn))
960	{
961	  /* The reload needs to have set up insn codes.  If the
962	     reload sets up insn codes by itself, it may fail because
963	     insns will have hard registers instead of pseudos and
964	     there may be no machine insn with given hard
965	     registers.  */
966	  recog_memoized (insn);
967	  /* Add insn to equiv init insn list if it is necessary.
968	     Otherwise reload will not remove this insn if it decides
969	     to use the equivalence.  */
970	  if ((set = single_set (insn)) != NULL_RTX)
971	    {
972	      dest = SET_DEST (set);
973	      if (GET_CODE (dest) == SUBREG)
974		dest = SUBREG_REG (dest);
975	      ira_assert (REG_P (dest));
976	      regno = REGNO (dest);
977	      if (regno >= ira_reg_equiv_len
978		  || (ira_reg_equiv[regno].invariant == NULL_RTX
979		      && ira_reg_equiv[regno].constant == NULL_RTX))
980		continue; /* regno has no equivalence.  */
981	      ira_assert ((int) reg_equivs->length () > regno);
982	      reg_equiv_init (regno)
983		= gen_rtx_INSN_LIST (VOIDmode, insn, reg_equiv_init (regno));
984	    }
985	}
986      if (ira_use_lra_p)
987	ira_update_equiv_info_by_shuffle_insn (to_regno, from_regno, list->insn);
988      emit_insn (list->insn);
989      mode = ALLOCNO_MODE (list->to);
990      aclass = ALLOCNO_CLASS (list->to);
991      cost = 0;
992      if (ALLOCNO_HARD_REGNO (list->to) < 0)
993	{
994	  if (ALLOCNO_HARD_REGNO (list->from) >= 0)
995	    {
996	      cost = ira_memory_move_cost[mode][aclass][0] * freq;
997	      ira_store_cost += cost;
998	    }
999	}
1000      else if (ALLOCNO_HARD_REGNO (list->from) < 0)
1001	{
1002	  if (ALLOCNO_HARD_REGNO (list->to) >= 0)
1003	    {
1004	      cost = ira_memory_move_cost[mode][aclass][0] * freq;
1005	      ira_load_cost += cost;
1006	    }
1007	}
1008      else
1009	{
1010	  ira_init_register_move_cost_if_necessary (mode);
1011	  cost = ira_register_move_cost[mode][aclass][aclass] * freq;
1012	  ira_shuffle_cost += cost;
1013	}
1014      ira_overall_cost += cost;
1015    }
1016  result = get_insns ();
1017  end_sequence ();
1018  return result;
1019}
1020
1021/* Generate RTX move insns from move lists attached to basic blocks
1022   and edges.  */
1023static void
1024emit_moves (void)
1025{
1026  basic_block bb;
1027  edge_iterator ei;
1028  edge e;
1029  rtx_insn *insns, *tmp;
1030
1031  FOR_EACH_BB_FN (bb, cfun)
1032    {
1033      if (at_bb_start[bb->index] != NULL)
1034	{
1035	  at_bb_start[bb->index] = modify_move_list (at_bb_start[bb->index]);
1036	  insns = emit_move_list (at_bb_start[bb->index],
1037				  REG_FREQ_FROM_BB (bb));
1038	  tmp = BB_HEAD (bb);
1039	  if (LABEL_P (tmp))
1040	    tmp = NEXT_INSN (tmp);
1041	  if (NOTE_INSN_BASIC_BLOCK_P (tmp))
1042	    tmp = NEXT_INSN (tmp);
1043	  if (tmp == BB_HEAD (bb))
1044	    emit_insn_before (insns, tmp);
1045	  else if (tmp != NULL_RTX)
1046	    emit_insn_after (insns, PREV_INSN (tmp));
1047	  else
1048	    emit_insn_after (insns, get_last_insn ());
1049	}
1050
1051      if (at_bb_end[bb->index] != NULL)
1052	{
1053	  at_bb_end[bb->index] = modify_move_list (at_bb_end[bb->index]);
1054	  insns = emit_move_list (at_bb_end[bb->index], REG_FREQ_FROM_BB (bb));
1055	  ira_assert (! control_flow_insn_p (BB_END (bb)));
1056	  emit_insn_after (insns, BB_END (bb));
1057	}
1058
1059      FOR_EACH_EDGE (e, ei, bb->succs)
1060	{
1061	  if (e->aux == NULL)
1062	    continue;
1063	  ira_assert ((e->flags & EDGE_ABNORMAL) == 0
1064		      || ! EDGE_CRITICAL_P (e));
1065	  e->aux = modify_move_list ((move_t) e->aux);
1066	  insert_insn_on_edge
1067	    (emit_move_list ((move_t) e->aux,
1068			     REG_FREQ_FROM_EDGE_FREQ (EDGE_FREQUENCY (e))),
1069	     e);
1070	  if (e->src->next_bb != e->dest)
1071	    ira_additional_jumps_num++;
1072	}
1073    }
1074}
1075
1076/* Update costs of A and corresponding allocnos on upper levels on the
1077   loop tree from reading (if READ_P) or writing A on an execution
1078   path with FREQ.  */
1079static void
1080update_costs (ira_allocno_t a, bool read_p, int freq)
1081{
1082  ira_loop_tree_node_t parent;
1083
1084  for (;;)
1085    {
1086      ALLOCNO_NREFS (a)++;
1087      ALLOCNO_FREQ (a) += freq;
1088      ALLOCNO_MEMORY_COST (a)
1089	+= (ira_memory_move_cost[ALLOCNO_MODE (a)][ALLOCNO_CLASS (a)]
1090	    [read_p ? 1 : 0] * freq);
1091      if (ALLOCNO_CAP (a) != NULL)
1092	a = ALLOCNO_CAP (a);
1093      else if ((parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) == NULL
1094	       || (a = parent->regno_allocno_map[ALLOCNO_REGNO (a)]) == NULL)
1095	break;
1096    }
1097}
1098
/* Process moves from LIST with execution FREQ to add ranges, copies,
   and modify costs for allocnos involved in the moves.  All regnos
   living through the list are in LIVE_THROUGH, and the loop tree node
   used to find corresponding allocnos is NODE.  */
static void
add_range_and_copies_from_move_list (move_t list, ira_loop_tree_node_t node,
				     bitmap live_through, int freq)
{
  int start, n;
  unsigned int regno;
  move_t move;
  ira_allocno_t a;
  ira_copy_t cp;
  live_range_t r;
  bitmap_iterator bi;
  HARD_REG_SET hard_regs_live;

  if (list == NULL)
    return;
  /* N counts the pseudos living through the move list; it sizes the
     conflict arrays allocated below.  */
  n = 0;
  EXECUTE_IF_SET_IN_BITMAP (live_through, FIRST_PSEUDO_REGISTER, regno, bi)
    n++;
  REG_SET_TO_HARD_REG_SET (hard_regs_live, live_through);
  /* This is a trick to guarantee that new ranges is not merged with
     the old ones.  */
  ira_max_point++;
  start = ira_max_point;
  /* First pass: for each move, open/extend ranges for the source and
     open a (still unfinished, finish == -1) range for the
     destination, advancing ira_max_point between them so source and
     destination ranges of one move do not coincide.  */
  for (move = list; move != NULL; move = move->next)
    {
      ira_allocno_t from = move->from;
      ira_allocno_t to = move->to;
      int nr, i;

      /* The moved-to/from regnos are handled explicitly below, not as
	 live-through pseudos.  */
      bitmap_clear_bit (live_through, ALLOCNO_REGNO (from));
      bitmap_clear_bit (live_through, ALLOCNO_REGNO (to));

      nr = ALLOCNO_NUM_OBJECTS (to);
      for (i = 0; i < nr; i++)
	{
	  ira_object_t to_obj = ALLOCNO_OBJECT (to, i);
	  if (OBJECT_CONFLICT_ARRAY (to_obj) == NULL)
	    {
	      if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
		fprintf (ira_dump_file, "    Allocate conflicts for a%dr%d\n",
			 ALLOCNO_NUM (to), REGNO (allocno_emit_reg (to)));
	      ira_allocate_object_conflicts (to_obj, n);
	    }
	}
      /* Both ends of the move conflict with all hard registers live
	 through the list.  */
      ior_hard_reg_conflicts (from, &hard_regs_live);
      ior_hard_reg_conflicts (to, &hard_regs_live);

      /* The move reads FROM and writes TO.  */
      update_costs (from, true, freq);
      update_costs (to, false, freq);
      cp = ira_add_allocno_copy (from, to, freq, false, move->insn, NULL);
      if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
	fprintf (ira_dump_file, "    Adding cp%d:a%dr%d-a%dr%d\n",
		 cp->num, ALLOCNO_NUM (cp->first),
		 REGNO (allocno_emit_reg (cp->first)),
		 ALLOCNO_NUM (cp->second),
		 REGNO (allocno_emit_reg (cp->second)));

      nr = ALLOCNO_NUM_OBJECTS (from);
      for (i = 0; i < nr; i++)
	{
	  ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
	  r = OBJECT_LIVE_RANGES (from_obj);
	  if (r == NULL || r->finish >= 0)
	    {
	      /* No open range yet: add one covering the whole list so
		 far.  */
	      ira_add_live_range_to_object (from_obj, start, ira_max_point);
	      if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
		fprintf (ira_dump_file,
			 "    Adding range [%d..%d] to allocno a%dr%d\n",
			 start, ira_max_point, ALLOCNO_NUM (from),
			 REGNO (allocno_emit_reg (from)));
	    }
	  else
	    {
	      /* An earlier move in this list already opened a range
		 for FROM (finish == -1); close it here.  */
	      r->finish = ira_max_point;
	      if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
		fprintf (ira_dump_file,
			 "    Adding range [%d..%d] to allocno a%dr%d\n",
			 r->start, ira_max_point, ALLOCNO_NUM (from),
			 REGNO (allocno_emit_reg (from)));
	    }
	}
      ira_max_point++;
      /* Open a range for the destination; its finish is filled in by
	 the second pass below (or by a later move reading it).  */
      nr = ALLOCNO_NUM_OBJECTS (to);
      for (i = 0; i < nr; i++)
	{
	  ira_object_t to_obj = ALLOCNO_OBJECT (to, i);
	  ira_add_live_range_to_object (to_obj, ira_max_point, -1);
	}
      ira_max_point++;
    }
  /* Second pass: close any destination ranges still left open.  */
  for (move = list; move != NULL; move = move->next)
    {
      int nr, i;
      nr = ALLOCNO_NUM_OBJECTS (move->to);
      for (i = 0; i < nr; i++)
	{
	  ira_object_t to_obj = ALLOCNO_OBJECT (move->to, i);
	  r = OBJECT_LIVE_RANGES (to_obj);
	  if (r->finish < 0)
	    {
	      r->finish = ira_max_point - 1;
	      if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
		fprintf (ira_dump_file,
			 "    Adding range [%d..%d] to allocno a%dr%d\n",
			 r->start, r->finish, ALLOCNO_NUM (move->to),
			 REGNO (allocno_emit_reg (move->to)));
	    }
	}
    }
  /* Finally, pseudos live through the whole list (and not moved by
     it) get a range spanning all the points used above.  */
  EXECUTE_IF_SET_IN_BITMAP (live_through, FIRST_PSEUDO_REGISTER, regno, bi)
    {
      ira_allocno_t to;
      int nr, i;

      a = node->regno_allocno_map[regno];
      /* If the allocno's memory was optimized into another allocno,
	 use that destination allocno instead.  */
      if ((to = ALLOCNO_EMIT_DATA (a)->mem_optimized_dest) != NULL)
	a = to;
      nr = ALLOCNO_NUM_OBJECTS (a);
      for (i = 0; i < nr; i++)
	{
	  ira_object_t obj = ALLOCNO_OBJECT (a, i);
	  ira_add_live_range_to_object (obj, start, ira_max_point - 1);
	}
      if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
	fprintf
	  (ira_dump_file,
	   "    Adding range [%d..%d] to live through %s allocno a%dr%d\n",
	   start, ira_max_point - 1,
	   to != NULL ? "upper level" : "",
	   ALLOCNO_NUM (a), REGNO (allocno_emit_reg (a)));
    }
}
1235
1236/* Process all move list to add ranges, conflicts, copies, and modify
1237   costs for allocnos involved in the moves.  */
1238static void
1239add_ranges_and_copies (void)
1240{
1241  basic_block bb;
1242  edge_iterator ei;
1243  edge e;
1244  ira_loop_tree_node_t node;
1245  bitmap live_through;
1246
1247  live_through = ira_allocate_bitmap ();
1248  FOR_EACH_BB_FN (bb, cfun)
1249    {
1250      /* It does not matter what loop_tree_node (of source or
1251	 destination block) to use for searching allocnos by their
1252	 regnos because of subsequent IR flattening.  */
1253      node = IRA_BB_NODE (bb)->parent;
1254      bitmap_copy (live_through, df_get_live_in (bb));
1255      add_range_and_copies_from_move_list
1256	(at_bb_start[bb->index], node, live_through, REG_FREQ_FROM_BB (bb));
1257      bitmap_copy (live_through, df_get_live_out (bb));
1258      add_range_and_copies_from_move_list
1259	(at_bb_end[bb->index], node, live_through, REG_FREQ_FROM_BB (bb));
1260      FOR_EACH_EDGE (e, ei, bb->succs)
1261	{
1262	  bitmap_and (live_through,
1263		      df_get_live_in (e->dest), df_get_live_out (bb));
1264	  add_range_and_copies_from_move_list
1265	    ((move_t) e->aux, node, live_through,
1266	     REG_FREQ_FROM_EDGE_FREQ (EDGE_FREQUENCY (e)));
1267	}
1268    }
1269  ira_free_bitmap (live_through);
1270}
1271
/* The entry function changes code and generates shuffling allocnos on
   region borders for the regional (LOOPS_P is TRUE in this case)
   register allocation.  */
void
ira_emit (bool loops_p)
{
  basic_block bb;
  rtx_insn *insn;
  edge_iterator ei;
  edge e;
  ira_allocno_t a;
  ira_allocno_iterator ai;
  size_t sz;

  /* Start every allocno out using the original pseudo's rtx.  */
  FOR_EACH_ALLOCNO (a, ai)
    ALLOCNO_EMIT_DATA (a)->reg = regno_reg_rtx[ALLOCNO_REGNO (a)];
  /* With a single region no border moves are needed.  */
  if (! loops_p)
    return;
  sz = sizeof (move_t) * last_basic_block_for_fn (cfun);
  at_bb_start = (move_t *) ira_allocate (sz);
  memset (at_bb_start, 0, sz);
  at_bb_end = (move_t *) ira_allocate (sz);
  memset (at_bb_end, 0, sz);
  local_allocno_bitmap = ira_allocate_bitmap ();
  used_regno_bitmap = ira_allocate_bitmap ();
  renamed_regno_bitmap = ira_allocate_bitmap ();
  max_regno_before_changing = max_reg_num ();
  /* Top-down traversal creating new pseudos for allocnos that got a
     different assignment inside a region.  */
  ira_traverse_loop_tree (true, ira_loop_tree_root, change_loop, NULL);
  set_allocno_somewhere_renamed_p ();
  ira_free_bitmap (used_regno_bitmap);
  ira_free_bitmap (renamed_regno_bitmap);
  ira_free_bitmap (local_allocno_bitmap);
  setup_entered_from_non_parent_p ();
  /* Collect the moves needed on each edge crossing a region
     border.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      at_bb_start[bb->index] = NULL;
      at_bb_end[bb->index] = NULL;
      FOR_EACH_EDGE (e, ei, bb->succs)
	if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
	  generate_edge_moves (e);
    }
  allocno_last_set
    = (move_t *) ira_allocate (sizeof (move_t) * max_reg_num ());
  allocno_last_set_check
    = (int *) ira_allocate (sizeof (int) * max_reg_num ());
  memset (allocno_last_set_check, 0, sizeof (int) * max_reg_num ());
  memset (hard_regno_last_set_check, 0, sizeof (hard_regno_last_set_check));
  curr_tick = 0;
  /* Hoist common edge moves into block starts first, then block
     ends.  */
  FOR_EACH_BB_FN (bb, cfun)
    unify_moves (bb, true);
  FOR_EACH_BB_FN (bb, cfun)
    unify_moves (bb, false);
  move_vec.create (ira_allocnos_num);
  emit_moves ();
  add_ranges_and_copies ();
  /* Clean up: */
  FOR_EACH_BB_FN (bb, cfun)
    {
      free_move_list (at_bb_start[bb->index]);
      free_move_list (at_bb_end[bb->index]);
      FOR_EACH_EDGE (e, ei, bb->succs)
	{
	  free_move_list ((move_t) e->aux);
	  e->aux = NULL;
	}
    }
  move_vec.release ();
  ira_free (allocno_last_set_check);
  ira_free (allocno_last_set);
  /* Actually insert the move insns queued on edges, possibly
     splitting critical edges.  */
  commit_edge_insertions ();
  /* Fix insn codes.  It is necessary to do it before reload because
     reload assumes initial insn codes defined.  The insn codes can be
     invalidated by CFG infrastructure for example in jump
     redirection.  */
  FOR_EACH_BB_FN (bb, cfun)
    FOR_BB_INSNS_REVERSE (bb, insn)
      if (INSN_P (insn))
	recog_memoized (insn);
  ira_free (at_bb_end);
  ira_free (at_bb_start);
}
1353