1/* Dwarf2 Call Frame Information helper routines.
2   Copyright (C) 1992-2015 Free Software Foundation, Inc.
3
4This file is part of GCC.
5
6GCC is free software; you can redistribute it and/or modify it under
7the terms of the GNU General Public License as published by the Free
8Software Foundation; either version 3, or (at your option) any later
9version.
10
11GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12WARRANTY; without even the implied warranty of MERCHANTABILITY or
13FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14for more details.
15
16You should have received a copy of the GNU General Public License
17along with GCC; see the file COPYING3.  If not see
18<http://www.gnu.org/licenses/>.  */
19
20#include "config.h"
21#include "system.h"
22#include "coretypes.h"
23#include "tm.h"
24#include "version.h"
25#include "flags.h"
26#include "rtl.h"
27#include "hash-set.h"
28#include "machmode.h"
29#include "vec.h"
30#include "double-int.h"
31#include "input.h"
32#include "alias.h"
33#include "symtab.h"
34#include "wide-int.h"
35#include "inchash.h"
36#include "real.h"
37#include "tree.h"
38#include "stor-layout.h"
39#include "hard-reg-set.h"
40#include "function.h"
41#include "cfgbuild.h"
42#include "dwarf2.h"
43#include "dwarf2out.h"
44#include "dwarf2asm.h"
45#include "ggc.h"
46#include "hash-table.h"
47#include "tm_p.h"
48#include "target.h"
49#include "common/common-target.h"
50#include "tree-pass.h"
51
52#include "except.h"		/* expand_builtin_dwarf_sp_column */
53#include "hashtab.h"
54#include "statistics.h"
55#include "fixed-value.h"
56#include "insn-config.h"
57#include "expmed.h"
58#include "dojump.h"
59#include "explow.h"
60#include "calls.h"
61#include "emit-rtl.h"
62#include "varasm.h"
63#include "stmt.h"
64#include "expr.h"		/* init_return_column_size */
65#include "regs.h"		/* expand_builtin_init_dwarf_reg_sizes */
66#include "output.h"		/* asm_out_file */
67#include "debug.h"		/* dwarf2out_do_frame, dwarf2out_do_cfi_asm */
68
69
70/* ??? Poison these here until it can be done generically.  They've been
71   totally replaced in this file; make sure it stays that way.  */
72#undef DWARF2_UNWIND_INFO
73#undef DWARF2_FRAME_INFO
74#if (GCC_VERSION >= 3000)
75 #pragma GCC poison DWARF2_UNWIND_INFO DWARF2_FRAME_INFO
76#endif
77
78#ifndef INCOMING_RETURN_ADDR_RTX
79#define INCOMING_RETURN_ADDR_RTX  (gcc_unreachable (), NULL_RTX)
80#endif
81
82/* Maximum size (in bytes) of an artificially generated label.  */
83#define MAX_ARTIFICIAL_LABEL_BYTES	30
84
85/* A collected description of an entire row of the abstract CFI table.  */
86typedef struct GTY(()) dw_cfi_row_struct
87{
88  /* The expression that computes the CFA, expressed in two different ways.
89     The CFA member for the simple cases, and the full CFI expression for
90     the complex cases.  The later will be a DW_CFA_cfa_expression.  */
91  dw_cfa_location cfa;
92  dw_cfi_ref cfa_cfi;
93
94  /* The expressions for any register column that is saved.  */
95  cfi_vec reg_save;
96} dw_cfi_row;
97
98/* The caller's ORIG_REG is saved in SAVED_IN_REG.  */
99typedef struct GTY(()) reg_saved_in_data_struct {
100  rtx orig_reg;
101  rtx saved_in_reg;
102} reg_saved_in_data;
103
104
105/* Since we no longer have a proper CFG, we're going to create a facsimile
106   of one on the fly while processing the frame-related insns.
107
108   We create dw_trace_info structures for each extended basic block beginning
109   and ending at a "save point".  Save points are labels, barriers, certain
110   notes, and of course the beginning and end of the function.
111
112   As we encounter control transfer insns, we propagate the "current"
113   row state across the edges to the starts of traces.  When checking is
114   enabled, we validate that we propagate the same data from all sources.
115
116   All traces are members of the TRACE_INFO array, in the order in which
117   they appear in the instruction stream.
118
119   All save points are present in the TRACE_INDEX hash, mapping the insn
120   starting a trace to the dw_trace_info describing the trace.  */
121
122typedef struct
123{
124  /* The insn that begins the trace.  */
125  rtx_insn *head;
126
127  /* The row state at the beginning and end of the trace.  */
128  dw_cfi_row *beg_row, *end_row;
129
130  /* Tracking for DW_CFA_GNU_args_size.  The "true" sizes are those we find
131     while scanning insns.  However, the args_size value is irrelevant at
132     any point except can_throw_internal_p insns.  Therefore the "delay"
133     sizes the values that must actually be emitted for this trace.  */
134  HOST_WIDE_INT beg_true_args_size, end_true_args_size;
135  HOST_WIDE_INT beg_delay_args_size, end_delay_args_size;
136
137  /* The first EH insn in the trace, where beg_delay_args_size must be set.  */
138  rtx_insn *eh_head;
139
140  /* The following variables contain data used in interpreting frame related
141     expressions.  These are not part of the "real" row state as defined by
142     Dwarf, but it seems like they need to be propagated into a trace in case
143     frame related expressions have been sunk.  */
144  /* ??? This seems fragile.  These variables are fragments of a larger
145     expression.  If we do not keep the entire expression together, we risk
146     not being able to put it together properly.  Consider forcing targets
147     to generate self-contained expressions and dropping all of the magic
148     interpretation code in this file.  Or at least refusing to shrink wrap
149     any frame related insn that doesn't contain a complete expression.  */
150
151  /* The register used for saving registers to the stack, and its offset
152     from the CFA.  */
153  dw_cfa_location cfa_store;
154
155  /* A temporary register holding an integral value used in adjusting SP
156     or setting up the store_reg.  The "offset" field holds the integer
157     value, not an offset.  */
158  dw_cfa_location cfa_temp;
159
160  /* A set of registers saved in other registers.  This is the inverse of
161     the row->reg_save info, if the entry is a DW_CFA_register.  This is
162     implemented as a flat array because it normally contains zero or 1
163     entry, depending on the target.  IA-64 is the big spender here, using
164     a maximum of 5 entries.  */
165  vec<reg_saved_in_data> regs_saved_in_regs;
166
167  /* An identifier for this trace.  Used only for debugging dumps.  */
168  unsigned id;
169
170  /* True if this trace immediately follows NOTE_INSN_SWITCH_TEXT_SECTIONS.  */
171  bool switch_sections;
172
173  /* True if we've seen different values incoming to beg_true_args_size.  */
174  bool args_size_undefined;
175} dw_trace_info;
176
177
178typedef dw_trace_info *dw_trace_info_ref;
179
180
181/* Hashtable helpers.  */
182
183struct trace_info_hasher : typed_noop_remove <dw_trace_info>
184{
185  typedef dw_trace_info value_type;
186  typedef dw_trace_info compare_type;
187  static inline hashval_t hash (const value_type *);
188  static inline bool equal (const value_type *, const compare_type *);
189};
190
191inline hashval_t
192trace_info_hasher::hash (const value_type *ti)
193{
194  return INSN_UID (ti->head);
195}
196
197inline bool
198trace_info_hasher::equal (const value_type *a, const compare_type *b)
199{
200  return a->head == b->head;
201}
202
203
204/* The variables making up the pseudo-cfg, as described above.  */
205static vec<dw_trace_info> trace_info;
206static vec<dw_trace_info_ref> trace_work_list;
207static hash_table<trace_info_hasher> *trace_index;
208
209/* A vector of call frame insns for the CIE.  */
210cfi_vec cie_cfi_vec;
211
212/* The state of the first row of the FDE table, which includes the
213   state provided by the CIE.  */
214static GTY(()) dw_cfi_row *cie_cfi_row;
215
216static GTY(()) reg_saved_in_data *cie_return_save;
217
218static GTY(()) unsigned long dwarf2out_cfi_label_num;
219
220/* The insn after which a new CFI note should be emitted.  */
221static rtx add_cfi_insn;
222
223/* When non-null, add_cfi will add the CFI to this vector.  */
224static cfi_vec *add_cfi_vec;
225
226/* The current instruction trace.  */
227static dw_trace_info *cur_trace;
228
229/* The current, i.e. most recently generated, row of the CFI table.  */
230static dw_cfi_row *cur_row;
231
232/* A copy of the current CFA, for use during the processing of a
233   single insn.  */
234static dw_cfa_location *cur_cfa;
235
236/* We delay emitting a register save until either (a) we reach the end
237   of the prologue or (b) the register is clobbered.  This clusters
238   register saves so that there are fewer pc advances.  */
239
240typedef struct {
241  rtx reg;
242  rtx saved_reg;
243  HOST_WIDE_INT cfa_offset;
244} queued_reg_save;
245
246
247static vec<queued_reg_save> queued_reg_saves;
248
249/* True if any CFI directives were emitted at the current insn.  */
250static bool any_cfis_emitted;
251
252/* Short-hand for commonly used register numbers.  */
253static unsigned dw_stack_pointer_regnum;
254static unsigned dw_frame_pointer_regnum;
255
256/* Hook used by __throw.  */
257
258rtx
259expand_builtin_dwarf_sp_column (void)
260{
261  unsigned int dwarf_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
262  return GEN_INT (DWARF2_FRAME_REG_OUT (dwarf_regnum, 1));
263}
264
265/* MEM is a memory reference for the register size table, each element of
266   which has mode MODE.  Initialize column C as a return address column.  */
267
268static void
269init_return_column_size (machine_mode mode, rtx mem, unsigned int c)
270{
271  HOST_WIDE_INT offset = c * GET_MODE_SIZE (mode);
272  HOST_WIDE_INT size = GET_MODE_SIZE (Pmode);
273  emit_move_insn (adjust_address (mem, mode, offset),
274		  gen_int_mode (size, mode));
275}
276
277/* Datastructure used by expand_builtin_init_dwarf_reg_sizes and
278   init_one_dwarf_reg_size to communicate on what has been done by the
279   latter.  */
280
281typedef struct
282{
283  /* Whether the dwarf return column was initialized.  */
284  bool wrote_return_column;
285
286  /* For each hard register REGNO, whether init_one_dwarf_reg_size
287     was given REGNO to process already.  */
288  bool processed_regno [FIRST_PSEUDO_REGISTER];
289
290} init_one_dwarf_reg_state;
291
292/* Helper for expand_builtin_init_dwarf_reg_sizes.  Generate code to
293   initialize the dwarf register size table entry corresponding to register
294   REGNO in REGMODE.  TABLE is the table base address, SLOTMODE is the mode to
295   use for the size entry to initialize, and INIT_STATE is the communication
296   datastructure conveying what we're doing to our caller.  */
297
298static
299void init_one_dwarf_reg_size (int regno, machine_mode regmode,
300			      rtx table, machine_mode slotmode,
301			      init_one_dwarf_reg_state *init_state)
302{
303  const unsigned int dnum = DWARF_FRAME_REGNUM (regno);
304  const unsigned int rnum = DWARF2_FRAME_REG_OUT (dnum, 1);
305  const unsigned int dcol = DWARF_REG_TO_UNWIND_COLUMN (rnum);
306
307  const HOST_WIDE_INT slotoffset = dcol * GET_MODE_SIZE (slotmode);
308  const HOST_WIDE_INT regsize = GET_MODE_SIZE (regmode);
309
310  init_state->processed_regno[regno] = true;
311
312  if (rnum >= DWARF_FRAME_REGISTERS)
313    return;
314
315  if (dnum == DWARF_FRAME_RETURN_COLUMN)
316    {
317      if (regmode == VOIDmode)
318	return;
319      init_state->wrote_return_column = true;
320    }
321
322  if (slotoffset < 0)
323    return;
324
325  emit_move_insn (adjust_address (table, slotmode, slotoffset),
326		  gen_int_mode (regsize, slotmode));
327}
328
329/* Generate code to initialize the dwarf register size table located
330   at the provided ADDRESS.  */
331
332void
333expand_builtin_init_dwarf_reg_sizes (tree address)
334{
335  unsigned int i;
336  machine_mode mode = TYPE_MODE (char_type_node);
337  rtx addr = expand_normal (address);
338  rtx mem = gen_rtx_MEM (BLKmode, addr);
339
340  init_one_dwarf_reg_state init_state;
341
342  memset ((char *)&init_state, 0, sizeof (init_state));
343
344  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
345    {
346      machine_mode save_mode;
347      rtx span;
348
349      /* No point in processing a register multiple times.  This could happen
350	 with register spans, e.g. when a reg is first processed as a piece of
351	 a span, then as a register on its own later on.  */
352
353      if (init_state.processed_regno[i])
354	continue;
355
356      save_mode = targetm.dwarf_frame_reg_mode (i);
357      span = targetm.dwarf_register_span (gen_rtx_REG (save_mode, i));
358
359      if (!span)
360	init_one_dwarf_reg_size (i, save_mode, mem, mode, &init_state);
361      else
362	{
363	  for (int si = 0; si < XVECLEN (span, 0); si++)
364	    {
365	      rtx reg = XVECEXP (span, 0, si);
366
367	      init_one_dwarf_reg_size
368		(REGNO (reg), GET_MODE (reg), mem, mode, &init_state);
369	    }
370	}
371    }
372
373  if (!init_state.wrote_return_column)
374    init_return_column_size (mode, mem, DWARF_FRAME_RETURN_COLUMN);
375
376#ifdef DWARF_ALT_FRAME_RETURN_COLUMN
377  init_return_column_size (mode, mem, DWARF_ALT_FRAME_RETURN_COLUMN);
378#endif
379
380  targetm.init_dwarf_reg_sizes_extra (address);
381}
382
383
384static dw_trace_info *
385get_trace_info (rtx_insn *insn)
386{
387  dw_trace_info dummy;
388  dummy.head = insn;
389  return trace_index->find_with_hash (&dummy, INSN_UID (insn));
390}
391
392static bool
393save_point_p (rtx_insn *insn)
394{
395  /* Labels, except those that are really jump tables.  */
396  if (LABEL_P (insn))
397    return inside_basic_block_p (insn);
398
399  /* We split traces at the prologue/epilogue notes because those
400     are points at which the unwind info is usually stable.  This
401     makes it easier to find spots with identical unwind info so
402     that we can use remember/restore_state opcodes.  */
403  if (NOTE_P (insn))
404    switch (NOTE_KIND (insn))
405      {
406      case NOTE_INSN_PROLOGUE_END:
407      case NOTE_INSN_EPILOGUE_BEG:
408	return true;
409      }
410
411  return false;
412}
413
414/* Divide OFF by DWARF_CIE_DATA_ALIGNMENT, asserting no remainder.  */
415
416static inline HOST_WIDE_INT
417div_data_align (HOST_WIDE_INT off)
418{
419  HOST_WIDE_INT r = off / DWARF_CIE_DATA_ALIGNMENT;
420  gcc_assert (r * DWARF_CIE_DATA_ALIGNMENT == off);
421  return r;
422}
423
424/* Return true if we need a signed version of a given opcode
425   (e.g. DW_CFA_offset_extended_sf vs DW_CFA_offset_extended).  */
426
427static inline bool
428need_data_align_sf_opcode (HOST_WIDE_INT off)
429{
430  return DWARF_CIE_DATA_ALIGNMENT < 0 ? off > 0 : off < 0;
431}
432
433/* Return a pointer to a newly allocated Call Frame Instruction.  */
434
435static inline dw_cfi_ref
436new_cfi (void)
437{
438  dw_cfi_ref cfi = ggc_alloc<dw_cfi_node> ();
439
440  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = 0;
441  cfi->dw_cfi_oprnd2.dw_cfi_reg_num = 0;
442
443  return cfi;
444}
445
446/* Return a newly allocated CFI row, with no defined data.  */
447
448static dw_cfi_row *
449new_cfi_row (void)
450{
451  dw_cfi_row *row = ggc_cleared_alloc<dw_cfi_row> ();
452
453  row->cfa.reg = INVALID_REGNUM;
454
455  return row;
456}
457
458/* Return a copy of an existing CFI row.  */
459
460static dw_cfi_row *
461copy_cfi_row (dw_cfi_row *src)
462{
463  dw_cfi_row *dst = ggc_alloc<dw_cfi_row> ();
464
465  *dst = *src;
466  dst->reg_save = vec_safe_copy (src->reg_save);
467
468  return dst;
469}
470
471/* Generate a new label for the CFI info to refer to.  */
472
473static char *
474dwarf2out_cfi_label (void)
475{
476  int num = dwarf2out_cfi_label_num++;
477  char label[20];
478
479  ASM_GENERATE_INTERNAL_LABEL (label, "LCFI", num);
480
481  return xstrdup (label);
482}
483
484/* Add CFI either to the current insn stream or to a vector, or both.  */
485
486static void
487add_cfi (dw_cfi_ref cfi)
488{
489  any_cfis_emitted = true;
490
491  if (add_cfi_insn != NULL)
492    {
493      add_cfi_insn = emit_note_after (NOTE_INSN_CFI, add_cfi_insn);
494      NOTE_CFI (add_cfi_insn) = cfi;
495    }
496
497  if (add_cfi_vec != NULL)
498    vec_safe_push (*add_cfi_vec, cfi);
499}
500
501static void
502add_cfi_args_size (HOST_WIDE_INT size)
503{
504  dw_cfi_ref cfi = new_cfi ();
505
506  /* While we can occasionally have args_size < 0 internally, this state
507     should not persist at a point we actually need an opcode.  */
508  gcc_assert (size >= 0);
509
510  cfi->dw_cfi_opc = DW_CFA_GNU_args_size;
511  cfi->dw_cfi_oprnd1.dw_cfi_offset = size;
512
513  add_cfi (cfi);
514}
515
516static void
517add_cfi_restore (unsigned reg)
518{
519  dw_cfi_ref cfi = new_cfi ();
520
521  cfi->dw_cfi_opc = (reg & ~0x3f ? DW_CFA_restore_extended : DW_CFA_restore);
522  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
523
524  add_cfi (cfi);
525}
526
527/* Perform ROW->REG_SAVE[COLUMN] = CFI.  CFI may be null, indicating
528   that the register column is no longer saved.  */
529
530static void
531update_row_reg_save (dw_cfi_row *row, unsigned column, dw_cfi_ref cfi)
532{
533  if (vec_safe_length (row->reg_save) <= column)
534    vec_safe_grow_cleared (row->reg_save, column + 1);
535  (*row->reg_save)[column] = cfi;
536}
537
538/* This function fills in aa dw_cfa_location structure from a dwarf location
539   descriptor sequence.  */
540
541static void
542get_cfa_from_loc_descr (dw_cfa_location *cfa, struct dw_loc_descr_node *loc)
543{
544  struct dw_loc_descr_node *ptr;
545  cfa->offset = 0;
546  cfa->base_offset = 0;
547  cfa->indirect = 0;
548  cfa->reg = -1;
549
550  for (ptr = loc; ptr != NULL; ptr = ptr->dw_loc_next)
551    {
552      enum dwarf_location_atom op = ptr->dw_loc_opc;
553
554      switch (op)
555	{
556	case DW_OP_reg0:
557	case DW_OP_reg1:
558	case DW_OP_reg2:
559	case DW_OP_reg3:
560	case DW_OP_reg4:
561	case DW_OP_reg5:
562	case DW_OP_reg6:
563	case DW_OP_reg7:
564	case DW_OP_reg8:
565	case DW_OP_reg9:
566	case DW_OP_reg10:
567	case DW_OP_reg11:
568	case DW_OP_reg12:
569	case DW_OP_reg13:
570	case DW_OP_reg14:
571	case DW_OP_reg15:
572	case DW_OP_reg16:
573	case DW_OP_reg17:
574	case DW_OP_reg18:
575	case DW_OP_reg19:
576	case DW_OP_reg20:
577	case DW_OP_reg21:
578	case DW_OP_reg22:
579	case DW_OP_reg23:
580	case DW_OP_reg24:
581	case DW_OP_reg25:
582	case DW_OP_reg26:
583	case DW_OP_reg27:
584	case DW_OP_reg28:
585	case DW_OP_reg29:
586	case DW_OP_reg30:
587	case DW_OP_reg31:
588	  cfa->reg = op - DW_OP_reg0;
589	  break;
590	case DW_OP_regx:
591	  cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
592	  break;
593	case DW_OP_breg0:
594	case DW_OP_breg1:
595	case DW_OP_breg2:
596	case DW_OP_breg3:
597	case DW_OP_breg4:
598	case DW_OP_breg5:
599	case DW_OP_breg6:
600	case DW_OP_breg7:
601	case DW_OP_breg8:
602	case DW_OP_breg9:
603	case DW_OP_breg10:
604	case DW_OP_breg11:
605	case DW_OP_breg12:
606	case DW_OP_breg13:
607	case DW_OP_breg14:
608	case DW_OP_breg15:
609	case DW_OP_breg16:
610	case DW_OP_breg17:
611	case DW_OP_breg18:
612	case DW_OP_breg19:
613	case DW_OP_breg20:
614	case DW_OP_breg21:
615	case DW_OP_breg22:
616	case DW_OP_breg23:
617	case DW_OP_breg24:
618	case DW_OP_breg25:
619	case DW_OP_breg26:
620	case DW_OP_breg27:
621	case DW_OP_breg28:
622	case DW_OP_breg29:
623	case DW_OP_breg30:
624	case DW_OP_breg31:
625	  cfa->reg = op - DW_OP_breg0;
626	  cfa->base_offset = ptr->dw_loc_oprnd1.v.val_int;
627	  break;
628	case DW_OP_bregx:
629	  cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
630	  cfa->base_offset = ptr->dw_loc_oprnd2.v.val_int;
631	  break;
632	case DW_OP_deref:
633	  cfa->indirect = 1;
634	  break;
635	case DW_OP_plus_uconst:
636	  cfa->offset = ptr->dw_loc_oprnd1.v.val_unsigned;
637	  break;
638	default:
639	  gcc_unreachable ();
640	}
641    }
642}
643
644/* Find the previous value for the CFA, iteratively.  CFI is the opcode
645   to interpret, *LOC will be updated as necessary, *REMEMBER is used for
646   one level of remember/restore state processing.  */
647
648void
649lookup_cfa_1 (dw_cfi_ref cfi, dw_cfa_location *loc, dw_cfa_location *remember)
650{
651  switch (cfi->dw_cfi_opc)
652    {
653    case DW_CFA_def_cfa_offset:
654    case DW_CFA_def_cfa_offset_sf:
655      loc->offset = cfi->dw_cfi_oprnd1.dw_cfi_offset;
656      break;
657    case DW_CFA_def_cfa_register:
658      loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
659      break;
660    case DW_CFA_def_cfa:
661    case DW_CFA_def_cfa_sf:
662      loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
663      loc->offset = cfi->dw_cfi_oprnd2.dw_cfi_offset;
664      break;
665    case DW_CFA_def_cfa_expression:
666      get_cfa_from_loc_descr (loc, cfi->dw_cfi_oprnd1.dw_cfi_loc);
667      break;
668
669    case DW_CFA_remember_state:
670      gcc_assert (!remember->in_use);
671      *remember = *loc;
672      remember->in_use = 1;
673      break;
674    case DW_CFA_restore_state:
675      gcc_assert (remember->in_use);
676      *loc = *remember;
677      remember->in_use = 0;
678      break;
679
680    default:
681      break;
682    }
683}
684
685/* Determine if two dw_cfa_location structures define the same data.  */
686
687bool
688cfa_equal_p (const dw_cfa_location *loc1, const dw_cfa_location *loc2)
689{
690  return (loc1->reg == loc2->reg
691	  && loc1->offset == loc2->offset
692	  && loc1->indirect == loc2->indirect
693	  && (loc1->indirect == 0
694	      || loc1->base_offset == loc2->base_offset));
695}
696
697/* Determine if two CFI operands are identical.  */
698
699static bool
700cfi_oprnd_equal_p (enum dw_cfi_oprnd_type t, dw_cfi_oprnd *a, dw_cfi_oprnd *b)
701{
702  switch (t)
703    {
704    case dw_cfi_oprnd_unused:
705      return true;
706    case dw_cfi_oprnd_reg_num:
707      return a->dw_cfi_reg_num == b->dw_cfi_reg_num;
708    case dw_cfi_oprnd_offset:
709      return a->dw_cfi_offset == b->dw_cfi_offset;
710    case dw_cfi_oprnd_addr:
711      return (a->dw_cfi_addr == b->dw_cfi_addr
712	      || strcmp (a->dw_cfi_addr, b->dw_cfi_addr) == 0);
713    case dw_cfi_oprnd_loc:
714      return loc_descr_equal_p (a->dw_cfi_loc, b->dw_cfi_loc);
715    }
716  gcc_unreachable ();
717}
718
719/* Determine if two CFI entries are identical.  */
720
721static bool
722cfi_equal_p (dw_cfi_ref a, dw_cfi_ref b)
723{
724  enum dwarf_call_frame_info opc;
725
726  /* Make things easier for our callers, including missing operands.  */
727  if (a == b)
728    return true;
729  if (a == NULL || b == NULL)
730    return false;
731
732  /* Obviously, the opcodes must match.  */
733  opc = a->dw_cfi_opc;
734  if (opc != b->dw_cfi_opc)
735    return false;
736
737  /* Compare the two operands, re-using the type of the operands as
738     already exposed elsewhere.  */
739  return (cfi_oprnd_equal_p (dw_cfi_oprnd1_desc (opc),
740			     &a->dw_cfi_oprnd1, &b->dw_cfi_oprnd1)
741	  && cfi_oprnd_equal_p (dw_cfi_oprnd2_desc (opc),
742				&a->dw_cfi_oprnd2, &b->dw_cfi_oprnd2));
743}
744
745/* Determine if two CFI_ROW structures are identical.  */
746
747static bool
748cfi_row_equal_p (dw_cfi_row *a, dw_cfi_row *b)
749{
750  size_t i, n_a, n_b, n_max;
751
752  if (a->cfa_cfi)
753    {
754      if (!cfi_equal_p (a->cfa_cfi, b->cfa_cfi))
755	return false;
756    }
757  else if (!cfa_equal_p (&a->cfa, &b->cfa))
758    return false;
759
760  n_a = vec_safe_length (a->reg_save);
761  n_b = vec_safe_length (b->reg_save);
762  n_max = MAX (n_a, n_b);
763
764  for (i = 0; i < n_max; ++i)
765    {
766      dw_cfi_ref r_a = NULL, r_b = NULL;
767
768      if (i < n_a)
769	r_a = (*a->reg_save)[i];
770      if (i < n_b)
771	r_b = (*b->reg_save)[i];
772
773      if (!cfi_equal_p (r_a, r_b))
774        return false;
775    }
776
777  return true;
778}
779
780/* The CFA is now calculated from NEW_CFA.  Consider OLD_CFA in determining
781   what opcode to emit.  Returns the CFI opcode to effect the change, or
782   NULL if NEW_CFA == OLD_CFA.  */
783
784static dw_cfi_ref
785def_cfa_0 (dw_cfa_location *old_cfa, dw_cfa_location *new_cfa)
786{
787  dw_cfi_ref cfi;
788
789  /* If nothing changed, no need to issue any call frame instructions.  */
790  if (cfa_equal_p (old_cfa, new_cfa))
791    return NULL;
792
793  cfi = new_cfi ();
794
795  if (new_cfa->reg == old_cfa->reg && !new_cfa->indirect && !old_cfa->indirect)
796    {
797      /* Construct a "DW_CFA_def_cfa_offset <offset>" instruction, indicating
798	 the CFA register did not change but the offset did.  The data
799	 factoring for DW_CFA_def_cfa_offset_sf happens in output_cfi, or
800	 in the assembler via the .cfi_def_cfa_offset directive.  */
801      if (new_cfa->offset < 0)
802	cfi->dw_cfi_opc = DW_CFA_def_cfa_offset_sf;
803      else
804	cfi->dw_cfi_opc = DW_CFA_def_cfa_offset;
805      cfi->dw_cfi_oprnd1.dw_cfi_offset = new_cfa->offset;
806    }
807  else if (new_cfa->offset == old_cfa->offset
808	   && old_cfa->reg != INVALID_REGNUM
809	   && !new_cfa->indirect
810	   && !old_cfa->indirect)
811    {
812      /* Construct a "DW_CFA_def_cfa_register <register>" instruction,
813	 indicating the CFA register has changed to <register> but the
814	 offset has not changed.  */
815      cfi->dw_cfi_opc = DW_CFA_def_cfa_register;
816      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = new_cfa->reg;
817    }
818  else if (new_cfa->indirect == 0)
819    {
820      /* Construct a "DW_CFA_def_cfa <register> <offset>" instruction,
821	 indicating the CFA register has changed to <register> with
822	 the specified offset.  The data factoring for DW_CFA_def_cfa_sf
823	 happens in output_cfi, or in the assembler via the .cfi_def_cfa
824	 directive.  */
825      if (new_cfa->offset < 0)
826	cfi->dw_cfi_opc = DW_CFA_def_cfa_sf;
827      else
828	cfi->dw_cfi_opc = DW_CFA_def_cfa;
829      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = new_cfa->reg;
830      cfi->dw_cfi_oprnd2.dw_cfi_offset = new_cfa->offset;
831    }
832  else
833    {
834      /* Construct a DW_CFA_def_cfa_expression instruction to
835	 calculate the CFA using a full location expression since no
836	 register-offset pair is available.  */
837      struct dw_loc_descr_node *loc_list;
838
839      cfi->dw_cfi_opc = DW_CFA_def_cfa_expression;
840      loc_list = build_cfa_loc (new_cfa, 0);
841      cfi->dw_cfi_oprnd1.dw_cfi_loc = loc_list;
842    }
843
844  return cfi;
845}
846
847/* Similarly, but take OLD_CFA from CUR_ROW, and update it after the fact.  */
848
849static void
850def_cfa_1 (dw_cfa_location *new_cfa)
851{
852  dw_cfi_ref cfi;
853
854  if (cur_trace->cfa_store.reg == new_cfa->reg && new_cfa->indirect == 0)
855    cur_trace->cfa_store.offset = new_cfa->offset;
856
857  cfi = def_cfa_0 (&cur_row->cfa, new_cfa);
858  if (cfi)
859    {
860      cur_row->cfa = *new_cfa;
861      cur_row->cfa_cfi = (cfi->dw_cfi_opc == DW_CFA_def_cfa_expression
862			  ? cfi : NULL);
863
864      add_cfi (cfi);
865    }
866}
867
868/* Add the CFI for saving a register.  REG is the CFA column number.
869   If SREG is -1, the register is saved at OFFSET from the CFA;
870   otherwise it is saved in SREG.  */
871
872static void
873reg_save (unsigned int reg, unsigned int sreg, HOST_WIDE_INT offset)
874{
875  dw_fde_ref fde = cfun ? cfun->fde : NULL;
876  dw_cfi_ref cfi = new_cfi ();
877
878  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
879
880  /* When stack is aligned, store REG using DW_CFA_expression with FP.  */
881  if (fde
882      && fde->stack_realign
883      && sreg == INVALID_REGNUM)
884    {
885      cfi->dw_cfi_opc = DW_CFA_expression;
886      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
887      cfi->dw_cfi_oprnd2.dw_cfi_loc
888	= build_cfa_aligned_loc (&cur_row->cfa, offset,
889				 fde->stack_realignment);
890    }
891  else if (sreg == INVALID_REGNUM)
892    {
893      if (need_data_align_sf_opcode (offset))
894	cfi->dw_cfi_opc = DW_CFA_offset_extended_sf;
895      else if (reg & ~0x3f)
896	cfi->dw_cfi_opc = DW_CFA_offset_extended;
897      else
898	cfi->dw_cfi_opc = DW_CFA_offset;
899      cfi->dw_cfi_oprnd2.dw_cfi_offset = offset;
900    }
901  else if (sreg == reg)
902    {
903      /* While we could emit something like DW_CFA_same_value or
904	 DW_CFA_restore, we never expect to see something like that
905	 in a prologue.  This is more likely to be a bug.  A backend
906	 can always bypass this by using REG_CFA_RESTORE directly.  */
907      gcc_unreachable ();
908    }
909  else
910    {
911      cfi->dw_cfi_opc = DW_CFA_register;
912      cfi->dw_cfi_oprnd2.dw_cfi_reg_num = sreg;
913    }
914
915  add_cfi (cfi);
916  update_row_reg_save (cur_row, reg, cfi);
917}
918
919/* A subroutine of scan_trace.  Check INSN for a REG_ARGS_SIZE note
920   and adjust data structures to match.  */
921
922static void
923notice_args_size (rtx insn)
924{
925  HOST_WIDE_INT args_size, delta;
926  rtx note;
927
928  note = find_reg_note (insn, REG_ARGS_SIZE, NULL);
929  if (note == NULL)
930    return;
931
932  args_size = INTVAL (XEXP (note, 0));
933  delta = args_size - cur_trace->end_true_args_size;
934  if (delta == 0)
935    return;
936
937  cur_trace->end_true_args_size = args_size;
938
939  /* If the CFA is computed off the stack pointer, then we must adjust
940     the computation of the CFA as well.  */
941  if (cur_cfa->reg == dw_stack_pointer_regnum)
942    {
943      gcc_assert (!cur_cfa->indirect);
944
945      /* Convert a change in args_size (always a positive in the
946	 direction of stack growth) to a change in stack pointer.  */
947#ifndef STACK_GROWS_DOWNWARD
948      delta = -delta;
949#endif
950      cur_cfa->offset += delta;
951    }
952}
953
954/* A subroutine of scan_trace.  INSN is can_throw_internal.  Update the
955   data within the trace related to EH insns and args_size.  */
956
957static void
958notice_eh_throw (rtx_insn *insn)
959{
960  HOST_WIDE_INT args_size;
961
962  args_size = cur_trace->end_true_args_size;
963  if (cur_trace->eh_head == NULL)
964    {
965      cur_trace->eh_head = insn;
966      cur_trace->beg_delay_args_size = args_size;
967      cur_trace->end_delay_args_size = args_size;
968    }
969  else if (cur_trace->end_delay_args_size != args_size)
970    {
971      cur_trace->end_delay_args_size = args_size;
972
973      /* ??? If the CFA is the stack pointer, search backward for the last
974	 CFI note and insert there.  Given that the stack changed for the
975	 args_size change, there *must* be such a note in between here and
976	 the last eh insn.  */
977      add_cfi_args_size (args_size);
978    }
979}
980
981/* Short-hand inline for the very common D_F_R (REGNO (x)) operation.  */
982/* ??? This ought to go into dwarf2out.h, except that dwarf2out.h is
983   used in places where rtl is prohibited.  */
984
985static inline unsigned
986dwf_regno (const_rtx reg)
987{
988  gcc_assert (REGNO (reg) < FIRST_PSEUDO_REGISTER);
989  return DWARF_FRAME_REGNUM (REGNO (reg));
990}
991
992/* Compare X and Y for equivalence.  The inputs may be REGs or PC_RTX.  */
993
994static bool
995compare_reg_or_pc (rtx x, rtx y)
996{
997  if (REG_P (x) && REG_P (y))
998    return REGNO (x) == REGNO (y);
999  return x == y;
1000}
1001
1002/* Record SRC as being saved in DEST.  DEST may be null to delete an
1003   existing entry.  SRC may be a register or PC_RTX.  */
1004
1005static void
1006record_reg_saved_in_reg (rtx dest, rtx src)
1007{
1008  reg_saved_in_data *elt;
1009  size_t i;
1010
1011  FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, i, elt)
1012    if (compare_reg_or_pc (elt->orig_reg, src))
1013      {
1014	if (dest == NULL)
1015	  cur_trace->regs_saved_in_regs.unordered_remove (i);
1016	else
1017	  elt->saved_in_reg = dest;
1018	return;
1019      }
1020
1021  if (dest == NULL)
1022    return;
1023
1024  reg_saved_in_data e = {src, dest};
1025  cur_trace->regs_saved_in_regs.safe_push (e);
1026}
1027
1028/* Add an entry to QUEUED_REG_SAVES saying that REG is now saved at
1029   SREG, or if SREG is NULL then it is saved at OFFSET to the CFA.  */
1030
1031static void
1032queue_reg_save (rtx reg, rtx sreg, HOST_WIDE_INT offset)
1033{
1034  queued_reg_save *q;
1035  queued_reg_save e = {reg, sreg, offset};
1036  size_t i;
1037
1038  /* Duplicates waste space, but it's also necessary to remove them
1039     for correctness, since the queue gets output in reverse order.  */
1040  FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
1041    if (compare_reg_or_pc (q->reg, reg))
1042      {
1043	*q = e;
1044	return;
1045      }
1046
1047  queued_reg_saves.safe_push (e);
1048}
1049
1050/* Output all the entries in QUEUED_REG_SAVES.  */
1051
1052static void
1053dwarf2out_flush_queued_reg_saves (void)
1054{
1055  queued_reg_save *q;
1056  size_t i;
1057
1058  FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
1059    {
1060      unsigned int reg, sreg;
1061
1062      record_reg_saved_in_reg (q->saved_reg, q->reg);
1063
1064      if (q->reg == pc_rtx)
1065	reg = DWARF_FRAME_RETURN_COLUMN;
1066      else
1067        reg = dwf_regno (q->reg);
1068      if (q->saved_reg)
1069	sreg = dwf_regno (q->saved_reg);
1070      else
1071	sreg = INVALID_REGNUM;
1072      reg_save (reg, sreg, q->cfa_offset);
1073    }
1074
1075  queued_reg_saves.truncate (0);
1076}
1077
1078/* Does INSN clobber any register which QUEUED_REG_SAVES lists a saved
1079   location for?  Or, does it clobber a register which we've previously
1080   said that some other register is saved in, and for which we now
1081   have a new location for?  */
1082
1083static bool
1084clobbers_queued_reg_save (const_rtx insn)
1085{
1086  queued_reg_save *q;
1087  size_t iq;
1088
1089  FOR_EACH_VEC_ELT (queued_reg_saves, iq, q)
1090    {
1091      size_t ir;
1092      reg_saved_in_data *rir;
1093
1094      if (modified_in_p (q->reg, insn))
1095	return true;
1096
1097      FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, ir, rir)
1098	if (compare_reg_or_pc (q->reg, rir->orig_reg)
1099	    && modified_in_p (rir->saved_in_reg, insn))
1100	  return true;
1101    }
1102
1103  return false;
1104}
1105
1106/* What register, if any, is currently saved in REG?  */
1107
1108static rtx
1109reg_saved_in (rtx reg)
1110{
1111  unsigned int regn = REGNO (reg);
1112  queued_reg_save *q;
1113  reg_saved_in_data *rir;
1114  size_t i;
1115
1116  FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
1117    if (q->saved_reg && regn == REGNO (q->saved_reg))
1118      return q->reg;
1119
1120  FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, i, rir)
1121    if (regn == REGNO (rir->saved_in_reg))
1122      return rir->orig_reg;
1123
1124  return NULL_RTX;
1125}
1126
1127/* A subroutine of dwarf2out_frame_debug, process a REG_DEF_CFA note.  */
1128
1129static void
1130dwarf2out_frame_debug_def_cfa (rtx pat)
1131{
1132  memset (cur_cfa, 0, sizeof (*cur_cfa));
1133
1134  if (GET_CODE (pat) == PLUS)
1135    {
1136      cur_cfa->offset = INTVAL (XEXP (pat, 1));
1137      pat = XEXP (pat, 0);
1138    }
1139  if (MEM_P (pat))
1140    {
1141      cur_cfa->indirect = 1;
1142      pat = XEXP (pat, 0);
1143      if (GET_CODE (pat) == PLUS)
1144	{
1145	  cur_cfa->base_offset = INTVAL (XEXP (pat, 1));
1146	  pat = XEXP (pat, 0);
1147	}
1148    }
1149  /* ??? If this fails, we could be calling into the _loc functions to
1150     define a full expression.  So far no port does that.  */
1151  gcc_assert (REG_P (pat));
1152  cur_cfa->reg = dwf_regno (pat);
1153}
1154
1155/* A subroutine of dwarf2out_frame_debug, process a REG_ADJUST_CFA note.  */
1156
1157static void
1158dwarf2out_frame_debug_adjust_cfa (rtx pat)
1159{
1160  rtx src, dest;
1161
1162  gcc_assert (GET_CODE (pat) == SET);
1163  dest = XEXP (pat, 0);
1164  src = XEXP (pat, 1);
1165
1166  switch (GET_CODE (src))
1167    {
1168    case PLUS:
1169      gcc_assert (dwf_regno (XEXP (src, 0)) == cur_cfa->reg);
1170      cur_cfa->offset -= INTVAL (XEXP (src, 1));
1171      break;
1172
1173    case REG:
1174      break;
1175
1176    default:
1177      gcc_unreachable ();
1178    }
1179
1180  cur_cfa->reg = dwf_regno (dest);
1181  gcc_assert (cur_cfa->indirect == 0);
1182}
1183
1184/* A subroutine of dwarf2out_frame_debug, process a REG_CFA_OFFSET note.  */
1185
1186static void
1187dwarf2out_frame_debug_cfa_offset (rtx set)
1188{
1189  HOST_WIDE_INT offset;
1190  rtx src, addr, span;
1191  unsigned int sregno;
1192
1193  src = XEXP (set, 1);
1194  addr = XEXP (set, 0);
1195  gcc_assert (MEM_P (addr));
1196  addr = XEXP (addr, 0);
1197
1198  /* As documented, only consider extremely simple addresses.  */
1199  switch (GET_CODE (addr))
1200    {
1201    case REG:
1202      gcc_assert (dwf_regno (addr) == cur_cfa->reg);
1203      offset = -cur_cfa->offset;
1204      break;
1205    case PLUS:
1206      gcc_assert (dwf_regno (XEXP (addr, 0)) == cur_cfa->reg);
1207      offset = INTVAL (XEXP (addr, 1)) - cur_cfa->offset;
1208      break;
1209    default:
1210      gcc_unreachable ();
1211    }
1212
1213  if (src == pc_rtx)
1214    {
1215      span = NULL;
1216      sregno = DWARF_FRAME_RETURN_COLUMN;
1217    }
1218  else
1219    {
1220      span = targetm.dwarf_register_span (src);
1221      sregno = dwf_regno (src);
1222    }
1223
1224  /* ??? We'd like to use queue_reg_save, but we need to come up with
1225     a different flushing heuristic for epilogues.  */
1226  if (!span)
1227    reg_save (sregno, INVALID_REGNUM, offset);
1228  else
1229    {
1230      /* We have a PARALLEL describing where the contents of SRC live.
1231   	 Adjust the offset for each piece of the PARALLEL.  */
1232      HOST_WIDE_INT span_offset = offset;
1233
1234      gcc_assert (GET_CODE (span) == PARALLEL);
1235
1236      const int par_len = XVECLEN (span, 0);
1237      for (int par_index = 0; par_index < par_len; par_index++)
1238	{
1239	  rtx elem = XVECEXP (span, 0, par_index);
1240	  sregno = dwf_regno (src);
1241	  reg_save (sregno, INVALID_REGNUM, span_offset);
1242	  span_offset += GET_MODE_SIZE (GET_MODE (elem));
1243	}
1244    }
1245}
1246
1247/* A subroutine of dwarf2out_frame_debug, process a REG_CFA_REGISTER note.  */
1248
1249static void
1250dwarf2out_frame_debug_cfa_register (rtx set)
1251{
1252  rtx src, dest;
1253  unsigned sregno, dregno;
1254
1255  src = XEXP (set, 1);
1256  dest = XEXP (set, 0);
1257
1258  record_reg_saved_in_reg (dest, src);
1259  if (src == pc_rtx)
1260    sregno = DWARF_FRAME_RETURN_COLUMN;
1261  else
1262    sregno = dwf_regno (src);
1263
1264  dregno = dwf_regno (dest);
1265
1266  /* ??? We'd like to use queue_reg_save, but we need to come up with
1267     a different flushing heuristic for epilogues.  */
1268  reg_save (sregno, dregno, 0);
1269}
1270
1271/* A subroutine of dwarf2out_frame_debug, process a REG_CFA_EXPRESSION note. */
1272
1273static void
1274dwarf2out_frame_debug_cfa_expression (rtx set)
1275{
1276  rtx src, dest, span;
1277  dw_cfi_ref cfi = new_cfi ();
1278  unsigned regno;
1279
1280  dest = SET_DEST (set);
1281  src = SET_SRC (set);
1282
1283  gcc_assert (REG_P (src));
1284  gcc_assert (MEM_P (dest));
1285
1286  span = targetm.dwarf_register_span (src);
1287  gcc_assert (!span);
1288
1289  regno = dwf_regno (src);
1290
1291  cfi->dw_cfi_opc = DW_CFA_expression;
1292  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = regno;
1293  cfi->dw_cfi_oprnd2.dw_cfi_loc
1294    = mem_loc_descriptor (XEXP (dest, 0), get_address_mode (dest),
1295			  GET_MODE (dest), VAR_INIT_STATUS_INITIALIZED);
1296
1297  /* ??? We'd like to use queue_reg_save, were the interface different,
1298     and, as above, we could manage flushing for epilogues.  */
1299  add_cfi (cfi);
1300  update_row_reg_save (cur_row, regno, cfi);
1301}
1302
1303/* A subroutine of dwarf2out_frame_debug, process a REG_CFA_RESTORE note.  */
1304
1305static void
1306dwarf2out_frame_debug_cfa_restore (rtx reg)
1307{
1308  gcc_assert (REG_P (reg));
1309
1310  rtx span = targetm.dwarf_register_span (reg);
1311  if (!span)
1312    {
1313      unsigned int regno = dwf_regno (reg);
1314      add_cfi_restore (regno);
1315      update_row_reg_save (cur_row, regno, NULL);
1316    }
1317  else
1318    {
1319      /* We have a PARALLEL describing where the contents of REG live.
1320	 Restore the register for each piece of the PARALLEL.  */
1321      gcc_assert (GET_CODE (span) == PARALLEL);
1322
1323      const int par_len = XVECLEN (span, 0);
1324      for (int par_index = 0; par_index < par_len; par_index++)
1325	{
1326	  reg = XVECEXP (span, 0, par_index);
1327	  gcc_assert (REG_P (reg));
1328	  unsigned int regno = dwf_regno (reg);
1329	  add_cfi_restore (regno);
1330	  update_row_reg_save (cur_row, regno, NULL);
1331	}
1332    }
1333}
1334
1335/* A subroutine of dwarf2out_frame_debug, process a REG_CFA_WINDOW_SAVE.
1336   ??? Perhaps we should note in the CIE where windows are saved (instead of
1337   assuming 0(cfa)) and what registers are in the window.  */
1338
1339static void
1340dwarf2out_frame_debug_cfa_window_save (void)
1341{
1342  dw_cfi_ref cfi = new_cfi ();
1343
1344  cfi->dw_cfi_opc = DW_CFA_GNU_window_save;
1345  add_cfi (cfi);
1346}
1347
1348/* Record call frame debugging information for an expression EXPR,
1349   which either sets SP or FP (adjusting how we calculate the frame
1350   address) or saves a register to the stack or another register.
1351   LABEL indicates the address of EXPR.
1352
1353   This function encodes a state machine mapping rtxes to actions on
1354   cfa, cfa_store, and cfa_temp.reg.  We describe these rules so
1355   users need not read the source code.
1356
1357  The High-Level Picture
1358
1359  Changes in the register we use to calculate the CFA: Currently we
1360  assume that if you copy the CFA register into another register, we
1361  should take the other one as the new CFA register; this seems to
1362  work pretty well.  If it's wrong for some target, it's simple
1363  enough not to set RTX_FRAME_RELATED_P on the insn in question.
1364
1365  Changes in the register we use for saving registers to the stack:
1366  This is usually SP, but not always.  Again, we deduce that if you
1367  copy SP into another register (and SP is not the CFA register),
1368  then the new register is the one we will be using for register
1369  saves.  This also seems to work.
1370
1371  Register saves: There's not much guesswork about this one; if
1372  RTX_FRAME_RELATED_P is set on an insn which modifies memory, it's a
1373  register save, and the register used to calculate the destination
1374  had better be the one we think we're using for this purpose.
1375  It's also assumed that a copy from a call-saved register to another
1376  register is saving that register if RTX_FRAME_RELATED_P is set on
1377  that instruction.  If the copy is from a call-saved register to
1378  the *same* register, that means that the register is now the same
1379  value as in the caller.
1380
1381  Except: If the register being saved is the CFA register, and the
1382  offset is nonzero, we are saving the CFA, so we assume we have to
1383  use DW_CFA_def_cfa_expression.  If the offset is 0, we assume that
1384  the intent is to save the value of SP from the previous frame.
1385
1386  In addition, if a register has previously been saved to a different
1387  register,
1388
1389  Invariants / Summaries of Rules
1390
1391  cfa	       current rule for calculating the CFA.  It usually
1392	       consists of a register and an offset.  This is
1393	       actually stored in *cur_cfa, but abbreviated
1394	       for the purposes of this documentation.
1395  cfa_store    register used by prologue code to save things to the stack
1396	       cfa_store.offset is the offset from the value of
1397	       cfa_store.reg to the actual CFA
1398  cfa_temp     register holding an integral value.  cfa_temp.offset
1399	       stores the value, which will be used to adjust the
1400	       stack pointer.  cfa_temp is also used like cfa_store,
1401	       to track stores to the stack via fp or a temp reg.
1402
1403  Rules  1- 4: Setting a register's value to cfa.reg or an expression
1404	       with cfa.reg as the first operand changes the cfa.reg and its
1405	       cfa.offset.  Rule 1 and 4 also set cfa_temp.reg and
1406	       cfa_temp.offset.
1407
1408  Rules  6- 9: Set a non-cfa.reg register value to a constant or an
1409	       expression yielding a constant.  This sets cfa_temp.reg
1410	       and cfa_temp.offset.
1411
1412  Rule 5:      Create a new register cfa_store used to save items to the
1413	       stack.
1414
1415  Rules 10-14: Save a register to the stack.  Define offset as the
1416	       difference of the original location and cfa_store's
1417	       location (or cfa_temp's location if cfa_temp is used).
1418
1419  Rules 16-20: If AND operation happens on sp in prologue, we assume
1420	       stack is realigned.  We will use a group of DW_OP_XXX
1421	       expressions to represent the location of the stored
1422	       register instead of CFA+offset.
1423
1424  The Rules
1425
1426  "{a,b}" indicates a choice of a xor b.
1427  "<reg>:cfa.reg" indicates that <reg> must equal cfa.reg.
1428
1429  Rule 1:
1430  (set <reg1> <reg2>:cfa.reg)
1431  effects: cfa.reg = <reg1>
1432	   cfa.offset unchanged
1433	   cfa_temp.reg = <reg1>
1434	   cfa_temp.offset = cfa.offset
1435
1436  Rule 2:
1437  (set sp ({minus,plus,losum} {sp,fp}:cfa.reg
1438			      {<const_int>,<reg>:cfa_temp.reg}))
1439  effects: cfa.reg = sp if fp used
1440	   cfa.offset += {+/- <const_int>, cfa_temp.offset} if cfa.reg==sp
1441	   cfa_store.offset += {+/- <const_int>, cfa_temp.offset}
1442	     if cfa_store.reg==sp
1443
1444  Rule 3:
1445  (set fp ({minus,plus,losum} <reg>:cfa.reg <const_int>))
1446  effects: cfa.reg = fp
1447	   cfa_offset += +/- <const_int>
1448
1449  Rule 4:
1450  (set <reg1> ({plus,losum} <reg2>:cfa.reg <const_int>))
1451  constraints: <reg1> != fp
1452	       <reg1> != sp
1453  effects: cfa.reg = <reg1>
1454	   cfa_temp.reg = <reg1>
1455	   cfa_temp.offset = cfa.offset
1456
1457  Rule 5:
1458  (set <reg1> (plus <reg2>:cfa_temp.reg sp:cfa.reg))
1459  constraints: <reg1> != fp
1460	       <reg1> != sp
1461  effects: cfa_store.reg = <reg1>
1462	   cfa_store.offset = cfa.offset - cfa_temp.offset
1463
1464  Rule 6:
1465  (set <reg> <const_int>)
1466  effects: cfa_temp.reg = <reg>
1467	   cfa_temp.offset = <const_int>
1468
1469  Rule 7:
1470  (set <reg1>:cfa_temp.reg (ior <reg2>:cfa_temp.reg <const_int>))
1471  effects: cfa_temp.reg = <reg1>
1472	   cfa_temp.offset |= <const_int>
1473
1474  Rule 8:
1475  (set <reg> (high <exp>))
1476  effects: none
1477
1478  Rule 9:
1479  (set <reg> (lo_sum <exp> <const_int>))
1480  effects: cfa_temp.reg = <reg>
1481	   cfa_temp.offset = <const_int>
1482
1483  Rule 10:
1484  (set (mem ({pre,post}_modify sp:cfa_store (???? <reg1> <const_int>))) <reg2>)
1485  effects: cfa_store.offset -= <const_int>
1486	   cfa.offset = cfa_store.offset if cfa.reg == sp
1487	   cfa.reg = sp
1488	   cfa.base_offset = -cfa_store.offset
1489
1490  Rule 11:
1491  (set (mem ({pre_inc,pre_dec,post_dec} sp:cfa_store.reg)) <reg>)
1492  effects: cfa_store.offset += -/+ mode_size(mem)
1493	   cfa.offset = cfa_store.offset if cfa.reg == sp
1494	   cfa.reg = sp
1495	   cfa.base_offset = -cfa_store.offset
1496
1497  Rule 12:
1498  (set (mem ({minus,plus,losum} <reg1>:{cfa_store,cfa_temp} <const_int>))
1499
1500       <reg2>)
1501  effects: cfa.reg = <reg1>
1502	   cfa.base_offset = -/+ <const_int> - {cfa_store,cfa_temp}.offset
1503
1504  Rule 13:
1505  (set (mem <reg1>:{cfa_store,cfa_temp}) <reg2>)
1506  effects: cfa.reg = <reg1>
1507	   cfa.base_offset = -{cfa_store,cfa_temp}.offset
1508
1509  Rule 14:
1510  (set (mem (post_inc <reg1>:cfa_temp <const_int>)) <reg2>)
1511  effects: cfa.reg = <reg1>
1512	   cfa.base_offset = -cfa_temp.offset
1513	   cfa_temp.offset -= mode_size(mem)
1514
1515  Rule 15:
1516  (set <reg> {unspec, unspec_volatile})
1517  effects: target-dependent
1518
1519  Rule 16:
1520  (set sp (and: sp <const_int>))
1521  constraints: cfa_store.reg == sp
1522  effects: cfun->fde.stack_realign = 1
1523           cfa_store.offset = 0
1524	   fde->drap_reg = cfa.reg if cfa.reg != sp and cfa.reg != fp
1525
1526  Rule 17:
1527  (set (mem ({pre_inc, pre_dec} sp)) (mem (plus (cfa.reg) (const_int))))
1528  effects: cfa_store.offset += -/+ mode_size(mem)
1529
1530  Rule 18:
1531  (set (mem ({pre_inc, pre_dec} sp)) fp)
1532  constraints: fde->stack_realign == 1
1533  effects: cfa_store.offset = 0
1534	   cfa.reg != HARD_FRAME_POINTER_REGNUM
1535
1536  Rule 19:
1537  (set (mem ({pre_inc, pre_dec} sp)) cfa.reg)
1538  constraints: fde->stack_realign == 1
1539               && cfa.offset == 0
1540               && cfa.indirect == 0
1541               && cfa.reg != HARD_FRAME_POINTER_REGNUM
1542  effects: Use DW_CFA_def_cfa_expression to define cfa
1543  	   cfa.reg == fde->drap_reg  */
1544
1545static void
1546dwarf2out_frame_debug_expr (rtx expr)
1547{
1548  rtx src, dest, span;
1549  HOST_WIDE_INT offset;
1550  dw_fde_ref fde;
1551
1552  /* If RTX_FRAME_RELATED_P is set on a PARALLEL, process each member of
1553     the PARALLEL independently. The first element is always processed if
1554     it is a SET. This is for backward compatibility.   Other elements
1555     are processed only if they are SETs and the RTX_FRAME_RELATED_P
1556     flag is set in them.  */
1557  if (GET_CODE (expr) == PARALLEL || GET_CODE (expr) == SEQUENCE)
1558    {
1559      int par_index;
1560      int limit = XVECLEN (expr, 0);
1561      rtx elem;
1562
1563      /* PARALLELs have strict read-modify-write semantics, so we
1564	 ought to evaluate every rvalue before changing any lvalue.
1565	 It's cumbersome to do that in general, but there's an
1566	 easy approximation that is enough for all current users:
1567	 handle register saves before register assignments.  */
1568      if (GET_CODE (expr) == PARALLEL)
1569	for (par_index = 0; par_index < limit; par_index++)
1570	  {
1571	    elem = XVECEXP (expr, 0, par_index);
1572	    if (GET_CODE (elem) == SET
1573		&& MEM_P (SET_DEST (elem))
1574		&& (RTX_FRAME_RELATED_P (elem) || par_index == 0))
1575	      dwarf2out_frame_debug_expr (elem);
1576	  }
1577
1578      for (par_index = 0; par_index < limit; par_index++)
1579	{
1580	  elem = XVECEXP (expr, 0, par_index);
1581	  if (GET_CODE (elem) == SET
1582	      && (!MEM_P (SET_DEST (elem)) || GET_CODE (expr) == SEQUENCE)
1583	      && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
1584	    dwarf2out_frame_debug_expr (elem);
1585	}
1586      return;
1587    }
1588
1589  gcc_assert (GET_CODE (expr) == SET);
1590
1591  src = SET_SRC (expr);
1592  dest = SET_DEST (expr);
1593
1594  if (REG_P (src))
1595    {
1596      rtx rsi = reg_saved_in (src);
1597      if (rsi)
1598	src = rsi;
1599    }
1600
1601  fde = cfun->fde;
1602
1603  switch (GET_CODE (dest))
1604    {
1605    case REG:
1606      switch (GET_CODE (src))
1607	{
1608	  /* Setting FP from SP.  */
1609	case REG:
1610	  if (cur_cfa->reg == dwf_regno (src))
1611	    {
1612	      /* Rule 1 */
1613	      /* Update the CFA rule wrt SP or FP.  Make sure src is
1614		 relative to the current CFA register.
1615
1616		 We used to require that dest be either SP or FP, but the
1617		 ARM copies SP to a temporary register, and from there to
1618		 FP.  So we just rely on the backends to only set
1619		 RTX_FRAME_RELATED_P on appropriate insns.  */
1620	      cur_cfa->reg = dwf_regno (dest);
1621	      cur_trace->cfa_temp.reg = cur_cfa->reg;
1622	      cur_trace->cfa_temp.offset = cur_cfa->offset;
1623	    }
1624	  else
1625	    {
1626	      /* Saving a register in a register.  */
1627	      gcc_assert (!fixed_regs [REGNO (dest)]
1628			  /* For the SPARC and its register window.  */
1629			  || (dwf_regno (src) == DWARF_FRAME_RETURN_COLUMN));
1630
1631              /* After stack is aligned, we can only save SP in FP
1632		 if drap register is used.  In this case, we have
1633		 to restore stack pointer with the CFA value and we
1634		 don't generate this DWARF information.  */
1635	      if (fde
1636		  && fde->stack_realign
1637		  && REGNO (src) == STACK_POINTER_REGNUM)
1638		gcc_assert (REGNO (dest) == HARD_FRAME_POINTER_REGNUM
1639			    && fde->drap_reg != INVALID_REGNUM
1640			    && cur_cfa->reg != dwf_regno (src));
1641	      else
1642		queue_reg_save (src, dest, 0);
1643	    }
1644	  break;
1645
1646	case PLUS:
1647	case MINUS:
1648	case LO_SUM:
1649	  if (dest == stack_pointer_rtx)
1650	    {
1651	      /* Rule 2 */
1652	      /* Adjusting SP.  */
1653	      switch (GET_CODE (XEXP (src, 1)))
1654		{
1655		case CONST_INT:
1656		  offset = INTVAL (XEXP (src, 1));
1657		  break;
1658		case REG:
1659		  gcc_assert (dwf_regno (XEXP (src, 1))
1660			      == cur_trace->cfa_temp.reg);
1661		  offset = cur_trace->cfa_temp.offset;
1662		  break;
1663		default:
1664		  gcc_unreachable ();
1665		}
1666
1667	      if (XEXP (src, 0) == hard_frame_pointer_rtx)
1668		{
1669		  /* Restoring SP from FP in the epilogue.  */
1670		  gcc_assert (cur_cfa->reg == dw_frame_pointer_regnum);
1671		  cur_cfa->reg = dw_stack_pointer_regnum;
1672		}
1673	      else if (GET_CODE (src) == LO_SUM)
1674		/* Assume we've set the source reg of the LO_SUM from sp.  */
1675		;
1676	      else
1677		gcc_assert (XEXP (src, 0) == stack_pointer_rtx);
1678
1679	      if (GET_CODE (src) != MINUS)
1680		offset = -offset;
1681	      if (cur_cfa->reg == dw_stack_pointer_regnum)
1682		cur_cfa->offset += offset;
1683	      if (cur_trace->cfa_store.reg == dw_stack_pointer_regnum)
1684		cur_trace->cfa_store.offset += offset;
1685	    }
1686	  else if (dest == hard_frame_pointer_rtx)
1687	    {
1688	      /* Rule 3 */
1689	      /* Either setting the FP from an offset of the SP,
1690		 or adjusting the FP */
1691	      gcc_assert (frame_pointer_needed);
1692
1693	      gcc_assert (REG_P (XEXP (src, 0))
1694			  && dwf_regno (XEXP (src, 0)) == cur_cfa->reg
1695			  && CONST_INT_P (XEXP (src, 1)));
1696	      offset = INTVAL (XEXP (src, 1));
1697	      if (GET_CODE (src) != MINUS)
1698		offset = -offset;
1699	      cur_cfa->offset += offset;
1700	      cur_cfa->reg = dw_frame_pointer_regnum;
1701	    }
1702	  else
1703	    {
1704	      gcc_assert (GET_CODE (src) != MINUS);
1705
1706	      /* Rule 4 */
1707	      if (REG_P (XEXP (src, 0))
1708		  && dwf_regno (XEXP (src, 0)) == cur_cfa->reg
1709		  && CONST_INT_P (XEXP (src, 1)))
1710		{
1711		  /* Setting a temporary CFA register that will be copied
1712		     into the FP later on.  */
1713		  offset = - INTVAL (XEXP (src, 1));
1714		  cur_cfa->offset += offset;
1715		  cur_cfa->reg = dwf_regno (dest);
1716		  /* Or used to save regs to the stack.  */
1717		  cur_trace->cfa_temp.reg = cur_cfa->reg;
1718		  cur_trace->cfa_temp.offset = cur_cfa->offset;
1719		}
1720
1721	      /* Rule 5 */
1722	      else if (REG_P (XEXP (src, 0))
1723		       && dwf_regno (XEXP (src, 0)) == cur_trace->cfa_temp.reg
1724		       && XEXP (src, 1) == stack_pointer_rtx)
1725		{
1726		  /* Setting a scratch register that we will use instead
1727		     of SP for saving registers to the stack.  */
1728		  gcc_assert (cur_cfa->reg == dw_stack_pointer_regnum);
1729		  cur_trace->cfa_store.reg = dwf_regno (dest);
1730		  cur_trace->cfa_store.offset
1731		    = cur_cfa->offset - cur_trace->cfa_temp.offset;
1732		}
1733
1734	      /* Rule 9 */
1735	      else if (GET_CODE (src) == LO_SUM
1736		       && CONST_INT_P (XEXP (src, 1)))
1737		{
1738		  cur_trace->cfa_temp.reg = dwf_regno (dest);
1739		  cur_trace->cfa_temp.offset = INTVAL (XEXP (src, 1));
1740		}
1741	      else
1742		gcc_unreachable ();
1743	    }
1744	  break;
1745
1746	  /* Rule 6 */
1747	case CONST_INT:
1748	  cur_trace->cfa_temp.reg = dwf_regno (dest);
1749	  cur_trace->cfa_temp.offset = INTVAL (src);
1750	  break;
1751
1752	  /* Rule 7 */
1753	case IOR:
1754	  gcc_assert (REG_P (XEXP (src, 0))
1755		      && dwf_regno (XEXP (src, 0)) == cur_trace->cfa_temp.reg
1756		      && CONST_INT_P (XEXP (src, 1)));
1757
1758	  cur_trace->cfa_temp.reg = dwf_regno (dest);
1759	  cur_trace->cfa_temp.offset |= INTVAL (XEXP (src, 1));
1760	  break;
1761
1762	  /* Skip over HIGH, assuming it will be followed by a LO_SUM,
1763	     which will fill in all of the bits.  */
1764	  /* Rule 8 */
1765	case HIGH:
1766	  break;
1767
1768	  /* Rule 15 */
1769	case UNSPEC:
1770	case UNSPEC_VOLATILE:
1771	  /* All unspecs should be represented by REG_CFA_* notes.  */
1772	  gcc_unreachable ();
1773	  return;
1774
1775	  /* Rule 16 */
1776	case AND:
1777          /* If this AND operation happens on stack pointer in prologue,
1778	     we assume the stack is realigned and we extract the
1779	     alignment.  */
1780          if (fde && XEXP (src, 0) == stack_pointer_rtx)
1781            {
1782	      /* We interpret reg_save differently with stack_realign set.
1783		 Thus we must flush whatever we have queued first.  */
1784	      dwarf2out_flush_queued_reg_saves ();
1785
1786              gcc_assert (cur_trace->cfa_store.reg
1787			  == dwf_regno (XEXP (src, 0)));
1788              fde->stack_realign = 1;
1789              fde->stack_realignment = INTVAL (XEXP (src, 1));
1790              cur_trace->cfa_store.offset = 0;
1791
1792	      if (cur_cfa->reg != dw_stack_pointer_regnum
1793		  && cur_cfa->reg != dw_frame_pointer_regnum)
1794		fde->drap_reg = cur_cfa->reg;
1795            }
1796          return;
1797
1798	default:
1799	  gcc_unreachable ();
1800	}
1801      break;
1802
1803    case MEM:
1804
1805      /* Saving a register to the stack.  Make sure dest is relative to the
1806	 CFA register.  */
1807      switch (GET_CODE (XEXP (dest, 0)))
1808	{
1809	  /* Rule 10 */
1810	  /* With a push.  */
1811	case PRE_MODIFY:
1812	case POST_MODIFY:
1813	  /* We can't handle variable size modifications.  */
1814	  gcc_assert (GET_CODE (XEXP (XEXP (XEXP (dest, 0), 1), 1))
1815		      == CONST_INT);
1816	  offset = -INTVAL (XEXP (XEXP (XEXP (dest, 0), 1), 1));
1817
1818	  gcc_assert (REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
1819		      && cur_trace->cfa_store.reg == dw_stack_pointer_regnum);
1820
1821	  cur_trace->cfa_store.offset += offset;
1822	  if (cur_cfa->reg == dw_stack_pointer_regnum)
1823	    cur_cfa->offset = cur_trace->cfa_store.offset;
1824
1825	  if (GET_CODE (XEXP (dest, 0)) == POST_MODIFY)
1826	    offset -= cur_trace->cfa_store.offset;
1827	  else
1828	    offset = -cur_trace->cfa_store.offset;
1829	  break;
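	  /* An illustrative, target-dependent example of Rule 10: a push
	     such as
	       (set (mem:DI (pre_modify (reg sp)
					(plus (reg sp) (const_int -8))))
		    (reg R))
	     reaches here with an 8-byte adjustment, so cfa_store.offset
	     grows by 8 and OFFSET becomes the saved slot's offset from
	     the CFA (negative on a downward-growing stack).  */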
1830
1831	  /* Rule 11 */
1832	case PRE_INC:
1833	case PRE_DEC:
1834	case POST_DEC:
1835	  offset = GET_MODE_SIZE (GET_MODE (dest));
1836	  if (GET_CODE (XEXP (dest, 0)) == PRE_INC)
1837	    offset = -offset;
1838
1839	  gcc_assert ((REGNO (XEXP (XEXP (dest, 0), 0))
1840		       == STACK_POINTER_REGNUM)
1841		      && cur_trace->cfa_store.reg == dw_stack_pointer_regnum);
1842
1843	  cur_trace->cfa_store.offset += offset;
1844
          /* Rule 18: If the stack is aligned, we will use FP as a
	     reference to represent the address of the stored
	     register.  */
1848          if (fde
1849              && fde->stack_realign
1850	      && REG_P (src)
1851	      && REGNO (src) == HARD_FRAME_POINTER_REGNUM)
1852	    {
1853	      gcc_assert (cur_cfa->reg != dw_frame_pointer_regnum);
1854	      cur_trace->cfa_store.offset = 0;
1855	    }
1856
1857	  if (cur_cfa->reg == dw_stack_pointer_regnum)
1858	    cur_cfa->offset = cur_trace->cfa_store.offset;
1859
1860	  if (GET_CODE (XEXP (dest, 0)) == POST_DEC)
1861	    offset += -cur_trace->cfa_store.offset;
1862	  else
1863	    offset = -cur_trace->cfa_store.offset;
1864	  break;
1865
1866	  /* Rule 12 */
1867	  /* With an offset.  */
1868	case PLUS:
1869	case MINUS:
1870	case LO_SUM:
1871	  {
1872	    unsigned int regno;
1873
1874	    gcc_assert (CONST_INT_P (XEXP (XEXP (dest, 0), 1))
1875			&& REG_P (XEXP (XEXP (dest, 0), 0)));
1876	    offset = INTVAL (XEXP (XEXP (dest, 0), 1));
1877	    if (GET_CODE (XEXP (dest, 0)) == MINUS)
1878	      offset = -offset;
1879
1880	    regno = dwf_regno (XEXP (XEXP (dest, 0), 0));
1881
1882	    if (cur_cfa->reg == regno)
1883	      offset -= cur_cfa->offset;
1884	    else if (cur_trace->cfa_store.reg == regno)
1885	      offset -= cur_trace->cfa_store.offset;
1886	    else
1887	      {
1888		gcc_assert (cur_trace->cfa_temp.reg == regno);
1889		offset -= cur_trace->cfa_temp.offset;
1890	      }
1891	  }
1892	  break;
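	  /* A hedged example of Rule 12: a save of the form
	       (set (mem:SI (plus (reg sp) (const_int 8))) (reg R))
	     while the CFA is still described relative to SP leaves REGNO
	     as the stack pointer's DWARF column, so OFFSET becomes
	     8 - cur_cfa->offset, the slot's position relative to the
	     CFA.  */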
1893
1894	  /* Rule 13 */
1895	  /* Without an offset.  */
1896	case REG:
1897	  {
1898	    unsigned int regno = dwf_regno (XEXP (dest, 0));
1899
1900	    if (cur_cfa->reg == regno)
1901	      offset = -cur_cfa->offset;
1902	    else if (cur_trace->cfa_store.reg == regno)
1903	      offset = -cur_trace->cfa_store.offset;
1904	    else
1905	      {
1906		gcc_assert (cur_trace->cfa_temp.reg == regno);
1907		offset = -cur_trace->cfa_temp.offset;
1908	      }
1909	  }
1910	  break;
1911
1912	  /* Rule 14 */
1913	case POST_INC:
1914	  gcc_assert (cur_trace->cfa_temp.reg
1915		      == dwf_regno (XEXP (XEXP (dest, 0), 0)));
1916	  offset = -cur_trace->cfa_temp.offset;
1917	  cur_trace->cfa_temp.offset -= GET_MODE_SIZE (GET_MODE (dest));
1918	  break;
1919
1920	default:
1921	  gcc_unreachable ();
1922	}
1923
1924      /* Rule 17 */
      /* If the source operand of this MEM operation is itself a memory
	 reference, we only care how much the stack grew.  */
1927      if (MEM_P (src))
1928        break;
1929
1930      if (REG_P (src)
1931	  && REGNO (src) != STACK_POINTER_REGNUM
1932	  && REGNO (src) != HARD_FRAME_POINTER_REGNUM
1933	  && dwf_regno (src) == cur_cfa->reg)
1934	{
1935	  /* We're storing the current CFA reg into the stack.  */
1936
1937	  if (cur_cfa->offset == 0)
1938	    {
1939              /* Rule 19 */
              /* If the stack is aligned, putting the CFA reg into the
		 stack means we can no longer use reg + offset to
		 represent the CFA.  Here we use DW_CFA_def_cfa_expression
		 instead.  The result of this expression equals the
		 original CFA value.  */
1945              if (fde
1946                  && fde->stack_realign
1947                  && cur_cfa->indirect == 0
1948                  && cur_cfa->reg != dw_frame_pointer_regnum)
1949                {
1950		  gcc_assert (fde->drap_reg == cur_cfa->reg);
1951
1952		  cur_cfa->indirect = 1;
1953		  cur_cfa->reg = dw_frame_pointer_regnum;
1954		  cur_cfa->base_offset = offset;
1955		  cur_cfa->offset = 0;
1956
1957		  fde->drap_reg_saved = 1;
1958		  break;
1959                }
1960
1961	      /* If the source register is exactly the CFA, assume
1962		 we're saving SP like any other register; this happens
1963		 on the ARM.  */
1964	      queue_reg_save (stack_pointer_rtx, NULL_RTX, offset);
1965	      break;
1966	    }
1967	  else
1968	    {
1969	      /* Otherwise, we'll need to look in the stack to
1970		 calculate the CFA.  */
1971	      rtx x = XEXP (dest, 0);
1972
1973	      if (!REG_P (x))
1974		x = XEXP (x, 0);
1975	      gcc_assert (REG_P (x));
1976
1977	      cur_cfa->reg = dwf_regno (x);
1978	      cur_cfa->base_offset = offset;
1979	      cur_cfa->indirect = 1;
1980	      break;
1981	    }
1982	}
1983
1984      if (REG_P (src))
1985	span = targetm.dwarf_register_span (src);
1986      else
1987	span = NULL;
1988
1989      if (!span)
1990	queue_reg_save (src, NULL_RTX, offset);
1991      else
1992	{
1993	  /* We have a PARALLEL describing where the contents of SRC live.
1994	     Queue register saves for each piece of the PARALLEL.  */
1995	  HOST_WIDE_INT span_offset = offset;
1996
1997	  gcc_assert (GET_CODE (span) == PARALLEL);
1998
1999	  const int par_len = XVECLEN (span, 0);
2000	  for (int par_index = 0; par_index < par_len; par_index++)
2001	    {
2002	      rtx elem = XVECEXP (span, 0, par_index);
2003	      queue_reg_save (elem, NULL_RTX, span_offset);
2004	      span_offset += GET_MODE_SIZE (GET_MODE (elem));
2005	    }
2006	}
2007      break;
2008
2009    default:
2010      gcc_unreachable ();
2011    }
2012}
2013
2014/* Record call frame debugging information for INSN, which either sets
2015   SP or FP (adjusting how we calculate the frame address) or saves a
2016   register to the stack.  */
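/* As a rough, target-dependent illustration of the note-driven dispatch
   below: an insn carrying, say, a REG_CFA_ADJUST_CFA note whose payload is
   (set (reg sp) (plus (reg sp) (const_int -16))) is handled entirely by
   dwarf2out_frame_debug_adjust_cfa; only insns without any REG_CFA_* note
   fall back to interpreting PATTERN (insn) (or the REG_FRAME_RELATED_EXPR
   note, if present) via dwarf2out_frame_debug_expr.  */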
2017
2018static void
2019dwarf2out_frame_debug (rtx_insn *insn)
2020{
2021  rtx note, n, pat;
2022  bool handled_one = false;
2023
2024  for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
2025    switch (REG_NOTE_KIND (note))
2026      {
2027      case REG_FRAME_RELATED_EXPR:
2028	pat = XEXP (note, 0);
2029	goto do_frame_expr;
2030
2031      case REG_CFA_DEF_CFA:
2032	dwarf2out_frame_debug_def_cfa (XEXP (note, 0));
2033	handled_one = true;
2034	break;
2035
2036      case REG_CFA_ADJUST_CFA:
2037	n = XEXP (note, 0);
2038	if (n == NULL)
2039	  {
2040	    n = PATTERN (insn);
2041	    if (GET_CODE (n) == PARALLEL)
2042	      n = XVECEXP (n, 0, 0);
2043	  }
2044	dwarf2out_frame_debug_adjust_cfa (n);
2045	handled_one = true;
2046	break;
2047
2048      case REG_CFA_OFFSET:
2049	n = XEXP (note, 0);
2050	if (n == NULL)
2051	  n = single_set (insn);
2052	dwarf2out_frame_debug_cfa_offset (n);
2053	handled_one = true;
2054	break;
2055
2056      case REG_CFA_REGISTER:
2057	n = XEXP (note, 0);
2058	if (n == NULL)
2059	  {
2060	    n = PATTERN (insn);
2061	    if (GET_CODE (n) == PARALLEL)
2062	      n = XVECEXP (n, 0, 0);
2063	  }
2064	dwarf2out_frame_debug_cfa_register (n);
2065	handled_one = true;
2066	break;
2067
2068      case REG_CFA_EXPRESSION:
2069	n = XEXP (note, 0);
2070	if (n == NULL)
2071	  n = single_set (insn);
2072	dwarf2out_frame_debug_cfa_expression (n);
2073	handled_one = true;
2074	break;
2075
2076      case REG_CFA_RESTORE:
2077	n = XEXP (note, 0);
2078	if (n == NULL)
2079	  {
2080	    n = PATTERN (insn);
2081	    if (GET_CODE (n) == PARALLEL)
2082	      n = XVECEXP (n, 0, 0);
2083	    n = XEXP (n, 0);
2084	  }
2085	dwarf2out_frame_debug_cfa_restore (n);
2086	handled_one = true;
2087	break;
2088
2089      case REG_CFA_SET_VDRAP:
2090	n = XEXP (note, 0);
2091	if (REG_P (n))
2092	  {
2093	    dw_fde_ref fde = cfun->fde;
2094	    if (fde)
2095	      {
2096		gcc_assert (fde->vdrap_reg == INVALID_REGNUM);
		fde->vdrap_reg = dwf_regno (n);
2099	      }
2100	  }
2101	handled_one = true;
2102	break;
2103
2104      case REG_CFA_WINDOW_SAVE:
2105	dwarf2out_frame_debug_cfa_window_save ();
2106	handled_one = true;
2107	break;
2108
2109      case REG_CFA_FLUSH_QUEUE:
2110	/* The actual flush happens elsewhere.  */
2111	handled_one = true;
2112	break;
2113
2114      default:
2115	break;
2116      }
2117
2118  if (!handled_one)
2119    {
2120      pat = PATTERN (insn);
2121    do_frame_expr:
2122      dwarf2out_frame_debug_expr (pat);
2123
2124      /* Check again.  A parallel can save and update the same register.
2125         We could probably check just once, here, but this is safer than
2126         removing the check at the start of the function.  */
2127      if (clobbers_queued_reg_save (pat))
2128	dwarf2out_flush_queued_reg_saves ();
2129    }
2130}
2131
2132/* Emit CFI info to change the state from OLD_ROW to NEW_ROW.  */
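/* A sketch of what this typically produces: if only the CFA differs -- say
   OLD_ROW has the CFA at sp+16 and NEW_ROW at fp+8 -- def_cfa_0 yields a
   single DW_CFA_def_cfa (or just a def_cfa_register / def_cfa_offset when
   only one component changed).  Register columns whose save rule vanished
   get a DW_CFA_restore; columns whose rule changed get the new save CFI
   re-emitted.  */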
2133
2134static void
2135change_cfi_row (dw_cfi_row *old_row, dw_cfi_row *new_row)
2136{
2137  size_t i, n_old, n_new, n_max;
2138  dw_cfi_ref cfi;
2139
2140  if (new_row->cfa_cfi && !cfi_equal_p (old_row->cfa_cfi, new_row->cfa_cfi))
2141    add_cfi (new_row->cfa_cfi);
2142  else
2143    {
2144      cfi = def_cfa_0 (&old_row->cfa, &new_row->cfa);
2145      if (cfi)
2146	add_cfi (cfi);
2147    }
2148
2149  n_old = vec_safe_length (old_row->reg_save);
2150  n_new = vec_safe_length (new_row->reg_save);
2151  n_max = MAX (n_old, n_new);
2152
2153  for (i = 0; i < n_max; ++i)
2154    {
2155      dw_cfi_ref r_old = NULL, r_new = NULL;
2156
2157      if (i < n_old)
2158	r_old = (*old_row->reg_save)[i];
2159      if (i < n_new)
2160	r_new = (*new_row->reg_save)[i];
2161
2162      if (r_old == r_new)
2163	;
2164      else if (r_new == NULL)
2165	add_cfi_restore (i);
2166      else if (!cfi_equal_p (r_old, r_new))
2167        add_cfi (r_new);
2168    }
2169}
2170
/* Examine CFI and return true if a cfi label and set_loc is needed
   beforehand.  Even when generating CFI assembler instructions, we
   still have to add the cfi to the list so that lookup_cfa_1 works
   later on.  With -g2 and above we even need to force emission of
   CFI labels and add a DW_CFA_set_loc to the list for
   convert_cfa_to_fb_loc_list purposes.  If we're generating DWARF3
   output we use DW_OP_call_frame_cfa and so don't use
   convert_cfa_to_fb_loc_list.  */
2178
2179static bool
2180cfi_label_required_p (dw_cfi_ref cfi)
2181{
2182  if (!dwarf2out_do_cfi_asm ())
2183    return true;
2184
2185  if (dwarf_version == 2
2186      && debug_info_level > DINFO_LEVEL_TERSE
2187      && (write_symbols == DWARF2_DEBUG
2188	  || write_symbols == VMS_AND_DWARF2_DEBUG))
2189    {
2190      switch (cfi->dw_cfi_opc)
2191	{
2192	case DW_CFA_def_cfa_offset:
2193	case DW_CFA_def_cfa_offset_sf:
2194	case DW_CFA_def_cfa_register:
2195	case DW_CFA_def_cfa:
2196	case DW_CFA_def_cfa_sf:
2197	case DW_CFA_def_cfa_expression:
2198	case DW_CFA_restore_state:
2199	  return true;
2200	default:
2201	  return false;
2202	}
2203    }
2204  return false;
2205}
2206
2207/* Walk the function, looking for NOTE_INSN_CFI notes.  Add the CFIs to the
2208   function's FDE, adding CFI labels and set_loc/advance_loc opcodes as
2209   necessary.  */
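/* For instance, with .cfi_* directives labels are usually unnecessary (the
   assembler tracks the location itself, modulo the -g2 DWARF2 case tested
   in cfi_label_required_p), whereas in the by-hand case each required CFI
   gets a fresh label and a DW_CFA_advance_loc4 from the previous one -- or
   a DW_CFA_set_loc right after a text section switch, where advancing
   between sections would be meaningless.  */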
2210static void
2211add_cfis_to_fde (void)
2212{
2213  dw_fde_ref fde = cfun->fde;
2214  rtx_insn *insn, *next;
  /* The FDE opens at the function_begin label, so the first CFI note can
     be reached with a DW_CFA_advance_loc4 from it rather than needing a
     DW_CFA_set_loc.  */
2216  bool first = false;
2217
2218  for (insn = get_insns (); insn; insn = next)
2219    {
2220      next = NEXT_INSN (insn);
2221
2222      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
2223	{
2224	  fde->dw_fde_switch_cfi_index = vec_safe_length (fde->dw_fde_cfi);
2225	  /* Don't attempt to advance_loc4 between labels
2226	     in different sections.  */
2227	  first = true;
2228	}
2229
2230      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
2231	{
2232	  bool required = cfi_label_required_p (NOTE_CFI (insn));
2233	  while (next)
2234	    if (NOTE_P (next) && NOTE_KIND (next) == NOTE_INSN_CFI)
2235	      {
2236		required |= cfi_label_required_p (NOTE_CFI (next));
2237		next = NEXT_INSN (next);
2238	      }
2239	    else if (active_insn_p (next)
2240		     || (NOTE_P (next) && (NOTE_KIND (next)
2241					   == NOTE_INSN_SWITCH_TEXT_SECTIONS)))
2242	      break;
2243	    else
2244	      next = NEXT_INSN (next);
2245	  if (required)
2246	    {
2247	      int num = dwarf2out_cfi_label_num;
2248	      const char *label = dwarf2out_cfi_label ();
2249	      dw_cfi_ref xcfi;
2250	      rtx tmp;
2251
2252	      /* Set the location counter to the new label.  */
2253	      xcfi = new_cfi ();
2254	      xcfi->dw_cfi_opc = (first ? DW_CFA_set_loc
2255				  : DW_CFA_advance_loc4);
2256	      xcfi->dw_cfi_oprnd1.dw_cfi_addr = label;
2257	      vec_safe_push (fde->dw_fde_cfi, xcfi);
2258
2259	      tmp = emit_note_before (NOTE_INSN_CFI_LABEL, insn);
2260	      NOTE_LABEL_NUMBER (tmp) = num;
2261	    }
2262
2263	  do
2264	    {
2265	      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
2266		vec_safe_push (fde->dw_fde_cfi, NOTE_CFI (insn));
2267	      insn = NEXT_INSN (insn);
2268	    }
2269	  while (insn != next);
2270	  first = false;
2271	}
2272    }
2273}
2274
/* If START is the start of a trace, then initialize the state of that
   trace from CUR_TRACE and CUR_ROW.  */
2277
2278static void
2279maybe_record_trace_start (rtx_insn *start, rtx_insn *origin)
2280{
2281  dw_trace_info *ti;
2282  HOST_WIDE_INT args_size;
2283
2284  ti = get_trace_info (start);
2285  gcc_assert (ti != NULL);
2286
2287  if (dump_file)
2288    {
2289      fprintf (dump_file, "   saw edge from trace %u to %u (via %s %d)\n",
2290	       cur_trace->id, ti->id,
2291	       (origin ? rtx_name[(int) GET_CODE (origin)] : "fallthru"),
2292	       (origin ? INSN_UID (origin) : 0));
2293    }
2294
2295  args_size = cur_trace->end_true_args_size;
2296  if (ti->beg_row == NULL)
2297    {
2298      /* This is the first time we've encountered this trace.  Propagate
2299	 state across the edge and push the trace onto the work list.  */
2300      ti->beg_row = copy_cfi_row (cur_row);
2301      ti->beg_true_args_size = args_size;
2302
2303      ti->cfa_store = cur_trace->cfa_store;
2304      ti->cfa_temp = cur_trace->cfa_temp;
2305      ti->regs_saved_in_regs = cur_trace->regs_saved_in_regs.copy ();
2306
2307      trace_work_list.safe_push (ti);
2308
2309      if (dump_file)
2310	fprintf (dump_file, "\tpush trace %u to worklist\n", ti->id);
2311    }
2312  else
2313    {
2314
2315      /* We ought to have the same state incoming to a given trace no
2316	 matter how we arrive at the trace.  Anything else means we've
2317	 got some kind of optimization error.  */
2318      gcc_checking_assert (cfi_row_equal_p (cur_row, ti->beg_row));
2319
2320      /* The args_size is allowed to conflict if it isn't actually used.  */
2321      if (ti->beg_true_args_size != args_size)
2322	ti->args_size_undefined = true;
2323    }
2324}
2325
2326/* Similarly, but handle the args_size and CFA reset across EH
2327   and non-local goto edges.  */
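/* Roughly: if 16 bytes of outgoing arguments are pending
   (end_true_args_size == 16) and the CFA is still based on the stack
   pointer, the landing pad is entered as if those bytes had already been
   popped, so the CFA offset used for the edge is temporarily reduced by 16
   (on a STACK_GROWS_DOWNWARD target) and then restored for the
   fall-through path.  */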
2328
2329static void
2330maybe_record_trace_start_abnormal (rtx_insn *start, rtx_insn *origin)
2331{
2332  HOST_WIDE_INT save_args_size, delta;
2333  dw_cfa_location save_cfa;
2334
2335  save_args_size = cur_trace->end_true_args_size;
2336  if (save_args_size == 0)
2337    {
2338      maybe_record_trace_start (start, origin);
2339      return;
2340    }
2341
2342  delta = -save_args_size;
2343  cur_trace->end_true_args_size = 0;
2344
2345  save_cfa = cur_row->cfa;
2346  if (cur_row->cfa.reg == dw_stack_pointer_regnum)
2347    {
      /* Convert a change in args_size (always positive in the
	 direction of stack growth) to a change in the stack pointer.  */
2350#ifndef STACK_GROWS_DOWNWARD
2351      delta = -delta;
2352#endif
2353      cur_row->cfa.offset += delta;
2354    }
2355
2356  maybe_record_trace_start (start, origin);
2357
2358  cur_trace->end_true_args_size = save_args_size;
2359  cur_row->cfa = save_cfa;
2360}
2361
2362/* Propagate CUR_TRACE state to the destinations implied by INSN.  */
2363/* ??? Sadly, this is in large part a duplicate of make_edges.  */
2364
2365static void
2366create_trace_edges (rtx_insn *insn)
2367{
2368  rtx tmp;
2369  int i, n;
2370
2371  if (JUMP_P (insn))
2372    {
2373      rtx_jump_table_data *table;
2374
2375      if (find_reg_note (insn, REG_NON_LOCAL_GOTO, NULL_RTX))
2376	return;
2377
2378      if (tablejump_p (insn, NULL, &table))
2379	{
2380	  rtvec vec = table->get_labels ();
2381
2382	  n = GET_NUM_ELEM (vec);
2383	  for (i = 0; i < n; ++i)
2384	    {
2385	      rtx_insn *lab = as_a <rtx_insn *> (XEXP (RTVEC_ELT (vec, i), 0));
2386	      maybe_record_trace_start (lab, insn);
2387	    }
2388	}
2389      else if (computed_jump_p (insn))
2390	{
2391	  for (rtx_insn_list *lab = forced_labels; lab; lab = lab->next ())
2392	    maybe_record_trace_start (lab->insn (), insn);
2393	}
2394      else if (returnjump_p (insn))
2395	;
2396      else if ((tmp = extract_asm_operands (PATTERN (insn))) != NULL)
2397	{
2398	  n = ASM_OPERANDS_LABEL_LENGTH (tmp);
2399	  for (i = 0; i < n; ++i)
2400	    {
2401	      rtx_insn *lab =
2402		as_a <rtx_insn *> (XEXP (ASM_OPERANDS_LABEL (tmp, i), 0));
2403	      maybe_record_trace_start (lab, insn);
2404	    }
2405	}
2406      else
2407	{
2408	  rtx_insn *lab = JUMP_LABEL_AS_INSN (insn);
2409	  gcc_assert (lab != NULL);
2410	  maybe_record_trace_start (lab, insn);
2411	}
2412    }
2413  else if (CALL_P (insn))
2414    {
2415      /* Sibling calls don't have edges inside this function.  */
2416      if (SIBLING_CALL_P (insn))
2417	return;
2418
2419      /* Process non-local goto edges.  */
2420      if (can_nonlocal_goto (insn))
2421	for (rtx_insn_list *lab = nonlocal_goto_handler_labels;
2422	     lab;
2423	     lab = lab->next ())
2424	  maybe_record_trace_start_abnormal (lab->insn (), insn);
2425    }
2426  else if (rtx_sequence *seq = dyn_cast <rtx_sequence *> (PATTERN (insn)))
2427    {
2428      int i, n = seq->len ();
2429      for (i = 0; i < n; ++i)
2430	create_trace_edges (seq->insn (i));
2431      return;
2432    }
2433
2434  /* Process EH edges.  */
2435  if (CALL_P (insn) || cfun->can_throw_non_call_exceptions)
2436    {
2437      eh_landing_pad lp = get_eh_landing_pad_from_rtx (insn);
2438      if (lp)
2439	maybe_record_trace_start_abnormal (lp->landing_pad, insn);
2440    }
2441}
2442
2443/* A subroutine of scan_trace.  Do what needs to be done "after" INSN.  */
2444
2445static void
2446scan_insn_after (rtx_insn *insn)
2447{
2448  if (RTX_FRAME_RELATED_P (insn))
2449    dwarf2out_frame_debug (insn);
2450  notice_args_size (insn);
2451}
2452
2453/* Scan the trace beginning at INSN and create the CFI notes for the
2454   instructions therein.  */
2455
2456static void
2457scan_trace (dw_trace_info *trace)
2458{
2459  rtx_insn *prev, *insn = trace->head;
2460  dw_cfa_location this_cfa;
2461
2462  if (dump_file)
2463    fprintf (dump_file, "Processing trace %u : start at %s %d\n",
2464	     trace->id, rtx_name[(int) GET_CODE (insn)],
2465	     INSN_UID (insn));
2466
2467  trace->end_row = copy_cfi_row (trace->beg_row);
2468  trace->end_true_args_size = trace->beg_true_args_size;
2469
2470  cur_trace = trace;
2471  cur_row = trace->end_row;
2472
2473  this_cfa = cur_row->cfa;
2474  cur_cfa = &this_cfa;
2475
2476  for (prev = insn, insn = NEXT_INSN (insn);
2477       insn;
2478       prev = insn, insn = NEXT_INSN (insn))
2479    {
2480      rtx_insn *control;
2481
2482      /* Do everything that happens "before" the insn.  */
2483      add_cfi_insn = prev;
2484
2485      /* Notice the end of a trace.  */
2486      if (BARRIER_P (insn))
2487	{
2488	  /* Don't bother saving the unneeded queued registers at all.  */
2489	  queued_reg_saves.truncate (0);
2490	  break;
2491	}
2492      if (save_point_p (insn))
2493	{
2494	  /* Propagate across fallthru edges.  */
2495	  dwarf2out_flush_queued_reg_saves ();
2496	  maybe_record_trace_start (insn, NULL);
2497	  break;
2498	}
2499
2500      if (DEBUG_INSN_P (insn) || !inside_basic_block_p (insn))
2501	continue;
2502
2503      /* Handle all changes to the row state.  Sequences require special
2504	 handling for the positioning of the notes.  */
2505      if (rtx_sequence *pat = dyn_cast <rtx_sequence *> (PATTERN (insn)))
2506	{
2507	  rtx_insn *elt;
2508	  int i, n = pat->len ();
2509
2510	  control = pat->insn (0);
2511	  if (can_throw_internal (control))
2512	    notice_eh_throw (control);
2513	  dwarf2out_flush_queued_reg_saves ();
2514
2515	  if (JUMP_P (control) && INSN_ANNULLED_BRANCH_P (control))
2516	    {
2517	      /* ??? Hopefully multiple delay slots are not annulled.  */
2518	      gcc_assert (n == 2);
2519	      gcc_assert (!RTX_FRAME_RELATED_P (control));
2520	      gcc_assert (!find_reg_note (control, REG_ARGS_SIZE, NULL));
2521
2522	      elt = pat->insn (1);
2523
2524	      if (INSN_FROM_TARGET_P (elt))
2525		{
2526		  HOST_WIDE_INT restore_args_size;
2527		  cfi_vec save_row_reg_save;
2528
2529		  /* If ELT is an instruction from target of an annulled
2530		     branch, the effects are for the target only and so
2531		     the args_size and CFA along the current path
2532		     shouldn't change.  */
2533		  add_cfi_insn = NULL;
2534		  restore_args_size = cur_trace->end_true_args_size;
2535		  cur_cfa = &cur_row->cfa;
2536		  save_row_reg_save = vec_safe_copy (cur_row->reg_save);
2537
2538		  scan_insn_after (elt);
2539
2540		  /* ??? Should we instead save the entire row state?  */
2541		  gcc_assert (!queued_reg_saves.length ());
2542
2543		  create_trace_edges (control);
2544
2545		  cur_trace->end_true_args_size = restore_args_size;
2546		  cur_row->cfa = this_cfa;
2547		  cur_row->reg_save = save_row_reg_save;
2548		  cur_cfa = &this_cfa;
2549		}
2550	      else
2551		{
		  /* If ELT is an annulled instruction executed only when
		     the branch is not taken, the args_size and CFA should
		     not change through the jump.  */
2555		  create_trace_edges (control);
2556
2557		  /* Update and continue with the trace.  */
2558		  add_cfi_insn = insn;
2559		  scan_insn_after (elt);
2560		  def_cfa_1 (&this_cfa);
2561		}
2562	      continue;
2563	    }
2564
2565	  /* The insns in the delay slot should all be considered to happen
2566	     "before" a call insn.  Consider a call with a stack pointer
2567	     adjustment in the delay slot.  The backtrace from the callee
2568	     should include the sp adjustment.  Unfortunately, that leaves
2569	     us with an unavoidable unwinding error exactly at the call insn
2570	     itself.  For jump insns we'd prefer to avoid this error by
2571	     placing the notes after the sequence.  */
2572	  if (JUMP_P (control))
2573	    add_cfi_insn = insn;
2574
2575	  for (i = 1; i < n; ++i)
2576	    {
2577	      elt = pat->insn (i);
2578	      scan_insn_after (elt);
2579	    }
2580
2581	  /* Make sure any register saves are visible at the jump target.  */
2582	  dwarf2out_flush_queued_reg_saves ();
2583	  any_cfis_emitted = false;
2584
2585          /* However, if there is some adjustment on the call itself, e.g.
2586	     a call_pop, that action should be considered to happen after
2587	     the call returns.  */
2588	  add_cfi_insn = insn;
2589	  scan_insn_after (control);
2590	}
2591      else
2592	{
2593	  /* Flush data before calls and jumps, and of course if necessary.  */
2594	  if (can_throw_internal (insn))
2595	    {
2596	      notice_eh_throw (insn);
2597	      dwarf2out_flush_queued_reg_saves ();
2598	    }
2599	  else if (!NONJUMP_INSN_P (insn)
2600		   || clobbers_queued_reg_save (insn)
2601		   || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
2602	    dwarf2out_flush_queued_reg_saves ();
2603	  any_cfis_emitted = false;
2604
2605	  add_cfi_insn = insn;
2606	  scan_insn_after (insn);
2607	  control = insn;
2608	}
2609
      /* Between the frame-related-p handling and the args_size handling
	 above we might otherwise have emitted two CFA adjustments; emit
	 a single one now.  */
2612      def_cfa_1 (&this_cfa);
2613
2614      /* Minimize the number of advances by emitting the entire queue
2615	 once anything is emitted.  */
2616      if (any_cfis_emitted
2617	  || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
2618	dwarf2out_flush_queued_reg_saves ();
2619
2620      /* Note that a test for control_flow_insn_p does exactly the
2621	 same tests as are done to actually create the edges.  So
2622	 always call the routine and let it not create edges for
2623	 non-control-flow insns.  */
2624      create_trace_edges (control);
2625    }
2626
2627  add_cfi_insn = NULL;
2628  cur_row = NULL;
2629  cur_trace = NULL;
2630  cur_cfa = NULL;
2631}
2632
2633/* Scan the function and create the initial set of CFI notes.  */
2634
2635static void
2636create_cfi_notes (void)
2637{
2638  dw_trace_info *ti;
2639
2640  gcc_checking_assert (!queued_reg_saves.exists ());
2641  gcc_checking_assert (!trace_work_list.exists ());
2642
2643  /* Always begin at the entry trace.  */
2644  ti = &trace_info[0];
2645  scan_trace (ti);
2646
2647  while (!trace_work_list.is_empty ())
2648    {
2649      ti = trace_work_list.pop ();
2650      scan_trace (ti);
2651    }
2652
2653  queued_reg_saves.release ();
2654  trace_work_list.release ();
2655}
2656
2657/* Return the insn before the first NOTE_INSN_CFI after START.  */
2658
2659static rtx_insn *
2660before_next_cfi_note (rtx_insn *start)
2661{
2662  rtx_insn *prev = start;
2663  while (start)
2664    {
2665      if (NOTE_P (start) && NOTE_KIND (start) == NOTE_INSN_CFI)
2666	return prev;
2667      prev = start;
2668      start = NEXT_INSN (start);
2669    }
2670  gcc_unreachable ();
2671}
2672
2673/* Insert CFI notes between traces to properly change state between them.  */
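/* As an example of the remember/restore case handled below: when an early
   epilogue trace is followed by code that still shares the row state from
   the start of that epilogue, we emit DW_CFA_remember_state just before the
   epilogue's first state change and DW_CFA_restore_state at the head of the
   following trace, which is far cheaper than re-describing every register
   save from scratch.  */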
2674
2675static void
2676connect_traces (void)
2677{
2678  unsigned i, n = trace_info.length ();
2679  dw_trace_info *prev_ti, *ti;
2680
2681  /* ??? Ideally, we should have both queued and processed every trace.
2682     However the current representation of constant pools on various targets
2683     is indistinguishable from unreachable code.  Assume for the moment that
2684     we can simply skip over such traces.  */
2685  /* ??? Consider creating a DATA_INSN rtx code to indicate that
2686     these are not "real" instructions, and should not be considered.
2687     This could be generically useful for tablejump data as well.  */
2688  /* Remove all unprocessed traces from the list.  */
2689  for (i = n - 1; i > 0; --i)
2690    {
2691      ti = &trace_info[i];
2692      if (ti->beg_row == NULL)
2693	{
2694	  trace_info.ordered_remove (i);
2695	  n -= 1;
2696	}
2697      else
2698	gcc_assert (ti->end_row != NULL);
2699    }
2700
2701  /* Work from the end back to the beginning.  This lets us easily insert
2702     remember/restore_state notes in the correct order wrt other notes.  */
2703  prev_ti = &trace_info[n - 1];
2704  for (i = n - 1; i > 0; --i)
2705    {
2706      dw_cfi_row *old_row;
2707
2708      ti = prev_ti;
2709      prev_ti = &trace_info[i - 1];
2710
2711      add_cfi_insn = ti->head;
2712
2713      /* In dwarf2out_switch_text_section, we'll begin a new FDE
2714	 for the portion of the function in the alternate text
2715	 section.  The row state at the very beginning of that
2716	 new FDE will be exactly the row state from the CIE.  */
2717      if (ti->switch_sections)
2718	old_row = cie_cfi_row;
2719      else
2720	{
2721	  old_row = prev_ti->end_row;
2722	  /* If there's no change from the previous end state, fine.  */
2723	  if (cfi_row_equal_p (old_row, ti->beg_row))
2724	    ;
2725	  /* Otherwise check for the common case of sharing state with
2726	     the beginning of an epilogue, but not the end.  Insert
2727	     remember/restore opcodes in that case.  */
2728	  else if (cfi_row_equal_p (prev_ti->beg_row, ti->beg_row))
2729	    {
2730	      dw_cfi_ref cfi;
2731
2732	      /* Note that if we blindly insert the remember at the
2733		 start of the trace, we can wind up increasing the
2734		 size of the unwind info due to extra advance opcodes.
2735		 Instead, put the remember immediately before the next
2736		 state change.  We know there must be one, because the
2737		 state at the beginning and head of the trace differ.  */
2738	      add_cfi_insn = before_next_cfi_note (prev_ti->head);
2739	      cfi = new_cfi ();
2740	      cfi->dw_cfi_opc = DW_CFA_remember_state;
2741	      add_cfi (cfi);
2742
2743	      add_cfi_insn = ti->head;
2744	      cfi = new_cfi ();
2745	      cfi->dw_cfi_opc = DW_CFA_restore_state;
2746	      add_cfi (cfi);
2747
2748	      old_row = prev_ti->beg_row;
2749	    }
2750	  /* Otherwise, we'll simply change state from the previous end.  */
2751	}
2752
2753      change_cfi_row (old_row, ti->beg_row);
2754
2755      if (dump_file && add_cfi_insn != ti->head)
2756	{
2757	  rtx_insn *note;
2758
2759	  fprintf (dump_file, "Fixup between trace %u and %u:\n",
2760		   prev_ti->id, ti->id);
2761
2762	  note = ti->head;
2763	  do
2764	    {
2765	      note = NEXT_INSN (note);
2766	      gcc_assert (NOTE_P (note) && NOTE_KIND (note) == NOTE_INSN_CFI);
2767	      output_cfi_directive (dump_file, NOTE_CFI (note));
2768	    }
2769	  while (note != add_cfi_insn);
2770	}
2771    }
2772
2773  /* Connect args_size between traces that have can_throw_internal insns.  */
2774  if (cfun->eh->lp_array)
2775    {
2776      HOST_WIDE_INT prev_args_size = 0;
2777
2778      for (i = 0; i < n; ++i)
2779	{
2780	  ti = &trace_info[i];
2781
2782	  if (ti->switch_sections)
2783	    prev_args_size = 0;
2784	  if (ti->eh_head == NULL)
2785	    continue;
2786	  gcc_assert (!ti->args_size_undefined);
2787
2788	  if (ti->beg_delay_args_size != prev_args_size)
2789	    {
2790	      /* ??? Search back to previous CFI note.  */
2791	      add_cfi_insn = PREV_INSN (ti->eh_head);
2792	      add_cfi_args_size (ti->beg_delay_args_size);
2793	    }
2794
2795	  prev_args_size = ti->end_delay_args_size;
2796	}
2797    }
2798}
2799
2800/* Set up the pseudo-cfg of instruction traces, as described at the
2801   block comment at the top of the file.  */
2802
2803static void
2804create_pseudo_cfg (void)
2805{
2806  bool saw_barrier, switch_sections;
2807  dw_trace_info ti;
2808  rtx_insn *insn;
2809  unsigned i;
2810
2811  /* The first trace begins at the start of the function,
2812     and begins with the CIE row state.  */
2813  trace_info.create (16);
2814  memset (&ti, 0, sizeof (ti));
2815  ti.head = get_insns ();
2816  ti.beg_row = cie_cfi_row;
2817  ti.cfa_store = cie_cfi_row->cfa;
2818  ti.cfa_temp.reg = INVALID_REGNUM;
  /* Install the CIE return column save, if any, before TI is copied
     into the trace vector.  */
  if (cie_return_save)
    ti.regs_saved_in_regs.safe_push (*cie_return_save);
  trace_info.quick_push (ti);
2823
2824  /* Walk all the insns, collecting start of trace locations.  */
2825  saw_barrier = false;
2826  switch_sections = false;
2827  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
2828    {
2829      if (BARRIER_P (insn))
2830	saw_barrier = true;
2831      else if (NOTE_P (insn)
2832	       && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
2833	{
2834	  /* We should have just seen a barrier.  */
2835	  gcc_assert (saw_barrier);
2836	  switch_sections = true;
2837	}
2838      /* Watch out for save_point notes between basic blocks.
2839	 In particular, a note after a barrier.  Do not record these,
2840	 delaying trace creation until the label.  */
2841      else if (save_point_p (insn)
2842	       && (LABEL_P (insn) || !saw_barrier))
2843	{
2844	  memset (&ti, 0, sizeof (ti));
2845	  ti.head = insn;
2846	  ti.switch_sections = switch_sections;
2847	  ti.id = trace_info.length ();
2848	  trace_info.safe_push (ti);
2849
2850	  saw_barrier = false;
2851	  switch_sections = false;
2852	}
2853    }
2854
2855  /* Create the trace index after we've finished building trace_info,
2856     avoiding stale pointer problems due to reallocation.  */
2857  trace_index
2858    = new hash_table<trace_info_hasher> (trace_info.length ());
2859  dw_trace_info *tp;
2860  FOR_EACH_VEC_ELT (trace_info, i, tp)
2861    {
2862      dw_trace_info **slot;
2863
2864      if (dump_file)
2865	fprintf (dump_file, "Creating trace %u : start at %s %d%s\n", tp->id,
2866		 rtx_name[(int) GET_CODE (tp->head)], INSN_UID (tp->head),
2867		 tp->switch_sections ? " (section switch)" : "");
2868
2869      slot = trace_index->find_slot_with_hash (tp, INSN_UID (tp->head), INSERT);
2870      gcc_assert (*slot == NULL);
2871      *slot = tp;
2872    }
2873}
2874
2875/* Record the initial position of the return address.  RTL is
2876   INCOMING_RETURN_ADDR_RTX.  */
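/* Two hedged examples: a target whose return address arrives in a link
   register, say a hypothetical (reg LR), records DWARF_FRAME_RETURN_COLUMN
   as saved in LR's DWARF column (a DW_CFA_register in the CIE); one whose
   return address sits at (mem (plus (reg sp) (const_int 4))) records it in
   the stack slot SP+4, expressed relative to the incoming CFA.  */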
2877
2878static void
2879initial_return_save (rtx rtl)
2880{
2881  unsigned int reg = INVALID_REGNUM;
2882  HOST_WIDE_INT offset = 0;
2883
2884  switch (GET_CODE (rtl))
2885    {
2886    case REG:
2887      /* RA is in a register.  */
2888      reg = dwf_regno (rtl);
2889      break;
2890
2891    case MEM:
2892      /* RA is on the stack.  */
2893      rtl = XEXP (rtl, 0);
2894      switch (GET_CODE (rtl))
2895	{
2896	case REG:
2897	  gcc_assert (REGNO (rtl) == STACK_POINTER_REGNUM);
2898	  offset = 0;
2899	  break;
2900
2901	case PLUS:
2902	  gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
2903	  offset = INTVAL (XEXP (rtl, 1));
2904	  break;
2905
2906	case MINUS:
2907	  gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
2908	  offset = -INTVAL (XEXP (rtl, 1));
2909	  break;
2910
2911	default:
2912	  gcc_unreachable ();
2913	}
2914
2915      break;
2916
2917    case PLUS:
2918      /* The return address is at some offset from any value we can
2919	 actually load.  For instance, on the SPARC it is in %i7+8. Just
2920	 ignore the offset for now; it doesn't matter for unwinding frames.  */
2921      gcc_assert (CONST_INT_P (XEXP (rtl, 1)));
2922      initial_return_save (XEXP (rtl, 0));
2923      return;
2924
2925    default:
2926      gcc_unreachable ();
2927    }
2928
2929  if (reg != DWARF_FRAME_RETURN_COLUMN)
2930    {
2931      if (reg != INVALID_REGNUM)
2932        record_reg_saved_in_reg (rtl, pc_rtx);
2933      reg_save (DWARF_FRAME_RETURN_COLUMN, reg, offset - cur_row->cfa.offset);
2934    }
2935}
2936
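/* Compute the data shared by every CIE: the DWARF columns of the stack and
   frame pointers, the CIE CFI row (on entry the CFA is at
   SP + INCOMING_FRAME_SP_OFFSET), and, when DWARF2 unwind info is in use,
   the initial location of the return address along with cie_return_save.  */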
2937static void
2938create_cie_data (void)
2939{
2940  dw_cfa_location loc;
2941  dw_trace_info cie_trace;
2942
2943  dw_stack_pointer_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
2944  dw_frame_pointer_regnum = DWARF_FRAME_REGNUM (HARD_FRAME_POINTER_REGNUM);
2945
2946  memset (&cie_trace, 0, sizeof (cie_trace));
2947  cur_trace = &cie_trace;
2948
2949  add_cfi_vec = &cie_cfi_vec;
2950  cie_cfi_row = cur_row = new_cfi_row ();
2951
2952  /* On entry, the Canonical Frame Address is at SP.  */
2953  memset (&loc, 0, sizeof (loc));
2954  loc.reg = dw_stack_pointer_regnum;
2955  loc.offset = INCOMING_FRAME_SP_OFFSET;
2956  def_cfa_1 (&loc);
2957
2958  if (targetm.debug_unwind_info () == UI_DWARF2
2959      || targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
2960    {
2961      initial_return_save (INCOMING_RETURN_ADDR_RTX);
2962
2963      /* For a few targets, we have the return address incoming into a
2964	 register, but choose a different return column.  This will result
2965	 in a DW_CFA_register for the return, and an entry in
2966	 regs_saved_in_regs to match.  If the target later stores that
2967	 return address register to the stack, we want to be able to emit
2968	 the DW_CFA_offset against the return column, not the intermediate
2969	 save register.  Save the contents of regs_saved_in_regs so that
2970	 we can re-initialize it at the start of each function.  */
2971      switch (cie_trace.regs_saved_in_regs.length ())
2972	{
2973	case 0:
2974	  break;
2975	case 1:
2976	  cie_return_save = ggc_alloc<reg_saved_in_data> ();
2977	  *cie_return_save = cie_trace.regs_saved_in_regs[0];
2978	  cie_trace.regs_saved_in_regs.release ();
2979	  break;
2980	default:
2981	  gcc_unreachable ();
2982	}
2983    }
2984
2985  add_cfi_vec = NULL;
2986  cur_row = NULL;
2987  cur_trace = NULL;
2988}
2989
2990/* Annotate the function with NOTE_INSN_CFI notes to record the CFI
2991   state at each location within the function.  These notes will be
2992   emitted during pass_final.  */
2993
2994static unsigned int
2995execute_dwarf2_frame (void)
2996{
2997  /* The first time we're called, compute the incoming frame state.  */
2998  if (cie_cfi_vec == NULL)
2999    create_cie_data ();
3000
3001  dwarf2out_alloc_current_fde ();
3002
3003  create_pseudo_cfg ();
3004
3005  /* Do the work.  */
3006  create_cfi_notes ();
3007  connect_traces ();
3008  add_cfis_to_fde ();
3009
3010  /* Free all the data we allocated.  */
3011  {
3012    size_t i;
3013    dw_trace_info *ti;
3014
3015    FOR_EACH_VEC_ELT (trace_info, i, ti)
3016      ti->regs_saved_in_regs.release ();
3017  }
3018  trace_info.release ();
3019
3020  delete trace_index;
3021  trace_index = NULL;
3022
3023  return 0;
3024}
3025
/* Convert a DWARF call frame info operation to its string name.  */
3027
3028static const char *
3029dwarf_cfi_name (unsigned int cfi_opc)
3030{
3031  const char *name = get_DW_CFA_name (cfi_opc);
3032
3033  if (name != NULL)
3034    return name;
3035
3036  return "DW_CFA_<unknown>";
3037}
3038
3039/* This routine will generate the correct assembly data for a location
3040   description based on a cfi entry with a complex address.  */
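/* Concretely, following the code below rather than the DWARF spec at large:
   for DW_CFA_expression we first emit the register as a single data byte,
   then the uleb128 size of the location expression block, then the
   expression's operations; for DW_CFA_def_cfa_expression the leading
   register byte is omitted.  */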
3041
3042static void
3043output_cfa_loc (dw_cfi_ref cfi, int for_eh)
3044{
3045  dw_loc_descr_ref loc;
3046  unsigned long size;
3047
3048  if (cfi->dw_cfi_opc == DW_CFA_expression)
3049    {
3050      unsigned r =
3051	DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3052      dw2_asm_output_data (1, r, NULL);
3053      loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
3054    }
3055  else
3056    loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;
3057
3058  /* Output the size of the block.  */
3059  size = size_of_locs (loc);
3060  dw2_asm_output_data_uleb128 (size, NULL);
3061
3062  /* Now output the operations themselves.  */
3063  output_loc_sequence (loc, for_eh);
3064}
3065
3066/* Similar, but used for .cfi_escape.  */
3067
3068static void
3069output_cfa_loc_raw (dw_cfi_ref cfi)
3070{
3071  dw_loc_descr_ref loc;
3072  unsigned long size;
3073
3074  if (cfi->dw_cfi_opc == DW_CFA_expression)
3075    {
3076      unsigned r =
3077	DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3078      fprintf (asm_out_file, "%#x,", r);
3079      loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
3080    }
3081  else
3082    loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;
3083
3084  /* Output the size of the block.  */
3085  size = size_of_locs (loc);
3086  dw2_asm_output_data_uleb128_raw (size);
3087  fputc (',', asm_out_file);
3088
3089  /* Now output the operations themselves.  */
3090  output_loc_sequence_raw (loc);
3091}
3092
3093/* Output a Call Frame Information opcode and its operand(s).  */
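/* For example, the compact forms handled first fold their operand into the
   opcode byte itself: DW_CFA_offset for column 5 is emitted as the single
   byte 0x85 (0x80 | 5) followed by a uleb128 data-aligned offset, and
   DW_CFA_advance_loc packs a small delta into its low six bits.  Everything
   else goes through the switch on the full opcode below.  */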
3094
3095void
3096output_cfi (dw_cfi_ref cfi, dw_fde_ref fde, int for_eh)
3097{
3098  unsigned long r;
3099  HOST_WIDE_INT off;
3100
3101  if (cfi->dw_cfi_opc == DW_CFA_advance_loc)
3102    dw2_asm_output_data (1, (cfi->dw_cfi_opc
3103			     | (cfi->dw_cfi_oprnd1.dw_cfi_offset & 0x3f)),
3104			 "DW_CFA_advance_loc " HOST_WIDE_INT_PRINT_HEX,
3105			 ((unsigned HOST_WIDE_INT)
3106			  cfi->dw_cfi_oprnd1.dw_cfi_offset));
3107  else if (cfi->dw_cfi_opc == DW_CFA_offset)
3108    {
3109      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3110      dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
3111			   "DW_CFA_offset, column %#lx", r);
3112      off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
3113      dw2_asm_output_data_uleb128 (off, NULL);
3114    }
3115  else if (cfi->dw_cfi_opc == DW_CFA_restore)
3116    {
3117      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3118      dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
3119			   "DW_CFA_restore, column %#lx", r);
3120    }
3121  else
3122    {
3123      dw2_asm_output_data (1, cfi->dw_cfi_opc,
3124			   "%s", dwarf_cfi_name (cfi->dw_cfi_opc));
3125
3126      switch (cfi->dw_cfi_opc)
3127	{
3128	case DW_CFA_set_loc:
3129	  if (for_eh)
3130	    dw2_asm_output_encoded_addr_rtx (
3131		ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/1, /*global=*/0),
3132		gen_rtx_SYMBOL_REF (Pmode, cfi->dw_cfi_oprnd1.dw_cfi_addr),
3133		false, NULL);
3134	  else
3135	    dw2_asm_output_addr (DWARF2_ADDR_SIZE,
3136				 cfi->dw_cfi_oprnd1.dw_cfi_addr, NULL);
3137	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
3138	  break;
3139
3140	case DW_CFA_advance_loc1:
3141	  dw2_asm_output_delta (1, cfi->dw_cfi_oprnd1.dw_cfi_addr,
3142				fde->dw_fde_current_label, NULL);
3143	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
3144	  break;
3145
3146	case DW_CFA_advance_loc2:
3147	  dw2_asm_output_delta (2, cfi->dw_cfi_oprnd1.dw_cfi_addr,
3148				fde->dw_fde_current_label, NULL);
3149	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
3150	  break;
3151
3152	case DW_CFA_advance_loc4:
3153	  dw2_asm_output_delta (4, cfi->dw_cfi_oprnd1.dw_cfi_addr,
3154				fde->dw_fde_current_label, NULL);
3155	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
3156	  break;
3157
3158	case DW_CFA_MIPS_advance_loc8:
3159	  dw2_asm_output_delta (8, cfi->dw_cfi_oprnd1.dw_cfi_addr,
3160				fde->dw_fde_current_label, NULL);
3161	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
3162	  break;
3163
3164	case DW_CFA_offset_extended:
3165	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3166	  dw2_asm_output_data_uleb128 (r, NULL);
3167	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
3168	  dw2_asm_output_data_uleb128 (off, NULL);
3169	  break;
3170
3171	case DW_CFA_def_cfa:
3172	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3173	  dw2_asm_output_data_uleb128 (r, NULL);
3174	  dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd2.dw_cfi_offset, NULL);
3175	  break;
3176
3177	case DW_CFA_offset_extended_sf:
3178	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3179	  dw2_asm_output_data_uleb128 (r, NULL);
3180	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
3181	  dw2_asm_output_data_sleb128 (off, NULL);
3182	  break;
3183
3184	case DW_CFA_def_cfa_sf:
3185	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3186	  dw2_asm_output_data_uleb128 (r, NULL);
3187	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
3188	  dw2_asm_output_data_sleb128 (off, NULL);
3189	  break;
3190
3191	case DW_CFA_restore_extended:
3192	case DW_CFA_undefined:
3193	case DW_CFA_same_value:
3194	case DW_CFA_def_cfa_register:
3195	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3196	  dw2_asm_output_data_uleb128 (r, NULL);
3197	  break;
3198
3199	case DW_CFA_register:
3200	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3201	  dw2_asm_output_data_uleb128 (r, NULL);
3202	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, for_eh);
3203	  dw2_asm_output_data_uleb128 (r, NULL);
3204	  break;
3205
3206	case DW_CFA_def_cfa_offset:
3207	case DW_CFA_GNU_args_size:
3208	  dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd1.dw_cfi_offset, NULL);
3209	  break;
3210
3211	case DW_CFA_def_cfa_offset_sf:
3212	  off = div_data_align (cfi->dw_cfi_oprnd1.dw_cfi_offset);
3213	  dw2_asm_output_data_sleb128 (off, NULL);
3214	  break;
3215
3216	case DW_CFA_GNU_window_save:
3217	  break;
3218
3219	case DW_CFA_def_cfa_expression:
3220	case DW_CFA_expression:
3221	  output_cfa_loc (cfi, for_eh);
3222	  break;
3223
3224	case DW_CFA_GNU_negative_offset_extended:
3225	  /* Obsoleted by DW_CFA_offset_extended_sf.  */
3226	  gcc_unreachable ();
3227
3228	default:
3229	  break;
3230	}
3231    }
3232}
3233
3234/* Similar, but do it via assembler directives instead.  */
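/* E.g. a DW_CFA_def_cfa for DWARF column 7 at offset 8 comes out as
   "\t.cfi_def_cfa 7, 8" (which on x86_64 describes the CFA as %rsp + 8),
   while opcodes with no directive of their own, such as
   DW_CFA_GNU_args_size, fall back to .cfi_escape with hand-encoded
   operand bytes.  */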
3235
3236void
3237output_cfi_directive (FILE *f, dw_cfi_ref cfi)
3238{
3239  unsigned long r, r2;
3240
3241  switch (cfi->dw_cfi_opc)
3242    {
3243    case DW_CFA_advance_loc:
3244    case DW_CFA_advance_loc1:
3245    case DW_CFA_advance_loc2:
3246    case DW_CFA_advance_loc4:
3247    case DW_CFA_MIPS_advance_loc8:
3248    case DW_CFA_set_loc:
      /* These should only be created in a code path not followed when
	 emitting via directives.  The assembler is going to take care of
	 this for us.  But this routine is also used for debugging dumps,
	 so print something.  */
3253      gcc_assert (f != asm_out_file);
3254      fprintf (f, "\t.cfi_advance_loc\n");
3255      break;
3256
3257    case DW_CFA_offset:
3258    case DW_CFA_offset_extended:
3259    case DW_CFA_offset_extended_sf:
3260      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3261      fprintf (f, "\t.cfi_offset %lu, "HOST_WIDE_INT_PRINT_DEC"\n",
3262	       r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
3263      break;
3264
3265    case DW_CFA_restore:
3266    case DW_CFA_restore_extended:
3267      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3268      fprintf (f, "\t.cfi_restore %lu\n", r);
3269      break;
3270
3271    case DW_CFA_undefined:
3272      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3273      fprintf (f, "\t.cfi_undefined %lu\n", r);
3274      break;
3275
3276    case DW_CFA_same_value:
3277      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3278      fprintf (f, "\t.cfi_same_value %lu\n", r);
3279      break;
3280
3281    case DW_CFA_def_cfa:
3282    case DW_CFA_def_cfa_sf:
3283      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3284      fprintf (f, "\t.cfi_def_cfa %lu, "HOST_WIDE_INT_PRINT_DEC"\n",
3285	       r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
3286      break;
3287
3288    case DW_CFA_def_cfa_register:
3289      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3290      fprintf (f, "\t.cfi_def_cfa_register %lu\n", r);
3291      break;
3292
3293    case DW_CFA_register:
3294      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3295      r2 = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, 1);
3296      fprintf (f, "\t.cfi_register %lu, %lu\n", r, r2);
3297      break;
3298
3299    case DW_CFA_def_cfa_offset:
3300    case DW_CFA_def_cfa_offset_sf:
3301      fprintf (f, "\t.cfi_def_cfa_offset "
	       HOST_WIDE_INT_PRINT_DEC "\n",
3303	       cfi->dw_cfi_oprnd1.dw_cfi_offset);
3304      break;
3305
3306    case DW_CFA_remember_state:
3307      fprintf (f, "\t.cfi_remember_state\n");
3308      break;
3309    case DW_CFA_restore_state:
3310      fprintf (f, "\t.cfi_restore_state\n");
3311      break;
3312
3313    case DW_CFA_GNU_args_size:
3314      if (f == asm_out_file)
3315	{
3316	  fprintf (f, "\t.cfi_escape %#x,", DW_CFA_GNU_args_size);
3317	  dw2_asm_output_data_uleb128_raw (cfi->dw_cfi_oprnd1.dw_cfi_offset);
3318	  if (flag_debug_asm)
3319	    fprintf (f, "\t%s args_size "HOST_WIDE_INT_PRINT_DEC,
3320		     ASM_COMMENT_START, cfi->dw_cfi_oprnd1.dw_cfi_offset);
3321	  fputc ('\n', f);
3322	}
3323      else
3324	{
3325	  fprintf (f, "\t.cfi_GNU_args_size "HOST_WIDE_INT_PRINT_DEC "\n",
3326		   cfi->dw_cfi_oprnd1.dw_cfi_offset);
3327	}
3328      break;
3329
3330    case DW_CFA_GNU_window_save:
3331      fprintf (f, "\t.cfi_window_save\n");
3332      break;
3333
3334    case DW_CFA_def_cfa_expression:
3335      if (f != asm_out_file)
3336	{
3337	  fprintf (f, "\t.cfi_def_cfa_expression ...\n");
3338	  break;
3339	}
3340      /* FALLTHRU */
3341    case DW_CFA_expression:
3342      if (f != asm_out_file)
3343	{
3344	  fprintf (f, "\t.cfi_cfa_expression ...\n");
3345	  break;
3346	}
3347      fprintf (f, "\t.cfi_escape %#x,", cfi->dw_cfi_opc);
3348      output_cfa_loc_raw (cfi);
3349      fputc ('\n', f);
3350      break;
3351
3352    default:
3353      gcc_unreachable ();
3354    }
3355}
3356
3357void
3358dwarf2out_emit_cfi (dw_cfi_ref cfi)
3359{
3360  if (dwarf2out_do_cfi_asm ())
3361    output_cfi_directive (asm_out_file, cfi);
3362}
3363
3364static void
3365dump_cfi_row (FILE *f, dw_cfi_row *row)
3366{
3367  dw_cfi_ref cfi;
3368  unsigned i;
3369
3370  cfi = row->cfa_cfi;
3371  if (!cfi)
3372    {
3373      dw_cfa_location dummy;
3374      memset (&dummy, 0, sizeof (dummy));
3375      dummy.reg = INVALID_REGNUM;
3376      cfi = def_cfa_0 (&dummy, &row->cfa);
3377    }
3378  output_cfi_directive (f, cfi);
3379
3380  FOR_EACH_VEC_SAFE_ELT (row->reg_save, i, cfi)
3381    if (cfi)
3382      output_cfi_directive (f, cfi);
3383}
3384
3385void debug_cfi_row (dw_cfi_row *row);
3386
3387void
3388debug_cfi_row (dw_cfi_row *row)
3389{
3390  dump_cfi_row (stderr, row);
3391}
3392
3393
/* Save the result of dwarf2out_do_cfi_asm across PCH.
3395   This variable is tri-state, with 0 unset, >0 true, <0 false.  */
3396static GTY(()) signed char saved_do_cfi_asm = 0;
3397
3398/* Decide whether we want to emit frame unwind information for the current
3399   translation unit.  */
3400
3401bool
3402dwarf2out_do_frame (void)
3403{
3404  /* We want to emit correct CFA location expressions or lists, so we
3405     have to return true if we're going to output debug info, even if
3406     we're not going to output frame or unwind info.  */
3407  if (write_symbols == DWARF2_DEBUG || write_symbols == VMS_AND_DWARF2_DEBUG)
3408    return true;
3409
3410  if (saved_do_cfi_asm > 0)
3411    return true;
3412
3413  if (targetm.debug_unwind_info () == UI_DWARF2)
3414    return true;
3415
3416  if ((flag_unwind_tables || flag_exceptions)
3417      && targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
3418    return true;
3419
3420  return false;
3421}
3422
3423/* Decide whether to emit frame unwind via assembler directives.  */
3424
3425bool
3426dwarf2out_do_cfi_asm (void)
3427{
3428  int enc;
3429
3430  if (saved_do_cfi_asm != 0)
3431    return saved_do_cfi_asm > 0;
3432
3433  /* Assume failure for a moment.  */
3434  saved_do_cfi_asm = -1;
3435
3436  if (!flag_dwarf2_cfi_asm || !dwarf2out_do_frame ())
3437    return false;
3438  if (!HAVE_GAS_CFI_PERSONALITY_DIRECTIVE)
3439    return false;
3440
3441  /* Make sure the personality encoding is one the assembler can support.
3442     In particular, aligned addresses can't be handled.  */
3443  enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/2,/*global=*/1);
3444  if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
3445    return false;
3446  enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0,/*global=*/0);
3447  if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
3448    return false;
3449
3450  /* If we can't get the assembler to emit only .debug_frame, and we don't need
3451     dwarf2 unwind info for exceptions, then emit .debug_frame by hand.  */
3452  if (!HAVE_GAS_CFI_SECTIONS_DIRECTIVE
3453      && !flag_unwind_tables && !flag_exceptions
3454      && targetm_common.except_unwind_info (&global_options) != UI_DWARF2)
3455    return false;
3456
3457  /* Success!  */
3458  saved_do_cfi_asm = 1;
3459  return true;
3460}
3461
3462namespace {
3463
3464const pass_data pass_data_dwarf2_frame =
3465{
3466  RTL_PASS, /* type */
3467  "dwarf2", /* name */
3468  OPTGROUP_NONE, /* optinfo_flags */
3469  TV_FINAL, /* tv_id */
3470  0, /* properties_required */
3471  0, /* properties_provided */
3472  0, /* properties_destroyed */
3473  0, /* todo_flags_start */
3474  0, /* todo_flags_finish */
3475};
3476
3477class pass_dwarf2_frame : public rtl_opt_pass
3478{
3479public:
3480  pass_dwarf2_frame (gcc::context *ctxt)
3481    : rtl_opt_pass (pass_data_dwarf2_frame, ctxt)
3482  {}
3483
3484  /* opt_pass methods: */
3485  virtual bool gate (function *);
3486  virtual unsigned int execute (function *) { return execute_dwarf2_frame (); }
3487
3488}; // class pass_dwarf2_frame
3489
3490bool
3491pass_dwarf2_frame::gate (function *)
3492{
3493#ifndef HAVE_prologue
3494  /* Targets which still implement the prologue in assembler text
3495     cannot use the generic dwarf2 unwinding.  */
3496  return false;
3497#endif
3498
3499  /* ??? What to do for UI_TARGET unwinding?  They might be able to benefit
3500     from the optimized shrink-wrapping annotations that we will compute.
3501     For now, only produce the CFI notes for dwarf2.  */
3502  return dwarf2out_do_frame ();
3503}
3504
3505} // anon namespace
3506
3507rtl_opt_pass *
3508make_pass_dwarf2_frame (gcc::context *ctxt)
3509{
3510  return new pass_dwarf2_frame (ctxt);
3511}
3512
3513#include "gt-dwarf2cfi.h"
3514