1/* SPU specific support for 32-bit ELF
2
3   Copyright (C) 2006-2017 Free Software Foundation, Inc.
4
5   This file is part of BFD, the Binary File Descriptor library.
6
7   This program is free software; you can redistribute it and/or modify
8   it under the terms of the GNU General Public License as published by
9   the Free Software Foundation; either version 3 of the License, or
10   (at your option) any later version.
11
12   This program is distributed in the hope that it will be useful,
13   but WITHOUT ANY WARRANTY; without even the implied warranty of
14   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15   GNU General Public License for more details.
16
17   You should have received a copy of the GNU General Public License along
18   with this program; if not, write to the Free Software Foundation, Inc.,
19   51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.  */
20
21#include "sysdep.h"
22#include "libiberty.h"
23#include "bfd.h"
24#include "bfdlink.h"
25#include "libbfd.h"
26#include "elf-bfd.h"
27#include "elf/spu.h"
28#include "elf32-spu.h"
29
30/* We use RELA style relocs.  Don't define USE_REL.  */
31
32static bfd_reloc_status_type spu_elf_rel9 (bfd *, arelent *, asymbol *,
33					   void *, asection *,
34					   bfd *, char **);
35
36/* Values of type 'enum elf_spu_reloc_type' are used to index this
37   array, so it must be declared in the order of that type.  */
38
static reloc_howto_type elf_howto_table[] = {
  /* HOWTO (type, rightshift, size, bitsize, pc_relative, bitpos,
	    complain_on_overflow, special_function, name,
	    partial_inplace, src_mask, dst_mask, pcrel_offset).
     The dst_mask picks out the immediate field of the insn; bitpos
     gives the field's bit offset.  */
  HOWTO (R_SPU_NONE,       0, 3,  0, FALSE,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_NONE",
	 FALSE, 0, 0x00000000, FALSE),
  HOWTO (R_SPU_ADDR10,     4, 2, 10, FALSE, 14, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR10",
	 FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16,     2, 2, 16, FALSE,  7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_HI, 16, 2, 16, FALSE,  7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16_HI",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_LO,  0, 2, 16, FALSE,  7, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR16_LO",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR18,     0, 2, 18, FALSE,  7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR18",
	 FALSE, 0, 0x01ffff80, FALSE),
  HOWTO (R_SPU_ADDR32,     0, 2, 32, FALSE,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR32",
	 FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_REL16,      2, 2, 16,  TRUE,  7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_REL16",
	 FALSE, 0, 0x007fff80, TRUE),
  HOWTO (R_SPU_ADDR7,      0, 2,  7, FALSE, 14, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR7",
	 FALSE, 0, 0x001fc000, FALSE),
  /* The two 9-bit pc-relative relocs have split immediate fields and
     so need a special function; see spu_elf_rel9.  */
  HOWTO (R_SPU_REL9,       2, 2,  9,  TRUE,  0, complain_overflow_signed,
	 spu_elf_rel9,          "SPU_REL9",
	 FALSE, 0, 0x0180007f, TRUE),
  HOWTO (R_SPU_REL9I,      2, 2,  9,  TRUE,  0, complain_overflow_signed,
	 spu_elf_rel9,          "SPU_REL9I",
	 FALSE, 0, 0x0000c07f, TRUE),
  HOWTO (R_SPU_ADDR10I,    0, 2, 10, FALSE, 14, complain_overflow_signed,
	 bfd_elf_generic_reloc, "SPU_ADDR10I",
	 FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16I,    0, 2, 16, FALSE,  7, complain_overflow_signed,
	 bfd_elf_generic_reloc, "SPU_ADDR16I",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_REL32,      0, 2, 32, TRUE,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_REL32",
	 FALSE, 0, 0xffffffff, TRUE),
  HOWTO (R_SPU_ADDR16X,    0, 2, 16, FALSE,  7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16X",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_PPU32,      0, 2, 32, FALSE,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_PPU32",
	 FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_PPU64,      0, 4, 64, FALSE,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_PPU64",
	 FALSE, 0, -1, FALSE),
  HOWTO (R_SPU_ADD_PIC,      0, 0, 0, FALSE,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADD_PIC",
	 FALSE, 0, 0x00000000, FALSE),
};
95
/* Sections given special default type/attribute handling by the
   generic ELF code.  The 4 is the name (prefix) length; presumably
   matched as a prefix by _bfd_elf_get_special_section — confirm.  */
static struct bfd_elf_special_section const spu_elf_special_sections[] = {
  { "._ea", 4, 0, SHT_PROGBITS, SHF_WRITE },
  { ".toe", 4, 0, SHT_NOBITS, SHF_ALLOC },
  { NULL, 0, 0, 0, 0 }
};
101
102static enum elf_spu_reloc_type
103spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code)
104{
105  switch (code)
106    {
107    default:
108      return (enum elf_spu_reloc_type) -1;
109    case BFD_RELOC_NONE:
110      return R_SPU_NONE;
111    case BFD_RELOC_SPU_IMM10W:
112      return R_SPU_ADDR10;
113    case BFD_RELOC_SPU_IMM16W:
114      return R_SPU_ADDR16;
115    case BFD_RELOC_SPU_LO16:
116      return R_SPU_ADDR16_LO;
117    case BFD_RELOC_SPU_HI16:
118      return R_SPU_ADDR16_HI;
119    case BFD_RELOC_SPU_IMM18:
120      return R_SPU_ADDR18;
121    case BFD_RELOC_SPU_PCREL16:
122      return R_SPU_REL16;
123    case BFD_RELOC_SPU_IMM7:
124      return R_SPU_ADDR7;
125    case BFD_RELOC_SPU_IMM8:
126      return R_SPU_NONE;
127    case BFD_RELOC_SPU_PCREL9a:
128      return R_SPU_REL9;
129    case BFD_RELOC_SPU_PCREL9b:
130      return R_SPU_REL9I;
131    case BFD_RELOC_SPU_IMM10:
132      return R_SPU_ADDR10I;
133    case BFD_RELOC_SPU_IMM16:
134      return R_SPU_ADDR16I;
135    case BFD_RELOC_32:
136      return R_SPU_ADDR32;
137    case BFD_RELOC_32_PCREL:
138      return R_SPU_REL32;
139    case BFD_RELOC_SPU_PPU32:
140      return R_SPU_PPU32;
141    case BFD_RELOC_SPU_PPU64:
142      return R_SPU_PPU64;
143    case BFD_RELOC_SPU_ADD_PIC:
144      return R_SPU_ADD_PIC;
145    }
146}
147
148static void
149spu_elf_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED,
150		       arelent *cache_ptr,
151		       Elf_Internal_Rela *dst)
152{
153  enum elf_spu_reloc_type r_type;
154
155  r_type = (enum elf_spu_reloc_type) ELF32_R_TYPE (dst->r_info);
156  /* PR 17512: file: 90c2a92e.  */
157  if (r_type >= R_SPU_max)
158    {
159      /* xgettext:c-format */
160      _bfd_error_handler (_("%B: unrecognised SPU reloc number: %d"),
161			  abfd, r_type);
162      bfd_set_error (bfd_error_bad_value);
163      r_type = R_SPU_NONE;
164    }
165  cache_ptr->howto = &elf_howto_table[(int) r_type];
166}
167
168static reloc_howto_type *
169spu_elf_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
170			   bfd_reloc_code_real_type code)
171{
172  enum elf_spu_reloc_type r_type = spu_elf_bfd_to_reloc_type (code);
173
174  if (r_type == (enum elf_spu_reloc_type) -1)
175    return NULL;
176
177  return elf_howto_table + r_type;
178}
179
180static reloc_howto_type *
181spu_elf_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
182			   const char *r_name)
183{
184  unsigned int i;
185
186  for (i = 0; i < sizeof (elf_howto_table) / sizeof (elf_howto_table[0]); i++)
187    if (elf_howto_table[i].name != NULL
188	&& strcasecmp (elf_howto_table[i].name, r_name) == 0)
189      return &elf_howto_table[i];
190
191  return NULL;
192}
193
194/* Apply R_SPU_REL9 and R_SPU_REL9I relocs.  */
195
static bfd_reloc_status_type
spu_elf_rel9 (bfd *abfd, arelent *reloc_entry, asymbol *symbol,
	      void *data, asection *input_section,
	      bfd *output_bfd, char **error_message)
{
  bfd_size_type octets;
  bfd_vma val;
  long insn;

  /* If this is a relocatable link (output_bfd test tells us), just
     call the generic function.  Any adjustment will be done at final
     link time.  */
  if (output_bfd != NULL)
    return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
				  input_section, output_bfd, error_message);

  if (reloc_entry->address > bfd_get_section_limit (abfd, input_section))
    return bfd_reloc_outofrange;
  octets = reloc_entry->address * bfd_octets_per_byte (abfd);

  /* Get symbol value.  */
  val = 0;
  if (!bfd_is_com_section (symbol->section))
    val = symbol->value;
  if (symbol->section->output_section)
    val += symbol->section->output_section->vma;

  val += reloc_entry->addend;

  /* Make it pc-relative.  */
  val -= input_section->output_section->vma + input_section->output_offset;

  /* Branch offsets are in words; reject anything outside the signed
     9-bit range [-256, 255].  */
  val >>= 2;
  if (val + 256 >= 512)
    return bfd_reloc_overflow;

  insn = bfd_get_32 (abfd, (bfd_byte *) data + octets);

  /* Move two high bits of value to REL9I and REL9 position.
     The mask will take care of selecting the right field.  */
  val = (val & 0x7f) | ((val & 0x180) << 7) | ((val & 0x180) << 16);
  insn &= ~reloc_entry->howto->dst_mask;
  insn |= val & reloc_entry->howto->dst_mask;
  bfd_put_32 (abfd, insn, (bfd_byte *) data + octets);
  return bfd_reloc_ok;
}
242
243static bfd_boolean
244spu_elf_new_section_hook (bfd *abfd, asection *sec)
245{
246  if (!sec->used_by_bfd)
247    {
248      struct _spu_elf_section_data *sdata;
249
250      sdata = bfd_zalloc (abfd, sizeof (*sdata));
251      if (sdata == NULL)
252	return FALSE;
253      sec->used_by_bfd = sdata;
254    }
255
256  return _bfd_elf_new_section_hook (abfd, sec);
257}
258
259/* Set up overlay info for executables.  */
260
261static bfd_boolean
262spu_elf_object_p (bfd *abfd)
263{
264  if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
265    {
266      unsigned int i, num_ovl, num_buf;
267      Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
268      Elf_Internal_Ehdr *ehdr = elf_elfheader (abfd);
269      Elf_Internal_Phdr *last_phdr = NULL;
270
271      for (num_buf = 0, num_ovl = 0, i = 0; i < ehdr->e_phnum; i++, phdr++)
272	if (phdr->p_type == PT_LOAD && (phdr->p_flags & PF_OVERLAY) != 0)
273	  {
274	    unsigned int j;
275
276	    ++num_ovl;
277	    if (last_phdr == NULL
278		|| ((last_phdr->p_vaddr ^ phdr->p_vaddr) & 0x3ffff) != 0)
279	      ++num_buf;
280	    last_phdr = phdr;
281	    for (j = 1; j < elf_numsections (abfd); j++)
282	      {
283		Elf_Internal_Shdr *shdr = elf_elfsections (abfd)[j];
284
285		if (ELF_SECTION_SIZE (shdr, phdr) != 0
286		    && ELF_SECTION_IN_SEGMENT (shdr, phdr))
287		  {
288		    asection *sec = shdr->bfd_section;
289		    spu_elf_section_data (sec)->u.o.ovl_index = num_ovl;
290		    spu_elf_section_data (sec)->u.o.ovl_buf = num_buf;
291		  }
292	      }
293	  }
294    }
295  return TRUE;
296}
297
298/* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
299   strip --strip-unneeded will not remove them.  */
300
301static void
302spu_elf_backend_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED, asymbol *sym)
303{
304  if (sym->name != NULL
305      && sym->section != bfd_abs_section_ptr
306      && strncmp (sym->name, "_EAR_", 5) == 0)
307    sym->flags |= BSF_KEEP;
308}
309
310/* SPU ELF linker hash table.  */
311
struct spu_link_hash_table
{
  /* Generic ELF linker hash table; must be first.  */
  struct elf_link_hash_table elf;

  /* Linker parameters passed in by spu_elf_setup.  */
  struct spu_elf_params *params;

  /* Shortcuts to overlay sections.  */
  asection *ovtab;
  asection *init;
  asection *toe;
  asection **ovl_sec;

  /* Count of stubs in each overlay section.  */
  unsigned int *stub_count;

  /* The stub section for each overlay section.  */
  asection **stub_sec;

  /* Overlay manager entry points: __ovly_load/__icache_br_handler and
     __ovly_return/__icache_call_handler (see spu_elf_find_overlays).  */
  struct elf_link_hash_entry *ovly_entry[2];

  /* Number of overlay buffers.  */
  unsigned int num_buf;

  /* Total number of overlays.  */
  unsigned int num_overlays;

  /* For soft icache.  */
  unsigned int line_size_log2;
  unsigned int num_lines_log2;
  unsigned int fromelem_size_log2;

  /* How much memory we have.  */
  unsigned int local_store;

  /* Count of overlay stubs needed in non-overlay area.  */
  unsigned int non_ovly_stub;

  /* Pointer to the fixup section.  */
  asection *sfixup;

  /* Set on error.  */
  unsigned int stub_err : 1;
};
355
356/* Hijack the generic got fields for overlay stub accounting.  */
357
struct got_entry
{
  /* Next stub entry for the same symbol.  */
  struct got_entry *next;
  /* Overlay index this stub belongs to (see count_stub; zero
     presumably means the non-overlay area — confirm).  */
  unsigned int ovl;
  /* Either the reloc addend or the branch address, depending on
     stub flavour (set by code later in this file).  */
  union {
    bfd_vma addend;
    bfd_vma br_addr;
  };
  /* Address of the generated stub.  */
  bfd_vma stub_addr;
};
368
/* Get the SPU hash table from a bfd_link_info, checking the table id
   so we never mistake another backend's hash table for ours.  */
#define spu_hash_table(p) \
  (elf_hash_table_id ((struct elf_link_hash_table *) ((p)->hash)) \
  == SPU_ELF_DATA ? ((struct spu_link_hash_table *) ((p)->hash)) : NULL)
372
/* One edge in the call graph built by the stack/overlay analysis.
   NOTE(review): the code that fills these fields is later in the file
   and not visible in this chunk; per-field notes are inferred from
   names — confirm against the analysis code.  */
struct call_info
{
  /* The function called.  */
  struct function_info *fun;
  /* Next call made from the same caller.  */
  struct call_info *next;
  /* Presumably the number of times this call site was seen.  */
  unsigned int count;
  /* Presumably the maximum call depth below this edge.  */
  unsigned int max_depth;
  /* Tail call rather than a normal call.  */
  unsigned int is_tail : 1;
  /* Fall-through to a pasted (hot/cold split) section.  */
  unsigned int is_pasted : 1;
  /* Set once a cycle through this edge has been broken.  */
  unsigned int broken_cycle : 1;
  /* Call priority, 13 bits.  */
  unsigned int priority : 13;
};
384
/* Per-function node of the call graph.  */
struct function_info
{
  /* List of functions called.  Also branches to hot/cold part of
     function.  */
  struct call_info *call_list;
  /* For hot/cold part of function, point to owner.  */
  struct function_info *start;
  /* Symbol at start of function.  */
  union {
    Elf_Internal_Sym *sym;
    struct elf_link_hash_entry *h;
  } u;
  /* Function section.  */
  asection *sec;
  asection *rodata;
  /* Where last called from, and number of sections called from.  */
  asection *last_caller;
  unsigned int call_count;
  /* Address range of (this part of) function.  */
  bfd_vma lo, hi;
  /* Offset where we found a store of lr, or -1 if none found.  */
  bfd_vma lr_store;
  /* Offset where we found the stack adjustment insn.  */
  bfd_vma sp_adjust;
  /* Stack usage.  */
  int stack;
  /* Distance from root of call tree.  Tail and hot/cold branches
     count as one deeper.  We aren't counting stack frames here.  */
  unsigned int depth;
  /* Set if global symbol.  */
  unsigned int global : 1;
  /* Set if known to be start of function (as distinct from a hunk
     in hot/cold section).  */
  unsigned int is_func : 1;
  /* Set if not a root node.  */
  unsigned int non_root : 1;
  /* Flags used during call tree traversal.  It's cheaper to replicate
     the visit flags than have one which needs clearing after a traversal.  */
  unsigned int visit1 : 1;
  unsigned int visit2 : 1;
  unsigned int marking : 1;
  unsigned int visit3 : 1;
  unsigned int visit4 : 1;
  unsigned int visit5 : 1;
  unsigned int visit6 : 1;
  unsigned int visit7 : 1;
};
432
/* Per-section table of functions, grown as needed.  */
struct spu_elf_stack_info
{
  /* Entries in use / allocated in FUN (presumably — confirm against
     the code that grows this array, later in the file).  */
  int num_fun;
  int max_fun;
  /* Variable size array describing functions, one per contiguous
     address range belonging to a function.  */
  struct function_info fun[1];
};
441
442static struct function_info *find_function (asection *, bfd_vma,
443					    struct bfd_link_info *);
444
445/* Create a spu ELF linker hash table.  */
446
447static struct bfd_link_hash_table *
448spu_elf_link_hash_table_create (bfd *abfd)
449{
450  struct spu_link_hash_table *htab;
451
452  htab = bfd_zmalloc (sizeof (*htab));
453  if (htab == NULL)
454    return NULL;
455
456  if (!_bfd_elf_link_hash_table_init (&htab->elf, abfd,
457				      _bfd_elf_link_hash_newfunc,
458				      sizeof (struct elf_link_hash_entry),
459				      SPU_ELF_DATA))
460    {
461      free (htab);
462      return NULL;
463    }
464
465  htab->elf.init_got_refcount.refcount = 0;
466  htab->elf.init_got_refcount.glist = NULL;
467  htab->elf.init_got_offset.offset = 0;
468  htab->elf.init_got_offset.glist = NULL;
469  return &htab->elf.root;
470}
471
472void
473spu_elf_setup (struct bfd_link_info *info, struct spu_elf_params *params)
474{
475  bfd_vma max_branch_log2;
476
477  struct spu_link_hash_table *htab = spu_hash_table (info);
478  htab->params = params;
479  htab->line_size_log2 = bfd_log2 (htab->params->line_size);
480  htab->num_lines_log2 = bfd_log2 (htab->params->num_lines);
481
482  /* For the software i-cache, we provide a "from" list whose size
483     is a power-of-two number of quadwords, big enough to hold one
484     byte per outgoing branch.  Compute this number here.  */
485  max_branch_log2 = bfd_log2 (htab->params->max_branch);
486  htab->fromelem_size_log2 = max_branch_log2 > 4 ? max_branch_log2 - 4 : 0;
487}
488
489/* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
490   to (hash, NULL) for global symbols, and (NULL, sym) for locals.  Set
491   *SYMSECP to the symbol's section.  *LOCSYMSP caches local syms.  */
492
493static bfd_boolean
494get_sym_h (struct elf_link_hash_entry **hp,
495	   Elf_Internal_Sym **symp,
496	   asection **symsecp,
497	   Elf_Internal_Sym **locsymsp,
498	   unsigned long r_symndx,
499	   bfd *ibfd)
500{
501  Elf_Internal_Shdr *symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
502
503  if (r_symndx >= symtab_hdr->sh_info)
504    {
505      struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (ibfd);
506      struct elf_link_hash_entry *h;
507
508      h = sym_hashes[r_symndx - symtab_hdr->sh_info];
509      while (h->root.type == bfd_link_hash_indirect
510	     || h->root.type == bfd_link_hash_warning)
511	h = (struct elf_link_hash_entry *) h->root.u.i.link;
512
513      if (hp != NULL)
514	*hp = h;
515
516      if (symp != NULL)
517	*symp = NULL;
518
519      if (symsecp != NULL)
520	{
521	  asection *symsec = NULL;
522	  if (h->root.type == bfd_link_hash_defined
523	      || h->root.type == bfd_link_hash_defweak)
524	    symsec = h->root.u.def.section;
525	  *symsecp = symsec;
526	}
527    }
528  else
529    {
530      Elf_Internal_Sym *sym;
531      Elf_Internal_Sym *locsyms = *locsymsp;
532
533      if (locsyms == NULL)
534	{
535	  locsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
536	  if (locsyms == NULL)
537	    locsyms = bfd_elf_get_elf_syms (ibfd, symtab_hdr,
538					    symtab_hdr->sh_info,
539					    0, NULL, NULL, NULL);
540	  if (locsyms == NULL)
541	    return FALSE;
542	  *locsymsp = locsyms;
543	}
544      sym = locsyms + r_symndx;
545
546      if (hp != NULL)
547	*hp = NULL;
548
549      if (symp != NULL)
550	*symp = sym;
551
552      if (symsecp != NULL)
553	*symsecp = bfd_section_from_elf_index (ibfd, sym->st_shndx);
554    }
555
556  return TRUE;
557}
558
559/* Create the note section if not already present.  This is done early so
560   that the linker maps the sections to the right place in the output.  */
561
bfd_boolean
spu_elf_create_sections (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *ibfd;

  /* If any input already has the note section, nothing to create.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    if (bfd_get_section_by_name (ibfd, SPU_PTNOTE_SPUNAME) != NULL)
      break;

  if (ibfd == NULL)
    {
      /* Make SPU_PTNOTE_SPUNAME section.  */
      asection *s;
      size_t name_len;
      size_t size;
      bfd_byte *data;
      flagword flags;

      /* Attach it to the first input bfd.  */
      ibfd = info->input_bfds;
      flags = SEC_LOAD | SEC_READONLY | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      s = bfd_make_section_anyway_with_flags (ibfd, SPU_PTNOTE_SPUNAME, flags);
      if (s == NULL
	  || !bfd_set_section_alignment (ibfd, s, 4))
	return FALSE;

      /* ELF note layout: 12-byte header (namesz, descsz, type) then
	 the name and descriptor, each padded to a 4-byte multiple.
	 The name is SPU_PLUGIN_NAME, the descriptor the output
	 filename (including its terminating NUL).  */
      name_len = strlen (bfd_get_filename (info->output_bfd)) + 1;
      size = 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4);
      size += (name_len + 3) & -4;

      if (!bfd_set_section_size (ibfd, s, size))
	return FALSE;

      data = bfd_zalloc (ibfd, size);
      if (data == NULL)
	return FALSE;

      bfd_put_32 (ibfd, sizeof (SPU_PLUGIN_NAME), data + 0);	/* namesz */
      bfd_put_32 (ibfd, name_len, data + 4);			/* descsz */
      bfd_put_32 (ibfd, 1, data + 8);				/* type */
      memcpy (data + 12, SPU_PLUGIN_NAME, sizeof (SPU_PLUGIN_NAME));
      memcpy (data + 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4),
	      bfd_get_filename (info->output_bfd), name_len);
      s->contents = data;
    }

  if (htab->params->emit_fixups)
    {
      /* Create the .fixup section, noting it in the hash table.  */
      asection *s;
      flagword flags;

      if (htab->elf.dynobj == NULL)
	htab->elf.dynobj = ibfd;
      ibfd = htab->elf.dynobj;
      flags = (SEC_LOAD | SEC_ALLOC | SEC_READONLY | SEC_HAS_CONTENTS
	       | SEC_IN_MEMORY | SEC_LINKER_CREATED);
      s = bfd_make_section_anyway_with_flags (ibfd, ".fixup", flags);
      if (s == NULL || !bfd_set_section_alignment (ibfd, s, 2))
	return FALSE;
      htab->sfixup = s;
    }

  return TRUE;
}
626
627/* qsort predicate to sort sections by vma.  */
628
629static int
630sort_sections (const void *a, const void *b)
631{
632  const asection *const *s1 = a;
633  const asection *const *s2 = b;
634  bfd_signed_vma delta = (*s1)->vma - (*s2)->vma;
635
636  if (delta != 0)
637    return delta < 0 ? -1 : 1;
638
639  return (*s1)->index - (*s2)->index;
640}
641
642/* Identify overlays in the output bfd, and number them.
643   Returns 0 on error, 1 if no overlays, 2 if overlays.  */
644
int
spu_elf_find_overlays (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  asection **alloc_sec;
  unsigned int i, n, ovl_index, num_buf;
  asection *s;
  bfd_vma ovl_end;
  /* Overlay manager entry points, indexed [entry][ovly_flavour].  */
  static const char *const entry_names[2][2] = {
    { "__ovly_load", "__icache_br_handler" },
    { "__ovly_return", "__icache_call_handler" }
  };

  if (info->output_bfd->section_count < 2)
    return 1;

  alloc_sec
    = bfd_malloc (info->output_bfd->section_count * sizeof (*alloc_sec));
  if (alloc_sec == NULL)
    return 0;

  /* Pick out all the alloced sections.  */
  for (n = 0, s = info->output_bfd->sections; s != NULL; s = s->next)
    if ((s->flags & SEC_ALLOC) != 0
	&& (s->flags & (SEC_LOAD | SEC_THREAD_LOCAL)) != SEC_THREAD_LOCAL
	&& s->size != 0)
      alloc_sec[n++] = s;

  if (n == 0)
    {
      free (alloc_sec);
      return 1;
    }

  /* Sort them by vma.  */
  qsort (alloc_sec, n, sizeof (*alloc_sec), sort_sections);

  ovl_end = alloc_sec[0]->vma + alloc_sec[0]->size;
  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      unsigned int prev_buf = 0, set_id = 0;

      /* Look for an overlapping vma to find the first overlay section.  */
      bfd_vma vma_start = 0;

      for (i = 1; i < n; i++)
	{
	  s = alloc_sec[i];
	  if (s->vma < ovl_end)
	    {
	      /* Found the start of the cache area; it spans
		 num_lines * line_size bytes from here.  */
	      asection *s0 = alloc_sec[i - 1];
	      vma_start = s0->vma;
	      ovl_end = (s0->vma
			 + ((bfd_vma) 1
			    << (htab->num_lines_log2 + htab->line_size_log2)));
	      --i;
	      break;
	    }
	  else
	    ovl_end = s->vma + s->size;
	}

      /* Now find any sections within the cache area.  */
      for (ovl_index = 0, num_buf = 0; i < n; i++)
	{
	  s = alloc_sec[i];
	  if (s->vma >= ovl_end)
	    break;

	  /* A section in an overlay area called .ovl.init is not
	     an overlay, in the sense that it might be loaded in
	     by the overlay manager, but rather the initial
	     section contents for the overlay buffer.  */
	  if (strncmp (s->name, ".ovl.init", 9) != 0)
	    {
	      /* num_buf is the cache line; set_id counts sections
		 stacked on the same line.  */
	      num_buf = ((s->vma - vma_start) >> htab->line_size_log2) + 1;
	      set_id = (num_buf == prev_buf)? set_id + 1 : 0;
	      prev_buf = num_buf;

	      if ((s->vma - vma_start) & (htab->params->line_size - 1))
		{
		  info->callbacks->einfo (_("%X%P: overlay section %A "
					    "does not start on a cache line.\n"),
					  s);
		  bfd_set_error (bfd_error_bad_value);
		  return 0;
		}
	      else if (s->size > htab->params->line_size)
		{
		  info->callbacks->einfo (_("%X%P: overlay section %A "
					    "is larger than a cache line.\n"),
					  s);
		  bfd_set_error (bfd_error_bad_value);
		  return 0;
		}

	      /* Compact the overlay sections to the front of
		 alloc_sec as we number them.  */
	      alloc_sec[ovl_index++] = s;
	      spu_elf_section_data (s)->u.o.ovl_index
		= (set_id << htab->num_lines_log2) + num_buf;
	      spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
	    }
	}

      /* Ensure there are no more overlay sections.  */
      for ( ; i < n; i++)
	{
	  s = alloc_sec[i];
	  if (s->vma < ovl_end)
	    {
	      info->callbacks->einfo (_("%X%P: overlay section %A "
					"is not in cache area.\n"),
				      alloc_sec[i-1]);
	      bfd_set_error (bfd_error_bad_value);
	      return 0;
	    }
	  else
	    ovl_end = s->vma + s->size;
	}
    }
  else
    {
      /* Look for overlapping vmas.  Any with overlap must be overlays.
	 Count them.  Also count the number of overlay regions.  */
      for (ovl_index = 0, num_buf = 0, i = 1; i < n; i++)
	{
	  s = alloc_sec[i];
	  if (s->vma < ovl_end)
	    {
	      asection *s0 = alloc_sec[i - 1];

	      /* First overlap with s0: s0 itself is the first overlay
		 of a new buffer (unless it is .ovl.init).  */
	      if (spu_elf_section_data (s0)->u.o.ovl_index == 0)
		{
		  ++num_buf;
		  if (strncmp (s0->name, ".ovl.init", 9) != 0)
		    {
		      alloc_sec[ovl_index] = s0;
		      spu_elf_section_data (s0)->u.o.ovl_index = ++ovl_index;
		      spu_elf_section_data (s0)->u.o.ovl_buf = num_buf;
		    }
		  else
		    ovl_end = s->vma + s->size;
		}
	      if (strncmp (s->name, ".ovl.init", 9) != 0)
		{
		  alloc_sec[ovl_index] = s;
		  spu_elf_section_data (s)->u.o.ovl_index = ++ovl_index;
		  spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
		  /* All overlays in one buffer must share a vma.  */
		  if (s0->vma != s->vma)
		    {
		      /* xgettext:c-format */
		      info->callbacks->einfo (_("%X%P: overlay sections %A "
						"and %A do not start at the "
						"same address.\n"),
					      s0, s);
		      bfd_set_error (bfd_error_bad_value);
		      return 0;
		    }
		  if (ovl_end < s->vma + s->size)
		    ovl_end = s->vma + s->size;
		}
	    }
	  else
	    ovl_end = s->vma + s->size;
	}
    }

  /* alloc_sec ownership passes to the hash table here.  */
  htab->num_overlays = ovl_index;
  htab->num_buf = num_buf;
  htab->ovl_sec = alloc_sec;

  if (ovl_index == 0)
    return 1;

  /* Make sure the overlay manager entry symbols exist as (at least)
     undefined symbols so later code can reference them.  */
  for (i = 0; i < 2; i++)
    {
      const char *name;
      struct elf_link_hash_entry *h;

      name = entry_names[i][htab->params->ovly_flavour];
      h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
      if (h == NULL)
	return 0;

      if (h->root.type == bfd_link_hash_new)
	{
	  h->root.type = bfd_link_hash_undefined;
	  h->ref_regular = 1;
	  h->ref_regular_nonweak = 1;
	  h->non_elf = 0;
	}
      htab->ovly_entry[i] = h;
    }

  return 2;
}
840
/* Non-zero to use bra in overlay stubs rather than br.  */
#define BRA_STUBS 0

/* SPU instruction words, opcode in the high bits with operand fields
   zero (the bra/brasl/br/brsl encodings match the bit patterns listed
   in the comment above is_branch below).  */
#define BRA	0x30000000
#define BRASL	0x31000000
#define BR	0x32000000
#define BRSL	0x33000000
#define NOP	0x40200000
#define LNOP	0x00200000
#define ILA	0x42000000
851
852/* Return true for all relative and absolute branch instructions.
853   bra   00110000 0..
854   brasl 00110001 0..
855   br    00110010 0..
856   brsl  00110011 0..
857   brz   00100000 0..
858   brnz  00100001 0..
859   brhz  00100010 0..
860   brhnz 00100011 0..  */
861
862static bfd_boolean
863is_branch (const unsigned char *insn)
864{
865  return (insn[0] & 0xec) == 0x20 && (insn[1] & 0x80) == 0;
866}
867
868/* Return true for all indirect branch instructions.
869   bi     00110101 000
870   bisl   00110101 001
871   iret   00110101 010
872   bisled 00110101 011
873   biz    00100101 000
874   binz   00100101 001
875   bihz   00100101 010
876   bihnz  00100101 011  */
877
878static bfd_boolean
879is_indirect_branch (const unsigned char *insn)
880{
881  return (insn[0] & 0xef) == 0x25 && (insn[1] & 0x80) == 0;
882}
883
884/* Return true for branch hint instructions.
885   hbra  0001000..
886   hbrr  0001001..  */
887
888static bfd_boolean
889is_hint (const unsigned char *insn)
890{
891  return (insn[0] & 0xfc) == 0x10;
892}
893
894/* True if INPUT_SECTION might need overlay stubs.  */
895
896static bfd_boolean
897maybe_needs_stubs (asection *input_section)
898{
899  /* No stubs for debug sections and suchlike.  */
900  if ((input_section->flags & SEC_ALLOC) == 0)
901    return FALSE;
902
903  /* No stubs for link-once sections that will be discarded.  */
904  if (input_section->output_section == bfd_abs_section_ptr)
905    return FALSE;
906
907  /* Don't create stubs for .eh_frame references.  */
908  if (strcmp (input_section->name, ".eh_frame") == 0)
909    return FALSE;
910
911  return TRUE;
912}
913
/* Kinds of overlay stub needed for a reloc, as decided by
   needs_ovl_stub below.  */
enum _stub_type
{
  /* No stub required.  */
  no_stub,
  /* Stub for a call (or branch with lr live unknown-zero).  */
  call_ovl_stub,
  /* Branch stubs; the three low bits are the "lrlive" value taken
     from the branch instruction (needs_ovl_stub computes
     br000_ovl_stub + lrlive).  */
  br000_ovl_stub,
  br001_ovl_stub,
  br010_ovl_stub,
  br011_ovl_stub,
  br100_ovl_stub,
  br101_ovl_stub,
  br110_ovl_stub,
  br111_ovl_stub,
  /* Stub in the non-overlay area, for taking a function's address.  */
  nonovl_stub,
  /* Error reading section contents.  */
  stub_error
};
929
930/* Return non-zero if this reloc symbol should go via an overlay stub.
931   Return 2 if the stub must be in non-overlay area.  */
932
static enum _stub_type
needs_ovl_stub (struct elf_link_hash_entry *h,
		Elf_Internal_Sym *sym,
		asection *sym_sec,
		asection *input_section,
		Elf_Internal_Rela *irela,
		bfd_byte *contents,
		struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  enum elf_spu_reloc_type r_type;
  unsigned int sym_type;
  bfd_boolean branch, hint, call;
  enum _stub_type ret = no_stub;
  bfd_byte insn[4];

  /* Discarded sections, or output sections without our per-section
     data, can never need a stub.  */
  if (sym_sec == NULL
      || sym_sec->output_section == bfd_abs_section_ptr
      || spu_elf_section_data (sym_sec->output_section) == NULL)
    return ret;

  if (h != NULL)
    {
      /* Ensure no stubs for user supplied overlay manager syms.  */
      if (h == htab->ovly_entry[0] || h == htab->ovly_entry[1])
	return ret;

      /* setjmp always goes via an overlay stub, because then the return
	 and hence the longjmp goes via __ovly_return.  That magically
	 makes setjmp/longjmp between overlays work.  */
      if (strncmp (h->root.root.string, "setjmp", 6) == 0
	  && (h->root.root.string[6] == '\0' || h->root.root.string[6] == '@'))
	ret = call_ovl_stub;
    }

  if (h != NULL)
    sym_type = h->type;
  else
    sym_type = ELF_ST_TYPE (sym->st_info);

  r_type = ELF32_R_TYPE (irela->r_info);
  branch = FALSE;
  hint = FALSE;
  call = FALSE;
  if (r_type == R_SPU_REL16 || r_type == R_SPU_ADDR16)
    {
      /* Only these reloc types can appear on branch/hint insns.  Read
	 the four instruction bytes at the reloc if the caller didn't
	 hand us the section contents.  */
      if (contents == NULL)
	{
	  contents = insn;
	  if (!bfd_get_section_contents (input_section->owner,
					 input_section,
					 contents,
					 irela->r_offset, 4))
	    return stub_error;
	}
      else
	contents += irela->r_offset;

      branch = is_branch (contents);
      hint = is_hint (contents);
      if (branch || hint)
	{
	  /* 0x31/0x33 top byte: brasl or brsl, i.e. a call.  */
	  call = (contents[0] & 0xfd) == 0x31;
	  if (call
	      && sym_type != STT_FUNC
	      && contents != insn)
	    {
	      /* It's common for people to write assembly and forget
		 to give function symbols the right type.  Handle
		 calls to such symbols, but warn so that (hopefully)
		 people will fix their code.  We need the symbol
		 type to be correct to distinguish function pointer
		 initialisation from other pointer initialisations.  */
	      const char *sym_name;

	      if (h != NULL)
		sym_name = h->root.root.string;
	      else
		{
		  Elf_Internal_Shdr *symtab_hdr;
		  symtab_hdr = &elf_tdata (input_section->owner)->symtab_hdr;
		  sym_name = bfd_elf_sym_name (input_section->owner,
					       symtab_hdr,
					       sym,
					       sym_sec);
		}
	      /* NOTE(review): the bfd arg precedes sym_name because the
		 old-style error handler consumes %A/%B args ahead of
		 other args regardless of position — confirm this file
		 targets pre-%pB binutils conventions.  */
	      _bfd_error_handler
		/* xgettext:c-format */
		(_("warning: call to non-function symbol %s defined in %B"),
		 sym_sec->owner, sym_name);

	    }
	}
    }

  /* Soft-icache only stubs branches; otherwise data references into
     non-code sections need no stub either.  */
  if ((!branch && htab->params->ovly_flavour == ovly_soft_icache)
      || (sym_type != STT_FUNC
	  && !(branch || hint)
	  && (sym_sec->flags & SEC_CODE) == 0))
    return no_stub;

  /* Usually, symbols in non-overlay sections don't need stubs.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index == 0
      && !htab->params->non_overlay_stubs)
    return ret;

  /* A reference from some other section to a symbol in an overlay
     section needs a stub.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index
       != spu_elf_section_data (input_section->output_section)->u.o.ovl_index)
    {
      /* The branch insn encodes which of the link-register bytes are
	 live in bits 9-11 (presumably set by the compiler — confirm
	 against the stub-emission code later in the file).  */
      unsigned int lrlive = 0;
      if (branch)
	lrlive = (contents[1] & 0x70) >> 4;

      if (!lrlive && (call || sym_type == STT_FUNC))
	ret = call_ovl_stub;
      else
	ret = br000_ovl_stub + lrlive;
    }

  /* If this insn isn't a branch then we are possibly taking the
     address of a function and passing it out somehow.  Soft-icache code
     always generates inline code to do indirect branches.  */
  if (!(branch || hint)
      && sym_type == STT_FUNC
      && htab->params->ovly_flavour != ovly_soft_icache)
    ret = nonovl_stub;

  return ret;
}
1064
1065static bfd_boolean
1066count_stub (struct spu_link_hash_table *htab,
1067	    bfd *ibfd,
1068	    asection *isec,
1069	    enum _stub_type stub_type,
1070	    struct elf_link_hash_entry *h,
1071	    const Elf_Internal_Rela *irela)
1072{
1073  unsigned int ovl = 0;
1074  struct got_entry *g, **head;
1075  bfd_vma addend;
1076
1077  /* If this instruction is a branch or call, we need a stub
1078     for it.  One stub per function per overlay.
1079     If it isn't a branch, then we are taking the address of
1080     this function so need a stub in the non-overlay area
1081     for it.  One stub per function.  */
1082  if (stub_type != nonovl_stub)
1083    ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;
1084
1085  if (h != NULL)
1086    head = &h->got.glist;
1087  else
1088    {
1089      if (elf_local_got_ents (ibfd) == NULL)
1090	{
1091	  bfd_size_type amt = (elf_tdata (ibfd)->symtab_hdr.sh_info
1092			       * sizeof (*elf_local_got_ents (ibfd)));
1093	  elf_local_got_ents (ibfd) = bfd_zmalloc (amt);
1094	  if (elf_local_got_ents (ibfd) == NULL)
1095	    return FALSE;
1096	}
1097      head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);
1098    }
1099
1100  if (htab->params->ovly_flavour == ovly_soft_icache)
1101    {
1102      htab->stub_count[ovl] += 1;
1103      return TRUE;
1104    }
1105
1106  addend = 0;
1107  if (irela != NULL)
1108    addend = irela->r_addend;
1109
1110  if (ovl == 0)
1111    {
1112      struct got_entry *gnext;
1113
1114      for (g = *head; g != NULL; g = g->next)
1115	if (g->addend == addend && g->ovl == 0)
1116	  break;
1117
1118      if (g == NULL)
1119	{
1120	  /* Need a new non-overlay area stub.  Zap other stubs.  */
1121	  for (g = *head; g != NULL; g = gnext)
1122	    {
1123	      gnext = g->next;
1124	      if (g->addend == addend)
1125		{
1126		  htab->stub_count[g->ovl] -= 1;
1127		  free (g);
1128		}
1129	    }
1130	}
1131    }
1132  else
1133    {
1134      for (g = *head; g != NULL; g = g->next)
1135	if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
1136	  break;
1137    }
1138
1139  if (g == NULL)
1140    {
1141      g = bfd_malloc (sizeof *g);
1142      if (g == NULL)
1143	return FALSE;
1144      g->ovl = ovl;
1145      g->addend = addend;
1146      g->stub_addr = (bfd_vma) -1;
1147      g->next = *head;
1148      *head = g;
1149
1150      htab->stub_count[ovl] += 1;
1151    }
1152
1153  return TRUE;
1154}
1155
1156/* Support two sizes of overlay stubs, a slower more compact stub of two
1157   instructions, and a faster stub of four instructions.
1158   Soft-icache stubs are four or eight words.  */
1159
1160static unsigned int
1161ovl_stub_size (struct spu_elf_params *params)
1162{
1163  return 16 << params->ovly_flavour >> params->compact_stub;
1164}
1165
1166static unsigned int
1167ovl_stub_size_log2 (struct spu_elf_params *params)
1168{
1169  return 4 + params->ovly_flavour - params->compact_stub;
1170}
1171
1172/* Two instruction overlay stubs look like:
1173
1174   brsl $75,__ovly_load
1175   .word target_ovl_and_address
1176
1177   ovl_and_address is a word with the overlay number in the top 14 bits
1178   and local store address in the bottom 18 bits.
1179
1180   Four instruction overlay stubs look like:
1181
1182   ila $78,ovl_number
1183   lnop
1184   ila $79,target_address
1185   br __ovly_load
1186
1187   Software icache stubs are:
1188
1189   .word target_index
1190   .word target_ia;
1191   .word lrlive_branchlocalstoreaddr;
1192   brasl $75,__icache_br_handler
1193   .quad xor_pattern
1194*/
1195
/* Emit the overlay stub of kind STUB_TYPE for the reference from
   section ISEC of IBFD described by IRELA (NULL for _SPUEAR_ stubs),
   targeting address DEST in DEST_SEC.  H is the global symbol
   involved, or NULL for a local symbol.  The stub is written at the
   current end of the stub section for the relevant overlay, and the
   symbol's got_entry records the stub address.  Returns FALSE on
   error.  */

static bfd_boolean
build_stub (struct bfd_link_info *info,
	    bfd *ibfd,
	    asection *isec,
	    enum _stub_type stub_type,
	    struct elf_link_hash_entry *h,
	    const Elf_Internal_Rela *irela,
	    bfd_vma dest,
	    asection *dest_sec)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  unsigned int ovl, dest_ovl, set_id;
  struct got_entry *g, **head;
  asection *sec;
  bfd_vma addend, from, to, br_dest, patt;
  unsigned int lrlive;

  /* Branches get a stub in the calling overlay's stub section;
     address-taking uses the non-overlay stub area (ovl 0).  */
  ovl = 0;
  if (stub_type != nonovl_stub)
    ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;

  if (h != NULL)
    head = &h->got.glist;
  else
    head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);

  addend = 0;
  if (irela != NULL)
    addend = irela->r_addend;

  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      /* Soft-icache stubs are one per branch;  record the branch
	 address so the icache manager can rewrite it.  */
      g = bfd_malloc (sizeof *g);
      if (g == NULL)
	return FALSE;
      g->ovl = ovl;
      g->br_addr = 0;
      if (irela != NULL)
	g->br_addr = (irela->r_offset
		      + isec->output_offset
		      + isec->output_section->vma);
      g->next = *head;
      *head = g;
    }
  else
    {
      /* Find the entry made by count_stub;  it must exist.  */
      for (g = *head; g != NULL; g = g->next)
	if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
	  break;
      if (g == NULL)
	abort ();

      /* A non-overlay stub serves all overlays;  don't duplicate.  */
      if (g->ovl == 0 && ovl != 0)
	return TRUE;

      /* Already built.  */
      if (g->stub_addr != (bfd_vma) -1)
	return TRUE;
    }

  /* The stub goes at the current end of this overlay's stub section.
     TO is the overlay manager entry point.  */
  sec = htab->stub_sec[ovl];
  dest += dest_sec->output_offset + dest_sec->output_section->vma;
  from = sec->size + sec->output_offset + sec->output_section->vma;
  g->stub_addr = from;
  to = (htab->ovly_entry[0]->root.u.def.value
	+ htab->ovly_entry[0]->root.u.def.section->output_offset
	+ htab->ovly_entry[0]->root.u.def.section->output_section->vma);

  /* All instruction addresses involved must be word aligned.  */
  if (((dest | to | from) & 3) != 0)
    {
      htab->stub_err = 1;
      return FALSE;
    }
  dest_ovl = spu_elf_section_data (dest_sec->output_section)->u.o.ovl_index;

  if (htab->params->ovly_flavour == ovly_normal
      && !htab->params->compact_stub)
    {
      /* Four-instruction stub: ila $78,ovl; lnop; ila $79,dest;
	 br/bra __ovly_load.  */
      bfd_put_32 (sec->owner, ILA + ((dest_ovl << 7) & 0x01ffff80) + 78,
		  sec->contents + sec->size);
      bfd_put_32 (sec->owner, LNOP,
		  sec->contents + sec->size + 4);
      bfd_put_32 (sec->owner, ILA + ((dest << 7) & 0x01ffff80) + 79,
		  sec->contents + sec->size + 8);
      if (!BRA_STUBS)
	bfd_put_32 (sec->owner, BR + (((to - (from + 12)) << 5) & 0x007fff80),
		    sec->contents + sec->size + 12);
      else
	bfd_put_32 (sec->owner, BRA + ((to << 5) & 0x007fff80),
		    sec->contents + sec->size + 12);
    }
  else if (htab->params->ovly_flavour == ovly_normal
	   && htab->params->compact_stub)
    {
      /* Two-word stub: brsl/brasl $75,__ovly_load followed by a data
	 word holding overlay number and local-store address.  */
      if (!BRA_STUBS)
	bfd_put_32 (sec->owner, BRSL + (((to - from) << 5) & 0x007fff80) + 75,
		    sec->contents + sec->size);
      else
	bfd_put_32 (sec->owner, BRASL + ((to << 5) & 0x007fff80) + 75,
		    sec->contents + sec->size);
      bfd_put_32 (sec->owner, (dest & 0x3ffff) | (dest_ovl << 18),
		  sec->contents + sec->size + 4);
    }
  else if (htab->params->ovly_flavour == ovly_soft_icache
	   && htab->params->compact_stub)
    {
      /* Determine which registers are live across the branch so the
	 icache manager knows what to preserve.  */
      lrlive = 0;
      if (stub_type == nonovl_stub)
	;
      else if (stub_type == call_ovl_stub)
	/* A brsl makes lr live and *(*sp+16) is live.
	   Tail calls have the same liveness.  */
	lrlive = 5;
      else if (!htab->params->lrlive_analysis)
	/* Assume stack frame and lr save.  */
	lrlive = 1;
      else if (irela != NULL)
	{
	  /* Analyse branch instructions.  */
	  struct function_info *caller;
	  bfd_vma off;

	  caller = find_function (isec, irela->r_offset, info);
	  if (caller->start == NULL)
	    off = irela->r_offset;
	  else
	    {
	      struct function_info *found = NULL;

	      /* Find the earliest piece of this function that
		 has frame adjusting instructions.  We might
		 see dynamic frame adjustment (eg. for alloca)
		 in some later piece, but functions using
		 alloca always set up a frame earlier.  Frame
		 setup instructions are always in one piece.  */
	      if (caller->lr_store != (bfd_vma) -1
		  || caller->sp_adjust != (bfd_vma) -1)
		found = caller;
	      while (caller->start != NULL)
		{
		  caller = caller->start;
		  if (caller->lr_store != (bfd_vma) -1
		      || caller->sp_adjust != (bfd_vma) -1)
		    found = caller;
		}
	      if (found != NULL)
		caller = found;
	      off = (bfd_vma) -1;
	    }

	  if (off > caller->sp_adjust)
	    {
	      if (off > caller->lr_store)
		/* Only *(*sp+16) is live.  */
		lrlive = 1;
	      else
		/* If no lr save, then we must be in a
		   leaf function with a frame.
		   lr is still live.  */
		lrlive = 4;
	    }
	  else if (off > caller->lr_store)
	    {
	      /* Between lr save and stack adjust.  */
	      lrlive = 3;
	      /* This should never happen since prologues won't
		 be split here.  */
	      BFD_ASSERT (0);
	    }
	  else
	    /* On entry to function.  */
	    lrlive = 5;

	  if (stub_type != br000_ovl_stub
	      && lrlive != stub_type - br000_ovl_stub)
	    /* xgettext:c-format */
	    info->callbacks->einfo (_("%A:0x%v lrlive .brinfo (%u) differs "
				      "from analysis (%u)\n"),
				    isec, irela->r_offset, lrlive,
				    stub_type - br000_ovl_stub);
	}

      /* If given lrlive info via .brinfo, use it.  */
      if (stub_type > br000_ovl_stub)
	lrlive = stub_type - br000_ovl_stub;

      /* Stubs in the non-overlay area use the second overlay manager
	 entry point.  */
      if (ovl == 0)
	to = (htab->ovly_entry[1]->root.u.def.value
	      + htab->ovly_entry[1]->root.u.def.section->output_offset
	      + htab->ovly_entry[1]->root.u.def.section->output_section->vma);

      /* The branch that uses this stub goes to stub_addr + 4.  We'll
	 set up an xor pattern that can be used by the icache manager
	 to modify this branch to go directly to its destination.  */
      g->stub_addr += 4;
      br_dest = g->stub_addr;
      if (irela == NULL)
	{
	  /* Except in the case of _SPUEAR_ stubs, the branch in
	     question is the one in the stub itself.  */
	  BFD_ASSERT (stub_type == nonovl_stub);
	  g->br_addr = g->stub_addr;
	  br_dest = to;
	}

      set_id = ((dest_ovl - 1) >> htab->num_lines_log2) + 1;
      bfd_put_32 (sec->owner, (set_id << 18) | (dest & 0x3ffff),
		  sec->contents + sec->size);
      bfd_put_32 (sec->owner, BRASL + ((to << 5) & 0x007fff80) + 75,
		  sec->contents + sec->size + 4);
      bfd_put_32 (sec->owner, (lrlive << 29) | (g->br_addr & 0x3ffff),
		  sec->contents + sec->size + 8);
      patt = dest ^ br_dest;
      if (irela != NULL && ELF32_R_TYPE (irela->r_info) == R_SPU_REL16)
	patt = (dest - g->br_addr) ^ (br_dest - g->br_addr);
      bfd_put_32 (sec->owner, (patt << 5) & 0x007fff80,
		  sec->contents + sec->size + 12);

      if (ovl == 0)
	/* Extra space for linked list entries.  */
	sec->size += 16;
    }
  else
    abort ();

  sec->size += ovl_stub_size (htab->params);

  if (htab->params->emit_stub_syms)
    {
      size_t len;
      char *name;
      int add;

      /* Stub symbols are named "<ovl>.ovl_call.<sym>[+<addend>]";
	 local syms use "<section id>:<sym index>" in place of the
	 symbol name.  */
      len = 8 + sizeof (".ovl_call.") - 1;
      if (h != NULL)
	len += strlen (h->root.root.string);
      else
	len += 8 + 1 + 8;
      add = 0;
      if (irela != NULL)
	add = (int) irela->r_addend & 0xffffffff;
      if (add != 0)
	len += 1 + 8;
      name = bfd_malloc (len + 1);
      if (name == NULL)
	return FALSE;

      sprintf (name, "%08x.ovl_call.", g->ovl);
      if (h != NULL)
	strcpy (name + 8 + sizeof (".ovl_call.") - 1, h->root.root.string);
      else
	sprintf (name + 8 + sizeof (".ovl_call.") - 1, "%x:%x",
		 dest_sec->id & 0xffffffff,
		 (int) ELF32_R_SYM (irela->r_info) & 0xffffffff);
      if (add != 0)
	sprintf (name + len - 9, "+%x", add);

      h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
      free (name);
      if (h == NULL)
	return FALSE;
      if (h->root.type == bfd_link_hash_new)
	{
	  h->root.type = bfd_link_hash_defined;
	  h->root.u.def.section = sec;
	  h->size = ovl_stub_size (htab->params);
	  h->root.u.def.value = sec->size - h->size;
	  h->type = STT_FUNC;
	  h->ref_regular = 1;
	  h->def_regular = 1;
	  h->ref_regular_nonweak = 1;
	  h->forced_local = 1;
	  h->non_elf = 0;
	}
    }

  return TRUE;
}
1473
1474/* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
1475   symbols.  */
1476
1477static bfd_boolean
1478allocate_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
1479{
1480  /* Symbols starting with _SPUEAR_ need a stub because they may be
1481     invoked by the PPU.  */
1482  struct bfd_link_info *info = inf;
1483  struct spu_link_hash_table *htab = spu_hash_table (info);
1484  asection *sym_sec;
1485
1486  if ((h->root.type == bfd_link_hash_defined
1487       || h->root.type == bfd_link_hash_defweak)
1488      && h->def_regular
1489      && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
1490      && (sym_sec = h->root.u.def.section) != NULL
1491      && sym_sec->output_section != bfd_abs_section_ptr
1492      && spu_elf_section_data (sym_sec->output_section) != NULL
1493      && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
1494	  || htab->params->non_overlay_stubs))
1495    {
1496      return count_stub (htab, NULL, NULL, nonovl_stub, h, NULL);
1497    }
1498
1499  return TRUE;
1500}
1501
1502static bfd_boolean
1503build_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
1504{
1505  /* Symbols starting with _SPUEAR_ need a stub because they may be
1506     invoked by the PPU.  */
1507  struct bfd_link_info *info = inf;
1508  struct spu_link_hash_table *htab = spu_hash_table (info);
1509  asection *sym_sec;
1510
1511  if ((h->root.type == bfd_link_hash_defined
1512       || h->root.type == bfd_link_hash_defweak)
1513      && h->def_regular
1514      && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
1515      && (sym_sec = h->root.u.def.section) != NULL
1516      && sym_sec->output_section != bfd_abs_section_ptr
1517      && spu_elf_section_data (sym_sec->output_section) != NULL
1518      && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
1519	  || htab->params->non_overlay_stubs))
1520    {
1521      return build_stub (info, NULL, NULL, nonovl_stub, h, NULL,
1522			 h->root.u.def.value, sym_sec);
1523    }
1524
1525  return TRUE;
1526}
1527
1528/* Size or build stubs.  */
1529
/* Walk all relocations of all SPU input sections.  When BUILD is
   FALSE, count the stubs needed (first pass);  when TRUE, emit the
   stubs sized by the first pass.  Returns FALSE on error.  */

static bfd_boolean
process_stubs (struct bfd_link_info *info, bfd_boolean build)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *ibfd;

  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      extern const bfd_target spu_elf32_vec;
      Elf_Internal_Shdr *symtab_hdr;
      asection *isec;
      Elf_Internal_Sym *local_syms = NULL;

      /* Only SPU ELF objects are of interest.  */
      if (ibfd->xvec != &spu_elf32_vec)
	continue;

      /* We'll need the symbol table in a second.  */
      symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
      if (symtab_hdr->sh_info == 0)
	continue;

      /* Walk over each section attached to the input bfd.  */
      for (isec = ibfd->sections; isec != NULL; isec = isec->next)
	{
	  Elf_Internal_Rela *internal_relocs, *irelaend, *irela;

	  /* If there aren't any relocs, then there's nothing more to do.  */
	  if ((isec->flags & SEC_RELOC) == 0
	      || isec->reloc_count == 0)
	    continue;

	  if (!maybe_needs_stubs (isec))
	    continue;

	  /* Get the relocs.  */
	  internal_relocs = _bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
						       info->keep_memory);
	  if (internal_relocs == NULL)
	    goto error_ret_free_local;

	  /* Now examine each relocation.  */
	  irela = internal_relocs;
	  irelaend = irela + isec->reloc_count;
	  for (; irela < irelaend; irela++)
	    {
	      enum elf_spu_reloc_type r_type;
	      unsigned int r_indx;
	      asection *sym_sec;
	      Elf_Internal_Sym *sym;
	      struct elf_link_hash_entry *h;
	      enum _stub_type stub_type;

	      r_type = ELF32_R_TYPE (irela->r_info);
	      r_indx = ELF32_R_SYM (irela->r_info);

	      /* Reject unknown reloc types.  The error exits below
		 free only the buffers this function owns.  */
	      if (r_type >= R_SPU_max)
		{
		  bfd_set_error (bfd_error_bad_value);
		error_ret_free_internal:
		  if (elf_section_data (isec)->relocs != internal_relocs)
		    free (internal_relocs);
		error_ret_free_local:
		  if (local_syms != NULL
		      && (symtab_hdr->contents
			  != (unsigned char *) local_syms))
		    free (local_syms);
		  return FALSE;
		}

	      /* Determine the reloc target section.  */
	      if (!get_sym_h (&h, &sym, &sym_sec, &local_syms, r_indx, ibfd))
		goto error_ret_free_internal;

	      stub_type = needs_ovl_stub (h, sym, sym_sec, isec, irela,
					  NULL, info);
	      if (stub_type == no_stub)
		continue;
	      else if (stub_type == stub_error)
		goto error_ret_free_internal;

	      /* Lazily allocate the per-overlay stub counters
		 (slot 0 is the non-overlay area).  */
	      if (htab->stub_count == NULL)
		{
		  bfd_size_type amt;
		  amt = (htab->num_overlays + 1) * sizeof (*htab->stub_count);
		  htab->stub_count = bfd_zmalloc (amt);
		  if (htab->stub_count == NULL)
		    goto error_ret_free_internal;
		}

	      if (!build)
		{
		  if (!count_stub (htab, ibfd, isec, stub_type, h, irela))
		    goto error_ret_free_internal;
		}
	      else
		{
		  bfd_vma dest;

		  if (h != NULL)
		    dest = h->root.u.def.value;
		  else
		    dest = sym->st_value;
		  dest += irela->r_addend;
		  if (!build_stub (info, ibfd, isec, stub_type, h, irela,
				   dest, sym_sec))
		    goto error_ret_free_internal;
		}
	    }

	  /* We're done with the internal relocs, free them.  */
	  if (elf_section_data (isec)->relocs != internal_relocs)
	    free (internal_relocs);
	}

      /* Either free the symbols we read, or cache them on the symtab
	 header for later passes.  */
      if (local_syms != NULL
	  && symtab_hdr->contents != (unsigned char *) local_syms)
	{
	  if (!info->keep_memory)
	    free (local_syms);
	  else
	    symtab_hdr->contents = (unsigned char *) local_syms;
	}
    }

  return TRUE;
}
1656
1657/* Allocate space for overlay call and return stubs.
1658   Return 0 on error, 1 if no overlays, 2 otherwise.  */
1659
int
spu_elf_size_stubs (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab;
  bfd *ibfd;
  bfd_size_type amt;
  flagword flags;
  unsigned int i;
  asection *stub;

  /* First pass: count the stubs needed for relocations.  */
  if (!process_stubs (info, FALSE))
    return 0;

  htab = spu_hash_table (info);
  /* _SPUEAR_ symbols also need stubs;  see allocate_spuear_stubs.  */
  elf_link_hash_traverse (&htab->elf, allocate_spuear_stubs, info);
  if (htab->stub_err)
    return 0;

  ibfd = info->input_bfds;
  if (htab->stub_count != NULL)
    {
      /* Stub section 0 serves the non-overlay area;  one more
	 section per overlay.  */
      amt = (htab->num_overlays + 1) * sizeof (*htab->stub_sec);
      htab->stub_sec = bfd_zmalloc (amt);
      if (htab->stub_sec == NULL)
	return 0;

      flags = (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_READONLY
	       | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
      stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
      htab->stub_sec[0] = stub;
      if (stub == NULL
	  || !bfd_set_section_alignment (ibfd, stub,
					 ovl_stub_size_log2 (htab->params)))
	return 0;
      stub->size = htab->stub_count[0] * ovl_stub_size (htab->params);
      if (htab->params->ovly_flavour == ovly_soft_icache)
	/* Extra space for linked list entries.  */
	stub->size += htab->stub_count[0] * 16;

      for (i = 0; i < htab->num_overlays; ++i)
	{
	  asection *osec = htab->ovl_sec[i];
	  unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
	  stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
	  htab->stub_sec[ovl] = stub;
	  if (stub == NULL
	      || !bfd_set_section_alignment (ibfd, stub,
					     ovl_stub_size_log2 (htab->params)))
	    return 0;
	  stub->size = htab->stub_count[ovl] * ovl_stub_size (htab->params);
	}
    }

  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      /* Space for icache manager tables.
	 a) Tag array, one quadword per cache line.
	 b) Rewrite "to" list, one quadword per cache line.
	 c) Rewrite "from" list, one byte per outgoing branch (rounded up to
	    a power-of-two number of full quadwords) per cache line.  */

      flags = SEC_ALLOC;
      htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
      if (htab->ovtab == NULL
	  || !bfd_set_section_alignment (ibfd, htab->ovtab, 4))
	return 0;

      htab->ovtab->size = (16 + 16 + (16 << htab->fromelem_size_log2))
			  << htab->num_lines_log2;

      /* NOTE(review): .ovini appears to hold 16 bytes of icache
	 manager initialisation data;  confirm against the overlay
	 manager sources.  */
      flags = SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      htab->init = bfd_make_section_anyway_with_flags (ibfd, ".ovini", flags);
      if (htab->init == NULL
	  || !bfd_set_section_alignment (ibfd, htab->init, 4))
	return 0;

      htab->init->size = 16;
    }
  else if (htab->stub_count == NULL)
    return 1;
  else
    {
      /* htab->ovtab consists of two arrays.
	 .	struct {
	 .	  u32 vma;
	 .	  u32 size;
	 .	  u32 file_off;
	 .	  u32 buf;
	 .	} _ovly_table[];
	 .
	 .	struct {
	 .	  u32 mapped;
	 .	} _ovly_buf_table[];
	 .  */

      flags = SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
      if (htab->ovtab == NULL
	  || !bfd_set_section_alignment (ibfd, htab->ovtab, 4))
	return 0;

      htab->ovtab->size = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
    }

  htab->toe = bfd_make_section_anyway_with_flags (ibfd, ".toe", SEC_ALLOC);
  if (htab->toe == NULL
      || !bfd_set_section_alignment (ibfd, htab->toe, 4))
    return 0;
  htab->toe->size = 16;

  return 2;
}
1772
1773/* Called from ld to place overlay manager data sections.  This is done
1774   after the overlay manager itself is loaded, mainly so that the
1775   linker's htab->init section is placed after any other .ovl.init
1776   sections.  */
1777
1778void
1779spu_elf_place_overlay_data (struct bfd_link_info *info)
1780{
1781  struct spu_link_hash_table *htab = spu_hash_table (info);
1782  unsigned int i;
1783
1784  if (htab->stub_sec != NULL)
1785    {
1786      (*htab->params->place_spu_section) (htab->stub_sec[0], NULL, ".text");
1787
1788      for (i = 0; i < htab->num_overlays; ++i)
1789	{
1790	  asection *osec = htab->ovl_sec[i];
1791	  unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
1792	  (*htab->params->place_spu_section) (htab->stub_sec[ovl], osec, NULL);
1793	}
1794    }
1795
1796  if (htab->params->ovly_flavour == ovly_soft_icache)
1797    (*htab->params->place_spu_section) (htab->init, NULL, ".ovl.init");
1798
1799  if (htab->ovtab != NULL)
1800    {
1801      const char *ovout = ".data";
1802      if (htab->params->ovly_flavour == ovly_soft_icache)
1803	ovout = ".bss";
1804      (*htab->params->place_spu_section) (htab->ovtab, NULL, ovout);
1805    }
1806
1807  if (htab->toe != NULL)
1808    (*htab->params->place_spu_section) (htab->toe, NULL, ".toe");
1809}
1810
1811/* Functions to handle embedded spu_ovl.o object.  */
1812
1813static void *
1814ovl_mgr_open (struct bfd *nbfd ATTRIBUTE_UNUSED, void *stream)
1815{
1816  return stream;
1817}
1818
1819static file_ptr
1820ovl_mgr_pread (struct bfd *abfd ATTRIBUTE_UNUSED,
1821	       void *stream,
1822	       void *buf,
1823	       file_ptr nbytes,
1824	       file_ptr offset)
1825{
1826  struct _ovl_stream *os;
1827  size_t count;
1828  size_t max;
1829
1830  os = (struct _ovl_stream *) stream;
1831  max = (const char *) os->end - (const char *) os->start;
1832
1833  if ((ufile_ptr) offset >= max)
1834    return 0;
1835
1836  count = nbytes;
1837  if (count > max - offset)
1838    count = max - offset;
1839
1840  memcpy (buf, (const char *) os->start + offset, count);
1841  return count;
1842}
1843
1844static int
1845ovl_mgr_stat (struct bfd *abfd ATTRIBUTE_UNUSED,
1846	      void *stream,
1847	      struct stat *sb)
1848{
1849  struct _ovl_stream *os = (struct _ovl_stream *) stream;
1850
1851  memset (sb, 0, sizeof (*sb));
1852  sb->st_size = (const char *) os->end - (const char *) os->start;
1853  return 0;
1854}
1855
1856bfd_boolean
1857spu_elf_open_builtin_lib (bfd **ovl_bfd, const struct _ovl_stream *stream)
1858{
1859  *ovl_bfd = bfd_openr_iovec ("builtin ovl_mgr",
1860			      "elf32-spu",
1861			      ovl_mgr_open,
1862			      (void *) stream,
1863			      ovl_mgr_pread,
1864			      NULL,
1865			      ovl_mgr_stat);
1866  return *ovl_bfd != NULL;
1867}
1868
1869static unsigned int
1870overlay_index (asection *sec)
1871{
1872  if (sec == NULL
1873      || sec->output_section == bfd_abs_section_ptr)
1874    return 0;
1875  return spu_elf_section_data (sec->output_section)->u.o.ovl_index;
1876}
1877
1878/* Define an STT_OBJECT symbol.  */
1879
1880static struct elf_link_hash_entry *
1881define_ovtab_symbol (struct spu_link_hash_table *htab, const char *name)
1882{
1883  struct elf_link_hash_entry *h;
1884
1885  h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
1886  if (h == NULL)
1887    return NULL;
1888
1889  if (h->root.type != bfd_link_hash_defined
1890      || !h->def_regular)
1891    {
1892      h->root.type = bfd_link_hash_defined;
1893      h->root.u.def.section = htab->ovtab;
1894      h->type = STT_OBJECT;
1895      h->ref_regular = 1;
1896      h->def_regular = 1;
1897      h->ref_regular_nonweak = 1;
1898      h->non_elf = 0;
1899    }
1900  else if (h->root.u.def.section->owner != NULL)
1901    {
1902      /* xgettext:c-format */
1903      _bfd_error_handler (_("%B is not allowed to define %s"),
1904			  h->root.u.def.section->owner,
1905			  h->root.root.string);
1906      bfd_set_error (bfd_error_bad_value);
1907      return NULL;
1908    }
1909  else
1910    {
1911      _bfd_error_handler (_("you are not allowed to define %s in a script"),
1912			  h->root.root.string);
1913      bfd_set_error (bfd_error_bad_value);
1914      return NULL;
1915    }
1916
1917  return h;
1918}
1919
1920/* Fill in all stubs and the overlay tables.  */
1921
1922static bfd_boolean
1923spu_elf_build_stubs (struct bfd_link_info *info)
1924{
1925  struct spu_link_hash_table *htab = spu_hash_table (info);
1926  struct elf_link_hash_entry *h;
1927  bfd_byte *p;
1928  asection *s;
1929  bfd *obfd;
1930  unsigned int i;
1931
1932  if (htab->num_overlays != 0)
1933    {
1934      for (i = 0; i < 2; i++)
1935	{
1936	  h = htab->ovly_entry[i];
1937	  if (h != NULL
1938	      && (h->root.type == bfd_link_hash_defined
1939		  || h->root.type == bfd_link_hash_defweak)
1940	      && h->def_regular)
1941	    {
1942	      s = h->root.u.def.section->output_section;
1943	      if (spu_elf_section_data (s)->u.o.ovl_index)
1944		{
1945		  _bfd_error_handler (_("%s in overlay section"),
1946				      h->root.root.string);
1947		  bfd_set_error (bfd_error_bad_value);
1948		  return FALSE;
1949		}
1950	    }
1951	}
1952    }
1953
1954  if (htab->stub_sec != NULL)
1955    {
1956      for (i = 0; i <= htab->num_overlays; i++)
1957	if (htab->stub_sec[i]->size != 0)
1958	  {
1959	    htab->stub_sec[i]->contents = bfd_zalloc (htab->stub_sec[i]->owner,
1960						      htab->stub_sec[i]->size);
1961	    if (htab->stub_sec[i]->contents == NULL)
1962	      return FALSE;
1963	    htab->stub_sec[i]->rawsize = htab->stub_sec[i]->size;
1964	    htab->stub_sec[i]->size = 0;
1965	  }
1966
1967      /* Fill in all the stubs.  */
1968      process_stubs (info, TRUE);
1969      if (!htab->stub_err)
1970	elf_link_hash_traverse (&htab->elf, build_spuear_stubs, info);
1971
1972      if (htab->stub_err)
1973	{
1974	  _bfd_error_handler (_("overlay stub relocation overflow"));
1975	  bfd_set_error (bfd_error_bad_value);
1976	  return FALSE;
1977	}
1978
1979      for (i = 0; i <= htab->num_overlays; i++)
1980	{
1981	  if (htab->stub_sec[i]->size != htab->stub_sec[i]->rawsize)
1982	    {
1983	      _bfd_error_handler  (_("stubs don't match calculated size"));
1984	      bfd_set_error (bfd_error_bad_value);
1985	      return FALSE;
1986	    }
1987	  htab->stub_sec[i]->rawsize = 0;
1988	}
1989    }
1990
1991  if (htab->ovtab == NULL || htab->ovtab->size == 0)
1992    return TRUE;
1993
1994  htab->ovtab->contents = bfd_zalloc (htab->ovtab->owner, htab->ovtab->size);
1995  if (htab->ovtab->contents == NULL)
1996    return FALSE;
1997
1998  p = htab->ovtab->contents;
1999  if (htab->params->ovly_flavour == ovly_soft_icache)
2000    {
2001      bfd_vma off;
2002
2003      h = define_ovtab_symbol (htab, "__icache_tag_array");
2004      if (h == NULL)
2005	return FALSE;
2006      h->root.u.def.value = 0;
2007      h->size = 16 << htab->num_lines_log2;
2008      off = h->size;
2009
2010      h = define_ovtab_symbol (htab, "__icache_tag_array_size");
2011      if (h == NULL)
2012	return FALSE;
2013      h->root.u.def.value = 16 << htab->num_lines_log2;
2014      h->root.u.def.section = bfd_abs_section_ptr;
2015
2016      h = define_ovtab_symbol (htab, "__icache_rewrite_to");
2017      if (h == NULL)
2018	return FALSE;
2019      h->root.u.def.value = off;
2020      h->size = 16 << htab->num_lines_log2;
2021      off += h->size;
2022
2023      h = define_ovtab_symbol (htab, "__icache_rewrite_to_size");
2024      if (h == NULL)
2025	return FALSE;
2026      h->root.u.def.value = 16 << htab->num_lines_log2;
2027      h->root.u.def.section = bfd_abs_section_ptr;
2028
2029      h = define_ovtab_symbol (htab, "__icache_rewrite_from");
2030      if (h == NULL)
2031	return FALSE;
2032      h->root.u.def.value = off;
2033      h->size = 16 << (htab->fromelem_size_log2 + htab->num_lines_log2);
2034      off += h->size;
2035
2036      h = define_ovtab_symbol (htab, "__icache_rewrite_from_size");
2037      if (h == NULL)
2038	return FALSE;
2039      h->root.u.def.value = 16 << (htab->fromelem_size_log2
2040				   + htab->num_lines_log2);
2041      h->root.u.def.section = bfd_abs_section_ptr;
2042
2043      h = define_ovtab_symbol (htab, "__icache_log2_fromelemsize");
2044      if (h == NULL)
2045	return FALSE;
2046      h->root.u.def.value = htab->fromelem_size_log2;
2047      h->root.u.def.section = bfd_abs_section_ptr;
2048
2049      h = define_ovtab_symbol (htab, "__icache_base");
2050      if (h == NULL)
2051	return FALSE;
2052      h->root.u.def.value = htab->ovl_sec[0]->vma;
2053      h->root.u.def.section = bfd_abs_section_ptr;
2054      h->size = htab->num_buf << htab->line_size_log2;
2055
2056      h = define_ovtab_symbol (htab, "__icache_linesize");
2057      if (h == NULL)
2058	return FALSE;
2059      h->root.u.def.value = 1 << htab->line_size_log2;
2060      h->root.u.def.section = bfd_abs_section_ptr;
2061
2062      h = define_ovtab_symbol (htab, "__icache_log2_linesize");
2063      if (h == NULL)
2064	return FALSE;
2065      h->root.u.def.value = htab->line_size_log2;
2066      h->root.u.def.section = bfd_abs_section_ptr;
2067
2068      h = define_ovtab_symbol (htab, "__icache_neg_log2_linesize");
2069      if (h == NULL)
2070	return FALSE;
2071      h->root.u.def.value = -htab->line_size_log2;
2072      h->root.u.def.section = bfd_abs_section_ptr;
2073
2074      h = define_ovtab_symbol (htab, "__icache_cachesize");
2075      if (h == NULL)
2076	return FALSE;
2077      h->root.u.def.value = 1 << (htab->num_lines_log2 + htab->line_size_log2);
2078      h->root.u.def.section = bfd_abs_section_ptr;
2079
2080      h = define_ovtab_symbol (htab, "__icache_log2_cachesize");
2081      if (h == NULL)
2082	return FALSE;
2083      h->root.u.def.value = htab->num_lines_log2 + htab->line_size_log2;
2084      h->root.u.def.section = bfd_abs_section_ptr;
2085
2086      h = define_ovtab_symbol (htab, "__icache_neg_log2_cachesize");
2087      if (h == NULL)
2088	return FALSE;
2089      h->root.u.def.value = -(htab->num_lines_log2 + htab->line_size_log2);
2090      h->root.u.def.section = bfd_abs_section_ptr;
2091
2092      if (htab->init != NULL && htab->init->size != 0)
2093	{
2094	  htab->init->contents = bfd_zalloc (htab->init->owner,
2095					     htab->init->size);
2096	  if (htab->init->contents == NULL)
2097	    return FALSE;
2098
2099	  h = define_ovtab_symbol (htab, "__icache_fileoff");
2100	  if (h == NULL)
2101	    return FALSE;
2102	  h->root.u.def.value = 0;
2103	  h->root.u.def.section = htab->init;
2104	  h->size = 8;
2105	}
2106    }
2107  else
2108    {
2109      /* Write out _ovly_table.  */
2110      /* set low bit of .size to mark non-overlay area as present.  */
2111      p[7] = 1;
2112      obfd = htab->ovtab->output_section->owner;
2113      for (s = obfd->sections; s != NULL; s = s->next)
2114	{
2115	  unsigned int ovl_index = spu_elf_section_data (s)->u.o.ovl_index;
2116
2117	  if (ovl_index != 0)
2118	    {
2119	      unsigned long off = ovl_index * 16;
2120	      unsigned int ovl_buf = spu_elf_section_data (s)->u.o.ovl_buf;
2121
2122	      bfd_put_32 (htab->ovtab->owner, s->vma, p + off);
2123	      bfd_put_32 (htab->ovtab->owner, (s->size + 15) & -16,
2124			  p + off + 4);
2125	      /* file_off written later in spu_elf_modify_program_headers.  */
2126	      bfd_put_32 (htab->ovtab->owner, ovl_buf, p + off + 12);
2127	    }
2128	}
2129
2130      h = define_ovtab_symbol (htab, "_ovly_table");
2131      if (h == NULL)
2132	return FALSE;
2133      h->root.u.def.value = 16;
2134      h->size = htab->num_overlays * 16;
2135
2136      h = define_ovtab_symbol (htab, "_ovly_table_end");
2137      if (h == NULL)
2138	return FALSE;
2139      h->root.u.def.value = htab->num_overlays * 16 + 16;
2140      h->size = 0;
2141
2142      h = define_ovtab_symbol (htab, "_ovly_buf_table");
2143      if (h == NULL)
2144	return FALSE;
2145      h->root.u.def.value = htab->num_overlays * 16 + 16;
2146      h->size = htab->num_buf * 4;
2147
2148      h = define_ovtab_symbol (htab, "_ovly_buf_table_end");
2149      if (h == NULL)
2150	return FALSE;
2151      h->root.u.def.value = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
2152      h->size = 0;
2153    }
2154
2155  h = define_ovtab_symbol (htab, "_EAR_");
2156  if (h == NULL)
2157    return FALSE;
2158  h->root.u.def.section = htab->toe;
2159  h->root.u.def.value = 0;
2160  h->size = 16;
2161
2162  return TRUE;
2163}
2164
2165/* Check that all loadable section VMAs lie in the range
2166   LO .. HI inclusive, and stash some parameters for --auto-overlay.  */
2167
2168asection *
2169spu_elf_check_vma (struct bfd_link_info *info)
2170{
2171  struct elf_segment_map *m;
2172  unsigned int i;
2173  struct spu_link_hash_table *htab = spu_hash_table (info);
2174  bfd *abfd = info->output_bfd;
2175  bfd_vma hi = htab->params->local_store_hi;
2176  bfd_vma lo = htab->params->local_store_lo;
2177
2178  htab->local_store = hi + 1 - lo;
2179
2180  for (m = elf_seg_map (abfd); m != NULL; m = m->next)
2181    if (m->p_type == PT_LOAD)
2182      for (i = 0; i < m->count; i++)
2183	if (m->sections[i]->size != 0
2184	    && (m->sections[i]->vma < lo
2185		|| m->sections[i]->vma > hi
2186		|| m->sections[i]->vma + m->sections[i]->size - 1 > hi))
2187	  return m->sections[i];
2188
2189  return NULL;
2190}
2191
/* OFFSET in SEC (presumably) is the beginning of a function prologue.
   Search for stack adjusting insns, and return the sp delta.
   If a store of lr is found save the instruction offset to *LR_STORE.
   If a stack adjusting instruction is found, save that offset to
   *SP_ADJUST.  */

static int
find_function_stack_adjust (asection *sec,
			    bfd_vma offset,
			    bfd_vma *lr_store,
			    bfd_vma *sp_adjust)
{
  /* Modelled contents of the 128 SPU registers, all assumed zero on
     entry to the prologue.  Tracking immediate loads and simple
     arithmetic lets us see frames built in a scratch register before
     being applied to sp (r1).  */
  int reg[128];

  memset (reg, 0, sizeof (reg));
  for ( ; offset + 4 <= sec->size; offset += 4)
    {
      unsigned char buf[4];
      int rt, ra;
      int imm;

      /* Assume no relocs on stack adjusing insns.  */
      if (!bfd_get_section_contents (sec->owner, sec, buf, offset, 4))
	break;

      /* RT and RA fields common to the insn formats decoded below.  */
      rt = buf[3] & 0x7f;
      ra = ((buf[2] & 0x3f) << 1) | (buf[3] >> 7);

      if (buf[0] == 0x24 /* stqd */)
	{
	  /* Note where the link register is saved to the stack.  */
	  if (rt == 0 /* lr */ && ra == 1 /* sp */)
	    *lr_store = offset;
	  continue;
	}

      /* Partly decoded immediate field.  */
      imm = (buf[1] << 9) | (buf[2] << 1) | (buf[3] >> 7);

      if (buf[0] == 0x1c /* ai */)
	{
	  /* Sign-extend the 10-bit immediate.  */
	  imm >>= 7;
	  imm = (imm ^ 0x200) - 0x200;
	  reg[rt] = reg[ra] + imm;

	  if (rt == 1 /* sp */)
	    {
	      /* A positive sp adjustment restores the stack, so the
		 prologue must already be over.  */
	      if (reg[rt] > 0)
		break;
	      *sp_adjust = offset;
	      return reg[rt];
	    }
	}
      else if (buf[0] == 0x18 && (buf[1] & 0xe0) == 0 /* a */)
	{
	  int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);

	  reg[rt] = reg[ra] + reg[rb];
	  if (rt == 1)
	    {
	      if (reg[rt] > 0)
		break;
	      *sp_adjust = offset;
	      return reg[rt];
	    }
	}
      else if (buf[0] == 0x08 && (buf[1] & 0xe0) == 0 /* sf */)
	{
	  int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);

	  /* sf computes RB - RA.  */
	  reg[rt] = reg[rb] - reg[ra];
	  if (rt == 1)
	    {
	      if (reg[rt] > 0)
		break;
	      *sp_adjust = offset;
	      return reg[rt];
	    }
	}
      else if ((buf[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
	{
	  /* Track immediate loads so large frames built in a scratch
	     register are modelled.  */
	  if (buf[0] >= 0x42 /* ila */)
	    imm |= (buf[0] & 1) << 17;
	  else
	    {
	      imm &= 0xffff;

	      if (buf[0] == 0x40 /* il */)
		{
		  if ((buf[1] & 0x80) == 0)
		    continue;
		  /* Sign-extend the 16-bit immediate.  */
		  imm = (imm ^ 0x8000) - 0x8000;
		}
	      else if ((buf[1] & 0x80) == 0 /* ilhu */)
		imm <<= 16;
	    }
	  reg[rt] = imm;
	  continue;
	}
      else if (buf[0] == 0x60 && (buf[1] & 0x80) != 0 /* iohl */)
	{
	  /* OR the halfword immediate into the low 16 bits, typically
	     pairing with an earlier ilhu.  */
	  reg[rt] |= imm & 0xffff;
	  continue;
	}
      else if (buf[0] == 0x04 /* ori */)
	{
	  imm >>= 7;
	  imm = (imm ^ 0x200) - 0x200;
	  reg[rt] = reg[ra] | imm;
	  continue;
	}
      else if (buf[0] == 0x32 && (buf[1] & 0x80) != 0 /* fsmbi */)
	{
	  /* Only the preferred-slot bits of the byte mask matter:
	     each of imm bits 15..12 expands to a byte of the word.  */
	  reg[rt] = (  ((imm & 0x8000) ? 0xff000000 : 0)
		     | ((imm & 0x4000) ? 0x00ff0000 : 0)
		     | ((imm & 0x2000) ? 0x0000ff00 : 0)
		     | ((imm & 0x1000) ? 0x000000ff : 0));
	  continue;
	}
      else if (buf[0] == 0x16 /* andbi */)
	{
	  /* Replicate the byte immediate across the word before
	     masking.  */
	  imm >>= 7;
	  imm &= 0xff;
	  imm |= imm << 8;
	  imm |= imm << 16;
	  reg[rt] = reg[ra] & imm;
	  continue;
	}
      else if (buf[0] == 0x33 && imm == 1 /* brsl .+4 */)
	{
	  /* Used in pic reg load.  Say rt is trashed.  Won't be used
	     in stack adjust, but we need to continue past this branch.  */
	  reg[rt] = 0;
	  continue;
	}
      else if (is_branch (buf) || is_indirect_branch (buf))
	/* If we hit a branch then we must be out of the prologue.  */
	break;
    }

  /* No stack adjustment found before the prologue ended.  */
  return 0;
}
2333
2334/* qsort predicate to sort symbols by section and value.  */
2335
2336static Elf_Internal_Sym *sort_syms_syms;
2337static asection **sort_syms_psecs;
2338
2339static int
2340sort_syms (const void *a, const void *b)
2341{
2342  Elf_Internal_Sym *const *s1 = a;
2343  Elf_Internal_Sym *const *s2 = b;
2344  asection *sec1,*sec2;
2345  bfd_signed_vma delta;
2346
2347  sec1 = sort_syms_psecs[*s1 - sort_syms_syms];
2348  sec2 = sort_syms_psecs[*s2 - sort_syms_syms];
2349
2350  if (sec1 != sec2)
2351    return sec1->index - sec2->index;
2352
2353  delta = (*s1)->st_value - (*s2)->st_value;
2354  if (delta != 0)
2355    return delta < 0 ? -1 : 1;
2356
2357  delta = (*s2)->st_size - (*s1)->st_size;
2358  if (delta != 0)
2359    return delta < 0 ? -1 : 1;
2360
2361  return *s1 < *s2 ? -1 : 1;
2362}
2363
2364/* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
2365   entries for section SEC.  */
2366
2367static struct spu_elf_stack_info *
2368alloc_stack_info (asection *sec, int max_fun)
2369{
2370  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2371  bfd_size_type amt;
2372
2373  amt = sizeof (struct spu_elf_stack_info);
2374  amt += (max_fun - 1) * sizeof (struct function_info);
2375  sec_data->u.i.stack_info = bfd_zmalloc (amt);
2376  if (sec_data->u.i.stack_info != NULL)
2377    sec_data->u.i.stack_info->max_fun = max_fun;
2378  return sec_data->u.i.stack_info;
2379}
2380
/* Add a new struct function_info describing a (part of a) function
   starting at SYM_H.  Keep the array sorted by address.  */

static struct function_info *
maybe_insert_function (asection *sec,
		       void *sym_h,
		       bfd_boolean global,
		       bfd_boolean is_func)
{
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
  int i;
  bfd_vma off, size;

  /* Lazily create the per-section function array.  */
  if (sinfo == NULL)
    {
      sinfo = alloc_stack_info (sec, 20);
      if (sinfo == NULL)
	return NULL;
    }

  /* SYM_H is an Elf_Internal_Sym * for local syms and a
     struct elf_link_hash_entry * for globals; extract the section
     offset and symbol size accordingly.  */
  if (!global)
    {
      Elf_Internal_Sym *sym = sym_h;
      off = sym->st_value;
      size = sym->st_size;
    }
  else
    {
      struct elf_link_hash_entry *h = sym_h;
      off = h->root.u.def.value;
      size = h->size;
    }

  /* Find the last existing entry starting at or before OFF.  */
  for (i = sinfo->num_fun; --i >= 0; )
    if (sinfo->fun[i].lo <= off)
      break;

  if (i >= 0)
    {
      /* Don't add another entry for an alias, but do update some
	 info.  */
      if (sinfo->fun[i].lo == off)
	{
	  /* Prefer globals over local syms.  */
	  if (global && !sinfo->fun[i].global)
	    {
	      sinfo->fun[i].global = TRUE;
	      sinfo->fun[i].u.h = sym_h;
	    }
	  if (is_func)
	    sinfo->fun[i].is_func = TRUE;
	  return &sinfo->fun[i];
	}
      /* Ignore a zero-size symbol inside an existing function.  */
      else if (sinfo->fun[i].hi > off && size == 0)
	return &sinfo->fun[i];
    }

  /* Grow the array (by half again plus 20 entries) when full.  */
  if (sinfo->num_fun >= sinfo->max_fun)
    {
      bfd_size_type amt = sizeof (struct spu_elf_stack_info);
      bfd_size_type old = amt;

      old += (sinfo->max_fun - 1) * sizeof (struct function_info);
      sinfo->max_fun += 20 + (sinfo->max_fun >> 1);
      amt += (sinfo->max_fun - 1) * sizeof (struct function_info);
      sinfo = bfd_realloc (sinfo, amt);
      if (sinfo == NULL)
	return NULL;
      /* Zero only the newly added entries.  */
      memset ((char *) sinfo + old, 0, amt - old);
      sec_data->u.i.stack_info = sinfo;
    }

  /* Shift later entries up one slot to keep the array sorted.  */
  if (++i < sinfo->num_fun)
    memmove (&sinfo->fun[i + 1], &sinfo->fun[i],
	     (sinfo->num_fun - i) * sizeof (sinfo->fun[i]));
  sinfo->fun[i].is_func = is_func;
  sinfo->fun[i].global = global;
  sinfo->fun[i].sec = sec;
  if (global)
    sinfo->fun[i].u.h = sym_h;
  else
    sinfo->fun[i].u.sym = sym_h;
  sinfo->fun[i].lo = off;
  sinfo->fun[i].hi = off + size;
  /* -1 marks "offset not found" for the prologue insn offsets filled
     in by find_function_stack_adjust.  */
  sinfo->fun[i].lr_store = -1;
  sinfo->fun[i].sp_adjust = -1;
  sinfo->fun[i].stack = -find_function_stack_adjust (sec, off,
						     &sinfo->fun[i].lr_store,
						     &sinfo->fun[i].sp_adjust);
  sinfo->num_fun += 1;
  return &sinfo->fun[i];
}
2475
2476/* Return the name of FUN.  */
2477
2478static const char *
2479func_name (struct function_info *fun)
2480{
2481  asection *sec;
2482  bfd *ibfd;
2483  Elf_Internal_Shdr *symtab_hdr;
2484
2485  while (fun->start != NULL)
2486    fun = fun->start;
2487
2488  if (fun->global)
2489    return fun->u.h->root.root.string;
2490
2491  sec = fun->sec;
2492  if (fun->u.sym->st_name == 0)
2493    {
2494      size_t len = strlen (sec->name);
2495      char *name = bfd_malloc (len + 10);
2496      if (name == NULL)
2497	return "(null)";
2498      sprintf (name, "%s+%lx", sec->name,
2499	       (unsigned long) fun->u.sym->st_value & 0xffffffff);
2500      return name;
2501    }
2502  ibfd = sec->owner;
2503  symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2504  return bfd_elf_sym_name (ibfd, symtab_hdr, fun->u.sym, sec);
2505}
2506
2507/* Read the instruction at OFF in SEC.  Return true iff the instruction
2508   is a nop, lnop, or stop 0 (all zero insn).  */
2509
2510static bfd_boolean
2511is_nop (asection *sec, bfd_vma off)
2512{
2513  unsigned char insn[4];
2514
2515  if (off + 4 > sec->size
2516      || !bfd_get_section_contents (sec->owner, sec, insn, off, 4))
2517    return FALSE;
2518  if ((insn[0] & 0xbf) == 0 && (insn[1] & 0xe0) == 0x20)
2519    return TRUE;
2520  if (insn[0] == 0 && insn[1] == 0 && insn[2] == 0 && insn[3] == 0)
2521    return TRUE;
2522  return FALSE;
2523}
2524
2525/* Extend the range of FUN to cover nop padding up to LIMIT.
2526   Return TRUE iff some instruction other than a NOP was found.  */
2527
2528static bfd_boolean
2529insns_at_end (struct function_info *fun, bfd_vma limit)
2530{
2531  bfd_vma off = (fun->hi + 3) & -4;
2532
2533  while (off < limit && is_nop (fun->sec, off))
2534    off += 4;
2535  if (off < limit)
2536    {
2537      fun->hi = off;
2538      return TRUE;
2539    }
2540  fun->hi = limit;
2541  return FALSE;
2542}
2543
2544/* Check and fix overlapping function ranges.  Return TRUE iff there
2545   are gaps in the current info we have about functions in SEC.  */
2546
2547static bfd_boolean
2548check_function_ranges (asection *sec, struct bfd_link_info *info)
2549{
2550  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2551  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
2552  int i;
2553  bfd_boolean gaps = FALSE;
2554
2555  if (sinfo == NULL)
2556    return FALSE;
2557
2558  for (i = 1; i < sinfo->num_fun; i++)
2559    if (sinfo->fun[i - 1].hi > sinfo->fun[i].lo)
2560      {
2561	/* Fix overlapping symbols.  */
2562	const char *f1 = func_name (&sinfo->fun[i - 1]);
2563	const char *f2 = func_name (&sinfo->fun[i]);
2564
2565	/* xgettext:c-format */
2566	info->callbacks->einfo (_("warning: %s overlaps %s\n"), f1, f2);
2567	sinfo->fun[i - 1].hi = sinfo->fun[i].lo;
2568      }
2569    else if (insns_at_end (&sinfo->fun[i - 1], sinfo->fun[i].lo))
2570      gaps = TRUE;
2571
2572  if (sinfo->num_fun == 0)
2573    gaps = TRUE;
2574  else
2575    {
2576      if (sinfo->fun[0].lo != 0)
2577	gaps = TRUE;
2578      if (sinfo->fun[sinfo->num_fun - 1].hi > sec->size)
2579	{
2580	  const char *f1 = func_name (&sinfo->fun[sinfo->num_fun - 1]);
2581
2582	  info->callbacks->einfo (_("warning: %s exceeds section size\n"), f1);
2583	  sinfo->fun[sinfo->num_fun - 1].hi = sec->size;
2584	}
2585      else if (insns_at_end (&sinfo->fun[sinfo->num_fun - 1], sec->size))
2586	gaps = TRUE;
2587    }
2588  return gaps;
2589}
2590
2591/* Search current function info for a function that contains address
2592   OFFSET in section SEC.  */
2593
2594static struct function_info *
2595find_function (asection *sec, bfd_vma offset, struct bfd_link_info *info)
2596{
2597  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2598  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
2599  int lo, hi, mid;
2600
2601  lo = 0;
2602  hi = sinfo->num_fun;
2603  while (lo < hi)
2604    {
2605      mid = (lo + hi) / 2;
2606      if (offset < sinfo->fun[mid].lo)
2607	hi = mid;
2608      else if (offset >= sinfo->fun[mid].hi)
2609	lo = mid + 1;
2610      else
2611	return &sinfo->fun[mid];
2612    }
2613  /* xgettext:c-format */
2614  info->callbacks->einfo (_("%A:0x%v not found in function table\n"),
2615			  sec, offset);
2616  bfd_set_error (bfd_error_bad_value);
2617  return NULL;
2618}
2619
2620/* Add CALLEE to CALLER call list if not already present.  Return TRUE
2621   if CALLEE was new.  If this function return FALSE, CALLEE should
2622   be freed.  */
2623
2624static bfd_boolean
2625insert_callee (struct function_info *caller, struct call_info *callee)
2626{
2627  struct call_info **pp, *p;
2628
2629  for (pp = &caller->call_list; (p = *pp) != NULL; pp = &p->next)
2630    if (p->fun == callee->fun)
2631      {
2632	/* Tail calls use less stack than normal calls.  Retain entry
2633	   for normal call over one for tail call.  */
2634	p->is_tail &= callee->is_tail;
2635	if (!p->is_tail)
2636	  {
2637	    p->fun->start = NULL;
2638	    p->fun->is_func = TRUE;
2639	  }
2640	p->count += callee->count;
2641	/* Reorder list so most recent call is first.  */
2642	*pp = p->next;
2643	p->next = caller->call_list;
2644	caller->call_list = p;
2645	return FALSE;
2646      }
2647  callee->next = caller->call_list;
2648  caller->call_list = callee;
2649  return TRUE;
2650}
2651
2652/* Copy CALL and insert the copy into CALLER.  */
2653
2654static bfd_boolean
2655copy_callee (struct function_info *caller, const struct call_info *call)
2656{
2657  struct call_info *callee;
2658  callee = bfd_malloc (sizeof (*callee));
2659  if (callee == NULL)
2660    return FALSE;
2661  *callee = *call;
2662  if (!insert_callee (caller, callee))
2663    free (callee);
2664  return TRUE;
2665}
2666
2667/* We're only interested in code sections.  Testing SEC_IN_MEMORY excludes
2668   overlay stub sections.  */
2669
2670static bfd_boolean
2671interesting_section (asection *s)
2672{
2673  return (s->output_section != bfd_abs_section_ptr
2674	  && ((s->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_IN_MEMORY))
2675	      == (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2676	  && s->size != 0);
2677}
2678
/* Rummage through the relocs for SEC, looking for function calls.
   If CALL_TREE is true, fill in call graph.  If CALL_TREE is false,
   mark destination symbols on calls as being functions.  Also
   look at branches, which may be tail calls or go to hot/cold
   section part of same function.  */

static bfd_boolean
mark_functions_via_relocs (asection *sec,
			   struct bfd_link_info *info,
			   int call_tree)
{
  Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
  Elf_Internal_Shdr *symtab_hdr;
  void *psyms;
  unsigned int priority = 0;
  /* Static so the "call to non-code section" warning is given only
     once per link, not once per offending reloc.  */
  static bfd_boolean warned;

  if (!interesting_section (sec)
      || sec->reloc_count == 0)
    return TRUE;

  internal_relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
					       info->keep_memory);
  if (internal_relocs == NULL)
    return FALSE;

  symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
  psyms = &symtab_hdr->contents;
  irela = internal_relocs;
  irelaend = irela + sec->reloc_count;
  for (; irela < irelaend; irela++)
    {
      enum elf_spu_reloc_type r_type;
      unsigned int r_indx;
      asection *sym_sec;
      Elf_Internal_Sym *sym;
      struct elf_link_hash_entry *h;
      bfd_vma val;
      bfd_boolean nonbranch, is_call;
      struct function_info *caller;
      struct call_info *callee;

      /* Only R_SPU_REL16 and R_SPU_ADDR16 relocs can sit on branch
	 insns; anything else is some kind of data reference.  */
      r_type = ELF32_R_TYPE (irela->r_info);
      nonbranch = r_type != R_SPU_REL16 && r_type != R_SPU_ADDR16;

      r_indx = ELF32_R_SYM (irela->r_info);
      if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, sec->owner))
	return FALSE;

      /* Ignore references to absolute or discarded targets.  */
      if (sym_sec == NULL
	  || sym_sec->output_section == bfd_abs_section_ptr)
	continue;

      is_call = FALSE;
      if (!nonbranch)
	{
	  unsigned char insn[4];

	  if (!bfd_get_section_contents (sec->owner, sec, insn,
					 irela->r_offset, 4))
	    return FALSE;
	  if (is_branch (insn))
	    {
	      /* Opcode bytes 0x31 and 0x33 (presumably brasl/brsl,
		 the branch-and-set-link insns) mark a call.  */
	      is_call = (insn[0] & 0xfd) == 0x31;
	      /* Extract the overlay call priority encoded in the
		 insn's immediate field.  */
	      priority = insn[1] & 0x0f;
	      priority <<= 8;
	      priority |= insn[2];
	      priority <<= 8;
	      priority |= insn[3];
	      priority >>= 7;
	      if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
		  != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
		{
		  if (!warned)
		    info->callbacks->einfo
		      /* xgettext:c-format */
		      (_("%B(%A+0x%v): call to non-code section"
			 " %B(%A), analysis incomplete\n"),
		       sec->owner, sec, irela->r_offset,
		       sym_sec->owner, sym_sec);
		  warned = TRUE;
		  continue;
		}
	    }
	  else
	    {
	      /* A 16-bit reloc on a non-branch insn: treat it as a
		 data reference, except that branch hint insns are
		 skipped entirely.  */
	      nonbranch = TRUE;
	      if (is_hint (insn))
		continue;
	    }
	}

      if (nonbranch)
	{
	  /* For --auto-overlay, count possible stubs we need for
	     function pointer references.  */
	  unsigned int sym_type;
	  if (h)
	    sym_type = h->type;
	  else
	    sym_type = ELF_ST_TYPE (sym->st_info);
	  if (sym_type == STT_FUNC)
	    {
	      if (call_tree && spu_hash_table (info)->params->auto_overlay)
		spu_hash_table (info)->non_ovly_stub += 1;
	      /* If the symbol type is STT_FUNC then this must be a
		 function pointer initialisation.  */
	      continue;
	    }
	  /* Ignore data references.  */
	  if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
	      != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
	    continue;
	  /* Otherwise we probably have a jump table reloc for
	     a switch statement or some other reference to a
	     code label.  */
	}

      /* Compute the target address within SYM_SEC.  */
      if (h)
	val = h->root.u.def.value;
      else
	val = sym->st_value;
      val += irela->r_addend;

      if (!call_tree)
	{
	  /* First pass: just record that the target address marks a
	     function (or function part).  */
	  struct function_info *fun;

	  if (irela->r_addend != 0)
	    {
	      /* Target is in the middle of a symbol: synthesize a
		 local symbol at the exact target address.  */
	      Elf_Internal_Sym *fake = bfd_zmalloc (sizeof (*fake));
	      if (fake == NULL)
		return FALSE;
	      fake->st_value = val;
	      fake->st_shndx
		= _bfd_elf_section_from_bfd_section (sym_sec->owner, sym_sec);
	      sym = fake;
	    }
	  if (sym)
	    fun = maybe_insert_function (sym_sec, sym, FALSE, is_call);
	  else
	    fun = maybe_insert_function (sym_sec, h, TRUE, is_call);
	  if (fun == NULL)
	    return FALSE;
	  /* Free the fake symbol unless FUN took ownership of it.  */
	  if (irela->r_addend != 0
	      && fun->u.sym != sym)
	    free (sym);
	  continue;
	}

      /* Second pass: build a call graph edge CALLER -> CALLEE.  */
      caller = find_function (sec, irela->r_offset, info);
      if (caller == NULL)
	return FALSE;
      callee = bfd_malloc (sizeof *callee);
      if (callee == NULL)
	return FALSE;

      callee->fun = find_function (sym_sec, val, info);
      if (callee->fun == NULL)
	return FALSE;
      callee->is_tail = !is_call;
      callee->is_pasted = FALSE;
      callee->broken_cycle = FALSE;
      callee->priority = priority;
      /* Function pointer references don't count as real calls.  */
      callee->count = nonbranch? 0 : 1;
      if (callee->fun->last_caller != sec)
	{
	  callee->fun->last_caller = sec;
	  callee->fun->call_count += 1;
	}
      if (!insert_callee (caller, callee))
	free (callee);
      else if (!is_call
	       && !callee->fun->is_func
	       && callee->fun->stack == 0)
	{
	  /* This is either a tail call or a branch from one part of
	     the function to another, ie. hot/cold section.  If the
	     destination has been called by some other function then
	     it is a separate function.  We also assume that functions
	     are not split across input files.  */
	  if (sec->owner != sym_sec->owner)
	    {
	      callee->fun->start = NULL;
	      callee->fun->is_func = TRUE;
	    }
	  else if (callee->fun->start == NULL)
	    {
	      /* Target is not yet part of any function group; attach
		 it to the root of the caller's group.  */
	      struct function_info *caller_start = caller;
	      while (caller_start->start)
		caller_start = caller_start->start;

	      if (caller_start != callee->fun)
		callee->fun->start = caller_start;
	    }
	  else
	    {
	      /* Both ends already belong to groups; if the group
		 roots differ, the target must be a real function.  */
	      struct function_info *callee_start;
	      struct function_info *caller_start;
	      callee_start = callee->fun;
	      while (callee_start->start)
		callee_start = callee_start->start;
	      caller_start = caller;
	      while (caller_start->start)
		caller_start = caller_start->start;
	      if (caller_start != callee_start)
		{
		  callee->fun->start = NULL;
		  callee->fun->is_func = TRUE;
		}
	    }
	}
    }

  return TRUE;
}
2895
2896/* Handle something like .init or .fini, which has a piece of a function.
2897   These sections are pasted together to form a single function.  */
2898
2899static bfd_boolean
2900pasted_function (asection *sec)
2901{
2902  struct bfd_link_order *l;
2903  struct _spu_elf_section_data *sec_data;
2904  struct spu_elf_stack_info *sinfo;
2905  Elf_Internal_Sym *fake;
2906  struct function_info *fun, *fun_start;
2907
2908  fake = bfd_zmalloc (sizeof (*fake));
2909  if (fake == NULL)
2910    return FALSE;
2911  fake->st_value = 0;
2912  fake->st_size = sec->size;
2913  fake->st_shndx
2914    = _bfd_elf_section_from_bfd_section (sec->owner, sec);
2915  fun = maybe_insert_function (sec, fake, FALSE, FALSE);
2916  if (!fun)
2917    return FALSE;
2918
2919  /* Find a function immediately preceding this section.  */
2920  fun_start = NULL;
2921  for (l = sec->output_section->map_head.link_order; l != NULL; l = l->next)
2922    {
2923      if (l->u.indirect.section == sec)
2924	{
2925	  if (fun_start != NULL)
2926	    {
2927	      struct call_info *callee = bfd_malloc (sizeof *callee);
2928	      if (callee == NULL)
2929		return FALSE;
2930
2931	      fun->start = fun_start;
2932	      callee->fun = fun;
2933	      callee->is_tail = TRUE;
2934	      callee->is_pasted = TRUE;
2935	      callee->broken_cycle = FALSE;
2936	      callee->priority = 0;
2937	      callee->count = 1;
2938	      if (!insert_callee (fun_start, callee))
2939		free (callee);
2940	      return TRUE;
2941	    }
2942	  break;
2943	}
2944      if (l->type == bfd_indirect_link_order
2945	  && (sec_data = spu_elf_section_data (l->u.indirect.section)) != NULL
2946	  && (sinfo = sec_data->u.i.stack_info) != NULL
2947	  && sinfo->num_fun != 0)
2948	fun_start = &sinfo->fun[sinfo->num_fun - 1];
2949    }
2950
2951  /* Don't return an error if we did not find a function preceding this
2952     section.  The section may have incorrect flags.  */
2953  return TRUE;
2954}
2955
2956/* Map address ranges in code sections to functions.  */
2957
2958static bfd_boolean
2959discover_functions (struct bfd_link_info *info)
2960{
2961  bfd *ibfd;
2962  int bfd_idx;
2963  Elf_Internal_Sym ***psym_arr;
2964  asection ***sec_arr;
2965  bfd_boolean gaps = FALSE;
2966
2967  bfd_idx = 0;
2968  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
2969    bfd_idx++;
2970
2971  psym_arr = bfd_zmalloc (bfd_idx * sizeof (*psym_arr));
2972  if (psym_arr == NULL)
2973    return FALSE;
2974  sec_arr = bfd_zmalloc (bfd_idx * sizeof (*sec_arr));
2975  if (sec_arr == NULL)
2976    return FALSE;
2977
2978  for (ibfd = info->input_bfds, bfd_idx = 0;
2979       ibfd != NULL;
2980       ibfd = ibfd->link.next, bfd_idx++)
2981    {
2982      extern const bfd_target spu_elf32_vec;
2983      Elf_Internal_Shdr *symtab_hdr;
2984      asection *sec;
2985      size_t symcount;
2986      Elf_Internal_Sym *syms, *sy, **psyms, **psy;
2987      asection **psecs, **p;
2988
2989      if (ibfd->xvec != &spu_elf32_vec)
2990	continue;
2991
2992      /* Read all the symbols.  */
2993      symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2994      symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
2995      if (symcount == 0)
2996	{
2997	  if (!gaps)
2998	    for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
2999	      if (interesting_section (sec))
3000		{
3001		  gaps = TRUE;
3002		  break;
3003		}
3004	  continue;
3005	}
3006
3007      if (symtab_hdr->contents != NULL)
3008	{
3009	  /* Don't use cached symbols since the generic ELF linker
3010	     code only reads local symbols, and we need globals too.  */
3011	  free (symtab_hdr->contents);
3012	  symtab_hdr->contents = NULL;
3013	}
3014      syms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
3015				   NULL, NULL, NULL);
3016      symtab_hdr->contents = (void *) syms;
3017      if (syms == NULL)
3018	return FALSE;
3019
3020      /* Select defined function symbols that are going to be output.  */
3021      psyms = bfd_malloc ((symcount + 1) * sizeof (*psyms));
3022      if (psyms == NULL)
3023	return FALSE;
3024      psym_arr[bfd_idx] = psyms;
3025      psecs = bfd_malloc (symcount * sizeof (*psecs));
3026      if (psecs == NULL)
3027	return FALSE;
3028      sec_arr[bfd_idx] = psecs;
3029      for (psy = psyms, p = psecs, sy = syms; sy < syms + symcount; ++p, ++sy)
3030	if (ELF_ST_TYPE (sy->st_info) == STT_NOTYPE
3031	    || ELF_ST_TYPE (sy->st_info) == STT_FUNC)
3032	  {
3033	    asection *s;
3034
3035	    *p = s = bfd_section_from_elf_index (ibfd, sy->st_shndx);
3036	    if (s != NULL && interesting_section (s))
3037	      *psy++ = sy;
3038	  }
3039      symcount = psy - psyms;
3040      *psy = NULL;
3041
3042      /* Sort them by section and offset within section.  */
3043      sort_syms_syms = syms;
3044      sort_syms_psecs = psecs;
3045      qsort (psyms, symcount, sizeof (*psyms), sort_syms);
3046
3047      /* Now inspect the function symbols.  */
3048      for (psy = psyms; psy < psyms + symcount; )
3049	{
3050	  asection *s = psecs[*psy - syms];
3051	  Elf_Internal_Sym **psy2;
3052
3053	  for (psy2 = psy; ++psy2 < psyms + symcount; )
3054	    if (psecs[*psy2 - syms] != s)
3055	      break;
3056
3057	  if (!alloc_stack_info (s, psy2 - psy))
3058	    return FALSE;
3059	  psy = psy2;
3060	}
3061
3062      /* First install info about properly typed and sized functions.
3063	 In an ideal world this will cover all code sections, except
3064	 when partitioning functions into hot and cold sections,
3065	 and the horrible pasted together .init and .fini functions.  */
3066      for (psy = psyms; psy < psyms + symcount; ++psy)
3067	{
3068	  sy = *psy;
3069	  if (ELF_ST_TYPE (sy->st_info) == STT_FUNC)
3070	    {
3071	      asection *s = psecs[sy - syms];
3072	      if (!maybe_insert_function (s, sy, FALSE, TRUE))
3073		return FALSE;
3074	    }
3075	}
3076
3077      for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
3078	if (interesting_section (sec))
3079	  gaps |= check_function_ranges (sec, info);
3080    }
3081
3082  if (gaps)
3083    {
3084      /* See if we can discover more function symbols by looking at
3085	 relocations.  */
3086      for (ibfd = info->input_bfds, bfd_idx = 0;
3087	   ibfd != NULL;
3088	   ibfd = ibfd->link.next, bfd_idx++)
3089	{
3090	  asection *sec;
3091
3092	  if (psym_arr[bfd_idx] == NULL)
3093	    continue;
3094
3095	  for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3096	    if (!mark_functions_via_relocs (sec, info, FALSE))
3097	      return FALSE;
3098	}
3099
3100      for (ibfd = info->input_bfds, bfd_idx = 0;
3101	   ibfd != NULL;
3102	   ibfd = ibfd->link.next, bfd_idx++)
3103	{
3104	  Elf_Internal_Shdr *symtab_hdr;
3105	  asection *sec;
3106	  Elf_Internal_Sym *syms, *sy, **psyms, **psy;
3107	  asection **psecs;
3108
3109	  if ((psyms = psym_arr[bfd_idx]) == NULL)
3110	    continue;
3111
3112	  psecs = sec_arr[bfd_idx];
3113
3114	  symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
3115	  syms = (Elf_Internal_Sym *) symtab_hdr->contents;
3116
3117	  gaps = FALSE;
3118	  for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
3119	    if (interesting_section (sec))
3120	      gaps |= check_function_ranges (sec, info);
3121	  if (!gaps)
3122	    continue;
3123
3124	  /* Finally, install all globals.  */
3125	  for (psy = psyms; (sy = *psy) != NULL; ++psy)
3126	    {
3127	      asection *s;
3128
3129	      s = psecs[sy - syms];
3130
3131	      /* Global syms might be improperly typed functions.  */
3132	      if (ELF_ST_TYPE (sy->st_info) != STT_FUNC
3133		  && ELF_ST_BIND (sy->st_info) == STB_GLOBAL)
3134		{
3135		  if (!maybe_insert_function (s, sy, FALSE, FALSE))
3136		    return FALSE;
3137		}
3138	    }
3139	}
3140
3141      for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
3142	{
3143	  extern const bfd_target spu_elf32_vec;
3144	  asection *sec;
3145
3146	  if (ibfd->xvec != &spu_elf32_vec)
3147	    continue;
3148
3149	  /* Some of the symbols we've installed as marking the
3150	     beginning of functions may have a size of zero.  Extend
3151	     the range of such functions to the beginning of the
3152	     next symbol of interest.  */
3153	  for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3154	    if (interesting_section (sec))
3155	      {
3156		struct _spu_elf_section_data *sec_data;
3157		struct spu_elf_stack_info *sinfo;
3158
3159		sec_data = spu_elf_section_data (sec);
3160		sinfo = sec_data->u.i.stack_info;
3161		if (sinfo != NULL && sinfo->num_fun != 0)
3162		  {
3163		    int fun_idx;
3164		    bfd_vma hi = sec->size;
3165
3166		    for (fun_idx = sinfo->num_fun; --fun_idx >= 0; )
3167		      {
3168			sinfo->fun[fun_idx].hi = hi;
3169			hi = sinfo->fun[fun_idx].lo;
3170		      }
3171
3172		    sinfo->fun[0].lo = 0;
3173		  }
3174		/* No symbols in this section.  Must be .init or .fini
3175		   or something similar.  */
3176		else if (!pasted_function (sec))
3177		  return FALSE;
3178	      }
3179	}
3180    }
3181
3182  for (ibfd = info->input_bfds, bfd_idx = 0;
3183       ibfd != NULL;
3184       ibfd = ibfd->link.next, bfd_idx++)
3185    {
3186      if (psym_arr[bfd_idx] == NULL)
3187	continue;
3188
3189      free (psym_arr[bfd_idx]);
3190      free (sec_arr[bfd_idx]);
3191    }
3192
3193  free (psym_arr);
3194  free (sec_arr);
3195
3196  return TRUE;
3197}
3198
3199/* Iterate over all function_info we have collected, calling DOIT on
3200   each node if ROOT_ONLY is false.  Only call DOIT on root nodes
3201   if ROOT_ONLY.  */
3202
3203static bfd_boolean
3204for_each_node (bfd_boolean (*doit) (struct function_info *,
3205				    struct bfd_link_info *,
3206				    void *),
3207	       struct bfd_link_info *info,
3208	       void *param,
3209	       int root_only)
3210{
3211  bfd *ibfd;
3212
3213  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
3214    {
3215      extern const bfd_target spu_elf32_vec;
3216      asection *sec;
3217
3218      if (ibfd->xvec != &spu_elf32_vec)
3219	continue;
3220
3221      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3222	{
3223	  struct _spu_elf_section_data *sec_data;
3224	  struct spu_elf_stack_info *sinfo;
3225
3226	  if ((sec_data = spu_elf_section_data (sec)) != NULL
3227	      && (sinfo = sec_data->u.i.stack_info) != NULL)
3228	    {
3229	      int i;
3230	      for (i = 0; i < sinfo->num_fun; ++i)
3231		if (!root_only || !sinfo->fun[i].non_root)
3232		  if (!doit (&sinfo->fun[i], info, param))
3233		    return FALSE;
3234	    }
3235	}
3236    }
3237  return TRUE;
3238}
3239
3240/* Transfer call info attached to struct function_info entries for
3241   all of a given function's sections to the first entry.  */
3242
3243static bfd_boolean
3244transfer_calls (struct function_info *fun,
3245		struct bfd_link_info *info ATTRIBUTE_UNUSED,
3246		void *param ATTRIBUTE_UNUSED)
3247{
3248  struct function_info *start = fun->start;
3249
3250  if (start != NULL)
3251    {
3252      struct call_info *call, *call_next;
3253
3254      while (start->start != NULL)
3255	start = start->start;
3256      for (call = fun->call_list; call != NULL; call = call_next)
3257	{
3258	  call_next = call->next;
3259	  if (!insert_callee (start, call))
3260	    free (call);
3261	}
3262      fun->call_list = NULL;
3263    }
3264  return TRUE;
3265}
3266
3267/* Mark nodes in the call graph that are called by some other node.  */
3268
3269static bfd_boolean
3270mark_non_root (struct function_info *fun,
3271	       struct bfd_link_info *info ATTRIBUTE_UNUSED,
3272	       void *param ATTRIBUTE_UNUSED)
3273{
3274  struct call_info *call;
3275
3276  if (fun->visit1)
3277    return TRUE;
3278  fun->visit1 = TRUE;
3279  for (call = fun->call_list; call; call = call->next)
3280    {
3281      call->fun->non_root = TRUE;
3282      mark_non_root (call->fun, 0, 0);
3283    }
3284  return TRUE;
3285}
3286
/* Remove cycles from the call graph.  Set depth of nodes.  */

static bfd_boolean
remove_cycles (struct function_info *fun,
	       struct bfd_link_info *info,
	       void *param)
{
  struct call_info **callp, *call;
  unsigned int depth = *(unsigned int *) param;
  unsigned int max_depth = depth;

  fun->depth = depth;
  fun->visit2 = TRUE;
  /* "marking" flags nodes on the current DFS stack; reaching a
     marked callee means we have found a back edge, ie. a cycle.  */
  fun->marking = TRUE;

  callp = &fun->call_list;
  while ((call = *callp) != NULL)
    {
      /* Pasted sections are parts of one function, so they don't
	 add a level of call depth.  */
      call->max_depth = depth + !call->is_pasted;
      if (!call->fun->visit2)
	{
	  if (!remove_cycles (call->fun, info, &call->max_depth))
	    return FALSE;
	  if (max_depth < call->max_depth)
	    max_depth = call->max_depth;
	}
      else if (call->fun->marking)
	{
	  struct spu_link_hash_table *htab = spu_hash_table (info);

	  if (!htab->params->auto_overlay
	      && htab->params->stack_analysis)
	    {
	      const char *f1 = func_name (fun);
	      const char *f2 = func_name (call->fun);

	      /* xgettext:c-format */
	      info->callbacks->info (_("Stack analysis will ignore the call "
				       "from %s to %s\n"),
				     f1, f2);
	    }

	  /* Break the cycle by flagging this edge rather than
	     deleting it; later walks skip broken_cycle edges.  */
	  call->broken_cycle = TRUE;
	}
      callp = &call->next;
    }
  fun->marking = FALSE;
  /* Report the deepest call depth seen below this node back to
     the caller.  */
  *(unsigned int *) param = max_depth;
  return TRUE;
}
3337
3338/* Check that we actually visited all nodes in remove_cycles.  If we
3339   didn't, then there is some cycle in the call graph not attached to
3340   any root node.  Arbitrarily choose a node in the cycle as a new
3341   root and break the cycle.  */
3342
3343static bfd_boolean
3344mark_detached_root (struct function_info *fun,
3345		    struct bfd_link_info *info,
3346		    void *param)
3347{
3348  if (fun->visit2)
3349    return TRUE;
3350  fun->non_root = FALSE;
3351  *(unsigned int *) param = 0;
3352  return remove_cycles (fun, info, param);
3353}
3354
3355/* Populate call_list for each function.  */
3356
3357static bfd_boolean
3358build_call_tree (struct bfd_link_info *info)
3359{
3360  bfd *ibfd;
3361  unsigned int depth;
3362
3363  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
3364    {
3365      extern const bfd_target spu_elf32_vec;
3366      asection *sec;
3367
3368      if (ibfd->xvec != &spu_elf32_vec)
3369	continue;
3370
3371      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3372	if (!mark_functions_via_relocs (sec, info, TRUE))
3373	  return FALSE;
3374    }
3375
3376  /* Transfer call info from hot/cold section part of function
3377     to main entry.  */
3378  if (!spu_hash_table (info)->params->auto_overlay
3379      && !for_each_node (transfer_calls, info, 0, FALSE))
3380    return FALSE;
3381
3382  /* Find the call graph root(s).  */
3383  if (!for_each_node (mark_non_root, info, 0, FALSE))
3384    return FALSE;
3385
3386  /* Remove cycles from the call graph.  We start from the root node(s)
3387     so that we break cycles in a reasonable place.  */
3388  depth = 0;
3389  if (!for_each_node (remove_cycles, info, &depth, TRUE))
3390    return FALSE;
3391
3392  return for_each_node (mark_detached_root, info, &depth, FALSE);
3393}
3394
3395/* qsort predicate to sort calls by priority, max_depth then count.  */
3396
3397static int
3398sort_calls (const void *a, const void *b)
3399{
3400  struct call_info *const *c1 = a;
3401  struct call_info *const *c2 = b;
3402  int delta;
3403
3404  delta = (*c2)->priority - (*c1)->priority;
3405  if (delta != 0)
3406    return delta;
3407
3408  delta = (*c2)->max_depth - (*c1)->max_depth;
3409  if (delta != 0)
3410    return delta;
3411
3412  delta = (*c2)->count - (*c1)->count;
3413  if (delta != 0)
3414    return delta;
3415
3416  return (char *) c1 - (char *) c2;
3417}
3418
/* Parameter block for mark_overlay_section: running maximum, over
   all marked functions, of a function's text size plus any rodata
   chosen to accompany it.  */
struct _mos_param {
  unsigned int max_overlay_size;
};
3422
/* Set linker_mark and gc_mark on any sections that we will put in
   overlays.  These flags are used by the generic ELF linker, but we
   won't be continuing on to bfd_elf_final_link so it is OK to use
   them.  linker_mark is clear before we get here.  Set segment_mark
   on sections that are part of a pasted function (excluding the last
   section).

   Set up function rodata section if --overlay-rodata.  We don't
   currently include merged string constant rodata sections, since
   their contents may be shared between functions and so cannot be
   assigned to a single function's overlay.

   Sort the call graph so that the deepest nodes will be visited
   first.  */

static bfd_boolean
mark_overlay_section (struct function_info *fun,
		      struct bfd_link_info *info,
		      void *param)
{
  struct call_info *call;
  unsigned int count;
  struct _mos_param *mos_param = param;
  struct spu_link_hash_table *htab = spu_hash_table (info);

  if (fun->visit4)
    return TRUE;

  fun->visit4 = TRUE;
  /* For soft-icache, only .text.ia.* sections, .init/.fini, or
     everything when --non-ia-text is given, are candidates.  */
  if (!fun->sec->linker_mark
      && (htab->params->ovly_flavour != ovly_soft_icache
	  || htab->params->non_ia_text
	  || strncmp (fun->sec->name, ".text.ia.", 9) == 0
	  || strcmp (fun->sec->name, ".init") == 0
	  || strcmp (fun->sec->name, ".fini") == 0))
    {
      unsigned int size;

      fun->sec->linker_mark = 1;
      fun->sec->gc_mark = 1;
      fun->sec->segment_mark = 0;
      /* Ensure SEC_CODE is set on this text section (it ought to
	 be!), and SEC_CODE is clear on rodata sections.  We use
	 this flag to differentiate the two overlay section types.  */
      fun->sec->flags |= SEC_CODE;

      size = fun->sec->size;
      if (htab->params->auto_overlay & OVERLAY_RODATA)
	{
	  char *name = NULL;

	  /* Find the rodata section corresponding to this function's
	     text section.  */
	  if (strcmp (fun->sec->name, ".text") == 0)
	    {
	      /* .text pairs with plain .rodata.  */
	      name = bfd_malloc (sizeof (".rodata"));
	      if (name == NULL)
		return FALSE;
	      memcpy (name, ".rodata", sizeof (".rodata"));
	    }
	  else if (strncmp (fun->sec->name, ".text.", 6) == 0)
	    {
	      /* .text.<fn> pairs with .rodata.<fn>: append the
		 ".<fn>" suffix (including its NUL) after ".rodata".  */
	      size_t len = strlen (fun->sec->name);
	      name = bfd_malloc (len + 3);
	      if (name == NULL)
		return FALSE;
	      memcpy (name, ".rodata", sizeof (".rodata"));
	      memcpy (name + 7, fun->sec->name + 5, len - 4);
	    }
	  else if (strncmp (fun->sec->name, ".gnu.linkonce.t.", 16) == 0)
	    {
	      /* .gnu.linkonce.t.<fn> pairs with .gnu.linkonce.r.<fn>:
		 just replace the 't' (at index 14) with 'r'.  */
	      size_t len = strlen (fun->sec->name) + 1;
	      name = bfd_malloc (len);
	      if (name == NULL)
		return FALSE;
	      memcpy (name, fun->sec->name, len);
	      name[14] = 'r';
	    }

	  if (name != NULL)
	    {
	      asection *rodata = NULL;
	      asection *group_sec = elf_section_data (fun->sec)->next_in_group;
	      /* If the text section belongs to a section group, look
		 for the rodata section inside the same group,
		 otherwise search the whole bfd by name.  */
	      if (group_sec == NULL)
		rodata = bfd_get_section_by_name (fun->sec->owner, name);
	      else
		while (group_sec != NULL && group_sec != fun->sec)
		  {
		    if (strcmp (group_sec->name, name) == 0)
		      {
			rodata = group_sec;
			break;
		      }
		    group_sec = elf_section_data (group_sec)->next_in_group;
		  }
	      fun->rodata = rodata;
	      if (fun->rodata)
		{
		  size += fun->rodata->size;
		  /* Drop the rodata pairing if it would make the
		     function exceed the soft-icache line size.  */
		  if (htab->params->line_size != 0
		      && size > htab->params->line_size)
		    {
		      size -= fun->rodata->size;
		      fun->rodata = NULL;
		    }
		  else
		    {
		      fun->rodata->linker_mark = 1;
		      fun->rodata->gc_mark = 1;
		      fun->rodata->flags &= ~SEC_CODE;
		    }
		}
	      free (name);
	    }
	}
      if (mos_param->max_overlay_size < size)
	mos_param->max_overlay_size = size;
    }

  /* Sort the call list (see sort_calls) so higher priority and
     deeper calls come first.  */
  for (count = 0, call = fun->call_list; call != NULL; call = call->next)
    count += 1;

  if (count > 1)
    {
      struct call_info **calls = bfd_malloc (count * sizeof (*calls));
      if (calls == NULL)
	return FALSE;

      for (count = 0, call = fun->call_list; call != NULL; call = call->next)
	calls[count++] = call;

      qsort (calls, count, sizeof (*calls), sort_calls);

      fun->call_list = NULL;
      while (count != 0)
	{
	  --count;
	  calls[count]->next = fun->call_list;
	  fun->call_list = calls[count];
	}
      free (calls);
    }

  for (call = fun->call_list; call != NULL; call = call->next)
    {
      if (call->is_pasted)
	{
	  /* There can only be one is_pasted call per function_info.  */
	  BFD_ASSERT (!fun->sec->segment_mark);
	  fun->sec->segment_mark = 1;
	}
      if (!call->broken_cycle
	  && !mark_overlay_section (call->fun, info, param))
	return FALSE;
    }

  /* Don't put entry code into an overlay.  The overlay manager needs
     a stack!  Also, don't mark .ovl.init as an overlay.  */
  if (fun->lo + fun->sec->output_offset + fun->sec->output_section->vma
      == info->output_bfd->start_address
      || strncmp (fun->sec->output_section->name, ".ovl.init", 9) == 0)
    {
      fun->sec->linker_mark = 0;
      if (fun->rodata != NULL)
	fun->rodata->linker_mark = 0;
    }
  return TRUE;
}
3589
/* If non-zero then unmark functions called from those within sections
   that we need to unmark.  Unfortunately this isn't reliable since the
   call graph cannot know the destination of function pointer calls.  */
#define RECURSE_UNMARK 0

/* Parameter block for unmark_overlay_section.  */
struct _uos_param {
  /* Input section whose functions must not be overlaid.  */
  asection *exclude_input_section;
  /* Output section whose functions must not be overlaid.  */
  asection *exclude_output_section;
  /* Non-zero while inside an excluded call subtree; only meaningful
     when RECURSE_UNMARK is enabled.  */
  unsigned long clearing;
};
3600
3601/* Undo some of mark_overlay_section's work.  */
3602
3603static bfd_boolean
3604unmark_overlay_section (struct function_info *fun,
3605			struct bfd_link_info *info,
3606			void *param)
3607{
3608  struct call_info *call;
3609  struct _uos_param *uos_param = param;
3610  unsigned int excluded = 0;
3611
3612  if (fun->visit5)
3613    return TRUE;
3614
3615  fun->visit5 = TRUE;
3616
3617  excluded = 0;
3618  if (fun->sec == uos_param->exclude_input_section
3619      || fun->sec->output_section == uos_param->exclude_output_section)
3620    excluded = 1;
3621
3622  if (RECURSE_UNMARK)
3623    uos_param->clearing += excluded;
3624
3625  if (RECURSE_UNMARK ? uos_param->clearing : excluded)
3626    {
3627      fun->sec->linker_mark = 0;
3628      if (fun->rodata)
3629	fun->rodata->linker_mark = 0;
3630    }
3631
3632  for (call = fun->call_list; call != NULL; call = call->next)
3633    if (!call->broken_cycle
3634	&& !unmark_overlay_section (call->fun, info, param))
3635      return FALSE;
3636
3637  if (RECURSE_UNMARK)
3638    uos_param->clearing -= excluded;
3639  return TRUE;
3640}
3641
/* Parameter block for collect_lib_sections: the remaining size
   budget for non-overlay "library" code, and the output cursor into
   an array of (text, rodata) section pairs.  */
struct _cl_param {
  unsigned int lib_size;
  asection **lib_sections;
};
3646
3647/* Add sections we have marked as belonging to overlays to an array
3648   for consideration as non-overlay sections.  The array consist of
3649   pairs of sections, (text,rodata), for functions in the call graph.  */
3650
3651static bfd_boolean
3652collect_lib_sections (struct function_info *fun,
3653		      struct bfd_link_info *info,
3654		      void *param)
3655{
3656  struct _cl_param *lib_param = param;
3657  struct call_info *call;
3658  unsigned int size;
3659
3660  if (fun->visit6)
3661    return TRUE;
3662
3663  fun->visit6 = TRUE;
3664  if (!fun->sec->linker_mark || !fun->sec->gc_mark || fun->sec->segment_mark)
3665    return TRUE;
3666
3667  size = fun->sec->size;
3668  if (fun->rodata)
3669    size += fun->rodata->size;
3670
3671  if (size <= lib_param->lib_size)
3672    {
3673      *lib_param->lib_sections++ = fun->sec;
3674      fun->sec->gc_mark = 0;
3675      if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
3676	{
3677	  *lib_param->lib_sections++ = fun->rodata;
3678	  fun->rodata->gc_mark = 0;
3679	}
3680      else
3681	*lib_param->lib_sections++ = NULL;
3682    }
3683
3684  for (call = fun->call_list; call != NULL; call = call->next)
3685    if (!call->broken_cycle)
3686      collect_lib_sections (call->fun, info, param);
3687
3688  return TRUE;
3689}
3690
3691/* qsort predicate to sort sections by call count.  */
3692
3693static int
3694sort_lib (const void *a, const void *b)
3695{
3696  asection *const *s1 = a;
3697  asection *const *s2 = b;
3698  struct _spu_elf_section_data *sec_data;
3699  struct spu_elf_stack_info *sinfo;
3700  int delta;
3701
3702  delta = 0;
3703  if ((sec_data = spu_elf_section_data (*s1)) != NULL
3704      && (sinfo = sec_data->u.i.stack_info) != NULL)
3705    {
3706      int i;
3707      for (i = 0; i < sinfo->num_fun; ++i)
3708	delta -= sinfo->fun[i].call_count;
3709    }
3710
3711  if ((sec_data = spu_elf_section_data (*s2)) != NULL
3712      && (sinfo = sec_data->u.i.stack_info) != NULL)
3713    {
3714      int i;
3715      for (i = 0; i < sinfo->num_fun; ++i)
3716	delta += sinfo->fun[i].call_count;
3717    }
3718
3719  if (delta != 0)
3720    return delta;
3721
3722  return s1 - s2;
3723}
3724
3725/* Remove some sections from those marked to be in overlays.  Choose
3726   those that are called from many places, likely library functions.  */
3727
3728static unsigned int
3729auto_ovl_lib_functions (struct bfd_link_info *info, unsigned int lib_size)
3730{
3731  bfd *ibfd;
3732  asection **lib_sections;
3733  unsigned int i, lib_count;
3734  struct _cl_param collect_lib_param;
3735  struct function_info dummy_caller;
3736  struct spu_link_hash_table *htab;
3737
3738  memset (&dummy_caller, 0, sizeof (dummy_caller));
3739  lib_count = 0;
3740  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
3741    {
3742      extern const bfd_target spu_elf32_vec;
3743      asection *sec;
3744
3745      if (ibfd->xvec != &spu_elf32_vec)
3746	continue;
3747
3748      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3749	if (sec->linker_mark
3750	    && sec->size < lib_size
3751	    && (sec->flags & SEC_CODE) != 0)
3752	  lib_count += 1;
3753    }
3754  lib_sections = bfd_malloc (lib_count * 2 * sizeof (*lib_sections));
3755  if (lib_sections == NULL)
3756    return (unsigned int) -1;
3757  collect_lib_param.lib_size = lib_size;
3758  collect_lib_param.lib_sections = lib_sections;
3759  if (!for_each_node (collect_lib_sections, info, &collect_lib_param,
3760		      TRUE))
3761    return (unsigned int) -1;
3762  lib_count = (collect_lib_param.lib_sections - lib_sections) / 2;
3763
3764  /* Sort sections so that those with the most calls are first.  */
3765  if (lib_count > 1)
3766    qsort (lib_sections, lib_count, 2 * sizeof (*lib_sections), sort_lib);
3767
3768  htab = spu_hash_table (info);
3769  for (i = 0; i < lib_count; i++)
3770    {
3771      unsigned int tmp, stub_size;
3772      asection *sec;
3773      struct _spu_elf_section_data *sec_data;
3774      struct spu_elf_stack_info *sinfo;
3775
3776      sec = lib_sections[2 * i];
3777      /* If this section is OK, its size must be less than lib_size.  */
3778      tmp = sec->size;
3779      /* If it has a rodata section, then add that too.  */
3780      if (lib_sections[2 * i + 1])
3781	tmp += lib_sections[2 * i + 1]->size;
3782      /* Add any new overlay call stubs needed by the section.  */
3783      stub_size = 0;
3784      if (tmp < lib_size
3785	  && (sec_data = spu_elf_section_data (sec)) != NULL
3786	  && (sinfo = sec_data->u.i.stack_info) != NULL)
3787	{
3788	  int k;
3789	  struct call_info *call;
3790
3791	  for (k = 0; k < sinfo->num_fun; ++k)
3792	    for (call = sinfo->fun[k].call_list; call; call = call->next)
3793	      if (call->fun->sec->linker_mark)
3794		{
3795		  struct call_info *p;
3796		  for (p = dummy_caller.call_list; p; p = p->next)
3797		    if (p->fun == call->fun)
3798		      break;
3799		  if (!p)
3800		    stub_size += ovl_stub_size (htab->params);
3801		}
3802	}
3803      if (tmp + stub_size < lib_size)
3804	{
3805	  struct call_info **pp, *p;
3806
3807	  /* This section fits.  Mark it as non-overlay.  */
3808	  lib_sections[2 * i]->linker_mark = 0;
3809	  if (lib_sections[2 * i + 1])
3810	    lib_sections[2 * i + 1]->linker_mark = 0;
3811	  lib_size -= tmp + stub_size;
3812	  /* Call stubs to the section we just added are no longer
3813	     needed.  */
3814	  pp = &dummy_caller.call_list;
3815	  while ((p = *pp) != NULL)
3816	    if (!p->fun->sec->linker_mark)
3817	      {
3818		lib_size += ovl_stub_size (htab->params);
3819		*pp = p->next;
3820		free (p);
3821	      }
3822	    else
3823	      pp = &p->next;
3824	  /* Add new call stubs to dummy_caller.  */
3825	  if ((sec_data = spu_elf_section_data (sec)) != NULL
3826	      && (sinfo = sec_data->u.i.stack_info) != NULL)
3827	    {
3828	      int k;
3829	      struct call_info *call;
3830
3831	      for (k = 0; k < sinfo->num_fun; ++k)
3832		for (call = sinfo->fun[k].call_list;
3833		     call;
3834		     call = call->next)
3835		  if (call->fun->sec->linker_mark)
3836		    {
3837		      struct call_info *callee;
3838		      callee = bfd_malloc (sizeof (*callee));
3839		      if (callee == NULL)
3840			return (unsigned int) -1;
3841		      *callee = *call;
3842		      if (!insert_callee (&dummy_caller, callee))
3843			free (callee);
3844		    }
3845	    }
3846	}
3847    }
3848  while (dummy_caller.call_list != NULL)
3849    {
3850      struct call_info *call = dummy_caller.call_list;
3851      dummy_caller.call_list = call->next;
3852      free (call);
3853    }
3854  for (i = 0; i < 2 * lib_count; i++)
3855    if (lib_sections[i])
3856      lib_sections[i]->gc_mark = 1;
3857  free (lib_sections);
3858  return lib_size;
3859}
3860
/* Build an array of overlay sections.  The deepest node's section is
   added first, then its parent node's section, then everything called
   from the parent section.  The idea being to group sections to
   minimise calls between different overlays.  */

static bfd_boolean
collect_overlays (struct function_info *fun,
		  struct bfd_link_info *info,
		  void *param)
{
  struct call_info *call;
  bfd_boolean added_fun;
  asection ***ovly_sections = param;

  if (fun->visit7)
    return TRUE;

  fun->visit7 = TRUE;
  /* Descend via the first real (un-pasted, un-broken) call before
     adding this function, so the deepest section is emitted first.  */
  for (call = fun->call_list; call != NULL; call = call->next)
    if (!call->is_pasted && !call->broken_cycle)
      {
	if (!collect_overlays (call->fun, info, ovly_sections))
	  return FALSE;
	break;
      }

  added_fun = FALSE;
  if (fun->sec->linker_mark && fun->sec->gc_mark)
    {
      fun->sec->gc_mark = 0;
      *(*ovly_sections)++ = fun->sec;
      /* Each function occupies a pair of slots; the second holds
	 the rodata partner or NULL.  */
      if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
	{
	  fun->rodata->gc_mark = 0;
	  *(*ovly_sections)++ = fun->rodata;
	}
      else
	*(*ovly_sections)++ = NULL;
      added_fun = TRUE;

      /* Pasted sections must stay with the first section.  We don't
	 put pasted sections in the array, just the first section.
	 Mark subsequent sections as already considered.  */
      if (fun->sec->segment_mark)
	{
	  struct function_info *call_fun = fun;
	  do
	    {
	      for (call = call_fun->call_list; call != NULL; call = call->next)
		if (call->is_pasted)
		  {
		    call_fun = call->fun;
		    call_fun->sec->gc_mark = 0;
		    if (call_fun->rodata)
		      call_fun->rodata->gc_mark = 0;
		    break;
		  }
	      /* A section with segment_mark set must have a pasted
		 call (mark_overlay_section set both together).  */
	      if (call == NULL)
		abort ();
	    }
	  while (call_fun->sec->segment_mark);
	}
    }

  /* Now visit all remaining callees.  */
  for (call = fun->call_list; call != NULL; call = call->next)
    if (!call->broken_cycle
	&& !collect_overlays (call->fun, info, ovly_sections))
      return FALSE;

  if (added_fun)
    {
      struct _spu_elf_section_data *sec_data;
      struct spu_elf_stack_info *sinfo;

      /* Also pull in any other functions that live in the same
	 section as the one just added.  */
      if ((sec_data = spu_elf_section_data (fun->sec)) != NULL
	  && (sinfo = sec_data->u.i.stack_info) != NULL)
	{
	  int i;
	  for (i = 0; i < sinfo->num_fun; ++i)
	    if (!collect_overlays (&sinfo->fun[i], info, ovly_sections))
	      return FALSE;
	}
    }

  return TRUE;
}
3947
/* Parameter block for sum_stack.  */
struct _sum_stack_param {
  /* Cumulative stack requirement of the node just visited.  */
  size_t cum_stack;
  /* Worst-case stack over all root nodes seen so far.  */
  size_t overall_stack;
  /* Whether to define __stack_* absolute symbols as we go.  */
  bfd_boolean emit_stack_syms;
};
3953
/* Descend the call graph for FUN, accumulating total stack required.  */

static bfd_boolean
sum_stack (struct function_info *fun,
	   struct bfd_link_info *info,
	   void *param)
{
  struct call_info *call;
  struct function_info *max;
  size_t stack, cum_stack;
  const char *f1;
  bfd_boolean has_call;
  struct _sum_stack_param *sum_stack_param = param;
  struct spu_link_hash_table *htab;

  cum_stack = fun->stack;
  sum_stack_param->cum_stack = cum_stack;
  /* visit3 means fun->stack already holds the cumulative figure
     from an earlier traversal.  */
  if (fun->visit3)
    return TRUE;

  has_call = FALSE;
  max = NULL;
  for (call = fun->call_list; call; call = call->next)
    {
      if (call->broken_cycle)
	continue;
      if (!call->is_pasted)
	has_call = TRUE;
      if (!sum_stack (call->fun, info, sum_stack_param))
	return FALSE;
      stack = sum_stack_param->cum_stack;
      /* Include caller stack for normal calls, don't do so for
	 tail calls.  fun->stack here is local stack usage for
	 this function.  */
      if (!call->is_tail || call->is_pasted || call->fun->start != NULL)
	stack += fun->stack;
      /* Remember the callee on the worst-case path.  */
      if (cum_stack < stack)
	{
	  cum_stack = stack;
	  max = call->fun;
	}
    }

  sum_stack_param->cum_stack = cum_stack;
  stack = fun->stack;
  /* Now fun->stack holds cumulative stack.  */
  fun->stack = cum_stack;
  fun->visit3 = TRUE;

  if (!fun->non_root
      && sum_stack_param->overall_stack < cum_stack)
    sum_stack_param->overall_stack = cum_stack;

  htab = spu_hash_table (info);
  /* No reporting and no stack symbols when auto-overlaying.  */
  if (htab->params->auto_overlay)
    return TRUE;

  f1 = func_name (fun);
  if (htab->params->stack_analysis)
    {
      if (!fun->non_root)
	info->callbacks->info ("  %s: 0x%v\n", f1, (bfd_vma) cum_stack);
      info->callbacks->minfo ("%s: 0x%v 0x%v\n",
			      f1, (bfd_vma) stack, (bfd_vma) cum_stack);

      if (has_call)
	{
	  info->callbacks->minfo (_("  calls:\n"));
	  for (call = fun->call_list; call; call = call->next)
	    if (!call->is_pasted && !call->broken_cycle)
	      {
		const char *f2 = func_name (call->fun);
		/* "*" marks the call on the worst-case stack path,
		   "t" marks a tail call.  */
		const char *ann1 = call->fun == max ? "*" : " ";
		const char *ann2 = call->is_tail ? "t" : " ";

		info->callbacks->minfo ("   %s%s %s\n", ann1, ann2, f2);
	      }
	}
    }

  if (sum_stack_param->emit_stack_syms)
    {
      /* 18 covers "__stack_", up to eight hex digits of section id,
	 "_", and the terminating NUL.  */
      char *name = bfd_malloc (18 + strlen (f1));
      struct elf_link_hash_entry *h;

      if (name == NULL)
	return FALSE;

      if (fun->global || ELF_ST_BIND (fun->u.sym->st_info) == STB_GLOBAL)
	sprintf (name, "__stack_%s", f1);
      else
	sprintf (name, "__stack_%x_%s", fun->sec->id & 0xffffffff, f1);

      h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
      free (name);
      if (h != NULL
	  && (h->root.type == bfd_link_hash_new
	      || h->root.type == bfd_link_hash_undefined
	      || h->root.type == bfd_link_hash_undefweak))
	{
	  /* Define the symbol as an absolute value giving the
	     cumulative stack requirement.  */
	  h->root.type = bfd_link_hash_defined;
	  h->root.u.def.section = bfd_abs_section_ptr;
	  h->root.u.def.value = cum_stack;
	  h->size = 0;
	  h->type = 0;
	  h->ref_regular = 1;
	  h->def_regular = 1;
	  h->ref_regular_nonweak = 1;
	  h->forced_local = 1;
	  h->non_elf = 0;
	}
    }

  return TRUE;
}
4069
4070/* SEC is part of a pasted function.  Return the call_info for the
4071   next section of this function.  */
4072
4073static struct call_info *
4074find_pasted_call (asection *sec)
4075{
4076  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
4077  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
4078  struct call_info *call;
4079  int k;
4080
4081  for (k = 0; k < sinfo->num_fun; ++k)
4082    for (call = sinfo->fun[k].call_list; call != NULL; call = call->next)
4083      if (call->is_pasted)
4084	return call;
4085  abort ();
4086  return 0;
4087}
4088
4089/* qsort predicate to sort bfds by file name.  */
4090
4091static int
4092sort_bfds (const void *a, const void *b)
4093{
4094  bfd *const *abfd1 = a;
4095  bfd *const *abfd2 = b;
4096
4097  return filename_cmp ((*abfd1)->filename, (*abfd2)->filename);
4098}
4099
4100static unsigned int
4101print_one_overlay_section (FILE *script,
4102			   unsigned int base,
4103			   unsigned int count,
4104			   unsigned int ovlynum,
4105			   unsigned int *ovly_map,
4106			   asection **ovly_sections,
4107			   struct bfd_link_info *info)
4108{
4109  unsigned int j;
4110
4111  for (j = base; j < count && ovly_map[j] == ovlynum; j++)
4112    {
4113      asection *sec = ovly_sections[2 * j];
4114
4115      if (fprintf (script, "   %s%c%s (%s)\n",
4116		   (sec->owner->my_archive != NULL
4117		    ? sec->owner->my_archive->filename : ""),
4118		   info->path_separator,
4119		   sec->owner->filename,
4120		   sec->name) <= 0)
4121	return -1;
4122      if (sec->segment_mark)
4123	{
4124	  struct call_info *call = find_pasted_call (sec);
4125	  while (call != NULL)
4126	    {
4127	      struct function_info *call_fun = call->fun;
4128	      sec = call_fun->sec;
4129	      if (fprintf (script, "   %s%c%s (%s)\n",
4130			   (sec->owner->my_archive != NULL
4131			    ? sec->owner->my_archive->filename : ""),
4132			   info->path_separator,
4133			   sec->owner->filename,
4134			   sec->name) <= 0)
4135		return -1;
4136	      for (call = call_fun->call_list; call; call = call->next)
4137		if (call->is_pasted)
4138		  break;
4139	    }
4140	}
4141    }
4142
4143  for (j = base; j < count && ovly_map[j] == ovlynum; j++)
4144    {
4145      asection *sec = ovly_sections[2 * j + 1];
4146      if (sec != NULL
4147	  && fprintf (script, "   %s%c%s (%s)\n",
4148		      (sec->owner->my_archive != NULL
4149		       ? sec->owner->my_archive->filename : ""),
4150		      info->path_separator,
4151		      sec->owner->filename,
4152		      sec->name) <= 0)
4153	return -1;
4154
4155      sec = ovly_sections[2 * j];
4156      if (sec->segment_mark)
4157	{
4158	  struct call_info *call = find_pasted_call (sec);
4159	  while (call != NULL)
4160	    {
4161	      struct function_info *call_fun = call->fun;
4162	      sec = call_fun->rodata;
4163	      if (sec != NULL
4164		  && fprintf (script, "   %s%c%s (%s)\n",
4165			      (sec->owner->my_archive != NULL
4166			       ? sec->owner->my_archive->filename : ""),
4167			      info->path_separator,
4168			      sec->owner->filename,
4169			      sec->name) <= 0)
4170		return -1;
4171	      for (call = call_fun->call_list; call; call = call->next)
4172		if (call->is_pasted)
4173		  break;
4174	    }
4175	}
4176    }
4177
4178  return j;
4179}
4180
4181/* Handle --auto-overlay.  */
4182
4183static void
4184spu_elf_auto_overlay (struct bfd_link_info *info)
4185{
4186  bfd *ibfd;
4187  bfd **bfd_arr;
4188  struct elf_segment_map *m;
4189  unsigned int fixed_size, lo, hi;
4190  unsigned int reserved;
4191  struct spu_link_hash_table *htab;
4192  unsigned int base, i, count, bfd_count;
4193  unsigned int region, ovlynum;
4194  asection **ovly_sections, **ovly_p;
4195  unsigned int *ovly_map;
4196  FILE *script;
4197  unsigned int total_overlay_size, overlay_size;
4198  const char *ovly_mgr_entry;
4199  struct elf_link_hash_entry *h;
4200  struct _mos_param mos_param;
4201  struct _uos_param uos_param;
4202  struct function_info dummy_caller;
4203
4204  /* Find the extents of our loadable image.  */
4205  lo = (unsigned int) -1;
4206  hi = 0;
4207  for (m = elf_seg_map (info->output_bfd); m != NULL; m = m->next)
4208    if (m->p_type == PT_LOAD)
4209      for (i = 0; i < m->count; i++)
4210	if (m->sections[i]->size != 0)
4211	  {
4212	    if (m->sections[i]->vma < lo)
4213	      lo = m->sections[i]->vma;
4214	    if (m->sections[i]->vma + m->sections[i]->size - 1 > hi)
4215	      hi = m->sections[i]->vma + m->sections[i]->size - 1;
4216	  }
4217  fixed_size = hi + 1 - lo;
4218
4219  if (!discover_functions (info))
4220    goto err_exit;
4221
4222  if (!build_call_tree (info))
4223    goto err_exit;
4224
4225  htab = spu_hash_table (info);
4226  reserved = htab->params->auto_overlay_reserved;
4227  if (reserved == 0)
4228    {
4229      struct _sum_stack_param sum_stack_param;
4230
4231      sum_stack_param.emit_stack_syms = 0;
4232      sum_stack_param.overall_stack = 0;
4233      if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
4234	goto err_exit;
4235      reserved = (sum_stack_param.overall_stack
4236		  + htab->params->extra_stack_space);
4237    }
4238
4239  /* No need for overlays if everything already fits.  */
4240  if (fixed_size + reserved <= htab->local_store
4241      && htab->params->ovly_flavour != ovly_soft_icache)
4242    {
4243      htab->params->auto_overlay = 0;
4244      return;
4245    }
4246
4247  uos_param.exclude_input_section = 0;
4248  uos_param.exclude_output_section
4249    = bfd_get_section_by_name (info->output_bfd, ".interrupt");
4250
4251  ovly_mgr_entry = "__ovly_load";
4252  if (htab->params->ovly_flavour == ovly_soft_icache)
4253    ovly_mgr_entry = "__icache_br_handler";
4254  h = elf_link_hash_lookup (&htab->elf, ovly_mgr_entry,
4255			    FALSE, FALSE, FALSE);
4256  if (h != NULL
4257      && (h->root.type == bfd_link_hash_defined
4258	  || h->root.type == bfd_link_hash_defweak)
4259      && h->def_regular)
4260    {
4261      /* We have a user supplied overlay manager.  */
4262      uos_param.exclude_input_section = h->root.u.def.section;
4263    }
4264  else
4265    {
4266      /* If no user overlay manager, spu_elf_load_ovl_mgr will add our
4267	 builtin version to .text, and will adjust .text size.  */
4268      fixed_size += (*htab->params->spu_elf_load_ovl_mgr) ();
4269    }
4270
4271  /* Mark overlay sections, and find max overlay section size.  */
4272  mos_param.max_overlay_size = 0;
4273  if (!for_each_node (mark_overlay_section, info, &mos_param, TRUE))
4274    goto err_exit;
4275
4276  /* We can't put the overlay manager or interrupt routines in
4277     overlays.  */
4278  uos_param.clearing = 0;
4279  if ((uos_param.exclude_input_section
4280       || uos_param.exclude_output_section)
4281      && !for_each_node (unmark_overlay_section, info, &uos_param, TRUE))
4282    goto err_exit;
4283
4284  bfd_count = 0;
4285  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
4286    ++bfd_count;
4287  bfd_arr = bfd_malloc (bfd_count * sizeof (*bfd_arr));
4288  if (bfd_arr == NULL)
4289    goto err_exit;
4290
4291  /* Count overlay sections, and subtract their sizes from "fixed_size".  */
4292  count = 0;
4293  bfd_count = 0;
4294  total_overlay_size = 0;
4295  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
4296    {
4297      extern const bfd_target spu_elf32_vec;
4298      asection *sec;
4299      unsigned int old_count;
4300
4301      if (ibfd->xvec != &spu_elf32_vec)
4302	continue;
4303
4304      old_count = count;
4305      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
4306	if (sec->linker_mark)
4307	  {
4308	    if ((sec->flags & SEC_CODE) != 0)
4309	      count += 1;
4310	    fixed_size -= sec->size;
4311	    total_overlay_size += sec->size;
4312	  }
4313	else if ((sec->flags & (SEC_ALLOC | SEC_LOAD)) == (SEC_ALLOC | SEC_LOAD)
4314		 && sec->output_section->owner == info->output_bfd
4315		 && strncmp (sec->output_section->name, ".ovl.init", 9) == 0)
4316	  fixed_size -= sec->size;
4317      if (count != old_count)
4318	bfd_arr[bfd_count++] = ibfd;
4319    }
4320
4321  /* Since the overlay link script selects sections by file name and
4322     section name, ensure that file names are unique.  */
4323  if (bfd_count > 1)
4324    {
4325      bfd_boolean ok = TRUE;
4326
4327      qsort (bfd_arr, bfd_count, sizeof (*bfd_arr), sort_bfds);
4328      for (i = 1; i < bfd_count; ++i)
4329	if (filename_cmp (bfd_arr[i - 1]->filename, bfd_arr[i]->filename) == 0)
4330	  {
4331	    if (bfd_arr[i - 1]->my_archive == bfd_arr[i]->my_archive)
4332	      {
4333		if (bfd_arr[i - 1]->my_archive && bfd_arr[i]->my_archive)
4334		  /* xgettext:c-format */
4335		  info->callbacks->einfo (_("%s duplicated in %s\n"),
4336					  bfd_arr[i]->filename,
4337					  bfd_arr[i]->my_archive->filename);
4338		else
4339		  info->callbacks->einfo (_("%s duplicated\n"),
4340					  bfd_arr[i]->filename);
4341		ok = FALSE;
4342	      }
4343	  }
4344      if (!ok)
4345	{
4346	  info->callbacks->einfo (_("sorry, no support for duplicate "
4347				    "object files in auto-overlay script\n"));
4348	  bfd_set_error (bfd_error_bad_value);
4349	  goto err_exit;
4350	}
4351    }
4352  free (bfd_arr);
4353
4354  fixed_size += reserved;
4355  fixed_size += htab->non_ovly_stub * ovl_stub_size (htab->params);
4356  if (fixed_size + mos_param.max_overlay_size <= htab->local_store)
4357    {
4358      if (htab->params->ovly_flavour == ovly_soft_icache)
4359	{
4360	  /* Stubs in the non-icache area are bigger.  */
4361	  fixed_size += htab->non_ovly_stub * 16;
4362	  /* Space for icache manager tables.
4363	     a) Tag array, one quadword per cache line.
4364	     - word 0: ia address of present line, init to zero.  */
4365	  fixed_size += 16 << htab->num_lines_log2;
4366	  /* b) Rewrite "to" list, one quadword per cache line.  */
4367	  fixed_size += 16 << htab->num_lines_log2;
4368	  /* c) Rewrite "from" list, one byte per outgoing branch (rounded up
4369		to a power-of-two number of full quadwords) per cache line.  */
4370	  fixed_size += 16 << (htab->fromelem_size_log2
4371			       + htab->num_lines_log2);
4372	  /* d) Pointer to __ea backing store (toe), 1 quadword.  */
4373	  fixed_size += 16;
4374	}
4375      else
4376	{
4377	  /* Guess number of overlays.  Assuming overlay buffer is on
4378	     average only half full should be conservative.  */
4379	  ovlynum = (total_overlay_size * 2 * htab->params->num_lines
4380		     / (htab->local_store - fixed_size));
4381	  /* Space for _ovly_table[], _ovly_buf_table[] and toe.  */
4382	  fixed_size += ovlynum * 16 + 16 + 4 + 16;
4383	}
4384    }
4385
4386  if (fixed_size + mos_param.max_overlay_size > htab->local_store)
4387    /* xgettext:c-format */
4388    info->callbacks->einfo (_("non-overlay size of 0x%v plus maximum overlay "
4389			      "size of 0x%v exceeds local store\n"),
4390			    (bfd_vma) fixed_size,
4391			    (bfd_vma) mos_param.max_overlay_size);
4392
4393  /* Now see if we should put some functions in the non-overlay area.  */
4394  else if (fixed_size < htab->params->auto_overlay_fixed)
4395    {
4396      unsigned int max_fixed, lib_size;
4397
4398      max_fixed = htab->local_store - mos_param.max_overlay_size;
4399      if (max_fixed > htab->params->auto_overlay_fixed)
4400	max_fixed = htab->params->auto_overlay_fixed;
4401      lib_size = max_fixed - fixed_size;
4402      lib_size = auto_ovl_lib_functions (info, lib_size);
4403      if (lib_size == (unsigned int) -1)
4404	goto err_exit;
4405      fixed_size = max_fixed - lib_size;
4406    }
4407
4408  /* Build an array of sections, suitably sorted to place into
4409     overlays.  */
4410  ovly_sections = bfd_malloc (2 * count * sizeof (*ovly_sections));
4411  if (ovly_sections == NULL)
4412    goto err_exit;
4413  ovly_p = ovly_sections;
4414  if (!for_each_node (collect_overlays, info, &ovly_p, TRUE))
4415    goto err_exit;
4416  count = (size_t) (ovly_p - ovly_sections) / 2;
4417  ovly_map = bfd_malloc (count * sizeof (*ovly_map));
4418  if (ovly_map == NULL)
4419    goto err_exit;
4420
4421  memset (&dummy_caller, 0, sizeof (dummy_caller));
4422  overlay_size = (htab->local_store - fixed_size) / htab->params->num_lines;
4423  if (htab->params->line_size != 0)
4424    overlay_size = htab->params->line_size;
4425  base = 0;
4426  ovlynum = 0;
4427  while (base < count)
4428    {
4429      unsigned int size = 0, rosize = 0, roalign = 0;
4430
4431      for (i = base; i < count; i++)
4432	{
4433	  asection *sec, *rosec;
4434	  unsigned int tmp, rotmp;
4435	  unsigned int num_stubs;
4436	  struct call_info *call, *pasty;
4437	  struct _spu_elf_section_data *sec_data;
4438	  struct spu_elf_stack_info *sinfo;
4439	  unsigned int k;
4440
4441	  /* See whether we can add this section to the current
4442	     overlay without overflowing our overlay buffer.  */
4443	  sec = ovly_sections[2 * i];
4444	  tmp = align_power (size, sec->alignment_power) + sec->size;
4445	  rotmp = rosize;
4446	  rosec = ovly_sections[2 * i + 1];
4447	  if (rosec != NULL)
4448	    {
4449	      rotmp = align_power (rotmp, rosec->alignment_power) + rosec->size;
4450	      if (roalign < rosec->alignment_power)
4451		roalign = rosec->alignment_power;
4452	    }
4453	  if (align_power (tmp, roalign) + rotmp > overlay_size)
4454	    break;
4455	  if (sec->segment_mark)
4456	    {
4457	      /* Pasted sections must stay together, so add their
4458		 sizes too.  */
4459	      pasty = find_pasted_call (sec);
4460	      while (pasty != NULL)
4461		{
4462		  struct function_info *call_fun = pasty->fun;
4463		  tmp = (align_power (tmp, call_fun->sec->alignment_power)
4464			 + call_fun->sec->size);
4465		  if (call_fun->rodata)
4466		    {
4467		      rotmp = (align_power (rotmp,
4468					    call_fun->rodata->alignment_power)
4469			       + call_fun->rodata->size);
4470		      if (roalign < rosec->alignment_power)
4471			roalign = rosec->alignment_power;
4472		    }
4473		  for (pasty = call_fun->call_list; pasty; pasty = pasty->next)
4474		    if (pasty->is_pasted)
4475		      break;
4476		}
4477	    }
4478	  if (align_power (tmp, roalign) + rotmp > overlay_size)
4479	    break;
4480
4481	  /* If we add this section, we might need new overlay call
4482	     stubs.  Add any overlay section calls to dummy_call.  */
4483	  pasty = NULL;
4484	  sec_data = spu_elf_section_data (sec);
4485	  sinfo = sec_data->u.i.stack_info;
4486	  for (k = 0; k < (unsigned) sinfo->num_fun; ++k)
4487	    for (call = sinfo->fun[k].call_list; call; call = call->next)
4488	      if (call->is_pasted)
4489		{
4490		  BFD_ASSERT (pasty == NULL);
4491		  pasty = call;
4492		}
4493	      else if (call->fun->sec->linker_mark)
4494		{
4495		  if (!copy_callee (&dummy_caller, call))
4496		    goto err_exit;
4497		}
4498	  while (pasty != NULL)
4499	    {
4500	      struct function_info *call_fun = pasty->fun;
4501	      pasty = NULL;
4502	      for (call = call_fun->call_list; call; call = call->next)
4503		if (call->is_pasted)
4504		  {
4505		    BFD_ASSERT (pasty == NULL);
4506		    pasty = call;
4507		  }
4508		else if (!copy_callee (&dummy_caller, call))
4509		  goto err_exit;
4510	    }
4511
4512	  /* Calculate call stub size.  */
4513	  num_stubs = 0;
4514	  for (call = dummy_caller.call_list; call; call = call->next)
4515	    {
4516	      unsigned int stub_delta = 1;
4517
4518	      if (htab->params->ovly_flavour == ovly_soft_icache)
4519		stub_delta = call->count;
4520	      num_stubs += stub_delta;
4521
4522	      /* If the call is within this overlay, we won't need a
4523		 stub.  */
4524	      for (k = base; k < i + 1; k++)
4525		if (call->fun->sec == ovly_sections[2 * k])
4526		  {
4527		    num_stubs -= stub_delta;
4528		    break;
4529		  }
4530	    }
4531	  if (htab->params->ovly_flavour == ovly_soft_icache
4532	      && num_stubs > htab->params->max_branch)
4533	    break;
4534	  if (align_power (tmp, roalign) + rotmp
4535	      + num_stubs * ovl_stub_size (htab->params) > overlay_size)
4536	    break;
4537	  size = tmp;
4538	  rosize = rotmp;
4539	}
4540
4541      if (i == base)
4542	{
4543	  /* xgettext:c-format */
4544	  info->callbacks->einfo (_("%B:%A%s exceeds overlay size\n"),
4545				  ovly_sections[2 * i]->owner,
4546				  ovly_sections[2 * i],
4547				  ovly_sections[2 * i + 1] ? " + rodata" : "");
4548	  bfd_set_error (bfd_error_bad_value);
4549	  goto err_exit;
4550	}
4551
4552      while (dummy_caller.call_list != NULL)
4553	{
4554	  struct call_info *call = dummy_caller.call_list;
4555	  dummy_caller.call_list = call->next;
4556	  free (call);
4557	}
4558
4559      ++ovlynum;
4560      while (base < i)
4561	ovly_map[base++] = ovlynum;
4562    }
4563
4564  script = htab->params->spu_elf_open_overlay_script ();
4565
4566  if (htab->params->ovly_flavour == ovly_soft_icache)
4567    {
4568      if (fprintf (script, "SECTIONS\n{\n") <= 0)
4569	goto file_err;
4570
4571      if (fprintf (script,
4572		   " . = ALIGN (%u);\n"
4573		   " .ovl.init : { *(.ovl.init) }\n"
4574		   " . = ABSOLUTE (ADDR (.ovl.init));\n",
4575		   htab->params->line_size) <= 0)
4576	goto file_err;
4577
4578      base = 0;
4579      ovlynum = 1;
4580      while (base < count)
4581	{
4582	  unsigned int indx = ovlynum - 1;
4583	  unsigned int vma, lma;
4584
4585	  vma = (indx & (htab->params->num_lines - 1)) << htab->line_size_log2;
4586	  lma = vma + (((indx >> htab->num_lines_log2) + 1) << 18);
4587
4588	  if (fprintf (script, " .ovly%u ABSOLUTE (ADDR (.ovl.init)) + %u "
4589			       ": AT (LOADADDR (.ovl.init) + %u) {\n",
4590		       ovlynum, vma, lma) <= 0)
4591	    goto file_err;
4592
4593	  base = print_one_overlay_section (script, base, count, ovlynum,
4594					    ovly_map, ovly_sections, info);
4595	  if (base == (unsigned) -1)
4596	    goto file_err;
4597
4598	  if (fprintf (script, "  }\n") <= 0)
4599	    goto file_err;
4600
4601	  ovlynum++;
4602	}
4603
4604      if (fprintf (script, " . = ABSOLUTE (ADDR (.ovl.init)) + %u;\n",
4605		   1 << (htab->num_lines_log2 + htab->line_size_log2)) <= 0)
4606	goto file_err;
4607
4608      if (fprintf (script, "}\nINSERT AFTER .toe;\n") <= 0)
4609	goto file_err;
4610    }
4611  else
4612    {
4613      if (fprintf (script, "SECTIONS\n{\n") <= 0)
4614	goto file_err;
4615
4616      if (fprintf (script,
4617		   " . = ALIGN (16);\n"
4618		   " .ovl.init : { *(.ovl.init) }\n"
4619		   " . = ABSOLUTE (ADDR (.ovl.init));\n") <= 0)
4620	goto file_err;
4621
4622      for (region = 1; region <= htab->params->num_lines; region++)
4623	{
4624	  ovlynum = region;
4625	  base = 0;
4626	  while (base < count && ovly_map[base] < ovlynum)
4627	    base++;
4628
4629	  if (base == count)
4630	    break;
4631
4632	  if (region == 1)
4633	    {
4634	      /* We need to set lma since we are overlaying .ovl.init.  */
4635	      if (fprintf (script,
4636			   " OVERLAY : AT (ALIGN (LOADADDR (.ovl.init) + SIZEOF (.ovl.init), 16))\n {\n") <= 0)
4637		goto file_err;
4638	    }
4639	  else
4640	    {
4641	      if (fprintf (script, " OVERLAY :\n {\n") <= 0)
4642		goto file_err;
4643	    }
4644
4645	  while (base < count)
4646	    {
4647	      if (fprintf (script, "  .ovly%u {\n", ovlynum) <= 0)
4648		goto file_err;
4649
4650	      base = print_one_overlay_section (script, base, count, ovlynum,
4651						ovly_map, ovly_sections, info);
4652	      if (base == (unsigned) -1)
4653		goto file_err;
4654
4655	      if (fprintf (script, "  }\n") <= 0)
4656		goto file_err;
4657
4658	      ovlynum += htab->params->num_lines;
4659	      while (base < count && ovly_map[base] < ovlynum)
4660		base++;
4661	    }
4662
4663	  if (fprintf (script, " }\n") <= 0)
4664	    goto file_err;
4665	}
4666
4667      if (fprintf (script, "}\nINSERT BEFORE .text;\n") <= 0)
4668	goto file_err;
4669    }
4670
4671  free (ovly_map);
4672  free (ovly_sections);
4673
4674  if (fclose (script) != 0)
4675    goto file_err;
4676
4677  if (htab->params->auto_overlay & AUTO_RELINK)
4678    (*htab->params->spu_elf_relink) ();
4679
4680  xexit (0);
4681
4682 file_err:
4683  bfd_set_error (bfd_error_system_call);
4684 err_exit:
4685  info->callbacks->einfo (_("%F%P: auto overlay error: %E\n"));
4686  xexit (1);
4687}
4688
4689/* Provide an estimate of total stack required.  */
4690
4691static bfd_boolean
4692spu_elf_stack_analysis (struct bfd_link_info *info)
4693{
4694  struct spu_link_hash_table *htab;
4695  struct _sum_stack_param sum_stack_param;
4696
4697  if (!discover_functions (info))
4698    return FALSE;
4699
4700  if (!build_call_tree (info))
4701    return FALSE;
4702
4703  htab = spu_hash_table (info);
4704  if (htab->params->stack_analysis)
4705    {
4706      info->callbacks->info (_("Stack size for call graph root nodes.\n"));
4707      info->callbacks->minfo (_("\nStack size for functions.  "
4708				"Annotations: '*' max stack, 't' tail call\n"));
4709    }
4710
4711  sum_stack_param.emit_stack_syms = htab->params->emit_stack_syms;
4712  sum_stack_param.overall_stack = 0;
4713  if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
4714    return FALSE;
4715
4716  if (htab->params->stack_analysis)
4717    info->callbacks->info (_("Maximum stack required is 0x%v\n"),
4718			   (bfd_vma) sum_stack_param.overall_stack);
4719  return TRUE;
4720}
4721
4722/* Perform a final link.  */
4723
4724static bfd_boolean
4725spu_elf_final_link (bfd *output_bfd, struct bfd_link_info *info)
4726{
4727  struct spu_link_hash_table *htab = spu_hash_table (info);
4728
4729  if (htab->params->auto_overlay)
4730    spu_elf_auto_overlay (info);
4731
4732  if ((htab->params->stack_analysis
4733       || (htab->params->ovly_flavour == ovly_soft_icache
4734	   && htab->params->lrlive_analysis))
4735      && !spu_elf_stack_analysis (info))
4736    info->callbacks->einfo (_("%X%P: stack/lrlive analysis error: %E\n"));
4737
4738  if (!spu_elf_build_stubs (info))
4739    info->callbacks->einfo (_("%F%P: can not build overlay stubs: %E\n"));
4740
4741  return bfd_elf_final_link (output_bfd, info);
4742}
4743
4744/* Called when not normally emitting relocs, ie. !bfd_link_relocatable (info)
4745   and !info->emitrelocations.  Returns a count of special relocs
4746   that need to be emitted.  */
4747
4748static unsigned int
4749spu_elf_count_relocs (struct bfd_link_info *info, asection *sec)
4750{
4751  Elf_Internal_Rela *relocs;
4752  unsigned int count = 0;
4753
4754  relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
4755				      info->keep_memory);
4756  if (relocs != NULL)
4757    {
4758      Elf_Internal_Rela *rel;
4759      Elf_Internal_Rela *relend = relocs + sec->reloc_count;
4760
4761      for (rel = relocs; rel < relend; rel++)
4762	{
4763	  int r_type = ELF32_R_TYPE (rel->r_info);
4764	  if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
4765	    ++count;
4766	}
4767
4768      if (elf_section_data (sec)->relocs != relocs)
4769	free (relocs);
4770    }
4771
4772  return count;
4773}
4774
4775/* Functions for adding fixup records to .fixup */
4776
#define FIXUP_RECORD_SIZE 4

/* Store ADDR as the INDEX'th 32-bit record of .fixup.
   Parameters are parenthesized so complex expressions expand safely.  */
#define FIXUP_PUT(output_bfd, htab, index, addr) \
	  bfd_put_32 (output_bfd, (addr), \
		      (htab)->sfixup->contents + FIXUP_RECORD_SIZE * (index))
/* Fetch the INDEX'th 32-bit record of .fixup.  */
#define FIXUP_GET(output_bfd, htab, index) \
	  bfd_get_32 (output_bfd, \
		      (htab)->sfixup->contents + FIXUP_RECORD_SIZE * (index))
4785
4786/* Store OFFSET in .fixup.  This assumes it will be called with an
4787   increasing OFFSET.  When this OFFSET fits with the last base offset,
4788   it just sets a bit, otherwise it adds a new fixup record.  */
4789static void
4790spu_elf_emit_fixup (bfd * output_bfd, struct bfd_link_info *info,
4791		    bfd_vma offset)
4792{
4793  struct spu_link_hash_table *htab = spu_hash_table (info);
4794  asection *sfixup = htab->sfixup;
4795  bfd_vma qaddr = offset & ~(bfd_vma) 15;
4796  bfd_vma bit = ((bfd_vma) 8) >> ((offset & 15) >> 2);
4797  if (sfixup->reloc_count == 0)
4798    {
4799      FIXUP_PUT (output_bfd, htab, 0, qaddr | bit);
4800      sfixup->reloc_count++;
4801    }
4802  else
4803    {
4804      bfd_vma base = FIXUP_GET (output_bfd, htab, sfixup->reloc_count - 1);
4805      if (qaddr != (base & ~(bfd_vma) 15))
4806	{
4807	  if ((sfixup->reloc_count + 1) * FIXUP_RECORD_SIZE > sfixup->size)
4808	    _bfd_error_handler (_("fatal error while creating .fixup"));
4809	  FIXUP_PUT (output_bfd, htab, sfixup->reloc_count, qaddr | bit);
4810	  sfixup->reloc_count++;
4811	}
4812      else
4813	FIXUP_PUT (output_bfd, htab, sfixup->reloc_count - 1, base | bit);
4814    }
4815}
4816
/* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD.
   Returns FALSE on error, TRUE on success, or 2 when R_SPU_PPU32/64
   relocs were found that must still be emitted to the output even
   though relocations are not otherwise being emitted.  */

static int
spu_elf_relocate_section (bfd *output_bfd,
			  struct bfd_link_info *info,
			  bfd *input_bfd,
			  asection *input_section,
			  bfd_byte *contents,
			  Elf_Internal_Rela *relocs,
			  Elf_Internal_Sym *local_syms,
			  asection **local_sections)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  Elf_Internal_Rela *rel, *relend;
  struct spu_link_hash_table *htab;
  asection *ea;
  int ret = TRUE;
  bfd_boolean emit_these_relocs = FALSE;
  bfd_boolean is_ea_sym;
  bfd_boolean stubs;
  unsigned int iovl = 0;

  htab = spu_hash_table (info);
  /* Overlay call stubs only matter when stub sections exist and this
     section can contain branches that might need them.  */
  stubs = (htab->stub_sec != NULL
	   && maybe_needs_stubs (input_section));
  /* Overlay index of the section holding these relocs (0 if it is
     not itself part of an overlay).  */
  iovl = overlay_index (input_section);
  ea = bfd_get_section_by_name (output_bfd, "._ea");
  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
  sym_hashes = (struct elf_link_hash_entry **) (elf_sym_hashes (input_bfd));

  rel = relocs;
  relend = relocs + input_section->reloc_count;
  for (; rel < relend; rel++)
    {
      int r_type;
      reloc_howto_type *howto;
      unsigned int r_symndx;
      Elf_Internal_Sym *sym;
      asection *sec;
      struct elf_link_hash_entry *h;
      const char *sym_name;
      bfd_vma relocation;
      bfd_vma addend;
      bfd_reloc_status_type r;
      bfd_boolean unresolved_reloc;
      enum _stub_type stub_type;

      r_symndx = ELF32_R_SYM (rel->r_info);
      r_type = ELF32_R_TYPE (rel->r_info);
      howto = elf_howto_table + r_type;
      unresolved_reloc = FALSE;
      h = NULL;
      sym = NULL;
      sec = NULL;
      if (r_symndx < symtab_hdr->sh_info)
	{
	  /* Local symbol.  */
	  sym = local_syms + r_symndx;
	  sec = local_sections[r_symndx];
	  sym_name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, sec);
	  relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
	}
      else
	{
	  /* Global symbol: follow indirect/warning links to the real
	     hash entry and resolve its value.  */
	  if (sym_hashes == NULL)
	    return FALSE;

	  h = sym_hashes[r_symndx - symtab_hdr->sh_info];

	  if (info->wrap_hash != NULL
	      && (input_section->flags & SEC_DEBUGGING) != 0)
	    h = ((struct elf_link_hash_entry *)
		 unwrap_hash_lookup (info, input_bfd, &h->root));

	  while (h->root.type == bfd_link_hash_indirect
		 || h->root.type == bfd_link_hash_warning)
	    h = (struct elf_link_hash_entry *) h->root.u.i.link;

	  relocation = 0;
	  if (h->root.type == bfd_link_hash_defined
	      || h->root.type == bfd_link_hash_defweak)
	    {
	      sec = h->root.u.def.section;
	      if (sec == NULL
		  || sec->output_section == NULL)
		/* Set a flag that will be cleared later if we find a
		   relocation value for this symbol.  output_section
		   is typically NULL for symbols satisfied by a shared
		   library.  */
		unresolved_reloc = TRUE;
	      else
		relocation = (h->root.u.def.value
			      + sec->output_section->vma
			      + sec->output_offset);
	    }
	  else if (h->root.type == bfd_link_hash_undefweak)
	    ;
	  else if (info->unresolved_syms_in_objects == RM_IGNORE
		   && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
	    ;
	  else if (!bfd_link_relocatable (info)
		   && !(r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64))
	    {
	      bfd_boolean err;
	      err = (info->unresolved_syms_in_objects == RM_GENERATE_ERROR
		     || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT);
	      (*info->callbacks->undefined_symbol) (info,
						    h->root.root.string,
						    input_bfd,
						    input_section,
						    rel->r_offset, err);
	    }
	  sym_name = h->root.root.string;
	}

      if (sec != NULL && discarded_section (sec))
	RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
					 rel, 1, relend, howto, 0, contents);

      if (bfd_link_relocatable (info))
	continue;

      /* Change "a rt,ra,rb" to "ai rt,ra,0". */
      if (r_type == R_SPU_ADD_PIC
	  && h != NULL
	  && !(h->def_regular || ELF_COMMON_DEF_P (h)))
	{
	  bfd_byte *loc = contents + rel->r_offset;
	  loc[0] = 0x1c;
	  loc[1] = 0x00;
	  loc[2] &= 0x3f;
	}

      is_ea_sym = (ea != NULL
		   && sec != NULL
		   && sec->output_section == ea);

      /* If this symbol is in an overlay area, we may need to relocate
	 to the overlay stub.  */
      addend = rel->r_addend;
      if (stubs
	  && !is_ea_sym
	  && (stub_type = needs_ovl_stub (h, sym, sec, input_section, rel,
					  contents, info)) != no_stub)
	{
	  unsigned int ovl = 0;
	  struct got_entry *g, **head;

	  if (stub_type != nonovl_stub)
	    ovl = iovl;

	  if (h != NULL)
	    head = &h->got.glist;
	  else
	    head = elf_local_got_ents (input_bfd) + r_symndx;

	  /* Find the got entry recorded earlier for this symbol and
	     overlay; soft-icache matches on branch address instead of
	     addend.  */
	  for (g = *head; g != NULL; g = g->next)
	    if (htab->params->ovly_flavour == ovly_soft_icache
		? (g->ovl == ovl
		   && g->br_addr == (rel->r_offset
				     + input_section->output_offset
				     + input_section->output_section->vma))
		: g->addend == addend && (g->ovl == ovl || g->ovl == 0))
	      break;
	  if (g == NULL)
	    /* The stub entry should have been created in an earlier
	       pass; its absence is an internal error.  */
	    abort ();

	  relocation = g->stub_addr;
	  addend = 0;
	}
      else
	{
	  /* For soft icache, encode the overlay index into addresses.  */
	  if (htab->params->ovly_flavour == ovly_soft_icache
	      && (r_type == R_SPU_ADDR16_HI
		  || r_type == R_SPU_ADDR32 || r_type == R_SPU_REL32)
	      && !is_ea_sym)
	    {
	      unsigned int ovl = overlay_index (sec);
	      if (ovl != 0)
		{
		  unsigned int set_id = ((ovl - 1) >> htab->num_lines_log2) + 1;
		  relocation += set_id << 18;
		}
	    }
	}

      /* Record the location of each 32-bit absolute reloc in .fixup
	 when fixup emission was requested.  */
      if (htab->params->emit_fixups && !bfd_link_relocatable (info)
	  && (input_section->flags & SEC_ALLOC) != 0
	  && r_type == R_SPU_ADDR32)
	{
	  bfd_vma offset;
	  offset = rel->r_offset + input_section->output_section->vma
		   + input_section->output_offset;
	  spu_elf_emit_fixup (output_bfd, info, offset);
	}

      if (unresolved_reloc)
	;
      else if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
	{
	  if (is_ea_sym)
	    {
	      /* ._ea is a special section that isn't allocated in SPU
		 memory, but rather occupies space in PPU memory as
		 part of an embedded ELF image.  If this reloc is
		 against a symbol defined in ._ea, then transform the
		 reloc into an equivalent one without a symbol
		 relative to the start of the ELF image.  */
	      rel->r_addend += (relocation
				- ea->vma
				+ elf_section_data (ea)->this_hdr.sh_offset);
	      rel->r_info = ELF32_R_INFO (0, r_type);
	    }
	  emit_these_relocs = TRUE;
	  continue;
	}
      else if (is_ea_sym)
	unresolved_reloc = TRUE;

      if (unresolved_reloc
	  && _bfd_elf_section_offset (output_bfd, info, input_section,
				      rel->r_offset) != (bfd_vma) -1)
	{
	  _bfd_error_handler
	    /* xgettext:c-format */
	    (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
	     input_bfd,
	     bfd_get_section_name (input_bfd, input_section),
	     (long) rel->r_offset,
	     howto->name,
	     sym_name);
	  ret = FALSE;
	}

      r = _bfd_final_link_relocate (howto,
				    input_bfd,
				    input_section,
				    contents,
				    rel->r_offset, relocation, addend);

      if (r != bfd_reloc_ok)
	{
	  const char *msg = (const char *) 0;

	  switch (r)
	    {
	    case bfd_reloc_overflow:
	      (*info->callbacks->reloc_overflow)
		(info, (h ? &h->root : NULL), sym_name, howto->name,
		 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
	      break;

	    case bfd_reloc_undefined:
	      (*info->callbacks->undefined_symbol)
		(info, sym_name, input_bfd, input_section, rel->r_offset, TRUE);
	      break;

	    case bfd_reloc_outofrange:
	      msg = _("internal error: out of range error");
	      goto common_error;

	    case bfd_reloc_notsupported:
	      msg = _("internal error: unsupported relocation error");
	      goto common_error;

	    case bfd_reloc_dangerous:
	      msg = _("internal error: dangerous error");
	      goto common_error;

	    default:
	      msg = _("internal error: unknown error");
	      /* fall through */

	    common_error:
	      ret = FALSE;
	      (*info->callbacks->warning) (info, msg, sym_name, input_bfd,
					   input_section, rel->r_offset);
	      break;
	    }
	}
    }

  if (ret
      && emit_these_relocs
      && !info->emitrelocations)
    {
      /* Squeeze out all but the R_SPU_PPU* relocs so just those are
	 emitted, and tell the caller via ret == 2.  */
      Elf_Internal_Rela *wrel;
      Elf_Internal_Shdr *rel_hdr;

      wrel = rel = relocs;
      relend = relocs + input_section->reloc_count;
      for (; rel < relend; rel++)
	{
	  int r_type;

	  r_type = ELF32_R_TYPE (rel->r_info);
	  if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
	    *wrel++ = *rel;
	}
      input_section->reloc_count = wrel - relocs;
      /* Backflips for _bfd_elf_link_output_relocs.  */
      rel_hdr = _bfd_elf_single_rel_hdr (input_section);
      rel_hdr->sh_size = input_section->reloc_count * rel_hdr->sh_entsize;
      ret = 2;
    }

  return ret;
}
5126
5127static bfd_boolean
5128spu_elf_finish_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
5129				 struct bfd_link_info *info ATTRIBUTE_UNUSED)
5130{
5131  return TRUE;
5132}
5133
5134/* Adjust _SPUEAR_ syms to point at their overlay stubs.  */
5135
static int
spu_elf_output_symbol_hook (struct bfd_link_info *info,
			    const char *sym_name ATTRIBUTE_UNUSED,
			    Elf_Internal_Sym *sym,
			    asection *sym_sec ATTRIBUTE_UNUSED,
			    struct elf_link_hash_entry *h)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);

  /* Only touch symbols in a final link when overlay stubs were built,
     and only regular defined symbols whose names begin with the
     "_SPUEAR_" entry-point prefix.  */
  if (!bfd_link_relocatable (info)
      && htab->stub_sec != NULL
      && h != NULL
      && (h->root.type == bfd_link_hash_defined
	  || h->root.type == bfd_link_hash_defweak)
      && h->def_regular
      && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
    {
      struct got_entry *g;

      /* Find the stub that is reached directly (not via an overlay
	 call).  For soft-icache that is the entry whose branch address
	 equals its stub address; otherwise the entry with zero addend
	 made from non-overlay code (ovl == 0).  */
      for (g = h->got.glist; g != NULL; g = g->next)
	if (htab->params->ovly_flavour == ovly_soft_icache
	    ? g->br_addr == g->stub_addr
	    : g->addend == 0 && g->ovl == 0)
	  {
	    /* Redirect the output symbol so it names the stub rather
	       than the overlaid definition: point its section index at
	       the stub section's output section and set its value to
	       the stub's address.  */
	    sym->st_shndx = (_bfd_elf_section_from_bfd_section
			     (htab->stub_sec[0]->output_section->owner,
			      htab->stub_sec[0]->output_section));
	    sym->st_value = g->stub_addr;
	    break;
	  }
    }

  return 1;
}
5170
/* Non-zero when the output is being linked as a PPU-loadable plugin;
   set from the linker command line via spu_elf_plugin and consulted
   when writing the ELF header.  */
static int spu_plugin = 0;

/* Record whether a plugin link was requested (VAL non-zero).  */
void
spu_elf_plugin (int val)
{
  spu_plugin = val;
}
5178
5179/* Set ELF header e_type for plugins.  */
5180
5181static void
5182spu_elf_post_process_headers (bfd *abfd, struct bfd_link_info *info)
5183{
5184  if (spu_plugin)
5185    {
5186      Elf_Internal_Ehdr *i_ehdrp = elf_elfheader (abfd);
5187
5188      i_ehdrp->e_type = ET_DYN;
5189    }
5190
5191  _bfd_elf_post_process_headers (abfd, info);
5192}
5193
5194/* We may add an extra PT_LOAD segment for .toe.  We also need extra
5195   segments for overlays.  */
5196
5197static int
5198spu_elf_additional_program_headers (bfd *abfd, struct bfd_link_info *info)
5199{
5200  int extra = 0;
5201  asection *sec;
5202
5203  if (info != NULL)
5204    {
5205      struct spu_link_hash_table *htab = spu_hash_table (info);
5206      extra = htab->num_overlays;
5207    }
5208
5209  if (extra)
5210    ++extra;
5211
5212  sec = bfd_get_section_by_name (abfd, ".toe");
5213  if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
5214    ++extra;
5215
5216  return extra;
5217}
5218
5219/* Remove .toe section from other PT_LOAD segments and put it in
5220   a segment of its own.  Put overlays in separate segments too.  */
5221
static bfd_boolean
spu_elf_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
{
  asection *toe, *s;
  struct elf_segment_map *m, *m_overlay;
  struct elf_segment_map **p, **p_overlay;
  unsigned int i;

  if (info == NULL)
    return TRUE;

  /* Pass 1: split .toe and overlay sections out of multi-section
     PT_LOAD segments so each ends up in a PT_LOAD of its own.  */
  toe = bfd_get_section_by_name (abfd, ".toe");
  for (m = elf_seg_map (abfd); m != NULL; m = m->next)
    if (m->p_type == PT_LOAD && m->count > 1)
      for (i = 0; i < m->count; i++)
	if ((s = m->sections[i]) == toe
	    || spu_elf_section_data (s)->u.o.ovl_index != 0)
	  {
	    struct elf_segment_map *m2;
	    bfd_vma amt;

	    /* Sections after S move into a new segment M2 inserted
	       after M.  elf_segment_map has one section pointer
	       built in, hence the (i + 2) in the size calculation.  */
	    if (i + 1 < m->count)
	      {
		amt = sizeof (struct elf_segment_map);
		amt += (m->count - (i + 2)) * sizeof (m->sections[0]);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return FALSE;
		m2->count = m->count - (i + 1);
		memcpy (m2->sections, m->sections + i + 1,
			m2->count * sizeof (m->sections[0]));
		m2->p_type = PT_LOAD;
		m2->next = m->next;
		m->next = m2;
	      }
	    /* M keeps only the sections before S; S itself goes into
	       another fresh single-section segment after M.  When S is
	       the first section, M itself becomes S's segment.  */
	    m->count = 1;
	    if (i != 0)
	      {
		m->count = i;
		amt = sizeof (struct elf_segment_map);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return FALSE;
		m2->p_type = PT_LOAD;
		m2->count = 1;
		m2->sections[0] = s;
		m2->next = m->next;
		m->next = m2;
	      }
	    /* Only one split per original segment per pass; the newly
	       created tail segment will be examined on a later
	       iteration of the outer loop.  */
	    break;
	  }


  /* Some SPU ELF loaders ignore the PF_OVERLAY flag and just load all
     PT_LOAD segments.  This can cause the .ovl.init section to be
     overwritten with the contents of some overlay segment.  To work
     around this issue, we ensure that all PF_OVERLAY segments are
     sorted first amongst the program headers; this ensures that even
     with a broken loader, the .ovl.init section (which is not marked
     as PF_OVERLAY) will be placed into SPU local store on startup.  */

  /* Move all overlay segments onto a separate list.  */
  p = &elf_seg_map (abfd);
  p_overlay = &m_overlay;
  while (*p != NULL)
    {
      if ((*p)->p_type == PT_LOAD && (*p)->count == 1
	  && spu_elf_section_data ((*p)->sections[0])->u.o.ovl_index != 0)
	{
	  /* Unlink from the main map and append to the overlay list.  */
	  m = *p;
	  *p = m->next;
	  *p_overlay = m;
	  p_overlay = &m->next;
	  continue;
	}

      p = &((*p)->next);
    }

  /* Re-insert overlay segments at the head of the segment map.
     Note: if no overlay segments were found, this simply stores the
     original map back, so the code is safe either way.  */
  *p_overlay = elf_seg_map (abfd);
  elf_seg_map (abfd) = m_overlay;

  return TRUE;
}
5307
5308/* Tweak the section type of .note.spu_name.  */
5309
5310static bfd_boolean
5311spu_elf_fake_sections (bfd *obfd ATTRIBUTE_UNUSED,
5312		       Elf_Internal_Shdr *hdr,
5313		       asection *sec)
5314{
5315  if (strcmp (sec->name, SPU_PTNOTE_SPUNAME) == 0)
5316    hdr->sh_type = SHT_NOTE;
5317  return TRUE;
5318}
5319
5320/* Tweak phdrs before writing them out.  */
5321
static int
spu_elf_modify_program_headers (bfd *abfd, struct bfd_link_info *info)
{
  const struct elf_backend_data *bed;
  struct elf_obj_tdata *tdata;
  Elf_Internal_Phdr *phdr, *last;
  struct spu_link_hash_table *htab;
  unsigned int count;
  unsigned int i;

  if (info == NULL)
    return TRUE;

  bed = get_elf_backend_data (abfd);
  tdata = elf_tdata (abfd);
  phdr = tdata->phdr;
  count = elf_program_header_size (abfd) / bed->s->sizeof_phdr;
  htab = spu_hash_table (info);
  if (htab->num_overlays != 0)
    {
      struct elf_segment_map *m;
      unsigned int o;

      /* Walk the segment map in parallel with the phdr array (same
	 order, one phdr per map entry) looking for overlay segments.  */
      for (i = 0, m = elf_seg_map (abfd); m; ++i, m = m->next)
	if (m->count != 0
	    && (o = spu_elf_section_data (m->sections[0])->u.o.ovl_index) != 0)
	  {
	    /* Mark this as an overlay header.  */
	    phdr[i].p_flags |= PF_OVERLAY;

	    if (htab->ovtab != NULL && htab->ovtab->size != 0
		&& htab->params->ovly_flavour != ovly_soft_icache)
	      {
		bfd_byte *p = htab->ovtab->contents;
		/* Each _ovly_table entry is 16 bytes; the file offset
		   field lives at byte 8 of entry O (1-based index).  */
		unsigned int off = o * 16 + 8;

		/* Write file_off into _ovly_table.  */
		bfd_put_32 (htab->ovtab->owner, phdr[i].p_offset, p + off);
	      }
	  }
      /* Soft-icache has its file offset put in .ovl.init.  */
      if (htab->init != NULL && htab->init->size != 0)
	{
	  bfd_vma val = elf_section_data (htab->ovl_sec[0])->this_hdr.sh_offset;

	  bfd_put_32 (htab->init->owner, val, htab->init->contents + 4);
	}
    }

  /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
     of 16.  This should always be possible when using the standard
     linker scripts, but don't create overlapping segments if
     someone is playing games with linker scripts.  */
  /* First pass (checking only): scan PT_LOAD phdrs from last to
     first, verifying that rounding each segment up would not run into
     the previous segment seen (LAST, which is the next-higher one in
     file/address order).  Break out early if any rounding would
     overlap; in that case I != -1u and no adjustment is made.  */
  last = NULL;
  for (i = count; i-- != 0; )
    if (phdr[i].p_type == PT_LOAD)
      {
	unsigned adjust;

	/* Rounded file size must not reach into LAST's file extent.  */
	adjust = -phdr[i].p_filesz & 15;
	if (adjust != 0
	    && last != NULL
	    && phdr[i].p_offset + phdr[i].p_filesz > last->p_offset - adjust)
	  break;

	/* Rounded memory size must not reach into LAST's address
	   range (only a problem if it currently ends at or before
	   LAST's start).  */
	adjust = -phdr[i].p_memsz & 15;
	if (adjust != 0
	    && last != NULL
	    && phdr[i].p_filesz != 0
	    && phdr[i].p_vaddr + phdr[i].p_memsz > last->p_vaddr - adjust
	    && phdr[i].p_vaddr + phdr[i].p_memsz <= last->p_vaddr)
	  break;

	/* Zero-filesz segments occupy no file space, so they don't
	   constrain the next (lower) segment.  */
	if (phdr[i].p_filesz != 0)
	  last = &phdr[i];
      }

  /* Second pass (applying): only if the check loop ran to completion
     (I wrapped to -1) do the actual rounding.  */
  if (i == (unsigned int) -1)
    for (i = count; i-- != 0; )
      if (phdr[i].p_type == PT_LOAD)
	{
	unsigned adjust;

	adjust = -phdr[i].p_filesz & 15;
	phdr[i].p_filesz += adjust;

	adjust = -phdr[i].p_memsz & 15;
	phdr[i].p_memsz += adjust;
      }

  return TRUE;
}
5414
/* Size the .fixup section (when --emit-fixups is in effect) by
   counting, across all input sections, the quadwords that contain at
   least one R_SPU_ADDR32 relocation.  Returns FALSE on error.  */

bfd_boolean
spu_elf_size_sections (bfd * output_bfd, struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  if (htab->params->emit_fixups)
    {
      asection *sfixup = htab->sfixup;
      int fixup_count = 0;
      bfd *ibfd;
      size_t size;

      for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
	{
	  asection *isec;

	  if (bfd_get_flavour (ibfd) != bfd_target_elf_flavour)
	    continue;

	  /* Walk over each section attached to the input bfd.  */
	  for (isec = ibfd->sections; isec != NULL; isec = isec->next)
	    {
	      Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
	      bfd_vma base_end;

	      /* If there aren't any relocs, then there's nothing more
	         to do.  */
	      if ((isec->flags & SEC_ALLOC) == 0
		  || (isec->flags & SEC_RELOC) == 0
		  || isec->reloc_count == 0)
		continue;

	      /* Get the relocs.  */
	      internal_relocs =
		_bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
					   info->keep_memory);
	      if (internal_relocs == NULL)
		return FALSE;

	      /* 1 quadword can contain up to 4 R_SPU_ADDR32
	         relocations.  They are stored in a single word by
	         saving the upper 28 bits of the address and setting the
	         lower 4 bits to a bit mask of the words that have the
	         relocation.  BASE_END keeps track of the next quadword. */
	      irela = internal_relocs;
	      irelaend = irela + isec->reloc_count;
	      base_end = 0;
	      /* Count one fixup record per 16-byte-aligned quadword
		 containing at least one ADDR32 reloc.  Relies on relocs
		 being sorted by r_offset within the section.  */
	      for (; irela < irelaend; irela++)
		if (ELF32_R_TYPE (irela->r_info) == R_SPU_ADDR32
		    && irela->r_offset >= base_end)
		  {
		    base_end = (irela->r_offset & ~(bfd_vma) 15) + 16;
		    fixup_count++;
		  }
	    }
	}

      /* We always have a NULL fixup as a sentinel */
      size = (fixup_count + 1) * FIXUP_RECORD_SIZE;
      if (!bfd_set_section_size (output_bfd, sfixup, size))
	return FALSE;
      /* Zeroed contents: the trailing all-zero record is the sentinel.  */
      sfixup->contents = (bfd_byte *) bfd_zalloc (info->input_bfds, size);
      if (sfixup->contents == NULL)
	return FALSE;
    }
  return TRUE;
}
5481
5482#define TARGET_BIG_SYM		spu_elf32_vec
5483#define TARGET_BIG_NAME		"elf32-spu"
5484#define ELF_ARCH		bfd_arch_spu
5485#define ELF_TARGET_ID		SPU_ELF_DATA
5486#define ELF_MACHINE_CODE	EM_SPU
5487/* This matches the alignment need for DMA.  */
5488#define ELF_MAXPAGESIZE		0x80
5489#define elf_backend_rela_normal         1
5490#define elf_backend_can_gc_sections	1
5491
5492#define bfd_elf32_bfd_reloc_type_lookup		spu_elf_reloc_type_lookup
5493#define bfd_elf32_bfd_reloc_name_lookup		spu_elf_reloc_name_lookup
5494#define elf_info_to_howto			spu_elf_info_to_howto
5495#define elf_backend_count_relocs		spu_elf_count_relocs
5496#define elf_backend_relocate_section		spu_elf_relocate_section
5497#define elf_backend_finish_dynamic_sections	spu_elf_finish_dynamic_sections
5498#define elf_backend_symbol_processing		spu_elf_backend_symbol_processing
5499#define elf_backend_link_output_symbol_hook	spu_elf_output_symbol_hook
5500#define elf_backend_object_p			spu_elf_object_p
5501#define bfd_elf32_new_section_hook		spu_elf_new_section_hook
5502#define bfd_elf32_bfd_link_hash_table_create	spu_elf_link_hash_table_create
5503
5504#define elf_backend_additional_program_headers	spu_elf_additional_program_headers
5505#define elf_backend_modify_segment_map		spu_elf_modify_segment_map
5506#define elf_backend_modify_program_headers	spu_elf_modify_program_headers
5507#define elf_backend_post_process_headers        spu_elf_post_process_headers
5508#define elf_backend_fake_sections		spu_elf_fake_sections
5509#define elf_backend_special_sections		spu_elf_special_sections
5510#define bfd_elf32_bfd_final_link		spu_elf_final_link
5511
5512#include "elf32-target.h"
5513