/* Subroutines needed for unwinding stack frames for exception handling.  */
/* Copyright (C) 1997-2015 Free Software Foundation, Inc.
   Contributed by Jason Merrill <jason@cygnus.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */

#ifndef _Unwind_Find_FDE
#include "tconfig.h"
#include "tsystem.h"
#include "coretypes.h"
#include "tm.h"
#include "libgcc_tm.h"
#include "dwarf2.h"
#include "unwind.h"
#define NO_BASE_OF_ENCODED_VALUE
#include "unwind-pe.h"
#include "unwind-dw2-fde.h"
#include "gthr.h"
#endif

/* The unseen_objects list contains objects that have been registered
   but not yet categorized in any way.  The seen_objects list has had
   its pc_begin and count fields initialized at minimum, and is sorted
   by decreasing value of pc_begin.  */
static struct object *unseen_objects;
static struct object *seen_objects;

#ifdef __GTHREAD_MUTEX_INIT
static __gthread_mutex_t object_mutex = __GTHREAD_MUTEX_INIT;
#define init_object_mutex_once()
#else
#ifdef __GTHREAD_MUTEX_INIT_FUNCTION
static __gthread_mutex_t object_mutex;

static void
init_object_mutex (void)
{
  __GTHREAD_MUTEX_INIT_FUNCTION (&object_mutex);
}

static void
init_object_mutex_once (void)
{
  static __gthread_once_t once = __GTHREAD_ONCE_INIT;
  __gthread_once (&once, init_object_mutex);
}
#else
/* ???  Several targets include this file with stubbed-out parts of gthr.h
   and expect no locking to be done.  */
#define init_object_mutex_once()
static __gthread_mutex_t object_mutex;
#endif
#endif

/* Called from crtbegin.o to register the unwind info for an object.  */

void
__register_frame_info_bases (const void *begin, struct object *ob,
			     void *tbase, void *dbase)
{
  /* If .eh_frame is empty, don't register at all.  */
  if ((const uword *) begin == 0 || *(const uword *) begin == 0)
    return;

  ob->pc_begin = (void *)-1;
  ob->tbase = tbase;
  ob->dbase = dbase;
  ob->u.single = begin;
  ob->s.i = 0;
  ob->s.b.encoding = DW_EH_PE_omit;
#ifdef DWARF2_OBJECT_END_PTR_EXTENSION
  ob->fde_end = NULL;
#endif

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  ob->next = unseen_objects;
  unseen_objects = ob;

  __gthread_mutex_unlock (&object_mutex);
}

void
__register_frame_info (const void *begin, struct object *ob)
{
  __register_frame_info_bases (begin, ob, 0, 0);
}

void
__register_frame (void *begin)
{
  struct object *ob;

  /* If .eh_frame is empty, don't register at all.  */
  if (*(uword *) begin == 0)
    return;

  ob = malloc (sizeof (struct object));
  __register_frame_info (begin, ob);
}
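
/* Illustrative sketch (not taken from this file's callers): a program that
   generates its own DWARF unwind data at run time (a JIT, for instance)
   could make it visible to the unwinder roughly like so, where EH_FRAME is
   a hypothetical pointer to the generated .eh_frame data terminated by a
   zero length word:

     __register_frame (eh_frame);
     ...
     __deregister_frame (eh_frame);

   Note that __register_frame expects BEGIN to point at the CIE/FDE data
   itself; a table of per-translation-unit pointers goes through
   __register_frame_table instead.  */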

/* Similar, but BEGIN is actually a pointer to a table of unwind entries
   for different translation units.  Called from the file generated by
   collect2.  */

void
__register_frame_info_table_bases (void *begin, struct object *ob,
				   void *tbase, void *dbase)
{
  ob->pc_begin = (void *)-1;
  ob->tbase = tbase;
  ob->dbase = dbase;
  ob->u.array = begin;
  ob->s.i = 0;
  ob->s.b.from_array = 1;
  ob->s.b.encoding = DW_EH_PE_omit;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  ob->next = unseen_objects;
  unseen_objects = ob;

  __gthread_mutex_unlock (&object_mutex);
}

void
__register_frame_info_table (void *begin, struct object *ob)
{
  __register_frame_info_table_bases (begin, ob, 0, 0);
}

void
__register_frame_table (void *begin)
{
  struct object *ob = malloc (sizeof (struct object));
  __register_frame_info_table (begin, ob);
}

/* Called from crtbegin.o to deregister the unwind info for an object.  */
/* ??? Glibc has for a while now exported __register_frame_info and
   __deregister_frame_info.  If we call __register_frame_info_bases
   from crtbegin (wherein it is declared weak), and this object does
   not get pulled from libgcc.a for other reasons, then the
   invocation of __deregister_frame_info will be resolved from glibc.
   Since the registration did not happen there, we'll die.

   Therefore, declare a new deregistration entry point that does the
   exact same thing, but will resolve to the same library as
   implements __register_frame_info_bases.  */

void *
__deregister_frame_info_bases (const void *begin)
{
  struct object **p;
  struct object *ob = 0;

  /* If .eh_frame is empty, we haven't registered.  */
  if ((const uword *) begin == 0 || *(const uword *) begin == 0)
    return ob;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  for (p = &unseen_objects; *p ; p = &(*p)->next)
    if ((*p)->u.single == begin)
      {
	ob = *p;
	*p = ob->next;
	goto out;
      }

  for (p = &seen_objects; *p ; p = &(*p)->next)
    if ((*p)->s.b.sorted)
      {
	if ((*p)->u.sort->orig_data == begin)
	  {
	    ob = *p;
	    *p = ob->next;
	    free (ob->u.sort);
	    goto out;
	  }
      }
    else
      {
	if ((*p)->u.single == begin)
	  {
	    ob = *p;
	    *p = ob->next;
	    goto out;
	  }
      }

 out:
  __gthread_mutex_unlock (&object_mutex);
  gcc_assert (ob);
  return (void *) ob;
}

void *
__deregister_frame_info (const void *begin)
{
  return __deregister_frame_info_bases (begin);
}

void
__deregister_frame (void *begin)
{
  /* If .eh_frame is empty, we haven't registered.  */
  if (*(uword *) begin != 0)
    free (__deregister_frame_info (begin));
}


/* Like base_of_encoded_value, but take the base from a struct object
   instead of an _Unwind_Context.  */

static _Unwind_Ptr
base_from_object (unsigned char encoding, struct object *ob)
{
  if (encoding == DW_EH_PE_omit)
    return 0;

  switch (encoding & 0x70)
    {
    case DW_EH_PE_absptr:
    case DW_EH_PE_pcrel:
    case DW_EH_PE_aligned:
      return 0;

    case DW_EH_PE_textrel:
      return (_Unwind_Ptr) ob->tbase;
    case DW_EH_PE_datarel:
      return (_Unwind_Ptr) ob->dbase;
    default:
      gcc_unreachable ();
    }
}
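
/* For example, an encoding byte of (DW_EH_PE_datarel | DW_EH_PE_sdata4),
   i.e. 0x3b, has upper bits 0x30, so base_from_object returns ob->dbase
   and the 4-byte signed datum is later interpreted relative to that base.
   The low nibble (the value format) is ignored here.  */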

/* Return the FDE pointer encoding from the CIE.  */
/* ??? This is a subset of extract_cie_info from unwind-dw2.c.  */

static int
get_cie_encoding (const struct dwarf_cie *cie)
{
  const unsigned char *aug, *p;
  _Unwind_Ptr dummy;
  _uleb128_t utmp;
  _sleb128_t stmp;

  aug = cie->augmentation;
  p = aug + strlen ((const char *)aug) + 1; /* Skip the augmentation string.  */
  if (__builtin_expect (cie->version >= 4, 0))
    {
      if (p[0] != sizeof (void *) || p[1] != 0)
	return DW_EH_PE_omit;		/* We are not prepared to handle unexpected
					   address sizes or segment selectors.  */
      p += 2;				/* Skip address size and segment size.  */
    }

  if (aug[0] != 'z')
    return DW_EH_PE_absptr;

  p = read_uleb128 (p, &utmp);		/* Skip code alignment.  */
  p = read_sleb128 (p, &stmp);		/* Skip data alignment.  */
  if (cie->version == 1)		/* Skip return address column.  */
    p++;
  else
    p = read_uleb128 (p, &utmp);

  aug++;				/* Skip 'z' */
  p = read_uleb128 (p, &utmp);		/* Skip augmentation length.  */
  while (1)
    {
      /* This is what we're looking for.  */
      if (*aug == 'R')
	return *p;
      /* Personality encoding and pointer.  */
      else if (*aug == 'P')
	{
	  /* ??? Avoid dereferencing indirect pointers, since we're
	     faking the base address.  Gotta keep DW_EH_PE_aligned
	     intact, however.  */
	  p = read_encoded_value_with_base (*p & 0x7F, 0, p + 1, &dummy);
	}
      /* LSDA encoding.  */
      else if (*aug == 'L')
	p++;
      /* Otherwise end of string, or unknown augmentation.  */
      else
	return DW_EH_PE_absptr;
      aug++;
    }
}
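
/* As an illustration: a typical GCC-emitted CIE uses the augmentation
   string "zR", in which case the loop above returns the first byte of
   the augmentation data as the FDE pointer encoding, commonly
   (DW_EH_PE_pcrel | DW_EH_PE_sdata4), i.e. 0x1b.  */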

static inline int
get_fde_encoding (const struct dwarf_fde *f)
{
  return get_cie_encoding (get_cie (f));
}


/* Sorting an array of FDEs by address.
   (Ideally we would have the linker sort the FDEs so we don't have to do
   it at run time. But the linkers are not yet prepared for this.)  */

/* Comparison routines.  Three variants of increasing complexity.  */

static int
fde_unencoded_compare (struct object *ob __attribute__((unused)),
		       const fde *x, const fde *y)
{
  _Unwind_Ptr x_ptr, y_ptr;
  memcpy (&x_ptr, x->pc_begin, sizeof (_Unwind_Ptr));
  memcpy (&y_ptr, y->pc_begin, sizeof (_Unwind_Ptr));

  if (x_ptr > y_ptr)
    return 1;
  if (x_ptr < y_ptr)
    return -1;
  return 0;
}

static int
fde_single_encoding_compare (struct object *ob, const fde *x, const fde *y)
{
  _Unwind_Ptr base, x_ptr, y_ptr;

  base = base_from_object (ob->s.b.encoding, ob);
  read_encoded_value_with_base (ob->s.b.encoding, base, x->pc_begin, &x_ptr);
  read_encoded_value_with_base (ob->s.b.encoding, base, y->pc_begin, &y_ptr);

  if (x_ptr > y_ptr)
    return 1;
  if (x_ptr < y_ptr)
    return -1;
  return 0;
}

static int
fde_mixed_encoding_compare (struct object *ob, const fde *x, const fde *y)
{
  int x_encoding, y_encoding;
  _Unwind_Ptr x_ptr, y_ptr;

  x_encoding = get_fde_encoding (x);
  read_encoded_value_with_base (x_encoding, base_from_object (x_encoding, ob),
				x->pc_begin, &x_ptr);

  y_encoding = get_fde_encoding (y);
  read_encoded_value_with_base (y_encoding, base_from_object (y_encoding, ob),
				y->pc_begin, &y_ptr);

  if (x_ptr > y_ptr)
    return 1;
  if (x_ptr < y_ptr)
    return -1;
  return 0;
}

typedef int (*fde_compare_t) (struct object *, const fde *, const fde *);


/* This is a special mix of insertion sort and heap sort, optimized for
   the data sets that actually occur. They look like
   101 102 103 127 128 105 108 110 190 111 115 119 125 160 126 129 130.
   I.e. a linearly increasing sequence (coming from functions in the text
   section), with additionally a few unordered elements (coming from functions
   in gnu_linkonce sections) whose values are higher than the values in the
   surrounding linear sequence (but not necessarily higher than the values
   at the end of the linear sequence!).
   The worst-case total run time is O(N) + O(n log (n)), where N is the
   total number of FDEs and n is the number of erratic ones.  */

struct fde_accumulator
{
  struct fde_vector *linear;
  struct fde_vector *erratic;
};

static inline int
start_fde_sort (struct fde_accumulator *accu, size_t count)
{
  size_t size;
  if (! count)
    return 0;

  size = sizeof (struct fde_vector) + sizeof (const fde *) * count;
  if ((accu->linear = malloc (size)))
    {
      accu->linear->count = 0;
      if ((accu->erratic = malloc (size)))
	accu->erratic->count = 0;
      return 1;
    }
  else
    return 0;
}

static inline void
fde_insert (struct fde_accumulator *accu, const fde *this_fde)
{
  if (accu->linear)
    accu->linear->array[accu->linear->count++] = this_fde;
}

/* Split LINEAR into a linear sequence with low values and an erratic
   sequence with high values, put the linear one (of longest possible
   length) into LINEAR and the erratic one into ERRATIC. This is O(N).

   Because the longest linear sequence we are trying to locate within the
   incoming LINEAR array can be interspersed with (high valued) erratic
   entries, we construct a chain indicating the sequenced entries.
   To avoid having to allocate this chain, we overlay it onto the space of
   the ERRATIC array during construction.  A final pass iterates over the
   chain to determine what should be placed in the ERRATIC array, and
   what is the linear sequence.  This overlay is safe from aliasing.  */

static inline void
fde_split (struct object *ob, fde_compare_t fde_compare,
	   struct fde_vector *linear, struct fde_vector *erratic)
{
  static const fde *marker;
  size_t count = linear->count;
  const fde *const *chain_end = &marker;
  size_t i, j, k;

  /* This should optimize out, but it is wise to make sure this assumption
     is correct. Should these have different sizes, we cannot cast between
     them and the overlaying onto ERRATIC will not work.  */
  gcc_assert (sizeof (const fde *) == sizeof (const fde **));

  for (i = 0; i < count; i++)
    {
      const fde *const *probe;

      for (probe = chain_end;
	   probe != &marker && fde_compare (ob, linear->array[i], *probe) < 0;
	   probe = chain_end)
	{
	  chain_end = (const fde *const*) erratic->array[probe - linear->array];
	  erratic->array[probe - linear->array] = NULL;
	}
      erratic->array[i] = (const fde *) chain_end;
      chain_end = &linear->array[i];
    }

  /* Each entry in LINEAR which is part of the linear sequence we have
     discovered will correspond to a non-NULL entry in the chain we built in
     the ERRATIC array.  */
  for (i = j = k = 0; i < count; i++)
    if (erratic->array[i])
      linear->array[j++] = linear->array[i];
    else
      erratic->array[k++] = linear->array[i];
  linear->count = j;
  erratic->count = k;
}
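
/* A small worked example (using plain numbers in place of FDE addresses):
   splitting { 10, 20, 90, 30, 40 } leaves LINEAR = { 10, 20, 30, 40 } and
   ERRATIC = { 90 }.  When 30 is reached, the chain entry for 90 is popped
   and its overlay slot set to NULL, so the final pass keeps 10, 20, 30, 40
   in the linear sequence and moves 90 to ERRATIC.  */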

#define SWAP(x,y) do { const fde * tmp = x; x = y; y = tmp; } while (0)

/* Convert a semi-heap to a heap.  A semi-heap is a heap except possibly
   for the first (root) node; push it down to its rightful place.  */

static void
frame_downheap (struct object *ob, fde_compare_t fde_compare, const fde **a,
		int lo, int hi)
{
  int i, j;

  for (i = lo, j = 2*i+1;
       j < hi;
       j = 2*i+1)
    {
      if (j+1 < hi && fde_compare (ob, a[j], a[j+1]) < 0)
	++j;

      if (fde_compare (ob, a[i], a[j]) < 0)
	{
	  SWAP (a[i], a[j]);
	  i = j;
	}
      else
	break;
    }
}
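
/* For instance (again with plain numbers standing in for FDE addresses),
   pushing down the root of the semi-heap { 5, 9, 3 } compares the children
   9 and 3, picks 9, swaps it with 5, and yields the heap { 9, 5, 3 }.  */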

/* This is O(n log(n)).  BSD/OS defines heapsort in stdlib.h, so we must
   use a name that does not conflict.  */

static void
frame_heapsort (struct object *ob, fde_compare_t fde_compare,
		struct fde_vector *erratic)
{
  /* For a description of this algorithm, see:
     Samuel P. Harbison, Guy L. Steele Jr.: C, a reference manual, 2nd ed.,
     p. 60-61.  */
  const fde ** a = erratic->array;
  /* A portion of the array is called a "heap" if for all i>=0:
     If i and 2i+1 are valid indices, then a[i] >= a[2i+1].
     If i and 2i+2 are valid indices, then a[i] >= a[2i+2].  */
  size_t n = erratic->count;
  int m;

  /* Expand our heap incrementally from the end of the array, heapifying
     each resulting semi-heap as we go.  After each step, a[m] is the top
     of a heap.  */
  for (m = n/2-1; m >= 0; --m)
    frame_downheap (ob, fde_compare, a, m, n);

  /* Shrink our heap incrementally from the end of the array, first
     swapping out the largest element a[0] and then re-heapifying the
     resulting semi-heap.  After each step, a[0..m) is a heap.  */
  for (m = n-1; m >= 1; --m)
    {
      SWAP (a[0], a[m]);
      frame_downheap (ob, fde_compare, a, 0, m);
    }
#undef SWAP
}

/* Merge V1 and V2, both sorted, and put the result into V1.  */
static inline void
fde_merge (struct object *ob, fde_compare_t fde_compare,
	   struct fde_vector *v1, struct fde_vector *v2)
{
  size_t i1, i2;
  const fde * fde2;

  i2 = v2->count;
  if (i2 > 0)
    {
      i1 = v1->count;
      do
	{
	  i2--;
	  fde2 = v2->array[i2];
	  while (i1 > 0 && fde_compare (ob, v1->array[i1-1], fde2) > 0)
	    {
	      v1->array[i1+i2] = v1->array[i1-1];
	      i1--;
	    }
	  v1->array[i1+i2] = fde2;
	}
      while (i2 > 0);
      v1->count += v2->count;
    }
}
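
/* Note that V1 must have room for V1->count + V2->count entries; this holds
   here because both the linear and the erratic vector were allocated in
   start_fde_sort with space for all COUNT fdes.  For example, merging
   V1 = { 1, 3, 5 } with V2 = { 2, 4 } shifts 3 and 5 to the right and fills
   in 2 and 4, leaving { 1, 2, 3, 4, 5 } in V1.  */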

static inline void
end_fde_sort (struct object *ob, struct fde_accumulator *accu, size_t count)
{
  fde_compare_t fde_compare;

  gcc_assert (!accu->linear || accu->linear->count == count);

  if (ob->s.b.mixed_encoding)
    fde_compare = fde_mixed_encoding_compare;
  else if (ob->s.b.encoding == DW_EH_PE_absptr)
    fde_compare = fde_unencoded_compare;
  else
    fde_compare = fde_single_encoding_compare;

  if (accu->erratic)
    {
      fde_split (ob, fde_compare, accu->linear, accu->erratic);
      gcc_assert (accu->linear->count + accu->erratic->count == count);
      frame_heapsort (ob, fde_compare, accu->erratic);
      fde_merge (ob, fde_compare, accu->linear, accu->erratic);
      free (accu->erratic);
    }
  else
    {
      /* We've not managed to malloc an erratic array,
	 so heap sort in the linear one.  */
      frame_heapsort (ob, fde_compare, accu->linear);
    }
}


/* Update encoding, mixed_encoding, and pc_begin for OB for the
   fde array beginning at THIS_FDE.  Return the number of fdes
   encountered along the way.  */

static size_t
classify_object_over_fdes (struct object *ob, const fde *this_fde)
{
  const struct dwarf_cie *last_cie = 0;
  size_t count = 0;
  int encoding = DW_EH_PE_absptr;
  _Unwind_Ptr base = 0;

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      const struct dwarf_cie *this_cie;
      _Unwind_Ptr mask, pc_begin;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
	continue;

      /* Determine the encoding for this FDE.  Note mixed encoded
	 objects for later.  */
      this_cie = get_cie (this_fde);
      if (this_cie != last_cie)
	{
	  last_cie = this_cie;
	  encoding = get_cie_encoding (this_cie);
	  if (encoding == DW_EH_PE_omit)
	    return -1;
	  base = base_from_object (encoding, ob);
	  if (ob->s.b.encoding == DW_EH_PE_omit)
	    ob->s.b.encoding = encoding;
	  else if (ob->s.b.encoding != encoding)
	    ob->s.b.mixed_encoding = 1;
	}

      read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
				    &pc_begin);

      /* Take care to ignore link-once functions that were removed.
	 In these cases, the function address will be NULL, but if
	 the encoding is smaller than a pointer a true NULL may not
	 be representable.  Assume 0 in the representable bits is NULL.  */
      mask = size_of_encoded_value (encoding);
      if (mask < sizeof (void *))
	mask = (((_Unwind_Ptr) 1) << (mask << 3)) - 1;
      else
	mask = -1;

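      /* For example, with a 4-byte encoding (DW_EH_PE_udata4 or
	 DW_EH_PE_sdata4) on a 64-bit target, MASK becomes
	 ((_Unwind_Ptr) 1 << 32) - 1 == 0xffffffff, so only the low
	 32 bits of PC_BEGIN are tested against zero.  */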
      if ((pc_begin & mask) == 0)
	continue;

      count += 1;
      if ((void *) pc_begin < ob->pc_begin)
	ob->pc_begin = (void *) pc_begin;
    }

  return count;
}

static void
add_fdes (struct object *ob, struct fde_accumulator *accu, const fde *this_fde)
{
  const struct dwarf_cie *last_cie = 0;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      const struct dwarf_cie *this_cie;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
	continue;

      if (ob->s.b.mixed_encoding)
	{
	  /* Determine the encoding for this FDE.  Note mixed encoded
	     objects for later.  */
	  this_cie = get_cie (this_fde);
	  if (this_cie != last_cie)
	    {
	      last_cie = this_cie;
	      encoding = get_cie_encoding (this_cie);
	      base = base_from_object (encoding, ob);
	    }
	}

      if (encoding == DW_EH_PE_absptr)
	{
	  _Unwind_Ptr ptr;
	  memcpy (&ptr, this_fde->pc_begin, sizeof (_Unwind_Ptr));
	  if (ptr == 0)
	    continue;
	}
      else
	{
	  _Unwind_Ptr pc_begin, mask;

	  read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
					&pc_begin);

	  /* Take care to ignore link-once functions that were removed.
	     In these cases, the function address will be NULL, but if
	     the encoding is smaller than a pointer a true NULL may not
	     be representable.  Assume 0 in the representable bits is NULL.  */
	  mask = size_of_encoded_value (encoding);
	  if (mask < sizeof (void *))
	    mask = (((_Unwind_Ptr) 1) << (mask << 3)) - 1;
	  else
	    mask = -1;

	  if ((pc_begin & mask) == 0)
	    continue;
	}

      fde_insert (accu, this_fde);
    }
}

/* Set up a sorted array of pointers to FDEs for a loaded object.  We
   count up the entries before allocating the array because it's likely to
   be faster.  We can be called multiple times, should we have failed to
   allocate a sorted fde array on a previous occasion.  */

static inline void
init_object (struct object* ob)
{
  struct fde_accumulator accu;
  size_t count;

  count = ob->s.b.count;
  if (count == 0)
    {
      if (ob->s.b.from_array)
	{
	  fde **p = ob->u.array;
	  for (count = 0; *p; ++p)
	    {
	      size_t cur_count = classify_object_over_fdes (ob, *p);
	      if (cur_count == (size_t) -1)
		goto unhandled_fdes;
	      count += cur_count;
	    }
	}
      else
	{
	  count = classify_object_over_fdes (ob, ob->u.single);
	  if (count == (size_t) -1)
	    {
	      static const fde terminator;
	    unhandled_fdes:
	      ob->s.i = 0;
	      ob->s.b.encoding = DW_EH_PE_omit;
	      ob->u.single = &terminator;
	      return;
	    }
	}

      /* The count field we have in the main struct object is somewhat
	 limited, but should suffice for virtually all cases.  If the
	 counted value doesn't fit, re-write a zero.  The worst that
	 happens is that we re-count next time -- admittedly non-trivial
	 in that this implies some 2M fdes, but at least we function.  */
      ob->s.b.count = count;
      if (ob->s.b.count != count)
	ob->s.b.count = 0;
    }

  if (!start_fde_sort (&accu, count))
    return;

  if (ob->s.b.from_array)
    {
      fde **p;
      for (p = ob->u.array; *p; ++p)
	add_fdes (ob, &accu, *p);
    }
  else
    add_fdes (ob, &accu, ob->u.single);

  end_fde_sort (ob, &accu, count);

  /* Save the original fde pointer, since this is the key by which the
     DSO will deregister the object.  */
  accu.linear->orig_data = ob->u.single;
  ob->u.sort = accu.linear;

  ob->s.b.sorted = 1;
}

/* A linear search through a set of FDEs for the given PC.  This is
   used when there was insufficient memory to allocate and sort an
   array.  */

static const fde *
linear_search_fdes (struct object *ob, const fde *this_fde, void *pc)
{
  const struct dwarf_cie *last_cie = 0;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      const struct dwarf_cie *this_cie;
      _Unwind_Ptr pc_begin, pc_range;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
	continue;

      if (ob->s.b.mixed_encoding)
	{
	  /* Determine the encoding for this FDE.  Note mixed encoded
	     objects for later.  */
	  this_cie = get_cie (this_fde);
	  if (this_cie != last_cie)
	    {
	      last_cie = this_cie;
	      encoding = get_cie_encoding (this_cie);
	      base = base_from_object (encoding, ob);
	    }
	}

      if (encoding == DW_EH_PE_absptr)
	{
	  const _Unwind_Ptr *pc_array = (const _Unwind_Ptr *) this_fde->pc_begin;
	  pc_begin = pc_array[0];
	  pc_range = pc_array[1];
	  if (pc_begin == 0)
	    continue;
	}
      else
	{
	  _Unwind_Ptr mask;
	  const unsigned char *p;

	  p = read_encoded_value_with_base (encoding, base,
					    this_fde->pc_begin, &pc_begin);
	  read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

	  /* Take care to ignore link-once functions that were removed.
	     In these cases, the function address will be NULL, but if
	     the encoding is smaller than a pointer a true NULL may not
	     be representable.  Assume 0 in the representable bits is NULL.  */
	  mask = size_of_encoded_value (encoding);
	  if (mask < sizeof (void *))
	    mask = (((_Unwind_Ptr) 1) << (mask << 3)) - 1;
	  else
	    mask = -1;

	  if ((pc_begin & mask) == 0)
	    continue;
	}

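      /* The single unsigned comparison below also rejects PC values
	 below PC_BEGIN: the subtraction wraps around to a very large
	 value, which cannot be less than PC_RANGE.  */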
      if ((_Unwind_Ptr) pc - pc_begin < pc_range)
	return this_fde;
    }

  return NULL;
}

/* Binary search for an FDE containing the given PC.  Here are three
   implementations of increasing complexity.  */

static inline const fde *
binary_search_unencoded_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  size_t lo, hi;

  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      const fde *const f = vec->array[i];
      void *pc_begin;
      uaddr pc_range;
      memcpy (&pc_begin, (const void * const *) f->pc_begin, sizeof (void *));
      memcpy (&pc_range, (const uaddr *) f->pc_begin + 1, sizeof (uaddr));

      if (pc < pc_begin)
	hi = i;
      else if (pc >= pc_begin + pc_range)
	lo = i + 1;
      else
	return f;
    }

  return NULL;
}

static inline const fde *
binary_search_single_encoding_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (encoding, ob);
  size_t lo, hi;

  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      const fde *f = vec->array[i];
      _Unwind_Ptr pc_begin, pc_range;
      const unsigned char *p;

      p = read_encoded_value_with_base (encoding, base, f->pc_begin,
					&pc_begin);
      read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

      if ((_Unwind_Ptr) pc < pc_begin)
	hi = i;
      else if ((_Unwind_Ptr) pc >= pc_begin + pc_range)
	lo = i + 1;
      else
	return f;
    }

  return NULL;
}

static inline const fde *
binary_search_mixed_encoding_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  size_t lo, hi;

  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      const fde *f = vec->array[i];
      _Unwind_Ptr pc_begin, pc_range;
      const unsigned char *p;
      int encoding;

      encoding = get_fde_encoding (f);
      p = read_encoded_value_with_base (encoding,
					base_from_object (encoding, ob),
					f->pc_begin, &pc_begin);
      read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

      if ((_Unwind_Ptr) pc < pc_begin)
	hi = i;
      else if ((_Unwind_Ptr) pc >= pc_begin + pc_range)
	lo = i + 1;
      else
	return f;
    }

  return NULL;
}

static const fde *
search_object (struct object* ob, void *pc)
{
  /* If the data hasn't been sorted, try to do this now.  We may have
     more memory available than last time we tried.  */
  if (! ob->s.b.sorted)
    {
      init_object (ob);

      /* Despite the above comment, the normal reason to get here is
	 that we've not processed this object before.  A quick range
	 check is in order.  */
      if (pc < ob->pc_begin)
	return NULL;
    }

  if (ob->s.b.sorted)
    {
      if (ob->s.b.mixed_encoding)
	return binary_search_mixed_encoding_fdes (ob, pc);
      else if (ob->s.b.encoding == DW_EH_PE_absptr)
	return binary_search_unencoded_fdes (ob, pc);
      else
	return binary_search_single_encoding_fdes (ob, pc);
    }
  else
    {
      /* Long slow laborious linear search, because we've no memory.  */
      if (ob->s.b.from_array)
	{
	  fde **p;
	  for (p = ob->u.array; *p ; p++)
	    {
	      const fde *f = linear_search_fdes (ob, *p, pc);
	      if (f)
		return f;
	    }
	  return NULL;
	}
      else
	return linear_search_fdes (ob, ob->u.single, pc);
    }
}

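/* Find the FDE covering PC and fill in BASES for use when decoding the
   associated CIE/FDE data.  Callers generally pass an address strictly
   inside the function of interest (for example, the return address minus
   one when unwinding through a call), so that a call as the last insn of
   a function does not resolve to the FDE of the function that follows.  */
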
const fde *
_Unwind_Find_FDE (void *pc, struct dwarf_eh_bases *bases)
{
  struct object *ob;
  const fde *f = NULL;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  /* Linear search through the classified objects, to find the one
     containing the pc.  Note that pc_begin is sorted descending, and
     we expect objects to be non-overlapping.  */
  for (ob = seen_objects; ob; ob = ob->next)
    if (pc >= ob->pc_begin)
      {
	f = search_object (ob, pc);
	if (f)
	  goto fini;
	break;
      }

  /* Classify and search the objects we've not yet processed.  */
  while ((ob = unseen_objects))
    {
      struct object **p;

      unseen_objects = ob->next;
      f = search_object (ob, pc);

      /* Insert the object into the classified list.  */
      for (p = &seen_objects; *p ; p = &(*p)->next)
	if ((*p)->pc_begin < ob->pc_begin)
	  break;
      ob->next = *p;
      *p = ob;

      if (f)
	goto fini;
    }

 fini:
  __gthread_mutex_unlock (&object_mutex);

  if (f)
    {
      int encoding;
      _Unwind_Ptr func;

      bases->tbase = ob->tbase;
      bases->dbase = ob->dbase;

      encoding = ob->s.b.encoding;
      if (ob->s.b.mixed_encoding)
	encoding = get_fde_encoding (f);
      read_encoded_value_with_base (encoding, base_from_object (encoding, ob),
				    f->pc_begin, &func);
      bases->func = (void *) func;
    }

  return f;
}