/* Subroutines needed for unwinding stack frames for exception handling.  */
/* Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005
   Free Software Foundation, Inc.
   Contributed by Jason Merrill <jason@cygnus.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file.  (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combine
executable.)

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */

#ifndef _Unwind_Find_FDE
#include "tconfig.h"
#include "tsystem.h"
#include "coretypes.h"
#include "tm.h"
#include "dwarf2.h"
#include "unwind.h"
#define NO_BASE_OF_ENCODED_VALUE
#include "unwind-pe.h"
#include "unwind-dw2-fde.h"
#include "gthr.h"
#endif

/* The unseen_objects list contains objects that have been registered
   but not yet categorized in any way.  The seen_objects list has had
   its pc_begin and count fields initialized at minimum, and is sorted
   by decreasing value of pc_begin.  */
static struct object *unseen_objects;
static struct object *seen_objects;

#ifdef __GTHREAD_MUTEX_INIT
static __gthread_mutex_t object_mutex = __GTHREAD_MUTEX_INIT;
#else
static __gthread_mutex_t object_mutex;
#endif

#ifdef __GTHREAD_MUTEX_INIT_FUNCTION
static void
init_object_mutex (void)
{
  __GTHREAD_MUTEX_INIT_FUNCTION (&object_mutex);
}

static void
init_object_mutex_once (void)
{
  static __gthread_once_t once = __GTHREAD_ONCE_INIT;
  __gthread_once (&once, init_object_mutex);
}
#else
#define init_object_mutex_once()
#endif

/* Called from crtbegin.o to register the unwind info for an object.  */

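/* A typical caller (e.g. frame_dummy in crtstuff.c) passes the start of
   the object's .eh_frame section together with a statically allocated
   struct object, roughly (a sketch, not the exact crtbegin code):

       static struct object ob;
       __register_frame_info (__EH_FRAME_BEGIN__, &ob);  */
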
void
__register_frame_info_bases (const void *begin, struct object *ob,
			     void *tbase, void *dbase)
{
  /* If .eh_frame is empty, don't register at all.  */
  if ((uword *) begin == 0 || *(uword *) begin == 0)
    return;

  ob->pc_begin = (void *)-1;
  ob->tbase = tbase;
  ob->dbase = dbase;
  ob->u.single = begin;
  ob->s.i = 0;
  ob->s.b.encoding = DW_EH_PE_omit;
#ifdef DWARF2_OBJECT_END_PTR_EXTENSION
  ob->fde_end = NULL;
#endif

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  ob->next = unseen_objects;
  unseen_objects = ob;

  __gthread_mutex_unlock (&object_mutex);
}

void
__register_frame_info (const void *begin, struct object *ob)
{
  __register_frame_info_bases (begin, ob, 0, 0);
}

void
__register_frame (void *begin)
{
  struct object *ob;

  /* If .eh_frame is empty, don't register at all.  */
  if (*(uword *) begin == 0)
    return;

  ob = malloc (sizeof (struct object));
  __register_frame_info (begin, ob);
}

/* Similar, but BEGIN is actually a pointer to a table of unwind entries
   for different translation units.  Called from the file generated by
   collect2.  */

void
__register_frame_info_table_bases (void *begin, struct object *ob,
				   void *tbase, void *dbase)
{
  ob->pc_begin = (void *)-1;
  ob->tbase = tbase;
  ob->dbase = dbase;
  ob->u.array = begin;
  ob->s.i = 0;
  ob->s.b.from_array = 1;
  ob->s.b.encoding = DW_EH_PE_omit;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  ob->next = unseen_objects;
  unseen_objects = ob;

  __gthread_mutex_unlock (&object_mutex);
}

void
__register_frame_info_table (void *begin, struct object *ob)
{
  __register_frame_info_table_bases (begin, ob, 0, 0);
}

void
__register_frame_table (void *begin)
{
  struct object *ob = malloc (sizeof (struct object));
  __register_frame_info_table (begin, ob);
}

/* Called from crtbegin.o to deregister the unwind info for an object.  */
/* ??? Glibc has for a while now exported __register_frame_info and
   __deregister_frame_info.  If we call __register_frame_info_bases
   from crtbegin (wherein it is declared weak), and this object does
   not get pulled from libgcc.a for other reasons, then the
   invocation of __deregister_frame_info will be resolved from glibc.
   Since the registration did not happen there, we'll die.

   Therefore, declare a new deregistration entry point that does the
   exact same thing, but will resolve to the same library as
   implements __register_frame_info_bases.  */

void *
__deregister_frame_info_bases (const void *begin)
{
  struct object **p;
  struct object *ob = 0;

  /* If .eh_frame is empty, we haven't registered.  */
  if ((uword *) begin == 0 || *(uword *) begin == 0)
    return ob;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  for (p = &unseen_objects; *p ; p = &(*p)->next)
    if ((*p)->u.single == begin)
      {
	ob = *p;
	*p = ob->next;
	goto out;
      }

  for (p = &seen_objects; *p ; p = &(*p)->next)
    if ((*p)->s.b.sorted)
      {
	if ((*p)->u.sort->orig_data == begin)
	  {
	    ob = *p;
	    *p = ob->next;
	    free (ob->u.sort);
	    goto out;
	  }
      }
    else
      {
	if ((*p)->u.single == begin)
	  {
	    ob = *p;
	    *p = ob->next;
	    goto out;
	  }
      }

 out:
  __gthread_mutex_unlock (&object_mutex);
  gcc_assert (ob);
  return (void *) ob;
}

void *
__deregister_frame_info (const void *begin)
{
  return __deregister_frame_info_bases (begin);
}

void
__deregister_frame (void *begin)
{
  /* If .eh_frame is empty, we haven't registered.  */
  if (*(uword *) begin != 0)
    free (__deregister_frame_info (begin));
}


/* Like base_of_encoded_value, but take the base from a struct object
   instead of an _Unwind_Context.  */

static _Unwind_Ptr
base_from_object (unsigned char encoding, struct object *ob)
{
  if (encoding == DW_EH_PE_omit)
    return 0;

  switch (encoding & 0x70)
    {
    case DW_EH_PE_absptr:
    case DW_EH_PE_pcrel:
    case DW_EH_PE_aligned:
      return 0;

    case DW_EH_PE_textrel:
      return (_Unwind_Ptr) ob->tbase;
    case DW_EH_PE_datarel:
      return (_Unwind_Ptr) ob->dbase;
    default:
      gcc_unreachable ();
    }
}

/* Return the FDE pointer encoding from the CIE.  */
/* ??? This is a subset of extract_cie_info from unwind-dw2.c.  */

static int
get_cie_encoding (const struct dwarf_cie *cie)
{
  const unsigned char *aug, *p;
  _Unwind_Ptr dummy;
  _Unwind_Word utmp;
  _Unwind_Sword stmp;

  aug = cie->augmentation;
  if (aug[0] != 'z')
    return DW_EH_PE_absptr;

  p = aug + strlen ((const char *)aug) + 1; /* Skip the augmentation string.  */
  p = read_uleb128 (p, &utmp);		/* Skip code alignment.  */
  p = read_sleb128 (p, &stmp);		/* Skip data alignment.  */
  if (cie->version == 1)		/* Skip return address column.  */
    p++;
  else
    p = read_uleb128 (p, &utmp);

  aug++;				/* Skip 'z' */
  p = read_uleb128 (p, &utmp);		/* Skip augmentation length.  */
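  /* The augmentation data appears in the same order as the characters
     following 'z'; e.g. for "zPLR" we skip the personality routine ('P')
     and the LSDA encoding ('L') before reaching the FDE encoding ('R').  */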
  while (1)
    {
      /* This is what we're looking for.  */
      if (*aug == 'R')
	return *p;
      /* Personality encoding and pointer.  */
      else if (*aug == 'P')
	{
	  /* ??? Avoid dereferencing indirect pointers, since we're
	     faking the base address.  Gotta keep DW_EH_PE_aligned
	     intact, however.  */
	  p = read_encoded_value_with_base (*p & 0x7F, 0, p + 1, &dummy);
	}
      /* LSDA encoding.  */
      else if (*aug == 'L')
	p++;
      /* Otherwise end of string, or unknown augmentation.  */
      else
	return DW_EH_PE_absptr;
      aug++;
    }
}

static inline int
get_fde_encoding (const struct dwarf_fde *f)
{
  return get_cie_encoding (get_cie (f));
}


/* Sorting an array of FDEs by address.
   (Ideally we would have the linker sort the FDEs so we don't have to do
   it at run time. But the linkers are not yet prepared for this.)  */

/* Comparison routines.  Three variants of increasing complexity.  */

static int
fde_unencoded_compare (struct object *ob __attribute__((unused)),
		       const fde *x, const fde *y)
{
  _Unwind_Ptr x_ptr = *(_Unwind_Ptr *) x->pc_begin;
  _Unwind_Ptr y_ptr = *(_Unwind_Ptr *) y->pc_begin;

  if (x_ptr > y_ptr)
    return 1;
  if (x_ptr < y_ptr)
    return -1;
  return 0;
}

static int
fde_single_encoding_compare (struct object *ob, const fde *x, const fde *y)
{
  _Unwind_Ptr base, x_ptr, y_ptr;

  base = base_from_object (ob->s.b.encoding, ob);
  read_encoded_value_with_base (ob->s.b.encoding, base, x->pc_begin, &x_ptr);
  read_encoded_value_with_base (ob->s.b.encoding, base, y->pc_begin, &y_ptr);

  if (x_ptr > y_ptr)
    return 1;
  if (x_ptr < y_ptr)
    return -1;
  return 0;
}

static int
fde_mixed_encoding_compare (struct object *ob, const fde *x, const fde *y)
{
  int x_encoding, y_encoding;
  _Unwind_Ptr x_ptr, y_ptr;

  x_encoding = get_fde_encoding (x);
  read_encoded_value_with_base (x_encoding, base_from_object (x_encoding, ob),
				x->pc_begin, &x_ptr);

  y_encoding = get_fde_encoding (y);
  read_encoded_value_with_base (y_encoding, base_from_object (y_encoding, ob),
				y->pc_begin, &y_ptr);

  if (x_ptr > y_ptr)
    return 1;
  if (x_ptr < y_ptr)
    return -1;
  return 0;
}

typedef int (*fde_compare_t) (struct object *, const fde *, const fde *);


/* This is a special mix of insertion sort and heap sort, optimized for
   the data sets that actually occur. They look like
   101 102 103 127 128 105 108 110 190 111 115 119 125 160 126 129 130.
   I.e. a linearly increasing sequence (coming from functions in the text
   section), with additionally a few unordered elements (coming from functions
   in gnu_linkonce sections) whose values are higher than the values in the
   surrounding linear sequence (but not necessarily higher than the values
   at the end of the linear sequence!).
   The worst-case total run time is O(N) + O(n log (n)), where N is the
   total number of FDEs and n is the number of erratic ones.  */

struct fde_accumulator
{
  struct fde_vector *linear;
  struct fde_vector *erratic;
};

static inline int
start_fde_sort (struct fde_accumulator *accu, size_t count)
{
  size_t size;
  if (! count)
    return 0;

  size = sizeof (struct fde_vector) + sizeof (const fde *) * count;
  if ((accu->linear = malloc (size)))
    {
      accu->linear->count = 0;
      if ((accu->erratic = malloc (size)))
	accu->erratic->count = 0;
      return 1;
    }
  else
    return 0;
}

static inline void
fde_insert (struct fde_accumulator *accu, const fde *this_fde)
{
  if (accu->linear)
    accu->linear->array[accu->linear->count++] = this_fde;
}

/* Split LINEAR into a linear sequence with low values and an erratic
   sequence with high values, put the linear one (of longest possible
   length) into LINEAR and the erratic one into ERRATIC.  This is O(N).

   Because the longest linear sequence we are trying to locate within the
   incoming LINEAR array can be interspersed with (high valued) erratic
   entries, we construct a chain indicating the sequenced entries.
   To avoid having to allocate this chain, we overlay it onto the space of
   the ERRATIC array during construction.  A final pass iterates over the
   chain to determine what should be placed in the ERRATIC array, and
   what is the linear sequence.  This overlay is safe from aliasing.  */

static inline void
fde_split (struct object *ob, fde_compare_t fde_compare,
	   struct fde_vector *linear, struct fde_vector *erratic)
{
  static const fde *marker;
  size_t count = linear->count;
  const fde **chain_end = &marker;
  size_t i, j, k;

  /* This should optimize out, but it is wise to make sure this assumption
     is correct. Should these have different sizes, we cannot cast between
     them and the overlaying onto ERRATIC will not work.  */
  gcc_assert (sizeof (const fde *) == sizeof (const fde **));

  for (i = 0; i < count; i++)
    {
      const fde **probe;

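      /* Walk the chain backwards from its most recent entry, evicting any
	 entry that compares greater than the current FDE.  Evicted slots
	 are cleared (set to NULL), so the final pass below will classify
	 those entries as erratic.  */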
      for (probe = chain_end;
	   probe != &marker && fde_compare (ob, linear->array[i], *probe) < 0;
	   probe = chain_end)
	{
	  chain_end = (const fde **) erratic->array[probe - linear->array];
	  erratic->array[probe - linear->array] = NULL;
	}
      erratic->array[i] = (const fde *) chain_end;
      chain_end = &linear->array[i];
    }

  /* Each entry in LINEAR which is part of the linear sequence we have
     discovered will correspond to a non-NULL entry in the chain we built in
     the ERRATIC array.  */
  for (i = j = k = 0; i < count; i++)
    if (erratic->array[i])
      linear->array[j++] = linear->array[i];
    else
      erratic->array[k++] = linear->array[i];
  linear->count = j;
  erratic->count = k;
}

#define SWAP(x,y) do { const fde * tmp = x; x = y; y = tmp; } while (0)

/* Convert a semi-heap to a heap.  A semi-heap is a heap except possibly
   for the first (root) node; push it down to its rightful place.  */

static void
frame_downheap (struct object *ob, fde_compare_t fde_compare, const fde **a,
		int lo, int hi)
{
  int i, j;

  for (i = lo, j = 2*i+1;
       j < hi;
       j = 2*i+1)
    {
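      /* Let J index the larger of the two children of I.  */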
      if (j+1 < hi && fde_compare (ob, a[j], a[j+1]) < 0)
	++j;

      if (fde_compare (ob, a[i], a[j]) < 0)
	{
	  SWAP (a[i], a[j]);
	  i = j;
	}
      else
	break;
    }
}

/* This is O(n log(n)).  BSD/OS defines heapsort in stdlib.h, so we must
   use a name that does not conflict.  */

static void
frame_heapsort (struct object *ob, fde_compare_t fde_compare,
		struct fde_vector *erratic)
{
  /* For a description of this algorithm, see:
     Samuel P. Harbison, Guy L. Steele Jr.: C, a reference manual, 2nd ed.,
     p. 60-61.  */
  const fde ** a = erratic->array;
  /* A portion of the array is called a "heap" if for all i>=0:
     If i and 2i+1 are valid indices, then a[i] >= a[2i+1].
     If i and 2i+2 are valid indices, then a[i] >= a[2i+2].  */
  size_t n = erratic->count;
  int m;

  /* Expand our heap incrementally from the end of the array, heapifying
     each resulting semi-heap as we go.  After each step, a[m] is the top
     of a heap.  */
  for (m = n/2-1; m >= 0; --m)
    frame_downheap (ob, fde_compare, a, m, n);

  /* Shrink our heap incrementally from the end of the array, first
     swapping out the largest element a[0] and then re-heapifying the
     resulting semi-heap.  After each step, a[0..m) is a heap.  */
  for (m = n-1; m >= 1; --m)
    {
      SWAP (a[0], a[m]);
      frame_downheap (ob, fde_compare, a, 0, m);
    }
#undef SWAP
}

/* Merge V1 and V2, both sorted, and put the result into V1.  */
static inline void
fde_merge (struct object *ob, fde_compare_t fde_compare,
	   struct fde_vector *v1, struct fde_vector *v2)
{
  size_t i1, i2;
  const fde * fde2;

  i2 = v2->count;
  if (i2 > 0)
    {
      i1 = v1->count;
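      /* Merge from the back: take the V2 elements largest first; V1
	 elements that compare greater than the current V2 element are
	 shifted up to make room, and the V2 element is stored in the
	 resulting gap at index I1+I2.  */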
      do
	{
	  i2--;
	  fde2 = v2->array[i2];
	  while (i1 > 0 && fde_compare (ob, v1->array[i1-1], fde2) > 0)
	    {
	      v1->array[i1+i2] = v1->array[i1-1];
	      i1--;
	    }
	  v1->array[i1+i2] = fde2;
	}
      while (i2 > 0);
      v1->count += v2->count;
    }
}

static inline void
end_fde_sort (struct object *ob, struct fde_accumulator *accu, size_t count)
{
  fde_compare_t fde_compare;

  gcc_assert (!accu->linear || accu->linear->count == count);

  if (ob->s.b.mixed_encoding)
    fde_compare = fde_mixed_encoding_compare;
  else if (ob->s.b.encoding == DW_EH_PE_absptr)
    fde_compare = fde_unencoded_compare;
  else
    fde_compare = fde_single_encoding_compare;

  if (accu->erratic)
    {
      fde_split (ob, fde_compare, accu->linear, accu->erratic);
      gcc_assert (accu->linear->count + accu->erratic->count == count);
      frame_heapsort (ob, fde_compare, accu->erratic);
      fde_merge (ob, fde_compare, accu->linear, accu->erratic);
      free (accu->erratic);
    }
  else
    {
      /* We've not managed to malloc an erratic array,
	 so heap sort in the linear one.  */
      frame_heapsort (ob, fde_compare, accu->linear);
    }
}


/* Update encoding, mixed_encoding, and pc_begin for OB for the
   fde array beginning at THIS_FDE.  Return the number of fdes
   encountered along the way.  */

static size_t
classify_object_over_fdes (struct object *ob, const fde *this_fde)
{
  const struct dwarf_cie *last_cie = 0;
  size_t count = 0;
  int encoding = DW_EH_PE_absptr;
  _Unwind_Ptr base = 0;

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      const struct dwarf_cie *this_cie;
      _Unwind_Ptr mask, pc_begin;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
	continue;

      /* Determine the encoding for this FDE.  Note mixed encoded
	 objects for later.  */
      this_cie = get_cie (this_fde);
      if (this_cie != last_cie)
	{
	  last_cie = this_cie;
	  encoding = get_cie_encoding (this_cie);
	  base = base_from_object (encoding, ob);
	  if (ob->s.b.encoding == DW_EH_PE_omit)
	    ob->s.b.encoding = encoding;
	  else if (ob->s.b.encoding != encoding)
	    ob->s.b.mixed_encoding = 1;
	}

      read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
				    &pc_begin);

      /* Take care to ignore link-once functions that were removed.
	 In these cases, the function address will be NULL, but if
	 the encoding is smaller than a pointer a true NULL may not
	 be representable.  Assume 0 in the representable bits is NULL.  */
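      /* MASK starts out as the encoded value's size in bytes; (MASK << 3)
	 converts that to bits, giving an all-ones mask of that width.  */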
      mask = size_of_encoded_value (encoding);
      if (mask < sizeof (void *))
	mask = (1L << (mask << 3)) - 1;
      else
	mask = -1;

      if ((pc_begin & mask) == 0)
	continue;

      count += 1;
      if ((void *) pc_begin < ob->pc_begin)
	ob->pc_begin = (void *) pc_begin;
    }

  return count;
}

static void
add_fdes (struct object *ob, struct fde_accumulator *accu, const fde *this_fde)
{
  const struct dwarf_cie *last_cie = 0;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      const struct dwarf_cie *this_cie;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
	continue;

      if (ob->s.b.mixed_encoding)
	{
	  /* Determine the encoding for this FDE.  Note mixed encoded
	     objects for later.  */
	  this_cie = get_cie (this_fde);
	  if (this_cie != last_cie)
	    {
	      last_cie = this_cie;
	      encoding = get_cie_encoding (this_cie);
	      base = base_from_object (encoding, ob);
	    }
	}

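      /* With an absolute pointer encoding we can read pc_begin directly;
	 a zero value again marks a deleted link-once function.  */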
      if (encoding == DW_EH_PE_absptr)
	{
	  if (*(_Unwind_Ptr *) this_fde->pc_begin == 0)
	    continue;
	}
      else
	{
	  _Unwind_Ptr pc_begin, mask;

	  read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
					&pc_begin);

	  /* Take care to ignore link-once functions that were removed.
	     In these cases, the function address will be NULL, but if
	     the encoding is smaller than a pointer a true NULL may not
	     be representable.  Assume 0 in the representable bits is NULL.  */
	  mask = size_of_encoded_value (encoding);
	  if (mask < sizeof (void *))
	    mask = (1L << (mask << 3)) - 1;
	  else
	    mask = -1;

	  if ((pc_begin & mask) == 0)
	    continue;
	}

      fde_insert (accu, this_fde);
    }
}

/* Set up a sorted array of pointers to FDEs for a loaded object.  We
   count up the entries before allocating the array because it's likely to
   be faster.  We can be called multiple times, should we have failed to
   allocate a sorted fde array on a previous occasion.  */

static inline void
init_object (struct object* ob)
{
  struct fde_accumulator accu;
  size_t count;

  count = ob->s.b.count;
  if (count == 0)
    {
      if (ob->s.b.from_array)
	{
	  fde **p = ob->u.array;
	  for (count = 0; *p; ++p)
	    count += classify_object_over_fdes (ob, *p);
	}
      else
	count = classify_object_over_fdes (ob, ob->u.single);

      /* The count field we have in the main struct object is somewhat
	 limited, but should suffice for virtually all cases.  If the
	 counted value doesn't fit, re-write a zero.  The worst that
	 happens is that we re-count next time -- admittedly non-trivial
	 in that this implies some 2M fdes, but at least we function.  */
      ob->s.b.count = count;
      if (ob->s.b.count != count)
	ob->s.b.count = 0;
    }

  if (!start_fde_sort (&accu, count))
    return;

  if (ob->s.b.from_array)
    {
      fde **p;
      for (p = ob->u.array; *p; ++p)
	add_fdes (ob, &accu, *p);
    }
  else
    add_fdes (ob, &accu, ob->u.single);

  end_fde_sort (ob, &accu, count);

  /* Save the original fde pointer, since this is the key by which the
     DSO will deregister the object.  */
  accu.linear->orig_data = ob->u.single;
  ob->u.sort = accu.linear;

  ob->s.b.sorted = 1;
}

/* A linear search through a set of FDEs for the given PC.  This is
   used when there was insufficient memory to allocate and sort an
   array.  */

static const fde *
linear_search_fdes (struct object *ob, const fde *this_fde, void *pc)
{
  const struct dwarf_cie *last_cie = 0;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      const struct dwarf_cie *this_cie;
      _Unwind_Ptr pc_begin, pc_range;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
	continue;

      if (ob->s.b.mixed_encoding)
	{
	  /* Determine the encoding for this FDE.  Note mixed encoded
	     objects for later.  */
	  this_cie = get_cie (this_fde);
	  if (this_cie != last_cie)
	    {
	      last_cie = this_cie;
	      encoding = get_cie_encoding (this_cie);
	      base = base_from_object (encoding, ob);
	    }
	}

      if (encoding == DW_EH_PE_absptr)
	{
	  pc_begin = ((_Unwind_Ptr *) this_fde->pc_begin)[0];
	  pc_range = ((_Unwind_Ptr *) this_fde->pc_begin)[1];
	  if (pc_begin == 0)
	    continue;
	}
      else
	{
	  _Unwind_Ptr mask;
	  const unsigned char *p;

	  p = read_encoded_value_with_base (encoding, base,
					    this_fde->pc_begin, &pc_begin);
	  read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

	  /* Take care to ignore link-once functions that were removed.
	     In these cases, the function address will be NULL, but if
	     the encoding is smaller than a pointer a true NULL may not
	     be representable.  Assume 0 in the representable bits is NULL.  */
	  mask = size_of_encoded_value (encoding);
	  if (mask < sizeof (void *))
	    mask = (1L << (mask << 3)) - 1;
	  else
	    mask = -1;

	  if ((pc_begin & mask) == 0)
	    continue;
	}

      if ((_Unwind_Ptr) pc - pc_begin < pc_range)
	return this_fde;
    }

  return NULL;
}

/* Binary search for an FDE containing the given PC.  Here are three
   implementations of increasing complexity.  */

static inline const fde *
binary_search_unencoded_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  size_t lo, hi;

  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      const fde *f = vec->array[i];
      void *pc_begin;
      uaddr pc_range;

      pc_begin = ((void **) f->pc_begin)[0];
      pc_range = ((uaddr *) f->pc_begin)[1];

      if (pc < pc_begin)
	hi = i;
      else if (pc >= pc_begin + pc_range)
	lo = i + 1;
      else
	return f;
    }

  return NULL;
}

static inline const fde *
binary_search_single_encoding_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (encoding, ob);
  size_t lo, hi;

  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      const fde *f = vec->array[i];
      _Unwind_Ptr pc_begin, pc_range;
      const unsigned char *p;

      p = read_encoded_value_with_base (encoding, base, f->pc_begin,
					&pc_begin);
      read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

      if ((_Unwind_Ptr) pc < pc_begin)
	hi = i;
      else if ((_Unwind_Ptr) pc >= pc_begin + pc_range)
	lo = i + 1;
      else
	return f;
    }

  return NULL;
}

static inline const fde *
binary_search_mixed_encoding_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  size_t lo, hi;

  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      const fde *f = vec->array[i];
      _Unwind_Ptr pc_begin, pc_range;
      const unsigned char *p;
      int encoding;

      encoding = get_fde_encoding (f);
      p = read_encoded_value_with_base (encoding,
					base_from_object (encoding, ob),
					f->pc_begin, &pc_begin);
      read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

      if ((_Unwind_Ptr) pc < pc_begin)
	hi = i;
      else if ((_Unwind_Ptr) pc >= pc_begin + pc_range)
	lo = i + 1;
      else
	return f;
    }

  return NULL;
}

static const fde *
search_object (struct object* ob, void *pc)
{
  /* If the data hasn't been sorted, try to do this now.  We may have
     more memory available than last time we tried.  */
  if (! ob->s.b.sorted)
    {
      init_object (ob);

      /* Despite the above comment, the normal reason to get here is
	 that we've not processed this object before.  A quick range
	 check is in order.  */
      if (pc < ob->pc_begin)
	return NULL;
    }

  if (ob->s.b.sorted)
    {
      if (ob->s.b.mixed_encoding)
	return binary_search_mixed_encoding_fdes (ob, pc);
      else if (ob->s.b.encoding == DW_EH_PE_absptr)
	return binary_search_unencoded_fdes (ob, pc);
      else
	return binary_search_single_encoding_fdes (ob, pc);
    }
  else
    {
      /* Long slow laborious linear search, since we have no memory.  */
      if (ob->s.b.from_array)
	{
	  fde **p;
	  for (p = ob->u.array; *p ; p++)
	    {
	      const fde *f = linear_search_fdes (ob, *p, pc);
	      if (f)
		return f;
	    }
	  return NULL;
	}
      else
	return linear_search_fdes (ob, ob->u.single, pc);
    }
}

const fde *
_Unwind_Find_FDE (void *pc, struct dwarf_eh_bases *bases)
{
  struct object *ob;
  const fde *f = NULL;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  /* Linear search through the classified objects, to find the one
     containing the pc.  Note that pc_begin is sorted descending, and
     we expect objects to be non-overlapping.  */
  for (ob = seen_objects; ob; ob = ob->next)
    if (pc >= ob->pc_begin)
      {
	f = search_object (ob, pc);
	if (f)
	  goto fini;
	break;
      }

  /* Classify and search the objects we've not yet processed.  */
  while ((ob = unseen_objects))
    {
      struct object **p;

      unseen_objects = ob->next;
      f = search_object (ob, pc);

      /* Insert the object into the classified list.  */
      for (p = &seen_objects; *p ; p = &(*p)->next)
	if ((*p)->pc_begin < ob->pc_begin)
	  break;
      ob->next = *p;
      *p = ob;

      if (f)
	goto fini;
    }

 fini:
  __gthread_mutex_unlock (&object_mutex);

  if (f)
    {
      int encoding;
      _Unwind_Ptr func;

      bases->tbase = ob->tbase;
      bases->dbase = ob->dbase;

      encoding = ob->s.b.encoding;
      if (ob->s.b.mixed_encoding)
	encoding = get_fde_encoding (f);
      read_encoded_value_with_base (encoding, base_from_object (encoding, ob),
				    f->pc_begin, &func);
      bases->func = (void *) func;
    }

  return f;
}
