/* Library support for -fsplit-stack.  */
/* Copyright (C) 2009-2015 Free Software Foundation, Inc.
   Contributed by Ian Lance Taylor <iant@google.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */

#include "tconfig.h"
#include "tsystem.h"
#include "coretypes.h"
#include "tm.h"
#include "libgcc_tm.h"

/* If inhibit_libc is defined, we cannot compile this file.  The
   effect is that people will not be able to use -fsplit-stack.  That
   is much better than failing the build, particularly since people
   will want to define inhibit_libc while building a compiler which
   can build glibc.  */

#ifndef inhibit_libc

#include <assert.h>
#include <errno.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/uio.h>

#include "generic-morestack.h"

typedef unsigned uintptr_type __attribute__ ((mode (pointer)));

/* This file contains subroutines that are used by code compiled with
   -fsplit-stack.  */

/* Declare functions to avoid warnings--there is no header file for
   these internal functions.  We give most of these functions the
   flatten attribute in order to minimize their stack usage--here we
   must minimize stack usage even at the cost of code size, and in
   general inlining everything will do that.  */

extern void
__generic_morestack_set_initial_sp (void *sp, size_t len)
  __attribute__ ((no_split_stack, flatten, visibility ("hidden")));

extern void *
__generic_morestack (size_t *frame_size, void *old_stack, size_t param_size)
  __attribute__ ((no_split_stack, flatten, visibility ("hidden")));

extern void *
__generic_releasestack (size_t *pavailable)
  __attribute__ ((no_split_stack, flatten, visibility ("hidden")));

extern void
__morestack_block_signals (void)
  __attribute__ ((no_split_stack, flatten, visibility ("hidden")));

extern void
__morestack_unblock_signals (void)
  __attribute__ ((no_split_stack, flatten, visibility ("hidden")));

extern size_t
__generic_findstack (void *stack)
  __attribute__ ((no_split_stack, flatten, visibility ("hidden")));

extern void
__morestack_load_mmap (void)
  __attribute__ ((no_split_stack, visibility ("hidden")));

extern void *
__morestack_allocate_stack_space (size_t size)
  __attribute__ ((visibility ("hidden")));

/* These are functions which -fsplit-stack code can call.  These are
   not called by the compiler, and are not hidden.  FIXME: These
   should be in some header file somewhere, somehow.  */

extern void *
__splitstack_find (void *, void *, size_t *, void **, void **, void **)
  __attribute__ ((visibility ("default")));

extern void
__splitstack_block_signals (int *, int *)
  __attribute__ ((visibility ("default")));

extern void
__splitstack_getcontext (void *context[10])
  __attribute__ ((no_split_stack, visibility ("default")));

extern void
__splitstack_setcontext (void *context[10])
  __attribute__ ((no_split_stack, visibility ("default")));

extern void *
__splitstack_makecontext (size_t, void *context[10], size_t *)
  __attribute__ ((visibility ("default")));

extern void *
__splitstack_resetcontext (void *context[10], size_t *)
  __attribute__ ((visibility ("default")));

extern void
__splitstack_releasecontext (void *context[10])
  __attribute__ ((visibility ("default")));

extern void
__splitstack_block_signals_context (void *context[10], int *, int *)
  __attribute__ ((visibility ("default")));

extern void *
__splitstack_find_context (void *context[10], size_t *, void **, void **,
			   void **)
  __attribute__ ((visibility ("default")));

/* These functions must be defined by the processor specific code.  */

extern void *__morestack_get_guard (void)
  __attribute__ ((no_split_stack, visibility ("hidden")));

extern void __morestack_set_guard (void *)
  __attribute__ ((no_split_stack, visibility ("hidden")));

extern void *__morestack_make_guard (void *, size_t)
  __attribute__ ((no_split_stack, visibility ("hidden")));

/* When we allocate a stack segment we put this header at the
   start.  */

struct stack_segment
{
  /* The previous stack segment--when a function running on this stack
     segment returns, it will run on the previous one.  */
  struct stack_segment *prev;
  /* The next stack segment, if it has been allocated--when a function
     is running on this stack segment, the next one is not being
     used.  */
  struct stack_segment *next;
  /* The total size of this stack segment.  */
  size_t size;
  /* The stack address when this stack was created.  This is used when
     popping the stack.  */
  void *old_stack;
  /* A list of memory blocks allocated by dynamic stack
     allocation.  */
  struct dynamic_allocation_blocks *dynamic_allocation;
  /* A list of dynamic memory blocks no longer needed.  */
  struct dynamic_allocation_blocks *free_dynamic_allocation;
  /* An extra pointer in case we need some more information some
     day.  */
  void *extra;
};

/* This structure holds the (approximate) initial stack pointer and
   size for the system supplied stack for a thread.  This is set when
   the thread is created.  We also store a sigset_t here to hold the
   signal mask while splitting the stack, since we don't want to store
   that on the stack.  */

struct initial_sp
{
  /* The initial stack pointer.  */
  void *sp;
  /* The stack length.  */
  size_t len;
  /* A signal mask, put here so that the thread can use it without
     needing stack space.  */
  sigset_t mask;
  /* Non-zero if we should not block signals.  This is a reversed flag
     so that the default zero value is the safe value.  The type is
     uintptr_type because it replaced one of the void * pointers in
     extra.  */
  uintptr_type dont_block_signals;
  /* Some extra space for later extensibility.  */
  void *extra[4];
};

/* A list of memory blocks allocated by dynamic stack allocation.
   This is used for code that calls alloca or uses variably sized
   arrays.  */

struct dynamic_allocation_blocks
{
  /* The next block in the list.  */
  struct dynamic_allocation_blocks *next;
  /* The size of the allocated memory.  */
  size_t size;
  /* The allocated memory.  */
  void *block;
};

/* These thread local global variables must be shared by all split
   stack code across shared library boundaries.  Therefore, they have
   default visibility.  They have extensibility fields if needed for
   new versions.  If more radical changes are needed, new code can be
   written using new variable names, while still using the existing
   variables in a backward compatible manner.  Symbol versioning is
   also used, although, since these variables are only referenced by
   code in this file and generic-morestack-thread.c, it is likely that
   simply using new names will suffice.  */

/* The first stack segment allocated for this thread.  */

__thread struct stack_segment *__morestack_segments
  __attribute__ ((visibility ("default")));

/* The stack segment that we think we are currently using.  This will
   be correct in normal usage, but will be incorrect if an exception
   unwinds into a different stack segment or if longjmp jumps to a
   different stack segment.  */

__thread struct stack_segment *__morestack_current_segment
  __attribute__ ((visibility ("default")));

/* The initial stack pointer and size for this thread.  */

__thread struct initial_sp __morestack_initial_sp
  __attribute__ ((visibility ("default")));

/* A static signal mask, to avoid taking up stack space.  */

static sigset_t __morestack_fullmask;

/* Convert an integer to a decimal string without using much stack
   space.  Return a pointer to the part of the buffer to use.  We use
   this instead of sprintf because sprintf will require too much stack
   space.  */

static char *
print_int (int val, char *buf, int buflen, size_t *print_len)
{
  int is_negative;
  int i;
  unsigned int uval;

  uval = (unsigned int) val;
  if (val >= 0)
    is_negative = 0;
  else
    {
      is_negative = 1;
      uval = - uval;
    }

  i = buflen;
  do
    {
      --i;
      buf[i] = '0' + (uval % 10);
      uval /= 10;
    }
  while (uval != 0 && i > 0);

  if (is_negative)
    {
      if (i > 0)
	--i;
      buf[i] = '-';
    }

  *print_len = buflen - i;
  return buf + i;
}
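
/* A minimal usage sketch for print_int, to show how the result is
   meant to be consumed (the locals here are purely illustrative):

     char buf[24];
     size_t len;
     char *s = print_int (-1234, buf, sizeof buf, &len);
     // s now points at "-1234" inside buf and len is 5.  The digits
     // are not NUL terminated, so the pointer and length must be
     // passed together, as __morestack_fail does when it hands them
     // to writev.
*/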

/* Print the string MSG/LEN, the errno number ERR, and a newline on
   stderr.  Then crash.  */

void
__morestack_fail (const char *, size_t, int) __attribute__ ((noreturn));

void
__morestack_fail (const char *msg, size_t len, int err)
{
  char buf[24];
  static const char nl[] = "\n";
  struct iovec iov[3];
  union { char *p; const char *cp; } const_cast;

  const_cast.cp = msg;
  iov[0].iov_base = const_cast.p;
  iov[0].iov_len = len;
  /* We can't call strerror, because it may try to translate the error
     message, and that would use too much stack space.  */
  iov[1].iov_base = print_int (err, buf, sizeof buf, &iov[1].iov_len);
  const_cast.cp = &nl[0];
  iov[2].iov_base = const_cast.p;
  iov[2].iov_len = sizeof nl - 1;
  /* FIXME: On systems without writev we need to issue three write
     calls, or punt on printing errno.  For now this is irrelevant
     since stack splitting only works on GNU/Linux anyhow.  */
  writev (2, iov, 3);
  abort ();
}

/* Allocate a new stack segment.  FRAME_SIZE is the required frame
   size.  */

static struct stack_segment *
allocate_segment (size_t frame_size)
{
  static unsigned int static_pagesize;
  static int use_guard_page;
  unsigned int pagesize;
  unsigned int overhead;
  unsigned int allocate;
  void *space;
  struct stack_segment *pss;

  pagesize = static_pagesize;
  if (pagesize == 0)
    {
      unsigned int p;

      pagesize = getpagesize ();

#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
      p = __sync_val_compare_and_swap (&static_pagesize, 0, pagesize);
#else
      /* Just hope this assignment is atomic.  */
      static_pagesize = pagesize;
      p = 0;
#endif

      use_guard_page = getenv ("SPLIT_STACK_GUARD") != 0;

      /* FIXME: I'm not sure this assert should be in the released
	 code.  */
      assert (p == 0 || p == pagesize);
    }

  overhead = sizeof (struct stack_segment);

  allocate = pagesize;
  if (allocate < MINSIGSTKSZ)
    allocate = ((MINSIGSTKSZ + overhead + pagesize - 1)
		& ~ (pagesize - 1));
  if (allocate < frame_size)
    allocate = ((frame_size + overhead + pagesize - 1)
		& ~ (pagesize - 1));
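
  /* As a worked example (the sizes are illustrative assumptions, not
     part of any interface): with 4096-byte pages and a 56-byte
     struct stack_segment, a frame_size of 10000 gives
     (10000 + 56 + 4095) & ~4095 == 12288, i.e. three whole pages
     holding both the segment header and the requested frame.  */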

  if (use_guard_page)
    allocate += pagesize;

  /* FIXME: If this binary requires an executable stack, then we need
     to set PROT_EXEC.  Unfortunately figuring that out is complicated
     and target dependent.  We would need to use dl_iterate_phdr to
     see if there is any object which does not have a PT_GNU_STACK
     phdr, though only for architectures which use that mechanism.  */
  space = mmap (NULL, allocate, PROT_READ | PROT_WRITE,
		MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  if (space == MAP_FAILED)
    {
      static const char msg[] =
	"unable to allocate additional stack space: errno ";
      __morestack_fail (msg, sizeof msg - 1, errno);
    }

  if (use_guard_page)
    {
      void *guard;

#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
      guard = space;
      space = (char *) space + pagesize;
#else
      guard = space + allocate - pagesize;
#endif

      mprotect (guard, pagesize, PROT_NONE);
      allocate -= pagesize;
    }

  pss = (struct stack_segment *) space;

  pss->prev = NULL;
  pss->next = NULL;
  pss->size = allocate - overhead;
  pss->dynamic_allocation = NULL;
  pss->free_dynamic_allocation = NULL;
  pss->extra = NULL;

  return pss;
}

/* Free a list of dynamic blocks.  */

static void
free_dynamic_blocks (struct dynamic_allocation_blocks *p)
{
  while (p != NULL)
    {
      struct dynamic_allocation_blocks *next;

      next = p->next;
      free (p->block);
      free (p);
      p = next;
    }
}

/* Merge two lists of dynamic blocks.  */

static struct dynamic_allocation_blocks *
merge_dynamic_blocks (struct dynamic_allocation_blocks *a,
		      struct dynamic_allocation_blocks *b)
{
  struct dynamic_allocation_blocks **pp;

  if (a == NULL)
    return b;
  if (b == NULL)
    return a;
  for (pp = &a->next; *pp != NULL; pp = &(*pp)->next)
    ;
  *pp = b;
  return a;
}

/* Release stack segments.  If FREE_DYNAMIC is non-zero, we also free
   any dynamic blocks.  Otherwise we return them.  */

struct dynamic_allocation_blocks *
__morestack_release_segments (struct stack_segment **pp, int free_dynamic)
{
  struct dynamic_allocation_blocks *ret;
  struct stack_segment *pss;

  ret = NULL;
  pss = *pp;
  while (pss != NULL)
    {
      struct stack_segment *next;
      unsigned int allocate;

      next = pss->next;

      if (pss->dynamic_allocation != NULL
	  || pss->free_dynamic_allocation != NULL)
	{
	  if (free_dynamic)
	    {
	      free_dynamic_blocks (pss->dynamic_allocation);
	      free_dynamic_blocks (pss->free_dynamic_allocation);
	    }
	  else
	    {
	      ret = merge_dynamic_blocks (pss->dynamic_allocation, ret);
	      ret = merge_dynamic_blocks (pss->free_dynamic_allocation, ret);
	    }
	}

      allocate = pss->size + sizeof (struct stack_segment);
      if (munmap (pss, allocate) < 0)
	{
	  static const char msg[] = "munmap of stack space failed: errno ";
	  __morestack_fail (msg, sizeof msg - 1, errno);
	}

      pss = next;
    }
  *pp = NULL;

  return ret;
}

/* This function is called by a processor specific function to set the
   initial stack pointer for a thread.  The operating system will
   always create a stack for a thread.  Here we record a stack pointer
   near the base of that stack.  The size argument lets the processor
   specific code estimate how much stack space is available on this
   initial stack.  */

void
__generic_morestack_set_initial_sp (void *sp, size_t len)
{
  /* The stack pointer most likely starts on a page boundary.  Adjust
     to the nearest 512 byte boundary.  It's not essential that we be
     precise here; getting it wrong will just leave some stack space
     unused.  */
#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
  sp = (void *) ((((__UINTPTR_TYPE__) sp + 511U) / 512U) * 512U);
#else
  sp = (void *) ((((__UINTPTR_TYPE__) sp - 511U) / 512U) * 512U);
#endif
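
  /* For instance, on a downward-growing stack a pointer such as
     0x7ffff0001234 (an address chosen purely for illustration) is
     recorded as 0x7ffff0001400; as noted above, the imprecision only
     means a few hundred bytes of the initial stack may go unused.  */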

  __morestack_initial_sp.sp = sp;
  __morestack_initial_sp.len = len;
  sigemptyset (&__morestack_initial_sp.mask);

  sigfillset (&__morestack_fullmask);
#if defined(__GLIBC__) && defined(__linux__)
  /* In glibc, the first two real time signals are used by the NPTL
     threading library.  By taking them out of the set of signals, we
     avoid copying the signal mask in pthread_sigmask.  More
     importantly, pthread_sigmask uses less stack space on x86_64.  */
  sigdelset (&__morestack_fullmask, __SIGRTMIN);
  sigdelset (&__morestack_fullmask, __SIGRTMIN + 1);
#endif
}

/* This function is called by a processor specific function which is
   run in the prologue when more stack is needed.  The processor
   specific function handles the details of saving registers and
   frobbing the actual stack pointer.  This function is responsible
   for allocating a new stack segment and for copying a parameter
   block from the old stack to the new one.  On function entry
   *PFRAME_SIZE is the size of the required stack frame--the returned
   stack must be at least this large.  On function exit *PFRAME_SIZE
   is the amount of space remaining on the allocated stack.  OLD_STACK
   points at the parameters on the old stack (really the current one
   while this function is running).  OLD_STACK is saved so that it can
   be returned by a later call to __generic_releasestack.  PARAM_SIZE
   is the size in bytes of parameters to copy to the new stack.  This
   function returns a pointer to the new stack segment, pointing to
   the memory after the parameters have been copied.  The returned
   value minus the returned *PFRAME_SIZE (or plus if the stack grows
   upward) is the first address on the stack which should not be used.

   This function is running on the old stack and has only a limited
   amount of stack space available.  */

void *
__generic_morestack (size_t *pframe_size, void *old_stack, size_t param_size)
{
  size_t frame_size = *pframe_size;
  struct stack_segment *current;
  struct stack_segment **pp;
  struct dynamic_allocation_blocks *dynamic;
  char *from;
  char *to;
  void *ret;
  size_t i;
  size_t aligned;

  current = __morestack_current_segment;

  pp = current != NULL ? &current->next : &__morestack_segments;
  if (*pp != NULL && (*pp)->size < frame_size)
    dynamic = __morestack_release_segments (pp, 0);
  else
    dynamic = NULL;
  current = *pp;

  if (current == NULL)
    {
      current = allocate_segment (frame_size + param_size);
      current->prev = __morestack_current_segment;
      *pp = current;
    }

  current->old_stack = old_stack;

  __morestack_current_segment = current;

  if (dynamic != NULL)
    {
      /* Move the free blocks onto our list.  We don't want to call
	 free here, as we are short on stack space.  */
      current->free_dynamic_allocation =
	merge_dynamic_blocks (dynamic, current->free_dynamic_allocation);
    }

  *pframe_size = current->size - param_size;

  /* Align the returned stack to a 32-byte boundary.  */
  aligned = (param_size + 31) & ~ (size_t) 31;

#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
  {
    char *bottom = (char *) (current + 1) + current->size;
    to = bottom - aligned;
    ret = bottom - aligned;
  }
#else
  to = (char *) (current + 1);
  to += aligned - param_size;
  ret = (char *) (current + 1) + aligned;
#endif

  /* We don't call memcpy to avoid worrying about the dynamic linker
     trying to resolve it.  */
  from = (char *) old_stack;
  for (i = 0; i < param_size; i++)
    *to++ = *from++;

  return ret;
}

/* This function is called by a processor specific function when it is
   ready to release a stack segment.  We don't actually release the
   stack segment, we just move back to the previous one.  The current
   stack segment will still be available if we need it in
   __generic_morestack.  This returns a pointer to the new stack
   segment to use, which is the one saved by a previous call to
   __generic_morestack.  The processor specific function is then
   responsible for actually updating the stack pointer.  This sets
   *PAVAILABLE to the amount of stack space now available.  */

void *
__generic_releasestack (size_t *pavailable)
{
  struct stack_segment *current;
  void *old_stack;

  current = __morestack_current_segment;
  old_stack = current->old_stack;
  current = current->prev;
  __morestack_current_segment = current;

  if (current != NULL)
    {
#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
      *pavailable = (char *) old_stack - (char *) (current + 1);
#else
      *pavailable = (char *) (current + 1) + current->size - (char *) old_stack;
#endif
    }
  else
    {
      size_t used;

      /* We have popped back to the original stack.  */
#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
      if ((char *) old_stack >= (char *) __morestack_initial_sp.sp)
	used = 0;
      else
	used = (char *) __morestack_initial_sp.sp - (char *) old_stack;
#else
      if ((char *) old_stack <= (char *) __morestack_initial_sp.sp)
	used = 0;
      else
	used = (char *) old_stack - (char *) __morestack_initial_sp.sp;
#endif

      if (used > __morestack_initial_sp.len)
	*pavailable = 0;
      else
	*pavailable = __morestack_initial_sp.len - used;
    }

  return old_stack;
}

/* Block signals while splitting the stack.  This avoids trouble if we
   try to invoke a signal handler which itself wants to split the
   stack.  */

extern int pthread_sigmask (int, const sigset_t *, sigset_t *)
  __attribute__ ((weak));

void
__morestack_block_signals (void)
{
  if (__morestack_initial_sp.dont_block_signals)
    ;
  else if (pthread_sigmask)
    pthread_sigmask (SIG_BLOCK, &__morestack_fullmask,
		     &__morestack_initial_sp.mask);
  else
    sigprocmask (SIG_BLOCK, &__morestack_fullmask,
		 &__morestack_initial_sp.mask);
}

/* Unblock signals while splitting the stack.  */

void
__morestack_unblock_signals (void)
{
  if (__morestack_initial_sp.dont_block_signals)
    ;
  else if (pthread_sigmask)
    pthread_sigmask (SIG_SETMASK, &__morestack_initial_sp.mask, NULL);
  else
    sigprocmask (SIG_SETMASK, &__morestack_initial_sp.mask, NULL);
}

/* This function is called to allocate dynamic stack space, for alloca
   or a variably sized array.  This is a regular function with
   sufficient stack space, so we just use malloc to allocate the
   space.  We attach the allocated blocks to the current stack
   segment, so that they will eventually be reused or freed.  */

void *
__morestack_allocate_stack_space (size_t size)
{
  struct stack_segment *seg, *current;
  struct dynamic_allocation_blocks *p;

  /* We have to block signals to avoid getting confused if we get
     interrupted by a signal whose handler itself uses alloca or a
     variably sized array.  */
  __morestack_block_signals ();

  /* Since we don't want to call free while we are low on stack space,
     we may have a list of already allocated blocks waiting to be
     freed.  Release them all, unless we find one that is large
     enough.  We don't look at every block to see if one is large
     enough, just the first one, because we aren't trying to build a
     memory allocator here, we're just trying to speed up common
     cases.  */

  current = __morestack_current_segment;
  p = NULL;
  for (seg = __morestack_segments; seg != NULL; seg = seg->next)
    {
      p = seg->free_dynamic_allocation;
      if (p != NULL)
	{
	  if (p->size >= size)
	    {
	      seg->free_dynamic_allocation = p->next;
	      break;
	    }

	  free_dynamic_blocks (p);
	  seg->free_dynamic_allocation = NULL;
	  p = NULL;
	}
    }

  if (p == NULL)
    {
      /* We need to allocate additional memory.  */
      p = malloc (sizeof (*p));
      if (p == NULL)
	abort ();
      p->size = size;
      p->block = malloc (size);
      if (p->block == NULL)
	abort ();
    }

  /* If we are still on the initial stack, then we have a space leak.
     FIXME.  */
  if (current != NULL)
    {
      p->next = current->dynamic_allocation;
      current->dynamic_allocation = p;
    }

  __morestack_unblock_signals ();

  return p->block;
}

/* Find the stack segment for STACK and return the amount of space
   available.  This is used when unwinding the stack because of an
   exception, in order to reset the stack guard correctly.  */

size_t
__generic_findstack (void *stack)
{
  struct stack_segment *pss;
  size_t used;

  for (pss = __morestack_current_segment; pss != NULL; pss = pss->prev)
    {
      if ((char *) pss < (char *) stack
	  && (char *) pss + pss->size > (char *) stack)
	{
	  __morestack_current_segment = pss;
#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
	  return (char *) stack - (char *) (pss + 1);
#else
	  return (char *) (pss + 1) + pss->size - (char *) stack;
#endif
	}
    }

  /* We have popped back to the original stack.  */

  if (__morestack_initial_sp.sp == NULL)
    return 0;

#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
  if ((char *) stack >= (char *) __morestack_initial_sp.sp)
    used = 0;
  else
    used = (char *) __morestack_initial_sp.sp - (char *) stack;
#else
  if ((char *) stack <= (char *) __morestack_initial_sp.sp)
    used = 0;
  else
    used = (char *) stack - (char *) __morestack_initial_sp.sp;
#endif

  if (used > __morestack_initial_sp.len)
    return 0;
  else
    return __morestack_initial_sp.len - used;
}

/* This function is called at program startup time to make sure that
   mmap, munmap, and getpagesize are resolved if linking dynamically.
   We want to resolve them while we have enough stack for them, rather
   than calling into the dynamic linker while low on stack space.  */

void
__morestack_load_mmap (void)
{
  /* Call with bogus values to run faster.  We don't care if the call
     fails.  Pass __MORESTACK_CURRENT_SEGMENT to make sure that any
     TLS accessor function is resolved.  */
  mmap (__morestack_current_segment, 0, PROT_READ, MAP_ANONYMOUS, -1, 0);
  mprotect (NULL, 0, 0);
  munmap (0, getpagesize ());
}

/* This function may be used to iterate over the stack segments.
   This can be called like this.
     void *next_segment = NULL;
     void *next_sp = NULL;
     void *initial_sp = NULL;
     void *stack;
     size_t stack_size;
     while ((stack = __splitstack_find (next_segment, next_sp, &stack_size,
                                        &next_segment, &next_sp,
					&initial_sp)) != NULL)
       {
         // Stack segment starts at stack and is stack_size bytes long.
       }

   There is no way to iterate over the stack segments of a different
   thread.  However, what is permitted is for one thread to call this
   with the first two values NULL, to pass next_segment, next_sp, and
   initial_sp to a different thread, and then to suspend one way or
   another.  A different thread may run the subsequent
   __splitstack_find iterations.  Of course, this will only work if the
   first thread is suspended during the __splitstack_find iterations.
   If not, the second thread will be looking at the stack while it is
   changing, and anything could happen.

   FIXME: This should be declared in some header file, but where?  */

void *
__splitstack_find (void *segment_arg, void *sp, size_t *len,
		   void **next_segment, void **next_sp,
		   void **initial_sp)
{
  struct stack_segment *segment;
  void *ret;
  char *nsp;

  if (segment_arg == (void *) (uintptr_type) 1)
    {
      char *isp = (char *) *initial_sp;

      if (isp == NULL)
	return NULL;

      *next_segment = (void *) (uintptr_type) 2;
      *next_sp = NULL;
#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
      if ((char *) sp >= isp)
	return NULL;
      *len = (char *) isp - (char *) sp;
      return sp;
#else
      if ((char *) sp <= (char *) isp)
	return NULL;
      *len = (char *) sp - (char *) isp;
      return (void *) isp;
#endif
    }
  else if (segment_arg == (void *) (uintptr_type) 2)
    return NULL;
  else if (segment_arg != NULL)
    segment = (struct stack_segment *) segment_arg;
  else
    {
      *initial_sp = __morestack_initial_sp.sp;
      segment = __morestack_current_segment;
      sp = (void *) &segment;
      while (1)
	{
	  if (segment == NULL)
	    return __splitstack_find ((void *) (uintptr_type) 1, sp, len,
				      next_segment, next_sp, initial_sp);
	  if ((char *) sp >= (char *) (segment + 1)
	      && (char *) sp <= (char *) (segment + 1) + segment->size)
	    break;
	  segment = segment->prev;
	}
    }

  if (segment->prev == NULL)
    *next_segment = (void *) (uintptr_type) 1;
  else
    *next_segment = segment->prev;

  /* The old_stack value is the address of the function parameters of
     the function which called __morestack.  So if f1 called f2 which
     called __morestack, the stack looks like this:

         parameters       <- old_stack
         return in f1
	 return in f2
	 registers pushed by __morestack

     The registers pushed by __morestack may not be visible on any
     other stack, if we are being called by a signal handler
     immediately after the call to __morestack_unblock_signals.  We
     want to adjust our return value to include those registers.  This
     is target dependent.  */

  nsp = (char *) segment->old_stack;

  if (nsp == NULL)
    {
      /* We've reached the top of the stack.  */
      *next_segment = (void *) (uintptr_type) 2;
    }
  else
    {
#if defined (__x86_64__)
      nsp -= 12 * sizeof (void *);
#elif defined (__i386__)
      nsp -= 6 * sizeof (void *);
#else
#error "unrecognized target"
#endif

      *next_sp = (void *) nsp;
    }

#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
  *len = (char *) (segment + 1) + segment->size - (char *) sp;
  ret = (void *) sp;
#else
  *len = (char *) sp - (char *) (segment + 1);
  ret = (void *) (segment + 1);
#endif

  return ret;
}

/* Tell the split stack code whether it has to block signals while
   manipulating the stack.  This is for programs in which some threads
   block all signals.  If a thread already blocks signals, there is no
   need for the split stack code to block them as well.  If NEW is not
   NULL, then if *NEW is non-zero signals will be blocked while
   splitting the stack, otherwise they will not.  If OLD is not NULL,
   *OLD will be set to the old value.  */

void
__splitstack_block_signals (int *new, int *old)
{
  if (old != NULL)
    *old = __morestack_initial_sp.dont_block_signals ? 0 : 1;
  if (new != NULL)
    __morestack_initial_sp.dont_block_signals = *new ? 0 : 1;
}
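
/* A short usage sketch (the variable names are only illustrative): a
   thread which already blocks every signal can switch the split-stack
   blocking off and later restore the previous setting:

     int off = 0;
     int old;
     __splitstack_block_signals (&off, &old);
     // ... code that splits the stack without any signal juggling ...
     __splitstack_block_signals (&old, NULL);
*/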

/* The offsets into the arrays used by __splitstack_getcontext and
   __splitstack_setcontext.  */

enum __splitstack_context_offsets
{
  MORESTACK_SEGMENTS = 0,
  CURRENT_SEGMENT = 1,
  CURRENT_STACK = 2,
  STACK_GUARD = 3,
  INITIAL_SP = 4,
  INITIAL_SP_LEN = 5,
  BLOCK_SIGNALS = 6,

  NUMBER_OFFSETS = 10
};

/* Get the current split stack context.  This may be used for
   coroutine switching, similar to getcontext.  The argument should
   have at least 10 void *pointers for extensibility, although we
   don't currently use all of them.  This would normally be called
   immediately before a call to getcontext or swapcontext or
   setjmp.  */

void
__splitstack_getcontext (void *context[NUMBER_OFFSETS])
{
  memset (context, 0, NUMBER_OFFSETS * sizeof (void *));
  context[MORESTACK_SEGMENTS] = (void *) __morestack_segments;
  context[CURRENT_SEGMENT] = (void *) __morestack_current_segment;
  context[CURRENT_STACK] = (void *) &context;
  context[STACK_GUARD] = __morestack_get_guard ();
  context[INITIAL_SP] = (void *) __morestack_initial_sp.sp;
  context[INITIAL_SP_LEN] = (void *) (uintptr_type) __morestack_initial_sp.len;
  context[BLOCK_SIGNALS] = (void *) __morestack_initial_sp.dont_block_signals;
}

/* Set the current split stack context.  The argument should be a
   context previously passed to __splitstack_getcontext.  This would
   normally be called immediately after a call to getcontext or
   swapcontext or setjmp if something jumped to it.  */

void
__splitstack_setcontext (void *context[NUMBER_OFFSETS])
{
  __morestack_segments = (struct stack_segment *) context[MORESTACK_SEGMENTS];
  __morestack_current_segment =
    (struct stack_segment *) context[CURRENT_SEGMENT];
  __morestack_set_guard (context[STACK_GUARD]);
  __morestack_initial_sp.sp = context[INITIAL_SP];
  __morestack_initial_sp.len = (size_t) context[INITIAL_SP_LEN];
  __morestack_initial_sp.dont_block_signals =
    (uintptr_type) context[BLOCK_SIGNALS];
}
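
/* A rough sketch of coroutine switching with the two functions above,
   following the pattern described in their comments (the ucontext_t
   variables are hypothetical and error checking is omitted):

     void *my_ss[10];            // this coroutine's split stack state
     ucontext_t my_uc, other_uc; // saved machine contexts
     __splitstack_getcontext (&my_ss[0]);
     swapcontext (&my_uc, &other_uc);
     // When something later swaps back to us, execution resumes here,
     // so restore our own split stack state before doing real work.
     __splitstack_setcontext (&my_ss[0]);
*/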

/* Create a new split stack context.  This will allocate a new stack
   segment which may be used by a coroutine.  STACK_SIZE is the
   minimum size of the new stack.  The caller is responsible for
   actually setting the stack pointer.  This would normally be called
   before a call to makecontext, and the returned stack pointer and
   size would be used to set the uc_stack field.  A function called
   via makecontext on a stack created by __splitstack_makecontext may
   not return.  Note that the returned pointer points to the lowest
   address in the stack space, and thus may not be the value to which
   to set the stack pointer.  */

void *
__splitstack_makecontext (size_t stack_size, void *context[NUMBER_OFFSETS],
			  size_t *size)
{
  struct stack_segment *segment;
  void *initial_sp;

  memset (context, 0, NUMBER_OFFSETS * sizeof (void *));
  segment = allocate_segment (stack_size);
  context[MORESTACK_SEGMENTS] = segment;
  context[CURRENT_SEGMENT] = segment;
#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
  initial_sp = (void *) ((char *) (segment + 1) + segment->size);
#else
  initial_sp = (void *) (segment + 1);
#endif
  context[STACK_GUARD] = __morestack_make_guard (initial_sp, segment->size);
  context[INITIAL_SP] = NULL;
  context[INITIAL_SP_LEN] = 0;
  *size = segment->size;
  return (void *) (segment + 1);
}
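
/* A sketch of setting up a coroutine stack with this function (error
   handling omitted; coroutine_entry is a hypothetical function which
   must never return):

     void *ss_ctx[10];
     size_t size;
     ucontext_t uc;
     void *sp = __splitstack_makecontext (0x10000, &ss_ctx[0], &size);
     getcontext (&uc);
     uc.uc_stack.ss_sp = sp;
     uc.uc_stack.ss_size = size;
     uc.uc_link = NULL;
     makecontext (&uc, coroutine_entry, 0);
     // Before the first switch to uc, the switching code would
     // typically call __splitstack_setcontext (&ss_ctx[0]) so that the
     // new segment list is in effect when coroutine_entry starts.
*/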

/* Given an existing split stack context, reset it back to the start
   of the stack.  Return the stack pointer and size, appropriate for
   use with makecontext.  This may be used if a coroutine exits, in
   order to reuse the stack segments for a new coroutine.  */

void *
__splitstack_resetcontext (void *context[10], size_t *size)
{
  struct stack_segment *segment;
  void *initial_sp;
  size_t initial_size;
  void *ret;

  /* Reset the context assuming that MORESTACK_SEGMENTS, INITIAL_SP
     and INITIAL_SP_LEN are correct.  */

  segment = context[MORESTACK_SEGMENTS];
  context[CURRENT_SEGMENT] = segment;
  context[CURRENT_STACK] = NULL;
  if (segment == NULL)
    {
      initial_sp = context[INITIAL_SP];
      initial_size = (uintptr_type) context[INITIAL_SP_LEN];
      ret = initial_sp;
#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
      ret = (void *) ((char *) ret - initial_size);
#endif
    }
  else
    {
#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
      initial_sp = (void *) ((char *) (segment + 1) + segment->size);
#else
      initial_sp = (void *) (segment + 1);
#endif
      initial_size = segment->size;
      ret = (void *) (segment + 1);
    }
  context[STACK_GUARD] = __morestack_make_guard (initial_sp, initial_size);
  context[BLOCK_SIGNALS] = NULL;
  *size = initial_size;
  return ret;
}

/* Release all the memory associated with a splitstack context.  This
   may be used if a coroutine exits and the associated stack should be
   freed.  */

void
__splitstack_releasecontext (void *context[10])
{
  __morestack_release_segments (((struct stack_segment **)
				 &context[MORESTACK_SEGMENTS]),
				1);
}

/* Like __splitstack_block_signals, but operating on CONTEXT, rather
   than on the current state.  */

void
__splitstack_block_signals_context (void *context[NUMBER_OFFSETS], int *new,
				    int *old)
{
  if (old != NULL)
    *old = ((uintptr_type) context[BLOCK_SIGNALS]) != 0 ? 0 : 1;
  if (new != NULL)
    context[BLOCK_SIGNALS] = (void *) (uintptr_type) (*new ? 0 : 1);
}

/* Find the stack segments associated with a split stack context.
   This will return the address of the first stack segment and set
   *STACK_SIZE to its size.  It will set next_segment, next_sp, and
   initial_sp which may be passed to __splitstack_find to find the
   remaining segments.  */

void *
__splitstack_find_context (void *context[NUMBER_OFFSETS], size_t *stack_size,
			   void **next_segment, void **next_sp,
			   void **initial_sp)
{
  void *sp;
  struct stack_segment *segment;

  *initial_sp = context[INITIAL_SP];

  sp = context[CURRENT_STACK];
  if (sp == NULL)
    {
      /* Most likely this context was created but was never used.  The
	 value 2 is a code used by __splitstack_find to mean that we
	 have reached the end of the list of stacks.  */
      *next_segment = (void *) (uintptr_type) 2;
      *next_sp = NULL;
      *initial_sp = NULL;
      return NULL;
    }

  segment = context[CURRENT_SEGMENT];
  if (segment == NULL)
    {
      /* Most likely this context was saved by a thread which was not
	 created using __splitstack_makecontext and which has never
	 split the stack.  The value 1 is a code used by
	 __splitstack_find to look at the initial stack.  */
      segment = (struct stack_segment *) (uintptr_type) 1;
    }

  return __splitstack_find (segment, sp, stack_size, next_segment, next_sp,
			    initial_sp);
}

#endif /* !defined (inhibit_libc) */
