/* Simple garbage collection for the GNU compiler.
   Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */

/* Generic garbage collection (GC) functions and data, not specific to
   any particular GC implementation.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "hashtab.h"
#include "ggc.h"
#include "toplev.h"
#include "params.h"
#include "hosthooks.h"
#include "hosthooks-def.h"

#ifdef HAVE_SYS_RESOURCE_H
# include <sys/resource.h>
#endif

#ifdef HAVE_MMAP_FILE
# include <sys/mman.h>
# ifdef HAVE_MINCORE
/* This is on Solaris.  */
#  include <sys/types.h>
# endif
#endif

#ifndef MAP_FAILED
# define MAP_FAILED ((void *)-1)
#endif

#ifdef ENABLE_VALGRIND_CHECKING
# ifdef HAVE_VALGRIND_MEMCHECK_H
#  include <valgrind/memcheck.h>
# elif defined HAVE_MEMCHECK_H
#  include <memcheck.h>
# else
#  include <valgrind.h>
# endif
#else
/* Avoid #ifdef:s when we can help it.  */
#define VALGRIND_DISCARD(x)
#endif

/* When set, ggc_collect will do collection.  */
bool ggc_force_collect;

/* Statistics about the allocation.  */
static ggc_statistics *ggc_stats;

struct traversal_state;

static int ggc_htab_delete (void **, void *);
static hashval_t saving_htab_hash (const void *);
static int saving_htab_eq (const void *, const void *);
static int call_count (void **, void *);
static int call_alloc (void **, void *);
static int compare_ptr_data (const void *, const void *);
static void relocate_ptrs (void *, void *);
static void write_pch_globals (const struct ggc_root_tab * const *tab,
			       struct traversal_state *state);
static double ggc_rlimit_bound (double);

/* Maintain global roots that are preserved during GC.  */

/* Process a slot of an htab by deleting it if it has not been marked.  */

static int
ggc_htab_delete (void **slot, void *info)
{
  const struct ggc_cache_tab *r = (const struct ggc_cache_tab *) info;

  if (! (*r->marked_p) (*slot))
    htab_clear_slot (*r->base, slot);
  else
    (*r->cb) (*slot);

  return 1;
}

/* Iterate through all registered roots and mark each element.  */

void
ggc_mark_roots (void)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  const struct ggc_cache_tab *const *ct;
  const struct ggc_cache_tab *cti;
  size_t i;

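  /* The "deletable" roots are not scanned; they are simply cleared, so
     anything reachable only through them gets collected.  */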
  for (rt = gt_ggc_deletable_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      memset (rti->base, 0, rti->stride);

  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	(*rti->cb)(*(void **)((char *)rti->base + rti->stride * i));

  ggc_mark_stringpool ();

  /* Now scan all hash tables that have objects which are to be deleted if
     they are not already marked.  */
  for (ct = gt_ggc_cache_rtab; *ct; ct++)
    for (cti = *ct; cti->base != NULL; cti++)
      if (*cti->base)
	{
	  ggc_set_mark (*cti->base);
	  htab_traverse_noresize (*cti->base, ggc_htab_delete, (void *) cti);
	  ggc_set_mark ((*cti->base)->entries);
	}
}

/* Allocate a block of memory, then clear it.  */
void *
ggc_alloc_cleared_stat (size_t size MEM_STAT_DECL)
{
  void *buf = ggc_alloc_stat (size PASS_MEM_STAT);
  memset (buf, 0, size);
  return buf;
}

/* Resize a block of memory, possibly re-allocating it.  */
void *
ggc_realloc_stat (void *x, size_t size MEM_STAT_DECL)
{
  void *r;
  size_t old_size;

  if (x == NULL)
    return ggc_alloc_stat (size PASS_MEM_STAT);

  old_size = ggc_get_size (x);

  if (size <= old_size)
    {
      /* Mark the unwanted memory as inaccessible.  We also need to make
	 the "new" size accessible, since ggc_get_size returns the size of
	 the pool, not the size of the individually allocated object, the
	 size which was previously made accessible.  Unfortunately, we
	 don't know that previously allocated size.  Without that
	 knowledge we have to lose some initialization-tracking for the
	 old parts of the object.  An alternative is to mark the whole
	 old_size as reachable, but that would lose tracking of writes
	 after the end of the object (by small offsets).  Discard the
	 handle to avoid handle leak.  */
      VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS ((char *) x + size,
						old_size - size));
      VALGRIND_DISCARD (VALGRIND_MAKE_READABLE (x, size));
      return x;
    }

  r = ggc_alloc_stat (size PASS_MEM_STAT);

  /* Since ggc_get_size returns the size of the pool, not the size of the
     individually allocated object, we'd access parts of the old object
     that were marked invalid with the memcpy below.  We lose a bit of the
     initialization-tracking since some of it may be uninitialized.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_READABLE (x, old_size));

  memcpy (r, x, old_size);

  /* The old object is not supposed to be used anymore.  */
  ggc_free (x);

  return r;
}

/* Like ggc_alloc_cleared, but performs a multiplication.  */
void *
ggc_calloc (size_t s1, size_t s2)
{
  return ggc_alloc_cleared (s1 * s2);
}

/* These are for splay_tree_new_ggc.  */
void *
ggc_splay_alloc (int sz, void *nl)
{
  gcc_assert (!nl);
  return ggc_alloc (sz);
}

void
ggc_splay_dont_free (void * x ATTRIBUTE_UNUSED, void *nl)
{
  gcc_assert (!nl);
}

/* Print statistics that are independent of the collector in use.  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
		  ? (x) \
		  : ((x) < 1024*1024*10 \
		     ? (x) / 1024 \
		     : (x) / (1024*1024))))
#define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
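
/* For example, SCALE/LABEL render 4096 as "4096 ", 20480 as "20k" and
   64*1024*1024 as "64M".  */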

void
ggc_print_common_statistics (FILE *stream ATTRIBUTE_UNUSED,
			     ggc_statistics *stats)
{
  /* Set the pointer so that during collection we will actually gather
     the statistics.  */
  ggc_stats = stats;

  /* Then do one collection to fill in the statistics.  */
  ggc_collect ();

  /* At present, we don't really gather any interesting statistics.  */

  /* Don't gather statistics any more.  */
  ggc_stats = NULL;
}

/* Functions for saving and restoring GCable memory to disk.  */

static htab_t saving_htab;

struct ptr_data
{
  void *obj;
  void *note_ptr_cookie;
  gt_note_pointers note_ptr_fn;
  gt_handle_reorder reorder_fn;
  size_t size;
  void *new_addr;
  enum gt_types_enum type;
};

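/* Presumably GC pointers are at least 8-byte aligned, so discard the
   low-order bits before hashing.  */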
#define POINTER_HASH(x) (hashval_t)((long)x >> 3)

/* Register an object in the hash table.  */

int
gt_pch_note_object (void *obj, void *note_ptr_cookie,
		    gt_note_pointers note_ptr_fn,
		    enum gt_types_enum type)
{
  struct ptr_data **slot;

  if (obj == NULL || obj == (void *) 1)
    return 0;

  slot = (struct ptr_data **)
    htab_find_slot_with_hash (saving_htab, obj, POINTER_HASH (obj),
			      INSERT);
  if (*slot != NULL)
    {
      gcc_assert ((*slot)->note_ptr_fn == note_ptr_fn
		  && (*slot)->note_ptr_cookie == note_ptr_cookie);
      return 0;
    }

  *slot = xcalloc (sizeof (struct ptr_data), 1);
  (*slot)->obj = obj;
  (*slot)->note_ptr_fn = note_ptr_fn;
  (*slot)->note_ptr_cookie = note_ptr_cookie;
  if (note_ptr_fn == gt_pch_p_S)
    (*slot)->size = strlen (obj) + 1;
  else
    (*slot)->size = ggc_get_size (obj);
  (*slot)->type = type;
  return 1;
}

/* Register that REORDER_FN should be applied to OBJ (previously noted with
   gt_pch_note_object) before OBJ is written out.  */

void
gt_pch_note_reorder (void *obj, void *note_ptr_cookie,
		     gt_handle_reorder reorder_fn)
{
  struct ptr_data *data;

  if (obj == NULL || obj == (void *) 1)
    return;

  data = htab_find_with_hash (saving_htab, obj, POINTER_HASH (obj));
  gcc_assert (data && data->note_ptr_cookie == note_ptr_cookie);

  data->reorder_fn = reorder_fn;
}

/* Hash and equality functions for saving_htab, callbacks for htab_create.  */

static hashval_t
saving_htab_hash (const void *p)
{
  return POINTER_HASH (((struct ptr_data *)p)->obj);
}

static int
saving_htab_eq (const void *p1, const void *p2)
{
  return ((struct ptr_data *)p1)->obj == p2;
}

/* Handy state for the traversal functions.  */

struct traversal_state
{
  FILE *f;
  struct ggc_pch_data *d;
  size_t count;
  struct ptr_data **ptrs;
  size_t ptrs_i;
};

/* Callbacks for htab_traverse.  */

static int
call_count (void **slot, void *state_p)
{
  struct ptr_data *d = (struct ptr_data *)*slot;
  struct traversal_state *state = (struct traversal_state *)state_p;

  ggc_pch_count_object (state->d, d->obj, d->size,
			d->note_ptr_fn == gt_pch_p_S,
			d->type);
  state->count++;
  return 1;
}

static int
call_alloc (void **slot, void *state_p)
{
  struct ptr_data *d = (struct ptr_data *)*slot;
  struct traversal_state *state = (struct traversal_state *)state_p;

  d->new_addr = ggc_pch_alloc_object (state->d, d->obj, d->size,
				      d->note_ptr_fn == gt_pch_p_S,
				      d->type);
  state->ptrs[state->ptrs_i++] = d;
  return 1;
}

/* Callback for qsort; sort pointer descriptors by their relocated (new)
   address, so objects are written out in target memory order.  */

static int
compare_ptr_data (const void *p1_p, const void *p2_p)
{
  struct ptr_data *p1 = *(struct ptr_data *const *)p1_p;
  struct ptr_data *p2 = *(struct ptr_data *const *)p2_p;
  return (((size_t)p1->new_addr > (size_t)p2->new_addr)
	  - ((size_t)p1->new_addr < (size_t)p2->new_addr));
}

/* Callbacks for note_ptr_fn.  */

static void
relocate_ptrs (void *ptr_p, void *state_p)
{
  void **ptr = (void **)ptr_p;
  struct traversal_state *state ATTRIBUTE_UNUSED
    = (struct traversal_state *)state_p;
  struct ptr_data *result;

  if (*ptr == NULL || *ptr == (void *)1)
    return;

  result = htab_find_with_hash (saving_htab, *ptr, POINTER_HASH (*ptr));
  gcc_assert (result);
  *ptr = result->new_addr;
}

/* Write out, after relocation, the pointers in TAB.  */
static void
write_pch_globals (const struct ggc_root_tab * const *tab,
		   struct traversal_state *state)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;

  for (rt = tab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	{
	  void *ptr = *(void **)((char *)rti->base + rti->stride * i);
	  struct ptr_data *new_ptr;
	  if (ptr == NULL || ptr == (void *)1)
	    {
	      if (fwrite (&ptr, sizeof (void *), 1, state->f)
		  != 1)
		fatal_error ("can't write PCH file: %m");
	    }
	  else
	    {
	      new_ptr = htab_find_with_hash (saving_htab, ptr,
					     POINTER_HASH (ptr));
	      if (fwrite (&new_ptr->new_addr, sizeof (void *), 1, state->f)
		  != 1)
		fatal_error ("can't write PCH file: %m");
	    }
	}
}

/* Hold the information we need to mmap the file back in.  */

struct mmap_info
{
  size_t offset;
  size_t size;
  void *preferred_base;
};
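
/* The data written by gt_pch_save appears in the file in this order: the
   scalar root variables, the global root pointers (already translated to
   their new addresses), the struct mmap_info above, padding up to the next
   allocation-granularity boundary, and finally the object data itself,
   which gt_pch_restore maps (or reads) back in at MMI.PREFERRED_BASE.  */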

/* Write out the state of the compiler to F.  */

void
gt_pch_save (FILE *f)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;
  struct traversal_state state;
  char *this_object = NULL;
  size_t this_object_size = 0;
  struct mmap_info mmi;
  const size_t mmap_offset_alignment = host_hooks.gt_pch_alloc_granularity();

  gt_pch_save_stringpool ();

  saving_htab = htab_create (50000, saving_htab_hash, saving_htab_eq, free);

  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	(*rti->pchw)(*(void **)((char *)rti->base + rti->stride * i));

  for (rt = gt_pch_cache_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	(*rti->pchw)(*(void **)((char *)rti->base + rti->stride * i));

  /* Prepare the objects for writing, determine addresses and such.  */
  state.f = f;
  state.d = init_ggc_pch();
  state.count = 0;
  htab_traverse (saving_htab, call_count, &state);

  mmi.size = ggc_pch_total_size (state.d);

  /* Try to arrange things so that no relocation is necessary, but
     don't try very hard.  On most platforms, this will always work,
     and on the rest it's a lot of work to do better.
     (The extra work goes in HOST_HOOKS_GT_PCH_GET_ADDRESS and
     HOST_HOOKS_GT_PCH_USE_ADDRESS.)  */
  mmi.preferred_base = host_hooks.gt_pch_get_address (mmi.size, fileno (f));

  ggc_pch_this_base (state.d, mmi.preferred_base);

  state.ptrs = XNEWVEC (struct ptr_data *, state.count);
  state.ptrs_i = 0;
  htab_traverse (saving_htab, call_alloc, &state);
  qsort (state.ptrs, state.count, sizeof (*state.ptrs), compare_ptr_data);

  /* Write out all the scalar variables.  */
  for (rt = gt_pch_scalar_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      if (fwrite (rti->base, rti->stride, 1, f) != 1)
	fatal_error ("can't write PCH file: %m");

  /* Write out all the global pointers, after translation.  */
  write_pch_globals (gt_ggc_rtab, &state);
  write_pch_globals (gt_pch_cache_rtab, &state);

  /* Pad the PCH file so that the mmapped area starts on an allocation
     granularity (usually page) boundary.  */
  {
    long o;
    o = ftell (state.f) + sizeof (mmi);
    if (o == -1)
      fatal_error ("can't get position in PCH file: %m");
    mmi.offset = mmap_offset_alignment - o % mmap_offset_alignment;
    if (mmi.offset == mmap_offset_alignment)
      mmi.offset = 0;
    mmi.offset += o;
  }
  if (fwrite (&mmi, sizeof (mmi), 1, state.f) != 1)
    fatal_error ("can't write PCH file: %m");
  if (mmi.offset != 0
      && fseek (state.f, mmi.offset, SEEK_SET) != 0)
    fatal_error ("can't write padding to PCH file: %m");

  ggc_pch_prepare_write (state.d, state.f);

  /* Actually write out the objects.  */
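  /* Pointers inside each object are relocated in place (via relocate_ptrs)
     before the object is written out; THIS_OBJECT keeps a scratch copy of
     the original bytes so they can be restored afterwards.  Objects noted
     with gt_pch_p_S (string pool entries) are not restored.  */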
  for (i = 0; i < state.count; i++)
    {
      if (this_object_size < state.ptrs[i]->size)
	{
	  this_object_size = state.ptrs[i]->size;
	  this_object = xrealloc (this_object, this_object_size);
	}
      memcpy (this_object, state.ptrs[i]->obj, state.ptrs[i]->size);
      if (state.ptrs[i]->reorder_fn != NULL)
	state.ptrs[i]->reorder_fn (state.ptrs[i]->obj,
				   state.ptrs[i]->note_ptr_cookie,
				   relocate_ptrs, &state);
      state.ptrs[i]->note_ptr_fn (state.ptrs[i]->obj,
				  state.ptrs[i]->note_ptr_cookie,
				  relocate_ptrs, &state);
      ggc_pch_write_object (state.d, state.f, state.ptrs[i]->obj,
			    state.ptrs[i]->new_addr, state.ptrs[i]->size,
			    state.ptrs[i]->note_ptr_fn == gt_pch_p_S);
      if (state.ptrs[i]->note_ptr_fn != gt_pch_p_S)
	memcpy (state.ptrs[i]->obj, this_object, state.ptrs[i]->size);
    }
  ggc_pch_finish (state.d, state.f);
  gt_pch_fixup_stringpool ();

  free (state.ptrs);
  htab_delete (saving_htab);
}

/* Read the state of the compiler back in from F.  */

void
gt_pch_restore (FILE *f)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;
  struct mmap_info mmi;
  int result;

  /* Delete any deletable objects.  This makes ggc_pch_read much
     faster, as it can be sure that no GCable objects remain other
     than the ones just read in.  */
  for (rt = gt_ggc_deletable_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      memset (rti->base, 0, rti->stride);

  /* Read in all the scalar variables.  */
  for (rt = gt_pch_scalar_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      if (fread (rti->base, rti->stride, 1, f) != 1)
	fatal_error ("can't read PCH file: %m");

  /* Read in all the global pointers, in 6 easy loops.  */
  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	if (fread ((char *)rti->base + rti->stride * i,
		   sizeof (void *), 1, f) != 1)
	  fatal_error ("can't read PCH file: %m");

  for (rt = gt_pch_cache_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	if (fread ((char *)rti->base + rti->stride * i,
		   sizeof (void *), 1, f) != 1)
	  fatal_error ("can't read PCH file: %m");

  if (fread (&mmi, sizeof (mmi), 1, f) != 1)
    fatal_error ("can't read PCH file: %m");

  result = host_hooks.gt_pch_use_address (mmi.preferred_base, mmi.size,
					  fileno (f), mmi.offset);
  if (result < 0)
    fatal_error ("had to relocate PCH");
  if (result == 0)
    {
      if (fseek (f, mmi.offset, SEEK_SET) != 0
	  || fread (mmi.preferred_base, mmi.size, 1, f) != 1)
	fatal_error ("can't read PCH file: %m");
    }
  else if (fseek (f, mmi.offset + mmi.size, SEEK_SET) != 0)
    fatal_error ("can't read PCH file: %m");

  ggc_pch_read (f, mmi.preferred_base);

  gt_pch_restore_stringpool ();
}

/* Default version of HOST_HOOKS_GT_PCH_GET_ADDRESS when mmap is not present.
   Select no address whatsoever, and let gt_pch_save choose what it will with
   malloc, presumably.  */

void *
default_gt_pch_get_address (size_t size ATTRIBUTE_UNUSED,
			    int fd ATTRIBUTE_UNUSED)
{
  return NULL;
}

/* Default version of HOST_HOOKS_GT_PCH_USE_ADDRESS when mmap is not present.
   Allocate SIZE bytes with malloc.  Return 0 if the address we got is the
   same as base, indicating that the memory has been allocated but needs to
   be read in from the file.  Return -1 if the address differs, since
   relocation of the PCH file would be required.  */

int
default_gt_pch_use_address (void *base, size_t size, int fd ATTRIBUTE_UNUSED,
			    size_t offset ATTRIBUTE_UNUSED)
{
  void *addr = xmalloc (size);
  return (addr == base) - 1;
}

/* Default version of HOST_HOOKS_GT_PCH_ALLOC_GRANULARITY.  Return the
   alignment required for allocating virtual memory.  Usually this is the
   same as the page size.  */

size_t
default_gt_pch_alloc_granularity (void)
{
  return getpagesize();
}

#if HAVE_MMAP_FILE
/* Default version of HOST_HOOKS_GT_PCH_GET_ADDRESS when mmap is present.
   We temporarily allocate SIZE bytes, and let the kernel place the data
   wherever it will.  If it worked, that's our spot; if not, we're likely
   to be in trouble.  */

void *
mmap_gt_pch_get_address (size_t size, int fd)
{
  void *ret;

  ret = mmap (NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
  if (ret == (void *) MAP_FAILED)
    ret = NULL;
  else
    munmap (ret, size);

  return ret;
}

/* Default version of HOST_HOOKS_GT_PCH_USE_ADDRESS when mmap is present.
   Map SIZE bytes of FD+OFFSET at BASE.  Return 1 if we succeeded at
   mapping the data at BASE, -1 if we couldn't.

   This version assumes that the kernel honors the START operand of mmap
   even without MAP_FIXED if START through START+SIZE are not currently
   mapped with something.  */

int
mmap_gt_pch_use_address (void *base, size_t size, int fd, size_t offset)
{
  void *addr;

  /* We're called with size == 0 if we're not planning to load a PCH
     file at all.  This allows the hook to free any static space that
     we might have allocated at link time.  */
  if (size == 0)
    return -1;

  addr = mmap (base, size, PROT_READ | PROT_WRITE, MAP_PRIVATE,
	       fd, offset);

  return addr == base ? 1 : -1;
}
#endif /* HAVE_MMAP_FILE */

/* Modify the bound based on rlimits.  */
static double
ggc_rlimit_bound (double limit)
{
#if defined(HAVE_GETRLIMIT)
  struct rlimit rlim;
# if defined (RLIMIT_AS)
  /* RLIMIT_AS is what POSIX says is the limit on mmap.  Presumably
     any OS which has RLIMIT_AS also has a working mmap that GCC will use.  */
  if (getrlimit (RLIMIT_AS, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit)
    limit = rlim.rlim_cur;
# elif defined (RLIMIT_DATA)
  /* ... but some older OSs bound mmap based on RLIMIT_DATA, or we
     might be on an OS that has a broken mmap.  (Others don't bound
     mmap at all, apparently.)  */
  if (getrlimit (RLIMIT_DATA, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit
      /* Darwin has this horribly bogus default setting of
	 RLIMIT_DATA, to 6144Kb.  No-one notices because RLIMIT_DATA
	 appears to be ignored.  Ignore such silliness.  If a limit
	 this small was actually effective for mmap, GCC wouldn't even
	 start up.  */
      && rlim.rlim_cur >= 8 * 1024 * 1024)
    limit = rlim.rlim_cur;
# endif /* RLIMIT_AS or RLIMIT_DATA */
#endif /* HAVE_GETRLIMIT */

  return limit;
}

/* Heuristic to set a default for GGC_MIN_EXPAND.  */
int
ggc_min_expand_heuristic (void)
{
  double min_expand = physmem_total();

  /* Adjust for rlimits.  */
  min_expand = ggc_rlimit_bound (min_expand);

  /* The heuristic is a percentage equal to 30% + 70%*(RAM/1GB), yielding
     a lower bound of 30% and an upper bound of 100% (when RAM >= 1GB).  */
  min_expand /= 1024*1024*1024;
  min_expand *= 70;
  min_expand = MIN (min_expand, 70);
  min_expand += 30;
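
  /* For example, ignoring rlimits, a machine with 512MB of RAM yields
     30 + 70 * 0.5 = 65, i.e. ggc-min-expand defaults to 65%.  */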

  return min_expand;
}

/* Heuristic to set a default for GGC_MIN_HEAPSIZE.  */
int
ggc_min_heapsize_heuristic (void)
{
  double phys_kbytes = physmem_total();
  double limit_kbytes = ggc_rlimit_bound (phys_kbytes * 2);

  phys_kbytes /= 1024; /* Convert to Kbytes.  */
  limit_kbytes /= 1024;

  /* The heuristic is RAM/8, with a lower bound of 4M and an upper
     bound of 128M (when RAM >= 1GB).  */
  phys_kbytes /= 8;

#if defined(HAVE_GETRLIMIT) && defined (RLIMIT_RSS)
  /* Try not to overrun the RSS limit while doing garbage collection.
     The RSS limit is only advisory, so no margin is subtracted.  */
 {
   struct rlimit rlim;
   if (getrlimit (RLIMIT_RSS, &rlim) == 0
       && rlim.rlim_cur != (rlim_t) RLIM_INFINITY)
     phys_kbytes = MIN (phys_kbytes, rlim.rlim_cur / 1024);
 }
# endif

  /* Don't blindly run over our data limit; do GC at least when the
     *next* GC would be within 16Mb of the limit.  If GCC does hit the
     data limit, compilation will fail, so this tries to be
     conservative.  */
  limit_kbytes = MAX (0, limit_kbytes - 16 * 1024);
  limit_kbytes = (limit_kbytes * 100) / (110 + ggc_min_expand_heuristic());
  phys_kbytes = MIN (phys_kbytes, limit_kbytes);

  phys_kbytes = MAX (phys_kbytes, 4 * 1024);
  phys_kbytes = MIN (phys_kbytes, 128 * 1024);
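
  /* For example, ignoring rlimits, a 1GB machine computes 1048576 / 8
     = 131072Kb, which is exactly the 128Mb cap, so ggc-min-heapsize
     defaults to 128Mb.  */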

  return phys_kbytes;
}

void
init_ggc_heuristics (void)
{
#if !defined ENABLE_GC_CHECKING && !defined ENABLE_GC_ALWAYS_COLLECT
  set_param_value ("ggc-min-expand", ggc_min_expand_heuristic());
  set_param_value ("ggc-min-heapsize", ggc_min_heapsize_heuristic());
#endif
}

#ifdef GATHER_STATISTICS

/* Data structure used to store per-call-site statistics.  */
struct loc_descriptor
{
  const char *file;
  int line;
  const char *function;
  int times;
  size_t allocated;
  size_t overhead;
  size_t freed;
  size_t collected;
};

/* Hashtable used for statistics.  */
static htab_t loc_hash;

/* Hash table helper functions.  */
static hashval_t
hash_descriptor (const void *p)
{
  const struct loc_descriptor *d = p;

  return htab_hash_pointer (d->function) | d->line;
}

static int
eq_descriptor (const void *p1, const void *p2)
{
  const struct loc_descriptor *d = p1;
  const struct loc_descriptor *d2 = p2;

  return (d->file == d2->file && d->line == d2->line
	  && d->function == d2->function);
}

/* Hash table mapping the address of an allocated object to its
   loc descriptor.  */
static htab_t ptr_hash;
struct ptr_hash_entry
{
  void *ptr;
  struct loc_descriptor *loc;
  size_t size;
};

/* Hash table helper functions.  */
static hashval_t
hash_ptr (const void *p)
{
  const struct ptr_hash_entry *d = p;

  return htab_hash_pointer (d->ptr);
}

static int
eq_ptr (const void *p1, const void *p2)
{
  const struct ptr_hash_entry *p = p1;

  return (p->ptr == p2);
}

/* Return the descriptor for the given call site, creating a new one if
   needed.  */
static struct loc_descriptor *
loc_descriptor (const char *name, int line, const char *function)
{
  struct loc_descriptor loc;
  struct loc_descriptor **slot;

  loc.file = name;
  loc.line = line;
  loc.function = function;
  if (!loc_hash)
    loc_hash = htab_create (10, hash_descriptor, eq_descriptor, NULL);

  slot = (struct loc_descriptor **) htab_find_slot (loc_hash, &loc, 1);
  if (*slot)
    return *slot;
  *slot = xcalloc (sizeof (**slot), 1);
  (*slot)->file = name;
  (*slot)->line = line;
  (*slot)->function = function;
  return *slot;
}

/* Record ALLOCATED and OVERHEAD bytes to descriptor NAME:LINE (FUNCTION).  */
void
ggc_record_overhead (size_t allocated, size_t overhead, void *ptr,
		     const char *name, int line, const char *function)
{
  struct loc_descriptor *loc = loc_descriptor (name, line, function);
  struct ptr_hash_entry *p = XNEW (struct ptr_hash_entry);
  PTR *slot;

  p->ptr = ptr;
  p->loc = loc;
  p->size = allocated + overhead;
  if (!ptr_hash)
    ptr_hash = htab_create (10, hash_ptr, eq_ptr, NULL);
  slot = htab_find_slot_with_hash (ptr_hash, ptr, htab_hash_pointer (ptr), INSERT);
  gcc_assert (!*slot);
  *slot = p;

  loc->times++;
  loc->allocated+=allocated;
  loc->overhead+=overhead;
}

/* Helper function for prune_overhead_list.  See if the pointer recorded in
   SLOT is still marked; if not, account for it as collected and remove it
   from the hash table.  */
static int
ggc_prune_ptr (void **slot, void *b ATTRIBUTE_UNUSED)
{
  struct ptr_hash_entry *p = *slot;
  if (!ggc_marked_p (p->ptr))
    {
      p->loc->collected += p->size;
      htab_clear_slot (ptr_hash, slot);
      free (p);
    }
  return 1;
}

/* After live values have been marked, walk all recorded pointers and see if
   they are still live.  */
void
ggc_prune_overhead_list (void)
{
  htab_traverse (ptr_hash, ggc_prune_ptr, NULL);
}

/* Notice that the pointer has been freed.  */
void
ggc_free_overhead (void *ptr)
{
  PTR *slot = htab_find_slot_with_hash (ptr_hash, ptr, htab_hash_pointer (ptr),
					NO_INSERT);
  struct ptr_hash_entry *p = *slot;
  p->loc->freed += p->size;
  htab_clear_slot (ptr_hash, slot);
  free (p);
}

/* Helper for qsort; sort descriptors by the amount of memory consumed,
   smallest first.  */
static int
cmp_statistic (const void *loc1, const void *loc2)
{
  struct loc_descriptor *l1 = *(struct loc_descriptor **) loc1;
  struct loc_descriptor *l2 = *(struct loc_descriptor **) loc2;
  return ((l1->allocated + l1->overhead - l1->freed) -
	  (l2->allocated + l2->overhead - l2->freed));
}

/* Helper for htab_traverse: collect the descriptors into loc_array.  */
struct loc_descriptor **loc_array;
static int
add_statistics (void **slot, void *b)
{
  int *n = (int *)b;
  loc_array[*n] = (struct loc_descriptor *) *slot;
  (*n)++;
  return 1;
}

/* Dump per-site memory statistics.  */
#endif
void
dump_ggc_loc_statistics (void)
{
#ifdef GATHER_STATISTICS
  int nentries = 0;
  char s[4096];
  size_t collected = 0, freed = 0, allocated = 0, overhead = 0, times = 0;
  int i;

  ggc_force_collect = true;
  ggc_collect ();

  loc_array = xcalloc (sizeof (*loc_array), loc_hash->n_elements);
  fprintf (stderr, "-------------------------------------------------------\n");
  fprintf (stderr, "\n%-48s %10s       %10s       %10s       %10s       %10s\n",
	   "source location", "Garbage", "Freed", "Leak", "Overhead", "Times");
  fprintf (stderr, "-------------------------------------------------------\n");
  htab_traverse (loc_hash, add_statistics, &nentries);
  qsort (loc_array, nentries, sizeof (*loc_array), cmp_statistic);
  for (i = 0; i < nentries; i++)
    {
      struct loc_descriptor *d = loc_array[i];
      allocated += d->allocated;
      times += d->times;
      freed += d->freed;
      collected += d->collected;
      overhead += d->overhead;
    }
  for (i = 0; i < nentries; i++)
    {
      struct loc_descriptor *d = loc_array[i];
      if (d->allocated)
	{
	  const char *s1 = d->file;
	  const char *s2;
	  while ((s2 = strstr (s1, "gcc/")))
	    s1 = s2 + 4;
	  sprintf (s, "%s:%i (%s)", s1, d->line, d->function);
	  s[48] = 0;
	  fprintf (stderr, "%-48s %10li:%4.1f%% %10li:%4.1f%% %10li:%4.1f%% %10li:%4.1f%% %10li\n", s,
		   (long)d->collected,
		   (d->collected) * 100.0 / collected,
		   (long)d->freed,
		   (d->freed) * 100.0 / freed,
		   (long)(d->allocated + d->overhead - d->freed - d->collected),
		   (d->allocated + d->overhead - d->freed - d->collected) * 100.0
		   / (allocated + overhead - freed - collected),
		   (long)d->overhead,
		   d->overhead * 100.0 / overhead,
		   (long)d->times);
	}
    }
  fprintf (stderr, "%-48s %10ld       %10ld       %10ld       %10ld       %10ld\n",
	   "Total", (long)collected, (long)freed,
	   (long)(allocated + overhead - freed - collected), (long)overhead,
	   (long)times);
  fprintf (stderr, "%-48s %10s       %10s       %10s       %10s       %10s\n",
	   "source location", "Garbage", "Freed", "Leak", "Overhead", "Times");
  fprintf (stderr, "-------------------------------------------------------\n");
#endif
}
