/* "Bag-of-pages" garbage collector for the GNU compiler.
   Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "tm_p.h"
#include "toplev.h"
#include "flags.h"
#include "ggc.h"
#include "timevar.h"
#include "params.h"
#include "tree-flow.h"
#ifdef ENABLE_VALGRIND_CHECKING
# ifdef HAVE_VALGRIND_MEMCHECK_H
#  include <valgrind/memcheck.h>
# elif defined HAVE_MEMCHECK_H
#  include <memcheck.h>
# else
#  include <valgrind.h>
# endif
#else
/* Avoid #ifdefs when we can help it.  */
#define VALGRIND_DISCARD(x)
#endif

/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
   file open.  Prefer either to valloc.  */
#ifdef HAVE_MMAP_ANON
# undef HAVE_MMAP_DEV_ZERO

# include <sys/mman.h>
# ifndef MAP_FAILED
#  define MAP_FAILED -1
# endif
# if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
#  define MAP_ANONYMOUS MAP_ANON
# endif
# define USING_MMAP

#endif

#ifdef HAVE_MMAP_DEV_ZERO

# include <sys/mman.h>
# ifndef MAP_FAILED
#  define MAP_FAILED -1
# endif
# define USING_MMAP

#endif

#ifndef USING_MMAP
#define USING_MALLOC_PAGE_GROUPS
#endif

/* Strategy:

   This garbage-collecting allocator allocates objects on one of a set
   of pages.  Each page can allocate objects of a single size only;
   available sizes are powers of two starting at eight bytes.  The size
   of an allocation request is rounded up to the next power of two
   (`order'), and satisfied from the appropriate page.

   Each page is recorded in a page-entry, which also maintains an
   in-use bitmap of object positions on the page.  This allows the
   allocation state of a particular object to be flipped without
   touching the page itself.

   Each page-entry also has a context depth, which is used to track
   pushing and popping of allocation contexts.  Only objects allocated
   in the current (highest-numbered) context may be collected.

   Page entries are arranged in an array of doubly-linked lists.  The
   array is indexed by the allocation size, in bits, of the pages on
   it; i.e. all pages on a list allocate objects of the same size.
   Pages are ordered on the list such that all non-full pages precede
   all full pages, with non-full pages arranged in order of decreasing
   context depth.

   Empty pages (of all orders) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  */
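
/* An illustrative walk-through (not part of the allocator): with 4K
   system pages, a 24-byte request rounds up to the 32-byte order and
   is served from a page holding 4096 / 32 == 128 objects, whose
   page-entry tracks them with a 128-bit in-use bitmap.  The page size
   is an assumption for the example; the extra orders defined below
   can change which order such a request actually lands in.  */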

/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)

#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR  HOST_BITS_PER_LONG
#endif


/* A two-level tree is used to look up the page-entry for a given
   pointer.  Two chunks of the pointer's bits are extracted to index
   the first and second levels of the tree, as follows:

				   HOST_PAGE_SIZE_BITS
			   32		|      |
       msb +----------------+----+------+------+ lsb
			    |    |      |
			 PAGE_L1_BITS   |
				 |      |
			       PAGE_L2_BITS

   The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
   pages are aligned on system page boundaries.  The next most
   significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
   index values in the lookup table, respectively.

   For 32-bit architectures and the settings below, there are no
   leftover bits.  For architectures with wider pointers, the lookup
   tree points to a list of pages, which must be scanned to find the
   correct one.  */
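
/* A worked lookup example, assuming a 32-bit host with 4K pages
   (G.lg_pagesize == 12, so PAGE_L2_BITS == 12): for the address
   0xdeadbeef, LOOKUP_L1 extracts the top 8 bits, 0xde, and LOOKUP_L2
   extracts the next 12 bits, (0xdeadbeef >> 12) & 0xfff == 0xadb, so
   the page-entry would be found at G.lookup[0xde][0xadb].  The
   address and page size are illustrative assumptions.  */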

#define PAGE_L1_BITS	(8)
#define PAGE_L2_BITS	(32 - PAGE_L1_BITS - G.lg_pagesize)
#define PAGE_L1_SIZE	((size_t) 1 << PAGE_L1_BITS)
#define PAGE_L2_SIZE	((size_t) 1 << PAGE_L2_BITS)

#define LOOKUP_L1(p) \
  (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))

#define LOOKUP_L2(p) \
  (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))

/* The number of objects per allocation page, for objects on a page of
   the indicated ORDER.  */
#define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]

/* The number of objects in P.  */
#define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order))

/* The size of an object on a page of the indicated ORDER.  */
#define OBJECT_SIZE(ORDER) object_size_table[ORDER]
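
/* For example (illustrative numbers): with 4K pages, a page of order
   7 serves OBJECT_SIZE (7) == 128-byte objects, and OBJECTS_IN_PAGE
   for a one-page entry is 4096 / 128 == 32.  */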

/* For speed, we avoid doing a general integer divide to locate the
   offset in the allocation bitmap, by precalculating numbers M, S
   such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
   within the page which is evenly divisible by the object size Z.  */
#define DIV_MULT(ORDER) inverse_table[ORDER].mult
#define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
#define OFFSET_TO_BIT(OFFSET, ORDER) \
  (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
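
/* A worked instance of the reciprocal trick, using assumed values
   rather than the ones compute_inverse necessarily picks: for object
   size Z == 24 == 8 * 3, the pair S == 3, M == 0xaaaaaaab works,
   since 0xaaaaaaab is the inverse of 3 modulo 2^32
   (3 * 0xaaaaaaab == 0x200000001).  For the offset O == 48:
   (48 * 0xaaaaaaab) mod 2^32 == 16, and 16 >> 3 == 2 == 48 / 24.  */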

/* The number of extra orders, not corresponding to power-of-two sized
   objects.  */

#define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)

#define RTL_SIZE(NSLOTS) \
  (RTX_HDR_SIZE + (NSLOTS) * sizeof (rtunion))

#define TREE_EXP_SIZE(OPS) \
  (sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree))

/* The Ith entry is the maximum size of an object to be stored in the
   Ith extra order.  Adding a new entry to this array is the *only*
   thing you need to do to add a new special allocation size.  */

static const size_t extra_order_size_table[] = {
  sizeof (struct stmt_ann_d),
  sizeof (struct var_ann_d),
  sizeof (struct tree_decl_non_common),
  sizeof (struct tree_field_decl),
  sizeof (struct tree_parm_decl),
  sizeof (struct tree_var_decl),
  sizeof (struct tree_list),
  sizeof (struct tree_ssa_name),
  sizeof (struct function),
  sizeof (struct basic_block_def),
  sizeof (bitmap_element),
  /* PHI nodes with one to three arguments are already covered by the
     above sizes.  */
  sizeof (struct tree_phi_node) + sizeof (struct phi_arg_d) * 3,
  TREE_EXP_SIZE (2),
  RTL_SIZE (2),			/* MEM, PLUS, etc.  */
  RTL_SIZE (9),			/* INSN */
};

/* The total number of orders.  */

#define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)

/* We use this structure to determine the alignment required for
   allocations.  For power-of-two sized allocations, that's not a
   problem, but it does matter for odd-sized allocations.  */

struct max_alignment {
  char c;
  union {
    HOST_WIDEST_INT i;
    long double d;
  } u;
};

/* The biggest alignment required.  */

#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
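
/* For instance, if long double is the most-aligned member of the
   union and needs 16-byte alignment on the host, U is placed at
   offset 16 in struct max_alignment and MAX_ALIGNMENT evaluates to
   16.  The number is an illustrative assumption; it depends on the
   host ABI.  */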

/* Compute the smallest nonnegative number which when added to X gives
   a multiple of F.  */

#define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))

/* Compute the smallest multiple of F that is >= X.  */

#define ROUND_UP(x, f) (CEIL (x, f) * (f))
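
/* E.g. ROUND_UP_VALUE (10, 8) == 6 and ROUND_UP (10, 8) == 16,
   since CEIL (10, 8) == 2.  */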

/* The Ith entry is the number of objects on a page of order I.  */

static unsigned objects_per_page_table[NUM_ORDERS];

/* The Ith entry is the size of an object on a page of order I.  */

static size_t object_size_table[NUM_ORDERS];

/* The Ith entry is a pair of numbers (mult, shift) such that
   ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
   for all k evenly divisible by OBJECT_SIZE(I).  */

static struct
{
  size_t mult;
  unsigned int shift;
}
inverse_table[NUM_ORDERS];

/* A page_entry records the status of an allocation page.  This
   structure is dynamically sized to fit the bitmap in_use_p.  */
typedef struct page_entry
{
  /* The next page-entry with objects of the same size, or NULL if
     this is the last page-entry.  */
  struct page_entry *next;

  /* The previous page-entry with objects of the same size, or NULL if
     this is the first page-entry.  The PREV pointer exists solely to
     keep the cost of ggc_free manageable.  */
  struct page_entry *prev;

  /* The number of bytes allocated.  (This will always be a multiple
     of the host system page size.)  */
  size_t bytes;

  /* The address at which the memory is allocated.  */
  char *page;

#ifdef USING_MALLOC_PAGE_GROUPS
  /* Back pointer to the page group this page came from.  */
  struct page_group *group;
#endif

  /* This is the index in the by_depth varray where this page entry
     can be found.  */
  unsigned long index_by_depth;

  /* Context depth of this page.  */
  unsigned short context_depth;

  /* The number of free objects remaining on this page.  */
  unsigned short num_free_objects;

  /* A likely candidate for the bit position of a free object for the
     next allocation from this page.  */
  unsigned short next_bit_hint;

  /* The lg of size of objects allocated from this page.  */
  unsigned char order;

  /* A bit vector indicating whether or not objects are in use.  The
     Nth bit is one if the Nth object on this page is allocated.  This
     array is dynamically sized.  */
  unsigned long in_use_p[1];
} page_entry;

#ifdef USING_MALLOC_PAGE_GROUPS
/* A page_group describes a large allocation from malloc, from which
   we parcel out aligned pages.  */
typedef struct page_group
{
  /* A linked list of all extant page groups.  */
  struct page_group *next;

  /* The address we received from malloc.  */
  char *allocation;

  /* The size of the block.  */
  size_t alloc_size;

  /* A bitmask of pages in use.  */
  unsigned int in_use;
} page_group;
#endif

#if HOST_BITS_PER_PTR <= 32

/* On 32-bit hosts, we use a two level page table, as pictured above.  */
typedef page_entry **page_table[PAGE_L1_SIZE];

#else

/* On 64-bit hosts, we use the same two level page tables plus a linked
   list that disambiguates the top 32-bits.  There will almost always be
   exactly one entry in the list.  */
typedef struct page_table_chain
{
  struct page_table_chain *next;
  size_t high_bits;
  page_entry **table[PAGE_L1_SIZE];
} *page_table;

#endif

/* The rest of the global variables.  */
static struct globals
{
  /* The Nth element in this array is a page with objects of size 2^N.
     If there are any pages with free objects, they will be at the
     head of the list.  NULL if there are no page-entries for this
     object size.  */
  page_entry *pages[NUM_ORDERS];

  /* The Nth element in this array is the last page with objects of
     size 2^N.  NULL if there are no page-entries for this object
     size.  */
  page_entry *page_tails[NUM_ORDERS];

  /* Lookup table for associating allocation pages with object addresses.  */
  page_table lookup;

  /* The system's page size.  */
  size_t pagesize;
  size_t lg_pagesize;

  /* Bytes currently allocated.  */
  size_t allocated;

  /* Bytes currently allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* Bit N set if any allocations have been done at context depth N.  */
  unsigned long context_depth_allocations;

  /* Bit N set if any collections have been done at context depth N.  */
  unsigned long context_depth_collections;

  /* The current depth in the context stack.  */
  unsigned short context_depth;

  /* A file descriptor open to /dev/zero for reading.  */
#if defined (HAVE_MMAP_DEV_ZERO)
  int dev_zero_fd;
#endif

  /* A cache of free system pages.  */
  page_entry *free_pages;

#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *page_groups;
#endif

  /* The file descriptor for debugging output.  */
  FILE *debug_file;

  /* Current number of elements in use in depth below.  */
  unsigned int depth_in_use;

  /* Maximum number of elements that can be used before resizing.  */
  unsigned int depth_max;

  /* Each element of this array is an index in by_depth where the given
     depth starts.  This array is indexed by the depth we are
     interested in.  */
  unsigned int *depth;

  /* Current number of elements in use in by_depth below.  */
  unsigned int by_depth_in_use;

  /* Maximum number of elements that can be used before resizing.  */
  unsigned int by_depth_max;

  /* Each element of this array is a pointer to a page_entry; all
     page_entries can be found in here, ordered by increasing depth.
     index_by_depth in the page_entry is the index into this data
     structure where that page_entry can be found.  This is used to
     speed up finding all page_entries at a particular depth.  */
  page_entry **by_depth;

  /* Each element is a pointer to the saved in_use_p bits, if any,
     zero otherwise.  We allocate them all together, to enable a
     better runtime data access pattern.  */
  unsigned long **save_in_use;

#ifdef ENABLE_GC_ALWAYS_COLLECT
  /* List of free objects to be verified as actually free on the
     next collection.  */
  struct free_object
  {
    void *object;
    struct free_object *next;
  } *free_object_list;
#endif

#ifdef GATHER_STATISTICS
  struct
  {
    /* Total memory allocated with ggc_alloc.  */
    unsigned long long total_allocated;
    /* Total overhead for memory to be allocated with ggc_alloc.  */
    unsigned long long total_overhead;

    /* Total allocations and overhead for sizes of at most 32, 64 and
       128 bytes.  These sizes are interesting because they are typical
       cache line sizes.  */

    unsigned long long total_allocated_under32;
    unsigned long long total_overhead_under32;

    unsigned long long total_allocated_under64;
    unsigned long long total_overhead_under64;

    unsigned long long total_allocated_under128;
    unsigned long long total_overhead_under128;

    /* The allocations for each of the allocation orders.  */
    unsigned long long total_allocated_per_order[NUM_ORDERS];

    /* The overhead for each of the allocation orders.  */
    unsigned long long total_overhead_per_order[NUM_ORDERS];
  } stats;
#endif
} G;

/* The size in bytes required to maintain a bitmap for the objects
   on a page-entry.  */
#define BITMAP_SIZE(Num_objects) \
  (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof (long))
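
/* For example, with 32-bit longs, BITMAP_SIZE (128) is
   CEIL (128, 32) * sizeof (long) == 16 bytes.  Note that alloc_page
   below sizes the bitmap for one object more than fits on the page,
   to leave room for the one-past-the-end sentinel bit.  */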

/* Allocate pages in chunks of this size, to throttle calls to memory
   allocation routines.  The first page is used, the rest go onto the
   free list.  This cannot be larger than HOST_BITS_PER_INT for the
   in_use bitmask for page_group.  Hosts that need a different value
   can override this by defining GGC_QUIRE_SIZE explicitly.  */
#ifndef GGC_QUIRE_SIZE
# ifdef USING_MMAP
#  define GGC_QUIRE_SIZE 256
# else
#  define GGC_QUIRE_SIZE 16
# endif
#endif

/* Initial guess as to how many page table entries we might need.  */
#define INITIAL_PTE_COUNT 128

static int ggc_allocated_p (const void *);
static page_entry *lookup_page_table_entry (const void *);
static void set_page_table_entry (void *, page_entry *);
#ifdef USING_MMAP
static char *alloc_anon (char *, size_t);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
static size_t page_group_index (char *, char *);
static void set_page_group_in_use (page_group *, char *);
static void clear_page_group_in_use (page_group *, char *);
#endif
static struct page_entry * alloc_page (unsigned);
static void free_page (struct page_entry *);
static void release_pages (void);
static void clear_marks (void);
static void sweep_pages (void);
static void ggc_recalculate_in_use_p (page_entry *);
static void compute_inverse (unsigned);
static inline void adjust_depth (void);
static void move_ptes_to_front (int, int);

void debug_print_page_list (int);
static void push_depth (unsigned int);
static void push_by_depth (page_entry *, unsigned long *);

/* Push an entry onto G.depth.  */

inline static void
push_depth (unsigned int i)
{
  if (G.depth_in_use >= G.depth_max)
    {
      G.depth_max *= 2;
      G.depth = xrealloc (G.depth, G.depth_max * sizeof (unsigned int));
    }
  G.depth[G.depth_in_use++] = i;
}

/* Push an entry onto G.by_depth and G.save_in_use.  */

inline static void
push_by_depth (page_entry *p, unsigned long *s)
{
  if (G.by_depth_in_use >= G.by_depth_max)
    {
      G.by_depth_max *= 2;
      G.by_depth = xrealloc (G.by_depth,
			     G.by_depth_max * sizeof (page_entry *));
      G.save_in_use = xrealloc (G.save_in_use,
				G.by_depth_max * sizeof (unsigned long *));
    }
  G.by_depth[G.by_depth_in_use] = p;
  G.save_in_use[G.by_depth_in_use++] = s;
}

#if (GCC_VERSION < 3001)
#define prefetch(X) ((void) X)
#else
#define prefetch(X) __builtin_prefetch (X)
#endif

#define save_in_use_p_i(__i) \
  (G.save_in_use[__i])
#define save_in_use_p(__p) \
  (save_in_use_p_i (__p->index_by_depth))

/* Returns nonzero if P was allocated in GC'able memory.  */

static inline int
ggc_allocated_p (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (1)
    {
      if (table == NULL)
	return 0;
      if (table->high_bits == high_bits)
	break;
      table = table->next;
    }
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1] && base[L1][L2];
}

/* Traverse the page table and find the entry for a page.
   Die (probably) if the object wasn't allocated via GC.  */

static inline page_entry *
lookup_page_table_entry (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (table->high_bits != high_bits)
    table = table->next;
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1][L2];
}

/* Set the page table entry for a page.  */

static void
set_page_table_entry (void *p, page_entry *entry)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  for (table = G.lookup; table; table = table->next)
    if (table->high_bits == high_bits)
      goto found;

  /* Not found -- allocate a new table.  */
  table = xcalloc (1, sizeof (*table));
  table->next = G.lookup;
  table->high_bits = high_bits;
  G.lookup = table;
found:
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  if (base[L1] == NULL)
    base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);

  base[L1][L2] = entry;
}

/* Prints the page-entry for object size ORDER, for debugging.  */

void
debug_print_page_list (int order)
{
  page_entry *p;
  printf ("Head=%p, Tail=%p:\n", (void *) G.pages[order],
	  (void *) G.page_tails[order]);
  p = G.pages[order];
  while (p != NULL)
    {
      printf ("%p(%1d|%3d) -> ", (void *) p, p->context_depth,
	      p->num_free_objects);
      p = p->next;
    }
  printf ("NULL\n");
  fflush (stdout);
}

#ifdef USING_MMAP
/* Allocate SIZE bytes of anonymous memory, preferably near PREF
   (if non-null).  The ifdef structure here is intended to cause a
   compile error unless exactly one of the HAVE_* is defined.  */

static inline char *
alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size)
{
#ifdef HAVE_MMAP_ANON
  char *page = mmap (pref, size, PROT_READ | PROT_WRITE,
		     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
  char *page = mmap (pref, size, PROT_READ | PROT_WRITE,
		     MAP_PRIVATE, G.dev_zero_fd, 0);
#endif

  if (page == (char *) MAP_FAILED)
    {
      perror ("virtual memory exhausted");
      exit (FATAL_EXIT_CODE);
    }

  /* Remember that we allocated this memory.  */
  G.bytes_mapped += size;

  /* Pretend we don't have access to the allocated pages.  We'll enable
     access to smaller pieces of the area in ggc_alloc.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (page, size));

  return page;
}
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
/* Compute the index for this page into the page group.  */

static inline size_t
page_group_index (char *allocation, char *page)
{
  return (size_t) (page - allocation) >> G.lg_pagesize;
}
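
/* For example, with 4K pages (G.lg_pagesize == 12), a page starting
   0x3000 bytes into its group's allocation has index 3, i.e. bit 3
   of the group's in_use mask.  The page size is an illustrative
   assumption.  */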

/* Set and clear the in_use bit for this page in the page group.  */

static inline void
set_page_group_in_use (page_group *group, char *page)
{
  group->in_use |= 1 << page_group_index (group->allocation, page);
}

static inline void
clear_page_group_in_use (page_group *group, char *page)
{
  group->in_use &= ~(1 << page_group_index (group->allocation, page));
}
#endif

/* Allocate a new page for allocating objects of size 2^ORDER,
   and return an entry for it.  The entry is not added to the
   appropriate page_table list.  */

static inline struct page_entry *
alloc_page (unsigned order)
{
  struct page_entry *entry, *p, **pp;
  char *page;
  size_t num_objects;
  size_t bitmap_size;
  size_t page_entry_size;
  size_t entry_size;
#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *group;
#endif

  num_objects = OBJECTS_PER_PAGE (order);
  bitmap_size = BITMAP_SIZE (num_objects + 1);
  page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
  entry_size = num_objects * OBJECT_SIZE (order);
  if (entry_size < G.pagesize)
    entry_size = G.pagesize;

  entry = NULL;
  page = NULL;

  /* Check the list of free pages for one we can use.  */
  for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
    if (p->bytes == entry_size)
      break;

  if (p != NULL)
    {
      /* Recycle the allocated memory from this page ...  */
      *pp = p->next;
      page = p->page;

#ifdef USING_MALLOC_PAGE_GROUPS
      group = p->group;
#endif

      /* ... and, if possible, the page entry itself.  */
      if (p->order == order)
	{
	  entry = p;
	  memset (entry, 0, page_entry_size);
	}
      else
	free (p);
    }
#ifdef USING_MMAP
  else if (entry_size == G.pagesize)
    {
      /* We want just one page.  Allocate a bunch of them and put the
	 extras on the freelist.  (Can only do this optimization with
	 mmap for backing store.)  */
      struct page_entry *e, *f = G.free_pages;
      int i;

      page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE);

      /* This loop counts down so that the chain will be in ascending
	 memory order.  */
      for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
	{
	  e = xcalloc (1, page_entry_size);
	  e->order = order;
	  e->bytes = G.pagesize;
	  e->page = page + (i << G.lg_pagesize);
	  e->next = f;
	  f = e;
	}

      G.free_pages = f;
    }
  else
    page = alloc_anon (NULL, entry_size);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  else
    {
      /* Allocate a large block of memory and serve out the aligned
	 pages therein.  This results in much less memory wastage
	 than the traditional implementation of valloc.  */

      char *allocation, *a, *enda;
      size_t alloc_size, head_slop, tail_slop;
      int multiple_pages = (entry_size == G.pagesize);

      if (multiple_pages)
	alloc_size = GGC_QUIRE_SIZE * G.pagesize;
      else
	alloc_size = entry_size + G.pagesize - 1;
      allocation = xmalloc (alloc_size);

      page = (char *) (((size_t) allocation + G.pagesize - 1) & -G.pagesize);
      head_slop = page - allocation;
      if (multiple_pages)
	tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
      else
	tail_slop = alloc_size - entry_size - head_slop;
      enda = allocation + alloc_size - tail_slop;

      /* We allocated N pages, which are likely not aligned, leaving
	 us with N-1 usable pages.  We plan to place the page_group
	 structure somewhere in the slop.  */
      if (head_slop >= sizeof (page_group))
	group = (page_group *)page - 1;
      else
	{
	  /* We magically got an aligned allocation.  Too bad, we have
	     to waste a page anyway.  */
	  if (tail_slop == 0)
	    {
	      enda -= G.pagesize;
	      tail_slop += G.pagesize;
	    }
	  gcc_assert (tail_slop >= sizeof (page_group));
	  group = (page_group *)enda;
	  tail_slop -= sizeof (page_group);
	}

      /* Remember that we allocated this memory.  */
      group->next = G.page_groups;
      group->allocation = allocation;
      group->alloc_size = alloc_size;
      group->in_use = 0;
      G.page_groups = group;
      G.bytes_mapped += alloc_size;

      /* If we allocated multiple pages, put the rest on the free list.  */
      if (multiple_pages)
	{
	  struct page_entry *e, *f = G.free_pages;
	  for (a = enda - G.pagesize; a != page; a -= G.pagesize)
	    {
	      e = xcalloc (1, page_entry_size);
	      e->order = order;
	      e->bytes = G.pagesize;
	      e->page = a;
	      e->group = group;
	      e->next = f;
	      f = e;
	    }
	  G.free_pages = f;
	}
    }
#endif

  if (entry == NULL)
    entry = xcalloc (1, page_entry_size);

  entry->bytes = entry_size;
  entry->page = page;
  entry->context_depth = G.context_depth;
  entry->order = order;
  entry->num_free_objects = num_objects;
  entry->next_bit_hint = 1;

  G.context_depth_allocations |= (unsigned long)1 << G.context_depth;

#ifdef USING_MALLOC_PAGE_GROUPS
  entry->group = group;
  set_page_group_in_use (group, page);
#endif

  /* Set the one-past-the-end in-use bit.  This acts as a sentry as we
     increment the hint.  */
  entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
    = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);
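
  /* Illustrative note (object count assumed): on a page of 128
     objects, bit 128 stays permanently set, so once next_bit_hint
     reaches 128 the hint test in ggc_alloc_stat sees a set bit and
     falls back to scanning the bitmap from the beginning.  */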

  set_page_table_entry (page, entry);

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Allocating page at %p, object size=%lu, data %p-%p\n",
	     (void *) entry, (unsigned long) OBJECT_SIZE (order), page,
	     page + entry_size - 1);

  return entry;
}

/* Adjust the size of G.depth so that no index greater than the one
   used by the top of G.by_depth is used.  */

static inline void
adjust_depth (void)
{
  page_entry *top;

  if (G.by_depth_in_use)
    {
      top = G.by_depth[G.by_depth_in_use-1];

      /* Peel back indices in depth that index into by_depth, so that
	 as new elements are added to by_depth, we note the indices
	 of those elements, if they are for new context depths.  */
      while (G.depth_in_use > (size_t)top->context_depth+1)
	--G.depth_in_use;
    }
}

/* For a page that is no longer needed, put it on the free page list.  */

static void
free_page (page_entry *entry)
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Deallocating page at %p, data %p-%p\n", (void *) entry,
	     entry->page, entry->page + entry->bytes - 1);

  /* Mark the page as inaccessible.  Discard the handle to avoid handle
     leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (entry->page, entry->bytes));

  set_page_table_entry (entry->page, NULL);

#ifdef USING_MALLOC_PAGE_GROUPS
  clear_page_group_in_use (entry->group, entry->page);
#endif

  if (G.by_depth_in_use > 1)
    {
      page_entry *top = G.by_depth[G.by_depth_in_use-1];
      int i = entry->index_by_depth;

      /* We cannot free a page from a context deeper than the current
	 one.  */
      gcc_assert (entry->context_depth == top->context_depth);

      /* Put top element into freed slot.  */
      G.by_depth[i] = top;
      G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
      top->index_by_depth = i;
    }
  --G.by_depth_in_use;

  adjust_depth ();

  entry->next = G.free_pages;
  G.free_pages = entry;
}

/* Release the free page cache to the system.  */

static void
release_pages (void)
{
#ifdef USING_MMAP
  page_entry *p, *next;
  char *start;
  size_t len;

  /* Gather up adjacent pages so they are unmapped together.  */
  p = G.free_pages;

  while (p)
    {
      start = p->page;
      next = p->next;
      len = p->bytes;
      free (p);
      p = next;

      while (p && p->page == start + len)
	{
	  next = p->next;
	  len += p->bytes;
	  free (p);
	  p = next;
	}

      munmap (start, len);
      G.bytes_mapped -= len;
    }

  G.free_pages = NULL;
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  page_entry **pp, *p;
  page_group **gp, *g;

  /* Remove all pages from free page groups from the list.  */
  pp = &G.free_pages;
  while ((p = *pp) != NULL)
    if (p->group->in_use == 0)
      {
	*pp = p->next;
	free (p);
      }
    else
      pp = &p->next;

  /* Remove all free page groups, and release the storage.  */
  gp = &G.page_groups;
  while ((g = *gp) != NULL)
    if (g->in_use == 0)
      {
	*gp = g->next;
	G.bytes_mapped -= g->alloc_size;
	free (g->allocation);
      }
    else
      gp = &g->next;
#endif
}

/* This table provides a fast way to determine ceil(log_2(size)) for
   allocation requests.  The minimum allocation size is eight bytes.  */
#define NUM_SIZE_LOOKUP 512
static unsigned char size_lookup[NUM_SIZE_LOOKUP] =
{
  3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
  4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
  5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
};
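
/* For example, as statically initialized, size_lookup[24] == 5, so a
   24-byte request would be served from a page of 32-byte (order 5)
   objects; initialization may retarget such slots at a better-fitting
   extra order.  Requests of NUM_SIZE_LOOKUP bytes or more fall
   through to the order search loop in ggc_alloc_stat below.  */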

/* Typed allocation function.  Does nothing special in this collector.  */

void *
ggc_alloc_typed_stat (enum gt_types_enum type ATTRIBUTE_UNUSED, size_t size
		      MEM_STAT_DECL)
{
  return ggc_alloc_stat (size PASS_MEM_STAT);
}

/* Allocate a chunk of memory of SIZE bytes.  Its contents are undefined.  */

void *
ggc_alloc_stat (size_t size MEM_STAT_DECL)
{
  size_t order, word, bit, object_offset, object_size;
  struct page_entry *entry;
  void *result;

  if (size < NUM_SIZE_LOOKUP)
    {
      order = size_lookup[size];
      object_size = OBJECT_SIZE (order);
    }
  else
    {
      order = 10;
      while (size > (object_size = OBJECT_SIZE (order)))
	order++;
    }

  /* If there are non-full pages for this size allocation, they are at
     the head of the list.  */
  entry = G.pages[order];

  /* If there is no page for this object size, or all pages in this
     context are full, allocate a new page.  */
  if (entry == NULL || entry->num_free_objects == 0)
    {
      struct page_entry *new_entry;
      new_entry = alloc_page (order);

      new_entry->index_by_depth = G.by_depth_in_use;
      push_by_depth (new_entry, 0);

      /* We can skip context depths; if we do, make sure we go all the
	 way to the new depth.  */
      while (new_entry->context_depth >= G.depth_in_use)
	push_depth (G.by_depth_in_use-1);

      /* If this is the only entry, it's also the tail.  If it is not
	 the only entry, then we must update the PREV pointer of the
	 ENTRY (G.pages[order]) to point to our new page entry.  */
      if (entry == NULL)
	G.page_tails[order] = new_entry;
      else
	entry->prev = new_entry;

      /* Put new pages at the head of the page list.  By definition the
	 entry at the head of the list always has a NULL PREV pointer.  */
      new_entry->next = entry;
      new_entry->prev = NULL;
      entry = new_entry;
      G.pages[order] = new_entry;

      /* For a new page, we know the word and bit positions (in the
	 in_use bitmap) of the first available object -- they're zero.  */
      new_entry->next_bit_hint = 1;
      word = 0;
      bit = 0;
      object_offset = 0;
    }
  else
    {
      /* First try to use the hint left from the previous allocation
	 to locate a clear bit in the in-use bitmap.  We've made sure
	 that the one-past-the-end bit is always set, so if the hint
	 has run over, this test will fail.  */
      unsigned hint = entry->next_bit_hint;
      word = hint / HOST_BITS_PER_LONG;
      bit = hint % HOST_BITS_PER_LONG;

      /* If the hint didn't work, scan the bitmap from the beginning.  */
      if ((entry->in_use_p[word] >> bit) & 1)
	{
	  word = bit = 0;
	  while (~entry->in_use_p[word] == 0)
	    ++word;

#if GCC_VERSION >= 3004
	  bit = __builtin_ctzl (~entry->in_use_p[word]);
#else
	  while ((entry->in_use_p[word] >> bit) & 1)
	    ++bit;
#endif

	  hint = word * HOST_BITS_PER_LONG + bit;
	}

      /* Next time, try the next bit.  */
      entry->next_bit_hint = hint + 1;

      object_offset = hint * object_size;
    }

  /* Set the in-use bit.  */
  entry->in_use_p[word] |= ((unsigned long) 1 << bit);

  /* Keep a running total of the number of free objects.  If this page
     fills up, we may have to move it to the end of the list if the
     next page isn't full.  If the next page is full, all subsequent
     pages are full, so there's no need to move it.  */
  if (--entry->num_free_objects == 0
      && entry->next != NULL
      && entry->next->num_free_objects > 0)
    {
      /* We have a new head for the list.  */
      G.pages[order] = entry->next;

      /* We are moving ENTRY to the end of the page list.
	 The new page at the head of the list will have NULL in
	 its PREV field and ENTRY will have NULL in its NEXT field.  */
      entry->next->prev = NULL;
      entry->next = NULL;

      /* Append ENTRY to the tail of the list.  */
      entry->prev = G.page_tails[order];
      G.page_tails[order]->next = entry;
      G.page_tails[order] = entry;
    }

  /* Calculate the object's address.  */
  result = entry->page + object_offset;
#ifdef GATHER_STATISTICS
  ggc_record_overhead (OBJECT_SIZE (order), OBJECT_SIZE (order) - size,
		       result PASS_MEM_STAT);
#endif

#ifdef ENABLE_GC_CHECKING
  /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
     exact same semantics in presence of memory bugs, regardless of
     ENABLE_VALGRIND_CHECKING.  We override this request below.  Drop the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, object_size));

  /* `Poison' the entire allocated object, including any padding at
     the end.  */
  memset (result, 0xaf, object_size);

  /* Make the bytes after the end of the object inaccessible.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS ((char *) result + size,
					    object_size - size));
#endif

  /* Tell Valgrind that the memory is there, but its content isn't
     defined.  The bytes at the end of the object are still marked
     inaccessible.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));

  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  G.allocated += object_size;

  /* For timevar statistics.  */
  timevar_ggc_mem_total += object_size;

#ifdef GATHER_STATISTICS
  {
    size_t overhead = object_size - size;

    G.stats.total_overhead += overhead;
    G.stats.total_allocated += object_size;
    G.stats.total_overhead_per_order[order] += overhead;
    G.stats.total_allocated_per_order[order] += object_size;

    if (size <= 32)
      {
	G.stats.total_overhead_under32 += overhead;
	G.stats.total_allocated_under32 += object_size;
      }
    if (size <= 64)
      {
	G.stats.total_overhead_under64 += overhead;
	G.stats.total_allocated_under64 += object_size;
      }
    if (size <= 128)
      {
	G.stats.total_overhead_under128 += overhead;
	G.stats.total_allocated_under128 += object_size;
      }
  }
#endif

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
	     "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
	     (unsigned long) size, (unsigned long) object_size, result,
	     (void *) entry);

  return result;
}

/* If P is not marked, mark it and return false.  Otherwise return true.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_set_mark (const void *p)
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is allocated.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
  gcc_assert (entry);

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  /* If the bit was previously set, skip it.  */
  if (entry->in_use_p[word] & mask)
    return 1;

  /* Otherwise set it, and decrement the free object count.  */
  entry->in_use_p[word] |= mask;
  entry->num_free_objects -= 1;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return 0;
}

/* Return 1 if P has been marked, zero otherwise.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_marked_p (const void *p)
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is allocated.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
  gcc_assert (entry);

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  return (entry->in_use_p[word] & mask) != 0;
}

/* Return the size of the gc-able object P.  */

size_t
ggc_get_size (const void *p)
{
  page_entry *pe = lookup_page_table_entry (p);
  return OBJECT_SIZE (pe->order);
}

/* Release the memory for object P.  */

void
ggc_free (void *p)
{
  page_entry *pe = lookup_page_table_entry (p);
  size_t order = pe->order;
  size_t size = OBJECT_SIZE (order);

#ifdef GATHER_STATISTICS
  ggc_free_overhead (p);
#endif

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
	     "Freeing object, actual size=%lu, at %p on %p\n",
	     (unsigned long) size, p, (void *) pe);

#ifdef ENABLE_GC_CHECKING
  /* Poison the data, to indicate the data is garbage.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (p, size));
  memset (p, 0xa5, size);
#endif
  /* Let valgrind know the object is free.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (p, size));

#ifdef ENABLE_GC_ALWAYS_COLLECT
  /* In the completely-anal-checking mode, we do *not* immediately free
     the data, but instead verify that the data is *actually* not
     reachable the next time we collect.  */
  {
    struct free_object *fo = XNEW (struct free_object);
    fo->object = p;
    fo->next = G.free_object_list;
    G.free_object_list = fo;
  }
#else
  {
    unsigned int bit_offset, word, bit;

    G.allocated -= size;

    /* Mark the object not-in-use.  */
    bit_offset = OFFSET_TO_BIT (((const char *) p) - pe->page, order);
    word = bit_offset / HOST_BITS_PER_LONG;
    bit = bit_offset % HOST_BITS_PER_LONG;
    pe->in_use_p[word] &= ~(1UL << bit);

    if (pe->num_free_objects++ == 0)
      {
	page_entry *p, *q;

	/* If the page is completely full, then it's supposed to
	   be after all pages that aren't.  Since we've freed one
	   object from a page that was full, we need to move the
	   page to the head of the list.

	   PE is the node we want to move.  Q is the previous node
	   and P is the next node in the list.  */
	q = pe->prev;
	if (q && q->num_free_objects == 0)
	  {
	    p = pe->next;

	    q->next = p;

	    /* If PE was at the end of the list, then Q becomes the
	       new end of the list.  If PE was not the end of the
	       list, then we need to update the PREV field for P.  */
	    if (!p)
	      G.page_tails[order] = q;
	    else
	      p->prev = q;

	    /* Move PE to the head of the list.  */
	    pe->next = G.pages[order];
	    pe->prev = NULL;
	    G.pages[order]->prev = pe;
	    G.pages[order] = pe;
	  }

	/* Reset the hint bit to point to the only free object.  */
	pe->next_bit_hint = bit_offset;
      }
  }
#endif
}

/* Subroutine of init_ggc which computes the pair of numbers used to
   perform division by OBJECT_SIZE (order) and fills in inverse_table[].

   This algorithm is taken from Granlund and Montgomery's paper
   "Division by Invariant Integers using Multiplication"
   (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
   constants).  */

static void
compute_inverse (unsigned order)
{
  size_t size, inv;
  unsigned int e;

  size = OBJECT_SIZE (order);
  e = 0;
  while (size % 2 == 0)
    {
      e++;
      size >>= 1;
    }

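  /* Find the multiplicative inverse of the odd part of SIZE modulo a
     power of two the width of size_t, using Newton-Raphson iteration;
     each pass doubles the number of correct low-order bits.  */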
  inv = size;
  while (inv * size != 1)
    inv = inv * (2 - inv*size);

  DIV_MULT (order) = inv;
  DIV_SHIFT (order) = e;
}

/* Initialize the ggc-mmap allocator.  */
void
init_ggc (void)
{
  unsigned order;

  G.pagesize = getpagesize();
  G.lg_pagesize = exact_log2 (G.pagesize);

#ifdef HAVE_MMAP_DEV_ZERO
  G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
  if (G.dev_zero_fd == -1)
    internal_error ("open /dev/zero: %m");
#endif

#if 0
  G.debug_file = fopen ("ggc-mmap.debug", "w");
#else
  G.debug_file = stdout;
#endif

#ifdef USING_MMAP
  /* StunOS has an amazing off-by-one error for the first mmap allocation
     after fiddling with RLIMIT_STACK.  The result, as hard as it is to
     believe, is an unaligned page allocation, which would cause us to
     hork badly if we tried to use it.  */
  {
    char *p = alloc_anon (NULL, G.pagesize);
    struct page_entry *e;
    if ((size_t)p & (G.pagesize - 1))
      {
	/* How losing.  Discard this one and try another.  If we still
	   can't get something useful, give up.  */

	p = alloc_anon (NULL, G.pagesize);
	gcc_assert (!((size_t)p & (G.pagesize - 1)));
      }

    /* We have a good page, might as well hold onto it...  */
    e = XCNEW (struct page_entry);
    e->bytes = G.pagesize;
    e->page = p;
    e->next = G.free_pages;
    G.free_pages = e;
  }
#endif

  /* Initialize the object size table.  */
  for (order = 0; order < HOST_BITS_PER_PTR; ++order)
    object_size_table[order] = (size_t) 1 << order;
  for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
    {
      size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];

      /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
	 so that we're sure of getting aligned memory.  */
      s = ROUND_UP (s, MAX_ALIGNMENT);
      object_size_table[order] = s;
    }

  /* Initialize the objects-per-page and inverse tables.  */
  for (order = 0; order < NUM_ORDERS; ++order)
    {
      objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
      if (objects_per_page_table[order] == 0)
	objects_per_page_table[order] = 1;
      compute_inverse (order);
    }

  /* Reset the size_lookup array to put appropriately sized objects in
     the special orders.  All objects bigger than the previous power
     of two, but no greater than the special size, should go in the
     new order.  */
  for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
    {
      int o;
      int i;

      i = OBJECT_SIZE (order);
      if (i >= NUM_SIZE_LOOKUP)
	continue;

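      /* Walk downward through the sizes that still map to the old
	 power-of-two order, redirecting each entry to the new order.  */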
      for (o = size_lookup[i]; o == size_lookup[i]; --i)
	size_lookup[i] = order;
    }

  G.depth_in_use = 0;
  G.depth_max = 10;
  G.depth = XNEWVEC (unsigned int, G.depth_max);

  G.by_depth_in_use = 0;
  G.by_depth_max = INITIAL_PTE_COUNT;
  G.by_depth = XNEWVEC (page_entry *, G.by_depth_max);
  G.save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
}

/* Start a new GGC zone.  */

struct alloc_zone *
new_ggc_zone (const char *name ATTRIBUTE_UNUSED)
{
  return NULL;
}

/* Destroy a GGC zone.  */
void
destroy_ggc_zone (struct alloc_zone *zone ATTRIBUTE_UNUSED)
{
}

/* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
   reflects reality.  Recalculate NUM_FREE_OBJECTS as well.  */

static void
ggc_recalculate_in_use_p (page_entry *p)
{
  unsigned int i;
  size_t num_objects;

  /* Because the past-the-end bit in in_use_p is always set, we
     pretend there is one additional object.  */
  num_objects = OBJECTS_IN_PAGE (p) + 1;

  /* Reset the free object count.  */
  p->num_free_objects = num_objects;

  /* Combine the IN_USE_P and SAVE_IN_USE_P arrays.  */
  for (i = 0;
       i < CEIL (BITMAP_SIZE (num_objects),
		 sizeof (*p->in_use_p));
       ++i)
    {
      unsigned long j;

      /* Something is in use if it is marked, or if it was in use in a
	 context further down the context stack.  */
      p->in_use_p[i] |= save_in_use_p (p)[i];

      /* Decrement the free object count for every object allocated.  */
      for (j = p->in_use_p[i]; j; j >>= 1)
	p->num_free_objects -= (j & 1);
    }

  gcc_assert (p->num_free_objects < num_objects);
}

/* Unmark all objects.  */

static void
clear_marks (void)
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
	{
	  size_t num_objects = OBJECTS_IN_PAGE (p);
	  size_t bitmap_size = BITMAP_SIZE (num_objects + 1);

	  /* The data should be page-aligned.  */
	  gcc_assert (!((size_t) p->page & (G.pagesize - 1)));

	  /* Pages that aren't in the topmost context are not collected;
	     nevertheless, we need their in-use bit vectors to store GC
	     marks.  So, back them up first.  */
	  if (p->context_depth < G.context_depth)
	    {
	      if (! save_in_use_p (p))
		save_in_use_p (p) = xmalloc (bitmap_size);
	      memcpy (save_in_use_p (p), p->in_use_p, bitmap_size);
	    }

	  /* Reset the number of free objects and clear the
	     in-use bits.  These will be adjusted by mark_obj.  */
	  p->num_free_objects = num_objects;
	  memset (p->in_use_p, 0, bitmap_size);

	  /* Make sure the one-past-the-end bit is always set.  */
	  p->in_use_p[num_objects / HOST_BITS_PER_LONG]
	    = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
	}
    }
}

/* Free all empty pages.  Partially empty pages need no attention
   because the `mark' bit doubles as an `unused' bit.  */

static void
sweep_pages (void)
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      /* The last page-entry to consider, regardless of entries
	 placed at the end of the list.  */
      page_entry * const last = G.page_tails[order];

      size_t num_objects;
      size_t live_objects;
      page_entry *p, *previous;
      int done;

      p = G.pages[order];
      if (p == NULL)
	continue;

      previous = NULL;
      do
	{
	  page_entry *next = p->next;

	  /* Loop until all entries have been examined.  */
	  done = (p == last);

	  num_objects = OBJECTS_IN_PAGE (p);

	  /* Add all live objects on this page to the count of
	     allocated memory.  */
	  live_objects = num_objects - p->num_free_objects;

	  G.allocated += OBJECT_SIZE (order) * live_objects;

	  /* Only objects on pages in the topmost context should get
	     collected.  */
	  if (p->context_depth < G.context_depth)
	    ;

	  /* Remove the page if it's empty.  */
	  else if (live_objects == 0)
	    {
	      /* If P was the first page in the list, then NEXT
		 becomes the new first page in the list, otherwise
		 splice P out of the forward pointers.  */
	      if (! previous)
		G.pages[order] = next;
	      else
		previous->next = next;

	      /* Splice P out of the back pointers too.  */
	      if (next)
		next->prev = previous;

	      /* Are we removing the last element?  */
	      if (p == G.page_tails[order])
		G.page_tails[order] = previous;
	      free_page (p);
	      p = previous;
	    }

	  /* If the page is full, move it to the end.  */
	  else if (p->num_free_objects == 0)
	    {
	      /* Don't move it if it's already at the end.  */
	      if (p != G.page_tails[order])
		{
		  /* Move p to the end of the list.  */
		  p->next = NULL;
		  p->prev = G.page_tails[order];
		  G.page_tails[order]->next = p;

		  /* Update the tail pointer...  */
		  G.page_tails[order] = p;

		  /* ... and the head pointer, if necessary.  */
		  if (! previous)
		    G.pages[order] = next;
		  else
		    previous->next = next;

		  /* And update the backpointer in NEXT if necessary.  */
		  if (next)
		    next->prev = previous;

		  p = previous;
		}
	    }

	  /* If we've fallen through to here, it's a page in the
	     topmost context that is neither full nor empty.  Such a
	     page must precede pages at lesser context depth in the
	     list, so move it to the head.  */
	  else if (p != G.pages[order])
	    {
	      previous->next = p->next;

	      /* Update the backchain in the next node if it exists.  */
	      if (p->next)
		p->next->prev = previous;

	      /* Move P to the head of the list.  */
	      p->next = G.pages[order];
	      p->prev = NULL;
	      G.pages[order]->prev = p;

	      /* Update the head pointer.  */
	      G.pages[order] = p;

	      /* Are we moving the last element?  */
	      if (G.page_tails[order] == p)
		G.page_tails[order] = previous;
	      p = previous;
	    }

	  previous = p;
	  p = next;
	}
      while (! done);

      /* Now, restore the in_use_p vectors for any pages from contexts
	 other than the current one.  */
      for (p = G.pages[order]; p; p = p->next)
	if (p->context_depth != G.context_depth)
	  ggc_recalculate_in_use_p (p);
    }
}

#ifdef ENABLE_GC_CHECKING
/* Clobber all free objects.  */

static void
poison_pages (void)
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      size_t size = OBJECT_SIZE (order);
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
	{
	  size_t num_objects;
	  size_t i;

	  if (p->context_depth != G.context_depth)
	    /* Since we don't do any collection for pages in pushed
	       contexts, there's no need to do any poisoning.  And
	       besides, the IN_USE_P array isn't valid until we pop
	       contexts.  */
	    continue;

	  num_objects = OBJECTS_IN_PAGE (p);
	  for (i = 0; i < num_objects; i++)
	    {
	      size_t word, bit;
	      word = i / HOST_BITS_PER_LONG;
	      bit = i % HOST_BITS_PER_LONG;
	      if (((p->in_use_p[word] >> bit) & 1) == 0)
		{
		  char *object = p->page + i * size;

		  /* Keep poison-by-write when we expect to use Valgrind,
		     so the exact same memory semantics is kept, in case
		     there are memory errors.  We override this request
		     below.  */
		  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (object, size));
		  memset (object, 0xa5, size);

		  /* Drop the handle to avoid handle leak.  */
		  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (object, size));
		}
	    }
	}
    }
}
#else
#define poison_pages()
#endif

#ifdef ENABLE_GC_ALWAYS_COLLECT
/* Validate that the reportedly free objects actually are.  */

static void
validate_free_objects (void)
{
  struct free_object *f, *next, *still_free = NULL;

  for (f = G.free_object_list; f ; f = next)
    {
      page_entry *pe = lookup_page_table_entry (f->object);
      size_t bit, word;

      bit = OFFSET_TO_BIT ((char *)f->object - pe->page, pe->order);
      word = bit / HOST_BITS_PER_LONG;
      bit = bit % HOST_BITS_PER_LONG;
      next = f->next;

      /* Make certain it isn't visible from any root.  Notice that we
	 do this check before sweep_pages merges save_in_use_p.  */
      gcc_assert (!(pe->in_use_p[word] & (1UL << bit)));

      /* If the object comes from an outer context, then retain the
	 free_object entry, so that we can verify that the address
	 isn't live on the stack in some outer context.  */
      if (pe->context_depth != G.context_depth)
	{
	  f->next = still_free;
	  still_free = f;
	}
      else
	free (f);
    }

  G.free_object_list = still_free;
}
#else
#define validate_free_objects()
#endif

/* Top level mark-and-sweep routine.  */

void
ggc_collect (void)
{
  /* Avoid frequent unnecessary work by skipping collection if the
     total allocations haven't expanded much since the last
     collection.  */
  float allocated_last_gc =
    MAX (G.allocated_last_gc, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);

  float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;

  if (G.allocated < allocated_last_gc + min_expand && !ggc_force_collect)
    return;

  timevar_push (TV_GC);
  if (!quiet_flag)
    fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file, "BEGIN COLLECTING\n");

  /* Zero the total allocated bytes.  This will be recalculated in the
     sweep phase.  */
  G.allocated = 0;

  /* Release the pages we freed the last time we collected, but didn't
     reuse in the interim.  */
  release_pages ();

  /* Indicate that we've seen collections at this context depth.  */
  G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1;

  clear_marks ();
  ggc_mark_roots ();
#ifdef GATHER_STATISTICS
  ggc_prune_overhead_list ();
#endif
  poison_pages ();
  validate_free_objects ();
  sweep_pages ();

  G.allocated_last_gc = G.allocated;

  timevar_pop (TV_GC);

  if (!quiet_flag)
    fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file, "END COLLECTING\n");
}

/* Print allocation statistics.  */
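/* SCALE converts a byte count to bytes, kilobytes, or megabytes;
   STAT_LABEL yields the unit suffix matching the choice SCALE makes.  */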
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
		  ? (x) \
		  : ((x) < 1024*1024*10 \
		     ? (x) / 1024 \
		     : (x) / (1024*1024))))
#define STAT_LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))

void
ggc_print_statistics (void)
{
  struct ggc_statistics stats;
  unsigned int i;
  size_t total_overhead = 0;

  /* Clear the statistics.  */
  memset (&stats, 0, sizeof (stats));

  /* Make sure collection will really occur.  */
  G.allocated_last_gc = 0;

  /* Collect and print the statistics common across collectors.  */
  ggc_print_common_statistics (stderr, &stats);

  /* Release free pages so that we will not count the bytes allocated
     there as part of the total allocated memory.  */
  release_pages ();

  /* Collect some information about the various sizes of
     allocation.  */
  fprintf (stderr,
           "Memory still allocated at the end of the compilation process\n");
  fprintf (stderr, "%-5s %10s  %10s  %10s\n",
	   "Size", "Allocated", "Used", "Overhead");
  for (i = 0; i < NUM_ORDERS; ++i)
    {
      page_entry *p;
      size_t allocated;
      size_t in_use;
      size_t overhead;

      /* Skip empty entries.  */
      if (!G.pages[i])
	continue;

      overhead = allocated = in_use = 0;

      /* Figure out the total number of bytes allocated for objects of
	 this size, and how many of them are actually in use.  Also figure
	 out how much memory the page table is using.  */
      for (p = G.pages[i]; p; p = p->next)
	{
	  allocated += p->bytes;
	  in_use +=
	    (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i);

	  overhead += (sizeof (page_entry) - sizeof (long)
		       + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1));
	}
      fprintf (stderr, "%-5lu %10lu%c %10lu%c %10lu%c\n",
	       (unsigned long) OBJECT_SIZE (i),
	       SCALE (allocated), STAT_LABEL (allocated),
	       SCALE (in_use), STAT_LABEL (in_use),
	       SCALE (overhead), STAT_LABEL (overhead));
      total_overhead += overhead;
    }
  fprintf (stderr, "%-5s %10lu%c %10lu%c %10lu%c\n", "Total",
	   SCALE (G.bytes_mapped), STAT_LABEL (G.bytes_mapped),
	   SCALE (G.allocated), STAT_LABEL (G.allocated),
	   SCALE (total_overhead), STAT_LABEL (total_overhead));

#ifdef GATHER_STATISTICS
  {
    fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");

    fprintf (stderr, "Total Overhead:                        %10lld\n",
             G.stats.total_overhead);
    fprintf (stderr, "Total Allocated:                       %10lld\n",
             G.stats.total_allocated);

    fprintf (stderr, "Total Overhead  under  32B:            %10lld\n",
             G.stats.total_overhead_under32);
    fprintf (stderr, "Total Allocated under  32B:            %10lld\n",
             G.stats.total_allocated_under32);
    fprintf (stderr, "Total Overhead  under  64B:            %10lld\n",
             G.stats.total_overhead_under64);
    fprintf (stderr, "Total Allocated under  64B:            %10lld\n",
             G.stats.total_allocated_under64);
    fprintf (stderr, "Total Overhead  under 128B:            %10lld\n",
             G.stats.total_overhead_under128);
    fprintf (stderr, "Total Allocated under 128B:            %10lld\n",
             G.stats.total_allocated_under128);

    for (i = 0; i < NUM_ORDERS; i++)
      if (G.stats.total_allocated_per_order[i])
        {
          fprintf (stderr, "Total Overhead  page size %7lu:     %10lld\n",
                   (unsigned long) OBJECT_SIZE (i),
                   G.stats.total_overhead_per_order[i]);
          fprintf (stderr, "Total Allocated page size %7lu:     %10lld\n",
                   (unsigned long) OBJECT_SIZE (i),
                   G.stats.total_allocated_per_order[i]);
        }
  }
#endif
}

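/* Bookkeeping used while writing a precompiled header: the on-disk
   count of objects of each order, plus the current base address and
   the number of objects written so far for each order.  */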
struct ggc_pch_data
{
  struct ggc_pch_ondisk
  {
    unsigned totals[NUM_ORDERS];
  } d;
  size_t base[NUM_ORDERS];
  size_t written[NUM_ORDERS];
};

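/* Allocate and zero the bookkeeping structure used while writing a PCH.  */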
struct ggc_pch_data *
init_ggc_pch (void)
{
  return XCNEW (struct ggc_pch_data);
}

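/* Count one object of SIZE bytes against the PCH totals for its order.  */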
void
ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
		      size_t size, bool is_string ATTRIBUTE_UNUSED,
		      enum gt_types_enum type ATTRIBUTE_UNUSED)
{
  unsigned order;

  if (size < NUM_SIZE_LOOKUP)
    order = size_lookup[size];
  else
    {
      order = 10;
      while (size > OBJECT_SIZE (order))
	order++;
    }

  d->d.totals[order]++;
}

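/* Return the total amount of memory needed for the PCH data: each
   order's objects, rounded up to a page boundary per order.  */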
size_t
ggc_pch_total_size (struct ggc_pch_data *d)
{
  size_t a = 0;
  unsigned i;

  for (i = 0; i < NUM_ORDERS; i++)
    a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
  return a;
}

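/* Given BASE, the address at which the PCH data will be mapped, assign
   each order its region within that block.  */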
void
ggc_pch_this_base (struct ggc_pch_data *d, void *base)
{
  size_t a = (size_t) base;
  unsigned i;

  for (i = 0; i < NUM_ORDERS; i++)
    {
      d->base[i] = a;
      a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
    }
}


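/* Return the address assigned to the next object of SIZE bytes, and
   advance its order's base pointer past it.  */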
char *
ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
		      size_t size, bool is_string ATTRIBUTE_UNUSED,
		      enum gt_types_enum type ATTRIBUTE_UNUSED)
{
  unsigned order;
  char *result;

  if (size < NUM_SIZE_LOOKUP)
    order = size_lookup[size];
  else
    {
      order = 10;
      while (size > OBJECT_SIZE (order))
	order++;
    }

  result = (char *) d->base[order];
  d->base[order] += OBJECT_SIZE (order);
  return result;
}

void
ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
		       FILE *f ATTRIBUTE_UNUSED)
{
  /* Nothing to do.  */
}

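/* Write object X of SIZE bytes to the PCH file F, padding it out to
   its order's object size when the two differ.  */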
void
ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
		      FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
		      size_t size, bool is_string ATTRIBUTE_UNUSED)
{
  unsigned order;
  static const char emptyBytes[256];

  if (size < NUM_SIZE_LOOKUP)
    order = size_lookup[size];
  else
    {
      order = 10;
      while (size > OBJECT_SIZE (order))
	order++;
    }

  if (fwrite (x, size, 1, f) != 1)
    fatal_error ("can't write PCH file: %m");

  /* If SIZE is not the same as OBJECT_SIZE(order), then we need to pad the
     object out to OBJECT_SIZE(order).  This happens for strings.  */

  if (size != OBJECT_SIZE (order))
    {
      unsigned padding = OBJECT_SIZE(order) - size;

      /* To speed small writes, we use a nulled-out array that's larger
         than most padding requests as the source for our null bytes.  This
         permits us to do the padding with fwrite() rather than fseek(), and
         limits the chance the OS may try to flush any outstanding writes.  */
      if (padding <= sizeof(emptyBytes))
        {
          if (fwrite (emptyBytes, 1, padding, f) != padding)
            fatal_error ("can't write PCH file");
        }
      else
        {
          /* Larger than our buffer?  Just default to fseek.  */
          if (fseek (f, padding, SEEK_CUR) != 0)
            fatal_error ("can't write PCH file");
        }
    }

  d->written[order]++;
  if (d->written[order] == d->d.totals[order]
      && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order),
				   G.pagesize),
		SEEK_CUR) != 0)
    fatal_error ("can't write PCH file: %m");
}

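/* Write the trailing record of per-order totals and release the PCH
   bookkeeping data.  */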
void
ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
{
  if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
    fatal_error ("can't write PCH file: %m");
  free (d);
}

/* Move the PCH PTE entries just added to the end of by_depth, to the
   front.  */

static void
move_ptes_to_front (int count_old_page_tables, int count_new_page_tables)
{
  unsigned i;

  /* First, we swap the new entries to the front of the varrays.  */
  page_entry **new_by_depth;
  unsigned long **new_save_in_use;

  new_by_depth = XNEWVEC (page_entry *, G.by_depth_max);
  new_save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);

  memcpy (&new_by_depth[0],
	  &G.by_depth[count_old_page_tables],
	  count_new_page_tables * sizeof (void *));
  memcpy (&new_by_depth[count_new_page_tables],
	  &G.by_depth[0],
	  count_old_page_tables * sizeof (void *));
  memcpy (&new_save_in_use[0],
	  &G.save_in_use[count_old_page_tables],
	  count_new_page_tables * sizeof (void *));
  memcpy (&new_save_in_use[count_new_page_tables],
	  &G.save_in_use[0],
	  count_old_page_tables * sizeof (void *));

  free (G.by_depth);
  free (G.save_in_use);

  G.by_depth = new_by_depth;
  G.save_in_use = new_save_in_use;

  /* Now update all the index_by_depth fields.  */
  for (i = G.by_depth_in_use; i > 0; --i)
    {
      page_entry *p = G.by_depth[i-1];
      p->index_by_depth = i-1;
    }

  /* And last, we update the depth pointers in G.depth.  The first
     entry is already 0, and context 0 entries always start at index
     0, so there is nothing to update in the first slot.  We need a
     second slot, only if we have old ptes, and if we do, they start
     at index count_new_page_tables.  */
  if (count_old_page_tables)
    push_depth (count_new_page_tables);
}

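/* Read back a PCH file: recover the per-order totals from F and
   rebuild the page-table entries for the objects now mapped at ADDR.  */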
void
ggc_pch_read (FILE *f, void *addr)
{
  struct ggc_pch_ondisk d;
  unsigned i;
  char *offs = addr;
  unsigned long count_old_page_tables;
  unsigned long count_new_page_tables;

  count_old_page_tables = G.by_depth_in_use;

  /* We've just read in a PCH file.  So, every object that used to be
     allocated is now free.  */
  clear_marks ();
#ifdef ENABLE_GC_CHECKING
  poison_pages ();
#endif

  /* No object read from a PCH file should ever be freed.  So, set the
     context depth to 1, and set the depth of all the currently-allocated
     pages to be 1 too.  PCH pages will have depth 0.  */
  gcc_assert (!G.context_depth);
  G.context_depth = 1;
  for (i = 0; i < NUM_ORDERS; i++)
    {
      page_entry *p;
      for (p = G.pages[i]; p != NULL; p = p->next)
	p->context_depth = G.context_depth;
    }

  /* Allocate the appropriate page-table entries for the pages read from
     the PCH file.  */
  if (fread (&d, sizeof (d), 1, f) != 1)
    fatal_error ("can't read PCH file: %m");

  for (i = 0; i < NUM_ORDERS; i++)
    {
      struct page_entry *entry;
      char *pte;
      size_t bytes;
      size_t num_objs;
      size_t j;

      if (d.totals[i] == 0)
	continue;

      bytes = ROUND_UP (d.totals[i] * OBJECT_SIZE (i), G.pagesize);
      num_objs = bytes / OBJECT_SIZE (i);
      entry = xcalloc (1, (sizeof (struct page_entry)
			   - sizeof (long)
			   + BITMAP_SIZE (num_objs + 1)));
      entry->bytes = bytes;
      entry->page = offs;
      entry->context_depth = 0;
      offs += bytes;
      entry->num_free_objects = 0;
      entry->order = i;

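      /* Mark every object on the page as in use, including the
	 one-past-the-end bit.  */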
      for (j = 0;
	   j + HOST_BITS_PER_LONG <= num_objs + 1;
	   j += HOST_BITS_PER_LONG)
	entry->in_use_p[j / HOST_BITS_PER_LONG] = -1;
      for (; j < num_objs + 1; j++)
	entry->in_use_p[j / HOST_BITS_PER_LONG]
	  |= 1L << (j % HOST_BITS_PER_LONG);

      for (pte = entry->page;
	   pte < entry->page + entry->bytes;
	   pte += G.pagesize)
	set_page_table_entry (pte, entry);

      if (G.page_tails[i] != NULL)
	G.page_tails[i]->next = entry;
      else
	G.pages[i] = entry;
      G.page_tails[i] = entry;

      /* We start off by just adding all the new information to the
	 end of the varrays; later, we will move the new information
	 to the front of the varrays, as the PCH page tables are at
	 context 0.  */
      push_by_depth (entry, 0);
    }

  /* Now, we update the various data structures that speed page table
     handling.  */
  count_new_page_tables = G.by_depth_in_use - count_old_page_tables;

  move_ptes_to_front (count_old_page_tables, count_new_page_tables);

  /* Update the statistics.  */
  G.allocated = G.allocated_last_gc = offs - (char *)addr;
}