/*
 * Copyright (c) 2011 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */
/*
    Definitions.h
    Global Definitions
    Copyright (c) 2004-2011 Apple Inc. All rights reserved.
 */

#pragma once
#ifndef __AUTO_DEFS__
#define __AUTO_DEFS__

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/resource.h>

#include <mach/mach_init.h>
#include <mach/mach_port.h>
#include <mach/mach_time.h>
#include <mach/task.h>
#include <mach/thread_act.h>
#include <mach/vm_map.h>
#include <sys/mman.h>

#include <pthread.h>
#include <unistd.h>
#include <malloc/malloc.h>

#include <map>
#include <vector>
#include <ext/hash_map>
#include <ext/hash_set>
#include <libkern/OSAtomic.h>
#include <System/pthread_machdep.h>
#include <TargetConditionals.h>

#include "Environment.h"
#include "auto_impl_utilities.h"

//
// utilities and definitions used throughout the Auto namespace
//

#ifdef DEBUG
extern "C" void* WatchPoint;
#endif
extern "C" malloc_zone_t *aux_zone;
extern "C" const char *auto_prelude(void);


namespace Auto {

    //
    // auto_prelude
    //
    // Generate the prelude used for error reporting.
    //
    inline const char *prelude(void) { return auto_prelude(); }


#if defined(DEBUG)
#define ASSERTION(expression) if(!(expression)) { \
        malloc_printf("*** %s: Assertion %s %s.%d\n", prelude(), #expression, __FILE__, __LINE__); \
        __builtin_trap(); \
    }
#else
#define ASSERTION(expression) (void)(expression)
#endif

    //
    // Workaround for declaration problems
    //
    typedef kern_return_t (*auto_memory_reader_t)(task_t remote_task, vm_address_t remote_address, vm_size_t size, void **local_memory);
    typedef void (*auto_vm_range_recorder_t)(task_t, void *, unsigned type, vm_range_t *, unsigned);


    typedef unsigned long usword_t;                         // computational word guaranteed to be unsigned
                                                            // assumed to be either 32 or 64 bit
    typedef   signed long  sword_t;                         // computational word guaranteed to be signed
                                                            // assumed to be either 32 or 64 bit

    inline usword_t auto_atomic_compare_and_swap(usword_t old_value, usword_t new_value, volatile usword_t *addr)
    {
#if defined(__x86_64__)
        return OSAtomicCompareAndSwap64(old_value, new_value, (volatile int64_t *)addr);
#elif defined(__i386__)
        return OSAtomicCompareAndSwap32(old_value, new_value, (volatile int32_t *)addr);
#else
#error unknown architecture
#endif
    }

    inline usword_t auto_atomic_add(sword_t amount, volatile usword_t *addr)
    {
        usword_t old_value, new_value;
        do {
            old_value = *addr;
            new_value = old_value + amount;
        } while (!auto_atomic_compare_and_swap(old_value, new_value, addr));
        return new_value;
    }
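
    // Illustrative use of the atomic primitives above (a sketch only; 'counter'
    // is a hypothetical variable, not something defined by this header):
    //
    //   static volatile usword_t counter = 0;
    //   auto_atomic_add(1, &counter);     // atomic increment, returns the new value
    //   auto_atomic_add(-1, &counter);    // atomic decrement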


    //
    // Useful constants (descriptives for self commenting)
    //
    enum {

        page_size = 0x1000u,                                // vm_page_size but faster since we don't have to load global
        page_size_log2 = 12,                                // ilog2 of page_size

        bits_per_byte = 8,                                  // standard bits per byte
        bits_per_byte_log2 = 3,                             // ilog2 of bits_per_byte

        is_64BitWord = sizeof(usword_t) == 8u,              // true if 64-bit computational word
        is_32BitWord = sizeof(usword_t) == 4u,              // true if 32-bit computational word

        bytes_per_word = is_64BitWord ? 8u : 4u,            // number of bytes in an unsigned long word
        bytes_per_word_log2 = is_64BitWord ? 3u : 2u,       // ilog2 of bytes_per_word

        bits_per_word = is_64BitWord ? 64u : 32u,           // number of bits in an unsigned long word
        bits_per_word_log2 = is_64BitWord ? 6u : 5u,        // ilog2 of bits_per_word

        bytes_per_quad = 16,                                // bytes in a quad word (vector)
        bytes_per_quad_log2 = 4,                            // ilog2 of bytes_per_quad

        bits_per_quad = 128,                                // bits in a quad word (vector)
        bits_per_quad_log2 = 7,                             // ilog2 of bits_per_quad

        bits_mask = (usword_t)(bits_per_word - 1),          // mask to get the bit index (shift) in a word

        all_zeros = (usword_t)0,                            // a word of all 0 bits
        all_ones = ~all_zeros,                              // a word of all 1 bits

        not_found = all_ones,                               // a negative result of search methods

        pointer_alignment = is_64BitWord ? 3u : 2u,         // bit alignment required for pointers
        block_alignment = is_64BitWord ? 5u : 4u,           // bit alignment required for allocated blocks
    };
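
    // How these constants relate (illustrative only):
    //
    //   page_size     == (usword_t)1 << page_size_log2     // 0x1000 == 1 << 12
    //   bits_per_word == bytes_per_word * bits_per_byte
    //   bits_mask     == bits_per_word - 1                  // bit index within a word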


    //
    // Useful functions
    //


    //
    // is_all_ones
    //
    // Returns true if the value 'x' contains all 1s.
    //
    inline const bool is_all_ones(usword_t x) { return !(~x); }


    //
    // is_all_zeros
    //
    // Returns true if the value 'x' contains all 0s.
    //
    inline const bool is_all_zeros(usword_t x) { return !x; }

    //
    // is_some_ones
    //
    // Returns true if the value 'x' contains some 1s.
    //
    inline const bool is_some_ones(usword_t x) { return x != 0; }


    //
    // is_some_zeros
    //
    // Returns true if the value 'x' contains some 0s.
    //
    inline const bool is_some_zeros(usword_t x) { return ~x != 0; }


    //
    // displace
    //
    // Adjust an address by a specified number of bytes.
    //
    inline void *displace(void *address, const intptr_t offset) { return (void *)((char *)address + offset); }

    //
    // min
    //
    // Return the minimum of two usword_t values.
    //
    static inline const usword_t min(usword_t a, usword_t b) { return a < b ? a : b; }


    //
    // max
    //
    // Return the maximum of two usword_t values.
    //
    static inline const usword_t max(usword_t a, usword_t b) { return a > b ? a : b; }


    //
    // mask
    //
    // Generate a sequence of n one bits beginning with the least significant bit.
    //
    static inline const usword_t mask(usword_t n) {
        ASSERTION(0 < n && n <= bits_per_word);
        return (2L << (n - 1)) - 1;
    }
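
    // For example (illustrative): mask(3) == 0x7, and mask(page_size_log2) == 0xFFF,
    // the offset-within-page mask used by align_down()/align_up() below.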


    //
    // is_power_of_2
    //
    // Returns true if x is an exact power of 2 (note: also true for x == 0).
    //
    static inline const bool is_power_of_2(usword_t x) { return ((x - 1) & x) == 0; }


    //
    // count_leading_zeros
    //
    // Count the number of leading zeroes.
    //
    static inline const usword_t count_leading_zeros(register usword_t value) {
    #if __LP64__
        return value ? __builtin_clzll(value) : bits_per_word;
    #else
        return value ? __builtin_clz(value) : bits_per_word;
    #endif
    }


    //
    // rotate_bits_left
    //
    // Rotates bits to the left 'n' bits.
    //
    inline usword_t rotate_bits_left(usword_t value, usword_t n) { ASSERTION(0 < n && n < bits_per_word); return (value << n) | (value >> (bits_per_word - n)); }


    //
    // rotate_bits_right
    //
    // Rotates bits to the right 'n' bits.
    //
    inline usword_t rotate_bits_right(usword_t value, usword_t n) { ASSERTION(0 < n && n < bits_per_word); return (value << (bits_per_word - n)) | (value >> n); }


    //
    // ilog2
    //
    // Compute the integer log2 of x such that (x >> ilog2(x)) == 1; ilog2(0) == -1 (i.e., all_ones).
    //
    static inline const usword_t ilog2(register usword_t value) { return (bits_per_word - 1) - count_leading_zeros(value); }
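
    // Illustrative values: ilog2(1) == 0, ilog2(page_size) == page_size_log2,
    // and for any power of two p, (p >> ilog2(p)) == 1.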


    //
    // partition
    //
    // Determine how many sets of size 'y' are needed to hold 'x', i.e., ceil(x / y).
    //
    static inline const usword_t partition(usword_t x, usword_t y) { return (x + y - 1) / y; }


    //
    // partition2
    //
    // Determine how many sets of size 2^'y' are needed to hold 'x'.
    //
    static inline const usword_t partition2(usword_t x, usword_t y) { return (x + mask(y)) >> y; }


    //
    // align
    //
    // Align 'x' up to the nearest multiple of alignment 'y'.
    //
    static inline const usword_t align(usword_t x, usword_t y) { return partition(x, y) * y; }


    //
    // align2
    //
    // Align 'x' up to the nearest multiple of the alignment specified by 2^'y'.
    //
    static inline const usword_t align2(usword_t x, usword_t y) { usword_t m = mask(y); return (x + m) & ~m; }
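
    // Worked example (illustrative): align2(5000, page_size_log2) == 0x2000,
    // i.e., 5000 bytes rounded up to the next page boundary (two 4K pages).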


    //
    // align_down
    //
    // Align an address down to the nearest address where the low 'n' bits are zero.
    //
    static inline void *align_down(void *address, usword_t n = page_size_log2) {
        usword_t m = mask(n);
        return (void *)((uintptr_t)address & ~m);
    }


    //
    // align_up
    //
    // Align an address up to the nearest address where the low 'n' bits are zero.
    //
    static inline void *align_up(void *address, usword_t n = page_size_log2) {
        usword_t m = mask(n);
        return (void *)(((uintptr_t)address + m) & ~m);
    }


    //
    // count_trailing_bits
    //
    // Count the number of trailing bits, that is, the number of bits following the leading zeroes.
    // Equivalently, ilog2(value) + 1.
    //
    static inline const usword_t count_trailing_bits(register usword_t value) { return bits_per_word - count_leading_zeros(value); }


    //
    // trailing_zeros
    //
    // Return a mask of ones for each consecutive zero starting at the least significant bit.
    //
    static inline const usword_t trailing_zeros(usword_t x) { return (x - 1) & ~x; }


    //
    // trailing_ones
    //
    // Return a mask of ones for each consecutive one starting at the least significant bit.
    //
    static inline const usword_t trailing_ones(usword_t x) { return x & ~(x + 1); }


    //
    // count_trailing_zeros
    //
    // Return a count of consecutive zeros starting at the least significant bit.
    //
    static inline const usword_t count_trailing_zeros(usword_t x) { return count_trailing_bits(trailing_zeros(x)); }


    //
    // count_trailing_ones
    //
    // Return a count of consecutive ones starting at the least significant bit.
    //
    static inline const usword_t count_trailing_ones(usword_t x) { return count_trailing_bits(trailing_ones(x)); }
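
    // Worked example (illustrative), for x == 0b101000:
    //   trailing_zeros(x)       == 0b000111   (mask of the three low zeros)
    //   count_trailing_zeros(x) == 3
    //   trailing_ones(x)        == 0          (lowest bit is not set)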


    //
    // is_bit_aligned
    //
    // Returns true if the specified address is aligned to a specific bit alignment.
    //
    inline bool is_bit_aligned(void *address, usword_t n) { return !((uintptr_t)address & mask(n)); }


    //
    // is_pointer_aligned
    //
    // Returns true if the specified address is aligned on a word boundary.
    //
    inline bool is_pointer_aligned(void *address) { return is_bit_aligned(address, pointer_alignment); }


    //
    // is_block_aligned
    //
    // Returns true if the specified address is aligned on a block boundary (16/32 bytes).
    //
    inline bool is_block_aligned(void *address) { return is_bit_aligned(address, block_alignment); }


    //
    // is_equal
    //
    // String equality.
    //
    inline bool is_equal(const char *x, const char *y) { return strcmp(x, y) == 0; }


    //
    // error
    //
    // Report an error.
    //
    inline void error(const char *msg, const void *address = NULL) {
        if (address) malloc_printf("*** %s: agc error for object %p: %s\n", prelude(), address, msg);
        else         malloc_printf("*** %s: agc error: %s\n", prelude(), msg);
#if 0 && defined(DEBUG)
        __builtin_trap();
#endif
    }


    //
    // allocate_memory
    //
    // Allocate vm memory aligned to the specified alignment.
    //
    inline void *allocate_memory(usword_t size, usword_t alignment = page_size, signed label = VM_MEMORY_MALLOC) {
        // start search at address zero
        vm_address_t address = 0;

#if 0
        switch (label) {
        default:
        case VM_MEMORY_MALLOC: label = VM_MEMORY_APPLICATION_SPECIFIC_1; break; // 240 admin
        case VM_MEMORY_MALLOC_SMALL: label = VM_MEMORY_APPLICATION_SPECIFIC_1 + 1; break; // 241 small/med
        case VM_MEMORY_MALLOC_LARGE: label = VM_MEMORY_APPLICATION_SPECIFIC_1 + 2; break; // 242 large
        }
#endif
        // vm allocate space
        kern_return_t err = vm_map(mach_task_self(), &address, size, alignment - 1,
                                     VM_FLAGS_ANYWHERE | VM_MAKE_TAG(label),    // first available space
                                     MACH_PORT_NULL,                            // NULL object, so dynamically allocated
                                     0,                                         // offset into object
                                     FALSE,                                     // no need to copy the object
                                     VM_PROT_DEFAULT,                           // current protection
                                     VM_PROT_ALL,                               // maximum protection must be VM_PROT_ALL for madvise() to work. <rdar://problem/7792285>
                                     VM_INHERIT_DEFAULT);                       // standard copy-on-write at fork()

        // verify allocation
        if (err != KERN_SUCCESS) {
            malloc_printf("*** %s: Zone::Cannot allocate 0x%lx bytes\n", prelude(), size);
            return NULL;
        }

#if defined(DEBUG)
        if (Environment::print_allocs) malloc_printf("vm_map @%p %lu\n", (void *)address, size);
#endif
        // return result
        return (void *)address;
    }
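
    // Illustrative usage (a sketch; 'size_needed' is a hypothetical value):
    //
    //   usword_t bytes = align2(size_needed, page_size_log2);
    //   void *block = allocate_memory(bytes);      // page-aligned, zero-filled vm memory
    //   if (block) { /* ... */ deallocate_memory(block, bytes); }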


    //
    // deallocate_memory
    //
    // Deallocate vm memory.
    //
    inline void deallocate_memory(void *address, usword_t size) {
#if defined(DEBUG)
        if (Environment::print_allocs) malloc_printf("vm_deallocate @%p %lu\n", address, size);
#endif
        kern_return_t err = vm_deallocate(mach_task_self(), (vm_address_t)address, size);
        ASSERTION(err == KERN_SUCCESS);
    }

    //
    // copy_memory
    //
    // Copy vm pages.
    //
    inline void copy_memory(void *dest, void *source, usword_t size) {
        kern_return_t err = vm_copy(mach_task_self(), (vm_address_t)source, size, (vm_address_t)dest);
        ASSERTION(err == KERN_SUCCESS);
    }

    //
    // uncommit_memory
    //
    // Give memory back to the system.
    //
    void uncommit_memory(void *address, usword_t size);

    //
    // commit_memory
    //
    // Get uncommitted memory back from the system.
    //
    void commit_memory(void *address, usword_t size);


    //
    // guard_page
    //
    // Guards one page of memory from all access.
    //
    inline void guard_page(void *address) { vm_protect(mach_task_self(), (vm_address_t)address, page_size, false, VM_PROT_NONE); }


    //
    // unguard_page
    //
    // Removes the guard from a page.
    //
    inline void unguard_page(void *address) { vm_protect(mach_task_self(), (vm_address_t)address, page_size, false, VM_PROT_DEFAULT); }


    //
    // allocate_guarded_memory
    //
    // Allocate vm memory bounded by guard pages at either end.
    //
    inline void *allocate_guarded_memory(usword_t size) {
        usword_t needed = align2(size, page_size_log2);
        // allocate two extra pages, one for either end
        void *allocation = allocate_memory(needed + 2 * page_size, page_size, VM_MEMORY_MALLOC);

        if (allocation) {
            // front guard
            guard_page(allocation);
            // rear guard
            guard_page(displace(allocation, page_size + needed));
            // return allocation skipping front guard
            return displace(allocation, page_size);
        }

        // return NULL
        return allocation;
    }
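
    // Resulting layout (illustrative):
    //
    //   [ guard page | needed bytes (size rounded up to whole pages) | guard page ]
    //                ^-- returned pointer; touching either guard page traps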


    //
    // deallocate_guarded_memory
    //
    // Deallocate vm memory and the surrounding guard pages.
    //
    inline void deallocate_guarded_memory(void *address, usword_t size) {
        usword_t needed = align2(size, page_size_log2);
        deallocate_memory(displace(address, -page_size), needed + 2 * page_size);
    }


    //
    // watchpoint
    //
    // Trap if the address matches the specified trigger.
    //
    inline void watchpoint(void *address) {
#if defined(DEBUG)
        if (address == WatchPoint) __builtin_trap();
#endif
    }


    //
    // micro_time
    //
    // Returns execution time in microseconds (not real time).
    //
    uint64_t micro_time();


    //
    // MicroTimer
    //
    // Execution timer convenience class.
    //
    class MicroTimer {
        uint64_t _start, _stop;
    public:
        MicroTimer() : _start(0), _stop(0) {}
        void start() { _start = micro_time(); }
        void stop() { _stop = micro_time(); }
        uint64_t elapsed() { return _stop - _start; }
    };

    //
    // nano_time
    //
    // Returns machine time in nanoseconds (rolls over rapidly).
    //
    double nano_time();

    //
    // NanoTimer
    //
    // Wall clock execution timer.
    //
    class NanoTimer {
        uint64_t _start, _stop;
        mach_timebase_info_data_t &_timebase;
        static mach_timebase_info_data_t &cached_timebase_info();
    public:
        NanoTimer() : _start(0), _stop(0), _timebase(cached_timebase_info()) {}

        void start()        { _start = mach_absolute_time(); }
        void stop()         { _stop = mach_absolute_time(); }
        uint64_t elapsed()  { return ((_stop - _start) * _timebase.numer) / _timebase.denom; }
    };
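
    // Illustrative timing sketch ('work_to_measure' is a hypothetical function):
    //
    //   NanoTimer timer;
    //   timer.start();
    //   work_to_measure();
    //   timer.stop();
    //   uint64_t ns = timer.elapsed();   // wall-clock nanoseconds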


    //
    // zone locks
    //
    extern "C" void auto_gc_lock(malloc_zone_t *zone);
    extern "C" void auto_gc_unlock(malloc_zone_t *zone);

    //----- MemoryReader -----//

    //
    // Used to read another task's memory.
    //

    class MemoryReader {
        task_t                  _task;                          // task being probed
        auto_memory_reader_t    _reader;                        // reader used to load task memory
      public:
        MemoryReader(task_t task, auto_memory_reader_t reader) : _task(task), _reader(reader) {}

        //
        // read
        //
        // Read memory from the probed task into the current task.
        //
        inline void *read(void *task_address, usword_t size) {
            void *local_address;                           // location where memory was read
            kern_return_t err = _reader(_task, (vm_address_t)task_address, (vm_size_t)size, &local_address);
            if (err) return NULL;
            return local_address;
        }
    };

    //----- Preallocated -----//

    //
    // Used in classes where memory is presupplied.
    //

    class Preallocated {

      public:

        // prevent incorrect use of new
        void *operator new(size_t size) { error("Must use alternate form of new operator."); return aux_malloc(size); }

        // must supply an address
        void *operator new(size_t size, void *address) { return address; }

        // do not delete
        void operator delete(void *x) {  }

    };
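
    // Illustrative placement use (a sketch; 'Thing' is a hypothetical subclass
    // of Preallocated):
    //
    //   void *spot = allocate_memory(sizeof(Thing));
    //   Thing *t = new (spot) Thing();    // uses the placement form supplied above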


    //----- AuxAllocated -----//

    //
    // Used in classes where memory needs to be allocated from aux malloc.
    //

    class AuxAllocated {

      public:

        //
        // allocator
        //
        inline void *operator new(const size_t size) {
            void *memory = aux_malloc(size);
            if (!memory) error("Failed to allocate memory for auto internal use.");
            return memory;
        }

        inline void *operator new(const size_t size, const size_t extra) {
            void *memory = aux_malloc(size + extra);
            if (!memory) error("Failed to allocate memory for auto internal use.");
            return memory;
        }


        //
        // deallocator
        //
        inline void operator delete(void *address) {
            if (address) aux_free(address);
        }
    };
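
    // Illustrative use (a sketch; 'Bitmap' is a hypothetical subclass):
    //
    //   class Bitmap : public AuxAllocated { /* ... */ };
    //   Bitmap *map = new Bitmap();       // allocated in aux malloc
    //   delete map;                       // returned via aux_free()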


    //----- AuxAllocator -----//

    //
    // Support for STL allocation in aux malloc.
    //

    template <typename T> struct AuxAllocator {

        typedef T                 value_type;
        typedef value_type*       pointer;
        typedef const value_type *const_pointer;
        typedef value_type&       reference;
        typedef const value_type& const_reference;
        typedef size_t            size_type;
        typedef ptrdiff_t         difference_type;

        template <typename U> struct rebind { typedef AuxAllocator<U> other; };

        template <typename U> AuxAllocator(const AuxAllocator<U>&) {}
        AuxAllocator() {}
        AuxAllocator(const AuxAllocator&) {}
        ~AuxAllocator() {}

        pointer address(reference x) const { return &x; }
        const_pointer address(const_reference x) const { return &x; }

        pointer allocate(size_type n, const_pointer = 0) {
            return static_cast<pointer>(aux_calloc(n, sizeof(T)));
        }

        void deallocate(pointer p, size_type) { ::aux_free(p); }

        size_type max_size() const {
            return static_cast<size_type>(-1) / sizeof(T);
        }

        void construct(pointer p, const value_type& x) {
            new(p) value_type(x);
        }

        void destroy(pointer p) { p->~value_type(); }

        // assignment intentionally declared but not defined
        void operator=(const AuxAllocator&);

    };


    template<> struct AuxAllocator<void> {
        typedef void        value_type;
        typedef void*       pointer;
        typedef const void *const_pointer;

        template <typename U> struct rebind { typedef AuxAllocator<U> other; };
    };


    template <typename T>
    inline bool operator==(const AuxAllocator<T>&,
                           const AuxAllocator<T>&) {
        return true;
    }


    template <typename T>
    inline bool operator!=(const AuxAllocator<T>&,
                           const AuxAllocator<T>&) {
        return false;
    }

    struct AuxPointerLess {
        bool operator()(const void *p1, const void *p2) const {
            return p1 < p2;
        }
    };

    struct AuxPointerEqual {
        bool operator()(void *p1, void *p2) const {
            return p1 == p2;
        }
    };

    struct AuxPointerHash {
        uintptr_t operator()(void *p) const {
            return (uintptr_t)p;
        }
    };

    typedef std::vector<void *, AuxAllocator<void *> > PtrVector;
    typedef std::map<void *, int, AuxPointerLess, AuxAllocator<std::pair<void * const, int> > > PtrIntMap;
    typedef std::map<void *, void *, AuxPointerLess, AuxAllocator<std::pair<void * const, void * const> > > PtrPtrMap;
    typedef __gnu_cxx::hash_map<void *, void *, AuxPointerHash, AuxPointerEqual, AuxAllocator<void *> > PtrPtrHashMap;
    typedef __gnu_cxx::hash_map<void *, PtrPtrHashMap, AuxPointerHash, AuxPointerEqual, AuxAllocator<void *> > PtrAssocHashMap;
    typedef __gnu_cxx::hash_map<void *, int, AuxPointerHash, AuxPointerEqual, AuxAllocator<void *> > PtrIntHashMap;
    typedef __gnu_cxx::hash_map<void *, size_t, AuxPointerHash, AuxPointerEqual, AuxAllocator<void *> > PtrSizeHashMap;
    typedef __gnu_cxx::hash_set<void *, AuxPointerHash, AuxPointerEqual, AuxAllocator<void *> > PtrHashSet;
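
    // Illustrative use of the typedefs above (a sketch; 'p' and 'q' are
    // hypothetical pointers):
    //
    //   PtrHashSet seen;
    //   seen.insert(p);
    //   if (seen.find(q) != seen.end()) { /* q was already recorded */ }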

    //
    // PointerArray
    // Stores a contiguous array of pointers, which is resized by amortized doubling.
    // Uses a MemoryAllocator template parameter which must implement the methods:
    //   void *allocate_memory(usword_t size);
    //   void deallocate_memory(void *pointer, usword_t size);
    //   void uncommit_memory(void *pointer, usword_t size);
    //   void copy_memory(void *dest, void *source, usword_t size);
    //

    template <typename MemoryAllocator>
    class PointerArray : AuxAllocated {
        usword_t                        _count;
        usword_t                        _capacity;
        void                          **_buffer;
        MemoryAllocator                 _allocator;
    public:
        PointerArray() : _count(0), _capacity(0), _buffer(NULL) {}
        PointerArray(MemoryAllocator allocator) : _count(0), _capacity(0), _buffer(NULL), _allocator(allocator) {}
        ~PointerArray()                 { if (_buffer) _allocator.deallocate_memory(_buffer, _capacity * sizeof(void *)); }

        usword_t count()          const { return _count; }
        void clear_count()              { _count = 0; }
        void set_count(usword_t n)      { _count = n; }
        void **buffer()                 { return _buffer; }
        usword_t size()                 { return _capacity * sizeof(void*); }

        void uncommit()                 { if (_buffer) uncommit_memory(_buffer, _capacity * sizeof(void*)); }
        void commit()                   { if (_buffer) commit_memory(_buffer, _capacity * sizeof(void*)); }

        void grow() {
            if (!_buffer) {
                // start off with 1 page.
                _capacity = page_size / sizeof(void*);
                _buffer = (void **) _allocator.allocate_memory(page_size);
            } else {
                // double the capacity.
                vm_size_t old_size = _capacity * sizeof(void *);
                void **new_buffer = (void **) _allocator.allocate_memory(old_size * 2);
                if (!new_buffer) {
                    auto_fatal("PointerArray::grow() _capacity=%lu failed.\n", _capacity * 2);
                }
                _capacity *= 2;
                _allocator.copy_memory(new_buffer, _buffer, old_size);
                _allocator.deallocate_memory(_buffer, old_size);
                _buffer = new_buffer;
            }
        }

        void grow(usword_t count) {
            if (count > _capacity) {
                usword_t old_size = _capacity * sizeof(void *);
                if (_capacity == 0L) _capacity = page_size / sizeof(void *);
                while (count > _capacity) _capacity *= 2;
                void **new_buffer = (void **) _allocator.allocate_memory(_capacity * sizeof(void*));
                if (!new_buffer) {
                    auto_fatal("PointerArray::grow() _capacity=%lu failed.\n", _capacity);
                }
                if (_buffer) {
                    // only copy contents if _count != 0.
                    if (new_buffer && _count) {
                        _allocator.copy_memory(new_buffer, _buffer, old_size);
                    }
                    _allocator.deallocate_memory(_buffer, old_size);
                }
                _buffer = new_buffer;
            }
        }

        void add(void *pointer) {
            if (_count == _capacity) grow();
            _buffer[_count++] = pointer;
        }
    };
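
    // Illustrative use (a sketch; 'p' is a hypothetical pointer):
    //
    //   PointerArray<VMMemoryAllocator> array;       // VMMemoryAllocator is defined below
    //   array.add(p);                                // amortized O(1); doubles as needed
    //   for (usword_t i = 0; i < array.count(); ++i) { void *q = array.buffer()[i]; /* ... */ }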

    //
    // PointerQueue
    // Manages a set of pointers as a queue of discontiguous page-sized chunks. This uses memory more efficiently
    // than a PointerArray, since it never has to copy the buffers themselves, and it grows more slowly. Also parametrized
    // with a MemoryAllocator type.
    //
    struct PointerChunk : Preallocated {
        PointerChunk *_next;
        enum { chunk_size = (Auto::page_size / sizeof(void*)) - 1 };
        void *_pointers[chunk_size];

        void **pointers() { return _pointers; }
        void **limit() { return _pointers + chunk_size; }
        PointerChunk *next() { return _next; }
    };

    template <class Visitor> inline void visitPointerChunks(PointerChunk *chunks, usword_t count, Visitor& visitor) {
        PointerChunk *chunk = chunks;
        while (chunk != NULL) {
            PointerChunk *next = chunk->next();
            void **pointers = chunk->pointers();
            void **limit = pointers + (count > PointerChunk::chunk_size ? PointerChunk::chunk_size : count);
            visitor(pointers, limit);
            count -= (limit - pointers);
            chunk = next;
        }
    }

    template <typename MemoryAllocator>
    class PointerQueue : AuxAllocated {
        MemoryAllocator _allocator;
        PointerChunk *_head;
        PointerChunk *_tail;
        PointerChunk *_current;
        void **_cursor, **_limit;
        usword_t _count;

        void next_chunk() {
            if (_current == NULL) {
                // reset() pointed back to the beginning.
                _current = _head;
            } else {
                _current = _current->_next;
            }
            if (_current == NULL) {
                // note: relies on the allocator returning zero-filled memory, so the new chunk's _next is NULL.
                _current = (PointerChunk *)_allocator.allocate_memory(sizeof(PointerChunk));
                if (_head == NULL) {
                    _head = _tail = _current;
                } else {
                    _tail->_next = _current;
                    _tail = _current;
                }
            }
            _cursor = _current->pointers();
            _limit = _current->limit();
        }

    public:
        PointerQueue() : _head(NULL), _tail(NULL), _current(NULL), _cursor(NULL), _limit(NULL), _count(0) {}
        PointerQueue(MemoryAllocator allocator) : _allocator(allocator), _head(NULL), _tail(NULL), _current(NULL), _cursor(NULL), _limit(NULL), _count(0) {}

        ~PointerQueue() {
            PointerChunk *chunk = _head;
            while (chunk != NULL) {
                PointerChunk *next = chunk->_next;
                _allocator.deallocate_memory(chunk, sizeof(PointerChunk));
                chunk = next;
            }
        }

        void add(void *pointer) {
            if (_cursor == _limit) next_chunk();
            *_cursor++ = pointer;
            ++_count;
        }

        void reset() {
            _current = NULL;
            _cursor = _limit = NULL;
            _count = 0;
        }

        PointerChunk *chunks() { return _head; }
        usword_t count() { return _count; }

        usword_t size() {
            usword_t size = 0;
            PointerChunk *chunk = _head;
            while (chunk != NULL) {
                size += Auto::page_size;
                chunk = chunk->_next;
            }
            return size;
        }
    };
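
    // Illustrative traversal (a sketch; 'CountNonNull' and 'p' are hypothetical):
    //
    //   struct CountNonNull {
    //       usword_t found;
    //       CountNonNull() : found(0) {}
    //       void operator()(void **pointers, void **limit) {
    //           for (void **q = pointers; q < limit; ++q) if (*q) ++found;
    //       }
    //   };
    //
    //   PointerQueue<VMMemoryAllocator> queue;
    //   queue.add(p);
    //   CountNonNull visitor;
    //   visitPointerChunks(queue.chunks(), queue.count(), visitor);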

    class VMMemoryAllocator {
    public:
        inline void *allocate_memory(usword_t size) {
            return Auto::allocate_memory(size);
        }

        inline void deallocate_memory(void *address, usword_t size) {
            Auto::deallocate_memory(address, size);
        }

        inline void uncommit_memory(void *address, usword_t size) {
            Auto::uncommit_memory(address, size);
        }

        inline void copy_memory(void *dest, void *source, usword_t size) {
            Auto::copy_memory(dest, source, size);
        }
    };
}

#endif // __AUTO_DEFS__