1/*
2 * Copyright (c) 2011 Apple Inc. All rights reserved.
3 *
4 * @APPLE_APACHE_LICENSE_HEADER_START@
5 *
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
9 *
10 *     http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 *
18 * @APPLE_APACHE_LICENSE_HEADER_END@
19 */
20/*
21    ZoneCompaction.cpp
22    Mostly Copying Compaction Algorithms
    Copyright (c) 2010-2011 Apple Inc. All rights reserved.
24 */
25
26#include "ReferenceIterator.h"
27#include "Zone.h"
28
29extern "C" {
30void *CompactionWatchPoint = NULL;
31char CompactionObserverKey;
32}
33
34// this controls whether or not non-object backing stores containing weak references should pin. seems to be a requirement for now.
35#define NON_OBJECT_WEAK_REFERENCES_SHOULD_PIN 1
36
37namespace Auto {
38    static malloc_zone_t *_compaction_zone = NULL;
39    static malloc_logger_t *_old_malloc_logger = NULL;
40
41    static void _disabled_malloc_logger(uint32_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uint32_t) {}
42
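    // CompactionZone is a RAII helper used by compact_heap() and analyze_heap(): it creates a scratch
    // malloc zone for the duration of the operation and temporarily disables the malloc stack logger,
    // so the bookkeeping allocations made below stay out of both the collected heap and malloc history.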
43    class CompactionZone {
44    public:
45        CompactionZone() {
46            _compaction_zone = malloc_create_zone(4096, 0);
47            malloc_set_zone_name(_compaction_zone, "compaction_zone");
48            // malloc_zone_unregister(_compaction_zone);
49            if (malloc_logger) {
50                _old_malloc_logger = malloc_logger;
51                malloc_logger = _disabled_malloc_logger;
52            }
53        }
54
55       ~CompactionZone() {
56            if (_old_malloc_logger) {
57                malloc_logger = _old_malloc_logger;
58                _old_malloc_logger = NULL;
59            }
60            malloc_destroy_zone(_compaction_zone);
61            _compaction_zone = NULL;
62        }
63    };
64
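    // A minimal C++03-style STL allocator that carves memory out of _compaction_zone, so the auxiliary
    // containers used during compaction (pending stacks, pin maps, the relocation log) never allocate
    // from the garbage-collected heap being compacted. Typical use:
    //     std::vector<uintptr_t, CompactionZoneAllocator<uintptr_t> > pending;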
65    template <typename T> struct CompactionZoneAllocator {
66        typedef T                 value_type;
67        typedef value_type*       pointer;
68        typedef const value_type *const_pointer;
69        typedef value_type&       reference;
70        typedef const value_type& const_reference;
71        typedef size_t            size_type;
72        typedef ptrdiff_t         difference_type;
73
74        template <typename U> struct rebind { typedef CompactionZoneAllocator<U> other; };
75
76        template <typename U> CompactionZoneAllocator(const CompactionZoneAllocator<U>&) {}
77        CompactionZoneAllocator() {}
78        CompactionZoneAllocator(const CompactionZoneAllocator&) {}
79        ~CompactionZoneAllocator() {}
80
81        pointer address(reference x) const { return &x; }
        const_pointer address(const_reference x) const { return &x; }
83
84        pointer allocate(size_type n, const_pointer = 0) {
85            return static_cast<pointer>(::malloc_zone_calloc(_compaction_zone, n, sizeof(T)));
86        }
87
88        void deallocate(pointer p, size_type) { ::malloc_zone_free(_compaction_zone, p); }
89
90        size_type max_size() const {
91            return static_cast<size_type>(-1) / sizeof(T);
92        }
93
94        void construct(pointer p, const value_type& x) {
95            new(p) value_type(x);
96        }
97
98        void destroy(pointer p) { p->~value_type(); }
99
100        void operator=(const CompactionZoneAllocator&);
101    };
102
103
104    template<> struct CompactionZoneAllocator<void> {
105        typedef void        value_type;
106        typedef void*       pointer;
107        typedef const void *const_pointer;
108
109        template <typename U> struct rebind { typedef CompactionZoneAllocator<U> other; };
110    };
111
112
113    template <typename T>
114    inline bool operator==(const CompactionZoneAllocator<T>&,
115                           const CompactionZoneAllocator<T>&) {
116        return true;
117    }
118
119    template <typename T>
120    inline bool operator!=(const CompactionZoneAllocator<T>&,
121                           const CompactionZoneAllocator<T>&) {
122        return false;
123    }
124
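    // Pending stack used by the classification scan. Blocks discovered by the ReferenceIterator are pushed
    // here (packed subzone/quantum pairs for small blocks, raw pointers for Large blocks) and drained by
    // scan(), which prefers small blocks to keep both stacks shallow.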
125    template <typename ReferenceIterator> class ZonePendingStack {
126        typedef std::vector<uintptr_t, CompactionZoneAllocator<uintptr_t> > uintptr_vector;
127        uintptr_vector _small_stack, _large_stack;
128    public:
129        void push(Subzone *subzone, usword_t q) {
130            _small_stack.push_back(subzone->pack(q));
131        }
132
133        void push(Large *large) {
134            _large_stack.push_back(uintptr_t(large));
135        }
136
137        static bool mark(Subzone *subzone, usword_t q) { return subzone->test_and_set_mark(q); }
138        static bool mark(Large *large) { return large->test_and_set_mark(); }
139
140        static bool is_marked(Subzone *subzone, usword_t q) { return subzone->is_marked(q); }
141        static bool is_marked(Large *large) { return large->is_marked(); }
142
143        void scan(ReferenceIterator &scanner) {
144            for (;;) {
145                // prefer scanning small blocks to large blocks, to keep the stacks shallow.
146                if (_small_stack.size()) {
147                    uintptr_t back = _small_stack.back();
148                    _small_stack.pop_back();
149                    usword_t q;
150                    Subzone *subzone = Subzone::unpack(back, q);
151                    scanner.scan(subzone, q);
152                } else if (_large_stack.size()) {
153                    Large *large = reinterpret_cast<Large*>(_large_stack.back());
154                    _large_stack.pop_back();
155                    scanner.scan(large);
156                } else {
157                    return;
158                }
159            }
160        }
161
162        const PendingStackHint hints() { return PendingStackWantsEagerScanning; }
163        template <typename U> struct rebind { typedef ZonePendingStack<U> other; };
164    };
165
166    // Used by fixup_phase below, which needs no actual pending stack.
167    template <typename ReferenceIterator> class EmptyPendingStack {
168    public:
169        void push(Subzone *subzone, usword_t q) {}
170        void push(Large *large) {}
171
172        static bool mark(Subzone *subzone, usword_t q) { return false; }
173        static bool mark(Large *large) { return false; }
174        static bool is_marked(Subzone *subzone, usword_t q) { return false; }
175        static bool is_marked(Large *large) { return false; }
176
177        void scan(ReferenceIterator &scanner) {}
178
179        template <typename U> struct rebind { typedef EmptyPendingStack<U> other; };
180    };
181
182
183    typedef void (^mark_pinned_t) (void **slot, Subzone *subzone, usword_t q, ReferenceKind kind);
184
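    // CompactionClassifier performs the classification ("pinning") pass: it scans threads, roots, the heap,
    // and the weak references table, invoking the mark_pinned_t callback for every reference that prevents
    // its target from being moved. Only exact and all-pointers heap references, roots, associative
    // references, and weak references that can be fixed up later are left unpinned; every other reference
    // kind pins its target. Blocks still unpinned afterwards are candidates for relocation.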
185    class CompactionClassifier {
186        size_t _kindCounts[kReferenceKindCount];
187        Zone *_zone;
188        mark_pinned_t _marker;
189        struct Configuration;
190        typedef ReferenceIterator<Configuration> CompactingReferenceIterator;
191        class CompactionScanningStrategy : public FullScanningStrategy<CompactingReferenceIterator> {
192        public:
193            inline static bool visit_interior_pointers() { return true; }
194            inline static bool scan_threads_suspended() { return false; }
195            inline static pthread_rwlock_t *associations_lock(Zone *zone) { return NULL; }  // already owned, non-reentrant.
196        };
197        struct Configuration {
198            typedef CompactionClassifier ReferenceVisitor;
199            typedef ZonePendingStack<CompactingReferenceIterator> PendingStack;
200            typedef CompactionScanningStrategy ScanningStrategy;
201        };
202    public:
203        CompactionClassifier(Zone *zone, mark_pinned_t marker) : _zone(zone), _marker(marker) { bzero(_kindCounts, sizeof(_kindCounts)); }
204
205        inline void mark_pinned(void **slot, Subzone *subzone, usword_t q, ReferenceKind kind) { _marker(slot, subzone, q, kind); }
206
207        bool is_weak_slot_ivar(void *slot, void *slot_base, usword_t slot_layout) {
208            if (slot_layout & AUTO_OBJECT) {
209                const unsigned char *weak_layout = _zone->weak_layout_map_for_block(slot_base);
210                if (weak_layout) {
211                    void **slots = (void **)slot_base;
212                    while (unsigned char byte = *weak_layout++) {
213                        uint8_t skips = (byte >> 4);
214                        slots += skips;
215                        uint8_t weaks = (byte & 0x0F);
216                        while (weaks--) {
217                            if (slot == (void *)slots++)
218                                return true;
219                        }
220                    }
221                }
222            }
223            return false;
224        }
225
226        void visit(const ReferenceInfo &info, void **slot, Subzone *subzone, usword_t q) {
227            if (subzone->quantum_address(q) == CompactionWatchPoint) {
228                printf("visiting a reference to CompactionWatchPoint = %p\n", CompactionWatchPoint);
229            }
230            ReferenceKind kind = info.kind();
231            _kindCounts[kind]++;
232            switch (kind) {
233            case kAllPointersHeapReference:
234            case kAssociativeReference:
235            case kExactHeapReference:
236            case kRootReference:
237                break;
238            case kWeakReference:
239                {
240                    __block void *slot_block = NULL;
241                    __block usword_t slot_layout = 0;
242                    blockStartNoLockDo(_zone, slot, ^(Subzone *slot_subzone, usword_t slot_q) {
243                        slot_block = slot_subzone->quantum_address(slot_q);
244                        slot_layout = slot_subzone->layout(slot_q);
245                        // don't pin weakly referenced objects that come from objects with layout, or unscanned memory marked as AUTO_MEMORY_ALL_WEAK_POINTERS.
246                        if (!is_weak_slot_ivar(slot, slot_block, slot_layout)) {
                            // since there's no layout identifying this slot as a weak ivar of its containing object,
                            // pin slot_block. this saves having to call weak_transfer_weak_contents_unscanned() for
                            // every unscanned block.
                            // allow weak references from the special AUTO_MEMORY_ALL_WEAK_POINTERS backing store to be
                            // moved; that layout grants permission to move these pointers. It will never be used for
                            // hashed backing stores unless the hash functions are independent of object pointer values.
253                            if (slot_layout != AUTO_MEMORY_ALL_WEAK_POINTERS) {
254                                mark_pinned(NULL, slot_subzone, slot_q, kWeakSlotReference);
255                                mark_pinned(slot, subzone, q, kWeakReference);
256                            } else if (slot == slot_block) {
257                                // first word contains a weak reference, so pin the slot's block to avoid problems with forwarding.
258                                mark_pinned(NULL, slot_subzone, slot_q, kWeakSlotReference);
259                            }
260                        }
261                    }, ^(Large *slot_large) {
262                        slot_block = slot_large->address();
263                        slot_layout = slot_large->layout();
264                        if (!is_weak_slot_ivar(slot, slot_block, slot_layout)) {
265                            // large blocks are never compacted.
266                            if (slot_layout != AUTO_MEMORY_ALL_WEAK_POINTERS)
267                                mark_pinned(slot, subzone, q, kWeakReference);
268                        }
269                    });
270                    if (!slot_block) {
                        /* can we safely compact blocks that are referenced weakly from outside the gc zone? */
272                        if (!_zone->is_global_address_nolock(slot)) {
273                            // we can relocate GLOBAL weak references (and malloc blocks for that matter).
274                            mark_pinned(slot, subzone, q, kWeakReference);
275                        }
276                    }
277                }
278                break;
279            default:
280                mark_pinned(slot, subzone, q, kind);
281                break;
282            }
283        }
284
285        void visit(const ReferenceInfo &info, void **slot, Large *large) {
286            if (info.kind() == kWeakReference) {
287                // weakly referenced Large block. Pin slot's block if not from a __weak ivar.
288                if (_zone->in_subzone_memory(slot)) {
289                    usword_t slot_q;
290                    Subzone *slot_subzone = Subzone::subzone(slot);
291                    void *slot_block = slot_subzone->block_start(slot, slot_q);
292                    if (slot_block) {
293                        usword_t slot_layout = slot_subzone->layout(slot_q);
                        // don't pin weakly referenced objects that come from objects with layout, or unscanned memory marked as AUTO_MEMORY_ALL_WEAK_POINTERS.
295                        if (!is_weak_slot_ivar(slot, slot_block, slot_layout)) {
296                            // since there's no layout describing this object containing a weak reference
297                            // pin slot_block. this saves having to call weak_transfer_weak_contents_unscanned() for
298                            // every unscanned block.
299                            if (slot_layout != AUTO_MEMORY_ALL_WEAK_POINTERS || slot == slot_block) {
300                                mark_pinned(NULL, slot_subzone, slot_q, kWeakSlotReference);
301                            }
302                        }
303                    }
304                }
305            }
306        }
307
308        void visit_weak_callback(auto_weak_callback_block_t *callback) {
309            // check the callback slot. if it's inside a subzone block, pin that object (for now).
310            if (_zone->in_subzone_memory(callback)) {
311                usword_t q;
312                Subzone *subzone = Subzone::subzone(callback);
313                void *callback_start = subzone->block_start(callback, q);
314                if (callback_start) {
315                    if (!subzone->is_pinned(q)) {
316                        // NOTE:  this will pin any object that contains an embedded auto_weak_callback_block_t.
317                        mark_pinned(NULL, subzone, q, kWeakReference);
318                    }
319                }
320            }
321        }
322
323        void visit_weak_slot(void **slot) {
324            // weak reference to SOMETHING that is neither a Subzone nor Large block.
            // pin the owning block unless the slot is a __weak ivar or the block has layout of type AUTO_MEMORY_ALL_WEAK_POINTERS.
326            // e.g. [NSNull null] or a constant NSString is used as a key in a map table.
327            if (_zone->in_subzone_memory(slot)) {
328                usword_t slot_q;
329                Subzone *slot_subzone = Subzone::subzone(slot);
330                void *slot_block = slot_subzone->block_start(slot, slot_q);
331                if (slot_block) {
332                    usword_t slot_layout = slot_subzone->layout(slot_q);
333                    if (!is_weak_slot_ivar(slot, slot_block, slot_layout)) {
334                        if (slot_layout != AUTO_MEMORY_ALL_WEAK_POINTERS || slot == slot_block)
335                            mark_pinned(NULL, slot_subzone, slot_q, kWeakSlotReference);
336                    }
337                }
338            }
339        }
340
341        void classify_weak_reference(weak_referrer_t &ref) {
342            void **slot = ref.referrer;
343            blockDo(_zone, (void*)*slot,
344                    ^(Subzone *subzone, usword_t q) { visit(kWeakReference, slot, subzone, q); },
345                    ^(Large *large) { visit(kWeakReference, slot, large); },
346                    ^(void *block) { visit_weak_slot(slot); });
347            if (uintptr_t(ref.block) & 1) {
348                // pin old-school callbacks only.
349                visit_weak_callback((auto_weak_callback_block_t *)displace(ref.block, -1));
350            }
351        }
352
353        //
        // scan_garbage() - Last pass through the heap: scan the otherwise unreachable blocks, classifying all
        // pinning references to live objects. We could simply pin all unreachable blocks here, to avoid wasting
        // cycles on moving soon-to-be-collected objects. However, moving them out of the way may still be worth
        // the effort if it allows the heap to shrink.
358        //
359        void scan_garbage(CompactingReferenceIterator &scanner) {
360            // iterate through the regions first
361            for (Region *region = _zone->region_list(); region != NULL; region = region->next()) {
362                // iterate through the subzones
363                SubzoneRangeIterator iterator(region->subzone_range());
364                while (Subzone *subzone = iterator.next()) {
365                    // get the number of quantum in the subzone
366                    usword_t n = subzone->allocation_count();
367                    for (usword_t q = 0; q < n; q = subzone->next_quantum(q)) {
368                        if (!subzone->is_free(q) && !subzone->test_and_set_mark(q)) {
369                            // pin AUTO_OBJECT_UNSCANNED garbage to avoid finalize crashes (e.g. _ripdata_finalize())
370                            // caused by the corruption of interior pointers in unscanned memory. this population should be fairly limited.
371                            // FIXME:  remove this heuristic in NMOS.
372                            usword_t layout = subzone->layout(q);
373                            if (layout & AUTO_OBJECT_UNSCANNED) {
374                                mark_pinned(NULL, subzone, q, kGarbageReference);
375                            } else if (::is_scanned(layout)) {
376                                scanner.scan(subzone, q);
377                                scanner.flush();
378                            }
379                        }
380                    }
381                }
382            }
383
384            // iterate through the large blocks
385            for (Large *large = _zone->large_list(); large != NULL; large = large->next()) {
386                if (!large->test_and_set_mark() && large->is_scanned()) {
387                    scanner.scan(large);
388                    scanner.flush();
389                }
390            }
391        }
392
393        void scan(void *stack_bottom) {
394            Configuration::PendingStack stack;
395            CompactingReferenceIterator scanner(_zone, *this, stack, stack_bottom, false, false);
396            scanner.scan();
397#if NON_OBJECT_WEAK_REFERENCES_SHOULD_PIN
398            weak_enumerate_table_fixup(_zone, ^(weak_referrer_t &ref) {
399                classify_weak_reference(ref);
400            });
401#endif
402            // now, scan and pin all unmarked (garbage) blocks.
403            scan_garbage(scanner);
404        }
405
406        void printKindCounts() {
407            for (int i = 0; i < kReferenceKindCount; ++i) {
408                printf("<%s> : %lu\n", ReferenceInfo::name(ReferenceKind(i)), _kindCounts[i]);
409            }
410        }
411    };
412
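    // Visitor that tallies, page by page, how many subzone blocks and bytes are pinned vs. unpinned; a page
    // with no pinned blocks could be vacated entirely by compaction. analyze_heap() prints the totals.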
413    struct page_statistics_visitor {
414        size_t _pageIndex, _prevUnpinnedPageIndex;
415        size_t _blocksPerPage, _pinnedBlocksPerPage;
416        size_t _pinnedPages, _unpinnedPages;
417        size_t _pinnedBlocks, _unpinnedBlocks;
418        size_t _pinnedBytes, _unpinnedBytes;
419
420        page_statistics_visitor()
421            : _pageIndex(0), _prevUnpinnedPageIndex(0), _blocksPerPage(0), _pinnedBlocksPerPage(0),
422              _pinnedPages(0), _unpinnedPages(0), _pinnedBlocks(0), _unpinnedBlocks(0),
423              _pinnedBytes(0), _unpinnedBytes(0)
424        {
425        }
426
427        inline bool visit(Zone *zone, Subzone *subzone, usword_t q) {
428            size_t page = (uintptr_t(subzone->quantum_address(q)) >> page_size_log2);
429            if (page != _pageIndex) {
430                if (_pageIndex) {
431                    if (_pinnedBlocksPerPage == 0) {
432                        // printf("page[%lu] = %lu blocks {%lu} %c\n", _pageIndex, _blocksPerPage, subzone->quantum_size(1), (_pageIndex == (_prevUnpinnedPageIndex + 1)) ? '#' : ' ');
433                        _prevUnpinnedPageIndex = _pageIndex;
434                        ++_unpinnedPages;
435                    } else {
436                        ++_pinnedPages;
437                        _pinnedBlocksPerPage = 0;
438                    }
439                }
440                _pageIndex = page;
441                _blocksPerPage = 0;
442            }
443            ++_blocksPerPage;
444            if (subzone->is_pinned(q)) {
445                ++_pinnedBlocksPerPage;
446                ++_pinnedBlocks;
447                _pinnedBytes += subzone->size(q);
448            } else {
449                ++_unpinnedBlocks;
450                _unpinnedBytes += subzone->size(q);
451            }
452            return true;
453        }
454        inline bool visit(Zone *zone, Large *large) { return false; }
455    };
456
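    // Debugging aid: with all registered threads suspended, walks every subzone and prints quantum counts
    // (total, allocated, unscanned, free holes, never-used) for small and medium subzones separately, plus
    // the total size of Large blocks, to give a rough picture of heap fragmentation.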
457    static void examine_heap_fragmentation(Zone *zone) {
458        // for fun, can we make the heap stable?
459        zone->suspend_all_registered_threads();
460
461        struct quantum_counts {
462            size_t subzones;
463            size_t total;
464            size_t holes;
465            size_t allocs;
466            size_t unscanned_allocs;
467            size_t unused;
468        };
469        quantum_counts small_counts = { 0 }, medium_counts = { 0 };
470
471        for (Region *region = zone->region_list(); region != NULL; region = region->next()) {
472            // iterate through the subzones
473            SubzoneRangeIterator iterator(region->subzone_range());
474            while (Subzone *subzone = iterator.next()) {
475                quantum_counts &counts = subzone->is_small() ? small_counts : medium_counts;
476                bool unscanned = (subzone->admin()->layout() & AUTO_UNSCANNED);
477                ++counts.subzones;
478                usword_t q = 0, n = subzone->allocation_count();    // only examine quanta that have been handed out.
479                while (q < n) {
480                    usword_t nq = subzone->next_quantum(q);
481                    usword_t count = nq - q;
482                    if (subzone->is_free(q))
483                        counts.holes += count;
484                    else {
485                        counts.allocs += count;
486                        if (unscanned) counts.unscanned_allocs += count;
487                    }
488                    q = nq;
489                }
490                counts.unused += (subzone->allocation_limit() - n);
491                counts.total += n;
492            }
493        }
494
495        size_t largeSize = 0;
496        for (Large *large = zone->large_list(); large != NULL; large = large->next()) {
497            largeSize += large->size();
498        }
499
500        printf("largeSize = %ld\n", largeSize);
501        printf("subzones = { %lu, %lu }\n", small_counts.subzones, medium_counts.subzones);
502        printf("q total  = { %lu, %lu }\n", small_counts.total, medium_counts.total);
503        printf("q allocs = { %lu, %lu }\n", small_counts.allocs, medium_counts.allocs);
504        printf("q uallocs= { %lu, %lu }\n", small_counts.unscanned_allocs, medium_counts.unscanned_allocs);
505        printf("q holes  = { %lu, %lu }\n", small_counts.holes, medium_counts.holes);
506        printf("q unused = { %lu, %lu }\n", small_counts.unused, medium_counts.unused);
507
508        zone->resume_all_registered_threads();
509    }
510
511    typedef struct { size_t counts[kReferenceKindCount]; } KindCounts;
512    typedef std::map<void *, KindCounts, AuxPointerLess, CompactionZoneAllocator<std::pair<void * const, KindCounts> > > PtrKindCountsMap;
513    typedef __gnu_cxx::hash_set<void *, AuxPointerHash, AuxPointerEqual, CompactionZoneAllocator<void *> > PtrSet;
514
515    static void printPinnedReference(FILE *out, void *address, KindCounts &kind) {
516        fprintf(out, "%p", address);
517        for (int i = 1; i < kReferenceKindCount; ++i) {
518            if (kind.counts[i]) fprintf(out, " <%s>[%lu]", ReferenceInfo::name(ReferenceKind(i)), kind.counts[i]);
519        }
520        fprintf(out, "\n");
521    }
522
523    static void printPinnedReference(FILE *out, Zone *zone, void *slot_base, void **slot, void *address, const char *name, ReferenceKind kind) {
524        if (slot_base) {
525            if (zone->block_layout(slot_base) & AUTO_OBJECT)
526                fprintf(out, "%p + %ld (%s) -> %p (%s) <%s>\n", slot_base, (long)((char*)slot - (char*)slot_base), zone->name_for_object(slot_base), address, name, ReferenceInfo::name(kind));
527            else
                fprintf(out, "%p + %ld -> %p (%s) <%s>\n", slot_base, (long)((char*)slot - (char*)slot_base), address, name, ReferenceInfo::name(kind));
529        } else {
530            switch (kind) {
531            case kRetainedReference:
532            case kWeakSlotReference:
533            case kGarbageReference:
534                fprintf(out, "%p -> %p (%s) <%s>\n", (void*)NULL, address, name, ReferenceInfo::name(kind));    // hack for Pinpoint analysis.
535                break;
536            default:
537                fprintf(out, "%p -> %p (%s) <%s>\n", slot, address, name, ReferenceInfo::name(kind));
538            }
539        }
540    }
541
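    // analyze_heap() runs the compaction classifier in report-only mode: nothing is moved. Every pinning
    // reference is written to 'path', per-page pinned/unpinned statistics are printed, and the mark bits
    // are cleared again before the registered threads are resumed.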
542    void Zone::analyze_heap(const char *path) {
543        CompactionZone compactionZone;
544
545        if (true) examine_heap_fragmentation(this);
546        __block struct { char buffer[36]; } name;
547        FILE *out = fopen(path, "w");
548        PtrKindCountsMap pinMap;
549        __block PtrKindCountsMap &pinMapRef = pinMap;
550        mark_pinned_t marker = ^(void **slot, Subzone *subzone, usword_t q, ReferenceKind kind) {
551            subzone->mark_pinned(q);
552            pinMapRef[subzone->quantum_address(q)].counts[kind]++;
553            printPinnedReference(out, this, block_start(slot), slot, subzone->quantum_address(q),
554                                 (subzone->layout(q) & AUTO_OBJECT) ?
555                                 name_for_object(subzone->quantum_address(q)) :
556                                 (snprintf(name.buffer, sizeof(name.buffer), "%lu bytes", subzone->size(q)), name.buffer),
557                                 kind);
558        };
559
560        // grab all necessary locks to get a coherent picture of the heap.
561        Mutex marksLock(&_mark_bits_mutex);
562        ReadLock assocLock(&_associations_lock);
563        SpinLock weakLock(&weak_refs_table_lock);
564
565        reset_all_pinned();
566        suspend_all_registered_threads();
567
568        // compute the pinned object set.
569        CompactionClassifier classifier(this, marker);
570        classifier.scan((void *)auto_get_sp());
571
572        page_statistics_visitor visitor;
573        visitAllocatedBlocks(this, visitor);
574        printf("unpinned { pages = %lu, blocks = %lu, bytes = %lu }\n", visitor._unpinnedPages, visitor._unpinnedBlocks, visitor._unpinnedBytes);
575        printf("  pinned { pages = %lu, blocks = %lu, bytes = %lu }\n", visitor._pinnedPages, visitor._pinnedBlocks, visitor._pinnedBytes);
576
577        reset_all_marks();
578        resume_all_registered_threads();
579
580        if (false) {
581            classifier.printKindCounts();
582            // dump the pinned object map.
583            std::for_each(pinMap.begin(), pinMap.end(), ^(PtrKindCountsMap::value_type &pair) {
584                printPinnedReference(out, pair.first, pair.second);
585            });
586        }
587
588        fclose(out);
589    }
590
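    // Visits every allocated subzone block in reverse address order. The forwarding phase walks the heap
    // this way so that, with the free lists sorted lowest-address-first (see compact_heap()), live blocks
    // tend to migrate toward the low end of the heap.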
591    template <class Visitor> void visitAllocatedSubzoneBlocksInReverse(Zone *zone, Visitor& visitor) {
592        // iterate through the regions first
593        for (Region *region = zone->region_list(); region != NULL; region = region->next()) {
594            // iterate through the subzones (in reverse)
595            SubzoneRangeIterator iterator(region->subzone_range());
596            while (Subzone *subzone = iterator.previous()) {
597                // enumerate the allocated blocks in reverse.
598                usword_t q = subzone->allocation_count();
599                while ((q = subzone->previous_quantum(q)) != not_found) {
600                    // skip free blocks, and unmarked blocks. unmarked blocks are on their way
601                    // to becoming garbage, and won't have been classified, since they weren't visited.
602                    if (!subzone->is_free(q)) {
603                        visitor(zone, subzone, q);
604                    }
605                }
606            }
607        }
608    }
609
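    // Visits every allocated block (subzone blocks in address order, then Large blocks); used by the fixup,
    // move, and deallocate phases below.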
610    template <class Visitor> void visitAllocatedBlocksForCompaction(Zone *zone, Visitor& visitor) {
611        // iterate through the regions first
612        for (Region *region = zone->region_list(); region != NULL; region = region->next()) {
613            // iterate through the subzones
614            SubzoneRangeIterator iterator(region->subzone_range());
615            while (Subzone *subzone = iterator.next()) {
616                // get the number of quantum in the subzone
617                usword_t n = subzone->allocation_count();
618                for (usword_t q = 0; q < n; q = subzone->next_quantum(q)) {
619                    if (!subzone->is_free(q)) {
620                        visitor(zone, subzone, q);
621                    }
622                }
623            }
624        }
625
626        // iterate through the large blocks
627        for (Large *large = zone->large_list(); large != NULL; large = large->next()) {
            // visit the large block.
629            visitor(zone, large);
630        }
631    }
632
633    #pragma mark class forwarding_phase
634
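    // Phase 1 of compaction: choose destinations. For each compactable (unpinned) block, forward_block()
    // obtains a destination block from the block's Admin, saves the original first word in the new block,
    // and leaves a forwarding pointer in the first word of the old block. The phase runs twice: first over
    // blocks living on pages that contain no pinned blocks, then over the remaining compactable blocks.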
635    struct forwarding_phase {
636        size_t _objectsMoved, _bytesMoved;
637        size_t _pagesCompacted;
638        void *_currentPage;
639        bool _currentPagePinned;
640        bool _pinnedPagesOnly;
641
642        forwarding_phase() : _objectsMoved(0), _bytesMoved(0), _pagesCompacted(0), _currentPage(NULL), _currentPagePinned(false), _pinnedPagesOnly(false) {}
643
644        void setPinnedPagesOnly(bool pinnedPagesOnly) { _pinnedPagesOnly = pinnedPagesOnly; }
645
646        bool is_page_pinned(Subzone *subzone, void *block_page) {
647            // see if this page is the first page of a subzone. if so, it contains write-barrier cards which are pinned.
648            Range page_range(block_page, page_size);
649            void *q_zero_address = subzone->quantum_address(0);
650            if (page_range.in_range(q_zero_address) && q_zero_address > block_page) return true;
651            // see if this page is pinned by scanning forward.
652            usword_t q_page = subzone->quantum_index(block_page);
653            usword_t q_prev = subzone->previous_quantum(q_page);
654            if (q_prev != not_found && !subzone->is_free(q_prev)) {
655                // see if the previous quantum is pinned and overlaps the start of this page.
656                Range r(subzone->quantum_address(q_prev), subzone->quantum_size(q_prev));
657                if (r.in_range(block_page) && subzone->is_pinned(q_prev)) return true;
658            }
659            // otherwise, check all of the blocks that span this page for pinnedness.
660            usword_t n = subzone->allocation_limit();
661            usword_t q_start = q_prev != not_found ? subzone->next_quantum(q_prev) : q_page;
662            for (usword_t q = q_start; q < n && page_range.in_range(subzone->quantum_address(q)); q = subzone->next_quantum(q)) {
663                if (subzone->is_start(q) && !subzone->is_free(q) && subzone->is_pinned(q)) return true;
664            }
665            return false;
666        }
667
668        inline void forward_block(Zone *zone, Subzone *subzone, usword_t q, void *block) {
669            if (subzone->layout(q) & AUTO_OBJECT) {
670                // for now, don't compact objects without layout maps. eventually, there shouldn't be any of these.
671                if (zone->layout_map_for_block(block) == NULL) return;
672            }
673            void *newBlock = zone->forward_block(subzone, q, block);
674            if (newBlock != block) {
675                ++_objectsMoved;
676                _bytesMoved += subzone->quantum_size(q);
677            }
678        }
679
680        inline void operator() (Zone *zone, Subzone *subzone, usword_t q) {
681            if (_pinnedPagesOnly) {
682                // first pass, only compact blocks from unpinned pages.
683                void *block = subzone->quantum_address(q);
684                void *block_page = align_down(block);
685                if (block_page != _currentPage) {
686                    _currentPage = block_page;
687                    _currentPagePinned = is_page_pinned(subzone, block_page);
688                    if (!_currentPagePinned) ++_pagesCompacted;
689                }
690                if (!_currentPagePinned && subzone->is_compactable(q)) {
691                    forward_block(zone, subzone, q, block);
692                }
693            } else if (subzone->is_compactable(q)) {
694                // second pass, compact the rest. filter out already moved objects.
695                if (!subzone->is_forwarded(q))
696                    forward_block(zone, subzone, q, subzone->quantum_address(q));
697            }
698        }
699        size_t objectsMoved() { return _objectsMoved; }
700        size_t pagesCompacted() { return _pagesCompacted; }
701    };
702
703    #pragma mark class fixup_phase
704
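    // Phase 2 of compaction: pointer fixup. Rescans every scanned block, association, root, and weak table
    // entry, replacing each pointer to a forwarded block with the new address read out of the forwarding
    // word. References to Large blocks need no fixup, since Large blocks are never moved. Slots whose
    // containing block has a CompactionObserverKey association get that observer recorded in _observers.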
705    struct fixup_phase {
706        // FIXME:  should refactor the scanner to make this a little bit easier.
707        struct Configuration;
708        typedef ReferenceIterator<Configuration> FixupReferenceIterator;
709        class CompactionScanningStrategy : public FullScanningStrategy<FixupReferenceIterator> {
710        public:
711            inline static const unsigned char *layout_map_for_block(Zone *zone, void *block) {
712                if (zone->in_subzone_memory(block)) {
713                    Subzone *subzone = Subzone::subzone(block);
714                    usword_t q = subzone->quantum_index_unchecked(block);
715                    if (subzone->is_forwarded(q)) {
716                        // get the layout information from the forwarded block.
717                        block = *(void **)block;
718                    }
719                }
720                return zone->layout_map_for_block(block);
721            }
722        };
723        struct Configuration {
724            typedef fixup_phase ReferenceVisitor;
725            typedef EmptyPendingStack<FixupReferenceIterator> PendingStack;
726            typedef CompactionScanningStrategy ScanningStrategy;
727        };
728        Configuration::PendingStack _stack;
729        FixupReferenceIterator _scanner;
730        PtrSet _observers;
731
732        fixup_phase(Zone *zone) : _scanner(zone, *this, _stack, (void *)auto_get_sp()) {}
733
734        inline bool is_compacted_pointer(Zone *zone, void *address) {
735            if (zone->in_subzone_memory(address)) {
736                usword_t q;
737                Subzone *subzone = Subzone::subzone(address);
738                if (subzone->block_is_start(address, &q) && subzone->is_forwarded(q)) return true;
739            }
740            return false;
741        }
742
743        void check_slot_for_observations(void **slot) {
744            __block void *slot_block = NULL;
745            Zone *zone = _scanner.zone();
746            blockStartNoLockDo(zone, slot, ^(Subzone *slot_subzone, usword_t slot_q) {
747                slot_block = slot_subzone->quantum_address(slot_q);
748            },^(Large *slot_large) {
749                slot_block = slot_large->address();
750            });
751            if (slot_block) {
                // refactor. need zone.get_associative_ref_internal().
753                AssociationsHashMap &associations = zone->associations();
754                AssociationsHashMap::iterator i = associations.find(slot_block);
755                if (i != associations.end()) {
756                    ObjectAssociationMap *refs = i->second;
757                    ObjectAssociationMap::iterator j = refs->find(&CompactionObserverKey);
758                    if (j != refs->end()) {
759                        void *observer = j->second;
760                        if (is_compacted_pointer(zone, observer)) {
761                            j->second = observer = *(void **)observer;
762                        }
763                        _observers.insert(observer);
764                    }
765                }
766            }
767        }
768
769        void visit(const ReferenceInfo &info, void **slot, Subzone *subzone, usword_t q) {
770            if (subzone->is_forwarded(q)) {
771                void *address = *slot;
772                if (address == CompactionWatchPoint) {
773                    printf("fixing up a reference to CompactionWatchPoint = %p\n", CompactionWatchPoint);
774                }
775                *slot = *(void**)address;
776                check_slot_for_observations(slot);
777            }
778        }
779
780        void visit(const ReferenceInfo &info, void **slot, Large *large) {}
781
782        inline void operator() (Zone *zone, Subzone *subzone, usword_t q) {
783            // ignore moved blocks themselves.
784            usword_t layout = subzone->layout(q);
785            if ((layout & AUTO_UNSCANNED) == 0) {
786                if (layout & AUTO_OBJECT) {
787                    _scanner.scan(subzone, q);
788                } else if (layout == AUTO_POINTERS_ONLY) {
789                    ReferenceInfo all_pointers_info(kAllPointersHeapReference, &subzone->write_barrier());
790                    _scanner.scan_range(all_pointers_info, Range(subzone->quantum_address(q), subzone->size(q)));
791                }
792            }
793        }
794
795        inline void operator() (Zone *zone, Large *large) {
796            usword_t layout = large->layout();
797            if ((layout & AUTO_UNSCANNED) == 0) {
798                if (layout & AUTO_OBJECT) {
799                    _scanner.scan(large);
800                } else if (layout == AUTO_POINTERS_ONLY) {
801                    ReferenceInfo all_pointers_info(kAllPointersHeapReference, &large->write_barrier());
802                    _scanner.scan_range(all_pointers_info, Range(large->address(), large->size()));
803                }
804            }
805        }
806
807        // fixup associative reference slots.
808        inline bool operator() (Zone *zone, void *object, void *key, void *&value) {
809            void *address = value;
810            if (is_compacted_pointer(zone, address)) {
811                value = *(void **)address;
812            }
813            return true;
814        }
815
816        // fixup root slots.
817        inline void operator() (Zone *zone, void **root) {
818            void *address = *root;
819            if (is_compacted_pointer(zone, address)) {
820                *root = *(void **)address;
821            }
822        }
823
824        // visit weak slots.
825        inline void operator () (Zone *zone, weak_referrer_t &ref) {
826            // check to see if the referrer is pointing to a block that has been forwarded.
827            // Zone::forward_block() should never leave any dangling pointers to blocks in the table,
828            // so we check that here with an assertion.
829            void *referent = *ref.referrer;
830            ASSERTION(!is_compacted_pointer(zone, referent));
831            // fixup the callback slot.
832            if (ref.block && is_compacted_pointer(zone, ref.block)) {
833                ref.block = (auto_weak_callback_block_t *)*(void **)ref.block;
834            }
835        }
836    };
837
838    #pragma mark class move_object_phase
839
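    // Phase 3 of compaction: copy contents. For every forwarded block, Zone::move_block() bitwise-copies
    // the payload into the destination, conservatively propagates write-barrier cards, transfers any weak
    // references stored inside the block, and carries the block's age over to the copy.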
840    struct move_object_phase {
841        inline void operator() (Zone *zone, Subzone *subzone, usword_t q) {
842            if (subzone->is_forwarded(q)) {
843                zone->move_block(subzone, q, subzone->quantum_address(q));
844            }
845        }
846        inline void operator() (Zone *zone, Large *large) {}
847    };
848
849    #pragma mark class deallocate_phase
850
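    // Phase 4 of compaction: reclaim the originals. Each forwarded source block is returned to its Admin's
    // free lists and its forwarded bit is cleared. A relocation record is kept per move so the events can
    // be reported to the saved malloc logger after the registered threads have been resumed.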
851    struct relocation {
852        size_t _size;
853        void *_old_block;
854        void *_new_block;
855
856        relocation(size_t size, void *old_block, void *new_block) : _size(size), _old_block(old_block), _new_block(new_block) {}
857    };
858
859    typedef std::vector<relocation, CompactionZoneAllocator<relocation> > relocation_vector;
860
861    struct deallocate_phase {
862        size_t _objectsDeallocated;
863        relocation_vector &_logVector;
864
865        deallocate_phase(relocation_vector &logVector) : _objectsDeallocated(0), _logVector(logVector) {}
866
867        inline void operator() (Zone *zone, Subzone *subzone, usword_t q) {
868            if (subzone->is_forwarded(q)) {
869                ++_objectsDeallocated;
870
871                void *block = subzone->quantum_address(q);
872                void *copied_block = ((void **)block)[0];
873                if (subzone->layout(q) & AUTO_OBJECT) {
874                    if (Environment::log_compactions) printf("moved %p -> %p (%s)\n", block, copied_block, zone->name_for_object(copied_block));
875                } else {
876                    if (Environment::log_compactions) printf("moved %p -> %p (%lu bytes)\n", block, copied_block, subzone->size(q));
877                }
878
879                if (_old_malloc_logger) _logVector.push_back(relocation(subzone->size(q), block, copied_block));
880
881#if DEBUG
882                // this ensures that the old pointer is no longer in the weak table.
883                weak_enumerate_weak_references(zone, subzone->quantum_address(q), ^(const weak_referrer_t &ref) {
884                    printf("slot = %p, *slot == %p\n", ref.referrer, *ref.referrer);
885                    __builtin_trap();
886                });
887#endif
888
889                subzone->admin()->deallocate_no_lock(subzone, q, block);
890                subzone->clear_forwarded(q);
891            }
892        }
893        inline void operator() (Zone *zone, Large *large) {}
894        size_t objectsDeallocated() { return _objectsDeallocated; }
895    };
896
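    // Reserve a destination for 'block' and leave a forwarding pointer behind. Besides installing the
    // forwarding word, this transfers the block's associative references, its hash code, and any weak
    // references to it over to the new address, so lookups keyed by the block keep working after the move.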
897    void *Zone::forward_block(Subzone *subzone, usword_t q, void *block) {
898        // used by compacting collector exclusively.
899        void *forwarded_block = block;
900        if (subzone->is_forwarded(q)) {
901            forwarded_block = ((void **)block)[0];
902        } else {
903            usword_t size = subzone->size(q);
904            Admin *block_admin = subzone->admin();
905            forwarded_block = block_admin->allocate_destination_block_no_lock(subzone, q, block);
906            if (forwarded_block != block) {
907                if (subzone->is_scanned(q)) bzero(forwarded_block, size);
908                // save the original first word in the first word of the new block.
909                ((void **)forwarded_block)[0] = ((void **)block)[0];
910                // leave a forwarding address in the old block.
911                ((void **)block)[0] = forwarded_block;
912                subzone->mark_forwarded(q);
913
914                // transfer ownership of any associative references to forwarded_block.
915                AssociationsHashMap::iterator i = _associations.find(block);
916                if (i != _associations.end()) {
917                    // need to rehash the top level entry.
918                    ObjectAssociationMap* refs = i->second;
919                    _associations.erase(i);
920                    _associations[forwarded_block] = refs;
921                }
922
923                // transfer hash code to forwarded block.
924                PtrSizeHashMap::iterator h = _hashes.find(block);
925                if (h != _hashes.end()) {
926                    // need to rehash the top level entry.
927                    size_t hashValue = h->second;
928                    _hashes.erase(h);
929                    _hashes[forwarded_block] = hashValue;
930                }
931
932                // transfer weak references OF block TO forwarded_block.
933                weak_transfer_weak_referents(this, block, forwarded_block);
934            }
935        }
936        return forwarded_block;
937    }
938
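    // Copy the payload of a forwarded block into its destination. The first word was already transferred by
    // forward_block(), so only the remainder is moved here, followed by the write-barrier cards, any weak
    // references stored inside the block, and the block's age.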
939    void Zone::move_block(Subzone *subzone, usword_t q, void *block) {
940        // used by compacting collector exclusively.
941        ASSERTION(subzone->is_forwarded(q));
942        void *copied_block = ((void **)block)[0];
943        ASSERTION(in_subzone_memory(copied_block));
944        usword_t size = subzone->size(q);
945
946        // bitwise copy the rest of the old block into the new block.
947        memmove(displace(copied_block, sizeof(void*)), displace(block, sizeof(void*)), size - sizeof(void*));
948        usword_t layout = subzone->layout(q);
949        if (is_scanned(layout)) {
950            // be very conservative. if the block has ANY marked cards, mark all of the cards that span
951            // the copied block. otherwise due to block alignment, we could lose information.
952            if (subzone->write_barrier().range_has_marked_cards(block, size)) {
953                Subzone::subzone(copied_block)->write_barrier().mark_cards(copied_block, size);
954            }
955        }
956
957        // Transfer ownership of weak references inside the old block. For objects, we assume that they contain no weak
958        // references if they have no weak layout map. For non-objects, a conservative scan is performed which
959        // searches for explicitly registered weak references and transfers their ownership. If any callback blocks
960        // were registered, they are NOT transferred. This can only be fixed by the did_move_object callback.
961        if (layout & AUTO_OBJECT) {
962            const unsigned char *weak_map = weak_layout_map_for_block(copied_block);
963            if (weak_map != NULL) weak_transfer_weak_contents(this, (void **)block, (void **)copied_block, weak_map);
964        } else if (layout == AUTO_MEMORY_ALL_WEAK_POINTERS) {
965            // revisit this later, it's not possible to move a weak backing store simply, because the forwarding pointer
966            // may be stored over a live weak reference, which makes updating all weak references to an object complex.
967            // see the comment in CompactionClassifier::visit(). If an object with this layout has no pointers stored in
968            // it yet, then it is safe to move.
969            weak_transfer_weak_contents_unscanned(this, (void **)block, (void **)copied_block, size, true);
970        }
971
972        // transfer the age of the block.
973        Subzone *copied_subzone = Subzone::subzone(copied_block);
974        usword_t copied_q = copied_subzone->quantum_index_unchecked(copied_block);
975        copied_subzone->set_age(copied_q, subzone->age(q));
976    }
977
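    // Block type presumably registered under CompactionObserverKey by client code;
    // fixup_phase::check_slot_for_observations() collects such observers when a slot inside their
    // associated block is rewritten. Their invocation is not shown in this file.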
978    typedef void (^compaction_observer_t) (void);
979
980    inline void region_apply(Zone *zone, void (^block) (Region *region)) {
981        for (Region *region = zone->region_list(); region != NULL; region = region->next()) {
982            block(region);
983        }
984    }
985
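    // compact_heap() drives a full compaction cycle. With every relevant heap lock held and all registered
    // threads suspended, it classifies and pins unmovable blocks, then runs the forwarding, fixup, move,
    // and deallocate phases. Threads are resumed once the mark bits have been reset; the recorded
    // relocations are then replayed through the saved malloc logger before the locks are released.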
986    void Zone::compact_heap() {
987        // try to start a compaction cycle.
988        if (_compaction_disabled) return;
989        pthread_mutex_lock(&_compaction_lock);
990        if (_compaction_disabled) {
991            pthread_mutex_unlock(&_compaction_lock);
992            return;
993        }
994
995        // compaction operates entirely in its own zone.
996        CompactionZone compactionZone;
997        relocation_vector logVector;
998
999        // grab all necessary locks to get a coherent picture of the heap.
1000        pthread_mutex_lock(&_mark_bits_mutex);
1001        pthread_rwlock_wrlock(&_associations_lock);
1002        spin_lock(&_datasegments_lock);
1003        spin_lock(&weak_refs_table_lock);
1004        pthread_mutex_lock(&_roots_lock);
1005
1006        // we're sorting free lists and allocating new blocks, take those locks too.
1007        _partition.lock();
1008
1009        // the registered threads will remain suspended until compaction completes.
1010        suspend_all_registered_threads();
1011
1012        // clear all the pinned & mark bits.
1013        region_apply(this, ^(Region *region) {
1014            region->pinned().commit();
1015            region->pinned().clear();
1016            region->clear_marks();
1017        });
1018
1019        // sort the free lists so that the lowest block can be found by examining the head of each free list.
1020        // no need to sort if we're in scramble_heap mode.
1021        if (!Environment::scramble_heap) sort_free_lists();
1022
        // examine the amount of purgeable space BEFORE compaction, so we can compare it to the amount after compaction.
1024        __block usword_t initial_purgeable_bytes = 0;
1025        _partition.for_each(^(Admin &admin) {
1026            initial_purgeable_bytes += admin.purgeable_free_space_no_lock();
1027        });
1028
1029        // auto_date_t start = auto_date_now();
1030        // compute the pinned object set.
1031        __block size_t objectsPinned = 0;
1032        mark_pinned_t marker = ^(void **slot, Subzone *subzone, usword_t q, ReferenceKind kind) {
1033            if (!subzone->test_and_set_pinned(q)) ++objectsPinned;
1034        };
1035        CompactionClassifier classifier(this, marker);
1036        classifier.scan((void *)auto_get_sp());
1037
1038        // auto_date_t end = auto_date_now();
1039        // printf("classification took %lld microseconds.\n", (end - start));
1040
1041        // use the pinned object set to perform the copies.
1042
        // Compaction is simple. Take two passes through the heap, forwarding all unpinned blocks, then further passes
        // to fix up every object that points at the newly moved objects, copy their contents, and free the originals.

        // 1. choose forwarding addresses for all unpinned, compactable blocks (two passes, in reverse address order).
1045        forwarding_phase forwarder;
1046        forwarder.setPinnedPagesOnly(true);
1047        visitAllocatedSubzoneBlocksInReverse(this, forwarder);
1048        forwarder.setPinnedPagesOnly(false);
1049        visitAllocatedSubzoneBlocksInReverse(this, forwarder);
1050
1051        // 2. fixup all pointers from old to new.
1052        fixup_phase fixer(this);
1053        visitAllocatedBlocksForCompaction(this, fixer);
1054        visitAssociationsNoLock(this, fixer);
1055        __block fixup_phase &fixer_ref = fixer;
1056        visitRootsNoLock(this, ^(void **root) { fixer_ref(this, root); return true; });
1057        weak_enumerate_table_fixup(this, ^(weak_referrer_t &ref) { fixer_ref(this, ref); });
1058
        // 3. call -moveTo: or bitwise-copy, and fix up weak references.
1060        move_object_phase mover;
1061        visitAllocatedBlocksForCompaction(this, mover);
1062
1063        // 4. deallocate the compacted objects.
1064        deallocate_phase deallocator(logVector);
1065        visitAllocatedBlocksForCompaction(this, deallocator);
1066        ASSERTION(deallocator.objectsDeallocated() == forwarder.objectsMoved());
1067
1068        if (Environment::log_compactions) {
1069            printf("pinned %ld objects.\n", objectsPinned);
1070            size_t objectsMoved = forwarder.objectsMoved();
1071            if (objectsMoved) printf("compacted %ld objects, %ld pages\n", objectsMoved, forwarder.pagesCompacted());
1072
1073            // purge the free lists.
1074            usword_t bytes_purged = _partition.purge_free_space_no_lock();
1075            printf("purgeable before compaction %lu bytes (%lu pages).\n", initial_purgeable_bytes, initial_purgeable_bytes / page_size);
1076            printf("purged %lu bytes (%lu pages)\n", bytes_purged, bytes_purged / page_size);
1077        }
1078
1079        region_apply(this, ^(Region *region) {
1080            region->pinned().uncommit();
1081        });
1082
1083        reset_all_marks();
1084
1085        resume_all_registered_threads();
1086
1087        // do the logging of relocated blocks.
1088        std::for_each(logVector.begin(), logVector.end(), ^(const relocation& r) {
1089            _old_malloc_logger(MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_DEALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)this, (uintptr_t)r._old_block, (uintptr_t)r._size, (uintptr_t)r._new_block, 0);
1090        });
1091
1092        // release locks.
1093        _partition.unlock();
1094        pthread_mutex_unlock(&_roots_lock);
1095        spin_unlock(&weak_refs_table_lock);
1096        spin_unlock(&_datasegments_lock);
1097        pthread_rwlock_unlock(&_associations_lock);
1098        pthread_mutex_unlock(&_mark_bits_mutex);
1099
1100        // Unblock any threads trying to disable compaction.
1101        pthread_mutex_unlock(&_compaction_lock);
1102    }
1103
1104    void Zone::disable_compaction() {
1105        if (!_compaction_disabled) {
1106            Mutex compaction_lock(&_compaction_lock);
1107            _compaction_disabled = true;
1108        }
1109    }
1110
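    // set_in_compaction(), compaction_barrier(), and clear_in_compaction() coordinate with the registered
    // threads: the first flags every other thread as being in compaction, the barrier then takes the
    // registered-threads mutex and each thread's enlivening lock (blocking thread-local enlivening), and
    // the last clears the flags and releases the registered-threads mutex left held by the barrier.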
1111    void Zone::set_in_compaction() {
1112        Mutex mutex(&_registered_threads_mutex);
1113        for (Thread *thread = _registered_threads; thread != NULL; thread = thread->next()) {
1114            // don't block the collector thread for compaction (avoid deadlock).
1115            if (thread->is_current_thread()) continue;
1116            LockedBoolean &in_compaction = thread->in_compaction();
1117            assert(in_compaction.state == false);
1118            SpinLock lock(&in_compaction.lock);
1119            in_compaction.state = true;
1120        }
1121    }
1122
1123    void Zone::compaction_barrier() {
1124        // Thread Local Enlivening.
1125        // TODO:  we could optimize this to allow threads to enter during one pass, and then do another pass fully locked down.
1126        pthread_mutex_lock(&_registered_threads_mutex);
1127        for (Thread *thread = _registered_threads; thread != NULL; thread = thread->next()) {
1128            // don't block the collector thread for enlivening (avoid deadlock).
1129            if (thread->is_current_thread()) continue;
1130            LockedBoolean &needs_enlivening = thread->needs_enlivening();
1131            spin_lock(&needs_enlivening.lock);
1132        }
1133        _enlivening_complete = true;
1134    }
1135
1136    void Zone::clear_in_compaction() {
1137        for (Thread *thread = _registered_threads; thread != NULL; thread = thread->next()) {
1138            // don't block the collector thread for compaction (avoid deadlock).
1139            if (thread->is_current_thread()) continue;
1140            LockedBoolean &in_compaction = thread->in_compaction();
1141            assert(in_compaction.state == true);
1142            SpinLock lock(&in_compaction.lock);
1143            in_compaction.state = false;
1144        }
1145        pthread_mutex_unlock(&_registered_threads_mutex);
1146    }
1147
1148} // namespace Auto
1149