/*
 * Copyright (c) 2011 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */
/*
    ReferenceIterator.h
    Generalized Heap Scanner
    Copyright (c) 2008-2011 Apple Inc. All rights reserved.
 */

#pragma once
#ifndef __AUTO_REFERENCE_ITERATOR__
#define __AUTO_REFERENCE_ITERATOR__


#include "Admin.h"
#include "Definitions.h"
#include "Large.h"
#include "Zone.h"
#include "BlockIterator.h"

namespace Auto {

    //----- ReferenceIterator -----//

    enum ReferenceKind {
        kRootReference = 0,
        kRetainedReference,
        kRegistersReference,
        kRegistersIndirectReference,
        kStackReference,
        kStackIndirectReference,
        kConservativeHeapReference,
        kConservativeHeapIndirectReference,
        kThreadLocalReference,
        kOldReference,
        kEnlivenedReference,
        kGarbageReference,
        kWeakReference,
        kWeakSlotReference,
        kExactHeapReference,
        kAllPointersHeapReference,
        kAssociativeReference,
        kReferenceKindCount,
    };

    // tagged union describing a slot reference.
    class ReferenceInfo {
        const ReferenceKind       _kind;
        union {
            WriteBarrier    *_wb;           // kind == kConservativeHeapReference || kExactHeapReference || kAllPointersHeapReference
            Thread          *_thread;       // kind == kStackReference || kRegistersReference
            struct {
                void        *_object;       // kind == kAssociativeReference
                void        *_key;
            };
        };
        // Uncomment this if you want to check for calls to the copy constructor.
        // ReferenceInfo(const ReferenceInfo &other) : _kind(kRootReference), _wb(NULL), _thread(NULL), _object(NULL), _key(NULL) {}
    public:
        ReferenceInfo(ReferenceKind kind) : _kind(kind), _object(NULL), _key(NULL) {}
        ReferenceInfo(ReferenceKind kind, WriteBarrier * const wb) : _kind(kind), _wb(wb) {}
        ReferenceInfo(ReferenceKind kind, Thread *thread) : _kind(kind), _thread(thread) {}
        ReferenceInfo(void *object, void *key) : _kind(kAssociativeReference), _object(object), _key(key) {}

        // automatically generated copy-constructor is perfectly fine.
        // ReferenceInfo(const ReferenceInfo &other) : _kind(other._kind), _object(other._object), _key(other._key) {}

        ReferenceKind      kind()       const { return _kind; }
        WriteBarrier       &wb()        const { return *_wb; }
        Thread             &thread()    const { return *_thread; }
        void               *object()    const { return _object; }
        void               *key()       const { return _key; }

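        // Human-readable names for the ReferenceKind values; the table below must stay in
        // sync with the ordering of the ReferenceKind enum above.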
        static const char  *name(ReferenceKind kind) {
            static const char *kReferenceKindNames[] = {
                "root",
                "retained",
                "registers", "registersIndirect",
                "stack", "stackIndirect",
                "conservative", "conservativeIndirect",
                "threadLocal", "old", "enlivened", "garbage",
                "weak", "weakslot",
                "exact", "allPointers", "associative"
            };
            return kReferenceKindNames[kind];
        }
    };

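    // Hints that a PendingStack implementation advertises through its hints() method; the
    // ReferenceIterator uses them to adapt its scanning policy (see test_pending_stack_hint()
    // and the predicates built on it below).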
    enum {
        PendingStackIsFixedSize = 0x1,
        PendingStackWantsEagerScanning = 0x2,
        PendingStackChecksPendingBitmap = 0x4,
        PendingStackSupportsThreadedScan = 0x8,
    };

    typedef uint32_t PendingStackHint;

    //
    // Visits all live block references. The Configuration type parameter must contain 3 typedefs:
    //
    //  struct MyConfiguration {
    //      typedef MyReferenceVisitor ReferenceVisitor;
    //      typedef MyPendingStack PendingStack;
    //      typedef GenerationalScanningStrategy ScanningStrategy;
    //  };
    //
    //  The ReferenceVisitor type must implement the following methods:
    //
    // class MyReferenceVisitor {
    // public:
    //      void visit(const ReferenceInfo &info, void **slot, Subzone *subzone, usword_t q);
    //      void visit(const ReferenceInfo &info, void **slot, Large *large);
    // };
    //
    // The info parameter classifies the reference slot: root, stack, conservatively scanned block, exactly scanned block, etc. It is a tagged
    // union that contains a pointer to the controlling write-barrier, the Thread*, or the associative reference key.
    // The block being referenced is either a small/medium block, represented by the subzone / quantum pair, or a Large block.
    //
    // The ScanningStrategy type implements how objects are scanned. With careful forward declarations,
    // it is possible to use FullScanningStrategy<ReferenceIterator<MyConfiguration> > or GenerationalScanningStrategy<ReferenceIterator<MyConfiguration> >, defined below.
    // Consult those types for more information on how to implement other scanning strategies.
    //
    // The PendingStack type should be a template type for use with any of a family of ReferenceIterators. Therefore, it
    // must also provide a rebind as follows:
    //
    // template <typename ReferenceIterator> class MyPendingStack {
    //      void push(Subzone *subzone, usword_t q);
    //      void push(Large *large);
    //      void scan(ReferenceIterator &scanner);
    //      const PendingStackHint hints();
    //      template <typename U> struct rebind { typedef MyPendingStack<U> other; };
    // };
    //
    // Periodically, the ReferenceIterator will call the scan() method, which pops all pushed blocks and passes them to
    // scanner.scan(Subzone *subzone, usword_t q) or scanner.scan(Large *large).
    // Perhaps marking should also be performed in the pending stack?
    //
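    // For illustration only, one way to tie a configuration back to its iterator is with a
    // forward declaration; the My* names below are hypothetical placeholders, not types
    // defined in this header:
    //
    //  struct MyConfiguration;
    //  typedef ReferenceIterator<MyConfiguration> MyReferenceIterator;
    //  struct MyConfiguration {
    //      typedef MyReferenceVisitor ReferenceVisitor;
    //      typedef MyPendingStack<MyReferenceIterator> PendingStack;
    //      typedef FullScanningStrategy<MyReferenceIterator> ScanningStrategy;
    //  };
    //
    //  MyReferenceVisitor visitor;
    //  MyPendingStack<MyReferenceIterator> stack;
    //  MyReferenceIterator scanner(zone, visitor, stack, stack_bottom);
    //  scanner.scan();
    //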

    template <class Configuration> class ReferenceIterator {
    private:
        typedef typename Configuration::ReferenceVisitor ReferenceVisitor;
        typedef typename Configuration::PendingStack PendingStack;
        Zone                       *_zone;                                      // zone containing blocks
        ReferenceVisitor           &_visitor;                                   // object visiting blocks
        PendingStack               &_pending_stack;                             // stack for deferred scanning.
        void                       *_stack_bottom;                              // stack pointer of current thread.
        const bool                  _needs_enlivening_barrier;                  // need to enliven in this cycle?
        const bool                  _repair_write_barrier;                      // repairing write barrier in this cycle?

        // hints the pending stack provides about its implementation
        const inline bool test_pending_stack_hint(PendingStackHint hint) { return (_pending_stack.hints() & hint) == hint; }
        const inline bool prefer_shallow_pending_stack() { return !test_pending_stack_hint(PendingStackIsFixedSize); }
        const inline bool perform_eager_scanning() { return test_pending_stack_hint(PendingStackWantsEagerScanning); }
        const inline bool scan_pending() { return !test_pending_stack_hint(PendingStackChecksPendingBitmap); }
        const inline bool scan_threaded() { return test_pending_stack_hint(PendingStackSupportsThreadedScan); }

    public:
        // provide a way to rebind this template type to another just like STL allocators can do.
        template <typename U> struct rebind { typedef ReferenceIterator<U> other; };

        //
        // Constructor
        //
        ReferenceIterator(Zone *zone, ReferenceVisitor &visitor, PendingStack &stack, void *stack_bottom,
                          const bool needs_enlivening = false, const bool repair_write_barrier = false)
            : _zone(zone), _visitor(visitor), _pending_stack(stack), _stack_bottom(stack_bottom),
              _needs_enlivening_barrier(needs_enlivening), _repair_write_barrier(repair_write_barrier)
        {
        }

        Zone *zone() { return _zone; }

        void flush() { _pending_stack.scan(*this); }

        void push(Subzone *subzone, usword_t q) { _pending_stack.push(subzone, q); }
        void push(Large *large) { _pending_stack.push(large); }

        bool mark(Subzone *subzone, usword_t q) { return _pending_stack.mark(subzone, q); }
        bool mark(Large *large) { return _pending_stack.mark(large); }

        bool is_marked(Subzone *subzone, usword_t q) { return _pending_stack.is_marked(subzone, q); }
        bool is_marked(Large *large) { return _pending_stack.is_marked(large); }

        static bool should_scan(Subzone *subzone, usword_t q) { return Configuration::ScanningStrategy::should_scan(subzone, q); }
        static bool should_scan(Large *large) { return Configuration::ScanningStrategy::should_scan(large); }

        static bool should_mark(Subzone *subzone, usword_t q) { return Configuration::ScanningStrategy::should_mark(subzone, q); }
        static bool should_mark(Large *large) { return Configuration::ScanningStrategy::should_mark(large); }

        const unsigned char* layout_map_for_block(void *block) { return Configuration::ScanningStrategy::layout_map_for_block(_zone, block); }
        static bool visit_interior_pointers() { return Configuration::ScanningStrategy::visit_interior_pointers(); }
        static bool scan_threads_suspended() { return Configuration::ScanningStrategy::scan_threads_suspended(); }
        inline static pthread_rwlock_t *associations_lock(Zone *zone) { return Configuration::ScanningStrategy::associations_lock(zone); }

        // The most primitive scanning operation: scans a single pointer-sized word of memory, checking whether the pointer
        // actually points into the collector heap. If it does, the referenced block is marked. If that block should
        // itself be scanned, it is pushed onto the pending stack for later examination.

        inline void scan_reference(const ReferenceInfo &info, void **reference) __attribute__((always_inline)) {
            void *pointer = *reference;
            if (pointer == NULL) return;
            if (_zone->in_subzone_memory(pointer)) {
                Subzone *subzone = Subzone::subzone(pointer);
                usword_t q;
                if (subzone->block_is_start(pointer, &q)) {
                    _visitor.visit(info, reference, subzone, q);
                    if (!mark(subzone, q) && should_scan(subzone, q)) {
                        push(subzone, q);
                    }
                }
            } else if (_zone->block_is_start_large(pointer)) {
                Large *large = Large::large(pointer);
                _visitor.visit(info, reference, large);
                if (!mark(large) && should_scan(large)) {
                    push(large);
                }
            }
        }

        inline void scan_reference_indirect(const ReferenceInfo &info, const ReferenceInfo &indirectInfo, void **reference) {
            void *pointer = *reference;
            if (pointer == NULL) return;
            if (_zone->in_subzone_memory(pointer)) {
                Subzone *subzone = Subzone::subzone(pointer);
                usword_t q;
                if (subzone->block_is_start(pointer, &q)) {
                    _visitor.visit(info, reference, subzone, q);
                    if (!mark(subzone, q) && should_scan(subzone, q)) {
                        push(subzone, q);
                    }
                } else {
                    void *block = subzone->block_start(pointer, q);
                    if (block != NULL) {
                        _visitor.visit(indirectInfo, reference, subzone, subzone->quantum_index_unchecked(block));
                        // we don't MARK interior pointers, but the visitor may be interested.
                    }
                }
            } else if (_zone->block_is_start_large(pointer)) {
                Large *large = Large::large(pointer);
                _visitor.visit(info, reference, large);
                if (!mark(large) && should_scan(large)) {
                    push(large);
                }
            }
        }

        inline void scan_reference_repair_write_barrier(const ReferenceInfo &info, void **reference, WriteBarrier *wb) {
            void *pointer = *reference;
            if (pointer == NULL) return;
            if (_zone->in_subzone_memory(pointer)) {
                Subzone *subzone = Subzone::subzone(pointer);
                usword_t q;
                if (subzone->block_is_start(pointer, &q)) {
                    _visitor.visit(info, reference, subzone, q);
                    if (subzone->is_thread_local(q) || subzone->is_new(q)) {
                        wb->mark_card(reference);
                    }
                    if (!mark(subzone, q) && should_scan(subzone, q)) {
                        push(subzone, q);
                    }
                }
            } else if (_zone->block_is_start_large(pointer)) {
                Large *large = Large::large(pointer);
                _visitor.visit(info, reference, large);
                if (large->is_new()) {
                    wb->mark_card(reference);
                }
                if (!mark(large) && should_scan(large)) {
                    push(large);
                }
            }
        }

        // Compatibility with Thread::scan_other_thread() and WriteBarrier::scan_marked_ranges().
        // When we can use blocks from templates, this layer will be unnecessary.

        inline void scan_range(const ReferenceInfo &info, const Range &range) {
            void **slots = (void **)range.address();
            void **last = (void **)displace(slots, range.size() - sizeof(void*));
            AUTO_PROBE(auto_probe_scan_range(slots, last));
            while (slots <= last)
                scan_reference(info, slots++);
        }

        inline void scan_range_indirect(const ReferenceInfo &info, const ReferenceInfo &indirectInfo, const Range &range) {
            void **slots = (void **)range.address();
            void **last = (void **)displace(slots, range.size() - sizeof(void*));
            AUTO_PROBE(auto_probe_scan_range(slots, last));
            while (slots <= last)
                scan_reference_indirect(info, indirectInfo, slots++);
        }

        inline void scan_range_repair_write_barrier(const ReferenceInfo &info, const Range &range, WriteBarrier *wb) {
            void **slots = (void **)range.address();
            void **last = (void **)displace(slots, range.size() - sizeof(void*));
            AUTO_PROBE(auto_probe_scan_range(slots, last));
            while (slots <= last)
                scan_reference_repair_write_barrier(info, slots++, wb);
        }

        static void scan_thread_range(Thread *thread, const Range &range, void *arg) {
            ReferenceIterator *scanner = (ReferenceIterator*)arg;
            ReferenceKind kind = (range.end() == thread->stack_base() ? kStackReference : kRegistersReference);
            ReferenceInfo info(kind, thread);
            if (scanner->visit_interior_pointers()) {
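                // the indirect kinds immediately follow their direct counterparts in ReferenceKind, hence kind + 1.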
                ReferenceInfo interior_info(ReferenceKind(kind + 1), thread);
                scanner->scan_range_indirect(info, interior_info, range);
            } else
                scanner->scan_range(info, range);
            if (scanner->prefer_shallow_pending_stack()) scanner->flush();
        }

        static void scan_exact_range(const Range &range, WriteBarrier *wb, void *arg) {
            ReferenceIterator *scanner = (ReferenceIterator*)arg;
            ReferenceInfo info(kExactHeapReference, wb);
            if (scanner->_repair_write_barrier)
                scanner->scan_range_repair_write_barrier(info, range, wb);
            else
                scanner->scan_range(info, range);
        }

        static void scan_all_pointers_range(const Range &range, WriteBarrier *wb, void *arg) {
            ReferenceIterator *scanner = (ReferenceIterator*)arg;
            ReferenceInfo info(kAllPointersHeapReference, wb);
            if (scanner->_repair_write_barrier)
                scanner->scan_range_repair_write_barrier(info, range, wb);
            else
                scanner->scan_range(info, range);
        }

        static void scan_conservative_range(const Range &range, WriteBarrier *wb, void *arg) {
            ReferenceIterator *scanner = (ReferenceIterator*)arg;
            ReferenceInfo info(kConservativeHeapReference, wb);
            if (scanner->_repair_write_barrier)
                scanner->scan_range_repair_write_barrier(info, range, wb);
            else
                scanner->scan_range(info, range);
        }

        inline void scan(Subzone *subzone, usword_t q) {
            void *block = subzone->quantum_address(q);
            usword_t size = subzone->size(q);
            usword_t layout = subzone->layout(q);
            WriteBarrier *wb = &subzone->write_barrier();
            if (layout & AUTO_OBJECT) {
                Configuration::ScanningStrategy::scan_object(*this, block, size, layout, layout_map_for_block(block), wb);
            } else {
                Configuration::ScanningStrategy::scan_block(*this, block, size, layout, wb);
            }
        }

        inline void scan(Large *large) {
            void *block = large->address();
            usword_t size = large->size();
            usword_t layout = large->layout();
            WriteBarrier *wb = &large->write_barrier();
            if (layout & AUTO_OBJECT) {
                Configuration::ScanningStrategy::scan_object(*this, block, size, layout, layout_map_for_block(block), wb);
            } else {
                Configuration::ScanningStrategy::scan_block(*this, block, size, layout, wb);
            }
        }

        class rooted_blocks_visitor {
            ReferenceIterator &_scanner;
            ReferenceVisitor &_visitor;
        public:
            rooted_blocks_visitor(ReferenceIterator &scanner) : _scanner(scanner), _visitor(scanner._visitor) {}

            // visitor function for subzone
            inline bool visit(Zone *zone, Subzone *subzone, usword_t q) {
                bool is_local = subzone->is_thread_local(q);
                bool has_refcount = subzone->has_refcount(q);
                if (is_local || has_refcount || should_mark(subzone, q)) {
                    ReferenceInfo info(is_local ? kThreadLocalReference : has_refcount ? kRetainedReference : kOldReference);
                    _visitor.visit(info, NULL, subzone, q);

                    if (!_scanner.mark(subzone, q) && _scanner.should_scan(subzone, q)) {
                        if (_scanner.perform_eager_scanning()) {
                            _scanner.scan(subzone, q);
                        } else {
                            _scanner.push(subzone, q);
                        }
                    }
                }

                // always continue
                return true;
            }

            // visitor function for large
            inline bool visit(Zone *zone, Large *large) {
                if (large->refcount() || should_mark(large)) {
                    ReferenceInfo info(kRetainedReference);
                    _scanner._visitor.visit(info, NULL, large);

                    if (!_scanner.mark(large) && _scanner.should_scan(large)) {
                        if (_scanner.perform_eager_scanning()) {
                            _scanner.scan(large);
                        } else {
                            _scanner.push(large);
                        }
                    }
                }

                // always continue
                return true;
            }

            // visitor function for registered root
            inline bool operator ()(void **root) {
                ReferenceInfo info(kRootReference);
                _scanner.scan_reference(info, root);
                if (_scanner.prefer_shallow_pending_stack()) _scanner.flush();
                return true;
            }
        };

        class rooted_blocks_concurrent_visitor {
            rooted_blocks_visitor &_standard_visitor;

        public:
            rooted_blocks_concurrent_visitor(rooted_blocks_visitor &visitor) : _standard_visitor(visitor) {}

            const inline bool visit_larges_concurrently()   const { return false; }
            void visit(Zone *zone, Subzone *subzone, usword_t q)  {}

            void inline visit(Zone *z, Subzone *sz) {
                Subzone::PendingCountAccumulator accumulator(z->registered_thread());
                usword_t n = sz->allocation_limit();
                for (usword_t q = 0; q < n; q = sz->next_quantum(q)) {
                    if (!sz->is_free(q)) {
                        _standard_visitor.visit(z, sz, q);
                    }
                }
            }

            inline void visit(Zone *z, Large *large) {
                Subzone::PendingCountAccumulator accumulator(z->registered_thread());
                _standard_visitor.visit(z, large);
            }
        };

        inline void scan_roots() {
            // visit rooted blocks.
            rooted_blocks_visitor visitor(*this);
            if (scan_threaded()) {
                rooted_blocks_concurrent_visitor concurrent_visitor(visitor);
                visitAllocatedBlocks_concurrent(_zone, concurrent_visitor);
            } else {
                visitAllocatedBlocks(_zone, visitor);
            }

            if (prefer_shallow_pending_stack()) flush();

            // visit registered roots.
            Mutex lock(_zone->roots_lock());
            visitRootsNoLock(_zone, visitor);
        }

        // typedef void (&scanner_block_t) (Range *range);

        static void scan_one_thread(Thread *thread, void *arg) {
            // Until <rdar://problem/6393321&6182276> are fixed, have to use the function pointer variants.
            ReferenceIterator* scanner = (ReferenceIterator *)arg;
            if (thread->is_current_thread()) {
                thread->scan_current_thread(&ReferenceIterator::scan_thread_range, arg, scanner->_stack_bottom);
            } else {
                thread->scan_other_thread(&ReferenceIterator::scan_thread_range, arg, scan_threads_suspended());
            }
        }

        inline void scan_threads() {
            // TODO:  coordinate with dying threads.
            // Until <rdar://problem/6393321&6182276> are fixed, have to use a function pointer.
            // scanner_block_t block_scanner = ^(Range *range) { this->scan_range(kStackReference, *range); };

            _zone->scan_registered_threads(&ReferenceIterator::scan_one_thread, this);
        }

        inline void push_associations(void *block) {
            AssociationsHashMap &associations(_zone->associations());
            AssociationsHashMap::iterator i = associations.find(block);
            if (i != associations.end()) {
                ObjectAssociationMap *refs = i->second;
                for (ObjectAssociationMap::iterator j = refs->begin(), jend = refs->end(); j != jend; j++) {
                    ObjectAssociationMap::value_type &pair = *j;
                    void *key = pair.first;
                    void *value = pair.second;
                    ReferenceInfo info(block, key);
                    if (_zone->in_subzone_memory(value)) {
                        Subzone *subzone = Subzone::subzone(value);
                        usword_t q = subzone->quantum_index(value);
                        _visitor.visit(info, &pair.second, subzone, q);
                        if (!mark(subzone, q)) {
                            push(subzone, q);
                        }
                    } else if (_zone->block_is_start_large(value)) {
                        Large *large = Large::large(value);
                        _visitor.visit(info, &pair.second, large);
                        if (!mark(large)) {
                            push(large);
                        }
                    }
                }
            }
        }

        // internal scanning strategy used to implement association scanning.
        class AssociationScanningStrategy;
        struct AssociationScanningConfiguration;
        typedef typename rebind<AssociationScanningConfiguration>::other AssociationReferenceIterator;

        struct AssociationScanningConfiguration {
            typedef typename ReferenceIterator::ReferenceVisitor ReferenceVisitor;
            typedef typename PendingStack::template rebind<AssociationReferenceIterator>::other PendingStack;
            typedef typename AssociationReferenceIterator::AssociationScanningStrategy ScanningStrategy;
            typedef typename Configuration::ScanningStrategy::template rebind<AssociationReferenceIterator>::other OriginalScanningStrategy;
        };

        class AssociationScanningStrategy {
        public:
            inline static bool should_mark(Subzone *subzone, usword_t q) { return Configuration::OriginalScanningStrategy::should_mark(subzone, q); }
            inline static bool should_mark(Large *large) { return Configuration::OriginalScanningStrategy::should_mark(large); }
            inline static bool should_scan(Subzone *subzone, usword_t q) { return true; }
            inline static bool should_scan(Large *large) { return true; }

            inline static void scan_block(ReferenceIterator &scanner, void *block, usword_t size, usword_t layout, WriteBarrier *wb) {
                // NOTE:  scan_associations() always pushes blocks whether scanned or unscanned.
                if (is_scanned(layout))
                    Configuration::OriginalScanningStrategy::scan_block(scanner, block, size, layout, wb);
                scanner.push_associations(block);
            }

            inline static const unsigned char *layout_map_for_block(Zone *zone, void *block) { return Configuration::OriginalScanningStrategy::layout_map_for_block(zone, block); }
            inline static bool visit_interior_pointers() { return Configuration::OriginalScanningStrategy::visit_interior_pointers(); }

            inline static void scan_object(ReferenceIterator &scanner, void *object, usword_t size, usword_t layout, const unsigned char* map, WriteBarrier *wb) {
                // NOTE:  scan_associations() always pushes blocks whether scanned or unscanned.
                if (is_scanned(layout))
                    Configuration::OriginalScanningStrategy::scan_object(scanner, object, size, layout, map, wb);
                scanner.push_associations(object);
            }
        };

        inline bool associations_should_be_scanned(void *block) {
            if (_zone->in_subzone_memory(block)) {
                Subzone *subzone = Subzone::subzone(block);
                usword_t q;
                return subzone->block_is_start(block, &q) && is_marked(subzone, q);
            } else if (_zone->block_is_start_large(block)) {
                return is_marked(Large::large(block));
            }
            // <rdar://problem/6463922> Treat non-block pointers as unconditionally live.
            return true;
        }

        inline void scan_associations() {
            typename AssociationScanningConfiguration::PendingStack pendingStack;
            AssociationReferenceIterator associationScanner(_zone, _visitor, pendingStack, _stack_bottom, false, _repair_write_barrier);

            // Prevent other threads from breaking existing associations. We already own the enlivening lock.
            ReadLock lock(associations_lock(_zone));
            AssociationsHashMap &associations(_zone->associations());

            // consider associative references. these are only reachable if their primary block is.
            for (AssociationsHashMap::iterator i = associations.begin(), iend = associations.end(); i != iend; i++) {
                void *block = i->first;
                if (associations_should_be_scanned(block)) {
                    ObjectAssociationMap *refs = i->second;
                    for (ObjectAssociationMap::iterator j = refs->begin(), jend = refs->end(); j != jend; j++) {
                        ObjectAssociationMap::value_type &pair = *j;
                        void *key = pair.first;
                        void *value = pair.second;
                        ReferenceInfo info(block, key);
                        if (_zone->in_subzone_memory(value)) {
                            Subzone *subzone = Subzone::subzone(value);
                            usword_t q = subzone->quantum_index(value);
                            _visitor.visit(info, &pair.second, subzone, q);
                            if (!associationScanner.mark(subzone, q)) {
                                pendingStack.push(subzone, q);
                            }
                        } else if (_zone->block_is_start_large(value)) {
                            Large *large = Large::large(value);
                            _visitor.visit(info, &pair.second, large);
                            if (!associationScanner.mark(large)) {
                                pendingStack.push(large);
                            }
                        }
                    }
                    if (prefer_shallow_pending_stack())
                        pendingStack.scan(associationScanner);
                }
            }
            if (!prefer_shallow_pending_stack())
                pendingStack.scan(associationScanner);
        }

        // pending_visitor is used to scan all blocks that have their pending bit set
        // (for pending stack implementations that do not do this)
        class pending_visitor {
            PendingStack _stack;
            ReferenceIterator _scanner;
        public:
            pending_visitor(ReferenceIterator* scanner)
            : _scanner(scanner->_zone, scanner->_visitor, _stack, scanner->_stack_bottom,
                       false, scanner->_repair_write_barrier)
            {
            }

            pending_visitor(const pending_visitor &visitor)
            : _scanner(visitor._scanner._zone, visitor._scanner._visitor, _stack, visitor._scanner._stack_bottom,
                       false, visitor._scanner._repair_write_barrier)
            {
            }

            void operator() (Subzone *subzone, usword_t q) {
                _scanner.scan(subzone, q);
                _scanner.flush();
            }

            void operator() (Large *large) {
                _scanner.scan(large);
                _scanner.flush();
            }
        };

        inline void scan() {
            scan_roots();

            // scan all pending blocks before we set the enlivening barrier to
            // reduce the amount of time threads are blocked in write barriers
            flush();

            if (_needs_enlivening_barrier) {
                _zone->enlivening_barrier();
            }

            if (scan_pending()) {
                pending_visitor visitor(this);
                visitPendingBlocks(_zone, visitor);
            }

            scan_threads();

            // flush again because scanning with the associations scanner is more expensive
            flush();

            scan_associations();
        }
    };

    // Predefined scanning strategies.

    template <typename ReferenceIterator> class FullScanningStrategy {
    public:
        // provide a way to rebind this template type to another just like STL allocators can do.
        template <typename U> struct rebind { typedef FullScanningStrategy<U> other; };

        inline static bool should_mark(Subzone *subzone, usword_t q) { return false; }
        inline static bool should_mark(Large *large) { return false; };

        inline static bool should_scan(Subzone *subzone, usword_t q) {
            return subzone->is_scanned(q);
        }

        inline static bool should_scan(Large *large) {
            return large->is_scanned();
        }

        // non-object block scan.
        inline static void scan_block(ReferenceIterator &scanner, void *block, usword_t size, usword_t layout, WriteBarrier *wb) {
            Range range(block, size);
            if (layout == AUTO_POINTERS_ONLY) {
                ReferenceInfo info(kAllPointersHeapReference, wb);
                scanner.scan_range(info, range);
            } else {
                ReferenceInfo info(kConservativeHeapReference, wb);
                if (scanner.visit_interior_pointers()) {
                    // check interior pointers to handle CoreText nastiness.
                    ReferenceInfo indirect_info(kConservativeHeapIndirectReference, wb);
                    scanner.scan_range_indirect(info, indirect_info, range);
                } else {
                    scanner.scan_range(info, range);
                }
            }
        }

        inline static const unsigned char *layout_map_for_block(Zone *zone, void *block) { return zone->layout_map_for_block(block); }
        inline static bool visit_interior_pointers() { return false; }
        inline static bool scan_threads_suspended() { return true; }
        inline static pthread_rwlock_t *associations_lock(Zone *zone) { return zone->associations_lock(); }

        // exact object scan.
        inline static void scan_object(ReferenceIterator &scanner, void *object, usword_t size, usword_t layout, const unsigned char* map, WriteBarrier *wb) __attribute__((always_inline)) {
            if (map == NULL) {
                // gcc optimization: all ivars are objects, no map provided.  All additional space treated conservatively.  This isn't good for compaction since
                // we can't cheaply know the limit of where we could relocate and so all the memory is treated conservatively. clang issues a complete map
                // (see <rdar://problem/7159643>). Additionally, if AUTO_POINTERS_ONLY is marked, space beyond the map is treated as relocatable pointers.
                Range range(object, size);
                ReferenceInfo info(layout & AUTO_POINTERS_ONLY ? kAllPointersHeapReference : kConservativeHeapReference, wb);
                if (scanner.visit_interior_pointers()) {
                    ReferenceInfo indirect_info(kConservativeHeapIndirectReference, wb);
                    scanner.scan_range_indirect(info, indirect_info, range);
                } else
                    scanner.scan_range(info, range);
                return;
            }
            ReferenceInfo exactInfo(kExactHeapReference, wb);
            void **slots = (void **)object;
            void **last = (void **)displace(slots, size - sizeof(void*));
            AUTO_PROBE(auto_probe_scan_with_layout(slots, last, map));
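            // each layout map byte encodes a (skip count, run length) pair in its high and low
            // nibbles; e.g. 0x21 means: skip 2 words unscanned, then scan 1 pointer slot exactly.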
            // while not '\0' terminator
            while (unsigned data = *map++) {
                // extract the skip and run
                unsigned skip = data >> 4;
                unsigned run = data & 0xf;

                // advance the reference by the skip
                slots += skip;

                while (run--) scanner.scan_reference(exactInfo, slots++);
            }

            // since objects can be allocated with extra data at end, scan the remainder. If the AUTO_POINTERS_ONLY bit is
            // turned on in the layout, scan the remainder exactly, otherwise scan conservatively.
            ReferenceInfo remainderInfo((layout & AUTO_POINTERS_ONLY) ? kAllPointersHeapReference : kConservativeHeapReference, wb);
            if (scanner.visit_interior_pointers()) {
                // CFStorage objects keep indirect pointers to themselves, and thus should be pinned.
                ReferenceInfo indirect_info(kConservativeHeapIndirectReference, wb);
                while (slots <= last) scanner.scan_reference_indirect(remainderInfo, indirect_info, slots++);
            } else {
                while (slots <= last) scanner.scan_reference(remainderInfo, slots++);
            }
        }
    };

    template <typename ReferenceIterator> class GenerationalScanningStrategy {
    public:
        // provide a way to rebind this template type to another just like STL allocators can do.
        template <typename U> struct rebind { typedef GenerationalScanningStrategy<U> other; };

        // always mark old objects, even if they aren't scanned.
        inline static bool should_mark(Subzone *subzone, usword_t q) { return !subzone->is_new(q); }
        inline static bool should_mark(Large *large) { return !large->is_new(); };

        inline static bool should_scan(Subzone *subzone, usword_t q) {
            // only scan a block if it has ever been written to through its write-barrier.
            return subzone->is_scanned(q) && subzone->write_barrier().range_has_marked_cards(subzone->quantum_address(q), subzone->size(q));
        }

        inline static bool should_scan(Large *large) {
            return large->is_scanned() && large->write_barrier().range_has_marked_cards(large->address(), large->size());
        }

        inline static void scan_block(ReferenceIterator &scanner, void *block, usword_t size, usword_t layout, WriteBarrier *wb) {
            wb->scan_marked_ranges(block, size, layout == AUTO_POINTERS_ONLY ? &ReferenceIterator::scan_all_pointers_range : &ReferenceIterator::scan_conservative_range, &scanner);
        }

        inline static const unsigned char *layout_map_for_block(Zone *zone, void *block) { return zone->layout_map_for_block(block); }
        inline static bool visit_interior_pointers() { return false; }
        inline static bool scan_threads_suspended() { return true; }
        inline static pthread_rwlock_t *associations_lock(Zone *zone) { return zone->associations_lock(); }

        inline static void scan_object(ReferenceIterator &scanner, void *object, usword_t size, usword_t layout, const unsigned char* map, WriteBarrier *wb) {
            if (map == NULL) {
                // gcc optimization: all ivars are objects, no map provided.  All additional space treated conservatively.  This isn't good for compaction since
                // we can't cheaply know the limit of where we could relocate and so all the memory is treated conservatively. clang issues a complete map
                // (see <rdar://problem/7159643>). Additionally, if AUTO_POINTERS_ONLY is marked, space beyond the map is treated as relocatable pointers.
                wb->scan_marked_ranges(object, size, (layout & AUTO_POINTERS_ONLY) ? &ReferenceIterator::scan_all_pointers_range : &ReferenceIterator::scan_conservative_range, &scanner);
                return;
            }
            void **slots = (void **)object;
            void **end = (void **)displace(slots, size);
            AUTO_PROBE(auto_probe_scan_with_layout(slots, end, map));
            // while not '\0' terminator
            while (unsigned data = *map++) {
                // extract the skip and run
                unsigned skip = data >> 4;
                unsigned run = data & 0xf;

                // advance the reference by the skip
                slots += skip;

                if (run) {
                    // <rdar://problem/6516045>:  make sure we only scan valid ranges.
                    if (slots < end && (slots + run) <= end) {
                        wb->scan_marked_ranges(slots, run * sizeof(void*), &ReferenceIterator::scan_exact_range, &scanner);
                    } else {
                        break;
                    }
                }

                slots += run;
            }

            // since objects can be allocated with extra data at end, scan the remainder. If the AUTO_POINTERS_ONLY bit is
            // turned on in the layout, scan the remainder exactly, otherwise scan conservatively.
            if (slots < end) {
                wb->scan_marked_ranges(slots, (end - slots) * sizeof(void*),
                                       (layout & AUTO_POINTERS_ONLY) ?
                                       &ReferenceIterator::scan_exact_range :
                                       &ReferenceIterator::scan_conservative_range, &scanner);
            }
        }
    };

};

#endif // __AUTO_REFERENCE_ITERATOR__
