/*
 * Copyright (c) 2011 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */
/*
    ZoneDump.cpp
    Zone Dumping
    Copyright (c) 2009-2011 Apple Inc. All rights reserved.
 */

#include "Admin.h"
#include "Bitmap.h"
#include "BlockIterator.h"
#include "Configuration.h"
#include "Definitions.h"
#include "Environment.h"
#include "Large.h"
#include "Locks.h"
#include "Range.h"
#include "Region.h"
#include "Statistics.h"
#include "Subzone.h"
#include "Thread.h"
#include "WriteBarrierIterator.h"
#include "ThreadLocalCollector.h"
#include "Zone.h"

#include "auto_weak.h"
#include "auto_trace.h"

namespace Auto {

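    //
    // dump_all_blocks_visitor
    //
    // Visitor handed to visitAllocatedBlocks(); forwards every allocated
    // subzone quantum and every large block to the node_dump callback.
    //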
    struct dump_all_blocks_visitor {
        void (^node_dump)(const void *address, unsigned long size, unsigned int layout, unsigned long refcount);

        // Constructor
        dump_all_blocks_visitor(void) {}

        // visitor function for subzone
        inline bool visit(Zone *zone, Subzone *subzone, usword_t q) {
            // send single block information
            void *block = subzone->quantum_address(q);
            SubzoneBlockRef ref(subzone, q);
            node_dump(block, subzone->size(q), subzone->layout(q), ref.refcount());
            // always continue
            return true;
        }

        // visitor function for large
        inline bool visit(Zone *zone, Large *large) {
            // send single block information
            node_dump(large->address(), large->size(), large->layout(), large->refcount());
            // always continue
            return true;
        }
    };

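    //
    // dump_zone
    //
    // Suspends all registered threads, then reports every allocated block,
    // each thread's stack and registers, all roots, and all weak references
    // through the supplied callbacks, and finally resumes the threads.
    //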
    void Zone::dump_zone(
            auto_zone_stack_dump stack_dump,
            auto_zone_register_dump register_dump,
            auto_zone_node_dump thread_local_node_dump,
            auto_zone_root_dump root_dump,
            auto_zone_node_dump global_node_dump,
            auto_zone_weak_dump weak_dump_entry)
    {
        // Lock out new threads and suspend all others.
        // We don't take many locks, nor are we dependent on anything (much) that requires a lock.
        // We don't take, for example, the large lock, and are willing to miss a brand new one.
        // We don't take, for example, the regions lock, and are willing to miss a new empty region.
        // We don't take, for example, the refcounts lock, and are willing to provide an inexact refcount.
        // We don't take, for example, the admin locks, and will miss a not-quite-born object.

        // XXX need associative refs, too

        // XXX_PCB:  grab the thread list mutex, so newly registered threads can't interfere.
        // This can deadlock if called from gdb with other threads suspended.
        Mutex lock(&_registered_threads_mutex);

        // suspend all threads...
        Thread *thread = threads();
        while (thread != NULL) {
            if (!thread->is_current_thread() && thread->is_bound()) {
                thread->suspend();
            }
            thread = thread->next();
        }

        // for all nodes
        dump_all_blocks_visitor visitor;
        visitor.node_dump = global_node_dump;
        visitAllocatedBlocks(this, visitor);

        // for all threads
        thread = threads();
        while (thread != NULL) {
            thread->dump(stack_dump, register_dump, thread_local_node_dump);
            thread = thread->next();
        }

        // for all roots
        if (root_dump) {
            Mutex lock(&_roots_lock);
            PtrHashSet::iterator i = _roots.begin();
            while (i != _roots.end()) {
                root_dump((const void **)*i);
                ++i;
            }
        }

        // for all weak references
        if (weak_dump_entry) {
            SpinLock lock(&weak_refs_table_lock);
            weak_enumerate_table(this, ^(const weak_referrer_t &ref) { weak_dump_entry((const void **)ref.referrer, *ref.referrer); });
        }

        // resume threads
        thread = threads();
        while (thread != NULL) {
            if (!thread->is_current_thread() && thread->is_bound()) thread->resume();
            thread = thread->next();
        }
    }
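
    // Illustrative only: a minimal sketch of how a heap tool might drive this
    // dump through the public auto_zone_dump() entry point declared in
    // auto_zone.h. The block signatures mirror the parameter uses above
    // (node_dump in dump_all_blocks_visitor, the root_dump and weak_dump_entry
    // calls in dump_zone); treat the exact typedefs, and whether NULL
    // callbacks are tolerated, as assumptions.
    //
    //     auto_zone_dump(zone,
    //         NULL,   // stack_dump
    //         NULL,   // register_dump
    //         NULL,   // thread_local_node_dump
    //         ^(const void **root) { printf("root %p\n", root); },
    //         ^(const void *address, unsigned long size, unsigned int layout, unsigned long refcount) {
    //             printf("block %p size %lu layout %u rc %lu\n", address, size, layout, refcount);
    //         },
    //         ^(const void **referrer, const void *value) { printf("weak %p -> %p\n", referrer, value); });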
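    //
    // allocated_blocks_visitor
    //
    // Adapter that forwards each allocated block to an auto_zone_visitor_t's
    // visit_node callback, including whether the block is still thread local.
    //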
    struct allocated_blocks_visitor {
        auto_zone_visitor_t *_visitor;

        // Constructor
        allocated_blocks_visitor(auto_zone_visitor_t *visitor) : _visitor(visitor) {}

        // visitor function for subzone
        inline bool visit(Zone *zone, Subzone *subzone, usword_t q) {
            // send single block information
            void *block = subzone->quantum_address(q);
            SubzoneBlockRef ref(subzone, q);
            _visitor->visit_node(block, subzone->size(q), subzone->layout(q), ref.refcount(), subzone->is_thread_local(q));
            // always continue
            return true;
        }

        // visitor function for large
        inline bool visit(Zone *zone, Large *large) {
            // send single block information
            _visitor->visit_node(large->address(), large->size(), large->layout(), large->refcount(), false);
            // always continue
            return true;
        }
    };
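    //
    // visit_zone
    //
    // Structured variant of dump_zone(): suspends all registered threads and
    // invokes only the callbacks that are set in the auto_zone_visitor_t,
    // covering threads, nodes, roots, weak references, and associations.
    //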
    void Zone::visit_zone(auto_zone_visitor_t *visitor) {
        // Lock out new threads and suspend all others.
        // This can deadlock if called from gdb with other threads suspended.
        suspend_all_registered_threads();

        // for all threads
        if (visitor->visit_thread) {
            scan_registered_threads(^(Thread *thread) { thread->visit(visitor); });
        }

        // for all nodes
        if (visitor->visit_node) {
            allocated_blocks_visitor ab_visitor(visitor);
            visitAllocatedBlocks(this, ab_visitor);
        }

        // for all roots
        if (visitor->visit_root) {
            Mutex lock(&_roots_lock);
            for (PtrHashSet::iterator i = _roots.begin(), end = _roots.end(); i != end; ++i) {
                visitor->visit_root((const void **)*i);
            }
        }

        // for all weak references
        if (visitor->visit_weak) {
            SpinLock lock(&weak_refs_table_lock);
            weak_enumerate_table(this, ^(const weak_referrer_t &ref) {
                visitor->visit_weak(*ref.referrer, ref.referrer, ref.block);
            });
        }

        // for all associations
        if (visitor->visit_association) {
            ReadLock lock(&_associations_lock);
            for (AssociationsHashMap::iterator i = _associations.begin(), iend = _associations.end(); i != iend; ++i) {
                void *block = i->first;
                ObjectAssociationMap *refs = i->second;
                for (ObjectAssociationMap::iterator j = refs->begin(), jend = refs->end(); j != jend; ++j) {
                    ObjectAssociationMap::value_type &pair = *j;
                    visitor->visit_association(block, pair.first, pair.second);
                }
            }
        }

        // resume threads
        resume_all_registered_threads();
    }
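
    // Illustrative only: a minimal sketch of walking every allocated node via
    // the public auto_zone_visit() entry point declared in auto_zone.h.
    // visit_zone() skips callbacks that are not set, so a tool can populate
    // only the fields it needs; the exact auto_zone_visitor_t layout, and
    // whether its callbacks are blocks or function pointers, are assumptions.
    //
    //     auto_zone_visitor_t v = {};
    //     v.visit_node = ^(const void *address, size_t size, unsigned int layout,
    //                      unsigned long refcount, boolean_t is_thread_local) {
    //         printf("node %p size %zu rc %lu%s\n", address, size, refcount,
    //                is_thread_local ? " (thread local)" : "");
    //     };
    //     auto_zone_visit(zone, &v);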
};