1/* 2 * Copyright (c) 2011 Apple Inc. All rights reserved. 3 * 4 * @APPLE_APACHE_LICENSE_HEADER_START@ 5 * 6 * Licensed under the Apache License, Version 2.0 (the "License"); 7 * you may not use this file except in compliance with the License. 8 * You may obtain a copy of the License at 9 * 10 * http://www.apache.org/licenses/LICENSE-2.0 11 * 12 * Unless required by applicable law or agreed to in writing, software 13 * distributed under the License is distributed on an "AS IS" BASIS, 14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 * See the License for the specific language governing permissions and 16 * limitations under the License. 17 * 18 * @APPLE_APACHE_LICENSE_HEADER_END@ 19 */ 20/* 21 BlockIterator.h 22 Template Functions to visit all blocks in the GC heap. 23 Copyright (c) 2004-2011 Apple Inc. All rights reserved. 24 */ 25 26#pragma once 27#ifndef __AUTO_BLOCK_ITERATOR__ 28#define __AUTO_BLOCK_ITERATOR__ 29 30 31#include "Admin.h" 32#include "Definitions.h" 33#include "Large.h" 34#include "Region.h" 35#include "Zone.h" 36#include "BlockRef.h" 37 38namespace Auto { 39 40 // 41 // visitAllocatedBlocks 42 // 43 // Visit all allocated blocks. 
44 // 45 46 template <class Visitor> bool visitAllocatedBlocks(Zone *zone, Visitor& visitor) { 47 // iterate through the regions first 48 for (Region *region = zone->region_list(); region != NULL; region = region->next()) { 49 // iterate through the subzones 50 SubzoneRangeIterator iterator(region->subzone_range()); 51 while (Subzone *subzone = iterator.next()) { 52 // get the number of quantum in the subzone 53 usword_t n = subzone->allocation_limit(); 54 55 for (usword_t q = 0; q < n; q = subzone->next_quantum(q)) { 56 if (!subzone->is_free(q)) { 57 visitor.visit(zone, subzone, q); 58 } 59 } 60 } 61 } 62 63 // iterate through the large blocks 64 for (Large *large = zone->large_list(); large != NULL; large = large->next()) { 65 // let the visitor visit the write barrier 66 if (!visitor.visit(zone, large)) return false; 67 } 68 69 return true; 70 } 71 72 // 73 // ConcurrentVisitorHelper 74 // 75 // Helper class for visitAllocatedBlocks_concurrent that maintains state to drive the enumeration. 76 // Visitor must provide: 77 // const boolean_t visitLargesConcurrently const() - return true if larges are heavyweight and should be processed one at a time, false if the entire large list should be treated as one work unit. The visit function will be called once for each large either way. 
    //     void visit(Zone *, Large *) - visit a Large
    //     boolean_t visit_subzone_quanta() const - return true if the visitor wants to iterate over subzone quanta, or false if it just wants to get the subzone
    //     void visit(Zone *, Subzone *, usword_t) - called if visit_subzone_quanta() returns true
    //     void visit(Zone *, Subzone *) - called if visit_subzone_quanta() returns false
    //
    // NOTE(review): the code below only calls visit_larges_concurrently(), visit(Zone *, Large *),
    // and visit(Zone *, Subzone *); the visit_subzone_quanta() protocol described above is not
    // exercised by this helper — confirm against the visitors that instantiate it.
    //

    template <class Visitor> class ConcurrentVisitorHelper {
        Zone *_zone;                        // the zone being enumerated
        Visitor &_visitor;                  // the visitor driving the enumeration
        Large *_current_large;              // the next large to visit (NULL once the large list is consumed/claimed)
        Region *_current_region;            // the region currently being visited
        SubzoneRangeIterator _iterator;     // subzone iterator for _current_region
        spin_lock_t _lock;                  // protects _current_large, _current_region, and _iterator

    private:

        // These members invoke the visitor functions. They are separated out so the visitor members can be inlined into them
        // without requiring logic that picks the Large/Subzone to be customized by the Visitor.

        // Visit a Large. In concurrent mode the caller hands out one Large at a time, so exactly one
        // visit happens here; otherwise this walks the remainder of the list, visiting each Large.
        void visit_large(Large *large_to_visit) {
            do {
                _visitor.visit(_zone, large_to_visit);
                // concurrent mode: stop after this one; list mode: advance to the next Large
                large_to_visit = _visitor.visit_larges_concurrently() ? NULL : large_to_visit->next();
            } while (large_to_visit != NULL);
        }

        // Visit the subzone.
        void visit_subzone(Subzone *subzone_to_visit) {
            _visitor.visit(_zone, subzone_to_visit);
        }

    public:

        //
        // Constructor
        //
        // The constructor sets up the initial state for the visit function.
        // NOTE(review): _current_region->subzone_range() dereferences zone->region_list() without a
        // NULL check — assumes the zone always has at least one region; confirm.
        //
        ConcurrentVisitorHelper(Zone *zone, Visitor &visitor) : _zone(zone), _visitor(visitor), _current_large(zone->large_list()), _current_region(zone->region_list()), _iterator(_current_region->subzone_range()), _lock(0) {}

        //
        // visit
        //
        // Implements a state machine to pick the next piece of work and call the visitor with it.
        // May be called concurrently by several worker threads (via visitor_wrapper): the shared
        // cursor is read/advanced under _lock, and the visitor itself runs with the lock dropped.
        // Larges are drained before subzones. Returns whether the final pass found any work; when
        // work_to_completion is true, keeps looping until a pass finds none.
        //
        inline boolean_t visit(boolean_t is_dedicated, boolean_t work_to_completion) {
            boolean_t did_work;
            do {
                did_work = false;
                Large *large_to_visit = NULL;
                Subzone *subzone_to_visit = NULL;
                {
                    // Hold the lock while picking out a large/subzone to visit
                    SpinLock lock(&_lock);
                    if (_current_large != NULL) {
                        large_to_visit = _current_large;
                        if (_visitor.visit_larges_concurrently()) {
                            // claim exactly one large; leave the rest for other workers
                            _current_large = _current_large->next();
                        } else {
                            // the visitor wants to handle all larges at once so clear out the list
                            _current_large = NULL;
                        }
                        did_work = true;
                    }
                    // if no large, look for a subzone
                    if (large_to_visit == NULL) {
                        subzone_to_visit = _iterator.next();
                        if (!subzone_to_visit && _current_region) {
                            // the current region is done, proceed to the next
                            // NOTE(review): only one next() is attempted after advancing — a region
                            // whose iterator yields no subzones would end this worker's pass even if
                            // later regions remain; presumably regions are never empty — confirm.
                            _current_region = _current_region->next();
                            if (_current_region) {
                                _iterator = SubzoneRangeIterator(_current_region->subzone_range());
                                subzone_to_visit = _iterator.next();
                            }
                        }
                    }
                }

                // run the visitor outside the lock so other workers can claim work concurrently
                if (large_to_visit) visit_large(large_to_visit);

                // If we found a subzone, visit all the allocated blocks.
                if (subzone_to_visit != NULL) {
                    did_work = true;
                    visit_subzone(subzone_to_visit);
                }
            } while (did_work && work_to_completion);
            return did_work;
        }

        //
        // visitor_wrapper
        //
        // wraps the call to visitor, to interface with the zone worker api
        //
        static boolean_t visitor_wrapper(void *arg, boolean_t is_dedicated, boolean_t work_to_completion) {
            ConcurrentVisitorHelper<Visitor> *helper = (ConcurrentVisitorHelper *)arg;
            return helper->visit(is_dedicated, work_to_completion);
        }
    };

    //
    // visitAllocatedBlocks_concurrent
    //
    // Visit all allocated blocks in a concurrent manner using multiple worker threads.
    // Each subzone is visited separately, and larges may be visited individually or all at once depending on the Visitor.
184 // Refer to ConcurrentVisitorHelper for what Visitor must implement. 185 // 186 template <class Visitor> void visitAllocatedBlocks_concurrent(Zone *zone, Visitor& visitor) { 187 ConcurrentVisitorHelper<Visitor> helper(zone, visitor); 188 zone->perform_work_with_helper_threads(ConcurrentVisitorHelper<Visitor>::visitor_wrapper, &helper); 189 } 190 191 // 192 // visitPendingBlocks 193 // 194 // Visit all pending blocks. 195 // 196 197 template <class Visitor> void visitPendingBlocks(Zone *zone, Visitor& visitor) { 198 // iterate through the regions first 199 for (Region *region = zone->region_list(); region != NULL; region = region->next()) { 200 // iterate through the subzones 201 SubzoneRangeIterator iterator(region->subzone_range()); 202 while (Subzone *subzone = iterator.next()) { 203 // get the number of quantum in the subzone 204 usword_t n = subzone->allocation_limit(); 205 206 for (usword_t q = 0; q < n; q = subzone->next_quantum(q)) { 207 if (!subzone->is_free(q) && subzone->test_and_clear_pending(q)) { 208 visitor(subzone, q); 209 } 210 } 211 } 212 } 213 214 // iterate through the large blocks 215 for (Large *large = zone->large_list(); large != NULL; large = large->next()) { 216 // let the visitor visit the write barrier 217 if (large->is_pending()) { 218 large->clear_pending(); 219 visitor(large); 220 } 221 } 222 } 223 224 // 225 // visitAllBlocks 226 // 227 // Visit all the blocks including free blocks. 
228 // 229 // BlockRef FIXME: block visitors should hand out BlockRefs 230 template <class Visitor> bool visitAllBlocks(Zone *zone, Visitor& visitor) { 231 // iterate through the regions first 232 for (Region *region = zone->region_list(); region != NULL; region = region->next()) { 233 // iterate through the subzones 234 SubzoneRangeIterator iterator(region->subzone_range()); 235 while (Subzone *subzone = iterator.next()) { 236 // get the number of quantum in the subzone 237 usword_t n = subzone->allocation_limit(); 238 239 for (usword_t q = 0; q < n; q = subzone->next_quantum(q)) { 240 if (!visitor.visit(zone, subzone, q)) return false; 241 } 242 } 243 } 244 245 // iterate through the large 246 for (Large *large = zone->large_list(); large != NULL; large = large->next()) { 247 // let the visitor visit the write barrier 248 if (!visitor.visit(zone, large)) return false; 249 } 250 251 return true; 252 } 253 254 // 255 // visitAssociationsNoLock 256 // 257 // Visits all known associative references. Assumes _associations_lock is held. 258 // 259 260 template <class Visitor> bool visitAssociationsNoLock(Zone *zone, Visitor& visitor) { 261 AssociationsHashMap &associations(zone->associations()); 262 for (AssociationsHashMap::iterator i = associations.begin(), iend = associations.end(); i != iend; i++) { 263 void *block = i->first; 264 ObjectAssociationMap *refs = i->second; 265 for (ObjectAssociationMap::iterator j = refs->begin(), jend = refs->end(); j != jend; j++) { 266 ObjectAssociationMap::value_type &pair = *j; 267 if (!visitor(zone, block, pair.first, pair.second)) return false; 268 } 269 } 270 return true; 271 } 272 273 // 274 // visitRootsNoLock 275 // 276 // Visits all registered roots. Assumes _roots_lock is held. 
277 // 278 279 template <typename Visitor> bool visitRootsNoLock(Zone *zone, Visitor visitor) { 280 PtrHashSet &roots(zone->roots()); 281 for (PtrHashSet::const_iterator i = roots.begin(), end = roots.end(); i != end; ++i) { 282 if (!visitor((void **)*i)) return false; 283 } 284 return true; 285 } 286 287 // 288 // blockDo 289 // 290 // Applies an operation to a block, calling the appropriate operator() according 291 // to the kind of block small/medium vs. large. 292 // 293 294 template <class BlockDo> void blockDo(Zone *zone, void *block, BlockDo &op) { 295 if (zone->in_subzone_memory(block)) { 296 Subzone *subzone = Subzone::subzone(block); 297 usword_t q; 298 if (subzone->block_is_start(block, &q)) op(subzone, q); 299 } else if (zone->block_is_start_large(block)) { 300 op(Large::large(block)); 301 } 302 } 303 304 inline void blockDo(Zone *zone, void *block, void (^subzoneDo) (Subzone *subzone, usword_t q), void (^largeDo) (Large *large), void (^elseDo) (void *block) = NULL) { 305 if (zone->in_subzone_memory(block)) { 306 Subzone *subzone = Subzone::subzone(block); 307 usword_t q; 308 if (subzone->block_is_start(block, &q)) subzoneDo(subzone, q); 309 } else if (zone->block_is_start_large(block)) { 310 largeDo(Large::large(block)); 311 } else if (elseDo) { 312 elseDo(block); 313 } 314 } 315 316 // 317 // blockStartNoLockDo 318 // 319 // Applies either subzoneDo, or largeDo to the block address, depending on what kind of block it is. Takes no locks. 320 // Used by compaction. 
321 // 322 323 inline void blockStartNoLockDo(Zone *zone, void *address, void (^subzoneDo) (Subzone *subzone, usword_t q), void (^largeDo) (Large *large)) { 324 if (zone->in_subzone_memory(address)) { 325 Subzone *subzone = Subzone::subzone(address); 326 usword_t q; 327 if (subzone->block_start(address, q)) subzoneDo(subzone, q); 328 } else if (zone->in_large_memory(address)) { 329 Large *large = zone->block_start_large(address); 330 if (large) largeDo(large); 331 } 332 } 333}; 334 335#endif // __AUTO_BLOCK_ITERATOR__ 336