1/* 2 * Copyright (c) 2011 Apple Inc. All rights reserved. 3 * 4 * @APPLE_APACHE_LICENSE_HEADER_START@ 5 * 6 * Licensed under the Apache License, Version 2.0 (the "License"); 7 * you may not use this file except in compliance with the License. 8 * You may obtain a copy of the License at 9 * 10 * http://www.apache.org/licenses/LICENSE-2.0 11 * 12 * Unless required by applicable law or agreed to in writing, software 13 * distributed under the License is distributed on an "AS IS" BASIS, 14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 * See the License for the specific language governing permissions and 16 * limitations under the License. 17 * 18 * @APPLE_APACHE_LICENSE_HEADER_END@ 19 */ 20/* 21 auto_zone.cpp 22 Automatic Garbage Collection 23 Copyright (c) 2002-2011 Apple Inc. All rights reserved. 24 */ 25 26#include <CoreFoundation/CoreFoundation.h> 27 28#include "auto_zone.h" 29#include "auto_impl_utilities.h" 30#include "auto_weak.h" 31#include "auto_trace.h" 32#include "auto_dtrace.h" 33#include "Zone.h" 34#include "Locks.h" 35#include "InUseEnumerator.h" 36#include "ThreadLocalCollector.h" 37#include "auto_tester/auto_tester.h" 38#include "BlockIterator.h" 39 40#include <crt_externs.h> 41#include <mach-o/dyld.h> 42#include <stdlib.h> 43#include <libc.h> 44#include <dlfcn.h> 45#include <sys/syslimits.h> 46#include <sys/types.h> 47#include <sys/event.h> 48#include <sys/time.h> 49#include <msgtracer_client.h> 50 51#ifdef __BLOCKS__ 52#include <Block.h> 53#include <notify.h> 54#include <dispatch/private.h> 55#endif 56 57#define USE_INTERPOSING 0 58 59#if USE_INTERPOSING 60#include <mach-o/dyld-interposing.h> 61#endif 62 63using namespace Auto; 64 65static char *b2s(uint64_t bytes, char *buf, int bufsize); 66 67/********* Globals ************/ 68 69#ifdef AUTO_TESTER 70AutoProbeFunctions *auto_probe_functions = NULL; 71#endif 72 73bool auto_set_probe_functions(AutoProbeFunctions *functions) { 74#ifdef AUTO_TESTER 75 auto_probe_functions = functions; 76 return true; 77#else 78 return false; 79#endif 80} 81 82// Reference count logging support for ObjectAlloc et. al. 83void (*__auto_reference_logger)(uint32_t eventtype, void *ptr, uintptr_t data) = NULL; 84 85/********* Parameters ************/ 86 87#define VM_COPY_THRESHOLD (40 * 1024) 88 89 90/********* Zone callbacks ************/ 91 92 93boolean_t auto_zone_is_finalized(auto_zone_t *zone, const void *ptr) { 94 Zone *azone = (Zone *)zone; 95 // detects if the specified pointer is about to become garbage 96 return (ptr && azone->block_is_garbage((void *)ptr)); 97} 98 99static void auto_collect_internal(Zone *zone, boolean_t generational) { 100 if (zone->_collector_disable_count) return; 101 CollectionTimer timer; 102 103 Statistics &zone_stats = zone->statistics(); 104 105 timer.total_time().start(); 106 zone_stats.idle_timer().stop(); 107 if (zone->control.log & AUTO_LOG_TIMINGS) timer.enable_scan_timer(); 108 109 zone_stats.reset_for_heap_collection(); 110 111 AUTO_PROBE(auto_probe_begin_heap_scan(generational)); 112 113 // bound the bottom of the stack. 114 vm_address_t stack_bottom = auto_get_sp(); 115 if (zone->control.disable_generational) generational = false; 116 GARBAGE_COLLECTION_COLLECTION_BEGIN((auto_zone_t*)zone, generational ? AUTO_TRACE_GENERATIONAL : AUTO_TRACE_FULL); 117 zone->set_state(scanning); 118 119 Thread &collector_thread = zone->register_thread(); 120 collector_thread.set_in_collector(true); 121 zone->collect_begin(); 122 123 zone->collect((bool)generational, (void *)stack_bottom, timer); 124 PointerList &list = zone->garbage_list(); 125 size_t garbage_count = list.count(); 126 void **garbage = list.buffer(); 127 size_t large_garbage_count = zone->large_garbage_count(); 128 void **large_garbage = (large_garbage_count ? garbage + garbage_count - large_garbage_count : NULL); 129 130 AUTO_PROBE(auto_probe_end_heap_scan(garbage_count, garbage)); 131 132 size_t bytes_freed = 0; 133 134 // note the garbage so the write-barrier can detect resurrection 135 GARBAGE_COLLECTION_COLLECTION_PHASE_BEGIN((auto_zone_t*)zone, AUTO_TRACE_FINALIZING_PHASE); 136 zone->set_state(finalizing); 137 size_t block_count = garbage_count, byte_count = 0; 138 zone->invalidate_garbage(garbage_count, garbage); 139 GARBAGE_COLLECTION_COLLECTION_PHASE_END((auto_zone_t*)zone, AUTO_TRACE_FINALIZING_PHASE, (uint64_t)block_count, (uint64_t)byte_count); 140 zone->set_state(reclaiming); 141 GARBAGE_COLLECTION_COLLECTION_PHASE_BEGIN((auto_zone_t*)zone, AUTO_TRACE_SCAVENGING_PHASE); 142 bytes_freed = zone->free_garbage(garbage_count - large_garbage_count, garbage, large_garbage_count, large_garbage, block_count, byte_count); 143 zone->clear_zombies(); 144 GARBAGE_COLLECTION_COLLECTION_PHASE_END((auto_zone_t*)zone, AUTO_TRACE_SCAVENGING_PHASE, (uint64_t)block_count, (uint64_t)bytes_freed); 145 146 timer.total_time().stop(); 147 zone->collect_end(timer, bytes_freed); 148 collector_thread.set_in_collector(false); 149 150 GARBAGE_COLLECTION_COLLECTION_END((auto_zone_t*)zone, (uint64_t)garbage_count, (uint64_t)bytes_freed, (uint64_t)zone_stats.count(), (uint64_t)zone_stats.size()); 151 152 zone->set_state(idle); 153 AUTO_PROBE(auto_probe_heap_collection_complete()); 154 155 WallClockTimer &idle_timer = zone->statistics().idle_timer(); 156 if (zone->control.log & AUTO_LOG_TIMINGS) { 157 const char *idle = idle_timer.time_string(); 158 char bytes[16]; 159 b2s(zone->statistics().bytes_scanned(), bytes, sizeof(bytes)); 160 malloc_printf("%s: %s GC completed in %s after %s idle. scanned %5llu blocks (%s) in %s\n", 161 auto_prelude(), (generational ? "gen." : "full"), timer.total_time().time_string(), idle, 162 zone->statistics().blocks_scanned(), bytes, timer.scan_timer().time_string()); 163 } 164 if (zone->control.log & AUTO_LOG_COLLECTIONS) { 165 malloc_statistics_t stats; 166 zone->malloc_statistics(&stats); 167 char freed[16], in_use[16]; 168 b2s(zone->statistics().size(), in_use, sizeof(in_use)); 169 b2s(bytes_freed, freed, sizeof(freed)); 170 malloc_printf("%s: %s GC collected %5llu blocks (%s). blocks in use: %7llu (%s)\n", 171 auto_prelude(), (generational ? "gen." : "full"), 172 (unsigned long long)garbage_count, freed, 173 zone->statistics().count(), in_use); 174 } 175#ifdef MEASURE_TLC_STATS 176 zone->statistics().print_tlc_stats(); 177#endif 178 idle_timer.reset(); 179 idle_timer.start(); 180} 181 182// 183// old external entry point for collection 184// 185void auto_collect(auto_zone_t *zone, auto_collection_mode_t mode, void *collection_context) { 186 Zone *azone = (Zone *)zone; 187 if (!azone->_collection_queue) return; 188 auto_collection_mode_t heap_mode = mode & 0x3; 189 if ((mode & AUTO_COLLECT_IF_NEEDED) || (mode == 0)) { 190 auto_zone_collect(zone, AUTO_ZONE_COLLECT_NO_OPTIONS); 191 } else { 192 static uintptr_t options_translation[] = {AUTO_ZONE_COLLECT_RATIO_COLLECTION, AUTO_ZONE_COLLECT_GENERATIONAL_COLLECTION, AUTO_ZONE_COLLECT_FULL_COLLECTION, AUTO_ZONE_COLLECT_EXHAUSTIVE_COLLECTION}; 193 194 auto_zone_options_t request_mode = options_translation[heap_mode]; 195 auto_zone_collect(zone, request_mode); 196 if (mode & AUTO_COLLECT_SYNCHRONOUS) { 197 Zone *azone = (Zone *)zone; 198 // For synchronous requests we have a problem: we must avoid deadlock with main thread finalization. 199 // For dispatch, use a group to implement a wait with timeout. 200 dispatch_group_t group = dispatch_group_create(); 201 dispatch_group_async(group, azone->_collection_queue, ^{}); 202 dispatch_group_wait(group, dispatch_time(0, 10*NSEC_PER_SEC)); 203 dispatch_release(group); 204 } 205 } 206} 207 208 209static inline bool _increment_pending_count(Zone *azone, auto_zone_options_t global_mode, bool coalesce_requested) { 210 bool did_coalesce = true; 211 Mutex lock(&azone->_collection_mutex); 212 if (global_mode < AUTO_ZONE_COLLECT_GLOBAL_MODE_COUNT) { 213 if (!coalesce_requested || azone->_pending_collections[global_mode] == 0) { 214 /* Check for overflow on the pending count. This should only happen if someone is doing something wrong. */ 215 if (azone->_pending_collections[global_mode] == UINT8_MAX) { 216 /* Overflow. Force the request to coalesce. We already have many of the same type queued, so probably benign. */ 217 auto_error(azone, "pending collection count overflowed", NULL); 218 } else { 219 azone->_pending_collections[global_mode]++; 220 did_coalesce = false; 221 } 222 } 223 } 224 return did_coalesce; 225} 226 227static inline void _decrement_pending_count(Zone *azone, auto_zone_options_t global_mode) { 228 Mutex lock(&azone->_collection_mutex); 229 assert(global_mode < AUTO_ZONE_COLLECT_GLOBAL_MODE_COUNT); 230 assert(azone->_pending_collections[global_mode] > 0); 231 azone->_pending_collections[global_mode]--; 232 AUTO_PROBE(auto_probe_collection_complete()); 233} 234 235static void auto_zone_generational_collection(Zone *zone) 236{ 237 auto_collect_internal(zone, true); 238 _decrement_pending_count(zone, AUTO_ZONE_COLLECT_GENERATIONAL_COLLECTION); 239} 240 241static void auto_zone_full_collection(Zone *zone) 242{ 243 auto_collect_internal(zone, false); 244 _decrement_pending_count(zone, AUTO_ZONE_COLLECT_FULL_COLLECTION); 245 246 // If collection checking is enabled, run a check. 247 if (zone->collection_checking_enabled()) 248 zone->increment_check_counts(); 249} 250 251static void auto_zone_exhaustive_collection(Zone *zone) 252{ 253 // run collections until objects are no longer reclaimed. 254 Statistics &stats = zone->statistics(); 255 uint64_t count, collections = 0; 256 do { 257 count = stats.count(); 258 auto_collect_internal(zone, false); 259 } while (stats.count() < count && ((Environment::exhaustive_collection_limit == 0) || (++collections < Environment::exhaustive_collection_limit))); 260 _decrement_pending_count(zone, AUTO_ZONE_COLLECT_EXHAUSTIVE_COLLECTION); 261 262 // If collection checking is enabled, run a check. 263 if (zone->collection_checking_enabled()) 264 zone->increment_check_counts(); 265} 266 267static void auto_zone_ratio_collection(Zone *zone) 268{ 269 if (zone->_collection_count++ == zone->control.full_vs_gen_frequency) { 270 zone->_collection_count = 0; 271 auto_collect_internal(zone, false); 272 } else { 273 auto_collect_internal(zone, true); 274 } 275 _decrement_pending_count(zone, AUTO_ZONE_COLLECT_RATIO_COLLECTION); 276} 277 278void auto_zone_collect(auto_zone_t *zone, auto_zone_options_t options) 279{ 280 AUTO_PROBE(auto_probe_auto_zone_collect(options)); 281 Zone *azone = (Zone *)zone; 282 Thread &thread = azone->registered_thread(); 283 284 // First, handle the no options case by promoting to the appropriate mode. 285 if (options == AUTO_ZONE_COLLECT_NO_OPTIONS) { 286 if (azone->should_collect()) 287 options = AUTO_ZONE_COLLECT_COALESCE|AUTO_ZONE_COLLECT_RATIO_COLLECTION; 288 if (ThreadLocalCollector::should_collect(azone, thread, true)) 289 options |= AUTO_ZONE_COLLECT_LOCAL_COLLECTION; 290 } 291 292 // Run TLC modes 293 if (options & AUTO_ZONE_COLLECT_LOCAL_COLLECTION) { 294 ThreadLocalCollector tlc(azone, (void *)auto_get_sp(), thread); 295 tlc.collect(true); 296 } 297 298 // Volunteer for parallel scanning work. 299 if (!pthread_main_np()) azone->volunteer_for_work(true); 300 301 auto_zone_options_t global_mode = options & AUTO_ZONE_COLLECT_GLOBAL_COLLECTION_MODE_MASK; 302 303 if (global_mode != 0) { 304 if (!_increment_pending_count(azone, global_mode, options & AUTO_ZONE_COLLECT_COALESCE)) { 305 // Enqueue global collection request 306 dispatch_block_t collect_func; 307 switch (global_mode) { 308 case AUTO_ZONE_COLLECT_NO_OPTIONS: 309 /* This case is impossible */ 310 collect_func = NULL; 311 break; 312 case AUTO_ZONE_COLLECT_RATIO_COLLECTION: 313 collect_func = ^{ 314 auto_zone_ratio_collection((Zone *)dispatch_get_context(dispatch_get_current_queue())); 315 }; 316 break; 317 case AUTO_ZONE_COLLECT_GENERATIONAL_COLLECTION: 318 collect_func = ^{ 319 auto_zone_generational_collection((Zone *)dispatch_get_context(dispatch_get_current_queue())); 320 }; 321 break; 322 case AUTO_ZONE_COLLECT_FULL_COLLECTION: 323 collect_func = ^{ 324 auto_zone_full_collection((Zone *)dispatch_get_context(dispatch_get_current_queue())); 325 }; 326 break; 327 case AUTO_ZONE_COLLECT_EXHAUSTIVE_COLLECTION: 328 collect_func = ^{ 329 auto_zone_exhaustive_collection((Zone *)dispatch_get_context(dispatch_get_current_queue())); 330 }; 331 break; 332 default: 333 collect_func = NULL; 334 malloc_printf("%s: Unknown mode %d passed to auto_zone_collect() ignored.\n", auto_prelude(), global_mode); 335 break; 336 } 337 if (collect_func && azone->_collection_queue) { 338 dispatch_async(azone->_collection_queue, collect_func); 339 } 340 } 341 } 342} 343 344extern void auto_zone_reap_all_local_blocks(auto_zone_t *zone) 345{ 346 Zone *azone = (Zone *)zone; 347 Thread *thread = azone->current_thread(); 348 if (thread) 349 thread->reap_all_local_blocks(); 350} 351 352void auto_zone_collect_and_notify(auto_zone_t *zone, auto_zone_options_t options, dispatch_queue_t callback_queue, dispatch_block_t completion_callback) { 353 Zone *azone = (Zone *)zone; 354 auto_zone_collect(zone, options); 355 if (callback_queue && completion_callback && azone->_collection_queue) { 356 // ensure the proper lifetimes of the callback queue/block. 357 dispatch_retain(callback_queue); 358 completion_callback = Block_copy(completion_callback); 359 dispatch_async(azone->_collection_queue, ^{ 360 dispatch_async(callback_queue, completion_callback); 361 Block_release(completion_callback); 362 dispatch_release(callback_queue); 363 }); 364 } 365} 366 367void auto_zone_compact(auto_zone_t *zone, auto_zone_compact_options_t options, dispatch_queue_t callback_queue, dispatch_block_t completion_callback) { 368 Zone *azone = (Zone *)zone; 369 if (!azone->compaction_disabled() && azone->_collection_queue) { 370 switch (options) { 371 case AUTO_ZONE_COMPACT_ANALYZE: { 372 if (callback_queue && completion_callback) { 373 dispatch_retain(callback_queue); 374 completion_callback = Block_copy(completion_callback); 375 } 376 dispatch_async(azone->_collection_queue, ^{ 377 Zone *zone = (Zone *)dispatch_get_context(dispatch_get_current_queue()); 378 static const char *analyze_name = Environment::get("AUTO_ANALYZE_NOTIFICATION"); 379 zone->analyze_heap(analyze_name); 380 if (callback_queue && completion_callback) { 381 dispatch_async(callback_queue, completion_callback); 382 Block_release(completion_callback); 383 dispatch_release(callback_queue); 384 } 385 }); 386 break; 387 } 388 case AUTO_ZONE_COMPACT_IF_IDLE: { 389 if (azone->_compaction_timer && !azone->_compaction_pending) { 390 // schedule a compaction for 10 seconds in the future or _compaction_next_time, whichever is later. 391 // this will be canceled if more dispatch threads arrive sooner. 392 dispatch_time_t when = dispatch_time(0, 10 * NSEC_PER_SEC); 393 if (when < azone->_compaction_next_time) 394 when = azone->_compaction_next_time; 395 if (when != DISPATCH_TIME_FOREVER) { 396 dispatch_source_set_timer(azone->_compaction_timer, when, 0, 0); 397 azone->_compaction_pending = true; 398 } 399 } 400 break; 401 } 402 case AUTO_ZONE_COMPACT_NO_OPTIONS: { 403 if (callback_queue && completion_callback) { 404 dispatch_retain(callback_queue); 405 completion_callback = Block_copy(completion_callback); 406 } 407 dispatch_after(dispatch_time(DISPATCH_TIME_NOW, 10 * NSEC_PER_SEC), azone->_collection_queue, ^{ 408 Zone *zone = (Zone *)dispatch_get_context(dispatch_get_current_queue()); 409 zone->compact_heap(); 410 if (callback_queue && completion_callback) { 411 dispatch_async(callback_queue, completion_callback); 412 Block_release(completion_callback); 413 dispatch_release(callback_queue); 414 } 415 }); 416 break; 417 } 418 } 419 } 420} 421 422void auto_zone_disable_compaction(auto_zone_t *zone) { 423 Zone *azone = (Zone *)zone; 424 azone->disable_compaction(); 425} 426 427void auto_zone_register_resource_tracker(auto_zone_t *zone, const char *description, boolean_t (^should_collect)(void)) 428{ 429 Zone *azone = (Zone *)zone; 430 azone->register_resource_tracker(description, should_collect); 431} 432 433void auto_zone_unregister_resource_tracker(auto_zone_t *zone, const char *description) 434{ 435 Zone *azone = (Zone *)zone; 436 azone->unregister_resource_tracker(description); 437} 438 439 440boolean_t auto_zone_is_valid_pointer(auto_zone_t *zone, const void *ptr) { 441 auto_block_info_sieve<AUTO_BLOCK_INFO_IS_BLOCK> sieve((Zone *)zone, (void *)ptr); 442 return sieve.is_block(); 443} 444 445size_t auto_zone_size(auto_zone_t *zone, const void *ptr) { 446 auto_block_info_sieve<AUTO_BLOCK_INFO_SIZE> sieve((Zone *)zone, (void *)ptr); 447 return sieve.size(); 448} 449 450const void *auto_zone_base_pointer(auto_zone_t *zone, const void *ptr) { 451 auto_block_info_sieve<AUTO_BLOCK_INFO_BASE_POINTER> sieve((Zone *)zone, (void *)ptr); 452 return sieve.base(); 453} 454 455#if DEBUG 456void *WatchPoint = (void *)-1L; 457void blainer() { 458 sleep(0); 459} 460#endif 461 462 463static inline void *auto_malloc(auto_zone_t *zone, size_t size) { 464 Zone *azone = (Zone *)zone; 465 Thread &thread = azone->registered_thread(); 466 void *result = azone->block_allocate(thread, size, AUTO_MEMORY_UNSCANNED, false, true); 467 return result; 468} 469 470// Sieve class that deallocates a block. 471class auto_free_sieve : public sieve_base { 472 Zone *_zone; 473 474public: 475 476 auto_free_sieve(Zone *zone, const void *ptr) __attribute__((always_inline)) : _zone(zone) { 477 sieve_base_pointer(zone, ptr, *this); 478 } 479 480 template <class BlockRef> inline void processBlock(BlockRef ref) TEMPLATE_INLINE { 481 unsigned refcount = ref.refcount(); 482 if (refcount != 1) { 483 malloc_printf("*** free() called on collectable block with %p with refcount %d (ignored)\n", ref.address(), refcount); 484 } else { 485 _zone->block_deallocate(ref); 486 } 487 } 488 489 inline void nonBlock(const void *ptr) { 490 if (ptr != NULL) 491 error("Deallocating a non-block", ptr); 492 } 493}; 494 495static void auto_free(auto_zone_t *azone, void *ptr) { 496 auto_free_sieve sieve((Zone *)azone, (void *)ptr); 497} 498 499static void *auto_calloc(auto_zone_t *zone, size_t size1, size_t size2) { 500 Zone *azone = (Zone *)zone; 501 size1 *= size2; 502 void *ptr; 503 Thread &thread = azone->registered_thread(); 504 ptr = azone->block_allocate(thread, size1, AUTO_MEMORY_UNSCANNED, true, true); 505 return ptr; 506} 507 508static void *auto_valloc(auto_zone_t *zone, size_t size) { 509 Zone *azone = (Zone *)zone; 510 Thread &thread = azone->registered_thread(); 511 void *result = azone->block_allocate(thread, auto_round_page(size), AUTO_MEMORY_UNSCANNED, true, true); 512 return result; 513} 514 515static void *auto_realloc(auto_zone_t *zone, void *ptr, size_t size) { 516 Zone *azone = (Zone*)zone; 517 if (!ptr) return auto_malloc(zone, size); 518 519 auto_block_info_sieve<AUTO_BLOCK_INFO_SIZE|AUTO_BLOCK_INFO_LAYOUT|AUTO_BLOCK_INFO_REFCOUNT> block_info(azone, (void *)ptr); 520 size_t block_size = block_info.size(); 521 auto_memory_type_t layout = block_info.layout(); 522 523 // preserve the layout type, and retain count of the realloc'd object. 524 525 if (!block_info.is_block()) { 526 auto_error(azone, "auto_realloc: can't get type or retain count, ptr from ordinary malloc zone?", ptr); 527 // If we're here because someone used the wrong zone we should let them have what they intended. 528 return malloc_zone_realloc(malloc_zone_from_ptr(ptr), ptr, size); 529 } 530 531 // malloc man page says to allocate a "minimum sized" object if size==0 532 if (size == 0) size = allocate_quantum_small; 533 534 if (block_size >= size) { 535 size_t delta = block_size - size; 536 // When reducing the size check if the reduction would result in a smaller block being used. If not, reuse the same block. 537 // We can reuse the same block if any of these are true: 538 // 1) original is a small block, reduced by less than small quanta 539 // 2) original is a medium block, new size is still medium, and reduced by less than medium quanta 540 // 3) original is a large block, new size is still large, and block occupies the same number of pages 541 if ((block_size <= allocate_quantum_medium && delta < allocate_quantum_small) || 542 (block_size <= allocate_quantum_large && size >= allocate_quantum_medium && delta < allocate_quantum_medium) || 543 (size > allocate_quantum_large && auto_round_page(block_size) == auto_round_page(size))) { 544 // if the block is scanned, resizing smaller should clear the extra space 545 if (layout == AUTO_MEMORY_SCANNED) 546 bzero(displace(ptr,size), delta); 547 else if (layout == AUTO_MEMORY_ALL_WEAK_POINTERS) 548 weak_unregister_range(azone, (void **)displace(ptr, size), delta / sizeof(void*)); 549 return ptr; 550 } 551 } 552 553 // We could here optimize realloc by adding a primitive for small blocks to try to grow in place 554 // But given that this allocator is intended for objects, this is not necessary 555 Thread &thread = azone->registered_thread(); 556 void *new_ptr = azone->block_allocate(thread, size, layout, is_allocated_cleared(layout), (block_info.refcount() != 0)); 557 if (new_ptr) { 558 size_t min_size = MIN(size, block_size); 559 if (is_scanned(layout)) { 560 auto_zone_write_barrier_memmove((auto_zone_t *)azone, new_ptr, ptr, min_size); 561 } else if (layout == AUTO_MEMORY_ALL_WEAK_POINTERS) { 562 memmove(new_ptr, ptr, min_size); 563 Auto::SpinLock lock(&azone->weak_refs_table_lock); 564 weak_transfer_weak_contents_unscanned(azone, (void **)ptr, (void **)new_ptr, min_size, false); 565 if (block_size > size) weak_unregister_range_no_lock(azone, (void **)displace(ptr, size), (block_size - size) / sizeof(void*)); 566 } else { 567 memmove(new_ptr, ptr, min_size); 568 } 569 570 // BlockRef FIXME: we have already categorized ptr above, we should not need to do it again here 571 if (block_info.refcount() != 0) auto_zone_release(zone, ptr); // don't forget to let go rdar://6593098 572 } 573 574 // Don't bother trying to eagerly free old memory, even if it seems to be from malloc since, 575 // well, that's just a hueristic that can be wrong. In particular CF has on occasion bumped 576 // the refcount of GC memory to guard against use in unregistered threads, and we don't know 577 // how often or where this practice has spread. rdar://6063041 578 579 return new_ptr; 580} 581 582static unsigned auto_batch_malloc(auto_zone_t *zone, size_t size, void **results, unsigned num_requested) { 583 return auto_zone_batch_allocate(zone, size, AUTO_MEMORY_UNSCANNED, true, false, results, num_requested); 584} 585 586static void auto_zone_destroy(auto_zone_t *zone) { 587 Zone *azone = (Zone*)zone; 588 auto_error(azone, "auto_zone_destroy", zone); 589} 590 591static kern_return_t auto_default_reader(task_t task, vm_address_t address, vm_size_t size, void **ptr) { 592 *ptr = (void *)address; 593 return KERN_SUCCESS; 594} 595 596static kern_return_t auto_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t zone_address, memory_reader_t reader, vm_range_recorder_t recorder) { 597 kern_return_t err; 598 599 if (!reader) reader = auto_default_reader; 600 601 // make sure the zone version numbers match. 602 union { 603 unsigned *version; 604 void *voidStarVersion; 605 } u; 606 err = reader(task, zone_address + offsetof(malloc_zone_t, version), sizeof(unsigned), &u.voidStarVersion); 607 if (err != KERN_SUCCESS || *u.version != AUTO_ZONE_VERSION) return KERN_FAILURE; 608 609 InUseEnumerator enumerator(task, context, type_mask, zone_address, reader, recorder); 610 err = enumerator.scan(); 611 612 return err; 613} 614 615static size_t auto_good_size(malloc_zone_t *azone, size_t size) { 616 return ((Zone *)azone)->good_block_size(size); 617} 618 619unsigned auto_check_counter = 0; 620unsigned auto_check_start = 0; 621unsigned auto_check_modulo = 1; 622 623static boolean_t auto_check(malloc_zone_t *zone) { 624 if (! (++auto_check_counter % 10000)) { 625 malloc_printf("%s: At auto_check counter=%d\n", auto_prelude(), auto_check_counter); 626 } 627 if (auto_check_counter < auto_check_start) return 1; 628 if (auto_check_counter % auto_check_modulo) return 1; 629 return 1; 630} 631 632static char *b2s(uint64_t bytes, char *buf, int bufsize) { 633 if (bytes < 1024) { 634 snprintf(buf, bufsize, "%4llu bytes", bytes); 635 } else if (bytes < 1024*1024) { 636 snprintf(buf, bufsize, "%4.3g Kb", (float)bytes / 1024); 637 } else if (bytes < 1024*1024*1024) { 638 snprintf(buf, bufsize, "%4.3g Mb", (float)bytes / (1024*1024)); 639 } else { 640 snprintf(buf, bufsize, "%4.3g Gb", (float)bytes / (1024*1024*1024)); 641 } 642 return buf; 643} 644 645static void auto_zone_print(malloc_zone_t *zone, boolean_t verbose) { 646 malloc_statistics_t stats; 647 Zone *azone = (Zone *)zone; 648 azone->malloc_statistics(&stats); 649 char buf1[256]; 650 char buf2[256]; 651 printf("auto zone %p: in_use=%u used=%s allocated=%s\n", azone, stats.blocks_in_use, b2s(stats.size_in_use, buf1, sizeof(buf1)), b2s(stats.size_allocated, buf2, sizeof(buf2))); 652 if (verbose) azone->print_all_blocks(); 653} 654 655static void auto_zone_log(malloc_zone_t *zone, void *log_address) { 656} 657 658// these force_lock() calls get called when a process calls fork(). we need to be careful not to be in the collector when this happens. 659 660static void auto_zone_force_lock(malloc_zone_t *zone) { 661 // if (azone->control.log & AUTO_LOG_UNUSUAL) malloc_printf("%s: auto_zone_force_lock\n", auto_prelude()); 662 // need to grab the allocation locks in each Admin in each Region 663 // After we fork, need to zero out the thread list. 664} 665 666static void auto_zone_force_unlock(malloc_zone_t *zone) { 667 // if (azone->control.log & AUTO_LOG_UNUSUAL) malloc_printf("%s: auto_zone_force_unlock\n", auto_prelude()); 668} 669 670static void auto_malloc_statistics(malloc_zone_t *zone, malloc_statistics_t *stats) { 671 Zone *azone = (Zone *)zone; 672 azone->malloc_statistics(stats); 673} 674 675static boolean_t auto_malloc_zone_locked(malloc_zone_t *zone) { 676 // this is called by malloc_gdb_po_unsafe, on behalf of GDB, with all other threads suspended. 677 // we have to check to see if any of our spin locks or mutexes are held. 678 return ((Zone *)zone)->is_locked(); 679} 680 681/********* Entry points ************/ 682 683static struct malloc_introspection_t auto_zone_introspect = { 684 auto_in_use_enumerator, 685 auto_good_size, 686 auto_check, 687 auto_zone_print, 688 auto_zone_log, 689 auto_zone_force_lock, 690 auto_zone_force_unlock, 691 auto_malloc_statistics, 692 auto_malloc_zone_locked, 693 auto_zone_enable_collection_checking, 694 auto_zone_disable_collection_checking, 695 auto_zone_track_pointer, 696 (void (*)(malloc_zone_t *, void (^)(void *,void *)))auto_zone_enumerate_uncollected 697}; 698 699struct malloc_introspection_t auto_zone_introspection() { 700 return auto_zone_introspect; 701} 702 703static auto_zone_t *gc_zone = NULL; 704 705// DEPRECATED 706auto_zone_t *auto_zone(void) { 707 return gc_zone; 708} 709 710auto_zone_t *auto_zone_from_pointer(void *pointer) { 711 malloc_zone_t *zone = malloc_zone_from_ptr(pointer); 712 return (zone && zone->introspect == &auto_zone_introspect) ? zone : NULL; 713} 714 715static void * volatile queues[__PTK_FRAMEWORK_GC_KEY9-__PTK_FRAMEWORK_GC_KEY0+1]; 716static void * volatile pressure_sources[__PTK_FRAMEWORK_GC_KEY9-__PTK_FRAMEWORK_GC_KEY0+1]; 717static void * volatile compaction_timers[__PTK_FRAMEWORK_GC_KEY9-__PTK_FRAMEWORK_GC_KEY0+1]; 718 719static void _auto_zone_log_usage(void *_unused) { 720 void *cf_lib = dlopen("/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation", 721 RTLD_LAZY|RTLD_NOLOAD); 722 723 if (!cf_lib) return; 724 725 auto CFBundleGetMainBundle_ = (typeof(CFBundleGetMainBundle) *)dlsym(cf_lib, "CFBundleGetMainBundle"); 726 auto CFBundleGetIdentifier_ = (typeof(CFBundleGetIdentifier) *)dlsym(cf_lib, "CFBundleGetIdentifier"); 727 auto CFStringGetCString_ = (typeof(CFStringGetCString) *) dlsym(cf_lib, "CFStringGetCString"); 728 729 if (!CFBundleGetMainBundle_ || !CFBundleGetIdentifier_ || !CFStringGetCString_) return; 730 731 CFBundleRef bundle = CFBundleGetMainBundle_(); 732 if (!bundle) return; 733 CFStringRef string = CFBundleGetIdentifier_(bundle); 734 if (!string) return; 735 char bundle_name[1024]; 736 bool got_bundle_name = CFStringGetCString_(string, bundle_name, sizeof(bundle_name), kCFStringEncodingUTF8); 737 if (!got_bundle_name) return; 738 739#define STRN_EQ(x, y) (strncmp((x), (y), strlen(y)) == 0) 740 if (STRN_EQ(bundle_name, "com.apple.")) return; 741 742 msgtracer_log_with_keys("com.apple.runtime.gcusage", ASL_LEVEL_NOTICE, 743 "com.apple.message.signature", bundle_name, 744 "com.apple.message.summarize", "YES", NULL); 745} 746 747// there can be several autonomous auto_zone's running, in theory at least. 748auto_zone_t *auto_zone_create(const char *name) { 749 aux_init(); 750 pthread_key_t key = Zone::allocate_thread_key(); 751 if (key == 0) return NULL; 752 753 // CoreFoundation is not up and running yet, therefore defer this work. 754 dispatch_async_f(dispatch_get_main_queue(), NULL, _auto_zone_log_usage); 755 756 Zone *azone = new Zone(key); 757 azone->basic_zone.size = auto_zone_size; 758 azone->basic_zone.malloc = auto_malloc; 759 azone->basic_zone.free = auto_free; 760 azone->basic_zone.calloc = auto_calloc; 761 azone->basic_zone.valloc = auto_valloc; 762 azone->basic_zone.realloc = auto_realloc; 763 azone->basic_zone.destroy = auto_zone_destroy; 764 azone->basic_zone.batch_malloc = auto_batch_malloc; 765 azone->basic_zone.zone_name = name; // ; 766 azone->basic_zone.introspect = &auto_zone_introspect; 767 azone->basic_zone.version = AUTO_ZONE_VERSION; 768 azone->basic_zone.memalign = NULL; 769 // mark version field with current size of structure. 770 azone->control.version = sizeof(auto_collection_control_t); 771 azone->control.disable_generational = Environment::read_bool("AUTO_DISABLE_GENERATIONAL", false); 772 azone->control.malloc_stack_logging = (Environment::get("MallocStackLogging") != NULL || Environment::get("MallocStackLoggingNoCompact") != NULL); 773 azone->control.log = AUTO_LOG_NONE; 774 if (Environment::read_bool("AUTO_LOG_TIMINGS")) azone->control.log |= AUTO_LOG_TIMINGS; 775 if (Environment::read_bool("AUTO_LOG_ALL")) azone->control.log |= AUTO_LOG_ALL; 776 if (Environment::read_bool("AUTO_LOG_COLLECTIONS")) azone->control.log |= AUTO_LOG_COLLECTIONS; 777 if (Environment::read_bool("AUTO_LOG_REGIONS")) azone->control.log |= AUTO_LOG_REGIONS; 778 if (Environment::read_bool("AUTO_LOG_UNUSUAL")) azone->control.log |= AUTO_LOG_UNUSUAL; 779 if (Environment::read_bool("AUTO_LOG_WEAK")) azone->control.log |= AUTO_LOG_WEAK; 780 781 azone->control.collection_threshold = (size_t)Environment::read_long("AUTO_COLLECTION_THRESHOLD", 1024L * 1024L); 782 azone->control.full_vs_gen_frequency = Environment::read_long("AUTO_COLLECTION_RATIO", 10); 783 784 malloc_zone_register((auto_zone_t*)azone); 785 786 pthread_mutex_init(&azone->_collection_mutex, NULL); 787 788 // register our calling thread so that the zone is ready to go 789 azone->register_thread(); 790 791 if (!gc_zone) gc_zone = (auto_zone_t *)azone; // cache first one for debugging, monitoring 792 793 return (auto_zone_t*)azone; 794} 795 796/********* Reference counting ************/ 797 798void auto_zone_retain(auto_zone_t *zone, void *ptr) { 799 Zone *azone = (Zone *)zone; 800 auto_refcount_sieve<AUTO_REFCOUNT_INCREMENT> refcount_sieve(azone, ptr); 801#if DEBUG 802 if (ptr == WatchPoint) { 803 malloc_printf("auto_zone_retain watchpoint: %p\n", WatchPoint); 804 blainer(); 805 } 806#endif 807 if (__auto_reference_logger) __auto_reference_logger(AUTO_RETAIN_EVENT, ptr, uintptr_t(refcount_sieve.refcount)); 808 if (Environment::log_reference_counting && malloc_logger) { 809 malloc_logger(MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, uintptr_t(zone), auto_zone_size(zone, ptr), 0, uintptr_t(ptr), 0); 810 } 811} 812 813unsigned int auto_zone_release(auto_zone_t *zone, void *ptr) { 814 Zone *azone = (Zone *)zone; 815 auto_refcount_sieve<AUTO_REFCOUNT_DECREMENT> refcount_sieve(azone, ptr); 816 817#if DEBUG 818 if (ptr == WatchPoint) { 819 malloc_printf("auto_zone_release watchpoint: %p\n", WatchPoint); 820 blainer(); 821 } 822#endif 823 if (__auto_reference_logger) __auto_reference_logger(AUTO_RELEASE_EVENT, ptr, uintptr_t(refcount_sieve.refcount)); 824 if (Environment::log_reference_counting && malloc_logger) { 825 malloc_logger(MALLOC_LOG_TYPE_DEALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, uintptr_t(zone), uintptr_t(ptr), 0, 0, 0); 826 } 827 return refcount_sieve.refcount; 828} 829 830 831unsigned int auto_zone_retain_count(auto_zone_t *zone, const void *ptr) { 832 auto_block_info_sieve<AUTO_BLOCK_INFO_REFCOUNT> refcount_sieve((Zone *)zone, ptr); 833 return refcount_sieve.refcount(); 834} 835 836/********* Write-barrier ************/ 837 838 839// BlockRef FIXME: retire 840static void handle_resurrection(Zone *azone, const void *recipient, bool recipient_is_block, const void *new_value, size_t offset) 841{ 842 if (!recipient_is_block || ((auto_memory_type_t)azone->block_layout((void*)recipient) & AUTO_UNSCANNED) != AUTO_UNSCANNED) { 843 auto_memory_type_t new_value_type = (auto_memory_type_t) azone->block_layout((void*)new_value); 844 char msg[256]; 845 snprintf(msg, sizeof(msg), "resurrection error for block %p while assigning %p[%d] = %p", new_value, recipient, (int)offset, new_value); 846 if ((new_value_type & AUTO_OBJECT_UNSCANNED) == AUTO_OBJECT) { 847 // mark the object for zombiehood. 848 bool thread_local = false; 849 if (azone->in_subzone_memory((void*)new_value)) { 850 Subzone *sz = Subzone::subzone((void*)new_value); 851 usword_t q = sz->quantum_index((void*)new_value); 852 if (sz->is_thread_local(q)) { 853 thread_local = true; 854 Thread &thread = azone->registered_thread(); 855 ThreadLocalCollector *tlc = thread.thread_local_collector(); 856 if (tlc) { 857 thread.thread_local_collector()->add_zombie((void*)new_value); 858 } else { 859 auto_error(azone, "resurrection of thread local garbage belonging to another thread", new_value); 860 } 861 } 862 } 863 auto_zone_retain((auto_zone_t*)azone, (void*)new_value); // mark the object ineligible for freeing this time around. 864 if (!thread_local) { 865 azone->add_zombie((void*)new_value); 866 } 867 if (azone->control.name_for_address) { 868 char *recipient_name = azone->control.name_for_address((auto_zone_t *)azone, (vm_address_t)recipient, offset); 869 char *new_value_name = azone->control.name_for_address((auto_zone_t *)azone, (vm_address_t)new_value, 0); 870 snprintf(msg, sizeof(msg), "resurrection error for object %p while assigning %s(%p)[%d] = %s(%p)", 871 new_value, recipient_name, recipient, (int)offset, new_value_name, new_value); 872 free(recipient_name); 873 free(new_value_name); 874 } 875 } 876 malloc_printf("%s\ngarbage pointer stored into reachable memory, break on auto_zone_resurrection_error to debug\n", msg); 877 auto_zone_resurrection_error(); 878 } 879} 880 881template <class BlockRef> static void handle_resurrection(Zone *azone, void *recipient, BlockRef new_value, size_t offset) 882{ 883 char msg[256]; 884 snprintf(msg, sizeof(msg), "resurrection error for block %p while assigning %p[%d] = %p", new_value.address(), recipient, (int)offset, new_value.address()); 885 if (new_value.is_object()) { 886 // mark the object for zombiehood. 887 bool thread_local = false; 888 if (new_value.is_thread_local()) { 889 thread_local = true; 890 Thread &thread = azone->registered_thread(); 891 ThreadLocalCollector *tlc = thread.thread_local_collector(); 892 if (tlc) { 893 thread.thread_local_collector()->add_zombie((void*)new_value.address()); 894 } else { 895 auto_error(azone, "resurrection of thread local garbage belonging to another thread", new_value.address()); 896 } 897 } 898 auto_zone_retain((auto_zone_t*)azone, (void*)new_value.address()); // mark the object ineligible for freeing this time around. 899 if (!thread_local) { 900 azone->add_zombie((void*)new_value.address()); 901 } 902 if (azone->control.name_for_address) { 903 char *recipient_name = azone->control.name_for_address((auto_zone_t *)azone, (vm_address_t)recipient, offset); 904 char *new_value_name = azone->control.name_for_address((auto_zone_t *)azone, (vm_address_t)new_value.address(), 0); 905 snprintf(msg, sizeof(msg), "resurrection error for object %p while assigning %s(%p)[%d] = %s(%p)", 906 new_value.address(), recipient_name, recipient, (int)offset, new_value_name, new_value.address()); 907 free(recipient_name); 908 free(new_value_name); 909 } 910 } 911 malloc_printf("%s\ngarbage pointer stored into reachable memory, break on auto_zone_resurrection_error to debug\n", msg); 912 auto_zone_resurrection_error(); 913} 914 915template <class DestBlockRef, class ValueBlockRef> static void handle_resurrection(Zone *azone, DestBlockRef recipient, ValueBlockRef new_value, size_t offset) 916{ 917 if (recipient.is_scanned()) { 918 handle_resurrection(azone, recipient.address(), new_value, offset); 919 } 920} 921 922// make the resurrection test an inline to be as fast as possible in the write barrier 923// recipient may be a GC block or not, as determined by recipient_is_block 924// returns true if a resurrection occurred, false if not 925// BlockRef FIXME: retire 926inline static bool check_resurrection(Thread &thread, Zone *azone, void *recipient, bool recipient_is_block, const void *new_value, size_t offset) { 927 if (new_value && 928 azone->is_block((void *)new_value) && 929 azone->block_is_garbage((void *)new_value) && 930 (!recipient_is_block || !azone->block_is_garbage(recipient))) { 931 handle_resurrection(azone, recipient, recipient_is_block, new_value, offset); 932 return true; 933 } 934 return false; 935} 936 937template <class DestBlockRef, class ValueBlockRef> inline static bool check_resurrection(Thread &thread, Zone *azone, DestBlockRef recipient, ValueBlockRef new_value, size_t offset) { 938 if (new_value.is_garbage() && (!recipient.is_garbage())) { 939 handle_resurrection(azone, recipient, new_value, offset); 940 return true; 941 } 942 return false; 943} 944 945template <class BlockRef> inline static bool check_resurrection(Thread &thread, Zone *azone, void *global_recipient, BlockRef new_value, size_t offset) { 946 if (new_value.is_garbage()) { 947 handle_resurrection(azone, global_recipient, new_value, offset); 948 return true; 949 } 950 return false; 951} 952 953// 954// set_write_barrier_dest_sieve 955// 956// set_write_barrier_dest_sieve performs write barrier processing based on the block type of the destination. 957// If the destination is a GC block then a resurrection check is performed and the assignment is done. 958// Otherwise no operation is performed. 959template <class ValueBlockRef> class set_write_barrier_dest_sieve : public sieve_base { 960public: 961 Zone *_zone; 962 const void *_dest; 963 ValueBlockRef _new_value; 964 const void *_new_value_addr; 965 bool _result; 966 967 set_write_barrier_dest_sieve(Zone *zone, const void *dest, ValueBlockRef new_value, const void *new_value_addr) __attribute__((always_inline)) : _zone(zone), _dest(dest), _new_value(new_value), _new_value_addr(new_value_addr), _result(true) { 968 sieve_interior_pointer(_zone, _dest, *this); 969 } 970 971 template <class DestBlockRef> inline void processBlock(DestBlockRef ref) TEMPLATE_INLINE { 972 Thread &thread = _zone->registered_thread(); 973 size_t offset_in_bytes = (char *)_dest - (char *)ref.address(); 974 check_resurrection(thread, _zone, ref, _new_value, offset_in_bytes); 975 976 if (Environment::unscanned_store_warning && _zone->compaction_enabled() && !ref.is_scanned() && !_new_value.has_refcount()) { 977 auto_error(_zone, "auto_zone_set_write_barrier: Storing a GC-managed pointer in unscanned memory location. Break on auto_zone_unscanned_store_error() to debug.", _new_value_addr); 978 auto_zone_unscanned_store_error(_dest, _new_value_addr); 979 } 980 981 _zone->set_write_barrier(thread, ref, (const void **)_dest, _new_value, _new_value_addr); 982 } 983 984 inline void nonBlock(const void *ptr) __attribute__((always_inline)) { 985 Thread &thread = _zone->registered_thread(); 986 if (thread.is_stack_address((void *)_dest)) { 987 *(const void **)_dest = _new_value_addr; 988 } else { 989 if (Environment::unscanned_store_warning && _zone->compaction_enabled() && !_new_value.has_refcount() && !_zone->is_global_address((void*)_dest)) { 990 auto_error(_zone, "auto_zone_set_write_barrier: Storing a GC-managed pointer in unscanned memory location. Break on auto_zone_unscanned_store_error() to debug.", _new_value_addr); 991 auto_zone_unscanned_store_error(_dest, _new_value_addr); 992 } 993 _result = false; 994 } 995 } 996}; 997 998// 999// set_write_barrier_value_sieve 1000// 1001// set_write_barrier_value_sieve determines whether the value being assigned is a block start pointer. 1002// If it is, then set_write_barrier_dest_sieve is used to do further write barrier procession based on the destination. 1003// If it is not then the value is simply assigned. 1004class set_write_barrier_value_sieve : public sieve_base { 1005public: 1006 Zone *_zone; 1007 const void *_dest; 1008 const void *_new_value; 1009 bool _result; 1010 1011 set_write_barrier_value_sieve(Zone *zone, const void *dest, const void *new_value) __attribute__((always_inline)) : _zone(zone), _dest(dest), _new_value(new_value), _result(true) { 1012 sieve_base_pointer(_zone, _new_value, *this); 1013 } 1014 1015 template <class BlockRef> inline void processBlock(BlockRef ref) TEMPLATE_INLINE { 1016 set_write_barrier_dest_sieve<BlockRef> dest(_zone, _dest, ref, _new_value); 1017 _result = dest._result; 1018 } 1019 1020 inline void nonBlock(const void *ptr) __attribute__((always_inline)) { 1021 *(void **)_dest = (void *)ptr; 1022 } 1023}; 1024 1025// called by objc assignIvar assignStrongCast 1026boolean_t auto_zone_set_write_barrier(auto_zone_t *zone, const void *dest, const void *new_value) { 1027 set_write_barrier_value_sieve value((Zone *)zone, dest, new_value); 1028 return value._result; 1029} 1030 1031void *auto_zone_write_barrier_memmove(auto_zone_t *zone, void *dst, const void *src, size_t size) { 1032 if (size == 0 || dst == src) 1033 return dst; 1034 Zone *azone = (Zone *)zone; 1035 // speculatively determine the base of the destination 1036 void *base = azone->block_start(dst); 1037 // If the destination is a scanned block then mark the write barrier 1038 // We used to check for resurrection but this is a conservative move without exact knowledge 1039 // and we don't want to choke on a false positive. 1040 if (base && is_scanned(azone->block_layout(base))) { 1041 // range check for extra safety. 1042 size_t block_size = auto_zone_size(zone, base); 1043 ptrdiff_t block_overrun = (ptrdiff_t(dst) + size) - (ptrdiff_t(base) + block_size); 1044 if (block_overrun > 0) { 1045 auto_fatal("auto_zone_write_barrier_memmove: will overwrite block %p, size %ld by %ld bytes.", base, block_size, block_overrun); 1046 } 1047 // we are only interested in scanning for pointers, so align to pointer boundaries 1048 const void *ptrSrc; 1049 size_t ptrSize; 1050 if (is_pointer_aligned((void *)src) && ((size & (sizeof(void *)-1)) == 0)) { 1051 // common case, src is pointer aligned and size is multiple of pointer size 1052 ptrSrc = src; 1053 ptrSize = size; 1054 } else { 1055 // compute pointer aligned src, end, and size within the source buffer 1056 ptrSrc = align_up((void *)src, pointer_alignment); 1057 const void *ptrEnd = align_down(displace((void *)src, size), pointer_alignment); 1058 if ((vm_address_t)ptrEnd > (vm_address_t)ptrSrc) { 1059 ptrSize = (vm_address_t)ptrEnd - (vm_address_t)ptrSrc; 1060 } else { 1061 ptrSize = 0; // copying a range that cannot contain an aligned pointer 1062 } 1063 } 1064 if (ptrSize >= sizeof(void *)) { 1065 Thread &thread = azone->registered_thread(); 1066 // Pass in aligned src/size. Since dst is only used to determine thread locality it is ok to not align that value 1067 // Even if we're storing into garbage it might be visible to other garbage long after a TLC collects it, so we need to escape it. 1068 thread.track_local_memcopy(ptrSrc, dst, ptrSize); 1069 if (azone->set_write_barrier_range(dst, size)) { 1070 // must hold enlivening lock for duration of the move; otherwise if we get scheduled out during the move 1071 // and GC starts and scans our destination before we finish filling it with unique values we lose them 1072 UnconditionalBarrier barrier(thread.needs_enlivening()); 1073 if (barrier) { 1074 // add all values in the range. 1075 // We could/should only register those that are as yet unmarked. 1076 // We also only add values that are objects. 1077 void **start = (void **)ptrSrc; 1078 void **end = (void **)displace(start, ptrSize); 1079 while (start < end) { 1080 void *candidate = *start++; 1081 if (azone->is_block(candidate)) thread.enliven_block(candidate); 1082 } 1083 } 1084 return memmove(dst, src, size); 1085 } 1086 } 1087 } else if (base == NULL) { 1088 // since dst is not in out heap, it is by definition unscanned. unfortunately, many clients already use this on malloc()'d blocks, so we can't warn for that case. 1089 // if the source pointer comes from a scanned block in our heap, and the destination pointer is in global data, warn about bypassing global write-barriers. 1090 void *srcbase = azone->block_start((void*)src); 1091 if (srcbase && is_scanned(azone->block_layout(srcbase)) && azone->is_global_address(dst)) { 1092 // make this a warning in SnowLeopard. 1093 auto_error(zone, "auto_zone_write_barrier_memmove: Copying a scanned block into global data. Break on auto_zone_global_data_memmove_error() to debug.", dst); 1094 auto_zone_global_data_memmove_error(); 1095 } 1096 } 1097 // perform the copy 1098 return memmove(dst, src, size); 1099} 1100 1101#define CHECK_STACK_READS 0 1102 1103void *auto_zone_strong_read_barrier(auto_zone_t *zone, void **source) { 1104 // block a thread during compaction. 1105 void *volatile *location = (void *volatile *)source; 1106 void *value = *location; 1107 Zone *azone = (Zone*)zone; 1108 if (azone->in_subzone_memory(value)) { 1109 Thread &thread = azone->registered_thread(); 1110 if (CHECK_STACK_READS) { 1111 // TODO: how common are indirections through the stack? 1112 // allow reads from the stack without blocking, since these will always be pinned. 1113 if (thread.is_stack_address(source)) return value; 1114 } 1115 SpinLock lock(&thread.in_compaction().lock); 1116 value = *location; 1117 usword_t q; 1118 Subzone *subzone = Subzone::subzone(value); 1119 if (subzone->block_start(value, q)) subzone->mark_pinned(q); 1120 } 1121 return value; 1122} 1123 1124/********* Layout ************/ 1125 1126void* auto_zone_allocate_object(auto_zone_t *zone, size_t size, auto_memory_type_t type, boolean_t initial_refcount_to_one, boolean_t clear) { 1127 void *ptr; 1128// if (allocate_meter) allocate_meter_start(); 1129 Zone *azone = (Zone *)zone; 1130 Thread &thread = azone->registered_thread(); 1131 // ALWAYS clear if scanned memory <rdar://problem/5341463>. 1132 // ALWAYS clear if type is AUTO_MEMORY_ALL_WEAK_POINTERS. 1133 ptr = azone->block_allocate(thread, size, type, clear || is_allocated_cleared(type), initial_refcount_to_one); 1134 // We only log here because this is the only entry point that normal malloc won't already catch 1135 if (ptr && malloc_logger) malloc_logger(MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE | (clear ? MALLOC_LOG_TYPE_CLEARED : 0), uintptr_t(zone), size, initial_refcount_to_one ? 1 : 0, uintptr_t(ptr), 0); 1136// if (allocate_meter) allocate_meter_stop(); 1137 return ptr; 1138} 1139 1140extern unsigned auto_zone_batch_allocate(auto_zone_t *zone, size_t size, auto_memory_type_t type, boolean_t initial_refcount_to_one, boolean_t clear, void **results, unsigned num_requested) { 1141 Zone *azone = (Zone *)zone; 1142 Thread &thread = azone->registered_thread(); 1143 unsigned count = azone->batch_allocate(thread, size, type, clear || is_allocated_cleared(type), initial_refcount_to_one, results, num_requested); 1144 if (count && malloc_logger) { 1145 for (unsigned i=0; i<count; i++) 1146 malloc_logger(MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE | MALLOC_LOG_TYPE_CLEARED, uintptr_t(zone), size, initial_refcount_to_one ? 1 : 0, uintptr_t(results[i]), 0); 1147 } 1148 return count; 1149} 1150 1151extern "C" void *auto_zone_create_copy(auto_zone_t *zone, void *ptr) { 1152 Zone *azone = (Zone *)zone; 1153 1154 auto_block_info_sieve<AUTO_BLOCK_INFO_SIZE|AUTO_BLOCK_INFO_LAYOUT|AUTO_BLOCK_INFO_REFCOUNT> block_info(azone, ptr); 1155 1156 auto_memory_type_t type; 1157 int rc = 0; 1158 size_t size; 1159 if (block_info.is_block()) { 1160 type = block_info.layout(); 1161 rc = block_info.refcount(); 1162 size = block_info.size(); 1163 } else { 1164 // from "somewhere else" 1165 type = AUTO_MEMORY_UNSCANNED; 1166 rc = 0; 1167 size = malloc_size(ptr); 1168 } 1169 1170 if (type & AUTO_OBJECT) { 1171 // if no weak layouts we could be more friendly 1172 auto_error(azone, "auto_zone_copy_memory called on object", ptr); 1173 return (void *)0; 1174 } 1175 void *result = auto_zone_allocate_object(zone, size, type, (rc == 1), false); 1176 if (result) memmove(result, ptr, size); 1177 return result; 1178} 1179 1180// Change type to non-object. This happens when, obviously, no finalize is needed. 1181void auto_zone_set_nofinalize(auto_zone_t *zone, void *ptr) { 1182 Zone *azone = (Zone *)zone; 1183 auto_memory_type_t type = azone->block_layout(ptr); 1184 if (type == AUTO_TYPE_UNKNOWN) return; 1185 // preserve scanned-ness but drop AUTO_OBJECT 1186 if ((type & AUTO_OBJECT) && azone->weak_layout_map_for_block(ptr)) 1187 return; // ignore request for objects that have weak instance variables 1188 azone->block_set_layout(ptr, type & ~AUTO_OBJECT); 1189} 1190 1191// Change type to unscanned. This is used in very rare cases where a block sometimes holds 1192// a strong reference and sometimes not. 1193void auto_zone_set_unscanned(auto_zone_t *zone, void *ptr) { 1194 Zone *azone = (Zone *)zone; 1195 auto_memory_type_t type = azone->block_layout(ptr); 1196 if (type == AUTO_TYPE_UNKNOWN) return; 1197 azone->block_set_layout(ptr, type|AUTO_UNSCANNED); 1198} 1199 1200// Turn on the AUTO_POINTERS_ONLY flag for scanned blocks only. This tells the collector 1201// to treat the remainder of an object as containing pointers only, which is 1202// needed for compaction. 1203void auto_zone_set_scan_exactly(auto_zone_t *zone, void *ptr) { 1204 Zone *azone = (Zone *)zone; 1205 auto_memory_type_t type = azone->block_layout(ptr); 1206 if (type & AUTO_UNSCANNED) return; // not appropriate for unscanned memory types. 1207 azone->block_set_layout(ptr, type|AUTO_POINTERS_ONLY); 1208} 1209 1210extern void auto_zone_clear_stack(auto_zone_t *zone, unsigned long options) 1211{ 1212 Zone *azone = (Zone *)zone; 1213 Thread *thread = azone->current_thread(); 1214 if (thread) { 1215 thread->clear_stack(); 1216 } 1217} 1218 1219 1220auto_memory_type_t auto_zone_get_layout_type(auto_zone_t *zone, void *ptr) { 1221 auto_block_info_sieve<AUTO_BLOCK_INFO_LAYOUT> block_info((Zone *)zone, ptr); 1222 return block_info.layout(); 1223} 1224 1225 1226void auto_zone_register_thread(auto_zone_t *zone) { 1227 ((Zone *)zone)->register_thread(); 1228} 1229 1230 1231void auto_zone_unregister_thread(auto_zone_t *zone) { 1232 ((Zone *)zone)->unregister_thread(); 1233} 1234 1235void auto_zone_assert_thread_registered(auto_zone_t *zone) { 1236 Zone *azone = (Zone *)zone; 1237 azone->registered_thread(); 1238} 1239 1240void auto_zone_register_datasegment(auto_zone_t *zone, void *address, size_t size) { 1241 ((Zone *)zone)->add_datasegment(address, size); 1242} 1243 1244void auto_zone_unregister_datasegment(auto_zone_t *zone, void *address, size_t size) { 1245 ((Zone *)zone)->remove_datasegment(address, size); 1246} 1247 1248/********* Garbage Collection and Compaction ************/ 1249 1250auto_collection_control_t *auto_collection_parameters(auto_zone_t *zone) { 1251 Zone *azone = (Zone *)zone; 1252 return &azone->control; 1253} 1254 1255 1256 1257// public entry point. 1258void auto_zone_statistics(auto_zone_t *zone, auto_statistics_t *stats) { 1259 if (!stats) return; 1260 bzero(stats, sizeof(auto_statistics_t)); 1261} 1262 1263// work in progress 1264typedef struct { 1265 FILE *f; 1266 char *buff; 1267 size_t buff_size; 1268 size_t buff_pos; 1269} AutoZonePrintInfo; 1270 1271__private_extern__ malloc_zone_t *aux_zone; 1272 1273void auto_collector_reenable(auto_zone_t *zone) { 1274 Zone *azone = (Zone *)zone; 1275 // although imperfect, try to avoid dropping below zero 1276 Mutex lock(&azone->_collection_mutex); 1277 if (azone->_collector_disable_count == 0) return; 1278 azone->_collector_disable_count--; 1279} 1280 1281void auto_collector_disable(auto_zone_t *zone) { 1282 Zone *azone = (Zone *)zone; 1283 Mutex lock(&azone->_collection_mutex); 1284 azone->_collector_disable_count++; 1285} 1286 1287boolean_t auto_zone_is_enabled(auto_zone_t *zone) { 1288 Zone *azone = (Zone *)zone; 1289 Mutex lock(&azone->_collection_mutex); 1290 return azone->_collector_disable_count == 0; 1291} 1292 1293boolean_t auto_zone_is_collecting(auto_zone_t *zone) { 1294 Zone *azone = (Zone *)zone; 1295 // FIXME: the result of this function only valid on the collector thread (main for now). 1296 return !azone->is_state(idle); 1297} 1298 1299void auto_collect_multithreaded(auto_zone_t *zone) { 1300 Zone *azone = (Zone *)zone; 1301 dispatch_once(&azone->_zone_init_predicate, ^{ 1302 // In general libdispatch limits the number of concurrent jobs based on various factors (# cpus). 1303 // But we don't want the collector to be kept waiting while long running jobs generate garbage. 1304 // We avoid collection latency using a special attribute which tells dispatch this queue should 1305 // service jobs immediately even if that requires exceeding the usual concurrent limit. 1306 azone->_collection_queue = dispatch_queue_create("Garbage Collection Work Queue", NULL); 1307 dispatch_queue_t target_queue = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_HIGH, DISPATCH_QUEUE_OVERCOMMIT); 1308 dispatch_set_target_queue(azone->_collection_queue, target_queue); 1309 dispatch_set_context(azone->_collection_queue, azone); 1310 const char *notify_name; 1311 1312#if COMPACTION_ENABLED 1313 // compaction trigger: a call to notify_post() with $AUTO_COMPACT_NOTIFICATION 1314 notify_name = Environment::get("AUTO_COMPACT_NOTIFICATION"); 1315 if (notify_name != NULL) { 1316 int compact_token_unused = 0; 1317 notify_register_dispatch(notify_name, &compact_token_unused, azone->_collection_queue, ^(int token) { 1318 Zone *zone = (Zone *)dispatch_get_context(dispatch_get_current_queue()); 1319 auto_date_t start = auto_date_now(); 1320 zone->compact_heap(); 1321 auto_date_t end = auto_date_now(); 1322 if (Environment::log_compactions) malloc_printf("compaction took %lld microseconds.\n", (end - start)); 1323 }); 1324 } else { 1325 // compaction timer: prime it to run forever in the future. 1326 azone->_compaction_timer = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, 0, 0, dispatch_get_main_queue()); 1327 dispatch_source_set_timer(azone->_compaction_timer, DISPATCH_TIME_FOREVER, 0, 0); 1328 dispatch_source_set_event_handler(azone->_compaction_timer, ^{ 1329 if (!azone->compaction_disabled()) { 1330 azone->_compaction_next_time = DISPATCH_TIME_FOREVER; 1331 dispatch_source_set_timer(azone->_compaction_timer, DISPATCH_TIME_FOREVER, 0, 0); 1332 dispatch_async(azone->_collection_queue, ^{ 1333 auto_date_t start = auto_date_now(); 1334 azone->compact_heap(); 1335 auto_date_t end = auto_date_now(); 1336 malloc_printf("compaction took %lld microseconds.\n", (end - start)); 1337 // compute the next allowed time to start a compaction; must wait at least 30 seconds. 1338 azone->_compaction_next_time = dispatch_time(0, 30 * NSEC_PER_SEC); 1339 azone->_compaction_pending = false; 1340 }); 1341 } 1342 }); 1343 dispatch_resume(azone->_compaction_timer); 1344 } 1345 1346 // analysis trigger: a call to notify_post() with $AUTO_ANALYZE_NOTIFICATION 1347 // currently used by HeapVisualizer to generate an analyis file in /tmp/AppName.analyze. 1348 notify_name = Environment::get("AUTO_ANALYZE_NOTIFICATION"); 1349 if (notify_name != NULL) { 1350 int analyze_token_unused = 0; 1351 notify_register_dispatch(notify_name, &analyze_token_unused, azone->_collection_queue, ^(int token) { 1352 Zone *zone = (Zone *)dispatch_get_context(dispatch_get_current_queue()); 1353 static const char *analyze_name = Environment::get("AUTO_ANALYZE_NOTIFICATION"); 1354 zone->analyze_heap(analyze_name); 1355 static const char *reply_name = Environment::get("AUTO_ANALYZE_REPLY"); 1356 if (reply_name) notify_post(reply_name); 1357 }); 1358 } 1359#endif /* COMPACTION_ENABLED */ 1360 1361 // simulated memory pressure notification. 1362 notify_name = Environment::get("AUTO_MEMORY_PRESSURE_NOTIFICATION"); 1363 if (notify_name != NULL) { 1364 int pressure_token_unused = 0; 1365 notify_register_dispatch(notify_name, &pressure_token_unused, azone->_collection_queue, ^(int token) { 1366 Zone *zone = (Zone *)dispatch_get_context(dispatch_get_current_queue()); 1367 usword_t size = zone->purge_free_space(); 1368 printf("purged %ld bytes.\n", size); 1369 }); 1370 } else { 1371 // If not simulated, then field memory pressure triggers directly from the kernel. 1372 // TODO: consider using a concurrent queue to allow purging to happen concurrently with collection/compaction. 1373#if TARGET_OS_IPHONE 1374# warning no memory pressure dispatch source on iOS 1375#else 1376 azone->_pressure_source = dispatch_source_create(DISPATCH_SOURCE_TYPE_VM, 0, DISPATCH_VM_PRESSURE, azone->_collection_queue); 1377 if (azone->_pressure_source != NULL) { 1378 dispatch_source_set_event_handler(azone->_pressure_source, ^{ 1379 Zone *zone = (Zone *)dispatch_get_context(dispatch_get_current_queue()); 1380 zone->purge_free_space(); 1381 }); 1382 dispatch_resume(azone->_pressure_source); 1383 } 1384#endif 1385 } 1386 1387 // exhaustive collection notification. 1388 notify_name = Environment::get("AUTO_COLLECT_NOTIFICATION"); 1389 if (notify_name != NULL) { 1390 int collect_token_unused = 0; 1391 notify_register_dispatch(notify_name, &collect_token_unused, dispatch_get_main_queue(), ^(int token) { 1392 malloc_printf("collecting on demand.\n"); 1393 auto_zone_collect((auto_zone_t *)azone, AUTO_ZONE_COLLECT_LOCAL_COLLECTION | AUTO_ZONE_COLLECT_EXHAUSTIVE_COLLECTION); 1394 const char *reply_name = Environment::get("AUTO_COLLECT_REPLY"); 1395 if (reply_name) notify_post(reply_name); 1396 }); 1397 } 1398 1399 // Work around an idiosynchrocy with leaks. These dispatch objects will be reported as leaks because the 1400 // zone structure is not (and cannot be) scanned by leaks. Since we only support a small number of zones 1401 // just store these objects in global memory where leaks will find them to suppress the leak report. 1402 // In practice these are never deallocated anyway, as we don't support freeing an auto zone. 1403 queues[azone->thread_key()-__PTK_FRAMEWORK_GC_KEY0] = azone->_collection_queue; 1404 pressure_sources[azone->thread_key()-__PTK_FRAMEWORK_GC_KEY0] = azone->_pressure_source; 1405 compaction_timers[azone->thread_key()-__PTK_FRAMEWORK_GC_KEY0] = azone->_compaction_timer; 1406 }); 1407} 1408 1409 1410// 1411// Called by Instruments to lay down a heap dump (via dumpster) 1412// 1413void auto_enumerate_references(auto_zone_t *zone, void *referent, 1414 auto_reference_recorder_t callback, // f(zone, ctx, {ref, referrer, offset}) 1415 void *stack_bottom, void *ctx) 1416{ 1417 // obsolete. use auto_gdb_enumerate_references() or auto_zone_dump(). 1418} 1419 1420 1421/********* Weak References ************/ 1422 1423 1424// auto_assign_weak 1425// The new and improved one-stop entry point to the weak system 1426// Atomically assign value to *location and track it for zero'ing purposes. 1427// Assign a value of NULL to deregister from the system. 1428void auto_assign_weak_reference(auto_zone_t *zone, const void *value, const void **location, auto_weak_callback_block_t *block) { 1429 Zone *azone = (Zone *)zone; 1430 Thread &thread = azone->registered_thread(); 1431 void *base = azone->block_start((void *)location); 1432 if (value) { 1433 if ((base && azone->block_is_garbage(base)) || check_resurrection(thread, azone, (void *)base, base != NULL, value, (size_t)location - (size_t)base)) { 1434 // Never allow garbage to be registered since it will never be cleared. 1435 // Go further and zero it out since it would have been cleared had it been done earlier 1436 // To address <rdar://problem/7217252>, disallow forming new weak references inside garbage objects. 1437 value = NULL; 1438 } 1439 } 1440 1441 // Check if this is a store to the stack and don't register it. 1442 // This handles id *__weak as a parameter 1443 if (!thread.is_stack_address((void *)location)) { 1444 // unregister old, register new (if non-NULL) 1445 weak_register(azone, value, (void **)location, block); 1446 if (value != NULL) { 1447 // note: we could check to see if base is local, but then we have to change 1448 // all weak references on make_global. 1449 if (base) thread.block_escaped(base); 1450 thread.block_escaped((void *)value); 1451 // also zap destination so that dead locals don't need to be pulled out of weak tables 1452 //thread->track_local_assignment(azone, (void *)location, (void *)value); 1453 } 1454 } else { 1455 // write the value even though the location is not registered, for __block __weak foo=x case. 1456 *location = value; 1457 } 1458} 1459 1460void *auto_read_weak_reference(auto_zone_t *zone, void **referrer) { 1461 void *result = *referrer; 1462 if (result != NULL) { 1463 // We grab the condition barrier. Missing the transition is not a real issue. 1464 // For a missed transition to be problematic the collector would have had to mark 1465 // the transition before we entered this routine, scanned this thread (not seeing the 1466 // enlivened read), scanned the heap, and scanned this thread exhaustively before we 1467 // load *referrer 1468 Zone *azone = (Zone*)zone; 1469 Thread &thread = azone->registered_thread(); 1470 ConditionBarrier barrier(thread.needs_enlivening()); 1471 if (barrier) { 1472 // need to tell the collector this block should be scanned. 1473 result = *referrer; 1474 if (result) thread.enliven_block(result); 1475 } else { 1476 result = *referrer; 1477 } 1478 } 1479 return result; 1480} 1481 1482extern char CompactionObserverKey; 1483 1484void auto_zone_set_compaction_observer(auto_zone_t *zone, void *block, void (^observer) (void)) { 1485 if (observer) { 1486 observer = Block_copy(observer); 1487 Block_release(observer); 1488 } 1489 auto_zone_set_associative_ref(zone, block, &CompactionObserverKey, observer); 1490} 1491 1492/********* Associative References ************/ 1493 1494void auto_zone_set_associative_ref(auto_zone_t *zone, void *object, void *key, void *value) { 1495 Zone *azone = (Zone*)zone; 1496 Thread &thread = azone->registered_thread(); 1497 bool object_is_block = azone->is_block(object); 1498 // <rdar://problem/6463922> Treat global pointers as unconditionally live. 1499 if (!object_is_block && !azone->is_global_address(object)) { 1500 auto_error(zone, "auto_zone_set_associative_ref: object should point to a GC block or a global address, otherwise associations will leak. Break on auto_zone_association_error() to debug.", object); 1501 auto_zone_association_error(object); 1502 return; 1503 } 1504 check_resurrection(thread, azone, object, object_is_block, value, 0); 1505 azone->set_associative_ref(object, key, value); 1506} 1507 1508void *auto_zone_get_associative_ref(auto_zone_t *zone, void *object, void *key) { 1509 Zone *azone = (Zone*)zone; 1510 return azone->get_associative_ref(object, key); 1511} 1512 1513void auto_zone_erase_associative_refs(auto_zone_t *zone, void *object) { 1514 Zone *azone = (Zone*)zone; 1515 return azone->erase_associations(object); 1516} 1517 1518void auto_zone_enumerate_associative_refs(auto_zone_t *zone, void *key, boolean_t (^block) (void *object, void *value)) { 1519 Zone *azone = (Zone*)zone; 1520 azone->visit_associations_for_key(key, block); 1521} 1522 1523size_t auto_zone_get_associative_hash(auto_zone_t *zone, void *object) { 1524 Zone *azone = (Zone*)zone; 1525 return azone->get_associative_hash(object); 1526} 1527 1528/********* Root References ************/ 1529 1530class auto_zone_add_root_sieve : public sieve_base { 1531public: 1532 Zone * const _zone; 1533 void * const _root; 1534 1535 auto_zone_add_root_sieve(Zone *zone, void *root, void *ptr) __attribute__((always_inline)) : _zone(zone), _root(root) { 1536 sieve_base_pointer(zone, ptr, *this); 1537 } 1538 1539 template <class BlockRef> inline void processBlock(BlockRef ref) TEMPLATE_INLINE { 1540 Thread &thread = _zone->registered_thread(); 1541 check_resurrection(thread, _zone, _root, ref, 0); 1542 _zone->add_root(_root, ref); 1543 } 1544 1545 inline void nonBlock(const void *ptr) __attribute__((always_inline)) { 1546 *(void **)_root = (void *)ptr; 1547 } 1548}; 1549 1550void auto_zone_add_root(auto_zone_t *zone, void *root, void *value) 1551{ 1552 auto_zone_add_root_sieve((Zone *)zone, root, value); 1553} 1554 1555void auto_zone_remove_root(auto_zone_t *zone, void *root) { 1556 ((Zone *)zone)->remove_root(root); 1557} 1558 1559void auto_zone_root_write_barrier(auto_zone_t *zone, void *address_of_possible_root_ptr, void *value) { 1560 if (!value) { 1561 *(void **)address_of_possible_root_ptr = NULL; 1562 return; 1563 } 1564 Zone *azone = (Zone *)zone; 1565 Thread &thread = azone->registered_thread(); 1566 if (thread.is_stack_address(address_of_possible_root_ptr)) { 1567 // allow writes to the stack without checking for resurrection to allow finalizers to do work 1568 // always write directly to the stack. 1569 *(void **)address_of_possible_root_ptr = value; 1570 } else if (azone->is_root(address_of_possible_root_ptr)) { 1571 // if local make global before possibly enlivening 1572 thread.block_escaped(value); 1573 // might need to tell the collector this block should be scanned. 1574 UnconditionalBarrier barrier(thread.needs_enlivening()); 1575 if (barrier) thread.enliven_block(value); 1576 check_resurrection(thread, azone, address_of_possible_root_ptr, false, value, 0); 1577 *(void **)address_of_possible_root_ptr = value; 1578 } else if (azone->is_global_address(address_of_possible_root_ptr)) { 1579 // add_root performs a resurrection check 1580 auto_zone_add_root(zone, address_of_possible_root_ptr, value); 1581 } else { 1582 // This should only be something like storing a globally retained value 1583 // into a malloc'ed/vm_allocated hunk of memory. It might be that it is held 1584 // by GC at some other location and they're storing a go-stale pointer. 1585 // That "some other location" might in fact be the stack. 1586 // If so we can't really assert that it's either not-thread-local or retained. 1587 thread.block_escaped(value); 1588 check_resurrection(thread, azone, address_of_possible_root_ptr, false, value, 0); 1589 // Always write 1590 *(void **)address_of_possible_root_ptr = value; 1591 1592 if (Environment::unscanned_store_warning && azone->compaction_enabled()) { 1593 // catch writes to unscanned memory. 1594 auto_block_info_sieve<AUTO_BLOCK_INFO_REFCOUNT> info(azone, value); 1595 if (info.is_block() && info.refcount() == 0) { 1596 auto_error(zone, "auto_zone_root_write_barrier: Storing a GC-managed pointer in unscanned memory location. Break on auto_zone_unscanned_store_error() to debug.", value); 1597 auto_zone_unscanned_store_error(address_of_possible_root_ptr, value); 1598 } 1599 } 1600 } 1601} 1602 1603 1604void auto_zone_print_roots(auto_zone_t *zone) { 1605 Zone *azone = (Zone *)zone; 1606 PointerList roots; 1607 azone->copy_roots(roots); 1608 usword_t count = roots.count(); 1609 printf("### %lu roots. ###\n", count); 1610 void ***buffer = (void ***)roots.buffer(); 1611 for (usword_t i = 0; i < count; ++i) { 1612 void **root = buffer[i]; 1613 printf("%p -> %p\n", root, *root); 1614 } 1615} 1616 1617/********** Atomic operations *********************/ 1618 1619boolean_t auto_zone_atomicCompareAndSwap(auto_zone_t *zone, void *existingValue, void *newValue, void *volatile *location, boolean_t isGlobal, boolean_t issueBarrier) { 1620 Zone *azone = (Zone *)zone; 1621 Thread &thread = azone->registered_thread(); 1622 if (isGlobal) { 1623 azone->add_root_no_barrier((void *)location); 1624 } 1625 check_resurrection(thread, azone, (void *)location, !isGlobal, newValue, 0); 1626 thread.block_escaped(newValue); 1627 UnconditionalBarrier barrier(thread.needs_enlivening()); 1628 boolean_t result; 1629 if (issueBarrier) 1630 result = OSAtomicCompareAndSwapPtrBarrier(existingValue, newValue, location); 1631 else 1632 result = OSAtomicCompareAndSwapPtr(existingValue, newValue, location); 1633 if (!isGlobal) { 1634 // mark write-barrier w/o storing 1635 azone->set_write_barrier((char*)location); 1636 } 1637 if (result && barrier) thread.enliven_block(newValue); 1638 return result; 1639} 1640 1641/************ Collection Checking ***********************/ 1642 1643boolean_t auto_zone_enable_collection_checking(auto_zone_t *zone) { 1644 Zone *azone = (Zone *)zone; 1645 azone->enable_collection_checking(); 1646 return true; 1647} 1648 1649void auto_zone_disable_collection_checking(auto_zone_t *zone) { 1650 Zone *azone = (Zone *)zone; 1651 azone->disable_collection_checking(); 1652} 1653 1654void auto_zone_track_pointer(auto_zone_t *zone, void *pointer) { 1655 Zone *azone = (Zone *)zone; 1656 if (azone->collection_checking_enabled()) 1657 azone->track_pointer(pointer); 1658} 1659 1660void auto_zone_enumerate_uncollected(auto_zone_t *zone, auto_zone_collection_checking_callback_t callback) { 1661 Zone *azone = (Zone *)zone; 1662 azone->enumerate_uncollected(callback); 1663} 1664 1665 1666/************ Experimental ***********************/ 1667 1668#ifdef __BLOCKS__ 1669 1670void auto_zone_dump(auto_zone_t *zone, 1671 auto_zone_stack_dump stack_dump, 1672 auto_zone_register_dump register_dump, 1673 auto_zone_node_dump thread_local_node_dump, 1674 auto_zone_root_dump root_dump, 1675 auto_zone_node_dump global_node_dump, 1676 auto_zone_weak_dump weak_dump 1677 ) { 1678 1679 Auto::Zone *azone = (Auto::Zone *)zone; 1680 azone->dump_zone(stack_dump, register_dump, thread_local_node_dump, root_dump, global_node_dump, weak_dump); 1681} 1682 1683void auto_zone_visit(auto_zone_t *zone, auto_zone_visitor_t *visitor) { 1684 Auto::Zone *azone = (Auto::Zone *)zone; 1685 azone->visit_zone(visitor); 1686} 1687 1688auto_probe_results_t auto_zone_probe_unlocked(auto_zone_t *zone, void *address) { 1689 Zone *azone = (Zone *)zone; 1690 auto_probe_results_t result = azone->block_is_start(address) ? auto_is_auto : auto_is_not_auto; 1691 if ((result & auto_is_auto) && azone->is_local(address)) 1692 result |= auto_is_local; 1693 return result; 1694} 1695 1696void auto_zone_scan_exact(auto_zone_t *zone, void *address, void (^callback)(void *base, unsigned long byte_offset, void *candidate)) { 1697 Zone *azone = (Zone *)zone; 1698 auto_memory_type_t layout = azone->block_layout(address); 1699 // invalid addresses will return layout == -1 and hence won't == 0 below 1700 if ((layout & AUTO_UNSCANNED) == 0) { 1701 const unsigned char *map = NULL; 1702 if (layout & AUTO_OBJECT) { 1703 map = azone->layout_map_for_block((void *)address); 1704 } 1705 unsigned int byte_offset = 0; 1706 unsigned int size = auto_zone_size(zone, address); 1707 if (map) { 1708 while (*map) { 1709 int skip = (*map >> 4) & 0xf; 1710 int refs = (*map) & 0xf; 1711 byte_offset = byte_offset + skip*sizeof(void *); 1712 while (refs--) { 1713 callback(address, byte_offset, *(void **)(((char *)address)+byte_offset)); 1714 byte_offset += sizeof(void *); 1715 } 1716 ++map; 1717 } 1718 } 1719 while (byte_offset < size) { 1720 callback(address, byte_offset, *(void **)(((char *)address)+byte_offset)); 1721 byte_offset += sizeof(void *); 1722 } 1723 } 1724} 1725#endif 1726 1727 1728/************* API ****************/ 1729 1730boolean_t auto_zone_atomicCompareAndSwapPtr(auto_zone_t *zone, void *existingValue, void *newValue, void *volatile *location, boolean_t issueBarrier) { 1731 Zone *azone = (Zone *)zone; 1732 return auto_zone_atomicCompareAndSwap(zone, existingValue, newValue, location, azone->is_global_address((void*)location), issueBarrier); 1733} 1734 1735#if DEBUG 1736////////////////// SmashMonitor /////////////////// 1737 1738static void range_check(void *pointer, size_t size) { 1739 Zone *azone = (Zone *)gc_zone; 1740 if (azone) { 1741 void *base_pointer = azone->block_start(pointer); 1742 if (base_pointer) { 1743 size_t block_size = auto_zone_size((auto_zone_t *)azone,base_pointer); 1744 if ((uintptr_t(pointer) + size) > (uintptr_t(base_pointer) + block_size)) { 1745 malloc_printf("SmashMonitor: range check violation for pointer = %p, size = %lu", pointer, size); 1746 __builtin_trap(); 1747 } 1748 } 1749 } 1750} 1751 1752void *SmashMonitor_memcpy(void *dst, const void* src, size_t size) { 1753 // add some range checking code for auto allocated blocks. 1754 range_check(dst, size); 1755 return memcpy(dst, src, size); 1756} 1757 1758void *SmashMonitor_memmove(void *dst, const void* src, size_t size) { 1759 // add some range checking code for auto allocated blocks. 1760 range_check(dst, size); 1761 return memmove(dst, src, size); 1762} 1763 1764void *SmashMonitor_memset(void *pointer, int value, size_t size) { 1765 // add some range checking code for auto allocated blocks. 1766 range_check(pointer, size); 1767 return memset(pointer, value, size); 1768} 1769 1770void SmashMonitor_bzero(void *pointer, size_t size) { 1771 // add some range checking code for auto allocated blocks. 1772 range_check(pointer, size); 1773 bzero(pointer, size); 1774} 1775 1776 1777#if USE_INTERPOSING 1778DYLD_INTERPOSE(SmashMonitor_memcpy, memcpy) 1779DYLD_INTERPOSE(SmashMonitor_memmove, memmove) 1780DYLD_INTERPOSE(SmashMonitor_memset, memset) 1781DYLD_INTERPOSE(SmashMonitor_bzero, bzero) 1782#endif 1783 1784#endif 1785