1/*
2 * Copyright (c) 2011 Apple Inc. All rights reserved.
3 *
4 * @APPLE_APACHE_LICENSE_HEADER_START@
5 *
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
9 *
10 *     http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 *
18 * @APPLE_APACHE_LICENSE_HEADER_END@
19 */
20/*
21    auto_impl_utilities.c
22    Implementation Utilities
23    Copyright (c) 2002-2011 Apple Inc. All rights reserved.
24*/
25
26#include "auto_impl_utilities.h"
27#include "auto_tester.h"
28
29#include <malloc/malloc.h>
30#include <mach/mach.h>
31#include <libc.h>
32#include <stdarg.h>
33#include <sys/types.h>
34#include <sys/param.h>
35#include <sys/sysctl.h>
36
37#if !TARGET_OS_IPHONE
38#   include <CrashReporterClient.h>
39#else
40    // CrashReporterClient not available on iOS
41    static void CRSetCrashLogMessage(const char *buffer) { }
42#endif
43
44/*********  Implementation utilities    ************/
45
// Return the frame address of the caller's activation record as an
// approximation of the current stack pointer.
// NOTE(review): __builtin_frame_address(0) yields the frame base, not the
// exact SP; callers presumably only need a stack-bounds estimate — confirm.
vm_address_t auto_get_sp() {
    return (vm_address_t)__builtin_frame_address(0);
}
49
50size_t auto_round_page(size_t size) {
51    if (!size) return vm_page_size;
52    return (size + vm_page_size - 1) & ~ (vm_page_size - 1);
53}
54
55/*********  Utilities   ************/
// Return the number of CPUs, cached after the first successful sysctl.
// On sysctl failure the cache stays empty and 1 is returned, so the query
// is retried on the next call.
int auto_ncpus()
{
    static int ncpus = 0;
    if (ncpus == 0) {
        int mib[2] = { CTL_HW, HW_NCPU };
        int value = 0;
        size_t len = sizeof(value);
        if (sysctl(mib, 2, &value, &len, NULL, 0) != 0) {
            return 1;   // sysctl failed; try again next time
        }
        ncpus = value;
    }
    return ncpus;
}
72
// Return the cached "auto malloc[pid]" prefix used by the error reporters.
// The string is built once; subsequent calls return the same static buffer.
const char *auto_prelude(void) {
    static char prefix[32];
    if (prefix[0] == '\0') {
        snprintf(prefix, sizeof(prefix), "auto malloc[%d]", getpid());
    }
    return prefix;
}
80
// Report an allocation-related error via malloc_printf, with or without an
// offending object address.  azone is accepted for signature compatibility
// but is not consulted here.
void auto_error(void *azone, const char *msg, const void *ptr) {
    const char *prefix = auto_prelude();
    if (ptr != NULL) {
        malloc_printf("*** %s: error for object %p: %s\n", prefix, ptr, msg);
    } else {
        malloc_printf("*** %s: error: %s\n", prefix, msg);
    }
#if 0 && DEBUG
    malloc_printf("*** Sleeping to help debug\n");
    sleep(3600); // to help debug
#endif
}
92
// Format a fatal-error message, hand it to the crash reporter, log it, and
// trap.  The buffer is static (not stack) so it remains valid for the crash
// log machinery after __builtin_trap() aborts the process.
void auto_fatal(const char *format, ...) {
    static char buffer[512];
    va_list args;
    va_start(args, format);
    vsnprintf(buffer, sizeof(buffer), format, args);
    va_end(args);
    CRSetCrashLogMessage(buffer);
    malloc_printf("%s", buffer);
    __builtin_trap();
}
103
104/*********  Internal allocation ************/
105
106// GrP fixme assumes only one auto zone is ever active in a process
107
108__private_extern__ malloc_zone_t *aux_zone = NULL;
109
// One-time creation of the auxiliary malloc zone used for the collector's
// own bookkeeping allocations.  NOTE(review): the NULL check is not
// synchronized — presumably called during single-threaded startup; confirm.
__private_extern__ void aux_init(void) {
    if (!aux_zone) {
        aux_zone = malloc_create_zone(4096, 0); // GrP fixme size?
        malloc_set_zone_name(aux_zone, "aux_zone");
#if !DEBUG
        // Release builds unregister the zone so fork() never sees it
        // <rdar://problem/4580709>.  Debug builds leave it registered, which
        // makes it possible to call free() on these blocks while debugging.
        malloc_zone_unregister(aux_zone);       // PCB don't let fork() mess with aux_zone <rdar://problem/4580709>
#endif
    }
}
120
121/*********  Dealing with time   ************/
122
// Return the elapsed time in seconds between two auto_date_t stamps.
// The live branch divides by 1.0E6, i.e. it assumes auto_date_t is in
// microseconds — TODO confirm against the auto_date_t definition.
double auto_time_interval(auto_date_t after, auto_date_t before) {
#if 0
    // Disabled alternative: treat the stamps as mach absolute time and
    // convert via the timebase ratio.
    static double rate = 0.0;
    if (rate == 0.0) {
        struct mach_timebase_info info;
        mach_timebase_info(&info);
        rate = (double)info.numer / (1.0E9 * (double)info.denom);
    }
    return (after - before) * rate;
#else
    return (after - before) / 1.0E6;
#endif
}
136
137
138malloc_zone_t *auto_debug_zone(void)
139{
140    static malloc_zone_t *z = NULL;
141    if (!z) {
142        z = malloc_create_zone(4096, 0);
143        malloc_set_zone_name(z, "auto debug");
144    }
145    return z;
146}
147
// CHECK_ADDR(n): record return address n+1 (minus 4 so it points into the
// call instruction) into entry->stack[n]; bail to done: when the next frame
// or return address is unavailable.  __builtin_return_address requires a
// constant argument, hence the unrolled CHECK_ADDR2/4/8 expansions.
#define CHECK_ADDR(n) \
    entry->stack[n] = (vm_address_t)(__builtin_return_address(n + 1) - 4); \
    if ((__builtin_frame_address(n + 3) == NULL) || (__builtin_return_address(n + 2) == NULL)) { \
        goto done; \
    }

#define CHECK_ADDR2(n) \
CHECK_ADDR(n) \
CHECK_ADDR(n + 1)

#define CHECK_ADDR4(n) \
CHECK_ADDR2(n) \
CHECK_ADDR2(n + 2)

#define CHECK_ADDR8(n) \
CHECK_ADDR4(n) \
CHECK_ADDR4(n + 4)

// UNUSED marks a never-written history entry; ENTRY_COUNT is the size of
// the per-address ring buffer of transitions.
#define UNUSED         0xffffffff
#define ENTRY_COUNT 32

// One recorded refcount transition: up to 8 captured return addresses plus
// the retain count before and after the operation.
typedef struct {
    vm_address_t stack[8];
    unsigned int oldCount;  // may be UNUSED
    unsigned int newCount;  // oldCount == newCount for new allocations
} auto_refcount_history_t;

// Per-address history: a ring buffer of the most recent ENTRY_COUNT
// transitions for one block address.
typedef struct {
    vm_address_t address;
    int nextEntry;
    auto_refcount_history_entry_t entries[ENTRY_COUNT];
} auto_refcount_history_t;


// Open-addressed hash table of per-address histories, keyed by address.
// history_used counts distinct addresses ever recorded (never decremented).
static auto_refcount_history_t *history_list = NULL;
static int history_allocated = 0;
static int history_used = 0;
185
186
187static int history_hash(vm_address_t target)
188{
189    int index = (target >> 2) % history_allocated;
190    while (history_list[index].address != 0  &&
191           history_list[index].address != target)
192    {
193        index++;
194        if (index == history_allocated) index = 0;
195    }
196    return index;
197}
198
// Spin lock guarding history_list / history_allocated / history_used.
static spin_lock_t refcount_stacks_lock;

#define auto_refcount_stacks_lock() spin_lock(&refcount_stacks_lock)
#define auto_refcount_stacks_unlock() spin_unlock(&refcount_stacks_lock)
203
204
205void auto_record_refcount_stack(auto_zone_t *azone, void *ptr, int delta)
206{
207    int h, e;
208    vm_address_t addr = (vm_address_t)ptr;
209    auto_refcount_history_t *history = NULL;
210    auto_refcount_history_entry_t *entry;
211
212    auto_refcount_stacks_lock();
213
214    if (history_used >= history_allocated * 3 / 4) {
215        auto_refcount_history_t *old_list = history_list;
216        int old_allocated = history_allocated;
217        history_allocated = old_allocated ? old_allocated * 2 : 10000;
218        history_list =
219            malloc_zone_calloc(auto_debug_zone(),
220                               history_allocated, sizeof(*history_list));
221        // history_used remains unchanged
222
223        // rehash
224        for (h = 0; h < history_used; h++) {
225            if (old_list[h].address) {
226                history_list[history_hash(old_list[h].address)] = old_list[h];
227            }
228        }
229        malloc_zone_free(auto_debug_zone(), old_list);
230    }
231
232    history = &history_list[history_hash(addr)];
233    if (history->address != addr) {
234        // initialize new location
235        history->address = (vm_address_t)ptr;
236        history->nextEntry = 0;
237        for (e = 0; e < ENTRY_COUNT; e++) {
238            history->entries[e].oldCount = UNUSED;
239        }
240        history_used++;
241    }
242
243    entry = &history->entries[history->nextEntry++];
244    if (history->nextEntry == ENTRY_COUNT) history->nextEntry = 0;
245
246    bzero(entry, sizeof(*entry));
247    CHECK_ADDR8(0);
248 done:
249    entry->oldCount = auto_zone_retain_count((void *)azone, ptr);
250    entry->newCount = entry->oldCount + delta;
251
252    auto_refcount_stacks_unlock();
253}
254
255
256void auto_print_refcount_stacks(void *ptr)
257{
258    int e;
259    vm_address_t addr = (vm_address_t)ptr;
260    auto_refcount_history_t *history = NULL;
261    auto_refcount_history_entry_t *entry;
262
263    auto_refcount_stacks_lock();
264
265    history = &history_list[history_hash(addr)];
266
267    if (history->address != addr) {
268        malloc_printf("No refcount history for address %p\n", ptr);
269        return;
270    }
271
272    fprintf(stderr, "\nREFCOUNT HISTORY FOR %p\n\n", ptr);
273
274    e = history->nextEntry;
275    do {
276        entry = &history->entries[e];
277        if (entry->oldCount != UNUSED) {
278            int s;
279
280            if (entry->oldCount == entry->newCount) {
281                fprintf(stderr, "\nrefcount %d (new)\tadecodestack  ", entry->newCount);
282            } else {
283                fprintf(stderr, "refcount %d -> %d \tadecodestack  ",
284                        entry->oldCount, entry->newCount);
285            }
286
287            for (s = 0; s < 8  &&  entry->stack[s]; s++) {
288                fprintf(stderr, "%p   ", (void *)entry->stack[s]);
289            }
290
291            fprintf(stderr, "\n");
292        }
293        e++;
294        if (e == ENTRY_COUNT) e = 0;
295    } while (e != history->nextEntry);
296
297    auto_refcount_stacks_unlock();
298
299    fprintf(stderr, "\ndone\n");
300}
301
302//
303// ptr_set utilities
304//
305// Pointer sets are used to track the use of allocated objects.
306//
307
// Tuning: PTR_SET_DEPTH bounds the length of a linear-probe cluster (a
// "hash group"); PTR_SET_GROWTH is the table growth increment.
#define PTR_SET_DEPTH   4
#define PTR_SET_GROWTH  333

// Internals

// Closed-hashing (open-addressing) table of pointers, guarded by a spin
// lock.  NULL marks an empty slot, so NULL can never be a member.
struct ptr_set {
    spin_lock_t lock;
    unsigned length;                    // length of set
    void **members;                     // closed hash table of pointers
    void **end;                         // end + 1 of hash table (faster next pointer)
};

static void **ptr_set_find_slot(ptr_set *set, void *ptr);
static void ptr_set_grow(ptr_set *set);

static inline void **ptr_set_next(ptr_set *set, void **slot) { return ++slot == set->end ? set->members : slot; }
    // return the next slot (wrap around)


// Multiplicative hash: 2654435761 is the 32-bit golden-ratio constant;
// >> 2 drops the always-zero alignment bits of the pointer.
static inline intptr_t ptr_set_hash(ptr_set *set, void *ptr) { return (((intptr_t)ptr >> 2) * 2654435761u) % set->length; }
    // return the hash index of the specified pointer
329
330
331static boolean_t ptr_set_add_no_lock_did_grow(ptr_set *set, void *ptr) {
332    // add a member to the set
333
334    boolean_t didGrow = false;
335
336    // current slot
337    void **slot;
338
339    // don't add NULL entries
340    if (ptr == NULL) return false;
341
342    // make sure it is 4 byte aligned (or will grow forever)
343    if ((intptr_t)ptr & 0x3) {
344        malloc_printf("*** %s: ptr_set_add(ptr=%p) not pointer aligned\n", auto_prelude(), ptr);
345        return false;
346    }
347
348    // try and try again
349    while (1) {
350        // find the pointers slot
351        slot = ptr_set_find_slot(set, ptr);
352        // if found escape loop
353        if (slot != NULL) break;
354        // grow the table to make more room for the hash group
355        ptr_set_grow(set);
356	didGrow = true;
357    }
358
359    // set the slot (may be redundant if the pointer is already in hashtable)
360    *slot = ptr;
361    return didGrow;
362}
363
364static void ptr_set_grow(ptr_set *set) {
365    // Provide more room in the hashtable and rehash the old entries.
366
367    // current slot
368    void **slot;
369
370    // capture old hashtable length
371    unsigned old_length = set->length;
372    // capture old members
373    void **old_members = set->members;
374    // capture old end
375    void **old_end = set->end;
376
377    /// new hashtable length
378    set->length = (old_length + PTR_SET_GROWTH) | 1;
379    // allocate new hashtable
380    set->members = (void **)aux_calloc(set->length, sizeof(void *));
381    // set end
382    set->end = set->members + set->length;
383
384    // rehash old entries
385    for (slot = old_members; slot < old_end; slot++) ptr_set_add_no_lock_did_grow(set, *slot);
386
387    // release old hashtable
388    aux_free(old_members);
389}
390
391
392static void **ptr_set_find_slot(ptr_set *set, void *ptr) {
393    // find the slot the ptr should reside
394
395    // current slot
396    void **slot;
397    // depth index
398    unsigned depth;
399    // ptr  hash
400    unsigned hash = ptr_set_hash(set, ptr);
401
402    // iterate for the closed hash depth
403    for (depth = 0, slot = set->members + hash; depth < PTR_SET_DEPTH; depth++, slot = ptr_set_next(set, slot)) {
404        // get the old member in the slot
405        void *old_member = *slot;
406        // return the slot if the slot is empty or already contains the ptr
407        if (old_member == NULL || old_member == ptr) return slot;
408        // otherwise check to see if the entry is a member of the same hash group
409        if (hash != ptr_set_hash(set, old_member)) return NULL;
410    }
411
412    // not found
413    return NULL;
414}
415
416// Externals
417
418__private_extern__ ptr_set *ptr_set_new() {
419    // initialize the pointer set
420
421    ptr_set *set = aux_malloc(sizeof(ptr_set));
422
423    // zero lock
424    set->lock = 0;
425    // set length
426    set->length = PTR_SET_GROWTH;
427    // allocate members
428    set->members = (void **)aux_calloc(PTR_SET_GROWTH, sizeof(void *));
429    // set end
430    set->end = set->members + PTR_SET_GROWTH;
431
432    return set;
433}
434
435
// Release the hashtable and the set itself.  The set must not be used
// afterwards; members themselves are not freed (the set does not own them).
__private_extern__ void ptr_set_dispose(ptr_set *set) {
    // release memory allocate by the set
    aux_free(set->members);
    aux_free(set);
}
441
442
443__private_extern__ void ptr_set_add(ptr_set *set, void *ptr) {
444    spin_lock(&set->lock);
445    ptr_set_add_no_lock_did_grow(set, ptr);
446    spin_unlock(&set->lock);
447}
448
449__private_extern__ int ptr_set_is_member_no_lock(ptr_set *set, void *ptr) {
450    // check to see if the pointer is a member of the set
451
452    // find the slot
453    void **slot = ptr_set_find_slot(set, ptr);
454    // return true if the slot is found and contains the pointer
455    return (slot != NULL && *slot == ptr);
456}
457
458__private_extern__ int ptr_set_is_member(ptr_set *set, void *ptr) {
459    // check to see if the pointer is a member of the set
460
461    spin_lock(&set->lock);
462    // find the slot
463    void **slot = ptr_set_find_slot(set, ptr);
464    // return true if the slot is found and contains the pointer
465    boolean_t result = slot != NULL && *slot == ptr;
466    spin_unlock(&set->lock);
467    return result;
468}
469
470
// Remove ptr from the set (no-op when absent).  Because lookup stops at the
// first empty slot in a hash group, deletion must compact the group: later
// members of the same group are shifted down one slot so no hole is left in
// the middle of the cluster.
__private_extern__ void ptr_set_remove(ptr_set *set, void *ptr) {
    spin_lock(&set->lock);
    // find the slot holding the pointer (if any)
    void **slot = ptr_set_find_slot(set, ptr);

    // only compact if the pointer is actually a member
    if (slot != NULL && *slot == ptr) {
        // hash group the vacated slot belongs to
        unsigned hash = ptr_set_hash(set, ptr);

        // next member slot
        void **next_slot;

        // walk the rest of the group, shifting members into the gap
        for (next_slot =  ptr_set_next(set, slot); 1; next_slot =  ptr_set_next(set, next_slot)) {
            // get the next candidate
            void *old_member = *next_slot;
            // an empty slot or a member of a different group ends this group
            if (old_member == NULL || hash != ptr_set_hash(set, old_member)) {
                // the last slot of the group is now vacant
                *slot = NULL;
                break;
            }

            // shift this member down into the vacated slot
            *slot = *next_slot;
            slot = next_slot;
        }
    }
    spin_unlock(&set->lock);
}
504
// Pointer-to-pointer map built like ptr_set, but with two parallel arrays:
// keys live in members[0 .. length-1] and each key's value lives exactly
// `length` slots later, at members[length .. 2*length-1].
struct ptr_map {
    spin_lock_t lock;
    unsigned length;                    // length of set
    void **members;                     // closed hash table of pointers
    void **end;                         // end + 1 of hash table (faster next pointer)
};

static void **ptr_map_find_slot(ptr_map *map, void *ptr);
static void ptr_map_grow(ptr_map *map);

static inline void **ptr_map_next(ptr_map *map, void **slot) { return ++slot == map->end ? map->members : slot; }
    // return the next slot (wrap around)


// Same golden-ratio hash as ptr_set, but >> 4 because keys are required to
// be 16-byte aligned (see ptr_map_add_no_lock_did_grow).
static inline intptr_t ptr_map_hash(ptr_map *map, void *ptr) { return (((uintptr_t)ptr >> 4) * 2654435761u) % map->length; }
    // return the hash index of the specified pointer
522
523static boolean_t ptr_map_add_no_lock_did_grow(ptr_map *map, void *ptr, void *value) {
524    // add a member to the map
525
526    boolean_t didGrow = false;
527
528    // current slot
529    void **slot;
530
531    // don't add NULL entries
532    if (ptr == NULL) return false;
533
534    // make sure it is 16 byte aligned (or will grow forever)
535    if ((intptr_t)ptr & 15) {
536        malloc_printf("*** %s: ptr_map_add(ptr=%p) not object aligned\n", auto_prelude(), ptr);
537        return false;
538    }
539
540    // try and try again
541    while (1) {
542        // find the pointers slot
543        slot = ptr_map_find_slot(map, ptr);
544        // if found escape loop
545        if (slot != NULL) break;
546        // grow the table to make more room for the hash group
547        ptr_map_grow(map);
548	didGrow = true;
549    }
550
551    // map the slot (may be redundant if the pointer is already in hashtable)
552    *slot = ptr;
553    *(slot + map->length) = value;
554    return didGrow;
555}
556
557static void ptr_map_grow(ptr_map *map) {
558    // Provide more room in the hashtable and rehash the old entries.
559
560    // current slot
561    void **slot;
562
563    // capture old hashtable length
564    unsigned old_length = map->length;
565    // capture old members
566    void **old_members = map->members;
567    // capture old end
568    void **old_end = map->end;
569
570    /// new hashtable length
571    map->length = (old_length + PTR_SET_GROWTH) | 1;
572    // allocation size
573    size_t size = map->length * sizeof(void *);
574
575     // allocate & clear new hashtable
576    map->members = (void **)aux_calloc(2, size); // enough room for values, too
577    // set end
578    map->end = map->members + map->length;
579
580    // rehash old entries
581    for (slot = old_members; slot < old_end; slot++) ptr_map_add_no_lock_did_grow(map, *slot, *(slot+old_length));
582
583    // release old hashtable
584    aux_free(old_members);
585}
586
587
588static void **ptr_map_find_slot(ptr_map *map, void *ptr) {
589    // find the slot the ptr should reside
590
591    // current slot
592    void **slot;
593    // depth index
594    unsigned depth;
595    // ptr  hash
596    unsigned hash = ptr_map_hash(map, ptr);
597
598    // iterate for the closed hash depth
599    for (depth = 0, slot = map->members + hash; depth < PTR_SET_DEPTH; depth++, slot = ptr_map_next(map, slot)) {
600        // get the old member in the slot
601        void *old_member = *slot;
602        // return the slot if the slot is empty or already contains the ptr
603        if (old_member == NULL || old_member == ptr) return slot;
604        // otherwise check to see if the entry is a member of the same hash group
605        if (hash != ptr_map_hash(map, old_member)) return NULL;
606    }
607
608    // not found
609    return NULL;
610}
611
612// Externals
613
614__private_extern__ ptr_map * ptr_map_new() {
615    // initialize the pointer map
616
617    ptr_map *map = aux_malloc(sizeof(ptr_map));
618
619    // zero lock
620    map->lock = 0;
621    // set length
622    map->length = PTR_SET_GROWTH;
623    // allocation size
624    size_t size = PTR_SET_GROWTH * sizeof(void *);
625    // allocate & clear members
626    map->members = (void **)aux_calloc(2, size);
627    // set end
628    map->end = map->members + PTR_SET_GROWTH;
629
630    return map;
631}
632
633
// Release the hashtable (keys and values are one allocation) and the map
// itself.  Keys/values themselves are not freed (the map does not own them).
__private_extern__ void ptr_map_dispose(ptr_map *map) {
    // release memory allocate by the map
    aux_free(map->members);
    aux_free(map);
}
639
640
641__private_extern__ void ptr_map_set(ptr_map *map, void *ptr, void *value) {
642    spin_lock(&map->lock);
643    ptr_map_add_no_lock_did_grow(map, ptr, value);
644    spin_unlock(&map->lock);
645}
646
647__private_extern__ void * ptr_map_get(ptr_map *map, void *ptr) {
648    // check to see if the pointer is a member of the set
649
650    spin_lock(&map->lock);
651    // find the slot
652    void **slot = ptr_map_find_slot(map, ptr);
653    // return true if the slot is found and contains the pointer
654    void *result = (slot != NULL && *slot == ptr) ? *(slot + map->length) : NULL;
655    spin_unlock(&map->lock);
656    return result;
657}
658
// Remove ptr from the map, returning its stored value (NULL when absent).
// As in ptr_set_remove, deletion must compact the hash group so lookups —
// which stop at the first empty slot — still find the remaining members;
// here the parallel value slots are shifted alongside the keys.
__private_extern__ void *ptr_map_remove(ptr_map *map, void *ptr) {
    void *result = NULL;

    spin_lock(&map->lock);
    // find the slot holding the key (if any)
    void **slot = ptr_map_find_slot(map, ptr);

    // only compact if the key is actually present
    if (slot != NULL && *slot == ptr) {
        // capture the value before the slot is overwritten
        result = *(slot + map->length);
        // hash group the vacated slot belongs to
        unsigned hash = ptr_map_hash(map, ptr);

        // next member slot
        void **next_slot;

        // walk the rest of the group, shifting members into the gap
        for (next_slot =  ptr_map_next(map, slot); 1; next_slot =  ptr_map_next(map, next_slot)) {
            // get the next candidate
            void *old_member = *next_slot;
            // an empty slot or a key of a different group ends this group
            if (old_member == NULL || hash != ptr_map_hash(map, old_member)) {
                // the last slot of the group is now vacant
                *slot = NULL;
                break;
            }

            // shift key and its parallel value down into the vacated slot
            *slot = *next_slot; *(slot+map->length) = *(next_slot+map->length);
            slot = next_slot;
        }
    }
    spin_unlock(&map->lock);

    return result;
}
697
698/************ Miscellany **************************/
699
700
701// Watching
702
// Fixed-size, packed (NULL-terminated) list of watched addresses, used by
// the auto_zone_watch* debugging hooks below.
#define WatchLimit 16
static const void *WatchPoints[WatchLimit];
705
706void auto_zone_watch(const void *ptr) {
707    for (int i = 0; i < WatchLimit; ++i) {
708        if (WatchPoints[i]) {
709            if (WatchPoints[i] == ptr) return;
710        } else {
711            WatchPoints[i] = ptr;
712            return;
713        }
714    }
715    printf("too many watchpoints already, skipping %p\n", ptr);
716}
717
718void auto_zone_watch_free(const void *ptr, watch_block_t block) {
719    for (int i = 0; i < WatchLimit; ++i) {
720        if (WatchPoints[i] == NULL) return;
721        if (WatchPoints[i] == ptr) {
722            block();
723            while(++i < WatchLimit)
724                WatchPoints[i-1] = WatchPoints[i];
725            WatchPoints[WatchLimit-1] = NULL;
726            return;
727        }
728    }
729}
730
731void auto_zone_watch_apply(void *ptr, watch_block_t block) {
732    for (int i = 0; i < WatchLimit; ++i) {
733        if (WatchPoints[i] == NULL) return;
734        if (WatchPoints[i] == ptr) {
735            block();
736            return;
737        }
738    }
739}
740
// Deliberately-empty error hooks: convenient places to set breakpoints when
// chasing the corresponding collector error conditions.

__private_extern__ void auto_refcount_underflow_error(void *block) { }

__private_extern__ void auto_zone_resurrection_error()
{
}

__private_extern__ void auto_zone_thread_local_error(void) { }

// Also fires the test probe so the unit tester can observe the condition.
__private_extern__ void auto_zone_thread_registration_error()
{
    AUTO_PROBE(auto_probe_unregistered_thread_error());
}

__private_extern__ void auto_zone_global_data_memmove_error() { }

__private_extern__ void auto_zone_association_error(void *address) { }

__private_extern__ void auto_zone_unscanned_store_error(const void *destination, const void *value) { }
759