/*
 * runtime.c
 * libclosure
 *
 * Copyright (c) 2008-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_LLVM_LICENSE_HEADER@
 */


#include "Block_private.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <dlfcn.h>
#if TARGET_IPHONE_SIMULATOR
// workaround: 10682842
#define osx_assumes(_x) (_x)
#define osx_assert(_x) if (!(_x)) abort()
#else
#include <assumes.h>
#ifndef osx_assumes
#define osx_assumes(_x) os_assumes(_x)
#endif
#ifndef osx_assert
#define osx_assert(_x) os_assert(_x)
#endif
#endif

#if TARGET_OS_WIN32
#define _CRT_SECURE_NO_WARNINGS 1
#include <windows.h>
static __inline bool OSAtomicCompareAndSwapLong(long oldl, long newl, long volatile *dst)
{
    // fixme barrier is overkill -- see objc-os.h
    long original = InterlockedCompareExchange(dst, newl, oldl);
    return (original == oldl);
}

static __inline bool OSAtomicCompareAndSwapInt(int oldi, int newi, int volatile *dst)
{
    // fixme barrier is overkill -- see objc-os.h
    // cast: the Win32 API takes LONG volatile *
    int original = InterlockedCompareExchange((long volatile *)dst, newi, oldi);
    return (original == oldi);
}
#else
#define OSAtomicCompareAndSwapLong(_Old, _New, _Ptr) __sync_bool_compare_and_swap(_Ptr, _Old, _New)
#define OSAtomicCompareAndSwapInt(_Old, _New, _Ptr) __sync_bool_compare_and_swap(_Ptr, _Old, _New)
#endif


/***********************
Globals
************************/

static void *_Block_copy_class = _NSConcreteMallocBlock;
static void *_Block_copy_finalizing_class = _NSConcreteMallocBlock;
static int _Block_copy_flag = BLOCK_NEEDS_FREE;
static int _Byref_flag_initial_value = BLOCK_BYREF_NEEDS_FREE | 4;  // logical 2

static bool isGC = false;

/*******************************************************************************
Internal Utilities
********************************************************************************/


static int32_t latching_incr_int(volatile int32_t *where) {
    while (1) {
        int32_t old_value = *where;
        if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
            return BLOCK_REFCOUNT_MASK;
        }
        if (OSAtomicCompareAndSwapInt(old_value, old_value+2, where)) {
            return old_value+2;
        }
    }
}

static bool latching_incr_int_not_deallocating(volatile int32_t *where) {
    while (1) {
        int32_t old_value = *where;
        if (old_value & BLOCK_DEALLOCATING) {
            // if deallocating we can't do this
            return false;
        }
        if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
            // if latched, we're leaking this block, and we succeed
            return true;
        }
        if (OSAtomicCompareAndSwapInt(old_value, old_value+2, where)) {
            // otherwise, we must store a new retained value without the deallocating bit set
            return true;
        }
    }
}


// return should_deallocate?
static bool latching_decr_int_should_deallocate(volatile int32_t *where) {
    while (1) {
        int32_t old_value = *where;
        if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
            return false; // latched high
        }
        if ((old_value & BLOCK_REFCOUNT_MASK) == 0) {
            return false;   // underflow, latch low
        }
        int32_t new_value = old_value - 2;
        bool result = false;
        if ((old_value & (BLOCK_REFCOUNT_MASK|BLOCK_DEALLOCATING)) == 2) {
            new_value = old_value - 1;
            result = true;
        }
        if (OSAtomicCompareAndSwapInt(old_value, new_value, where)) {
            return result;
        }
    }
}

// hit zero?
static bool latching_decr_int_now_zero(volatile int32_t *where) {
    while (1) {
        int32_t old_value = *where;
        if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
            return false; // latched high
        }
        if ((old_value & BLOCK_REFCOUNT_MASK) == 0) {
            return false;   // underflow, latch low
        }
        int32_t new_value = old_value - 2;
        if (OSAtomicCompareAndSwapInt(old_value, new_value, where)) {
            return (new_value & BLOCK_REFCOUNT_MASK) == 0;
        }
    }
}
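
/*
 * Illustrative sketch (not compiled in): how the latched refcount encoded in
 * 'flags' evolves under the routines above. The count lives in
 * BLOCK_REFCOUNT_MASK in increments of 2; the low bit is BLOCK_DEALLOCATING.
 * Field values below assume the encodings in Block_private.h.
 */
#if 0
int32_t flags = BLOCK_NEEDS_FREE | 2;           // refcount field 2, logical 1
latching_incr_int(&flags);                      // field 4, logical 2
latching_decr_int_should_deallocate(&flags);    // false; field back to 2
latching_decr_int_should_deallocate(&flags);    // true; field 0, BLOCK_DEALLOCATING set
latching_decr_int_should_deallocate(&flags);    // false; underflow latches low
#endif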


/***********************
GC support stub routines
************************/
#if !TARGET_OS_WIN32
#pragma mark GC Support Routines
#endif



static void *_Block_alloc_default(const unsigned long size, const bool initialCountIsOne __unused, const bool isObject __unused) {
    return malloc(size);
}

static void _Block_assign_default(void *value, void **destptr) {
    *destptr = value;
}

static void _Block_setHasRefcount_default(const void *ptr __unused, const bool hasRefcount __unused) {
}

static void _Block_do_nothing(const void *aBlock __unused) { }

static void _Block_retain_object_default(const void *ptr __unused) {
}

static void _Block_release_object_default(const void *ptr __unused) {
}

static void _Block_assign_weak_default(const void *ptr, void *dest) {
#if !TARGET_OS_WIN32
    *(long *)dest = (long)ptr;
#else
    *(void **)dest = (void *)ptr;
#endif
}

static void _Block_memmove_default(void *dst, void *src, unsigned long size) {
    memmove(dst, src, (size_t)size);
}

static void _Block_memmove_gc_broken(void *dest, void *src, unsigned long size) {
    void **destp = (void **)dest;
    void **srcp = (void **)src;
    while (size) {
        _Block_assign_default(*srcp, destp);
        destp++;
        srcp++;
        size -= sizeof(void *);
    }
}

static void _Block_destructInstance_default(const void *aBlock __unused) {}

/**************************************************************************
GC support callout functions - initially set to stub routines
***************************************************************************/

static void *(*_Block_allocator)(const unsigned long, const bool isOne, const bool isObject) = _Block_alloc_default;
static void (*_Block_deallocator)(const void *) = (void (*)(const void *))free;
static void (*_Block_assign)(void *value, void **destptr) = _Block_assign_default;
static void (*_Block_setHasRefcount)(const void *ptr, const bool hasRefcount) = _Block_setHasRefcount_default;
static void (*_Block_retain_object)(const void *ptr) = _Block_retain_object_default;
static void (*_Block_release_object)(const void *ptr) = _Block_release_object_default;
static void (*_Block_assign_weak)(const void *ptr, void *dest) = _Block_assign_weak_default;
static void (*_Block_memmove)(void *dest, void *src, unsigned long size) = _Block_memmove_default;
static void (*_Block_destructInstance) (const void *aBlock) = _Block_destructInstance_default;


/**************************************************************************
GC support SPI functions - called from ObjC runtime and CoreFoundation
***************************************************************************/

// Public SPI
// Called from objc-auto to turn on GC.
// version 3, 4 arg, but changed 1st arg
void _Block_use_GC( void *(*alloc)(const unsigned long, const bool isOne, const bool isObject),
                    void (*setHasRefcount)(const void *, const bool),
                    void (*gc_assign)(void *, void **),
                    void (*gc_assign_weak)(const void *, void *),
                    void (*gc_memmove)(void *, void *, unsigned long)) {

    isGC = true;
    _Block_allocator = alloc;
    _Block_deallocator = _Block_do_nothing;
    _Block_assign = gc_assign;
    _Block_copy_flag = BLOCK_IS_GC;
    _Block_copy_class = _NSConcreteAutoBlock;
    // blocks with ctors & dtors need to have the dtor run from a class with a finalizer
    _Block_copy_finalizing_class = _NSConcreteFinalizingBlock;
    _Block_setHasRefcount = setHasRefcount;
    _Byref_flag_initial_value = BLOCK_BYREF_IS_GC;   // no refcount
    _Block_retain_object = _Block_do_nothing;
    _Block_release_object = _Block_do_nothing;
    _Block_assign_weak = gc_assign_weak;
    _Block_memmove = gc_memmove;
}

// transitional
void _Block_use_GC5( void *(*alloc)(const unsigned long, const bool isOne, const bool isObject),
                    void (*setHasRefcount)(const void *, const bool),
                    void (*gc_assign)(void *, void **),
                    void (*gc_assign_weak)(const void *, void *)) {
    // until objc calls _Block_use_GC it will call us; supply a broken internal memmove implementation until then
    _Block_use_GC(alloc, setHasRefcount, gc_assign, gc_assign_weak, _Block_memmove_gc_broken);
}


// Called from objc-auto to alternatively turn on retain/release.
// Prior to this the only "object" support we can provide is for those
// super special objects that live in libSystem, namely dispatch queues.
// Blocks and Block_byrefs have their own special entry points.
void _Block_use_RR( void (*retain)(const void *),
                    void (*release)(const void *)) {
    _Block_retain_object = retain;
    _Block_release_object = release;
    _Block_destructInstance = dlsym(RTLD_DEFAULT, "objc_destructInstance");
}

// Called from CF to indicate MRR. Newer version uses a versioned structure, so we can add more functions
// without defining a new entry point.
void _Block_use_RR2(const Block_callbacks_RR *callbacks) {
    _Block_retain_object = callbacks->retain;
    _Block_release_object = callbacks->release;
    _Block_destructInstance = callbacks->destructInstance;
}
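
/*
 * Illustrative sketch (not compiled in): registering MRR callbacks via
 * _Block_use_RR2. Only the three fields this file reads are initialized;
 * Block_callbacks_RR is declared in Block_private.h and may carry additional
 * header fields, left zeroed here. All names are invented for illustration.
 */
#if 0
static void my_retain(const void *obj)      { /* e.g. a CFRetain-style retain */ }
static void my_release(const void *obj)     { /* the matching release */ }
static void my_destruct(const void *aBlock) { /* tear down captured instance state */ }

static const Block_callbacks_RR my_callbacks = {
    .retain = my_retain,
    .release = my_release,
    .destructInstance = my_destruct,
};

static void my_install_callbacks(void) {
    _Block_use_RR2(&my_callbacks);  // CF would call this once at initialization
}
#endif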

/****************************************************************************
Accessors for block descriptor fields
*****************************************************************************/
#if 0
static struct Block_descriptor_1 * _Block_descriptor_1(struct Block_layout *aBlock)
{
    return aBlock->descriptor;
}
#endif

static struct Block_descriptor_2 * _Block_descriptor_2(struct Block_layout *aBlock)
{
    if (! (aBlock->flags & BLOCK_HAS_COPY_DISPOSE)) return NULL;
    uint8_t *desc = (uint8_t *)aBlock->descriptor;
    desc += sizeof(struct Block_descriptor_1);
    return (struct Block_descriptor_2 *)desc;
}

static struct Block_descriptor_3 * _Block_descriptor_3(struct Block_layout *aBlock)
{
    if (! (aBlock->flags & BLOCK_HAS_SIGNATURE)) return NULL;
    uint8_t *desc = (uint8_t *)aBlock->descriptor;
    desc += sizeof(struct Block_descriptor_1);
    if (aBlock->flags & BLOCK_HAS_COPY_DISPOSE) {
        desc += sizeof(struct Block_descriptor_2);
    }
    return (struct Block_descriptor_3 *)desc;
}

static __inline bool _Block_has_layout(struct Block_layout *aBlock) {
    if (! (aBlock->flags & BLOCK_HAS_SIGNATURE)) return false;
    uint8_t *desc = (uint8_t *)aBlock->descriptor;
    desc += sizeof(struct Block_descriptor_1);
    if (aBlock->flags & BLOCK_HAS_COPY_DISPOSE) {
        desc += sizeof(struct Block_descriptor_2);
    }
    return ((struct Block_descriptor_3 *)desc)->layout != NULL;
}
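
/*
 * Layout sketch: the optional descriptor structs sit back to back after the
 * fixed Block_descriptor_1, and the accessors above recover them by stepping
 * past whichever predecessors the flags say are present:
 *
 *   descriptor -> | Block_descriptor_1  (always)                      |
 *                 | Block_descriptor_2  (iff BLOCK_HAS_COPY_DISPOSE)  |
 *                 | Block_descriptor_3  (iff BLOCK_HAS_SIGNATURE)     |
 */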

static void _Block_call_copy_helper(void *result, struct Block_layout *aBlock)
{
    struct Block_descriptor_2 *desc = _Block_descriptor_2(aBlock);
    if (!desc) return;

    (*desc->copy)(result, aBlock); // do fixup
}

static void _Block_call_dispose_helper(struct Block_layout *aBlock)
{
    struct Block_descriptor_2 *desc = _Block_descriptor_2(aBlock);
    if (!desc) return;

    (*desc->dispose)(aBlock);
}

/*******************************************************************************
Internal Support routines for copying
********************************************************************************/

#if !TARGET_OS_WIN32
#pragma mark Copy/Release support
#endif

// Copy, or bump refcount, of a block.  If really copying, call the copy helper if present.
static void *_Block_copy_internal(const void *arg, const bool wantsOne) {
    struct Block_layout *aBlock;

    if (!arg) return NULL;


    // The following would be better done as a switch statement
    aBlock = (struct Block_layout *)arg;
    if (aBlock->flags & BLOCK_NEEDS_FREE) {
        // latches on high
        latching_incr_int(&aBlock->flags);
        return aBlock;
    }
    else if (aBlock->flags & BLOCK_IS_GC) {
        // GC refcounting is expensive so do most refcounting here.
        if (wantsOne && ((latching_incr_int(&aBlock->flags) & BLOCK_REFCOUNT_MASK) == 2)) {
            // Tell collector to hang on to this - it will bump the GC refcount version
            _Block_setHasRefcount(aBlock, true);
        }
        return aBlock;
    }
    else if (aBlock->flags & BLOCK_IS_GLOBAL) {
        return aBlock;
    }

    // It's a stack block.  Make a copy.
    if (!isGC) {
        struct Block_layout *result = malloc(aBlock->descriptor->size);
        if (!result) return NULL;
        memmove(result, aBlock, aBlock->descriptor->size); // bitcopy first
        // reset refcount
        result->flags &= ~(BLOCK_REFCOUNT_MASK|BLOCK_DEALLOCATING);    // XXX not needed
        result->flags |= BLOCK_NEEDS_FREE | 2;  // logical refcount 1
        result->isa = _NSConcreteMallocBlock;
        _Block_call_copy_helper(result, aBlock);
        return result;
    }
    else {
        // Under GC want allocation with refcount 1 so we ask for "true" if wantsOne
        // This allows the copy helper routines to make non-refcounted block copies under GC
        int32_t flags = aBlock->flags;
        bool hasCTOR = (flags & BLOCK_HAS_CTOR) != 0;
        struct Block_layout *result = _Block_allocator(aBlock->descriptor->size, wantsOne, hasCTOR || _Block_has_layout(aBlock));
        if (!result) return NULL;
        memmove(result, aBlock, aBlock->descriptor->size); // bitcopy first
        // reset refcount
        // if we copy a malloc block to a GC block then we need to clear NEEDS_FREE.
        flags &= ~(BLOCK_NEEDS_FREE|BLOCK_REFCOUNT_MASK|BLOCK_DEALLOCATING);   // XXX not needed
        if (wantsOne)
            flags |= BLOCK_IS_GC | 2;
        else
            flags |= BLOCK_IS_GC;
        result->flags = flags;
        _Block_call_copy_helper(result, aBlock);
        if (hasCTOR) {
            result->isa = _NSConcreteFinalizingBlock;
        }
        else {
            result->isa = _NSConcreteAutoBlock;
        }
        return result;
    }
}
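
/*
 * Illustrative client-side sketch (not compiled in; assumes <Block.h>, whose
 * Block_copy/Block_release macros funnel into _Block_copy and _Block_release):
 * copying a stack block to the heap and balancing the copy.
 */
#if 0
static void copy_example(void) {
    int x = 10;
    void (^stackBlock)(void) = ^{ printf("%d\n", x); };
    void (^heapBlock)(void) = Block_copy(stackBlock);  // _Block_copy_internal(arg, true)
    heapBlock();
    Block_release(heapBlock);                          // drops the latched refcount
}
#endif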





// Runtime entry points for maintaining the sharing knowledge of byref data blocks.

// A closure has been copied and its fixup routine is asking us to fix up the reference to the shared byref data
// Closures that aren't copied must still work, so everyone always accesses variables after dereferencing the forwarding ptr.
// We ask if the byref pointer that we know about has already been copied to the heap, and if so, increment it.
// Otherwise we need to copy it and update the stack forwarding pointer
static void _Block_byref_assign_copy(void *dest, const void *arg, const int flags) {
    struct Block_byref **destp = (struct Block_byref **)dest;
    struct Block_byref *src = (struct Block_byref *)arg;

    if (src->forwarding->flags & BLOCK_BYREF_IS_GC) {
        ;   // don't need to do any more work
    }
    else if ((src->forwarding->flags & BLOCK_REFCOUNT_MASK) == 0) {
        // src points to stack
        bool isWeak = ((flags & (BLOCK_FIELD_IS_BYREF|BLOCK_FIELD_IS_WEAK)) == (BLOCK_FIELD_IS_BYREF|BLOCK_FIELD_IS_WEAK));
        // if it's weak, ask for an object (only matters under GC)
        struct Block_byref *copy = (struct Block_byref *)_Block_allocator(src->size, false, isWeak);
        copy->flags = src->flags | _Byref_flag_initial_value; // non-GC one for caller, one for stack
        copy->forwarding = copy; // patch heap copy to point to itself (skip write-barrier)
        src->forwarding = copy;  // patch stack to point to heap copy
        copy->size = src->size;
        if (isWeak) {
            copy->isa = &_NSConcreteWeakBlockVariable;  // mark isa field so it gets weak scanning
        }
        if (src->flags & BLOCK_BYREF_HAS_COPY_DISPOSE) {
            // Trust copy helper to copy everything of interest
            // If more than one field shows up in a byref block this is wrong XXX
            struct Block_byref_2 *src2 = (struct Block_byref_2 *)(src+1);
            struct Block_byref_2 *copy2 = (struct Block_byref_2 *)(copy+1);
            copy2->byref_keep = src2->byref_keep;
            copy2->byref_destroy = src2->byref_destroy;

            if (src->flags & BLOCK_BYREF_LAYOUT_EXTENDED) {
                struct Block_byref_3 *src3 = (struct Block_byref_3 *)(src2+1);
                struct Block_byref_3 *copy3 = (struct Block_byref_3*)(copy2+1);
                copy3->layout = src3->layout;
            }

            (*src2->byref_keep)(copy, src);
        }
        else {
            // just bits.  Blast 'em using _Block_memmove in case they're __strong
            // This copy includes Block_byref_3, if any.
            _Block_memmove(copy+1, src+1,
                           src->size - sizeof(struct Block_byref));
        }
    }
    // already copied to heap
    else if ((src->forwarding->flags & BLOCK_BYREF_NEEDS_FREE) == BLOCK_BYREF_NEEDS_FREE) {
        latching_incr_int(&src->forwarding->flags);
    }
    // assign byref data block pointer into new Block
    _Block_assign(src->forwarding, (void **)destp);
}
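
/*
 * Illustrative sketch (not compiled in): what the forwarding dance above
 * buys. After the first copy, both the stack byref and its heap copy reach
 * the same storage through 'forwarding'.
 */
#if 0
static void forwarding_example(void) {
    __block int counter = 0;
    void (^incr)(void) = Block_copy(^{ counter++; });  // triggers _Block_byref_assign_copy
    incr();
    // The stack frame now reads 'counter' through its forwarding pointer,
    // so it observes the increment made via the heap copy:
    printf("%d\n", counter);  // prints 1
    Block_release(incr);
}
#endif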

// Old compiler SPI
static void _Block_byref_release(const void *arg) {
    struct Block_byref *byref = (struct Block_byref *)arg;
    int32_t refcount;

    // dereference the forwarding pointer since the compiler isn't doing this anymore (ever?)
    byref = byref->forwarding;

    // To support C++ destructors under GC we arrange for there to be a finalizer for this
    // by using an isa that directs the code to a finalizer that calls the byref_destroy method.
    if ((byref->flags & BLOCK_BYREF_NEEDS_FREE) == 0) {
        return; // stack or GC or global
    }
    refcount = byref->flags & BLOCK_REFCOUNT_MASK;
    osx_assert(refcount);
    if (latching_decr_int_should_deallocate(&byref->flags)) {
        if (byref->flags & BLOCK_BYREF_HAS_COPY_DISPOSE) {
            struct Block_byref_2 *byref2 = (struct Block_byref_2 *)(byref+1);
            (*byref2->byref_destroy)(byref);
        }
        _Block_deallocator((struct Block_layout *)byref);
    }
}


/************************************************************
 *
 * API supporting SPI
 * _Block_copy, _Block_release, and (old) _Block_destroy
 *
 ***********************************************************/

#if !TARGET_OS_WIN32
#pragma mark SPI/API
#endif

void *_Block_copy(const void *arg) {
    return _Block_copy_internal(arg, true);
}


// API entry point to release a copied Block
void _Block_release(const void *arg) {
    struct Block_layout *aBlock = (struct Block_layout *)arg;
    if (!aBlock
        || (aBlock->flags & BLOCK_IS_GLOBAL)
        || ((aBlock->flags & (BLOCK_IS_GC|BLOCK_NEEDS_FREE)) == 0)
        ) return;
    if (aBlock->flags & BLOCK_IS_GC) {
        if (latching_decr_int_now_zero(&aBlock->flags)) {
            // Tell GC we no longer have our own refcounts.  GC will decr its refcount
            // and unless someone has done a CFRetain or marked it uncollectable it will
            // now be subject to GC reclamation.
            _Block_setHasRefcount(aBlock, false);
        }
    }
    else if (aBlock->flags & BLOCK_NEEDS_FREE) {
        if (latching_decr_int_should_deallocate(&aBlock->flags)) {
            _Block_call_dispose_helper(aBlock);
            _Block_destructInstance(aBlock);
            _Block_deallocator(aBlock);
        }
    }
}

bool _Block_tryRetain(const void *arg) {
    struct Block_layout *aBlock = (struct Block_layout *)arg;
    return latching_incr_int_not_deallocating(&aBlock->flags);
}

bool _Block_isDeallocating(const void *arg) {
    struct Block_layout *aBlock = (struct Block_layout *)arg;
    return (aBlock->flags & BLOCK_DEALLOCATING) != 0;
}

// Old Compiler SPI point to release a copied Block used by the compiler in dispose helpers
static void _Block_destroy(const void *arg) {
    struct Block_layout *aBlock;
    if (!arg) return;
    aBlock = (struct Block_layout *)arg;
    if (aBlock->flags & BLOCK_IS_GC) {
        // assert(aBlock->Block_flags & BLOCK_HAS_CTOR);
        return; // ignore, we are being called because of a DTOR
    }
    _Block_release(aBlock);
}



/************************************************************
 *
 * SPI used by other layers
 *
 ***********************************************************/

// SPI, also internal.  Called from NSAutoBlock only under GC
void *_Block_copy_collectable(const void *aBlock) {
    return _Block_copy_internal(aBlock, false);
}


// SPI
size_t Block_size(void *aBlock) {
    return ((struct Block_layout *)aBlock)->descriptor->size;
}

bool _Block_use_stret(void *aBlock) {
    struct Block_layout *layout = (struct Block_layout *)aBlock;

    int requiredFlags = BLOCK_HAS_SIGNATURE | BLOCK_USE_STRET;
    return (layout->flags & requiredFlags) == requiredFlags;
}

// Checks for a valid signature, not merely the BLOCK_HAS_SIGNATURE bit.
bool _Block_has_signature(void *aBlock) {
    return _Block_signature(aBlock) ? true : false;
}

const char * _Block_signature(void *aBlock)
{
    struct Block_descriptor_3 *desc3 = _Block_descriptor_3(aBlock);
    if (!desc3) return NULL;

    return desc3->signature;
}
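
/*
 * Illustrative sketch (not compiled in): introspecting a block's signature.
 * The signature is an ObjC @encode-style type string whose exact bytes vary
 * by architecture and ABI.
 */
#if 0
static void signature_example(void) {
    int (^adder)(int, int) = ^(int a, int b){ return a + b; };
    if (_Block_has_signature(adder)) {
        printf("signature: %s\n", _Block_signature(adder));
    }
}
#endif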

const char * _Block_layout(void *aBlock)
{
    // Don't return extended layout to callers expecting GC layout
    struct Block_layout *layout = (struct Block_layout *)aBlock;
    if (layout->flags & BLOCK_HAS_EXTENDED_LAYOUT) return NULL;

    struct Block_descriptor_3 *desc3 = _Block_descriptor_3(aBlock);
    if (!desc3) return NULL;

    return desc3->layout;
}

const char * _Block_extended_layout(void *aBlock)
{
    // Don't return GC layout to callers expecting extended layout
    struct Block_layout *layout = (struct Block_layout *)aBlock;
    if (! (layout->flags & BLOCK_HAS_EXTENDED_LAYOUT)) return NULL;

    struct Block_descriptor_3 *desc3 = _Block_descriptor_3(aBlock);
    if (!desc3) return NULL;

    // Return empty string (all non-object bytes) instead of NULL
    // so callers can distinguish "empty layout" from "no layout".
    if (!desc3->layout) return "";
    else return desc3->layout;
}

#if !TARGET_OS_WIN32
#pragma mark Compiler SPI entry points
#endif


/*******************************************************

Entry points used by the compiler - the real API!


A Block can reference four different kinds of things that require help when the Block is copied to the heap.
1) C++ stack based objects
2) References to Objective-C objects
3) Other Blocks
4) __block variables

In these cases helper functions are synthesized by the compiler for use in Block_copy and Block_release, called the copy and dispose helpers.  The copy helper emits a call to the C++ const copy constructor for C++ stack based objects and for the rest calls into the runtime support function _Block_object_assign.  The dispose helper has a call to the C++ destructor for case 1 and a call into _Block_object_dispose for the rest.

The flags parameter of _Block_object_assign and _Block_object_dispose is set to
    * BLOCK_FIELD_IS_OBJECT (3), for the case of an Objective-C Object,
    * BLOCK_FIELD_IS_BLOCK (7), for the case of another Block, and
    * BLOCK_FIELD_IS_BYREF (8), for the case of a __block variable.
If the __block variable is marked weak the compiler also ORs in BLOCK_FIELD_IS_WEAK (16).

So the Block copy/dispose helpers should only ever generate the four flag values of 3, 7, 8, and 24.

When a __block variable is either a C++ object, an Objective-C object, or another Block then the compiler also generates copy/dispose helper functions.  Similarly to the Block copy helper, the "__block" copy helper (formerly and still a.k.a. "byref" copy helper) will do a C++ copy constructor (not a const one though!) and the dispose helper will do the destructor.  And similarly the helpers will call into the same two support functions with the same values for objects and Blocks, with the additional BLOCK_BYREF_CALLER (128) bit of information supplied.

So the __block copy/dispose helpers will generate flag values of 3 or 7 for objects and Blocks respectively, with BLOCK_FIELD_IS_WEAK (16) or'ed in as appropriate and always 128 or'd in, for the following set of possibilities:
    __block id                   128+3       (0x83)
    __block (^Block)             128+7       (0x87)
    __weak __block id            128+3+16    (0x93)
    __weak __block (^Block)      128+7+16    (0x97)


********************************************************/
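
/*
 * Illustrative sketch (not compiled in) of the helpers the compiler
 * synthesizes for a Block capturing an Objective-C object, e.g.
 *     id obj = ...;  ^{ use(obj); };
 * Struct and function names are invented for illustration; real compiler
 * output uses mangled names and the exact Block ABI layout.
 */
#if 0
struct my_block_literal {
    void *isa;
    int flags;
    int reserved;
    void (*invoke)(struct my_block_literal *);
    struct Block_descriptor_1 *descriptor;
    void *obj;  // the captured object; retained when the Block is copied
};

static void my_block_copy_helper(void *dst, void *src) {
    _Block_object_assign(&((struct my_block_literal *)dst)->obj,
                         ((struct my_block_literal *)src)->obj,
                         BLOCK_FIELD_IS_OBJECT /* 3 */);
}

static void my_block_dispose_helper(void *src) {
    _Block_object_dispose(((struct my_block_literal *)src)->obj,
                          BLOCK_FIELD_IS_OBJECT /* 3 */);
}
#endif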

//
// When Blocks or Block_byrefs hold objects then their copy routine helpers use this entry point
// to do the assignment.
//
void _Block_object_assign(void *destAddr, const void *object, const int flags) {
    switch (osx_assumes(flags & BLOCK_ALL_COPY_DISPOSE_FLAGS)) {
      case BLOCK_FIELD_IS_OBJECT:
        /*******
        id object = ...;
        [^{ object; } copy];
        ********/

        _Block_retain_object(object);
        _Block_assign((void *)object, destAddr);
        break;

      case BLOCK_FIELD_IS_BLOCK:
        /*******
        void (^object)(void) = ...;
        [^{ object; } copy];
        ********/

        _Block_assign(_Block_copy_internal(object, false), destAddr);
        break;

      case BLOCK_FIELD_IS_BYREF | BLOCK_FIELD_IS_WEAK:
      case BLOCK_FIELD_IS_BYREF:
        /*******
         // copy the onstack __block container to the heap
         __block ... x;
         __weak __block ... x;
         [^{ x; } copy];
         ********/

        _Block_byref_assign_copy(destAddr, object, flags);
        break;

      case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT:
      case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK:
        /*******
         // copy the actual field held in the __block container
         __block id object;
         __block void (^object)(void);
         [^{ object; } copy];
         ********/

        // under manual retain release __block object/block variables are dangling
        _Block_assign((void *)object, destAddr);
        break;

      case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT | BLOCK_FIELD_IS_WEAK:
      case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK  | BLOCK_FIELD_IS_WEAK:
        /*******
         // copy the actual field held in the __block container
         __weak __block id object;
         __weak __block void (^object)(void);
         [^{ object; } copy];
         ********/

        _Block_assign_weak(object, destAddr);
        break;

      default:
        break;
    }
}

// When Blocks or Block_byrefs hold objects their destroy helper routines call this entry point
// to help dispose of the contents
// Used initially only for __attribute__((NSObject)) marked pointers.
void _Block_object_dispose(const void *object, const int flags) {
    switch (osx_assumes(flags & BLOCK_ALL_COPY_DISPOSE_FLAGS)) {
      case BLOCK_FIELD_IS_BYREF | BLOCK_FIELD_IS_WEAK:
      case BLOCK_FIELD_IS_BYREF:
        // get rid of the __block data structure held in a Block
        _Block_byref_release(object);
        break;
      case BLOCK_FIELD_IS_BLOCK:
        _Block_destroy(object);
        break;
      case BLOCK_FIELD_IS_OBJECT:
        _Block_release_object(object);
        break;
      case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT:
      case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK:
      case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT | BLOCK_FIELD_IS_WEAK:
      case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK  | BLOCK_FIELD_IS_WEAK:
        break;
      default:
        break;
    }
}
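
/*
 * Illustrative sketch (not compiled in) of the byref helpers the compiler
 * synthesizes for:  __block id obj;
 * These pass BLOCK_BYREF_CALLER|BLOCK_FIELD_IS_OBJECT (128+3, 0x83), per the
 * flag table in the comment above. Names and the flattened struct layout are
 * invented for illustration.
 */
#if 0
struct my_byref {
    void *isa;
    struct my_byref *forwarding;
    int flags;
    int size;
    void (*byref_keep)(struct my_byref *dst, struct my_byref *src);
    void (*byref_destroy)(struct my_byref *);
    void *obj;  // the __block variable itself
};

static void my_byref_keep(struct my_byref *dst, struct my_byref *src) {
    _Block_object_assign(&dst->obj, src->obj,
                         BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT /* 0x83 */);
}

static void my_byref_destroy(struct my_byref *byref) {
    _Block_object_dispose(byref->obj,
                          BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT /* 0x83 */);
}
#endif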