1/*
2 * Copyright (c) 2008-2013 Apple Inc. All rights reserved.
3 *
4 * @APPLE_APACHE_LICENSE_HEADER_START@
5 *
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
9 *
10 *     http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 *
18 * @APPLE_APACHE_LICENSE_HEADER_END@
19 */
20
21#include "internal.h"
22
23#pragma mark -
24#pragma mark _os_object_t
25
26unsigned long
27_os_object_retain_count(_os_object_t obj)
28{
29	int xref_cnt = obj->os_obj_xref_cnt;
30	if (slowpath(xref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) {
31		return ULONG_MAX; // global object
32	}
33	return (unsigned long)(xref_cnt + 1);
34}
35
36DISPATCH_NOINLINE
37_os_object_t
38_os_object_retain_internal(_os_object_t obj)
39{
40	return _os_object_retain_internal_inline(obj);
41}
42
43DISPATCH_NOINLINE
44void
45_os_object_release_internal(_os_object_t obj)
46{
47	return _os_object_release_internal_inline(obj);
48}
49
50DISPATCH_NOINLINE
51_os_object_t
52_os_object_retain(_os_object_t obj)
53{
54	int xref_cnt = obj->os_obj_xref_cnt;
55	if (slowpath(xref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) {
56		return obj; // global object
57	}
58	xref_cnt = dispatch_atomic_inc2o(obj, os_obj_xref_cnt, relaxed);
59	if (slowpath(xref_cnt <= 0)) {
60		_OS_OBJECT_CLIENT_CRASH("Resurrection of an object");
61	}
62	return obj;
63}
64
65DISPATCH_NOINLINE
66void
67_os_object_release(_os_object_t obj)
68{
69	int xref_cnt = obj->os_obj_xref_cnt;
70	if (slowpath(xref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) {
71		return; // global object
72	}
73	xref_cnt = dispatch_atomic_dec2o(obj, os_obj_xref_cnt, relaxed);
74	if (fastpath(xref_cnt >= 0)) {
75		return;
76	}
77	if (slowpath(xref_cnt < -1)) {
78		_OS_OBJECT_CLIENT_CRASH("Over-release of an object");
79	}
80	return _os_object_xref_dispose(obj);
81}
82
83bool
84_os_object_retain_weak(_os_object_t obj)
85{
86	int xref_cnt = obj->os_obj_xref_cnt;
87	if (slowpath(xref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) {
88		return true; // global object
89	}
90retry:
91	if (slowpath(xref_cnt == -1)) {
92		return false;
93	}
94	if (slowpath(xref_cnt < -1)) {
95		goto overrelease;
96	}
97	if (slowpath(!dispatch_atomic_cmpxchgvw2o(obj, os_obj_xref_cnt, xref_cnt,
98			xref_cnt + 1, &xref_cnt, relaxed))) {
99		goto retry;
100	}
101	return true;
102overrelease:
103	_OS_OBJECT_CLIENT_CRASH("Over-release of an object");
104}
105
106bool
107_os_object_allows_weak_reference(_os_object_t obj)
108{
109	int xref_cnt = obj->os_obj_xref_cnt;
110	if (slowpath(xref_cnt == -1)) {
111		return false;
112	}
113	if (slowpath(xref_cnt < -1)) {
114		_OS_OBJECT_CLIENT_CRASH("Over-release of an object");
115	}
116	return true;
117}
118
119#pragma mark -
120#pragma mark dispatch_object_t
121
void *
_dispatch_alloc(const void *vtable, size_t size)
{
	// All dispatch objects are allocated as already-realized os_objects.
	void *obj = _os_object_alloc_realized(vtable, size);
	return obj;
}
127
// Public retain. NOTE(review): DISPATCH_OBJECT_TFB presumably diverts to the
// ObjC implementation for toll-free-bridged builds — macro defined elsewhere.
void
dispatch_retain(dispatch_object_t dou)
{
	DISPATCH_OBJECT_TFB(_dispatch_objc_retain, dou);
	(void)_os_object_retain(dou._os_obj);
}
134
// Public release. NOTE(review): DISPATCH_OBJECT_TFB presumably diverts to the
// ObjC implementation for toll-free-bridged builds — macro defined elsewhere.
void
dispatch_release(dispatch_object_t dou)
{
	DISPATCH_OBJECT_TFB(_dispatch_objc_release, dou);
	_os_object_release(dou._os_obj);
}
141
142static void
143_dispatch_dealloc(dispatch_object_t dou)
144{
145	dispatch_queue_t tq = dou._do->do_targetq;
146	dispatch_function_t func = dou._do->do_finalizer;
147	void *ctxt = dou._do->do_ctxt;
148
149	_os_object_dealloc(dou._os_obj);
150
151	if (func && ctxt) {
152		dispatch_async_f(tq, ctxt, func);
153	}
154	_dispatch_release(tq);
155}
156
// Called when the last external (xref) reference goes away. Performs
// type-specific external teardown, then drops the matching internal
// reference (non-ObjC builds only — under USE_OBJC the runtime presumably
// handles the internal release; confirm against the ObjC integration).
void
_dispatch_xref_dispose(dispatch_object_t dou)
{
	if (slowpath(DISPATCH_OBJECT_SUSPENDED(dou._do))) {
		// Arguments for and against this assert are within 6705399
		DISPATCH_CLIENT_CRASH("Release of a suspended object");
	}
#if !USE_OBJC
	if (dx_type(dou._do) == DISPATCH_SOURCE_KEVENT_TYPE) {
		_dispatch_source_xref_dispose(dou._ds);
	} else if (dou._dq->do_vtable == DISPATCH_VTABLE(queue_runloop)) {
		_dispatch_runloop_queue_xref_dispose(dou._dq);
	}
	return _dispatch_release(dou._os_obj);
#endif
}
173
174void
175_dispatch_dispose(dispatch_object_t dou)
176{
177	if (slowpath(dou._do->do_next != DISPATCH_OBJECT_LISTLESS)) {
178		DISPATCH_CRASH("Release while enqueued");
179	}
180	dx_dispose(dou._do);
181	return _dispatch_dealloc(dou);
182}
183
184void *
185dispatch_get_context(dispatch_object_t dou)
186{
187	DISPATCH_OBJECT_TFB(_dispatch_objc_get_context, dou);
188	if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) ||
189			slowpath(dx_type(dou._do) == DISPATCH_QUEUE_ROOT_TYPE)) {
190		return NULL;
191	}
192	return dou._do->do_ctxt;
193}
194
195void
196dispatch_set_context(dispatch_object_t dou, void *context)
197{
198	DISPATCH_OBJECT_TFB(_dispatch_objc_set_context, dou, context);
199	if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) ||
200			slowpath(dx_type(dou._do) == DISPATCH_QUEUE_ROOT_TYPE)) {
201		return;
202	}
203	dou._do->do_ctxt = context;
204}
205
206void
207dispatch_set_finalizer_f(dispatch_object_t dou, dispatch_function_t finalizer)
208{
209	DISPATCH_OBJECT_TFB(_dispatch_objc_set_finalizer_f, dou, finalizer);
210	if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) ||
211			slowpath(dx_type(dou._do) == DISPATCH_QUEUE_ROOT_TYPE)) {
212		return;
213	}
214	dou._do->do_finalizer = finalizer;
215}
216
217void
218dispatch_suspend(dispatch_object_t dou)
219{
220	DISPATCH_OBJECT_TFB(_dispatch_objc_suspend, dou);
221	if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) ||
222			slowpath(dx_type(dou._do) == DISPATCH_QUEUE_ROOT_TYPE)) {
223		return;
224	}
225	// rdar://8181908 explains why we need to do an internal retain at every
226	// suspension.
227	(void)dispatch_atomic_add2o(dou._do, do_suspend_cnt,
228			DISPATCH_OBJECT_SUSPEND_INTERVAL, acquire);
229	_dispatch_retain(dou._do);
230}
231
// Slow path for dispatch_resume(): taken when the suspend count drops back
// to zero. Wakes the object so pending work can run, then drops the
// internal retain that the matching dispatch_suspend() took.
DISPATCH_NOINLINE
static void
_dispatch_resume_slow(dispatch_object_t dou)
{
	_dispatch_wakeup(dou._do);
	// Balancing the retain() done in suspend() for rdar://8181908
	_dispatch_release(dou._do);
}
240
// Public resume: undoes one dispatch_suspend(). Crashes on over-resume.
void
dispatch_resume(dispatch_object_t dou)
{
	DISPATCH_OBJECT_TFB(_dispatch_objc_resume, dou);
	// Global objects cannot be suspended or resumed. This also has the
	// side effect of saturating the suspend count of an object and
	// guarding against resuming due to overflow.
	if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) ||
			slowpath(dx_type(dou._do) == DISPATCH_QUEUE_ROOT_TYPE)) {
		return;
	}
	// Check the previous value of the suspend count. If the previous
	// value was a single suspend interval, the object should be resumed.
	// If the previous value was less than the suspend interval, the object
	// has been over-resumed.
	unsigned int suspend_cnt = dispatch_atomic_sub_orig2o(dou._do,
			 do_suspend_cnt, DISPATCH_OBJECT_SUSPEND_INTERVAL, release);
	if (fastpath(suspend_cnt > DISPATCH_OBJECT_SUSPEND_INTERVAL)) {
		// Still suspended after this resume; just drop the per-suspension
		// internal reference.
		// Balancing the retain() done in suspend() for rdar://8181908
		return _dispatch_release(dou._do);
	}
	if (fastpath(suspend_cnt == DISPATCH_OBJECT_SUSPEND_INTERVAL)) {
		// Last suspension lifted: wake the object (slow path also drops
		// the balancing internal reference).
		return _dispatch_resume_slow(dou);
	}
	DISPATCH_CLIENT_CRASH("Over-resume of an object");
}
267
// Appends refcount/suspension debug state to buf; returns the number of
// characters dsnprintf reports. Counts are printed with the +1 bias removed
// from storage (stored 0 == one reference); the low bit of do_suspend_cnt
// is presumably a lock flag — printed as "locked" here; confirm against the
// suspend-count bit layout in the headers.
size_t
_dispatch_object_debug_attr(dispatch_object_t dou, char* buf, size_t bufsiz)
{
	return dsnprintf(buf, bufsiz, "xrefcnt = 0x%x, refcnt = 0x%x, "
			"suspend_cnt = 0x%x, locked = %d, ", dou._do->do_xref_cnt + 1,
			dou._do->do_ref_cnt + 1,
			dou._do->do_suspend_cnt / DISPATCH_OBJECT_SUSPEND_INTERVAL,
			dou._do->do_suspend_cnt & 1);
}
277