1/*
2 * Copyright (c) 2008-2013 Apple Inc. All rights reserved.
3 *
4 * @APPLE_APACHE_LICENSE_HEADER_START@
5 *
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
9 *
10 *     http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 *
18 * @APPLE_APACHE_LICENSE_HEADER_END@
19 */
20
21/*
22 * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch
23 * which are subject to change in future releases of Mac OS X. Any applications
24 * relying on these interfaces WILL break.
25 */
26
27#ifndef __DISPATCH_QUEUE_INTERNAL__
28#define __DISPATCH_QUEUE_INTERNAL__
29
30#ifndef __DISPATCH_INDIRECT__
31#error "Please #include <dispatch/dispatch.h> instead of this file directly."
32#include <dispatch/base.h> // for HeaderDoc
33#endif
34
35#if defined(__BLOCKS__) && !defined(DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES)
36#define DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES 1 // <rdar://problem/10719357>
37#endif
38
/* x86 & cortex-a8 have a 64 byte cacheline */
#define DISPATCH_CACHELINE_SIZE 64u
// Round x up to the next multiple of the cacheline size (power-of-two
// mask trick; returns x unchanged when already aligned).
#define ROUND_UP_TO_CACHELINE_SIZE(x) \
		(((x) + (DISPATCH_CACHELINE_SIZE - 1u)) & \
		~(DISPATCH_CACHELINE_SIZE - 1u))
// Align a declaration on a cacheline boundary (avoids false sharing).
#define DISPATCH_CACHELINE_ALIGN \
		__attribute__((__aligned__(DISPATCH_CACHELINE_SIZE)))
46
47
48#pragma mark -
49#pragma mark dispatch_queue_t
50
/*
 * Fields common to every dispatch queue object; spliced into
 * struct dispatch_queue_s (and, through it, all queue subclasses).
 * The field order is layout-sensitive — the cacheline-boundary comment
 * below and DISPATCH_QUEUE_CACHELINE_PAD depend on it; do not reorder.
 */
#define DISPATCH_QUEUE_HEADER \
	uint32_t volatile dq_running; \
	struct dispatch_object_s *volatile dq_items_head; \
	/* LP64 global queue cacheline boundary */ \
	struct dispatch_object_s *volatile dq_items_tail; \
	dispatch_queue_t dq_specific_q; \
	uint16_t dq_width; \
	uint16_t dq_is_thread_bound:1; \
	pthread_priority_t dq_priority; \
	mach_port_t dq_thread; \
	mach_port_t volatile dq_tqthread; \
	uint32_t volatile dq_override; \
	unsigned long dq_serialnum; \
	const char *dq_label; \
	DISPATCH_INTROSPECTION_QUEUE_LIST;

// dq_width is a uint16_t, so queue width is capped accordingly.
#define DISPATCH_QUEUE_WIDTH_MAX UINT16_MAX
68
// Trailing padding so a statically allocated dispatch_queue_s ends on a
// cacheline boundary (used "for static queues only", see struct below).
// NOTE(review): the pointer-count constants (0 on LP64, 13 on ILP32)
// presumably account for the size of the preceding header fields on each
// architecture — they must be kept in sync if DISPATCH_QUEUE_HEADER or
// DISPATCH_STRUCT_HEADER changes; confirm against the struct layout.
#define DISPATCH_QUEUE_CACHELINE_PADDING \
		char _dq_pad[DISPATCH_QUEUE_CACHELINE_PAD]
#ifdef __LP64__
#define DISPATCH_QUEUE_CACHELINE_PAD (( \
		(0*sizeof(void*) - DISPATCH_INTROSPECTION_QUEUE_LIST_SIZE) \
		+ DISPATCH_CACHELINE_SIZE) % DISPATCH_CACHELINE_SIZE)
#else
#define DISPATCH_QUEUE_CACHELINE_PAD (( \
		(13*sizeof(void*) - DISPATCH_INTROSPECTION_QUEUE_LIST_SIZE) \
		+ DISPATCH_CACHELINE_SIZE) % DISPATCH_CACHELINE_SIZE)
#endif
80
DISPATCH_CLASS_DECL(queue);
// The base queue object: common object header, shared queue fields, and
// optional cacheline padding (meaningful for statically allocated queues).
struct dispatch_queue_s {
	DISPATCH_STRUCT_HEADER(queue);
	DISPATCH_QUEUE_HEADER;
	DISPATCH_QUEUE_CACHELINE_PADDING; // for static queues only
};

// Internal queue subclasses sharing the dispatch_queue_s layout.
DISPATCH_INTERNAL_SUBCLASS_DECL(queue_root, queue);
DISPATCH_INTERNAL_SUBCLASS_DECL(queue_runloop, queue);
DISPATCH_INTERNAL_SUBCLASS_DECL(queue_mgr, queue);

// Queue subclass backing dq_specific_q above.
// NOTE(review): semantics are defined at the implementation site — confirm.
DISPATCH_DECL_INTERNAL_SUBCLASS(dispatch_queue_specific_queue, dispatch_queue);
DISPATCH_CLASS_DECL(queue_specific_queue);
94
/*
 * Internal queue entry points. Only the declarations live here; the
 * implementations (and thus the full semantics) are in the .c files,
 * so the notes below are limited to what the signatures establish.
 */
void _dispatch_queue_destroy(dispatch_object_t dou);
void _dispatch_queue_dispose(dispatch_queue_t dq);
void _dispatch_queue_invoke(dispatch_queue_t dq);
// Slow paths for pushing a single object (or a chain of `n` objects) onto
// `dq` at priority `pp`; `retained` presumably indicates whether the caller
// already holds a reference — confirm at the implementation site.
void _dispatch_queue_push_list_slow(dispatch_queue_t dq,
		pthread_priority_t pp, struct dispatch_object_s *obj, unsigned int n,
		bool retained);
void _dispatch_queue_push_slow(dispatch_queue_t dq,
		pthread_priority_t pp, struct dispatch_object_s *obj, bool retained);
unsigned long _dispatch_queue_probe(dispatch_queue_t dq);
dispatch_queue_t _dispatch_wakeup(dispatch_object_t dou);
dispatch_queue_t _dispatch_queue_wakeup(dispatch_queue_t dq);
void _dispatch_queue_wakeup_with_qos(dispatch_queue_t dq,
		pthread_priority_t pp);
void _dispatch_queue_wakeup_with_qos_and_release(dispatch_queue_t dq,
		pthread_priority_t pp);
_dispatch_thread_semaphore_t _dispatch_queue_drain(dispatch_object_t dou);
void _dispatch_queue_specific_queue_dispose(dispatch_queue_specific_queue_t
		dqsq);
// Probe/dispose entry points for the specialized queue flavors.
unsigned long _dispatch_root_queue_probe(dispatch_queue_t dq);
void _dispatch_pthread_root_queue_dispose(dispatch_queue_t dq);
unsigned long _dispatch_runloop_queue_probe(dispatch_queue_t dq);
void _dispatch_runloop_queue_xref_dispose(dispatch_queue_t dq);
void _dispatch_runloop_queue_dispose(dispatch_queue_t dq);
void _dispatch_mgr_queue_drain(void);
unsigned long _dispatch_mgr_queue_probe(dispatch_queue_t dq);
#if DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES
void _dispatch_mgr_priority_init(void);
#else
// Without pthread root queues, manager priority setup is a no-op.
static inline void _dispatch_mgr_priority_init(void) {}
#endif
// Context-pointer callbacks used as continuation functions.
void _dispatch_after_timer_callback(void *ctxt);
void _dispatch_async_redirect_invoke(void *ctxt);
void _dispatch_sync_recurse_invoke(void *ctxt);
void _dispatch_apply_invoke(void *ctxt);
void _dispatch_apply_redirect_invoke(void *ctxt);
void _dispatch_barrier_async_detached_f(dispatch_queue_t dq, void *ctxt,
		dispatch_function_t func);
void _dispatch_barrier_trysync_f(dispatch_queue_t dq, void *ctxt,
		dispatch_function_t func);

#if DISPATCH_DEBUG
void dispatch_debug_queue(dispatch_queue_t dq, const char* str);
#else
// Release builds: queue debug logging compiles away to nothing.
static inline void dispatch_debug_queue(dispatch_queue_t dq DISPATCH_UNUSED,
		const char* str DISPATCH_UNUSED) {}
#endif

// Format a debug description of `dq` into `buf`; return value follows the
// size_t-returning snprintf-style convention used by these helpers.
size_t dispatch_queue_debug(dispatch_queue_t dq, char* buf, size_t bufsiz);
size_t _dispatch_queue_debug_attr(dispatch_queue_t dq, char* buf,
		size_t bufsiz);
145
#define DISPATCH_QUEUE_QOS_COUNT 6
// One overcommit and one non-overcommit root queue per QoS level.
#define DISPATCH_ROOT_QUEUE_COUNT (DISPATCH_QUEUE_QOS_COUNT * 2)

// Indices into _dispatch_root_queues[].
// must be in lowest to highest qos order (as encoded in pthread_priority_t)
// overcommit qos index values need bit 1 set
enum {
	DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS = 0,
	DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_OVERCOMMIT,
	DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS,
	DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT,
	DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS,
	DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_OVERCOMMIT,
	DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS,
	DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT,
	DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS,
	DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT,
	DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS,
	DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT,
};
165
// Counter backing dq_serialnum assignment.
// NOTE(review): presumably incremented at queue creation — confirm in queue.c.
extern unsigned long volatile _dispatch_queue_serial_numbers;
// Global root queues, indexed by the DISPATCH_ROOT_QUEUE_IDX_* enum above.
extern struct dispatch_queue_s _dispatch_root_queues[];
// The singleton manager queue.
extern struct dispatch_queue_s _dispatch_mgr_q;

#if HAVE_PTHREAD_WORKQUEUE_QOS
extern pthread_priority_t _dispatch_background_priority;
extern pthread_priority_t _dispatch_user_initiated_priority;
#endif
174
175#pragma mark -
176#pragma mark dispatch_queue_attr_t
177
DISPATCH_CLASS_DECL(queue_attr);
// Attribute object describing a queue configuration: QoS class, relative
// priority within that class, and overcommit/concurrency flags.
struct dispatch_queue_attr_s {
	DISPATCH_STRUCT_HEADER(queue_attr);
	qos_class_t dqa_qos_class;
	int dqa_relative_priority;
	unsigned int dqa_overcommit:1, dqa_concurrent:1;
};

// Index values for one of the trailing [2] dimensions of
// _dispatch_queue_attrs below.
enum {
	DQA_INDEX_NON_OVERCOMMIT = 0,
	DQA_INDEX_OVERCOMMIT,
};

// Index values for the other trailing [2] dimension.
enum {
	DQA_INDEX_CONCURRENT = 0,
	DQA_INDEX_SERIAL,
};

// Number of relative-priority values: 0 down to QOS_MIN_RELATIVE_PRIORITY
// inclusive (QOS_MIN_RELATIVE_PRIORITY is negative, hence 1 - min).
#define DISPATCH_QUEUE_ATTR_PRIO_COUNT (1 - QOS_MIN_RELATIVE_PRIORITY)

// Index values for the first dimension of _dispatch_queue_attrs; kept in
// QoS order.
typedef enum {
	DQA_INDEX_QOS_CLASS_UNSPECIFIED = 0,
	DQA_INDEX_QOS_CLASS_MAINTENANCE,
	DQA_INDEX_QOS_CLASS_BACKGROUND,
	DQA_INDEX_QOS_CLASS_UTILITY,
	DQA_INDEX_QOS_CLASS_DEFAULT,
	DQA_INDEX_QOS_CLASS_USER_INITIATED,
	DQA_INDEX_QOS_CLASS_USER_INTERACTIVE,
} _dispatch_queue_attr_index_qos_class_t;

// Precomputed attribute singletons:
// [qos class index][relative priority index][2][2].
// NOTE(review): which trailing dimension maps to overcommit vs. concurrent
// is fixed at the table's definition site — confirm before indexing.
extern const struct dispatch_queue_attr_s _dispatch_queue_attrs[]
		[DISPATCH_QUEUE_ATTR_PRIO_COUNT][2][2];
210
211#pragma mark -
212#pragma mark dispatch_continuation_t
213
// If the value stored in do_vtable is 0xff or below (see
// DISPATCH_OBJ_IS_VTABLE below), the object is a continuation whose
// "vtable" word carries DISPATCH_OBJ_*_BIT flags rather than a pointer.
// Otherwise, the object has a private layout and memory management rules. The
// layout until after 'do_next' must align with normal objects.
// Note the deliberate field-order difference between the two variants:
// on LP64 do_next precedes dc_voucher, on ILP32 dc_voucher precedes do_next.
// The prefix through 'do_next' must stay aligned with normal dispatch
// objects (see the comment above).
#if __LP64__
#define DISPATCH_CONTINUATION_HEADER(x) \
	const void *do_vtable; \
	union { \
		pthread_priority_t dc_priority; \
		int dc_cache_cnt; \
		uintptr_t dc_pad; \
	}; \
	struct dispatch_##x##_s *volatile do_next; \
	struct voucher_s *dc_voucher; \
	dispatch_function_t dc_func; \
	void *dc_ctxt; \
	void *dc_data; \
	void *dc_other;
#define _DISPATCH_SIZEOF_PTR 8
#else
#define DISPATCH_CONTINUATION_HEADER(x) \
	const void *do_vtable; \
	union { \
		pthread_priority_t dc_priority; \
		int dc_cache_cnt; \
		uintptr_t dc_pad; \
	}; \
	struct voucher_s *dc_voucher; \
	struct dispatch_##x##_s *volatile do_next; \
	dispatch_function_t dc_func; \
	void *dc_ctxt; \
	void *dc_data; \
	void *dc_other;
#define _DISPATCH_SIZEOF_PTR 4
#endif
// A continuation occupies eight pointer-sized slots (see the header above).
#define _DISPATCH_CONTINUATION_PTRS 8
#if DISPATCH_HW_CONFIG_UP
// UP devices don't contend on continuations so we don't need to force them to
// occupy a whole cacheline (which is intended to avoid contention)
#define DISPATCH_CONTINUATION_SIZE \
		(_DISPATCH_CONTINUATION_PTRS * _DISPATCH_SIZEOF_PTR)
#else
#define DISPATCH_CONTINUATION_SIZE  ROUND_UP_TO_CACHELINE_SIZE( \
		(_DISPATCH_CONTINUATION_PTRS * _DISPATCH_SIZEOF_PTR))
#endif
// Round a byte count up to a whole number of continuation slots (same
// power-of-two mask trick as ROUND_UP_TO_CACHELINE_SIZE).
#define ROUND_UP_TO_CONTINUATION_SIZE(x) \
		(((x) + (DISPATCH_CONTINUATION_SIZE - 1u)) & \
		~(DISPATCH_CONTINUATION_SIZE - 1u))
261
// Flag bits stored in a continuation's do_vtable word (valid only when
// DISPATCH_OBJ_IS_VTABLE() is false).
#define DISPATCH_OBJ_ASYNC_BIT		0x1
#define DISPATCH_OBJ_BARRIER_BIT	0x2
#define DISPATCH_OBJ_GROUP_BIT		0x4
#define DISPATCH_OBJ_SYNC_SLOW_BIT	0x8
#define DISPATCH_OBJ_BLOCK_RELEASE_BIT 0x10
#define DISPATCH_OBJ_CTXT_FETCH_BIT 0x20
// NOTE(review): 0x40 is skipped here — presumably reserved; confirm before
// assigning it a new meaning.
#define DISPATCH_OBJ_HAS_VOUCHER_BIT 0x80
// vtables are pointers far away from the low page in memory
#define DISPATCH_OBJ_IS_VTABLE(x) ((unsigned long)(x)->do_vtable > 0xfful)
271
// The basic unit of queued work: just the shared continuation header.
struct dispatch_continuation_s {
	DISPATCH_CONTINUATION_HEADER(continuation);
};
typedef struct dispatch_continuation_s *dispatch_continuation_t;

// Continuation cache limits (overridable at build time via the outer #ifndef).
#ifndef DISPATCH_CONTINUATION_CACHE_LIMIT
#if TARGET_OS_EMBEDDED
#define DISPATCH_CONTINUATION_CACHE_LIMIT 112 // one 256k heap for 64 threads
#define DISPATCH_CONTINUATION_CACHE_LIMIT_MEMORYSTATUS_PRESSURE_WARN 16
#else
#define DISPATCH_CONTINUATION_CACHE_LIMIT 65536
#define DISPATCH_CONTINUATION_CACHE_LIMIT_MEMORYSTATUS_PRESSURE_WARN 128
#endif
#endif

dispatch_continuation_t _dispatch_continuation_alloc_from_heap(void);
void _dispatch_continuation_free_to_heap(dispatch_continuation_t c);

#if DISPATCH_USE_MEMORYSTATUS_SOURCE
// With the memorystatus source, the cache limit is a runtime variable
// (adjusted under memory pressure, per the _PRESSURE_WARN constants above).
extern int _dispatch_continuation_cache_limit;
void _dispatch_continuation_free_to_cache_limit(dispatch_continuation_t c);
#else
// Otherwise the limit is the compile-time constant and the cache-limit free
// path degenerates to a plain heap free.
#define _dispatch_continuation_cache_limit DISPATCH_CONTINUATION_CACHE_LIMIT
#define _dispatch_continuation_free_to_cache_limit(c) \
		_dispatch_continuation_free_to_heap(c)
#endif
298
299#pragma mark -
300#pragma mark dispatch_apply_t
301
// Shared state for one dispatch_apply() invocation.
// NOTE(review): the volatile counters are presumably updated concurrently by
// the worker threads — exact protocol is defined at the implementation site.
struct dispatch_apply_s {
	size_t volatile da_index, da_todo;
	size_t da_iterations, da_nested;
	dispatch_continuation_t da_dc;
	_dispatch_thread_semaphore_t da_sema;
	uint32_t da_thr_cnt;
};
typedef struct dispatch_apply_s *dispatch_apply_t;
310
311#pragma mark -
312#pragma mark dispatch_block_t
313
314#ifdef __BLOCKS__
315
// Masks/flags layered on dispatch_block_flags_t: the low bits are the public
// API flags; the two top bits record internally whether a voucher/priority
// was captured.
#define DISPATCH_BLOCK_API_MASK (0x80u - 1)
#define DISPATCH_BLOCK_HAS_VOUCHER (1u << 31)
#define DISPATCH_BLOCK_HAS_PRIORITY (1u << 30)

// Per-block private state captured alongside a dispatch block (magic-tagged;
// see DISPATCH_BLOCK_PRIVATE_DATA_MAGIC and the initializer macro below).
struct dispatch_block_private_data_s {
	unsigned long dbpd_magic;
	dispatch_block_flags_t dbpd_flags;
	unsigned int volatile dbpd_atomic_flags;
	int volatile dbpd_performed;
	pthread_priority_t dbpd_priority;
	voucher_t dbpd_voucher;
	dispatch_block_t dbpd_block;
	struct dispatch_semaphore_s dbpd_group;
	dispatch_queue_t volatile dbpd_queue;
	mach_port_t dbpd_thread;
};
typedef struct dispatch_block_private_data_s *dispatch_block_private_data_t;
333
// dbpd_atomic_flags bits
#define DBF_CANCELED 1u // block has been cancelled
#define DBF_WAITING 2u // dispatch_block_wait has begun
#define DBF_WAITED 4u // dispatch_block_wait has finished without timeout
#define DBF_PERFORM 8u // dispatch_block_perform: don't group_leave

#define DISPATCH_BLOCK_PRIVATE_DATA_MAGIC 0xD159B10C // 0xDISPatch_BLOCk

// Designated initializer for a dispatch_block_private_data_s. Fields not
// listed (atomic flags, performed count, queue, thread) start zeroed per C
// designated-initializer semantics.
#define DISPATCH_BLOCK_PRIVATE_DATA_INITIALIZER(flags, voucher, prio, block) \
		{ \
			.dbpd_magic = DISPATCH_BLOCK_PRIVATE_DATA_MAGIC, \
			.dbpd_flags = (flags), \
			.dbpd_priority = (prio), \
			.dbpd_voucher = (voucher), \
			.dbpd_block = (block), \
			.dbpd_group = DISPATCH_GROUP_INITIALIZER(1), \
		}

dispatch_block_t _dispatch_block_create(dispatch_block_flags_t flags,
		voucher_t voucher, pthread_priority_t priority, dispatch_block_t block);
void _dispatch_block_invoke(const struct dispatch_block_private_data_s *dbcpd);

#endif /* __BLOCKS__ */
357
358#endif
359