1/*
2 * Copyright (c) 2008-2013 Apple Inc. All rights reserved.
3 *
4 * @APPLE_APACHE_LICENSE_HEADER_START@
5 *
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
9 *
10 *     http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 *
18 * @APPLE_APACHE_LICENSE_HEADER_END@
19 */
20
21/*
22 * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch
23 * which are subject to change in future releases of Mac OS X. Any applications
24 * relying on these interfaces WILL break.
25 */
26
27#ifndef __DISPATCH_INLINE_INTERNAL__
28#define __DISPATCH_INLINE_INTERNAL__
29
30#ifndef __DISPATCH_INDIRECT__
31#error "Please #include <dispatch/dispatch.h> instead of this file directly."
32#include <dispatch/base.h> // for HeaderDoc
33#endif
34
35#if DISPATCH_USE_CLIENT_CALLOUT
36
// Out-of-line client callout entry points used when DISPATCH_USE_CLIENT_CALLOUT
// is set; the inline equivalents below are used otherwise. Presumably kept
// out-of-line so crashes in client code unwind through an identifiable
// frame — confirm against the callout implementation file.
DISPATCH_NOTHROW void
_dispatch_client_callout(void *ctxt, dispatch_function_t f);
DISPATCH_NOTHROW void
_dispatch_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t));
DISPATCH_NOTHROW bool
_dispatch_client_callout3(void *ctxt, dispatch_data_t region, size_t offset,
		const void *buffer, size_t size, dispatch_data_applier_function_t f);
DISPATCH_NOTHROW void
_dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason,
		dispatch_mach_msg_t dmsg, mach_error_t error,
		dispatch_mach_handler_function_t f);
48
49#else // !DISPATCH_USE_CLIENT_CALLOUT
50
51DISPATCH_ALWAYS_INLINE
52static inline void
53_dispatch_client_callout(void *ctxt, dispatch_function_t f)
54{
55	return f(ctxt);
56}
57
58DISPATCH_ALWAYS_INLINE
59static inline void
60_dispatch_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t))
61{
62	return f(ctxt, i);
63}
64
65DISPATCH_ALWAYS_INLINE
66static inline bool
67_dispatch_client_callout3(void *ctxt, dispatch_data_t region, size_t offset,
68		const void *buffer, size_t size, dispatch_data_applier_function_t f)
69{
70	return f(ctxt, region, offset, buffer, size);
71}
72
73DISPATCH_ALWAYS_INLINE
74static inline void
75_dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason,
76		dispatch_mach_msg_t dmsg, mach_error_t error,
77		dispatch_mach_handler_function_t f)
78{
79	return f(ctxt, reason, dmsg, error);
80}
81
82#endif // !DISPATCH_USE_CLIENT_CALLOUT
83
84#if !(USE_OBJC && __OBJC2__)
85
86#pragma mark -
87#pragma mark _os_object_t & dispatch_object_t
88
DISPATCH_ALWAYS_INLINE
static inline _os_object_t
_os_object_retain_internal_inline(_os_object_t obj)
{
	// Take an internal reference on obj and return it. Global
	// (statically allocated) objects are immortal and never retained.
	int ref_cnt = obj->os_obj_ref_cnt;
	if (slowpath(ref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) {
		return obj; // global object
	}
	ref_cnt = dispatch_atomic_inc2o(obj, os_obj_ref_cnt, relaxed);
	if (slowpath(ref_cnt <= 0)) {
		// A non-positive count after the increment means the count had
		// already dropped below zero, i.e. the object was disposed.
		DISPATCH_CRASH("Resurrection of an object");
	}
	return obj;
}
103
DISPATCH_ALWAYS_INLINE
static inline void
_os_object_release_internal_inline(_os_object_t obj)
{
	// Drop an internal reference. The count is zero-based: the final
	// release takes it from 0 to -1, which triggers disposal. Globals
	// are immortal and never released.
	int ref_cnt = obj->os_obj_ref_cnt;
	if (slowpath(ref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) {
		return; // global object
	}
	ref_cnt = dispatch_atomic_dec2o(obj, os_obj_ref_cnt, relaxed);
	if (fastpath(ref_cnt >= 0)) {
		return; // other internal references remain
	}
	if (slowpath(ref_cnt < -1)) {
		// More releases than retains.
		DISPATCH_CRASH("Over-release of an object");
	}
#if DISPATCH_DEBUG
	if (slowpath(obj->os_obj_xref_cnt >= 0)) {
		// Internal refcount hit zero while external (client-visible)
		// references still exist.
		DISPATCH_CRASH("Release while external references exist");
	}
#endif
	return _os_object_dispose(obj);
}
126
127DISPATCH_ALWAYS_INLINE_NDEBUG
128static inline void
129_dispatch_retain(dispatch_object_t dou)
130{
131	(void)_os_object_retain_internal_inline(dou._os_obj);
132}
133
134DISPATCH_ALWAYS_INLINE_NDEBUG
135static inline void
136_dispatch_release(dispatch_object_t dou)
137{
138	_os_object_release_internal_inline(dou._os_obj);
139}
140
141#pragma mark -
142#pragma mark dispatch_thread
143
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_wqthread_override_start(mach_port_t thread,
		pthread_priority_t priority)
{
	// Apply a QOS override to a workqueue thread; no-op when QOS class
	// support is unavailable or disabled at runtime.
#if HAVE_PTHREAD_WORKQUEUE_QOS
	if (!_dispatch_set_qos_class_enabled) return;
	(void)_pthread_workqueue_override_start_direct(thread, priority);
#else
	(void)thread; (void)priority;
#endif
}
156
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_wqthread_override_reset(void)
{
	// Clear any QOS override on the current workqueue thread; no-op when
	// QOS class support is unavailable or disabled at runtime.
#if HAVE_PTHREAD_WORKQUEUE_QOS
	if (!_dispatch_set_qos_class_enabled) return;
	(void)_pthread_workqueue_override_reset();
#endif
}
166
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_thread_override_start(mach_port_t thread, pthread_priority_t priority)
{
	// Apply a QOS-class override to an arbitrary (non-workqueue) thread;
	// no-op when QOS class support is unavailable or disabled at runtime.
#if HAVE_PTHREAD_WORKQUEUE_QOS
	if (!_dispatch_set_qos_class_enabled) return;
	(void)_pthread_override_qos_class_start_direct(thread, priority);
#else
	(void)thread; (void)priority;
#endif
}
178
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_thread_override_end(mach_port_t thread)
{
	// End a QOS-class override previously started with
	// _dispatch_thread_override_start; no-op without QOS support.
#if HAVE_PTHREAD_WORKQUEUE_QOS
	if (!_dispatch_set_qos_class_enabled) return;
	(void)_pthread_override_qos_class_end_direct(thread);
#else
	(void)thread;
#endif
}
190
191#pragma mark -
192#pragma mark dispatch_queue_t
193
// Forward declarations for inlines defined later in this file that the
// queue push/invoke fast paths below need to reference.
static inline bool _dispatch_queue_need_override(dispatch_queue_t dq,
		pthread_priority_t pp);
static inline bool _dispatch_queue_need_override_retain(dispatch_queue_t dq,
		pthread_priority_t pp);
static inline bool _dispatch_queue_retain_if_override(dispatch_queue_t dq,
		pthread_priority_t pp);
static inline pthread_priority_t _dispatch_queue_get_override_priority(
		dispatch_queue_t dq);
static inline pthread_priority_t _dispatch_queue_reset_override_priority(
		dispatch_queue_t dq);
static inline pthread_priority_t _dispatch_get_defaultpriority(void);
static inline void _dispatch_set_defaultpriority_override(void);
static inline void _dispatch_reset_defaultpriority(pthread_priority_t priority);
static inline void _dispatch_set_priority(pthread_priority_t priority);
208
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_set_thread(dispatch_queue_t dq)
{
	// Record the current (draining) thread on the queue.
	// The manager queue uses dispatch_queue_drain but is thread bound;
	// thread-bound queues track their owner via
	// _dispatch_queue_set_bound_thread instead, so dq_thread is left
	// untouched here.
	if (!dq->dq_is_thread_bound) {
		dq->dq_thread = _dispatch_thread_port();
	}
}
218
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_clear_thread(dispatch_queue_t dq)
{
	// Undo _dispatch_queue_set_thread when the drain ends; thread-bound
	// queues keep their owner port.
	if (!dq->dq_is_thread_bound) {
		dq->dq_thread = MACH_PORT_NULL;
	}
}
227
DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_queue_push_list2(dispatch_queue_t dq, struct dispatch_object_s *head,
		struct dispatch_object_s *tail)
{
	// Lock-free enqueue of the pre-linked chain [head..tail] onto dq.
	// The tail pointer is swapped first (release order), then the old
	// tail is linked to the new head. Returns true when the queue was
	// non-empty; a false return means the caller must wake the queue.
	struct dispatch_object_s *prev;
	tail->do_next = NULL;
	prev = dispatch_atomic_xchg2o(dq, dq_items_tail, tail, release);
	if (fastpath(prev)) {
		// if we crash here with a value less than 0x1000, then we are at a
		// known bug in client code for example, see _dispatch_queue_dispose
		// or _dispatch_atfork_child
		prev->do_next = head;
	}
	return (prev != NULL);
}
244
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_push_list(dispatch_queue_t dq, dispatch_object_t _head,
		dispatch_object_t _tail, pthread_priority_t pp, unsigned int n)
{
	// Enqueue a pre-linked chain of n objects at priority pp. The
	// override check retains dq up front; the reference is consumed by
	// either the slow path or the wakeup-and-release call.
	struct dispatch_object_s *head = _head._do, *tail = _tail._do;
	bool override = _dispatch_queue_need_override_retain(dq, pp);
	if (!fastpath(_dispatch_queue_push_list2(dq, head, tail))) {
		// Queue was empty: take the slow path, which wakes the queue.
		_dispatch_queue_push_list_slow(dq, pp, head, n, override);
	} else if (override) {
		_dispatch_queue_wakeup_with_qos_and_release(dq, pp);
	}
}
258
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_push(dispatch_queue_t dq, dispatch_object_t _tail,
		pthread_priority_t pp)
{
	// Enqueue a single object at priority pp. Mirrors
	// _dispatch_queue_push_list for the one-element case: the retained
	// reference from the override check is consumed by the slow path or
	// the wakeup-and-release call.
	struct dispatch_object_s *tail = _tail._do;
	bool override = _dispatch_queue_need_override_retain(dq, pp);
	if (!fastpath(_dispatch_queue_push_list2(dq, tail, tail))) {
		_dispatch_queue_push_slow(dq, pp, tail, override);
	} else if (override) {
		_dispatch_queue_wakeup_with_qos_and_release(dq, pp);
	}
}
272
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_push_wakeup(dispatch_queue_t dq, dispatch_object_t _tail,
		pthread_priority_t pp, bool wakeup)
{
	// Enqueue a single object and optionally force a wakeup even when
	// the queue was non-empty and no QOS override is needed.
	// caller assumed to have a reference on dq
	struct dispatch_object_s *tail = _tail._do;
	if (!fastpath(_dispatch_queue_push_list2(dq, tail, tail))) {
		_dispatch_queue_push_slow(dq, pp, tail, false);
	} else if (_dispatch_queue_need_override(dq, pp)) {
		_dispatch_queue_wakeup_with_qos(dq, pp);
	} else if (slowpath(wakeup)) {
		_dispatch_queue_wakeup(dq);
	}
}
288
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_class_invoke(dispatch_object_t dou,
		dispatch_queue_t (*invoke)(dispatch_object_t,
		_dispatch_thread_semaphore_t*))
{
	// Drain a queue(-like) object: acquire the dq_running "lock" via
	// CAS, run the type-specific invoke function, then either hand the
	// queue off to a returned target queue, signal a waiting thread, or
	// wake/release the queue as appropriate.
	pthread_priority_t p = 0;
	dispatch_queue_t dq = dou._dq;
	if (!slowpath(DISPATCH_OBJECT_SUSPENDED(dq)) &&
			fastpath(dispatch_atomic_cmpxchg2o(dq, dq_running, 0, 1, acquire))){
		_dispatch_queue_set_thread(dq);
		dispatch_queue_t tq = NULL;
		_dispatch_thread_semaphore_t sema = 0;
		tq = invoke(dq, &sema);
		_dispatch_queue_clear_thread(dq);
		p = _dispatch_queue_reset_override_priority(dq);
		if (p > (dq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) {
			// Ensure that the root queue sees that this thread was overridden.
			_dispatch_set_defaultpriority_override();
		}
		// We do not need to check the result.
		// When the suspend-count lock is dropped, then the check will happen.
		(void)dispatch_atomic_dec2o(dq, dq_running, release);
		if (sema) {
			// Another thread is waiting for this drain to finish.
			_dispatch_thread_semaphore_signal(sema);
		} else if (tq) {
			// invoke() requested the queue be re-enqueued on tq; the
			// list reference is transferred by the push.
			_dispatch_introspection_queue_item_complete(dq);
			return _dispatch_queue_push(tq, dq, p);
		}
	}
	dq->do_next = DISPATCH_OBJECT_LISTLESS;
	_dispatch_introspection_queue_item_complete(dq);
	if (!dispatch_atomic_sub2o(dq, do_suspend_cnt,
			DISPATCH_OBJECT_SUSPEND_LOCK, seq_cst)) {
		// seq_cst with atomic store to suspend_cnt <rdar://problem/11915417>
		if (dispatch_atomic_load2o(dq, dq_running, seq_cst) == 0) {
			// verify that the queue is idle
			return _dispatch_queue_wakeup_with_qos_and_release(dq, p);
		}
	}
	_dispatch_release(dq); // added when the queue is put on the list
}
331
DISPATCH_ALWAYS_INLINE
static inline unsigned long
_dispatch_queue_class_probe(dispatch_object_t dou)
{
	// Returns non-zero if the queue has pending items (tail non-NULL).
	dispatch_queue_t dq = dou._dq;
	struct dispatch_object_s *tail;
	// seq_cst with atomic store to suspend_cnt <rdar://problem/14637483>
	tail = dispatch_atomic_load2o(dq, dq_items_tail, seq_cst);
	return (unsigned long)slowpath(tail != NULL);
}
342
DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_object_suspended(dispatch_object_t dou)
{
	// Returns true if the object carries at least one full suspend
	// interval (i.e. it is suspended, not merely drain-locked).
	struct dispatch_object_s *obj = dou._do;
	unsigned int suspend_cnt;
	// seq_cst with atomic store to tail <rdar://problem/14637483>
	suspend_cnt = dispatch_atomic_load2o(obj, do_suspend_cnt, seq_cst);
	return slowpath(suspend_cnt >= DISPATCH_OBJECT_SUSPEND_INTERVAL);
}
353
354DISPATCH_ALWAYS_INLINE
355static inline dispatch_queue_t
356_dispatch_queue_get_current(void)
357{
358	return (dispatch_queue_t)_dispatch_thread_getspecific(dispatch_queue_key);
359}
360
DISPATCH_ALWAYS_INLINE DISPATCH_CONST
static inline dispatch_queue_t
_dispatch_get_root_queue(qos_class_t priority, bool overcommit)
{
	// Map a QOS class (plus overcommit flag) to the corresponding global
	// root queue; returns NULL for an unrecognized QOS class.
	if (overcommit) switch (priority) {
	case _DISPATCH_QOS_CLASS_MAINTENANCE:
		return &_dispatch_root_queues[
				DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_OVERCOMMIT];
	case _DISPATCH_QOS_CLASS_BACKGROUND:
		return &_dispatch_root_queues[
				DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT];
	case _DISPATCH_QOS_CLASS_UTILITY:
		return &_dispatch_root_queues[
				DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_OVERCOMMIT];
	case _DISPATCH_QOS_CLASS_DEFAULT:
		return &_dispatch_root_queues[
				DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT];
	case _DISPATCH_QOS_CLASS_USER_INITIATED:
		return &_dispatch_root_queues[
				DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT];
	case _DISPATCH_QOS_CLASS_USER_INTERACTIVE:
		return &_dispatch_root_queues[
				DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT];
	} else switch (priority) {
	case _DISPATCH_QOS_CLASS_MAINTENANCE:
		return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS];
	case _DISPATCH_QOS_CLASS_BACKGROUND:
		return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS];
	case _DISPATCH_QOS_CLASS_UTILITY:
		return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS];
	case _DISPATCH_QOS_CLASS_DEFAULT:
		return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS];
	case _DISPATCH_QOS_CLASS_USER_INITIATED:
		return &_dispatch_root_queues[
				DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS];
	case _DISPATCH_QOS_CLASS_USER_INTERACTIVE:
		return &_dispatch_root_queues[
				DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS];
	}
	return NULL;
}
402
// Note to later developers: ensure that any initialization changes are
// made for statically allocated queues (i.e. _dispatch_main_q).
static inline void
_dispatch_queue_init(dispatch_queue_t dq)
{
	// Common initialization for dynamically allocated queues: not on any
	// list, not running, serial width, and a unique serial number.
	dq->do_next = (struct dispatch_queue_s *)DISPATCH_OBJECT_LISTLESS;

	dq->dq_running = 0;
	dq->dq_width = 1;
	dq->dq_serialnum = dispatch_atomic_inc_orig(&_dispatch_queue_serial_numbers,
			relaxed);
}
415
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_set_bound_thread(dispatch_queue_t dq)
{
	// Tag thread-bound queues with the owning thread's port.
	dispatch_assert(dq->dq_is_thread_bound);
	dq->dq_thread = _dispatch_thread_port();
}
424
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_clear_bound_thread(dispatch_queue_t dq)
{
	// Detach a thread-bound queue from its owning thread.
	dispatch_assert(dq->dq_is_thread_bound);
	dq->dq_thread = MACH_PORT_NULL;
}
432
DISPATCH_ALWAYS_INLINE
static inline mach_port_t
_dispatch_queue_get_bound_thread(dispatch_queue_t dq)
{
	// Returns the owning thread's port for a thread-bound queue.
	dispatch_assert(dq->dq_is_thread_bound);
	return dq->dq_thread;
}
440
441#pragma mark -
442#pragma mark dispatch_priority
443
DISPATCH_ALWAYS_INLINE
static inline pthread_priority_t
_dispatch_get_defaultpriority(void)
{
	// Per-thread "default priority" (queue priority the thread is
	// draining at, plus flag bits), kept in thread-specific storage.
#if HAVE_PTHREAD_WORKQUEUE_QOS
	pthread_priority_t priority = (uintptr_t)_dispatch_thread_getspecific(
			dispatch_defaultpriority_key);
	return priority;
#else
	return 0;
#endif
}
456
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_reset_defaultpriority(pthread_priority_t priority)
{
	// Restore a default priority previously returned by
	// _dispatch_set_defaultpriority, preserving any override flag set in
	// the meantime.
#if HAVE_PTHREAD_WORKQUEUE_QOS
	pthread_priority_t old_priority = _dispatch_get_defaultpriority();
	// if an inner-loop or'd in the override flag to the per-thread priority,
	// it needs to be propagated up the chain
	priority |= old_priority & _PTHREAD_PRIORITY_OVERRIDE_FLAG;

	if (slowpath(priority != old_priority)) {
		_dispatch_thread_setspecific(dispatch_defaultpriority_key,
				(void*)priority);
	}
#else
	(void)priority;
#endif
}
475
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_set_defaultpriority_override(void)
{
	// Mark the per-thread default priority as overridden so the root
	// queue learns this thread received a QOS override.
#if HAVE_PTHREAD_WORKQUEUE_QOS
	pthread_priority_t old_priority = _dispatch_get_defaultpriority();
	pthread_priority_t priority = old_priority |
			_PTHREAD_PRIORITY_OVERRIDE_FLAG;

	if (slowpath(priority != old_priority)) {
		_dispatch_thread_setspecific(dispatch_defaultpriority_key,
				(void*)priority);
	}
#endif
}
491
DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_reset_defaultpriority_override(void)
{
	// Clear the override flag from the per-thread default priority.
	// Returns true if the flag was actually set (and thus cleared).
#if HAVE_PTHREAD_WORKQUEUE_QOS
	pthread_priority_t old_priority = _dispatch_get_defaultpriority();
	pthread_priority_t priority = old_priority &
			~((pthread_priority_t)_PTHREAD_PRIORITY_OVERRIDE_FLAG);

	if (slowpath(priority != old_priority)) {
		_dispatch_thread_setspecific(dispatch_defaultpriority_key,
				(void*)priority);
		return true;
	}
#endif
	return false;
}
509
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_priority_inherit_from_target(dispatch_queue_t dq,
		dispatch_queue_t tq)
{
	// If dq has no priority of its own (unset or already inherited) and
	// its target tq is a root queue, adopt tq's priority, replacing the
	// rootqueue flag with the inherited flag.
#if HAVE_PTHREAD_WORKQUEUE_QOS
	const pthread_priority_t rootqueue_flag = _PTHREAD_PRIORITY_ROOTQUEUE_FLAG;
	const pthread_priority_t inherited_flag = _PTHREAD_PRIORITY_INHERIT_FLAG;
	pthread_priority_t dqp = dq->dq_priority, tqp = tq->dq_priority;
	if ((!dqp || (dqp & inherited_flag)) && (tqp & rootqueue_flag)) {
		dq->dq_priority = (tqp & ~rootqueue_flag) | inherited_flag;
	}
#else
	(void)dq; (void)tq;
#endif
}
526
DISPATCH_ALWAYS_INLINE
static inline pthread_priority_t
_dispatch_set_defaultpriority(pthread_priority_t priority)
{
	// Install a new per-thread default priority and return the previous
	// one (for a later _dispatch_reset_defaultpriority). When a default
	// priority is already set, the new value is floored at the old base
	// priority (rdar://16349734) and inherits the old override flag.
#if HAVE_PTHREAD_WORKQUEUE_QOS
	pthread_priority_t old_priority = _dispatch_get_defaultpriority();
	if (old_priority) {
		pthread_priority_t flags, defaultqueue, basepri;
		flags = (priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG);
		defaultqueue = (old_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG);
		basepri = (old_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK);
		priority &= ~_PTHREAD_PRIORITY_FLAGS_MASK;
		if (!priority) {
			// No explicit QOS requested: inherit the old base priority.
			flags = _PTHREAD_PRIORITY_INHERIT_FLAG | defaultqueue;
			priority = basepri;
		} else if (priority < basepri && !defaultqueue) { // rdar://16349734
			priority = basepri;
		}
		priority |= flags | (old_priority & _PTHREAD_PRIORITY_OVERRIDE_FLAG);
	}
	if (slowpath(priority != old_priority)) {
		_dispatch_thread_setspecific(dispatch_defaultpriority_key,
				(void*)priority);
	}
	return old_priority;
#else
	(void)priority;
	return 0;
#endif
}
557
DISPATCH_ALWAYS_INLINE
static inline pthread_priority_t
_dispatch_priority_adopt(pthread_priority_t priority, unsigned long flags)
{
	// Decide which priority a block should run at: the requested
	// priority (when it is meant to be enforced) or the thread's current
	// default priority. Enforcement can come from the caller's flags or
	// the priority's own enforce bit, and is adjusted for inherited and
	// default-queue defaults (rdar://16349734).
#if HAVE_PTHREAD_WORKQUEUE_QOS
	pthread_priority_t defaultpri = _dispatch_get_defaultpriority();
	bool enforce, inherited, defaultqueue;
	enforce = (flags & DISPATCH_PRIORITY_ENFORCE) ||
			(priority & _PTHREAD_PRIORITY_ENFORCE_FLAG);
	inherited = (defaultpri & _PTHREAD_PRIORITY_INHERIT_FLAG);
	defaultqueue = (defaultpri & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG);
	defaultpri &= ~_PTHREAD_PRIORITY_FLAGS_MASK;
	priority &= ~_PTHREAD_PRIORITY_FLAGS_MASK;
	if (!priority) {
		// No requested priority: always fall back to the default.
		enforce = false;
	} else if (!enforce) {
		if (priority < defaultpri) {
			if (defaultqueue) enforce = true; // rdar://16349734
		} else if (inherited || defaultqueue) {
			enforce = true;
		}
	} else if (priority < defaultpri && !defaultqueue) { // rdar://16349734
		enforce = false;
	}
	return enforce ? priority : defaultpri;
#else
	(void)priority; (void)flags;
	return 0;
#endif
}
588
DISPATCH_ALWAYS_INLINE
static inline pthread_priority_t
_dispatch_get_priority(void)
{
	// Current thread priority from thread-specific storage, with flag
	// bits stripped (QOS class bits only).
#if HAVE_PTHREAD_WORKQUEUE_QOS
	pthread_priority_t priority = (uintptr_t)_dispatch_thread_getspecific(
			dispatch_priority_key);
	return (priority & ~_PTHREAD_PRIORITY_FLAGS_MASK);
#else
	return 0;
#endif
}
601
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_set_priority_and_mach_voucher(pthread_priority_t priority,
		mach_voucher_t kv)
{
	// Atomically update the current thread's QOS class and/or kernel
	// voucher in a single _pthread_set_properties_self call. Either part
	// is skipped when there is nothing to change.
#if HAVE_PTHREAD_WORKQUEUE_QOS
	_pthread_set_flags_t flags = 0;
	if (priority && _dispatch_set_qos_class_enabled) {
		pthread_priority_t old_priority = _dispatch_get_priority();
		// Only change QOS when a previous priority exists and differs.
		if (priority != old_priority && old_priority) {
			flags |= _PTHREAD_SET_SELF_QOS_FLAG;
		}
	}
	if (kv != VOUCHER_NO_MACH_VOUCHER) {
#if VOUCHER_USE_MACH_VOUCHER
		flags |= _PTHREAD_SET_SELF_VOUCHER_FLAG;
#endif
	}
	if (!flags) return;
	int r = _pthread_set_properties_self(flags, priority, kv);
	(void)dispatch_assume_zero(r);
#elif VOUCHER_USE_MACH_VOUCHER
#error Invalid build configuration
#else
	(void)priority; (void)kv;
#endif
}
629
DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
static inline voucher_t
_dispatch_set_priority_and_adopt_voucher(pthread_priority_t priority,
		voucher_t voucher)
{
	// Apply priority and adopt a voucher on the current thread. Returns
	// the previously adopted voucher (caller owns releasing it), or
	// DISPATCH_NO_VOUCHER when no voucher swap was requested.
	pthread_priority_t p = (priority != DISPATCH_NO_PRIORITY) ? priority : 0;
	voucher_t ov = DISPATCH_NO_VOUCHER;
	mach_voucher_t kv = VOUCHER_NO_MACH_VOUCHER;
	if (voucher != DISPATCH_NO_VOUCHER) {
		ov = _voucher_get();
		kv = _voucher_swap_and_get_mach_voucher(ov, voucher);
	}
	_dispatch_set_priority_and_mach_voucher(p, kv);
	return ov;
}
645
DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
static inline voucher_t
_dispatch_adopt_priority_and_voucher(pthread_priority_t priority,
		voucher_t voucher, unsigned long flags)
{
	// Like _dispatch_set_priority_and_adopt_voucher, but first resolves
	// the effective priority through _dispatch_priority_adopt.
	pthread_priority_t p = 0;
	if (priority != DISPATCH_NO_PRIORITY) {
		p = _dispatch_priority_adopt(priority, flags);
	}
	return _dispatch_set_priority_and_adopt_voucher(p, voucher);
}
657
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_adopt_priority_and_replace_voucher(pthread_priority_t priority,
		voucher_t voucher, unsigned long flags)
{
	// Adopt priority/voucher and release the previously adopted voucher
	// (only meaningful when a voucher swap actually occurred).
	voucher_t ov;
	ov = _dispatch_adopt_priority_and_voucher(priority, voucher, flags);
	if (voucher != DISPATCH_NO_VOUCHER && ov) _voucher_release(ov);
}
667
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_set_priority_and_replace_voucher(pthread_priority_t priority,
		voucher_t voucher)
{
	// Set priority/voucher as-is (no priority adoption logic) and
	// release the previously adopted voucher if one was swapped out.
	voucher_t ov;
	ov = _dispatch_set_priority_and_adopt_voucher(priority, voucher);
	if (voucher != DISPATCH_NO_VOUCHER && ov) _voucher_release(ov);
}
677
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_set_priority(pthread_priority_t priority)
{
	// Priority-only variant: leaves the thread's voucher untouched.
	_dispatch_set_priority_and_mach_voucher(priority, VOUCHER_NO_MACH_VOUCHER);
}
684
685DISPATCH_ALWAYS_INLINE
686static inline pthread_priority_t
687_dispatch_priority_normalize(pthread_priority_t pp)
688{
689	dispatch_assert_zero(pp & ~(pthread_priority_t)
690			_PTHREAD_PRIORITY_QOS_CLASS_MASK);
691	unsigned int qosbits = (unsigned int)pp, idx;
692	if (!qosbits) return 0;
693	idx = (unsigned int)(sizeof(qosbits)*8) -
694			(unsigned int)__builtin_clz(qosbits) - 1;
695	return (1 << idx);
696}
697
DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_queue_need_override(dispatch_queue_t dq, pthread_priority_t pp)
{
	// True when pp's QOS class exceeds dq's current override level.
	// Root queues never need an override from this path.
	if (!pp || dx_type(dq) == DISPATCH_QUEUE_ROOT_TYPE) return false;
	uint32_t p = (pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK);
	uint32_t o = dq->dq_override;
	return (o < p);
}
707
708DISPATCH_ALWAYS_INLINE
709static inline bool
710_dispatch_queue_need_override_retain(dispatch_queue_t dq, pthread_priority_t pp)
711{
712	bool override = _dispatch_queue_need_override(dq, pp);
713	if (override) _dispatch_retain(dq);
714	return override;
715}
716
DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_queue_override_priority(dispatch_queue_t dq, pthread_priority_t pp)
{
	// Raise dq's override level to pp's QOS class if it is higher.
	// dq_override accumulates OR'd QOS bits (readers normalize them via
	// _dispatch_priority_normalize). Returns true when this call raised
	// the level.
	uint32_t p = (pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK);
	uint32_t o = dq->dq_override;
	if (o < p) o = dispatch_atomic_or_orig2o(dq, dq_override, p, relaxed);
	return (o < p);
}
726
DISPATCH_ALWAYS_INLINE
static inline pthread_priority_t
_dispatch_queue_get_override_priority(dispatch_queue_t dq)
{
	// Read dq's override level, normalized to a single QOS bit unless it
	// already equals the queue's own QOS class.
	uint32_t p = (dq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK);
	uint32_t o = dq->dq_override;
	if (o == p) return o;
	return _dispatch_priority_normalize(o);
}
736
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_set_override_priority(dispatch_queue_t dq)
{
	// Reset dq's override level to its own QOS class (or 0 for
	// default-queue priority queues).
	uint32_t p = 0;
	if (!(dq->dq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG)) {
		p = dq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK;
	}
	dispatch_atomic_store2o(dq, dq_override, p, relaxed);
}
747
DISPATCH_ALWAYS_INLINE
static inline pthread_priority_t
_dispatch_queue_reset_override_priority(dispatch_queue_t dq)
{
	// Atomically reset dq's override level to its base (as in
	// _dispatch_queue_set_override_priority) and return the previous
	// level, normalized to a single QOS bit.
	uint32_t p = 0;
	if (!(dq->dq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG)) {
		p = dq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK;
	}
	uint32_t o = dispatch_atomic_xchg2o(dq, dq_override, p, relaxed);
	if (o == p) return o;
	return _dispatch_priority_normalize(o);
}
760
DISPATCH_ALWAYS_INLINE
static inline pthread_priority_t
_dispatch_priority_propagate(void)
{
	// Priority to propagate to asynchronous work submitted from this
	// thread, capped at user-initiated.
#if HAVE_PTHREAD_WORKQUEUE_QOS
	pthread_priority_t priority = _dispatch_get_priority();
	if (priority > _dispatch_user_initiated_priority) {
		// Cap QOS for propagation at user-initiated <rdar://16681262&16998036>
		priority = _dispatch_user_initiated_priority;
	}
	return priority;
#else
	return 0;
#endif
}
776
// including maintenance
DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_is_background_thread(void)
{
	// True when the current thread runs at background QOS or lower
	// (maintenance); false when no priority is set.
#if HAVE_PTHREAD_WORKQUEUE_QOS
	pthread_priority_t priority;
	priority = _dispatch_get_priority();
	return priority && (priority <= _dispatch_background_priority);
#else
	return false;
#endif
}
790
791#pragma mark -
792#pragma mark dispatch_block_t
793
794#ifdef __BLOCKS__
795
DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_block_has_private_data(const dispatch_block_t block)
{
	// Blocks created by dispatch_block_create use a distinguished invoke
	// function; its identity marks blocks that carry private data.
	extern void (*_dispatch_block_special_invoke)(void*);
	return (_dispatch_Block_invoke(block) == _dispatch_block_special_invoke);
}
803
DISPATCH_ALWAYS_INLINE
static inline dispatch_block_private_data_t
_dispatch_block_get_data(const dispatch_block_t db)
{
	// Recover the private-data structure captured inside a block made by
	// dispatch_block_create, or NULL for ordinary blocks. This walks the
	// block's capture area by hand, so the offsets below must match the
	// creation side exactly.
	if (!_dispatch_block_has_private_data(db)) {
		return NULL;
	}
	// Keep in sync with _dispatch_block_create implementation
	uint8_t *x = (uint8_t *)db;
	// x points to base of struct Block_layout
	x += sizeof(struct Block_layout);
	// x points to address of captured block
	x += sizeof(dispatch_block_t);
#if USE_OBJC
	// x points to address of captured voucher
	x += sizeof(voucher_t);
#endif
	// x points to base of captured dispatch_block_private_data_s structure
	dispatch_block_private_data_t dbpd = (dispatch_block_private_data_t)x;
	if (dbpd->dbpd_magic != DISPATCH_BLOCK_PRIVATE_DATA_MAGIC) {
		DISPATCH_CRASH("Corruption of dispatch block object");
	}
	return dbpd;
}
828
829DISPATCH_ALWAYS_INLINE
830static inline pthread_priority_t
831_dispatch_block_get_priority(const dispatch_block_t db)
832{
833	dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db);
834	return dbpd ? dbpd->dbpd_priority : 0;
835}
836
837DISPATCH_ALWAYS_INLINE
838static inline dispatch_block_flags_t
839_dispatch_block_get_flags(const dispatch_block_t db)
840{
841	dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db);
842	return dbpd ? dbpd->dbpd_flags : 0;
843}
844
// Test a flag bit captured in a dispatch block object's private data;
// evaluates to false for blocks without private data (flags == 0).
#define DISPATCH_BLOCK_HAS(flag, db) \
		((_dispatch_block_get_flags((db)) & DISPATCH_BLOCK_HAS_ ## flag) != 0)
#define DISPATCH_BLOCK_IS(flag, db) \
		((_dispatch_block_get_flags((db)) & DISPATCH_BLOCK_ ## flag) != 0)
849
850#endif
851
852#pragma mark -
853#pragma mark dispatch_continuation_t
854
DISPATCH_ALWAYS_INLINE
static inline dispatch_continuation_t
_dispatch_continuation_alloc_cacheonly(void)
{
	// Pop a continuation from the per-thread free list, or NULL when the
	// cache is empty. No locking needed: the cache is thread-local.
	dispatch_continuation_t dc = (dispatch_continuation_t)
			fastpath(_dispatch_thread_getspecific(dispatch_cache_key));
	if (dc) {
		_dispatch_thread_setspecific(dispatch_cache_key, dc->do_next);
	}
	return dc;
}
866
867DISPATCH_ALWAYS_INLINE
868static inline dispatch_continuation_t
869_dispatch_continuation_alloc(void)
870{
871	dispatch_continuation_t dc =
872			fastpath(_dispatch_continuation_alloc_cacheonly());
873	if(!dc) {
874		return _dispatch_continuation_alloc_from_heap();
875	}
876	return dc;
877}
878
DISPATCH_ALWAYS_INLINE
static inline dispatch_continuation_t
_dispatch_continuation_free_cacheonly(dispatch_continuation_t dc)
{
	// Push dc onto the per-thread free list. Returns NULL on success, or
	// dc itself when the cache is at its limit (caller must free it via
	// _dispatch_continuation_free_to_cache_limit).
	dispatch_continuation_t prev_dc = (dispatch_continuation_t)
			fastpath(_dispatch_thread_getspecific(dispatch_cache_key));
	int cnt = prev_dc ? prev_dc->dc_cache_cnt + 1 : 1;
	// Cap continuation cache
	if (slowpath(cnt > _dispatch_continuation_cache_limit)) {
		return dc;
	}
	dc->do_next = prev_dc;
	dc->dc_cache_cnt = cnt;
	_dispatch_thread_setspecific(dispatch_cache_key, dc);
	return NULL;
}
895
896DISPATCH_ALWAYS_INLINE
897static inline void
898_dispatch_continuation_free(dispatch_continuation_t dc)
899{
900	dc = _dispatch_continuation_free_cacheonly(dc);
901	if (slowpath(dc)) {
902		_dispatch_continuation_free_to_cache_limit(dc);
903	}
904}
905
906#include "trace.h"
907
DISPATCH_ALWAYS_INLINE_NDEBUG
static inline void
_dispatch_continuation_pop(dispatch_object_t dou)
{
	// Execute one dequeued work item: either a vtable object (dispatch
	// via dx_invoke) or a plain continuation (run the client function,
	// then perform group-leave and cache bookkeeping).
	dispatch_continuation_t dc = dou._dc, dc1;
	dispatch_group_t dg;

	_dispatch_trace_continuation_pop(_dispatch_queue_get_current(), dou);
	if (DISPATCH_OBJ_IS_VTABLE(dou._do)) {
		return dx_invoke(dou._do);
	}

	// Add the item back to the cache before calling the function. This
	// allows the 'hot' continuation to be used for a quick callback.
	//
	// The ccache version is per-thread.
	// Therefore, the object has not been reused yet.
	// This generates better assembly.
	if ((long)dc->do_vtable & DISPATCH_OBJ_ASYNC_BIT) {
		_dispatch_continuation_voucher_adopt(dc);
		dc1 = _dispatch_continuation_free_cacheonly(dc);
	} else {
		dc1 = NULL;
	}
	if ((long)dc->do_vtable & DISPATCH_OBJ_GROUP_BIT) {
		dg = dc->dc_data;
	} else {
		dg = NULL;
	}
	_dispatch_client_callout(dc->dc_ctxt, dc->dc_func);
	if (dg) {
		// Balance the enter/retain performed when the group async was
		// submitted.
		dispatch_group_leave(dg);
		_dispatch_release(dg);
	}
	_dispatch_introspection_queue_item_complete(dou);
	if (slowpath(dc1)) {
		// Cache was full when we tried to recycle dc above.
		_dispatch_continuation_free_to_cache_limit(dc1);
	}
}
947
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_continuation_priority_set(dispatch_continuation_t dc,
		pthread_priority_t pp, dispatch_block_flags_t flags)
{
	// Record the priority a continuation should run at: the explicit pp
	// when the block carries one, otherwise the propagated thread
	// priority (unless QOS propagation is disabled by flags).
#if HAVE_PTHREAD_WORKQUEUE_QOS
	pthread_priority_t prio = 0;
	if (flags & DISPATCH_BLOCK_HAS_PRIORITY) {
		prio = pp;
	} else if (!(flags & DISPATCH_BLOCK_NO_QOS_CLASS)) {
		prio = _dispatch_priority_propagate();
	}
	if (flags & DISPATCH_BLOCK_ENFORCE_QOS_CLASS) {
		prio |= _PTHREAD_PRIORITY_ENFORCE_FLAG;
	}
	dc->dc_priority = prio;
#else
	(void)dc; (void)pp; (void)flags;
#endif
}
968
DISPATCH_ALWAYS_INLINE
static inline pthread_priority_t
_dispatch_continuation_get_override_priority(dispatch_queue_t dq,
		dispatch_continuation_t dc)
{
	// Priority a continuation should override dq at: the continuation's
	// own QOS when it is enforced (or the queue has no QOS / is a
	// default-priority queue), otherwise the queue's QOS.
#if HAVE_PTHREAD_WORKQUEUE_QOS
	pthread_priority_t p = dc->dc_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK;
	bool enforce = dc->dc_priority & _PTHREAD_PRIORITY_ENFORCE_FLAG;
	pthread_priority_t dqp = dq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK;
	bool defaultqueue = dq->dq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG;
	if (!p) {
		enforce = false;
	} else if (!enforce && (!dqp || defaultqueue)) {
		enforce = true;
	}
	if (!enforce) {
		p = dqp;
	}
	return p;
#else
	(void)dq; (void)dc;
	return 0;
#endif
}
993
994#endif // !(USE_OBJC && __OBJC2__)
995
996#endif /* __DISPATCH_INLINE_INTERNAL__ */
997