/*
 * Copyright (c) 2010-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

/*
 * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch
 * which are subject to change in future releases of Mac OS X. Any applications
 * relying on these interfaces WILL break.
 */

#ifndef __DISPATCH_TRACE__
#define __DISPATCH_TRACE__

#if !__OBJC2__

#if DISPATCH_USE_DTRACE || DISPATCH_USE_DTRACE_INTROSPECTION
typedef struct dispatch_trace_timer_params_s {
	int64_t deadline, interval, leeway;
} *dispatch_trace_timer_params_t;

#include "provider.h"
#endif // DISPATCH_USE_DTRACE || DISPATCH_USE_DTRACE_INTROSPECTION
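/*
 * Orientation note: provider.h is expected to define the DTrace probe
 * macros used below (DISPATCH_CALLOUT_ENTRY, DISPATCH_QUEUE_PUSH,
 * DISPATCH_TIMER_PROGRAM, ...) and their matching *_ENABLED() predicates,
 * presumably generated from a provider.d description.
 * dispatch_trace_timer_params_s carries the deadline, interval and leeway
 * values handed to the timer probes; -1 is used as an overflow/"forever"
 * sentinel (see _dispatch_trace_timer_params() below).
 */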

#if DISPATCH_USE_DTRACE_INTROSPECTION
#define _dispatch_trace_callout(_c, _f, _dcc) do { \
		if (slowpath(DISPATCH_CALLOUT_ENTRY_ENABLED()) || \
				slowpath(DISPATCH_CALLOUT_RETURN_ENABLED())) { \
			dispatch_queue_t _dq = _dispatch_queue_get_current(); \
			const char *_label = _dq && _dq->dq_label ? _dq->dq_label : ""; \
			dispatch_function_t _func = (dispatch_function_t)(_f); \
			void *_ctxt = (_c); \
			DISPATCH_CALLOUT_ENTRY(_dq, _label, _func, _ctxt); \
			_dcc; \
			DISPATCH_CALLOUT_RETURN(_dq, _label, _func, _ctxt); \
		} else { \
			_dcc; \
		} \
	} while (0)
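/*
 * Sketch of the macro above (illustrative only): when a DTrace consumer
 * has enabled either callout probe, the callout expression _dcc is
 * bracketed by ENTRY and RETURN probes carrying the current queue, its
 * label, the function and the context; otherwise _dcc runs with no extra
 * work beyond the two predicate checks.
 *
 *	// probes enabled:
 *	//   DISPATCH_CALLOUT_ENTRY(dq, label, func, ctxt);
 *	//   _dispatch_client_callout(ctxt, f);
 *	//   DISPATCH_CALLOUT_RETURN(dq, label, func, ctxt);
 *	// probes disabled:
 *	//   _dispatch_client_callout(ctxt, f);
 */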
#elif DISPATCH_INTROSPECTION
#define _dispatch_trace_callout(_c, _f, _dcc) \
		do { (void)(_c); (void)(_f); _dcc; } while (0)
#endif // DISPATCH_USE_DTRACE_INTROSPECTION || DISPATCH_INTROSPECTION

#if DISPATCH_USE_DTRACE_INTROSPECTION || DISPATCH_INTROSPECTION
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_trace_client_callout(void *ctxt, dispatch_function_t f)
{
	dispatch_function_t func = (f == _dispatch_call_block_and_release &&
			ctxt ? _dispatch_Block_invoke(ctxt) : f);
	_dispatch_introspection_callout_entry(ctxt, func);
	_dispatch_trace_callout(ctxt, func, _dispatch_client_callout(ctxt, f));
	_dispatch_introspection_callout_return(ctxt, func);
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_trace_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t))
{
	dispatch_function_t func = (dispatch_function_t)f;
	_dispatch_introspection_callout_entry(ctxt, func);
	_dispatch_trace_callout(ctxt, func, _dispatch_client_callout2(ctxt, i, f));
	_dispatch_introspection_callout_return(ctxt, func);
}

#define _dispatch_client_callout		_dispatch_trace_client_callout
#define _dispatch_client_callout2		_dispatch_trace_client_callout2
#endif // DISPATCH_USE_DTRACE_INTROSPECTION || DISPATCH_INTROSPECTION
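/*
 * With introspection enabled, the two #defines above reroute every call to
 * _dispatch_client_callout() and _dispatch_client_callout2() made by files
 * that include this header through the tracing wrappers. The wrappers also
 * unwrap _dispatch_call_block_and_release so the probes report the user's
 * block invoke function rather than the internal trampoline.
 */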

#if DISPATCH_USE_DTRACE_INTROSPECTION
#define _dispatch_trace_continuation(_q, _o, _t) do { \
		dispatch_queue_t _dq = (_q); \
		const char *_label = _dq && _dq->dq_label ? _dq->dq_label : ""; \
		struct dispatch_object_s *_do = (_o); \
		dispatch_continuation_t _dc; \
		char *_kind; \
		dispatch_function_t _func; \
		void *_ctxt; \
		if (DISPATCH_OBJ_IS_VTABLE(_do)) { \
			_kind = (char*)dx_kind(_do); \
			if ((dx_type(_do) & _DISPATCH_META_TYPE_MASK) == \
					_DISPATCH_SOURCE_TYPE && (_dq) != &_dispatch_mgr_q) { \
				dispatch_source_t _ds = (dispatch_source_t)_do; \
				_dc = _ds->ds_refs->ds_handler[DS_EVENT_HANDLER]; \
				_func = _dc->dc_func; \
				_ctxt = _dc->dc_ctxt; \
			} else { \
				_func = (dispatch_function_t)_dispatch_queue_invoke; \
				_ctxt = _do->do_ctxt; \
			} \
		} else { \
			_dc = (void*)_do; \
			_ctxt = _dc->dc_ctxt; \
			if ((long)_dc->do_vtable & DISPATCH_OBJ_SYNC_SLOW_BIT) { \
				_kind = "semaphore"; \
				_func = (dispatch_function_t)dispatch_semaphore_signal; \
			} else if (_dc->dc_func == _dispatch_call_block_and_release) { \
				_kind = "block"; \
				_func = _dispatch_Block_invoke(_dc->dc_ctxt); \
			} else { \
				_kind = "function"; \
				_func = _dc->dc_func; \
			} \
		} \
		_t(_dq, _label, _do, _kind, _func, _ctxt); \
	} while (0)
#elif DISPATCH_INTROSPECTION
#define _dispatch_trace_continuation(_q, _o, _t) \
		do { (void)(_q); (void)(_o); } while(0)
#define DISPATCH_QUEUE_PUSH_ENABLED() 0
#define DISPATCH_QUEUE_POP_ENABLED() 0
#endif // DISPATCH_USE_DTRACE_INTROSPECTION || DISPATCH_INTROSPECTION
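/*
 * _dispatch_trace_continuation() classifies the object being pushed or
 * popped before firing the probe passed as _t: vtable objects report their
 * dx_kind() (sources are resolved to their event handler), while plain
 * continuations report as "semaphore" (slow-path dispatch_sync wakeup),
 * "block" or "function". The DISPATCH_INTROSPECTION-only variant stubs the
 * PUSH/POP predicates to 0 so the wrappers below build without DTrace.
 */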

#if DISPATCH_USE_DTRACE_INTROSPECTION || DISPATCH_INTROSPECTION
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_trace_queue_push_list(dispatch_queue_t dq, dispatch_object_t _head,
		dispatch_object_t _tail, pthread_priority_t pp, unsigned int n)
{
	if (slowpath(DISPATCH_QUEUE_PUSH_ENABLED())) {
		struct dispatch_object_s *dou = _head._do;
		do {
			_dispatch_trace_continuation(dq, dou, DISPATCH_QUEUE_PUSH);
		} while (dou != _tail._do && (dou = dou->do_next));
	}
	_dispatch_introspection_queue_push_list(dq, _head, _tail);
	_dispatch_queue_push_list(dq, _head, _tail, pp, n);
}
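/*
 * For a batched push the PUSH probe fires once per object: the loop above
 * walks the list from _head to _tail via do_next before handing the batch
 * to the real _dispatch_queue_push_list().
 */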

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_trace_queue_push(dispatch_queue_t dq, dispatch_object_t _tail,
		pthread_priority_t pp)
{
	if (slowpath(DISPATCH_QUEUE_PUSH_ENABLED())) {
		struct dispatch_object_s *dou = _tail._do;
		_dispatch_trace_continuation(dq, dou, DISPATCH_QUEUE_PUSH);
	}
	_dispatch_introspection_queue_push(dq, _tail);
	_dispatch_queue_push(dq, _tail, pp);
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_trace_queue_push_wakeup(dispatch_queue_t dq, dispatch_object_t _tail,
		pthread_priority_t pp, bool wakeup)
{
	if (slowpath(DISPATCH_QUEUE_PUSH_ENABLED())) {
		struct dispatch_object_s *dou = _tail._do;
		_dispatch_trace_continuation(dq, dou, DISPATCH_QUEUE_PUSH);
	}
	_dispatch_introspection_queue_push(dq, _tail);
	_dispatch_queue_push_wakeup(dq, _tail, pp, wakeup);
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_trace_continuation_push(dispatch_queue_t dq, dispatch_object_t _tail)
{
	if (slowpath(DISPATCH_QUEUE_PUSH_ENABLED())) {
		struct dispatch_object_s *dou = _tail._do;
		_dispatch_trace_continuation(dq, dou, DISPATCH_QUEUE_PUSH);
	}
	_dispatch_introspection_queue_push(dq, _tail);
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_push_notrace(dispatch_queue_t dq, dispatch_object_t dou,
		pthread_priority_t pp)
{
	_dispatch_queue_push(dq, dou, pp);
}

#define _dispatch_queue_push_list _dispatch_trace_queue_push_list
#define _dispatch_queue_push _dispatch_trace_queue_push
#define _dispatch_queue_push_wakeup _dispatch_trace_queue_push_wakeup
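/*
 * The three #defines above swap the tracing wrappers in for the regular
 * queue push entry points in files that include this header;
 * _dispatch_queue_push_notrace() remains available for pushes that should
 * bypass the push probes.
 */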

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_trace_continuation_pop(dispatch_queue_t dq, dispatch_object_t dou)
{
	if (slowpath(DISPATCH_QUEUE_POP_ENABLED())) {
		_dispatch_trace_continuation(dq, dou._do, DISPATCH_QUEUE_POP);
	}
	_dispatch_introspection_queue_pop(dq, dou);
}
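/*
 * Pop tracing mirrors push tracing: the POP probe fires (when enabled) as
 * an object is dequeued for invocation, and the introspection hook is
 * called either way.
 */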
#else
#define _dispatch_queue_push_notrace _dispatch_queue_push
#define _dispatch_trace_continuation_push(dq, dou) \
		do { (void)(dq); (void)(dou); } while(0)
#define _dispatch_trace_continuation_pop(dq, dou) \
		do { (void)(dq); (void)(dou); } while(0)
#endif // DISPATCH_USE_DTRACE_INTROSPECTION || DISPATCH_INTROSPECTION

#if DISPATCH_USE_DTRACE
static inline dispatch_function_t
_dispatch_trace_timer_function(dispatch_source_t ds, dispatch_source_refs_t dr)
{
	dispatch_continuation_t dc = dr->ds_handler[DS_EVENT_HANDLER];
	dispatch_function_t func = dc ? dc->dc_func : NULL;
	if (func == _dispatch_after_timer_callback &&
			!(ds->ds_atomic_flags & DSF_CANCELED)) {
		dc = ds->do_ctxt;
		func = dc->dc_func != _dispatch_call_block_and_release ? dc->dc_func :
				dc->dc_ctxt ? _dispatch_Block_invoke(dc->dc_ctxt) : NULL;
	}
	return func;
}
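/*
 * _dispatch_trace_timer_function() picks the function reported by the
 * timer probes: normally the source's event handler, but for the timers
 * backing dispatch_after() it digs the user's function or block out of
 * do_ctxt so the probes do not merely show the internal
 * _dispatch_after_timer_callback trampoline.
 */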

DISPATCH_ALWAYS_INLINE
static inline dispatch_trace_timer_params_t
_dispatch_trace_timer_params(uintptr_t ident,
		struct dispatch_timer_source_s *values, uint64_t deadline,
		dispatch_trace_timer_params_t params)
{
	#define _dispatch_trace_time2nano3(t) (DISPATCH_TIMER_KIND(ident) \
			== DISPATCH_TIMER_KIND_MACH ? _dispatch_time_mach2nano(t) : (t))
	#define _dispatch_trace_time2nano2(v, t) ({ uint64_t _t = (t); \
			(v) >= INT64_MAX ? -1ll : (int64_t)_dispatch_trace_time2nano3(_t);})
	#define _dispatch_trace_time2nano(v) ({ uint64_t _t; \
			_t = _dispatch_trace_time2nano3(v); _t >= INT64_MAX ? -1ll : \
			(int64_t)_t; })
	if (deadline) {
		params->deadline = (int64_t)deadline;
	} else {
		uint64_t now = (DISPATCH_TIMER_KIND(ident) ==
				DISPATCH_TIMER_KIND_MACH ? _dispatch_absolute_time() :
				_dispatch_get_nanoseconds());
		params->deadline = _dispatch_trace_time2nano2(values->target,
				values->target < now ? 0 : values->target - now);
	}
	params->interval = _dispatch_trace_time2nano(values->interval);
	params->leeway = _dispatch_trace_time2nano(values->leeway);
	return params;
}
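/*
 * _dispatch_trace_timer_params() normalizes the probe arguments: mach-time
 * values are converted to nanoseconds, a zero deadline is replaced by the
 * remaining time until values->target, and anything that would overflow
 * int64_t (such as DISPATCH_TIME_FOREVER) collapses to -1.
 */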

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_trace_timer_configure_enabled(void)
{
	return slowpath(DISPATCH_TIMER_CONFIGURE_ENABLED());
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_trace_timer_configure(dispatch_source_t ds, uintptr_t ident,
		struct dispatch_timer_source_s *values)
{
	struct dispatch_trace_timer_params_s params;
	DISPATCH_TIMER_CONFIGURE(ds, _dispatch_trace_timer_function(ds,
			ds->ds_refs), _dispatch_trace_timer_params(ident, values, 0,
			&params));
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_trace_timer_program(dispatch_source_refs_t dr, uint64_t deadline)
{
	if (slowpath(DISPATCH_TIMER_PROGRAM_ENABLED())) {
		if (deadline && dr) {
			dispatch_source_t ds = _dispatch_source_from_refs(dr);
			struct dispatch_trace_timer_params_s params;
			DISPATCH_TIMER_PROGRAM(ds, _dispatch_trace_timer_function(ds, dr),
					_dispatch_trace_timer_params(ds->ds_ident_hack,
					&ds_timer(dr), deadline, &params));
		}
	}
}
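/*
 * DISPATCH_TIMER_PROGRAM, _WAKE and _FIRE are guarded by their *_ENABLED()
 * predicate before any arguments are marshalled; configure instead exposes
 * _dispatch_trace_timer_configure_enabled() so callers can perform the
 * same check themselves before building the params struct.
 */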

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_trace_timer_wake(dispatch_source_refs_t dr)
{
	if (slowpath(DISPATCH_TIMER_WAKE_ENABLED())) {
		if (dr) {
			dispatch_source_t ds = _dispatch_source_from_refs(dr);
			DISPATCH_TIMER_WAKE(ds, _dispatch_trace_timer_function(ds, dr));
		}
	}
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_trace_timer_fire(dispatch_source_refs_t dr, unsigned long data,
		unsigned long missed)
{
	if (slowpath(DISPATCH_TIMER_FIRE_ENABLED())) {
		if (!(data - missed) && dr) {
			dispatch_source_t ds = _dispatch_source_from_refs(dr);
			DISPATCH_TIMER_FIRE(ds, _dispatch_trace_timer_function(ds, dr));
		}
	}
}

#else

#define _dispatch_trace_timer_configure_enabled() false
#define _dispatch_trace_timer_configure(ds, ident, values) \
		do { (void)(ds); (void)(ident); (void)(values); } while(0)
#define _dispatch_trace_timer_program(dr, deadline) \
		do { (void)(dr); (void)(deadline); } while(0)
#define _dispatch_trace_timer_wake(dr) \
		do { (void)(dr); } while(0)
#define _dispatch_trace_timer_fire(dr, data, missed) \
		do { (void)(dr); (void)(data); (void)(missed); } while(0)

#endif // DISPATCH_USE_DTRACE

#endif // !__OBJC2__

#endif // __DISPATCH_TRACE__