/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * $FreeBSD: stable/10/sys/cddl/contrib/opensolaris/uts/common/sys/dtrace_impl.h 297077 2016-03-20 20:00:25Z mav $
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2011, Joyent, Inc. All rights reserved.
 */

#ifndef _SYS_DTRACE_IMPL_H
#define	_SYS_DTRACE_IMPL_H

#ifdef	__cplusplus
extern "C" {
#endif

/*
 * DTrace Dynamic Tracing Software: Kernel Implementation Interfaces
 *
 * Note: The contents of this file are private to the implementation of the
 * Solaris system and DTrace subsystem and are subject to change at any time
 * without notice.  Applications and drivers using these interfaces will fail
 * to run on future releases.  These interfaces should not be used for any
 * purpose except those expressly outlined in dtrace(7D) and libdtrace(3LIB).
 * Please refer to the "Solaris Dynamic Tracing Guide" for more information.
 */

#include <sys/dtrace.h>
#ifndef illumos
#ifdef __sparcv9
typedef uint32_t		pc_t;
#else
typedef uintptr_t		pc_t;
#endif
typedef	u_long			greg_t;
#endif

/*
 * DTrace Implementation Constants and Typedefs
 */
#define	DTRACE_MAXPROPLEN		128
#define	DTRACE_DYNVAR_CHUNKSIZE		256

struct dtrace_probe;
struct dtrace_ecb;
struct dtrace_predicate;
struct dtrace_action;
struct dtrace_provider;
struct dtrace_state;

typedef struct dtrace_probe dtrace_probe_t;
typedef struct dtrace_ecb dtrace_ecb_t;
typedef struct dtrace_predicate dtrace_predicate_t;
typedef struct dtrace_action dtrace_action_t;
typedef struct dtrace_provider dtrace_provider_t;
typedef struct dtrace_meta dtrace_meta_t;
typedef struct dtrace_state dtrace_state_t;
typedef uint32_t dtrace_optid_t;
typedef uint32_t dtrace_specid_t;
typedef uint64_t dtrace_genid_t;

/*
 * DTrace Probes
 *
 * The probe is the fundamental unit of the DTrace architecture.  Probes are
 * created by DTrace providers, and managed by the DTrace framework.  A probe
 * is identified by a unique <provider, module, function, name> tuple, and has
 * a unique probe identifier assigned to it.  (Some probes are not associated
 * with a specific point in text; these are called _unanchored probes_ and have
 * no module or function associated with them.)  Probes are represented as a
 * dtrace_probe structure.  To allow quick lookups based on each element of the
 * probe tuple, probes are hashed by each of provider, module, function and
 * name.  (If a lookup is performed based on a regular expression, a
 * dtrace_probekey is prepared, and a linear search is performed.)  Each probe
 * is additionally pointed to by a linear array indexed by its identifier.  The
 * identifier is the provider's mechanism for indicating to the DTrace
 * framework that a probe has fired:  the identifier is passed as the first
 * argument to dtrace_probe(), where it is then mapped into the corresponding
 * dtrace_probe structure.  From the dtrace_probe structure, dtrace_probe() can
 * iterate over the probe's list of enabling control blocks; see "DTrace
 * Enabling Control Blocks", below.
 */
struct dtrace_probe {
	dtrace_id_t dtpr_id;			/* probe identifier */
	dtrace_ecb_t *dtpr_ecb;			/* ECB list; see below */
	dtrace_ecb_t *dtpr_ecb_last;		/* last ECB in list */
	void *dtpr_arg;				/* provider argument */
	dtrace_cacheid_t dtpr_predcache;	/* predicate cache ID */
	int dtpr_aframes;			/* artificial frames */
	dtrace_provider_t *dtpr_provider;	/* pointer to provider */
	char *dtpr_mod;				/* probe's module name */
	char *dtpr_func;			/* probe's function name */
	char *dtpr_name;			/* probe's name */
	dtrace_probe_t *dtpr_nextmod;		/* next in module hash */
	dtrace_probe_t *dtpr_prevmod;		/* previous in module hash */
	dtrace_probe_t *dtpr_nextfunc;		/* next in function hash */
	dtrace_probe_t *dtpr_prevfunc;		/* previous in function hash */
	dtrace_probe_t *dtpr_nextname;		/* next in name hash */
	dtrace_probe_t *dtpr_prevname;		/* previous in name hash */
	dtrace_genid_t dtpr_gen;		/* probe generation ID */
};

typedef int dtrace_probekey_f(const char *, const char *, int);

typedef struct dtrace_probekey {
	char *dtpk_prov;			/* provider name to match */
	dtrace_probekey_f *dtpk_pmatch;		/* provider matching function */
	char *dtpk_mod;				/* module name to match */
	dtrace_probekey_f *dtpk_mmatch;		/* module matching function */
	char *dtpk_func;			/* func name to match */
	dtrace_probekey_f *dtpk_fmatch;		/* func matching function */
	char *dtpk_name;			/* name to match */
	dtrace_probekey_f *dtpk_nmatch;		/* name matching function */
	dtrace_id_t dtpk_id;			/* identifier to match */
} dtrace_probekey_t;
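
/*
 * An illustrative sketch of a dtrace_probekey_f:  each matching function
 * compares one component of a probe's tuple against the corresponding key
 * pattern, with the third argument bounding glob recursion (see
 * DTRACE_PROBEKEY_MAXDEPTH, below).  An exact-match function in this style,
 * in the spirit of dtrace_match_string() in dtrace.c:
 *
 *	static int
 *	dtrace_match_string(const char *s, const char *p, int depth)
 *	{
 *		return (s != NULL && strcmp(s, p) == 0);
 *	}
 */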

typedef struct dtrace_hashbucket {
	struct dtrace_hashbucket *dthb_next;	/* next on hash chain */
	dtrace_probe_t *dthb_chain;		/* chain of probes */
	int dthb_len;				/* number of probes here */
} dtrace_hashbucket_t;

typedef struct dtrace_hash {
	dtrace_hashbucket_t **dth_tab;		/* hash table */
	int dth_size;				/* size of hash table */
	int dth_mask;				/* mask to index into table */
	int dth_nbuckets;			/* total number of buckets */
	uintptr_t dth_nextoffs;			/* offset of next in probe */
	uintptr_t dth_prevoffs;			/* offset of prev in probe */
	uintptr_t dth_stroffs;			/* offset of str in probe */
} dtrace_hash_t;
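
/*
 * Because a dtrace_hash_t records the byte offsets of the next/prev links
 * and of the hashed string within dtrace_probe_t, a single hash
 * implementation can index probes by module, function or name.  An
 * illustrative sketch of bucket-chain traversal in this style (condensed;
 * cf. the DTRACE_HASHNEXT() and DTRACE_HASHSTR() macros in dtrace.c;
 * "target" is a hypothetical search string):
 *
 *	for (probe = bucket->dthb_chain; probe != NULL;
 *	    probe = *(dtrace_probe_t **)((uintptr_t)probe +
 *	    hash->dth_nextoffs)) {
 *		char *str = *(char **)((uintptr_t)probe + hash->dth_stroffs);
 *
 *		if (strcmp(str, target) == 0)
 *			return (probe);
 *	}
 */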

/*
 * DTrace Enabling Control Blocks
 *
 * When a provider wishes to fire a probe, it calls into dtrace_probe(),
 * passing the probe identifier as the first argument.  As described above,
 * dtrace_probe() maps the identifier into a pointer to a dtrace_probe_t
 * structure.  This structure contains information about the probe, and a
 * pointer to the list of Enabling Control Blocks (ECBs).  Each ECB points to
 * DTrace consumer state, and contains an optional predicate and a list of
 * actions.  (Shown schematically below.)  The ECB abstraction allows a single
 * probe to be multiplexed across disjoint consumers, or across disjoint
 * enablings of a single probe within one consumer.
 *
 *   Enabling Control Block
 *        dtrace_ecb_t
 * +------------------------+
 * | dtrace_epid_t ---------+--------------> Enabled Probe ID (EPID)
 * | dtrace_state_t * ------+--------------> State associated with this ECB
 * | dtrace_predicate_t * --+---------+
 * | dtrace_action_t * -----+----+    |
 * | dtrace_ecb_t * ---+    |    |    |       Predicate (if any)
 * +-------------------+----+    |    |       dtrace_predicate_t
 *                     |         |    +---> +--------------------+
 *                     |         |          | dtrace_difo_t * ---+----> DIFO
 *                     |         |          +--------------------+
 *                     |         |
 *            Next ECB |         |           Action
 *            (if any) |         |       dtrace_action_t
 *                     :         +--> +-------------------+
 *                     :              | dtrace_actkind_t -+------> kind
 *                     v              | dtrace_difo_t * --+------> DIFO (if any)
 *                                    | dtrace_recdesc_t -+------> record descr.
 *                                    | dtrace_action_t * +------+
 *                                    +-------------------+      |
 *                                                               | Next action
 *                               +-------------------------------+  (if any)
 *                               |
 *                               |           Action
 *                               |       dtrace_action_t
 *                               +--> +-------------------+
 *                                    | dtrace_actkind_t -+------> kind
 *                                    | dtrace_difo_t * --+------> DIFO (if any)
 *                                    | dtrace_action_t * +------+
 *                                    +-------------------+      |
 *                                                               | Next action
 *                               +-------------------------------+  (if any)
 *                               |
 *                               :
 *                               v
 *
 *
 * dtrace_probe() iterates over the ECB list.  If the ECB needs less space
 * than is available in the principal buffer, the ECB is processed:  if the
 * predicate is non-NULL, the DIF object is executed.  If the result is
 * non-zero, the action list is processed, with each action being executed
 * accordingly.  When the action list has been completely executed, processing
 * advances to the next ECB.  The ECB abstraction allows disjoint consumers
 * to multiplex on single probes.
 *
 * Execution of the ECB results in consuming dte_size bytes in the buffer
 * to record data.  During execution, dte_needed bytes must be available in
 * the buffer.  This space is used for both recorded data and tuple data.
 */
struct dtrace_ecb {
	dtrace_epid_t dte_epid;			/* enabled probe ID */
	uint32_t dte_alignment;			/* required alignment */
	size_t dte_needed;			/* space needed for execution */
	size_t dte_size;			/* size of recorded payload */
	dtrace_predicate_t *dte_predicate;	/* predicate, if any */
	dtrace_action_t *dte_action;		/* actions, if any */
	dtrace_ecb_t *dte_next;			/* next ECB on probe */
	dtrace_state_t *dte_state;		/* pointer to state */
	uint32_t dte_cond;			/* security condition */
	dtrace_probe_t *dte_probe;		/* pointer to probe */
	dtrace_action_t *dte_action_last;	/* last action on ECB */
	uint64_t dte_uarg;			/* library argument */
};
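
/*
 * An illustrative sketch of the per-ECB processing that dtrace_probe()
 * performs, per the discussion above (heavily condensed; the real loop
 * also handles buffer reservation, aggregations and faults, and the
 * emulation arguments are elided here):
 *
 *	for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) {
 *		if ((pred = ecb->dte_predicate) != NULL &&
 *		    dtrace_dif_emulate(pred->dtp_difo, ...) == 0)
 *			continue;
 *		for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
 *			... execute the action, recording data as
 *			    described by act->dta_rec ...
 *		}
 *	}
 */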

struct dtrace_predicate {
	dtrace_difo_t *dtp_difo;		/* DIF object */
	dtrace_cacheid_t dtp_cacheid;		/* cache identifier */
	int dtp_refcnt;				/* reference count */
};

struct dtrace_action {
	dtrace_actkind_t dta_kind;		/* kind of action */
	uint16_t dta_intuple;			/* boolean:  in aggregation */
	uint32_t dta_refcnt;			/* reference count */
	dtrace_difo_t *dta_difo;		/* pointer to DIFO */
	dtrace_recdesc_t dta_rec;		/* record description */
	dtrace_action_t *dta_prev;		/* previous action */
	dtrace_action_t *dta_next;		/* next action */
};

typedef struct dtrace_aggregation {
	dtrace_action_t dtag_action;		/* action; must be first */
	dtrace_aggid_t dtag_id;			/* identifier */
	dtrace_ecb_t *dtag_ecb;			/* corresponding ECB */
	dtrace_action_t *dtag_first;		/* first action in tuple */
	uint32_t dtag_base;			/* base of aggregation */
	uint8_t dtag_hasarg;			/* boolean:  has argument */
	uint64_t dtag_initial;			/* initial value */
	void (*dtag_aggregate)(uint64_t *, uint64_t, uint64_t);
} dtrace_aggregation_t;
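
/*
 * The dtag_aggregate member points to the function that folds a new value
 * into an aggregation's current value.  An illustrative example in this
 * style (in the spirit of dtrace_aggregate_max() in dtrace.c; the third
 * argument carries the optional aggregation argument, unused here):
 *
 *	static void
 *	dtrace_aggregate_max(uint64_t *oval, uint64_t nval, uint64_t arg)
 *	{
 *		if ((int64_t)nval > (int64_t)*oval)
 *			*oval = nval;
 *	}
 */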

/*
 * DTrace Buffers
 *
 * Principal buffers, aggregation buffers, and speculative buffers are all
 * managed with the dtrace_buffer structure.  By default, this structure
 * includes twin data buffers -- dtb_tomax and dtb_xamot -- that serve as the
 * active and passive buffers, respectively.  For speculative buffers,
 * dtb_xamot will be NULL; for "ring" and "fill" buffers, dtb_xamot will point
 * to a scratch buffer.  For all buffer types, the dtrace_buffer structure is
 * always allocated on a per-CPU basis; a single dtrace_buffer structure is
 * never shared among CPUs.  (That is, there is never true sharing of the
 * dtrace_buffer structure; to prevent false sharing of the structure, it must
 * always be aligned to the coherence granularity -- generally 64 bytes.)
 *
 * One of the critical design decisions of DTrace is that a given ECB always
 * stores the same quantity and type of data.  This is done to assure that the
 * only metadata required for an ECB's traced data is the EPID.  That is, from
 * the EPID, the consumer can determine the data layout.  (The data buffer
 * layout is shown schematically below.)  By assuring that one can determine
 * data layout from the EPID, the metadata stream can be separated from the
 * data stream -- simplifying the data stream enormously.  A record header --
 * the dtrace_rechdr_t structure, which includes the EPID and a
 * high-resolution timestamp used for output ordering consistency -- always
 * precedes the recorded data.
 *
 *      base of data buffer --->  +--------+--------------------+--------+
 *                                | rechdr | data               | rechdr |
 *                                +--------+------+--------+----+--------+
 *                                | data          | rechdr | data        |
 *                                +---------------+--------+-------------+
 *                                | data, cont.                          |
 *                                +--------+--------------------+--------+
 *                                | rechdr | data               |        |
 *                                +--------+--------------------+        |
 *                                |                ||                    |
 *                                |                ||                    |
 *                                |                \/                    |
 *                                :                                      :
 *                                .                                      .
 *                                .                                      .
 *                                .                                      .
 *                                :                                      :
 *                                |                                      |
 *     limit of data buffer --->  +--------------------------------------+
 *
 * When evaluating an ECB, dtrace_probe() determines if the ECB's needs of the
 * principal buffer (both scratch and payload) exceed the available space.  If
 * the ECB's needs exceed available space (and if the principal buffer policy
 * is the default "switch" policy), the ECB is dropped, the buffer's drop count
 * is incremented, and processing advances to the next ECB.  If the ECB's needs
 * can be met with the available space, the ECB is processed, but the offset in
 * the principal buffer is only advanced if the ECB completes processing
 * without error.
 *
 * When a buffer is to be switched (either because the buffer is the principal
 * buffer with a "switch" policy or because it is an aggregation buffer), a
 * cross call is issued to the CPU associated with the buffer.  In the cross
 * call context, interrupts are disabled, and the active and the inactive
 * buffers are atomically switched.  This involves switching the data pointers,
 * copying the various state fields (offset, drops, errors, etc.) into their
 * inactive equivalents, and clearing the state fields.  Because interrupts are
 * disabled during this procedure, the switch is guaranteed to appear atomic to
 * dtrace_probe().
 *
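 * As an illustrative sketch, the switch performed in cross-call context
 * looks roughly like the following (condensed from dtrace_buffer_switch()
 * in dtrace.c, which also transfers the drop, error and flag state):
 *
 *	cookie = dtrace_interrupt_disable();
 *	tomax = buf->dtb_tomax;
 *	buf->dtb_tomax = buf->dtb_xamot;
 *	buf->dtb_xamot = tomax;
 *	buf->dtb_xamot_offset = buf->dtb_offset;
 *	buf->dtb_offset = 0;
 *	dtrace_interrupt_enable(cookie);
 *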
 * DTrace Ring Buffering
 *
 * To process a ring buffer correctly, one must know the oldest valid record.
 * Processing starts at the oldest record in the buffer and continues until
 * the end of the buffer is reached.  Processing then resumes starting with
 * the record stored at offset 0 in the buffer, and continues until the
 * youngest record is processed.  If trace records are of fixed length,
 * determining the oldest record is trivial:
 *
 *   - If the ring buffer has not wrapped, the oldest record is the record
 *     stored at offset 0.
 *
 *   - If the ring buffer has wrapped, the oldest record is the record stored
 *     at the current offset.
 *
 * With variable length records, however, just knowing the current offset
 * doesn't suffice for determining the oldest valid record:  assuming that one
 * allows for arbitrary data, one has no way of searching forward from the
 * current offset to find the oldest valid record.  (That is, one has no way
 * of separating data from metadata.)  It would be possible to simply refuse to
 * process any data in the ring buffer between the current offset and the
 * limit, but this leaves (potentially) an enormous amount of otherwise valid
 * data unprocessed.
 *
 * To effect ring buffering, we track two offsets in the buffer:  the current
 * offset and the _wrapped_ offset.  If a request is made to reserve some
 * amount of data, and the buffer has wrapped, the wrapped offset is
 * incremented until the wrapped offset minus the current offset is greater
 * than or equal to the reserve request.  This is done by repeatedly looking
 * up the ECB corresponding to the EPID at the current wrapped offset, and
 * incrementing the wrapped offset by the size of the data payload
 * corresponding to that ECB.  If this offset is greater than or equal to the
 * limit of the data buffer, the wrapped offset is set to 0.  Thus, the
 * current offset effectively "chases" the wrapped offset around the buffer.
 * Schematically:
 *
 *      base of data buffer --->  +------+--------------------+------+
 *                                | EPID | data               | EPID |
 *                                +------+--------+------+----+------+
 *                                | data          | EPID | data      |
 *                                +---------------+------+-----------+
 *                                | data, cont.                      |
 *                                +------+---------------------------+
 *                                | EPID | data                      |
 *           current offset --->  +------+---------------------------+
 *                                | invalid data                     |
 *           wrapped offset --->  +------+--------------------+------+
 *                                | EPID | data               | EPID |
 *                                +------+--------+------+----+------+
 *                                | data          | EPID | data      |
 *                                +---------------+------+-----------+
 *                                :                                  :
 *                                .                                  .
 *                                .        ... valid data ...        .
 *                                .                                  .
 *                                :                                  :
 *                                +------+-------------+------+------+
 *                                | EPID | data        | EPID | data |
 *                                +------+------------++------+------+
 *                                | data, cont.       | leftover     |
 *     limit of data buffer --->  +-------------------+--------------+
 *
 * If the amount of requested buffer space exceeds the amount of space
 * available between the current offset and the end of the buffer:
 *
 *  (1)  all words in the data buffer between the current offset and the limit
 *       of the data buffer (marked "leftover", above) are set to
 *       DTRACE_EPIDNONE
 *
 *  (2)  the wrapped offset is set to zero
 *
 *  (3)  the iteration process described above occurs until the wrapped offset
 *       is greater than the amount of desired space.
 *
 * The wrapped offset is implemented by (re-)using the inactive offset.
 * In a "switch" buffer policy, the inactive offset stores the offset in
 * the inactive buffer; in a "ring" buffer policy, it stores the wrapped
 * offset.
 *
 * DTrace Scratch Buffering
 *
 * Some ECBs may wish to allocate dynamically-sized temporary scratch memory.
 * To accommodate such requests easily, scratch memory may be allocated in
 * the buffer beyond the current offset plus the needed memory of the current
 * ECB.  If there isn't sufficient room in the buffer for the requested amount
 * of scratch space, the allocation fails and an error is generated.  Scratch
 * memory is tracked in the dtrace_mstate_t and is automatically freed when
 * the ECB ceases processing.  Note that ring buffers cannot allocate their
 * scratch from the principal buffer -- lest they needlessly overwrite older,
 * valid data.  Ring buffers therefore have their own dedicated scratch buffer
 * from which scratch is allocated.
 */
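
/*
 * An illustrative sketch of a scratch allocation in the style described
 * above (condensed; the scratch fields live in the dtrace_mstate_t defined
 * below, and the real code in dtrace.c also aligns the scratch pointer):
 *
 *	if (mstate->dtms_scratch_ptr + size >
 *	    mstate->dtms_scratch_base + mstate->dtms_scratch_size) {
 *		DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
 *		return (0);
 *	}
 *	ptr = mstate->dtms_scratch_ptr;
 *	mstate->dtms_scratch_ptr += size;
 */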
#define	DTRACEBUF_RING		0x0001		/* bufpolicy set to "ring" */
#define	DTRACEBUF_FILL		0x0002		/* bufpolicy set to "fill" */
#define	DTRACEBUF_NOSWITCH	0x0004		/* do not switch buffer */
#define	DTRACEBUF_WRAPPED	0x0008		/* ring buffer has wrapped */
#define	DTRACEBUF_DROPPED	0x0010		/* drops occurred */
#define	DTRACEBUF_ERROR		0x0020		/* errors occurred */
#define	DTRACEBUF_FULL		0x0040		/* "fill" buffer is full */
#define	DTRACEBUF_CONSUMED	0x0080		/* buffer has been consumed */
#define	DTRACEBUF_INACTIVE	0x0100		/* buffer is not yet active */

typedef struct dtrace_buffer {
	uint64_t dtb_offset;			/* current offset in buffer */
	uint64_t dtb_size;			/* size of buffer */
	uint32_t dtb_flags;			/* flags */
	uint32_t dtb_drops;			/* number of drops */
	caddr_t dtb_tomax;			/* active buffer */
	caddr_t dtb_xamot;			/* inactive buffer */
	uint32_t dtb_xamot_flags;		/* inactive flags */
	uint32_t dtb_xamot_drops;		/* drops in inactive buffer */
	uint64_t dtb_xamot_offset;		/* offset in inactive buffer */
	uint32_t dtb_errors;			/* number of errors */
	uint32_t dtb_xamot_errors;		/* errors in inactive buffer */
#ifndef _LP64
	uint64_t dtb_pad1;			/* pad out to 64 bytes */
#endif
	uint64_t dtb_switched;			/* time of last switch */
	uint64_t dtb_interval;			/* observed switch interval */
	uint64_t dtb_pad2[6];			/* pad to avoid false sharing */
} dtrace_buffer_t;
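
/*
 * An illustrative consumer-side sketch:  per the ring buffer discussion
 * above, the oldest valid record begins at the wrapped offset (which reuses
 * the otherwise-inactive offset) once the buffer has wrapped, and at
 * offset 0 otherwise:
 *
 *	start = (buf->dtb_flags & DTRACEBUF_WRAPPED) ?
 *	    buf->dtb_xamot_offset : 0;
 */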

/*
 * DTrace Aggregation Buffers
 *
 * Aggregation buffers use much of the same mechanism as described above
 * ("DTrace Buffers").  However, because an aggregation is fundamentally a
 * hash, there exists dynamic metadata associated with an aggregation buffer
 * that is not associated with other kinds of buffers.  This aggregation
 * metadata is _only_ relevant for the in-kernel implementation of
 * aggregations; it is not actually relevant to user-level consumers.  To do
 * this, we allocate dynamic aggregation data (hash keys and hash buckets)
 * starting below the _limit_ of the buffer, and we allocate data from the
 * _base_ of the buffer.  When the aggregation buffer is copied out, _only_ the
 * data is copied out; the metadata is simply discarded.  Schematically,
 * aggregation buffers look like:
 *
 *      base of data buffer --->  +-------+------+-----------+-------+
 *                                | aggid | key  | value     | aggid |
 *                                +-------+------+-----------+-------+
 *                                | key                              |
 *                                +-------+-------+-----+------------+
 *                                | value | aggid | key | value      |
 *                                +-------+------++-----+------+-----+
 *                                | aggid | key  | value       |     |
 *                                +-------+------+-------------+     |
 *                                |                ||                |
 *                                |                ||                |
 *                                |                \/                |
 *                                :                                  :
 *                                .                                  .
 *                                .                                  .
 *                                .                                  .
 *                                :                                  :
 *                                |                /\                |
 *                                |                ||   +------------+
 *                                |                ||   |            |
 *                                +---------------------+            |
 *                                | hash keys                        |
 *                                | (dtrace_aggkey structures)       |
 *                                |                                  |
 *                                +----------------------------------+
 *                                | hash buckets                     |
 *                                | (dtrace_aggbuffer structure)     |
 *                                |                                  |
 *     limit of data buffer --->  +----------------------------------+
 *
 *
 * As implied above, just as we assure that ECBs always store a constant
 * amount of data, we assure that a given aggregation -- identified by its
 * aggregation ID -- always stores data of a constant quantity and type.
 * As with EPIDs, this allows the aggregation ID to serve as the metadata for a
 * given record.
 *
 * Note that the size of the dtrace_aggkey structure must be sizeof (uintptr_t)
 * aligned.  (If the structure changes such that this becomes false, an
 * assertion will fail in dtrace_aggregate().)
 */
typedef struct dtrace_aggkey {
	uint32_t dtak_hashval;			/* hash value */
	uint32_t dtak_action:4;			/* action -- 4 bits */
	uint32_t dtak_size:28;			/* size -- 28 bits */
	caddr_t dtak_data;			/* data pointer */
	struct dtrace_aggkey *dtak_next;	/* next in hash chain */
} dtrace_aggkey_t;

typedef struct dtrace_aggbuffer {
	uintptr_t dtagb_hashsize;		/* number of buckets */
	uintptr_t dtagb_free;			/* free list of keys */
	dtrace_aggkey_t **dtagb_hash;		/* hash table */
} dtrace_aggbuffer_t;
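
/*
 * An illustrative sketch of locating the aggregation metadata, which is
 * carved from the _limit_ of the buffer as described above (condensed from
 * dtrace_aggregate() in dtrace.c; "hashval" is a hypothetical key hash):
 *
 *	dtrace_aggbuffer_t *agb = (dtrace_aggbuffer_t *)
 *	    ((uintptr_t)buf->dtb_tomax + buf->dtb_size -
 *	    sizeof (dtrace_aggbuffer_t));
 *	dtrace_aggkey_t *key = agb->dtagb_hash[hashval % agb->dtagb_hashsize];
 */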

/*
 * DTrace Speculations
 *
 * Speculations have a per-CPU buffer and a global state.  Once a speculation
 * buffer has been committed or discarded, it cannot be reused until all CPUs
 * have taken the same action (commit or discard) on their respective
 * speculative buffer.  However, because DTrace probes may execute in arbitrary
 * context, other CPUs cannot simply be cross-called at probe firing time to
 * perform the necessary commit or discard.  The speculation states thus
 * optimize for the case that a speculative buffer is only active on one CPU at
 * the time of a commit() or discard() -- for if this is the case, other CPUs
 * need not take action, and the speculation is immediately available for
 * reuse.  If the speculation is active on multiple CPUs, it must be
 * asynchronously cleaned -- potentially leading to a higher rate of dirty
 * speculative drops.  The speculation states are as follows:
 *
 *  DTRACESPEC_INACTIVE       <= Initial state; inactive speculation
 *  DTRACESPEC_ACTIVE         <= Allocated, but not yet speculatively traced to
 *  DTRACESPEC_ACTIVEONE      <= Speculatively traced to on one CPU
 *  DTRACESPEC_ACTIVEMANY     <= Speculatively traced to on more than one CPU
 *  DTRACESPEC_COMMITTING     <= Currently being committed on one CPU
 *  DTRACESPEC_COMMITTINGMANY <= Currently being committed on many CPUs
 *  DTRACESPEC_DISCARDING     <= Currently being discarded on many CPUs
 *
 * The state transition diagram is as follows:
 *
 *     +----------------------------------------------------------+
 *     |                                                          |
 *     |                      +------------+                      |
 *     |  +-------------------| COMMITTING |<-----------------+   |
 *     |  |                   +------------+                  |   |
 *     |  | copied spec.            ^             commit() on |   | discard() on
 *     |  | into principal          |              active CPU |   | active CPU
 *     |  |                         | commit()                |   |
 *     V  V                         |                         |   |
 * +----------+                 +--------+                +-----------+
 * | INACTIVE |---------------->| ACTIVE |--------------->| ACTIVEONE |
 * +----------+  speculation()  +--------+  speculate()   +-----------+
 *     ^  ^                         |                         |   |
 *     |  |                         | discard()               |   |
 *     |  | asynchronously          |            discard() on |   | speculate()
 *     |  | cleaned                 V            inactive CPU |   | on inactive
 *     |  |                   +------------+                  |   | CPU
 *     |  +-------------------| DISCARDING |<-----------------+   |
 *     |                      +------------+                      |
 *     | asynchronously             ^                             |
 *     | copied spec.               |       discard()             |
 *     | into principal             +------------------------+    |
 *     |                                                     |    V
 *  +----------------+             commit()              +------------+
 *  | COMMITTINGMANY |<----------------------------------| ACTIVEMANY |
 *  +----------------+                                   +------------+
 */
typedef enum dtrace_speculation_state {
	DTRACESPEC_INACTIVE = 0,
	DTRACESPEC_ACTIVE,
	DTRACESPEC_ACTIVEONE,
	DTRACESPEC_ACTIVEMANY,
	DTRACESPEC_COMMITTING,
	DTRACESPEC_COMMITTINGMANY,
	DTRACESPEC_DISCARDING
} dtrace_speculation_state_t;

typedef struct dtrace_speculation {
	dtrace_speculation_state_t dtsp_state;	/* current speculation state */
	int dtsp_cleaning;			/* non-zero if being cleaned */
	dtrace_buffer_t *dtsp_buffer;		/* speculative buffer */
} dtrace_speculation_t;
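
/*
 * An illustrative sketch of the commit()-time transition per the diagram
 * above (condensed from dtrace_speculation_commit() in dtrace.c):
 *
 *	do {
 *		current = spec->dtsp_state;
 *		new = (current == DTRACESPEC_ACTIVEONE) ?
 *		    DTRACESPEC_COMMITTING : DTRACESPEC_COMMITTINGMANY;
 *	} while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
 *	    current, new) != current);
 */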

/*
 * DTrace Dynamic Variables
 *
 * The dynamic variable problem is obviously decomposed into two subproblems:
 * allocating new dynamic storage, and freeing old dynamic storage.  The
 * presence of the second problem makes the first much more complicated -- or
 * rather, the absence of the second renders the first trivial.  This is the
 * case with aggregations, for which there is effectively no deallocation of
 * dynamic storage.  (Or more accurately, all dynamic storage is deallocated
 * when a snapshot is taken of the aggregation.)  As DTrace dynamic variables
 * allow for both dynamic allocation and dynamic deallocation, the
 * implementation of dynamic variables is quite a bit more complicated than
 * that of their aggregation kin.
 *
 * We observe that allocating new dynamic storage is tricky only because the
 * size can vary -- the allocation problem is much easier if allocation sizes
 * are uniform.  We further observe that in D, the size of dynamic variables is
 * actually _not_ dynamic -- dynamic variable sizes may be determined by static
 * analysis of DIF text.  (This is true even of putatively dynamically-sized
 * objects like strings and stacks, the sizes of which are dictated by the
 * "stringsize" and "stackframes" variables, respectively.)  We exploit this by
 * performing this analysis on all DIF before enabling any probes.  For each
 * dynamic load or store, we calculate the dynamically-allocated size plus the
 * size of the dtrace_dynvar structure plus the storage required to key the
 * data.  For all DIF, we take the largest value and dub it the _chunksize_.
 * We then divide dynamic memory into two parts:  a hash table that is wide
 * enough to have every chunk in its own bucket, and a larger region of equal
 * chunksize units.  Whenever we wish to dynamically allocate a variable, we
 * always allocate a single chunk of memory.  Depending on the uniformity of
 * allocation, this will waste some amount of memory -- but it eliminates the
 * non-determinism inherent in traditional heap fragmentation.
 *
 * Dynamic objects are allocated by storing a non-zero value to them; they are
 * deallocated by storing a zero value to them.  Dynamic variables are
 * complicated enormously by being shared between CPUs.  In particular,
 * consider the following scenario:
 *
 *                 CPU A                                 CPU B
 *  +---------------------------------+   +---------------------------------+
 *  |                                 |   |                                 |
 *  | allocates dynamic object a[123] |   |                                 |
 *  | by storing the value 345 to it  |   |                                 |
 *  |                               --------->                              |
 *  |                                 |   | wishing to load from object     |
 *  |                                 |   | a[123], performs lookup in      |
 *  |                                 |   | dynamic variable space          |
 *  |                               <---------                              |
 *  | deallocates object a[123] by    |   |                                 |
 *  | storing 0 to it                 |   |                                 |
 *  |                                 |   |                                 |
 *  | allocates dynamic object b[567] |   | performs load from a[123]       |
 *  | by storing the value 789 to it  |   |                                 |
 *  :                                 :   :                                 :
 *  .                                 .   .                                 .
 *
 * This is obviously a race in the D program, but there are nonetheless only
 * two valid values for CPU B's load from a[123]:  345 or 0.  Most importantly,
 * CPU B may _not_ see the value 789 for a[123].
 *
 * There are essentially two ways to deal with this:
 *
 *  (1)  Explicitly spin-lock variables.  That is, if CPU B wishes to load
 *       from a[123], it needs to lock a[123] and hold the lock for the
 *       duration that it wishes to manipulate it.
 *
 *  (2)  Avoid reusing freed chunks until it is known that no CPU is referring
 *       to them.
 *
 * The implementation of (1) is rife with complexity, because it requires the
 * user of a dynamic variable to explicitly decree when they are done using it.
 * Were all variables by value, this perhaps wouldn't be debilitating -- but
 * dynamic variables of non-scalar types are tracked by reference.  That is, if
 * a dynamic variable is, say, a string, and that variable is to be traced to,
 * say, the principal buffer, the DIF emulation code returns to the main
 * dtrace_probe() loop a pointer to the underlying storage, not the contents of
 * the storage.  Further, code calling on DIF emulation would have to be aware
 * that the DIF emulation has returned a reference to a dynamic variable that
 * has been potentially locked.  The variable would have to be unlocked after
 * the main dtrace_probe() loop is finished with the variable, and the main
 * dtrace_probe() loop would have to be careful to not call any further DIF
 * emulation while the variable is locked to avoid deadlock.  More generally,
 * if one were to implement (1), DIF emulation code dealing with dynamic
 * variables could only deal with one dynamic variable at a time (lest deadlock
 * result).  To sum, (1) exports too much subtlety to the users of dynamic
 * variables -- increasing maintenance burden and imposing serious constraints
 * on future DTrace development.
 *
 * The implementation of (2) is also complex, but the complexity is more
 * manageable.  We need to be sure that when a variable is deallocated, it is
 * not placed on a traditional free list, but rather on a _dirty_ list.  Once a
 * variable is on a dirty list, it cannot be found by CPUs performing a
 * subsequent lookup of the variable -- but it may still be in use by other
 * CPUs.  To assure that all CPUs that may be seeing the old variable have
 * cleared out of probe context, a dtrace_sync() can be issued.  Once the
 * dtrace_sync() has completed, it can be known that all CPUs are done
 * manipulating the dynamic variable -- the dirty list can be atomically
 * appended to the free list.  Unfortunately, there's a slight hiccup in this
 * mechanism:  dtrace_sync() may not be issued from probe context.  The
 * dtrace_sync() must therefore be issued asynchronously from non-probe
 * context.  For this we rely on the DTrace cleaner, a cyclic that runs at the
 * "cleanrate" frequency.  To ease this implementation, we define several chunk
 * lists:
 *
 *   - Dirty.  Deallocated chunks, not yet cleaned.  Not available.
 *
 *   - Rinsing.  Formerly dirty chunks that are currently being asynchronously
 *     cleaned.  Not available, but will be shortly.  Dynamic variable
 *     allocation may not spin or block for availability, however.
 *
 *   - Clean.  Clean chunks, ready for allocation -- but not on the free list.
 *
 *   - Free.  Available for allocation.
 *
 * Moreover, to avoid absurd contention, _each_ of these lists is implemented
 * on a per-CPU basis.  This is only for performance, not correctness; chunks
 * may be allocated from another CPU's free list.  The algorithm for allocation
 * then is this:
 *
 *   (1)  Attempt to atomically allocate from current CPU's free list.  If list
 *        is non-empty and allocation is successful, allocation is complete.
 *
 *   (2)  If the clean list is non-empty, atomically move it to the free list,
 *        and reattempt (1).
 *
 *   (3)  If the dynamic variable space is in the CLEAN state, look for free
 *        and clean lists on other CPUs by setting the current CPU to the next
 *        CPU, and reattempting (1).  If the next CPU is the current CPU (that
 *        is, if all CPUs have been checked), atomically switch the state of
 *        the dynamic variable space based on the following:
 *
 *        - If no free chunks were found and no dirty chunks were found,
 *          atomically set the state to EMPTY.
 *
 *        - If dirty chunks were found, atomically set the state to DIRTY.
 *
 *        - If rinsing chunks were found, atomically set the state to RINSING.
 *
 *   (4)  Based on state of dynamic variable space state, increment appropriate
 *        counter to indicate dynamic drops (if in EMPTY state) vs. dynamic
 *        dirty drops (if in DIRTY state) vs. dynamic rinsing drops (if in
 *        RINSING state).  Fail the allocation.
 *
 * The cleaning cyclic operates with the following algorithm:  for all CPUs
 * with a non-empty dirty list, atomically move the dirty list to the rinsing
 * list.  Perform a dtrace_sync().  For all CPUs with a non-empty rinsing list,
 * atomically move the rinsing list to the clean list.  Perform another
 * dtrace_sync().  By this point, all CPUs have seen the new clean list; the
 * state of the dynamic variable space can be restored to CLEAN.
 *
 * There exist two final races that merit explanation.  The first is a simple
 * allocation race:
 *
 *                 CPU A                                 CPU B
 *  +---------------------------------+   +---------------------------------+
 *  |                                 |   |                                 |
 *  | allocates dynamic object a[123] |   | allocates dynamic object a[123] |
 *  | by storing the value 345 to it  |   | by storing the value 567 to it  |
 *  |                                 |   |                                 |
 *  :                                 :   :                                 :
 *  .                                 .   .                                 .
 *
 * Again, this is a race in the D program.  It can be resolved by having a[123]
 * hold the value 345 or a[123] hold the value 567 -- but it must be true that
 * a[123] have only _one_ of these values.  (That is, the racing CPUs may not
 * put the same element twice on the same hash chain.)  This is resolved
 * simply:  before the allocation is undertaken, the start of the new chunk's
 * hash chain is noted.  Later, after the allocation is complete, the hash
 * chain is atomically switched to point to the new element.  If this fails
 * (because of either concurrent allocations or an allocation concurrent with a
 * deletion), the newly allocated chunk is deallocated to the dirty list, and
 * the whole process of looking up (and potentially allocating) the dynamic
 * variable is reattempted.
 *
 * The final race is a simple deallocation race:
 *
 *                 CPU A                                 CPU B
 *  +---------------------------------+   +---------------------------------+
 *  |                                 |   |                                 |
 *  | deallocates dynamic object      |   | deallocates dynamic object      |
 *  | a[123] by storing the value 0   |   | a[123] by storing the value 0   |
 *  | to it                           |   | to it                           |
 *  |                                 |   |                                 |
 *  :                                 :   :                                 :
 *  .                                 .   .                                 .
 *
 * Once again, this is a race in the D program, but it is one that we must
 * handle without corrupting the underlying data structures.  Because
 * deallocations require the deletion of a chunk from the middle of a hash
 * chain, we cannot use a single-word atomic operation to remove it.  For this,
 * we add a spin lock to the hash buckets that is _only_ used for deallocations
 * (allocation races are handled as above).  Further, this spin lock is _only_
 * held for the duration of the delete; before control is returned to the DIF
 * emulation code, the hash bucket is unlocked.
 */
typedef struct dtrace_key {
	uint64_t dttk_value;			/* data value or data pointer */
	uint64_t dttk_size;			/* 0 if by-val, >0 if by-ref */
} dtrace_key_t;

typedef struct dtrace_tuple {
	uint32_t dtt_nkeys;			/* number of keys in tuple */
	uint32_t dtt_pad;			/* padding */
	dtrace_key_t dtt_key[1];		/* array of tuple keys */
} dtrace_tuple_t;

typedef struct dtrace_dynvar {
	uint64_t dtdv_hashval;			/* hash value -- 0 if free */
	struct dtrace_dynvar *dtdv_next;	/* next on list or hash chain */
	void *dtdv_data;			/* pointer to data */
	dtrace_tuple_t dtdv_tuple;		/* tuple key */
} dtrace_dynvar_t;

typedef enum dtrace_dynvar_op {
	DTRACE_DYNVAR_ALLOC,
	DTRACE_DYNVAR_NOALLOC,
	DTRACE_DYNVAR_DEALLOC
} dtrace_dynvar_op_t;

typedef struct dtrace_dynhash {
	dtrace_dynvar_t *dtdh_chain;		/* hash chain for this bucket */
	uintptr_t dtdh_lock;			/* deallocation lock */
#ifdef _LP64
	uintptr_t dtdh_pad[6];			/* pad to avoid false sharing */
#else
	uintptr_t dtdh_pad[14];			/* pad to avoid false sharing */
#endif
} dtrace_dynhash_t;

typedef struct dtrace_dstate_percpu {
	dtrace_dynvar_t *dtdsc_free;		/* free list for this CPU */
	dtrace_dynvar_t *dtdsc_dirty;		/* dirty list for this CPU */
	dtrace_dynvar_t *dtdsc_rinsing;		/* rinsing list for this CPU */
	dtrace_dynvar_t *dtdsc_clean;		/* clean list for this CPU */
	uint64_t dtdsc_drops;			/* number of capacity drops */
	uint64_t dtdsc_dirty_drops;		/* number of dirty drops */
	uint64_t dtdsc_rinsing_drops;		/* number of rinsing drops */
#ifdef _LP64
	uint64_t dtdsc_pad;			/* pad to avoid false sharing */
#else
	uint64_t dtdsc_pad[2];			/* pad to avoid false sharing */
#endif
} dtrace_dstate_percpu_t;

typedef enum dtrace_dstate_state {
	DTRACE_DSTATE_CLEAN = 0,
	DTRACE_DSTATE_EMPTY,
	DTRACE_DSTATE_DIRTY,
	DTRACE_DSTATE_RINSING
} dtrace_dstate_state_t;

typedef struct dtrace_dstate {
	void *dtds_base;			/* base of dynamic var. space */
	size_t dtds_size;			/* size of dynamic var. space */
	size_t dtds_hashsize;			/* number of buckets in hash */
	size_t dtds_chunksize;			/* size of each chunk */
	dtrace_dynhash_t *dtds_hash;		/* pointer to hash table */
	dtrace_dstate_state_t dtds_state;	/* current dynamic var. state */
	dtrace_dstate_percpu_t *dtds_percpu;	/* per-CPU dyn. var. state */
} dtrace_dstate_t;
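
/*
 * An illustrative sketch of step (1) of the allocation algorithm described
 * above -- atomically popping the head of the current CPU's free list
 * (condensed from dtrace_dynvar() in dtrace.c; "dcpu" is the CPU's
 * dtrace_dstate_percpu_t):
 *
 *	do {
 *		if ((free = dcpu->dtdsc_free) == NULL)
 *			break;
 *		new_free = free->dtdv_next;
 *	} while (dtrace_casptr(&dcpu->dtdsc_free, free, new_free) != free);
 */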

/*
 * DTrace Variable State
 *
 * The DTrace variable state tracks user-defined variables in its dtrace_vstate
 * structure.  Each DTrace consumer has exactly one dtrace_vstate structure,
 * but some dtrace_vstate structures may exist without a corresponding DTrace
 * consumer (see "DTrace Helpers", below).  As described in <sys/dtrace.h>,
 * user-defined variables can have one of three scopes:
 *
 *  DIFV_SCOPE_GLOBAL  =>  global scope
 *  DIFV_SCOPE_THREAD  =>  thread-local scope (i.e. "self->" variables)
 *  DIFV_SCOPE_LOCAL   =>  clause-local scope (i.e. "this->" variables)
 *
 * The variable state tracks variables by both their scope and their allocation
 * type:
 *
 *  - The dtvs_globals and dtvs_locals members each point to an array of
 *    dtrace_statvar structures.  These structures contain both the variable
 *    metadata (dtrace_difv structures) and the underlying storage for all
 *    statically allocated variables, including statically allocated
 *    DIFV_SCOPE_GLOBAL variables and all DIFV_SCOPE_LOCAL variables.
 *
 *  - The dtvs_tlocals member points to an array of dtrace_difv structures for
 *    DIFV_SCOPE_THREAD variables.  As such, this array tracks _only_ the
 *    variable metadata for DIFV_SCOPE_THREAD variables; the underlying storage
 *    is allocated out of the dynamic variable space.
 *
 *  - The dtvs_dynvars member is the dynamic variable state associated with the
 *    variable state.  The dynamic variable state (described in "DTrace Dynamic
 *    Variables", above) tracks all DIFV_SCOPE_THREAD variables and all
 *    dynamically-allocated DIFV_SCOPE_GLOBAL variables.
 */
typedef struct dtrace_statvar {
	uint64_t dtsv_data;			/* data or pointer to it */
	size_t dtsv_size;			/* size of pointed-to data */
	int dtsv_refcnt;			/* reference count */
	dtrace_difv_t dtsv_var;			/* variable metadata */
} dtrace_statvar_t;

typedef struct dtrace_vstate {
	dtrace_state_t *dtvs_state;		/* back pointer to state */
	dtrace_statvar_t **dtvs_globals;	/* statically-allocated glbls */
	int dtvs_nglobals;			/* number of globals */
	dtrace_difv_t *dtvs_tlocals;		/* thread-local metadata */
	int dtvs_ntlocals;			/* number of thread-locals */
	dtrace_statvar_t **dtvs_locals;		/* clause-local data */
	int dtvs_nlocals;			/* number of clause-locals */
	dtrace_dstate_t dtvs_dynvars;		/* dynamic variable state */
} dtrace_vstate_t;

/*
 * DTrace Machine State
 *
 * While processing a fired probe, DTrace needs to track and/or cache some
 * per-CPU state associated with that particular firing.  This state is
 * always discarded after the probe firing has completed, and much of it is
 * not specific to any DTrace consumer, remaining valid across all ECBs.
 * This state is tracked in the dtrace_mstate structure.
 */
#define	DTRACE_MSTATE_ARGS		0x00000001
#define	DTRACE_MSTATE_PROBE		0x00000002
#define	DTRACE_MSTATE_EPID		0x00000004
#define	DTRACE_MSTATE_TIMESTAMP		0x00000008
#define	DTRACE_MSTATE_STACKDEPTH	0x00000010
#define	DTRACE_MSTATE_CALLER		0x00000020
#define	DTRACE_MSTATE_IPL		0x00000040
#define	DTRACE_MSTATE_FLTOFFS		0x00000080
#define	DTRACE_MSTATE_WALLTIMESTAMP	0x00000100
#define	DTRACE_MSTATE_USTACKDEPTH	0x00000200
#define	DTRACE_MSTATE_UCALLER		0x00000400

typedef struct dtrace_mstate {
	uintptr_t dtms_scratch_base;		/* base of scratch space */
	uintptr_t dtms_scratch_ptr;		/* current scratch pointer */
	size_t dtms_scratch_size;		/* scratch size */
	uint32_t dtms_present;			/* variables that are present */
	uint64_t dtms_arg[5];			/* cached arguments */
	dtrace_epid_t dtms_epid;		/* current EPID */
	uint64_t dtms_timestamp;		/* cached timestamp */
	hrtime_t dtms_walltimestamp;		/* cached wall timestamp */
	int dtms_stackdepth;			/* cached stackdepth */
	int dtms_ustackdepth;			/* cached ustackdepth */
	struct dtrace_probe *dtms_probe;	/* current probe */
	uintptr_t dtms_caller;			/* cached caller */
	uint64_t dtms_ucaller;			/* cached user-level caller */
	int dtms_ipl;				/* cached interrupt pri lev */
	int dtms_fltoffs;			/* faulting DIFO offset */
	uintptr_t dtms_strtok;			/* saved strtok() pointer */
	uint32_t dtms_access;			/* memory access rights */
	dtrace_difo_t *dtms_difo;		/* current dif object */
	file_t *dtms_getf;			/* cached rval of getf() */
} dtrace_mstate_t;
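
/*
 * An illustrative sketch of how the cached values are used:  a member of
 * the dtrace_mstate_t is valid only if its DTRACE_MSTATE_* bit is set in
 * dtms_present, and is computed and cached on first use (in the spirit of
 * the DIF_VAR_TIMESTAMP handling in dtrace.c):
 *
 *	if (!(mstate->dtms_present & DTRACE_MSTATE_TIMESTAMP)) {
 *		mstate->dtms_timestamp = dtrace_gethrtime();
 *		mstate->dtms_present |= DTRACE_MSTATE_TIMESTAMP;
 *	}
 *	return (mstate->dtms_timestamp);
 */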

#define	DTRACE_COND_OWNER	0x1
#define	DTRACE_COND_USERMODE	0x2
#define	DTRACE_COND_ZONEOWNER	0x4

#define	DTRACE_PROBEKEY_MAXDEPTH	8	/* max glob recursion depth */

/*
 * Access flag used by dtrace_mstate.dtms_access.
 */
#define	DTRACE_ACCESS_KERNEL	0x1		/* the priv to read kmem */


/*
 * DTrace Activity
 *
 * Each DTrace consumer is in one of several states, which (for purposes of
 * avoiding yet-another overloading of the noun "state") we call the current
 * _activity_.  The activity transitions on dtrace_go() (from DTRACIOCGO), on
 * dtrace_stop() (from DTRACIOCSTOP) and on the exit() action.  Activities may
 * only transition in one direction; the activity transition diagram is a
 * directed acyclic graph.  The activity transition diagram is as follows:
 *
 *
 * +----------+                   +--------+                   +--------+
 * | INACTIVE |------------------>| WARMUP |------------------>| ACTIVE |
 * +----------+   dtrace_go(),    +--------+   dtrace_go(),    +--------+
 *                before BEGIN        |        after BEGIN       |  |  |
 *                                    |                          |  |  |
 *                      exit() action |                          |  |  |
 *                     from BEGIN ECB |                          |  |  |
 *                                    |                          |  |  |
 *                                    v                          |  |  |
 *                               +----------+     exit() action  |  |  |
 * +-----------------------------| DRAINING |<-------------------+  |  |
 * |                             +----------+                       |  |
 * |                                  |                             |  |
 * |                   dtrace_stop(), |                             |  |
 * |                     before END   |                             |  |
 * |                                  |                             |  |
 * |                                  v                             |  |
 * | +---------+                 +----------+                       |  |
 * | | STOPPED |<----------------| COOLDOWN |<----------------------+  |
 * | +---------+  dtrace_stop(), +----------+     dtrace_stop(),       |
 * |                after END                       before END         |
 * |                                                                   |
 * |                              +--------+                           |
 * +----------------------------->| KILLED |<--------------------------+
 *       deadman timeout or       +--------+     deadman timeout or
 *        killed consumer                         killed consumer
 *
 * Note that once a DTrace consumer has stopped tracing, there is no way to
 * restart it; if a DTrace consumer wishes to restart tracing, it must reopen
 * the DTrace pseudodevice.
 */
typedef enum dtrace_activity {
	DTRACE_ACTIVITY_INACTIVE = 0,		/* not yet running */
	DTRACE_ACTIVITY_WARMUP,			/* while starting */
	DTRACE_ACTIVITY_ACTIVE,			/* running */
	DTRACE_ACTIVITY_DRAINING,		/* before stopping */
	DTRACE_ACTIVITY_COOLDOWN,		/* while stopping */
	DTRACE_ACTIVITY_STOPPED,		/* after stopping */
	DTRACE_ACTIVITY_KILLED			/* killed */
} dtrace_activity_t;

/*
 * DTrace Helper Implementation
 *
 * A description of the helper architecture may be found in <sys/dtrace.h>.
 * Each process contains a pointer to its helpers in its p_dtrace_helpers
 * member.  This is a pointer to a dtrace_helpers structure, which contains an
 * array of pointers to dtrace_helper structures, helper variable state (shared
 * among a process's helpers) and a generation count.  (The generation count is
 * used to provide an identifier when a helper is added so that it may be
 * subsequently removed.)  The dtrace_helper structure is self-explanatory,
 * containing pointers to the objects needed to execute the helper.  Note that
 * helpers are _duplicated_ across fork(2), and destroyed on exec(2).  No more
 * than dtrace_helpers_max are allowed per-process.
 */
#define	DTRACE_HELPER_ACTION_USTACK	0
#define	DTRACE_NHELPER_ACTIONS		1

typedef struct dtrace_helper_action {
	int dtha_generation;			/* helper action generation */
	int dtha_nactions;			/* number of actions */
	dtrace_difo_t *dtha_predicate;		/* helper action predicate */
	dtrace_difo_t **dtha_actions;		/* array of actions */
	struct dtrace_helper_action *dtha_next;	/* next helper action */
} dtrace_helper_action_t;

typedef struct dtrace_helper_provider {
	int dthp_generation;			/* helper provider generation */
	uint32_t dthp_ref;			/* reference count */
	dof_helper_t dthp_prov;			/* DOF w/ provider and probes */
} dtrace_helper_provider_t;

typedef struct dtrace_helpers {
	dtrace_helper_action_t **dthps_actions;	/* array of helper actions */
	dtrace_vstate_t dthps_vstate;		/* helper action var. state */
	dtrace_helper_provider_t **dthps_provs;	/* array of providers */
	uint_t dthps_nprovs;			/* count of providers */
	uint_t dthps_maxprovs;			/* provider array size */
	int dthps_generation;			/* current generation */
	pid_t dthps_pid;			/* pid of associated proc */
	int dthps_deferred;			/* helper in deferred list */
	struct dtrace_helpers *dthps_next;	/* next pointer */
	struct dtrace_helpers *dthps_prev;	/* prev pointer */
} dtrace_helpers_t;

1048/*
1049 * DTrace Helper Action Tracing
1050 *
1051 * Debugging helper actions can be arduous.  To ease the development and
1052 * debugging of helpers, DTrace contains a tracing-framework-within-a-tracing-
1053 * framework: helper tracing.  If dtrace_helptrace_enabled is non-zero (which
1054 * it is by default on DEBUG kernels), all helper activity will be traced to a
1055 * global, in-kernel ring buffer.  Each entry includes a pointer to the specific
1056 * helper, the location within the helper, and a trace of all local variables.
1057 * The ring buffer may be displayed in a human-readable format with the
1058 * ::dtrace_helptrace mdb(1) dcmd.
1059 */
#define	DTRACE_HELPTRACE_NEXT	(-1)
#define	DTRACE_HELPTRACE_DONE	(-2)
#define	DTRACE_HELPTRACE_ERR	(-3)

typedef struct dtrace_helptrace {
	dtrace_helper_action_t	*dtht_helper;	/* helper action */
	int dtht_where;				/* where in helper action */
	int dtht_nlocals;			/* number of locals */
	int dtht_fault;				/* type of fault (if any) */
	int dtht_fltoffs;			/* DIF offset */
	uint64_t dtht_illval;			/* faulting value */
	uint64_t dtht_locals[1];		/* local variables */
} dtrace_helptrace_t;
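
/*
 * Illustrative sketch (compiled out): ring-buffer records are variably
 * sized because dtht_locals is a one-element array used as a flexible
 * tail.  example_helptrace_size() is hypothetical.
 */
#if 0
static size_t
example_helptrace_size(int nlocals)
{
	/* One uint64_t is already counted inside dtrace_helptrace_t. */
	return (sizeof (dtrace_helptrace_t) +
	    (nlocals - 1) * sizeof (uint64_t));
}
#endif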

/*
 * DTrace Credentials
 *
 * In probe context, we have limited flexibility to examine the credentials
 * of the DTrace consumer that created a particular enabling.  We use
 * the Least Privilege interfaces to cache the consumer's cred pointer and
 * some facts about that credential in a dtrace_cred_t structure.  These
 * can limit the consumer's breadth of visibility and what actions the
 * consumer may take.
 */
#define	DTRACE_CRV_ALLPROC		0x01
#define	DTRACE_CRV_KERNEL		0x02
#define	DTRACE_CRV_ALLZONE		0x04

#define	DTRACE_CRV_ALL		(DTRACE_CRV_ALLPROC | DTRACE_CRV_KERNEL | \
	DTRACE_CRV_ALLZONE)

#define	DTRACE_CRA_PROC				0x0001
#define	DTRACE_CRA_PROC_CONTROL			0x0002
#define	DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER	0x0004
#define	DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE	0x0008
#define	DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG	0x0010
#define	DTRACE_CRA_KERNEL			0x0020
#define	DTRACE_CRA_KERNEL_DESTRUCTIVE		0x0040

#define	DTRACE_CRA_ALL		(DTRACE_CRA_PROC | \
	DTRACE_CRA_PROC_CONTROL | \
	DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER | \
	DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE | \
	DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG | \
	DTRACE_CRA_KERNEL | \
	DTRACE_CRA_KERNEL_DESTRUCTIVE)

typedef struct dtrace_cred {
	cred_t			*dcr_cred;
	uint8_t			dcr_destructive;
	uint8_t			dcr_visible;
	uint16_t		dcr_action;
} dtrace_cred_t;
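
/*
 * Illustrative sketch (compiled out): the cached visibility and action
 * bits gate what an enabling may see and do.  example_cred_checks() is
 * hypothetical, not the framework's actual privilege-checking code.
 */
#if 0
static int
example_cred_checks(const dtrace_cred_t *dcr)
{
	/* May this consumer read arbitrary kernel memory? */
	if (!(dcr->dcr_visible & DTRACE_CRV_KERNEL))
		return (0);

	/* May it take destructive actions against any user's processes? */
	return ((dcr->dcr_action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER) != 0);
}
#endif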

/*
 * DTrace Consumer State
 *
 * Each DTrace consumer has an associated dtrace_state structure that contains
 * its in-kernel DTrace state -- including options, credentials, statistics and
 * pointers to ECBs, buffers, speculations and formats.  A dtrace_state
 * structure is also allocated for anonymous enablings.  When anonymous state
 * is grabbed, the grabbing consumer's dts_anon pointer is set to the grabbed
 * dtrace_state structure.
 */
struct dtrace_state {
#ifdef illumos
	dev_t dts_dev;				/* device */
#else
	struct cdev *dts_dev;			/* device */
#endif
	int dts_necbs;				/* total number of ECBs */
	dtrace_ecb_t **dts_ecbs;		/* array of ECBs */
	dtrace_epid_t dts_epid;			/* next EPID to allocate */
	size_t dts_needed;			/* greatest needed space */
	struct dtrace_state *dts_anon;		/* anon. state, if grabbed */
	dtrace_activity_t dts_activity;		/* current activity */
	dtrace_vstate_t dts_vstate;		/* variable state */
	dtrace_buffer_t *dts_buffer;		/* principal buffer */
	dtrace_buffer_t *dts_aggbuffer;		/* aggregation buffer */
	dtrace_speculation_t *dts_speculations;	/* speculation array */
	int dts_nspeculations;			/* number of speculations */
	int dts_naggregations;			/* number of aggregations */
	dtrace_aggregation_t **dts_aggregations; /* aggregation array */
#ifdef illumos
	vmem_t *dts_aggid_arena;		/* arena for aggregation IDs */
#else
	struct unrhdr *dts_aggid_arena;		/* arena for aggregation IDs */
#endif
	uint64_t dts_errors;			/* total number of errors */
	uint32_t dts_speculations_busy;		/* number of spec. busy */
	uint32_t dts_speculations_unavail;	/* number of spec unavail */
	uint32_t dts_stkstroverflows;		/* stack string tab overflows */
	uint32_t dts_dblerrors;			/* errors in ERROR probes */
	uint32_t dts_reserve;			/* space reserved for END */
	hrtime_t dts_laststatus;		/* time of last status */
#ifdef illumos
	cyclic_id_t dts_cleaner;		/* cleaning cyclic */
	cyclic_id_t dts_deadman;		/* deadman cyclic */
#else
	struct callout dts_cleaner;		/* Cleaning callout. */
	struct callout dts_deadman;		/* Deadman callout. */
#endif
	hrtime_t dts_alive;			/* time last alive */
	char dts_speculates;			/* boolean: has speculations */
	char dts_destructive;			/* boolean: has dest. actions */
	int dts_nformats;			/* number of formats */
	char **dts_formats;			/* format string array */
	dtrace_optval_t dts_options[DTRACEOPT_MAX]; /* options */
	dtrace_cred_t dts_cred;			/* credentials */
	size_t dts_nretained;			/* number of retained enabs */
	int dts_getf;				/* number of getf() calls */
};
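
/*
 * Illustrative sketch (compiled out): per-consumer options live in a flat
 * array indexed by option identifier; DTRACEOPT_BUFSIZE and
 * DTRACEOPT_UNSET come from <sys/dtrace.h>.  example_bufsize() is
 * hypothetical.
 */
#if 0
static dtrace_optval_t
example_bufsize(const dtrace_state_t *state)
{
	/* DTRACEOPT_UNSET marks options the consumer never specified. */
	return (state->dts_options[DTRACEOPT_BUFSIZE]);
}
#endif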
1172
1173struct dtrace_provider {
1174	dtrace_pattr_t dtpv_attr;		/* provider attributes */
1175	dtrace_ppriv_t dtpv_priv;		/* provider privileges */
1176	dtrace_pops_t dtpv_pops;		/* provider operations */
1177	char *dtpv_name;			/* provider name */
1178	void *dtpv_arg;				/* provider argument */
1179	hrtime_t dtpv_defunct;			/* when made defunct */
1180	struct dtrace_provider *dtpv_next;	/* next provider */
1181};
1182
1183struct dtrace_meta {
1184	dtrace_mops_t dtm_mops;			/* meta provider operations */
1185	char *dtm_name;				/* meta provider name */
1186	void *dtm_arg;				/* meta provider user arg */
1187	uint64_t dtm_count;			/* no. of associated provs. */
1188};

/*
 * DTrace Enablings
 *
 * A dtrace_enabling structure is used to track a collection of ECB
 * descriptions -- before they have been turned into actual ECBs.  This is
 * created as a result of DOF processing, and is generally used to generate
 * ECBs immediately thereafter.  However, enablings are also generally
 * retained in case the probes they describe are created at a later time; as
 * each new module or provider registers with the framework, the retained
 * enablings are reevaluated, with any new match resulting in new ECBs.  To
 * prevent probes from being matched more than once, the enabling tracks the
 * last probe generation matched, and only matches probes from subsequent
 * generations.
 */
typedef struct dtrace_enabling {
	dtrace_ecbdesc_t **dten_desc;		/* all ECB descriptions */
	int dten_ndesc;				/* number of ECB descriptions */
	int dten_maxdesc;			/* size of ECB array */
	dtrace_vstate_t *dten_vstate;		/* associated variable state */
	dtrace_genid_t dten_probegen;		/* matched probe generation */
	dtrace_ecbdesc_t *dten_current;		/* current ECB description */
	int dten_error;				/* current error value */
	int dten_primed;			/* boolean: set if primed */
	struct dtrace_enabling *dten_prev;	/* previous enabling */
	struct dtrace_enabling *dten_next;	/* next enabling */
} dtrace_enabling_t;
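
/*
 * Illustrative sketch (compiled out): dten_probegen prevents re-matching.
 * A retained enabling is only reevaluated against probes created after
 * the generation it last saw.  Both names below are hypothetical.
 */
#if 0
static int
example_needs_rematch(const dtrace_enabling_t *enab,
    dtrace_genid_t current_probegen)
{
	return (enab->dten_probegen < current_probegen);
}
#endif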

/*
 * DTrace Anonymous Enablings
 *
 * Anonymous enablings are DTrace enablings that are not associated with a
 * controlling process, but rather derive their enabling from DOF stored as
 * properties in the dtrace.conf file.  If there is an anonymous enabling, a
 * DTrace consumer state and enabling are created on attach.  The state may be
 * subsequently grabbed by the first consumer specifying the "grabanon"
 * option.  As long as an anonymous DTrace enabling exists, dtrace(7D) will
 * refuse to unload.
 */
typedef struct dtrace_anon {
	dtrace_state_t *dta_state;		/* DTrace consumer state */
	dtrace_enabling_t *dta_enabling;	/* pointer to enabling */
	processorid_t dta_beganon;		/* which CPU BEGIN ran on */
} dtrace_anon_t;
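
/*
 * Illustrative sketch (compiled out): at most one consumer can take over
 * the anonymous state; a successful grab clears dta_state so that later
 * "grabanon" consumers find nothing.  example_grab_anon() is hypothetical
 * and elides the cleanup the framework performs on a real grab.
 */
#if 0
static dtrace_state_t *
example_grab_anon(dtrace_anon_t *anon)
{
	dtrace_state_t *state = anon->dta_state;

	if (state != NULL) {
		anon->dta_state = NULL;
		anon->dta_enabling = NULL;
	}

	return (state);
}
#endif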

/*
 * DTrace Error Debugging
 */
#ifdef DEBUG
#define	DTRACE_ERRDEBUG
#endif

#ifdef DTRACE_ERRDEBUG

typedef struct dtrace_errhash {
	const char	*dter_msg;	/* error message */
	int		dter_count;	/* number of times seen */
} dtrace_errhash_t;

#define	DTRACE_ERRHASHSZ	256	/* must be > number of err msgs */

#endif	/* DTRACE_ERRDEBUG */
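
/*
 * Illustrative sketch (compiled out): error messages are counted in an
 * open-addressed table of DTRACE_ERRHASHSZ slots with linear probing.
 * The toy hash and the function name are hypothetical; the real code is
 * private to dtrace.c.
 */
#if 0
static void
example_errdebug(dtrace_errhash_t *tab, const char *msg)
{
	uint_t h = 0, i;
	const char *c;

	for (c = msg; *c != '\0'; c++)
		h += *c;		/* toy hash over the message text */

	for (i = 0; i < DTRACE_ERRHASHSZ; i++) {
		uint_t ndx = (h + i) % DTRACE_ERRHASHSZ;

		if (tab[ndx].dter_msg == NULL)
			tab[ndx].dter_msg = msg;

		if (tab[ndx].dter_msg == msg) {
			tab[ndx].dter_count++;
			return;
		}
	}
	/* Table full: DTRACE_ERRHASHSZ must exceed the message count. */
}
#endif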

/*
 * DTrace Toxic Ranges
 *
 * DTrace supports safe loads from probe context; if the address turns out to
 * be invalid, a bit will be set by the kernel indicating that DTrace
 * encountered a memory error, and DTrace will propagate the error to the user
 * accordingly.  However, there may exist some regions of memory in which an
 * arbitrary load can change system state, and from which it is impossible to
 * recover after such a load has been attempted.  Examples of this may
 * include memory in which programmable I/O registers are mapped (for which a
 * read may have side effects on the device) or (in the specific case of
 * UltraSPARC-I and -II) the virtual address hole.  The platform is required
 * to make DTrace aware of these toxic ranges; DTrace will then check that
 * target addresses are not in a toxic range before attempting to issue a
 * safe load.
 */
typedef struct dtrace_toxrange {
	uintptr_t	dtt_base;		/* base of toxic range */
	uintptr_t	dtt_limit;		/* limit of toxic range */
} dtrace_toxrange_t;
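
/*
 * Illustrative sketch (compiled out): a load of `size' bytes at `addr' is
 * refused if it overlaps any registered [dtt_base, dtt_limit) range.
 * example_address_is_toxic() and its parameters are hypothetical.
 */
#if 0
static int
example_address_is_toxic(const dtrace_toxrange_t *ranges, int nranges,
    uintptr_t addr, size_t size)
{
	int i;

	for (i = 0; i < nranges; i++) {
		if (addr + size > ranges[i].dtt_base &&
		    addr < ranges[i].dtt_limit)
			return (1);	/* overlaps a toxic range */
	}

	return (0);
}
#endif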

#ifdef illumos
extern uint64_t dtrace_getarg(int, int);
#else
extern uint64_t __noinline dtrace_getarg(int, int);
#endif
extern greg_t dtrace_getfp(void);
extern int dtrace_getipl(void);
extern uintptr_t dtrace_caller(int);
extern uint32_t dtrace_cas32(uint32_t *, uint32_t, uint32_t);
extern void *dtrace_casptr(volatile void *, volatile void *, volatile void *);
extern void dtrace_copyin(uintptr_t, uintptr_t, size_t, volatile uint16_t *);
extern void dtrace_copyinstr(uintptr_t, uintptr_t, size_t, volatile uint16_t *);
extern void dtrace_copyout(uintptr_t, uintptr_t, size_t, volatile uint16_t *);
extern void dtrace_copyoutstr(uintptr_t, uintptr_t, size_t,
    volatile uint16_t *);
extern void dtrace_getpcstack(pc_t *, int, int, uint32_t *);
extern ulong_t dtrace_getreg(struct trapframe *, uint_t);
extern int dtrace_getstackdepth(int);
extern void dtrace_getupcstack(uint64_t *, int);
extern void dtrace_getufpstack(uint64_t *, uint64_t *, int);
extern int dtrace_getustackdepth(void);
extern uintptr_t dtrace_fulword(void *);
extern uint8_t dtrace_fuword8(void *);
extern uint16_t dtrace_fuword16(void *);
extern uint32_t dtrace_fuword32(void *);
extern uint64_t dtrace_fuword64(void *);
extern void dtrace_probe_error(dtrace_state_t *, dtrace_epid_t, int, int,
    int, uintptr_t);
extern int dtrace_assfail(const char *, const char *, int);
extern int dtrace_attached(void);
#ifdef illumos
extern hrtime_t dtrace_gethrestime(void);
#endif

#ifdef __sparc
extern void dtrace_flush_windows(void);
extern void dtrace_flush_user_windows(void);
extern uint_t dtrace_getotherwin(void);
extern uint_t dtrace_getfprs(void);
#else
extern void dtrace_copy(uintptr_t, uintptr_t, size_t);
extern void dtrace_copystr(uintptr_t, uintptr_t, size_t, volatile uint16_t *);
#endif
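
/*
 * Illustrative sketch (compiled out): the dtrace_fuword*() routines never
 * fault directly; a bad address instead latches a fault indication (see
 * the CPU_DTRACE_* flags earlier in this header) that the caller examines
 * afterwards.  example_safe_load32() is hypothetical.
 */
#if 0
static uint32_t
example_safe_load32(void *uaddr)
{
	/* Returns 0 on a bad address, with the per-CPU fault flag set. */
	return (dtrace_fuword32(uaddr));
}
#endif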

/*
 * DTrace Assertions
 *
 * DTrace calls ASSERT from probe context.  To assure that a failed ASSERT
 * does not induce a markedly more catastrophic failure (e.g., one from which
 * a dump cannot be gleaned), DTrace must define its own ASSERT to be one that
 * may safely be called from probe context.  This header file must thus be
 * included by any DTrace component that calls ASSERT from probe context, and
 * _only_ by those components.  (The only exception is user-level
 * kernel-debugging infrastructure that doesn't itself call ASSERT.)
 */
#undef ASSERT
#ifdef DEBUG
#define	ASSERT(EX)	((void)((EX) || \
			dtrace_assfail(#EX, __FILE__, __LINE__)))
#else
#define	ASSERT(EX)	((void)0)
#endif
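
/*
 * Illustrative usage (compiled out): safe from probe context; on DEBUG
 * kernels a false expression records the failure through dtrace_assfail()
 * rather than the generic kernel assertion path.  The function below is
 * hypothetical.
 */
#if 0
static void
example_probe_context_check(dtrace_state_t *state)
{
	ASSERT(state->dts_necbs >= 0);
}
#endif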

#ifdef	__cplusplus
}
#endif

#endif /* _SYS_DTRACE_IMPL_H */