/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * $FreeBSD$
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright 2016 Joyent, Inc.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

#ifndef _SYS_DTRACE_IMPL_H
#define	_SYS_DTRACE_IMPL_H

#ifdef	__cplusplus
extern "C" {
#endif

/*
 * DTrace Dynamic Tracing Software: Kernel Implementation Interfaces
 *
 * Note: The contents of this file are private to the implementation of the
 * Solaris system and DTrace subsystem and are subject to change at any time
 * without notice.  Applications and drivers using these interfaces will fail
 * to run on future releases.  These interfaces should not be used for any
 * purpose except those expressly outlined in dtrace(7D) and libdtrace(3LIB).
 * Please refer to the "Solaris Dynamic Tracing Guide" for more information.
 */

#include <sys/dtrace.h>

#ifndef illumos
#ifdef __sparcv9
typedef uint32_t		pc_t;
#else
typedef uintptr_t		pc_t;
#endif
typedef	u_long			greg_t;
#endif

/*
 * DTrace Implementation Constants and Typedefs
 */
#define	DTRACE_MAXPROPLEN		128
#define	DTRACE_DYNVAR_CHUNKSIZE		256

#ifdef __FreeBSD__
#define	NCPU		MAXCPU
#endif /* __FreeBSD__ */

struct dtrace_probe;
struct dtrace_ecb;
struct dtrace_predicate;
struct dtrace_action;
struct dtrace_provider;
struct dtrace_state;

typedef struct dtrace_probe dtrace_probe_t;
typedef struct dtrace_ecb dtrace_ecb_t;
typedef struct dtrace_predicate dtrace_predicate_t;
typedef struct dtrace_action dtrace_action_t;
typedef struct dtrace_provider dtrace_provider_t;
typedef struct dtrace_meta dtrace_meta_t;
typedef struct dtrace_state dtrace_state_t;
typedef uint32_t dtrace_optid_t;
typedef uint32_t dtrace_specid_t;
typedef uint64_t dtrace_genid_t;

/*
 * DTrace Probes
 *
 * The probe is the fundamental unit of the DTrace architecture.  Probes are
 * created by DTrace providers, and managed by the DTrace framework.  A probe
 * is identified by a unique <provider, module, function, name> tuple, and has
 * a unique probe identifier assigned to it.  (Some probes are not associated
 * with a specific point in text; these are called _unanchored probes_ and have
 * no module or function associated with them.)  Probes are represented as a
 * dtrace_probe structure.  To allow quick lookups based on each element of the
 * probe tuple, probes are hashed by each of provider, module, function and
 * name.  (If a lookup is performed based on a regular expression, a
 * dtrace_probekey is prepared, and a linear search is performed.)  Each probe
 * is additionally pointed to by a linear array indexed by its identifier.  The
 * identifier is the provider's mechanism for indicating to the DTrace
 * framework that a probe has fired:  the identifier is passed as the first
 * argument to dtrace_probe(), where it is then mapped into the corresponding
 * dtrace_probe structure.  From the dtrace_probe structure, dtrace_probe() can
 * iterate over the probe's list of enabling control blocks (see "DTrace
 * Enabling Control Blocks", below).
 */
struct dtrace_probe {
	dtrace_id_t dtpr_id;			/* probe identifier */
	dtrace_ecb_t *dtpr_ecb;			/* ECB list; see below */
	dtrace_ecb_t *dtpr_ecb_last;		/* last ECB in list */
	void *dtpr_arg;				/* provider argument */
	dtrace_cacheid_t dtpr_predcache;	/* predicate cache ID */
	int dtpr_aframes;			/* artificial frames */
	dtrace_provider_t *dtpr_provider;	/* pointer to provider */
	char *dtpr_mod;				/* probe's module name */
	char *dtpr_func;			/* probe's function name */
	char *dtpr_name;			/* probe's name */
	dtrace_probe_t *dtpr_nextmod;		/* next in module hash */
	dtrace_probe_t *dtpr_prevmod;		/* previous in module hash */
	dtrace_probe_t *dtpr_nextfunc;		/* next in function hash */
	dtrace_probe_t *dtpr_prevfunc;		/* previous in function hash */
	dtrace_probe_t *dtpr_nextname;		/* next in name hash */
	dtrace_probe_t *dtpr_prevname;		/* previous in name hash */
	dtrace_genid_t dtpr_gen;		/* probe generation ID */
};

typedef int dtrace_probekey_f(const char *, const char *, int);

typedef struct dtrace_probekey {
	char *dtpk_prov;			/* provider name to match */
	dtrace_probekey_f *dtpk_pmatch;		/* provider matching function */
	char *dtpk_mod;				/* module name to match */
	dtrace_probekey_f *dtpk_mmatch;		/* module matching function */
	char *dtpk_func;			/* func name to match */
	dtrace_probekey_f *dtpk_fmatch;		/* func matching function */
	char *dtpk_name;			/* name to match */
	dtrace_probekey_f *dtpk_nmatch;		/* name matching function */
	dtrace_id_t dtpk_id;			/* identifier to match */
} dtrace_probekey_t;

typedef struct dtrace_hashbucket {
	struct dtrace_hashbucket *dthb_next;	/* next on hash chain */
	dtrace_probe_t *dthb_chain;		/* chain of probes */
	int dthb_len;				/* number of probes here */
} dtrace_hashbucket_t;

typedef struct dtrace_hash {
	dtrace_hashbucket_t **dth_tab;		/* hash table */
	int dth_size;				/* size of hash table */
	int dth_mask;				/* mask to index into table */
	int dth_nbuckets;			/* total number of buckets */
	uintptr_t dth_nextoffs;			/* offset of next in probe */
	uintptr_t dth_prevoffs;			/* offset of prev in probe */
	uintptr_t dth_stroffs;			/* offset of str in probe */
} dtrace_hash_t;
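
/*
 * A single hash implementation serves the module, function and name hashes
 * alike: each dtrace_hash_t records the offsets of the relevant string and
 * link members within dtrace_probe_t.  A minimal sketch of the idea -- the
 * real lookup code lives in dtrace.c; the helper names here are hypothetical:
 *
 *	static char *
 *	hash_str(dtrace_hash_t *hash, dtrace_probe_t *probe)
 *	{
 *		// dtpr_mod, dtpr_func or dtpr_name, depending on the hash
 *		return (*(char **)((uintptr_t)probe + hash->dth_stroffs));
 *	}
 *
 *	static dtrace_probe_t *
 *	hash_next(dtrace_hash_t *hash, dtrace_probe_t *probe)
 *	{
 *		// dtpr_nextmod, dtpr_nextfunc or dtpr_nextname
 *		return (*(dtrace_probe_t **)
 *		    ((uintptr_t)probe + hash->dth_nextoffs));
 *	}
 */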

/*
 * DTrace Enabling Control Blocks
 *
 * When a provider wishes to fire a probe, it calls into dtrace_probe(),
 * passing the probe identifier as the first argument.  As described above,
 * dtrace_probe() maps the identifier into a pointer to a dtrace_probe_t
 * structure.  This structure contains information about the probe, and a
 * pointer to the list of Enabling Control Blocks (ECBs).  Each ECB points to
 * DTrace consumer state, and contains an optional predicate, and a list of
 * actions.  (Shown schematically below.)  The ECB abstraction allows a single
 * probe to be multiplexed across disjoint consumers, or across disjoint
 * enablings of a single probe within one consumer.
 *
 *   Enabling Control Block
 *        dtrace_ecb_t
 * +------------------------+
 * | dtrace_epid_t ---------+--------------> Enabled Probe ID (EPID)
 * | dtrace_state_t * ------+--------------> State associated with this ECB
 * | dtrace_predicate_t * --+---------+
 * | dtrace_action_t * -----+----+    |
 * | dtrace_ecb_t * ---+    |    |    |       Predicate (if any)
 * +-------------------+----+    |    |       dtrace_predicate_t
 *                     |         |    +---> +--------------------+
 *                     |         |          | dtrace_difo_t * ---+----> DIFO
 *                     |         |          +--------------------+
 *                     |         |
 *            Next ECB |         |           Action
 *            (if any) |         |       dtrace_action_t
 *                     :         +--> +-------------------+
 *                     :              | dtrace_actkind_t -+------> kind
 *                     v              | dtrace_difo_t * --+------> DIFO (if any)
 *                                    | dtrace_recdesc_t -+------> record descr.
 *                                    | dtrace_action_t * +------+
 *                                    +-------------------+      |
 *                                                               | Next action
 *                               +-------------------------------+  (if any)
 *                               |
 *                               |           Action
 *                               |       dtrace_action_t
 *                               +--> +-------------------+
 *                                    | dtrace_actkind_t -+------> kind
 *                                    | dtrace_difo_t * --+------> DIFO (if any)
 *                                    | dtrace_action_t * +------+
 *                                    +-------------------+      |
 *                                                               | Next action
 *                               +-------------------------------+  (if any)
 *                               |
 *                               :
 *                               v
 *
 *
 * dtrace_probe() iterates over the ECB list.  If the ECB needs less space
 * than is available in the principal buffer, the ECB is processed:  if the
 * predicate is non-NULL, the DIF object is executed.  If the result is
 * non-zero, the action list is processed, with each action being executed
 * accordingly.  When the action list has been completely executed, processing
 * advances to the next ECB.  The ECB abstraction allows disjoint consumers
 * to multiplex on single probes.
 *
 * Execution of the ECB results in consuming dte_size bytes in the buffer
 * to record data.  During execution, dte_needed bytes must be available in
 * the buffer.  This space is used for both recorded data and tuple data.
 */
struct dtrace_ecb {
	dtrace_epid_t dte_epid;			/* enabled probe ID */
	uint32_t dte_alignment;			/* required alignment */
	size_t dte_needed;			/* space needed for execution */
	size_t dte_size;			/* size of recorded payload */
	dtrace_predicate_t *dte_predicate;	/* predicate, if any */
	dtrace_action_t *dte_action;		/* actions, if any */
	dtrace_ecb_t *dte_next;			/* next ECB on probe */
	dtrace_state_t *dte_state;		/* pointer to state */
	uint32_t dte_cond;			/* security condition */
	dtrace_probe_t *dte_probe;		/* pointer to probe */
	dtrace_action_t *dte_action_last;	/* last action on ECB */
	uint64_t dte_uarg;			/* library argument */
};
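
/*
 * A simplified sketch of the per-firing walk described under "DTrace
 * Enabling Control Blocks" above.  The real loop in dtrace_probe() also
 * handles buffer reservation, faults and security conditions; the
 * evaluate_difo() and process_actions() helpers below are hypothetical:
 *
 *	dtrace_ecb_t *ecb;
 *
 *	for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) {
 *		dtrace_predicate_t *pred = ecb->dte_predicate;
 *
 *		if (pred != NULL) {
 *			// execute the predicate's DIF object; a zero
 *			// result suppresses this ECB's actions
 *			if (evaluate_difo(pred->dtp_difo) == 0)
 *				continue;
 *		}
 *
 *		process_actions(ecb->dte_action);
 *	}
 */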

struct dtrace_predicate {
	dtrace_difo_t *dtp_difo;		/* DIF object */
	dtrace_cacheid_t dtp_cacheid;		/* cache identifier */
	int dtp_refcnt;				/* reference count */
};

struct dtrace_action {
	dtrace_actkind_t dta_kind;		/* kind of action */
	uint16_t dta_intuple;			/* boolean:  in aggregation */
	uint32_t dta_refcnt;			/* reference count */
	dtrace_difo_t *dta_difo;		/* pointer to DIFO */
	dtrace_recdesc_t dta_rec;		/* record description */
	dtrace_action_t *dta_prev;		/* previous action */
	dtrace_action_t *dta_next;		/* next action */
};

typedef struct dtrace_aggregation {
	dtrace_action_t dtag_action;		/* action; must be first */
	dtrace_aggid_t dtag_id;			/* identifier */
	dtrace_ecb_t *dtag_ecb;			/* corresponding ECB */
	dtrace_action_t *dtag_first;		/* first action in tuple */
	uint32_t dtag_base;			/* base of aggregation */
	uint8_t dtag_hasarg;			/* boolean:  has argument */
	uint64_t dtag_initial;			/* initial value */
	void (*dtag_aggregate)(uint64_t *, uint64_t, uint64_t);
} dtrace_aggregation_t;
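
/*
 * The dtag_aggregate member does the actual folding of a new value into the
 * current intermediate result, in place.  For example, the sum() and max()
 * aggregating actions reduce to routines of roughly this shape (cf.
 * dtrace_aggregate_sum() and dtrace_aggregate_max() in dtrace.c):
 *
 *	static void
 *	dtrace_aggregate_sum(uint64_t *oval, uint64_t nval, uint64_t arg)
 *	{
 *		*oval += nval;
 *	}
 *
 *	static void
 *	dtrace_aggregate_max(uint64_t *oval, uint64_t nval, uint64_t arg)
 *	{
 *		if ((int64_t)nval > (int64_t)*oval)
 *			*oval = nval;
 *	}
 */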

/*
 * DTrace Buffers
 *
 * Principal buffers, aggregation buffers, and speculative buffers are all
 * managed with the dtrace_buffer structure.  By default, this structure
 * includes twin data buffers -- dtb_tomax and dtb_xamot -- that serve as the
 * active and passive buffers, respectively.  For speculative buffers,
 * dtb_xamot will be NULL; for "ring" and "fill" buffers, dtb_xamot will point
 * to a scratch buffer.  For all buffer types, the dtrace_buffer structure is
 * always allocated on a per-CPU basis; a single dtrace_buffer structure is
 * never shared among CPUs.  (That is, there is never true sharing of the
 * dtrace_buffer structure; to prevent false sharing of the structure, it must
 * always be aligned to the coherence granularity -- generally 64 bytes.)
 *
 * One of the critical design decisions of DTrace is that a given ECB always
 * stores the same quantity and type of data.  This is done to assure that the
 * only metadata required for an ECB's traced data is the EPID.  That is, from
 * the EPID, the consumer can determine the data layout.  (The data buffer
 * layout is shown schematically below.)  By assuring that one can determine
 * data layout from the EPID, the metadata stream can be separated from the
 * data stream -- simplifying the data stream enormously.  The EPID always
 * precedes the recorded data, as part of the dtrace_rechdr_t structure that
 * also includes a high-resolution timestamp used for output ordering
 * consistency.
 *
 *      base of data buffer --->  +--------+--------------------+--------+
 *                                | rechdr | data               | rechdr |
 *                                +--------+------+--------+----+--------+
 *                                | data          | rechdr | data        |
 *                                +---------------+--------+-------------+
 *                                | data, cont.                          |
 *                                +--------+--------------------+--------+
 *                                | rechdr | data               |        |
 *                                +--------+--------------------+        |
 *                                |                ||                    |
 *                                |                ||                    |
 *                                |                \/                    |
 *                                :                                      :
 *                                .                                      .
 *                                .                                      .
 *                                .                                      .
 *                                :                                      :
 *                                |                                      |
 *     limit of data buffer --->  +--------------------------------------+
 *
 * When evaluating an ECB, dtrace_probe() determines if the ECB's needs of the
 * principal buffer (both scratch and payload) exceed the available space.  If
 * the ECB's needs exceed available space (and if the principal buffer policy
 * is the default "switch" policy), the ECB is dropped, the buffer's drop count
 * is incremented, and processing advances to the next ECB.  If the ECB's needs
 * can be met with the available space, the ECB is processed, but the offset in
 * the principal buffer is only advanced if the ECB completes processing
 * without error.
 *
 * When a buffer is to be switched (either because the buffer is the principal
 * buffer with a "switch" policy or because it is an aggregation buffer), a
 * cross call is issued to the CPU associated with the buffer.  In the cross
 * call context, interrupts are disabled, and the active and the inactive
 * buffers are atomically switched.  This involves switching the data pointers,
 * copying the various state fields (offset, drops, errors, etc.) into their
 * inactive equivalents, and clearing the state fields.  Because interrupts are
 * disabled during this procedure, the switch is guaranteed to appear atomic to
 * dtrace_probe().
 *
 * DTrace Ring Buffering
 *
 * To process a ring buffer correctly, one must know the oldest valid record.
 * Processing starts at the oldest record in the buffer and continues until
 * the end of the buffer is reached.  Processing then resumes starting with
 * the record stored at offset 0 in the buffer, and continues until the
 * youngest record is processed.  If trace records are of a fixed length,
 * determining the oldest record is trivial:
 *
 *   - If the ring buffer has not wrapped, the oldest record is the record
 *     stored at offset 0.
 *
 *   - If the ring buffer has wrapped, the oldest record is the record stored
 *     at the current offset.
 *
 * With variable length records, however, just knowing the current offset
 * doesn't suffice for determining the oldest valid record:  assuming that one
 * allows for arbitrary data, one has no way of searching forward from the
 * current offset to find the oldest valid record.  (That is, one has no way
 * of separating data from metadata.)  It would be possible to simply refuse to
 * process any data in the ring buffer between the current offset and the
 * limit, but this leaves (potentially) an enormous amount of otherwise valid
 * data unprocessed.
 *
 * To effect ring buffering, we track two offsets in the buffer:  the current
 * offset and the _wrapped_ offset.  If a request is made to reserve some
 * amount of data, and the buffer has wrapped, the wrapped offset is
 * incremented until the wrapped offset minus the current offset is greater
 * than or equal to the reserve request.  This is done by repeatedly looking
 * up the ECB corresponding to the EPID at the current wrapped offset, and
 * incrementing the wrapped offset by the size of the data payload
 * corresponding to that ECB.  If this offset is greater than or equal to the
 * limit of the data buffer, the wrapped offset is set to 0.  Thus, the
 * current offset effectively "chases" the wrapped offset around the buffer.
 * Schematically:
 *
 *      base of data buffer --->  +------+--------------------+------+
 *                                | EPID | data               | EPID |
 *                                +------+--------+------+----+------+
 *                                | data          | EPID | data      |
 *                                +---------------+------+-----------+
 *                                | data, cont.                      |
 *                                +------+---------------------------+
 *                                | EPID | data                      |
 *           current offset --->  +------+---------------------------+
 *                                | invalid data                     |
 *           wrapped offset --->  +------+--------------------+------+
 *                                | EPID | data               | EPID |
 *                                +------+--------+------+----+------+
 *                                | data          | EPID | data      |
 *                                +---------------+------+-----------+
 *                                :                                  :
 *                                .                                  .
 *                                .        ... valid data ...        .
 *                                .                                  .
 *                                :                                  :
 *                                +------+-------------+------+------+
 *                                | EPID | data        | EPID | data |
 *                                +------+------------++------+------+
 *                                | data, cont.       | leftover     |
 *     limit of data buffer --->  +-------------------+--------------+
 *
 * If the amount of requested buffer space exceeds the amount of space
 * available between the current offset and the end of the buffer:
 *
 *  (1)  all words in the data buffer between the current offset and the limit
 *       of the data buffer (marked "leftover", above) are set to
 *       DTRACE_EPIDNONE
 *
 *  (2)  the wrapped offset is set to zero
 *
 *  (3)  the iteration process described above occurs until the wrapped offset
 *       is greater than the amount of desired space.
 *
 * The wrapped offset is implemented by (re-)using the inactive offset.
 * In a "switch" buffer policy, the inactive offset stores the offset in
 * the inactive buffer; in a "ring" buffer policy, it stores the wrapped
 * offset.
 *
 * DTrace Scratch Buffering
 *
 * Some ECBs may wish to allocate dynamically-sized temporary scratch memory.
 * To accommodate such requests easily, scratch memory may be allocated in
 * the buffer beyond the current offset plus the needed memory of the current
 * ECB.  If there isn't sufficient room in the buffer for the requested amount
 * of scratch space, the allocation fails and an error is generated.  Scratch
 * memory is tracked in the dtrace_mstate_t and is automatically freed when
 * the ECB ceases processing.  Note that ring buffers cannot allocate their
 * scratch from the principal buffer -- lest they needlessly overwrite older,
 * valid data.  Ring buffers therefore have their own dedicated scratch buffer
 * from which scratch is allocated.
 */
#define	DTRACEBUF_RING		0x0001		/* bufpolicy set to "ring" */
#define	DTRACEBUF_FILL		0x0002		/* bufpolicy set to "fill" */
#define	DTRACEBUF_NOSWITCH	0x0004		/* do not switch buffer */
#define	DTRACEBUF_WRAPPED	0x0008		/* ring buffer has wrapped */
#define	DTRACEBUF_DROPPED	0x0010		/* drops occurred */
#define	DTRACEBUF_ERROR		0x0020		/* errors occurred */
#define	DTRACEBUF_FULL		0x0040		/* "fill" buffer is full */
#define	DTRACEBUF_CONSUMED	0x0080		/* buffer has been consumed */
#define	DTRACEBUF_INACTIVE	0x0100		/* buffer is not yet active */

typedef struct dtrace_buffer {
	uint64_t dtb_offset;			/* current offset in buffer */
	uint64_t dtb_size;			/* size of buffer */
	uint32_t dtb_flags;			/* flags */
	uint32_t dtb_drops;			/* number of drops */
	caddr_t dtb_tomax;			/* active buffer */
	caddr_t dtb_xamot;			/* inactive buffer */
	uint32_t dtb_xamot_flags;		/* inactive flags */
	uint32_t dtb_xamot_drops;		/* drops in inactive buffer */
	uint64_t dtb_xamot_offset;		/* offset in inactive buffer */
	uint32_t dtb_errors;			/* number of errors */
	uint32_t dtb_xamot_errors;		/* errors in inactive buffer */
#ifndef _LP64
	uint64_t dtb_pad1;			/* pad out to 64 bytes */
#endif
	uint64_t dtb_switched;			/* time of last switch */
	uint64_t dtb_interval;			/* observed switch interval */
	uint64_t dtb_pad2[6];			/* pad to avoid false sharing */
} dtrace_buffer_t;
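
/*
 * A heavily simplified sketch of the wrapped-offset chase described under
 * "DTrace Ring Buffering" above; wrap-to-zero, alignment and the
 * DTRACE_EPIDNONE fill words are glossed over, and the real logic lives in
 * dtrace_buffer_reserve().  In a "ring" policy, dtb_xamot_offset holds the
 * wrapped offset; epid_to_size() is a hypothetical stand-in for the EPID-to-
 * ECB size lookup:
 *
 *	// advance the wrapped offset, one whole record at a time, until
 *	// the reservation of "needed" bytes at "offs" no longer overlaps
 *	// the oldest valid record
 *	while (buf->dtb_xamot_offset - offs < needed) {
 *		dtrace_epid_t epid = *(dtrace_epid_t *)
 *		    ((uintptr_t)buf->dtb_tomax + buf->dtb_xamot_offset);
 *
 *		// skip by the size that this EPID's ECB always records
 *		buf->dtb_xamot_offset += epid_to_size(epid);
 *	}
 */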

/*
 * DTrace Aggregation Buffers
 *
 * Aggregation buffers use much of the same mechanism as described above
 * ("DTrace Buffers").  However, because an aggregation is fundamentally a
 * hash, there exists dynamic metadata associated with an aggregation buffer
 * that is not associated with other kinds of buffers.  This aggregation
 * metadata is _only_ relevant for the in-kernel implementation of
 * aggregations; it is not actually relevant to user-level consumers.  To do
 * this, we allocate dynamic aggregation data (hash keys and hash buckets)
 * starting below the _limit_ of the buffer, and we allocate data from the
 * _base_ of the buffer.  When the aggregation buffer is copied out, _only_ the
 * data is copied out; the metadata is simply discarded.  Schematically,
 * aggregation buffers look like:
 *
 *      base of data buffer --->  +-------+------+-----------+-------+
 *                                | aggid | key  | value     | aggid |
 *                                +-------+------+-----------+-------+
 *                                | key                              |
 *                                +-------+-------+-----+------------+
 *                                | value | aggid | key | value      |
 *                                +-------+------++-----+------+-----+
 *                                | aggid | key  | value       |     |
 *                                +-------+------+-------------+     |
 *                                |                ||                |
 *                                |                ||                |
 *                                |                \/                |
 *                                :                                  :
 *                                .                                  .
 *                                .                                  .
 *                                .                                  .
 *                                :                                  :
 *                                |                /\                |
 *                                |                ||   +------------+
 *                                |                ||   |            |
 *                                +---------------------+            |
 *                                | hash keys                        |
 *                                | (dtrace_aggkey structures)       |
 *                                |                                  |
 *                                +----------------------------------+
 *                                | hash buckets                     |
 *                                | (dtrace_aggbuffer structure)     |
 *                                |                                  |
 *     limit of data buffer --->  +----------------------------------+
 *
 *
 * As implied above, just as we assure that ECBs always store a constant
 * amount of data, we assure that a given aggregation -- identified by its
 * aggregation ID -- always stores data of a constant quantity and type.
 * As with EPIDs, this allows the aggregation ID to serve as the metadata for a
 * given record.
 *
 * Note that the size of the dtrace_aggkey structure must be sizeof (uintptr_t)
 * aligned.  (If the structure changes such that this becomes false, an
 * assertion will fail in dtrace_aggregate().)
 */
typedef struct dtrace_aggkey {
	uint32_t dtak_hashval;			/* hash value */
	uint32_t dtak_action:4;			/* action -- 4 bits */
	uint32_t dtak_size:28;			/* size -- 28 bits */
	caddr_t dtak_data;			/* data pointer */
	struct dtrace_aggkey *dtak_next;	/* next in hash chain */
} dtrace_aggkey_t;

typedef struct dtrace_aggbuffer {
	uintptr_t dtagb_hashsize;		/* number of buckets */
	uintptr_t dtagb_free;			/* free list of keys */
	dtrace_aggkey_t **dtagb_hash;		/* hash table */
} dtrace_aggbuffer_t;
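
/*
 * A sketch of how the metadata is carved from the _limit_ of the buffer, as
 * described above (cf. the initialization path of dtrace_aggregate() in
 * dtrace.c): the dtrace_aggbuffer structure sits at the very top, the hash
 * buckets grow downward from it, and record data grows upward from the base.
 *
 *	caddr_t tomax = buf->dtb_tomax;
 *	dtrace_aggbuffer_t *agb;
 *
 *	agb = (dtrace_aggbuffer_t *)(tomax + buf->dtb_size -
 *	    sizeof (dtrace_aggbuffer_t));
 *
 *	agb->dtagb_hash = (dtrace_aggkey_t **)((uintptr_t)agb -
 *	    agb->dtagb_hashsize * sizeof (dtrace_aggkey_t *));
 */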

/*
 * DTrace Speculations
 *
 * Speculations have a per-CPU buffer and a global state.  Once a speculation
 * buffer has been committed or discarded, it cannot be reused until all CPUs
 * have taken the same action (commit or discard) on their respective
 * speculative buffer.  However, because DTrace probes may execute in arbitrary
 * context, other CPUs cannot simply be cross-called at probe firing time to
 * perform the necessary commit or discard.  The speculation states thus
 * optimize for the case that a speculative buffer is only active on one CPU at
 * the time of a commit() or discard() -- for if this is the case, other CPUs
 * need not take action, and the speculation is immediately available for
 * reuse.  If the speculation is active on multiple CPUs, it must be
 * asynchronously cleaned -- potentially leading to a higher rate of dirty
 * speculative drops.  The speculation states are as follows:
 *
 *  DTRACESPEC_INACTIVE       <= Initial state; inactive speculation
 *  DTRACESPEC_ACTIVE         <= Allocated, but not yet speculatively traced to
 *  DTRACESPEC_ACTIVEONE      <= Speculatively traced to on one CPU
 *  DTRACESPEC_ACTIVEMANY     <= Speculatively traced to on more than one CPU
 *  DTRACESPEC_COMMITTING     <= Currently being committed on one CPU
 *  DTRACESPEC_COMMITTINGMANY <= Currently being committed on many CPUs
 *  DTRACESPEC_DISCARDING     <= Currently being discarded on many CPUs
 *
 * The state transition diagram is as follows:
 *
 *     +----------------------------------------------------------+
 *     |                                                          |
 *     |                      +------------+                      |
 *     |  +-------------------| COMMITTING |<-----------------+   |
 *     |  |                   +------------+                  |   |
 *     |  | copied spec.            ^             commit() on |   | discard() on
 *     |  | into principal          |              active CPU |   | active CPU
 *     |  |                         | commit()                |   |
 *     V  V                         |                         |   |
 * +----------+                 +--------+                +-----------+
 * | INACTIVE |---------------->| ACTIVE |--------------->| ACTIVEONE |
 * +----------+  speculation()  +--------+  speculate()   +-----------+
 *     ^  ^                         |                         |   |
 *     |  |                         | discard()               |   |
 *     |  | asynchronously          |            discard() on |   | speculate()
 *     |  | cleaned                 V            inactive CPU |   | on inactive
 *     |  |                   +------------+                  |   | CPU
 *     |  +-------------------| DISCARDING |<-----------------+   |
 *     |                      +------------+                      |
 *     | asynchronously             ^                             |
 *     | copied spec.               |       discard()             |
 *     | into principal             +------------------------+    |
 *     |                                                     |    V
 *  +----------------+             commit()              +------------+
 *  | COMMITTINGMANY |<----------------------------------| ACTIVEMANY |
 *  +----------------+                                   +------------+
 */
typedef enum dtrace_speculation_state {
	DTRACESPEC_INACTIVE = 0,
	DTRACESPEC_ACTIVE,
	DTRACESPEC_ACTIVEONE,
	DTRACESPEC_ACTIVEMANY,
	DTRACESPEC_COMMITTING,
	DTRACESPEC_COMMITTINGMANY,
	DTRACESPEC_DISCARDING
} dtrace_speculation_state_t;

typedef struct dtrace_speculation {
	dtrace_speculation_state_t dtsp_state;	/* current speculation state */
	int dtsp_cleaning;			/* non-zero if being cleaned */
	dtrace_buffer_t *dtsp_buffer;		/* speculative buffer */
} dtrace_speculation_t;
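
/*
 * Transitions among these states are made from probe context without locks,
 * using a compare-and-swap loop.  A minimal sketch of the commit() case,
 * assuming the shape of the code in dtrace_speculation_commit():
 *
 *	dtrace_speculation_state_t current, new;
 *
 *	do {
 *		current = spec->dtsp_state;
 *
 *		if (current == DTRACESPEC_ACTIVEONE)
 *			new = DTRACESPEC_COMMITTING;
 *		else if (current == DTRACESPEC_ACTIVEMANY)
 *			new = DTRACESPEC_COMMITTINGMANY;
 *		else
 *			break;		// nothing to commit on this CPU
 *	} while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
 *	    current, new) != current);
 */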

/*
 * DTrace Dynamic Variables
 *
 * The dynamic variable problem is obviously decomposed into two subproblems:
 * allocating new dynamic storage, and freeing old dynamic storage.  The
 * presence of the second problem makes the first much more complicated -- or
 * rather, the absence of the second renders the first trivial.  This is the
 * case with aggregations, for which there is effectively no deallocation of
 * dynamic storage.  (Or more accurately, all dynamic storage is deallocated
 * when a snapshot is taken of the aggregation.)  As DTrace dynamic variables
 * allow for both dynamic allocation and dynamic deallocation, the
 * implementation of dynamic variables is quite a bit more complicated than
 * that of their aggregation kin.
 *
 * We observe that allocating new dynamic storage is tricky only because the
 * size can vary -- the allocation problem is much easier if allocation sizes
 * are uniform.  We further observe that in D, the size of dynamic variables is
 * actually _not_ dynamic -- dynamic variable sizes may be determined by static
 * analysis of DIF text.  (This is true even of putatively dynamically-sized
 * objects like strings and stacks, the sizes of which are dictated by the
 * "stringsize" and "stackframes" variables, respectively.)  We exploit this by
 * performing this analysis on all DIF before enabling any probes.  For each
 * dynamic load or store, we calculate the dynamically-allocated size plus the
 * size of the dtrace_dynvar structure plus the storage required to key the
 * data.  For all DIF, we take the largest value and dub it the _chunksize_.
 * We then divide dynamic memory into two parts:  a hash table that is wide
 * enough to have every chunk in its own bucket, and a larger region of equal
 * chunksize units.  Whenever we wish to dynamically allocate a variable, we
 * always allocate a single chunk of memory.  Depending on the uniformity of
 * allocation, this will waste some amount of memory -- but it eliminates the
 * non-determinism inherent in traditional heap fragmentation.
 *
 * Dynamic objects are allocated by storing a non-zero value to them; they are
 * deallocated by storing a zero value to them.  Dynamic variables are
 * complicated enormously by being shared between CPUs.  In particular,
 * consider the following scenario:
 *
 *                 CPU A                                 CPU B
 *  +---------------------------------+   +---------------------------------+
 *  |                                 |   |                                 |
 *  | allocates dynamic object a[123] |   |                                 |
 *  | by storing the value 345 to it  |   |                                 |
 *  |                               --------->                              |
 *  |                                 |   | wishing to load from object     |
 *  |                                 |   | a[123], performs lookup in      |
 *  |                                 |   | dynamic variable space          |
 *  |                               <---------                              |
 *  | deallocates object a[123] by    |   |                                 |
 *  | storing 0 to it                 |   |                                 |
 *  |                                 |   |                                 |
 *  | allocates dynamic object b[567] |   | performs load from a[123]       |
 *  | by storing the value 789 to it  |   |                                 |
 *  :                                 :   :                                 :
 *  .                                 .   .                                 .
 *
 * This is obviously a race in the D program, but there are nonetheless only
 * two valid values for CPU B's load from a[123]:  345 or 0.  Most importantly,
 * CPU B may _not_ see the value 789 for a[123].
 *
 * There are essentially two ways to deal with this:
 *
 *  (1)  Explicitly spin-lock variables.  That is, if CPU B wishes to load
 *       from a[123], it needs to lock a[123] and hold the lock for the
 *       duration that it wishes to manipulate it.
 *
 *  (2)  Avoid reusing freed chunks until it is known that no CPU is referring
 *       to them.
 *
 * The implementation of (1) is rife with complexity, because it requires the
 * user of a dynamic variable to explicitly decree when they are done using it.
 * Were all variables by value, this perhaps wouldn't be debilitating -- but
 * dynamic variables of non-scalar types are tracked by reference.  That is, if
 * a dynamic variable is, say, a string, and that variable is to be traced to,
 * say, the principal buffer, the DIF emulation code returns to the main
 * dtrace_probe() loop a pointer to the underlying storage, not the contents of
 * the storage.  Further, code calling on DIF emulation would have to be aware
 * that the DIF emulation has returned a reference to a dynamic variable that
 * has been potentially locked.  The variable would have to be unlocked after
 * the main dtrace_probe() loop is finished with the variable, and the main
 * dtrace_probe() loop would have to be careful to not call any further DIF
 * emulation while the variable is locked to avoid deadlock.  More generally,
 * if one were to implement (1), DIF emulation code dealing with dynamic
 * variables could only deal with one dynamic variable at a time (lest deadlock
 * result).  To sum, (1) exports too much subtlety to the users of dynamic
 * variables -- increasing maintenance burden and imposing serious constraints
 * on future DTrace development.
 *
 * The implementation of (2) is also complex, but the complexity is more
 * manageable.  We need to be sure that when a variable is deallocated, it is
 * not placed on a traditional free list, but rather on a _dirty_ list.  Once a
 * variable is on a dirty list, it cannot be found by CPUs performing a
 * subsequent lookup of the variable -- but it may still be in use by other
 * CPUs.  To assure that all CPUs that may be seeing the old variable have
 * cleared out of probe context, a dtrace_sync() can be issued.  Once the
 * dtrace_sync() has completed, it can be known that all CPUs are done
 * manipulating the dynamic variable -- the dirty list can be atomically
 * appended to the free list.  Unfortunately, there's a slight hiccup in this
 * mechanism:  dtrace_sync() may not be issued from probe context.  The
 * dtrace_sync() must therefore be issued asynchronously from non-probe
 * context.  For this we rely on the DTrace cleaner, a cyclic that runs at the
 * "cleanrate" frequency.  To ease this implementation, we define several chunk
 * lists:
 *
 *   - Dirty.  Deallocated chunks, not yet cleaned.  Not available.
 *
 *   - Rinsing.  Formerly dirty chunks that are currently being asynchronously
 *     cleaned.  Not available, but will be shortly.  Dynamic variable
 *     allocation may not spin or block for availability, however.
 *
 *   - Clean.  Clean chunks, ready for allocation -- but not on the free list.
 *
 *   - Free.  Available for allocation.
 *
 * Moreover, to avoid absurd contention, _each_ of these lists is implemented
 * on a per-CPU basis.  This is only for performance, not correctness; chunks
 * may be allocated from another CPU's free list.  The algorithm for allocation
 * then is this:
 *
 *   (1)  Attempt to atomically allocate from current CPU's free list.  If list
 *        is non-empty and allocation is successful, allocation is complete.
 *
 *   (2)  If the clean list is non-empty, atomically move it to the free list,
 *        and reattempt (1).
 *
 *   (3)  If the dynamic variable space is in the CLEAN state, look for free
 *        and clean lists on other CPUs by setting the current CPU to the next
 *        CPU, and reattempting (1).  If the next CPU is the current CPU (that
 *        is, if all CPUs have been checked), atomically switch the state of
 *        the dynamic variable space based on the following:
 *
 *        - If no free chunks were found and no dirty chunks were found,
 *          atomically set the state to EMPTY.
 *
 *        - If dirty chunks were found, atomically set the state to DIRTY.
 *
 *        - If rinsing chunks were found, atomically set the state to RINSING.
 *
 *   (4)  Based on the state of the dynamic variable space, increment the
 *        appropriate counter to indicate dynamic drops (if in EMPTY state)
 *        vs. dynamic dirty drops (if in DIRTY state) vs. dynamic rinsing
 *        drops (if in RINSING state).  Fail the allocation.
 *
 * The cleaning cyclic operates with the following algorithm:  for all CPUs
 * with a non-empty dirty list, atomically move the dirty list to the rinsing
 * list.  Perform a dtrace_sync().  For all CPUs with a non-empty rinsing list,
 * atomically move the rinsing list to the clean list.  Perform another
 * dtrace_sync().  By this point, all CPUs have seen the new clean list; the
 * state of the dynamic variable space can be restored to CLEAN.
 *
 * There exist two final races that merit explanation.  The first is a simple
 * allocation race:
 *
 *                 CPU A                                 CPU B
 *  +---------------------------------+   +---------------------------------+
 *  |                                 |   |                                 |
 *  | allocates dynamic object a[123] |   | allocates dynamic object a[123] |
 *  | by storing the value 345 to it  |   | by storing the value 567 to it  |
 *  |                                 |   |                                 |
 *  :                                 :   :                                 :
 *  .                                 .   .                                 .
 *
 * Again, this is a race in the D program.  It can be resolved by having a[123]
 * hold the value 345 or a[123] hold the value 567 -- but it must be true that
 * a[123] have only _one_ of these values.  (That is, the racing CPUs may not
 * put the same element twice on the same hash chain.)  This is resolved
 * simply:  before the allocation is undertaken, the start of the new chunk's
 * hash chain is noted.  Later, after the allocation is complete, the hash
 * chain is atomically switched to point to the new element.  If this fails
 * (because of either concurrent allocations or an allocation concurrent with a
 * deletion), the newly allocated chunk is deallocated to the dirty list, and
 * the whole process of looking up (and potentially allocating) the dynamic
 * variable is reattempted.
 *
 * The final race is a simple deallocation race:
 *
 *                 CPU A                                 CPU B
 *  +---------------------------------+   +---------------------------------+
 *  |                                 |   |                                 |
 *  | deallocates dynamic object      |   | deallocates dynamic object      |
 *  | a[123] by storing the value 0   |   | a[123] by storing the value 0   |
 *  | to it                           |   | to it                           |
 *  |                                 |   |                                 |
 *  :                                 :   :                                 :
 *  .                                 .   .                                 .
 *
 * Once again, this is a race in the D program, but it is one that we must
 * handle without corrupting the underlying data structures.  Because
 * deallocations require the deletion of a chunk from the middle of a hash
 * chain, we cannot use a single-word atomic operation to remove it.  For this,
 * we add a spin lock to the hash buckets that is _only_ used for deallocations
 * (allocation races are handled as above).  Further, this spin lock is _only_
 * held for the duration of the delete; before control is returned to the DIF
 * emulation code, the hash bucket is unlocked.
 */
typedef struct dtrace_key {
	uint64_t dttk_value;			/* data value or data pointer */
	uint64_t dttk_size;			/* 0 if by-val, >0 if by-ref */
} dtrace_key_t;

typedef struct dtrace_tuple {
	uint32_t dtt_nkeys;			/* number of keys in tuple */
	uint32_t dtt_pad;			/* padding */
	dtrace_key_t dtt_key[1];		/* array of tuple keys */
} dtrace_tuple_t;

typedef struct dtrace_dynvar {
	uint64_t dtdv_hashval;			/* hash value -- 0 if free */
	struct dtrace_dynvar *dtdv_next;	/* next on list or hash chain */
	void *dtdv_data;			/* pointer to data */
	dtrace_tuple_t dtdv_tuple;		/* tuple key */
} dtrace_dynvar_t;
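
/*
 * Allocation step (1) of the algorithm above -- the atomic pop from the
 * current CPU's free list -- reduces to a compare-and-swap loop of roughly
 * this shape (cf. dtrace_dynvar() in dtrace.c):
 *
 *	dtrace_dynvar_t *free, *new_free;
 *
 *	do {
 *		if ((free = dcpu->dtdsc_free) == NULL)
 *			break;		// fall back to the clean list
 *
 *		new_free = free->dtdv_next;
 *	} while (dtrace_casptr(&dcpu->dtdsc_free, free, new_free) != free);
 */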

typedef enum dtrace_dynvar_op {
	DTRACE_DYNVAR_ALLOC,
	DTRACE_DYNVAR_NOALLOC,
	DTRACE_DYNVAR_DEALLOC
} dtrace_dynvar_op_t;

typedef struct dtrace_dynhash {
	dtrace_dynvar_t *dtdh_chain;		/* hash chain for this bucket */
	uintptr_t dtdh_lock;			/* deallocation lock */
#ifdef _LP64
	uintptr_t dtdh_pad[6];			/* pad to avoid false sharing */
#else
	uintptr_t dtdh_pad[14];			/* pad to avoid false sharing */
#endif
} dtrace_dynhash_t;

typedef struct dtrace_dstate_percpu {
	dtrace_dynvar_t *dtdsc_free;		/* free list for this CPU */
	dtrace_dynvar_t *dtdsc_dirty;		/* dirty list for this CPU */
	dtrace_dynvar_t *dtdsc_rinsing;		/* rinsing list for this CPU */
	dtrace_dynvar_t *dtdsc_clean;		/* clean list for this CPU */
	uint64_t dtdsc_drops;			/* number of capacity drops */
	uint64_t dtdsc_dirty_drops;		/* number of dirty drops */
	uint64_t dtdsc_rinsing_drops;		/* number of rinsing drops */
#ifdef _LP64
	uint64_t dtdsc_pad;			/* pad to avoid false sharing */
#else
	uint64_t dtdsc_pad[2];			/* pad to avoid false sharing */
#endif
} dtrace_dstate_percpu_t;

typedef enum dtrace_dstate_state {
	DTRACE_DSTATE_CLEAN = 0,
	DTRACE_DSTATE_EMPTY,
	DTRACE_DSTATE_DIRTY,
	DTRACE_DSTATE_RINSING
} dtrace_dstate_state_t;

typedef struct dtrace_dstate {
	void *dtds_base;			/* base of dynamic var. space */
	size_t dtds_size;			/* size of dynamic var. space */
	size_t dtds_hashsize;			/* number of buckets in hash */
	size_t dtds_chunksize;			/* size of each chunk */
	dtrace_dynhash_t *dtds_hash;		/* pointer to hash table */
	dtrace_dstate_state_t dtds_state;	/* current dynamic var. state */
	dtrace_dstate_percpu_t *dtds_percpu;	/* per-CPU dyn. var. state */
} dtrace_dstate_t;
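
/*
 * A condensed sketch of the cleaning cyclic described above (cf.
 * dtrace_dynvar_clean() in dtrace.c).  The real code transplants the dirty
 * list with dtrace_casptr(), since probe context may be appending to it
 * concurrently; that detail and the state restoration to DTRACE_DSTATE_CLEAN
 * are omitted here:
 *
 *	for (i = 0; i < NCPU; i++) {		// dirty -> rinsing
 *		dcpu = &dstate->dtds_percpu[i];
 *		if (dcpu->dtdsc_dirty == NULL)
 *			continue;
 *		dcpu->dtdsc_rinsing = dcpu->dtdsc_dirty;
 *		dcpu->dtdsc_dirty = NULL;
 *	}
 *
 *	dtrace_sync();		// wait for all CPUs to leave probe context
 *
 *	for (i = 0; i < NCPU; i++) {		// rinsing -> clean
 *		dcpu = &dstate->dtds_percpu[i];
 *		dcpu->dtdsc_clean = dcpu->dtdsc_rinsing;
 *		dcpu->dtdsc_rinsing = NULL;
 *	}
 *
 *	dtrace_sync();		// all CPUs now see the new clean lists
 */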

/*
 * DTrace Variable State
 *
 * The DTrace variable state tracks user-defined variables in its dtrace_vstate
 * structure.  Each DTrace consumer has exactly one dtrace_vstate structure,
 * but some dtrace_vstate structures may exist without a corresponding DTrace
 * consumer (see "DTrace Helpers", below).  As described in <sys/dtrace.h>,
 * user-defined variables can have one of three scopes:
 *
 *  DIFV_SCOPE_GLOBAL  =>  global scope
 *  DIFV_SCOPE_THREAD  =>  thread-local scope (i.e. "self->" variables)
 *  DIFV_SCOPE_LOCAL   =>  clause-local scope (i.e. "this->" variables)
 *
 * The variable state tracks variables by both their scope and their allocation
 * type:
 *
 *  - The dtvs_globals and dtvs_locals members each point to an array of
 *    dtrace_statvar structures.  These structures contain both the variable
 *    metadata (dtrace_difv structures) and the underlying storage for all
 *    statically allocated variables, including statically allocated
 *    DIFV_SCOPE_GLOBAL variables and all DIFV_SCOPE_LOCAL variables.
 *
 *  - The dtvs_tlocals member points to an array of dtrace_difv structures for
 *    DIFV_SCOPE_THREAD variables.  As such, this array tracks _only_ the
 *    variable metadata for DIFV_SCOPE_THREAD variables; the underlying storage
 *    is allocated out of the dynamic variable space.
 *
 *  - The dtvs_dynvars member is the dynamic variable state associated with the
 *    variable state.  The dynamic variable state (described in "DTrace Dynamic
 *    Variables", above) tracks all DIFV_SCOPE_THREAD variables and all
 *    dynamically-allocated DIFV_SCOPE_GLOBAL variables.
 */
typedef struct dtrace_statvar {
	uint64_t dtsv_data;			/* data or pointer to it */
	size_t dtsv_size;			/* size of pointed-to data */
	int dtsv_refcnt;			/* reference count */
	dtrace_difv_t dtsv_var;			/* variable metadata */
} dtrace_statvar_t;

typedef struct dtrace_vstate {
	dtrace_state_t *dtvs_state;		/* back pointer to state */
	dtrace_statvar_t **dtvs_globals;	/* statically-allocated glbls */
	int dtvs_nglobals;			/* number of globals */
	dtrace_difv_t *dtvs_tlocals;		/* thread-local metadata */
	int dtvs_ntlocals;			/* number of thread-locals */
	dtrace_statvar_t **dtvs_locals;		/* clause-local data */
	int dtvs_nlocals;			/* number of clause-locals */
	dtrace_dstate_t dtvs_dynvars;		/* dynamic variable state */
} dtrace_vstate_t;

/*
 * DTrace Machine State
 *
 * While processing a fired probe, DTrace needs to track and/or cache some
 * per-CPU state associated with that particular firing.  This is state that
 * is always discarded after the probe firing has completed, and much of it
 * is not specific to any DTrace consumer, remaining valid across all ECBs.
 * This state is tracked in the dtrace_mstate structure.
 */
#define	DTRACE_MSTATE_ARGS		0x00000001
#define	DTRACE_MSTATE_PROBE		0x00000002
#define	DTRACE_MSTATE_EPID		0x00000004
#define	DTRACE_MSTATE_TIMESTAMP		0x00000008
#define	DTRACE_MSTATE_STACKDEPTH	0x00000010
#define	DTRACE_MSTATE_CALLER		0x00000020
#define	DTRACE_MSTATE_IPL		0x00000040
#define	DTRACE_MSTATE_FLTOFFS		0x00000080
#define	DTRACE_MSTATE_WALLTIMESTAMP	0x00000100
#define	DTRACE_MSTATE_USTACKDEPTH	0x00000200
#define	DTRACE_MSTATE_UCALLER		0x00000400

typedef struct dtrace_mstate {
	uintptr_t dtms_scratch_base;		/* base of scratch space */
	uintptr_t dtms_scratch_ptr;		/* current scratch pointer */
	size_t dtms_scratch_size;		/* scratch size */
	uint32_t dtms_present;			/* variables that are present */
	uint64_t dtms_arg[5];			/* cached arguments */
	dtrace_epid_t dtms_epid;		/* current EPID */
	uint64_t dtms_timestamp;		/* cached timestamp */
	hrtime_t dtms_walltimestamp;		/* cached wall timestamp */
	int dtms_stackdepth;			/* cached stackdepth */
	int dtms_ustackdepth;			/* cached ustackdepth */
	struct dtrace_probe *dtms_probe;	/* current probe */
	uintptr_t dtms_caller;			/* cached caller */
	uint64_t dtms_ucaller;			/* cached user-level caller */
	int dtms_ipl;				/* cached interrupt pri lev */
	int dtms_fltoffs;			/* faulting DIFO offset */
	uintptr_t dtms_strtok;			/* saved strtok() pointer */
	uintptr_t dtms_strtok_limit;		/* upper bound of strtok ptr */
	uint32_t dtms_access;			/* memory access rights */
	dtrace_difo_t *dtms_difo;		/* current dif object */
	file_t *dtms_getf;			/* cached rval of getf() */
} dtrace_mstate_t;
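
/*
 * Scratch allocation (see "DTrace Scratch Buffering" above) is a simple
 * pointer bump against the fields tracked here; DIF subroutines in dtrace.c
 * follow roughly this pattern:
 *
 *	if (mstate->dtms_scratch_base + mstate->dtms_scratch_size -
 *	    mstate->dtms_scratch_ptr < size) {
 *		// insufficient scratch: flag the fault and fail
 *		DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
 *		return (0);
 *	}
 *
 *	ptr = mstate->dtms_scratch_ptr;
 *	mstate->dtms_scratch_ptr += size;
 *	// the allocation is implicitly freed when the ECB ceases processing
 */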

#define	DTRACE_COND_OWNER	0x1
#define	DTRACE_COND_USERMODE	0x2
#define	DTRACE_COND_ZONEOWNER	0x4

#define	DTRACE_PROBEKEY_MAXDEPTH	8	/* max glob recursion depth */

/*
 * Access flag used by dtrace_mstate.dtms_access.
 */
#define	DTRACE_ACCESS_KERNEL	0x1		/* the priv to read kmem */


/*
 * DTrace Activity
 *
 * Each DTrace consumer is in one of several states, which (for purposes of
 * avoiding yet-another overloading of the noun "state") we call the current
 * _activity_.  The activity transitions on dtrace_go() (from DTRACIOCGO), on
 * dtrace_stop() (from DTRACIOCSTOP) and on the exit() action.  Activities may
 * only transition in one direction; the activity transition diagram is a
 * directed acyclic graph.  The activity transition diagram is as follows:
 *
 *
 * +----------+                   +--------+                   +--------+
 * | INACTIVE |------------------>| WARMUP |------------------>| ACTIVE |
 * +----------+   dtrace_go(),    +--------+   dtrace_go(),    +--------+
 *                before BEGIN        |        after BEGIN       |  |  |
 *                                    |                          |  |  |
 *                      exit() action |                          |  |  |
 *                     from BEGIN ECB |                          |  |  |
 *                                    |                          |  |  |
 *                                    v                          |  |  |
 *                               +----------+     exit() action  |  |  |
 * +-----------------------------| DRAINING |<-------------------+  |  |
 * |                             +----------+                       |  |
 * |                                  |                             |  |
 * |                   dtrace_stop(), |                             |  |
 * |                     before END   |                             |  |
 * |                                  |                             |  |
 * |                                  v                             |  |
 * | +---------+                 +----------+                       |  |
 * | | STOPPED |<----------------| COOLDOWN |<----------------------+  |
 * | +---------+  dtrace_stop(), +----------+     dtrace_stop(),       |
 * |                after END                       before END         |
 * |                                                                   |
 * |                              +--------+                           |
 * +----------------------------->| KILLED |<--------------------------+
 *       deadman timeout or       +--------+     deadman timeout or
 *        killed consumer                         killed consumer
 *
 * Note that once a DTrace consumer has stopped tracing, there is no way to
 * restart it; if a DTrace consumer wishes to restart tracing, it must reopen
 * the DTrace pseudodevice.
 */
typedef enum dtrace_activity {
	DTRACE_ACTIVITY_INACTIVE = 0,		/* not yet running */
	DTRACE_ACTIVITY_WARMUP,			/* while starting */
	DTRACE_ACTIVITY_ACTIVE,			/* running */
	DTRACE_ACTIVITY_DRAINING,		/* before stopping */
	DTRACE_ACTIVITY_COOLDOWN,		/* while stopping */
	DTRACE_ACTIVITY_STOPPED,		/* after stopping */
	DTRACE_ACTIVITY_KILLED			/* killed */
} dtrace_activity_t;
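
/*
 * Because the transition diagram above is acyclic, an activity change from
 * probe context can be made with a single forward-moving compare-and-swap.
 * A minimal sketch of the idea -- the shape, not the literal kernel code --
 * for the exit() action moving an ACTIVE consumer to DRAINING:
 *
 *	uint32_t *activity = (uint32_t *)&state->dts_activity;
 *	dtrace_activity_t current = state->dts_activity;
 *
 *	if (current == DTRACE_ACTIVITY_ACTIVE &&
 *	    dtrace_cas32(activity, current,
 *	    DTRACE_ACTIVITY_DRAINING) == current) {
 *		// the consumer is now draining; END will fire on
 *		// the subsequent dtrace_stop()
 *	}
 */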
1009
1010/*
1011 * DTrace Helper Implementation
1012 *
1013 * A description of the helper architecture may be found in <sys/dtrace.h>.
1014 * Each process contains a pointer to its helpers in its p_dtrace_helpers
1015 * member.  This is a pointer to a dtrace_helpers structure, which contains an
1016 * array of pointers to dtrace_helper structures, helper variable state (shared
1017 * among a process's helpers) and a generation count.  (The generation count is
1018 * used to provide an identifier when a helper is added so that it may be
1019 * subsequently removed.)  The dtrace_helper structure is self-explanatory,
1020 * containing pointers to the objects needed to execute the helper.  Note that
1021 * helpers are _duplicated_ across fork(2), and destroyed on exec(2).  No more
1022 * than dtrace_helpers_max are allowed per-process.
1023 */
1024#define	DTRACE_HELPER_ACTION_USTACK	0
1025#define	DTRACE_NHELPER_ACTIONS		1
1026
1027typedef struct dtrace_helper_action {
1028	int dtha_generation;			/* helper action generation */
1029	int dtha_nactions;			/* number of actions */
1030	dtrace_difo_t *dtha_predicate;		/* helper action predicate */
1031	dtrace_difo_t **dtha_actions;		/* array of actions */
1032	struct dtrace_helper_action *dtha_next;	/* next helper action */
1033} dtrace_helper_action_t;
1034
1035typedef struct dtrace_helper_provider {
1036	int dthp_generation;			/* helper provider generation */
1037	uint32_t dthp_ref;			/* reference count */
1038	dof_helper_t dthp_prov;			/* DOF w/ provider and probes */
1039} dtrace_helper_provider_t;
1040
1041typedef struct dtrace_helpers {
1042	dtrace_helper_action_t **dthps_actions;	/* array of helper actions */
1043	dtrace_vstate_t dthps_vstate;		/* helper action var. state */
1044	dtrace_helper_provider_t **dthps_provs;	/* array of providers */
1045	uint_t dthps_nprovs;			/* count of providers */
1046	uint_t dthps_maxprovs;			/* provider array size */
1047	int dthps_generation;			/* current generation */
1048	pid_t dthps_pid;			/* pid of associated proc */
1049	int dthps_deferred;			/* helper in deferred list */
1050	struct dtrace_helpers *dthps_next;	/* next pointer */
1051	struct dtrace_helpers *dthps_prev;	/* prev pointer */
1052} dtrace_helpers_t;

/*
 * DTrace Helper Action Tracing
 *
 * Debugging helper actions can be arduous.  To ease the development and
 * debugging of helpers, DTrace contains a tracing-framework-within-a-tracing-
 * framework: helper tracing.  If dtrace_helptrace_enabled is non-zero (which
 * it is by default on DEBUG kernels), all helper activity will be traced to a
 * global, in-kernel ring buffer.  Each entry includes a pointer to the specific
 * helper, the location within the helper, and a trace of all local variables.
 * The ring buffer may be displayed in a human-readable format with the
 * ::dtrace_helptrace mdb(1) dcmd.
 */
#define	DTRACE_HELPTRACE_NEXT	(-1)
#define	DTRACE_HELPTRACE_DONE	(-2)
#define	DTRACE_HELPTRACE_ERR	(-3)

typedef struct dtrace_helptrace {
	dtrace_helper_action_t	*dtht_helper;	/* helper action */
	int dtht_where;				/* where in helper action */
	int dtht_nlocals;			/* number of locals */
	int dtht_fault;				/* type of fault (if any) */
	int dtht_fltoffs;			/* DIF offset */
	uint64_t dtht_illval;			/* faulting value */
	uint64_t dtht_locals[1];		/* local variables */
} dtrace_helptrace_t;
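
/*
 * Since dtht_locals is a one-element trailing array, the size of a ring
 * buffer entry follows from the structure; a sketch, assuming the buffer
 * packs variable-length entries using the standard trailing-array idiom
 * (where "nlocals" is the number of locals captured):
 *
 *	size_t size = sizeof (dtrace_helptrace_t) +
 *	    (nlocals - 1) * sizeof (uint64_t);
 */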

/*
 * DTrace Credentials
 *
 * In probe context, we have limited flexibility to examine the credentials
 * of the DTrace consumer that created a particular enabling.  We use
 * the Least Privilege interfaces to cache the consumer's cred pointer and
 * some facts about that credential in a dtrace_cred_t structure.  These
 * can limit the consumer's breadth of visibility and what actions the
 * consumer may take.
 */
#define	DTRACE_CRV_ALLPROC		0x01
#define	DTRACE_CRV_KERNEL		0x02
#define	DTRACE_CRV_ALLZONE		0x04

#define	DTRACE_CRV_ALL		(DTRACE_CRV_ALLPROC | DTRACE_CRV_KERNEL | \
	DTRACE_CRV_ALLZONE)

#define	DTRACE_CRA_PROC				0x0001
#define	DTRACE_CRA_PROC_CONTROL			0x0002
#define	DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER	0x0004
#define	DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE	0x0008
#define	DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG	0x0010
#define	DTRACE_CRA_KERNEL			0x0020
#define	DTRACE_CRA_KERNEL_DESTRUCTIVE		0x0040

#define	DTRACE_CRA_ALL		(DTRACE_CRA_PROC | \
	DTRACE_CRA_PROC_CONTROL | \
	DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER | \
	DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE | \
	DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG | \
	DTRACE_CRA_KERNEL | \
	DTRACE_CRA_KERNEL_DESTRUCTIVE)

typedef struct dtrace_cred {
	cred_t			*dcr_cred;
	uint8_t			dcr_destructive;
	uint8_t			dcr_visible;
	uint16_t		dcr_action;
} dtrace_cred_t;
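
/*
 * Illustrative sketch (not the implementation): probe-context code can
 * consult the cached visibility bits rather than the full credential.
 * "state" is a hypothetical dtrace_state_t pointer (the structure is
 * defined below); this test asks whether the consumer may observe
 * processes other than its own:
 *
 *	if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLPROC)) {
 *		...restrict visibility to the consumer's own processes...
 *	}
 */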

/*
 * DTrace Consumer State
 *
 * Each DTrace consumer has an associated dtrace_state structure that contains
 * its in-kernel DTrace state -- including options, credentials, statistics and
 * pointers to ECBs, buffers, speculations and formats.  A dtrace_state
 * structure is also allocated for anonymous enablings.  When anonymous state
 * is grabbed, the grabbing consumer's dts_anon pointer is set to the grabbed
 * dtrace_state structure.
 */
struct dtrace_state {
#ifdef illumos
	dev_t dts_dev;				/* device */
#else
	struct cdev *dts_dev;			/* device */
#endif
	int dts_necbs;				/* total number of ECBs */
	dtrace_ecb_t **dts_ecbs;		/* array of ECBs */
	dtrace_epid_t dts_epid;			/* next EPID to allocate */
	size_t dts_needed;			/* greatest needed space */
	struct dtrace_state *dts_anon;		/* anon. state, if grabbed */
	dtrace_activity_t dts_activity;		/* current activity */
	dtrace_vstate_t dts_vstate;		/* variable state */
	dtrace_buffer_t *dts_buffer;		/* principal buffer */
	dtrace_buffer_t *dts_aggbuffer;		/* aggregation buffer */
	dtrace_speculation_t *dts_speculations;	/* speculation array */
	int dts_nspeculations;			/* number of speculations */
	int dts_naggregations;			/* number of aggregations */
	dtrace_aggregation_t **dts_aggregations; /* aggregation array */
#ifdef illumos
	vmem_t *dts_aggid_arena;		/* arena for aggregation IDs */
#else
	struct unrhdr *dts_aggid_arena;		/* arena for aggregation IDs */
#endif
	uint64_t dts_errors;			/* total number of errors */
	uint32_t dts_speculations_busy;		/* number of spec. busy */
	uint32_t dts_speculations_unavail;	/* number of spec. unavail */
	uint32_t dts_stkstroverflows;		/* stack string tab overflows */
	uint32_t dts_dblerrors;			/* errors in ERROR probes */
	uint32_t dts_reserve;			/* space reserved for END */
	hrtime_t dts_laststatus;		/* time of last status */
#ifdef illumos
	cyclic_id_t dts_cleaner;		/* cleaning cyclic */
	cyclic_id_t dts_deadman;		/* deadman cyclic */
#else
	struct callout dts_cleaner;		/* cleaning callout */
	struct callout dts_deadman;		/* deadman callout */
#endif
	hrtime_t dts_alive;			/* time last alive */
	char dts_speculates;			/* boolean: has speculations */
	char dts_destructive;			/* boolean: has dest. actions */
	int dts_nformats;			/* number of formats */
	char **dts_formats;			/* format string array */
	dtrace_optval_t dts_options[DTRACEOPT_MAX]; /* options */
	dtrace_cred_t dts_cred;			/* credentials */
	size_t dts_nretained;			/* number of retained enabs */
	int dts_getf;				/* number of getf() calls */
	uint64_t dts_rstate[NCPU][2];		/* per-CPU random state */
};
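
/*
 * Example (a sketch, not normative): per-consumer option values live in
 * the dts_options array, indexed by the option identifiers defined in
 * <sys/dtrace.h>; DTRACEOPT_BUFSIZE is used here purely for illustration:
 *
 *	dtrace_optval_t bufsize = state->dts_options[DTRACEOPT_BUFSIZE];
 */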

struct dtrace_provider {
	dtrace_pattr_t dtpv_attr;		/* provider attributes */
	dtrace_ppriv_t dtpv_priv;		/* provider privileges */
	dtrace_pops_t dtpv_pops;		/* provider operations */
	char *dtpv_name;			/* provider name */
	void *dtpv_arg;				/* provider argument */
	hrtime_t dtpv_defunct;			/* when made defunct */
	struct dtrace_provider *dtpv_next;	/* next provider */
};

struct dtrace_meta {
	dtrace_mops_t dtm_mops;			/* meta provider operations */
	char *dtm_name;				/* meta provider name */
	void *dtm_arg;				/* meta provider user arg */
	uint64_t dtm_count;			/* no. of associated provs. */
};

/*
 * DTrace Enablings
 *
 * A dtrace_enabling structure is used to track a collection of ECB
 * descriptions -- before they have been turned into actual ECBs.  This is
 * created as a result of DOF processing, and is generally used to generate
 * ECBs immediately thereafter.  However, enablings are also generally
 * retained should the probes they describe be created at a later time; as
 * each new module or provider registers with the framework, the retained
 * enablings are reevaluated, with any new match resulting in new ECBs.  To
 * prevent probes from being matched more than once, the enabling tracks the
 * last probe generation matched, and only matches probes from subsequent
 * generations.
 */
typedef struct dtrace_enabling {
	dtrace_ecbdesc_t **dten_desc;		/* all ECB descriptions */
	int dten_ndesc;				/* number of ECB descriptions */
	int dten_maxdesc;			/* size of ECB array */
	dtrace_vstate_t *dten_vstate;		/* associated variable state */
	dtrace_genid_t dten_probegen;		/* matched probe generation */
	dtrace_ecbdesc_t *dten_current;		/* current ECB description */
	int dten_error;				/* current error value */
	int dten_primed;			/* boolean: set if primed */
	struct dtrace_enabling *dten_prev;	/* previous enabling */
	struct dtrace_enabling *dten_next;	/* next enabling */
} dtrace_enabling_t;
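
/*
 * A sketch of the generation check described above (illustrative only;
 * "probe_gen" and "current_gen" are hypothetical names, not identifiers
 * defined by this header): on reevaluation, an enabling considers only
 * probes created after dten_probegen, then advances its generation:
 *
 *	if (probe_gen > enab->dten_probegen) {
 *		...create ECBs for this newly matched probe...
 *	}
 *	enab->dten_probegen = current_gen;
 */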

/*
 * DTrace Anonymous Enablings
 *
 * Anonymous enablings are DTrace enablings that are not associated with a
 * controlling process, but rather derive their enabling from DOF stored as
 * properties in the dtrace.conf file.  If there is an anonymous enabling, a
 * DTrace consumer state and enabling are created on attach.  The state may be
 * subsequently grabbed by the first consumer specifying the "grabanon"
 * option.  As long as an anonymous DTrace enabling exists, dtrace(7D) will
 * refuse to unload.
 */
typedef struct dtrace_anon {
	dtrace_state_t *dta_state;		/* DTrace consumer state */
	dtrace_enabling_t *dta_enabling;	/* pointer to enabling */
	processorid_t dta_beganon;		/* which CPU BEGIN ran on */
} dtrace_anon_t;

/*
 * DTrace Error Debugging
 */
#ifdef DEBUG
#define	DTRACE_ERRDEBUG
#endif

#ifdef DTRACE_ERRDEBUG

typedef struct dtrace_errhash {
	const char	*dter_msg;	/* error message */
	int		dter_count;	/* number of times seen */
} dtrace_errhash_t;

#define	DTRACE_ERRHASHSZ	256	/* must be > number of err msgs */

#endif	/* DTRACE_ERRDEBUG */
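
/*
 * A sketch of how an error message might be counted (an assumption for
 * illustration -- the hash function and the table name "dtrace_errhash"
 * are not defined in this header):
 *
 *	uint_t hval = 0;
 *	const char *c;
 *
 *	for (c = msg; *c != '\0'; c++)
 *		hval = (hval << 4) + *c;
 *
 *	dtrace_errhash[hval % DTRACE_ERRHASHSZ].dter_count++;
 */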

/*
 * DTrace Toxic Ranges
 *
 * DTrace supports safe loads from probe context; if the address turns out to
 * be invalid, a bit will be set by the kernel indicating that DTrace
 * encountered a memory error, and DTrace will propagate the error to the user
 * accordingly.  However, there may exist some regions of memory in which an
 * arbitrary load can change system state, and from which it is impossible to
 * recover after such a load has been attempted.  Examples include memory in
 * which programmable I/O registers are mapped (for which a read may have side
 * effects on the device) or (in the specific case of UltraSPARC-I and -II)
 * the virtual address hole.  The platform is required to make DTrace aware of
 * these toxic ranges; DTrace will then check that target addresses are not in
 * a toxic range before attempting to issue a safe load.
 */
typedef struct dtrace_toxrange {
	uintptr_t	dtt_base;		/* base of toxic range */
	uintptr_t	dtt_limit;		/* limit of toxic range */
} dtrace_toxrange_t;
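
/*
 * A minimal sketch of the check described above, assuming a registered
 * table of ranges named "dtrace_toxrange" with "dtrace_toxranges" entries
 * (both hypothetical here; the real check is performed by the framework
 * before each safe load):
 *
 *	for (i = 0; i < dtrace_toxranges; i++) {
 *		if (addr >= dtrace_toxrange[i].dtt_base &&
 *		    addr < dtrace_toxrange[i].dtt_limit)
 *			return (0);
 *	}
 *
 * A hit means the load must be refused rather than attempted.
 */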

#ifdef illumos
extern uint64_t dtrace_getarg(int, int);
#else
extern uint64_t __noinline dtrace_getarg(int, int);
#endif
extern greg_t dtrace_getfp(void);
extern int dtrace_getipl(void);
extern uintptr_t dtrace_caller(int);
extern uint32_t dtrace_cas32(uint32_t *, uint32_t, uint32_t);
extern void *dtrace_casptr(volatile void *, volatile void *, volatile void *);
extern void dtrace_copyin(uintptr_t, uintptr_t, size_t, volatile uint16_t *);
extern void dtrace_copyinstr(uintptr_t, uintptr_t, size_t, volatile uint16_t *);
extern void dtrace_copyout(uintptr_t, uintptr_t, size_t, volatile uint16_t *);
extern void dtrace_copyoutstr(uintptr_t, uintptr_t, size_t,
    volatile uint16_t *);
extern void dtrace_getpcstack(pc_t *, int, int, uint32_t *);
extern ulong_t dtrace_getreg(struct trapframe *, uint_t);
extern int dtrace_getstackdepth(int);
extern void dtrace_getupcstack(uint64_t *, int);
extern void dtrace_getufpstack(uint64_t *, uint64_t *, int);
extern int dtrace_getustackdepth(void);
extern uintptr_t dtrace_fulword(void *);
extern uint8_t dtrace_fuword8(void *);
extern uint16_t dtrace_fuword16(void *);
extern uint32_t dtrace_fuword32(void *);
extern uint64_t dtrace_fuword64(void *);
extern void dtrace_probe_error(dtrace_state_t *, dtrace_epid_t, int, int,
    int, uintptr_t);
extern int dtrace_assfail(const char *, const char *, int);
extern int dtrace_attached(void);
#ifdef illumos
extern hrtime_t dtrace_gethrestime(void);
#endif

#ifdef __sparc
extern void dtrace_flush_windows(void);
extern void dtrace_flush_user_windows(void);
extern uint_t dtrace_getotherwin(void);
extern uint_t dtrace_getfprs(void);
#else
extern void dtrace_copy(uintptr_t, uintptr_t, size_t);
extern void dtrace_copystr(uintptr_t, uintptr_t, size_t, volatile uint16_t *);
#endif

/*
 * DTrace Assertions
 *
 * DTrace calls ASSERT and VERIFY from probe context.  To ensure that a failed
 * ASSERT or VERIFY does not induce a markedly more catastrophic failure (e.g.,
 * one from which a dump cannot be gleaned), DTrace must define its own ASSERT
 * and VERIFY macros to be ones that may safely be called from probe context.
 * This header file must thus be included by any DTrace component that calls
 * ASSERT and/or VERIFY from probe context, and _only_ by those components.
 * (The only exception to this is kernel debugging infrastructure at user-level
 * that doesn't depend on calling ASSERT.)
 */
#undef ASSERT
#undef VERIFY
#define	VERIFY(EX)	((void)((EX) || \
			dtrace_assfail(#EX, __FILE__, __LINE__)))
#ifdef DEBUG
#define	ASSERT(EX)	((void)((EX) || \
			dtrace_assfail(#EX, __FILE__, __LINE__)))
#else
#define	ASSERT(EX)	((void)0)
#endif
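
/*
 * Usage example (illustrative; "dtrace_lock" and "buf" are hypothetical):
 * components including this header assert invariants just as they would
 * with the stock macros, e.g.:
 *
 *	ASSERT(MUTEX_HELD(&dtrace_lock));
 *	VERIFY(buf != NULL);
 *
 * the difference being that a failure is routed through dtrace_assfail(),
 * which may safely be called from probe context.
 */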

#ifdef	__cplusplus
}
#endif

#endif /* _SYS_DTRACE_IMPL_H */