xref: /illumos-gate/usr/src/uts/common/dtrace/dtrace.c (revision 56a20711c16596906185d9bfb85f8a4b02e93fc6)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Copyright (c) 2017, Joyent, Inc.
25  * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
26  */
27 
28 /*
29  * DTrace - Dynamic Tracing for Solaris
30  *
31  * This is the implementation of the Solaris Dynamic Tracing framework
32  * (DTrace).  The user-visible interface to DTrace is described at length in
33  * the "Solaris Dynamic Tracing Guide".  The interfaces between the libdtrace
34  * library, the in-kernel DTrace framework, and the DTrace providers are
35  * described in the block comments in the <sys/dtrace.h> header file.  The
36  * internal architecture of DTrace is described in the block comments in the
37  * <sys/dtrace_impl.h> header file.  The comments contained within the DTrace
38  * implementation very much assume mastery of all of these sources; if one has
39  * an unanswered question about the implementation, one should consult them
40  * first.
41  *
42  * The functions here are ordered roughly as follows:
43  *
44  *   - Probe context functions
45  *   - Probe hashing functions
46  *   - Non-probe context utility functions
47  *   - Matching functions
48  *   - Provider-to-Framework API functions
49  *   - Probe management functions
50  *   - DIF object functions
51  *   - Format functions
52  *   - Predicate functions
53  *   - ECB functions
54  *   - Buffer functions
55  *   - Enabling functions
56  *   - DOF functions
57  *   - Anonymous enabling functions
58  *   - Consumer state functions
59  *   - Helper functions
60  *   - Hook functions
61  *   - Driver cookbook functions
62  *
63  * Each group of functions begins with a block comment labelled the "DTrace
64  * [Group] Functions", allowing one to find each block by searching forward
65  * on capital-f functions.
66  */
67 #include <sys/errno.h>
68 #include <sys/stat.h>
69 #include <sys/modctl.h>
70 #include <sys/conf.h>
71 #include <sys/systm.h>
72 #include <sys/ddi.h>
73 #include <sys/sunddi.h>
74 #include <sys/cpuvar.h>
75 #include <sys/kmem.h>
76 #include <sys/strsubr.h>
77 #include <sys/sysmacros.h>
78 #include <sys/dtrace_impl.h>
79 #include <sys/atomic.h>
80 #include <sys/cmn_err.h>
81 #include <sys/mutex_impl.h>
82 #include <sys/rwlock_impl.h>
83 #include <sys/ctf_api.h>
84 #include <sys/panic.h>
85 #include <sys/priv_impl.h>
86 #include <sys/policy.h>
87 #include <sys/cred_impl.h>
88 #include <sys/procfs_isa.h>
89 #include <sys/taskq.h>
90 #include <sys/mkdev.h>
91 #include <sys/kdi.h>
92 #include <sys/zone.h>
93 #include <sys/socket.h>
94 #include <netinet/in.h>
95 #include "strtolctype.h"
96 
97 /*
98  * DTrace Tunable Variables
99  *
100  * The following variables may be tuned by adding a line to /etc/system that
101  * includes both the name of the DTrace module ("dtrace") and the name of the
102  * variable.  For example:
103  *
104  *   set dtrace:dtrace_destructive_disallow = 1
105  *
106  * In general, the only variables that one should be tuning this way are those
107  * that affect system-wide DTrace behavior, and for which the default behavior
108  * is undesirable.  Most of these variables are tunable on a per-consumer
109  * basis using DTrace options, and need not be tuned on a system-wide basis.
110  * When tuning these variables, avoid pathological values; while some attempt
111  * is made to verify the integrity of these variables, they are not considered
112  * part of the supported interface to DTrace, and they are therefore not
113  * checked comprehensively.  Further, these variables should not be tuned
114  * dynamically via "mdb -kw" or other means; they should only be tuned via
115  * /etc/system.
116  */
117 int		dtrace_destructive_disallow = 0;
118 dtrace_optval_t	dtrace_nonroot_maxsize = (16 * 1024 * 1024);
119 size_t		dtrace_difo_maxsize = (256 * 1024);
120 dtrace_optval_t	dtrace_dof_maxsize = (8 * 1024 * 1024);
121 size_t		dtrace_statvar_maxsize = (16 * 1024);
122 size_t		dtrace_actions_max = (16 * 1024);
123 size_t		dtrace_retain_max = 1024;
124 dtrace_optval_t	dtrace_helper_actions_max = 1024;
125 dtrace_optval_t	dtrace_helper_providers_max = 32;
126 dtrace_optval_t	dtrace_dstate_defsize = (1 * 1024 * 1024);
127 size_t		dtrace_strsize_default = 256;
128 dtrace_optval_t	dtrace_cleanrate_default = 9900990;		/* 101 hz */
129 dtrace_optval_t	dtrace_cleanrate_min = 200000;			/* 5000 hz */
130 dtrace_optval_t	dtrace_cleanrate_max = (uint64_t)60 * NANOSEC;	/* 1/minute */
131 dtrace_optval_t	dtrace_aggrate_default = NANOSEC;		/* 1 hz */
132 dtrace_optval_t	dtrace_statusrate_default = NANOSEC;		/* 1 hz */
133 dtrace_optval_t dtrace_statusrate_max = (hrtime_t)10 * NANOSEC;	 /* 6/minute */
134 dtrace_optval_t	dtrace_switchrate_default = NANOSEC;		/* 1 hz */
135 dtrace_optval_t	dtrace_nspec_default = 1;
136 dtrace_optval_t	dtrace_specsize_default = 32 * 1024;
137 dtrace_optval_t dtrace_stackframes_default = 20;
138 dtrace_optval_t dtrace_ustackframes_default = 20;
139 dtrace_optval_t dtrace_jstackframes_default = 50;
140 dtrace_optval_t dtrace_jstackstrsize_default = 512;
141 int		dtrace_msgdsize_max = 128;
142 hrtime_t	dtrace_chill_max = MSEC2NSEC(500);		/* 500 ms */
143 hrtime_t	dtrace_chill_interval = NANOSEC;		/* 1000 ms */
144 int		dtrace_devdepth_max = 32;
145 int		dtrace_err_verbose;
146 hrtime_t	dtrace_deadman_interval = NANOSEC;
147 hrtime_t	dtrace_deadman_timeout = (hrtime_t)10 * NANOSEC;
148 hrtime_t	dtrace_deadman_user = (hrtime_t)30 * NANOSEC;
149 hrtime_t	dtrace_unregister_defunct_reap = (hrtime_t)60 * NANOSEC;
150 
151 /*
152  * DTrace External Variables
153  *
154  * As dtrace(7D) is a kernel module, any DTrace variables are obviously
155  * available to DTrace consumers via the backtick (`) syntax.  One of these,
156  * dtrace_zero, is made deliberately so:  it is provided as a source of
157  * well-known, zero-filled memory.  While this variable is not documented,
158  * it is used by some translators as an implementation detail.
159  */
160 const char	dtrace_zero[256] = { 0 };	/* zero-filled memory */
161 
162 /*
163  * DTrace Internal Variables
164  */
165 static dev_info_t	*dtrace_devi;		/* device info */
166 static vmem_t		*dtrace_arena;		/* probe ID arena */
167 static vmem_t		*dtrace_minor;		/* minor number arena */
168 static taskq_t		*dtrace_taskq;		/* task queue */
169 static dtrace_probe_t	**dtrace_probes;	/* array of all probes */
170 static int		dtrace_nprobes;		/* number of probes */
171 static dtrace_provider_t *dtrace_provider;	/* provider list */
172 static dtrace_meta_t	*dtrace_meta_pid;	/* user-land meta provider */
173 static int		dtrace_opens;		/* number of opens */
174 static int		dtrace_helpers;		/* number of helpers */
175 static int		dtrace_getf;		/* number of unpriv getf()s */
176 static void		*dtrace_softstate;	/* softstate pointer */
177 static dtrace_hash_t	*dtrace_bymod;		/* probes hashed by module */
178 static dtrace_hash_t	*dtrace_byfunc;		/* probes hashed by function */
179 static dtrace_hash_t	*dtrace_byname;		/* probes hashed by name */
180 static dtrace_toxrange_t *dtrace_toxrange;	/* toxic range array */
181 static int		dtrace_toxranges;	/* number of toxic ranges */
182 static int		dtrace_toxranges_max;	/* size of toxic range array */
183 static dtrace_anon_t	dtrace_anon;		/* anonymous enabling */
184 static kmem_cache_t	*dtrace_state_cache;	/* cache for dynamic state */
185 static uint64_t		dtrace_vtime_references; /* number of vtimestamp refs */
186 static kthread_t	*dtrace_panicked;	/* panicking thread */
187 static dtrace_ecb_t	*dtrace_ecb_create_cache; /* cached created ECB */
188 static dtrace_genid_t	dtrace_probegen;	/* current probe generation */
189 static dtrace_helpers_t *dtrace_deferred_pid;	/* deferred helper list */
190 static dtrace_enabling_t *dtrace_retained;	/* list of retained enablings */
191 static dtrace_genid_t	dtrace_retained_gen;	/* current retained enab gen */
192 static dtrace_dynvar_t	dtrace_dynhash_sink;	/* end of dynamic hash chains */
193 static int		dtrace_dynvar_failclean; /* dynvars failed to clean */
194 
195 /*
196  * DTrace Locking
197  * DTrace is protected by three (relatively coarse-grained) locks:
198  *
199  * (1) dtrace_lock is required to manipulate essentially any DTrace state,
200  *     including enabling state, probes, ECBs, consumer state, helper state,
201  *     etc.  Importantly, dtrace_lock is _not_ required when in probe context;
202  *     probe context is lock-free -- synchronization is handled via the
203  *     dtrace_sync() cross call mechanism.
204  *
205  * (2) dtrace_provider_lock is required when manipulating provider state, or
206  *     when provider state must be held constant.
207  *
208  * (3) dtrace_meta_lock is required when manipulating meta provider state, or
209  *     when meta provider state must be held constant.
210  *
211  * The lock ordering between these three locks is dtrace_meta_lock before
212  * dtrace_provider_lock before dtrace_lock.  (In particular, there are
213  * several places where dtrace_provider_lock is held by the framework as it
214  * calls into the providers -- which then call back into the framework,
215  * grabbing dtrace_lock.)
216  *
217  * There are two other locks in the mix:  mod_lock and cpu_lock.  With respect
218  * to dtrace_provider_lock and dtrace_lock, cpu_lock continues its historical
219  * role as a coarse-grained lock; it is acquired before both of these locks.
220  * With respect to dtrace_meta_lock, its behavior is stranger:  cpu_lock must
221  * be acquired _between_ dtrace_meta_lock and any other DTrace locks.
222  * mod_lock is similar with respect to dtrace_provider_lock in that it must be
223  * acquired _between_ dtrace_provider_lock and dtrace_lock.
224  */
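/*
 * For illustration only, the overall acquisition order implied by the
 * rules above is (no single code path necessarily takes all of these):
 *
 *	mutex_enter(&dtrace_meta_lock);
 *	mutex_enter(&cpu_lock);
 *	mutex_enter(&dtrace_provider_lock);
 *	mutex_enter(&mod_lock);
 *	mutex_enter(&dtrace_lock);
 */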
225 static kmutex_t		dtrace_lock;		/* probe state lock */
226 static kmutex_t		dtrace_provider_lock;	/* provider state lock */
227 static kmutex_t		dtrace_meta_lock;	/* meta-provider state lock */
228 
229 /*
230  * DTrace Provider Variables
231  *
232  * These are the variables relating to DTrace as a provider (that is, the
233  * provider of the BEGIN, END, and ERROR probes).
234  */
235 static dtrace_pattr_t	dtrace_provider_attr = {
236 { DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
237 { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
238 { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
239 { DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
240 { DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
241 };
242 
243 static void
244 dtrace_nullop(void)
245 {}
246 
247 static int
248 dtrace_enable_nullop(void)
249 {
250 	return (0);
251 }
252 
253 static dtrace_pops_t	dtrace_provider_ops = {
254 	(void (*)(void *, const dtrace_probedesc_t *))dtrace_nullop,
255 	(void (*)(void *, struct modctl *))dtrace_nullop,
256 	(int (*)(void *, dtrace_id_t, void *))dtrace_enable_nullop,
257 	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
258 	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
259 	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
260 	NULL,
261 	NULL,
262 	NULL,
263 	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop
264 };
265 
266 static dtrace_id_t	dtrace_probeid_begin;	/* special BEGIN probe */
267 static dtrace_id_t	dtrace_probeid_end;	/* special END probe */
268 dtrace_id_t		dtrace_probeid_error;	/* special ERROR probe */
269 
270 /*
271  * DTrace Helper Tracing Variables
272  *
273  * These variables should be set dynamically to enable helper tracing.  The
274  * only variables that should be set are dtrace_helptrace_enable (which should
275  * be set to a non-zero value to allocate helper tracing buffers on the next
276  * open of /dev/dtrace) and dtrace_helptrace_disable (which should be set to a
277  * non-zero value to deallocate helper tracing buffers on the next close of
278  * /dev/dtrace).  When (and only when) helper tracing is disabled, the
279  * buffer size may also be set via dtrace_helptrace_bufsize.
280  */
281 int			dtrace_helptrace_enable = 0;
282 int			dtrace_helptrace_disable = 0;
283 int			dtrace_helptrace_bufsize = 16 * 1024 * 1024;
284 uint32_t		dtrace_helptrace_nlocals;
285 static dtrace_helptrace_t *dtrace_helptrace_buffer;
286 static uint32_t		dtrace_helptrace_next = 0;
287 static int		dtrace_helptrace_wrapped = 0;
288 
289 /*
290  * DTrace Error Hashing
291  *
292  * On DEBUG kernels, DTrace will track the errors that it has seen in a hash
293  * table.  This is very useful for checking coverage of tests that are
294  * expected to induce DIF or DOF processing errors, and may be useful for
295  * debugging problems in the DIF code generator or in DOF generation.  The
296  * error hash may be examined with the ::dtrace_errhash MDB dcmd.
297  */
298 #ifdef DEBUG
299 static dtrace_errhash_t	dtrace_errhash[DTRACE_ERRHASHSZ];
300 static const char *dtrace_errlast;
301 static kthread_t *dtrace_errthread;
302 static kmutex_t dtrace_errlock;
303 #endif
304 
305 /*
306  * DTrace Macros and Constants
307  *
308  * These are various macros that are useful in various spots in the
309  * implementation, along with a few random constants that have no meaning
310  * outside of the implementation.  There is no real structure to this cpp
311  * mishmash -- but is there ever?
312  */
313 #define	DTRACE_HASHSTR(hash, probe)	\
314 	dtrace_hash_str(*((char **)((uintptr_t)(probe) + (hash)->dth_stroffs)))
315 
316 #define	DTRACE_HASHNEXT(hash, probe)	\
317 	(dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_nextoffs)
318 
319 #define	DTRACE_HASHPREV(hash, probe)	\
320 	(dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_prevoffs)
321 
322 #define	DTRACE_HASHEQ(hash, lhs, rhs)	\
323 	(strcmp(*((char **)((uintptr_t)(lhs) + (hash)->dth_stroffs)), \
324 	    *((char **)((uintptr_t)(rhs) + (hash)->dth_stroffs))) == 0)
325 
326 #define	DTRACE_AGGHASHSIZE_SLEW		17
327 
328 #define	DTRACE_V4MAPPED_OFFSET		(sizeof (uint32_t) * 3)
329 
330 /*
331  * The key for a thread-local variable consists of the lower 61 bits of the
332  * t_did, plus the 3 bits of the highest active interrupt above LOCK_LEVEL.
333  * We add DIF_VARIABLE_MAX to t_did to assure that the thread key is never
334  * equal to a variable identifier.  This is necessary (but not sufficient) to
335  * assure that global associative arrays never collide with thread-local
336  * variables.  To guarantee that they cannot collide, we must also define the
337  * order for keying dynamic variables.  That order is:
338  *
339  *   [ key0 ] ... [ keyn ] [ variable-key ] [ tls-key ]
340  *
341  * Because the variable-key and the tls-key are in orthogonal spaces, there is
342  * no way for a global variable key signature to match a thread-local key
343  * signature.
344  */
345 #define	DTRACE_TLS_THRKEY(where) { \
346 	uint_t intr = 0; \
347 	uint_t actv = CPU->cpu_intr_actv >> (LOCK_LEVEL + 1); \
348 	for (; actv; actv >>= 1) \
349 		intr++; \
350 	ASSERT(intr < (1 << 3)); \
351 	(where) = ((curthread->t_did + DIF_VARIABLE_MAX) & \
352 	    (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
353 }
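/*
 * Sketch of the resulting key layout (derived from the macro above;
 * illustrative only):
 *
 *	 63        61 60                                             0
 *	+------------+------------------------------------------------+
 *	| intr level |   low 61 bits of (t_did + DIF_VARIABLE_MAX)    |
 *	+------------+------------------------------------------------+
 */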
354 
355 #define	DT_BSWAP_8(x)	((x) & 0xff)
356 #define	DT_BSWAP_16(x)	((DT_BSWAP_8(x) << 8) | DT_BSWAP_8((x) >> 8))
357 #define	DT_BSWAP_32(x)	((DT_BSWAP_16(x) << 16) | DT_BSWAP_16((x) >> 16))
358 #define	DT_BSWAP_64(x)	((DT_BSWAP_32(x) << 32) | DT_BSWAP_32((x) >> 32))
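/*
 * As a quick illustration of how these compose:  DT_BSWAP_16(0x1122)
 * evaluates to 0x2211, and DT_BSWAP_32(0x11223344) to 0x44332211.
 */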
359 
360 #define	DT_MASK_LO 0x00000000FFFFFFFFULL
361 
362 #define	DTRACE_STORE(type, tomax, offset, what) \
363 	*((type *)((uintptr_t)(tomax) + (uintptr_t)offset)) = (type)(what);
364 
365 #ifndef __x86
366 #define	DTRACE_ALIGNCHECK(addr, size, flags)				\
367 	if (addr & (size - 1)) {					\
368 		*flags |= CPU_DTRACE_BADALIGN;				\
369 		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = addr;	\
370 		return (0);						\
371 	}
372 #else
373 #define	DTRACE_ALIGNCHECK(addr, size, flags)
374 #endif
375 
376 /*
377  * Test whether a range of memory starting at testaddr of size testsz falls
378  * within the range of memory described by addr, sz.  We take care to avoid
379  * problems with overflow and underflow of the unsigned quantities, and
380  * disallow all negative sizes.  Ranges of size 0 are allowed.
381  */
382 #define	DTRACE_INRANGE(testaddr, testsz, baseaddr, basesz) \
383 	((testaddr) - (uintptr_t)(baseaddr) < (basesz) && \
384 	(testaddr) + (testsz) - (uintptr_t)(baseaddr) <= (basesz) && \
385 	(testaddr) + (testsz) >= (testaddr))
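/*
 * The final clause guards against wraparound:  hypothetically, with
 * baseaddr == 0 and a very large basesz, a testaddr near the top of the
 * address space plus a nonzero testsz would wrap past zero and still
 * satisfy the first two clauses; the third clause rejects any such range.
 */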
386 
387 #define	DTRACE_RANGE_REMAIN(remp, addr, baseaddr, basesz)		\
388 do {									\
389 	if ((remp) != NULL) {						\
390 		*(remp) = (uintptr_t)(baseaddr) + (basesz) - (addr);	\
391 	}								\
392 _NOTE(CONSTCOND) } while (0)
393 
394 
395 /*
396  * Test whether alloc_sz bytes will fit in the scratch region.  We isolate
397  * alloc_sz on the righthand side of the comparison in order to avoid overflow
398  * or underflow in the comparison with it.  This is simpler than the INRANGE
399  * check above, because we know that the dtms_scratch_ptr is valid in the
400  * range.  Allocations of size zero are allowed.
401  */
402 #define	DTRACE_INSCRATCH(mstate, alloc_sz) \
403 	((mstate)->dtms_scratch_base + (mstate)->dtms_scratch_size - \
404 	(mstate)->dtms_scratch_ptr >= (alloc_sz))
405 
406 #define	DTRACE_LOADFUNC(bits)						\
407 /*CSTYLED*/								\
408 uint##bits##_t								\
409 dtrace_load##bits(uintptr_t addr)					\
410 {									\
411 	size_t size = bits / NBBY;					\
412 	/*CSTYLED*/							\
413 	uint##bits##_t rval;						\
414 	int i;								\
415 	volatile uint16_t *flags = (volatile uint16_t *)		\
416 	    &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;			\
417 									\
418 	DTRACE_ALIGNCHECK(addr, size, flags);				\
419 									\
420 	for (i = 0; i < dtrace_toxranges; i++) {			\
421 		if (addr >= dtrace_toxrange[i].dtt_limit)		\
422 			continue;					\
423 									\
424 		if (addr + size <= dtrace_toxrange[i].dtt_base)		\
425 			continue;					\
426 									\
427 		/*							\
428 		 * This address falls within a toxic region; return 0.	\
429 		 */							\
430 		*flags |= CPU_DTRACE_BADADDR;				\
431 		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = addr;	\
432 		return (0);						\
433 	}								\
434 									\
435 	*flags |= CPU_DTRACE_NOFAULT;					\
436 	/*CSTYLED*/							\
437 	rval = *((volatile uint##bits##_t *)addr);			\
438 	*flags &= ~CPU_DTRACE_NOFAULT;					\
439 									\
440 	return (!(*flags & CPU_DTRACE_FAULT) ? rval : 0);		\
441 }
442 
443 #ifdef _LP64
444 #define	dtrace_loadptr	dtrace_load64
445 #else
446 #define	dtrace_loadptr	dtrace_load32
447 #endif
448 
449 #define	DTRACE_DYNHASH_FREE	0
450 #define	DTRACE_DYNHASH_SINK	1
451 #define	DTRACE_DYNHASH_VALID	2
452 
453 #define	DTRACE_MATCH_FAIL	-1
454 #define	DTRACE_MATCH_NEXT	0
455 #define	DTRACE_MATCH_DONE	1
456 #define	DTRACE_ANCHORED(probe)	((probe)->dtpr_func[0] != '\0')
457 #define	DTRACE_STATE_ALIGN	64
458 
459 #define	DTRACE_FLAGS2FLT(flags)						\
460 	(((flags) & CPU_DTRACE_BADADDR) ? DTRACEFLT_BADADDR :		\
461 	((flags) & CPU_DTRACE_ILLOP) ? DTRACEFLT_ILLOP :		\
462 	((flags) & CPU_DTRACE_DIVZERO) ? DTRACEFLT_DIVZERO :		\
463 	((flags) & CPU_DTRACE_KPRIV) ? DTRACEFLT_KPRIV :		\
464 	((flags) & CPU_DTRACE_UPRIV) ? DTRACEFLT_UPRIV :		\
465 	((flags) & CPU_DTRACE_TUPOFLOW) ?  DTRACEFLT_TUPOFLOW :		\
466 	((flags) & CPU_DTRACE_BADALIGN) ?  DTRACEFLT_BADALIGN :		\
467 	((flags) & CPU_DTRACE_NOSCRATCH) ?  DTRACEFLT_NOSCRATCH :	\
468 	((flags) & CPU_DTRACE_BADSTACK) ?  DTRACEFLT_BADSTACK :		\
469 	DTRACEFLT_UNKNOWN)
470 
471 #define	DTRACEACT_ISSTRING(act)						\
472 	((act)->dta_kind == DTRACEACT_DIFEXPR &&			\
473 	(act)->dta_difo->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING)
474 
475 static size_t dtrace_strlen(const char *, size_t);
476 static dtrace_probe_t *dtrace_probe_lookup_id(dtrace_id_t id);
477 static void dtrace_enabling_provide(dtrace_provider_t *);
478 static int dtrace_enabling_match(dtrace_enabling_t *, int *);
479 static void dtrace_enabling_matchall(void);
480 static void dtrace_enabling_reap(void);
481 static dtrace_state_t *dtrace_anon_grab(void);
482 static uint64_t dtrace_helper(int, dtrace_mstate_t *,
483     dtrace_state_t *, uint64_t, uint64_t);
484 static dtrace_helpers_t *dtrace_helpers_create(proc_t *);
485 static void dtrace_buffer_drop(dtrace_buffer_t *);
486 static int dtrace_buffer_consumed(dtrace_buffer_t *, hrtime_t when);
487 static intptr_t dtrace_buffer_reserve(dtrace_buffer_t *, size_t, size_t,
488     dtrace_state_t *, dtrace_mstate_t *);
489 static int dtrace_state_option(dtrace_state_t *, dtrace_optid_t,
490     dtrace_optval_t);
491 static int dtrace_ecb_create_enable(dtrace_probe_t *, void *);
492 static void dtrace_helper_provider_destroy(dtrace_helper_provider_t *);
493 static int dtrace_priv_proc(dtrace_state_t *, dtrace_mstate_t *);
494 static void dtrace_getf_barrier(void);
495 static int dtrace_canload_remains(uint64_t, size_t, size_t *,
496     dtrace_mstate_t *, dtrace_vstate_t *);
497 static int dtrace_canstore_remains(uint64_t, size_t, size_t *,
498     dtrace_mstate_t *, dtrace_vstate_t *);
499 
500 /*
501  * DTrace Probe Context Functions
502  *
503  * These functions are called from probe context.  Because probe context is
504  * any context in which C may be called, arbitrary locks may be held,
505  * interrupts may be disabled, we may be in arbitrary dispatched state, etc.
506  * As a result, functions called from probe context may only call other DTrace
507  * support functions -- they may not interact at all with the system at large.
508  * (Note that the ASSERT macro is made probe-context safe by redefining it in
509  * terms of dtrace_assfail(), a probe-context safe function.) If arbitrary
510  * loads are to be performed from probe context, they _must_ be in terms of
511  * the safe dtrace_load*() variants.
512  *
513  * Some functions in this block are not actually called from probe context;
514  * for these functions, there will be a comment above the function reading
515  * "Note:  not called from probe context."
516  */
517 void
518 dtrace_panic(const char *format, ...)
519 {
520 	va_list alist;
521 
522 	va_start(alist, format);
523 	dtrace_vpanic(format, alist);
524 	va_end(alist);
525 }
526 
527 int
528 dtrace_assfail(const char *a, const char *f, int l)
529 {
530 	dtrace_panic("assertion failed: %s, file: %s, line: %d", a, f, l);
531 
532 	/*
533 	 * We just need something here that even the most clever compiler
534 	 * cannot optimize away.
535 	 */
536 	return (a[(uintptr_t)f]);
537 }
538 
539 /*
540  * Atomically increment a specified error counter from probe context.
541  */
542 static void
543 dtrace_error(uint32_t *counter)
544 {
545 	/*
546 	 * Most counters stored to in probe context are per-CPU counters.
547 	 * However, there are some error conditions that are sufficiently
548 	 * arcane that they don't merit per-CPU storage.  If these counters
549 	 * are incremented concurrently on different CPUs, scalability will be
550 	 * adversely affected -- but we don't expect them to be white-hot in a
551 	 * correctly constructed enabling...
552 	 */
553 	uint32_t oval, nval;
554 
555 	do {
556 		oval = *counter;
557 
558 		if ((nval = oval + 1) == 0) {
559 			/*
560 			 * If the counter would wrap, set it to 1 -- assuring
561 			 * that the counter is never zero when we have seen
562 			 * errors.  (The counter must be 32-bits because we
563 			 * aren't guaranteed a 64-bit compare&swap operation.)
564 			 * To save this code both the infamy of being fingered
565 			 * by a priggish news story and the indignity of being
566 			 * the target of a neo-puritan witch trial, we're
567 			 * carefully avoiding any colorful description of the
568 			 * likelihood of this condition -- but suffice it to
569 			 * say that it is only slightly more likely than the
570 			 * overflow of predicate cache IDs, as discussed in
571 			 * dtrace_predicate_create().
572 			 */
573 			nval = 1;
574 		}
575 	} while (dtrace_cas32(counter, oval, nval) != oval);
576 }
577 
578 /*
579  * Use the DTRACE_LOADFUNC macro to define functions for each of loading a
580  * uint8_t, a uint16_t, a uint32_t and a uint64_t.
581  */
582 /* BEGIN CSTYLED */
583 DTRACE_LOADFUNC(8)
584 DTRACE_LOADFUNC(16)
585 DTRACE_LOADFUNC(32)
586 DTRACE_LOADFUNC(64)
587 /* END CSTYLED */
588 
589 static int
590 dtrace_inscratch(uintptr_t dest, size_t size, dtrace_mstate_t *mstate)
591 {
592 	if (dest < mstate->dtms_scratch_base)
593 		return (0);
594 
595 	if (dest + size < dest)
596 		return (0);
597 
598 	if (dest + size > mstate->dtms_scratch_ptr)
599 		return (0);
600 
601 	return (1);
602 }
603 
604 static int
605 dtrace_canstore_statvar(uint64_t addr, size_t sz, size_t *remain,
606     dtrace_statvar_t **svars, int nsvars)
607 {
608 	int i;
609 	size_t maxglobalsize, maxlocalsize;
610 
611 	if (nsvars == 0)
612 		return (0);
613 
614 	maxglobalsize = dtrace_statvar_maxsize + sizeof (uint64_t);
615 	maxlocalsize = maxglobalsize * NCPU;
616 
617 	for (i = 0; i < nsvars; i++) {
618 		dtrace_statvar_t *svar = svars[i];
619 		uint8_t scope;
620 		size_t size;
621 
622 		if (svar == NULL || (size = svar->dtsv_size) == 0)
623 			continue;
624 
625 		scope = svar->dtsv_var.dtdv_scope;
626 
627 		/*
628 		 * We verify that our size is valid in the spirit of providing
629 		 * defense in depth:  we want to prevent attackers from using
630 		 * DTrace to escalate an orthogonal kernel heap corruption bug
631 		 * into the ability to store to arbitrary locations in memory.
632 		 */
633 		VERIFY((scope == DIFV_SCOPE_GLOBAL && size <= maxglobalsize) ||
634 		    (scope == DIFV_SCOPE_LOCAL && size <= maxlocalsize));
635 
636 		if (DTRACE_INRANGE(addr, sz, svar->dtsv_data,
637 		    svar->dtsv_size)) {
638 			DTRACE_RANGE_REMAIN(remain, addr, svar->dtsv_data,
639 			    svar->dtsv_size);
640 			return (1);
641 		}
642 	}
643 
644 	return (0);
645 }
646 
647 /*
648  * Check to see if the address is within a memory region to which a store may
649  * be issued.  This includes the DTrace scratch areas, and any DTrace variable
650  * region.  The caller of dtrace_canstore() is responsible for performing any
651  * alignment checks that are needed before stores are actually executed.
652  */
653 static int
654 dtrace_canstore(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
655     dtrace_vstate_t *vstate)
656 {
657 	return (dtrace_canstore_remains(addr, sz, NULL, mstate, vstate));
658 }
659 
660 /*
661  * Implementation of dtrace_canstore which communicates the upper bound of the
662  * allowed memory region.
663  */
664 static int
665 dtrace_canstore_remains(uint64_t addr, size_t sz, size_t *remain,
666     dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
667 {
668 	/*
669 	 * First, check to see if the address is in scratch space...
670 	 */
671 	if (DTRACE_INRANGE(addr, sz, mstate->dtms_scratch_base,
672 	    mstate->dtms_scratch_size)) {
673 		DTRACE_RANGE_REMAIN(remain, addr, mstate->dtms_scratch_base,
674 		    mstate->dtms_scratch_size);
675 		return (1);
676 	}
677 
678 	/*
679 	 * Now check to see if it's a dynamic variable.  This check will pick
680 	 * up both thread-local variables and any global dynamically-allocated
681 	 * variables.
682 	 */
683 	if (DTRACE_INRANGE(addr, sz, vstate->dtvs_dynvars.dtds_base,
684 	    vstate->dtvs_dynvars.dtds_size)) {
685 		dtrace_dstate_t *dstate = &vstate->dtvs_dynvars;
686 		uintptr_t base = (uintptr_t)dstate->dtds_base +
687 		    (dstate->dtds_hashsize * sizeof (dtrace_dynhash_t));
688 		uintptr_t chunkoffs;
689 		dtrace_dynvar_t *dvar;
690 
691 		/*
692 		 * Before we assume that we can store here, we need to make
693 		 * sure that it isn't in our metadata -- storing to our
694 		 * dynamic variable metadata would corrupt our state.  For
695 		 * the range to not include any dynamic variable metadata,
696 		 * it must:
697 		 *
698 		 *	(1) Start above the hash table that is at the base of
699 		 *	the dynamic variable space
700 		 *
701 		 *	(2) Have a starting chunk offset that is beyond the
702 		 *	dtrace_dynvar_t that is at the base of every chunk
703 		 *
704 		 *	(3) Not span a chunk boundary
705 		 *
706 		 *	(4) Not be in the tuple space of a dynamic variable
707 		 *
708 		 */
709 		if (addr < base)
710 			return (0);
711 
712 		chunkoffs = (addr - base) % dstate->dtds_chunksize;
713 
714 		if (chunkoffs < sizeof (dtrace_dynvar_t))
715 			return (0);
716 
717 		if (chunkoffs + sz > dstate->dtds_chunksize)
718 			return (0);
719 
720 		dvar = (dtrace_dynvar_t *)((uintptr_t)addr - chunkoffs);
721 
722 		if (dvar->dtdv_hashval == DTRACE_DYNHASH_FREE)
723 			return (0);
724 
725 		if (chunkoffs < sizeof (dtrace_dynvar_t) +
726 		    ((dvar->dtdv_tuple.dtt_nkeys - 1) * sizeof (dtrace_key_t)))
727 			return (0);
728 
729 		DTRACE_RANGE_REMAIN(remain, addr, dvar, dstate->dtds_chunksize);
730 		return (1);
731 	}
732 
733 	/*
734 	 * Finally, check the static local and global variables.  These checks
735 	 * take the longest, so we perform them last.
736 	 */
737 	if (dtrace_canstore_statvar(addr, sz, remain,
738 	    vstate->dtvs_locals, vstate->dtvs_nlocals))
739 		return (1);
740 
741 	if (dtrace_canstore_statvar(addr, sz, remain,
742 	    vstate->dtvs_globals, vstate->dtvs_nglobals))
743 		return (1);
744 
745 	return (0);
746 }
747 
748 
749 /*
750  * Convenience routine to check to see if the address is within a memory
751  * region in which a load may be issued given the user's privilege level;
752  * if not, it sets the appropriate error flags and loads 'addr' into the
753  * illegal value slot.
754  *
755  * DTrace subroutines (DIF_SUBR_*) should use this helper to implement
756  * appropriate memory access protection.
757  */
758 static int
759 dtrace_canload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
760     dtrace_vstate_t *vstate)
761 {
762 	return (dtrace_canload_remains(addr, sz, NULL, mstate, vstate));
763 }
764 
765 /*
766  * Implementation of dtrace_canload which communicates the upper bound of the
767  * allowed memory region.
768  */
769 static int
770 dtrace_canload_remains(uint64_t addr, size_t sz, size_t *remain,
771     dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
772 {
773 	volatile uintptr_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval;
774 	file_t *fp;
775 
776 	/*
777 	 * If we hold the privilege to read from kernel memory, then
778 	 * everything is readable.
779 	 */
780 	if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) {
781 		DTRACE_RANGE_REMAIN(remain, addr, addr, sz);
782 		return (1);
783 	}
784 
785 	/*
786 	 * You can obviously read that which you can store.
787 	 */
788 	if (dtrace_canstore_remains(addr, sz, remain, mstate, vstate))
789 		return (1);
790 
791 	/*
792 	 * We're allowed to read from our own string table.
793 	 */
794 	if (DTRACE_INRANGE(addr, sz, mstate->dtms_difo->dtdo_strtab,
795 	    mstate->dtms_difo->dtdo_strlen)) {
796 		DTRACE_RANGE_REMAIN(remain, addr,
797 		    mstate->dtms_difo->dtdo_strtab,
798 		    mstate->dtms_difo->dtdo_strlen);
799 		return (1);
800 	}
801 
802 	if (vstate->dtvs_state != NULL &&
803 	    dtrace_priv_proc(vstate->dtvs_state, mstate)) {
804 		proc_t *p;
805 
806 		/*
807 		 * When we have privileges to the current process, there are
808 		 * several context-related kernel structures that are safe to
809 		 * read, even absent the privilege to read from kernel memory.
810 		 * These reads are safe because these structures contain only
811 		 * state that (1) we're permitted to read, (2) is harmless or
812 		 * (3) contains pointers to additional kernel state that we're
813 		 * not permitted to read (and as such, do not present an
814 		 * opportunity for privilege escalation).  Finally (and
815 		 * critically), because of the nature of their relation with
816 		 * the current thread context, the memory associated with these
817 		 * structures cannot change over the duration of probe context,
818 		 * and it is therefore impossible for this memory to be
819 		 * deallocated and reallocated as something else while it's
820 		 * being operated upon.
821 		 */
822 		if (DTRACE_INRANGE(addr, sz, curthread, sizeof (kthread_t))) {
823 			DTRACE_RANGE_REMAIN(remain, addr, curthread,
824 			    sizeof (kthread_t));
825 			return (1);
826 		}
827 
828 		if ((p = curthread->t_procp) != NULL && DTRACE_INRANGE(addr,
829 		    sz, curthread->t_procp, sizeof (proc_t))) {
830 			DTRACE_RANGE_REMAIN(remain, addr, curthread->t_procp,
831 			    sizeof (proc_t));
832 			return (1);
833 		}
834 
835 		if (curthread->t_cred != NULL && DTRACE_INRANGE(addr, sz,
836 		    curthread->t_cred, sizeof (cred_t))) {
837 			DTRACE_RANGE_REMAIN(remain, addr, curthread->t_cred,
838 			    sizeof (cred_t));
839 			return (1);
840 		}
841 
842 		if (p != NULL && p->p_pidp != NULL && DTRACE_INRANGE(addr, sz,
843 		    &(p->p_pidp->pid_id), sizeof (pid_t))) {
844 			DTRACE_RANGE_REMAIN(remain, addr, &(p->p_pidp->pid_id),
845 			    sizeof (pid_t));
846 			return (1);
847 		}
848 
849 		if (curthread->t_cpu != NULL && DTRACE_INRANGE(addr, sz,
850 		    curthread->t_cpu, offsetof(cpu_t, cpu_pause_thread))) {
851 			DTRACE_RANGE_REMAIN(remain, addr, curthread->t_cpu,
852 			    offsetof(cpu_t, cpu_pause_thread));
853 			return (1);
854 		}
855 	}
856 
857 	if ((fp = mstate->dtms_getf) != NULL) {
858 		uintptr_t psz = sizeof (void *);
859 		vnode_t *vp;
860 		vnodeops_t *op;
861 
862 		/*
863 		 * When getf() returns a file_t, the enabling is implicitly
864 		 * granted the (transient) right to read the returned file_t
865 		 * as well as the v_path and v_op->vnop_name of the underlying
866 		 * vnode.  These accesses are allowed after a successful
867 		 * getf() because the members that they refer to cannot change
868 		 * once set -- and the barrier logic in the kernel's closef()
869  * path assures that the file_t and its referenced vnode_t
870  * cannot themselves be stale (that is, it is impossible for
871 		 * either dtms_getf itself or its f_vnode member to reference
872 		 * freed memory).
873 		 */
874 		if (DTRACE_INRANGE(addr, sz, fp, sizeof (file_t))) {
875 			DTRACE_RANGE_REMAIN(remain, addr, fp, sizeof (file_t));
876 			return (1);
877 		}
878 
879 		if ((vp = fp->f_vnode) != NULL) {
880 			size_t slen;
881 
882 			if (DTRACE_INRANGE(addr, sz, &vp->v_path, psz)) {
883 				DTRACE_RANGE_REMAIN(remain, addr, &vp->v_path,
884 				    psz);
885 				return (1);
886 			}
887 
888 			slen = strlen(vp->v_path) + 1;
889 			if (DTRACE_INRANGE(addr, sz, vp->v_path, slen)) {
890 				DTRACE_RANGE_REMAIN(remain, addr, vp->v_path,
891 				    slen);
892 				return (1);
893 			}
894 
895 			if (DTRACE_INRANGE(addr, sz, &vp->v_op, psz)) {
896 				DTRACE_RANGE_REMAIN(remain, addr, &vp->v_op,
897 				    psz);
898 				return (1);
899 			}
900 
901 			if ((op = vp->v_op) != NULL &&
902 			    DTRACE_INRANGE(addr, sz, &op->vnop_name, psz)) {
903 				DTRACE_RANGE_REMAIN(remain, addr,
904 				    &op->vnop_name, psz);
905 				return (1);
906 			}
907 
908 			if (op != NULL && op->vnop_name != NULL &&
909 			    DTRACE_INRANGE(addr, sz, op->vnop_name,
910 			    (slen = strlen(op->vnop_name) + 1))) {
911 				DTRACE_RANGE_REMAIN(remain, addr,
912 				    op->vnop_name, slen);
913 				return (1);
914 			}
915 		}
916 	}
917 
918 	DTRACE_CPUFLAG_SET(CPU_DTRACE_KPRIV);
919 	*illval = addr;
920 	return (0);
921 }
922 
923 /*
924  * Convenience routine to check to see if a given string is within a memory
925  * region in which a load may be issued given the user's privilege level;
926  * this exists so that we don't need to issue unnecessary dtrace_strlen()
927  * calls in the event that the user has all privileges.
928  */
929 static int
930 dtrace_strcanload(uint64_t addr, size_t sz, size_t *remain,
931     dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
932 {
933 	size_t rsize;
934 
935 	/*
936 	 * If we hold the privilege to read from kernel memory, then
937 	 * everything is readable.
938 	 */
939 	if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) {
940 		DTRACE_RANGE_REMAIN(remain, addr, addr, sz);
941 		return (1);
942 	}
943 
944 	/*
945 	 * Even if the caller is uninterested in querying the remaining valid
946 	 * range, it is still needed here to verify that the access is allowed.
947 	 */
948 	if (remain == NULL) {
949 		remain = &rsize;
950 	}
951 	if (dtrace_canload_remains(addr, 0, remain, mstate, vstate)) {
952 		size_t strsz;
953 		/*
954 		 * Perform the strlen after determining the length of the
955 		 * memory region which is accessible.  This prevents timing
956 		 * information from being used to find NULs in memory which is
957 		 * not accessible to the caller.
958 		 */
959 		strsz = 1 + dtrace_strlen((char *)(uintptr_t)addr,
960 		    MIN(sz, *remain));
961 		if (strsz <= *remain) {
962 			return (1);
963 		}
964 	}
965 
966 	return (0);
967 }
968 
969 /*
970  * Convenience routine to check to see if a given variable is within a memory
971  * region in which a load may be issued given the user's privilege level.
972  */
973 static int
974 dtrace_vcanload(void *src, dtrace_diftype_t *type, size_t *remain,
975     dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
976 {
977 	size_t sz;
978 	ASSERT(type->dtdt_flags & DIF_TF_BYREF);
979 
980 	/*
981 	 * Calculate the max size before performing any checks since even
982 	 * DTRACE_ACCESS_KERNEL-credentialed callers expect that this function
983 	 * return the max length via 'remain'.
984 	 */
985 	if (type->dtdt_kind == DIF_TYPE_STRING) {
986 		dtrace_state_t *state = vstate->dtvs_state;
987 
988 		if (state != NULL) {
989 			sz = state->dts_options[DTRACEOPT_STRSIZE];
990 		} else {
991 			/*
992 			 * In helper context, we have a NULL state; fall back
993 			 * to using the system-wide default for the string size
994 			 * in this case.
995 			 */
996 			sz = dtrace_strsize_default;
997 		}
998 	} else {
999 		sz = type->dtdt_size;
1000 	}
1001 
1002 	/*
1003 	 * If we hold the privilege to read from kernel memory, then
1004 	 * everything is readable.
1005 	 */
1006 	if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) {
1007 		DTRACE_RANGE_REMAIN(remain, (uintptr_t)src, src, sz);
1008 		return (1);
1009 	}
1010 
1011 	if (type->dtdt_kind == DIF_TYPE_STRING) {
1012 		return (dtrace_strcanload((uintptr_t)src, sz, remain, mstate,
1013 		    vstate));
1014 	}
1015 	return (dtrace_canload_remains((uintptr_t)src, sz, remain, mstate,
1016 	    vstate));
1017 }
1018 
1019 /*
1020  * Convert a string to a signed integer using safe loads.
1021  *
1022  * NOTE: This function uses various macros from strtolctype.h to manipulate
1023  * digit values, etc -- these have all been checked to ensure they make
1024  * no additional function calls.
1025  */
1026 static int64_t
1027 dtrace_strtoll(char *input, int base, size_t limit)
1028 {
1029 	uintptr_t pos = (uintptr_t)input;
1030 	int64_t val = 0;
1031 	int x;
1032 	boolean_t neg = B_FALSE;
1033 	char c, cc, ccc;
1034 	uintptr_t end = pos + limit;
1035 
1036 	/*
1037 	 * Consume any whitespace preceding digits.
1038 	 */
1039 	while ((c = dtrace_load8(pos)) == ' ' || c == '\t')
1040 		pos++;
1041 
1042 	/*
1043 	 * Handle an explicit sign if one is present.
1044 	 */
1045 	if (c == '-' || c == '+') {
1046 		if (c == '-')
1047 			neg = B_TRUE;
1048 		c = dtrace_load8(++pos);
1049 	}
1050 
1051 	/*
1052 	 * Check for an explicit hexadecimal prefix ("0x" or "0X") and skip it
1053 	 * if present.
1054 	 */
1055 	if (base == 16 && c == '0' && ((cc = dtrace_load8(pos + 1)) == 'x' ||
1056 	    cc == 'X') && isxdigit(ccc = dtrace_load8(pos + 2))) {
1057 		pos += 2;
1058 		c = ccc;
1059 	}
1060 
1061 	/*
1062 	 * Read in contiguous digits until the first non-digit character.
1063 	 */
1064 	for (; pos < end && c != '\0' && lisalnum(c) && (x = DIGIT(c)) < base;
1065 	    c = dtrace_load8(++pos))
1066 		val = val * base + x;
1067 
1068 	return (neg ? -val : val);
1069 }
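/*
 * A worked example of the above (hypothetical input):  for the string
 * "-0x1a" with base 16, the sign is consumed, the "0x" prefix is skipped,
 * and the digits accumulate as 0 * 16 + 1 == 1 and then 1 * 16 + 10 == 26,
 * so the function returns -26.
 */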
1070 
1071 /*
1072  * Compare two strings using safe loads.
1073  */
1074 static int
1075 dtrace_strncmp(char *s1, char *s2, size_t limit)
1076 {
1077 	uint8_t c1, c2;
1078 	volatile uint16_t *flags;
1079 
1080 	if (s1 == s2 || limit == 0)
1081 		return (0);
1082 
1083 	flags = (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
1084 
1085 	do {
1086 		if (s1 == NULL) {
1087 			c1 = '\0';
1088 		} else {
1089 			c1 = dtrace_load8((uintptr_t)s1++);
1090 		}
1091 
1092 		if (s2 == NULL) {
1093 			c2 = '\0';
1094 		} else {
1095 			c2 = dtrace_load8((uintptr_t)s2++);
1096 		}
1097 
1098 		if (c1 != c2)
1099 			return (c1 - c2);
1100 	} while (--limit && c1 != '\0' && !(*flags & CPU_DTRACE_FAULT));
1101 
1102 	return (0);
1103 }
1104 
1105 /*
1106  * Compute strlen(s) for a string using safe memory accesses.  The additional
1107  * lim parameter is used to specify a maximum length to ensure completion.
1108  */
1109 static size_t
1110 dtrace_strlen(const char *s, size_t lim)
1111 {
1112 	uint_t len;
1113 
1114 	for (len = 0; len != lim; len++) {
1115 		if (dtrace_load8((uintptr_t)s++) == '\0')
1116 			break;
1117 	}
1118 
1119 	return (len);
1120 }
1121 
1122 /*
1123  * Check if an address falls within a toxic region.
1124  */
1125 static int
1126 dtrace_istoxic(uintptr_t kaddr, size_t size)
1127 {
1128 	uintptr_t taddr, tsize;
1129 	int i;
1130 
1131 	for (i = 0; i < dtrace_toxranges; i++) {
1132 		taddr = dtrace_toxrange[i].dtt_base;
1133 		tsize = dtrace_toxrange[i].dtt_limit - taddr;
1134 
1135 		if (kaddr - taddr < tsize) {
1136 			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1137 			cpu_core[CPU->cpu_id].cpuc_dtrace_illval = kaddr;
1138 			return (1);
1139 		}
1140 
1141 		if (taddr - kaddr < size) {
1142 			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1143 			cpu_core[CPU->cpu_id].cpuc_dtrace_illval = taddr;
1144 			return (1);
1145 		}
1146 	}
1147 
1148 	return (0);
1149 }
1150 
1151 /*
1152  * Copy src to dst using safe memory accesses.  The src is assumed to be unsafe
1153  * memory specified by the DIF program.  The dst is assumed to be safe memory
1154  * that we can store to directly because it is managed by DTrace.  As with
1155  * standard bcopy, overlapping copies are handled properly.
1156  */
1157 static void
1158 dtrace_bcopy(const void *src, void *dst, size_t len)
1159 {
1160 	if (len != 0) {
1161 		uint8_t *s1 = dst;
1162 		const uint8_t *s2 = src;
1163 
1164 		if (s1 <= s2) {
1165 			do {
1166 				*s1++ = dtrace_load8((uintptr_t)s2++);
1167 			} while (--len != 0);
1168 		} else {
1169 			s2 += len;
1170 			s1 += len;
1171 
1172 			do {
1173 				*--s1 = dtrace_load8((uintptr_t)--s2);
1174 			} while (--len != 0);
1175 		}
1176 	}
1177 }
1178 
1179 /*
1180  * Copy src to dst using safe memory accesses, up to either the specified
1181  * length, or the point that a nul byte is encountered.  The src is assumed to
1182  * be unsafe memory specified by the DIF program.  The dst is assumed to be
1183  * safe memory that we can store to directly because it is managed by DTrace.
1184  * Unlike dtrace_bcopy(), overlapping regions are not handled.
1185  */
1186 static void
1187 dtrace_strcpy(const void *src, void *dst, size_t len)
1188 {
1189 	if (len != 0) {
1190 		uint8_t *s1 = dst, c;
1191 		const uint8_t *s2 = src;
1192 
1193 		do {
1194 			*s1++ = c = dtrace_load8((uintptr_t)s2++);
1195 		} while (--len != 0 && c != '\0');
1196 	}
1197 }
1198 
1199 /*
1200  * Copy src to dst, deriving the size and type from the specified (BYREF)
1201  * variable type.  The src is assumed to be unsafe memory specified by the DIF
1202  * program.  The dst is assumed to be DTrace variable memory that is of the
1203  * specified type; we assume that we can store to directly.
1204  */
1205 static void
1206 dtrace_vcopy(void *src, void *dst, dtrace_diftype_t *type, size_t limit)
1207 {
1208 	ASSERT(type->dtdt_flags & DIF_TF_BYREF);
1209 
1210 	if (type->dtdt_kind == DIF_TYPE_STRING) {
1211 		dtrace_strcpy(src, dst, MIN(type->dtdt_size, limit));
1212 	} else {
1213 		dtrace_bcopy(src, dst, MIN(type->dtdt_size, limit));
1214 	}
1215 }
1216 
1217 /*
1218  * Compare s1 to s2 using safe memory accesses.  The s1 data is assumed to be
1219  * unsafe memory specified by the DIF program.  The s2 data is assumed to be
1220  * safe memory that we can access directly because it is managed by DTrace.
1221  */
1222 static int
1223 dtrace_bcmp(const void *s1, const void *s2, size_t len)
1224 {
1225 	volatile uint16_t *flags;
1226 
1227 	flags = (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
1228 
1229 	if (s1 == s2)
1230 		return (0);
1231 
1232 	if (s1 == NULL || s2 == NULL)
1233 		return (1);
1234 
1235 	if (s1 != s2 && len != 0) {
1236 		const uint8_t *ps1 = s1;
1237 		const uint8_t *ps2 = s2;
1238 
1239 		do {
1240 			if (dtrace_load8((uintptr_t)ps1++) != *ps2++)
1241 				return (1);
1242 		} while (--len != 0 && !(*flags & CPU_DTRACE_FAULT));
1243 	}
1244 	return (0);
1245 }
1246 
1247 /*
1248  * Zero the specified region using a simple byte-by-byte loop.  Note that this
1249  * is for safe DTrace-managed memory only.
1250  */
1251 static void
1252 dtrace_bzero(void *dst, size_t len)
1253 {
1254 	uchar_t *cp;
1255 
1256 	for (cp = dst; len != 0; len--)
1257 		*cp++ = 0;
1258 }
1259 
1260 static void
1261 dtrace_add_128(uint64_t *addend1, uint64_t *addend2, uint64_t *sum)
1262 {
1263 	uint64_t result[2];
1264 
1265 	result[0] = addend1[0] + addend2[0];
1266 	result[1] = addend1[1] + addend2[1] +
1267 	    (result[0] < addend1[0] || result[0] < addend2[0] ? 1 : 0);
1268 
1269 	sum[0] = result[0];
1270 	sum[1] = result[1];
1271 }
1272 
1273 /*
1274  * Shift the 128-bit value in a by b. If b is positive, shift left.
1275  * If b is negative, shift right.
1276  */
1277 static void
1278 dtrace_shift_128(uint64_t *a, int b)
1279 {
1280 	uint64_t mask;
1281 
1282 	if (b == 0)
1283 		return;
1284 
1285 	if (b < 0) {
1286 		b = -b;
1287 		if (b >= 64) {
1288 			a[0] = a[1] >> (b - 64);
1289 			a[1] = 0;
1290 		} else {
1291 			a[0] >>= b;
1292 			mask = 1LL << (64 - b);
1293 			mask -= 1;
1294 			a[0] |= ((a[1] & mask) << (64 - b));
1295 			a[1] >>= b;
1296 		}
1297 	} else {
1298 		if (b >= 64) {
1299 			a[1] = a[0] << (b - 64);
1300 			a[0] = 0;
1301 		} else {
1302 			a[1] <<= b;
1303 			mask = a[0] >> (64 - b);
1304 			a[1] |= mask;
1305 			a[0] <<= b;
1306 		}
1307 	}
1308 }
1309 
1310 /*
1311  * The basic idea is to break the 2 64-bit values into 4 32-bit values,
1312  * use native multiplication on those, and then re-combine into the
1313  * resulting 128-bit value.
1314  *
1315  * (hi1 << 32 + lo1) * (hi2 << 32 + lo2) =
1316  *     hi1 * hi2 << 64 +
1317  *     hi1 * lo2 << 32 +
1318  *     hi2 * lo1 << 32 +
1319  *     lo1 * lo2
1320  */
1321 static void
1322 dtrace_multiply_128(uint64_t factor1, uint64_t factor2, uint64_t *product)
1323 {
1324 	uint64_t hi1, hi2, lo1, lo2;
1325 	uint64_t tmp[2];
1326 
1327 	hi1 = factor1 >> 32;
1328 	hi2 = factor2 >> 32;
1329 
1330 	lo1 = factor1 & DT_MASK_LO;
1331 	lo2 = factor2 & DT_MASK_LO;
1332 
1333 	product[0] = lo1 * lo2;
1334 	product[1] = hi1 * hi2;
1335 
1336 	tmp[0] = hi1 * lo2;
1337 	tmp[1] = 0;
1338 	dtrace_shift_128(tmp, 32);
1339 	dtrace_add_128(product, tmp, product);
1340 
1341 	tmp[0] = hi2 * lo1;
1342 	tmp[1] = 0;
1343 	dtrace_shift_128(tmp, 32);
1344 	dtrace_add_128(product, tmp, product);
1345 }
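/*
 * A small worked example of the decomposition (hypothetical operands):
 * with factor1 == 2^32 + 1 and factor2 == 2^32 + 2, hi1 == lo1 == 1,
 * hi2 == 1 and lo2 == 2, so the result is
 * (1 * 1) << 64  +  (1 * 2 + 1 * 1) << 32  +  (1 * 2) -- that is,
 * product[1] == 1 and product[0] == (3ULL << 32) + 2.
 */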
1346 
1347 /*
1348  * This privilege check should be used by actions and subroutines to
1349  * verify that the user credentials of the process that enabled the
1350  * invoking ECB match the target credentials.
1351  */
1352 static int
1353 dtrace_priv_proc_common_user(dtrace_state_t *state)
1354 {
1355 	cred_t *cr, *s_cr = state->dts_cred.dcr_cred;
1356 
1357 	/*
1358 	 * We should always have a non-NULL state cred here, since if cred
1359 	 * is null (anonymous tracing), we fast-path bypass this routine.
1360 	 */
1361 	ASSERT(s_cr != NULL);
1362 
1363 	if ((cr = CRED()) != NULL &&
1364 	    s_cr->cr_uid == cr->cr_uid &&
1365 	    s_cr->cr_uid == cr->cr_ruid &&
1366 	    s_cr->cr_uid == cr->cr_suid &&
1367 	    s_cr->cr_gid == cr->cr_gid &&
1368 	    s_cr->cr_gid == cr->cr_rgid &&
1369 	    s_cr->cr_gid == cr->cr_sgid)
1370 		return (1);
1371 
1372 	return (0);
1373 }
1374 
1375 /*
1376  * This privilege check should be used by actions and subroutines to
1377  * verify that the zone of the process that enabled the invoking ECB
1378  * matches the target credentials.
1379  */
1380 static int
1381 dtrace_priv_proc_common_zone(dtrace_state_t *state)
1382 {
1383 	cred_t *cr, *s_cr = state->dts_cred.dcr_cred;
1384 
1385 	/*
1386 	 * We should always have a non-NULL state cred here, since if cred
1387 	 * is null (anonymous tracing), we fast-path bypass this routine.
1388 	 */
1389 	ASSERT(s_cr != NULL);
1390 
1391 	if ((cr = CRED()) != NULL && s_cr->cr_zone == cr->cr_zone)
1392 		return (1);
1393 
1394 	return (0);
1395 }
1396 
1397 /*
1398  * This privilege check should be used by actions and subroutines to
1399  * verify that the process has not setuid or changed credentials.
1400  */
1401 static int
1402 dtrace_priv_proc_common_nocd()
1403 {
1404 	proc_t *proc;
1405 
1406 	if ((proc = ttoproc(curthread)) != NULL &&
1407 	    !(proc->p_flag & SNOCD))
1408 		return (1);
1409 
1410 	return (0);
1411 }
1412 
1413 static int
1414 dtrace_priv_proc_destructive(dtrace_state_t *state, dtrace_mstate_t *mstate)
1415 {
1416 	int action = state->dts_cred.dcr_action;
1417 
1418 	if (!(mstate->dtms_access & DTRACE_ACCESS_PROC))
1419 		goto bad;
1420 
1421 	if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE) == 0) &&
1422 	    dtrace_priv_proc_common_zone(state) == 0)
1423 		goto bad;
1424 
1425 	if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER) == 0) &&
1426 	    dtrace_priv_proc_common_user(state) == 0)
1427 		goto bad;
1428 
1429 	if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG) == 0) &&
1430 	    dtrace_priv_proc_common_nocd() == 0)
1431 		goto bad;
1432 
1433 	return (1);
1434 
1435 bad:
1436 	cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
1437 
1438 	return (0);
1439 }
1440 
1441 static int
1442 dtrace_priv_proc_control(dtrace_state_t *state, dtrace_mstate_t *mstate)
1443 {
1444 	if (mstate->dtms_access & DTRACE_ACCESS_PROC) {
1445 		if (state->dts_cred.dcr_action & DTRACE_CRA_PROC_CONTROL)
1446 			return (1);
1447 
1448 		if (dtrace_priv_proc_common_zone(state) &&
1449 		    dtrace_priv_proc_common_user(state) &&
1450 		    dtrace_priv_proc_common_nocd())
1451 			return (1);
1452 	}
1453 
1454 	cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
1455 
1456 	return (0);
1457 }
1458 
1459 static int
1460 dtrace_priv_proc(dtrace_state_t *state, dtrace_mstate_t *mstate)
1461 {
1462 	if ((mstate->dtms_access & DTRACE_ACCESS_PROC) &&
1463 	    (state->dts_cred.dcr_action & DTRACE_CRA_PROC))
1464 		return (1);
1465 
1466 	cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
1467 
1468 	return (0);
1469 }
1470 
1471 static int
1472 dtrace_priv_kernel(dtrace_state_t *state)
1473 {
1474 	if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL)
1475 		return (1);
1476 
1477 	cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;
1478 
1479 	return (0);
1480 }
1481 
1482 static int
1483 dtrace_priv_kernel_destructive(dtrace_state_t *state)
1484 {
1485 	if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL_DESTRUCTIVE)
1486 		return (1);
1487 
1488 	cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;
1489 
1490 	return (0);
1491 }
1492 
1493 /*
1494  * Determine if the dte_cond of the specified ECB allows for processing of
1495  * the current probe to continue.  Note that this routine may allow continued
1496  * processing, but with access(es) stripped from the mstate's dtms_access
1497  * field.
1498  */
1499 static int
1500 dtrace_priv_probe(dtrace_state_t *state, dtrace_mstate_t *mstate,
1501     dtrace_ecb_t *ecb)
1502 {
1503 	dtrace_probe_t *probe = ecb->dte_probe;
1504 	dtrace_provider_t *prov = probe->dtpr_provider;
1505 	dtrace_pops_t *pops = &prov->dtpv_pops;
1506 	int mode = DTRACE_MODE_NOPRIV_DROP;
1507 
1508 	ASSERT(ecb->dte_cond);
1509 
1510 	if (pops->dtps_mode != NULL) {
1511 		mode = pops->dtps_mode(prov->dtpv_arg,
1512 		    probe->dtpr_id, probe->dtpr_arg);
1513 
1514 		ASSERT(mode & (DTRACE_MODE_USER | DTRACE_MODE_KERNEL));
1515 		ASSERT(mode & (DTRACE_MODE_NOPRIV_RESTRICT |
1516 		    DTRACE_MODE_NOPRIV_DROP));
1517 	}
1518 
1519 	/*
1520 	 * If the dte_cond bits indicate that this consumer is only allowed to
1521 	 * see user-mode firings of this probe, check that the probe was fired
1522 	 * while in a user context.  If that's not the case, use the policy
1523 	 * specified by the provider to determine if we drop the probe or
1524 	 * merely restrict operation.
1525 	 */
1526 	if (ecb->dte_cond & DTRACE_COND_USERMODE) {
1527 		ASSERT(mode != DTRACE_MODE_NOPRIV_DROP);
1528 
1529 		if (!(mode & DTRACE_MODE_USER)) {
1530 			if (mode & DTRACE_MODE_NOPRIV_DROP)
1531 				return (0);
1532 
1533 			mstate->dtms_access &= ~DTRACE_ACCESS_ARGS;
1534 		}
1535 	}
1536 
1537 	/*
1538 	 * This is more subtle than it looks. We have to be absolutely certain
1539 	 * that CRED() isn't going to change out from under us so it's only
1540 	 * legit to examine that structure if we're in constrained situations.
1541 	 * Currently, the only time we'll do this check is if a non-super-user
1542 	 * has enabled the profile or syscall providers -- providers that
1543 	 * allow visibility of all processes. For the profile case, the check
1544 	 * above will ensure that we're examining a user context.
1545 	 */
1546 	if (ecb->dte_cond & DTRACE_COND_OWNER) {
1547 		cred_t *cr;
1548 		cred_t *s_cr = state->dts_cred.dcr_cred;
1549 		proc_t *proc;
1550 
1551 		ASSERT(s_cr != NULL);
1552 
1553 		if ((cr = CRED()) == NULL ||
1554 		    s_cr->cr_uid != cr->cr_uid ||
1555 		    s_cr->cr_uid != cr->cr_ruid ||
1556 		    s_cr->cr_uid != cr->cr_suid ||
1557 		    s_cr->cr_gid != cr->cr_gid ||
1558 		    s_cr->cr_gid != cr->cr_rgid ||
1559 		    s_cr->cr_gid != cr->cr_sgid ||
1560 		    (proc = ttoproc(curthread)) == NULL ||
1561 		    (proc->p_flag & SNOCD)) {
1562 			if (mode & DTRACE_MODE_NOPRIV_DROP)
1563 				return (0);
1564 
1565 			mstate->dtms_access &= ~DTRACE_ACCESS_PROC;
1566 		}
1567 	}
1568 
1569 	/*
1570 	 * If our dte_cond is set to DTRACE_COND_ZONEOWNER and we are not
1571 	 * in our zone, check to see if our mode policy is to restrict rather
1572 	 * than to drop; if to restrict, strip away both DTRACE_ACCESS_PROC
1573 	 * and DTRACE_ACCESS_ARGS.
1574 	 */
1575 	if (ecb->dte_cond & DTRACE_COND_ZONEOWNER) {
1576 		cred_t *cr;
1577 		cred_t *s_cr = state->dts_cred.dcr_cred;
1578 
1579 		ASSERT(s_cr != NULL);
1580 
1581 		if ((cr = CRED()) == NULL ||
1582 		    s_cr->cr_zone->zone_id != cr->cr_zone->zone_id) {
1583 			if (mode & DTRACE_MODE_NOPRIV_DROP)
1584 				return (0);
1585 
1586 			mstate->dtms_access &=
1587 			    ~(DTRACE_ACCESS_PROC | DTRACE_ACCESS_ARGS);
1588 		}
1589 	}
1590 
1591 	/*
1592 	 * By merits of being in this code path at all, we have limited
1593 	 * privileges.  If the provider has indicated that limited privileges
1594 	 * are to denote restricted operation, strip off the ability to access
1595 	 * arguments.
1596 	 */
1597 	if (mode & DTRACE_MODE_LIMITEDPRIV_RESTRICT)
1598 		mstate->dtms_access &= ~DTRACE_ACCESS_ARGS;
1599 
1600 	return (1);
1601 }
1602 
1603 /*
1604  * Note:  not called from probe context.  This function is called
1605  * asynchronously (and at a regular interval) from outside of probe context to
1606  * clean the dirty dynamic variable lists on all CPUs.  Dynamic variable
1607  * cleaning is explained in detail in <sys/dtrace_impl.h>.
1608  */
1609 void
1610 dtrace_dynvar_clean(dtrace_dstate_t *dstate)
1611 {
1612 	dtrace_dynvar_t *dirty;
1613 	dtrace_dstate_percpu_t *dcpu;
1614 	dtrace_dynvar_t **rinsep;
1615 	int i, j, work = 0;
1616 
1617 	for (i = 0; i < NCPU; i++) {
1618 		dcpu = &dstate->dtds_percpu[i];
1619 		rinsep = &dcpu->dtdsc_rinsing;
1620 
1621 		/*
1622 		 * If the dirty list is NULL, there is no dirty work to do.
1623 		 */
1624 		if (dcpu->dtdsc_dirty == NULL)
1625 			continue;
1626 
1627 		if (dcpu->dtdsc_rinsing != NULL) {
1628 			/*
1629 			 * If the rinsing list is non-NULL, then it is because
1630 			 * this CPU was selected to accept another CPU's
1631 			 * dirty list -- and since that time, dirty buffers
1632 			 * have accumulated.  This is a highly unlikely
1633 			 * condition, but we choose to ignore the dirty
1634 			 * buffers -- they'll be picked up by a future cleanse.
1635 			 */
1636 			continue;
1637 		}
1638 
1639 		if (dcpu->dtdsc_clean != NULL) {
1640 			/*
1641 			 * If the clean list is non-NULL, then we're in a
1642 			 * situation where a CPU has done deallocations (we
1643 			 * have a non-NULL dirty list) but no allocations (we
1644 			 * also have a non-NULL clean list).  We can't simply
1645 			 * move the dirty list into the clean list on this
1646 			 * CPU, yet we also don't want to allow this condition
1647 			 * to persist, lest a short clean list prevent a
1648 			 * massive dirty list from being cleaned (which in
1649 			 * turn could lead to otherwise avoidable dynamic
1650 			 * drops).  To deal with this, we look for some CPU
1651 			 * with a NULL clean list, NULL dirty list, and NULL
1652 			 * rinsing list -- and then we borrow this CPU to
1653 			 * rinse our dirty list.
1654 			 */
1655 			for (j = 0; j < NCPU; j++) {
1656 				dtrace_dstate_percpu_t *rinser;
1657 
1658 				rinser = &dstate->dtds_percpu[j];
1659 
1660 				if (rinser->dtdsc_rinsing != NULL)
1661 					continue;
1662 
1663 				if (rinser->dtdsc_dirty != NULL)
1664 					continue;
1665 
1666 				if (rinser->dtdsc_clean != NULL)
1667 					continue;
1668 
1669 				rinsep = &rinser->dtdsc_rinsing;
1670 				break;
1671 			}
1672 
1673 			if (j == NCPU) {
1674 				/*
1675 				 * We were unable to find another CPU that
1676 				 * could accept this dirty list -- we are
1677 				 * therefore unable to clean it now.
1678 				 */
1679 				dtrace_dynvar_failclean++;
1680 				continue;
1681 			}
1682 		}
1683 
1684 		work = 1;
1685 
1686 		/*
1687 		 * Atomically move the dirty list aside.
1688 		 */
1689 		do {
1690 			dirty = dcpu->dtdsc_dirty;
1691 
1692 			/*
1693 			 * Before we zap the dirty list, set the rinsing list.
1694 			 * (This allows for a potential assertion in
1695 			 * dtrace_dynvar():  if a free dynamic variable appears
1696 			 * on a hash chain, either the dirty list or the
1697 			 * rinsing list for some CPU must be non-NULL.)
1698 			 */
1699 			*rinsep = dirty;
1700 			dtrace_membar_producer();
1701 		} while (dtrace_casptr(&dcpu->dtdsc_dirty,
1702 		    dirty, NULL) != dirty);
1703 	}
1704 
1705 	if (!work) {
1706 		/*
1707 		 * We have no work to do; we can simply return.
1708 		 */
1709 		return;
1710 	}
1711 
1712 	dtrace_sync();
1713 
1714 	for (i = 0; i < NCPU; i++) {
1715 		dcpu = &dstate->dtds_percpu[i];
1716 
1717 		if (dcpu->dtdsc_rinsing == NULL)
1718 			continue;
1719 
1720 		/*
1721 		 * We are now guaranteed that no hash chain contains a pointer
1722 		 * into this dirty list; we can make it clean.
1723 		 */
1724 		ASSERT(dcpu->dtdsc_clean == NULL);
1725 		dcpu->dtdsc_clean = dcpu->dtdsc_rinsing;
1726 		dcpu->dtdsc_rinsing = NULL;
1727 	}
1728 
1729 	/*
1730 	 * Before we actually set the state to be DTRACE_DSTATE_CLEAN, make
1731 	 * sure that all CPUs have seen all of the dtdsc_clean pointers.
1732 	 * This prevents a race whereby a CPU incorrectly decides that
1733 	 * the state should be something other than DTRACE_DSTATE_CLEAN
1734 	 * after dtrace_dynvar_clean() has completed.
1735 	 */
1736 	dtrace_sync();
1737 
1738 	dstate->dtds_state = DTRACE_DSTATE_CLEAN;
1739 }
1740 
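/*
 * An illustrative summary of the per-CPU list lifecycle implemented above
 * and in dtrace_dynvar() -- the authoritative description lives in
 * <sys/dtrace_impl.h>:  freed chunks are pushed onto a CPU's dirty list;
 * this cleaner moves each dirty list aside onto a rinsing list, issues a
 * dtrace_sync() so that no probe can still hold a reference into it, and
 * then promotes the rinsed list to the clean list; dtrace_dynvar()
 * allocates from the free list and replenishes the free list from the
 * clean list when it runs dry.
 */
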
1741 /*
1742  * Depending on the value of the op parameter, this function looks up,
1743  * allocates, or deallocates an arbitrarily-keyed dynamic variable.  If an
1744  * allocation is requested, this function will return a pointer to a
1745  * dtrace_dynvar_t corresponding to the allocated variable -- or NULL if no
1746  * variable can be allocated.  If NULL is returned, the appropriate counter
1747  * will be incremented.
1748  */
1749 dtrace_dynvar_t *
1750 dtrace_dynvar(dtrace_dstate_t *dstate, uint_t nkeys,
1751     dtrace_key_t *key, size_t dsize, dtrace_dynvar_op_t op,
1752     dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
1753 {
1754 	uint64_t hashval = DTRACE_DYNHASH_VALID;
1755 	dtrace_dynhash_t *hash = dstate->dtds_hash;
1756 	dtrace_dynvar_t *free, *new_free, *next, *dvar, *start, *prev = NULL;
1757 	processorid_t me = CPU->cpu_id, cpu = me;
1758 	dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[me];
1759 	size_t bucket, ksize;
1760 	size_t chunksize = dstate->dtds_chunksize;
1761 	uintptr_t kdata, lock, nstate;
1762 	uint_t i;
1763 
1764 	ASSERT(nkeys != 0);
1765 
1766 	/*
1767 	 * Hash the key.  As with aggregations, we use Jenkins' "One-at-a-time"
1768 	 * algorithm.  For the by-value portions, we perform the algorithm in
1769 	 * 16-bit chunks (as opposed to 8-bit chunks).  This speeds things up a
1770 	 * bit, and seems to have only a minute effect on distribution.  For
1771 	 * the by-reference data, we perform "One-at-a-time" hashing, iterating (safely)
1772 	 * over each referenced byte.  It's painful to do this, but it's much
1773 	 * better than pathological hash distribution.  The efficacy of the
1774 	 * hashing algorithm (and a comparison with other algorithms) may be
1775 	 * found by running the ::dtrace_dynstat MDB dcmd.
1776 	 */
1777 	for (i = 0; i < nkeys; i++) {
1778 		if (key[i].dttk_size == 0) {
1779 			uint64_t val = key[i].dttk_value;
1780 
1781 			hashval += (val >> 48) & 0xffff;
1782 			hashval += (hashval << 10);
1783 			hashval ^= (hashval >> 6);
1784 
1785 			hashval += (val >> 32) & 0xffff;
1786 			hashval += (hashval << 10);
1787 			hashval ^= (hashval >> 6);
1788 
1789 			hashval += (val >> 16) & 0xffff;
1790 			hashval += (hashval << 10);
1791 			hashval ^= (hashval >> 6);
1792 
1793 			hashval += val & 0xffff;
1794 			hashval += (hashval << 10);
1795 			hashval ^= (hashval >> 6);
1796 		} else {
1797 			/*
1798 			 * This is incredibly painful, but it beats the hell
1799 			 * out of the alternative.
1800 			 */
1801 			uint64_t j, size = key[i].dttk_size;
1802 			uintptr_t base = (uintptr_t)key[i].dttk_value;
1803 
1804 			if (!dtrace_canload(base, size, mstate, vstate))
1805 				break;
1806 
1807 			for (j = 0; j < size; j++) {
1808 				hashval += dtrace_load8(base + j);
1809 				hashval += (hashval << 10);
1810 				hashval ^= (hashval >> 6);
1811 			}
1812 		}
1813 	}
1814 
1815 	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
1816 		return (NULL);
1817 
1818 	hashval += (hashval << 3);
1819 	hashval ^= (hashval >> 11);
1820 	hashval += (hashval << 15);
1821 
1822 	/*
1823 	 * There is a remote chance (ideally, 1 in 2^31) that our hashval
1824 	 * comes out to be one of our two sentinel hash values.  If this
1825 	 * actually happens, we set the hashval to be a value known to be a
1826 	 * non-sentinel value.
1827 	 */
1828 	if (hashval == DTRACE_DYNHASH_FREE || hashval == DTRACE_DYNHASH_SINK)
1829 		hashval = DTRACE_DYNHASH_VALID;
1830 
1831 	/*
1832 	 * Yes, it's painful to do a divide here.  If the cycle count becomes
1833 	 * important here, tricks can be pulled to reduce it.  (However, it's
1834 	 * critical that hash collisions be kept to an absolute minimum;
1835 	 * they're much more painful than a divide.)  It's better to have a
1836 	 * solution that generates few collisions and still keeps things
1837 	 * relatively simple.
1838 	 */
1839 	bucket = hashval % dstate->dtds_hashsize;
1840 
1841 	if (op == DTRACE_DYNVAR_DEALLOC) {
1842 		volatile uintptr_t *lockp = &hash[bucket].dtdh_lock;
1843 
1844 		for (;;) {
1845 			while ((lock = *lockp) & 1)
1846 				continue;
1847 
1848 			if (dtrace_casptr((void *)lockp,
1849 			    (void *)lock, (void *)(lock + 1)) == (void *)lock)
1850 				break;
1851 		}
1852 
1853 		dtrace_membar_producer();
1854 	}
1855 
1856 top:
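	/*
	 * An illustrative note on the bucket locking above (not from the
	 * original source):  the low bit of dtdh_lock is the lock itself.
	 * A deallocation spins until the word is even, then uses
	 * compare-and-swap to increment it to an odd value; dropping the
	 * lock increments it again.  Lookups never take the lock -- they
	 * snapshot the (even) lock value before walking the chain and
	 * re-check it afterwards to detect that a concurrent deallocation
	 * may have invalidated what they saw.
	 */
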
1857 	prev = NULL;
1858 	lock = hash[bucket].dtdh_lock;
1859 
1860 	dtrace_membar_consumer();
1861 
1862 	start = hash[bucket].dtdh_chain;
1863 	ASSERT(start != NULL && (start->dtdv_hashval == DTRACE_DYNHASH_SINK ||
1864 	    start->dtdv_hashval != DTRACE_DYNHASH_FREE ||
1865 	    op != DTRACE_DYNVAR_DEALLOC));
1866 
1867 	for (dvar = start; dvar != NULL; dvar = dvar->dtdv_next) {
1868 		dtrace_tuple_t *dtuple = &dvar->dtdv_tuple;
1869 		dtrace_key_t *dkey = &dtuple->dtt_key[0];
1870 
1871 		if (dvar->dtdv_hashval != hashval) {
1872 			if (dvar->dtdv_hashval == DTRACE_DYNHASH_SINK) {
1873 				/*
1874 				 * We've reached the sink, and therefore the
1875 				 * end of the hash chain; we can kick out of
1876 				 * the loop knowing that we have seen a valid
1877 				 * snapshot of state.
1878 				 */
1879 				ASSERT(dvar->dtdv_next == NULL);
1880 				ASSERT(dvar == &dtrace_dynhash_sink);
1881 				break;
1882 			}
1883 
1884 			if (dvar->dtdv_hashval == DTRACE_DYNHASH_FREE) {
1885 				/*
1886 				 * We've gone off the rails:  somewhere along
1887 				 * the line, one of the members of this hash
1888 				 * chain was deleted.  Note that we could also
1889 				 * detect this by simply letting this loop run
1890 				 * to completion, as we would eventually hit
1891 				 * the end of the dirty list.  However, we
1892 				 * want to avoid running the length of the
1893 				 * dirty list unnecessarily (it might be quite
1894 				 * long), so we catch this as early as
1895 				 * possible by detecting the hash marker.  In
1896 				 * this case, we simply set dvar to NULL and
1897 				 * break; the conditional after the loop will
1898 				 * send us back to top.
1899 				 */
1900 				dvar = NULL;
1901 				break;
1902 			}
1903 
1904 			goto next;
1905 		}
1906 
1907 		if (dtuple->dtt_nkeys != nkeys)
1908 			goto next;
1909 
1910 		for (i = 0; i < nkeys; i++, dkey++) {
1911 			if (dkey->dttk_size != key[i].dttk_size)
1912 				goto next; /* size or type mismatch */
1913 
1914 			if (dkey->dttk_size != 0) {
1915 				if (dtrace_bcmp(
1916 				    (void *)(uintptr_t)key[i].dttk_value,
1917 				    (void *)(uintptr_t)dkey->dttk_value,
1918 				    dkey->dttk_size))
1919 					goto next;
1920 			} else {
1921 				if (dkey->dttk_value != key[i].dttk_value)
1922 					goto next;
1923 			}
1924 		}
1925 
1926 		if (op != DTRACE_DYNVAR_DEALLOC)
1927 			return (dvar);
1928 
1929 		ASSERT(dvar->dtdv_next == NULL ||
1930 		    dvar->dtdv_next->dtdv_hashval != DTRACE_DYNHASH_FREE);
1931 
1932 		if (prev != NULL) {
1933 			ASSERT(hash[bucket].dtdh_chain != dvar);
1934 			ASSERT(start != dvar);
1935 			ASSERT(prev->dtdv_next == dvar);
1936 			prev->dtdv_next = dvar->dtdv_next;
1937 		} else {
1938 			if (dtrace_casptr(&hash[bucket].dtdh_chain,
1939 			    start, dvar->dtdv_next) != start) {
1940 				/*
1941 				 * We have failed to atomically swing the
1942 				 * hash table head pointer, presumably because
1943 				 * of a conflicting allocation on another CPU.
1944 				 * We need to reread the hash chain and try
1945 				 * again.
1946 				 */
1947 				goto top;
1948 			}
1949 		}
1950 
1951 		dtrace_membar_producer();
1952 
1953 		/*
1954 		 * Now set the hash value to indicate that it's free.
1955 		 */
1956 		ASSERT(hash[bucket].dtdh_chain != dvar);
1957 		dvar->dtdv_hashval = DTRACE_DYNHASH_FREE;
1958 
1959 		dtrace_membar_producer();
1960 
1961 		/*
1962 		 * Set the next pointer to point at the dirty list, and
1963 		 * atomically swing the dirty pointer to the newly freed dvar.
1964 		 */
1965 		do {
1966 			next = dcpu->dtdsc_dirty;
1967 			dvar->dtdv_next = next;
1968 		} while (dtrace_casptr(&dcpu->dtdsc_dirty, next, dvar) != next);
1969 
1970 		/*
1971 		 * Finally, unlock this hash bucket.
1972 		 */
1973 		ASSERT(hash[bucket].dtdh_lock == lock);
1974 		ASSERT(lock & 1);
1975 		hash[bucket].dtdh_lock++;
1976 
1977 		return (NULL);
1978 next:
1979 		prev = dvar;
1980 		continue;
1981 	}
1982 
1983 	if (dvar == NULL) {
1984 		/*
1985 		 * If dvar is NULL, it is because we went off the rails:
1986 		 * one of the elements that we traversed in the hash chain
1987 		 * was deleted while we were traversing it.  In this case,
1988 		 * we assert that we aren't doing a dealloc (deallocs lock
1989 		 * the hash bucket to prevent themselves from racing with
1990 		 * one another), and retry the hash chain traversal.
1991 		 */
1992 		ASSERT(op != DTRACE_DYNVAR_DEALLOC);
1993 		goto top;
1994 	}
1995 
1996 	if (op != DTRACE_DYNVAR_ALLOC) {
1997 		/*
1998 		 * If we are not to allocate a new variable, we want to
1999 		 * return NULL now.  Before we return, check that the value
2000 		 * of the lock word hasn't changed.  If it has, we may have
2001 		 * seen an inconsistent snapshot.
2002 		 */
2003 		if (op == DTRACE_DYNVAR_NOALLOC) {
2004 			if (hash[bucket].dtdh_lock != lock)
2005 				goto top;
2006 		} else {
2007 			ASSERT(op == DTRACE_DYNVAR_DEALLOC);
2008 			ASSERT(hash[bucket].dtdh_lock == lock);
2009 			ASSERT(lock & 1);
2010 			hash[bucket].dtdh_lock++;
2011 		}
2012 
2013 		return (NULL);
2014 	}
2015 
2016 	/*
2017 	 * We need to allocate a new dynamic variable.  The size we need is the
2018 	 * size of dtrace_dynvar plus the size of nkeys dtrace_key_t's plus the
2019 	 * size of any auxiliary key data (rounded up to 8-byte alignment) plus
2020 	 * the size of any referred-to data (dsize).  We then round the final
2021 	 * size up to the chunksize for allocation.
2022 	 */
2023 	for (ksize = 0, i = 0; i < nkeys; i++)
2024 		ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t));
2025 
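	/*
	 * An illustrative example (not from the original source):  for a
	 * two-key tuple consisting of one by-value key (dttk_size == 0)
	 * and one 13-byte by-reference key, the by-value key contributes
	 * nothing to ksize and the by-reference key contributes
	 * P2ROUNDUP(13, sizeof (uint64_t)) == 16, so ksize == 16.  The
	 * total requirement checked below is then sizeof (dtrace_dynvar_t)
	 * (which already embeds one dtrace_key_t) plus one additional
	 * dtrace_key_t plus 16 plus dsize.
	 */
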
2026 	/*
2027 	 * This should be pretty much impossible, but could happen if, say,
2028 	 * strange DIF specified the tuple.  Ideally, this should be an
2029 	 * assertion and not an error condition -- but that requires that the
2030 	 * chunksize calculation in dtrace_difo_chunksize() be absolutely
2031 	 * bullet-proof.  (That is, it must not be able to be fooled by
2032 	 * malicious DIF.)  Given the lack of backwards branches in DIF,
2033 	 * solving this would presumably not amount to solving the Halting
2034 	 * Problem -- but it still seems awfully hard.
2035 	 */
2036 	if (sizeof (dtrace_dynvar_t) + sizeof (dtrace_key_t) * (nkeys - 1) +
2037 	    ksize + dsize > chunksize) {
2038 		dcpu->dtdsc_drops++;
2039 		return (NULL);
2040 	}
2041 
2042 	nstate = DTRACE_DSTATE_EMPTY;
2043 
2044 	do {
2045 retry:
2046 		free = dcpu->dtdsc_free;
2047 
2048 		if (free == NULL) {
2049 			dtrace_dynvar_t *clean = dcpu->dtdsc_clean;
2050 			void *rval;
2051 
2052 			if (clean == NULL) {
2053 				/*
2054 				 * We're out of dynamic variable space on
2055 				 * this CPU.  Unless we have tried all CPUs,
2056 				 * we'll try to allocate from a different
2057 				 * CPU.
2058 				 */
2059 				switch (dstate->dtds_state) {
2060 				case DTRACE_DSTATE_CLEAN: {
2061 					void *sp = &dstate->dtds_state;
2062 
2063 					if (++cpu >= NCPU)
2064 						cpu = 0;
2065 
2066 					if (dcpu->dtdsc_dirty != NULL &&
2067 					    nstate == DTRACE_DSTATE_EMPTY)
2068 						nstate = DTRACE_DSTATE_DIRTY;
2069 
2070 					if (dcpu->dtdsc_rinsing != NULL)
2071 						nstate = DTRACE_DSTATE_RINSING;
2072 
2073 					dcpu = &dstate->dtds_percpu[cpu];
2074 
2075 					if (cpu != me)
2076 						goto retry;
2077 
2078 					(void) dtrace_cas32(sp,
2079 					    DTRACE_DSTATE_CLEAN, nstate);
2080 
2081 					/*
2082 					 * To increment the correct bean
2083 					 * counter, take another lap.
2084 					 */
2085 					goto retry;
2086 				}
2087 
2088 				case DTRACE_DSTATE_DIRTY:
2089 					dcpu->dtdsc_dirty_drops++;
2090 					break;
2091 
2092 				case DTRACE_DSTATE_RINSING:
2093 					dcpu->dtdsc_rinsing_drops++;
2094 					break;
2095 
2096 				case DTRACE_DSTATE_EMPTY:
2097 					dcpu->dtdsc_drops++;
2098 					break;
2099 				}
2100 
2101 				DTRACE_CPUFLAG_SET(CPU_DTRACE_DROP);
2102 				return (NULL);
2103 			}
2104 
2105 			/*
2106 			 * The clean list appears to be non-empty.  We want to
2107 			 * move the clean list to the free list; we start by
2108 			 * moving the clean pointer aside.
2109 			 */
2110 			if (dtrace_casptr(&dcpu->dtdsc_clean,
2111 			    clean, NULL) != clean) {
2112 				/*
2113 				 * We are in one of two situations:
2114 				 *
2115 				 *  (a)	The clean list was switched to the
2116 				 *	free list by another CPU.
2117 				 *
2118 				 *  (b)	The clean list was added to by the
2119 				 *	cleansing cyclic.
2120 				 *
2121 				 * In either of these situations, we can
2122 				 * just reattempt the free list allocation.
2123 				 */
2124 				goto retry;
2125 			}
2126 
2127 			ASSERT(clean->dtdv_hashval == DTRACE_DYNHASH_FREE);
2128 
2129 			/*
2130 			 * Now we'll move the clean list to our free list.
2131 			 * It's impossible for this to fail:  the only way
2132 			 * the free list can be updated is through this
2133 			 * code path, and only one CPU can own the clean list.
2134 			 * Thus, it would only be possible for this to fail if
2135 			 * this code were racing with dtrace_dynvar_clean().
2136 			 * (That is, if dtrace_dynvar_clean() updated the clean
2137 			 * list, and we ended up racing to update the free
2138 			 * list.)  This race is prevented by the dtrace_sync()
2139 			 * in dtrace_dynvar_clean() -- which flushes the
2140 			 * owners of the clean lists out before resetting
2141 			 * the clean lists.
2142 			 */
2143 			dcpu = &dstate->dtds_percpu[me];
2144 			rval = dtrace_casptr(&dcpu->dtdsc_free, NULL, clean);
2145 			ASSERT(rval == NULL);
2146 			goto retry;
2147 		}
2148 
2149 		dvar = free;
2150 		new_free = dvar->dtdv_next;
2151 	} while (dtrace_casptr(&dcpu->dtdsc_free, free, new_free) != free);
2152 
2153 	/*
2154 	 * We have now allocated a new chunk.  We copy the tuple keys into the
2155 	 * tuple array and copy any referenced key data into the data space
2156 	 * following the tuple array.  As we do this, we relocate dttk_value
2157 	 * in the final tuple to point to the key data address in the chunk.
2158 	 */
2159 	kdata = (uintptr_t)&dvar->dtdv_tuple.dtt_key[nkeys];
2160 	dvar->dtdv_data = (void *)(kdata + ksize);
2161 	dvar->dtdv_tuple.dtt_nkeys = nkeys;
2162 
2163 	for (i = 0; i < nkeys; i++) {
2164 		dtrace_key_t *dkey = &dvar->dtdv_tuple.dtt_key[i];
2165 		size_t kesize = key[i].dttk_size;
2166 
2167 		if (kesize != 0) {
2168 			dtrace_bcopy(
2169 			    (const void *)(uintptr_t)key[i].dttk_value,
2170 			    (void *)kdata, kesize);
2171 			dkey->dttk_value = kdata;
2172 			kdata += P2ROUNDUP(kesize, sizeof (uint64_t));
2173 		} else {
2174 			dkey->dttk_value = key[i].dttk_value;
2175 		}
2176 
2177 		dkey->dttk_size = kesize;
2178 	}
2179 
2180 	ASSERT(dvar->dtdv_hashval == DTRACE_DYNHASH_FREE);
2181 	dvar->dtdv_hashval = hashval;
2182 	dvar->dtdv_next = start;
2183 
2184 	if (dtrace_casptr(&hash[bucket].dtdh_chain, start, dvar) == start)
2185 		return (dvar);
2186 
2187 	/*
2188 	 * The cas has failed.  Either another CPU is adding an element to
2189 	 * this hash chain, or another CPU is deleting an element from this
2190 	 * hash chain.  The simplest way to deal with both of these cases
2191 	 * (though not necessarily the most efficient) is to free our
2192 	 * allocated block and re-attempt it all.  Note that the free is
2193 	 * to the dirty list and _not_ to the free list.  This is to prevent
2194 	 * races with allocators, above.
2195 	 */
2196 	dvar->dtdv_hashval = DTRACE_DYNHASH_FREE;
2197 
2198 	dtrace_membar_producer();
2199 
2200 	do {
2201 		free = dcpu->dtdsc_dirty;
2202 		dvar->dtdv_next = free;
2203 	} while (dtrace_casptr(&dcpu->dtdsc_dirty, free, dvar) != free);
2204 
2205 	goto top;
2206 }
2207 
2208 /*ARGSUSED*/
2209 static void
2210 dtrace_aggregate_min(uint64_t *oval, uint64_t nval, uint64_t arg)
2211 {
2212 	if ((int64_t)nval < (int64_t)*oval)
2213 		*oval = nval;
2214 }
2215 
2216 /*ARGSUSED*/
2217 static void
2218 dtrace_aggregate_max(uint64_t *oval, uint64_t nval, uint64_t arg)
2219 {
2220 	if ((int64_t)nval > (int64_t)*oval)
2221 		*oval = nval;
2222 }
2223 
2224 static void
2225 dtrace_aggregate_quantize(uint64_t *quanta, uint64_t nval, uint64_t incr)
2226 {
2227 	int i, zero = DTRACE_QUANTIZE_ZEROBUCKET;
2228 	int64_t val = (int64_t)nval;
2229 
2230 	if (val < 0) {
2231 		for (i = 0; i < zero; i++) {
2232 			if (val <= DTRACE_QUANTIZE_BUCKETVAL(i)) {
2233 				quanta[i] += incr;
2234 				return;
2235 			}
2236 		}
2237 	} else {
2238 		for (i = zero + 1; i < DTRACE_QUANTIZE_NBUCKETS; i++) {
2239 			if (val < DTRACE_QUANTIZE_BUCKETVAL(i)) {
2240 				quanta[i - 1] += incr;
2241 				return;
2242 			}
2243 		}
2244 
2245 		quanta[DTRACE_QUANTIZE_NBUCKETS - 1] += incr;
2246 		return;
2247 	}
2248 
2249 	ASSERT(0);
2250 }
2251 
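/*
 * A worked example (illustrative; it assumes the usual power-of-two
 * DTRACE_QUANTIZE_BUCKETVAL() definition from <sys/dtrace.h>):  for
 * nval = 6, the positive-value loop above walks the bucket values
 * 1, 2, 4, 8, ...; 6 is first strictly less than 8, so quanta[zero + 3]
 * -- the bucket labelled 4, covering values in [4, 8) -- is incremented
 * by incr.
 */
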
2252 static void
2253 dtrace_aggregate_lquantize(uint64_t *lquanta, uint64_t nval, uint64_t incr)
2254 {
2255 	uint64_t arg = *lquanta++;
2256 	int32_t base = DTRACE_LQUANTIZE_BASE(arg);
2257 	uint16_t step = DTRACE_LQUANTIZE_STEP(arg);
2258 	uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg);
2259 	int32_t val = (int32_t)nval, level;
2260 
2261 	ASSERT(step != 0);
2262 	ASSERT(levels != 0);
2263 
2264 	if (val < base) {
2265 		/*
2266 		 * This is an underflow.
2267 		 */
2268 		lquanta[0] += incr;
2269 		return;
2270 	}
2271 
2272 	level = (val - base) / step;
2273 
2274 	if (level < levels) {
2275 		lquanta[level + 1] += incr;
2276 		return;
2277 	}
2278 
2279 	/*
2280 	 * This is an overflow.
2281 	 */
2282 	lquanta[levels + 1] += incr;
2283 }
2284 
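/*
 * A worked example (illustrative):  with a decoded argument of base = 0,
 * step = 10 and levels = 10 (e.g. lquantize(x, 0, 100, 10)), a value of
 * 37 yields level = (37 - 0) / 10 = 3 and increments lquanta[4]; a value
 * of -5 increments the underflow bucket lquanta[0]; and a value of 250
 * yields level = 25 >= levels and increments the overflow bucket
 * lquanta[11].
 */
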
2285 static int
2286 dtrace_aggregate_llquantize_bucket(uint16_t factor, uint16_t low,
2287     uint16_t high, uint16_t nsteps, int64_t value)
2288 {
2289 	int64_t this = 1, last, next;
2290 	int base = 1, order;
2291 
2292 	ASSERT(factor <= nsteps);
2293 	ASSERT(nsteps % factor == 0);
2294 
2295 	for (order = 0; order < low; order++)
2296 		this *= factor;
2297 
2298 	/*
2299 	 * If our value is less than our factor taken to the power of the
2300 	 * low order of magnitude, it goes into the zeroth bucket.
2301 	 */
2302 	if (value < (last = this))
2303 		return (0);
2304 
2305 	for (this *= factor; order <= high; order++) {
2306 		int nbuckets = this > nsteps ? nsteps : this;
2307 
2308 		if ((next = this * factor) < this) {
2309 			/*
2310 			 * We should not generally get log/linear quantizations
2311 			 * with a high magnitude that allows 64-bits to
2312 			 * overflow, but we nonetheless protect against this
2313 			 * by explicitly checking for overflow, and clamping
2314 			 * our value accordingly.
2315 			 */
2316 			value = this - 1;
2317 		}
2318 
2319 		if (value < this) {
2320 			/*
2321 			 * If our value lies within this order of magnitude,
2322 			 * determine its position by taking the offset within
2323 			 * the order of magnitude, dividing by the bucket
2324 			 * width, and adding to our (accumulated) base.
2325 			 */
2326 			return (base + (value - last) / (this / nbuckets));
2327 		}
2328 
2329 		base += nbuckets - (nbuckets / factor);
2330 		last = this;
2331 		this = next;
2332 	}
2333 
2334 	/*
2335 	 * Our value is greater than or equal to our factor taken to the
2336 	 * power of one plus the high magnitude -- return the top bucket.
2337 	 */
2338 	return (base);
2339 }
2340 
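/*
 * A worked example (illustrative):  with factor = 10, low = 0, high = 2
 * and nsteps = 10, a value of 42 is processed as follows.  Values below
 * factor^low = 1 would return bucket 0.  The magnitude-0 range [1, 10)
 * contributes nbuckets - nbuckets / factor = 9 buckets, taking base from
 * 1 to 10.  42 then falls within the magnitude-1 range [10, 100), whose
 * bucket width is 100 / 10 = 10, so the function returns
 * 10 + (42 - 10) / 10 = 13.
 */
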
2341 static void
2342 dtrace_aggregate_llquantize(uint64_t *llquanta, uint64_t nval, uint64_t incr)
2343 {
2344 	uint64_t arg = *llquanta++;
2345 	uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(arg);
2346 	uint16_t low = DTRACE_LLQUANTIZE_LOW(arg);
2347 	uint16_t high = DTRACE_LLQUANTIZE_HIGH(arg);
2348 	uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(arg);
2349 
2350 	llquanta[dtrace_aggregate_llquantize_bucket(factor,
2351 	    low, high, nsteps, nval)] += incr;
2352 }
2353 
2354 /*ARGSUSED*/
2355 static void
2356 dtrace_aggregate_avg(uint64_t *data, uint64_t nval, uint64_t arg)
2357 {
2358 	data[0]++;
2359 	data[1] += nval;
2360 }
2361 
2362 /*ARGSUSED*/
2363 static void
2364 dtrace_aggregate_stddev(uint64_t *data, uint64_t nval, uint64_t arg)
2365 {
2366 	int64_t snval = (int64_t)nval;
2367 	uint64_t tmp[2];
2368 
2369 	data[0]++;
2370 	data[1] += nval;
2371 
2372 	/*
2373 	 * What we want to say here is:
2374 	 *
2375 	 * data[2] += nval * nval;
2376 	 *
2377 	 * But given that nval is 64-bit, we could easily overflow, so
2378 	 * we do this as 128-bit arithmetic.
2379 	 */
2380 	if (snval < 0)
2381 		snval = -snval;
2382 
2383 	dtrace_multiply_128((uint64_t)snval, (uint64_t)snval, tmp);
2384 	dtrace_add_128(data + 2, tmp, data + 2);
2385 }
2386 
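/*
 * The running state maintained above is:  data[0] is the count, data[1]
 * is the sum of the values, and data[2]..data[3] hold a 128-bit sum of
 * the squares.  A consumer can then derive the standard deviation as
 * sqrt(sum-of-squares / count - (sum / count)^2) -- a sketch of the
 * arithmetic only; the actual computation is performed by the consumer,
 * not here.
 */
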
2387 /*ARGSUSED*/
2388 static void
2389 dtrace_aggregate_count(uint64_t *oval, uint64_t nval, uint64_t arg)
2390 {
2391 	*oval = *oval + 1;
2392 }
2393 
2394 /*ARGSUSED*/
2395 static void
2396 dtrace_aggregate_sum(uint64_t *oval, uint64_t nval, uint64_t arg)
2397 {
2398 	*oval += nval;
2399 }
2400 
2401 /*
2402  * Aggregate given the tuple in the principal data buffer, and the aggregating
2403  * action denoted by the specified dtrace_aggregation_t.  The aggregation
2404  * buffer is specified as the buf parameter.  This routine does not return
2405  * failure; if there is no space in the aggregation buffer, the data will be
2406  * dropped, and a corresponding counter incremented.
2407  */
2408 static void
2409 dtrace_aggregate(dtrace_aggregation_t *agg, dtrace_buffer_t *dbuf,
2410     intptr_t offset, dtrace_buffer_t *buf, uint64_t expr, uint64_t arg)
2411 {
2412 	dtrace_recdesc_t *rec = &agg->dtag_action.dta_rec;
2413 	uint32_t i, ndx, size, fsize;
2414 	uint32_t align = sizeof (uint64_t) - 1;
2415 	dtrace_aggbuffer_t *agb;
2416 	dtrace_aggkey_t *key;
2417 	uint32_t hashval = 0, limit, isstr;
2418 	caddr_t tomax, data, kdata;
2419 	dtrace_actkind_t action;
2420 	dtrace_action_t *act;
2421 	uintptr_t offs;
2422 
2423 	if (buf == NULL)
2424 		return;
2425 
2426 	if (!agg->dtag_hasarg) {
2427 		/*
2428 		 * Currently, only quantize() and lquantize() take additional
2429 		 * arguments, and they have the same semantics:  an increment
2430 		 * value that defaults to 1 when not present.  If additional
2431 		 * aggregating actions take arguments, the setting of the
2432 		 * default argument value will presumably have to become more
2433 		 * sophisticated...
2434 		 */
2435 		arg = 1;
2436 	}
2437 
2438 	action = agg->dtag_action.dta_kind - DTRACEACT_AGGREGATION;
2439 	size = rec->dtrd_offset - agg->dtag_base;
2440 	fsize = size + rec->dtrd_size;
2441 
2442 	ASSERT(dbuf->dtb_tomax != NULL);
2443 	data = dbuf->dtb_tomax + offset + agg->dtag_base;
2444 
2445 	if ((tomax = buf->dtb_tomax) == NULL) {
2446 		dtrace_buffer_drop(buf);
2447 		return;
2448 	}
2449 
2450 	/*
2451 	 * The metastructure is always at the bottom of the buffer.
2452 	 */
2453 	agb = (dtrace_aggbuffer_t *)(tomax + buf->dtb_size -
2454 	    sizeof (dtrace_aggbuffer_t));
2455 
2456 	if (buf->dtb_offset == 0) {
2457 		/*
2458 		 * We just kludge up approximately 1/8th of the size to be
2459 		 * buckets.  If this guess ends up being routinely
2460 		 * off-the-mark, we may need to dynamically readjust this
2461 		 * based on past performance.
2462 		 */
2463 		uintptr_t hashsize = (buf->dtb_size >> 3) / sizeof (uintptr_t);
2464 
2465 		if ((uintptr_t)agb - hashsize * sizeof (dtrace_aggkey_t *) <
2466 		    (uintptr_t)tomax || hashsize == 0) {
2467 			/*
2468 			 * We've been given a ludicrously small buffer;
2469 			 * increment our drop count and leave.
2470 			 */
2471 			dtrace_buffer_drop(buf);
2472 			return;
2473 		}
2474 
2475 		/*
2476 		 * And now, a pathetic attempt to try to get an odd (or
2477 		 * perchance, a prime) hash size for better hash distribution.
2478 		 */
2479 		if (hashsize > (DTRACE_AGGHASHSIZE_SLEW << 3))
2480 			hashsize -= DTRACE_AGGHASHSIZE_SLEW;
2481 
2482 		agb->dtagb_hashsize = hashsize;
2483 		agb->dtagb_hash = (dtrace_aggkey_t **)((uintptr_t)agb -
2484 		    agb->dtagb_hashsize * sizeof (dtrace_aggkey_t *));
2485 		agb->dtagb_free = (uintptr_t)agb->dtagb_hash;
2486 
2487 		for (i = 0; i < agb->dtagb_hashsize; i++)
2488 			agb->dtagb_hash[i] = NULL;
2489 	}
2490 
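	/*
	 * An illustrative sketch of the resulting buffer layout (not part
	 * of the original source):  the dtrace_aggbuffer_t metastructure
	 * occupies the very top of the buffer; immediately below it is the
	 * array of dtagb_hashsize hash-chain head pointers, and dtagb_free
	 * starts at the base of that array.  dtrace_aggkey_t structures
	 * are carved downward from dtagb_free as keys are added, while the
	 * key/value data they describe is allocated upward from dtb_offset
	 * at the bottom of the buffer; allocation fails (and the drop
	 * count is bumped) when the two would meet.
	 */
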
2491 	ASSERT(agg->dtag_first != NULL);
2492 	ASSERT(agg->dtag_first->dta_intuple);
2493 
2494 	/*
2495 	 * Calculate the hash value based on the key.  Note that we _don't_
2496 	 * include the aggid in the hashing (but we will store it as part of
2497 	 * the key).  The hashing algorithm is Bob Jenkins' "One-at-a-time"
2498 	 * algorithm: a simple, quick algorithm that has no known funnels, and
2499 	 * gets good distribution in practice.  The efficacy of the hashing
2500 	 * algorithm (and a comparison with other algorithms) may be found by
2501 	 * running the ::dtrace_aggstat MDB dcmd.
2502 	 */
2503 	for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) {
2504 		i = act->dta_rec.dtrd_offset - agg->dtag_base;
2505 		limit = i + act->dta_rec.dtrd_size;
2506 		ASSERT(limit <= size);
2507 		isstr = DTRACEACT_ISSTRING(act);
2508 
2509 		for (; i < limit; i++) {
2510 			hashval += data[i];
2511 			hashval += (hashval << 10);
2512 			hashval ^= (hashval >> 6);
2513 
2514 			if (isstr && data[i] == '\0')
2515 				break;
2516 		}
2517 	}
2518 
2519 	hashval += (hashval << 3);
2520 	hashval ^= (hashval >> 11);
2521 	hashval += (hashval << 15);
2522 
2523 	/*
2524 	 * Yes, the divide here is expensive -- but it's generally the least
2525 	 * of the performance issues given the amount of data that we iterate
2526 	 * over to compute hash values, compare data, etc.
2527 	 */
2528 	ndx = hashval % agb->dtagb_hashsize;
2529 
2530 	for (key = agb->dtagb_hash[ndx]; key != NULL; key = key->dtak_next) {
2531 		ASSERT((caddr_t)key >= tomax);
2532 		ASSERT((caddr_t)key < tomax + buf->dtb_size);
2533 
2534 		if (hashval != key->dtak_hashval || key->dtak_size != size)
2535 			continue;
2536 
2537 		kdata = key->dtak_data;
2538 		ASSERT(kdata >= tomax && kdata < tomax + buf->dtb_size);
2539 
2540 		for (act = agg->dtag_first; act->dta_intuple;
2541 		    act = act->dta_next) {
2542 			i = act->dta_rec.dtrd_offset - agg->dtag_base;
2543 			limit = i + act->dta_rec.dtrd_size;
2544 			ASSERT(limit <= size);
2545 			isstr = DTRACEACT_ISSTRING(act);
2546 
2547 			for (; i < limit; i++) {
2548 				if (kdata[i] != data[i])
2549 					goto next;
2550 
2551 				if (isstr && data[i] == '\0')
2552 					break;
2553 			}
2554 		}
2555 
2556 		if (action != key->dtak_action) {
2557 			/*
2558 			 * We are aggregating on the same value in the same
2559 			 * aggregation with two different aggregating actions.
2560 			 * (This should have been picked up in the compiler,
2561 			 * so we may be dealing with errant or devious DIF.)
2562 			 * This is an error condition; we indicate as much,
2563 			 * and return.
2564 			 */
2565 			DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
2566 			return;
2567 		}
2568 
2569 		/*
2570 		 * This is a hit:  we need to apply the aggregator to
2571 		 * the value at this key.
2572 		 */
2573 		agg->dtag_aggregate((uint64_t *)(kdata + size), expr, arg);
2574 		return;
2575 next:
2576 		continue;
2577 	}
2578 
2579 	/*
2580 	 * We didn't find it.  We need to allocate some zero-filled space,
2581 	 * link it into the hash table appropriately, and apply the aggregator
2582 	 * to the (zero-filled) value.
2583 	 */
2584 	offs = buf->dtb_offset;
2585 	while (offs & (align - 1))
2586 		offs += sizeof (uint32_t);
2587 
2588 	/*
2589 	 * If we don't have enough room to both allocate a new key _and_
2590 	 * its associated data, increment the drop count and return.
2591 	 */
2592 	if ((uintptr_t)tomax + offs + fsize >
2593 	    agb->dtagb_free - sizeof (dtrace_aggkey_t)) {
2594 		dtrace_buffer_drop(buf);
2595 		return;
2596 	}
2597 
2598 	/*CONSTCOND*/
2599 	ASSERT(!(sizeof (dtrace_aggkey_t) & (sizeof (uintptr_t) - 1)));
2600 	key = (dtrace_aggkey_t *)(agb->dtagb_free - sizeof (dtrace_aggkey_t));
2601 	agb->dtagb_free -= sizeof (dtrace_aggkey_t);
2602 
2603 	key->dtak_data = kdata = tomax + offs;
2604 	buf->dtb_offset = offs + fsize;
2605 
2606 	/*
2607 	 * Now copy the data across.
2608 	 */
2609 	*((dtrace_aggid_t *)kdata) = agg->dtag_id;
2610 
2611 	for (i = sizeof (dtrace_aggid_t); i < size; i++)
2612 		kdata[i] = data[i];
2613 
2614 	/*
2615 	 * Because strings are not zeroed out by default, we need to iterate
2616 	 * looking for actions that store strings, and we need to explicitly
2617 	 * pad these strings out with zeroes.
2618 	 */
2619 	for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) {
2620 		int nul;
2621 
2622 		if (!DTRACEACT_ISSTRING(act))
2623 			continue;
2624 
2625 		i = act->dta_rec.dtrd_offset - agg->dtag_base;
2626 		limit = i + act->dta_rec.dtrd_size;
2627 		ASSERT(limit <= size);
2628 
2629 		for (nul = 0; i < limit; i++) {
2630 			if (nul) {
2631 				kdata[i] = '\0';
2632 				continue;
2633 			}
2634 
2635 			if (data[i] != '\0')
2636 				continue;
2637 
2638 			nul = 1;
2639 		}
2640 	}
2641 
2642 	for (i = size; i < fsize; i++)
2643 		kdata[i] = 0;
2644 
2645 	key->dtak_hashval = hashval;
2646 	key->dtak_size = size;
2647 	key->dtak_action = action;
2648 	key->dtak_next = agb->dtagb_hash[ndx];
2649 	agb->dtagb_hash[ndx] = key;
2650 
2651 	/*
2652 	 * Finally, apply the aggregator.
2653 	 */
2654 	*((uint64_t *)(key->dtak_data + size)) = agg->dtag_initial;
2655 	agg->dtag_aggregate((uint64_t *)(key->dtak_data + size), expr, arg);
2656 }
2657 
2658 /*
2659  * Given consumer state, this routine finds a speculation in the INACTIVE
2660  * state and transitions it into the ACTIVE state.  If there is no speculation
2661  * in the INACTIVE state, 0 is returned.  In this case, no error counter is
2662  * incremented -- it is up to the caller to take appropriate action.
2663  */
2664 static int
2665 dtrace_speculation(dtrace_state_t *state)
2666 {
2667 	int i = 0;
2668 	dtrace_speculation_state_t current;
2669 	uint32_t *stat = &state->dts_speculations_unavail, count;
2670 
2671 	while (i < state->dts_nspeculations) {
2672 		dtrace_speculation_t *spec = &state->dts_speculations[i];
2673 
2674 		current = spec->dtsp_state;
2675 
2676 		if (current != DTRACESPEC_INACTIVE) {
2677 			if (current == DTRACESPEC_COMMITTINGMANY ||
2678 			    current == DTRACESPEC_COMMITTING ||
2679 			    current == DTRACESPEC_DISCARDING)
2680 				stat = &state->dts_speculations_busy;
2681 			i++;
2682 			continue;
2683 		}
2684 
2685 		if (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2686 		    current, DTRACESPEC_ACTIVE) == current)
2687 			return (i + 1);
2688 	}
2689 
2690 	/*
2691 	 * We couldn't find a speculation.  If we found as much as a single
2692 	 * busy speculation buffer, we'll attribute this failure as "busy"
2693 	 * instead of "unavail".
2694 	 */
2695 	do {
2696 		count = *stat;
2697 	} while (dtrace_cas32(stat, count, count + 1) != count);
2698 
2699 	return (0);
2700 }
2701 
2702 /*
2703  * This routine commits an active speculation.  If the specified speculation
2704  * is not in a valid state to perform a commit(), this routine will silently do
2705  * nothing.  The state of the specified speculation is transitioned according
2706  * to the state transition diagram outlined in <sys/dtrace_impl.h>
2707  */
2708 static void
2709 dtrace_speculation_commit(dtrace_state_t *state, processorid_t cpu,
2710     dtrace_specid_t which)
2711 {
2712 	dtrace_speculation_t *spec;
2713 	dtrace_buffer_t *src, *dest;
2714 	uintptr_t daddr, saddr, dlimit, slimit;
2715 	dtrace_speculation_state_t current, new;
2716 	intptr_t offs;
2717 	uint64_t timestamp;
2718 
2719 	if (which == 0)
2720 		return;
2721 
2722 	if (which > state->dts_nspeculations) {
2723 		cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
2724 		return;
2725 	}
2726 
2727 	spec = &state->dts_speculations[which - 1];
2728 	src = &spec->dtsp_buffer[cpu];
2729 	dest = &state->dts_buffer[cpu];
2730 
2731 	do {
2732 		current = spec->dtsp_state;
2733 
2734 		if (current == DTRACESPEC_COMMITTINGMANY)
2735 			break;
2736 
2737 		switch (current) {
2738 		case DTRACESPEC_INACTIVE:
2739 		case DTRACESPEC_DISCARDING:
2740 			return;
2741 
2742 		case DTRACESPEC_COMMITTING:
2743 			/*
2744 			 * This is only possible if we are (a) commit()'ing
2745 			 * without having done a prior speculate() on this CPU
2746 			 * and (b) racing with another commit() on a different
2747 			 * CPU.  There's nothing to do -- we just assert that
2748 			 * our offset is 0.
2749 			 */
2750 			ASSERT(src->dtb_offset == 0);
2751 			return;
2752 
2753 		case DTRACESPEC_ACTIVE:
2754 			new = DTRACESPEC_COMMITTING;
2755 			break;
2756 
2757 		case DTRACESPEC_ACTIVEONE:
2758 			/*
2759 			 * This speculation is active on one CPU.  If our
2760 			 * buffer offset is non-zero, we know that the one CPU
2761 			 * must be us.  Otherwise, we are committing on a
2762 			 * different CPU from the speculate(), and we must
2763 			 * rely on being asynchronously cleaned.
2764 			 */
2765 			if (src->dtb_offset != 0) {
2766 				new = DTRACESPEC_COMMITTING;
2767 				break;
2768 			}
2769 			/*FALLTHROUGH*/
2770 
2771 		case DTRACESPEC_ACTIVEMANY:
2772 			new = DTRACESPEC_COMMITTINGMANY;
2773 			break;
2774 
2775 		default:
2776 			ASSERT(0);
2777 		}
2778 	} while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2779 	    current, new) != current);
2780 
2781 	/*
2782 	 * We have set the state to indicate that we are committing this
2783 	 * speculation.  Now reserve the necessary space in the destination
2784 	 * buffer.
2785 	 */
2786 	if ((offs = dtrace_buffer_reserve(dest, src->dtb_offset,
2787 	    sizeof (uint64_t), state, NULL)) < 0) {
2788 		dtrace_buffer_drop(dest);
2789 		goto out;
2790 	}
2791 
2792 	/*
2793 	 * We have sufficient space to copy the speculative buffer into the
2794 	 * primary buffer.  First, modify the speculative buffer, filling
2795 	 * in the timestamp of all entries with the current time.  The data
2796 	 * must have the commit() time rather than the time it was traced,
2797 	 * so that all entries in the primary buffer are in timestamp order.
2798 	 */
2799 	timestamp = dtrace_gethrtime();
2800 	saddr = (uintptr_t)src->dtb_tomax;
2801 	slimit = saddr + src->dtb_offset;
2802 	while (saddr < slimit) {
2803 		size_t size;
2804 		dtrace_rechdr_t *dtrh = (dtrace_rechdr_t *)saddr;
2805 
2806 		if (dtrh->dtrh_epid == DTRACE_EPIDNONE) {
2807 			saddr += sizeof (dtrace_epid_t);
2808 			continue;
2809 		}
2810 		ASSERT3U(dtrh->dtrh_epid, <=, state->dts_necbs);
2811 		size = state->dts_ecbs[dtrh->dtrh_epid - 1]->dte_size;
2812 
2813 		ASSERT3U(saddr + size, <=, slimit);
2814 		ASSERT3U(size, >=, sizeof (dtrace_rechdr_t));
2815 		ASSERT3U(DTRACE_RECORD_LOAD_TIMESTAMP(dtrh), ==, UINT64_MAX);
2816 
2817 		DTRACE_RECORD_STORE_TIMESTAMP(dtrh, timestamp);
2818 
2819 		saddr += size;
2820 	}
2821 
2822 	/*
2823 	 * Copy the buffer across.  (Note that this is a
2824 	 * highly suboptimal bcopy(); in the unlikely event that this becomes
2825 	 * a serious performance issue, a high-performance DTrace-specific
2826 	 * bcopy() should obviously be invented.)
2827 	 */
2828 	daddr = (uintptr_t)dest->dtb_tomax + offs;
2829 	dlimit = daddr + src->dtb_offset;
2830 	saddr = (uintptr_t)src->dtb_tomax;
2831 
2832 	/*
2833 	 * First, the aligned portion.
2834 	 */
2835 	while (dlimit - daddr >= sizeof (uint64_t)) {
2836 		*((uint64_t *)daddr) = *((uint64_t *)saddr);
2837 
2838 		daddr += sizeof (uint64_t);
2839 		saddr += sizeof (uint64_t);
2840 	}
2841 
2842 	/*
2843 	 * Now any left-over bit...
2844 	 */
2845 	while (dlimit - daddr)
2846 		*((uint8_t *)daddr++) = *((uint8_t *)saddr++);
2847 
2848 	/*
2849 	 * Finally, commit the reserved space in the destination buffer.
2850 	 */
2851 	dest->dtb_offset = offs + src->dtb_offset;
2852 
2853 out:
2854 	/*
2855 	 * If we're lucky enough to be the only active CPU on this speculation
2856 	 * buffer, we can just set the state back to DTRACESPEC_INACTIVE.
2857 	 */
2858 	if (current == DTRACESPEC_ACTIVE ||
2859 	    (current == DTRACESPEC_ACTIVEONE && new == DTRACESPEC_COMMITTING)) {
2860 		uint32_t rval = dtrace_cas32((uint32_t *)&spec->dtsp_state,
2861 		    DTRACESPEC_COMMITTING, DTRACESPEC_INACTIVE);
2862 
2863 		ASSERT(rval == DTRACESPEC_COMMITTING);
2864 	}
2865 
2866 	src->dtb_offset = 0;
2867 	src->dtb_xamot_drops += src->dtb_drops;
2868 	src->dtb_drops = 0;
2869 }
2870 
2871 /*
2872  * This routine discards an active speculation.  If the specified speculation
2873  * is not in a valid state to perform a discard(), this routine will silently
2874  * do nothing.  The state of the specified speculation is transitioned
2875  * according to the state transition diagram outlined in <sys/dtrace_impl.h>
2876  */
2877 static void
2878 dtrace_speculation_discard(dtrace_state_t *state, processorid_t cpu,
2879     dtrace_specid_t which)
2880 {
2881 	dtrace_speculation_t *spec;
2882 	dtrace_speculation_state_t current, new;
2883 	dtrace_buffer_t *buf;
2884 
2885 	if (which == 0)
2886 		return;
2887 
2888 	if (which > state->dts_nspeculations) {
2889 		cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
2890 		return;
2891 	}
2892 
2893 	spec = &state->dts_speculations[which - 1];
2894 	buf = &spec->dtsp_buffer[cpu];
2895 
2896 	do {
2897 		current = spec->dtsp_state;
2898 
2899 		switch (current) {
2900 		case DTRACESPEC_INACTIVE:
2901 		case DTRACESPEC_COMMITTINGMANY:
2902 		case DTRACESPEC_COMMITTING:
2903 		case DTRACESPEC_DISCARDING:
2904 			return;
2905 
2906 		case DTRACESPEC_ACTIVE:
2907 		case DTRACESPEC_ACTIVEMANY:
2908 			new = DTRACESPEC_DISCARDING;
2909 			break;
2910 
2911 		case DTRACESPEC_ACTIVEONE:
2912 			if (buf->dtb_offset != 0) {
2913 				new = DTRACESPEC_INACTIVE;
2914 			} else {
2915 				new = DTRACESPEC_DISCARDING;
2916 			}
2917 			break;
2918 
2919 		default:
2920 			ASSERT(0);
2921 		}
2922 	} while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2923 	    current, new) != current);
2924 
2925 	buf->dtb_offset = 0;
2926 	buf->dtb_drops = 0;
2927 }
2928 
2929 /*
2930  * Note:  not called from probe context.  This function is called
2931  * asynchronously from cross call context to clean any speculations that are
2932  * in the COMMITTINGMANY or DISCARDING states.  These speculations may not be
2933  * transitioned back to the INACTIVE state until all CPUs have cleaned the
2934  * speculation.
2935  */
2936 static void
2937 dtrace_speculation_clean_here(dtrace_state_t *state)
2938 {
2939 	dtrace_icookie_t cookie;
2940 	processorid_t cpu = CPU->cpu_id;
2941 	dtrace_buffer_t *dest = &state->dts_buffer[cpu];
2942 	dtrace_specid_t i;
2943 
2944 	cookie = dtrace_interrupt_disable();
2945 
2946 	if (dest->dtb_tomax == NULL) {
2947 		dtrace_interrupt_enable(cookie);
2948 		return;
2949 	}
2950 
2951 	for (i = 0; i < state->dts_nspeculations; i++) {
2952 		dtrace_speculation_t *spec = &state->dts_speculations[i];
2953 		dtrace_buffer_t *src = &spec->dtsp_buffer[cpu];
2954 
2955 		if (src->dtb_tomax == NULL)
2956 			continue;
2957 
2958 		if (spec->dtsp_state == DTRACESPEC_DISCARDING) {
2959 			src->dtb_offset = 0;
2960 			continue;
2961 		}
2962 
2963 		if (spec->dtsp_state != DTRACESPEC_COMMITTINGMANY)
2964 			continue;
2965 
2966 		if (src->dtb_offset == 0)
2967 			continue;
2968 
2969 		dtrace_speculation_commit(state, cpu, i + 1);
2970 	}
2971 
2972 	dtrace_interrupt_enable(cookie);
2973 }
2974 
2975 /*
2976  * Note:  not called from probe context.  This function is called
2977  * asynchronously (and at a regular interval) to clean any speculations that
2978  * are in the COMMITTINGMANY or DISCARDING states.  If it discovers that there
2979  * is work to be done, it cross calls all CPUs to perform that work;
2980  * COMMITTINGMANY and DISCARDING speculations may not be transitioned back to the
2981  * INACTIVE state until they have been cleaned by all CPUs.
2982  */
2983 static void
2984 dtrace_speculation_clean(dtrace_state_t *state)
2985 {
2986 	int work = 0, rv;
2987 	dtrace_specid_t i;
2988 
2989 	for (i = 0; i < state->dts_nspeculations; i++) {
2990 		dtrace_speculation_t *spec = &state->dts_speculations[i];
2991 
2992 		ASSERT(!spec->dtsp_cleaning);
2993 
2994 		if (spec->dtsp_state != DTRACESPEC_DISCARDING &&
2995 		    spec->dtsp_state != DTRACESPEC_COMMITTINGMANY)
2996 			continue;
2997 
2998 		work++;
2999 		spec->dtsp_cleaning = 1;
3000 	}
3001 
3002 	if (!work)
3003 		return;
3004 
3005 	dtrace_xcall(DTRACE_CPUALL,
3006 	    (dtrace_xcall_t)dtrace_speculation_clean_here, state);
3007 
3008 	/*
3009 	 * We now know that all CPUs have committed or discarded their
3010 	 * speculation buffers, as appropriate.  We can now set the state
3011 	 * to inactive.
3012 	 */
3013 	for (i = 0; i < state->dts_nspeculations; i++) {
3014 		dtrace_speculation_t *spec = &state->dts_speculations[i];
3015 		dtrace_speculation_state_t current, new;
3016 
3017 		if (!spec->dtsp_cleaning)
3018 			continue;
3019 
3020 		current = spec->dtsp_state;
3021 		ASSERT(current == DTRACESPEC_DISCARDING ||
3022 		    current == DTRACESPEC_COMMITTINGMANY);
3023 
3024 		new = DTRACESPEC_INACTIVE;
3025 
3026 		rv = dtrace_cas32((uint32_t *)&spec->dtsp_state, current, new);
3027 		ASSERT(rv == current);
3028 		spec->dtsp_cleaning = 0;
3029 	}
3030 }
3031 
3032 /*
3033  * Called as part of a speculate() to get the speculative buffer associated
3034  * with a given speculation.  Returns NULL if the specified speculation is not
3035  * in an ACTIVE state.  If the speculation is in the ACTIVEONE state -- and
3036  * the active CPU is not the specified CPU -- the speculation will be
3037  * atomically transitioned into the ACTIVEMANY state.
3038  */
3039 static dtrace_buffer_t *
3040 dtrace_speculation_buffer(dtrace_state_t *state, processorid_t cpuid,
3041     dtrace_specid_t which)
3042 {
3043 	dtrace_speculation_t *spec;
3044 	dtrace_speculation_state_t current, new;
3045 	dtrace_buffer_t *buf;
3046 
3047 	if (which == 0)
3048 		return (NULL);
3049 
3050 	if (which > state->dts_nspeculations) {
3051 		cpu_core[cpuid].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
3052 		return (NULL);
3053 	}
3054 
3055 	spec = &state->dts_speculations[which - 1];
3056 	buf = &spec->dtsp_buffer[cpuid];
3057 
3058 	do {
3059 		current = spec->dtsp_state;
3060 
3061 		switch (current) {
3062 		case DTRACESPEC_INACTIVE:
3063 		case DTRACESPEC_COMMITTINGMANY:
3064 		case DTRACESPEC_DISCARDING:
3065 			return (NULL);
3066 
3067 		case DTRACESPEC_COMMITTING:
3068 			ASSERT(buf->dtb_offset == 0);
3069 			return (NULL);
3070 
3071 		case DTRACESPEC_ACTIVEONE:
3072 			/*
3073 			 * This speculation is currently active on one CPU.
3074 			 * Check the offset in the buffer; if it's non-zero,
3075 			 * that CPU must be us (and we leave the state alone).
3076 			 * If it's zero, assume that we're starting on a new
3077 			 * CPU -- and change the state to indicate that the
3078 			 * speculation is active on more than one CPU.
3079 			 */
3080 			if (buf->dtb_offset != 0)
3081 				return (buf);
3082 
3083 			new = DTRACESPEC_ACTIVEMANY;
3084 			break;
3085 
3086 		case DTRACESPEC_ACTIVEMANY:
3087 			return (buf);
3088 
3089 		case DTRACESPEC_ACTIVE:
3090 			new = DTRACESPEC_ACTIVEONE;
3091 			break;
3092 
3093 		default:
3094 			ASSERT(0);
3095 		}
3096 	} while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
3097 	    current, new) != current);
3098 
3099 	ASSERT(new == DTRACESPEC_ACTIVEONE || new == DTRACESPEC_ACTIVEMANY);
3100 	return (buf);
3101 }
3102 
3103 /*
3104  * Return a string.  In the event that the user lacks the privilege to access
3105  * arbitrary kernel memory, we copy the string out to scratch memory so that we
3106  * don't fail access checking.
3107  *
3108  * dtrace_dif_variable() uses this routine as a helper for various
3109  * builtin values such as 'execname' and 'probefunc.'
3110  */
3111 uintptr_t
3112 dtrace_dif_varstr(uintptr_t addr, dtrace_state_t *state,
3113     dtrace_mstate_t *mstate)
3114 {
3115 	uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3116 	uintptr_t ret;
3117 	size_t strsz;
3118 
3119 	/*
3120 	 * The easy case: this probe is allowed to read all of memory, so
3121 	 * we can just return this as a vanilla pointer.
3122 	 */
3123 	if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
3124 		return (addr);
3125 
3126 	/*
3127 	 * This is the tougher case: we copy the string in question from
3128 	 * kernel memory into scratch memory and return it that way: this
3129 	 * ensures that we won't trip up when access checking tests the
3130 	 * BYREF return value.
3131 	 */
3132 	strsz = dtrace_strlen((char *)addr, size) + 1;
3133 
3134 	if (mstate->dtms_scratch_ptr + strsz >
3135 	    mstate->dtms_scratch_base + mstate->dtms_scratch_size) {
3136 		DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3137 		return (NULL);
3138 	}
3139 
3140 	dtrace_strcpy((const void *)addr, (void *)mstate->dtms_scratch_ptr,
3141 	    strsz);
3142 	ret = mstate->dtms_scratch_ptr;
3143 	mstate->dtms_scratch_ptr += strsz;
3144 	return (ret);
3145 }
3146 
3147 /*
3148  * This function implements the DIF emulator's variable lookups.  The emulator
3149  * passes a reserved variable identifier and optional built-in array index.
3150  */
3151 static uint64_t
3152 dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v,
3153     uint64_t ndx)
3154 {
3155 	/*
3156 	 * If we're accessing one of the uncached arguments, we'll turn this
3157 	 * into a reference in the args array.
3158 	 */
3159 	if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) {
3160 		ndx = v - DIF_VAR_ARG0;
3161 		v = DIF_VAR_ARGS;
3162 	}
3163 
3164 	switch (v) {
3165 	case DIF_VAR_ARGS:
3166 		if (!(mstate->dtms_access & DTRACE_ACCESS_ARGS)) {
3167 			cpu_core[CPU->cpu_id].cpuc_dtrace_flags |=
3168 			    CPU_DTRACE_KPRIV;
3169 			return (0);
3170 		}
3171 
3172 		ASSERT(mstate->dtms_present & DTRACE_MSTATE_ARGS);
3173 		if (ndx >= sizeof (mstate->dtms_arg) /
3174 		    sizeof (mstate->dtms_arg[0])) {
3175 			int aframes = mstate->dtms_probe->dtpr_aframes + 2;
3176 			dtrace_provider_t *pv;
3177 			uint64_t val;
3178 
3179 			pv = mstate->dtms_probe->dtpr_provider;
3180 			if (pv->dtpv_pops.dtps_getargval != NULL)
3181 				val = pv->dtpv_pops.dtps_getargval(pv->dtpv_arg,
3182 				    mstate->dtms_probe->dtpr_id,
3183 				    mstate->dtms_probe->dtpr_arg, ndx, aframes);
3184 			else
3185 				val = dtrace_getarg(ndx, aframes);
3186 
3187 			/*
3188 			 * This is regrettably required to keep the compiler
3189 			 * from tail-optimizing the call to dtrace_getarg().
3190 			 * The condition always evaluates to true, but the
3191 			 * compiler has no way of figuring that out a priori.
3192 			 * (None of this would be necessary if the compiler
3193 			 * could be relied upon to _always_ tail-optimize
3194 			 * the call to dtrace_getarg() -- but it can't.)
3195 			 */
3196 			if (mstate->dtms_probe != NULL)
3197 				return (val);
3198 
3199 			ASSERT(0);
3200 		}
3201 
3202 		return (mstate->dtms_arg[ndx]);
3203 
3204 	case DIF_VAR_UREGS: {
3205 		klwp_t *lwp;
3206 
3207 		if (!dtrace_priv_proc(state, mstate))
3208 			return (0);
3209 
3210 		if ((lwp = curthread->t_lwp) == NULL) {
3211 			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
3212 			cpu_core[CPU->cpu_id].cpuc_dtrace_illval = NULL;
3213 			return (0);
3214 		}
3215 
3216 		return (dtrace_getreg(lwp->lwp_regs, ndx));
3217 	}
3218 
3219 	case DIF_VAR_VMREGS: {
3220 		uint64_t rval;
3221 
3222 		if (!dtrace_priv_kernel(state))
3223 			return (0);
3224 
3225 		DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3226 
3227 		rval = dtrace_getvmreg(ndx,
3228 		    &cpu_core[CPU->cpu_id].cpuc_dtrace_flags);
3229 
3230 		DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3231 
3232 		return (rval);
3233 	}
3234 
3235 	case DIF_VAR_CURTHREAD:
3236 		if (!dtrace_priv_proc(state, mstate))
3237 			return (0);
3238 		return ((uint64_t)(uintptr_t)curthread);
3239 
3240 	case DIF_VAR_TIMESTAMP:
3241 		if (!(mstate->dtms_present & DTRACE_MSTATE_TIMESTAMP)) {
3242 			mstate->dtms_timestamp = dtrace_gethrtime();
3243 			mstate->dtms_present |= DTRACE_MSTATE_TIMESTAMP;
3244 		}
3245 		return (mstate->dtms_timestamp);
3246 
3247 	case DIF_VAR_VTIMESTAMP:
3248 		ASSERT(dtrace_vtime_references != 0);
3249 		return (curthread->t_dtrace_vtime);
3250 
3251 	case DIF_VAR_WALLTIMESTAMP:
3252 		if (!(mstate->dtms_present & DTRACE_MSTATE_WALLTIMESTAMP)) {
3253 			mstate->dtms_walltimestamp = dtrace_gethrestime();
3254 			mstate->dtms_present |= DTRACE_MSTATE_WALLTIMESTAMP;
3255 		}
3256 		return (mstate->dtms_walltimestamp);
3257 
3258 	case DIF_VAR_IPL:
3259 		if (!dtrace_priv_kernel(state))
3260 			return (0);
3261 		if (!(mstate->dtms_present & DTRACE_MSTATE_IPL)) {
3262 			mstate->dtms_ipl = dtrace_getipl();
3263 			mstate->dtms_present |= DTRACE_MSTATE_IPL;
3264 		}
3265 		return (mstate->dtms_ipl);
3266 
3267 	case DIF_VAR_EPID:
3268 		ASSERT(mstate->dtms_present & DTRACE_MSTATE_EPID);
3269 		return (mstate->dtms_epid);
3270 
3271 	case DIF_VAR_ID:
3272 		ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
3273 		return (mstate->dtms_probe->dtpr_id);
3274 
3275 	case DIF_VAR_STACKDEPTH:
3276 		if (!dtrace_priv_kernel(state))
3277 			return (0);
3278 		if (!(mstate->dtms_present & DTRACE_MSTATE_STACKDEPTH)) {
3279 			int aframes = mstate->dtms_probe->dtpr_aframes + 2;
3280 
3281 			mstate->dtms_stackdepth = dtrace_getstackdepth(aframes);
3282 			mstate->dtms_present |= DTRACE_MSTATE_STACKDEPTH;
3283 		}
3284 		return (mstate->dtms_stackdepth);
3285 
3286 	case DIF_VAR_USTACKDEPTH:
3287 		if (!dtrace_priv_proc(state, mstate))
3288 			return (0);
3289 		if (!(mstate->dtms_present & DTRACE_MSTATE_USTACKDEPTH)) {
3290 			/*
3291 			 * See comment in DIF_VAR_PID.
3292 			 */
3293 			if (DTRACE_ANCHORED(mstate->dtms_probe) &&
3294 			    CPU_ON_INTR(CPU)) {
3295 				mstate->dtms_ustackdepth = 0;
3296 			} else {
3297 				DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3298 				mstate->dtms_ustackdepth =
3299 				    dtrace_getustackdepth();
3300 				DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3301 			}
3302 			mstate->dtms_present |= DTRACE_MSTATE_USTACKDEPTH;
3303 		}
3304 		return (mstate->dtms_ustackdepth);
3305 
3306 	case DIF_VAR_CALLER:
3307 		if (!dtrace_priv_kernel(state))
3308 			return (0);
3309 		if (!(mstate->dtms_present & DTRACE_MSTATE_CALLER)) {
3310 			int aframes = mstate->dtms_probe->dtpr_aframes + 2;
3311 
3312 			if (!DTRACE_ANCHORED(mstate->dtms_probe)) {
3313 				/*
3314 				 * If this is an unanchored probe, we are
3315 				 * required to go through the slow path:
3316 				 * dtrace_caller() only guarantees correct
3317 				 * results for anchored probes.
3318 				 */
3319 				pc_t caller[2];
3320 
3321 				dtrace_getpcstack(caller, 2, aframes,
3322 				    (uint32_t *)(uintptr_t)mstate->dtms_arg[0]);
3323 				mstate->dtms_caller = caller[1];
3324 			} else if ((mstate->dtms_caller =
3325 			    dtrace_caller(aframes)) == -1) {
3326 				/*
3327 				 * We have failed to do this the quick way;
3328 				 * we must resort to the slower approach of
3329 				 * calling dtrace_getpcstack().
3330 				 */
3331 				pc_t caller;
3332 
3333 				dtrace_getpcstack(&caller, 1, aframes, NULL);
3334 				mstate->dtms_caller = caller;
3335 			}
3336 
3337 			mstate->dtms_present |= DTRACE_MSTATE_CALLER;
3338 		}
3339 		return (mstate->dtms_caller);
3340 
3341 	case DIF_VAR_UCALLER:
3342 		if (!dtrace_priv_proc(state, mstate))
3343 			return (0);
3344 
3345 		if (!(mstate->dtms_present & DTRACE_MSTATE_UCALLER)) {
3346 			uint64_t ustack[3];
3347 
3348 			/*
3349 			 * dtrace_getupcstack() fills in the first uint64_t
3350 			 * with the current PID.  The second uint64_t will
3351 			 * be the program counter at user-level.  The third
3352 			 * uint64_t will contain the caller, which is what
3353 			 * we're after.
3354 			 */
3355 			ustack[2] = NULL;
3356 			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3357 			dtrace_getupcstack(ustack, 3);
3358 			DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3359 			mstate->dtms_ucaller = ustack[2];
3360 			mstate->dtms_present |= DTRACE_MSTATE_UCALLER;
3361 		}
3362 
3363 		return (mstate->dtms_ucaller);
3364 
3365 	case DIF_VAR_PROBEPROV:
3366 		ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
3367 		return (dtrace_dif_varstr(
3368 		    (uintptr_t)mstate->dtms_probe->dtpr_provider->dtpv_name,
3369 		    state, mstate));
3370 
3371 	case DIF_VAR_PROBEMOD:
3372 		ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
3373 		return (dtrace_dif_varstr(
3374 		    (uintptr_t)mstate->dtms_probe->dtpr_mod,
3375 		    state, mstate));
3376 
3377 	case DIF_VAR_PROBEFUNC:
3378 		ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
3379 		return (dtrace_dif_varstr(
3380 		    (uintptr_t)mstate->dtms_probe->dtpr_func,
3381 		    state, mstate));
3382 
3383 	case DIF_VAR_PROBENAME:
3384 		ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
3385 		return (dtrace_dif_varstr(
3386 		    (uintptr_t)mstate->dtms_probe->dtpr_name,
3387 		    state, mstate));
3388 
3389 	case DIF_VAR_PID:
3390 		if (!dtrace_priv_proc(state, mstate))
3391 			return (0);
3392 
3393 		/*
3394 		 * Note that we are assuming that an unanchored probe is
3395 		 * always due to a high-level interrupt.  (And we're assuming
3396 		 * that there is only a single high-level interrupt.)
3397 		 */
3398 		if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3399 			return (pid0.pid_id);
3400 
3401 		/*
3402 		 * It is always safe to dereference one's own t_procp pointer:
3403 		 * it always points to a valid, allocated proc structure.
3404 		 * Further, it is always safe to dereference the p_pidp member
3405 		 * of one's own proc structure.  (These are truisms because
3406 		 * threads and processes don't clean up their own state --
3407 		 * they leave that task to whomever reaps them.)
3408 		 */
3409 		return ((uint64_t)curthread->t_procp->p_pidp->pid_id);
3410 
3411 	case DIF_VAR_PPID:
3412 		if (!dtrace_priv_proc(state, mstate))
3413 			return (0);
3414 
3415 		/*
3416 		 * See comment in DIF_VAR_PID.
3417 		 */
3418 		if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3419 			return (pid0.pid_id);
3420 
3421 		/*
3422 		 * It is always safe to dereference one's own t_procp pointer:
3423 		 * it always points to a valid, allocated proc structure.
3424 		 * (This is true because threads don't clean up their own
3425 		 * state -- they leave that task to whomever reaps them.)
3426 		 */
3427 		return ((uint64_t)curthread->t_procp->p_ppid);
3428 
3429 	case DIF_VAR_TID:
3430 		/*
3431 		 * See comment in DIF_VAR_PID.
3432 		 */
3433 		if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3434 			return (0);
3435 
3436 		return ((uint64_t)curthread->t_tid);
3437 
3438 	case DIF_VAR_EXECNAME:
3439 		if (!dtrace_priv_proc(state, mstate))
3440 			return (0);
3441 
3442 		/*
3443 		 * See comment in DIF_VAR_PID.
3444 		 */
3445 		if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3446 			return ((uint64_t)(uintptr_t)p0.p_user.u_comm);
3447 
3448 		/*
3449 		 * It is always safe to dereference one's own t_procp pointer:
3450 		 * it always points to a valid, allocated proc structure.
3451 		 * (This is true because threads don't clean up their own
3452 		 * state -- they leave that task to whomever reaps them.)
3453 		 */
3454 		return (dtrace_dif_varstr(
3455 		    (uintptr_t)curthread->t_procp->p_user.u_comm,
3456 		    state, mstate));
3457 
3458 	case DIF_VAR_ZONENAME:
3459 		if (!dtrace_priv_proc(state, mstate))
3460 			return (0);
3461 
3462 		/*
3463 		 * See comment in DIF_VAR_PID.
3464 		 */
3465 		if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3466 			return ((uint64_t)(uintptr_t)p0.p_zone->zone_name);
3467 
3468 		/*
3469 		 * It is always safe to dereference one's own t_procp pointer:
3470 		 * it always points to a valid, allocated proc structure.
3471 		 * (This is true because threads don't clean up their own
3472 		 * state -- they leave that task to whomever reaps them.)
3473 		 */
3474 		return (dtrace_dif_varstr(
3475 		    (uintptr_t)curthread->t_procp->p_zone->zone_name,
3476 		    state, mstate));
3477 
3478 	case DIF_VAR_UID:
3479 		if (!dtrace_priv_proc(state, mstate))
3480 			return (0);
3481 
3482 		/*
3483 		 * See comment in DIF_VAR_PID.
3484 		 */
3485 		if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3486 			return ((uint64_t)p0.p_cred->cr_uid);
3487 
3488 		/*
3489 		 * It is always safe to dereference one's own t_procp pointer:
3490 		 * it always points to a valid, allocated proc structure.
3491 		 * (This is true because threads don't clean up their own
3492 		 * state -- they leave that task to whomever reaps them.)
3493 		 *
3494 		 * Additionally, it is safe to dereference one's own process
3495 		 * credential, since this is never NULL after process birth.
3496 		 */
3497 		return ((uint64_t)curthread->t_procp->p_cred->cr_uid);
3498 
3499 	case DIF_VAR_GID:
3500 		if (!dtrace_priv_proc(state, mstate))
3501 			return (0);
3502 
3503 		/*
3504 		 * See comment in DIF_VAR_PID.
3505 		 */
3506 		if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3507 			return ((uint64_t)p0.p_cred->cr_gid);
3508 
3509 		/*
3510 		 * It is always safe to dereference one's own t_procp pointer:
3511 		 * it always points to a valid, allocated proc structure.
3512 		 * (This is true because threads don't clean up their own
3513 		 * state -- they leave that task to whomever reaps them.)
3514 		 *
3515 		 * Additionally, it is safe to dereference one's own process
3516 		 * credential, since this is never NULL after process birth.
3517 		 */
3518 		return ((uint64_t)curthread->t_procp->p_cred->cr_gid);
3519 
3520 	case DIF_VAR_ERRNO: {
3521 		klwp_t *lwp;
3522 		if (!dtrace_priv_proc(state, mstate))
3523 			return (0);
3524 
3525 		/*
3526 		 * See comment in DIF_VAR_PID.
3527 		 */
3528 		if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3529 			return (0);
3530 
3531 		/*
3532 		 * It is always safe to dereference one's own t_lwp pointer in
3533 		 * the event that this pointer is non-NULL.  (This is true
3534 		 * because threads and lwps don't clean up their own state --
3535 		 * they leave that task to whomever reaps them.)
3536 		 */
3537 		if ((lwp = curthread->t_lwp) == NULL)
3538 			return (0);
3539 
3540 		return ((uint64_t)lwp->lwp_errno);
3541 	}
3542 	default:
3543 		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
3544 		return (0);
3545 	}
3546 }
3547 
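/*
 * Emulate stores to writable DIF variables.  At present the only writable
 * variable is uregs[]: the store is directed at the current thread's saved
 * user registers, and is refused when destructive actions are disallowed,
 * the consumer lacks proc-control privileges, or the thread has no lwp.
 */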
3548 static void
3549 dtrace_dif_variable_write(dtrace_mstate_t *mstate, dtrace_state_t *state,
3550     uint64_t v, uint64_t ndx, uint64_t data)
3551 {
3552 	switch (v) {
3553 	case DIF_VAR_UREGS: {
3554 		klwp_t *lwp;
3555 
3556 		if (dtrace_destructive_disallow ||
3557 		    !dtrace_priv_proc_control(state, mstate)) {
3558 			return;
3559 		}
3560 
3561 		if ((lwp = curthread->t_lwp) == NULL) {
3562 			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
3563 			cpu_core[CPU->cpu_id].cpuc_dtrace_illval = NULL;
3564 			return;
3565 		}
3566 
3567 		dtrace_setreg(lwp->lwp_regs, ndx, data);
3568 		return;
3569 	}
3570 
3571 	default:
3572 		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
3573 		return;
3574 	}
3575 }
3576 
3577 typedef enum dtrace_json_state {
3578 	DTRACE_JSON_REST = 1,
3579 	DTRACE_JSON_OBJECT,
3580 	DTRACE_JSON_STRING,
3581 	DTRACE_JSON_STRING_ESCAPE,
3582 	DTRACE_JSON_STRING_ESCAPE_UNICODE,
3583 	DTRACE_JSON_COLON,
3584 	DTRACE_JSON_COMMA,
3585 	DTRACE_JSON_VALUE,
3586 	DTRACE_JSON_IDENTIFIER,
3587 	DTRACE_JSON_NUMBER,
3588 	DTRACE_JSON_NUMBER_FRAC,
3589 	DTRACE_JSON_NUMBER_EXP,
3590 	DTRACE_JSON_COLLECT_OBJECT
3591 } dtrace_json_state_t;
3592 
3593 /*
3594  * This function possesses just enough knowledge about JSON to extract a single
3595  * value from a JSON string and store it in the scratch buffer.  It is able
3596  * to extract nested object values, and members of arrays by index.
3597  *
3598  * elemlist is a list of JSON keys, stored as packed NUL-terminated strings, to
3599  * be looked up as we descend into the object tree.  e.g.
3600  *
3601  *    foo[0].bar.baz[32] --> "foo" NUL "0" NUL "bar" NUL "baz" NUL "32" NUL
3602  *       with nelems = 5.
3603  *
3604  * The run time of this function must be bounded above by strsize to limit the
3605  * amount of work done in probe context.  As such, it is implemented as a
3606  * simple state machine, reading one character at a time using safe loads
3607  * until we find the requested element, hit a parsing error or run off the
3608  * end of the object or string.
3609  *
3610  * As there is no way for a subroutine to return an error without interrupting
3611  * clause execution, we simply return NULL in the event of a missing key or any
3612  * other error condition.  Each NULL return in this function is commented with
3613  * the error condition it represents -- parsing or otherwise.
3614  *
3615  * The set of states for the state machine closely matches the JSON
3616  * specification (http://json.org/).  Briefly:
3617  *
3618  *   DTRACE_JSON_REST:
3619  *     Skip whitespace until we find either a top-level Object, moving
3620  *     to DTRACE_JSON_OBJECT; or an Array, moving to DTRACE_JSON_VALUE.
3621  *
3622  *   DTRACE_JSON_OBJECT:
3623  *     Locate the next key String in an Object.  Sets a flag to denote
3624  *     the next String as a key string and moves to DTRACE_JSON_STRING.
3625  *
3626  *   DTRACE_JSON_COLON:
3627  *     Skip whitespace until we find the colon that separates key Strings
3628  *     from their values.  Once found, move to DTRACE_JSON_VALUE.
3629  *
3630  *   DTRACE_JSON_VALUE:
3631  *     Detects the type of the next value (String, Number, Identifier, Object
3632  *     or Array) and routes to the states that process that type.  Here we also
3633  *     deal with the element selector list if we are requested to traverse down
3634  *     into the object tree.
3635  *
3636  *   DTRACE_JSON_COMMA:
3637  *     Skip whitespace until we find the comma that separates key-value pairs
3638  *     in Objects (returning to DTRACE_JSON_OBJECT) or values in Arrays
3639  *     (similarly DTRACE_JSON_VALUE).  All following literal value processing
3640  *     states return to this state at the end of their value, unless otherwise
3641  *     noted.
3642  *
3643  *   DTRACE_JSON_NUMBER, DTRACE_JSON_NUMBER_FRAC, DTRACE_JSON_NUMBER_EXP:
3644  *     Processes a Number literal from the JSON, including any exponent
3645  *     component that may be present.  Numbers are returned as strings, which
3646  *     may be passed to strtoll() if an integer is required.
3647  *
3648  *   DTRACE_JSON_IDENTIFIER:
3649  *     Processes a "true", "false" or "null" literal in the JSON.
3650  *
3651  *   DTRACE_JSON_STRING, DTRACE_JSON_STRING_ESCAPE,
3652  *   DTRACE_JSON_STRING_ESCAPE_UNICODE:
3653  *     Processes a String literal from the JSON, whether the String denotes
3654  *     a key, a value or part of a larger Object.  Handles all escape sequences
3655  *     present in the specification, including four-digit unicode characters,
3656  *     but merely includes the escape sequence without converting it to the
3657  *     actual escaped character.  If the String is flagged as a key, we
3658  *     move to DTRACE_JSON_COLON rather than DTRACE_JSON_COMMA.
3659  *
3660  *   DTRACE_JSON_COLLECT_OBJECT:
3661  *     This state collects an entire Object (or Array), correctly handling
3662  *     embedded strings.  If the full element selector list matches this nested
3663  *     object, we return the Object in full as a string.  If not, we use this
3664  *     state to skip to the next value at this level and continue processing.
3665  *
3666  * NOTE: This function uses various macros from strtolctype.h to manipulate
3667  * digit values, etc. -- these have all been checked to ensure they make
3668  * no additional function calls.
3669  */
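/*
 * For illustration (an additional example, not part of the original
 * comment): the D expression json(this->j, "foo[0].bar") is decomposed by
 * the DIF_SUBR_JSON case below into the packed selector list
 * "foo" NUL "0" NUL "bar" NUL with nelems = 3; given the input string
 * {"foo": [{"bar": 123}]}, this function would return the string "123".
 */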
3670 static char *
3671 dtrace_json(uint64_t size, uintptr_t json, char *elemlist, int nelems,
3672     char *dest)
3673 {
3674 	dtrace_json_state_t state = DTRACE_JSON_REST;
3675 	int64_t array_elem = INT64_MIN;
3676 	int64_t array_pos = 0;
3677 	uint8_t escape_unicount = 0;
3678 	boolean_t string_is_key = B_FALSE;
3679 	boolean_t collect_object = B_FALSE;
3680 	boolean_t found_key = B_FALSE;
3681 	boolean_t in_array = B_FALSE;
3682 	uint32_t braces = 0, brackets = 0;
3683 	char *elem = elemlist;
3684 	char *dd = dest;
3685 	uintptr_t cur;
3686 
3687 	for (cur = json; cur < json + size; cur++) {
3688 		char cc = dtrace_load8(cur);
3689 		if (cc == '\0')
3690 			return (NULL);
3691 
3692 		switch (state) {
3693 		case DTRACE_JSON_REST:
3694 			if (isspace(cc))
3695 				break;
3696 
3697 			if (cc == '{') {
3698 				state = DTRACE_JSON_OBJECT;
3699 				break;
3700 			}
3701 
3702 			if (cc == '[') {
3703 				in_array = B_TRUE;
3704 				array_pos = 0;
3705 				array_elem = dtrace_strtoll(elem, 10, size);
3706 				found_key = array_elem == 0 ? B_TRUE : B_FALSE;
3707 				state = DTRACE_JSON_VALUE;
3708 				break;
3709 			}
3710 
3711 			/*
3712 			 * ERROR: expected to find a top-level object or array.
3713 			 */
3714 			return (NULL);
3715 		case DTRACE_JSON_OBJECT:
3716 			if (isspace(cc))
3717 				break;
3718 
3719 			if (cc == '"') {
3720 				state = DTRACE_JSON_STRING;
3721 				string_is_key = B_TRUE;
3722 				break;
3723 			}
3724 
3725 			/*
3726 			 * ERROR: either the object did not start with a key
3727 			 * string, or we've run off the end of the object
3728 			 * without finding the requested key.
3729 			 */
3730 			return (NULL);
3731 		case DTRACE_JSON_STRING:
3732 			if (cc == '\\') {
3733 				*dd++ = '\\';
3734 				state = DTRACE_JSON_STRING_ESCAPE;
3735 				break;
3736 			}
3737 
3738 			if (cc == '"') {
3739 				if (collect_object) {
3740 					/*
3741 					 * We don't reset the dest here, as
3742 					 * the string is part of a larger
3743 					 * object being collected.
3744 					 */
3745 					*dd++ = cc;
3746 					collect_object = B_FALSE;
3747 					state = DTRACE_JSON_COLLECT_OBJECT;
3748 					break;
3749 				}
3750 				*dd = '\0';
3751 				dd = dest; /* reset string buffer */
3752 				if (string_is_key) {
3753 					if (dtrace_strncmp(dest, elem,
3754 					    size) == 0)
3755 						found_key = B_TRUE;
3756 				} else if (found_key) {
3757 					if (nelems > 1) {
3758 						/*
3759 						 * We expected an object, not
3760 						 * this string.
3761 						 */
3762 						return (NULL);
3763 					}
3764 					return (dest);
3765 				}
3766 				state = string_is_key ? DTRACE_JSON_COLON :
3767 				    DTRACE_JSON_COMMA;
3768 				string_is_key = B_FALSE;
3769 				break;
3770 			}
3771 
3772 			*dd++ = cc;
3773 			break;
3774 		case DTRACE_JSON_STRING_ESCAPE:
3775 			*dd++ = cc;
3776 			if (cc == 'u') {
3777 				escape_unicount = 0;
3778 				state = DTRACE_JSON_STRING_ESCAPE_UNICODE;
3779 			} else {
3780 				state = DTRACE_JSON_STRING;
3781 			}
3782 			break;
3783 		case DTRACE_JSON_STRING_ESCAPE_UNICODE:
3784 			if (!isxdigit(cc)) {
3785 				/*
3786 				 * ERROR: invalid unicode escape, expected
3787 				 * four valid hexadecimal digits.
3788 				 */
3789 				return (NULL);
3790 			}
3791 
3792 			*dd++ = cc;
3793 			if (++escape_unicount == 4)
3794 				state = DTRACE_JSON_STRING;
3795 			break;
3796 		case DTRACE_JSON_COLON:
3797 			if (isspace(cc))
3798 				break;
3799 
3800 			if (cc == ':') {
3801 				state = DTRACE_JSON_VALUE;
3802 				break;
3803 			}
3804 
3805 			/*
3806 			 * ERROR: expected a colon.
3807 			 */
3808 			return (NULL);
3809 		case DTRACE_JSON_COMMA:
3810 			if (isspace(cc))
3811 				break;
3812 
3813 			if (cc == ',') {
3814 				if (in_array) {
3815 					state = DTRACE_JSON_VALUE;
3816 					if (++array_pos == array_elem)
3817 						found_key = B_TRUE;
3818 				} else {
3819 					state = DTRACE_JSON_OBJECT;
3820 				}
3821 				break;
3822 			}
3823 
3824 			/*
3825 			 * ERROR: either we hit an unexpected character, or
3826 			 * we reached the end of the object or array without
3827 			 * finding the requested key.
3828 			 */
3829 			return (NULL);
3830 		case DTRACE_JSON_IDENTIFIER:
3831 			if (islower(cc)) {
3832 				*dd++ = cc;
3833 				break;
3834 			}
3835 
3836 			*dd = '\0';
3837 			dd = dest; /* reset string buffer */
3838 
3839 			if (dtrace_strncmp(dest, "true", 5) == 0 ||
3840 			    dtrace_strncmp(dest, "false", 6) == 0 ||
3841 			    dtrace_strncmp(dest, "null", 5) == 0) {
3842 				if (found_key) {
3843 					if (nelems > 1) {
3844 						/*
3845 						 * ERROR: We expected an object,
3846 						 * not this identifier.
3847 						 */
3848 						return (NULL);
3849 					}
3850 					return (dest);
3851 				} else {
3852 					cur--;
3853 					state = DTRACE_JSON_COMMA;
3854 					break;
3855 				}
3856 			}
3857 
3858 			/*
3859 			 * ERROR: we did not recognise the identifier as one
3860 			 * of those in the JSON specification.
3861 			 */
3862 			return (NULL);
3863 		case DTRACE_JSON_NUMBER:
3864 			if (cc == '.') {
3865 				*dd++ = cc;
3866 				state = DTRACE_JSON_NUMBER_FRAC;
3867 				break;
3868 			}
3869 
3870 			if (cc == 'x' || cc == 'X') {
3871 				/*
3872 				 * ERROR: specification explicitly excludes
3873 				 * hexadecimal or octal numbers.
3874 				 */
3875 				return (NULL);
3876 			}
3877 
3878 			/* FALLTHRU */
3879 		case DTRACE_JSON_NUMBER_FRAC:
3880 			if (cc == 'e' || cc == 'E') {
3881 				*dd++ = cc;
3882 				state = DTRACE_JSON_NUMBER_EXP;
3883 				break;
3884 			}
3885 
3886 			if (cc == '+' || cc == '-') {
3887 				/*
3888 				 * ERROR: expect sign as part of exponent only.
3889 				 */
3890 				return (NULL);
3891 			}
3892 			/* FALLTHRU */
3893 		case DTRACE_JSON_NUMBER_EXP:
3894 			if (isdigit(cc) || cc == '+' || cc == '-') {
3895 				*dd++ = cc;
3896 				break;
3897 			}
3898 
3899 			*dd = '\0';
3900 			dd = dest; /* reset string buffer */
3901 			if (found_key) {
3902 				if (nelems > 1) {
3903 					/*
3904 					 * ERROR: We expected an object, not
3905 					 * this number.
3906 					 */
3907 					return (NULL);
3908 				}
3909 				return (dest);
3910 			}
3911 
3912 			cur--;
3913 			state = DTRACE_JSON_COMMA;
3914 			break;
3915 		case DTRACE_JSON_VALUE:
3916 			if (isspace(cc))
3917 				break;
3918 
3919 			if (cc == '{' || cc == '[') {
3920 				if (nelems > 1 && found_key) {
3921 					in_array = cc == '[' ? B_TRUE : B_FALSE;
3922 					/*
3923 					 * If our element selector directs us
3924 					 * to descend into this nested object,
3925 					 * then move to the next selector
3926 					 * element in the list and restart the
3927 					 * state machine.
3928 					 */
3929 					while (*elem != '\0')
3930 						elem++;
3931 					elem++; /* skip the inter-element NUL */
3932 					nelems--;
3933 					dd = dest;
3934 					if (in_array) {
3935 						state = DTRACE_JSON_VALUE;
3936 						array_pos = 0;
3937 						array_elem = dtrace_strtoll(
3938 						    elem, 10, size);
3939 						found_key = array_elem == 0 ?
3940 						    B_TRUE : B_FALSE;
3941 					} else {
3942 						found_key = B_FALSE;
3943 						state = DTRACE_JSON_OBJECT;
3944 					}
3945 					break;
3946 				}
3947 
3948 				/*
3949 				 * Otherwise, we wish to either skip this
3950 				 * nested object or return it in full.
3951 				 */
3952 				if (cc == '[')
3953 					brackets = 1;
3954 				else
3955 					braces = 1;
3956 				*dd++ = cc;
3957 				state = DTRACE_JSON_COLLECT_OBJECT;
3958 				break;
3959 			}
3960 
3961 			if (cc == '"') {
3962 				state = DTRACE_JSON_STRING;
3963 				break;
3964 			}
3965 
3966 			if (islower(cc)) {
3967 				/*
3968 				 * Here we deal with true, false and null.
3969 				 */
3970 				*dd++ = cc;
3971 				state = DTRACE_JSON_IDENTIFIER;
3972 				break;
3973 			}
3974 
3975 			if (cc == '-' || isdigit(cc)) {
3976 				*dd++ = cc;
3977 				state = DTRACE_JSON_NUMBER;
3978 				break;
3979 			}
3980 
3981 			/*
3982 			 * ERROR: unexpected character at start of value.
3983 			 */
3984 			return (NULL);
3985 		case DTRACE_JSON_COLLECT_OBJECT:
3986 			if (cc == '\0')
3987 				/*
3988 				 * ERROR: unexpected end of input.
3989 				 */
3990 				return (NULL);
3991 
3992 			*dd++ = cc;
3993 			if (cc == '"') {
3994 				collect_object = B_TRUE;
3995 				state = DTRACE_JSON_STRING;
3996 				break;
3997 			}
3998 
3999 			if (cc == ']') {
4000 				if (brackets-- == 0) {
4001 					/*
4002 					 * ERROR: unbalanced brackets.
4003 					 */
4004 					return (NULL);
4005 				}
4006 			} else if (cc == '}') {
4007 				if (braces-- == 0) {
4008 					/*
4009 					 * ERROR: unbalanced braces.
4010 					 */
4011 					return (NULL);
4012 				}
4013 			} else if (cc == '{') {
4014 				braces++;
4015 			} else if (cc == '[') {
4016 				brackets++;
4017 			}
4018 
4019 			if (brackets == 0 && braces == 0) {
4020 				if (found_key) {
4021 					*dd = '\0';
4022 					return (dest);
4023 				}
4024 				dd = dest; /* reset string buffer */
4025 				state = DTRACE_JSON_COMMA;
4026 			}
4027 			break;
4028 		}
4029 	}
4030 	return (NULL);
4031 }
4032 
4033 /*
4034  * Emulate the execution of DTrace ID subroutines invoked by the call opcode.
4035  * Notice that we don't bother validating the proper number of arguments or
4036  * their types in the tuple stack.  This isn't needed because all argument
4037  * interpretation is safe because of our load safety -- the worst that can
4038  * happen is that a bogus program can obtain bogus results.
4039  */
4040 static void
4041 dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs,
4042     dtrace_key_t *tupregs, int nargs,
4043     dtrace_mstate_t *mstate, dtrace_state_t *state)
4044 {
4045 	volatile uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
4046 	volatile uintptr_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval;
4047 	dtrace_vstate_t *vstate = &state->dts_vstate;
4048 
4049 	union {
4050 		mutex_impl_t mi;
4051 		uint64_t mx;
4052 	} m;
4053 
4054 	union {
4055 		krwlock_t ri;
4056 		uintptr_t rw;
4057 	} r;
4058 
4059 	switch (subr) {
4060 	case DIF_SUBR_RAND:
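		/*
		 * A simple linear-congruential-style value seeded from the
		 * high-resolution timestamp; rand() yields weak pseudo-random
		 * numbers and is not suitable for cryptographic use.
		 */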
4061 		regs[rd] = (dtrace_gethrtime() * 2416 + 374441) % 1771875;
4062 		break;
4063 
4064 	case DIF_SUBR_MUTEX_OWNED:
4065 		if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
4066 		    mstate, vstate)) {
4067 			regs[rd] = NULL;
4068 			break;
4069 		}
4070 
4071 		m.mx = dtrace_load64(tupregs[0].dttk_value);
4072 		if (MUTEX_TYPE_ADAPTIVE(&m.mi))
4073 			regs[rd] = MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER;
4074 		else
4075 			regs[rd] = LOCK_HELD(&m.mi.m_spin.m_spinlock);
4076 		break;
4077 
4078 	case DIF_SUBR_MUTEX_OWNER:
4079 		if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
4080 		    mstate, vstate)) {
4081 			regs[rd] = NULL;
4082 			break;
4083 		}
4084 
4085 		m.mx = dtrace_load64(tupregs[0].dttk_value);
4086 		if (MUTEX_TYPE_ADAPTIVE(&m.mi) &&
4087 		    MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER)
4088 			regs[rd] = (uintptr_t)MUTEX_OWNER(&m.mi);
4089 		else
4090 			regs[rd] = 0;
4091 		break;
4092 
4093 	case DIF_SUBR_MUTEX_TYPE_ADAPTIVE:
4094 		if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
4095 		    mstate, vstate)) {
4096 			regs[rd] = NULL;
4097 			break;
4098 		}
4099 
4100 		m.mx = dtrace_load64(tupregs[0].dttk_value);
4101 		regs[rd] = MUTEX_TYPE_ADAPTIVE(&m.mi);
4102 		break;
4103 
4104 	case DIF_SUBR_MUTEX_TYPE_SPIN:
4105 		if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
4106 		    mstate, vstate)) {
4107 			regs[rd] = NULL;
4108 			break;
4109 		}
4110 
4111 		m.mx = dtrace_load64(tupregs[0].dttk_value);
4112 		regs[rd] = MUTEX_TYPE_SPIN(&m.mi);
4113 		break;
4114 
4115 	case DIF_SUBR_RW_READ_HELD: {
4116 		uintptr_t tmp;
4117 
4118 		if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t),
4119 		    mstate, vstate)) {
4120 			regs[rd] = NULL;
4121 			break;
4122 		}
4123 
4124 		r.rw = dtrace_loadptr(tupregs[0].dttk_value);
4125 		regs[rd] = _RW_READ_HELD(&r.ri, tmp);
4126 		break;
4127 	}
4128 
4129 	case DIF_SUBR_RW_WRITE_HELD:
4130 		if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t),
4131 		    mstate, vstate)) {
4132 			regs[rd] = NULL;
4133 			break;
4134 		}
4135 
4136 		r.rw = dtrace_loadptr(tupregs[0].dttk_value);
4137 		regs[rd] = _RW_WRITE_HELD(&r.ri);
4138 		break;
4139 
4140 	case DIF_SUBR_RW_ISWRITER:
4141 		if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t),
4142 		    mstate, vstate)) {
4143 			regs[rd] = NULL;
4144 			break;
4145 		}
4146 
4147 		r.rw = dtrace_loadptr(tupregs[0].dttk_value);
4148 		regs[rd] = _RW_ISWRITER(&r.ri);
4149 		break;
4150 
4151 	case DIF_SUBR_BCOPY: {
4152 		/*
4153 		 * We need to be sure that the destination is in the scratch
4154 		 * region -- no other region is allowed.
4155 		 */
4156 		uintptr_t src = tupregs[0].dttk_value;
4157 		uintptr_t dest = tupregs[1].dttk_value;
4158 		size_t size = tupregs[2].dttk_value;
4159 
4160 		if (!dtrace_inscratch(dest, size, mstate)) {
4161 			*flags |= CPU_DTRACE_BADADDR;
4162 			*illval = regs[rd];
4163 			break;
4164 		}
4165 
4166 		if (!dtrace_canload(src, size, mstate, vstate)) {
4167 			regs[rd] = NULL;
4168 			break;
4169 		}
4170 
4171 		dtrace_bcopy((void *)src, (void *)dest, size);
4172 		break;
4173 	}
4174 
4175 	case DIF_SUBR_ALLOCA:
4176 	case DIF_SUBR_COPYIN: {
4177 		uintptr_t dest = P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
4178 		uint64_t size =
4179 		    tupregs[subr == DIF_SUBR_ALLOCA ? 0 : 1].dttk_value;
4180 		size_t scratch_size = (dest - mstate->dtms_scratch_ptr) + size;
4181 
4182 		/*
4183 		 * This action doesn't require any credential checks since
4184 		 * probes will not activate in user contexts to which the
4185 		 * enabling user does not have permissions.
4186 		 */
4187 
4188 		/*
4189 		 * Rounding up the user allocation size could have overflowed
4190 		 * a large, bogus allocation (like -1ULL) to 0.
4191 		 */
4192 		if (scratch_size < size ||
4193 		    !DTRACE_INSCRATCH(mstate, scratch_size)) {
4194 			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4195 			regs[rd] = NULL;
4196 			break;
4197 		}
4198 
4199 		if (subr == DIF_SUBR_COPYIN) {
4200 			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4201 			dtrace_copyin(tupregs[0].dttk_value, dest, size, flags);
4202 			DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4203 		}
4204 
4205 		mstate->dtms_scratch_ptr += scratch_size;
4206 		regs[rd] = dest;
4207 		break;
4208 	}
4209 
4210 	case DIF_SUBR_COPYINTO: {
4211 		uint64_t size = tupregs[1].dttk_value;
4212 		uintptr_t dest = tupregs[2].dttk_value;
4213 
4214 		/*
4215 		 * This action doesn't require any credential checks since
4216 		 * probes will not activate in user contexts to which the
4217 		 * enabling user does not have permissions.
4218 		 */
4219 		if (!dtrace_inscratch(dest, size, mstate)) {
4220 			*flags |= CPU_DTRACE_BADADDR;
4221 			*illval = regs[rd];
4222 			break;
4223 		}
4224 
4225 		DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4226 		dtrace_copyin(tupregs[0].dttk_value, dest, size, flags);
4227 		DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4228 		break;
4229 	}
4230 
4231 	case DIF_SUBR_COPYINSTR: {
4232 		uintptr_t dest = mstate->dtms_scratch_ptr;
4233 		uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4234 
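		/*
		 * An optional second argument caps the copy at that many
		 * characters, plus room for the terminating NUL.
		 */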
4235 		if (nargs > 1 && tupregs[1].dttk_value < size)
4236 			size = tupregs[1].dttk_value + 1;
4237 
4238 		/*
4239 		 * This action doesn't require any credential checks since
4240 		 * probes will not activate in user contexts to which the
4241 		 * enabling user does not have permissions.
4242 		 */
4243 		if (!DTRACE_INSCRATCH(mstate, size)) {
4244 			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4245 			regs[rd] = NULL;
4246 			break;
4247 		}
4248 
4249 		DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4250 		dtrace_copyinstr(tupregs[0].dttk_value, dest, size, flags);
4251 		DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4252 
4253 		((char *)dest)[size - 1] = '\0';
4254 		mstate->dtms_scratch_ptr += size;
4255 		regs[rd] = dest;
4256 		break;
4257 	}
4258 
4259 	case DIF_SUBR_MSGSIZE:
4260 	case DIF_SUBR_MSGDSIZE: {
4261 		uintptr_t baddr = tupregs[0].dttk_value, daddr;
4262 		uintptr_t wptr, rptr;
4263 		size_t count = 0;
4264 		int cont = 0;
4265 
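		/*
		 * Walk the mblk chain, summing b_wptr - b_rptr for each
		 * block; for msgdsize(), only M_DATA blocks contribute to
		 * the count (mirroring the semantics of msgdsize(9F)).
		 */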
4266 		while (baddr != NULL && !(*flags & CPU_DTRACE_FAULT)) {
4267 
4268 			if (!dtrace_canload(baddr, sizeof (mblk_t), mstate,
4269 			    vstate)) {
4270 				regs[rd] = NULL;
4271 				break;
4272 			}
4273 
4274 			wptr = dtrace_loadptr(baddr +
4275 			    offsetof(mblk_t, b_wptr));
4276 
4277 			rptr = dtrace_loadptr(baddr +
4278 			    offsetof(mblk_t, b_rptr));
4279 
4280 			if (wptr < rptr) {
4281 				*flags |= CPU_DTRACE_BADADDR;
4282 				*illval = tupregs[0].dttk_value;
4283 				break;
4284 			}
4285 
4286 			daddr = dtrace_loadptr(baddr +
4287 			    offsetof(mblk_t, b_datap));
4288 
4289 			baddr = dtrace_loadptr(baddr +
4290 			    offsetof(mblk_t, b_cont));
4291 
4292 			/*
4293 			 * We want to protect against denial-of-service here,
4294 			 * so we're only going to search the list for
4295 			 * dtrace_msgdsize_max mblks.
4296 			 */
4297 			if (cont++ > dtrace_msgdsize_max) {
4298 				*flags |= CPU_DTRACE_ILLOP;
4299 				break;
4300 			}
4301 
4302 			if (subr == DIF_SUBR_MSGDSIZE) {
4303 				if (dtrace_load8(daddr +
4304 				    offsetof(dblk_t, db_type)) != M_DATA)
4305 					continue;
4306 			}
4307 
4308 			count += wptr - rptr;
4309 		}
4310 
4311 		if (!(*flags & CPU_DTRACE_FAULT))
4312 			regs[rd] = count;
4313 
4314 		break;
4315 	}
4316 
4317 	case DIF_SUBR_PROGENYOF: {
4318 		pid_t pid = tupregs[0].dttk_value;
4319 		proc_t *p;
4320 		int rval = 0;
4321 
4322 		DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4323 
4324 		for (p = curthread->t_procp; p != NULL; p = p->p_parent) {
4325 			if (p->p_pidp->pid_id == pid) {
4326 				rval = 1;
4327 				break;
4328 			}
4329 		}
4330 
4331 		DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4332 
4333 		regs[rd] = rval;
4334 		break;
4335 	}
4336 
4337 	case DIF_SUBR_SPECULATION:
4338 		regs[rd] = dtrace_speculation(state);
4339 		break;
4340 
4341 	case DIF_SUBR_COPYOUT: {
4342 		uintptr_t kaddr = tupregs[0].dttk_value;
4343 		uintptr_t uaddr = tupregs[1].dttk_value;
4344 		uint64_t size = tupregs[2].dttk_value;
4345 
4346 		if (!dtrace_destructive_disallow &&
4347 		    dtrace_priv_proc_control(state, mstate) &&
4348 		    !dtrace_istoxic(kaddr, size) &&
4349 		    dtrace_canload(kaddr, size, mstate, vstate)) {
4350 			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4351 			dtrace_copyout(kaddr, uaddr, size, flags);
4352 			DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4353 		}
4354 		break;
4355 	}
4356 
4357 	case DIF_SUBR_COPYOUTSTR: {
4358 		uintptr_t kaddr = tupregs[0].dttk_value;
4359 		uintptr_t uaddr = tupregs[1].dttk_value;
4360 		uint64_t size = tupregs[2].dttk_value;
4361 		size_t lim;
4362 
4363 		if (!dtrace_destructive_disallow &&
4364 		    dtrace_priv_proc_control(state, mstate) &&
4365 		    !dtrace_istoxic(kaddr, size) &&
4366 		    dtrace_strcanload(kaddr, size, &lim, mstate, vstate)) {
4367 			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4368 			dtrace_copyoutstr(kaddr, uaddr, lim, flags);
4369 			DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4370 		}
4371 		break;
4372 	}
4373 
4374 	case DIF_SUBR_STRLEN: {
4375 		size_t size = state->dts_options[DTRACEOPT_STRSIZE];
4376 		uintptr_t addr = (uintptr_t)tupregs[0].dttk_value;
4377 		size_t lim;
4378 
4379 		if (!dtrace_strcanload(addr, size, &lim, mstate, vstate)) {
4380 			regs[rd] = NULL;
4381 			break;
4382 		}
4383 		regs[rd] = dtrace_strlen((char *)addr, lim);
4384 
4385 		break;
4386 	}
4387 
4388 	case DIF_SUBR_STRCHR:
4389 	case DIF_SUBR_STRRCHR: {
4390 		/*
4391 		 * We're going to iterate over the string looking for the
4392 		 * specified character.  We will iterate until we have reached
4393 		 * the string length or we have found the character.  If this
4394 		 * is DIF_SUBR_STRRCHR, we will look for the last occurrence
4395 		 * of the specified character instead of the first.
4396 		 */
4397 		uintptr_t addr = tupregs[0].dttk_value;
4398 		uintptr_t addr_limit;
4399 		uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4400 		size_t lim;
4401 		char c, target = (char)tupregs[1].dttk_value;
4402 
4403 		if (!dtrace_strcanload(addr, size, &lim, mstate, vstate)) {
4404 			regs[rd] = NULL;
4405 			break;
4406 		}
4407 		addr_limit = addr + lim;
4408 
4409 		for (regs[rd] = NULL; addr < addr_limit; addr++) {
4410 			if ((c = dtrace_load8(addr)) == target) {
4411 				regs[rd] = addr;
4412 
4413 				if (subr == DIF_SUBR_STRCHR)
4414 					break;
4415 			}
4416 			if (c == '\0')
4417 				break;
4418 		}
4419 
4420 		break;
4421 	}
4422 
4423 	case DIF_SUBR_STRSTR:
4424 	case DIF_SUBR_INDEX:
4425 	case DIF_SUBR_RINDEX: {
4426 		/*
4427 		 * We're going to iterate over the string looking for the
4428 		 * specified string.  We will iterate until we have reached
4429 		 * the string length or we have found the string.  (Yes, this
4430 		 * is done in the most naive way possible -- but considering
4431 		 * that the string we're searching for is likely to be
4432 		 * relatively short, the complexity of Rabin-Karp or similar
4433 		 * hardly seems merited.)
4434 		 */
4435 		char *addr = (char *)(uintptr_t)tupregs[0].dttk_value;
4436 		char *substr = (char *)(uintptr_t)tupregs[1].dttk_value;
4437 		uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4438 		size_t len = dtrace_strlen(addr, size);
4439 		size_t sublen = dtrace_strlen(substr, size);
4440 		char *limit = addr + len, *orig = addr;
4441 		int notfound = subr == DIF_SUBR_STRSTR ? 0 : -1;
4442 		int inc = 1;
4443 
4444 		regs[rd] = notfound;
4445 
4446 		if (!dtrace_canload((uintptr_t)addr, len + 1, mstate, vstate)) {
4447 			regs[rd] = NULL;
4448 			break;
4449 		}
4450 
4451 		if (!dtrace_canload((uintptr_t)substr, sublen + 1, mstate,
4452 		    vstate)) {
4453 			regs[rd] = NULL;
4454 			break;
4455 		}
4456 
4457 		/*
4458 		 * strstr() and index()/rindex() have similar semantics if
4459 		 * both strings are the empty string: strstr() returns a
4460 		 * pointer to the (empty) string, and index() and rindex()
4461 		 * both return index 0 (regardless of any position argument).
4462 		 */
4463 		if (sublen == 0 && len == 0) {
4464 			if (subr == DIF_SUBR_STRSTR)
4465 				regs[rd] = (uintptr_t)addr;
4466 			else
4467 				regs[rd] = 0;
4468 			break;
4469 		}
4470 
4471 		if (subr != DIF_SUBR_STRSTR) {
4472 			if (subr == DIF_SUBR_RINDEX) {
4473 				limit = orig - 1;
4474 				addr += len;
4475 				inc = -1;
4476 			}
4477 
4478 			/*
4479 			 * Both index() and rindex() take an optional position
4480 			 * argument that denotes the starting position.
4481 			 */
4482 			if (nargs == 3) {
4483 				int64_t pos = (int64_t)tupregs[2].dttk_value;
4484 
4485 				/*
4486 				 * If the position argument to index() is
4487 				 * negative, Perl implicitly clamps it at
4488 				 * zero.  This semantic is a little surprising
4489 				 * given the special meaning of negative
4490 				 * positions to similar Perl functions like
4491 				 * substr(), but it appears to reflect a
4492 				 * notion that index() can start from a
4493 				 * negative index and increment its way up to
4494 				 * the string.  Given this notion, Perl's
4495 				 * rindex() is at least self-consistent in
4496 				 * that it implicitly clamps positions greater
4497 				 * than the string length to be the string
4498 				 * length.  Where Perl completely loses
4499 				 * coherence, however, is when the specified
4500 				 * substring is the empty string ("").  In
4501 				 * this case, even if the position is
4502 				 * negative, rindex() returns 0 -- and even if
4503 				 * the position is greater than the length,
4504 				 * index() returns the string length.  These
4505 				 * semantics violate the notion that index()
4506 				 * should never return a value less than the
4507 				 * specified position and that rindex() should
4508 				 * never return a value greater than the
4509 				 * specified position.  (One assumes that
4510 				 * these semantics are artifacts of Perl's
4511 				 * implementation and not the results of
4512 				 * deliberate design -- it beggars belief that
4513 				 * even Larry Wall could desire such oddness.)
4514 				 * While in the abstract one would wish for
4515 				 * consistent position semantics across
4516 				 * substr(), index() and rindex() -- or at the
4517 				 * very least self-consistent position
4518 				 * semantics for index() and rindex() -- we
4519 				 * instead opt to keep with the extant Perl
4520 				 * semantics, in all their broken glory.  (Do
4521 				 * we have more desire to maintain Perl's
4522 				 * semantics than Perl does?  Probably.)
4523 				 */
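				/*
				 * Worked examples of the resulting semantics
				 * (illustrative, not from the original):
				 * rindex("chocolate", "", -5) returns 0, and
				 * index("chocolate", "", 20) returns 9 -- the
				 * string length -- per the special cases
				 * below.
				 */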
4524 				if (subr == DIF_SUBR_RINDEX) {
4525 					if (pos < 0) {
4526 						if (sublen == 0)
4527 							regs[rd] = 0;
4528 						break;
4529 					}
4530 
4531 					if (pos > len)
4532 						pos = len;
4533 				} else {
4534 					if (pos < 0)
4535 						pos = 0;
4536 
4537 					if (pos >= len) {
4538 						if (sublen == 0)
4539 							regs[rd] = len;
4540 						break;
4541 					}
4542 				}
4543 
4544 				addr = orig + pos;
4545 			}
4546 		}
4547 
4548 		for (regs[rd] = notfound; addr != limit; addr += inc) {
4549 			if (dtrace_strncmp(addr, substr, sublen) == 0) {
4550 				if (subr != DIF_SUBR_STRSTR) {
4551 					/*
4552 					 * As D index() and rindex() are
4553 					 * modeled on Perl (and not on awk),
4554 					 * we return a zero-based (and not a
4555 					 * one-based) index.  (For you Perl
4556 					 * weenies: no, we're not going to add
4557 					 * $[ -- and shouldn't you be at a con
4558 					 * or something?)
4559 					 */
4560 					regs[rd] = (uintptr_t)(addr - orig);
4561 					break;
4562 				}
4563 
4564 				ASSERT(subr == DIF_SUBR_STRSTR);
4565 				regs[rd] = (uintptr_t)addr;
4566 				break;
4567 			}
4568 		}
4569 
4570 		break;
4571 	}
4572 
4573 	case DIF_SUBR_STRTOK: {
4574 		uintptr_t addr = tupregs[0].dttk_value;
4575 		uintptr_t tokaddr = tupregs[1].dttk_value;
4576 		uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4577 		uintptr_t limit, toklimit;
4578 		size_t clim;
4579 		uint8_t c, tokmap[32];	 /* 256 / 8 */
4580 		char *dest = (char *)mstate->dtms_scratch_ptr;
4581 		int i;
4582 
4583 		/*
4584 		 * Check both the token buffer and (later) the input buffer,
4585 		 * since both could be non-scratch addresses.
4586 		 */
4587 		if (!dtrace_strcanload(tokaddr, size, &clim, mstate, vstate)) {
4588 			regs[rd] = NULL;
4589 			break;
4590 		}
4591 		toklimit = tokaddr + clim;
4592 
4593 		if (!DTRACE_INSCRATCH(mstate, size)) {
4594 			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4595 			regs[rd] = NULL;
4596 			break;
4597 		}
4598 
4599 		if (addr == NULL) {
4600 			/*
4601 			 * If the address specified is NULL, we use our saved
4602 			 * strtok pointer from the mstate.  Note that this
4603 			 * means that the saved strtok pointer is _only_
4604 			 * valid within multiple enablings of the same probe --
4605 			 * it behaves like an implicit clause-local variable.
4606 			 */
4607 			addr = mstate->dtms_strtok;
4608 			limit = mstate->dtms_strtok_limit;
4609 		} else {
4610 			/*
4611 			 * If the user-specified address is non-NULL we must
4612 			 * access check it.  This is the only time we have
4613 			 * a chance to do so, since this address may reside
4614 			 * in the string table of this clause -- future calls
4615 			 * (when we fetch addr from mstate->dtms_strtok)
4616 			 * would fail this access check.
4617 			 */
4618 			if (!dtrace_strcanload(addr, size, &clim, mstate,
4619 			    vstate)) {
4620 				regs[rd] = NULL;
4621 				break;
4622 			}
4623 			limit = addr + clim;
4624 		}
4625 
4626 		/*
4627 		 * First, zero the token map, and then process the token
4628 		 * string -- setting a bit in the map for every character
4629 		 * found in the token string.
4630 		 */
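		/*
		 * For example (illustrative): the token character 'a' (0x61)
		 * sets bit 1 of tokmap[12], since 0x61 >> 3 == 12 and
		 * 0x61 & 0x7 == 1.
		 */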
4631 		for (i = 0; i < sizeof (tokmap); i++)
4632 			tokmap[i] = 0;
4633 
4634 		for (; tokaddr < toklimit; tokaddr++) {
4635 			if ((c = dtrace_load8(tokaddr)) == '\0')
4636 				break;
4637 
4638 			ASSERT((c >> 3) < sizeof (tokmap));
4639 			tokmap[c >> 3] |= (1 << (c & 0x7));
4640 		}
4641 
4642 		for (; addr < limit; addr++) {
4643 			/*
4644 			 * We're looking for a character that is _not_
4645 			 * contained in the token string.
4646 			 */
4647 			if ((c = dtrace_load8(addr)) == '\0')
4648 				break;
4649 
4650 			if (!(tokmap[c >> 3] & (1 << (c & 0x7))))
4651 				break;
4652 		}
4653 
4654 		if (c == '\0') {
4655 			/*
4656 			 * We reached the end of the string without finding
4657 			 * any character that was not in the token string.
4658 			 * We return NULL in this case, and we set the saved
4659 			 * address to NULL as well.
4660 			 */
4661 			regs[rd] = NULL;
4662 			mstate->dtms_strtok = NULL;
4663 			mstate->dtms_strtok_limit = NULL;
4664 			break;
4665 		}
4666 
4667 		/*
4668 		 * From here on, we're copying into the destination string.
4669 		 */
4670 		for (i = 0; addr < limit && i < size - 1; addr++) {
4671 			if ((c = dtrace_load8(addr)) == '\0')
4672 				break;
4673 
4674 			if (tokmap[c >> 3] & (1 << (c & 0x7)))
4675 				break;
4676 
4677 			ASSERT(i < size);
4678 			dest[i++] = c;
4679 		}
4680 
4681 		ASSERT(i < size);
4682 		dest[i] = '\0';
4683 		regs[rd] = (uintptr_t)dest;
4684 		mstate->dtms_scratch_ptr += size;
4685 		mstate->dtms_strtok = addr;
4686 		mstate->dtms_strtok_limit = limit;
4687 		break;
4688 	}
4689 
4690 	case DIF_SUBR_SUBSTR: {
4691 		uintptr_t s = tupregs[0].dttk_value;
4692 		uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4693 		char *d = (char *)mstate->dtms_scratch_ptr;
4694 		int64_t index = (int64_t)tupregs[1].dttk_value;
4695 		int64_t remaining = (int64_t)tupregs[2].dttk_value;
4696 		size_t len = dtrace_strlen((char *)s, size);
4697 		int64_t i;
4698 
4699 		if (!dtrace_canload(s, len + 1, mstate, vstate)) {
4700 			regs[rd] = NULL;
4701 			break;
4702 		}
4703 
4704 		if (!DTRACE_INSCRATCH(mstate, size)) {
4705 			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4706 			regs[rd] = NULL;
4707 			break;
4708 		}
4709 
4710 		if (nargs <= 2)
4711 			remaining = (int64_t)size;
4712 
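		/*
		 * A negative index is interpreted relative to the end of the
		 * string; e.g. (illustrative) substr("hello", -3, 2) becomes
		 * index 2 and yields "ll".
		 */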
4713 		if (index < 0) {
4714 			index += len;
4715 
4716 			if (index < 0 && index + remaining > 0) {
4717 				remaining += index;
4718 				index = 0;
4719 			}
4720 		}
4721 
4722 		if (index >= len || index < 0) {
4723 			remaining = 0;
4724 		} else if (remaining < 0) {
4725 			remaining += len - index;
4726 		} else if (index + remaining > size) {
4727 			remaining = size - index;
4728 		}
4729 
4730 		for (i = 0; i < remaining; i++) {
4731 			if ((d[i] = dtrace_load8(s + index + i)) == '\0')
4732 				break;
4733 		}
4734 
4735 		d[i] = '\0';
4736 
4737 		mstate->dtms_scratch_ptr += size;
4738 		regs[rd] = (uintptr_t)d;
4739 		break;
4740 	}
4741 
4742 	case DIF_SUBR_JSON: {
4743 		uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4744 		uintptr_t json = tupregs[0].dttk_value;
4745 		size_t jsonlen = dtrace_strlen((char *)json, size);
4746 		uintptr_t elem = tupregs[1].dttk_value;
4747 		size_t elemlen = dtrace_strlen((char *)elem, size);
4748 
4749 		char *dest = (char *)mstate->dtms_scratch_ptr;
4750 		char *elemlist = (char *)mstate->dtms_scratch_ptr + jsonlen + 1;
4751 		char *ee = elemlist;
4752 		int nelems = 1;
4753 		uintptr_t cur;
4754 
4755 		if (!dtrace_canload(json, jsonlen + 1, mstate, vstate) ||
4756 		    !dtrace_canload(elem, elemlen + 1, mstate, vstate)) {
4757 			regs[rd] = NULL;
4758 			break;
4759 		}
4760 
4761 		if (!DTRACE_INSCRATCH(mstate, jsonlen + 1 + elemlen + 1)) {
4762 			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4763 			regs[rd] = NULL;
4764 			break;
4765 		}
4766 
4767 		/*
4768 		 * Read the element selector and split it up into a packed list
4769 		 * of strings.
4770 		 */
4771 		for (cur = elem; cur < elem + elemlen; cur++) {
4772 			char cc = dtrace_load8(cur);
4773 
4774 			if (cur == elem && cc == '[') {
4775 				/*
4776 				 * If the first element selector key is
4777 				 * actually an array index then ignore the
4778 				 * bracket.
4779 				 */
4780 				continue;
4781 			}
4782 
4783 			if (cc == ']')
4784 				continue;
4785 
4786 			if (cc == '.' || cc == '[') {
4787 				nelems++;
4788 				cc = '\0';
4789 			}
4790 
4791 			*ee++ = cc;
4792 		}
4793 		*ee++ = '\0';
4794 
4795 		if ((regs[rd] = (uintptr_t)dtrace_json(size, json, elemlist,
4796 		    nelems, dest)) != NULL)
4797 			mstate->dtms_scratch_ptr += jsonlen + 1;
4798 		break;
4799 	}
4800 
4801 	case DIF_SUBR_TOUPPER:
4802 	case DIF_SUBR_TOLOWER: {
4803 		uintptr_t s = tupregs[0].dttk_value;
4804 		uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4805 		char *dest = (char *)mstate->dtms_scratch_ptr, c;
4806 		size_t len = dtrace_strlen((char *)s, size);
4807 		char lower, upper, convert;
4808 		int64_t i;
4809 
4810 		if (subr == DIF_SUBR_TOUPPER) {
4811 			lower = 'a';
4812 			upper = 'z';
4813 			convert = 'A';
4814 		} else {
4815 			lower = 'A';
4816 			upper = 'Z';
4817 			convert = 'a';
4818 		}
4819 
4820 		if (!dtrace_canload(s, len + 1, mstate, vstate)) {
4821 			regs[rd] = NULL;
4822 			break;
4823 		}
4824 
4825 		if (!DTRACE_INSCRATCH(mstate, size)) {
4826 			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4827 			regs[rd] = NULL;
4828 			break;
4829 		}
4830 
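		/*
		 * Copy the string, shifting any character in [lower, upper]
		 * by (convert - lower); e.g. toupper() maps 'a' (0x61) to
		 * 'A' (0x41).
		 */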
4831 		for (i = 0; i < size - 1; i++) {
4832 			if ((c = dtrace_load8(s + i)) == '\0')
4833 				break;
4834 
4835 			if (c >= lower && c <= upper)
4836 				c = convert + (c - lower);
4837 
4838 			dest[i] = c;
4839 		}
4840 
4841 		ASSERT(i < size);
4842 		dest[i] = '\0';
4843 		regs[rd] = (uintptr_t)dest;
4844 		mstate->dtms_scratch_ptr += size;
4845 		break;
4846 	}
4847 
4848 	case DIF_SUBR_GETMAJOR:
4849 #ifdef _LP64
4850 		regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR64) & MAXMAJ64;
4851 #else
4852 		regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR) & MAXMAJ;
4853 #endif
4854 		break;
4855 
4856 	case DIF_SUBR_GETMINOR:
4857 #ifdef _LP64
4858 		regs[rd] = tupregs[0].dttk_value & MAXMIN64;
4859 #else
4860 		regs[rd] = tupregs[0].dttk_value & MAXMIN;
4861 #endif
4862 		break;
4863 
4864 	case DIF_SUBR_DDI_PATHNAME: {
4865 		/*
4866 		 * This one is a galactic mess.  We are going to roughly
4867 		 * emulate ddi_pathname(), but it's made more complicated
4868 		 * by the fact that we (a) want to include the minor name and
4869 		 * (b) must proceed iteratively instead of recursively.
4870 		 */
4871 		uintptr_t dest = mstate->dtms_scratch_ptr;
4872 		uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4873 		char *start = (char *)dest, *end = start + size - 1;
4874 		uintptr_t daddr = tupregs[0].dttk_value;
4875 		int64_t minor = (int64_t)tupregs[1].dttk_value;
4876 		char *s;
4877 		int i, len, depth = 0;
4878 
4879 		/*
4880 		 * Due to all the pointer jumping we do and context we must
4881 		 * rely upon, we just mandate that the user must have kernel
4882 		 * read privileges to use this routine.
4883 		 */
4884 		if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) == 0) {
4885 			*flags |= CPU_DTRACE_KPRIV;
4886 			*illval = daddr;
4887 			regs[rd] = NULL;
4888 		}
4889 
4890 		if (!DTRACE_INSCRATCH(mstate, size)) {
4891 			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4892 			regs[rd] = NULL;
4893 			break;
4894 		}
4895 
4896 		*end = '\0';
4897 
4898 		/*
4899 		 * We want to have a name for the minor.  In order to do this,
4900 		 * we need to walk the minor list from the devinfo.  We want
4901 		 * to be sure that we don't infinitely walk a circular list,
4902 		 * so we check for circularity by sending a scout pointer
4903 		 * ahead two elements for every element that we iterate over;
4904 		 * if the list is circular, these will ultimately point to the
4905 		 * same element.  You may recognize this little trick as the
4906 		 * answer to a stupid interview question -- one that always
4907 		 * seems to be asked by those who had to have it laboriously
4908 		 * explained to them, and who can't even concisely describe
4909 		 * the conditions under which one would be forced to resort to
4910 		 * this technique.  Needless to say, those conditions are
4911 		 * found here -- and probably only here.  Is this the only use
4912 		 * of this infamous trick in shipping, production code?  If it
4913 		 * isn't, it probably should be...
4914 		 */
4915 		if (minor != -1) {
4916 			uintptr_t maddr = dtrace_loadptr(daddr +
4917 			    offsetof(struct dev_info, devi_minor));
4918 
4919 			uintptr_t next = offsetof(struct ddi_minor_data, next);
4920 			uintptr_t name = offsetof(struct ddi_minor_data,
4921 			    d_minor) + offsetof(struct ddi_minor, name);
4922 			uintptr_t dev = offsetof(struct ddi_minor_data,
4923 			    d_minor) + offsetof(struct ddi_minor, dev);
4924 			uintptr_t scout;
4925 
4926 			if (maddr != NULL)
4927 				scout = dtrace_loadptr(maddr + next);
4928 
4929 			while (maddr != NULL && !(*flags & CPU_DTRACE_FAULT)) {
4930 				uint64_t m;
4931 #ifdef _LP64
4932 				m = dtrace_load64(maddr + dev) & MAXMIN64;
4933 #else
4934 				m = dtrace_load32(maddr + dev) & MAXMIN;
4935 #endif
4936 				if (m != minor) {
4937 					maddr = dtrace_loadptr(maddr + next);
4938 
4939 					if (scout == NULL)
4940 						continue;
4941 
4942 					scout = dtrace_loadptr(scout + next);
4943 
4944 					if (scout == NULL)
4945 						continue;
4946 
4947 					scout = dtrace_loadptr(scout + next);
4948 
4949 					if (scout == NULL)
4950 						continue;
4951 
4952 					if (scout == maddr) {
4953 						*flags |= CPU_DTRACE_ILLOP;
4954 						break;
4955 					}
4956 
4957 					continue;
4958 				}
4959 
4960 				/*
4961 				 * We have the minor data.  Now we need to
4962 				 * copy the minor's name into the end of the
4963 				 * pathname.
4964 				 */
4965 				s = (char *)dtrace_loadptr(maddr + name);
4966 				len = dtrace_strlen(s, size);
4967 
4968 				if (*flags & CPU_DTRACE_FAULT)
4969 					break;
4970 
4971 				if (len != 0) {
4972 					if ((end -= (len + 1)) < start)
4973 						break;
4974 
4975 					*end = ':';
4976 				}
4977 
4978 				for (i = 1; i <= len; i++)
4979 					end[i] = dtrace_load8((uintptr_t)s++);
4980 				break;
4981 			}
4982 		}
4983 
4984 		while (daddr != NULL && !(*flags & CPU_DTRACE_FAULT)) {
4985 			ddi_node_state_t devi_state;
4986 
4987 			devi_state = dtrace_load32(daddr +
4988 			    offsetof(struct dev_info, devi_node_state));
4989 
4990 			if (*flags & CPU_DTRACE_FAULT)
4991 				break;
4992 
4993 			if (devi_state >= DS_INITIALIZED) {
4994 				s = (char *)dtrace_loadptr(daddr +
4995 				    offsetof(struct dev_info, devi_addr));
4996 				len = dtrace_strlen(s, size);
4997 
4998 				if (*flags & CPU_DTRACE_FAULT)
4999 					break;
5000 
5001 				if (len != 0) {
5002 					if ((end -= (len + 1)) < start)
5003 						break;
5004 
5005 					*end = '@';
5006 				}
5007 
5008 				for (i = 1; i <= len; i++)
5009 					end[i] = dtrace_load8((uintptr_t)s++);
5010 			}
5011 
5012 			/*
5013 			 * Now for the node name...
5014 			 */
5015 			s = (char *)dtrace_loadptr(daddr +
5016 			    offsetof(struct dev_info, devi_node_name));
5017 
5018 			daddr = dtrace_loadptr(daddr +
5019 			    offsetof(struct dev_info, devi_parent));
5020 
5021 			/*
5022 			 * If our parent is NULL (that is, if we're the root
5023 			 * node), we're going to use the special path
5024 			 * "devices".
5025 			 */
5026 			if (daddr == NULL)
5027 				s = "devices";
5028 
5029 			len = dtrace_strlen(s, size);
5030 			if (*flags & CPU_DTRACE_FAULT)
5031 				break;
5032 
5033 			if ((end -= (len + 1)) < start)
5034 				break;
5035 
5036 			for (i = 1; i <= len; i++)
5037 				end[i] = dtrace_load8((uintptr_t)s++);
5038 			*end = '/';
5039 
5040 			if (depth++ > dtrace_devdepth_max) {
5041 				*flags |= CPU_DTRACE_ILLOP;
5042 				break;
5043 			}
5044 		}
5045 
5046 		if (end < start)
5047 			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5048 
5049 		if (daddr == NULL) {
5050 			regs[rd] = (uintptr_t)end;
5051 			mstate->dtms_scratch_ptr += size;
5052 		}
5053 
5054 		break;
5055 	}
5056 
5057 	case DIF_SUBR_STRJOIN: {
5058 		char *d = (char *)mstate->dtms_scratch_ptr;
5059 		uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
5060 		uintptr_t s1 = tupregs[0].dttk_value;
5061 		uintptr_t s2 = tupregs[1].dttk_value;
5062 		int i = 0, j = 0;
5063 		size_t lim1, lim2;
5064 		char c;
5065 
5066 		if (!dtrace_strcanload(s1, size, &lim1, mstate, vstate) ||
5067 		    !dtrace_strcanload(s2, size, &lim2, mstate, vstate)) {
5068 			regs[rd] = NULL;
5069 			break;
5070 		}
5071 
5072 		if (!DTRACE_INSCRATCH(mstate, size)) {
5073 			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5074 			regs[rd] = NULL;
5075 			break;
5076 		}
5077 
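		/*
		 * Copy s1 up to (but not including) its terminating NUL,
		 * then append s2; if the concatenation would not fit in
		 * strsize bytes of scratch, fail with NOSCRATCH.
		 */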
5078 		for (;;) {
5079 			if (i >= size) {
5080 				DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5081 				regs[rd] = NULL;
5082 				break;
5083 			}
5084 			c = (i >= lim1) ? '\0' : dtrace_load8(s1++);
5085 			if ((d[i++] = c) == '\0') {
5086 				i--;
5087 				break;
5088 			}
5089 		}
5090 
5091 		for (;;) {
5092 			if (i >= size) {
5093 				DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5094 				regs[rd] = NULL;
5095 				break;
5096 			}
5097 
5098 			c = (j++ >= lim2) ? '\0' : dtrace_load8(s2++);
5099 			if ((d[i++] = c) == '\0')
5100 				break;
5101 		}
5102 
5103 		if (i < size) {
5104 			mstate->dtms_scratch_ptr += i;
5105 			regs[rd] = (uintptr_t)d;
5106 		}
5107 
5108 		break;
5109 	}
5110 
5111 	case DIF_SUBR_STRTOLL: {
5112 		uintptr_t s = tupregs[0].dttk_value;
5113 		uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
5114 		size_t lim;
5115 		int base = 10;
5116 
5117 		if (nargs > 1) {
5118 			if ((base = tupregs[1].dttk_value) <= 1 ||
5119 			    base > ('z' - 'a' + 1) + ('9' - '0' + 1)) {
5120 				*flags |= CPU_DTRACE_ILLOP;
5121 				break;
5122 			}
5123 		}
5124 
5125 		if (!dtrace_strcanload(s, size, &lim, mstate, vstate)) {
5126 			regs[rd] = INT64_MIN;
5127 			break;
5128 		}
5129 
5130 		regs[rd] = dtrace_strtoll((char *)s, base, lim);
5131 		break;
5132 	}
5133 
5134 	case DIF_SUBR_LLTOSTR: {
5135 		int64_t i = (int64_t)tupregs[0].dttk_value;
5136 		uint64_t val, digit;
5137 		uint64_t size = 65;	/* enough room for 2^64 in binary */
5138 		char *end = (char *)mstate->dtms_scratch_ptr + size - 1;
5139 		int base = 10;
5140 
5141 		if (nargs > 1) {
5142 			if ((base = tupregs[1].dttk_value) <= 1 ||
5143 			    base > ('z' - 'a' + 1) + ('9' - '0' + 1)) {
5144 				*flags |= CPU_DTRACE_ILLOP;
5145 				break;
5146 			}
5147 		}
5148 
5149 		val = (base == 10 && i < 0) ? i * -1 : i;
5150 
5151 		if (!DTRACE_INSCRATCH(mstate, size)) {
5152 			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5153 			regs[rd] = NULL;
5154 			break;
5155 		}
5156 
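		/*
		 * Build the string backwards from the end of the scratch
		 * region, then prepend any "0"/"0x" prefix and sign; e.g.
		 * (illustrative) lltostr(255, 16) yields "0xff".
		 */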
5157 		for (*end-- = '\0'; val; val /= base) {
5158 			if ((digit = val % base) <= '9' - '0') {
5159 				*end-- = '0' + digit;
5160 			} else {
5161 				*end-- = 'a' + (digit - ('9' - '0') - 1);
5162 			}
5163 		}
5164 
5165 		if (i == 0 && base == 16)
5166 			*end-- = '0';
5167 
5168 		if (base == 16)
5169 			*end-- = 'x';
5170 
5171 		if (i == 0 || base == 8 || base == 16)
5172 			*end-- = '0';
5173 
5174 		if (i < 0 && base == 10)
5175 			*end-- = '-';
5176 
5177 		regs[rd] = (uintptr_t)end + 1;
5178 		mstate->dtms_scratch_ptr += size;
5179 		break;
5180 	}
5181 
5182 	case DIF_SUBR_HTONS:
5183 	case DIF_SUBR_NTOHS:
5184 #ifdef _BIG_ENDIAN
5185 		regs[rd] = (uint16_t)tupregs[0].dttk_value;
5186 #else
5187 		regs[rd] = DT_BSWAP_16((uint16_t)tupregs[0].dttk_value);
5188 #endif
5189 		break;
5190 
5191 
5192 	case DIF_SUBR_HTONL:
5193 	case DIF_SUBR_NTOHL:
5194 #ifdef _BIG_ENDIAN
5195 		regs[rd] = (uint32_t)tupregs[0].dttk_value;
5196 #else
5197 		regs[rd] = DT_BSWAP_32((uint32_t)tupregs[0].dttk_value);
5198 #endif
5199 		break;
5200 
5201 
5202 	case DIF_SUBR_HTONLL:
5203 	case DIF_SUBR_NTOHLL:
5204 #ifdef _BIG_ENDIAN
5205 		regs[rd] = (uint64_t)tupregs[0].dttk_value;
5206 #else
5207 		regs[rd] = DT_BSWAP_64((uint64_t)tupregs[0].dttk_value);
5208 #endif
5209 		break;
5210 
5211 
5212 	case DIF_SUBR_DIRNAME:
5213 	case DIF_SUBR_BASENAME: {
5214 		char *dest = (char *)mstate->dtms_scratch_ptr;
5215 		uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
5216 		uintptr_t src = tupregs[0].dttk_value;
5217 		int i, j, len = dtrace_strlen((char *)src, size);
5218 		int lastbase = -1, firstbase = -1, lastdir = -1;
5219 		int start, end;
5220 
5221 		if (!dtrace_canload(src, len + 1, mstate, vstate)) {
5222 			regs[rd] = NULL;
5223 			break;
5224 		}
5225 
5226 		if (!DTRACE_INSCRATCH(mstate, size)) {
5227 			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5228 			regs[rd] = NULL;
5229 			break;
5230 		}
5231 
5232 		/*
5233 		 * The basename and dirname for a zero-length string are
5234 		 * defined to be "."
5235 		 */
5236 		if (len == 0) {
5237 			len = 1;
5238 			src = (uintptr_t)".";
5239 		}
5240 
5241 		/*
5242 		 * Start from the back of the string, moving back toward the
5243 		 * front until we see a character that isn't a slash.  That
5244 		 * character is the last character in the basename.
5245 		 */
5246 		for (i = len - 1; i >= 0; i--) {
5247 			if (dtrace_load8(src + i) != '/')
5248 				break;
5249 		}
5250 
5251 		if (i >= 0)
5252 			lastbase = i;
5253 
5254 		/*
5255 		 * Starting from the last character in the basename, move
5256 		 * towards the front until we find a slash.  The character
5257 		 * that we processed immediately before that is the first
5258 		 * character in the basename.
5259 		 */
5260 		for (; i >= 0; i--) {
5261 			if (dtrace_load8(src + i) == '/')
5262 				break;
5263 		}
5264 
5265 		if (i >= 0)
5266 			firstbase = i + 1;
5267 
5268 		/*
5269 		 * Now keep going until we find a non-slash character.  That
5270 		 * character is the last character in the dirname.
5271 		 */
5272 		for (; i >= 0; i--) {
5273 			if (dtrace_load8(src + i) != '/')
5274 				break;
5275 		}
5276 
5277 		if (i >= 0)
5278 			lastdir = i;
5279 
5280 		ASSERT(!(lastbase == -1 && firstbase != -1));
5281 		ASSERT(!(firstbase == -1 && lastdir != -1));
5282 
5283 		if (lastbase == -1) {
5284 			/*
5285 			 * We didn't find a non-slash character.  We know that
5286 			 * the length is non-zero, so the whole string must be
5287 			 * slashes.  In either the dirname or the basename
5288 			 * case, we return '/'.
5289 			 */
5290 			ASSERT(firstbase == -1);
5291 			firstbase = lastbase = lastdir = 0;
5292 		}
5293 
5294 		if (firstbase == -1) {
5295 			/*
5296 			 * The entire string consists only of a basename
5297 			 * component.  If we're looking for dirname, we need
5298 			 * to change our string to be just "."; if we're
5299 			 * looking for a basename, we'll just set the first
5300 			 * character of the basename to be 0.
5301 			 */
5302 			if (subr == DIF_SUBR_DIRNAME) {
5303 				ASSERT(lastdir == -1);
5304 				src = (uintptr_t)".";
5305 				lastdir = 0;
5306 			} else {
5307 				firstbase = 0;
5308 			}
5309 		}
5310 
5311 		if (subr == DIF_SUBR_DIRNAME) {
5312 			if (lastdir == -1) {
5313 				/*
5314 				 * We know that we have a slash in the name --
5315 				 * or lastdir would be set to 0, above.  And
5316 				 * because lastdir is -1, we know that this
5317 				 * slash must be the first character.  (That
5318 				 * is, the full string must be of the form
5319 				 * "/basename".)  In this case, the last
5320 				 * character of the directory name is 0.
5321 				 */
5322 				lastdir = 0;
5323 			}
5324 
5325 			start = 0;
5326 			end = lastdir;
5327 		} else {
5328 			ASSERT(subr == DIF_SUBR_BASENAME);
5329 			ASSERT(firstbase != -1 && lastbase != -1);
5330 			start = firstbase;
5331 			end = lastbase;
5332 		}
5333 
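		/*
		 * Copy the selected [start, end] range into scratch,
		 * truncating the result if it would exceed the string size.
		 */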
5334 		for (i = start, j = 0; i <= end && j < size - 1; i++, j++)
5335 			dest[j] = dtrace_load8(src + i);
5336 
5337 		dest[j] = '\0';
5338 		regs[rd] = (uintptr_t)dest;
5339 		mstate->dtms_scratch_ptr += size;
5340 		break;
5341 	}
5342 
5343 	case DIF_SUBR_GETF: {
5344 		uintptr_t fd = tupregs[0].dttk_value;
5345 		uf_info_t *finfo = &curthread->t_procp->p_user.u_finfo;
5346 		file_t *fp;
5347 
5348 		if (!dtrace_priv_proc(state, mstate)) {
5349 			regs[rd] = NULL;
5350 			break;
5351 		}
5352 
5353 		/*
5354 		 * This is safe because fi_nfiles only increases, and the
5355 		 * fi_list array is not freed when the array size doubles.
5356 		 * (See the comment in flist_grow() for details on the
5357 		 * management of the u_finfo structure.)
5358 		 */
5359 		fp = fd < finfo->fi_nfiles ? finfo->fi_list[fd].uf_file : NULL;
5360 
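		/*
		 * Remember the file_t in the machine state; DIF_SUBR_CLEANPATH
		 * consults dtms_getf when deciding whether to trim the zone's
		 * root path from its result.
		 */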
5361 		mstate->dtms_getf = fp;
5362 		regs[rd] = (uintptr_t)fp;
5363 		break;
5364 	}
5365 
5366 	case DIF_SUBR_CLEANPATH: {
5367 		char *dest = (char *)mstate->dtms_scratch_ptr, c;
5368 		uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
5369 		uintptr_t src = tupregs[0].dttk_value;
5370 		size_t lim;
5371 		int i = 0, j = 0;
5372 		zone_t *z;
5373 
5374 		if (!dtrace_strcanload(src, size, &lim, mstate, vstate)) {
5375 			regs[rd] = NULL;
5376 			break;
5377 		}
5378 
5379 		if (!DTRACE_INSCRATCH(mstate, size)) {
5380 			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5381 			regs[rd] = NULL;
5382 			break;
5383 		}
5384 
5385 		/*
5386 		 * Move forward, loading each character.
5387 		 */
5388 		do {
5389 			c = (i >= lim) ? '\0' : dtrace_load8(src + i++);
5390 next:
5391 			if (j + 5 >= size)	/* 5 = strlen("/..c\0") */
5392 				break;
5393 
5394 			if (c != '/') {
5395 				dest[j++] = c;
5396 				continue;
5397 			}
5398 
5399 			c = (i >= lim) ? '\0' : dtrace_load8(src + i++);
5400 
5401 			if (c == '/') {
5402 				/*
5403 				 * We have two slashes -- we can just advance
5404 				 * to the next character.
5405 				 */
5406 				goto next;
5407 			}
5408 
5409 			if (c != '.') {
5410 				/*
5411 				 * This is not "." and it's not ".." -- we can
5412 				 * just store the "/" and this character and
5413 				 * drive on.
5414 				 */
5415 				dest[j++] = '/';
5416 				dest[j++] = c;
5417 				continue;
5418 			}
5419 
5420 			c = (i >= lim) ? '\0' : dtrace_load8(src + i++);
5421 
5422 			if (c == '/') {
5423 				/*
5424 				 * This is a "/./" component.  We're not going
5425 				 * to store anything in the destination buffer;
5426 				 * we're just going to go to the next component.
5427 				 */
5428 				goto next;
5429 			}
5430 
5431 			if (c != '.') {
5432 				/*
5433 				 * This is not ".." -- we can just store the
5434 				 * "/." and this character and continue
5435 				 * processing.
5436 				 */
5437 				dest[j++] = '/';
5438 				dest[j++] = '.';
5439 				dest[j++] = c;
5440 				continue;
5441 			}
5442 
5443 			c = (i >= lim) ? '\0' : dtrace_load8(src + i++);
5444 
5445 			if (c != '/' && c != '\0') {
5446 				/*
5447 				 * This is not ".." -- it's "..[mumble]".
5448 				 * We'll store the "/.." and this character
5449 				 * and continue processing.
5450 				 */
5451 				dest[j++] = '/';
5452 				dest[j++] = '.';
5453 				dest[j++] = '.';
5454 				dest[j++] = c;
5455 				continue;
5456 			}
5457 
5458 			/*
5459 			 * This is "/../" or "/..\0".  We need to back up
5460 			 * our destination pointer until we find a "/".
5461 			 */
5462 			i--;
5463 			while (j != 0 && dest[--j] != '/')
5464 				continue;
5465 
5466 			if (c == '\0')
5467 				dest[++j] = '/';
5468 		} while (c != '\0');
5469 
5470 		dest[j] = '\0';
5471 
5472 		if (mstate->dtms_getf != NULL &&
5473 		    !(mstate->dtms_access & DTRACE_ACCESS_KERNEL) &&
5474 		    (z = state->dts_cred.dcr_cred->cr_zone) != kcred->cr_zone) {
5475 			/*
5476 			 * If we've done a getf() as a part of this ECB and we
5477 			 * don't have kernel access (and we're not in the global
5478 			 * zone), check if the path we cleaned up begins with
5479 			 * the zone's root path, and trim it off if so.  Note
5480 			 * that this is an output cleanliness issue, not a
5481 			 * security issue: knowing one's zone root path does
5482 			 * not enable privilege escalation.
5483 			 */
5484 			if (strstr(dest, z->zone_rootpath) == dest)
5485 				dest += strlen(z->zone_rootpath) - 1;
5486 		}
5487 
5488 		regs[rd] = (uintptr_t)dest;
5489 		mstate->dtms_scratch_ptr += size;
5490 		break;
5491 	}
5492 
5493 	case DIF_SUBR_INET_NTOA:
5494 	case DIF_SUBR_INET_NTOA6:
5495 	case DIF_SUBR_INET_NTOP: {
5496 		size_t size;
5497 		int af, argi, i;
5498 		char *base, *end;
5499 
5500 		if (subr == DIF_SUBR_INET_NTOP) {
5501 			af = (int)tupregs[0].dttk_value;
5502 			argi = 1;
5503 		} else {
5504 			af = subr == DIF_SUBR_INET_NTOA ? AF_INET: AF_INET6;
5505 			argi = 0;
5506 		}
5507 
5508 		if (af == AF_INET) {
5509 			ipaddr_t ip4;
5510 			uint8_t *ptr8, val;
5511 
5512 			if (!dtrace_canload(tupregs[argi].dttk_value,
5513 			    sizeof (ipaddr_t), mstate, vstate)) {
5514 				regs[rd] = NULL;
5515 				break;
5516 			}
5517 
5518 			/*
5519 			 * Safely load the IPv4 address.
5520 			 */
5521 			ip4 = dtrace_load32(tupregs[argi].dttk_value);
5522 
5523 			/*
5524 			 * Check an IPv4 string will fit in scratch.
5525 			 * Check that an IPv4 string will fit in scratch.
5526 			size = INET_ADDRSTRLEN;
5527 			if (!DTRACE_INSCRATCH(mstate, size)) {
5528 				DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5529 				regs[rd] = NULL;
5530 				break;
5531 			}
5532 			base = (char *)mstate->dtms_scratch_ptr;
5533 			end = (char *)mstate->dtms_scratch_ptr + size - 1;
5534 
5535 			/*
5536 			 * Stringify as a dotted decimal quad.
5537 			 */
5538 			*end-- = '\0';
5539 			ptr8 = (uint8_t *)&ip4;
5540 			for (i = 3; i >= 0; i--) {
5541 				val = ptr8[i];
5542 
5543 				if (val == 0) {
5544 					*end-- = '0';
5545 				} else {
5546 					for (; val; val /= 10) {
5547 						*end-- = '0' + (val % 10);
5548 					}
5549 				}
5550 
5551 				if (i > 0)
5552 					*end-- = '.';
5553 			}
5554 			ASSERT(end + 1 >= base);
5555 
5556 		} else if (af == AF_INET6) {
5557 			struct in6_addr ip6;
5558 			int firstzero, tryzero, numzero, v6end;
5559 			uint16_t val;
5560 			const char digits[] = "0123456789abcdef";
5561 
5562 			/*
5563 			 * Stringify using RFC 1884 convention 2 - 16 bit
5564 			 * hexadecimal values with a zero-run compression.
5565 			 * Lower case hexadecimal digits are used.
5566 			 * 	e.g., fe80::214:4fff:fe0b:76c8.
5567 			 * The IPv4 embedded form is returned for inet_ntop,
5568 			 * just the IPv4 string is returned for inet_ntoa6.
5569 			 */
5570 
5571 			if (!dtrace_canload(tupregs[argi].dttk_value,
5572 			    sizeof (struct in6_addr), mstate, vstate)) {
5573 				regs[rd] = NULL;
5574 				break;
5575 			}
5576 
5577 			/*
5578 			 * Safely load the IPv6 address.
5579 			 */
5580 			dtrace_bcopy(
5581 			    (void *)(uintptr_t)tupregs[argi].dttk_value,
5582 			    (void *)(uintptr_t)&ip6, sizeof (struct in6_addr));
5583 
5584 			/*
5585 			 * Check an IPv6 string will fit in scratch.
5586 			 * Check that an IPv6 string will fit in scratch.
5587 			size = INET6_ADDRSTRLEN;
5588 			if (!DTRACE_INSCRATCH(mstate, size)) {
5589 				DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5590 				regs[rd] = NULL;
5591 				break;
5592 			}
5593 			base = (char *)mstate->dtms_scratch_ptr;
5594 			end = (char *)mstate->dtms_scratch_ptr + size - 1;
5595 			*end-- = '\0';
5596 
5597 			/*
5598 			 * Find the longest run of 16 bit zero values
5599 			 * for the single allowed zero compression - "::".
5600 			 */
5601 			firstzero = -1;
5602 			tryzero = -1;
5603 			numzero = 1;
5604 			for (i = 0; i < sizeof (struct in6_addr); i++) {
5605 				if (ip6._S6_un._S6_u8[i] == 0 &&
5606 				    tryzero == -1 && i % 2 == 0) {
5607 					tryzero = i;
5608 					continue;
5609 				}
5610 
5611 				if (tryzero != -1 &&
5612 				    (ip6._S6_un._S6_u8[i] != 0 ||
5613 				    i == sizeof (struct in6_addr) - 1)) {
5614 
5615 					if (i - tryzero <= numzero) {
5616 						tryzero = -1;
5617 						continue;
5618 					}
5619 
5620 					firstzero = tryzero;
5621 					numzero = i - i % 2 - tryzero;
5622 					tryzero = -1;
5623 
5624 					if (ip6._S6_un._S6_u8[i] == 0 &&
5625 					    i == sizeof (struct in6_addr) - 1)
5626 						numzero += 2;
5627 				}
5628 			}
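			/*
			 * firstzero now holds the offset of the longest
			 * 16-bit-aligned run of zero bytes and numzero its
			 * length; these drive the "::" compression below.
			 */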
5629 			ASSERT(firstzero + numzero <= sizeof (struct in6_addr));
5630 
5631 			/*
5632 			 * Check for an IPv4 embedded address.
5633 			 */
5634 			v6end = sizeof (struct in6_addr) - 2;
5635 			if (IN6_IS_ADDR_V4MAPPED(&ip6) ||
5636 			    IN6_IS_ADDR_V4COMPAT(&ip6)) {
5637 				for (i = sizeof (struct in6_addr) - 1;
5638 				    i >= DTRACE_V4MAPPED_OFFSET; i--) {
5639 					ASSERT(end >= base);
5640 
5641 					val = ip6._S6_un._S6_u8[i];
5642 
5643 					if (val == 0) {
5644 						*end-- = '0';
5645 					} else {
5646 						for (; val; val /= 10) {
5647 							*end-- = '0' + val % 10;
5648 						}
5649 					}
5650 
5651 					if (i > DTRACE_V4MAPPED_OFFSET)
5652 						*end-- = '.';
5653 				}
5654 
5655 				if (subr == DIF_SUBR_INET_NTOA6)
5656 					goto inetout;
5657 
5658 				/*
5659 				 * Set v6end to skip the IPv4 address that
5660 				 * we have already stringified.
5661 				 */
5662 				v6end = 10;
5663 			}
5664 
5665 			/*
5666 			 * Build the IPv6 string by working through the
5667 			 * address in reverse.
5668 			 */
5669 			for (i = v6end; i >= 0; i -= 2) {
5670 				ASSERT(end >= base);
5671 
5672 				if (i == firstzero + numzero - 2) {
5673 					*end-- = ':';
5674 					*end-- = ':';
5675 					i -= numzero - 2;
5676 					continue;
5677 				}
5678 
5679 				if (i < 14 && i != firstzero - 2)
5680 					*end-- = ':';
5681 
5682 				val = (ip6._S6_un._S6_u8[i] << 8) +
5683 				    ip6._S6_un._S6_u8[i + 1];
5684 
5685 				if (val == 0) {
5686 					*end-- = '0';
5687 				} else {
5688 					for (; val; val /= 16) {
5689 						*end-- = digits[val % 16];
5690 					}
5691 				}
5692 			}
5693 			ASSERT(end + 1 >= base);
5694 
5695 		} else {
5696 			/*
5697 			 * The user didn't use AF_INET or AF_INET6.
5698 			 */
5699 			DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
5700 			regs[rd] = NULL;
5701 			break;
5702 		}
5703 
5704 inetout:	regs[rd] = (uintptr_t)end + 1;
5705 		mstate->dtms_scratch_ptr += size;
5706 		break;
5707 	}
5708 
5709 	}
5710 }
5711 
5712 /*
5713  * Emulate the execution of DTrace IR instructions specified by the given
5714  * DIF object.  This function is deliberately void of assertions as all of
5715  * the necessary checks are handled by a call to dtrace_difo_validate().
5716  */
5717 static uint64_t
5718 dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate,
5719     dtrace_vstate_t *vstate, dtrace_state_t *state)
5720 {
5721 	const dif_instr_t *text = difo->dtdo_buf;
5722 	const uint_t textlen = difo->dtdo_len;
5723 	const char *strtab = difo->dtdo_strtab;
5724 	const uint64_t *inttab = difo->dtdo_inttab;
5725 
5726 	uint64_t rval = 0;
5727 	dtrace_statvar_t *svar;
5728 	dtrace_dstate_t *dstate = &vstate->dtvs_dynvars;
5729 	dtrace_difv_t *v;
5730 	volatile uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
5731 	volatile uintptr_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval;
5732 
5733 	dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */
5734 	uint64_t regs[DIF_DIR_NREGS];
5735 	uint64_t *tmp;
5736 
5737 	uint8_t cc_n = 0, cc_z = 0, cc_v = 0, cc_c = 0;
5738 	int64_t cc_r;
5739 	uint_t pc = 0, id, opc;
5740 	uint8_t ttop = 0;
5741 	dif_instr_t instr;
5742 	uint_t r1, r2, rd;
5743 
5744 	/*
5745 	 * We stash the current DIF object into the machine state: we need it
5746 	 * for subsequent access checking.
5747 	 */
5748 	mstate->dtms_difo = difo;
5749 
5750 	regs[DIF_REG_R0] = 0; 		/* %r0 is fixed at zero */
5751 
5752 	while (pc < textlen && !(*flags & CPU_DTRACE_FAULT)) {
5753 		opc = pc;
5754 
5755 		instr = text[pc++];
5756 		r1 = DIF_INSTR_R1(instr);
5757 		r2 = DIF_INSTR_R2(instr);
5758 		rd = DIF_INSTR_RD(instr);
5759 
5760 		switch (DIF_INSTR_OP(instr)) {
5761 		case DIF_OP_OR:
5762 			regs[rd] = regs[r1] | regs[r2];
5763 			break;
5764 		case DIF_OP_XOR:
5765 			regs[rd] = regs[r1] ^ regs[r2];
5766 			break;
5767 		case DIF_OP_AND:
5768 			regs[rd] = regs[r1] & regs[r2];
5769 			break;
5770 		case DIF_OP_SLL:
5771 			regs[rd] = regs[r1] << regs[r2];
5772 			break;
5773 		case DIF_OP_SRL:
5774 			regs[rd] = regs[r1] >> regs[r2];
5775 			break;
5776 		case DIF_OP_SUB:
5777 			regs[rd] = regs[r1] - regs[r2];
5778 			break;
5779 		case DIF_OP_ADD:
5780 			regs[rd] = regs[r1] + regs[r2];
5781 			break;
5782 		case DIF_OP_MUL:
5783 			regs[rd] = regs[r1] * regs[r2];
5784 			break;
5785 		case DIF_OP_SDIV:
5786 			if (regs[r2] == 0) {
5787 				regs[rd] = 0;
5788 				*flags |= CPU_DTRACE_DIVZERO;
5789 			} else {
5790 				DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5791 				regs[rd] = (int64_t)regs[r1] /
5792 				    (int64_t)regs[r2];
5793 				DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5794 			}
5795 			break;
5796 
5797 		case DIF_OP_UDIV:
5798 			if (regs[r2] == 0) {
5799 				regs[rd] = 0;
5800 				*flags |= CPU_DTRACE_DIVZERO;
5801 			} else {
5802 				DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5803 				regs[rd] = regs[r1] / regs[r2];
5804 				DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5805 			}
5806 			break;
5807 
5808 		case DIF_OP_SREM:
5809 			if (regs[r2] == 0) {
5810 				regs[rd] = 0;
5811 				*flags |= CPU_DTRACE_DIVZERO;
5812 			} else {
5813 				DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5814 				regs[rd] = (int64_t)regs[r1] %
5815 				    (int64_t)regs[r2];
5816 				DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5817 			}
5818 			break;
5819 
5820 		case DIF_OP_UREM:
5821 			if (regs[r2] == 0) {
5822 				regs[rd] = 0;
5823 				*flags |= CPU_DTRACE_DIVZERO;
5824 			} else {
5825 				DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5826 				regs[rd] = regs[r1] % regs[r2];
5827 				DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5828 			}
5829 			break;
5830 
5831 		case DIF_OP_NOT:
5832 			regs[rd] = ~regs[r1];
5833 			break;
5834 		case DIF_OP_MOV:
5835 			regs[rd] = regs[r1];
5836 			break;
5837 		case DIF_OP_CMP:
5838 			cc_r = regs[r1] - regs[r2];
5839 			cc_n = cc_r < 0;
5840 			cc_z = cc_r == 0;
5841 			cc_v = 0;
5842 			cc_c = regs[r1] < regs[r2];
5843 			break;
5844 		case DIF_OP_TST:
5845 			cc_n = cc_v = cc_c = 0;
5846 			cc_z = regs[r1] == 0;
5847 			break;
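		/*
		 * The conditional branches below consume the condition codes
		 * set by DIF_OP_CMP and DIF_OP_TST:  the signed forms (BG,
		 * BGE, BL, BLE) combine the negative and overflow bits, and
		 * the unsigned forms (BGU, BGEU, BLU, BLEU) test the carry
		 * bit.
		 */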
5848 		case DIF_OP_BA:
5849 			pc = DIF_INSTR_LABEL(instr);
5850 			break;
5851 		case DIF_OP_BE:
5852 			if (cc_z)
5853 				pc = DIF_INSTR_LABEL(instr);
5854 			break;
5855 		case DIF_OP_BNE:
5856 			if (cc_z == 0)
5857 				pc = DIF_INSTR_LABEL(instr);
5858 			break;
5859 		case DIF_OP_BG:
5860 			if ((cc_z | (cc_n ^ cc_v)) == 0)
5861 				pc = DIF_INSTR_LABEL(instr);
5862 			break;
5863 		case DIF_OP_BGU:
5864 			if ((cc_c | cc_z) == 0)
5865 				pc = DIF_INSTR_LABEL(instr);
5866 			break;
5867 		case DIF_OP_BGE:
5868 			if ((cc_n ^ cc_v) == 0)
5869 				pc = DIF_INSTR_LABEL(instr);
5870 			break;
5871 		case DIF_OP_BGEU:
5872 			if (cc_c == 0)
5873 				pc = DIF_INSTR_LABEL(instr);
5874 			break;
5875 		case DIF_OP_BL:
5876 			if (cc_n ^ cc_v)
5877 				pc = DIF_INSTR_LABEL(instr);
5878 			break;
5879 		case DIF_OP_BLU:
5880 			if (cc_c)
5881 				pc = DIF_INSTR_LABEL(instr);
5882 			break;
5883 		case DIF_OP_BLE:
5884 			if (cc_z | (cc_n ^ cc_v))
5885 				pc = DIF_INSTR_LABEL(instr);
5886 			break;
5887 		case DIF_OP_BLEU:
5888 			if (cc_c | cc_z)
5889 				pc = DIF_INSTR_LABEL(instr);
5890 			break;
5891 		case DIF_OP_RLDSB:
5892 			if (!dtrace_canload(regs[r1], 1, mstate, vstate))
5893 				break;
5894 			/*FALLTHROUGH*/
5895 		case DIF_OP_LDSB:
5896 			regs[rd] = (int8_t)dtrace_load8(regs[r1]);
5897 			break;
5898 		case DIF_OP_RLDSH:
5899 			if (!dtrace_canload(regs[r1], 2, mstate, vstate))
5900 				break;
5901 			/*FALLTHROUGH*/
5902 		case DIF_OP_LDSH:
5903 			regs[rd] = (int16_t)dtrace_load16(regs[r1]);
5904 			break;
5905 		case DIF_OP_RLDSW:
5906 			if (!dtrace_canload(regs[r1], 4, mstate, vstate))
5907 				break;
5908 			/*FALLTHROUGH*/
5909 		case DIF_OP_LDSW:
5910 			regs[rd] = (int32_t)dtrace_load32(regs[r1]);
5911 			break;
5912 		case DIF_OP_RLDUB:
5913 			if (!dtrace_canload(regs[r1], 1, mstate, vstate))
5914 				break;
5915 			/*FALLTHROUGH*/
5916 		case DIF_OP_LDUB:
5917 			regs[rd] = dtrace_load8(regs[r1]);
5918 			break;
5919 		case DIF_OP_RLDUH:
5920 			if (!dtrace_canload(regs[r1], 2, mstate, vstate))
5921 				break;
5922 			/*FALLTHROUGH*/
5923 		case DIF_OP_LDUH:
5924 			regs[rd] = dtrace_load16(regs[r1]);
5925 			break;
5926 		case DIF_OP_RLDUW:
5927 			if (!dtrace_canload(regs[r1], 4, mstate, vstate))
5928 				break;
5929 			/*FALLTHROUGH*/
5930 		case DIF_OP_LDUW:
5931 			regs[rd] = dtrace_load32(regs[r1]);
5932 			break;
5933 		case DIF_OP_RLDX:
5934 			if (!dtrace_canload(regs[r1], 8, mstate, vstate))
5935 				break;
5936 			/*FALLTHROUGH*/
5937 		case DIF_OP_LDX:
5938 			regs[rd] = dtrace_load64(regs[r1]);
5939 			break;
5940 		case DIF_OP_ULDSB:
5941 			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5942 			regs[rd] = (int8_t)
5943 			    dtrace_fuword8((void *)(uintptr_t)regs[r1]);
5944 			DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5945 			break;
5946 		case DIF_OP_ULDSH:
5947 			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5948 			regs[rd] = (int16_t)
5949 			    dtrace_fuword16((void *)(uintptr_t)regs[r1]);
5950 			DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5951 			break;
5952 		case DIF_OP_ULDSW:
5953 			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5954 			regs[rd] = (int32_t)
5955 			    dtrace_fuword32((void *)(uintptr_t)regs[r1]);
5956 			DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5957 			break;
5958 		case DIF_OP_ULDUB:
5959 			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5960 			regs[rd] =
5961 			    dtrace_fuword8((void *)(uintptr_t)regs[r1]);
5962 			DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5963 			break;
5964 		case DIF_OP_ULDUH:
5965 			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5966 			regs[rd] =
5967 			    dtrace_fuword16((void *)(uintptr_t)regs[r1]);
5968 			DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5969 			break;
5970 		case DIF_OP_ULDUW:
5971 			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5972 			regs[rd] =
5973 			    dtrace_fuword32((void *)(uintptr_t)regs[r1]);
5974 			DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5975 			break;
5976 		case DIF_OP_ULDX:
5977 			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5978 			regs[rd] =
5979 			    dtrace_fuword64((void *)(uintptr_t)regs[r1]);
5980 			DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5981 			break;
5982 		case DIF_OP_RET:
5983 			rval = regs[rd];
5984 			pc = textlen;
5985 			break;
5986 		case DIF_OP_NOP:
5987 			break;
5988 		case DIF_OP_SETX:
5989 			regs[rd] = inttab[DIF_INSTR_INTEGER(instr)];
5990 			break;
5991 		case DIF_OP_SETS:
5992 			regs[rd] = (uint64_t)(uintptr_t)
5993 			    (strtab + DIF_INSTR_STRING(instr));
5994 			break;
5995 		case DIF_OP_SCMP: {
5996 			size_t sz = state->dts_options[DTRACEOPT_STRSIZE];
5997 			uintptr_t s1 = regs[r1];
5998 			uintptr_t s2 = regs[r2];
5999 			size_t lim1, lim2;
6000 
6001 			if (s1 != NULL &&
6002 			    !dtrace_strcanload(s1, sz, &lim1, mstate, vstate))
6003 				break;
6004 			if (s2 != NULL &&
6005 			    !dtrace_strcanload(s2, sz, &lim2, mstate, vstate))
6006 				break;
6007 
6008 			cc_r = dtrace_strncmp((char *)s1, (char *)s2,
6009 			    MIN(lim1, lim2));
6010 
6011 			cc_n = cc_r < 0;
6012 			cc_z = cc_r == 0;
6013 			cc_v = cc_c = 0;
6014 			break;
6015 		}
6016 		case DIF_OP_LDGA:
6017 			regs[rd] = dtrace_dif_variable(mstate, state,
6018 			    r1, regs[r2]);
6019 			break;
6020 		case DIF_OP_LDGS:
6021 			id = DIF_INSTR_VAR(instr);
6022 
6023 			if (id >= DIF_VAR_OTHER_UBASE) {
6024 				uintptr_t a;
6025 
6026 				id -= DIF_VAR_OTHER_UBASE;
6027 				svar = vstate->dtvs_globals[id];
6028 				ASSERT(svar != NULL);
6029 				v = &svar->dtsv_var;
6030 
6031 				if (!(v->dtdv_type.dtdt_flags & DIF_TF_BYREF)) {
6032 					regs[rd] = svar->dtsv_data;
6033 					break;
6034 				}
6035 
6036 				a = (uintptr_t)svar->dtsv_data;
6037 
6038 				if (*(uint8_t *)a == UINT8_MAX) {
6039 					/*
6040 					 * If the 0th byte is set to UINT8_MAX
6041 					 * then this is to be treated as a
6042 					 * reference to a NULL variable.
6043 					 */
6044 					regs[rd] = NULL;
6045 				} else {
6046 					regs[rd] = a + sizeof (uint64_t);
6047 				}
6048 
6049 				break;
6050 			}
6051 
6052 			regs[rd] = dtrace_dif_variable(mstate, state, id, 0);
6053 			break;
6054 
6055 		case DIF_OP_STGA:
6056 			dtrace_dif_variable_write(mstate, state, r1, regs[r2],
6057 			    regs[rd]);
6058 			break;
6059 
6060 		case DIF_OP_STGS:
6061 			id = DIF_INSTR_VAR(instr);
6062 
6063 			ASSERT(id >= DIF_VAR_OTHER_UBASE);
6064 			id -= DIF_VAR_OTHER_UBASE;
6065 
6066 			VERIFY(id < vstate->dtvs_nglobals);
6067 			svar = vstate->dtvs_globals[id];
6068 			ASSERT(svar != NULL);
6069 			v = &svar->dtsv_var;
6070 
6071 			if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
6072 				uintptr_t a = (uintptr_t)svar->dtsv_data;
6073 				size_t lim;
6074 
6075 				ASSERT(a != NULL);
6076 				ASSERT(svar->dtsv_size != 0);
6077 
6078 				if (regs[rd] == NULL) {
6079 					*(uint8_t *)a = UINT8_MAX;
6080 					break;
6081 				} else {
6082 					*(uint8_t *)a = 0;
6083 					a += sizeof (uint64_t);
6084 				}
6085 				if (!dtrace_vcanload(
6086 				    (void *)(uintptr_t)regs[rd], &v->dtdv_type,
6087 				    &lim, mstate, vstate))
6088 					break;
6089 
6090 				dtrace_vcopy((void *)(uintptr_t)regs[rd],
6091 				    (void *)a, &v->dtdv_type, lim);
6092 				break;
6093 			}
6094 
6095 			svar->dtsv_data = regs[rd];
6096 			break;
6097 
6098 		case DIF_OP_LDTA:
6099 			/*
6100 			 * There are no DTrace built-in thread-local arrays at
6101 			 * present.  This opcode is saved for future work.
6102 			 */
6103 			*flags |= CPU_DTRACE_ILLOP;
6104 			regs[rd] = 0;
6105 			break;
6106 
6107 		case DIF_OP_LDLS:
6108 			id = DIF_INSTR_VAR(instr);
6109 
6110 			if (id < DIF_VAR_OTHER_UBASE) {
6111 				/*
6112 				 * For now, this has no meaning.
6113 				 */
6114 				regs[rd] = 0;
6115 				break;
6116 			}
6117 
6118 			id -= DIF_VAR_OTHER_UBASE;
6119 
6120 			ASSERT(id < vstate->dtvs_nlocals);
6121 			ASSERT(vstate->dtvs_locals != NULL);
6122 
6123 			svar = vstate->dtvs_locals[id];
6124 			ASSERT(svar != NULL);
6125 			v = &svar->dtsv_var;
6126 
6127 			if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
6128 				uintptr_t a = (uintptr_t)svar->dtsv_data;
6129 				size_t sz = v->dtdv_type.dtdt_size;
6130 
6131 				sz += sizeof (uint64_t);
6132 				ASSERT(svar->dtsv_size == NCPU * sz);
6133 				a += CPU->cpu_id * sz;
6134 
6135 				if (*(uint8_t *)a == UINT8_MAX) {
6136 					/*
6137 					 * If the 0th byte is set to UINT8_MAX
6138 					 * then this is to be treated as a
6139 					 * reference to a NULL variable.
6140 					 */
6141 					regs[rd] = NULL;
6142 				} else {
6143 					regs[rd] = a + sizeof (uint64_t);
6144 				}
6145 
6146 				break;
6147 			}
6148 
6149 			ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t));
6150 			tmp = (uint64_t *)(uintptr_t)svar->dtsv_data;
6151 			regs[rd] = tmp[CPU->cpu_id];
6152 			break;
6153 
6154 		case DIF_OP_STLS:
6155 			id = DIF_INSTR_VAR(instr);
6156 
6157 			ASSERT(id >= DIF_VAR_OTHER_UBASE);
6158 			id -= DIF_VAR_OTHER_UBASE;
6159 			VERIFY(id < vstate->dtvs_nlocals);
6160 
6161 			ASSERT(vstate->dtvs_locals != NULL);
6162 			svar = vstate->dtvs_locals[id];
6163 			ASSERT(svar != NULL);
6164 			v = &svar->dtsv_var;
6165 
6166 			if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
6167 				uintptr_t a = (uintptr_t)svar->dtsv_data;
6168 				size_t sz = v->dtdv_type.dtdt_size;
6169 				size_t lim;
6170 
6171 				sz += sizeof (uint64_t);
6172 				ASSERT(svar->dtsv_size == NCPU * sz);
6173 				a += CPU->cpu_id * sz;
6174 
6175 				if (regs[rd] == NULL) {
6176 					*(uint8_t *)a = UINT8_MAX;
6177 					break;
6178 				} else {
6179 					*(uint8_t *)a = 0;
6180 					a += sizeof (uint64_t);
6181 				}
6182 
6183 				if (!dtrace_vcanload(
6184 				    (void *)(uintptr_t)regs[rd], &v->dtdv_type,
6185 				    &lim, mstate, vstate))
6186 					break;
6187 
6188 				dtrace_vcopy((void *)(uintptr_t)regs[rd],
6189 				    (void *)a, &v->dtdv_type, lim);
6190 				break;
6191 			}
6192 
6193 			ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t));
6194 			tmp = (uint64_t *)(uintptr_t)svar->dtsv_data;
6195 			tmp[CPU->cpu_id] = regs[rd];
6196 			break;
6197 
6198 		case DIF_OP_LDTS: {
6199 			dtrace_dynvar_t *dvar;
6200 			dtrace_key_t *key;
6201 
6202 			id = DIF_INSTR_VAR(instr);
6203 			ASSERT(id >= DIF_VAR_OTHER_UBASE);
6204 			id -= DIF_VAR_OTHER_UBASE;
6205 			v = &vstate->dtvs_tlocals[id];
6206 
6207 			key = &tupregs[DIF_DTR_NREGS];
6208 			key[0].dttk_value = (uint64_t)id;
6209 			key[0].dttk_size = 0;
6210 			DTRACE_TLS_THRKEY(key[1].dttk_value);
6211 			key[1].dttk_size = 0;
6212 
6213 			dvar = dtrace_dynvar(dstate, 2, key,
6214 			    sizeof (uint64_t), DTRACE_DYNVAR_NOALLOC,
6215 			    mstate, vstate);
6216 
6217 			if (dvar == NULL) {
6218 				regs[rd] = 0;
6219 				break;
6220 			}
6221 
6222 			if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
6223 				regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data;
6224 			} else {
6225 				regs[rd] = *((uint64_t *)dvar->dtdv_data);
6226 			}
6227 
6228 			break;
6229 		}
6230 
6231 		case DIF_OP_STTS: {
6232 			dtrace_dynvar_t *dvar;
6233 			dtrace_key_t *key;
6234 
6235 			id = DIF_INSTR_VAR(instr);
6236 			ASSERT(id >= DIF_VAR_OTHER_UBASE);
6237 			id -= DIF_VAR_OTHER_UBASE;
6238 			VERIFY(id < vstate->dtvs_ntlocals);
6239 
6240 			key = &tupregs[DIF_DTR_NREGS];
6241 			key[0].dttk_value = (uint64_t)id;
6242 			key[0].dttk_size = 0;
6243 			DTRACE_TLS_THRKEY(key[1].dttk_value);
6244 			key[1].dttk_size = 0;
6245 			v = &vstate->dtvs_tlocals[id];
6246 
6247 			dvar = dtrace_dynvar(dstate, 2, key,
6248 			    v->dtdv_type.dtdt_size > sizeof (uint64_t) ?
6249 			    v->dtdv_type.dtdt_size : sizeof (uint64_t),
6250 			    regs[rd] ? DTRACE_DYNVAR_ALLOC :
6251 			    DTRACE_DYNVAR_DEALLOC, mstate, vstate);
6252 
6253 			/*
6254 			 * Given that we're storing to thread-local data,
6255 			 * we need to flush our predicate cache.
6256 			 */
6257 			curthread->t_predcache = NULL;
6258 
6259 			if (dvar == NULL)
6260 				break;
6261 
6262 			if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
6263 				size_t lim;
6264 
6265 				if (!dtrace_vcanload(
6266 				    (void *)(uintptr_t)regs[rd],
6267 				    &v->dtdv_type, &lim, mstate, vstate))
6268 					break;
6269 
6270 				dtrace_vcopy((void *)(uintptr_t)regs[rd],
6271 				    dvar->dtdv_data, &v->dtdv_type, lim);
6272 			} else {
6273 				*((uint64_t *)dvar->dtdv_data) = regs[rd];
6274 			}
6275 
6276 			break;
6277 		}
6278 
6279 		case DIF_OP_SRA:
6280 			regs[rd] = (int64_t)regs[r1] >> regs[r2];
6281 			break;
6282 
6283 		case DIF_OP_CALL:
6284 			dtrace_dif_subr(DIF_INSTR_SUBR(instr), rd,
6285 			    regs, tupregs, ttop, mstate, state);
6286 			break;
6287 
6288 		case DIF_OP_PUSHTR:
6289 			if (ttop == DIF_DTR_NREGS) {
6290 				*flags |= CPU_DTRACE_TUPOFLOW;
6291 				break;
6292 			}
6293 
6294 			if (r1 == DIF_TYPE_STRING) {
6295 				/*
6296 				 * If this is a string type and the size is 0,
6297 				 * we'll use the system-wide default string
6298 				 * size.  Note that we are _not_ looking at
6299 				 * the value of the DTRACEOPT_STRSIZE option;
6300 				 * had this been set, we would expect to have
6301 				 * a non-zero size value in the "pushtr".
6302 				 */
6303 				tupregs[ttop].dttk_size =
6304 				    dtrace_strlen((char *)(uintptr_t)regs[rd],
6305 				    regs[r2] ? regs[r2] :
6306 				    dtrace_strsize_default) + 1;
6307 			} else {
6308 				if (regs[r2] > LONG_MAX) {
6309 					*flags |= CPU_DTRACE_ILLOP;
6310 					break;
6311 				}
6312 
6313 				tupregs[ttop].dttk_size = regs[r2];
6314 			}
6315 
6316 			tupregs[ttop++].dttk_value = regs[rd];
6317 			break;
6318 
6319 		case DIF_OP_PUSHTV:
6320 			if (ttop == DIF_DTR_NREGS) {
6321 				*flags |= CPU_DTRACE_TUPOFLOW;
6322 				break;
6323 			}
6324 
6325 			tupregs[ttop].dttk_value = regs[rd];
6326 			tupregs[ttop++].dttk_size = 0;
6327 			break;
6328 
6329 		case DIF_OP_POPTS:
6330 			if (ttop != 0)
6331 				ttop--;
6332 			break;
6333 
6334 		case DIF_OP_FLUSHTS:
6335 			ttop = 0;
6336 			break;
6337 
6338 		case DIF_OP_LDGAA:
6339 		case DIF_OP_LDTAA: {
6340 			dtrace_dynvar_t *dvar;
6341 			dtrace_key_t *key = tupregs;
6342 			uint_t nkeys = ttop;
6343 
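			/*
			 * The variable identifier -- and, for thread-local
			 * associative arrays, the thread key -- is appended
			 * to the caller's tuple to form the dynamic variable
			 * key.
			 */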
6344 			id = DIF_INSTR_VAR(instr);
6345 			ASSERT(id >= DIF_VAR_OTHER_UBASE);
6346 			id -= DIF_VAR_OTHER_UBASE;
6347 
6348 			key[nkeys].dttk_value = (uint64_t)id;
6349 			key[nkeys++].dttk_size = 0;
6350 
6351 			if (DIF_INSTR_OP(instr) == DIF_OP_LDTAA) {
6352 				DTRACE_TLS_THRKEY(key[nkeys].dttk_value);
6353 				key[nkeys++].dttk_size = 0;
6354 				VERIFY(id < vstate->dtvs_ntlocals);
6355 				v = &vstate->dtvs_tlocals[id];
6356 			} else {
6357 				VERIFY(id < vstate->dtvs_nglobals);
6358 				v = &vstate->dtvs_globals[id]->dtsv_var;
6359 			}
6360 
6361 			dvar = dtrace_dynvar(dstate, nkeys, key,
6362 			    v->dtdv_type.dtdt_size > sizeof (uint64_t) ?
6363 			    v->dtdv_type.dtdt_size : sizeof (uint64_t),
6364 			    DTRACE_DYNVAR_NOALLOC, mstate, vstate);
6365 
6366 			if (dvar == NULL) {
6367 				regs[rd] = 0;
6368 				break;
6369 			}
6370 
6371 			if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
6372 				regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data;
6373 			} else {
6374 				regs[rd] = *((uint64_t *)dvar->dtdv_data);
6375 			}
6376 
6377 			break;
6378 		}
6379 
6380 		case DIF_OP_STGAA:
6381 		case DIF_OP_STTAA: {
6382 			dtrace_dynvar_t *dvar;
6383 			dtrace_key_t *key = tupregs;
6384 			uint_t nkeys = ttop;
6385 
6386 			id = DIF_INSTR_VAR(instr);
6387 			ASSERT(id >= DIF_VAR_OTHER_UBASE);
6388 			id -= DIF_VAR_OTHER_UBASE;
6389 
6390 			key[nkeys].dttk_value = (uint64_t)id;
6391 			key[nkeys++].dttk_size = 0;
6392 
6393 			if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) {
6394 				DTRACE_TLS_THRKEY(key[nkeys].dttk_value);
6395 				key[nkeys++].dttk_size = 0;
6396 				VERIFY(id < vstate->dtvs_ntlocals);
6397 				v = &vstate->dtvs_tlocals[id];
6398 			} else {
6399 				VERIFY(id < vstate->dtvs_nglobals);
6400 				v = &vstate->dtvs_globals[id]->dtsv_var;
6401 			}
6402 
6403 			dvar = dtrace_dynvar(dstate, nkeys, key,
6404 			    v->dtdv_type.dtdt_size > sizeof (uint64_t) ?
6405 			    v->dtdv_type.dtdt_size : sizeof (uint64_t),
6406 			    regs[rd] ? DTRACE_DYNVAR_ALLOC :
6407 			    DTRACE_DYNVAR_DEALLOC, mstate, vstate);
6408 
6409 			if (dvar == NULL)
6410 				break;
6411 
6412 			if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
6413 				size_t lim;
6414 
6415 				if (!dtrace_vcanload(
6416 				    (void *)(uintptr_t)regs[rd], &v->dtdv_type,
6417 				    &lim, mstate, vstate))
6418 					break;
6419 
6420 				dtrace_vcopy((void *)(uintptr_t)regs[rd],
6421 				    dvar->dtdv_data, &v->dtdv_type, lim);
6422 			} else {
6423 				*((uint64_t *)dvar->dtdv_data) = regs[rd];
6424 			}
6425 
6426 			break;
6427 		}
6428 
6429 		case DIF_OP_ALLOCS: {
6430 			uintptr_t ptr = P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
6431 			size_t size = ptr - mstate->dtms_scratch_ptr + regs[r1];
6432 
6433 			/*
6434 			 * Rounding up the user allocation size could have
6435 			 * overflowed large, bogus allocations (like -1ULL) to
6436 			 * 0.
6437 			 */
6438 			if (size < regs[r1] ||
6439 			    !DTRACE_INSCRATCH(mstate, size)) {
6440 				DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
6441 				regs[rd] = NULL;
6442 				break;
6443 			}
6444 
6445 			dtrace_bzero((void *) mstate->dtms_scratch_ptr, size);
6446 			mstate->dtms_scratch_ptr += size;
6447 			regs[rd] = ptr;
6448 			break;
6449 		}
6450 
6451 		case DIF_OP_COPYS:
6452 			if (!dtrace_canstore(regs[rd], regs[r2],
6453 			    mstate, vstate)) {
6454 				*flags |= CPU_DTRACE_BADADDR;
6455 				*illval = regs[rd];
6456 				break;
6457 			}
6458 
6459 			if (!dtrace_canload(regs[r1], regs[r2], mstate, vstate))
6460 				break;
6461 
6462 			dtrace_bcopy((void *)(uintptr_t)regs[r1],
6463 			    (void *)(uintptr_t)regs[rd], (size_t)regs[r2]);
6464 			break;
6465 
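		/*
		 * The store instructions below validate the destination with
		 * dtrace_canstore() and, for multi-byte stores, require
		 * natural alignment before writing.
		 */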
6466 		case DIF_OP_STB:
6467 			if (!dtrace_canstore(regs[rd], 1, mstate, vstate)) {
6468 				*flags |= CPU_DTRACE_BADADDR;
6469 				*illval = regs[rd];
6470 				break;
6471 			}
6472 			*((uint8_t *)(uintptr_t)regs[rd]) = (uint8_t)regs[r1];
6473 			break;
6474 
6475 		case DIF_OP_STH:
6476 			if (!dtrace_canstore(regs[rd], 2, mstate, vstate)) {
6477 				*flags |= CPU_DTRACE_BADADDR;
6478 				*illval = regs[rd];
6479 				break;
6480 			}
6481 			if (regs[rd] & 1) {
6482 				*flags |= CPU_DTRACE_BADALIGN;
6483 				*illval = regs[rd];
6484 				break;
6485 			}
6486 			*((uint16_t *)(uintptr_t)regs[rd]) = (uint16_t)regs[r1];
6487 			break;
6488 
6489 		case DIF_OP_STW:
6490 			if (!dtrace_canstore(regs[rd], 4, mstate, vstate)) {
6491 				*flags |= CPU_DTRACE_BADADDR;
6492 				*illval = regs[rd];
6493 				break;
6494 			}
6495 			if (regs[rd] & 3) {
6496 				*flags |= CPU_DTRACE_BADALIGN;
6497 				*illval = regs[rd];
6498 				break;
6499 			}
6500 			*((uint32_t *)(uintptr_t)regs[rd]) = (uint32_t)regs[r1];
6501 			break;
6502 
6503 		case DIF_OP_STX:
6504 			if (!dtrace_canstore(regs[rd], 8, mstate, vstate)) {
6505 				*flags |= CPU_DTRACE_BADADDR;
6506 				*illval = regs[rd];
6507 				break;
6508 			}
6509 			if (regs[rd] & 7) {
6510 				*flags |= CPU_DTRACE_BADALIGN;
6511 				*illval = regs[rd];
6512 				break;
6513 			}
6514 			*((uint64_t *)(uintptr_t)regs[rd]) = regs[r1];
6515 			break;
6516 		}
6517 	}
6518 
6519 	if (!(*flags & CPU_DTRACE_FAULT))
6520 		return (rval);
6521 
6522 	mstate->dtms_fltoffs = opc * sizeof (dif_instr_t);
6523 	mstate->dtms_present |= DTRACE_MSTATE_FLTOFFS;
6524 
6525 	return (0);
6526 }
6527 
6528 static void
6529 dtrace_action_breakpoint(dtrace_ecb_t *ecb)
6530 {
6531 	dtrace_probe_t *probe = ecb->dte_probe;
6532 	dtrace_provider_t *prov = probe->dtpr_provider;
6533 	char c[DTRACE_FULLNAMELEN + 80], *str;
6534 	char *msg = "dtrace: breakpoint action at probe ";
6535 	char *ecbmsg = " (ecb ";
6536 	uintptr_t mask = (0xf << (sizeof (uintptr_t) * NBBY / 4));
6537 	uintptr_t val = (uintptr_t)ecb;
6538 	int shift = (sizeof (uintptr_t) * NBBY) - 4, i = 0;
6539 
6540 	if (dtrace_destructive_disallow)
6541 		return;
6542 
6543 	/*
6544 	 * It's impossible to be taking action on the NULL probe.
6545 	 */
6546 	ASSERT(probe != NULL);
6547 
6548 	/*
6549 	 * This is a poor man's (destitute man's?) sprintf():  we want to
6550 	 * print the provider name, module name, function name and name of
6551 	 * the probe, along with the hex address of the ECB with the breakpoint
6552 	 * action -- all of which we must place in the character buffer by
6553 	 * hand.
6554 	 */
6555 	while (*msg != '\0')
6556 		c[i++] = *msg++;
6557 
6558 	for (str = prov->dtpv_name; *str != '\0'; str++)
6559 		c[i++] = *str;
6560 	c[i++] = ':';
6561 
6562 	for (str = probe->dtpr_mod; *str != '\0'; str++)
6563 		c[i++] = *str;
6564 	c[i++] = ':';
6565 
6566 	for (str = probe->dtpr_func; *str != '\0'; str++)
6567 		c[i++] = *str;
6568 	c[i++] = ':';
6569 
6570 	for (str = probe->dtpr_name; *str != '\0'; str++)
6571 		c[i++] = *str;
6572 
6573 	while (*ecbmsg != '\0')
6574 		c[i++] = *ecbmsg++;
6575 
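	/*
	 * Render the ECB address in hexadecimal, suppressing leading zero
	 * nibbles.
	 */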
6576 	while (shift >= 0) {
6577 		mask = (uintptr_t)0xf << shift;
6578 
6579 		if (val >= ((uintptr_t)1 << shift))
6580 			c[i++] = "0123456789abcdef"[(val & mask) >> shift];
6581 		shift -= 4;
6582 	}
6583 
6584 	c[i++] = ')';
6585 	c[i] = '\0';
6586 
6587 	debug_enter(c);
6588 }
6589 
6590 static void
6591 dtrace_action_panic(dtrace_ecb_t *ecb)
6592 {
6593 	dtrace_probe_t *probe = ecb->dte_probe;
6594 
6595 	/*
6596 	 * It's impossible to be taking action on the NULL probe.
6597 	 */
6598 	ASSERT(probe != NULL);
6599 
6600 	if (dtrace_destructive_disallow)
6601 		return;
6602 
6603 	if (dtrace_panicked != NULL)
6604 		return;
6605 
6606 	if (dtrace_casptr(&dtrace_panicked, NULL, curthread) != NULL)
6607 		return;
6608 
6609 	/*
6610 	 * We won the right to panic.  (We want to be sure that only one
6611 	 * thread calls panic() from dtrace_probe(), and that panic() is
6612 	 * called exactly once.)
6613 	 */
6614 	dtrace_panic("dtrace: panic action at probe %s:%s:%s:%s (ecb %p)",
6615 	    probe->dtpr_provider->dtpv_name, probe->dtpr_mod,
6616 	    probe->dtpr_func, probe->dtpr_name, (void *)ecb);
6617 }
6618 
6619 static void
6620 dtrace_action_raise(uint64_t sig)
6621 {
6622 	if (dtrace_destructive_disallow)
6623 		return;
6624 
6625 	if (sig >= NSIG) {
6626 		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
6627 		return;
6628 	}
6629 
6630 	/*
6631 	 * raise() has a queue depth of 1 -- we ignore all subsequent
6632 	 * invocations of the raise() action.
6633 	 */
6634 	if (curthread->t_dtrace_sig == 0)
6635 		curthread->t_dtrace_sig = (uint8_t)sig;
6636 
6637 	curthread->t_sig_check = 1;
6638 	aston(curthread);
6639 }
6640 
6641 static void
6642 dtrace_action_stop(void)
6643 {
6644 	if (dtrace_destructive_disallow)
6645 		return;
6646 
6647 	if (!curthread->t_dtrace_stop) {
6648 		curthread->t_dtrace_stop = 1;
6649 		curthread->t_sig_check = 1;
6650 		aston(curthread);
6651 	}
6652 }
6653 
6654 static void
6655 dtrace_action_chill(dtrace_mstate_t *mstate, hrtime_t val)
6656 {
6657 	hrtime_t now;
6658 	volatile uint16_t *flags;
6659 	cpu_t *cpu = CPU;
6660 
6661 	if (dtrace_destructive_disallow)
6662 		return;
6663 
6664 	flags = (volatile uint16_t *)&cpu_core[cpu->cpu_id].cpuc_dtrace_flags;
6665 
6666 	now = dtrace_gethrtime();
6667 
6668 	if (now - cpu->cpu_dtrace_chillmark > dtrace_chill_interval) {
6669 		/*
6670 		 * We need to advance the mark to the current time.
6671 		 */
6672 		cpu->cpu_dtrace_chillmark = now;
6673 		cpu->cpu_dtrace_chilled = 0;
6674 	}
6675 
6676 	/*
6677 	 * Now check to see if the requested chill time would take us over
6678 	 * the maximum amount of time allowed in the chill interval.  (Or
6679 	 * worse, if the calculation itself induces overflow.)
6680 	 */
6681 	if (cpu->cpu_dtrace_chilled + val > dtrace_chill_max ||
6682 	    cpu->cpu_dtrace_chilled + val < cpu->cpu_dtrace_chilled) {
6683 		*flags |= CPU_DTRACE_ILLOP;
6684 		return;
6685 	}
6686 
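	/*
	 * Busy-wait for the requested chill interval.
	 */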
6687 	while (dtrace_gethrtime() - now < val)
6688 		continue;
6689 
6690 	/*
6691 	 * Normally, we assure that the value of the variable "timestamp" does
6692 	 * not change within an ECB.  The presence of chill() represents an
6693 	 * exception to this rule, however.
6694 	 */
6695 	mstate->dtms_present &= ~DTRACE_MSTATE_TIMESTAMP;
6696 	cpu->cpu_dtrace_chilled += val;
6697 }
6698 
6699 static void
6700 dtrace_action_ustack(dtrace_mstate_t *mstate, dtrace_state_t *state,
6701     uint64_t *buf, uint64_t arg)
6702 {
6703 	int nframes = DTRACE_USTACK_NFRAMES(arg);
6704 	int strsize = DTRACE_USTACK_STRSIZE(arg);
6705 	uint64_t *pcs = &buf[1], *fps;
6706 	char *str = (char *)&pcs[nframes];
6707 	int size, offs = 0, i, j;
6708 	size_t rem;
6709 	uintptr_t old = mstate->dtms_scratch_ptr, saved;
6710 	uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
6711 	char *sym;
6712 
6713 	/*
6714 	 * Should be taking a faster path if string space has not been
6715 	 * allocated.
6716 	 */
6717 	ASSERT(strsize != 0);
6718 
6719 	/*
6720 	 * We will first allocate some temporary space for the frame pointers.
6721 	 */
6722 	fps = (uint64_t *)P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
6723 	size = (uintptr_t)fps - mstate->dtms_scratch_ptr +
6724 	    (nframes * sizeof (uint64_t));
6725 
6726 	if (!DTRACE_INSCRATCH(mstate, size)) {
6727 		/*
6728 		 * Not enough room for our frame pointers -- need to indicate
6729 		 * that we ran out of scratch space.
6730 		 */
6731 		DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
6732 		return;
6733 	}
6734 
6735 	mstate->dtms_scratch_ptr += size;
6736 	saved = mstate->dtms_scratch_ptr;
6737 
6738 	/*
6739 	 * Now get a stack with both program counters and frame pointers.
6740 	 */
6741 	DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6742 	dtrace_getufpstack(buf, fps, nframes + 1);
6743 	DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6744 
6745 	/*
6746 	 * If that faulted, we're cooked.
6747 	 */
6748 	if (*flags & CPU_DTRACE_FAULT)
6749 		goto out;
6750 
6751 	/*
6752 	 * Now we want to walk up the stack, calling the USTACK helper.  For
6753 	 * each iteration, we restore the scratch pointer.
6754 	 */
6755 	for (i = 0; i < nframes; i++) {
6756 		mstate->dtms_scratch_ptr = saved;
6757 
6758 		if (offs >= strsize)
6759 			break;
6760 
6761 		sym = (char *)(uintptr_t)dtrace_helper(
6762 		    DTRACE_HELPER_ACTION_USTACK,
6763 		    mstate, state, pcs[i], fps[i]);
6764 
6765 		/*
6766 		 * If we faulted while running the helper, we're going to
6767 		 * clear the fault and null out the corresponding string.
6768 		 */
6769 		if (*flags & CPU_DTRACE_FAULT) {
6770 			*flags &= ~CPU_DTRACE_FAULT;
6771 			str[offs++] = '\0';
6772 			continue;
6773 		}
6774 
6775 		if (sym == NULL) {
6776 			str[offs++] = '\0';
6777 			continue;
6778 		}
6779 
6780 		if (!dtrace_strcanload((uintptr_t)sym, strsize, &rem, mstate,
6781 		    &(state->dts_vstate))) {
6782 			str[offs++] = '\0';
6783 			continue;
6784 		}
6785 
6786 		DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6787 
6788 		/*
6789 		 * Now copy in the string that the helper returned to us.
6790 		 */
6791 		for (j = 0; offs + j < strsize && j < rem; j++) {
6792 			if ((str[offs + j] = sym[j]) == '\0')
6793 				break;
6794 		}
6795 
6796 		DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6797 
6798 		offs += j + 1;
6799 	}
6800 
6801 	if (offs >= strsize) {
6802 		/*
6803 		 * If we didn't have room for all of the strings, we don't
6804 		 * abort processing -- this needn't be a fatal error -- but we
6805 		 * still want to increment a counter (dts_stkstroverflows) to
6806 		 * allow this condition to be warned about.  (If this is from
6807 		 * a jstack() action, it is easily tuned via jstackstrsize.)
6808 		 */
6809 		dtrace_error(&state->dts_stkstroverflows);
6810 	}
6811 
6812 	while (offs < strsize)
6813 		str[offs++] = '\0';
6814 
6815 out:
6816 	mstate->dtms_scratch_ptr = old;
6817 }
6818 
6819 static void
6820 dtrace_store_by_ref(dtrace_difo_t *dp, caddr_t tomax, size_t size,
6821     size_t *valoffsp, uint64_t *valp, uint64_t end, int intuple, int dtkind)
6822 {
6823 	volatile uint16_t *flags;
6824 	uint64_t val = *valp;
6825 	size_t valoffs = *valoffsp;
6826 
6827 	flags = (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
6828 	ASSERT(dtkind == DIF_TF_BYREF || dtkind == DIF_TF_BYUREF);
6829 
6830 	/*
6831 	 * If this is a string, we're going to only load until we find the zero
6832 	 * byte -- after which we'll store zero bytes.
6833 	 */
6834 	if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) {
6835 		char c = '\0' + 1;
6836 		size_t s;
6837 
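		/*
		 * c was initialized to a non-NUL value above so that the
		 * first iteration performs a load.
		 */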
6838 		for (s = 0; s < size; s++) {
6839 			if (c != '\0' && dtkind == DIF_TF_BYREF) {
6840 				c = dtrace_load8(val++);
6841 			} else if (c != '\0' && dtkind == DIF_TF_BYUREF) {
6842 				DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6843 				c = dtrace_fuword8((void *)(uintptr_t)val++);
6844 				DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6845 				if (*flags & CPU_DTRACE_FAULT)
6846 					break;
6847 			}
6848 
6849 			DTRACE_STORE(uint8_t, tomax, valoffs++, c);
6850 
6851 			if (c == '\0' && intuple)
6852 				break;
6853 		}
6854 	} else {
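		/*
		 * This is not a string:  copy the by-ref (or by-uref) data
		 * byte-by-byte until we reach the end of the record.
		 */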
6855 		uint8_t c;
6856 		while (valoffs < end) {
6857 			if (dtkind == DIF_TF_BYREF) {
6858 				c = dtrace_load8(val++);
6859 			} else if (dtkind == DIF_TF_BYUREF) {
6860 				DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6861 				c = dtrace_fuword8((void *)(uintptr_t)val++);
6862 				DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6863 				if (*flags & CPU_DTRACE_FAULT)
6864 					break;
6865 			}
6866 
6867 			DTRACE_STORE(uint8_t, tomax,
6868 			    valoffs++, c);
6869 		}
6870 	}
6871 
6872 	*valp = val;
6873 	*valoffsp = valoffs;
6874 }
6875 
6876 /*
6877  * If you're looking for the epicenter of DTrace, you just found it.  This
6878  * is the function called by the provider to fire a probe -- from which all
6879  * subsequent probe-context DTrace activity emanates.
6880  */
6881 void
6882 dtrace_probe(dtrace_id_t id, uintptr_t arg0, uintptr_t arg1,
6883     uintptr_t arg2, uintptr_t arg3, uintptr_t arg4)
6884 {
6885 	processorid_t cpuid;
6886 	dtrace_icookie_t cookie;
6887 	dtrace_probe_t *probe;
6888 	dtrace_mstate_t mstate;
6889 	dtrace_ecb_t *ecb;
6890 	dtrace_action_t *act;
6891 	intptr_t offs;
6892 	size_t size;
6893 	int vtime, onintr;
6894 	volatile uint16_t *flags;
6895 	hrtime_t now, end;
6896 
6897 	/*
6898 	 * Kick out immediately if this CPU is still being born (in which case
6899 	 * curthread will be set to -1) or the current thread can't allow
6900 	 * probes in its current context.
6901 	 */
6902 	if (((uintptr_t)curthread & 1) || (curthread->t_flag & T_DONTDTRACE))
6903 		return;
6904 
6905 	cookie = dtrace_interrupt_disable();
6906 	probe = dtrace_probes[id - 1];
6907 	cpuid = CPU->cpu_id;
6908 	onintr = CPU_ON_INTR(CPU);
6909 
6910 	CPU->cpu_dtrace_probes++;
6911 
6912 	if (!onintr && probe->dtpr_predcache != DTRACE_CACHEIDNONE &&
6913 	    probe->dtpr_predcache == curthread->t_predcache) {
6914 		/*
6915 		 * We have hit in the predicate cache; we know that
6916 		 * this predicate would evaluate to be false.
6917 		 */
6918 		dtrace_interrupt_enable(cookie);
6919 		return;
6920 	}
6921 
6922 	if (panic_quiesce) {
6923 		/*
6924 		 * We don't trace anything if we're panicking.
6925 		 */
6926 		dtrace_interrupt_enable(cookie);
6927 		return;
6928 	}
6929 
6930 	now = mstate.dtms_timestamp = dtrace_gethrtime();
6931 	mstate.dtms_present |= DTRACE_MSTATE_TIMESTAMP;
6932 	vtime = dtrace_vtime_references != 0;
6933 
6934 	if (vtime && curthread->t_dtrace_start)
6935 		curthread->t_dtrace_vtime += now - curthread->t_dtrace_start;
6936 
6937 	mstate.dtms_difo = NULL;
6938 	mstate.dtms_probe = probe;
6939 	mstate.dtms_strtok = NULL;
6940 	mstate.dtms_arg[0] = arg0;
6941 	mstate.dtms_arg[1] = arg1;
6942 	mstate.dtms_arg[2] = arg2;
6943 	mstate.dtms_arg[3] = arg3;
6944 	mstate.dtms_arg[4] = arg4;
6945 
6946 	flags = (volatile uint16_t *)&cpu_core[cpuid].cpuc_dtrace_flags;
6947 
6948 	for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) {
6949 		dtrace_predicate_t *pred = ecb->dte_predicate;
6950 		dtrace_state_t *state = ecb->dte_state;
6951 		dtrace_buffer_t *buf = &state->dts_buffer[cpuid];
6952 		dtrace_buffer_t *aggbuf = &state->dts_aggbuffer[cpuid];
6953 		dtrace_vstate_t *vstate = &state->dts_vstate;
6954 		dtrace_provider_t *prov = probe->dtpr_provider;
6955 		uint64_t tracememsize = 0;
6956 		int committed = 0;
6957 		caddr_t tomax;
6958 
6959 		/*
6960 		 * A little subtlety with the following (seemingly innocuous)
6961 		 * declaration of the automatic 'val':  by looking at the
6962 		 * code, you might think that it could be declared in the
6963 		 * action processing loop, below.  (That is, it's only used in
6964 		 * the action processing loop.)  However, it must be declared
6965 		 * out of that scope because in the case of DIF expression
6966 		 * arguments to aggregating actions, one iteration of the
6967 		 * action loop will use the last iteration's value.
6968 		 */
6969 #ifdef lint
6970 		uint64_t val = 0;
6971 #else
6972 		uint64_t val;
6973 #endif
6974 
6975 		mstate.dtms_present = DTRACE_MSTATE_ARGS | DTRACE_MSTATE_PROBE;
6976 		mstate.dtms_access = DTRACE_ACCESS_ARGS | DTRACE_ACCESS_PROC;
6977 		mstate.dtms_getf = NULL;
6978 
6979 		*flags &= ~CPU_DTRACE_ERROR;
6980 
6981 		if (prov == dtrace_provider) {
6982 			/*
6983 			 * If dtrace itself is the provider of this probe,
6984 			 * we're only going to continue processing the ECB if
6985 			 * arg0 (the dtrace_state_t) is equal to the ECB's
6986 			 * creating state.  (This prevents disjoint consumers
6987 			 * from seeing one another's metaprobes.)
6988 			 */
6989 			if (arg0 != (uint64_t)(uintptr_t)state)
6990 				continue;
6991 		}
6992 
6993 		if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) {
6994 			/*
6995 			 * We're not currently active.  If our provider isn't
6996 			 * the dtrace pseudo provider, we're not interested.
6997 			 */
6998 			if (prov != dtrace_provider)
6999 				continue;
7000 
7001 			/*
7002 			 * Now we must further check if we are in the BEGIN
7003 			 * probe.  If we are, we will only continue processing
7004 			 * if we're still in WARMUP -- if one BEGIN enabling
7005 			 * has invoked the exit() action, we don't want to
7006 			 * evaluate subsequent BEGIN enablings.
7007 			 */
7008 			if (probe->dtpr_id == dtrace_probeid_begin &&
7009 			    state->dts_activity != DTRACE_ACTIVITY_WARMUP) {
7010 				ASSERT(state->dts_activity ==
7011 				    DTRACE_ACTIVITY_DRAINING);
7012 				continue;
7013 			}
7014 		}
7015 
7016 		if (ecb->dte_cond && !dtrace_priv_probe(state, &mstate, ecb))
7017 			continue;
7018 
7019 		if (now - state->dts_alive > dtrace_deadman_timeout) {
7020 			/*
7021 			 * We seem to be dead.  Unless we (a) have kernel
7022 			 * destructive permissions, (b) have explicitly enabled
7023 			 * destructive actions, and (c) destructive actions have
7024 			 * not been disabled, we're going to transition into
7025 			 * the KILLED state, from which no further processing
7026 			 * on this state will be performed.
7027 			 */
7028 			if (!dtrace_priv_kernel_destructive(state) ||
7029 			    !state->dts_cred.dcr_destructive ||
7030 			    dtrace_destructive_disallow) {
7031 				void *activity = &state->dts_activity;
7032 				dtrace_activity_t current;
7033 
7034 				do {
7035 					current = state->dts_activity;
7036 				} while (dtrace_cas32(activity, current,
7037 				    DTRACE_ACTIVITY_KILLED) != current);
7038 
7039 				continue;
7040 			}
7041 		}
7042 
7043 		if ((offs = dtrace_buffer_reserve(buf, ecb->dte_needed,
7044 		    ecb->dte_alignment, state, &mstate)) < 0)
7045 			continue;
7046 
7047 		tomax = buf->dtb_tomax;
7048 		ASSERT(tomax != NULL);
7049 
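		/*
		 * Write the record header -- the EPID and timestamp -- at
		 * the offset that we just reserved.
		 */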
7050 		if (ecb->dte_size != 0) {
7051 			dtrace_rechdr_t dtrh;
7052 			if (!(mstate.dtms_present & DTRACE_MSTATE_TIMESTAMP)) {
7053 				mstate.dtms_timestamp = dtrace_gethrtime();
7054 				mstate.dtms_present |= DTRACE_MSTATE_TIMESTAMP;
7055 			}
7056 			ASSERT3U(ecb->dte_size, >=, sizeof (dtrace_rechdr_t));
7057 			dtrh.dtrh_epid = ecb->dte_epid;
7058 			DTRACE_RECORD_STORE_TIMESTAMP(&dtrh,
7059 			    mstate.dtms_timestamp);
7060 			*((dtrace_rechdr_t *)(tomax + offs)) = dtrh;
7061 		}
7062 
7063 		mstate.dtms_epid = ecb->dte_epid;
7064 		mstate.dtms_present |= DTRACE_MSTATE_EPID;
7065 
7066 		if (state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL)
7067 			mstate.dtms_access |= DTRACE_ACCESS_KERNEL;
7068 
7069 		if (pred != NULL) {
7070 			dtrace_difo_t *dp = pred->dtp_difo;
7071 			int rval;
7072 
7073 			rval = dtrace_dif_emulate(dp, &mstate, vstate, state);
7074 
7075 			if (!(*flags & CPU_DTRACE_ERROR) && !rval) {
7076 				dtrace_cacheid_t cid = probe->dtpr_predcache;
7077 
7078 				if (cid != DTRACE_CACHEIDNONE && !onintr) {
7079 					/*
7080 					 * Update the predicate cache...
7081 					 */
7082 					ASSERT(cid == pred->dtp_cacheid);
7083 					curthread->t_predcache = cid;
7084 				}
7085 
7086 				continue;
7087 			}
7088 		}
7089 
7090 		for (act = ecb->dte_action; !(*flags & CPU_DTRACE_ERROR) &&
7091 		    act != NULL; act = act->dta_next) {
7092 			size_t valoffs;
7093 			dtrace_difo_t *dp;
7094 			dtrace_recdesc_t *rec = &act->dta_rec;
7095 
7096 			size = rec->dtrd_size;
7097 			valoffs = offs + rec->dtrd_offset;
7098 
7099 			if (DTRACEACT_ISAGG(act->dta_kind)) {
7100 				uint64_t v = 0xbad;
7101 				dtrace_aggregation_t *agg;
7102 
7103 				agg = (dtrace_aggregation_t *)act;
7104 
7105 				if ((dp = act->dta_difo) != NULL)
7106 					v = dtrace_dif_emulate(dp,
7107 					    &mstate, vstate, state);
7108 
7109 				if (*flags & CPU_DTRACE_ERROR)
7110 					continue;
7111 
7112 				/*
7113 				 * Note that we always pass the expression
7114 				 * value from the previous iteration of the
7115 				 * action loop.  This value will only be used
7116 				 * if there is an expression argument to the
7117 				 * aggregating action, denoted by the
7118 				 * dtag_hasarg field.
7119 				 */
7120 				dtrace_aggregate(agg, buf,
7121 				    offs, aggbuf, v, val);
7122 				continue;
7123 			}
7124 
7125 			switch (act->dta_kind) {
7126 			case DTRACEACT_STOP:
7127 				if (dtrace_priv_proc_destructive(state,
7128 				    &mstate))
7129 					dtrace_action_stop();
7130 				continue;
7131 
7132 			case DTRACEACT_BREAKPOINT:
7133 				if (dtrace_priv_kernel_destructive(state))
7134 					dtrace_action_breakpoint(ecb);
7135 				continue;
7136 
7137 			case DTRACEACT_PANIC:
7138 				if (dtrace_priv_kernel_destructive(state))
7139 					dtrace_action_panic(ecb);
7140 				continue;
7141 
7142 			case DTRACEACT_STACK:
7143 				if (!dtrace_priv_kernel(state))
7144 					continue;
7145 
7146 				dtrace_getpcstack((pc_t *)(tomax + valoffs),
7147 				    size / sizeof (pc_t), probe->dtpr_aframes,
7148 				    DTRACE_ANCHORED(probe) ? NULL :
7149 				    (uint32_t *)arg0);
7150 
7151 				continue;
7152 
7153 			case DTRACEACT_JSTACK:
7154 			case DTRACEACT_USTACK:
7155 				if (!dtrace_priv_proc(state, &mstate))
7156 					continue;
7157 
7158 				/*
7159 				 * See comment in DIF_VAR_PID.
7160 				 */
7161 				if (DTRACE_ANCHORED(mstate.dtms_probe) &&
7162 				    CPU_ON_INTR(CPU)) {
7163 					int depth = DTRACE_USTACK_NFRAMES(
7164 					    rec->dtrd_arg) + 1;
7165 
7166 					dtrace_bzero((void *)(tomax + valoffs),
7167 					    DTRACE_USTACK_STRSIZE(rec->dtrd_arg)
7168 					    + depth * sizeof (uint64_t));
7169 
7170 					continue;
7171 				}
7172 
7173 				if (DTRACE_USTACK_STRSIZE(rec->dtrd_arg) != 0 &&
7174 				    curproc->p_dtrace_helpers != NULL) {
7175 					/*
7176 					 * This is the slow path -- we have
7177 					 * allocated string space, and we're
7178 					 * getting the stack of a process that
7179 					 * has helpers.  Call into a separate
7180 					 * routine to perform this processing.
7181 					 */
7182 					dtrace_action_ustack(&mstate, state,
7183 					    (uint64_t *)(tomax + valoffs),
7184 					    rec->dtrd_arg);
7185 					continue;
7186 				}
7187 
7188 				/*
7189 				 * Clear the string space, since there's no
7190 				 * helper to do it for us.
7191 				 */
7192 				if (DTRACE_USTACK_STRSIZE(rec->dtrd_arg) != 0) {
7193 					int depth = DTRACE_USTACK_NFRAMES(
7194 					    rec->dtrd_arg);
7195 					size_t strsize = DTRACE_USTACK_STRSIZE(
7196 					    rec->dtrd_arg);
7197 					uint64_t *buf = (uint64_t *)(tomax +
7198 					    valoffs);
7199 					void *strspace = &buf[depth + 1];
7200 
7201 					dtrace_bzero(strspace,
7202 					    MIN(depth, strsize));
7203 				}
7204 
7205 				DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
7206 				dtrace_getupcstack((uint64_t *)
7207 				    (tomax + valoffs),
7208 				    DTRACE_USTACK_NFRAMES(rec->dtrd_arg) + 1);
7209 				DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
7210 				continue;
7211 
7212 			default:
7213 				break;
7214 			}
7215 
7216 			dp = act->dta_difo;
7217 			ASSERT(dp != NULL);
7218 
7219 			val = dtrace_dif_emulate(dp, &mstate, vstate, state);
7220 
7221 			if (*flags & CPU_DTRACE_ERROR)
7222 				continue;
7223 
7224 			switch (act->dta_kind) {
7225 			case DTRACEACT_SPECULATE: {
7226 				dtrace_rechdr_t *dtrh;
7227 
7228 				ASSERT(buf == &state->dts_buffer[cpuid]);
7229 				buf = dtrace_speculation_buffer(state,
7230 				    cpuid, val);
7231 
7232 				if (buf == NULL) {
7233 					*flags |= CPU_DTRACE_DROP;
7234 					continue;
7235 				}
7236 
7237 				offs = dtrace_buffer_reserve(buf,
7238 				    ecb->dte_needed, ecb->dte_alignment,
7239 				    state, NULL);
7240 
7241 				if (offs < 0) {
7242 					*flags |= CPU_DTRACE_DROP;
7243 					continue;
7244 				}
7245 
7246 				tomax = buf->dtb_tomax;
7247 				ASSERT(tomax != NULL);
7248 
7249 				if (ecb->dte_size == 0)
7250 					continue;
7251 
7252 				ASSERT3U(ecb->dte_size, >=,
7253 				    sizeof (dtrace_rechdr_t));
7254 				dtrh = ((void *)(tomax + offs));
7255 				dtrh->dtrh_epid = ecb->dte_epid;
7256 				/*
7257 				 * When the speculation is committed, all of
7258 				 * the records in the speculative buffer will
7259 				 * have their timestamps set to the commit
7260 				 * time.  Until then, it is set to a sentinel
7261 				 * value, for debuggability.
7262 				 */
7263 				DTRACE_RECORD_STORE_TIMESTAMP(dtrh, UINT64_MAX);
7264 				continue;
7265 			}
7266 
7267 			case DTRACEACT_CHILL:
7268 				if (dtrace_priv_kernel_destructive(state))
7269 					dtrace_action_chill(&mstate, val);
7270 				continue;
7271 
7272 			case DTRACEACT_RAISE:
7273 				if (dtrace_priv_proc_destructive(state,
7274 				    &mstate))
7275 					dtrace_action_raise(val);
7276 				continue;
7277 
7278 			case DTRACEACT_COMMIT:
7279 				ASSERT(!committed);
7280 
7281 				/*
7282 				 * We need to commit our buffer state.
7283 				 */
7284 				if (ecb->dte_size)
7285 					buf->dtb_offset = offs + ecb->dte_size;
7286 				buf = &state->dts_buffer[cpuid];
7287 				dtrace_speculation_commit(state, cpuid, val);
7288 				committed = 1;
7289 				continue;
7290 
7291 			case DTRACEACT_DISCARD:
7292 				dtrace_speculation_discard(state, cpuid, val);
7293 				continue;
7294 
7295 			case DTRACEACT_DIFEXPR:
7296 			case DTRACEACT_LIBACT:
7297 			case DTRACEACT_PRINTF:
7298 			case DTRACEACT_PRINTA:
7299 			case DTRACEACT_SYSTEM:
7300 			case DTRACEACT_FREOPEN:
7301 			case DTRACEACT_TRACEMEM:
7302 				break;
7303 
7304 			case DTRACEACT_TRACEMEM_DYNSIZE:
7305 				tracememsize = val;
7306 				break;
7307 
7308 			case DTRACEACT_SYM:
7309 			case DTRACEACT_MOD:
7310 				if (!dtrace_priv_kernel(state))
7311 					continue;
7312 				break;
7313 
7314 			case DTRACEACT_USYM:
7315 			case DTRACEACT_UMOD:
7316 			case DTRACEACT_UADDR: {
7317 				struct pid *pid = curthread->t_procp->p_pidp;
7318 
7319 				if (!dtrace_priv_proc(state, &mstate))
7320 					continue;
7321 
7322 				DTRACE_STORE(uint64_t, tomax,
7323 				    valoffs, (uint64_t)pid->pid_id);
7324 				DTRACE_STORE(uint64_t, tomax,
7325 				    valoffs + sizeof (uint64_t), val);
7326 
7327 				continue;
7328 			}
7329 
7330 			case DTRACEACT_EXIT: {
7331 				/*
7332 				 * For the exit action, we are going to attempt
7333 				 * to atomically set our activity to be
7334 				 * draining.  If this fails (either because
7335 				 * another CPU has beat us to the exit action,
7336 				 * or because our current activity is something
7337 				 * other than ACTIVE or WARMUP), we will
7338 				 * continue.  This assures that the exit action
7339 				 * can be successfully recorded at most once
7340 				 * when we're in the ACTIVE state.  If we're
7341 				 * encountering the exit() action while in
7342 				 * COOLDOWN, however, we want to honor the new
7343 				 * status code.  (We know that we're the only
7344 				 * thread in COOLDOWN, so there is no race.)
7345 				 */
7346 				void *activity = &state->dts_activity;
7347 				dtrace_activity_t current = state->dts_activity;
7348 
7349 				if (current == DTRACE_ACTIVITY_COOLDOWN)
7350 					break;
7351 
7352 				if (current != DTRACE_ACTIVITY_WARMUP)
7353 					current = DTRACE_ACTIVITY_ACTIVE;
7354 
7355 				if (dtrace_cas32(activity, current,
7356 				    DTRACE_ACTIVITY_DRAINING) != current) {
7357 					*flags |= CPU_DTRACE_DROP;
7358 					continue;
7359 				}
7360 
7361 				break;
7362 			}
7363 
7364 			default:
7365 				ASSERT(0);
7366 			}
7367 
7368 			if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF ||
7369 			    dp->dtdo_rtype.dtdt_flags & DIF_TF_BYUREF) {
7370 				uintptr_t end = valoffs + size;
7371 
7372 				if (tracememsize != 0 &&
7373 				    valoffs + tracememsize < end) {
7374 					end = valoffs + tracememsize;
7375 					tracememsize = 0;
7376 				}
7377 
7378 				if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF &&
7379 				    !dtrace_vcanload((void *)(uintptr_t)val,
7380 				    &dp->dtdo_rtype, NULL, &mstate, vstate))
7381 					continue;
7382 
7383 				dtrace_store_by_ref(dp, tomax, size, &valoffs,
7384 				    &val, end, act->dta_intuple,
7385 				    dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF ?
7386 				    DIF_TF_BYREF: DIF_TF_BYUREF);
7387 				continue;
7388 			}
7389 
7390 			switch (size) {
7391 			case 0:
7392 				break;
7393 
7394 			case sizeof (uint8_t):
7395 				DTRACE_STORE(uint8_t, tomax, valoffs, val);
7396 				break;
7397 			case sizeof (uint16_t):
7398 				DTRACE_STORE(uint16_t, tomax, valoffs, val);
7399 				break;
7400 			case sizeof (uint32_t):
7401 				DTRACE_STORE(uint32_t, tomax, valoffs, val);
7402 				break;
7403 			case sizeof (uint64_t):
7404 				DTRACE_STORE(uint64_t, tomax, valoffs, val);
7405 				break;
7406 			default:
7407 				/*
7408 				 * Any other size should have been returned by
7409 				 * reference, not by value.
7410 				 */
7411 				ASSERT(0);
7412 				break;
7413 			}
7414 		}
7415 
7416 		if (*flags & CPU_DTRACE_DROP)
7417 			continue;
7418 
7419 		if (*flags & CPU_DTRACE_FAULT) {
7420 			int ndx;
7421 			dtrace_action_t *err;
7422 
7423 			buf->dtb_errors++;
7424 
7425 			if (probe->dtpr_id == dtrace_probeid_error) {
7426 				/*
7427 				 * There's nothing we can do -- we had an
7428 				 * error on the error probe.  We bump an
7429 				 * error counter to at least indicate that
7430 				 * this condition happened.
7431 				 */
7432 				dtrace_error(&state->dts_dblerrors);
7433 				continue;
7434 			}
7435 
7436 			if (vtime) {
7437 				/*
7438 				 * Before recursing on dtrace_probe(), we
7439 				 * need to explicitly clear out our start
7440 				 * time to prevent it from being accumulated
7441 				 * into t_dtrace_vtime.
7442 				 */
7443 				curthread->t_dtrace_start = 0;
7444 			}
7445 
7446 			/*
7447 			 * Iterate over the actions to figure out which action
7448 			 * we were processing when we experienced the error.
7449 			 * Note that act points _past_ the faulting action; if
7450 			 * act is ecb->dte_action, the fault was in the
7451 			 * predicate; if it's ecb->dte_action->dta_next, it's
7452 			 * in action #1, and so on.
7453 			 */
7454 			for (err = ecb->dte_action, ndx = 0;
7455 			    err != act; err = err->dta_next, ndx++)
7456 				continue;
7457 
7458 			dtrace_probe_error(state, ecb->dte_epid, ndx,
7459 			    (mstate.dtms_present & DTRACE_MSTATE_FLTOFFS) ?
7460 			    mstate.dtms_fltoffs : -1, DTRACE_FLAGS2FLT(*flags),
7461 			    cpu_core[cpuid].cpuc_dtrace_illval);
7462 
7463 			continue;
7464 		}
7465 
7466 		if (!committed)
7467 			buf->dtb_offset = offs + ecb->dte_size;
7468 	}
7469 
7470 	end = dtrace_gethrtime();
7471 	if (vtime)
7472 		curthread->t_dtrace_start = end;
7473 
7474 	CPU->cpu_dtrace_nsec += end - now;
7475 
7476 	dtrace_interrupt_enable(cookie);
7477 }
7478 
7479 /*
7480  * DTrace Probe Hashing Functions
7481  *
7482  * The functions in this section (and indeed, the functions in remaining
7483  * sections) are not _called_ from probe context.  (Any exceptions to this are
7484  * marked with a "Note:".)  Rather, they are called from elsewhere in the
7485  * DTrace framework to look-up probes in, add probes to and remove probes from
7486  * DTrace framework to look up probes in, add probes to, and remove probes from
7487  * probe tuple -- allowing for fast lookups, regardless of what was
7488  * specified.)
7489  */
7490 static uint_t
7491 dtrace_hash_str(char *p)
7492 {
7493 	unsigned int g;
7494 	uint_t hval = 0;
7495 
7496 	while (*p) {
7497 		hval = (hval << 4) + *p++;
7498 		if ((g = (hval & 0xf0000000)) != 0)
7499 			hval ^= g >> 24;
7500 		hval &= ~g;
7501 	}
7502 	return (hval);
7503 }
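
/*
 * Editor's note (inference, not part of the original source): callers below
 * never invoke dtrace_hash_str() directly on a probe; they go through the
 * DTRACE_HASHSTR() macro (presumably defined in <sys/dtrace_impl.h>), which
 * applies the hash to the string found at dth_stroffs within the probe.  A
 * bucket index is then derived by masking -- which is why dth_size is always
 * kept a power of two:
 *
 *	int hashval = DTRACE_HASHSTR(hash, probe);
 *	int ndx = hashval & hash->dth_mask;
 */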
7504 
7505 static dtrace_hash_t *
7506 dtrace_hash_create(uintptr_t stroffs, uintptr_t nextoffs, uintptr_t prevoffs)
7507 {
7508 	dtrace_hash_t *hash = kmem_zalloc(sizeof (dtrace_hash_t), KM_SLEEP);
7509 
7510 	hash->dth_stroffs = stroffs;
7511 	hash->dth_nextoffs = nextoffs;
7512 	hash->dth_prevoffs = prevoffs;
7513 
7514 	hash->dth_size = 1;
7515 	hash->dth_mask = hash->dth_size - 1;
7516 
7517 	hash->dth_tab = kmem_zalloc(hash->dth_size *
7518 	    sizeof (dtrace_hashbucket_t *), KM_SLEEP);
7519 
7520 	return (hash);
7521 }
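
/*
 * Editor's sketch (assumption; the actual setup lives in the driver attach
 * path, which is not shown in this excerpt): the probe hashes used
 * throughout this file -- dtrace_bymod, dtrace_byfunc and dtrace_byname --
 * are created with one call per probe-tuple element, each keyed on a
 * different member of dtrace_probe_t, roughly:
 *
 *	dtrace_bymod = dtrace_hash_create(
 *	    offsetof(dtrace_probe_t, dtpr_mod),
 *	    offsetof(dtrace_probe_t, dtpr_nextmod),
 *	    offsetof(dtrace_probe_t, dtpr_prevmod));
 *
 * Passing the link-field offsets (rather than allocating per-hash nodes) is
 * what lets a single probe sit on all three hash chains at once.
 */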
7522 
7523 static void
7524 dtrace_hash_destroy(dtrace_hash_t *hash)
7525 {
7526 #ifdef DEBUG
7527 	int i;
7528 
7529 	for (i = 0; i < hash->dth_size; i++)
7530 		ASSERT(hash->dth_tab[i] == NULL);
7531 #endif
7532 
7533 	kmem_free(hash->dth_tab,
7534 	    hash->dth_size * sizeof (dtrace_hashbucket_t *));
7535 	kmem_free(hash, sizeof (dtrace_hash_t));
7536 }
7537 
7538 static void
7539 dtrace_hash_resize(dtrace_hash_t *hash)
7540 {
7541 	int size = hash->dth_size, i, ndx;
7542 	int new_size = hash->dth_size << 1;
7543 	int new_mask = new_size - 1;
7544 	dtrace_hashbucket_t **new_tab, *bucket, *next;
7545 
7546 	ASSERT((new_size & new_mask) == 0);
7547 
7548 	new_tab = kmem_zalloc(new_size * sizeof (void *), KM_SLEEP);
7549 
7550 	for (i = 0; i < size; i++) {
7551 		for (bucket = hash->dth_tab[i]; bucket != NULL; bucket = next) {
7552 			dtrace_probe_t *probe = bucket->dthb_chain;
7553 
7554 			ASSERT(probe != NULL);
7555 			ndx = DTRACE_HASHSTR(hash, probe) & new_mask;
7556 
7557 			next = bucket->dthb_next;
7558 			bucket->dthb_next = new_tab[ndx];
7559 			new_tab[ndx] = bucket;
7560 		}
7561 	}
7562 
7563 	kmem_free(hash->dth_tab, hash->dth_size * sizeof (void *));
7564 	hash->dth_tab = new_tab;
7565 	hash->dth_size = new_size;
7566 	hash->dth_mask = new_mask;
7567 }
7568 
7569 static void
7570 dtrace_hash_add(dtrace_hash_t *hash, dtrace_probe_t *new)
7571 {
7572 	int hashval = DTRACE_HASHSTR(hash, new);
7573 	int ndx = hashval & hash->dth_mask;
7574 	dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
7575 	dtrace_probe_t **nextp, **prevp;
7576 
7577 	for (; bucket != NULL; bucket = bucket->dthb_next) {
7578 		if (DTRACE_HASHEQ(hash, bucket->dthb_chain, new))
7579 			goto add;
7580 	}
7581 
7582 	if ((hash->dth_nbuckets >> 1) > hash->dth_size) {
7583 		dtrace_hash_resize(hash);
7584 		dtrace_hash_add(hash, new);
7585 		return;
7586 	}
7587 
7588 	bucket = kmem_zalloc(sizeof (dtrace_hashbucket_t), KM_SLEEP);
7589 	bucket->dthb_next = hash->dth_tab[ndx];
7590 	hash->dth_tab[ndx] = bucket;
7591 	hash->dth_nbuckets++;
7592 
7593 add:
7594 	nextp = DTRACE_HASHNEXT(hash, new);
7595 	ASSERT(*nextp == NULL && *(DTRACE_HASHPREV(hash, new)) == NULL);
7596 	*nextp = bucket->dthb_chain;
7597 
7598 	if (bucket->dthb_chain != NULL) {
7599 		prevp = DTRACE_HASHPREV(hash, bucket->dthb_chain);
7600 		ASSERT(*prevp == NULL);
7601 		*prevp = new;
7602 	}
7603 
7604 	bucket->dthb_chain = new;
7605 	bucket->dthb_len++;
7606 }
7607 
7608 static dtrace_probe_t *
7609 dtrace_hash_lookup(dtrace_hash_t *hash, dtrace_probe_t *template)
7610 {
7611 	int hashval = DTRACE_HASHSTR(hash, template);
7612 	int ndx = hashval & hash->dth_mask;
7613 	dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
7614 
7615 	for (; bucket != NULL; bucket = bucket->dthb_next) {
7616 		if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template))
7617 			return (bucket->dthb_chain);
7618 	}
7619 
7620 	return (NULL);
7621 }
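
/*
 * Editor's note (illustrative): lookups are performed with a stack-allocated
 * "template" probe in which only the string member relevant to this hash
 * needs to be meaningful -- see dtrace_match() below, which does essentially:
 *
 *	dtrace_probe_t template;
 *
 *	template.dtpr_func = (char *)"read";
 *	probe = dtrace_hash_lookup(dtrace_byfunc, &template);
 *
 * and then walks the chain via DTRACE_HASHNEXT() to visit every probe whose
 * function name compares equal.  ("read" is just an example value.)
 */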
7622 
7623 static int
7624 dtrace_hash_collisions(dtrace_hash_t *hash, dtrace_probe_t *template)
7625 {
7626 	int hashval = DTRACE_HASHSTR(hash, template);
7627 	int ndx = hashval & hash->dth_mask;
7628 	dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
7629 
7630 	for (; bucket != NULL; bucket = bucket->dthb_next) {
7631 		if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template))
7632 			return (bucket->dthb_len);
7633 	}
7634 
7635 	return (0);
7636 }
7637 
7638 static void
7639 dtrace_hash_remove(dtrace_hash_t *hash, dtrace_probe_t *probe)
7640 {
7641 	int ndx = DTRACE_HASHSTR(hash, probe) & hash->dth_mask;
7642 	dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
7643 
7644 	dtrace_probe_t **prevp = DTRACE_HASHPREV(hash, probe);
7645 	dtrace_probe_t **nextp = DTRACE_HASHNEXT(hash, probe);
7646 
7647 	/*
7648 	 * Find the bucket that we're removing this probe from.
7649 	 */
7650 	for (; bucket != NULL; bucket = bucket->dthb_next) {
7651 		if (DTRACE_HASHEQ(hash, bucket->dthb_chain, probe))
7652 			break;
7653 	}
7654 
7655 	ASSERT(bucket != NULL);
7656 
7657 	if (*prevp == NULL) {
7658 		if (*nextp == NULL) {
7659 			/*
7660 			 * The removed probe was the only probe on this
7661 			 * bucket; we need to remove the bucket.
7662 			 */
7663 			dtrace_hashbucket_t *b = hash->dth_tab[ndx];
7664 
7665 			ASSERT(bucket->dthb_chain == probe);
7666 			ASSERT(b != NULL);
7667 
7668 			if (b == bucket) {
7669 				hash->dth_tab[ndx] = bucket->dthb_next;
7670 			} else {
7671 				while (b->dthb_next != bucket)
7672 					b = b->dthb_next;
7673 				b->dthb_next = bucket->dthb_next;
7674 			}
7675 
7676 			ASSERT(hash->dth_nbuckets > 0);
7677 			hash->dth_nbuckets--;
7678 			kmem_free(bucket, sizeof (dtrace_hashbucket_t));
7679 			return;
7680 		}
7681 
7682 		bucket->dthb_chain = *nextp;
7683 	} else {
7684 		*(DTRACE_HASHNEXT(hash, *prevp)) = *nextp;
7685 	}
7686 
7687 	if (*nextp != NULL)
7688 		*(DTRACE_HASHPREV(hash, *nextp)) = *prevp;
7689 }
7690 
7691 /*
7692  * DTrace Utility Functions
7693  *
7694  * These are random utility functions that are _not_ called from probe context.
7695  */
7696 static int
7697 dtrace_badattr(const dtrace_attribute_t *a)
7698 {
7699 	return (a->dtat_name > DTRACE_STABILITY_MAX ||
7700 	    a->dtat_data > DTRACE_STABILITY_MAX ||
7701 	    a->dtat_class > DTRACE_CLASS_MAX);
7702 }
7703 
7704 /*
7705  * Return a duplicate copy of a string.  If the specified string is NULL,
7706  * this function returns a zero-length string.
7707  */
7708 static char *
7709 dtrace_strdup(const char *str)
7710 {
7711 	char *new = kmem_zalloc((str != NULL ? strlen(str) : 0) + 1, KM_SLEEP);
7712 
7713 	if (str != NULL)
7714 		(void) strcpy(new, str);
7715 
7716 	return (new);
7717 }
7718 
7719 #define	DTRACE_ISALPHA(c)	\
7720 	(((c) >= 'a' && (c) <= 'z') || ((c) >= 'A' && (c) <= 'Z'))
7721 
7722 static int
7723 dtrace_badname(const char *s)
7724 {
7725 	char c;
7726 
7727 	if (s == NULL || (c = *s++) == '\0')
7728 		return (0);
7729 
7730 	if (!DTRACE_ISALPHA(c) && c != '-' && c != '_' && c != '.')
7731 		return (1);
7732 
7733 	while ((c = *s++) != '\0') {
7734 		if (!DTRACE_ISALPHA(c) && (c < '0' || c > '9') &&
7735 		    c != '-' && c != '_' && c != '.' && c != '`')
7736 			return (1);
7737 	}
7738 
7739 	return (0);
7740 }
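
/*
 * Editor's note (illustrative examples, not from the original source):
 *
 *	dtrace_badname("fbt")		returns 0 (valid)
 *	dtrace_badname("ip_output")	returns 0 (valid)
 *	dtrace_badname("9p")		returns 1 (leading digit)
 *	dtrace_badname("bad name")	returns 1 (embedded space)
 *
 * Note that NULL and the empty string are not considered bad names; callers
 * such as dtrace_register() check for an empty name separately.
 */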
7741 
7742 static void
7743 dtrace_cred2priv(cred_t *cr, uint32_t *privp, uid_t *uidp, zoneid_t *zoneidp)
7744 {
7745 	uint32_t priv;
7746 
7747 	if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) {
7748 		/*
7749 		 * For DTRACE_PRIV_ALL, the uid and zoneid don't matter.
7750 		 */
7751 		priv = DTRACE_PRIV_ALL;
7752 	} else {
7753 		*uidp = crgetuid(cr);
7754 		*zoneidp = crgetzoneid(cr);
7755 
7756 		priv = 0;
7757 		if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE))
7758 			priv |= DTRACE_PRIV_KERNEL | DTRACE_PRIV_USER;
7759 		else if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE))
7760 			priv |= DTRACE_PRIV_USER;
7761 		if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE))
7762 			priv |= DTRACE_PRIV_PROC;
7763 		if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
7764 			priv |= DTRACE_PRIV_OWNER;
7765 		if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
7766 			priv |= DTRACE_PRIV_ZONEOWNER;
7767 	}
7768 
7769 	*privp = priv;
7770 }
7771 
7772 #ifdef DTRACE_ERRDEBUG
7773 static void
7774 dtrace_errdebug(const char *str)
7775 {
7776 	int hval = dtrace_hash_str((char *)str) % DTRACE_ERRHASHSZ;
7777 	int occupied = 0;
7778 
7779 	mutex_enter(&dtrace_errlock);
7780 	dtrace_errlast = str;
7781 	dtrace_errthread = curthread;
7782 
7783 	while (occupied++ < DTRACE_ERRHASHSZ) {
7784 		if (dtrace_errhash[hval].dter_msg == str) {
7785 			dtrace_errhash[hval].dter_count++;
7786 			goto out;
7787 		}
7788 
7789 		if (dtrace_errhash[hval].dter_msg != NULL) {
7790 			hval = (hval + 1) % DTRACE_ERRHASHSZ;
7791 			continue;
7792 		}
7793 
7794 		dtrace_errhash[hval].dter_msg = str;
7795 		dtrace_errhash[hval].dter_count = 1;
7796 		goto out;
7797 	}
7798 
7799 	panic("dtrace: undersized error hash");
7800 out:
7801 	mutex_exit(&dtrace_errlock);
7802 }
7803 #endif
7804 
7805 /*
7806  * DTrace Matching Functions
7807  *
7808  * These functions are used to match groups of probes, given some elements of
7809  * a probe tuple, or some globbed expressions for elements of a probe tuple.
7810  */
7811 static int
7812 dtrace_match_priv(const dtrace_probe_t *prp, uint32_t priv, uid_t uid,
7813     zoneid_t zoneid)
7814 {
7815 	if (priv != DTRACE_PRIV_ALL) {
7816 		uint32_t ppriv = prp->dtpr_provider->dtpv_priv.dtpp_flags;
7817 		uint32_t match = priv & ppriv;
7818 
7819 		/*
7820 		 * No PRIV_DTRACE_* privileges...
7821 		 */
7822 		if ((priv & (DTRACE_PRIV_PROC | DTRACE_PRIV_USER |
7823 		    DTRACE_PRIV_KERNEL)) == 0)
7824 			return (0);
7825 
7826 		/*
7827 		 * No matching bits, but there were bits to match...
7828 		 */
7829 		if (match == 0 && ppriv != 0)
7830 			return (0);
7831 
7832 		/*
7833 		 * Need to have permissions to the process, but don't...
7834 		 */
7835 		if (((ppriv & ~match) & DTRACE_PRIV_OWNER) != 0 &&
7836 		    uid != prp->dtpr_provider->dtpv_priv.dtpp_uid) {
7837 			return (0);
7838 		}
7839 
7840 		/*
7841 		 * Need to be in the same zone unless we possess the
7842 		 * privilege to examine all zones.
7843 		 */
7844 		if (((ppriv & ~match) & DTRACE_PRIV_ZONEOWNER) != 0 &&
7845 		    zoneid != prp->dtpr_provider->dtpv_priv.dtpp_zoneid) {
7846 			return (0);
7847 		}
7848 	}
7849 
7850 	return (1);
7851 }
7852 
7853 /*
7854  * dtrace_match_probe compares a dtrace_probe_t to a pre-compiled key, which
7855  * consists of input pattern strings and an ops-vector to evaluate them.
7856  * This function returns >0 for match, 0 for no match, and <0 for error.
7857  */
7858 static int
7859 dtrace_match_probe(const dtrace_probe_t *prp, const dtrace_probekey_t *pkp,
7860     uint32_t priv, uid_t uid, zoneid_t zoneid)
7861 {
7862 	dtrace_provider_t *pvp = prp->dtpr_provider;
7863 	int rv;
7864 
7865 	if (pvp->dtpv_defunct)
7866 		return (0);
7867 
7868 	if ((rv = pkp->dtpk_pmatch(pvp->dtpv_name, pkp->dtpk_prov, 0)) <= 0)
7869 		return (rv);
7870 
7871 	if ((rv = pkp->dtpk_mmatch(prp->dtpr_mod, pkp->dtpk_mod, 0)) <= 0)
7872 		return (rv);
7873 
7874 	if ((rv = pkp->dtpk_fmatch(prp->dtpr_func, pkp->dtpk_func, 0)) <= 0)
7875 		return (rv);
7876 
7877 	if ((rv = pkp->dtpk_nmatch(prp->dtpr_name, pkp->dtpk_name, 0)) <= 0)
7878 		return (rv);
7879 
7880 	if (dtrace_match_priv(prp, priv, uid, zoneid) == 0)
7881 		return (0);
7882 
7883 	return (rv);
7884 }
7885 
7886 /*
7887  * dtrace_match_glob() is a safe kernel implementation of the gmatch(3GEN)
7888  * interface for matching a glob pattern 'p' to an input string 's'.  Unlike
7889  * libc's version, the kernel version only applies to 8-bit ASCII strings.
7890  * In addition, all of the recursion cases except for '*' matching have been
7891  * unwound.  For '*', we still implement recursive evaluation, but a depth
7892  * counter is maintained and matching is aborted if we recurse too deep.
7893  * The function returns 0 if no match, >0 if match, and <0 if recursion error.
7894  */
7895 static int
7896 dtrace_match_glob(const char *s, const char *p, int depth)
7897 {
7898 	const char *olds;
7899 	char s1, c;
7900 	int gs;
7901 
7902 	if (depth > DTRACE_PROBEKEY_MAXDEPTH)
7903 		return (-1);
7904 
7905 	if (s == NULL)
7906 		s = ""; /* treat NULL as empty string */
7907 
7908 top:
7909 	olds = s;
7910 	s1 = *s++;
7911 
7912 	if (p == NULL)
7913 		return (0);
7914 
7915 	if ((c = *p++) == '\0')
7916 		return (s1 == '\0');
7917 
7918 	switch (c) {
7919 	case '[': {
7920 		int ok = 0, notflag = 0;
7921 		char lc = '\0';
7922 
7923 		if (s1 == '\0')
7924 			return (0);
7925 
7926 		if (*p == '!') {
7927 			notflag = 1;
7928 			p++;
7929 		}
7930 
7931 		if ((c = *p++) == '\0')
7932 			return (0);
7933 
7934 		do {
7935 			if (c == '-' && lc != '\0' && *p != ']') {
7936 				if ((c = *p++) == '\0')
7937 					return (0);
7938 				if (c == '\\' && (c = *p++) == '\0')
7939 					return (0);
7940 
7941 				if (notflag) {
7942 					if (s1 < lc || s1 > c)
7943 						ok++;
7944 					else
7945 						return (0);
7946 				} else if (lc <= s1 && s1 <= c)
7947 					ok++;
7948 
7949 			} else if (c == '\\' && (c = *p++) == '\0')
7950 				return (0);
7951 
7952 			lc = c; /* save left-hand 'c' for next iteration */
7953 
7954 			if (notflag) {
7955 				if (s1 != c)
7956 					ok++;
7957 				else
7958 					return (0);
7959 			} else if (s1 == c)
7960 				ok++;
7961 
7962 			if ((c = *p++) == '\0')
7963 				return (0);
7964 
7965 		} while (c != ']');
7966 
7967 		if (ok)
7968 			goto top;
7969 
7970 		return (0);
7971 	}
7972 
7973 	case '\\':
7974 		if ((c = *p++) == '\0')
7975 			return (0);
7976 		/*FALLTHRU*/
7977 
7978 	default:
7979 		if (c != s1)
7980 			return (0);
7981 		/*FALLTHRU*/
7982 
7983 	case '?':
7984 		if (s1 != '\0')
7985 			goto top;
7986 		return (0);
7987 
7988 	case '*':
7989 		while (*p == '*')
7990 			p++; /* consecutive *'s are identical to a single one */
7991 
7992 		if (*p == '\0')
7993 			return (1);
7994 
7995 		for (s = olds; *s != '\0'; s++) {
7996 			if ((gs = dtrace_match_glob(s, p, depth + 1)) != 0)
7997 				return (gs);
7998 		}
7999 
8000 		return (0);
8001 	}
8002 }
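
/*
 * Editor's note (illustrative): given the rules above,
 *
 *	dtrace_match_glob("ip_output", "ip_*", 0)	returns 1
 *	dtrace_match_glob("read", "r?ad", 0)		returns 1
 *	dtrace_match_glob("read", "[a-m]*", 0)		returns 0
 *	dtrace_match_glob(NULL, "foo", 0)		returns 0
 *
 * In particular, NULL is treated as the empty string, so it can only match a
 * pattern that itself reduces to nothing (e.g. "" or a bare "*").
 */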
8003 
8004 /*ARGSUSED*/
8005 static int
8006 dtrace_match_string(const char *s, const char *p, int depth)
8007 {
8008 	return (s != NULL && strcmp(s, p) == 0);
8009 }
8010 
8011 /*ARGSUSED*/
8012 static int
8013 dtrace_match_nul(const char *s, const char *p, int depth)
8014 {
8015 	return (1); /* always match the empty pattern */
8016 }
8017 
8018 /*ARGSUSED*/
8019 static int
8020 dtrace_match_nonzero(const char *s, const char *p, int depth)
8021 {
8022 	return (s != NULL && s[0] != '\0');
8023 }
8024 
8025 static int
8026 dtrace_match(const dtrace_probekey_t *pkp, uint32_t priv, uid_t uid,
8027     zoneid_t zoneid, int (*matched)(dtrace_probe_t *, void *), void *arg)
8028 {
8029 	dtrace_probe_t template, *probe;
8030 	dtrace_hash_t *hash = NULL;
8031 	int len, rc, best = INT_MAX, nmatched = 0;
8032 	dtrace_id_t i;
8033 
8034 	ASSERT(MUTEX_HELD(&dtrace_lock));
8035 
8036 	/*
8037 	 * If the probe ID is specified in the key, just lookup by ID and
8038 	 * invoke the match callback once if a matching probe is found.
8039 	 */
8040 	if (pkp->dtpk_id != DTRACE_IDNONE) {
8041 		if ((probe = dtrace_probe_lookup_id(pkp->dtpk_id)) != NULL &&
8042 		    dtrace_match_probe(probe, pkp, priv, uid, zoneid) > 0) {
8043 			if ((*matched)(probe, arg) == DTRACE_MATCH_FAIL)
8044 				return (DTRACE_MATCH_FAIL);
8045 			nmatched++;
8046 		}
8047 		return (nmatched);
8048 	}
8049 
8050 	template.dtpr_mod = (char *)pkp->dtpk_mod;
8051 	template.dtpr_func = (char *)pkp->dtpk_func;
8052 	template.dtpr_name = (char *)pkp->dtpk_name;
8053 
8054 	/*
8055 	 * We want to find the most distinct of the module name, function
8056 	 * name, and name.  So for each one that is not a glob pattern or
8057 	 * empty string, we perform a lookup in the corresponding hash and
8058 	 * use the hash table with the fewest collisions to do our search.
8059 	 */
8060 	if (pkp->dtpk_mmatch == &dtrace_match_string &&
8061 	    (len = dtrace_hash_collisions(dtrace_bymod, &template)) < best) {
8062 		best = len;
8063 		hash = dtrace_bymod;
8064 	}
8065 
8066 	if (pkp->dtpk_fmatch == &dtrace_match_string &&
8067 	    (len = dtrace_hash_collisions(dtrace_byfunc, &template)) < best) {
8068 		best = len;
8069 		hash = dtrace_byfunc;
8070 	}
8071 
8072 	if (pkp->dtpk_nmatch == &dtrace_match_string &&
8073 	    (len = dtrace_hash_collisions(dtrace_byname, &template)) < best) {
8074 		best = len;
8075 		hash = dtrace_byname;
8076 	}
8077 
8078 	/*
8079 	 * If we did not select a hash table, iterate over every probe and
8080 	 * invoke our callback for each one that matches our input probe key.
8081 	 */
8082 	if (hash == NULL) {
8083 		for (i = 0; i < dtrace_nprobes; i++) {
8084 			if ((probe = dtrace_probes[i]) == NULL ||
8085 			    dtrace_match_probe(probe, pkp, priv, uid,
8086 			    zoneid) <= 0)
8087 				continue;
8088 
8089 			nmatched++;
8090 
8091 			if ((rc = (*matched)(probe, arg)) !=
8092 			    DTRACE_MATCH_NEXT) {
8093 				if (rc == DTRACE_MATCH_FAIL)
8094 					return (DTRACE_MATCH_FAIL);
8095 				break;
8096 			}
8097 		}
8098 
8099 		return (nmatched);
8100 	}
8101 
8102 	/*
8103 	 * If we selected a hash table, iterate over each probe of the same key
8104 	 * name and invoke the callback for every probe that matches the other
8105 	 * attributes of our input probe key.
8106 	 */
8107 	for (probe = dtrace_hash_lookup(hash, &template); probe != NULL;
8108 	    probe = *(DTRACE_HASHNEXT(hash, probe))) {
8109 
8110 		if (dtrace_match_probe(probe, pkp, priv, uid, zoneid) <= 0)
8111 			continue;
8112 
8113 		nmatched++;
8114 
8115 		if ((rc = (*matched)(probe, arg)) != DTRACE_MATCH_NEXT) {
8116 			if (rc == DTRACE_MATCH_FAIL)
8117 				return (DTRACE_MATCH_FAIL);
8118 			break;
8119 		}
8120 	}
8121 
8122 	return (nmatched);
8123 }
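
/*
 * Editor's sketch (assumption): the 'matched' callback passed to
 * dtrace_match() controls the walk through its return value.  A hypothetical
 * callback that merely counts matching probes would look like:
 *
 *	static int
 *	example_matched(dtrace_probe_t *probe, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return (DTRACE_MATCH_NEXT);
 *	}
 *
 * Returning DTRACE_MATCH_DONE stops the walk after the current probe, and
 * DTRACE_MATCH_FAIL both stops it and causes dtrace_match() itself to return
 * DTRACE_MATCH_FAIL.
 */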
8124 
8125 /*
8126  * Return the function pointer dtrace_match_probe() should use to compare the
8127  * specified pattern with a string.  For NULL or empty patterns, we select
8128  * dtrace_match_nul().  For glob pattern strings, we use dtrace_match_glob().
8129  * For non-empty non-glob strings, we use dtrace_match_string().
8130  */
8131 static dtrace_probekey_f *
8132 dtrace_probekey_func(const char *p)
8133 {
8134 	char c;
8135 
8136 	if (p == NULL || *p == '\0')
8137 		return (&dtrace_match_nul);
8138 
8139 	while ((c = *p++) != '\0') {
8140 		if (c == '[' || c == '?' || c == '*' || c == '\\')
8141 			return (&dtrace_match_glob);
8142 	}
8143 
8144 	return (&dtrace_match_string);
8145 }
8146 
8147 /*
8148  * Build a probe comparison key for use with dtrace_match_probe() from the
8149  * given probe description.  By convention, a null key only matches anchored
8150  * probes: if each field is the empty string, reset dtpk_fmatch to
8151  * dtrace_match_nonzero().
8152  */
8153 static void
8154 dtrace_probekey(const dtrace_probedesc_t *pdp, dtrace_probekey_t *pkp)
8155 {
8156 	pkp->dtpk_prov = pdp->dtpd_provider;
8157 	pkp->dtpk_pmatch = dtrace_probekey_func(pdp->dtpd_provider);
8158 
8159 	pkp->dtpk_mod = pdp->dtpd_mod;
8160 	pkp->dtpk_mmatch = dtrace_probekey_func(pdp->dtpd_mod);
8161 
8162 	pkp->dtpk_func = pdp->dtpd_func;
8163 	pkp->dtpk_fmatch = dtrace_probekey_func(pdp->dtpd_func);
8164 
8165 	pkp->dtpk_name = pdp->dtpd_name;
8166 	pkp->dtpk_nmatch = dtrace_probekey_func(pdp->dtpd_name);
8167 
8168 	pkp->dtpk_id = pdp->dtpd_id;
8169 
8170 	if (pkp->dtpk_id == DTRACE_IDNONE &&
8171 	    pkp->dtpk_pmatch == &dtrace_match_nul &&
8172 	    pkp->dtpk_mmatch == &dtrace_match_nul &&
8173 	    pkp->dtpk_fmatch == &dtrace_match_nul &&
8174 	    pkp->dtpk_nmatch == &dtrace_match_nul)
8175 		pkp->dtpk_fmatch = &dtrace_match_nonzero;
8176 }
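
/*
 * Editor's note (illustrative): for a description such as "syscall:::entry",
 * dtrace_probekey() selects dtrace_match_string() for the provider and name
 * components and dtrace_match_nul() for the empty module and function
 * components; a description such as "fbt::*intr*:" would instead select
 * dtrace_match_glob() for its function component.
 */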
8177 
8178 /*
8179  * DTrace Provider-to-Framework API Functions
8180  *
8181  * These functions implement much of the Provider-to-Framework API, as
8182  * described in <sys/dtrace.h>.  The parts of the API not in this section are
8183  * the functions in the API for probe management (found below), and
8184  * dtrace_probe() itself (found above).
8185  */
8186 
8187 /*
8188  * Register the calling provider with the DTrace framework.  This should
8189  * generally be called by DTrace providers in their attach(9E) entry point.
8190  */
8191 int
8192 dtrace_register(const char *name, const dtrace_pattr_t *pap, uint32_t priv,
8193     cred_t *cr, const dtrace_pops_t *pops, void *arg, dtrace_provider_id_t *idp)
8194 {
8195 	dtrace_provider_t *provider;
8196 
8197 	if (name == NULL || pap == NULL || pops == NULL || idp == NULL) {
8198 		cmn_err(CE_WARN, "failed to register provider '%s': invalid "
8199 		    "arguments", name ? name : "<NULL>");
8200 		return (EINVAL);
8201 	}
8202 
8203 	if (name[0] == '\0' || dtrace_badname(name)) {
8204 		cmn_err(CE_WARN, "failed to register provider '%s': invalid "
8205 		    "provider name", name);
8206 		return (EINVAL);
8207 	}
8208 
8209 	if ((pops->dtps_provide == NULL && pops->dtps_provide_module == NULL) ||
8210 	    pops->dtps_enable == NULL || pops->dtps_disable == NULL ||
8211 	    pops->dtps_destroy == NULL ||
8212 	    ((pops->dtps_resume == NULL) != (pops->dtps_suspend == NULL))) {
8213 		cmn_err(CE_WARN, "failed to register provider '%s': invalid "
8214 		    "provider ops", name);
8215 		return (EINVAL);
8216 	}
8217 
8218 	if (dtrace_badattr(&pap->dtpa_provider) ||
8219 	    dtrace_badattr(&pap->dtpa_mod) ||
8220 	    dtrace_badattr(&pap->dtpa_func) ||
8221 	    dtrace_badattr(&pap->dtpa_name) ||
8222 	    dtrace_badattr(&pap->dtpa_args)) {
8223 		cmn_err(CE_WARN, "failed to register provider '%s': invalid "
8224 		    "provider attributes", name);
8225 		return (EINVAL);
8226 	}
8227 
8228 	if (priv & ~DTRACE_PRIV_ALL) {
8229 		cmn_err(CE_WARN, "failed to register provider '%s': invalid "
8230 		    "privilege attributes", name);
8231 		return (EINVAL);
8232 	}
8233 
8234 	if ((priv & DTRACE_PRIV_KERNEL) &&
8235 	    (priv & (DTRACE_PRIV_USER | DTRACE_PRIV_OWNER)) &&
8236 	    pops->dtps_mode == NULL) {
8237 		cmn_err(CE_WARN, "failed to register provider '%s': need "
8238 		    "dtps_mode() op for given privilege attributes", name);
8239 		return (EINVAL);
8240 	}
8241 
8242 	provider = kmem_zalloc(sizeof (dtrace_provider_t), KM_SLEEP);
8243 	provider->dtpv_name = kmem_alloc(strlen(name) + 1, KM_SLEEP);
8244 	(void) strcpy(provider->dtpv_name, name);
8245 
8246 	provider->dtpv_attr = *pap;
8247 	provider->dtpv_priv.dtpp_flags = priv;
8248 	if (cr != NULL) {
8249 		provider->dtpv_priv.dtpp_uid = crgetuid(cr);
8250 		provider->dtpv_priv.dtpp_zoneid = crgetzoneid(cr);
8251 	}
8252 	provider->dtpv_pops = *pops;
8253 
8254 	if (pops->dtps_provide == NULL) {
8255 		ASSERT(pops->dtps_provide_module != NULL);
8256 		provider->dtpv_pops.dtps_provide =
8257 		    (void (*)(void *, const dtrace_probedesc_t *))dtrace_nullop;
8258 	}
8259 
8260 	if (pops->dtps_provide_module == NULL) {
8261 		ASSERT(pops->dtps_provide != NULL);
8262 		provider->dtpv_pops.dtps_provide_module =
8263 		    (void (*)(void *, struct modctl *))dtrace_nullop;
8264 	}
8265 
8266 	if (pops->dtps_suspend == NULL) {
8267 		ASSERT(pops->dtps_resume == NULL);
8268 		provider->dtpv_pops.dtps_suspend =
8269 		    (void (*)(void *, dtrace_id_t, void *))dtrace_nullop;
8270 		provider->dtpv_pops.dtps_resume =
8271 		    (void (*)(void *, dtrace_id_t, void *))dtrace_nullop;
8272 	}
8273 
8274 	provider->dtpv_arg = arg;
8275 	*idp = (dtrace_provider_id_t)provider;
8276 
8277 	if (pops == &dtrace_provider_ops) {
8278 		ASSERT(MUTEX_HELD(&dtrace_provider_lock));
8279 		ASSERT(MUTEX_HELD(&dtrace_lock));
8280 		ASSERT(dtrace_anon.dta_enabling == NULL);
8281 
8282 		/*
8283 		 * We make sure that the DTrace provider is at the head of
8284 		 * the provider chain.
8285 		 */
8286 		provider->dtpv_next = dtrace_provider;
8287 		dtrace_provider = provider;
8288 		return (0);
8289 	}
8290 
8291 	mutex_enter(&dtrace_provider_lock);
8292 	mutex_enter(&dtrace_lock);
8293 
8294 	/*
8295 	 * If there is at least one provider registered, we'll add this
8296 	 * provider after the first provider.
8297 	 */
8298 	if (dtrace_provider != NULL) {
8299 		provider->dtpv_next = dtrace_provider->dtpv_next;
8300 		dtrace_provider->dtpv_next = provider;
8301 	} else {
8302 		dtrace_provider = provider;
8303 	}
8304 
8305 	if (dtrace_retained != NULL) {
8306 		dtrace_enabling_provide(provider);
8307 
8308 		/*
8309 		 * Now we need to call dtrace_enabling_matchall() -- which
8310 		 * will acquire cpu_lock and dtrace_lock.  We therefore need
8311 		 * to drop all of our locks before calling into it...
8312 		 */
8313 		mutex_exit(&dtrace_lock);
8314 		mutex_exit(&dtrace_provider_lock);
8315 		dtrace_enabling_matchall();
8316 
8317 		return (0);
8318 	}
8319 
8320 	mutex_exit(&dtrace_lock);
8321 	mutex_exit(&dtrace_provider_lock);
8322 
8323 	return (0);
8324 }
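
/*
 * Editor's sketch (assumption -- the provider name, pops and attribute table
 * below are illustrative only): a typical kernel provider registers itself
 * from its attach(9E) entry point and keeps the returned id for later
 * dtrace_probe_create() and dtrace_unregister() calls:
 *
 *	static dtrace_provider_id_t example_id;
 *
 *	if (dtrace_register("example", &example_attr, DTRACE_PRIV_KERNEL,
 *	    NULL, &example_pops, NULL, &example_id) != 0) {
 *		cmn_err(CE_NOTE, "example: dtrace_register() failed");
 *		return (DDI_FAILURE);
 *	}
 *
 * where example_pops supplies at least dtps_provide (or
 * dtps_provide_module), dtps_enable, dtps_disable and dtps_destroy, and
 * either both or neither of dtps_suspend and dtps_resume, as enforced above.
 */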
8325 
8326 /*
8327  * Unregister the specified provider from the DTrace framework.  This should
8328  * generally be called by DTrace providers in their detach(9E) entry point.
8329  */
8330 int
8331 dtrace_unregister(dtrace_provider_id_t id)
8332 {
8333 	dtrace_provider_t *old = (dtrace_provider_t *)id;
8334 	dtrace_provider_t *prev = NULL;
8335 	int i, self = 0, noreap = 0;
8336 	dtrace_probe_t *probe, *first = NULL;
8337 
8338 	if (old->dtpv_pops.dtps_enable ==
8339 	    (int (*)(void *, dtrace_id_t, void *))dtrace_enable_nullop) {
8340 		/*
8341 		 * If DTrace itself is the provider, we're called with locks
8342 		 * already held.
8343 		 */
8344 		ASSERT(old == dtrace_provider);
8345 		ASSERT(dtrace_devi != NULL);
8346 		ASSERT(MUTEX_HELD(&dtrace_provider_lock));
8347 		ASSERT(MUTEX_HELD(&dtrace_lock));
8348 		self = 1;
8349 
8350 		if (dtrace_provider->dtpv_next != NULL) {
8351 			/*
8352 			 * There's another provider here; return failure.
8353 			 */
8354 			return (EBUSY);
8355 		}
8356 	} else {
8357 		mutex_enter(&dtrace_provider_lock);
8358 		mutex_enter(&mod_lock);
8359 		mutex_enter(&dtrace_lock);
8360 	}
8361 
8362 	/*
8363 	 * If anyone has /dev/dtrace open, or if there are anonymous enabled
8364 	 * probes, we refuse to let providers slither away, unless this
8365 	 * provider has already been explicitly invalidated.
8366 	 */
8367 	if (!old->dtpv_defunct &&
8368 	    (dtrace_opens || (dtrace_anon.dta_state != NULL &&
8369 	    dtrace_anon.dta_state->dts_necbs > 0))) {
8370 		if (!self) {
8371 			mutex_exit(&dtrace_lock);
8372 			mutex_exit(&mod_lock);
8373 			mutex_exit(&dtrace_provider_lock);
8374 		}
8375 		return (EBUSY);
8376 	}
8377 
8378 	/*
8379 	 * Attempt to destroy the probes associated with this provider.
8380 	 */
8381 	for (i = 0; i < dtrace_nprobes; i++) {
8382 		if ((probe = dtrace_probes[i]) == NULL)
8383 			continue;
8384 
8385 		if (probe->dtpr_provider != old)
8386 			continue;
8387 
8388 		if (probe->dtpr_ecb == NULL)
8389 			continue;
8390 
8391 		/*
8392 		 * If we are trying to unregister a defunct provider, and the
8393 		 * provider was made defunct within the interval dictated by
8394 		 * dtrace_unregister_defunct_reap, we'll (asynchronously)
8395 		 * attempt to reap our enablings.  To denote that the provider
8396 		 * should reattempt to unregister itself at some point in the
8397 		 * future, we will return a differentiable error code (EAGAIN
8398 		 * instead of EBUSY) in this case.
8399 		 */
8400 		if (dtrace_gethrtime() - old->dtpv_defunct >
8401 		    dtrace_unregister_defunct_reap)
8402 			noreap = 1;
8403 
8404 		if (!self) {
8405 			mutex_exit(&dtrace_lock);
8406 			mutex_exit(&mod_lock);
8407 			mutex_exit(&dtrace_provider_lock);
8408 		}
8409 
8410 		if (noreap)
8411 			return (EBUSY);
8412 
8413 		(void) taskq_dispatch(dtrace_taskq,
8414 		    (task_func_t *)dtrace_enabling_reap, NULL, TQ_SLEEP);
8415 
8416 		return (EAGAIN);
8417 	}
8418 
8419 	/*
8420 	 * All of the probes for this provider are disabled; we can safely
8421 	 * remove all of them from their hash chains and from the probe array.
8422 	 */
8423 	for (i = 0; i < dtrace_nprobes; i++) {
8424 		if ((probe = dtrace_probes[i]) == NULL)
8425 			continue;
8426 
8427 		if (probe->dtpr_provider != old)
8428 			continue;
8429 
8430 		dtrace_probes[i] = NULL;
8431 
8432 		dtrace_hash_remove(dtrace_bymod, probe);
8433 		dtrace_hash_remove(dtrace_byfunc, probe);
8434 		dtrace_hash_remove(dtrace_byname, probe);
8435 
8436 		if (first == NULL) {
8437 			first = probe;
8438 			probe->dtpr_nextmod = NULL;
8439 		} else {
8440 			probe->dtpr_nextmod = first;
8441 			first = probe;
8442 		}
8443 	}
8444 
8445 	/*
8446 	 * The provider's probes have been removed from the hash chains and
8447 	 * from the probe array.  Now issue a dtrace_sync() to be sure that
8448 	 * everyone has cleared out from any probe array processing.
8449 	 */
8450 	dtrace_sync();
8451 
8452 	for (probe = first; probe != NULL; probe = first) {
8453 		first = probe->dtpr_nextmod;
8454 
8455 		old->dtpv_pops.dtps_destroy(old->dtpv_arg, probe->dtpr_id,
8456 		    probe->dtpr_arg);
8457 		kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1);
8458 		kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1);
8459 		kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1);
8460 		vmem_free(dtrace_arena, (void *)(uintptr_t)(probe->dtpr_id), 1);
8461 		kmem_free(probe, sizeof (dtrace_probe_t));
8462 	}
8463 
8464 	if ((prev = dtrace_provider) == old) {
8465 		ASSERT(self || dtrace_devi == NULL);
8466 		ASSERT(old->dtpv_next == NULL || dtrace_devi == NULL);
8467 		dtrace_provider = old->dtpv_next;
8468 	} else {
8469 		while (prev != NULL && prev->dtpv_next != old)
8470 			prev = prev->dtpv_next;
8471 
8472 		if (prev == NULL) {
8473 			panic("attempt to unregister non-existent "
8474 			    "dtrace provider %p\n", (void *)id);
8475 		}
8476 
8477 		prev->dtpv_next = old->dtpv_next;
8478 	}
8479 
8480 	if (!self) {
8481 		mutex_exit(&dtrace_lock);
8482 		mutex_exit(&mod_lock);
8483 		mutex_exit(&dtrace_provider_lock);
8484 	}
8485 
8486 	kmem_free(old->dtpv_name, strlen(old->dtpv_name) + 1);
8487 	kmem_free(old, sizeof (dtrace_provider_t));
8488 
8489 	return (0);
8490 }
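
/*
 * Editor's sketch (assumption): a provider typically calls this from its
 * detach(9E) entry point and refuses to detach while the framework still
 * holds it busy:
 *
 *	if (dtrace_unregister(example_id) != 0)
 *		return (DDI_FAILURE);
 *
 * A defunct provider that receives EAGAIN is expected to retry later, once
 * the asynchronously dispatched dtrace_enabling_reap() has had a chance to
 * release the enablings still holding its probes.
 */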
8491 
8492 /*
8493  * Invalidate the specified provider.  All subsequent probe lookups for the
8494  * specified provider will fail, but its probes will not be removed.
8495  */
8496 void
8497 dtrace_invalidate(dtrace_provider_id_t id)
8498 {
8499 	dtrace_provider_t *pvp = (dtrace_provider_t *)id;
8500 
8501 	ASSERT(pvp->dtpv_pops.dtps_enable !=
8502 	    (int (*)(void *, dtrace_id_t, void *))dtrace_enable_nullop);
8503 
8504 	mutex_enter(&dtrace_provider_lock);
8505 	mutex_enter(&dtrace_lock);
8506 
8507 	pvp->dtpv_defunct = dtrace_gethrtime();
8508 
8509 	mutex_exit(&dtrace_lock);
8510 	mutex_exit(&dtrace_provider_lock);
8511 }
8512 
8513 /*
8514  * Indicate whether or not DTrace has attached.
8515  */
8516 int
8517 dtrace_attached(void)
8518 {
8519 	/*
8520 	 * dtrace_provider will be non-NULL iff the DTrace driver has
8521 	 * attached.  (It's non-NULL because DTrace is always itself a
8522 	 * provider.)
8523 	 */
8524 	return (dtrace_provider != NULL);
8525 }
8526 
8527 /*
8528  * Remove all the unenabled probes for the given provider.  This function is
8529  * not unlike dtrace_unregister(), except that it doesn't remove the provider
8530  * -- just as many of its associated probes as it can.
8531  */
8532 int
8533 dtrace_condense(dtrace_provider_id_t id)
8534 {
8535 	dtrace_provider_t *prov = (dtrace_provider_t *)id;
8536 	int i;
8537 	dtrace_probe_t *probe;
8538 
8539 	/*
8540 	 * Make sure this isn't the dtrace provider itself.
8541 	 */
8542 	ASSERT(prov->dtpv_pops.dtps_enable !=
8543 	    (int (*)(void *, dtrace_id_t, void *))dtrace_enable_nullop);
8544 
8545 	mutex_enter(&dtrace_provider_lock);
8546 	mutex_enter(&dtrace_lock);
8547 
8548 	/*
8549 	 * Attempt to destroy the probes associated with this provider.
8550 	 */
8551 	for (i = 0; i < dtrace_nprobes; i++) {
8552 		if ((probe = dtrace_probes[i]) == NULL)
8553 			continue;
8554 
8555 		if (probe->dtpr_provider != prov)
8556 			continue;
8557 
8558 		if (probe->dtpr_ecb != NULL)
8559 			continue;
8560 
8561 		dtrace_probes[i] = NULL;
8562 
8563 		dtrace_hash_remove(dtrace_bymod, probe);
8564 		dtrace_hash_remove(dtrace_byfunc, probe);
8565 		dtrace_hash_remove(dtrace_byname, probe);
8566 
8567 		prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, i + 1,
8568 		    probe->dtpr_arg);
8569 		kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1);
8570 		kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1);
8571 		kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1);
8572 		kmem_free(probe, sizeof (dtrace_probe_t));
8573 		vmem_free(dtrace_arena, (void *)((uintptr_t)i + 1), 1);
8574 	}
8575 
8576 	mutex_exit(&dtrace_lock);
8577 	mutex_exit(&dtrace_provider_lock);
8578 
8579 	return (0);
8580 }
8581 
8582 /*
8583  * DTrace Probe Management Functions
8584  *
8585  * The functions in this section perform the DTrace probe management,
8586  * including functions to create probes, look-up probes, and call into the
8587  * providers to request that probes be provided.  Some of these functions are
8588  * in the Provider-to-Framework API; these functions can be identified by the
8589  * fact that they are not declared "static".
8590  */
8591 
8592 /*
8593  * Create a probe with the specified module name, function name, and name.
8594  */
8595 dtrace_id_t
8596 dtrace_probe_create(dtrace_provider_id_t prov, const char *mod,
8597     const char *func, const char *name, int aframes, void *arg)
8598 {
8599 	dtrace_probe_t *probe, **probes;
8600 	dtrace_provider_t *provider = (dtrace_provider_t *)prov;
8601 	dtrace_id_t id;
8602 
8603 	if (provider == dtrace_provider) {
8604 		ASSERT(MUTEX_HELD(&dtrace_lock));
8605 	} else {
8606 		mutex_enter(&dtrace_lock);
8607 	}
8608 
8609 	id = (dtrace_id_t)(uintptr_t)vmem_alloc(dtrace_arena, 1,
8610 	    VM_BESTFIT | VM_SLEEP);
8611 	probe = kmem_zalloc(sizeof (dtrace_probe_t), KM_SLEEP);
8612 
8613 	probe->dtpr_id = id;
8614 	probe->dtpr_gen = dtrace_probegen++;
8615 	probe->dtpr_mod = dtrace_strdup(mod);
8616 	probe->dtpr_func = dtrace_strdup(func);
8617 	probe->dtpr_name = dtrace_strdup(name);
8618 	probe->dtpr_arg = arg;
8619 	probe->dtpr_aframes = aframes;
8620 	probe->dtpr_provider = provider;
8621 
8622 	dtrace_hash_add(dtrace_bymod, probe);
8623 	dtrace_hash_add(dtrace_byfunc, probe);
8624 	dtrace_hash_add(dtrace_byname, probe);
8625 
8626 	if (id - 1 >= dtrace_nprobes) {
8627 		size_t osize = dtrace_nprobes * sizeof (dtrace_probe_t *);
8628 		size_t nsize = osize << 1;
8629 
8630 		if (nsize == 0) {
8631 			ASSERT(osize == 0);
8632 			ASSERT(dtrace_probes == NULL);
8633 			nsize = sizeof (dtrace_probe_t *);
8634 		}
8635 
8636 		probes = kmem_zalloc(nsize, KM_SLEEP);
8637 
8638 		if (dtrace_probes == NULL) {
8639 			ASSERT(osize == 0);
8640 			dtrace_probes = probes;
8641 			dtrace_nprobes = 1;
8642 		} else {
8643 			dtrace_probe_t **oprobes = dtrace_probes;
8644 
8645 			bcopy(oprobes, probes, osize);
8646 			dtrace_membar_producer();
8647 			dtrace_probes = probes;
8648 
8649 			dtrace_sync();
8650 
8651 			/*
8652 			 * All CPUs are now seeing the new probes array; we can
8653 			 * safely free the old array.
8654 			 */
8655 			kmem_free(oprobes, osize);
8656 			dtrace_nprobes <<= 1;
8657 		}
8658 
8659 		ASSERT(id - 1 < dtrace_nprobes);
8660 	}
8661 
8662 	ASSERT(dtrace_probes[id - 1] == NULL);
8663 	dtrace_probes[id - 1] = probe;
8664 
8665 	if (provider != dtrace_provider)
8666 		mutex_exit(&dtrace_lock);
8667 
8668 	return (id);
8669 }
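
/*
 * Editor's sketch (assumption; the module/function/name strings are
 * illustrative): providers typically guard probe creation with a lookup so
 * that repeated provide callbacks remain idempotent:
 *
 *	if (dtrace_probe_lookup(example_id, "genunix", "open", "entry") != 0)
 *		return;
 *
 *	(void) dtrace_probe_create(example_id, "genunix", "open", "entry",
 *	    0, NULL);
 *
 * The strings are copied via dtrace_strdup(), so the caller need not keep
 * them around; the returned id indexes dtrace_probes[id - 1].
 */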
8670 
8671 static dtrace_probe_t *
8672 dtrace_probe_lookup_id(dtrace_id_t id)
8673 {
8674 	ASSERT(MUTEX_HELD(&dtrace_lock));
8675 
8676 	if (id == 0 || id > dtrace_nprobes)
8677 		return (NULL);
8678 
8679 	return (dtrace_probes[id - 1]);
8680 }
8681 
8682 static int
8683 dtrace_probe_lookup_match(dtrace_probe_t *probe, void *arg)
8684 {
8685 	*((dtrace_id_t *)arg) = probe->dtpr_id;
8686 
8687 	return (DTRACE_MATCH_DONE);
8688 }
8689 
8690 /*
8691  * Look up a probe based on provider and one or more of module name, function
8692  * name and probe name.
8693  */
8694 dtrace_id_t
8695 dtrace_probe_lookup(dtrace_provider_id_t prid, const char *mod,
8696     const char *func, const char *name)
8697 {
8698 	dtrace_probekey_t pkey;
8699 	dtrace_id_t id;
8700 	int match;
8701 
8702 	pkey.dtpk_prov = ((dtrace_provider_t *)prid)->dtpv_name;
8703 	pkey.dtpk_pmatch = &dtrace_match_string;
8704 	pkey.dtpk_mod = mod;
8705 	pkey.dtpk_mmatch = mod ? &dtrace_match_string : &dtrace_match_nul;
8706 	pkey.dtpk_func = func;
8707 	pkey.dtpk_fmatch = func ? &dtrace_match_string : &dtrace_match_nul;
8708 	pkey.dtpk_name = name;
8709 	pkey.dtpk_nmatch = name ? &dtrace_match_string : &dtrace_match_nul;
8710 	pkey.dtpk_id = DTRACE_IDNONE;
8711 
8712 	mutex_enter(&dtrace_lock);
8713 	match = dtrace_match(&pkey, DTRACE_PRIV_ALL, 0, 0,
8714 	    dtrace_probe_lookup_match, &id);
8715 	mutex_exit(&dtrace_lock);
8716 
8717 	ASSERT(match == 1 || match == 0);
8718 	return (match ? id : 0);
8719 }
8720 
8721 /*
8722  * Returns the probe argument associated with the specified probe.
8723  */
8724 void *
8725 dtrace_probe_arg(dtrace_provider_id_t id, dtrace_id_t pid)
8726 {
8727 	dtrace_probe_t *probe;
8728 	void *rval = NULL;
8729 
8730 	mutex_enter(&dtrace_lock);
8731 
8732 	if ((probe = dtrace_probe_lookup_id(pid)) != NULL &&
8733 	    probe->dtpr_provider == (dtrace_provider_t *)id)
8734 		rval = probe->dtpr_arg;
8735 
8736 	mutex_exit(&dtrace_lock);
8737 
8738 	return (rval);
8739 }
8740 
8741 /*
8742  * Copy a probe into a probe description.
8743  */
8744 static void
8745 dtrace_probe_description(const dtrace_probe_t *prp, dtrace_probedesc_t *pdp)
8746 {
8747 	bzero(pdp, sizeof (dtrace_probedesc_t));
8748 	pdp->dtpd_id = prp->dtpr_id;
8749 
8750 	(void) strncpy(pdp->dtpd_provider,
8751 	    prp->dtpr_provider->dtpv_name, DTRACE_PROVNAMELEN - 1);
8752 
8753 	(void) strncpy(pdp->dtpd_mod, prp->dtpr_mod, DTRACE_MODNAMELEN - 1);
8754 	(void) strncpy(pdp->dtpd_func, prp->dtpr_func, DTRACE_FUNCNAMELEN - 1);
8755 	(void) strncpy(pdp->dtpd_name, prp->dtpr_name, DTRACE_NAMELEN - 1);
8756 }
8757 
8758 /*
8759  * Called to indicate that a probe -- or probes -- should be provided by a
8760  * specified provider.  If the specified description is NULL, the provider will
8761  * be told to provide all of its probes.  (This is done whenever a new
8762  * consumer comes along, or whenever a retained enabling is to be matched.) If
8763  * the specified description is non-NULL, the provider is given the
8764  * opportunity to dynamically provide the specified probe, allowing providers
8765  * to support the creation of probes on-the-fly.  (So-called _autocreated_
8766  * probes.)  If the provider is NULL, the operations will be applied to all
8767  * providers; if the provider is non-NULL the operations will only be applied
8768  * to the specified provider.  The dtrace_provider_lock must be held, and the
8769  * dtrace_lock must _not_ be held -- the provider's dtps_provide() operation
8770  * will need to grab the dtrace_lock when it reenters the framework through
8771  * dtrace_probe_lookup(), dtrace_probe_create(), etc.
8772  */
8773 static void
8774 dtrace_probe_provide(dtrace_probedesc_t *desc, dtrace_provider_t *prv)
8775 {
8776 	struct modctl *ctl;
8777 	int all = 0;
8778 
8779 	ASSERT(MUTEX_HELD(&dtrace_provider_lock));
8780 
8781 	if (prv == NULL) {
8782 		all = 1;
8783 		prv = dtrace_provider;
8784 	}
8785 
8786 	do {
8787 		/*
8788 		 * First, call the blanket provide operation.
8789 		 */
8790 		prv->dtpv_pops.dtps_provide(prv->dtpv_arg, desc);
8791 
8792 		/*
8793 		 * Now call the per-module provide operation.  We will grab
8794 		 * mod_lock to prevent the list from being modified.  Note
8795 		 * that this also prevents the mod_busy bits from changing.
8796 		 * (mod_busy can only be changed with mod_lock held.)
8797 		 */
8798 		mutex_enter(&mod_lock);
8799 
8800 		ctl = &modules;
8801 		do {
8802 			if (ctl->mod_busy || ctl->mod_mp == NULL)
8803 				continue;
8804 
8805 			prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl);
8806 
8807 		} while ((ctl = ctl->mod_next) != &modules);
8808 
8809 		mutex_exit(&mod_lock);
8810 	} while (all && (prv = prv->dtpv_next) != NULL);
8811 }
8812 
8813 /*
8814  * Iterate over each probe, and call the Framework-to-Provider API function
8815  * denoted by offs.
8816  */
8817 static void
8818 dtrace_probe_foreach(uintptr_t offs)
8819 {
8820 	dtrace_provider_t *prov;
8821 	void (*func)(void *, dtrace_id_t, void *);
8822 	dtrace_probe_t *probe;
8823 	dtrace_icookie_t cookie;
8824 	int i;
8825 
8826 	/*
8827 	 * We disable interrupts to walk through the probe array.  This is
8828 	 * safe -- the dtrace_sync() in dtrace_unregister() assures that we
8829 	 * won't see stale data.
8830 	 */
8831 	cookie = dtrace_interrupt_disable();
8832 
8833 	for (i = 0; i < dtrace_nprobes; i++) {
8834 		if ((probe = dtrace_probes[i]) == NULL)
8835 			continue;
8836 
8837 		if (probe->dtpr_ecb == NULL) {
8838 			/*
8839 			 * This probe isn't enabled -- don't call the function.
8840 			 */
8841 			continue;
8842 		}
8843 
8844 		prov = probe->dtpr_provider;
8845 		func = *((void(**)(void *, dtrace_id_t, void *))
8846 		    ((uintptr_t)&prov->dtpv_pops + offs));
8847 
8848 		func(prov->dtpv_arg, i + 1, probe->dtpr_arg);
8849 	}
8850 
8851 	dtrace_interrupt_enable(cookie);
8852 }
8853 
8854 static int
8855 dtrace_probe_enable(const dtrace_probedesc_t *desc, dtrace_enabling_t *enab)
8856 {
8857 	dtrace_probekey_t pkey;
8858 	uint32_t priv;
8859 	uid_t uid;
8860 	zoneid_t zoneid;
8861 
8862 	ASSERT(MUTEX_HELD(&dtrace_lock));
8863 	dtrace_ecb_create_cache = NULL;
8864 
8865 	if (desc == NULL) {
8866 		/*
8867 		 * If we're passed a NULL description, we're being asked to
8868 		 * create an ECB with a NULL probe.
8869 		 */
8870 		(void) dtrace_ecb_create_enable(NULL, enab);
8871 		return (0);
8872 	}
8873 
8874 	dtrace_probekey(desc, &pkey);
8875 	dtrace_cred2priv(enab->dten_vstate->dtvs_state->dts_cred.dcr_cred,
8876 	    &priv, &uid, &zoneid);
8877 
8878 	return (dtrace_match(&pkey, priv, uid, zoneid, dtrace_ecb_create_enable,
8879 	    enab));
8880 }
8881 
8882 /*
8883  * DTrace Helper Provider Functions
8884  */
8885 static void
8886 dtrace_dofattr2attr(dtrace_attribute_t *attr, const dof_attr_t dofattr)
8887 {
8888 	attr->dtat_name = DOF_ATTR_NAME(dofattr);
8889 	attr->dtat_data = DOF_ATTR_DATA(dofattr);
8890 	attr->dtat_class = DOF_ATTR_CLASS(dofattr);
8891 }
8892 
8893 static void
8894 dtrace_dofprov2hprov(dtrace_helper_provdesc_t *hprov,
8895     const dof_provider_t *dofprov, char *strtab)
8896 {
8897 	hprov->dthpv_provname = strtab + dofprov->dofpv_name;
8898 	dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_provider,
8899 	    dofprov->dofpv_provattr);
8900 	dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_mod,
8901 	    dofprov->dofpv_modattr);
8902 	dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_func,
8903 	    dofprov->dofpv_funcattr);
8904 	dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_name,
8905 	    dofprov->dofpv_nameattr);
8906 	dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_args,
8907 	    dofprov->dofpv_argsattr);
8908 }
8909 
8910 static void
8911 dtrace_helper_provide_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid)
8912 {
8913 	uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
8914 	dof_hdr_t *dof = (dof_hdr_t *)daddr;
8915 	dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec;
8916 	dof_provider_t *provider;
8917 	dof_probe_t *probe;
8918 	uint32_t *off, *enoff;
8919 	uint8_t *arg;
8920 	char *strtab;
8921 	uint_t i, nprobes;
8922 	dtrace_helper_provdesc_t dhpv;
8923 	dtrace_helper_probedesc_t dhpb;
8924 	dtrace_meta_t *meta = dtrace_meta_pid;
8925 	dtrace_mops_t *mops = &meta->dtm_mops;
8926 	void *parg;
8927 
8928 	provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
8929 	str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8930 	    provider->dofpv_strtab * dof->dofh_secsize);
8931 	prb_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8932 	    provider->dofpv_probes * dof->dofh_secsize);
8933 	arg_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8934 	    provider->dofpv_prargs * dof->dofh_secsize);
8935 	off_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8936 	    provider->dofpv_proffs * dof->dofh_secsize);
8937 
8938 	strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
8939 	off = (uint32_t *)(uintptr_t)(daddr + off_sec->dofs_offset);
8940 	arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset);
8941 	enoff = NULL;
8942 
8943 	/*
8944 	 * See dtrace_helper_provider_validate().
8945 	 */
8946 	if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 &&
8947 	    provider->dofpv_prenoffs != DOF_SECT_NONE) {
8948 		enoff_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8949 		    provider->dofpv_prenoffs * dof->dofh_secsize);
8950 		enoff = (uint32_t *)(uintptr_t)(daddr + enoff_sec->dofs_offset);
8951 	}
8952 
8953 	nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize;
8954 
8955 	/*
8956 	 * Create the provider.
8957 	 */
8958 	dtrace_dofprov2hprov(&dhpv, provider, strtab);
8959 
8960 	if ((parg = mops->dtms_provide_pid(meta->dtm_arg, &dhpv, pid)) == NULL)
8961 		return;
8962 
8963 	meta->dtm_count++;
8964 
8965 	/*
8966 	 * Create the probes.
8967 	 */
8968 	for (i = 0; i < nprobes; i++) {
8969 		probe = (dof_probe_t *)(uintptr_t)(daddr +
8970 		    prb_sec->dofs_offset + i * prb_sec->dofs_entsize);
8971 
8972 		dhpb.dthpb_mod = dhp->dofhp_mod;
8973 		dhpb.dthpb_func = strtab + probe->dofpr_func;
8974 		dhpb.dthpb_name = strtab + probe->dofpr_name;
8975 		dhpb.dthpb_base = probe->dofpr_addr;
8976 		dhpb.dthpb_offs = off + probe->dofpr_offidx;
8977 		dhpb.dthpb_noffs = probe->dofpr_noffs;
8978 		if (enoff != NULL) {
8979 			dhpb.dthpb_enoffs = enoff + probe->dofpr_enoffidx;
8980 			dhpb.dthpb_nenoffs = probe->dofpr_nenoffs;
8981 		} else {
8982 			dhpb.dthpb_enoffs = NULL;
8983 			dhpb.dthpb_nenoffs = 0;
8984 		}
8985 		dhpb.dthpb_args = arg + probe->dofpr_argidx;
8986 		dhpb.dthpb_nargc = probe->dofpr_nargc;
8987 		dhpb.dthpb_xargc = probe->dofpr_xargc;
8988 		dhpb.dthpb_ntypes = strtab + probe->dofpr_nargv;
8989 		dhpb.dthpb_xtypes = strtab + probe->dofpr_xargv;
8990 
8991 		mops->dtms_create_probe(meta->dtm_arg, parg, &dhpb);
8992 	}
8993 }
8994 
8995 static void
8996 dtrace_helper_provide(dof_helper_t *dhp, pid_t pid)
8997 {
8998 	uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
8999 	dof_hdr_t *dof = (dof_hdr_t *)daddr;
9000 	int i;
9001 
9002 	ASSERT(MUTEX_HELD(&dtrace_meta_lock));
9003 
9004 	for (i = 0; i < dof->dofh_secnum; i++) {
9005 		dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
9006 		    dof->dofh_secoff + i * dof->dofh_secsize);
9007 
9008 		if (sec->dofs_type != DOF_SECT_PROVIDER)
9009 			continue;
9010 
9011 		dtrace_helper_provide_one(dhp, sec, pid);
9012 	}
9013 
9014 	/*
9015 	 * We may have just created probes, so we must now rematch against
9016 	 * any retained enablings.  Note that this call will acquire both
9017 	 * cpu_lock and dtrace_lock; the fact that we are holding
9018 	 * dtrace_meta_lock now is what defines the ordering with respect to
9019 	 * these three locks.
9020 	 */
9021 	dtrace_enabling_matchall();
9022 }
9023 
9024 static void
9025 dtrace_helper_provider_remove_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid)
9026 {
9027 	uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
9028 	dof_hdr_t *dof = (dof_hdr_t *)daddr;
9029 	dof_sec_t *str_sec;
9030 	dof_provider_t *provider;
9031 	char *strtab;
9032 	dtrace_helper_provdesc_t dhpv;
9033 	dtrace_meta_t *meta = dtrace_meta_pid;
9034 	dtrace_mops_t *mops = &meta->dtm_mops;
9035 
9036 	provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
9037 	str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
9038 	    provider->dofpv_strtab * dof->dofh_secsize);
9039 
9040 	strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
9041 
9042 	/*
9043 	 * Create the provider.
9044 	 */
9045 	dtrace_dofprov2hprov(&dhpv, provider, strtab);
9046 
9047 	mops->dtms_remove_pid(meta->dtm_arg, &dhpv, pid);
9048 
9049 	meta->dtm_count--;
9050 }
9051 
9052 static void
9053 dtrace_helper_provider_remove(dof_helper_t *dhp, pid_t pid)
9054 {
9055 	uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
9056 	dof_hdr_t *dof = (dof_hdr_t *)daddr;
9057 	int i;
9058 
9059 	ASSERT(MUTEX_HELD(&dtrace_meta_lock));
9060 
9061 	for (i = 0; i < dof->dofh_secnum; i++) {
9062 		dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
9063 		    dof->dofh_secoff + i * dof->dofh_secsize);
9064 
9065 		if (sec->dofs_type != DOF_SECT_PROVIDER)
9066 			continue;
9067 
9068 		dtrace_helper_provider_remove_one(dhp, sec, pid);
9069 	}
9070 }
9071 
9072 /*
9073  * DTrace Meta Provider-to-Framework API Functions
9074  *
9075  * These functions implement the Meta Provider-to-Framework API, as described
9076  * in <sys/dtrace.h>.
9077  */
9078 int
9079 dtrace_meta_register(const char *name, const dtrace_mops_t *mops, void *arg,
9080     dtrace_meta_provider_id_t *idp)
9081 {
9082 	dtrace_meta_t *meta;
9083 	dtrace_helpers_t *help, *next;
9084 	int i;
9085 
9086 	*idp = DTRACE_METAPROVNONE;
9087 
9088 	/*
9089 	 * We strictly don't need the name, but we hold onto it for
9090 	 * debuggability. All hail error queues!
9091 	 */
9092 	if (name == NULL) {
9093 		cmn_err(CE_WARN, "failed to register meta-provider: "
9094 		    "invalid name");
9095 		return (EINVAL);
9096 	}
9097 
9098 	if (mops == NULL ||
9099 	    mops->dtms_create_probe == NULL ||
9100 	    mops->dtms_provide_pid == NULL ||
9101 	    mops->dtms_remove_pid == NULL) {
9102 		cmn_err(CE_WARN, "failed to register meta-provider %s: "
9103 		    "invalid ops", name);
9104 		return (EINVAL);
9105 	}
9106 
9107 	meta = kmem_zalloc(sizeof (dtrace_meta_t), KM_SLEEP);
9108 	meta->dtm_mops = *mops;
9109 	meta->dtm_name = kmem_alloc(strlen(name) + 1, KM_SLEEP);
9110 	(void) strcpy(meta->dtm_name, name);
9111 	meta->dtm_arg = arg;
9112 
9113 	mutex_enter(&dtrace_meta_lock);
9114 	mutex_enter(&dtrace_lock);
9115 
9116 	if (dtrace_meta_pid != NULL) {
9117 		mutex_exit(&dtrace_lock);
9118 		mutex_exit(&dtrace_meta_lock);
9119 		cmn_err(CE_WARN, "failed to register meta-provider %s: "
9120 		    "user-land meta-provider exists", name);
9121 		kmem_free(meta->dtm_name, strlen(meta->dtm_name) + 1);
9122 		kmem_free(meta, sizeof (dtrace_meta_t));
9123 		return (EINVAL);
9124 	}
9125 
9126 	dtrace_meta_pid = meta;
9127 	*idp = (dtrace_meta_provider_id_t)meta;
9128 
9129 	/*
9130 	 * If there are providers and probes ready to go, pass them
9131 	 * off to the new meta provider now.
9132 	 */
9133 
9134 	help = dtrace_deferred_pid;
9135 	dtrace_deferred_pid = NULL;
9136 
9137 	mutex_exit(&dtrace_lock);
9138 
9139 	while (help != NULL) {
9140 		for (i = 0; i < help->dthps_nprovs; i++) {
9141 			dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov,
9142 			    help->dthps_pid);
9143 		}
9144 
9145 		next = help->dthps_next;
9146 		help->dthps_next = NULL;
9147 		help->dthps_prev = NULL;
9148 		help->dthps_deferred = 0;
9149 		help = next;
9150 	}
9151 
9152 	mutex_exit(&dtrace_meta_lock);
9153 
9154 	return (0);
9155 }
9156 
9157 int
9158 dtrace_meta_unregister(dtrace_meta_provider_id_t id)
9159 {
9160 	dtrace_meta_t **pp, *old = (dtrace_meta_t *)id;
9161 
9162 	mutex_enter(&dtrace_meta_lock);
9163 	mutex_enter(&dtrace_lock);
9164 
9165 	if (old == dtrace_meta_pid) {
9166 		pp = &dtrace_meta_pid;
9167 	} else {
9168 		panic("attempt to unregister non-existent "
9169 		    "dtrace meta-provider %p\n", (void *)old);
9170 	}
9171 
9172 	if (old->dtm_count != 0) {
9173 		mutex_exit(&dtrace_lock);
9174 		mutex_exit(&dtrace_meta_lock);
9175 		return (EBUSY);
9176 	}
9177 
9178 	*pp = NULL;
9179 
9180 	mutex_exit(&dtrace_lock);
9181 	mutex_exit(&dtrace_meta_lock);
9182 
9183 	kmem_free(old->dtm_name, strlen(old->dtm_name) + 1);
9184 	kmem_free(old, sizeof (dtrace_meta_t));
9185 
9186 	return (0);
9187 }
9188 
9189 
9190 /*
9191  * DTrace DIF Object Functions
9192  */
9193 static int
9194 dtrace_difo_err(uint_t pc, const char *format, ...)
9195 {
9196 	if (dtrace_err_verbose) {
9197 		va_list alist;
9198 
9199 		(void) uprintf("dtrace DIF object error: [%u]: ", pc);
9200 		va_start(alist, format);
9201 		(void) vuprintf(format, alist);
9202 		va_end(alist);
9203 	}
9204 
9205 #ifdef DTRACE_ERRDEBUG
9206 	dtrace_errdebug(format);
9207 #endif
9208 	return (1);
9209 }
9210 
9211 /*
9212  * Validate a DTrace DIF object by checking the IR instructions.  The following
9213  * rules are currently enforced by dtrace_difo_validate():
9214  *
9215  * 1. Each instruction must have a valid opcode
9216  * 2. Each register, string, variable, or subroutine reference must be valid
9217  * 3. No instruction can modify register %r0 (must be zero)
9218  * 4. All instruction reserved bits must be set to zero
9219  * 5. The last instruction must be a "ret" instruction
9220  * 6. All branch targets must reference a valid instruction _after_ the branch
9221  */
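/*
 * As a hypothetical illustration of rules 5 and 6: a DIFO whose final
 * instruction is not a "ret" fails with "expected 'ret' as last DIF
 * instruction", and a branch at pc 5 whose label is 3 fails with
 * "backward branch to 3" -- branch targets must lie strictly after the
 * branch itself.
 */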
9222 static int
9223 dtrace_difo_validate(dtrace_difo_t *dp, dtrace_vstate_t *vstate, uint_t nregs,
9224     cred_t *cr)
9225 {
9226 	int err = 0, i;
9227 	int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err;
9228 	int kcheckload;
9229 	uint_t pc;
9230 	int maxglobal = -1, maxlocal = -1, maxtlocal = -1;
9231 
9232 	kcheckload = cr == NULL ||
9233 	    (vstate->dtvs_state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) == 0;
9234 
9235 	dp->dtdo_destructive = 0;
9236 
9237 	for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) {
9238 		dif_instr_t instr = dp->dtdo_buf[pc];
9239 
9240 		uint_t r1 = DIF_INSTR_R1(instr);
9241 		uint_t r2 = DIF_INSTR_R2(instr);
9242 		uint_t rd = DIF_INSTR_RD(instr);
9243 		uint_t rs = DIF_INSTR_RS(instr);
9244 		uint_t label = DIF_INSTR_LABEL(instr);
9245 		uint_t v = DIF_INSTR_VAR(instr);
9246 		uint_t subr = DIF_INSTR_SUBR(instr);
9247 		uint_t type = DIF_INSTR_TYPE(instr);
9248 		uint_t op = DIF_INSTR_OP(instr);
9249 
9250 		switch (op) {
9251 		case DIF_OP_OR:
9252 		case DIF_OP_XOR:
9253 		case DIF_OP_AND:
9254 		case DIF_OP_SLL:
9255 		case DIF_OP_SRL:
9256 		case DIF_OP_SRA:
9257 		case DIF_OP_SUB:
9258 		case DIF_OP_ADD:
9259 		case DIF_OP_MUL:
9260 		case DIF_OP_SDIV:
9261 		case DIF_OP_UDIV:
9262 		case DIF_OP_SREM:
9263 		case DIF_OP_UREM:
9264 		case DIF_OP_COPYS:
9265 			if (r1 >= nregs)
9266 				err += efunc(pc, "invalid register %u\n", r1);
9267 			if (r2 >= nregs)
9268 				err += efunc(pc, "invalid register %u\n", r2);
9269 			if (rd >= nregs)
9270 				err += efunc(pc, "invalid register %u\n", rd);
9271 			if (rd == 0)
9272 				err += efunc(pc, "cannot write to %r0\n");
9273 			break;
9274 		case DIF_OP_NOT:
9275 		case DIF_OP_MOV:
9276 		case DIF_OP_ALLOCS:
9277 			if (r1 >= nregs)
9278 				err += efunc(pc, "invalid register %u\n", r1);
9279 			if (r2 != 0)
9280 				err += efunc(pc, "non-zero reserved bits\n");
9281 			if (rd >= nregs)
9282 				err += efunc(pc, "invalid register %u\n", rd);
9283 			if (rd == 0)
9284 				err += efunc(pc, "cannot write to %r0\n");
9285 			break;
9286 		case DIF_OP_LDSB:
9287 		case DIF_OP_LDSH:
9288 		case DIF_OP_LDSW:
9289 		case DIF_OP_LDUB:
9290 		case DIF_OP_LDUH:
9291 		case DIF_OP_LDUW:
9292 		case DIF_OP_LDX:
9293 			if (r1 >= nregs)
9294 				err += efunc(pc, "invalid register %u\n", r1);
9295 			if (r2 != 0)
9296 				err += efunc(pc, "non-zero reserved bits\n");
9297 			if (rd >= nregs)
9298 				err += efunc(pc, "invalid register %u\n", rd);
9299 			if (rd == 0)
9300 				err += efunc(pc, "cannot write to %r0\n");
9301 			if (kcheckload)
9302 				dp->dtdo_buf[pc] = DIF_INSTR_LOAD(op +
9303 				    DIF_OP_RLDSB - DIF_OP_LDSB, r1, rd);
9304 			break;
9305 		case DIF_OP_RLDSB:
9306 		case DIF_OP_RLDSH:
9307 		case DIF_OP_RLDSW:
9308 		case DIF_OP_RLDUB:
9309 		case DIF_OP_RLDUH:
9310 		case DIF_OP_RLDUW:
9311 		case DIF_OP_RLDX:
9312 			if (r1 >= nregs)
9313 				err += efunc(pc, "invalid register %u\n", r1);
9314 			if (r2 != 0)
9315 				err += efunc(pc, "non-zero reserved bits\n");
9316 			if (rd >= nregs)
9317 				err += efunc(pc, "invalid register %u\n", rd);
9318 			if (rd == 0)
9319 				err += efunc(pc, "cannot write to %r0\n");
9320 			break;
9321 		case DIF_OP_ULDSB:
9322 		case DIF_OP_ULDSH:
9323 		case DIF_OP_ULDSW:
9324 		case DIF_OP_ULDUB:
9325 		case DIF_OP_ULDUH:
9326 		case DIF_OP_ULDUW:
9327 		case DIF_OP_ULDX:
9328 			if (r1 >= nregs)
9329 				err += efunc(pc, "invalid register %u\n", r1);
9330 			if (r2 != 0)
9331 				err += efunc(pc, "non-zero reserved bits\n");
9332 			if (rd >= nregs)
9333 				err += efunc(pc, "invalid register %u\n", rd);
9334 			if (rd == 0)
9335 				err += efunc(pc, "cannot write to %r0\n");
9336 			break;
9337 		case DIF_OP_STB:
9338 		case DIF_OP_STH:
9339 		case DIF_OP_STW:
9340 		case DIF_OP_STX:
9341 			if (r1 >= nregs)
9342 				err += efunc(pc, "invalid register %u\n", r1);
9343 			if (r2 != 0)
9344 				err += efunc(pc, "non-zero reserved bits\n");
9345 			if (rd >= nregs)
9346 				err += efunc(pc, "invalid register %u\n", rd);
9347 			if (rd == 0)
9348 				err += efunc(pc, "cannot write to 0 address\n");
9349 			break;
9350 		case DIF_OP_CMP:
9351 		case DIF_OP_SCMP:
9352 			if (r1 >= nregs)
9353 				err += efunc(pc, "invalid register %u\n", r1);
9354 			if (r2 >= nregs)
9355 				err += efunc(pc, "invalid register %u\n", r2);
9356 			if (rd != 0)
9357 				err += efunc(pc, "non-zero reserved bits\n");
9358 			break;
9359 		case DIF_OP_TST:
9360 			if (r1 >= nregs)
9361 				err += efunc(pc, "invalid register %u\n", r1);
9362 			if (r2 != 0 || rd != 0)
9363 				err += efunc(pc, "non-zero reserved bits\n");
9364 			break;
9365 		case DIF_OP_BA:
9366 		case DIF_OP_BE:
9367 		case DIF_OP_BNE:
9368 		case DIF_OP_BG:
9369 		case DIF_OP_BGU:
9370 		case DIF_OP_BGE:
9371 		case DIF_OP_BGEU:
9372 		case DIF_OP_BL:
9373 		case DIF_OP_BLU:
9374 		case DIF_OP_BLE:
9375 		case DIF_OP_BLEU:
9376 			if (label >= dp->dtdo_len) {
9377 				err += efunc(pc, "invalid branch target %u\n",
9378 				    label);
9379 			}
9380 			if (label <= pc) {
9381 				err += efunc(pc, "backward branch to %u\n",
9382 				    label);
9383 			}
9384 			break;
9385 		case DIF_OP_RET:
9386 			if (r1 != 0 || r2 != 0)
9387 				err += efunc(pc, "non-zero reserved bits\n");
9388 			if (rd >= nregs)
9389 				err += efunc(pc, "invalid register %u\n", rd);
9390 			break;
9391 		case DIF_OP_NOP:
9392 		case DIF_OP_POPTS:
9393 		case DIF_OP_FLUSHTS:
9394 			if (r1 != 0 || r2 != 0 || rd != 0)
9395 				err += efunc(pc, "non-zero reserved bits\n");
9396 			break;
9397 		case DIF_OP_SETX:
9398 			if (DIF_INSTR_INTEGER(instr) >= dp->dtdo_intlen) {
9399 				err += efunc(pc, "invalid integer ref %u\n",
9400 				    DIF_INSTR_INTEGER(instr));
9401 			}
9402 			if (rd >= nregs)
9403 				err += efunc(pc, "invalid register %u\n", rd);
9404 			if (rd == 0)
9405 				err += efunc(pc, "cannot write to %r0\n");
9406 			break;
9407 		case DIF_OP_SETS:
9408 			if (DIF_INSTR_STRING(instr) >= dp->dtdo_strlen) {
9409 				err += efunc(pc, "invalid string ref %u\n",
9410 				    DIF_INSTR_STRING(instr));
9411 			}
9412 			if (rd >= nregs)
9413 				err += efunc(pc, "invalid register %u\n", rd);
9414 			if (rd == 0)
9415 				err += efunc(pc, "cannot write to %r0\n");
9416 			break;
9417 		case DIF_OP_LDGA:
9418 		case DIF_OP_LDTA:
9419 			if (r1 > DIF_VAR_ARRAY_MAX)
9420 				err += efunc(pc, "invalid array %u\n", r1);
9421 			if (r2 >= nregs)
9422 				err += efunc(pc, "invalid register %u\n", r2);
9423 			if (rd >= nregs)
9424 				err += efunc(pc, "invalid register %u\n", rd);
9425 			if (rd == 0)
9426 				err += efunc(pc, "cannot write to %r0\n");
9427 			break;
9428 		case DIF_OP_STGA:
9429 			if (r1 > DIF_VAR_ARRAY_MAX)
9430 				err += efunc(pc, "invalid array %u\n", r1);
9431 			if (r2 >= nregs)
9432 				err += efunc(pc, "invalid register %u\n", r2);
9433 			if (rd >= nregs)
9434 				err += efunc(pc, "invalid register %u\n", rd);
9435 			dp->dtdo_destructive = 1;
9436 			break;
9437 		case DIF_OP_LDGS:
9438 		case DIF_OP_LDTS:
9439 		case DIF_OP_LDLS:
9440 		case DIF_OP_LDGAA:
9441 		case DIF_OP_LDTAA:
9442 			if (v < DIF_VAR_OTHER_MIN || v > DIF_VAR_OTHER_MAX)
9443 				err += efunc(pc, "invalid variable %u\n", v);
9444 			if (rd >= nregs)
9445 				err += efunc(pc, "invalid register %u\n", rd);
9446 			if (rd == 0)
9447 				err += efunc(pc, "cannot write to %r0\n");
9448 			break;
9449 		case DIF_OP_STGS:
9450 		case DIF_OP_STTS:
9451 		case DIF_OP_STLS:
9452 		case DIF_OP_STGAA:
9453 		case DIF_OP_STTAA:
9454 			if (v < DIF_VAR_OTHER_UBASE || v > DIF_VAR_OTHER_MAX)
9455 				err += efunc(pc, "invalid variable %u\n", v);
9456 			if (rs >= nregs)
9457 				err += efunc(pc, "invalid register %u\n", rs);
9458 			break;
9459 		case DIF_OP_CALL:
9460 			if (subr > DIF_SUBR_MAX)
9461 				err += efunc(pc, "invalid subr %u\n", subr);
9462 			if (rd >= nregs)
9463 				err += efunc(pc, "invalid register %u\n", rd);
9464 			if (rd == 0)
9465 				err += efunc(pc, "cannot write to %r0\n");
9466 
9467 			if (subr == DIF_SUBR_COPYOUT ||
9468 			    subr == DIF_SUBR_COPYOUTSTR) {
9469 				dp->dtdo_destructive = 1;
9470 			}
9471 
9472 			if (subr == DIF_SUBR_GETF) {
9473 				/*
9474 				 * If we have a getf() we need to record that
9475 				 * in our state.  Note that our state can be
9476 				 * NULL if this is a helper -- but in that
9477 				 * case, the call to getf() is itself illegal,
9478 				 * and will be caught (slightly later) when
9479 				 * the helper is validated.
9480 				 */
9481 				if (vstate->dtvs_state != NULL)
9482 					vstate->dtvs_state->dts_getf++;
9483 			}
9484 
9485 			break;
9486 		case DIF_OP_PUSHTR:
9487 			if (type != DIF_TYPE_STRING && type != DIF_TYPE_CTF)
9488 				err += efunc(pc, "invalid ref type %u\n", type);
9489 			if (r2 >= nregs)
9490 				err += efunc(pc, "invalid register %u\n", r2);
9491 			if (rs >= nregs)
9492 				err += efunc(pc, "invalid register %u\n", rs);
9493 			break;
9494 		case DIF_OP_PUSHTV:
9495 			if (type != DIF_TYPE_CTF)
9496 				err += efunc(pc, "invalid val type %u\n", type);
9497 			if (r2 >= nregs)
9498 				err += efunc(pc, "invalid register %u\n", r2);
9499 			if (rs >= nregs)
9500 				err += efunc(pc, "invalid register %u\n", rs);
9501 			break;
9502 		default:
9503 			err += efunc(pc, "invalid opcode %u\n",
9504 			    DIF_INSTR_OP(instr));
9505 		}
9506 	}
9507 
9508 	if (dp->dtdo_len != 0 &&
9509 	    DIF_INSTR_OP(dp->dtdo_buf[dp->dtdo_len - 1]) != DIF_OP_RET) {
9510 		err += efunc(dp->dtdo_len - 1,
9511 		    "expected 'ret' as last DIF instruction\n");
9512 	}
9513 
9514 	if (!(dp->dtdo_rtype.dtdt_flags & (DIF_TF_BYREF | DIF_TF_BYUREF))) {
9515 		/*
9516 		 * If we're not returning by reference, the size must be either
9517 		 * 0 or the size of one of the base types.
9518 		 */
9519 		switch (dp->dtdo_rtype.dtdt_size) {
9520 		case 0:
9521 		case sizeof (uint8_t):
9522 		case sizeof (uint16_t):
9523 		case sizeof (uint32_t):
9524 		case sizeof (uint64_t):
9525 			break;
9526 
9527 		default:
9528 			err += efunc(dp->dtdo_len - 1, "bad return size\n");
9529 		}
9530 	}
9531 
9532 	for (i = 0; i < dp->dtdo_varlen && err == 0; i++) {
9533 		dtrace_difv_t *v = &dp->dtdo_vartab[i], *existing = NULL;
9534 		dtrace_diftype_t *vt, *et;
9535 		uint_t id, ndx;
9536 
9537 		if (v->dtdv_scope != DIFV_SCOPE_GLOBAL &&
9538 		    v->dtdv_scope != DIFV_SCOPE_THREAD &&
9539 		    v->dtdv_scope != DIFV_SCOPE_LOCAL) {
9540 			err += efunc(i, "unrecognized variable scope %d\n",
9541 			    v->dtdv_scope);
9542 			break;
9543 		}
9544 
9545 		if (v->dtdv_kind != DIFV_KIND_ARRAY &&
9546 		    v->dtdv_kind != DIFV_KIND_SCALAR) {
9547 			err += efunc(i, "unrecognized variable type %d\n",
9548 			    v->dtdv_kind);
9549 			break;
9550 		}
9551 
9552 		if ((id = v->dtdv_id) > DIF_VARIABLE_MAX) {
9553 			err += efunc(i, "%d exceeds variable id limit\n", id);
9554 			break;
9555 		}
9556 
9557 		if (id < DIF_VAR_OTHER_UBASE)
9558 			continue;
9559 
9560 		/*
9561 		 * For user-defined variables, we need to check that this
9562 		 * definition is identical to any previous definition that we
9563 		 * encountered.
9564 		 */
9565 		ndx = id - DIF_VAR_OTHER_UBASE;
9566 
9567 		switch (v->dtdv_scope) {
9568 		case DIFV_SCOPE_GLOBAL:
9569 			if (maxglobal == -1 || ndx > maxglobal)
9570 				maxglobal = ndx;
9571 
9572 			if (ndx < vstate->dtvs_nglobals) {
9573 				dtrace_statvar_t *svar;
9574 
9575 				if ((svar = vstate->dtvs_globals[ndx]) != NULL)
9576 					existing = &svar->dtsv_var;
9577 			}
9578 
9579 			break;
9580 
9581 		case DIFV_SCOPE_THREAD:
9582 			if (maxtlocal == -1 || ndx > maxtlocal)
9583 				maxtlocal = ndx;
9584 
9585 			if (ndx < vstate->dtvs_ntlocals)
9586 				existing = &vstate->dtvs_tlocals[ndx];
9587 			break;
9588 
9589 		case DIFV_SCOPE_LOCAL:
9590 			if (maxlocal == -1 || ndx > maxlocal)
9591 				maxlocal = ndx;
9592 
9593 			if (ndx < vstate->dtvs_nlocals) {
9594 				dtrace_statvar_t *svar;
9595 
9596 				if ((svar = vstate->dtvs_locals[ndx]) != NULL)
9597 					existing = &svar->dtsv_var;
9598 			}
9599 
9600 			break;
9601 		}
9602 
9603 		vt = &v->dtdv_type;
9604 
9605 		if (vt->dtdt_flags & DIF_TF_BYREF) {
9606 			if (vt->dtdt_size == 0) {
9607 				err += efunc(i, "zero-sized variable\n");
9608 				break;
9609 			}
9610 
9611 			if ((v->dtdv_scope == DIFV_SCOPE_GLOBAL ||
9612 			    v->dtdv_scope == DIFV_SCOPE_LOCAL) &&
9613 			    vt->dtdt_size > dtrace_statvar_maxsize) {
9614 				err += efunc(i, "oversized by-ref static\n");
9615 				break;
9616 			}
9617 		}
9618 
9619 		if (existing == NULL || existing->dtdv_id == 0)
9620 			continue;
9621 
9622 		ASSERT(existing->dtdv_id == v->dtdv_id);
9623 		ASSERT(existing->dtdv_scope == v->dtdv_scope);
9624 
9625 		if (existing->dtdv_kind != v->dtdv_kind)
9626 			err += efunc(i, "%d changed variable kind\n", id);
9627 
9628 		et = &existing->dtdv_type;
9629 
9630 		if (vt->dtdt_flags != et->dtdt_flags) {
9631 			err += efunc(i, "%d changed variable type flags\n", id);
9632 			break;
9633 		}
9634 
9635 		if (vt->dtdt_size != 0 && vt->dtdt_size != et->dtdt_size) {
9636 			err += efunc(i, "%d changed variable type size\n", id);
9637 			break;
9638 		}
9639 	}
9640 
9641 	for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) {
9642 		dif_instr_t instr = dp->dtdo_buf[pc];
9643 
9644 		uint_t v = DIF_INSTR_VAR(instr);
9645 		uint_t op = DIF_INSTR_OP(instr);
9646 
9647 		switch (op) {
9648 		case DIF_OP_LDGS:
9649 		case DIF_OP_LDGAA:
9650 		case DIF_OP_STGS:
9651 		case DIF_OP_STGAA:
9652 			if (v > DIF_VAR_OTHER_UBASE + maxglobal)
9653 				err += efunc(pc, "invalid variable %u\n", v);
9654 			break;
9655 		case DIF_OP_LDTS:
9656 		case DIF_OP_LDTAA:
9657 		case DIF_OP_STTS:
9658 		case DIF_OP_STTAA:
9659 			if (v > DIF_VAR_OTHER_UBASE + maxtlocal)
9660 				err += efunc(pc, "invalid variable %u\n", v);
9661 			break;
9662 		case DIF_OP_LDLS:
9663 		case DIF_OP_STLS:
9664 			if (v > DIF_VAR_OTHER_UBASE + maxlocal)
9665 				err += efunc(pc, "invalid variable %u\n", v);
9666 			break;
9667 		default:
9668 			break;
9669 		}
9670 	}
9671 
9672 	return (err);
9673 }
9674 
9675 /*
9676  * Validate a DTrace DIF object that is to be used as a helper.  Helpers
9677  * are much more constrained than normal DIFOs.  Specifically, they may
9678  * not:
9679  *
9680  * 1. Make calls to subroutines other than copyin(), copyinstr() or
9681  *    miscellaneous string routines
9682  * 2. Access DTrace variables other than the args[] array, and the
9683  *    curthread, pid, ppid, tid, execname, zonename, uid and gid variables.
9684  * 3. Have thread-local variables.
9685  * 4. Have dynamic variables.
9686  */
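/*
 * For example (illustrative only): a helper that stores to a thread-local
 * variable compiles to a DIF_OP_STTS, which is rejected below as an
 * "illegal dynamic variable store", and a call to copyout() is rejected as
 * an "invalid subr" because only the subroutines enumerated under
 * DIF_OP_CALL are permitted.
 */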
9687 static int
9688 dtrace_difo_validate_helper(dtrace_difo_t *dp)
9689 {
9690 	int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err;
9691 	int err = 0;
9692 	uint_t pc;
9693 
9694 	for (pc = 0; pc < dp->dtdo_len; pc++) {
9695 		dif_instr_t instr = dp->dtdo_buf[pc];
9696 
9697 		uint_t v = DIF_INSTR_VAR(instr);
9698 		uint_t subr = DIF_INSTR_SUBR(instr);
9699 		uint_t op = DIF_INSTR_OP(instr);
9700 
9701 		switch (op) {
9702 		case DIF_OP_OR:
9703 		case DIF_OP_XOR:
9704 		case DIF_OP_AND:
9705 		case DIF_OP_SLL:
9706 		case DIF_OP_SRL:
9707 		case DIF_OP_SRA:
9708 		case DIF_OP_SUB:
9709 		case DIF_OP_ADD:
9710 		case DIF_OP_MUL:
9711 		case DIF_OP_SDIV:
9712 		case DIF_OP_UDIV:
9713 		case DIF_OP_SREM:
9714 		case DIF_OP_UREM:
9715 		case DIF_OP_COPYS:
9716 		case DIF_OP_NOT:
9717 		case DIF_OP_MOV:
9718 		case DIF_OP_RLDSB:
9719 		case DIF_OP_RLDSH:
9720 		case DIF_OP_RLDSW:
9721 		case DIF_OP_RLDUB:
9722 		case DIF_OP_RLDUH:
9723 		case DIF_OP_RLDUW:
9724 		case DIF_OP_RLDX:
9725 		case DIF_OP_ULDSB:
9726 		case DIF_OP_ULDSH:
9727 		case DIF_OP_ULDSW:
9728 		case DIF_OP_ULDUB:
9729 		case DIF_OP_ULDUH:
9730 		case DIF_OP_ULDUW:
9731 		case DIF_OP_ULDX:
9732 		case DIF_OP_STB:
9733 		case DIF_OP_STH:
9734 		case DIF_OP_STW:
9735 		case DIF_OP_STX:
9736 		case DIF_OP_ALLOCS:
9737 		case DIF_OP_CMP:
9738 		case DIF_OP_SCMP:
9739 		case DIF_OP_TST:
9740 		case DIF_OP_BA:
9741 		case DIF_OP_BE:
9742 		case DIF_OP_BNE:
9743 		case DIF_OP_BG:
9744 		case DIF_OP_BGU:
9745 		case DIF_OP_BGE:
9746 		case DIF_OP_BGEU:
9747 		case DIF_OP_BL:
9748 		case DIF_OP_BLU:
9749 		case DIF_OP_BLE:
9750 		case DIF_OP_BLEU:
9751 		case DIF_OP_RET:
9752 		case DIF_OP_NOP:
9753 		case DIF_OP_POPTS:
9754 		case DIF_OP_FLUSHTS:
9755 		case DIF_OP_SETX:
9756 		case DIF_OP_SETS:
9757 		case DIF_OP_LDGA:
9758 		case DIF_OP_LDLS:
9759 		case DIF_OP_STGS:
9760 		case DIF_OP_STLS:
9761 		case DIF_OP_PUSHTR:
9762 		case DIF_OP_PUSHTV:
9763 			break;
9764 
9765 		case DIF_OP_LDGS:
9766 			if (v >= DIF_VAR_OTHER_UBASE)
9767 				break;
9768 
9769 			if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9)
9770 				break;
9771 
9772 			if (v == DIF_VAR_CURTHREAD || v == DIF_VAR_PID ||
9773 			    v == DIF_VAR_PPID || v == DIF_VAR_TID ||
9774 			    v == DIF_VAR_EXECNAME || v == DIF_VAR_ZONENAME ||
9775 			    v == DIF_VAR_UID || v == DIF_VAR_GID)
9776 				break;
9777 
9778 			err += efunc(pc, "illegal variable %u\n", v);
9779 			break;
9780 
9781 		case DIF_OP_LDTA:
9782 			if (v < DIF_VAR_OTHER_UBASE) {
9783 				err += efunc(pc, "illegal variable load\n");
9784 				break;
9785 			}
9786 			/* FALLTHROUGH */
9787 		case DIF_OP_LDTS:
9788 		case DIF_OP_LDGAA:
9789 		case DIF_OP_LDTAA:
9790 			err += efunc(pc, "illegal dynamic variable load\n");
9791 			break;
9792 
9793 		case DIF_OP_STGA:
9794 			if (v < DIF_VAR_OTHER_UBASE) {
9795 				err += efunc(pc, "illegal variable store\n");
9796 				break;
9797 			}
9798 			/* FALLTHROUGH */
9799 		case DIF_OP_STTS:
9800 		case DIF_OP_STGAA:
9801 		case DIF_OP_STTAA:
9802 			err += efunc(pc, "illegal dynamic variable store\n");
9803 			break;
9804 
9805 		case DIF_OP_CALL:
9806 			if (subr == DIF_SUBR_ALLOCA ||
9807 			    subr == DIF_SUBR_BCOPY ||
9808 			    subr == DIF_SUBR_COPYIN ||
9809 			    subr == DIF_SUBR_COPYINTO ||
9810 			    subr == DIF_SUBR_COPYINSTR ||
9811 			    subr == DIF_SUBR_INDEX ||
9812 			    subr == DIF_SUBR_INET_NTOA ||
9813 			    subr == DIF_SUBR_INET_NTOA6 ||
9814 			    subr == DIF_SUBR_INET_NTOP ||
9815 			    subr == DIF_SUBR_JSON ||
9816 			    subr == DIF_SUBR_LLTOSTR ||
9817 			    subr == DIF_SUBR_STRTOLL ||
9818 			    subr == DIF_SUBR_RINDEX ||
9819 			    subr == DIF_SUBR_STRCHR ||
9820 			    subr == DIF_SUBR_STRJOIN ||
9821 			    subr == DIF_SUBR_STRRCHR ||
9822 			    subr == DIF_SUBR_STRSTR ||
9823 			    subr == DIF_SUBR_HTONS ||
9824 			    subr == DIF_SUBR_HTONL ||
9825 			    subr == DIF_SUBR_HTONLL ||
9826 			    subr == DIF_SUBR_NTOHS ||
9827 			    subr == DIF_SUBR_NTOHL ||
9828 			    subr == DIF_SUBR_NTOHLL)
9829 				break;
9830 
9831 			err += efunc(pc, "invalid subr %u\n", subr);
9832 			break;
9833 
9834 		default:
9835 			err += efunc(pc, "invalid opcode %u\n",
9836 			    DIF_INSTR_OP(instr));
9837 		}
9838 	}
9839 
9840 	return (err);
9841 }
9842 
9843 /*
9844  * Returns 1 if the expression in the DIF object can be cached on a per-thread
9845  * basis; 0 if not.
9846  */
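/*
 * Illustrative example: a predicate that references only pid or execname
 * and performs no loads is cacheable, while one that dereferences memory
 * (any load opcode) or stores to a thread-local variable is not, per the
 * checks below.
 */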
9847 static int
9848 dtrace_difo_cacheable(dtrace_difo_t *dp)
9849 {
9850 	int i;
9851 
9852 	if (dp == NULL)
9853 		return (0);
9854 
9855 	for (i = 0; i < dp->dtdo_varlen; i++) {
9856 		dtrace_difv_t *v = &dp->dtdo_vartab[i];
9857 
9858 		if (v->dtdv_scope != DIFV_SCOPE_GLOBAL)
9859 			continue;
9860 
9861 		switch (v->dtdv_id) {
9862 		case DIF_VAR_CURTHREAD:
9863 		case DIF_VAR_PID:
9864 		case DIF_VAR_TID:
9865 		case DIF_VAR_EXECNAME:
9866 		case DIF_VAR_ZONENAME:
9867 			break;
9868 
9869 		default:
9870 			return (0);
9871 		}
9872 	}
9873 
9874 	/*
9875 	 * This DIF object may be cacheable.  Now we need to look for any
9876 	 * array loading instructions, any memory loading instructions, or
9877 	 * any stores to thread-local variables.
9878 	 */
9879 	for (i = 0; i < dp->dtdo_len; i++) {
9880 		uint_t op = DIF_INSTR_OP(dp->dtdo_buf[i]);
9881 
9882 		if ((op >= DIF_OP_LDSB && op <= DIF_OP_LDX) ||
9883 		    (op >= DIF_OP_ULDSB && op <= DIF_OP_ULDX) ||
9884 		    (op >= DIF_OP_RLDSB && op <= DIF_OP_RLDX) ||
9885 		    op == DIF_OP_LDGA || op == DIF_OP_STTS)
9886 			return (0);
9887 	}
9888 
9889 	return (1);
9890 }
9891 
9892 static void
9893 dtrace_difo_hold(dtrace_difo_t *dp)
9894 {
9895 	int i;
9896 
9897 	ASSERT(MUTEX_HELD(&dtrace_lock));
9898 
9899 	dp->dtdo_refcnt++;
9900 	ASSERT(dp->dtdo_refcnt != 0);
9901 
9902 	/*
9903 	 * We need to check this DIF object for references to the variable
9904 	 * DIF_VAR_VTIMESTAMP.
9905 	 */
9906 	for (i = 0; i < dp->dtdo_varlen; i++) {
9907 		dtrace_difv_t *v = &dp->dtdo_vartab[i];
9908 
9909 		if (v->dtdv_id != DIF_VAR_VTIMESTAMP)
9910 			continue;
9911 
9912 		if (dtrace_vtime_references++ == 0)
9913 			dtrace_vtime_enable();
9914 	}
9915 }
9916 
9917 /*
9918  * This routine calculates the dynamic variable chunksize for a given DIF
9919  * object.  The calculation is not fool-proof, and can probably be tricked by
9920  * malicious DIF -- but it works for all compiler-generated DIF.  Because this
9921  * calculation is likely imperfect, dtrace_dynvar() is able to gracefully fail
9922  * if a dynamic variable size exceeds the chunksize.
9923  */
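/*
 * Illustrative sizing example (not normative): a store to a thread-local
 * scalar via DIF_OP_STTS uses two zero-sized keys, so the chunk must hold
 * sizeof (dtrace_dynvar_t) plus one additional dtrace_key_t plus the
 * variable's dtdt_size, rounded up to a uint64_t boundary.
 */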
9924 static void
9925 dtrace_difo_chunksize(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
9926 {
9927 	uint64_t sval;
9928 	dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */
9929 	const dif_instr_t *text = dp->dtdo_buf;
9930 	uint_t pc, srd = 0;
9931 	uint_t ttop = 0;
9932 	size_t size, ksize;
9933 	uint_t id, i;
9934 
9935 	for (pc = 0; pc < dp->dtdo_len; pc++) {
9936 		dif_instr_t instr = text[pc];
9937 		uint_t op = DIF_INSTR_OP(instr);
9938 		uint_t rd = DIF_INSTR_RD(instr);
9939 		uint_t r1 = DIF_INSTR_R1(instr);
9940 		uint_t nkeys = 0;
9941 		uchar_t scope;
9942 
9943 		dtrace_key_t *key = tupregs;
9944 
9945 		switch (op) {
9946 		case DIF_OP_SETX:
9947 			sval = dp->dtdo_inttab[DIF_INSTR_INTEGER(instr)];
9948 			srd = rd;
9949 			continue;
9950 
9951 		case DIF_OP_STTS:
9952 			key = &tupregs[DIF_DTR_NREGS];
9953 			key[0].dttk_size = 0;
9954 			key[1].dttk_size = 0;
9955 			nkeys = 2;
9956 			scope = DIFV_SCOPE_THREAD;
9957 			break;
9958 
9959 		case DIF_OP_STGAA:
9960 		case DIF_OP_STTAA:
9961 			nkeys = ttop;
9962 
9963 			if (DIF_INSTR_OP(instr) == DIF_OP_STTAA)
9964 				key[nkeys++].dttk_size = 0;
9965 
9966 			key[nkeys++].dttk_size = 0;
9967 
9968 			if (op == DIF_OP_STTAA) {
9969 				scope = DIFV_SCOPE_THREAD;
9970 			} else {
9971 				scope = DIFV_SCOPE_GLOBAL;
9972 			}
9973 
9974 			break;
9975 
9976 		case DIF_OP_PUSHTR:
9977 			if (ttop == DIF_DTR_NREGS)
9978 				return;
9979 
9980 			if ((srd == 0 || sval == 0) && r1 == DIF_TYPE_STRING) {
9981 				/*
9982 				 * If the register for the size of the "pushtr"
9983 				 * is %r0 (or the value is 0) and the type is
9984 				 * a string, we'll use the system-wide default
9985 				 * string size.
9986 				 */
9987 				tupregs[ttop++].dttk_size =
9988 				    dtrace_strsize_default;
9989 			} else {
9990 				if (srd == 0)
9991 					return;
9992 
9993 				if (sval > LONG_MAX)
9994 					return;
9995 
9996 				tupregs[ttop++].dttk_size = sval;
9997 			}
9998 
9999 			break;
10000 
10001 		case DIF_OP_PUSHTV:
10002 			if (ttop == DIF_DTR_NREGS)
10003 				return;
10004 
10005 			tupregs[ttop++].dttk_size = 0;
10006 			break;
10007 
10008 		case DIF_OP_FLUSHTS:
10009 			ttop = 0;
10010 			break;
10011 
10012 		case DIF_OP_POPTS:
10013 			if (ttop != 0)
10014 				ttop--;
10015 			break;
10016 		}
10017 
10018 		sval = 0;
10019 		srd = 0;
10020 
10021 		if (nkeys == 0)
10022 			continue;
10023 
10024 		/*
10025 		 * We have a dynamic variable allocation; calculate its size.
10026 		 */
10027 		for (ksize = 0, i = 0; i < nkeys; i++)
10028 			ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t));
10029 
10030 		size = sizeof (dtrace_dynvar_t);
10031 		size += sizeof (dtrace_key_t) * (nkeys - 1);
10032 		size += ksize;
10033 
10034 		/*
10035 		 * Now we need to determine the size of the stored data.
10036 		 */
10037 		id = DIF_INSTR_VAR(instr);
10038 
10039 		for (i = 0; i < dp->dtdo_varlen; i++) {
10040 			dtrace_difv_t *v = &dp->dtdo_vartab[i];
10041 
10042 			if (v->dtdv_id == id && v->dtdv_scope == scope) {
10043 				size += v->dtdv_type.dtdt_size;
10044 				break;
10045 			}
10046 		}
10047 
10048 		if (i == dp->dtdo_varlen)
10049 			return;
10050 
10051 		/*
10052 		 * We have the size.  If this is larger than the chunk size
10053 		 * for our dynamic variable state, reset the chunk size.
10054 		 */
10055 		size = P2ROUNDUP(size, sizeof (uint64_t));
10056 
10057 		/*
10058 		 * Before setting the chunk size, check that we're not going
10059 		 * to set it to a negative value...
10060 		 */
10061 		if (size > LONG_MAX)
10062 			return;
10063 
10064 		/*
10065 		 * ...and make certain that we didn't badly overflow.
10066 		 */
10067 		if (size < ksize || size < sizeof (dtrace_dynvar_t))
10068 			return;
10069 
10070 		if (size > vstate->dtvs_dynvars.dtds_chunksize)
10071 			vstate->dtvs_dynvars.dtds_chunksize = size;
10072 	}
10073 }
10074 
10075 static void
10076 dtrace_difo_init(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
10077 {
10078 	int i, oldsvars, osz, nsz, otlocals, ntlocals;
10079 	uint_t id;
10080 
10081 	ASSERT(MUTEX_HELD(&dtrace_lock));
10082 	ASSERT(dp->dtdo_buf != NULL && dp->dtdo_len != 0);
10083 
10084 	for (i = 0; i < dp->dtdo_varlen; i++) {
10085 		dtrace_difv_t *v = &dp->dtdo_vartab[i];
10086 		dtrace_statvar_t *svar, ***svarp;
10087 		size_t dsize = 0;
10088 		uint8_t scope = v->dtdv_scope;
10089 		int *np;
10090 
10091 		if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE)
10092 			continue;
10093 
10094 		id -= DIF_VAR_OTHER_UBASE;
10095 
10096 		switch (scope) {
10097 		case DIFV_SCOPE_THREAD:
10098 			while (id >= (otlocals = vstate->dtvs_ntlocals)) {
10099 				dtrace_difv_t *tlocals;
10100 
10101 				if ((ntlocals = (otlocals << 1)) == 0)
10102 					ntlocals = 1;
10103 
10104 				osz = otlocals * sizeof (dtrace_difv_t);
10105 				nsz = ntlocals * sizeof (dtrace_difv_t);
10106 
10107 				tlocals = kmem_zalloc(nsz, KM_SLEEP);
10108 
10109 				if (osz != 0) {
10110 					bcopy(vstate->dtvs_tlocals,
10111 					    tlocals, osz);
10112 					kmem_free(vstate->dtvs_tlocals, osz);
10113 				}
10114 
10115 				vstate->dtvs_tlocals = tlocals;
10116 				vstate->dtvs_ntlocals = ntlocals;
10117 			}
10118 
10119 			vstate->dtvs_tlocals[id] = *v;
10120 			continue;
10121 
10122 		case DIFV_SCOPE_LOCAL:
10123 			np = &vstate->dtvs_nlocals;
10124 			svarp = &vstate->dtvs_locals;
10125 
10126 			if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF)
10127 				dsize = NCPU * (v->dtdv_type.dtdt_size +
10128 				    sizeof (uint64_t));
10129 			else
10130 				dsize = NCPU * sizeof (uint64_t);
10131 
10132 			break;
10133 
10134 		case DIFV_SCOPE_GLOBAL:
10135 			np = &vstate->dtvs_nglobals;
10136 			svarp = &vstate->dtvs_globals;
10137 
10138 			if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF)
10139 				dsize = v->dtdv_type.dtdt_size +
10140 				    sizeof (uint64_t);
10141 
10142 			break;
10143 
10144 		default:
10145 			ASSERT(0);
10146 		}
10147 
10148 		while (id >= (oldsvars = *np)) {
10149 			dtrace_statvar_t **statics;
10150 			int newsvars, oldsize, newsize;
10151 
10152 			if ((newsvars = (oldsvars << 1)) == 0)
10153 				newsvars = 1;
10154 
10155 			oldsize = oldsvars * sizeof (dtrace_statvar_t *);
10156 			newsize = newsvars * sizeof (dtrace_statvar_t *);
10157 
10158 			statics = kmem_zalloc(newsize, KM_SLEEP);
10159 
10160 			if (oldsize != 0) {
10161 				bcopy(*svarp, statics, oldsize);
10162 				kmem_free(*svarp, oldsize);
10163 			}
10164 
10165 			*svarp = statics;
10166 			*np = newsvars;
10167 		}
10168 
10169 		if ((svar = (*svarp)[id]) == NULL) {
10170 			svar = kmem_zalloc(sizeof (dtrace_statvar_t), KM_SLEEP);
10171 			svar->dtsv_var = *v;
10172 
10173 			if ((svar->dtsv_size = dsize) != 0) {
10174 				svar->dtsv_data = (uint64_t)(uintptr_t)
10175 				    kmem_zalloc(dsize, KM_SLEEP);
10176 			}
10177 
10178 			(*svarp)[id] = svar;
10179 		}
10180 
10181 		svar->dtsv_refcnt++;
10182 	}
10183 
10184 	dtrace_difo_chunksize(dp, vstate);
10185 	dtrace_difo_hold(dp);
10186 }
10187 
10188 static dtrace_difo_t *
10189 dtrace_difo_duplicate(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
10190 {
10191 	dtrace_difo_t *new;
10192 	size_t sz;
10193 
10194 	ASSERT(dp->dtdo_buf != NULL);
10195 	ASSERT(dp->dtdo_refcnt != 0);
10196 
10197 	new = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP);
10198 
10199 	ASSERT(dp->dtdo_buf != NULL);
10200 	sz = dp->dtdo_len * sizeof (dif_instr_t);
10201 	new->dtdo_buf = kmem_alloc(sz, KM_SLEEP);
10202 	bcopy(dp->dtdo_buf, new->dtdo_buf, sz);
10203 	new->dtdo_len = dp->dtdo_len;
10204 
10205 	if (dp->dtdo_strtab != NULL) {
10206 		ASSERT(dp->dtdo_strlen != 0);
10207 		new->dtdo_strtab = kmem_alloc(dp->dtdo_strlen, KM_SLEEP);
10208 		bcopy(dp->dtdo_strtab, new->dtdo_strtab, dp->dtdo_strlen);
10209 		new->dtdo_strlen = dp->dtdo_strlen;
10210 	}
10211 
10212 	if (dp->dtdo_inttab != NULL) {
10213 		ASSERT(dp->dtdo_intlen != 0);
10214 		sz = dp->dtdo_intlen * sizeof (uint64_t);
10215 		new->dtdo_inttab = kmem_alloc(sz, KM_SLEEP);
10216 		bcopy(dp->dtdo_inttab, new->dtdo_inttab, sz);
10217 		new->dtdo_intlen = dp->dtdo_intlen;
10218 	}
10219 
10220 	if (dp->dtdo_vartab != NULL) {
10221 		ASSERT(dp->dtdo_varlen != 0);
10222 		sz = dp->dtdo_varlen * sizeof (dtrace_difv_t);
10223 		new->dtdo_vartab = kmem_alloc(sz, KM_SLEEP);
10224 		bcopy(dp->dtdo_vartab, new->dtdo_vartab, sz);
10225 		new->dtdo_varlen = dp->dtdo_varlen;
10226 	}
10227 
10228 	dtrace_difo_init(new, vstate);
10229 	return (new);
10230 }
10231 
10232 static void
10233 dtrace_difo_destroy(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
10234 {
10235 	int i;
10236 
10237 	ASSERT(dp->dtdo_refcnt == 0);
10238 
10239 	for (i = 0; i < dp->dtdo_varlen; i++) {
10240 		dtrace_difv_t *v = &dp->dtdo_vartab[i];
10241 		dtrace_statvar_t *svar, **svarp;
10242 		uint_t id;
10243 		uint8_t scope = v->dtdv_scope;
10244 		int *np;
10245 
10246 		switch (scope) {
10247 		case DIFV_SCOPE_THREAD:
10248 			continue;
10249 
10250 		case DIFV_SCOPE_LOCAL:
10251 			np = &vstate->dtvs_nlocals;
10252 			svarp = vstate->dtvs_locals;
10253 			break;
10254 
10255 		case DIFV_SCOPE_GLOBAL:
10256 			np = &vstate->dtvs_nglobals;
10257 			svarp = vstate->dtvs_globals;
10258 			break;
10259 
10260 		default:
10261 			ASSERT(0);
10262 		}
10263 
10264 		if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE)
10265 			continue;
10266 
10267 		id -= DIF_VAR_OTHER_UBASE;
10268 		ASSERT(id < *np);
10269 
10270 		svar = svarp[id];
10271 		ASSERT(svar != NULL);
10272 		ASSERT(svar->dtsv_refcnt > 0);
10273 
10274 		if (--svar->dtsv_refcnt > 0)
10275 			continue;
10276 
10277 		if (svar->dtsv_size != 0) {
10278 			ASSERT(svar->dtsv_data != NULL);
10279 			kmem_free((void *)(uintptr_t)svar->dtsv_data,
10280 			    svar->dtsv_size);
10281 		}
10282 
10283 		kmem_free(svar, sizeof (dtrace_statvar_t));
10284 		svarp[id] = NULL;
10285 	}
10286 
10287 	kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t));
10288 	kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t));
10289 	kmem_free(dp->dtdo_strtab, dp->dtdo_strlen);
10290 	kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t));
10291 
10292 	kmem_free(dp, sizeof (dtrace_difo_t));
10293 }
10294 
10295 static void
10296 dtrace_difo_release(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
10297 {
10298 	int i;
10299 
10300 	ASSERT(MUTEX_HELD(&dtrace_lock));
10301 	ASSERT(dp->dtdo_refcnt != 0);
10302 
10303 	for (i = 0; i < dp->dtdo_varlen; i++) {
10304 		dtrace_difv_t *v = &dp->dtdo_vartab[i];
10305 
10306 		if (v->dtdv_id != DIF_VAR_VTIMESTAMP)
10307 			continue;
10308 
10309 		ASSERT(dtrace_vtime_references > 0);
10310 		if (--dtrace_vtime_references == 0)
10311 			dtrace_vtime_disable();
10312 	}
10313 
10314 	if (--dp->dtdo_refcnt == 0)
10315 		dtrace_difo_destroy(dp, vstate);
10316 }
10317 
10318 /*
10319  * DTrace Format Functions
10320  */
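/*
 * Note that format handles are 1-based: dtrace_format_add() returns
 * ndx + 1 (with 0 reserved to indicate failure), and consumers of the
 * handle index dts_formats with (format - 1), as dtrace_format_remove()
 * does below.
 */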
10321 static uint16_t
10322 dtrace_format_add(dtrace_state_t *state, char *str)
10323 {
10324 	char *fmt, **new;
10325 	uint16_t ndx, len = strlen(str) + 1;
10326 
10327 	fmt = kmem_zalloc(len, KM_SLEEP);
10328 	bcopy(str, fmt, len);
10329 
10330 	for (ndx = 0; ndx < state->dts_nformats; ndx++) {
10331 		if (state->dts_formats[ndx] == NULL) {
10332 			state->dts_formats[ndx] = fmt;
10333 			return (ndx + 1);
10334 		}
10335 	}
10336 
10337 	if (state->dts_nformats == USHRT_MAX) {
10338 		/*
10339 		 * This is only likely if a denial-of-service attack is being
10340 		 * attempted.  As such, it's okay to fail silently here.
10341 		 */
10342 		kmem_free(fmt, len);
10343 		return (0);
10344 	}
10345 
10346 	/*
10347 	 * For simplicity, we always resize the formats array to be exactly the
10348 	 * number of formats.
10349 	 */
10350 	ndx = state->dts_nformats++;
10351 	new = kmem_alloc((ndx + 1) * sizeof (char *), KM_SLEEP);
10352 
10353 	if (state->dts_formats != NULL) {
10354 		ASSERT(ndx != 0);
10355 		bcopy(state->dts_formats, new, ndx * sizeof (char *));
10356 		kmem_free(state->dts_formats, ndx * sizeof (char *));
10357 	}
10358 
10359 	state->dts_formats = new;
10360 	state->dts_formats[ndx] = fmt;
10361 
10362 	return (ndx + 1);
10363 }
10364 
10365 static void
10366 dtrace_format_remove(dtrace_state_t *state, uint16_t format)
10367 {
10368 	char *fmt;
10369 
10370 	ASSERT(state->dts_formats != NULL);
10371 	ASSERT(format <= state->dts_nformats);
10372 	ASSERT(state->dts_formats[format - 1] != NULL);
10373 
10374 	fmt = state->dts_formats[format - 1];
10375 	kmem_free(fmt, strlen(fmt) + 1);
10376 	state->dts_formats[format - 1] = NULL;
10377 }
10378 
10379 static void
10380 dtrace_format_destroy(dtrace_state_t *state)
10381 {
10382 	int i;
10383 
10384 	if (state->dts_nformats == 0) {
10385 		ASSERT(state->dts_formats == NULL);
10386 		return;
10387 	}
10388 
10389 	ASSERT(state->dts_formats != NULL);
10390 
10391 	for (i = 0; i < state->dts_nformats; i++) {
10392 		char *fmt = state->dts_formats[i];
10393 
10394 		if (fmt == NULL)
10395 			continue;
10396 
10397 		kmem_free(fmt, strlen(fmt) + 1);
10398 	}
10399 
10400 	kmem_free(state->dts_formats, state->dts_nformats * sizeof (char *));
10401 	state->dts_nformats = 0;
10402 	state->dts_formats = NULL;
10403 }
10404 
10405 /*
10406  * DTrace Predicate Functions
10407  */
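/*
 * A predicate is a reference-counted wrapper around a DIFO.  If the DIFO is
 * cacheable (see dtrace_difo_cacheable()), the predicate is also assigned a
 * cache ID from dtrace_predcache_id so that its result may be cached on a
 * per-thread basis.
 */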
10408 static dtrace_predicate_t *
10409 dtrace_predicate_create(dtrace_difo_t *dp)
10410 {
10411 	dtrace_predicate_t *pred;
10412 
10413 	ASSERT(MUTEX_HELD(&dtrace_lock));
10414 	ASSERT(dp->dtdo_refcnt != 0);
10415 
10416 	pred = kmem_zalloc(sizeof (dtrace_predicate_t), KM_SLEEP);
10417 	pred->dtp_difo = dp;
10418 	pred->dtp_refcnt = 1;
10419 
10420 	if (!dtrace_difo_cacheable(dp))
10421 		return (pred);
10422 
10423 	if (dtrace_predcache_id == DTRACE_CACHEIDNONE) {
10424 		/*
10425 		 * This is only theoretically possible -- we have had 2^32
10426 		 * cacheable predicates on this machine.  We cannot allow any
10427 		 * more predicates to become cacheable:  as unlikely as it is,
10428 		 * there may be a thread caching a (now stale) predicate cache
10429 		 * ID. (N.B.: we are successfully resisting the temptation to
10430 		 * have this cmn_err() "Holy shit -- we executed this code!")
10431 		 */
10432 		return (pred);
10433 	}
10434 
10435 	pred->dtp_cacheid = dtrace_predcache_id++;
10436 
10437 	return (pred);
10438 }
10439 
10440 static void
10441 dtrace_predicate_hold(dtrace_predicate_t *pred)
10442 {
10443 	ASSERT(MUTEX_HELD(&dtrace_lock));
10444 	ASSERT(pred->dtp_difo != NULL && pred->dtp_difo->dtdo_refcnt != 0);
10445 	ASSERT(pred->dtp_refcnt > 0);
10446 
10447 	pred->dtp_refcnt++;
10448 }
10449 
10450 static void
10451 dtrace_predicate_release(dtrace_predicate_t *pred, dtrace_vstate_t *vstate)
10452 {
10453 	dtrace_difo_t *dp = pred->dtp_difo;
10454 
10455 	ASSERT(MUTEX_HELD(&dtrace_lock));
10456 	ASSERT(dp != NULL && dp->dtdo_refcnt != 0);
10457 	ASSERT(pred->dtp_refcnt > 0);
10458 
10459 	if (--pred->dtp_refcnt == 0) {
10460 		dtrace_difo_release(pred->dtp_difo, vstate);
10461 		kmem_free(pred, sizeof (dtrace_predicate_t));
10462 	}
10463 }
10464 
10465 /*
10466  * DTrace Action Description Functions
10467  */
10468 static dtrace_actdesc_t *
10469 dtrace_actdesc_create(dtrace_actkind_t kind, uint32_t ntuple,
10470     uint64_t uarg, uint64_t arg)
10471 {
10472 	dtrace_actdesc_t *act;
10473 
10474 	ASSERT(!DTRACEACT_ISPRINTFLIKE(kind) || (arg != NULL &&
10475 	    arg >= KERNELBASE) || (arg == NULL && kind == DTRACEACT_PRINTA));
10476 
10477 	act = kmem_zalloc(sizeof (dtrace_actdesc_t), KM_SLEEP);
10478 	act->dtad_kind = kind;
10479 	act->dtad_ntuple = ntuple;
10480 	act->dtad_uarg = uarg;
10481 	act->dtad_arg = arg;
10482 	act->dtad_refcnt = 1;
10483 
10484 	return (act);
10485 }
10486 
10487 static void
10488 dtrace_actdesc_hold(dtrace_actdesc_t *act)
10489 {
10490 	ASSERT(act->dtad_refcnt >= 1);
10491 	act->dtad_refcnt++;
10492 }
10493 
10494 static void
10495 dtrace_actdesc_release(dtrace_actdesc_t *act, dtrace_vstate_t *vstate)
10496 {
10497 	dtrace_actkind_t kind = act->dtad_kind;
10498 	dtrace_difo_t *dp;
10499 
10500 	ASSERT(act->dtad_refcnt >= 1);
10501 
10502 	if (--act->dtad_refcnt != 0)
10503 		return;
10504 
10505 	if ((dp = act->dtad_difo) != NULL)
10506 		dtrace_difo_release(dp, vstate);
10507 
10508 	if (DTRACEACT_ISPRINTFLIKE(kind)) {
10509 		char *str = (char *)(uintptr_t)act->dtad_arg;
10510 
10511 		ASSERT((str != NULL && (uintptr_t)str >= KERNELBASE) ||
10512 		    (str == NULL && act->dtad_kind == DTRACEACT_PRINTA));
10513 
10514 		if (str != NULL)
10515 			kmem_free(str, strlen(str) + 1);
10516 	}
10517 
10518 	kmem_free(act, sizeof (dtrace_actdesc_t));
10519 }
10520 
10521 /*
10522  * DTrace ECB Functions
10523  */
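/*
 * Like format handles, EPIDs handed out by dtrace_ecb_add() are 1-based:
 * dts_epid is post-incremented and the new ECB is stored at
 * dts_ecbs[epid - 1], with the array doubled in size as needed.
 */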
10524 static dtrace_ecb_t *
10525 dtrace_ecb_add(dtrace_state_t *state, dtrace_probe_t *probe)
10526 {
10527 	dtrace_ecb_t *ecb;
10528 	dtrace_epid_t epid;
10529 
10530 	ASSERT(MUTEX_HELD(&dtrace_lock));
10531 
10532 	ecb = kmem_zalloc(sizeof (dtrace_ecb_t), KM_SLEEP);
10533 	ecb->dte_predicate = NULL;
10534 	ecb->dte_probe = probe;
10535 
10536 	/*
10537 	 * The default size is the size of the default action: recording
10538 	 * the header.
10539 	 */
10540 	ecb->dte_size = ecb->dte_needed = sizeof (dtrace_rechdr_t);
10541 	ecb->dte_alignment = sizeof (dtrace_epid_t);
10542 
10543 	epid = state->dts_epid++;
10544 
10545 	if (epid - 1 >= state->dts_necbs) {
10546 		dtrace_ecb_t **oecbs = state->dts_ecbs, **ecbs;
10547 		int necbs = state->dts_necbs << 1;
10548 
10549 		ASSERT(epid == state->dts_necbs + 1);
10550 
10551 		if (necbs == 0) {
10552 			ASSERT(oecbs == NULL);
10553 			necbs = 1;
10554 		}
10555 
10556 		ecbs = kmem_zalloc(necbs * sizeof (*ecbs), KM_SLEEP);
10557 
10558 		if (oecbs != NULL)
10559 			bcopy(oecbs, ecbs, state->dts_necbs * sizeof (*ecbs));
10560 
10561 		dtrace_membar_producer();
10562 		state->dts_ecbs = ecbs;
10563 
10564 		if (oecbs != NULL) {
10565 			/*
10566 			 * If this state is active, we must dtrace_sync()
10567 			 * before we can free the old dts_ecbs array:  we're
10568 			 * coming in hot, and there may be active ring
10569 			 * buffer processing (which indexes into the dts_ecbs
10570 			 * array) on another CPU.
10571 			 */
10572 			if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
10573 				dtrace_sync();
10574 
10575 			kmem_free(oecbs, state->dts_necbs * sizeof (*ecbs));
10576 		}
10577 
10578 		dtrace_membar_producer();
10579 		state->dts_necbs = necbs;
10580 	}
10581 
10582 	ecb->dte_state = state;
10583 
10584 	ASSERT(state->dts_ecbs[epid - 1] == NULL);
10585 	dtrace_membar_producer();
10586 	state->dts_ecbs[(ecb->dte_epid = epid) - 1] = ecb;
10587 
10588 	return (ecb);
10589 }
10590 
10591 static int
10592 dtrace_ecb_enable(dtrace_ecb_t *ecb)
10593 {
10594 	dtrace_probe_t *probe = ecb->dte_probe;
10595 
10596 	ASSERT(MUTEX_HELD(&cpu_lock));
10597 	ASSERT(MUTEX_HELD(&dtrace_lock));
10598 	ASSERT(ecb->dte_next == NULL);
10599 
10600 	if (probe == NULL) {
10601 		/*
10602 		 * This is the NULL probe -- there's nothing to do.
10603 		 */
10604 		return (0);
10605 	}
10606 
10607 	if (probe->dtpr_ecb == NULL) {
10608 		dtrace_provider_t *prov = probe->dtpr_provider;
10609 
10610 		/*
10611 		 * We're the first ECB on this probe.
10612 		 */
10613 		probe->dtpr_ecb = probe->dtpr_ecb_last = ecb;
10614 
10615 		if (ecb->dte_predicate != NULL)
10616 			probe->dtpr_predcache = ecb->dte_predicate->dtp_cacheid;
10617 
10618 		return (prov->dtpv_pops.dtps_enable(prov->dtpv_arg,
10619 		    probe->dtpr_id, probe->dtpr_arg));
10620 	} else {
10621 		/*
10622 		 * This probe is already active.  Swing the last pointer to
10623 		 * point to the new ECB, and issue a dtrace_sync() to assure
10624 		 * that all CPUs have seen the change.
10625 		 */
10626 		ASSERT(probe->dtpr_ecb_last != NULL);
10627 		probe->dtpr_ecb_last->dte_next = ecb;
10628 		probe->dtpr_ecb_last = ecb;
10629 		probe->dtpr_predcache = 0;
10630 
10631 		dtrace_sync();
10632 		return (0);
10633 	}
10634 }
10635 
10636 static int
10637 dtrace_ecb_resize(dtrace_ecb_t *ecb)
10638 {
10639 	dtrace_action_t *act;
10640 	uint32_t curneeded = UINT32_MAX;
10641 	uint32_t aggbase = UINT32_MAX;
10642 
10643 	/*
10644 	 * If we record anything, we always record the dtrace_rechdr_t.  (And
10645 	 * we always record it first.)
10646 	 */
10647 	ecb->dte_size = sizeof (dtrace_rechdr_t);
10648 	ecb->dte_alignment = sizeof (dtrace_epid_t);
10649 
10650 	for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
10651 		dtrace_recdesc_t *rec = &act->dta_rec;
10652 		ASSERT(rec->dtrd_size > 0 || rec->dtrd_alignment == 1);
10653 
10654 		ecb->dte_alignment = MAX(ecb->dte_alignment,
10655 		    rec->dtrd_alignment);
10656 
10657 		if (DTRACEACT_ISAGG(act->dta_kind)) {
10658 			dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act;
10659 
10660 			ASSERT(rec->dtrd_size != 0);
10661 			ASSERT(agg->dtag_first != NULL);
10662 			ASSERT(act->dta_prev->dta_intuple);
10663 			ASSERT(aggbase != UINT32_MAX);
10664 			ASSERT(curneeded != UINT32_MAX);
10665 
10666 			agg->dtag_base = aggbase;
10667 
10668 			curneeded = P2ROUNDUP(curneeded, rec->dtrd_alignment);
10669 			rec->dtrd_offset = curneeded;
10670 			if (curneeded + rec->dtrd_size < curneeded)
10671 				return (EINVAL);
10672 			curneeded += rec->dtrd_size;
10673 			ecb->dte_needed = MAX(ecb->dte_needed, curneeded);
10674 
10675 			aggbase = UINT32_MAX;
10676 			curneeded = UINT32_MAX;
10677 		} else if (act->dta_intuple) {
10678 			if (curneeded == UINT32_MAX) {
10679 				/*
10680 				 * This is the first record in a tuple.  Align
10681 				 * curneeded to be at offset 4 in an 8-byte
10682 				 * aligned block.
10683 				 */
10684 				ASSERT(act->dta_prev == NULL ||
10685 				    !act->dta_prev->dta_intuple);
10686 				ASSERT3U(aggbase, ==, UINT32_MAX);
10687 				curneeded = P2PHASEUP(ecb->dte_size,
10688 				    sizeof (uint64_t), sizeof (dtrace_aggid_t));
10689 
10690 				aggbase = curneeded - sizeof (dtrace_aggid_t);
10691 				ASSERT(IS_P2ALIGNED(aggbase,
10692 				    sizeof (uint64_t)));
10693 			}
10694 			curneeded = P2ROUNDUP(curneeded, rec->dtrd_alignment);
10695 			rec->dtrd_offset = curneeded;
10696 			if (curneeded + rec->dtrd_size < curneeded)
10697 				return (EINVAL);
10698 			curneeded += rec->dtrd_size;
10699 		} else {
10700 			/* tuples must be followed by an aggregation */
10701 			ASSERT(act->dta_prev == NULL ||
10702 			    !act->dta_prev->dta_intuple);
10703 
10704 			ecb->dte_size = P2ROUNDUP(ecb->dte_size,
10705 			    rec->dtrd_alignment);
10706 			rec->dtrd_offset = ecb->dte_size;
10707 			if (ecb->dte_size + rec->dtrd_size < ecb->dte_size)
10708 				return (EINVAL);
10709 			ecb->dte_size += rec->dtrd_size;
10710 			ecb->dte_needed = MAX(ecb->dte_needed, ecb->dte_size);
10711 		}
10712 	}
10713 
10714 	if ((act = ecb->dte_action) != NULL &&
10715 	    !(act->dta_kind == DTRACEACT_SPECULATE && act->dta_next == NULL) &&
10716 	    ecb->dte_size == sizeof (dtrace_rechdr_t)) {
10717 		/*
10718 		 * If the size is still sizeof (dtrace_rechdr_t), then all
10719 		 * actions store no data; set the size to 0.
10720 		 */
10721 		ecb->dte_size = 0;
10722 	}
10723 
10724 	ecb->dte_size = P2ROUNDUP(ecb->dte_size, sizeof (dtrace_epid_t));
10725 	ecb->dte_needed = P2ROUNDUP(ecb->dte_needed, (sizeof (dtrace_epid_t)));
10726 	ecb->dte_state->dts_needed = MAX(ecb->dte_state->dts_needed,
10727 	    ecb->dte_needed);
10728 	return (0);
10729 }
10730 
10731 static dtrace_action_t *
10732 dtrace_ecb_aggregation_create(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc)
10733 {
10734 	dtrace_aggregation_t *agg;
10735 	size_t size = sizeof (uint64_t);
10736 	int ntuple = desc->dtad_ntuple;
10737 	dtrace_action_t *act;
10738 	dtrace_recdesc_t *frec;
10739 	dtrace_aggid_t aggid;
10740 	dtrace_state_t *state = ecb->dte_state;
10741 
10742 	agg = kmem_zalloc(sizeof (dtrace_aggregation_t), KM_SLEEP);
10743 	agg->dtag_ecb = ecb;
10744 
10745 	ASSERT(DTRACEACT_ISAGG(desc->dtad_kind));
10746 
10747 	switch (desc->dtad_kind) {
10748 	case DTRACEAGG_MIN:
10749 		agg->dtag_initial = INT64_MAX;
10750 		agg->dtag_aggregate = dtrace_aggregate_min;
10751 		break;
10752 
10753 	case DTRACEAGG_MAX:
10754 		agg->dtag_initial = INT64_MIN;
10755 		agg->dtag_aggregate = dtrace_aggregate_max;
10756 		break;
10757 
10758 	case DTRACEAGG_COUNT:
10759 		agg->dtag_aggregate = dtrace_aggregate_count;
10760 		break;
10761 
10762 	case DTRACEAGG_QUANTIZE:
10763 		agg->dtag_aggregate = dtrace_aggregate_quantize;
10764 		size = (((sizeof (uint64_t) * NBBY) - 1) * 2 + 1) *
10765 		    sizeof (uint64_t);
10766 		break;
10767 
10768 	case DTRACEAGG_LQUANTIZE: {
10769 		uint16_t step = DTRACE_LQUANTIZE_STEP(desc->dtad_arg);
10770 		uint16_t levels = DTRACE_LQUANTIZE_LEVELS(desc->dtad_arg);
10771 
10772 		agg->dtag_initial = desc->dtad_arg;
10773 		agg->dtag_aggregate = dtrace_aggregate_lquantize;
10774 
10775 		if (step == 0 || levels == 0)
10776 			goto err;
10777 
10778 		size = levels * sizeof (uint64_t) + 3 * sizeof (uint64_t);
10779 		break;
10780 	}
10781 
10782 	case DTRACEAGG_LLQUANTIZE: {
10783 		uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(desc->dtad_arg);
10784 		uint16_t low = DTRACE_LLQUANTIZE_LOW(desc->dtad_arg);
10785 		uint16_t high = DTRACE_LLQUANTIZE_HIGH(desc->dtad_arg);
10786 		uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(desc->dtad_arg);
10787 		int64_t v;
10788 
10789 		agg->dtag_initial = desc->dtad_arg;
10790 		agg->dtag_aggregate = dtrace_aggregate_llquantize;
10791 
10792 		if (factor < 2 || low >= high || nsteps < factor)
10793 			goto err;
10794 
10795 		/*
10796 		 * Now check that the number of steps evenly divides a power
10797 		 * of the factor.  (This assures both integer bucket size and
10798 		 * linearity within each magnitude.)
10799 		 */
10800 		for (v = factor; v < nsteps; v *= factor)
10801 			continue;
10802 
10803 		if ((v % nsteps) || (nsteps % factor))
10804 			goto err;
10805 
10806 		size = (dtrace_aggregate_llquantize_bucket(factor,
10807 		    low, high, nsteps, INT64_MAX) + 2) * sizeof (uint64_t);
10808 		break;
10809 	}
10810 
10811 	case DTRACEAGG_AVG:
10812 		agg->dtag_aggregate = dtrace_aggregate_avg;
10813 		size = sizeof (uint64_t) * 2;
10814 		break;
10815 
10816 	case DTRACEAGG_STDDEV:
10817 		agg->dtag_aggregate = dtrace_aggregate_stddev;
10818 		size = sizeof (uint64_t) * 4;
10819 		break;
10820 
10821 	case DTRACEAGG_SUM:
10822 		agg->dtag_aggregate = dtrace_aggregate_sum;
10823 		break;
10824 
10825 	default:
10826 		goto err;
10827 	}
10828 
10829 	agg->dtag_action.dta_rec.dtrd_size = size;
10830 
10831 	if (ntuple == 0)
10832 		goto err;
10833 
10834 	/*
10835 	 * We must make sure that we have enough actions for the n-tuple.
10836 	 */
10837 	for (act = ecb->dte_action_last; act != NULL; act = act->dta_prev) {
10838 		if (DTRACEACT_ISAGG(act->dta_kind))
10839 			break;
10840 
10841 		if (--ntuple == 0) {
10842 			/*
10843 			 * This is the action with which our n-tuple begins.
10844 			 */
10845 			agg->dtag_first = act;
10846 			goto success;
10847 		}
10848 	}
10849 
10850 	/*
10851 	 * This n-tuple is short by ntuple elements.  Return failure.
10852 	 */
10853 	ASSERT(ntuple != 0);
10854 err:
10855 	kmem_free(agg, sizeof (dtrace_aggregation_t));
10856 	return (NULL);
10857 
10858 success:
10859 	/*
10860 	 * If the last action in the tuple has a size of zero, it's actually
10861 	 * an expression argument for the aggregating action.
10862 	 */
10863 	ASSERT(ecb->dte_action_last != NULL);
10864 	act = ecb->dte_action_last;
10865 
10866 	if (act->dta_kind == DTRACEACT_DIFEXPR) {
10867 		ASSERT(act->dta_difo != NULL);
10868 
10869 		if (act->dta_difo->dtdo_rtype.dtdt_size == 0)
10870 			agg->dtag_hasarg = 1;
10871 	}
10872 
10873 	/*
10874 	 * We need to allocate an id for this aggregation.
10875 	 */
10876 	aggid = (dtrace_aggid_t)(uintptr_t)vmem_alloc(state->dts_aggid_arena, 1,
10877 	    VM_BESTFIT | VM_SLEEP);
10878 
10879 	if (aggid - 1 >= state->dts_naggregations) {
10880 		dtrace_aggregation_t **oaggs = state->dts_aggregations;
10881 		dtrace_aggregation_t **aggs;
10882 		int naggs = state->dts_naggregations << 1;
10883 		int onaggs = state->dts_naggregations;
10884 
10885 		ASSERT(aggid == state->dts_naggregations + 1);
10886 
10887 		if (naggs == 0) {
10888 			ASSERT(oaggs == NULL);
10889 			naggs = 1;
10890 		}
10891 
10892 		aggs = kmem_zalloc(naggs * sizeof (*aggs), KM_SLEEP);
10893 
10894 		if (oaggs != NULL) {
10895 			bcopy(oaggs, aggs, onaggs * sizeof (*aggs));
10896 			kmem_free(oaggs, onaggs * sizeof (*aggs));
10897 		}
10898 
10899 		state->dts_aggregations = aggs;
10900 		state->dts_naggregations = naggs;
10901 	}
10902 
10903 	ASSERT(state->dts_aggregations[aggid - 1] == NULL);
10904 	state->dts_aggregations[(agg->dtag_id = aggid) - 1] = agg;
10905 
10906 	frec = &agg->dtag_first->dta_rec;
10907 	if (frec->dtrd_alignment < sizeof (dtrace_aggid_t))
10908 		frec->dtrd_alignment = sizeof (dtrace_aggid_t);
10909 
10910 	for (act = agg->dtag_first; act != NULL; act = act->dta_next) {
10911 		ASSERT(!act->dta_intuple);
10912 		act->dta_intuple = 1;
10913 	}
10914 
10915 	return (&agg->dtag_action);
10916 }
10917 
10918 static void
10919 dtrace_ecb_aggregation_destroy(dtrace_ecb_t *ecb, dtrace_action_t *act)
10920 {
10921 	dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act;
10922 	dtrace_state_t *state = ecb->dte_state;
10923 	dtrace_aggid_t aggid = agg->dtag_id;
10924 
10925 	ASSERT(DTRACEACT_ISAGG(act->dta_kind));
10926 	vmem_free(state->dts_aggid_arena, (void *)(uintptr_t)aggid, 1);
10927 
10928 	ASSERT(state->dts_aggregations[aggid - 1] == agg);
10929 	state->dts_aggregations[aggid - 1] = NULL;
10930 
10931 	kmem_free(agg, sizeof (dtrace_aggregation_t));
10932 }
10933 
10934 static int
10935 dtrace_ecb_action_add(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc)
10936 {
10937 	dtrace_action_t *action, *last;
10938 	dtrace_difo_t *dp = desc->dtad_difo;
10939 	uint32_t size = 0, align = sizeof (uint8_t), mask;
10940 	uint16_t format = 0;
10941 	dtrace_recdesc_t *rec;
10942 	dtrace_state_t *state = ecb->dte_state;
10943 	dtrace_optval_t *opt = state->dts_options, nframes, strsize;
10944 	uint64_t arg = desc->dtad_arg;
10945 
10946 	ASSERT(MUTEX_HELD(&dtrace_lock));
10947 	ASSERT(ecb->dte_action == NULL || ecb->dte_action->dta_refcnt == 1);
10948 
10949 	if (DTRACEACT_ISAGG(desc->dtad_kind)) {
10950 		/*
10951 		 * If this is an aggregating action, there must be neither
10952 		 * a speculate nor a commit on the action chain.
10953 		 */
10954 		dtrace_action_t *act;
10955 
10956 		for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
10957 			if (act->dta_kind == DTRACEACT_COMMIT)
10958 				return (EINVAL);
10959 
10960 			if (act->dta_kind == DTRACEACT_SPECULATE)
10961 				return (EINVAL);
10962 		}
10963 
10964 		action = dtrace_ecb_aggregation_create(ecb, desc);
10965 
10966 		if (action == NULL)
10967 			return (EINVAL);
10968 	} else {
10969 		if (DTRACEACT_ISDESTRUCTIVE(desc->dtad_kind) ||
10970 		    (desc->dtad_kind == DTRACEACT_DIFEXPR &&
10971 		    dp != NULL && dp->dtdo_destructive)) {
10972 			state->dts_destructive = 1;
10973 		}
10974 
10975 		switch (desc->dtad_kind) {
10976 		case DTRACEACT_PRINTF:
10977 		case DTRACEACT_PRINTA:
10978 		case DTRACEACT_SYSTEM:
10979 		case DTRACEACT_FREOPEN:
10980 		case DTRACEACT_DIFEXPR:
10981 			/*
10982 			 * We know that our arg is a string -- turn it into a
10983 			 * format.
10984 			 */
10985 			if (arg == NULL) {
10986 				ASSERT(desc->dtad_kind == DTRACEACT_PRINTA ||
10987 				    desc->dtad_kind == DTRACEACT_DIFEXPR);
10988 				format = 0;
10989 			} else {
10990 				ASSERT(arg != NULL);
10991 				ASSERT(arg > KERNELBASE);
10992 				format = dtrace_format_add(state,
10993 				    (char *)(uintptr_t)arg);
10994 			}
10995 
10996 			/*FALLTHROUGH*/
10997 		case DTRACEACT_LIBACT:
10998 		case DTRACEACT_TRACEMEM:
10999 		case DTRACEACT_TRACEMEM_DYNSIZE:
11000 			if (dp == NULL)
11001 				return (EINVAL);
11002 
11003 			if ((size = dp->dtdo_rtype.dtdt_size) != 0)
11004 				break;
11005 
11006 			if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) {
11007 				if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
11008 					return (EINVAL);
11009 
11010 				size = opt[DTRACEOPT_STRSIZE];
11011 			}
11012 
11013 			break;
11014 
11015 		case DTRACEACT_STACK:
11016 			if ((nframes = arg) == 0) {
11017 				nframes = opt[DTRACEOPT_STACKFRAMES];
11018 				ASSERT(nframes > 0);
11019 				arg = nframes;
11020 			}
11021 
11022 			size = nframes * sizeof (pc_t);
11023 			break;
11024 
11025 		case DTRACEACT_JSTACK:
11026 			if ((strsize = DTRACE_USTACK_STRSIZE(arg)) == 0)
11027 				strsize = opt[DTRACEOPT_JSTACKSTRSIZE];
11028 
11029 			if ((nframes = DTRACE_USTACK_NFRAMES(arg)) == 0)
11030 				nframes = opt[DTRACEOPT_JSTACKFRAMES];
11031 
11032 			arg = DTRACE_USTACK_ARG(nframes, strsize);
11033 
11034 			/*FALLTHROUGH*/
11035 		case DTRACEACT_USTACK:
11036 			if (desc->dtad_kind != DTRACEACT_JSTACK &&
11037 			    (nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) {
11038 				strsize = DTRACE_USTACK_STRSIZE(arg);
11039 				nframes = opt[DTRACEOPT_USTACKFRAMES];
11040 				ASSERT(nframes > 0);
11041 				arg = DTRACE_USTACK_ARG(nframes, strsize);
11042 			}
11043 
11044 			/*
11045 			 * Save a slot for the pid.
11046 			 */
11047 			size = (nframes + 1) * sizeof (uint64_t);
11048 			size += DTRACE_USTACK_STRSIZE(arg);
11049 			size = P2ROUNDUP(size, (uint32_t)(sizeof (uintptr_t)));
11050 
11051 			break;
11052 
11053 		case DTRACEACT_SYM:
11054 		case DTRACEACT_MOD:
11055 			if (dp == NULL || ((size = dp->dtdo_rtype.dtdt_size) !=
11056 			    sizeof (uint64_t)) ||
11057 			    (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
11058 				return (EINVAL);
11059 			break;
11060 
11061 		case DTRACEACT_USYM:
11062 		case DTRACEACT_UMOD:
11063 		case DTRACEACT_UADDR:
11064 			if (dp == NULL ||
11065 			    (dp->dtdo_rtype.dtdt_size != sizeof (uint64_t)) ||
11066 			    (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
11067 				return (EINVAL);
11068 
11069 			/*
11070 			 * We have a slot for the pid, plus a slot for the
11071 			 * argument.  To keep things simple (aligned with
11072 			 * bitness-neutral sizing), we store each as a 64-bit
11073 			 * quantity.
11074 			 */
11075 			size = 2 * sizeof (uint64_t);
11076 			break;
11077 
11078 		case DTRACEACT_STOP:
11079 		case DTRACEACT_BREAKPOINT:
11080 		case DTRACEACT_PANIC:
11081 			break;
11082 
11083 		case DTRACEACT_CHILL:
11084 		case DTRACEACT_DISCARD:
11085 		case DTRACEACT_RAISE:
11086 			if (dp == NULL)
11087 				return (EINVAL);
11088 			break;
11089 
11090 		case DTRACEACT_EXIT:
11091 			if (dp == NULL ||
11092 			    (size = dp->dtdo_rtype.dtdt_size) != sizeof (int) ||
11093 			    (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
11094 				return (EINVAL);
11095 			break;
11096 
11097 		case DTRACEACT_SPECULATE:
11098 			if (ecb->dte_size > sizeof (dtrace_rechdr_t))
11099 				return (EINVAL);
11100 
11101 			if (dp == NULL)
11102 				return (EINVAL);
11103 
11104 			state->dts_speculates = 1;
11105 			break;
11106 
11107 		case DTRACEACT_COMMIT: {
11108 			dtrace_action_t *act = ecb->dte_action;
11109 
11110 			for (; act != NULL; act = act->dta_next) {
11111 				if (act->dta_kind == DTRACEACT_COMMIT)
11112 					return (EINVAL);
11113 			}
11114 
11115 			if (dp == NULL)
11116 				return (EINVAL);
11117 			break;
11118 		}
11119 
11120 		default:
11121 			return (EINVAL);
11122 		}
11123 
11124 		if (size != 0 || desc->dtad_kind == DTRACEACT_SPECULATE) {
11125 			/*
11126 			 * If this is a data-storing action or a speculate,
11127 			 * we must be sure that there isn't a commit on the
11128 			 * action chain.
11129 			 */
11130 			dtrace_action_t *act = ecb->dte_action;
11131 
11132 			for (; act != NULL; act = act->dta_next) {
11133 				if (act->dta_kind == DTRACEACT_COMMIT)
11134 					return (EINVAL);
11135 			}
11136 		}
11137 
11138 		action = kmem_zalloc(sizeof (dtrace_action_t), KM_SLEEP);
11139 		action->dta_rec.dtrd_size = size;
11140 	}
11141 
11142 	action->dta_refcnt = 1;
11143 	rec = &action->dta_rec;
11144 	size = rec->dtrd_size;
11145 
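	/*
	 * Compute the record's alignment:  the largest power of two (up to
	 * the size of a uint64_t) that evenly divides the record size.  A
	 * zero-sized record keeps the default byte alignment.
	 */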
11146 	for (mask = sizeof (uint64_t) - 1; size != 0 && mask > 0; mask >>= 1) {
11147 		if (!(size & mask)) {
11148 			align = mask + 1;
11149 			break;
11150 		}
11151 	}
11152 
11153 	action->dta_kind = desc->dtad_kind;
11154 
11155 	if ((action->dta_difo = dp) != NULL)
11156 		dtrace_difo_hold(dp);
11157 
11158 	rec->dtrd_action = action->dta_kind;
11159 	rec->dtrd_arg = arg;
11160 	rec->dtrd_uarg = desc->dtad_uarg;
11161 	rec->dtrd_alignment = (uint16_t)align;
11162 	rec->dtrd_format = format;
11163 
11164 	if ((last = ecb->dte_action_last) != NULL) {
11165 		ASSERT(ecb->dte_action != NULL);
11166 		action->dta_prev = last;
11167 		last->dta_next = action;
11168 	} else {
11169 		ASSERT(ecb->dte_action == NULL);
11170 		ecb->dte_action = action;
11171 	}
11172 
11173 	ecb->dte_action_last = action;
11174 
11175 	return (0);
11176 }
11177 
11178 static void
11179 dtrace_ecb_action_remove(dtrace_ecb_t *ecb)
11180 {
11181 	dtrace_action_t *act = ecb->dte_action, *next;
11182 	dtrace_vstate_t *vstate = &ecb->dte_state->dts_vstate;
11183 	dtrace_difo_t *dp;
11184 	uint16_t format;
11185 
11186 	if (act != NULL && act->dta_refcnt > 1) {
11187 		ASSERT(act->dta_next == NULL || act->dta_next->dta_refcnt == 1);
11188 		act->dta_refcnt--;
11189 	} else {
11190 		for (; act != NULL; act = next) {
11191 			next = act->dta_next;
11192 			ASSERT(next != NULL || act == ecb->dte_action_last);
11193 			ASSERT(act->dta_refcnt == 1);
11194 
11195 			if ((format = act->dta_rec.dtrd_format) != 0)
11196 				dtrace_format_remove(ecb->dte_state, format);
11197 
11198 			if ((dp = act->dta_difo) != NULL)
11199 				dtrace_difo_release(dp, vstate);
11200 
11201 			if (DTRACEACT_ISAGG(act->dta_kind)) {
11202 				dtrace_ecb_aggregation_destroy(ecb, act);
11203 			} else {
11204 				kmem_free(act, sizeof (dtrace_action_t));
11205 			}
11206 		}
11207 	}
11208 
11209 	ecb->dte_action = NULL;
11210 	ecb->dte_action_last = NULL;
11211 	ecb->dte_size = 0;
11212 }
11213 
11214 static void
11215 dtrace_ecb_disable(dtrace_ecb_t *ecb)
11216 {
11217 	/*
11218 	 * We disable the ECB by removing it from its probe.
11219 	 */
11220 	dtrace_ecb_t *pecb, *prev = NULL;
11221 	dtrace_probe_t *probe = ecb->dte_probe;
11222 
11223 	ASSERT(MUTEX_HELD(&dtrace_lock));
11224 
11225 	if (probe == NULL) {
11226 		/*
11227 		 * This is the NULL probe; there is nothing to disable.
11228 		 */
11229 		return;
11230 	}
11231 
11232 	for (pecb = probe->dtpr_ecb; pecb != NULL; pecb = pecb->dte_next) {
11233 		if (pecb == ecb)
11234 			break;
11235 		prev = pecb;
11236 	}
11237 
11238 	ASSERT(pecb != NULL);
11239 
11240 	if (prev == NULL) {
11241 		probe->dtpr_ecb = ecb->dte_next;
11242 	} else {
11243 		prev->dte_next = ecb->dte_next;
11244 	}
11245 
11246 	if (ecb == probe->dtpr_ecb_last) {
11247 		ASSERT(ecb->dte_next == NULL);
11248 		probe->dtpr_ecb_last = prev;
11249 	}
11250 
11251 	/*
11252 	 * The ECB has been disconnected from the probe; now sync to assure
11253 	 * that all CPUs have seen the change before returning.
11254 	 */
11255 	dtrace_sync();
11256 
11257 	if (probe->dtpr_ecb == NULL) {
11258 		/*
11259 		 * That was the last ECB on the probe; clear the predicate
11260 		 * cache ID for the probe, disable it and sync one more time
11261 		 * to assure that we'll never hit it again.
11262 		 */
11263 		dtrace_provider_t *prov = probe->dtpr_provider;
11264 
11265 		ASSERT(ecb->dte_next == NULL);
11266 		ASSERT(probe->dtpr_ecb_last == NULL);
11267 		probe->dtpr_predcache = DTRACE_CACHEIDNONE;
11268 		prov->dtpv_pops.dtps_disable(prov->dtpv_arg,
11269 		    probe->dtpr_id, probe->dtpr_arg);
11270 		dtrace_sync();
11271 	} else {
11272 		/*
11273 		 * There is at least one ECB remaining on the probe.  If there
11274 		 * is _exactly_ one, set the probe's predicate cache ID to be
11275 		 * the predicate cache ID of the remaining ECB.
11276 		 */
11277 		ASSERT(probe->dtpr_ecb_last != NULL);
11278 		ASSERT(probe->dtpr_predcache == DTRACE_CACHEIDNONE);
11279 
11280 		if (probe->dtpr_ecb == probe->dtpr_ecb_last) {
11281 			dtrace_predicate_t *p = probe->dtpr_ecb->dte_predicate;
11282 
11283 			ASSERT(probe->dtpr_ecb->dte_next == NULL);
11284 
11285 			if (p != NULL)
11286 				probe->dtpr_predcache = p->dtp_cacheid;
11287 		}
11288 
11289 		ecb->dte_next = NULL;
11290 	}
11291 }
11292 
11293 static void
11294 dtrace_ecb_destroy(dtrace_ecb_t *ecb)
11295 {
11296 	dtrace_state_t *state = ecb->dte_state;
11297 	dtrace_vstate_t *vstate = &state->dts_vstate;
11298 	dtrace_predicate_t *pred;
11299 	dtrace_epid_t epid = ecb->dte_epid;
11300 
11301 	ASSERT(MUTEX_HELD(&dtrace_lock));
11302 	ASSERT(ecb->dte_next == NULL);
11303 	ASSERT(ecb->dte_probe == NULL || ecb->dte_probe->dtpr_ecb != ecb);
11304 
11305 	if ((pred = ecb->dte_predicate) != NULL)
11306 		dtrace_predicate_release(pred, vstate);
11307 
11308 	dtrace_ecb_action_remove(ecb);
11309 
11310 	ASSERT(state->dts_ecbs[epid - 1] == ecb);
11311 	state->dts_ecbs[epid - 1] = NULL;
11312 
11313 	kmem_free(ecb, sizeof (dtrace_ecb_t));
11314 }
11315 
11316 static dtrace_ecb_t *
11317 dtrace_ecb_create(dtrace_state_t *state, dtrace_probe_t *probe,
11318     dtrace_enabling_t *enab)
11319 {
11320 	dtrace_ecb_t *ecb;
11321 	dtrace_predicate_t *pred;
11322 	dtrace_actdesc_t *act;
11323 	dtrace_provider_t *prov;
11324 	dtrace_ecbdesc_t *desc = enab->dten_current;
11325 
11326 	ASSERT(MUTEX_HELD(&dtrace_lock));
11327 	ASSERT(state != NULL);
11328 
11329 	ecb = dtrace_ecb_add(state, probe);
11330 	ecb->dte_uarg = desc->dted_uarg;
11331 
11332 	if ((pred = desc->dted_pred.dtpdd_predicate) != NULL) {
11333 		dtrace_predicate_hold(pred);
11334 		ecb->dte_predicate = pred;
11335 	}
11336 
11337 	if (probe != NULL) {
11338 		/*
11339 		 * If the provider shows more leg than the consumer is old
11340 		 * enough to see, we need to enable the appropriate implicit
11341 		 * predicate bits to prevent the ecb from activating at
11342 		 * revealing times.
11343 		 *
11344 		 * Providers specifying DTRACE_PRIV_USER at register time
11345 		 * are stating that they need the /proc-style privilege
11346 		 * model to be enforced, and this is what DTRACE_COND_OWNER
11347 		 * and DTRACE_COND_ZONEOWNER will then do at probe time.
11348 		 */
11349 		prov = probe->dtpr_provider;
11350 		if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLPROC) &&
11351 		    (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER))
11352 			ecb->dte_cond |= DTRACE_COND_OWNER;
11353 
11354 		if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLZONE) &&
11355 		    (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER))
11356 			ecb->dte_cond |= DTRACE_COND_ZONEOWNER;
11357 
11358 		/*
11359 		 * If the provider shows us kernel innards and the user
11360 		 * is lacking sufficient privilege, enable the
11361 		 * DTRACE_COND_USERMODE implicit predicate.
11362 		 */
11363 		if (!(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) &&
11364 		    (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_KERNEL))
11365 			ecb->dte_cond |= DTRACE_COND_USERMODE;
11366 	}
11367 
11368 	if (dtrace_ecb_create_cache != NULL) {
11369 		/*
11370 		 * If we have a cached ecb, we'll use its action list instead
11371 		 * of creating our own (saving both time and space).
11372 		 */
11373 		dtrace_ecb_t *cached = dtrace_ecb_create_cache;
11374 		dtrace_action_t *act = cached->dte_action;
11375 
11376 		if (act != NULL) {
11377 			ASSERT(act->dta_refcnt > 0);
11378 			act->dta_refcnt++;
11379 			ecb->dte_action = act;
11380 			ecb->dte_action_last = cached->dte_action_last;
11381 			ecb->dte_needed = cached->dte_needed;
11382 			ecb->dte_size = cached->dte_size;
11383 			ecb->dte_alignment = cached->dte_alignment;
11384 		}
11385 
11386 		return (ecb);
11387 	}
11388 
11389 	for (act = desc->dted_action; act != NULL; act = act->dtad_next) {
11390 		if ((enab->dten_error = dtrace_ecb_action_add(ecb, act)) != 0) {
11391 			dtrace_ecb_destroy(ecb);
11392 			return (NULL);
11393 		}
11394 	}
11395 
11396 	if ((enab->dten_error = dtrace_ecb_resize(ecb)) != 0) {
11397 		dtrace_ecb_destroy(ecb);
11398 		return (NULL);
11399 	}
11400 
11401 	return (dtrace_ecb_create_cache = ecb);
11402 }
11403 
11404 static int
11405 dtrace_ecb_create_enable(dtrace_probe_t *probe, void *arg)
11406 {
11407 	dtrace_ecb_t *ecb;
11408 	dtrace_enabling_t *enab = arg;
11409 	dtrace_state_t *state = enab->dten_vstate->dtvs_state;
11410 
11411 	ASSERT(state != NULL);
11412 
11413 	if (probe != NULL && probe->dtpr_gen < enab->dten_probegen) {
11414 		/*
11415 		 * This probe was created in a generation for which this
11416 		 * enabling has previously created ECBs; we don't want to
11417 		 * enable it again, so just kick out.
11418 		 */
11419 		return (DTRACE_MATCH_NEXT);
11420 	}
11421 
11422 	if ((ecb = dtrace_ecb_create(state, probe, enab)) == NULL)
11423 		return (DTRACE_MATCH_DONE);
11424 
11425 	if (dtrace_ecb_enable(ecb) < 0)
11426 		return (DTRACE_MATCH_FAIL);
11427 
11428 	return (DTRACE_MATCH_NEXT);
11429 }
11430 
11431 static dtrace_ecb_t *
11432 dtrace_epid2ecb(dtrace_state_t *state, dtrace_epid_t id)
11433 {
11434 	dtrace_ecb_t *ecb;
11435 
11436 	ASSERT(MUTEX_HELD(&dtrace_lock));
11437 
11438 	if (id == 0 || id > state->dts_necbs)
11439 		return (NULL);
11440 
11441 	ASSERT(state->dts_necbs > 0 && state->dts_ecbs != NULL);
11442 	ASSERT((ecb = state->dts_ecbs[id - 1]) == NULL || ecb->dte_epid == id);
11443 
11444 	return (state->dts_ecbs[id - 1]);
11445 }
11446 
11447 static dtrace_aggregation_t *
11448 dtrace_aggid2agg(dtrace_state_t *state, dtrace_aggid_t id)
11449 {
11450 	dtrace_aggregation_t *agg;
11451 
11452 	ASSERT(MUTEX_HELD(&dtrace_lock));
11453 
11454 	if (id == 0 || id > state->dts_naggregations)
11455 		return (NULL);
11456 
11457 	ASSERT(state->dts_naggregations > 0 && state->dts_aggregations != NULL);
11458 	ASSERT((agg = state->dts_aggregations[id - 1]) == NULL ||
11459 	    agg->dtag_id == id);
11460 
11461 	return (state->dts_aggregations[id - 1]);
11462 }
11463 
11464 /*
11465  * DTrace Buffer Functions
11466  *
11467  * The following functions manipulate DTrace buffers.  Most of these functions
11468  * are called in the context of establishing or processing consumer state;
11469  * exceptions are explicitly noted.
11470  */
11471 
11472 /*
11473  * Note:  called from cross call context.  This function switches the two
11474  * buffers on a given CPU.  The atomicity of this operation is assured by
11475  * disabling interrupts while the actual switch takes place; the disabling of
11476  * interrupts serializes the execution with any execution of dtrace_probe() on
11477  * the same CPU.
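 *
 * A buffer switch is typically requested via a cross call from the ioctl
 * path (e.g. DTRACEIOC_BUFSNAP), roughly:
 *
 *	dtrace_xcall(cpu, (dtrace_xcall_t)dtrace_buffer_switch, buf);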
11478  */
11479 static void
11480 dtrace_buffer_switch(dtrace_buffer_t *buf)
11481 {
11482 	caddr_t tomax = buf->dtb_tomax;
11483 	caddr_t xamot = buf->dtb_xamot;
11484 	dtrace_icookie_t cookie;
11485 	hrtime_t now;
11486 
11487 	ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));
11488 	ASSERT(!(buf->dtb_flags & DTRACEBUF_RING));
11489 
11490 	cookie = dtrace_interrupt_disable();
11491 	now = dtrace_gethrtime();
11492 	buf->dtb_tomax = xamot;
11493 	buf->dtb_xamot = tomax;
11494 	buf->dtb_xamot_drops = buf->dtb_drops;
11495 	buf->dtb_xamot_offset = buf->dtb_offset;
11496 	buf->dtb_xamot_errors = buf->dtb_errors;
11497 	buf->dtb_xamot_flags = buf->dtb_flags;
11498 	buf->dtb_offset = 0;
11499 	buf->dtb_drops = 0;
11500 	buf->dtb_errors = 0;
11501 	buf->dtb_flags &= ~(DTRACEBUF_ERROR | DTRACEBUF_DROPPED);
11502 	buf->dtb_interval = now - buf->dtb_switched;
11503 	buf->dtb_switched = now;
11504 	dtrace_interrupt_enable(cookie);
11505 }
11506 
11507 /*
11508  * Note:  called from cross call context.  This function activates a buffer
11509  * on a CPU.  As with dtrace_buffer_switch(), the atomicity of the operation
11510  * is guaranteed by the disabling of interrupts.
11511  */
11512 static void
11513 dtrace_buffer_activate(dtrace_state_t *state)
11514 {
11515 	dtrace_buffer_t *buf;
11516 	dtrace_icookie_t cookie = dtrace_interrupt_disable();
11517 
11518 	buf = &state->dts_buffer[CPU->cpu_id];
11519 
11520 	if (buf->dtb_tomax != NULL) {
11521 		/*
11522 		 * We might like to assert that the buffer is marked inactive,
11523 		 * but this isn't necessarily true:  the buffer for the CPU
11524 		 * that processes the BEGIN probe has its buffer activated
11525 		 * manually.  In this case, we take the (harmless) action
11526 		 * manually.  In this case, we take the (harmless) action of
11527 		 * re-clearing the INACTIVE bit.
11528 		buf->dtb_flags &= ~DTRACEBUF_INACTIVE;
11529 	}
11530 
11531 	dtrace_interrupt_enable(cookie);
11532 }
11533 
11534 static int
11535 dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags,
11536     processorid_t cpu, int *factor)
11537 {
11538 	cpu_t *cp;
11539 	dtrace_buffer_t *buf;
11540 	int allocated = 0, desired = 0;
11541 
11542 	ASSERT(MUTEX_HELD(&cpu_lock));
11543 	ASSERT(MUTEX_HELD(&dtrace_lock));
11544 
11545 	*factor = 1;
11546 
11547 	if (size > dtrace_nonroot_maxsize &&
11548 	    !PRIV_POLICY_CHOICE(CRED(), PRIV_ALL, B_FALSE))
11549 		return (EFBIG);
11550 
11551 	cp = cpu_list;
11552 
11553 	do {
11554 		if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id)
11555 			continue;
11556 
11557 		buf = &bufs[cp->cpu_id];
11558 
11559 		/*
11560 		 * If there is already a buffer allocated for this CPU, it
11561 		 * is only possible that this is a DR event.  In this case,
11562 		 * the buffer size must match our specified size.
11563 		 */
11564 		if (buf->dtb_tomax != NULL) {
11565 			ASSERT(buf->dtb_size == size);
11566 			continue;
11567 		}
11568 
11569 		ASSERT(buf->dtb_xamot == NULL);
11570 
11571 		if ((buf->dtb_tomax = kmem_zalloc(size,
11572 		    KM_NOSLEEP | KM_NORMALPRI)) == NULL)
11573 			goto err;
11574 
11575 		buf->dtb_size = size;
11576 		buf->dtb_flags = flags;
11577 		buf->dtb_offset = 0;
11578 		buf->dtb_drops = 0;
11579 
11580 		if (flags & DTRACEBUF_NOSWITCH)
11581 			continue;
11582 
11583 		if ((buf->dtb_xamot = kmem_zalloc(size,
11584 		    KM_NOSLEEP | KM_NORMALPRI)) == NULL)
11585 			goto err;
11586 	} while ((cp = cp->cpu_next) != cpu_list);
11587 
11588 	return (0);
11589 
11590 err:
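	/*
	 * Allocation failed on some CPU; free everything that was allocated
	 * and report, via *factor, the ratio of buffers desired to buffers
	 * actually allocated so that the caller can scale down its request.
	 */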
11591 	cp = cpu_list;
11592 
11593 	do {
11594 		if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id)
11595 			continue;
11596 
11597 		buf = &bufs[cp->cpu_id];
11598 		desired += 2;
11599 
11600 		if (buf->dtb_xamot != NULL) {
11601 			ASSERT(buf->dtb_tomax != NULL);
11602 			ASSERT(buf->dtb_size == size);
11603 			kmem_free(buf->dtb_xamot, size);
11604 			allocated++;
11605 		}
11606 
11607 		if (buf->dtb_tomax != NULL) {
11608 			ASSERT(buf->dtb_size == size);
11609 			kmem_free(buf->dtb_tomax, size);
11610 			allocated++;
11611 		}
11612 
11613 		buf->dtb_tomax = NULL;
11614 		buf->dtb_xamot = NULL;
11615 		buf->dtb_size = 0;
11616 	} while ((cp = cp->cpu_next) != cpu_list);
11617 
11618 	*factor = desired / (allocated > 0 ? allocated : 1);
11619 
11620 	return (ENOMEM);
11621 }
11622 
11623 /*
11624  * Note:  called from probe context.  This function just increments the drop
11625  * count on a buffer.  It has been made a function to allow for the
11626  * possibility of understanding the source of mysterious drop counts.  (A
11627  * problem for which one may be particularly disappointed that DTrace cannot
11628  * be used to understand DTrace.)
11629  */
11630 static void
11631 dtrace_buffer_drop(dtrace_buffer_t *buf)
11632 {
11633 	buf->dtb_drops++;
11634 }
11635 
11636 /*
11637  * Note:  called from probe context.  This function is called to reserve space
11638  * in a buffer.  If mstate is non-NULL, sets the scratch base and size in the
11639  * mstate.  Returns the new offset in the buffer, or a negative value if an
11640  * error has occurred.
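 *
 * A typical call from probe context reserves space for an ECB's record,
 * roughly:
 *
 *	offs = dtrace_buffer_reserve(buf, ecb->dte_needed,
 *	    ecb->dte_alignment, state, &mstate);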
11641  */
11642 static intptr_t
11643 dtrace_buffer_reserve(dtrace_buffer_t *buf, size_t needed, size_t align,
11644     dtrace_state_t *state, dtrace_mstate_t *mstate)
11645 {
11646 	intptr_t offs = buf->dtb_offset, soffs;
11647 	intptr_t woffs;
11648 	caddr_t tomax;
11649 	size_t total;
11650 
11651 	if (buf->dtb_flags & DTRACEBUF_INACTIVE)
11652 		return (-1);
11653 
11654 	if ((tomax = buf->dtb_tomax) == NULL) {
11655 		dtrace_buffer_drop(buf);
11656 		return (-1);
11657 	}
11658 
11659 	if (!(buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL))) {
11660 		while (offs & (align - 1)) {
11661 			/*
11662 			 * Assert that our alignment is off by a number which
11663 			 * is itself sizeof (uint32_t) aligned.
11664 			 */
11665 			ASSERT(!((align - (offs & (align - 1))) &
11666 			    (sizeof (uint32_t) - 1)));
11667 			DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE);
11668 			offs += sizeof (uint32_t);
11669 		}
11670 
11671 		if ((soffs = offs + needed) > buf->dtb_size) {
11672 			dtrace_buffer_drop(buf);
11673 			return (-1);
11674 		}
11675 
11676 		if (mstate == NULL)
11677 			return (offs);
11678 
11679 		mstate->dtms_scratch_base = (uintptr_t)tomax + soffs;
11680 		mstate->dtms_scratch_size = buf->dtb_size - soffs;
11681 		mstate->dtms_scratch_ptr = mstate->dtms_scratch_base;
11682 
11683 		return (offs);
11684 	}
11685 
11686 	if (buf->dtb_flags & DTRACEBUF_FILL) {
11687 		if (state->dts_activity != DTRACE_ACTIVITY_COOLDOWN &&
11688 		    (buf->dtb_flags & DTRACEBUF_FULL))
11689 			return (-1);
11690 		goto out;
11691 	}
11692 
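	/*
	 * The total reservation must cover both the record itself and any
	 * padding needed to bring the current offset up to the record's
	 * alignment.
	 */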
11693 	total = needed + (offs & (align - 1));
11694 
11695 	/*
11696 	 * For a ring buffer, life is quite a bit more complicated.  Before
11697 	 * we can store any padding, we need to adjust our wrapping offset.
11698 	 * (If we've never before wrapped or we're not about to, no adjustment
11699 	 * is required.)
11700 	 */
11701 	if ((buf->dtb_flags & DTRACEBUF_WRAPPED) ||
11702 	    offs + total > buf->dtb_size) {
11703 		woffs = buf->dtb_xamot_offset;
11704 
11705 		if (offs + total > buf->dtb_size) {
11706 			/*
11707 			 * We can't fit in the end of the buffer.  First, a
11708 			 * sanity check that we can fit in the buffer at all.
11709 			 */
11710 			if (total > buf->dtb_size) {
11711 				dtrace_buffer_drop(buf);
11712 				return (-1);
11713 			}
11714 
11715 			/*
11716 			 * We're going to be storing at the top of the buffer,
11717 			 * so now we need to deal with the wrapped offset.  We
11718 			 * only reset our wrapped offset to 0 if it is
11719 			 * currently greater than the current offset.  If it
11720 			 * is less than the current offset, it is because a
11721 			 * previous allocation induced a wrap -- but the
11722 			 * allocation didn't subsequently take the space due
11723 			 * to an error or false predicate evaluation.  In this
11724 			 * case, we'll just leave the wrapped offset alone: if
11725 			 * the wrapped offset hasn't been advanced far enough
11726 			 * for this allocation, it will be adjusted in the
11727 			 * lower loop.
11728 			 */
11729 			if (buf->dtb_flags & DTRACEBUF_WRAPPED) {
11730 				if (woffs >= offs)
11731 					woffs = 0;
11732 			} else {
11733 				woffs = 0;
11734 			}
11735 
11736 			/*
11737 			 * Now we know that we're going to be storing to the
11738 			 * top of the buffer and that there is room for us
11739 			 * there.  We need to clear the buffer from the current
11740 			 * offset to the end (there may be old gunk there).
11741 			 */
11742 			while (offs < buf->dtb_size)
11743 				tomax[offs++] = 0;
11744 
11745 			/*
11746 			 * We need to set our offset to zero.  And because we
11747 			 * are wrapping, we need to set the bit indicating as
11748 			 * much.  We can also adjust our needed space back
11749 			 * down to the space required by the ECB -- we know
11750 			 * that the top of the buffer is aligned.
11751 			 */
11752 			offs = 0;
11753 			total = needed;
11754 			buf->dtb_flags |= DTRACEBUF_WRAPPED;
11755 		} else {
11756 			/*
11757 			 * There is room for us in the buffer, so we simply
11758 			 * need to check the wrapped offset.
11759 			 */
11760 			if (woffs < offs) {
11761 				/*
11762 				 * The wrapped offset is less than the offset.
11763 				 * This can happen if we allocated buffer space
11764 				 * that induced a wrap, but then we didn't
11765 				 * subsequently take the space due to an error
11766 				 * or false predicate evaluation.  This is
11767 				 * okay; we know that _this_ allocation isn't
11768 				 * going to induce a wrap.  We still can't
11769 				 * reset the wrapped offset to be zero,
11770 				 * however: the space may have been trashed in
11771 				 * the previous failed probe attempt.  But at
11772 				 * least the wrapped offset doesn't need to
11773 				 * be adjusted at all...
11774 				 */
11775 				goto out;
11776 			}
11777 		}
11778 
11779 		while (offs + total > woffs) {
11780 			dtrace_epid_t epid = *(uint32_t *)(tomax + woffs);
11781 			size_t size;
11782 
11783 			if (epid == DTRACE_EPIDNONE) {
11784 				size = sizeof (uint32_t);
11785 			} else {
11786 				ASSERT3U(epid, <=, state->dts_necbs);
11787 				ASSERT(state->dts_ecbs[epid - 1] != NULL);
11788 
11789 				size = state->dts_ecbs[epid - 1]->dte_size;
11790 			}
11791 
11792 			ASSERT(woffs + size <= buf->dtb_size);
11793 			ASSERT(size != 0);
11794 
11795 			if (woffs + size == buf->dtb_size) {
11796 				/*
11797 				 * We've reached the end of the buffer; we want
11798 				 * to set the wrapped offset to 0 and break
11799 				 * out.  However, if the offs is 0, then we're
11800 				 * in a strange edge-condition:  the amount of
11801 				 * space that we want to reserve plus the size
11802 				 * of the record that we're overwriting is
11803 				 * greater than the size of the buffer.  This
11804 				 * is problematic because if we reserve the
11805 				 * space but subsequently don't consume it (due
11806 				 * to a failed predicate or error) the wrapped
11807 				 * offset will be 0 -- yet the EPID at offset 0
11808 				 * will not be committed.  This situation is
11809 				 * relatively easy to deal with:  if we're in
11810 				 * this case, the buffer is indistinguishable
11811 				 * from one that hasn't wrapped; we need only
11812 				 * finish the job by clearing the wrapped bit,
11813 				 * explicitly setting the offset to be 0, and
11814 				 * zero'ing out the old data in the buffer.
11815 				 */
11816 				if (offs == 0) {
11817 					buf->dtb_flags &= ~DTRACEBUF_WRAPPED;
11818 					buf->dtb_offset = 0;
11819 					woffs = total;
11820 
11821 					while (woffs < buf->dtb_size)
11822 						tomax[woffs++] = 0;
11823 				}
11824 
11825 				woffs = 0;
11826 				break;
11827 			}
11828 
11829 			woffs += size;
11830 		}
11831 
11832 		/*
11833 		 * We have a wrapped offset.  It may be that the wrapped offset
11834 		 * has become zero -- that's okay.
11835 		 */
11836 		buf->dtb_xamot_offset = woffs;
11837 	}
11838 
11839 out:
11840 	/*
11841 	 * Now we can plow the buffer with any necessary padding.
11842 	 */
11843 	while (offs & (align - 1)) {
11844 		/*
11845 		 * Assert that our alignment is off by a number which
11846 		 * is itself sizeof (uint32_t) aligned.
11847 		 */
11848 		ASSERT(!((align - (offs & (align - 1))) &
11849 		    (sizeof (uint32_t) - 1)));
11850 		DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE);
11851 		offs += sizeof (uint32_t);
11852 	}
11853 
11854 	if (buf->dtb_flags & DTRACEBUF_FILL) {
11855 		if (offs + needed > buf->dtb_size - state->dts_reserve) {
11856 			buf->dtb_flags |= DTRACEBUF_FULL;
11857 			return (-1);
11858 		}
11859 	}
11860 
11861 	if (mstate == NULL)
11862 		return (offs);
11863 
11864 	/*
11865 	 * For ring buffers and fill buffers, the scratch space is always
11866 	 * the inactive buffer.
11867 	 */
11868 	mstate->dtms_scratch_base = (uintptr_t)buf->dtb_xamot;
11869 	mstate->dtms_scratch_size = buf->dtb_size;
11870 	mstate->dtms_scratch_ptr = mstate->dtms_scratch_base;
11871 
11872 	return (offs);
11873 }
11874 
11875 static void
11876 dtrace_buffer_polish(dtrace_buffer_t *buf)
11877 {
11878 	ASSERT(buf->dtb_flags & DTRACEBUF_RING);
11879 	ASSERT(MUTEX_HELD(&dtrace_lock));
11880 
11881 	if (!(buf->dtb_flags & DTRACEBUF_WRAPPED))
11882 		return;
11883 
11884 	/*
11885 	 * We need to polish the ring buffer.  There are three cases:
11886 	 *
11887 	 * - The first (and presumably most common) is that there is no gap
11888 	 *   between the buffer offset and the wrapped offset.  In this case,
11889 	 *   there is nothing in the buffer that isn't valid data; we can
11890 	 *   mark the buffer as polished and return.
11891 	 *
11892 	 * - The second (less common than the first but still more common
11893 	 *   than the third) is that there is a gap between the buffer offset
11894 	 *   and the wrapped offset, and the wrapped offset is larger than the
11895 	 *   buffer offset.  This can happen because of an alignment issue, or
11896 	 *   can happen because of a call to dtrace_buffer_reserve() that
11897 	 *   didn't subsequently consume the buffer space.  In this case,
11898 	 *   we need to zero the data from the buffer offset to the wrapped
11899 	 *   offset.
11900 	 *
11901 	 * - The third (and least common) is that there is a gap between the
11902 	 *   buffer offset and the wrapped offset, but the wrapped offset is
11903 	 *   _less_ than the buffer offset.  This can only happen because a
11904 	 *   call to dtrace_buffer_reserve() induced a wrap, but the space
11905 	 *   was not subsequently consumed.  In this case, we need to zero the
11906 	 *   space from the offset to the end of the buffer _and_ from the
11907 	 *   top of the buffer to the wrapped offset.
11908 	 */
11909 	if (buf->dtb_offset < buf->dtb_xamot_offset) {
11910 		bzero(buf->dtb_tomax + buf->dtb_offset,
11911 		    buf->dtb_xamot_offset - buf->dtb_offset);
11912 	}
11913 
11914 	if (buf->dtb_offset > buf->dtb_xamot_offset) {
11915 		bzero(buf->dtb_tomax + buf->dtb_offset,
11916 		    buf->dtb_size - buf->dtb_offset);
11917 		bzero(buf->dtb_tomax, buf->dtb_xamot_offset);
11918 	}
11919 }
11920 
11921 /*
11922  * This routine determines if data generated at the specified time has likely
11923  * been entirely consumed at user-level.  This routine is called to determine
11924  * if an ECB on a defunct probe (but for an active enabling) can be safely
11925  * disabled and destroyed.
11926  */
11927 static int
11928 dtrace_buffer_consumed(dtrace_buffer_t *bufs, hrtime_t when)
11929 {
11930 	int i;
11931 
11932 	for (i = 0; i < NCPU; i++) {
11933 		dtrace_buffer_t *buf = &bufs[i];
11934 
11935 		if (buf->dtb_size == 0)
11936 			continue;
11937 
11938 		if (buf->dtb_flags & DTRACEBUF_RING)
11939 			return (0);
11940 
11941 		if (!buf->dtb_switched && buf->dtb_offset != 0)
11942 			return (0);
11943 
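		/*
		 * The buffer has been switched; only data generated before
		 * the start of the most recent switch interval can be
		 * assumed to have been consumed.
		 */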
11944 		if (buf->dtb_switched - buf->dtb_interval < when)
11945 			return (0);
11946 	}
11947 
11948 	return (1);
11949 }
11950 
11951 static void
11952 dtrace_buffer_free(dtrace_buffer_t *bufs)
11953 {
11954 	int i;
11955 
11956 	for (i = 0; i < NCPU; i++) {
11957 		dtrace_buffer_t *buf = &bufs[i];
11958 
11959 		if (buf->dtb_tomax == NULL) {
11960 			ASSERT(buf->dtb_xamot == NULL);
11961 			ASSERT(buf->dtb_size == 0);
11962 			continue;
11963 		}
11964 
11965 		if (buf->dtb_xamot != NULL) {
11966 			ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));
11967 			kmem_free(buf->dtb_xamot, buf->dtb_size);
11968 		}
11969 
11970 		kmem_free(buf->dtb_tomax, buf->dtb_size);
11971 		buf->dtb_size = 0;
11972 		buf->dtb_tomax = NULL;
11973 		buf->dtb_xamot = NULL;
11974 	}
11975 }
11976 
11977 /*
11978  * DTrace Enabling Functions
11979  */
11980 static dtrace_enabling_t *
11981 dtrace_enabling_create(dtrace_vstate_t *vstate)
11982 {
11983 	dtrace_enabling_t *enab;
11984 
11985 	enab = kmem_zalloc(sizeof (dtrace_enabling_t), KM_SLEEP);
11986 	enab->dten_vstate = vstate;
11987 
11988 	return (enab);
11989 }
11990 
11991 static void
11992 dtrace_enabling_add(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb)
11993 {
11994 	dtrace_ecbdesc_t **ndesc;
11995 	size_t osize, nsize;
11996 
11997 	/*
11998 	 * We can't add to enablings after we've enabled them, or after we've
11999 	 * retained them.
12000 	 */
12001 	ASSERT(enab->dten_probegen == 0);
12002 	ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL);
12003 
12004 	if (enab->dten_ndesc < enab->dten_maxdesc) {
12005 		enab->dten_desc[enab->dten_ndesc++] = ecb;
12006 		return;
12007 	}
12008 
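	/*
	 * The description array is full; grow it by doubling its size
	 * (starting at a single entry) and copy the existing descriptions
	 * into the new array.
	 */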
12009 	osize = enab->dten_maxdesc * sizeof (dtrace_ecbdesc_t *);
12010 
12011 	if (enab->dten_maxdesc == 0) {
12012 		enab->dten_maxdesc = 1;
12013 	} else {
12014 		enab->dten_maxdesc <<= 1;
12015 	}
12016 
12017 	ASSERT(enab->dten_ndesc < enab->dten_maxdesc);
12018 
12019 	nsize = enab->dten_maxdesc * sizeof (dtrace_ecbdesc_t *);
12020 	ndesc = kmem_zalloc(nsize, KM_SLEEP);
12021 	bcopy(enab->dten_desc, ndesc, osize);
12022 	kmem_free(enab->dten_desc, osize);
12023 
12024 	enab->dten_desc = ndesc;
12025 	enab->dten_desc[enab->dten_ndesc++] = ecb;
12026 }
12027 
12028 static void
12029 dtrace_enabling_addlike(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb,
12030     dtrace_probedesc_t *pd)
12031 {
12032 	dtrace_ecbdesc_t *new;
12033 	dtrace_predicate_t *pred;
12034 	dtrace_actdesc_t *act;
12035 
12036 	/*
12037 	 * We're going to create a new ECB description that matches the
12038 	 * specified ECB in every way, but has the specified probe description.
12039 	 */
12040 	new = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP);
12041 
12042 	if ((pred = ecb->dted_pred.dtpdd_predicate) != NULL)
12043 		dtrace_predicate_hold(pred);
12044 
12045 	for (act = ecb->dted_action; act != NULL; act = act->dtad_next)
12046 		dtrace_actdesc_hold(act);
12047 
12048 	new->dted_action = ecb->dted_action;
12049 	new->dted_pred = ecb->dted_pred;
12050 	new->dted_probe = *pd;
12051 	new->dted_uarg = ecb->dted_uarg;
12052 
12053 	dtrace_enabling_add(enab, new);
12054 }
12055 
12056 static void
12057 dtrace_enabling_dump(dtrace_enabling_t *enab)
12058 {
12059 	int i;
12060 
12061 	for (i = 0; i < enab->dten_ndesc; i++) {
12062 		dtrace_probedesc_t *desc = &enab->dten_desc[i]->dted_probe;
12063 
12064 		cmn_err(CE_NOTE, "enabling probe %d (%s:%s:%s:%s)", i,
12065 		    desc->dtpd_provider, desc->dtpd_mod,
12066 		    desc->dtpd_func, desc->dtpd_name);
12067 	}
12068 }
12069 
12070 static void
12071 dtrace_enabling_destroy(dtrace_enabling_t *enab)
12072 {
12073 	int i;
12074 	dtrace_ecbdesc_t *ep;
12075 	dtrace_vstate_t *vstate = enab->dten_vstate;
12076 
12077 	ASSERT(MUTEX_HELD(&dtrace_lock));
12078 
12079 	for (i = 0; i < enab->dten_ndesc; i++) {
12080 		dtrace_actdesc_t *act, *next;
12081 		dtrace_predicate_t *pred;
12082 
12083 		ep = enab->dten_desc[i];
12084 
12085 		if ((pred = ep->dted_pred.dtpdd_predicate) != NULL)
12086 			dtrace_predicate_release(pred, vstate);
12087 
12088 		for (act = ep->dted_action; act != NULL; act = next) {
12089 			next = act->dtad_next;
12090 			dtrace_actdesc_release(act, vstate);
12091 		}
12092 
12093 		kmem_free(ep, sizeof (dtrace_ecbdesc_t));
12094 	}
12095 
12096 	kmem_free(enab->dten_desc,
12097 	    enab->dten_maxdesc * sizeof (dtrace_ecbdesc_t *));
12098 
12099 	/*
12100 	 * If this was a retained enabling, decrement the dts_nretained count
12101 	 * and take it off of the dtrace_retained list.
12102 	 */
12103 	if (enab->dten_prev != NULL || enab->dten_next != NULL ||
12104 	    dtrace_retained == enab) {
12105 		ASSERT(enab->dten_vstate->dtvs_state != NULL);
12106 		ASSERT(enab->dten_vstate->dtvs_state->dts_nretained > 0);
12107 		enab->dten_vstate->dtvs_state->dts_nretained--;
12108 		dtrace_retained_gen++;
12109 	}
12110 
12111 	if (enab->dten_prev == NULL) {
12112 		if (dtrace_retained == enab) {
12113 			dtrace_retained = enab->dten_next;
12114 
12115 			if (dtrace_retained != NULL)
12116 				dtrace_retained->dten_prev = NULL;
12117 		}
12118 	} else {
12119 		ASSERT(enab != dtrace_retained);
12120 		ASSERT(dtrace_retained != NULL);
12121 		enab->dten_prev->dten_next = enab->dten_next;
12122 	}
12123 
12124 	if (enab->dten_next != NULL) {
12125 		ASSERT(dtrace_retained != NULL);
12126 		enab->dten_next->dten_prev = enab->dten_prev;
12127 	}
12128 
12129 	kmem_free(enab, sizeof (dtrace_enabling_t));
12130 }
12131 
12132 static int
12133 dtrace_enabling_retain(dtrace_enabling_t *enab)
12134 {
12135 	dtrace_state_t *state;
12136 
12137 	ASSERT(MUTEX_HELD(&dtrace_lock));
12138 	ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL);
12139 	ASSERT(enab->dten_vstate != NULL);
12140 
12141 	state = enab->dten_vstate->dtvs_state;
12142 	ASSERT(state != NULL);
12143 
12144 	/*
12145 	 * We only allow each state to retain dtrace_retain_max enablings.
12146 	 */
12147 	if (state->dts_nretained >= dtrace_retain_max)
12148 		return (ENOSPC);
12149 
12150 	state->dts_nretained++;
12151 	dtrace_retained_gen++;
12152 
12153 	if (dtrace_retained == NULL) {
12154 		dtrace_retained = enab;
12155 		return (0);
12156 	}
12157 
12158 	enab->dten_next = dtrace_retained;
12159 	dtrace_retained->dten_prev = enab;
12160 	dtrace_retained = enab;
12161 
12162 	return (0);
12163 }
12164 
12165 static int
12166 dtrace_enabling_replicate(dtrace_state_t *state, dtrace_probedesc_t *match,
12167     dtrace_probedesc_t *create)
12168 {
12169 	dtrace_enabling_t *new, *enab;
12170 	int found = 0, err = ENOENT;
12171 
12172 	ASSERT(MUTEX_HELD(&dtrace_lock));
12173 	ASSERT(strlen(match->dtpd_provider) < DTRACE_PROVNAMELEN);
12174 	ASSERT(strlen(match->dtpd_mod) < DTRACE_MODNAMELEN);
12175 	ASSERT(strlen(match->dtpd_func) < DTRACE_FUNCNAMELEN);
12176 	ASSERT(strlen(match->dtpd_name) < DTRACE_NAMELEN);
12177 
12178 	new = dtrace_enabling_create(&state->dts_vstate);
12179 
12180 	/*
12181 	 * Iterate over all retained enablings, looking for enablings that
12182 	 * match the specified state.
12183 	 */
12184 	for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
12185 		int i;
12186 
12187 		/*
12188 		 * dtvs_state can only be NULL for helper enablings -- and
12189 		 * helper enablings can't be retained.
12190 		 */
12191 		ASSERT(enab->dten_vstate->dtvs_state != NULL);
12192 
12193 		if (enab->dten_vstate->dtvs_state != state)
12194 			continue;
12195 
12196 		/*
12197 		 * Now iterate over each probe description; we're looking for
12198 		 * an exact match to the specified probe description.
12199 		 */
12200 		for (i = 0; i < enab->dten_ndesc; i++) {
12201 			dtrace_ecbdesc_t *ep = enab->dten_desc[i];
12202 			dtrace_probedesc_t *pd = &ep->dted_probe;
12203 
12204 			if (strcmp(pd->dtpd_provider, match->dtpd_provider))
12205 				continue;
12206 
12207 			if (strcmp(pd->dtpd_mod, match->dtpd_mod))
12208 				continue;
12209 
12210 			if (strcmp(pd->dtpd_func, match->dtpd_func))
12211 				continue;
12212 
12213 			if (strcmp(pd->dtpd_name, match->dtpd_name))
12214 				continue;
12215 
12216 			/*
12217 			 * We have a winning probe!  Add it to our growing
12218 			 * enabling.
12219 			 */
12220 			found = 1;
12221 			dtrace_enabling_addlike(new, ep, create);
12222 		}
12223 	}
12224 
12225 	if (!found || (err = dtrace_enabling_retain(new)) != 0) {
12226 		dtrace_enabling_destroy(new);
12227 		return (err);
12228 	}
12229 
12230 	return (0);
12231 }
12232 
12233 static void
12234 dtrace_enabling_retract(dtrace_state_t *state)
12235 {
12236 	dtrace_enabling_t *enab, *next;
12237 
12238 	ASSERT(MUTEX_HELD(&dtrace_lock));
12239 
12240 	/*
12241 	 * Iterate over all retained enablings, destroying those retained
12242 	 * for the specified state.
12243 	 */
12244 	for (enab = dtrace_retained; enab != NULL; enab = next) {
12245 		next = enab->dten_next;
12246 
12247 		/*
12248 		 * dtvs_state can only be NULL for helper enablings -- and
12249 		 * helper enablings can't be retained.
12250 		 */
12251 		ASSERT(enab->dten_vstate->dtvs_state != NULL);
12252 
12253 		if (enab->dten_vstate->dtvs_state == state) {
12254 			ASSERT(state->dts_nretained > 0);
12255 			dtrace_enabling_destroy(enab);
12256 		}
12257 	}
12258 
12259 	ASSERT(state->dts_nretained == 0);
12260 }
12261 
12262 static int
12263 dtrace_enabling_match(dtrace_enabling_t *enab, int *nmatched)
12264 {
12265 	int i = 0;
12266 	int total_matched = 0, matched = 0;
12267 
12268 	ASSERT(MUTEX_HELD(&cpu_lock));
12269 	ASSERT(MUTEX_HELD(&dtrace_lock));
12270 
12271 	for (i = 0; i < enab->dten_ndesc; i++) {
12272 		dtrace_ecbdesc_t *ep = enab->dten_desc[i];
12273 
12274 		enab->dten_current = ep;
12275 		enab->dten_error = 0;
12276 
12277 		/*
12278 		 * If a provider failed to enable a probe then get out and
12279 		 * let the consumer know we failed.
12280 		 */
12281 		if ((matched = dtrace_probe_enable(&ep->dted_probe, enab)) < 0)
12282 			return (EBUSY);
12283 
12284 		total_matched += matched;
12285 
12286 		if (enab->dten_error != 0) {
12287 			/*
12288 			 * If we get an error half-way through enabling the
12289 			 * probes, we kick out -- perhaps with some number of
12290 			 * them enabled.  Leaving enabled probes enabled may
12291 			 * be slightly confusing for user-level, but we expect
12292 			 * that no one will attempt to actually drive on in
12293 			 * the face of such errors.  If this is an anonymous
12294 			 * enabling (indicated with a NULL nmatched pointer),
12295 			 * we cmn_err() a message.  We aren't expecting to
12296 			 * get such an error -- such as it can exist at all,
12297 			 * get such an error -- to the extent that it can exist at all,
12298 			 * properties.
12299 			 */
12300 			if (nmatched == NULL) {
12301 				cmn_err(CE_WARN, "dtrace_enabling_match() "
12302 				    "error on %p: %d", (void *)ep,
12303 				    enab->dten_error);
12304 			}
12305 
12306 			return (enab->dten_error);
12307 		}
12308 	}
12309 
12310 	enab->dten_probegen = dtrace_probegen;
12311 	if (nmatched != NULL)
12312 		*nmatched = total_matched;
12313 
12314 	return (0);
12315 }
12316 
12317 static void
12318 dtrace_enabling_matchall(void)
12319 {
12320 	dtrace_enabling_t *enab;
12321 
12322 	mutex_enter(&cpu_lock);
12323 	mutex_enter(&dtrace_lock);
12324 
12325 	/*
12326 	 * Iterate over all retained enablings to see if any probes match
12327 	 * against them.  We only perform this operation on enablings for which
12328 	 * we have sufficient permissions by virtue of being in the global zone
12329 	 * or in the same zone as the DTrace client.  Because we can be called
12330 	 * after dtrace_detach() has been called, we cannot assert that there
12331 	 * are retained enablings.  We can safely load from dtrace_retained,
12332 	 * however:  the taskq_destroy() at the end of dtrace_detach() will
12333 	 * block pending our completion.
12334 	 */
12335 	for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
12336 		dtrace_cred_t *dcr = &enab->dten_vstate->dtvs_state->dts_cred;
12337 		cred_t *cr = dcr->dcr_cred;
12338 		zoneid_t zone = cr != NULL ? crgetzoneid(cr) : 0;
12339 
12340 		if ((dcr->dcr_visible & DTRACE_CRV_ALLZONE) || (cr != NULL &&
12341 		    (zone == GLOBAL_ZONEID || getzoneid() == zone)))
12342 			(void) dtrace_enabling_match(enab, NULL);
12343 	}
12344 
12345 	mutex_exit(&dtrace_lock);
12346 	mutex_exit(&cpu_lock);
12347 }
12348 
12349 /*
12350  * If an enabling is to be enabled without having matched probes (that is, if
12351  * dtrace_state_go() is to be called on the underlying dtrace_state_t), the
12352  * enabling must be _primed_ by creating an ECB for every ECB description.
12353  * This must be done to assure that we know the number of speculations, the
12354  * number of aggregations, the minimum buffer size needed, etc. before we
12355  * transition out of DTRACE_ACTIVITY_INACTIVE.  To do this without actually
12356  * enabling any probes, we create ECBs for every ECB description, but with a
12357  * NULL probe -- which is exactly what this function does.
12358  */
12359 static void
12360 dtrace_enabling_prime(dtrace_state_t *state)
12361 {
12362 	dtrace_enabling_t *enab;
12363 	int i;
12364 
12365 	for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
12366 		ASSERT(enab->dten_vstate->dtvs_state != NULL);
12367 
12368 		if (enab->dten_vstate->dtvs_state != state)
12369 			continue;
12370 
12371 		/*
12372 		 * We don't want to prime an enabling more than once, lest
12373 		 * we allow a malicious user to induce resource exhaustion.
12374 		 * (The ECBs that result from priming an enabling aren't
12375 		 * leaked -- but they also aren't deallocated until the
12376 		 * consumer state is destroyed.)
12377 		 */
12378 		if (enab->dten_primed)
12379 			continue;
12380 
12381 		for (i = 0; i < enab->dten_ndesc; i++) {
12382 			enab->dten_current = enab->dten_desc[i];
12383 			(void) dtrace_probe_enable(NULL, enab);
12384 		}
12385 
12386 		enab->dten_primed = 1;
12387 	}
12388 }
12389 
12390 /*
12391  * Called to indicate that probes should be provided due to retained
12392  * enablings.  This is implemented in terms of dtrace_probe_provide(), but it
12393  * must take an initial lap through the enabling calling the dtps_provide()
12394  * entry point explicitly to allow for autocreated probes.
12395  */
12396 static void
12397 dtrace_enabling_provide(dtrace_provider_t *prv)
12398 {
12399 	int i, all = 0;
12400 	dtrace_probedesc_t desc;
12401 	dtrace_genid_t gen;
12402 
12403 	ASSERT(MUTEX_HELD(&dtrace_lock));
12404 	ASSERT(MUTEX_HELD(&dtrace_provider_lock));
12405 
12406 	if (prv == NULL) {
12407 		all = 1;
12408 		prv = dtrace_provider;
12409 	}
12410 
12411 	do {
12412 		dtrace_enabling_t *enab;
12413 		void *parg = prv->dtpv_arg;
12414 
12415 retry:
12416 		gen = dtrace_retained_gen;
12417 		for (enab = dtrace_retained; enab != NULL;
12418 		    enab = enab->dten_next) {
12419 			for (i = 0; i < enab->dten_ndesc; i++) {
12420 				desc = enab->dten_desc[i]->dted_probe;
12421 				mutex_exit(&dtrace_lock);
12422 				prv->dtpv_pops.dtps_provide(parg, &desc);
12423 				mutex_enter(&dtrace_lock);
12424 				/*
12425 				 * Process the retained enablings again if
12426 				 * they have changed while we weren't holding
12427 				 * dtrace_lock.
12428 				 */
12429 				if (gen != dtrace_retained_gen)
12430 					goto retry;
12431 			}
12432 		}
12433 	} while (all && (prv = prv->dtpv_next) != NULL);
12434 
12435 	mutex_exit(&dtrace_lock);
12436 	dtrace_probe_provide(NULL, all ? NULL : prv);
12437 	mutex_enter(&dtrace_lock);
12438 }
12439 
12440 /*
12441  * Called to reap ECBs that are attached to probes from defunct providers.
12442  */
12443 static void
12444 dtrace_enabling_reap(void)
12445 {
12446 	dtrace_provider_t *prov;
12447 	dtrace_probe_t *probe;
12448 	dtrace_ecb_t *ecb;
12449 	hrtime_t when;
12450 	int i;
12451 
12452 	mutex_enter(&cpu_lock);
12453 	mutex_enter(&dtrace_lock);
12454 
12455 	for (i = 0; i < dtrace_nprobes; i++) {
12456 		if ((probe = dtrace_probes[i]) == NULL)
12457 			continue;
12458 
12459 		if (probe->dtpr_ecb == NULL)
12460 			continue;
12461 
12462 		prov = probe->dtpr_provider;
12463 
12464 		if ((when = prov->dtpv_defunct) == 0)
12465 			continue;
12466 
12467 		/*
12468 		 * We have ECBs on a defunct provider:  we want to reap these
12469 		 * ECBs to allow the provider to unregister.  The destruction
12470 		 * of these ECBs must be done carefully:  if we destroy the ECB
12471 		 * and the consumer later wishes to consume an EPID that
12472 		 * corresponds to the destroyed ECB (and if the EPID metadata
12473 		 * has not been previously consumed), the consumer will abort
12474 		 * processing on the unknown EPID.  To reduce (but not, sadly,
12475 		 * eliminate) the possibility of this, we will only destroy an
12476 		 * ECB for a defunct provider if, for the state that
12477 		 * corresponds to the ECB:
12478 		 *
12479 		 *  (a)	There is no speculative tracing (which can effectively
12480 		 *	cache an EPID for an arbitrary amount of time).
12481 		 *
12482 		 *  (b)	The principal buffers have been switched twice since the
12483 		 *	provider became defunct.
12484 		 *
12485 		 *  (c)	The aggregation buffers are of zero size or have been
12486 		 *	switched twice since the provider became defunct.
12487 		 *
12488 		 * We use dts_speculates to determine (a) and call a function
12489 		 * (dtrace_buffer_consumed()) to determine (b) and (c).  Note
12490 		 * that as soon as we've been unable to destroy one of the ECBs
12491 		 * associated with the probe, we quit trying -- reaping is only
12492 		 * fruitful in as much as we can destroy all ECBs associated
12493 		 * with the defunct provider's probes.
12494 		 */
12495 		while ((ecb = probe->dtpr_ecb) != NULL) {
12496 			dtrace_state_t *state = ecb->dte_state;
12497 			dtrace_buffer_t *buf = state->dts_buffer;
12498 			dtrace_buffer_t *aggbuf = state->dts_aggbuffer;
12499 
12500 			if (state->dts_speculates)
12501 				break;
12502 
12503 			if (!dtrace_buffer_consumed(buf, when))
12504 				break;
12505 
12506 			if (!dtrace_buffer_consumed(aggbuf, when))
12507 				break;
12508 
12509 			dtrace_ecb_disable(ecb);
12510 			ASSERT(probe->dtpr_ecb != ecb);
12511 			dtrace_ecb_destroy(ecb);
12512 		}
12513 	}
12514 
12515 	mutex_exit(&dtrace_lock);
12516 	mutex_exit(&cpu_lock);
12517 }
12518 
12519 /*
12520  * DTrace DOF Functions
12521  */
12522 /*ARGSUSED*/
12523 static void
12524 dtrace_dof_error(dof_hdr_t *dof, const char *str)
12525 {
12526 	if (dtrace_err_verbose)
12527 		cmn_err(CE_WARN, "failed to process DOF: %s", str);
12528 
12529 #ifdef DTRACE_ERRDEBUG
12530 	dtrace_errdebug(str);
12531 #endif
12532 }
12533 
12534 /*
12535  * Create DOF out of a currently enabled state.  Right now, we only create
12536  * DOF containing the run-time options -- but this could be expanded to create
12537  * complete DOF representing the enabled state.
12538  */
12539 static dof_hdr_t *
12540 dtrace_dof_create(dtrace_state_t *state)
12541 {
12542 	dof_hdr_t *dof;
12543 	dof_sec_t *sec;
12544 	dof_optdesc_t *opt;
12545 	int i, len = sizeof (dof_hdr_t) +
12546 	    roundup(sizeof (dof_sec_t), sizeof (uint64_t)) +
12547 	    sizeof (dof_optdesc_t) * DTRACEOPT_MAX;
12548 
12549 	ASSERT(MUTEX_HELD(&dtrace_lock));
12550 
12551 	dof = kmem_zalloc(len, KM_SLEEP);
12552 	dof->dofh_ident[DOF_ID_MAG0] = DOF_MAG_MAG0;
12553 	dof->dofh_ident[DOF_ID_MAG1] = DOF_MAG_MAG1;
12554 	dof->dofh_ident[DOF_ID_MAG2] = DOF_MAG_MAG2;
12555 	dof->dofh_ident[DOF_ID_MAG3] = DOF_MAG_MAG3;
12556 
12557 	dof->dofh_ident[DOF_ID_MODEL] = DOF_MODEL_NATIVE;
12558 	dof->dofh_ident[DOF_ID_ENCODING] = DOF_ENCODE_NATIVE;
12559 	dof->dofh_ident[DOF_ID_VERSION] = DOF_VERSION;
12560 	dof->dofh_ident[DOF_ID_DIFVERS] = DIF_VERSION;
12561 	dof->dofh_ident[DOF_ID_DIFIREG] = DIF_DIR_NREGS;
12562 	dof->dofh_ident[DOF_ID_DIFTREG] = DIF_DTR_NREGS;
12563 
12564 	dof->dofh_flags = 0;
12565 	dof->dofh_hdrsize = sizeof (dof_hdr_t);
12566 	dof->dofh_secsize = sizeof (dof_sec_t);
12567 	dof->dofh_secnum = 1;	/* only DOF_SECT_OPTDESC */
12568 	dof->dofh_secoff = sizeof (dof_hdr_t);
12569 	dof->dofh_loadsz = len;
12570 	dof->dofh_filesz = len;
12571 	dof->dofh_pad = 0;
12572 
12573 	/*
12574 	 * Fill in the option section header...
12575 	 */
12576 	sec = (dof_sec_t *)((uintptr_t)dof + sizeof (dof_hdr_t));
12577 	sec->dofs_type = DOF_SECT_OPTDESC;
12578 	sec->dofs_align = sizeof (uint64_t);
12579 	sec->dofs_flags = DOF_SECF_LOAD;
12580 	sec->dofs_entsize = sizeof (dof_optdesc_t);
12581 
12582 	opt = (dof_optdesc_t *)((uintptr_t)sec +
12583 	    roundup(sizeof (dof_sec_t), sizeof (uint64_t)));
12584 
12585 	sec->dofs_offset = (uintptr_t)opt - (uintptr_t)dof;
12586 	sec->dofs_size = sizeof (dof_optdesc_t) * DTRACEOPT_MAX;
12587 
12588 	for (i = 0; i < DTRACEOPT_MAX; i++) {
12589 		opt[i].dofo_option = i;
12590 		opt[i].dofo_strtab = DOF_SECIDX_NONE;
12591 		opt[i].dofo_value = state->dts_options[i];
12592 	}
12593 
12594 	return (dof);
12595 }
12596 
12597 static dof_hdr_t *
12598 dtrace_dof_copyin(uintptr_t uarg, int *errp)
12599 {
12600 	dof_hdr_t hdr, *dof;
12601 
12602 	ASSERT(!MUTEX_HELD(&dtrace_lock));
12603 
12604 	/*
12605 	 * First, we're going to copyin() the sizeof (dof_hdr_t).
12606 	 */
12607 	if (copyin((void *)uarg, &hdr, sizeof (hdr)) != 0) {
12608 		dtrace_dof_error(NULL, "failed to copyin DOF header");
12609 		*errp = EFAULT;
12610 		return (NULL);
12611 	}
12612 
12613 	/*
12614 	 * Now we'll allocate the entire DOF and copy it in -- provided
12615 	 * that the length isn't outrageous.
12616 	 */
12617 	if (hdr.dofh_loadsz >= dtrace_dof_maxsize) {
12618 		dtrace_dof_error(&hdr, "load size exceeds maximum");
12619 		*errp = E2BIG;
12620 		return (NULL);
12621 	}
12622 
12623 	if (hdr.dofh_loadsz < sizeof (hdr)) {
12624 		dtrace_dof_error(&hdr, "invalid load size");
12625 		*errp = EINVAL;
12626 		return (NULL);
12627 	}
12628 
12629 	dof = kmem_alloc(hdr.dofh_loadsz, KM_SLEEP);
12630 
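	/*
	 * Copy in the full DOF and re-check the load size:  if userland
	 * changed the header between the two copyin() calls, we refuse the
	 * DOF rather than operate on an inconsistent length.
	 */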
12631 	if (copyin((void *)uarg, dof, hdr.dofh_loadsz) != 0 ||
12632 	    dof->dofh_loadsz != hdr.dofh_loadsz) {
12633 		kmem_free(dof, hdr.dofh_loadsz);
12634 		*errp = EFAULT;
12635 		return (NULL);
12636 	}
12637 
12638 	return (dof);
12639 }
12640 
12641 static dof_hdr_t *
12642 dtrace_dof_property(const char *name)
12643 {
12644 	uchar_t *buf;
12645 	uint64_t loadsz;
12646 	unsigned int len, i;
12647 	dof_hdr_t *dof;
12648 
12649 	/*
12650 	 * Unfortunately, arrays of values in .conf files are always (and
12651 	 * only) interpreted to be integer arrays.  We must read our DOF
12652 	 * as an integer array, and then squeeze it into a byte array.
12653 	 */
12654 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dtrace_devi, 0,
12655 	    (char *)name, (int **)&buf, &len) != DDI_PROP_SUCCESS)
12656 		return (NULL);
12657 
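	/*
	 * Squeeze the integer array into a byte array in place.  This is
	 * safe front-to-back:  byte i is written at an offset no greater
	 * than the offset of the integer it is read from.
	 */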
12658 	for (i = 0; i < len; i++)
12659 		buf[i] = (uchar_t)(((int *)buf)[i]);
12660 
12661 	if (len < sizeof (dof_hdr_t)) {
12662 		ddi_prop_free(buf);
12663 		dtrace_dof_error(NULL, "truncated header");
12664 		return (NULL);
12665 	}
12666 
12667 	if (len < (loadsz = ((dof_hdr_t *)buf)->dofh_loadsz)) {
12668 		ddi_prop_free(buf);
12669 		dtrace_dof_error(NULL, "truncated DOF");
12670 		return (NULL);
12671 	}
12672 
12673 	if (loadsz >= dtrace_dof_maxsize) {
12674 		ddi_prop_free(buf);
12675 		dtrace_dof_error(NULL, "oversized DOF");
12676 		return (NULL);
12677 	}
12678 
12679 	dof = kmem_alloc(loadsz, KM_SLEEP);
12680 	bcopy(buf, dof, loadsz);
12681 	ddi_prop_free(buf);
12682 
12683 	return (dof);
12684 }
12685 
12686 static void
12687 dtrace_dof_destroy(dof_hdr_t *dof)
12688 {
12689 	kmem_free(dof, dof->dofh_loadsz);
12690 }
12691 
12692 /*
12693  * Return the dof_sec_t pointer corresponding to a given section index.  If the
12694  * index is not valid, dtrace_dof_error() is called and NULL is returned.  If
12695  * a type other than DOF_SECT_NONE is specified, the header is checked against
12696  * this type and NULL is returned if the types do not match.
12697  */
12698 static dof_sec_t *
12699 dtrace_dof_sect(dof_hdr_t *dof, uint32_t type, dof_secidx_t i)
12700 {
12701 	dof_sec_t *sec = (dof_sec_t *)(uintptr_t)
12702 	    ((uintptr_t)dof + dof->dofh_secoff + i * dof->dofh_secsize);
12703 
12704 	if (i >= dof->dofh_secnum) {
12705 		dtrace_dof_error(dof, "referenced section index is invalid");
12706 		return (NULL);
12707 	}
12708 
12709 	if (!(sec->dofs_flags & DOF_SECF_LOAD)) {
12710 		dtrace_dof_error(dof, "referenced section is not loadable");
12711 		return (NULL);
12712 	}
12713 
12714 	if (type != DOF_SECT_NONE && type != sec->dofs_type) {
12715 		dtrace_dof_error(dof, "referenced section is the wrong type");
12716 		return (NULL);
12717 	}
12718 
12719 	return (sec);
12720 }
12721 
12722 static dtrace_probedesc_t *
12723 dtrace_dof_probedesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_probedesc_t *desc)
12724 {
12725 	dof_probedesc_t *probe;
12726 	dof_sec_t *strtab;
12727 	uintptr_t daddr = (uintptr_t)dof;
12728 	uintptr_t str;
12729 	size_t size;
12730 
12731 	if (sec->dofs_type != DOF_SECT_PROBEDESC) {
12732 		dtrace_dof_error(dof, "invalid probe section");
12733 		return (NULL);
12734 	}
12735 
12736 	if (sec->dofs_align != sizeof (dof_secidx_t)) {
12737 		dtrace_dof_error(dof, "bad alignment in probe description");
12738 		return (NULL);
12739 	}
12740 
12741 	if (sec->dofs_offset + sizeof (dof_probedesc_t) > dof->dofh_loadsz) {
12742 		dtrace_dof_error(dof, "truncated probe description");
12743 		return (NULL);
12744 	}
12745 
12746 	probe = (dof_probedesc_t *)(uintptr_t)(daddr + sec->dofs_offset);
12747 	strtab = dtrace_dof_sect(dof, DOF_SECT_STRTAB, probe->dofp_strtab);
12748 
12749 	if (strtab == NULL)
12750 		return (NULL);
12751 
12752 	str = daddr + strtab->dofs_offset;
12753 	size = strtab->dofs_size;
12754 
12755 	if (probe->dofp_provider >= strtab->dofs_size) {
12756 		dtrace_dof_error(dof, "corrupt probe provider");
12757 		return (NULL);
12758 	}
12759 
12760 	(void) strncpy(desc->dtpd_provider,
12761 	    (char *)(str + probe->dofp_provider),
12762 	    MIN(DTRACE_PROVNAMELEN - 1, size - probe->dofp_provider));
12763 
12764 	if (probe->dofp_mod >= strtab->dofs_size) {
12765 		dtrace_dof_error(dof, "corrupt probe module");
12766 		return (NULL);
12767 	}
12768 
12769 	(void) strncpy(desc->dtpd_mod, (char *)(str + probe->dofp_mod),
12770 	    MIN(DTRACE_MODNAMELEN - 1, size - probe->dofp_mod));
12771 
12772 	if (probe->dofp_func >= strtab->dofs_size) {
12773 		dtrace_dof_error(dof, "corrupt probe function");
12774 		return (NULL);
12775 	}
12776 
12777 	(void) strncpy(desc->dtpd_func, (char *)(str + probe->dofp_func),
12778 	    MIN(DTRACE_FUNCNAMELEN - 1, size - probe->dofp_func));
12779 
12780 	if (probe->dofp_name >= strtab->dofs_size) {
12781 		dtrace_dof_error(dof, "corrupt probe name");
12782 		return (NULL);
12783 	}
12784 
12785 	(void) strncpy(desc->dtpd_name, (char *)(str + probe->dofp_name),
12786 	    MIN(DTRACE_NAMELEN - 1, size - probe->dofp_name));
12787 
12788 	return (desc);
12789 }
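/*
 * Illustrative sketch (hypothetical values, not part of the original source):
 * consider a DOF string table whose bytes are "\0syscall\0read\0entry\0", so
 * dofs_size == 20.  A probe description with dofp_provider == 1,
 * dofp_mod == 0, dofp_func == 9 and dofp_name == 14 names the provider
 * "syscall", an empty module, the function "read" and the probe name "entry".
 * Each offset only has to fall below dofs_size; the strncpy() calls above
 * copy at most (size - offset) bytes, so the copy can never read past the
 * end of the string table even if a terminating NUL were missing.
 */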
12790 
12791 static dtrace_difo_t *
12792 dtrace_dof_difo(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
12793     cred_t *cr)
12794 {
12795 	dtrace_difo_t *dp;
12796 	size_t ttl = 0;
12797 	dof_difohdr_t *dofd;
12798 	uintptr_t daddr = (uintptr_t)dof;
12799 	size_t max = dtrace_difo_maxsize;
12800 	int i, l, n;
12801 
12802 	static const struct {
12803 		int section;
12804 		int bufoffs;
12805 		int lenoffs;
12806 		int entsize;
12807 		int align;
12808 		const char *msg;
12809 	} difo[] = {
12810 		{ DOF_SECT_DIF, offsetof(dtrace_difo_t, dtdo_buf),
12811 		offsetof(dtrace_difo_t, dtdo_len), sizeof (dif_instr_t),
12812 		sizeof (dif_instr_t), "multiple DIF sections" },
12813 
12814 		{ DOF_SECT_INTTAB, offsetof(dtrace_difo_t, dtdo_inttab),
12815 		offsetof(dtrace_difo_t, dtdo_intlen), sizeof (uint64_t),
12816 		sizeof (uint64_t), "multiple integer tables" },
12817 
12818 		{ DOF_SECT_STRTAB, offsetof(dtrace_difo_t, dtdo_strtab),
12819 		offsetof(dtrace_difo_t, dtdo_strlen), 0,
12820 		sizeof (char), "multiple string tables" },
12821 
12822 		{ DOF_SECT_VARTAB, offsetof(dtrace_difo_t, dtdo_vartab),
12823 		offsetof(dtrace_difo_t, dtdo_varlen), sizeof (dtrace_difv_t),
12824 		sizeof (uint_t), "multiple variable tables" },
12825 
12826 		{ DOF_SECT_NONE, 0, 0, 0, 0, NULL }
12827 	};
12828 
12829 	if (sec->dofs_type != DOF_SECT_DIFOHDR) {
12830 		dtrace_dof_error(dof, "invalid DIFO header section");
12831 		return (NULL);
12832 	}
12833 
12834 	if (sec->dofs_align != sizeof (dof_secidx_t)) {
12835 		dtrace_dof_error(dof, "bad alignment in DIFO header");
12836 		return (NULL);
12837 	}
12838 
12839 	if (sec->dofs_size < sizeof (dof_difohdr_t) ||
12840 	    sec->dofs_size % sizeof (dof_secidx_t)) {
12841 		dtrace_dof_error(dof, "bad size in DIFO header");
12842 		return (NULL);
12843 	}
12844 
12845 	dofd = (dof_difohdr_t *)(uintptr_t)(daddr + sec->dofs_offset);
12846 	n = (sec->dofs_size - sizeof (*dofd)) / sizeof (dof_secidx_t) + 1;
12847 
12848 	dp = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP);
12849 	dp->dtdo_rtype = dofd->dofd_rtype;
12850 
12851 	for (l = 0; l < n; l++) {
12852 		dof_sec_t *subsec;
12853 		void **bufp;
12854 		uint32_t *lenp;
12855 
12856 		if ((subsec = dtrace_dof_sect(dof, DOF_SECT_NONE,
12857 		    dofd->dofd_links[l])) == NULL)
12858 			goto err; /* invalid section link */
12859 
12860 		if (ttl + subsec->dofs_size > max) {
12861 			dtrace_dof_error(dof, "exceeds maximum size");
12862 			goto err;
12863 		}
12864 
12865 		ttl += subsec->dofs_size;
12866 
12867 		for (i = 0; difo[i].section != DOF_SECT_NONE; i++) {
12868 			if (subsec->dofs_type != difo[i].section)
12869 				continue;
12870 
12871 			if (!(subsec->dofs_flags & DOF_SECF_LOAD)) {
12872 				dtrace_dof_error(dof, "section not loaded");
12873 				goto err;
12874 			}
12875 
12876 			if (subsec->dofs_align != difo[i].align) {
12877 				dtrace_dof_error(dof, "bad alignment");
12878 				goto err;
12879 			}
12880 
12881 			bufp = (void **)((uintptr_t)dp + difo[i].bufoffs);
12882 			lenp = (uint32_t *)((uintptr_t)dp + difo[i].lenoffs);
12883 
12884 			if (*bufp != NULL) {
12885 				dtrace_dof_error(dof, difo[i].msg);
12886 				goto err;
12887 			}
12888 
12889 			if (difo[i].entsize != subsec->dofs_entsize) {
12890 				dtrace_dof_error(dof, "entry size mismatch");
12891 				goto err;
12892 			}
12893 
12894 			if (subsec->dofs_entsize != 0 &&
12895 			    (subsec->dofs_size % subsec->dofs_entsize) != 0) {
12896 				dtrace_dof_error(dof, "corrupt entry size");
12897 				goto err;
12898 			}
12899 
12900 			*lenp = subsec->dofs_size;
12901 			*bufp = kmem_alloc(subsec->dofs_size, KM_SLEEP);
12902 			bcopy((char *)(uintptr_t)(daddr + subsec->dofs_offset),
12903 			    *bufp, subsec->dofs_size);
12904 
12905 			if (subsec->dofs_entsize != 0)
12906 				*lenp /= subsec->dofs_entsize;
12907 
12908 			break;
12909 		}
12910 
12911 		/*
12912 		 * If we encounter a loadable DIFO sub-section that is not
12913 		 * known to us, assume this is a broken program and fail.
12914 		 */
12915 		if (difo[i].section == DOF_SECT_NONE &&
12916 		    (subsec->dofs_flags & DOF_SECF_LOAD)) {
12917 			dtrace_dof_error(dof, "unrecognized DIFO subsection");
12918 			goto err;
12919 		}
12920 	}
12921 
12922 	if (dp->dtdo_buf == NULL) {
12923 		/*
12924 		 * We can't have a DIF object without DIF text.
12925 		 */
12926 		dtrace_dof_error(dof, "missing DIF text");
12927 		goto err;
12928 	}
12929 
12930 	/*
12931 	 * Before we validate the DIF object, run through the variable table
12932 	 * looking for the strings -- if any of their sizes are zero, we'll set
12933 	 * their size to be the system-wide default string size.  Note that
12934 	 * this should _not_ happen if the "strsize" option has been set --
12935 	 * in this case, the compiler should have set the size to reflect the
12936 	 * setting of the option.
12937 	 */
12938 	for (i = 0; i < dp->dtdo_varlen; i++) {
12939 		dtrace_difv_t *v = &dp->dtdo_vartab[i];
12940 		dtrace_diftype_t *t = &v->dtdv_type;
12941 
12942 		if (v->dtdv_id < DIF_VAR_OTHER_UBASE)
12943 			continue;
12944 
12945 		if (t->dtdt_kind == DIF_TYPE_STRING && t->dtdt_size == 0)
12946 			t->dtdt_size = dtrace_strsize_default;
12947 	}
12948 
12949 	if (dtrace_difo_validate(dp, vstate, DIF_DIR_NREGS, cr) != 0)
12950 		goto err;
12951 
12952 	dtrace_difo_init(dp, vstate);
12953 	return (dp);
12954 
12955 err:
12956 	kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t));
12957 	kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t));
12958 	kmem_free(dp->dtdo_strtab, dp->dtdo_strlen);
12959 	kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t));
12960 
12961 	kmem_free(dp, sizeof (dtrace_difo_t));
12962 	return (NULL);
12963 }
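/*
 * Illustrative sketch (hypothetical values): the difo[] table above drives a
 * data-directed copy.  For a DOF_SECT_INTTAB subsection with dofs_size == 24
 * and dofs_entsize == sizeof (uint64_t), the matching row supplies
 * offsetof(dtrace_difo_t, dtdo_inttab) and offsetof(dtrace_difo_t,
 * dtdo_intlen), so the loop body effectively performs:
 *
 *	dp->dtdo_inttab = kmem_alloc(24, KM_SLEEP);
 *	bcopy(daddr + subsec->dofs_offset, dp->dtdo_inttab, 24);
 *	dp->dtdo_intlen = 24 / sizeof (uint64_t);	(three entries)
 *
 * without ever naming the destination fields in the loop itself.
 */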
12964 
12965 static dtrace_predicate_t *
12966 dtrace_dof_predicate(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
12967     cred_t *cr)
12968 {
12969 	dtrace_difo_t *dp;
12970 
12971 	if ((dp = dtrace_dof_difo(dof, sec, vstate, cr)) == NULL)
12972 		return (NULL);
12973 
12974 	return (dtrace_predicate_create(dp));
12975 }
12976 
12977 static dtrace_actdesc_t *
12978 dtrace_dof_actdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
12979     cred_t *cr)
12980 {
12981 	dtrace_actdesc_t *act, *first = NULL, *last = NULL, *next;
12982 	dof_actdesc_t *desc;
12983 	dof_sec_t *difosec;
12984 	size_t offs;
12985 	uintptr_t daddr = (uintptr_t)dof;
12986 	uint64_t arg;
12987 	dtrace_actkind_t kind;
12988 
12989 	if (sec->dofs_type != DOF_SECT_ACTDESC) {
12990 		dtrace_dof_error(dof, "invalid action section");
12991 		return (NULL);
12992 	}
12993 
12994 	if (sec->dofs_offset + sizeof (dof_actdesc_t) > dof->dofh_loadsz) {
12995 		dtrace_dof_error(dof, "truncated action description");
12996 		return (NULL);
12997 	}
12998 
12999 	if (sec->dofs_align != sizeof (uint64_t)) {
13000 		dtrace_dof_error(dof, "bad alignment in action description");
13001 		return (NULL);
13002 	}
13003 
13004 	if (sec->dofs_size < sec->dofs_entsize) {
13005 		dtrace_dof_error(dof, "section entry size exceeds total size");
13006 		return (NULL);
13007 	}
13008 
13009 	if (sec->dofs_entsize != sizeof (dof_actdesc_t)) {
13010 		dtrace_dof_error(dof, "bad entry size in action description");
13011 		return (NULL);
13012 	}
13013 
13014 	if (sec->dofs_size / sec->dofs_entsize > dtrace_actions_max) {
13015 		dtrace_dof_error(dof, "actions exceed dtrace_actions_max");
13016 		return (NULL);
13017 	}
13018 
13019 	for (offs = 0; offs < sec->dofs_size; offs += sec->dofs_entsize) {
13020 		desc = (dof_actdesc_t *)(daddr +
13021 		    (uintptr_t)sec->dofs_offset + offs);
13022 		kind = (dtrace_actkind_t)desc->dofa_kind;
13023 
13024 		if ((DTRACEACT_ISPRINTFLIKE(kind) &&
13025 		    (kind != DTRACEACT_PRINTA ||
13026 		    desc->dofa_strtab != DOF_SECIDX_NONE)) ||
13027 		    (kind == DTRACEACT_DIFEXPR &&
13028 		    desc->dofa_strtab != DOF_SECIDX_NONE)) {
13029 			dof_sec_t *strtab;
13030 			char *str, *fmt;
13031 			uint64_t i;
13032 
13033 			/*
13034 			 * The argument to these actions is an index into the
13035 			 * DOF string table.  For printf()-like actions, this
13036 			 * is the format string.  For print(), this is the
13037 			 * CTF type of the expression result.
13038 			 */
13039 			if ((strtab = dtrace_dof_sect(dof,
13040 			    DOF_SECT_STRTAB, desc->dofa_strtab)) == NULL)
13041 				goto err;
13042 
13043 			str = (char *)((uintptr_t)dof +
13044 			    (uintptr_t)strtab->dofs_offset);
13045 
13046 			for (i = desc->dofa_arg; i < strtab->dofs_size; i++) {
13047 				if (str[i] == '\0')
13048 					break;
13049 			}
13050 
13051 			if (i >= strtab->dofs_size) {
13052 				dtrace_dof_error(dof, "bogus format string");
13053 				goto err;
13054 			}
13055 
13056 			if (i == desc->dofa_arg) {
13057 				dtrace_dof_error(dof, "empty format string");
13058 				goto err;
13059 			}
13060 
13061 			i -= desc->dofa_arg;
13062 			fmt = kmem_alloc(i + 1, KM_SLEEP);
13063 			bcopy(&str[desc->dofa_arg], fmt, i + 1);
13064 			arg = (uint64_t)(uintptr_t)fmt;
13065 		} else {
13066 			if (kind == DTRACEACT_PRINTA) {
13067 				ASSERT(desc->dofa_strtab == DOF_SECIDX_NONE);
13068 				arg = 0;
13069 			} else {
13070 				arg = desc->dofa_arg;
13071 			}
13072 		}
13073 
13074 		act = dtrace_actdesc_create(kind, desc->dofa_ntuple,
13075 		    desc->dofa_uarg, arg);
13076 
13077 		if (last != NULL) {
13078 			last->dtad_next = act;
13079 		} else {
13080 			first = act;
13081 		}
13082 
13083 		last = act;
13084 
13085 		if (desc->dofa_difo == DOF_SECIDX_NONE)
13086 			continue;
13087 
13088 		if ((difosec = dtrace_dof_sect(dof,
13089 		    DOF_SECT_DIFOHDR, desc->dofa_difo)) == NULL)
13090 			goto err;
13091 
13092 		act->dtad_difo = dtrace_dof_difo(dof, difosec, vstate, cr);
13093 
13094 		if (act->dtad_difo == NULL)
13095 			goto err;
13096 	}
13097 
13098 	ASSERT(first != NULL);
13099 	return (first);
13100 
13101 err:
13102 	for (act = first; act != NULL; act = next) {
13103 		next = act->dtad_next;
13104 		dtrace_actdesc_release(act, vstate);
13105 	}
13106 
13107 	return (NULL);
13108 }
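/*
 * Illustrative sketch (hypothetical values): for a printf() action whose
 * dofa_strtab names a string table containing "...\0bytes = %d\n\0..." with
 * the format beginning at byte 17, dofa_arg would be 17.  The scan above
 * walks from str[17] looking for the terminating NUL, rejects the action if
 * no NUL is found inside the table or if the string is empty, and otherwise
 * copies the format into a freshly kmem_alloc()ed buffer whose address
 * becomes the action's argument.
 */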
13109 
13110 static dtrace_ecbdesc_t *
13111 dtrace_dof_ecbdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
13112     cred_t *cr)
13113 {
13114 	dtrace_ecbdesc_t *ep;
13115 	dof_ecbdesc_t *ecb;
13116 	dtrace_probedesc_t *desc;
13117 	dtrace_predicate_t *pred = NULL;
13118 
13119 	if (sec->dofs_size < sizeof (dof_ecbdesc_t)) {
13120 		dtrace_dof_error(dof, "truncated ECB description");
13121 		return (NULL);
13122 	}
13123 
13124 	if (sec->dofs_align != sizeof (uint64_t)) {
13125 		dtrace_dof_error(dof, "bad alignment in ECB description");
13126 		return (NULL);
13127 	}
13128 
13129 	ecb = (dof_ecbdesc_t *)((uintptr_t)dof + (uintptr_t)sec->dofs_offset);
13130 	sec = dtrace_dof_sect(dof, DOF_SECT_PROBEDESC, ecb->dofe_probes);
13131 
13132 	if (sec == NULL)
13133 		return (NULL);
13134 
13135 	ep = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP);
13136 	ep->dted_uarg = ecb->dofe_uarg;
13137 	desc = &ep->dted_probe;
13138 
13139 	if (dtrace_dof_probedesc(dof, sec, desc) == NULL)
13140 		goto err;
13141 
13142 	if (ecb->dofe_pred != DOF_SECIDX_NONE) {
13143 		if ((sec = dtrace_dof_sect(dof,
13144 		    DOF_SECT_DIFOHDR, ecb->dofe_pred)) == NULL)
13145 			goto err;
13146 
13147 		if ((pred = dtrace_dof_predicate(dof, sec, vstate, cr)) == NULL)
13148 			goto err;
13149 
13150 		ep->dted_pred.dtpdd_predicate = pred;
13151 	}
13152 
13153 	if (ecb->dofe_actions != DOF_SECIDX_NONE) {
13154 		if ((sec = dtrace_dof_sect(dof,
13155 		    DOF_SECT_ACTDESC, ecb->dofe_actions)) == NULL)
13156 			goto err;
13157 
13158 		ep->dted_action = dtrace_dof_actdesc(dof, sec, vstate, cr);
13159 
13160 		if (ep->dted_action == NULL)
13161 			goto err;
13162 	}
13163 
13164 	return (ep);
13165 
13166 err:
13167 	if (pred != NULL)
13168 		dtrace_predicate_release(pred, vstate);
13169 	kmem_free(ep, sizeof (dtrace_ecbdesc_t));
13170 	return (NULL);
13171 }
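/*
 * Illustrative sketch (hypothetical section indices): a dof_ecbdesc_t simply
 * stitches other sections together by index -- e.g. dofe_probes == 3 (a
 * DOF_SECT_PROBEDESC), dofe_pred == 5 (a DOF_SECT_DIFOHDR holding the
 * predicate) and dofe_actions == 7 (a DOF_SECT_ACTDESC chain) -- with
 * DOF_SECIDX_NONE marking an absent predicate or action list.
 */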
13172 
13173 /*
13174  * Apply the relocations from the specified 'sec' (a DOF_SECT_URELHDR) to the
13175  * specified DOF.  At present, this amounts to simply adding 'ubase' to the
13176  * site of any user SETX relocations to account for load object base address.
13177  * In the future, if we need other relocations, this function can be extended.
13178  */
13179 static int
13180 dtrace_dof_relocate(dof_hdr_t *dof, dof_sec_t *sec, uint64_t ubase)
13181 {
13182 	uintptr_t daddr = (uintptr_t)dof;
13183 	uintptr_t ts_end;
13184 	dof_relohdr_t *dofr =
13185 	    (dof_relohdr_t *)(uintptr_t)(daddr + sec->dofs_offset);
13186 	dof_sec_t *ss, *rs, *ts;
13187 	dof_relodesc_t *r;
13188 	uint_t i, n;
13189 
13190 	if (sec->dofs_size < sizeof (dof_relohdr_t) ||
13191 	    sec->dofs_align != sizeof (dof_secidx_t)) {
13192 		dtrace_dof_error(dof, "invalid relocation header");
13193 		return (-1);
13194 	}
13195 
13196 	ss = dtrace_dof_sect(dof, DOF_SECT_STRTAB, dofr->dofr_strtab);
13197 	rs = dtrace_dof_sect(dof, DOF_SECT_RELTAB, dofr->dofr_relsec);
13198 	ts = dtrace_dof_sect(dof, DOF_SECT_NONE, dofr->dofr_tgtsec);
13199 	ts_end = (uintptr_t)ts + sizeof (dof_sec_t);
13200 
13201 	if (ss == NULL || rs == NULL || ts == NULL)
13202 		return (-1); /* dtrace_dof_error() has been called already */
13203 
13204 	if (rs->dofs_entsize < sizeof (dof_relodesc_t) ||
13205 	    rs->dofs_align != sizeof (uint64_t)) {
13206 		dtrace_dof_error(dof, "invalid relocation section");
13207 		return (-1);
13208 	}
13209 
13210 	r = (dof_relodesc_t *)(uintptr_t)(daddr + rs->dofs_offset);
13211 	n = rs->dofs_size / rs->dofs_entsize;
13212 
13213 	for (i = 0; i < n; i++) {
13214 		uintptr_t taddr = daddr + ts->dofs_offset + r->dofr_offset;
13215 
13216 		switch (r->dofr_type) {
13217 		case DOF_RELO_NONE:
13218 			break;
13219 		case DOF_RELO_SETX:
13220 			if (r->dofr_offset >= ts->dofs_size || r->dofr_offset +
13221 			    sizeof (uint64_t) > ts->dofs_size) {
13222 				dtrace_dof_error(dof, "bad relocation offset");
13223 				return (-1);
13224 			}
13225 
13226 			if (taddr >= (uintptr_t)ts && taddr < ts_end) {
13227 				dtrace_dof_error(dof, "bad relocation offset");
13228 				return (-1);
13229 			}
13230 
13231 			if (!IS_P2ALIGNED(taddr, sizeof (uint64_t))) {
13232 				dtrace_dof_error(dof, "misaligned setx relo");
13233 				return (-1);
13234 			}
13235 
13236 			*(uint64_t *)taddr += ubase;
13237 			break;
13238 		default:
13239 			dtrace_dof_error(dof, "invalid relocation type");
13240 			return (-1);
13241 		}
13242 
13243 		r = (dof_relodesc_t *)((uintptr_t)r + rs->dofs_entsize);
13244 	}
13245 
13246 	return (0);
13247 }
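/*
 * Illustrative sketch (hypothetical values): a user SETX relocation is just
 * an in-place add.  If the load object containing the DOF was mapped at
 * ubase == 0x7f0000000000 and the relocation site in the target section
 * currently holds the link-time value 0x1040, the loop above rewrites it to
 * 0x7f0000001040.  The offset checks ensure the 8-byte store lands inside
 * the target section and not on top of the section header itself.
 */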
13248 
13249 /*
13250  * The dof_hdr_t passed to dtrace_dof_slurp() should be a partially validated
13251  * header:  it should be at the front of a memory region that is at least
13252  * sizeof (dof_hdr_t) in size -- and then at least dof_hdr.dofh_loadsz in
13253  * size.  It need not be validated in any other way.
13254  */
13255 static int
13256 dtrace_dof_slurp(dof_hdr_t *dof, dtrace_vstate_t *vstate, cred_t *cr,
13257     dtrace_enabling_t **enabp, uint64_t ubase, int noprobes)
13258 {
13259 	uint64_t len = dof->dofh_loadsz, seclen;
13260 	uintptr_t daddr = (uintptr_t)dof;
13261 	dtrace_ecbdesc_t *ep;
13262 	dtrace_enabling_t *enab;
13263 	uint_t i;
13264 
13265 	ASSERT(MUTEX_HELD(&dtrace_lock));
13266 	ASSERT(dof->dofh_loadsz >= sizeof (dof_hdr_t));
13267 
13268 	/*
13269 	 * Check the DOF header identification bytes.  In addition to checking
13270 	 * valid settings, we also verify that unused bits/bytes are zeroed so
13271 	 * we can use them later without fear of regressing existing binaries.
13272 	 */
13273 	if (bcmp(&dof->dofh_ident[DOF_ID_MAG0],
13274 	    DOF_MAG_STRING, DOF_MAG_STRLEN) != 0) {
13275 		dtrace_dof_error(dof, "DOF magic string mismatch");
13276 		return (-1);
13277 	}
13278 
13279 	if (dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_ILP32 &&
13280 	    dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_LP64) {
13281 		dtrace_dof_error(dof, "DOF has invalid data model");
13282 		return (-1);
13283 	}
13284 
13285 	if (dof->dofh_ident[DOF_ID_ENCODING] != DOF_ENCODE_NATIVE) {
13286 		dtrace_dof_error(dof, "DOF encoding mismatch");
13287 		return (-1);
13288 	}
13289 
13290 	if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 &&
13291 	    dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_2) {
13292 		dtrace_dof_error(dof, "DOF version mismatch");
13293 		return (-1);
13294 	}
13295 
13296 	if (dof->dofh_ident[DOF_ID_DIFVERS] != DIF_VERSION_2) {
13297 		dtrace_dof_error(dof, "DOF uses unsupported instruction set");
13298 		return (-1);
13299 	}
13300 
13301 	if (dof->dofh_ident[DOF_ID_DIFIREG] > DIF_DIR_NREGS) {
13302 		dtrace_dof_error(dof, "DOF uses too many integer registers");
13303 		return (-1);
13304 	}
13305 
13306 	if (dof->dofh_ident[DOF_ID_DIFTREG] > DIF_DTR_NREGS) {
13307 		dtrace_dof_error(dof, "DOF uses too many tuple registers");
13308 		return (-1);
13309 	}
13310 
13311 	for (i = DOF_ID_PAD; i < DOF_ID_SIZE; i++) {
13312 		if (dof->dofh_ident[i] != 0) {
13313 			dtrace_dof_error(dof, "DOF has invalid ident byte set");
13314 			return (-1);
13315 		}
13316 	}
13317 
13318 	if (dof->dofh_flags & ~DOF_FL_VALID) {
13319 		dtrace_dof_error(dof, "DOF has invalid flag bits set");
13320 		return (-1);
13321 	}
13322 
13323 	if (dof->dofh_secsize == 0) {
13324 		dtrace_dof_error(dof, "zero section header size");
13325 		return (-1);
13326 	}
13327 
13328 	/*
13329 	 * Check that the section headers don't exceed the amount of DOF
13330 	 * data.  Note that we cast the section size and number of sections
13331 	 * to uint64_t's to prevent possible overflow in the multiplication.
13332 	 */
13333 	seclen = (uint64_t)dof->dofh_secnum * (uint64_t)dof->dofh_secsize;
13334 
13335 	if (dof->dofh_secoff > len || seclen > len ||
13336 	    dof->dofh_secoff + seclen > len) {
13337 		dtrace_dof_error(dof, "truncated section headers");
13338 		return (-1);
13339 	}
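	/*
	 * Worked example (hypothetical values): with dofh_secnum == 0x01000000
	 * and dofh_secsize == 0x200, the product is 0x200000000, which a
	 * 32-bit multiplication would wrap to 0 and slip past this check; in
	 * 64 bits it is compared against 'len' faithfully.
	 */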
13340 
13341 	if (!IS_P2ALIGNED(dof->dofh_secoff, sizeof (uint64_t))) {
13342 		dtrace_dof_error(dof, "misaligned section headers");
13343 		return (-1);
13344 	}
13345 
13346 	if (!IS_P2ALIGNED(dof->dofh_secsize, sizeof (uint64_t))) {
13347 		dtrace_dof_error(dof, "misaligned section size");
13348 		return (-1);
13349 	}
13350 
13351 	/*
13352 	 * Take an initial pass through the section headers to be sure that
13353 	 * the headers don't have stray offsets.  If the 'noprobes' flag is
13354 	 * set, do not permit sections relating to providers, probes, or args.
13355 	 */
13356 	for (i = 0; i < dof->dofh_secnum; i++) {
13357 		dof_sec_t *sec = (dof_sec_t *)(daddr +
13358 		    (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
13359 
13360 		if (noprobes) {
13361 			switch (sec->dofs_type) {
13362 			case DOF_SECT_PROVIDER:
13363 			case DOF_SECT_PROBES:
13364 			case DOF_SECT_PRARGS:
13365 			case DOF_SECT_PROFFS:
13366 				dtrace_dof_error(dof, "illegal sections "
13367 				    "for enabling");
13368 				return (-1);
13369 			}
13370 		}
13371 
13372 		if (DOF_SEC_ISLOADABLE(sec->dofs_type) &&
13373 		    !(sec->dofs_flags & DOF_SECF_LOAD)) {
13374 			dtrace_dof_error(dof, "loadable section with load "
13375 			    "flag unset");
13376 			return (-1);
13377 		}
13378 
13379 		if (!(sec->dofs_flags & DOF_SECF_LOAD))
13380 			continue; /* just ignore non-loadable sections */
13381 
13382 		if (!ISP2(sec->dofs_align)) {
13383 			dtrace_dof_error(dof, "bad section alignment");
13384 			return (-1);
13385 		}
13386 
13387 		if (sec->dofs_offset & (sec->dofs_align - 1)) {
13388 			dtrace_dof_error(dof, "misaligned section");
13389 			return (-1);
13390 		}
13391 
13392 		if (sec->dofs_offset > len || sec->dofs_size > len ||
13393 		    sec->dofs_offset + sec->dofs_size > len) {
13394 			dtrace_dof_error(dof, "corrupt section header");
13395 			return (-1);
13396 		}
13397 
13398 		if (sec->dofs_type == DOF_SECT_STRTAB && *((char *)daddr +
13399 		    sec->dofs_offset + sec->dofs_size - 1) != '\0') {
13400 			dtrace_dof_error(dof, "non-terminating string table");
13401 			return (-1);
13402 		}
13403 	}
13404 
13405 	/*
13406 	 * Take a second pass through the sections and locate and perform any
13407 	 * relocations that are present.  We do this after the first pass to
13408 	 * be sure that all sections have had their headers validated.
13409 	 */
13410 	for (i = 0; i < dof->dofh_secnum; i++) {
13411 		dof_sec_t *sec = (dof_sec_t *)(daddr +
13412 		    (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
13413 
13414 		if (!(sec->dofs_flags & DOF_SECF_LOAD))
13415 			continue; /* skip sections that are not loadable */
13416 
13417 		switch (sec->dofs_type) {
13418 		case DOF_SECT_URELHDR:
13419 			if (dtrace_dof_relocate(dof, sec, ubase) != 0)
13420 				return (-1);
13421 			break;
13422 		}
13423 	}
13424 
13425 	if ((enab = *enabp) == NULL)
13426 		enab = *enabp = dtrace_enabling_create(vstate);
13427 
13428 	for (i = 0; i < dof->dofh_secnum; i++) {
13429 		dof_sec_t *sec = (dof_sec_t *)(daddr +
13430 		    (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
13431 
13432 		if (sec->dofs_type != DOF_SECT_ECBDESC)
13433 			continue;
13434 
13435 		if ((ep = dtrace_dof_ecbdesc(dof, sec, vstate, cr)) == NULL) {
13436 			dtrace_enabling_destroy(enab);
13437 			*enabp = NULL;
13438 			return (-1);
13439 		}
13440 
13441 		dtrace_enabling_add(enab, ep);
13442 	}
13443 
13444 	return (0);
13445 }
13446 
13447 /*
13448  * Process DOF for any options.  This routine assumes that the DOF has been
13449  * at least processed by dtrace_dof_slurp().
13450  */
13451 static int
13452 dtrace_dof_options(dof_hdr_t *dof, dtrace_state_t *state)
13453 {
13454 	int i, rval;
13455 	uint32_t entsize;
13456 	size_t offs;
13457 	dof_optdesc_t *desc;
13458 
13459 	for (i = 0; i < dof->dofh_secnum; i++) {
13460 		dof_sec_t *sec = (dof_sec_t *)((uintptr_t)dof +
13461 		    (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
13462 
13463 		if (sec->dofs_type != DOF_SECT_OPTDESC)
13464 			continue;
13465 
13466 		if (sec->dofs_align != sizeof (uint64_t)) {
13467 			dtrace_dof_error(dof, "bad alignment in "
13468 			    "option description");
13469 			return (EINVAL);
13470 		}
13471 
13472 		if ((entsize = sec->dofs_entsize) == 0) {
13473 			dtrace_dof_error(dof, "zeroed option entry size");
13474 			return (EINVAL);
13475 		}
13476 
13477 		if (entsize < sizeof (dof_optdesc_t)) {
13478 			dtrace_dof_error(dof, "bad option entry size");
13479 			return (EINVAL);
13480 		}
13481 
13482 		for (offs = 0; offs < sec->dofs_size; offs += entsize) {
13483 			desc = (dof_optdesc_t *)((uintptr_t)dof +
13484 			    (uintptr_t)sec->dofs_offset + offs);
13485 
13486 			if (desc->dofo_strtab != DOF_SECIDX_NONE) {
13487 				dtrace_dof_error(dof, "non-zero option string");
13488 				return (EINVAL);
13489 			}
13490 
13491 			if (desc->dofo_value == DTRACEOPT_UNSET) {
13492 				dtrace_dof_error(dof, "unset option");
13493 				return (EINVAL);
13494 			}
13495 
13496 			if ((rval = dtrace_state_option(state,
13497 			    desc->dofo_option, desc->dofo_value)) != 0) {
13498 				dtrace_dof_error(dof, "rejected option");
13499 				return (rval);
13500 			}
13501 		}
13502 	}
13503 
13504 	return (0);
13505 }
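/*
 * Illustrative sketch (hypothetical values): an option section is an array of
 * dof_optdesc_t entries, e.g. one whose dofo_option is DTRACEOPT_BUFSIZE,
 * whose dofo_strtab is DOF_SECIDX_NONE and whose dofo_value is 4m
 * (4 * 1024 * 1024).  Each entry is handed to dtrace_state_option();
 * string-valued options are presumably resolved to numeric values by the
 * consumer before the DOF reaches us, which is why any entry with a
 * non-DOF_SECIDX_NONE dofo_strtab is rejected above.
 */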
13506 
13507 /*
13508  * DTrace Consumer State Functions
13509  */
13510 int
13511 dtrace_dstate_init(dtrace_dstate_t *dstate, size_t size)
13512 {
13513 	size_t hashsize, maxper, min, chunksize = dstate->dtds_chunksize;
13514 	void *base;
13515 	uintptr_t limit;
13516 	dtrace_dynvar_t *dvar, *next, *start;
13517 	int i;
13518 
13519 	ASSERT(MUTEX_HELD(&dtrace_lock));
13520 	ASSERT(dstate->dtds_base == NULL && dstate->dtds_percpu == NULL);
13521 
13522 	bzero(dstate, sizeof (dtrace_dstate_t));
13523 
13524 	if ((dstate->dtds_chunksize = chunksize) == 0)
13525 		dstate->dtds_chunksize = DTRACE_DYNVAR_CHUNKSIZE;
13526 
13527 	VERIFY(dstate->dtds_chunksize < LONG_MAX);
13528 
13529 	if (size < (min = dstate->dtds_chunksize + sizeof (dtrace_dynhash_t)))
13530 		size = min;
13531 
13532 	if ((base = kmem_zalloc(size, KM_NOSLEEP | KM_NORMALPRI)) == NULL)
13533 		return (ENOMEM);
13534 
13535 	dstate->dtds_size = size;
13536 	dstate->dtds_base = base;
13537 	dstate->dtds_percpu = kmem_cache_alloc(dtrace_state_cache, KM_SLEEP);
13538 	bzero(dstate->dtds_percpu, NCPU * sizeof (dtrace_dstate_percpu_t));
13539 
13540 	hashsize = size / (dstate->dtds_chunksize + sizeof (dtrace_dynhash_t));
13541 
13542 	if (hashsize != 1 && (hashsize & 1))
13543 		hashsize--;
13544 
13545 	dstate->dtds_hashsize = hashsize;
13546 	dstate->dtds_hash = dstate->dtds_base;
13547 
13548 	/*
13549 	 * Set all of our hash buckets to point to the single sink, and (if
13550 	 * it hasn't already been set), set the sink's hash value to be the
13551 	 * sink sentinel value.  The sink is needed for dynamic variable
13552 	 * lookups to know that they have iterated over an entire, valid hash
13553 	 * chain.
13554 	 */
13555 	for (i = 0; i < hashsize; i++)
13556 		dstate->dtds_hash[i].dtdh_chain = &dtrace_dynhash_sink;
13557 
13558 	if (dtrace_dynhash_sink.dtdv_hashval != DTRACE_DYNHASH_SINK)
13559 		dtrace_dynhash_sink.dtdv_hashval = DTRACE_DYNHASH_SINK;
13560 
13561 	/*
13562 	 * Determine number of active CPUs.  Divide free list evenly among
13563 	 * active CPUs.
13564 	 */
13565 	start = (dtrace_dynvar_t *)
13566 	    ((uintptr_t)base + hashsize * sizeof (dtrace_dynhash_t));
13567 	limit = (uintptr_t)base + size;
13568 
13569 	VERIFY((uintptr_t)start < limit);
13570 	VERIFY((uintptr_t)start >= (uintptr_t)base);
13571 
13572 	maxper = (limit - (uintptr_t)start) / NCPU;
13573 	maxper = (maxper / dstate->dtds_chunksize) * dstate->dtds_chunksize;
13574 
13575 	for (i = 0; i < NCPU; i++) {
13576 		dstate->dtds_percpu[i].dtdsc_free = dvar = start;
13577 
13578 		/*
13579 		 * If we don't even have enough chunks to make it once through
13580 		 * NCPUs, we're just going to allocate everything to the first
13581 		 * CPU.  And if we're on the last CPU, we're going to allocate
13582 		 * whatever is left over.  In either case, we set the limit to
13583 		 * be the limit of the dynamic variable space.
13584 		 */
13585 		if (maxper == 0 || i == NCPU - 1) {
13586 			limit = (uintptr_t)base + size;
13587 			start = NULL;
13588 		} else {
13589 			limit = (uintptr_t)start + maxper;
13590 			start = (dtrace_dynvar_t *)limit;
13591 		}
13592 
13593 		VERIFY(limit <= (uintptr_t)base + size);
13594 
13595 		for (;;) {
13596 			next = (dtrace_dynvar_t *)((uintptr_t)dvar +
13597 			    dstate->dtds_chunksize);
13598 
13599 			if ((uintptr_t)next + dstate->dtds_chunksize >= limit)
13600 				break;
13601 
13602 			VERIFY((uintptr_t)dvar >= (uintptr_t)base &&
13603 			    (uintptr_t)dvar <= (uintptr_t)base + size);
13604 			dvar->dtdv_next = next;
13605 			dvar = next;
13606 		}
13607 
13608 		if (maxper == 0)
13609 			break;
13610 	}
13611 
13612 	return (0);
13613 }
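/*
 * Worked example (hypothetical values): with size == 64K and a 256-byte chunk
 * size, hashsize is 64K / (256 + sizeof (dtrace_dynhash_t)) and, unless it is
 * 1, is made even by the decrement above.  The space remaining after the hash
 * buckets is carved into per-CPU free lists of 'maxper' bytes apiece (rounded
 * down to a chunk multiple); the last CPU takes whatever is left over, and if
 * maxper rounds to zero the first CPU takes everything.
 */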
13614 
13615 void
13616 dtrace_dstate_fini(dtrace_dstate_t *dstate)
13617 {
13618 	ASSERT(MUTEX_HELD(&cpu_lock));
13619 
13620 	if (dstate->dtds_base == NULL)
13621 		return;
13622 
13623 	kmem_free(dstate->dtds_base, dstate->dtds_size);
13624 	kmem_cache_free(dtrace_state_cache, dstate->dtds_percpu);
13625 }
13626 
13627 static void
13628 dtrace_vstate_fini(dtrace_vstate_t *vstate)
13629 {
13630 	/*
13631 	 * Logical XOR, where are you?
13632 	 */
13633 	ASSERT((vstate->dtvs_nglobals == 0) ^ (vstate->dtvs_globals != NULL));
13634 
13635 	if (vstate->dtvs_nglobals > 0) {
13636 		kmem_free(vstate->dtvs_globals, vstate->dtvs_nglobals *
13637 		    sizeof (dtrace_statvar_t *));
13638 	}
13639 
13640 	if (vstate->dtvs_ntlocals > 0) {
13641 		kmem_free(vstate->dtvs_tlocals, vstate->dtvs_ntlocals *
13642 		    sizeof (dtrace_difv_t));
13643 	}
13644 
13645 	ASSERT((vstate->dtvs_nlocals == 0) ^ (vstate->dtvs_locals != NULL));
13646 
13647 	if (vstate->dtvs_nlocals > 0) {
13648 		kmem_free(vstate->dtvs_locals, vstate->dtvs_nlocals *
13649 		    sizeof (dtrace_statvar_t *));
13650 	}
13651 }
13652 
13653 static void
13654 dtrace_state_clean(dtrace_state_t *state)
13655 {
13656 	if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE)
13657 		return;
13658 
13659 	dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars);
13660 	dtrace_speculation_clean(state);
13661 }
13662 
13663 static void
13664 dtrace_state_deadman(dtrace_state_t *state)
13665 {
13666 	hrtime_t now;
13667 
13668 	dtrace_sync();
13669 
13670 	now = dtrace_gethrtime();
13671 
13672 	if (state != dtrace_anon.dta_state &&
13673 	    now - state->dts_laststatus >= dtrace_deadman_user)
13674 		return;
13675 
13676 	/*
13677 	 * We must be sure that dts_alive never appears to be less than the
13678 	 * value upon entry to dtrace_state_deadman(), and because we lack a
13679 	 * dtrace_cas64(), we cannot store to it atomically.  We thus instead
13680 	 * store INT64_MAX to it, followed by a memory barrier, followed by
13681 	 * the new value.  This assures that dts_alive never appears to be
13682 	 * less than its true value, regardless of the order in which the
13683 	 * stores to the underlying storage are issued.
13684 	 */
13685 	state->dts_alive = INT64_MAX;
13686 	dtrace_membar_producer();
13687 	state->dts_alive = now;
13688 }
13689 
13690 dtrace_state_t *
13691 dtrace_state_create(dev_t *devp, cred_t *cr)
13692 {
13693 	minor_t minor;
13694 	major_t major;
13695 	char c[30];
13696 	dtrace_state_t *state;
13697 	dtrace_optval_t *opt;
13698 	int bufsize = NCPU * sizeof (dtrace_buffer_t), i;
13699 
13700 	ASSERT(MUTEX_HELD(&dtrace_lock));
13701 	ASSERT(MUTEX_HELD(&cpu_lock));
13702 
13703 	minor = (minor_t)(uintptr_t)vmem_alloc(dtrace_minor, 1,
13704 	    VM_BESTFIT | VM_SLEEP);
13705 
13706 	if (ddi_soft_state_zalloc(dtrace_softstate, minor) != DDI_SUCCESS) {
13707 		vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1);
13708 		return (NULL);
13709 	}
13710 
13711 	state = ddi_get_soft_state(dtrace_softstate, minor);
13712 	state->dts_epid = DTRACE_EPIDNONE + 1;
13713 
13714 	(void) snprintf(c, sizeof (c), "dtrace_aggid_%d", minor);
13715 	state->dts_aggid_arena = vmem_create(c, (void *)1, UINT32_MAX, 1,
13716 	    NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
13717 
13718 	if (devp != NULL) {
13719 		major = getemajor(*devp);
13720 	} else {
13721 		major = ddi_driver_major(dtrace_devi);
13722 	}
13723 
13724 	state->dts_dev = makedevice(major, minor);
13725 
13726 	if (devp != NULL)
13727 		*devp = state->dts_dev;
13728 
13729 	/*
13730 	 * We allocate NCPU buffers.  On the one hand, this can be quite
13731 	 * a bit of memory per instance (nearly 36K on a Starcat).  On the
13732 	 * other hand, it saves an additional memory reference in the probe
13733 	 * path.
13734 	 */
13735 	state->dts_buffer = kmem_zalloc(bufsize, KM_SLEEP);
13736 	state->dts_aggbuffer = kmem_zalloc(bufsize, KM_SLEEP);
13737 	state->dts_cleaner = CYCLIC_NONE;
13738 	state->dts_deadman = CYCLIC_NONE;
13739 	state->dts_vstate.dtvs_state = state;
13740 
13741 	for (i = 0; i < DTRACEOPT_MAX; i++)
13742 		state->dts_options[i] = DTRACEOPT_UNSET;
13743 
13744 	/*
13745 	 * Set the default options.
13746 	 */
13747 	opt = state->dts_options;
13748 	opt[DTRACEOPT_BUFPOLICY] = DTRACEOPT_BUFPOLICY_SWITCH;
13749 	opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_AUTO;
13750 	opt[DTRACEOPT_NSPEC] = dtrace_nspec_default;
13751 	opt[DTRACEOPT_SPECSIZE] = dtrace_specsize_default;
13752 	opt[DTRACEOPT_CPU] = (dtrace_optval_t)DTRACE_CPUALL;
13753 	opt[DTRACEOPT_STRSIZE] = dtrace_strsize_default;
13754 	opt[DTRACEOPT_STACKFRAMES] = dtrace_stackframes_default;
13755 	opt[DTRACEOPT_USTACKFRAMES] = dtrace_ustackframes_default;
13756 	opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_default;
13757 	opt[DTRACEOPT_AGGRATE] = dtrace_aggrate_default;
13758 	opt[DTRACEOPT_SWITCHRATE] = dtrace_switchrate_default;
13759 	opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_default;
13760 	opt[DTRACEOPT_JSTACKFRAMES] = dtrace_jstackframes_default;
13761 	opt[DTRACEOPT_JSTACKSTRSIZE] = dtrace_jstackstrsize_default;
13762 
13763 	state->dts_activity = DTRACE_ACTIVITY_INACTIVE;
13764 
13765 	/*
13766 	 * Depending on the user credentials, we set flag bits which alter probe
13767 	 * visibility or the amount of destructiveness allowed.  In the case of
13768 	 * actual anonymous tracing, or the possession of all privileges, all of
13769 	 * the normal checks are bypassed.
13770 	 */
13771 	if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) {
13772 		state->dts_cred.dcr_visible = DTRACE_CRV_ALL;
13773 		state->dts_cred.dcr_action = DTRACE_CRA_ALL;
13774 	} else {
13775 		/*
13776 		 * Set up the credentials for this instantiation.  We take a
13777 		 * hold on the credential to prevent it from disappearing on
13778 		 * us; this in turn prevents the zone_t referenced by this
13779 		 * credential from disappearing.  This means that we can
13780 		 * examine the credential and the zone from probe context.
13781 		 */
13782 		crhold(cr);
13783 		state->dts_cred.dcr_cred = cr;
13784 
13785 		/*
13786 		 * CRA_PROC means "we have *some* privilege for dtrace" and
13787 		 * unlocks the use of variables like pid, zonename, etc.
13788 		 */
13789 		if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE) ||
13790 		    PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) {
13791 			state->dts_cred.dcr_action |= DTRACE_CRA_PROC;
13792 		}
13793 
13794 		/*
13795 		 * dtrace_user allows use of syscall and profile providers.
13796 		 * If the user also has proc_owner and/or proc_zone, we
13797 		 * extend the scope to include additional visibility and
13798 		 * destructive power.
13799 		 */
13800 		if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) {
13801 			if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) {
13802 				state->dts_cred.dcr_visible |=
13803 				    DTRACE_CRV_ALLPROC;
13804 
13805 				state->dts_cred.dcr_action |=
13806 				    DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
13807 			}
13808 
13809 			if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) {
13810 				state->dts_cred.dcr_visible |=
13811 				    DTRACE_CRV_ALLZONE;
13812 
13813 				state->dts_cred.dcr_action |=
13814 				    DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
13815 			}
13816 
13817 			/*
13818 			 * If we have all privs in whatever zone this is,
13819 			 * we can do destructive things to processes which
13820 			 * have altered credentials.
13821 			 */
13822 			if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE),
13823 			    cr->cr_zone->zone_privset)) {
13824 				state->dts_cred.dcr_action |=
13825 				    DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG;
13826 			}
13827 		}
13828 
13829 		/*
13830 		 * Holding the dtrace_kernel privilege also implies that
13831 		 * the user has the dtrace_user privilege from a visibility
13832 		 * perspective.  But without further privileges, some
13833 		 * destructive actions are not available.
13834 		 */
13835 		if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) {
13836 			/*
13837 			 * Make all probes in all zones visible.  However,
13838 			 * this doesn't mean that all actions become available
13839 			 * to all zones.
13840 			 */
13841 			state->dts_cred.dcr_visible |= DTRACE_CRV_KERNEL |
13842 			    DTRACE_CRV_ALLPROC | DTRACE_CRV_ALLZONE;
13843 
13844 			state->dts_cred.dcr_action |= DTRACE_CRA_KERNEL |
13845 			    DTRACE_CRA_PROC;
13846 			/*
13847 			 * Holding proc_owner means that destructive actions
13848 			 * for *this* zone are allowed.
13849 			 */
13850 			if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
13851 				state->dts_cred.dcr_action |=
13852 				    DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
13853 
13854 			/*
13855 			 * Holding proc_zone means that destructive actions
13856 			 * for this user/group ID in all zones are allowed.
13857 			 */
13858 			if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
13859 				state->dts_cred.dcr_action |=
13860 				    DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
13861 
13862 			/*
13863 			 * If we have all privs in whatever zone this is,
13864 			 * we can do destructive things to processes which
13865 			 * have altered credentials.
13866 			 */
13867 			if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE),
13868 			    cr->cr_zone->zone_privset)) {
13869 				state->dts_cred.dcr_action |=
13870 				    DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG;
13871 			}
13872 		}
13873 
13874 		/*
13875 		 * Holding the dtrace_proc privilege gives control over fasttrap
13876 		 * and pid providers.  We need to grant wider destructive
13877 		 * privileges in the event that the user has proc_owner and/or
13878 		 * proc_zone.
13879 		 */
13880 		if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) {
13881 			if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
13882 				state->dts_cred.dcr_action |=
13883 				    DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
13884 
13885 			if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
13886 				state->dts_cred.dcr_action |=
13887 				    DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
13888 		}
13889 	}
13890 
13891 	return (state);
13892 }
13893 
13894 static int
13895 dtrace_state_buffer(dtrace_state_t *state, dtrace_buffer_t *buf, int which)
13896 {
13897 	dtrace_optval_t *opt = state->dts_options, size;
13898 	processorid_t cpu;
13899 	int flags = 0, rval, factor, divisor = 1;
13900 
13901 	ASSERT(MUTEX_HELD(&dtrace_lock));
13902 	ASSERT(MUTEX_HELD(&cpu_lock));
13903 	ASSERT(which < DTRACEOPT_MAX);
13904 	ASSERT(state->dts_activity == DTRACE_ACTIVITY_INACTIVE ||
13905 	    (state == dtrace_anon.dta_state &&
13906 	    state->dts_activity == DTRACE_ACTIVITY_ACTIVE));
13907 
13908 	if (opt[which] == DTRACEOPT_UNSET || opt[which] == 0)
13909 		return (0);
13910 
13911 	if (opt[DTRACEOPT_CPU] != DTRACEOPT_UNSET)
13912 		cpu = opt[DTRACEOPT_CPU];
13913 
13914 	if (which == DTRACEOPT_SPECSIZE)
13915 		flags |= DTRACEBUF_NOSWITCH;
13916 
13917 	if (which == DTRACEOPT_BUFSIZE) {
13918 		if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_RING)
13919 			flags |= DTRACEBUF_RING;
13920 
13921 		if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_FILL)
13922 			flags |= DTRACEBUF_FILL;
13923 
13924 		if (state != dtrace_anon.dta_state ||
13925 		    state->dts_activity != DTRACE_ACTIVITY_ACTIVE)
13926 			flags |= DTRACEBUF_INACTIVE;
13927 	}
13928 
13929 	for (size = opt[which]; size >= sizeof (uint64_t); size /= divisor) {
13930 		/*
13931 		 * The size must be 8-byte aligned.  If the size is not 8-byte
13932 		 * aligned, drop it down by the difference.
13933 		 */
13934 		if (size & (sizeof (uint64_t) - 1))
13935 			size -= size & (sizeof (uint64_t) - 1);
13936 
13937 		if (size < state->dts_reserve) {
13938 			/*
13939 			 * Buffers must always be large enough to accommodate
13940 			 * their prereserved space.  We return E2BIG instead
13941 			 * of ENOMEM in this case to allow for user-level
13942 			 * software to differentiate the cases.
13943 			 */
13944 			return (E2BIG);
13945 		}
13946 
13947 		rval = dtrace_buffer_alloc(buf, size, flags, cpu, &factor);
13948 
13949 		if (rval != ENOMEM) {
13950 			opt[which] = size;
13951 			return (rval);
13952 		}
13953 
13954 		if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL)
13955 			return (rval);
13956 
13957 		for (divisor = 2; divisor < factor; divisor <<= 1)
13958 			continue;
13959 	}
13960 
13961 	return (ENOMEM);
13962 }
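/*
 * Illustrative sketch (hypothetical values): with a requested buffer size of
 * 8m and the "auto" resize policy, a failed per-CPU allocation that reports a
 * shortfall factor of 3 causes the divisor to be rounded up to the next power
 * of two (4), so the next attempt asks for 2m, then 512k, and so on until the
 * allocation succeeds, the size falls below the prereserve (E2BIG), or it
 * drops under sizeof (uint64_t) (ENOMEM).
 */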
13963 
13964 static int
13965 dtrace_state_buffers(dtrace_state_t *state)
13966 {
13967 	dtrace_speculation_t *spec = state->dts_speculations;
13968 	int rval, i;
13969 
13970 	if ((rval = dtrace_state_buffer(state, state->dts_buffer,
13971 	    DTRACEOPT_BUFSIZE)) != 0)
13972 		return (rval);
13973 
13974 	if ((rval = dtrace_state_buffer(state, state->dts_aggbuffer,
13975 	    DTRACEOPT_AGGSIZE)) != 0)
13976 		return (rval);
13977 
13978 	for (i = 0; i < state->dts_nspeculations; i++) {
13979 		if ((rval = dtrace_state_buffer(state,
13980 		    spec[i].dtsp_buffer, DTRACEOPT_SPECSIZE)) != 0)
13981 			return (rval);
13982 	}
13983 
13984 	return (0);
13985 }
13986 
13987 static void
13988 dtrace_state_prereserve(dtrace_state_t *state)
13989 {
13990 	dtrace_ecb_t *ecb;
13991 	dtrace_probe_t *probe;
13992 
13993 	state->dts_reserve = 0;
13994 
13995 	if (state->dts_options[DTRACEOPT_BUFPOLICY] != DTRACEOPT_BUFPOLICY_FILL)
13996 		return;
13997 
13998 	/*
13999 	 * If our buffer policy is a "fill" buffer policy, we need to set the
14000 	 * prereserved space to be the space required by the END probes.
14001 	 */
14002 	probe = dtrace_probes[dtrace_probeid_end - 1];
14003 	ASSERT(probe != NULL);
14004 
14005 	for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) {
14006 		if (ecb->dte_state != state)
14007 			continue;
14008 
14009 		state->dts_reserve += ecb->dte_needed + ecb->dte_alignment;
14010 	}
14011 }
14012 
14013 static int
14014 dtrace_state_go(dtrace_state_t *state, processorid_t *cpu)
14015 {
14016 	dtrace_optval_t *opt = state->dts_options, sz, nspec;
14017 	dtrace_speculation_t *spec;
14018 	dtrace_buffer_t *buf;
14019 	cyc_handler_t hdlr;
14020 	cyc_time_t when;
14021 	int rval = 0, i, bufsize = NCPU * sizeof (dtrace_buffer_t);
14022 	dtrace_icookie_t cookie;
14023 
14024 	mutex_enter(&cpu_lock);
14025 	mutex_enter(&dtrace_lock);
14026 
14027 	if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) {
14028 		rval = EBUSY;
14029 		goto out;
14030 	}
14031 
14032 	/*
14033 	 * Before we can perform any checks, we must prime all of the
14034 	 * retained enablings that correspond to this state.
14035 	 */
14036 	dtrace_enabling_prime(state);
14037 
14038 	if (state->dts_destructive && !state->dts_cred.dcr_destructive) {
14039 		rval = EACCES;
14040 		goto out;
14041 	}
14042 
14043 	dtrace_state_prereserve(state);
14044 
14045 	/*
14046 	 * Now we want to try to allocate our speculations.
14047 	 * We do not automatically resize the number of speculations; if
14048 	 * this fails, we will fail the operation.
14049 	 */
14050 	nspec = opt[DTRACEOPT_NSPEC];
14051 	ASSERT(nspec != DTRACEOPT_UNSET);
14052 
14053 	if (nspec > INT_MAX) {
14054 		rval = ENOMEM;
14055 		goto out;
14056 	}
14057 
14058 	spec = kmem_zalloc(nspec * sizeof (dtrace_speculation_t),
14059 	    KM_NOSLEEP | KM_NORMALPRI);
14060 
14061 	if (spec == NULL) {
14062 		rval = ENOMEM;
14063 		goto out;
14064 	}
14065 
14066 	state->dts_speculations = spec;
14067 	state->dts_nspeculations = (int)nspec;
14068 
14069 	for (i = 0; i < nspec; i++) {
14070 		if ((buf = kmem_zalloc(bufsize,
14071 		    KM_NOSLEEP | KM_NORMALPRI)) == NULL) {
14072 			rval = ENOMEM;
14073 			goto err;
14074 		}
14075 
14076 		spec[i].dtsp_buffer = buf;
14077 	}
14078 
14079 	if (opt[DTRACEOPT_GRABANON] != DTRACEOPT_UNSET) {
14080 		if (dtrace_anon.dta_state == NULL) {
14081 			rval = ENOENT;
14082 			goto out;
14083 		}
14084 
14085 		if (state->dts_necbs != 0) {
14086 			rval = EALREADY;
14087 			goto out;
14088 		}
14089 
14090 		state->dts_anon = dtrace_anon_grab();
14091 		ASSERT(state->dts_anon != NULL);
14092 		state = state->dts_anon;
14093 
14094 		/*
14095 		 * We want "grabanon" to be set in the grabbed state, so we'll
14096 		 * copy that option value from the grabbing state into the
14097 		 * grabbed state.
14098 		 */
14099 		state->dts_options[DTRACEOPT_GRABANON] =
14100 		    opt[DTRACEOPT_GRABANON];
14101 
14102 		*cpu = dtrace_anon.dta_beganon;
14103 
14104 		/*
14105 		 * If the anonymous state is active (as it almost certainly
14106 		 * is if the anonymous enabling ultimately matched anything),
14107 		 * we don't allow any further option processing -- but we
14108 		 * don't return failure.
14109 		 */
14110 		if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
14111 			goto out;
14112 	}
14113 
14114 	if (opt[DTRACEOPT_AGGSIZE] != DTRACEOPT_UNSET &&
14115 	    opt[DTRACEOPT_AGGSIZE] != 0) {
14116 		if (state->dts_aggregations == NULL) {
14117 			/*
14118 			 * We're not going to create an aggregation buffer
14119 			 * because we don't have any ECBs that contain
14120 			 * aggregations -- set this option to 0.
14121 			 */
14122 			opt[DTRACEOPT_AGGSIZE] = 0;
14123 		} else {
14124 			/*
14125 			 * If we have an aggregation buffer, we must also have
14126 			 * a buffer to use as scratch.
14127 			 */
14128 			if (opt[DTRACEOPT_BUFSIZE] == DTRACEOPT_UNSET ||
14129 			    opt[DTRACEOPT_BUFSIZE] < state->dts_needed) {
14130 				opt[DTRACEOPT_BUFSIZE] = state->dts_needed;
14131 			}
14132 		}
14133 	}
14134 
14135 	if (opt[DTRACEOPT_SPECSIZE] != DTRACEOPT_UNSET &&
14136 	    opt[DTRACEOPT_SPECSIZE] != 0) {
14137 		if (!state->dts_speculates) {
14138 			/*
14139 			 * We're not going to create speculation buffers
14140 			 * because we don't have any ECBs that actually
14141 			 * speculate -- set the speculation size to 0.
14142 			 */
14143 			opt[DTRACEOPT_SPECSIZE] = 0;
14144 		}
14145 	}
14146 
14147 	/*
14148 	 * The bare minimum size for any buffer that we're actually going to
14149 	 * do anything to is sizeof (uint64_t).
14150 	 */
14151 	sz = sizeof (uint64_t);
14152 
14153 	if ((state->dts_needed != 0 && opt[DTRACEOPT_BUFSIZE] < sz) ||
14154 	    (state->dts_speculates && opt[DTRACEOPT_SPECSIZE] < sz) ||
14155 	    (state->dts_aggregations != NULL && opt[DTRACEOPT_AGGSIZE] < sz)) {
14156 		/*
14157 		 * A buffer size has been explicitly set to 0 (or to a size
14158 		 * that will be adjusted to 0) and we need the space -- we
14159 		 * need to return failure.  We return ENOSPC to differentiate
14160 		 * it from failing to allocate a buffer due to failure to meet
14161 		 * the reserve (for which we return E2BIG).
14162 		 */
14163 		rval = ENOSPC;
14164 		goto out;
14165 	}
14166 
14167 	if ((rval = dtrace_state_buffers(state)) != 0)
14168 		goto err;
14169 
14170 	if ((sz = opt[DTRACEOPT_DYNVARSIZE]) == DTRACEOPT_UNSET)
14171 		sz = dtrace_dstate_defsize;
14172 
14173 	do {
14174 		rval = dtrace_dstate_init(&state->dts_vstate.dtvs_dynvars, sz);
14175 
14176 		if (rval == 0)
14177 			break;
14178 
14179 		if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL)
14180 			goto err;
14181 	} while (sz >>= 1);
14182 
14183 	opt[DTRACEOPT_DYNVARSIZE] = sz;
14184 
14185 	if (rval != 0)
14186 		goto err;
14187 
14188 	if (opt[DTRACEOPT_STATUSRATE] > dtrace_statusrate_max)
14189 		opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_max;
14190 
14191 	if (opt[DTRACEOPT_CLEANRATE] == 0)
14192 		opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max;
14193 
14194 	if (opt[DTRACEOPT_CLEANRATE] < dtrace_cleanrate_min)
14195 		opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_min;
14196 
14197 	if (opt[DTRACEOPT_CLEANRATE] > dtrace_cleanrate_max)
14198 		opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max;
14199 
14200 	hdlr.cyh_func = (cyc_func_t)dtrace_state_clean;
14201 	hdlr.cyh_arg = state;
14202 	hdlr.cyh_level = CY_LOW_LEVEL;
14203 
14204 	when.cyt_when = 0;
14205 	when.cyt_interval = opt[DTRACEOPT_CLEANRATE];
14206 
14207 	state->dts_cleaner = cyclic_add(&hdlr, &when);
14208 
14209 	hdlr.cyh_func = (cyc_func_t)dtrace_state_deadman;
14210 	hdlr.cyh_arg = state;
14211 	hdlr.cyh_level = CY_LOW_LEVEL;
14212 
14213 	when.cyt_when = 0;
14214 	when.cyt_interval = dtrace_deadman_interval;
14215 
14216 	state->dts_alive = state->dts_laststatus = dtrace_gethrtime();
14217 	state->dts_deadman = cyclic_add(&hdlr, &when);
14218 
14219 	state->dts_activity = DTRACE_ACTIVITY_WARMUP;
14220 
14221 	if (state->dts_getf != 0 &&
14222 	    !(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL)) {
14223 		/*
14224 		 * We don't have kernel privs but we have at least one call
14225 		 * to getf(); we need to bump our zone's count, and (if
14226 		 * this is the first enabling to have an unprivileged call
14227 		 * to getf()) we need to hook into closef().
14228 		 */
14229 		state->dts_cred.dcr_cred->cr_zone->zone_dtrace_getf++;
14230 
14231 		if (dtrace_getf++ == 0) {
14232 			ASSERT(dtrace_closef == NULL);
14233 			dtrace_closef = dtrace_getf_barrier;
14234 		}
14235 	}
14236 
14237 	/*
14238 	 * Now it's time to actually fire the BEGIN probe.  We need to disable
14239 	 * interrupts here both to record the CPU on which we fired the BEGIN
14240 	 * probe (the data from this CPU will be processed first at user
14241 	 * level) and to manually activate the buffer for this CPU.
14242 	 */
14243 	cookie = dtrace_interrupt_disable();
14244 	*cpu = CPU->cpu_id;
14245 	ASSERT(state->dts_buffer[*cpu].dtb_flags & DTRACEBUF_INACTIVE);
14246 	state->dts_buffer[*cpu].dtb_flags &= ~DTRACEBUF_INACTIVE;
14247 
14248 	dtrace_probe(dtrace_probeid_begin,
14249 	    (uint64_t)(uintptr_t)state, 0, 0, 0, 0);
14250 	dtrace_interrupt_enable(cookie);
14251 	/*
14252 	 * We may have had an exit action from a BEGIN probe; only change our
14253 	 * state to ACTIVE if we're still in WARMUP.
14254 	 */
14255 	ASSERT(state->dts_activity == DTRACE_ACTIVITY_WARMUP ||
14256 	    state->dts_activity == DTRACE_ACTIVITY_DRAINING);
14257 
14258 	if (state->dts_activity == DTRACE_ACTIVITY_WARMUP)
14259 		state->dts_activity = DTRACE_ACTIVITY_ACTIVE;
14260 
14261 	/*
14262 	 * Regardless of whether we're now in ACTIVE or DRAINING, we
14263 	 * want each CPU to transition its principal buffer out of the
14264 	 * INACTIVE state.  Doing this assures that no CPU will suddenly begin
14265 	 * processing an ECB halfway down a probe's ECB chain; all CPUs will
14266 	 * atomically transition from processing none of a state's ECBs to
14267 	 * processing all of them.
14268 	 */
14269 	dtrace_xcall(DTRACE_CPUALL,
14270 	    (dtrace_xcall_t)dtrace_buffer_activate, state);
14271 	goto out;
14272 
14273 err:
14274 	dtrace_buffer_free(state->dts_buffer);
14275 	dtrace_buffer_free(state->dts_aggbuffer);
14276 
14277 	if ((nspec = state->dts_nspeculations) == 0) {
14278 		ASSERT(state->dts_speculations == NULL);
14279 		goto out;
14280 	}
14281 
14282 	spec = state->dts_speculations;
14283 	ASSERT(spec != NULL);
14284 
14285 	for (i = 0; i < state->dts_nspeculations; i++) {
14286 		if ((buf = spec[i].dtsp_buffer) == NULL)
14287 			break;
14288 
14289 		dtrace_buffer_free(buf);
14290 		kmem_free(buf, bufsize);
14291 	}
14292 
14293 	kmem_free(spec, nspec * sizeof (dtrace_speculation_t));
14294 	state->dts_nspeculations = 0;
14295 	state->dts_speculations = NULL;
14296 
14297 out:
14298 	mutex_exit(&dtrace_lock);
14299 	mutex_exit(&cpu_lock);
14300 
14301 	return (rval);
14302 }
14303 
14304 static int
14305 dtrace_state_stop(dtrace_state_t *state, processorid_t *cpu)
14306 {
14307 	dtrace_icookie_t cookie;
14308 
14309 	ASSERT(MUTEX_HELD(&dtrace_lock));
14310 
14311 	if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE &&
14312 	    state->dts_activity != DTRACE_ACTIVITY_DRAINING)
14313 		return (EINVAL);
14314 
14315 	/*
14316 	 * We'll set the activity to DTRACE_ACTIVITY_DRAINING, and issue a sync
14317 	 * to be sure that every CPU has seen it.  See below for the details
14318 	 * on why this is done.
14319 	 */
14320 	state->dts_activity = DTRACE_ACTIVITY_DRAINING;
14321 	dtrace_sync();
14322 
14323 	/*
14324 	 * By this point, it is impossible for any CPU to be still processing
14325 	 * with DTRACE_ACTIVITY_ACTIVE.  We can thus set our activity to
14326 	 * DTRACE_ACTIVITY_COOLDOWN and know that we're not racing with any
14327 	 * other CPU in dtrace_buffer_reserve().  This allows dtrace_probe()
14328 	 * and callees to know that the activity is DTRACE_ACTIVITY_COOLDOWN
14329 	 * iff we're in the END probe.
14330 	 */
14331 	state->dts_activity = DTRACE_ACTIVITY_COOLDOWN;
14332 	dtrace_sync();
14333 	ASSERT(state->dts_activity == DTRACE_ACTIVITY_COOLDOWN);
14334 
14335 	/*
14336 	 * Finally, we can release the reserve and call the END probe.  We
14337 	 * disable interrupts across calling the END probe to allow us to
14338 	 * return the CPU on which we actually called the END probe.  This
14339 	 * allows user-land to be sure that this CPU's principal buffer is
14340 	 * processed last.
14341 	 */
14342 	state->dts_reserve = 0;
14343 
14344 	cookie = dtrace_interrupt_disable();
14345 	*cpu = CPU->cpu_id;
14346 	dtrace_probe(dtrace_probeid_end,
14347 	    (uint64_t)(uintptr_t)state, 0, 0, 0, 0);
14348 	dtrace_interrupt_enable(cookie);
14349 
14350 	state->dts_activity = DTRACE_ACTIVITY_STOPPED;
14351 	dtrace_sync();
14352 
14353 	if (state->dts_getf != 0 &&
14354 	    !(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL)) {
14355 		/*
14356 		 * We don't have kernel privs but we have at least one call
14357 		 * to getf(); we need to lower our zone's count, and (if
14358 		 * this is the last enabling to have an unprivileged call
14359 		 * to getf()) we need to clear the closef() hook.
14360 		 */
14361 		ASSERT(state->dts_cred.dcr_cred->cr_zone->zone_dtrace_getf > 0);
14362 		ASSERT(dtrace_closef == dtrace_getf_barrier);
14363 		ASSERT(dtrace_getf > 0);
14364 
14365 		state->dts_cred.dcr_cred->cr_zone->zone_dtrace_getf--;
14366 
14367 		if (--dtrace_getf == 0)
14368 			dtrace_closef = NULL;
14369 	}
14370 
14371 	return (0);
14372 }
14373 
14374 static int
14375 dtrace_state_option(dtrace_state_t *state, dtrace_optid_t option,
14376     dtrace_optval_t val)
14377 {
14378 	ASSERT(MUTEX_HELD(&dtrace_lock));
14379 
14380 	if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
14381 		return (EBUSY);
14382 
14383 	if (option >= DTRACEOPT_MAX)
14384 		return (EINVAL);
14385 
14386 	if (option != DTRACEOPT_CPU && val < 0)
14387 		return (EINVAL);
14388 
14389 	switch (option) {
14390 	case DTRACEOPT_DESTRUCTIVE:
14391 		if (dtrace_destructive_disallow)
14392 			return (EACCES);
14393 
14394 		state->dts_cred.dcr_destructive = 1;
14395 		break;
14396 
14397 	case DTRACEOPT_BUFSIZE:
14398 	case DTRACEOPT_DYNVARSIZE:
14399 	case DTRACEOPT_AGGSIZE:
14400 	case DTRACEOPT_SPECSIZE:
14401 	case DTRACEOPT_STRSIZE:
14402 		if (val < 0)
14403 			return (EINVAL);
14404 
14405 		if (val >= LONG_MAX) {
14406 			/*
14407 			 * If this is an otherwise negative value, set it to
14408 			 * the highest multiple of 128m less than LONG_MAX.
14409 			 * Technically, we're adjusting the size without
14410 			 * regard to the buffer resizing policy, but in fact,
14411 			 * this has no effect -- if we set the buffer size to
14412 			 * ~LONG_MAX and the buffer policy is ultimately set to
14413 			 * be "manual", the buffer allocation is guaranteed to
14414 			 * fail, if only because the allocation requires two
14415 			 * buffers.  (We set the size to the highest
14416 			 * multiple of 128m because it ensures that the size
14417 			 * will remain a multiple of a megabyte when
14418 			 * repeatedly halved -- all the way down to 15m.)
14419 			 */
14420 			val = LONG_MAX - (1 << 27) + 1;
14421 		}
14422 	}
14423 
14424 	state->dts_options[option] = val;
14425 
14426 	return (0);
14427 }
14428 
14429 static void
14430 dtrace_state_destroy(dtrace_state_t *state)
14431 {
14432 	dtrace_ecb_t *ecb;
14433 	dtrace_vstate_t *vstate = &state->dts_vstate;
14434 	minor_t minor = getminor(state->dts_dev);
14435 	int i, bufsize = NCPU * sizeof (dtrace_buffer_t);
14436 	dtrace_speculation_t *spec = state->dts_speculations;
14437 	int nspec = state->dts_nspeculations;
14438 	uint32_t match;
14439 
14440 	ASSERT(MUTEX_HELD(&dtrace_lock));
14441 	ASSERT(MUTEX_HELD(&cpu_lock));
14442 
14443 	/*
14444 	 * First, retract any retained enablings for this state.
14445 	 */
14446 	dtrace_enabling_retract(state);
14447 	ASSERT(state->dts_nretained == 0);
14448 
14449 	if (state->dts_activity == DTRACE_ACTIVITY_ACTIVE ||
14450 	    state->dts_activity == DTRACE_ACTIVITY_DRAINING) {
14451 		/*
14452 		 * We have managed to come into dtrace_state_destroy() on a
14453 		 * hot enabling -- almost certainly because of a disorderly
14454 		 * shutdown of a consumer.  (That is, a consumer that is
14455 		 * exiting without having called dtrace_stop().) In this case,
14456 		 * we're going to set our activity to be KILLED, and then
14457 		 * issue a sync to be sure that everyone is out of probe
14458 		 * context before we start blowing away ECBs.
14459 		 */
14460 		state->dts_activity = DTRACE_ACTIVITY_KILLED;
14461 		dtrace_sync();
14462 	}
14463 
14464 	/*
14465 	 * Release the credential hold we took in dtrace_state_create().
14466 	 */
14467 	if (state->dts_cred.dcr_cred != NULL)
14468 		crfree(state->dts_cred.dcr_cred);
14469 
14470 	/*
14471 	 * Now we can safely disable and destroy any enabled probes.  Because
14472 	 * any DTRACE_PRIV_KERNEL probes may actually be slowing our progress
14473 	 * (especially if they're all enabled), we take two passes through the
14474 	 * ECBs:  in the first, we disable just DTRACE_PRIV_KERNEL probes, and
14475 	 * in the second we disable whatever is left over.
14476 	 */
14477 	for (match = DTRACE_PRIV_KERNEL; ; match = 0) {
14478 		for (i = 0; i < state->dts_necbs; i++) {
14479 			if ((ecb = state->dts_ecbs[i]) == NULL)
14480 				continue;
14481 
14482 			if (match && ecb->dte_probe != NULL) {
14483 				dtrace_probe_t *probe = ecb->dte_probe;
14484 				dtrace_provider_t *prov = probe->dtpr_provider;
14485 
14486 				if (!(prov->dtpv_priv.dtpp_flags & match))
14487 					continue;
14488 			}
14489 
14490 			dtrace_ecb_disable(ecb);
14491 			dtrace_ecb_destroy(ecb);
14492 		}
14493 
14494 		if (!match)
14495 			break;
14496 	}
14497 
14498 	/*
14499 	 * Before we free the buffers, perform one more sync to assure that
14500 	 * every CPU is out of probe context.
14501 	 */
14502 	dtrace_sync();
14503 
14504 	dtrace_buffer_free(state->dts_buffer);
14505 	dtrace_buffer_free(state->dts_aggbuffer);
14506 
14507 	for (i = 0; i < nspec; i++)
14508 		dtrace_buffer_free(spec[i].dtsp_buffer);
14509 
14510 	if (state->dts_cleaner != CYCLIC_NONE)
14511 		cyclic_remove(state->dts_cleaner);
14512 
14513 	if (state->dts_deadman != CYCLIC_NONE)
14514 		cyclic_remove(state->dts_deadman);
14515 
14516 	dtrace_dstate_fini(&vstate->dtvs_dynvars);
14517 	dtrace_vstate_fini(vstate);
14518 	kmem_free(state->dts_ecbs, state->dts_necbs * sizeof (dtrace_ecb_t *));
14519 
14520 	if (state->dts_aggregations != NULL) {
14521 #ifdef DEBUG
14522 		for (i = 0; i < state->dts_naggregations; i++)
14523 			ASSERT(state->dts_aggregations[i] == NULL);
14524 #endif
14525 		ASSERT(state->dts_naggregations > 0);
14526 		kmem_free(state->dts_aggregations,
14527 		    state->dts_naggregations * sizeof (dtrace_aggregation_t *));
14528 	}
14529 
14530 	kmem_free(state->dts_buffer, bufsize);
14531 	kmem_free(state->dts_aggbuffer, bufsize);
14532 
14533 	for (i = 0; i < nspec; i++)
14534 		kmem_free(spec[i].dtsp_buffer, bufsize);
14535 
14536 	kmem_free(spec, nspec * sizeof (dtrace_speculation_t));
14537 
14538 	dtrace_format_destroy(state);
14539 
14540 	vmem_destroy(state->dts_aggid_arena);
14541 	ddi_soft_state_free(dtrace_softstate, minor);
14542 	vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1);
14543 }
14544 
14545 /*
14546  * DTrace Anonymous Enabling Functions
14547  */
14548 static dtrace_state_t *
14549 dtrace_anon_grab(void)
14550 {
14551 	dtrace_state_t *state;
14552 
14553 	ASSERT(MUTEX_HELD(&dtrace_lock));
14554 
14555 	if ((state = dtrace_anon.dta_state) == NULL) {
14556 		ASSERT(dtrace_anon.dta_enabling == NULL);
14557 		return (NULL);
14558 	}
14559 
14560 	ASSERT(dtrace_anon.dta_enabling != NULL);
14561 	ASSERT(dtrace_retained != NULL);
14562 
14563 	dtrace_enabling_destroy(dtrace_anon.dta_enabling);
14564 	dtrace_anon.dta_enabling = NULL;
14565 	dtrace_anon.dta_state = NULL;
14566 
14567 	return (state);
14568 }
14569 
14570 static void
14571 dtrace_anon_property(void)
14572 {
14573 	int i, rv;
14574 	dtrace_state_t *state;
14575 	dof_hdr_t *dof;
14576 	char c[32];		/* enough for "dof-data-" + digits */
14577 
14578 	ASSERT(MUTEX_HELD(&dtrace_lock));
14579 	ASSERT(MUTEX_HELD(&cpu_lock));
14580 
14581 	for (i = 0; ; i++) {
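	/*
	 * Walk the "dof-data-N" properties in order (dof-data-0,
	 * dof-data-1, and so on), slurping each one into the anonymous
	 * state until we run out of properties.
	 */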
14582 		(void) snprintf(c, sizeof (c), "dof-data-%d", i);
14583 
14584 		dtrace_err_verbose = 1;
14585 
14586 		if ((dof = dtrace_dof_property(c)) == NULL) {
14587 			dtrace_err_verbose = 0;
14588 			break;
14589 		}
14590 
14591 		/*
14592 		 * We want to create anonymous state, so we need to transition
14593 		 * the kernel debugger to indicate that DTrace is active.  If
14594 		 * this fails (e.g. because the debugger has modified text in
14595 		 * some way), we won't continue with the processing.
14596 		 */
14597 		if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) {
14598 			cmn_err(CE_NOTE, "kernel debugger active; anonymous "
14599 			    "enabling ignored.");
14600 			dtrace_dof_destroy(dof);
14601 			break;
14602 		}
14603 
14604 		/*
14605 		 * If we haven't allocated an anonymous state, we'll do so now.
14606 		 */
14607 		if ((state = dtrace_anon.dta_state) == NULL) {
14608 			state = dtrace_state_create(NULL, NULL);
14609 			dtrace_anon.dta_state = state;
14610 
14611 			if (state == NULL) {
14612 				/*
14613 				 * This basically shouldn't happen:  the only
14614 				 * failure mode from dtrace_state_create() is a
14615 				 * failure of ddi_soft_state_zalloc() that
14616 				 * itself should never happen.  Still, the
14617 				 * interface allows for a failure mode, and
14618 				 * we want to fail as gracefully as possible:
14619 				 * we'll emit an error message and cease
14620 				 * processing anonymous state in this case.
14621 				 */
14622 				cmn_err(CE_WARN, "failed to create "
14623 				    "anonymous state");
14624 				dtrace_dof_destroy(dof);
14625 				break;
14626 			}
14627 		}
14628 
14629 		rv = dtrace_dof_slurp(dof, &state->dts_vstate, CRED(),
14630 		    &dtrace_anon.dta_enabling, 0, B_TRUE);
14631 
14632 		if (rv == 0)
14633 			rv = dtrace_dof_options(dof, state);
14634 
14635 		dtrace_err_verbose = 0;
14636 		dtrace_dof_destroy(dof);
14637 
14638 		if (rv != 0) {
14639 			/*
14640 			 * This is malformed DOF; chuck any anonymous state
14641 			 * that we created.
14642 			 */
14643 			ASSERT(dtrace_anon.dta_enabling == NULL);
14644 			dtrace_state_destroy(state);
14645 			dtrace_anon.dta_state = NULL;
14646 			break;
14647 		}
14648 
14649 		ASSERT(dtrace_anon.dta_enabling != NULL);
14650 	}
14651 
14652 	if (dtrace_anon.dta_enabling != NULL) {
14653 		int rval;
14654 
14655 		/*
14656 		 * dtrace_enabling_retain() can only fail because we are
14657 		 * trying to retain more enablings than are allowed -- but
14658 		 * we only have one anonymous enabling, and we are guaranteed
14659 		 * to be allowed at least one retained enabling; we assert
14660 		 * that dtrace_enabling_retain() returns success.
14661 		 */
14662 		rval = dtrace_enabling_retain(dtrace_anon.dta_enabling);
14663 		ASSERT(rval == 0);
14664 
14665 		dtrace_enabling_dump(dtrace_anon.dta_enabling);
14666 	}
14667 }
14668 
14669 /*
14670  * DTrace Helper Functions
14671  */
14672 static void
14673 dtrace_helper_trace(dtrace_helper_action_t *helper,
14674     dtrace_mstate_t *mstate, dtrace_vstate_t *vstate, int where)
14675 {
14676 	uint32_t size, next, nnext, i;
14677 	dtrace_helptrace_t *ent, *buffer;
14678 	uint16_t flags = cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
14679 
14680 	if ((buffer = dtrace_helptrace_buffer) == NULL)
14681 		return;
14682 
14683 	ASSERT(vstate->dtvs_nlocals <= dtrace_helptrace_nlocals);
14684 
14685 	/*
14686 	 * What would a tracing framework be without its own tracing
14687 	 * framework?  (Well, a hell of a lot simpler, for starters...)
14688 	 */
14689 	size = sizeof (dtrace_helptrace_t) + dtrace_helptrace_nlocals *
14690 	    sizeof (uint64_t) - sizeof (uint64_t);
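	/*
	 * (The trailing subtraction accounts for the dtht_locals[] element
	 * already counted in sizeof (dtrace_helptrace_t):  each record is
	 * the base structure plus one additional uint64_t per local beyond
	 * the first.)
	 */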
14691 
14692 	/*
14693 	 * Iterate until we can allocate a slot in the trace buffer.
14694 	 */
14695 	do {
14696 		next = dtrace_helptrace_next;
14697 
14698 		if (next + size < dtrace_helptrace_bufsize) {
14699 			nnext = next + size;
14700 		} else {
14701 			nnext = size;
14702 		}
14703 	} while (dtrace_cas32(&dtrace_helptrace_next, next, nnext) != next);
14704 
14705 	/*
14706 	 * We have our slot; fill it in.
14707 	 */
14708 	if (nnext == size) {
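		/*
		 * nnext == size means that our record begins at offset 0 --
		 * either because the cursor wrapped around the end of the
		 * buffer or because it was already at the start.
		 */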
14709 		dtrace_helptrace_wrapped++;
14710 		next = 0;
14711 	}
14712 
14713 	ent = (dtrace_helptrace_t *)((uintptr_t)buffer + next);
14714 	ent->dtht_helper = helper;
14715 	ent->dtht_where = where;
14716 	ent->dtht_nlocals = vstate->dtvs_nlocals;
14717 
14718 	ent->dtht_fltoffs = (mstate->dtms_present & DTRACE_MSTATE_FLTOFFS) ?
14719 	    mstate->dtms_fltoffs : -1;
14720 	ent->dtht_fault = DTRACE_FLAGS2FLT(flags);
14721 	ent->dtht_illval = cpu_core[CPU->cpu_id].cpuc_dtrace_illval;
14722 
14723 	for (i = 0; i < vstate->dtvs_nlocals; i++) {
14724 		dtrace_statvar_t *svar;
14725 
14726 		if ((svar = vstate->dtvs_locals[i]) == NULL)
14727 			continue;
14728 
14729 		ASSERT(svar->dtsv_size >= NCPU * sizeof (uint64_t));
14730 		ent->dtht_locals[i] =
14731 		    ((uint64_t *)(uintptr_t)svar->dtsv_data)[CPU->cpu_id];
14732 	}
14733 }
14734 
14735 static uint64_t
14736 dtrace_helper(int which, dtrace_mstate_t *mstate,
14737     dtrace_state_t *state, uint64_t arg0, uint64_t arg1)
14738 {
14739 	uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
14740 	uint64_t sarg0 = mstate->dtms_arg[0];
14741 	uint64_t sarg1 = mstate->dtms_arg[1];
14742 	uint64_t rval;
14743 	dtrace_helpers_t *helpers = curproc->p_dtrace_helpers;
14744 	dtrace_helper_action_t *helper;
14745 	dtrace_vstate_t *vstate;
14746 	dtrace_difo_t *pred;
14747 	int i, trace = dtrace_helptrace_buffer != NULL;
14748 
14749 	ASSERT(which >= 0 && which < DTRACE_NHELPER_ACTIONS);
14750 
14751 	if (helpers == NULL)
14752 		return (0);
14753 
14754 	if ((helper = helpers->dthps_actions[which]) == NULL)
14755 		return (0);
14756 
14757 	vstate = &helpers->dthps_vstate;
14758 	mstate->dtms_arg[0] = arg0;
14759 	mstate->dtms_arg[1] = arg1;
14760 
14761 	/*
14762 	 * Now iterate over each helper.  If its predicate evaluates to 'true',
14763 	 * we'll call the corresponding actions.  Note that the below calls
14764 	 * to dtrace_dif_emulate() may set faults in machine state.  This is
14765 	 * okay:  our caller (the outer dtrace_dif_emulate()) will simply plow
14766 	 * the stored DIF offset with its own (which is the desired behavior).
14767 	 * Also, note the calls to dtrace_dif_emulate() may allocate scratch
14768 	 * from machine state; this is okay, too.
14769 	 */
14770 	for (; helper != NULL; helper = helper->dtha_next) {
14771 		if ((pred = helper->dtha_predicate) != NULL) {
14772 			if (trace)
14773 				dtrace_helper_trace(helper, mstate, vstate, 0);
14774 
14775 			if (!dtrace_dif_emulate(pred, mstate, vstate, state))
14776 				goto next;
14777 
14778 			if (*flags & CPU_DTRACE_FAULT)
14779 				goto err;
14780 		}
14781 
14782 		for (i = 0; i < helper->dtha_nactions; i++) {
14783 			if (trace)
14784 				dtrace_helper_trace(helper,
14785 				    mstate, vstate, i + 1);
14786 
14787 			rval = dtrace_dif_emulate(helper->dtha_actions[i],
14788 			    mstate, vstate, state);
14789 
14790 			if (*flags & CPU_DTRACE_FAULT)
14791 				goto err;
14792 		}
14793 
14794 next:
14795 		if (trace)
14796 			dtrace_helper_trace(helper, mstate, vstate,
14797 			    DTRACE_HELPTRACE_NEXT);
14798 	}
14799 
14800 	if (trace)
14801 		dtrace_helper_trace(helper, mstate, vstate,
14802 		    DTRACE_HELPTRACE_DONE);
14803 
14804 	/*
14805 	 * Restore the arg0 that we saved upon entry.
14806 	 */
14807 	mstate->dtms_arg[0] = sarg0;
14808 	mstate->dtms_arg[1] = sarg1;
14809 
14810 	return (rval);
14811 
14812 err:
14813 	if (trace)
14814 		dtrace_helper_trace(helper, mstate, vstate,
14815 		    DTRACE_HELPTRACE_ERR);
14816 
14817 	/*
14818 	 * Restore the arg0 that we saved upon entry.
14819 	 */
14820 	mstate->dtms_arg[0] = sarg0;
14821 	mstate->dtms_arg[1] = sarg1;
14822 
14823 	return (0);
14824 }
14825 
14826 static void
14827 dtrace_helper_action_destroy(dtrace_helper_action_t *helper,
14828     dtrace_vstate_t *vstate)
14829 {
14830 	int i;
14831 
14832 	if (helper->dtha_predicate != NULL)
14833 		dtrace_difo_release(helper->dtha_predicate, vstate);
14834 
14835 	for (i = 0; i < helper->dtha_nactions; i++) {
14836 		ASSERT(helper->dtha_actions[i] != NULL);
14837 		dtrace_difo_release(helper->dtha_actions[i], vstate);
14838 	}
14839 
14840 	kmem_free(helper->dtha_actions,
14841 	    helper->dtha_nactions * sizeof (dtrace_difo_t *));
14842 	kmem_free(helper, sizeof (dtrace_helper_action_t));
14843 }
14844 
14845 static int
14846 dtrace_helper_destroygen(int gen)
14847 {
14848 	proc_t *p = curproc;
14849 	dtrace_helpers_t *help = p->p_dtrace_helpers;
14850 	dtrace_vstate_t *vstate;
14851 	int i;
14852 
14853 	ASSERT(MUTEX_HELD(&dtrace_lock));
14854 
14855 	if (help == NULL || gen > help->dthps_generation)
14856 		return (EINVAL);
14857 
14858 	vstate = &help->dthps_vstate;
14859 
14860 	for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
14861 		dtrace_helper_action_t *last = NULL, *h, *next;
14862 
14863 		for (h = help->dthps_actions[i]; h != NULL; h = next) {
14864 			next = h->dtha_next;
14865 
14866 			if (h->dtha_generation == gen) {
14867 				if (last != NULL) {
14868 					last->dtha_next = next;
14869 				} else {
14870 					help->dthps_actions[i] = next;
14871 				}
14872 
14873 				dtrace_helper_action_destroy(h, vstate);
14874 			} else {
14875 				last = h;
14876 			}
14877 		}
14878 	}
14879 
14880 	/*
14881 	 * Iterate until we've cleared out all helper providers with the
14882 	 * given generation number.
14883 	 */
14884 	for (;;) {
14885 		dtrace_helper_provider_t *prov;
14886 
14887 		/*
14888 		 * Look for a helper provider with the right generation. We
14889 		 * have to start back at the beginning of the list each time
14890 		 * because we drop dtrace_lock. It's unlikely that we'll make
14891 		 * more than two passes.
14892 		 */
14893 		for (i = 0; i < help->dthps_nprovs; i++) {
14894 			prov = help->dthps_provs[i];
14895 
14896 			if (prov->dthp_generation == gen)
14897 				break;
14898 		}
14899 
14900 		/*
14901 		 * If there were no matches, we're done.
14902 		 */
14903 		if (i == help->dthps_nprovs)
14904 			break;
14905 
14906 		/*
14907 		 * Move the last helper provider into this slot.
14908 		 */
14909 		help->dthps_nprovs--;
14910 		help->dthps_provs[i] = help->dthps_provs[help->dthps_nprovs];
14911 		help->dthps_provs[help->dthps_nprovs] = NULL;
14912 
14913 		mutex_exit(&dtrace_lock);
14914 
14915 		/*
14916 		 * If we have a meta provider, remove this helper provider.
14917 		 */
14918 		mutex_enter(&dtrace_meta_lock);
14919 		if (dtrace_meta_pid != NULL) {
14920 			ASSERT(dtrace_deferred_pid == NULL);
14921 			dtrace_helper_provider_remove(&prov->dthp_prov,
14922 			    p->p_pid);
14923 		}
14924 		mutex_exit(&dtrace_meta_lock);
14925 
14926 		dtrace_helper_provider_destroy(prov);
14927 
14928 		mutex_enter(&dtrace_lock);
14929 	}
14930 
14931 	return (0);
14932 }
14933 
14934 static int
14935 dtrace_helper_validate(dtrace_helper_action_t *helper)
14936 {
14937 	int err = 0, i;
14938 	dtrace_difo_t *dp;
14939 
14940 	if ((dp = helper->dtha_predicate) != NULL)
14941 		err += dtrace_difo_validate_helper(dp);
14942 
14943 	for (i = 0; i < helper->dtha_nactions; i++)
14944 		err += dtrace_difo_validate_helper(helper->dtha_actions[i]);
14945 
14946 	return (err == 0);
14947 }
14948 
14949 static int
14950 dtrace_helper_action_add(int which, dtrace_ecbdesc_t *ep)
14951 {
14952 	dtrace_helpers_t *help;
14953 	dtrace_helper_action_t *helper, *last;
14954 	dtrace_actdesc_t *act;
14955 	dtrace_vstate_t *vstate;
14956 	dtrace_predicate_t *pred;
14957 	int count = 0, nactions = 0, i;
14958 
14959 	if (which < 0 || which >= DTRACE_NHELPER_ACTIONS)
14960 		return (EINVAL);
14961 
14962 	help = curproc->p_dtrace_helpers;
14963 	last = help->dthps_actions[which];
14964 	vstate = &help->dthps_vstate;
14965 
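	/*
	 * Count the helper actions already present for this action type,
	 * leaving 'last' pointing at the final element (if any) so that the
	 * new helper can be appended below.
	 */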
14966 	for (count = 0; last != NULL; last = last->dtha_next) {
14967 		count++;
14968 		if (last->dtha_next == NULL)
14969 			break;
14970 	}
14971 
14972 	/*
14973 	 * If we already have dtrace_helper_actions_max helper actions for this
14974 	 * helper action type, we'll refuse to add a new one.
14975 	 */
14976 	if (count >= dtrace_helper_actions_max)
14977 		return (ENOSPC);
14978 
14979 	helper = kmem_zalloc(sizeof (dtrace_helper_action_t), KM_SLEEP);
14980 	helper->dtha_generation = help->dthps_generation;
14981 
14982 	if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) {
14983 		ASSERT(pred->dtp_difo != NULL);
14984 		dtrace_difo_hold(pred->dtp_difo);
14985 		helper->dtha_predicate = pred->dtp_difo;
14986 	}
14987 
14988 	for (act = ep->dted_action; act != NULL; act = act->dtad_next) {
14989 		if (act->dtad_kind != DTRACEACT_DIFEXPR)
14990 			goto err;
14991 
14992 		if (act->dtad_difo == NULL)
14993 			goto err;
14994 
14995 		nactions++;
14996 	}
14997 
14998 	helper->dtha_actions = kmem_zalloc(sizeof (dtrace_difo_t *) *
14999 	    (helper->dtha_nactions = nactions), KM_SLEEP);
15000 
15001 	for (act = ep->dted_action, i = 0; act != NULL; act = act->dtad_next) {
15002 		dtrace_difo_hold(act->dtad_difo);
15003 		helper->dtha_actions[i++] = act->dtad_difo;
15004 	}
15005 
15006 	if (!dtrace_helper_validate(helper))
15007 		goto err;
15008 
15009 	if (last == NULL) {
15010 		help->dthps_actions[which] = helper;
15011 	} else {
15012 		last->dtha_next = helper;
15013 	}
15014 
15015 	if (vstate->dtvs_nlocals > dtrace_helptrace_nlocals) {
15016 		dtrace_helptrace_nlocals = vstate->dtvs_nlocals;
15017 		dtrace_helptrace_next = 0;
15018 	}
15019 
15020 	return (0);
15021 err:
15022 	dtrace_helper_action_destroy(helper, vstate);
15023 	return (EINVAL);
15024 }
15025 
15026 static void
15027 dtrace_helper_provider_register(proc_t *p, dtrace_helpers_t *help,
15028     dof_helper_t *dofhp)
15029 {
15030 	ASSERT(MUTEX_NOT_HELD(&dtrace_lock));
15031 
15032 	mutex_enter(&dtrace_meta_lock);
15033 	mutex_enter(&dtrace_lock);
15034 
15035 	if (!dtrace_attached() || dtrace_meta_pid == NULL) {
15036 		/*
15037 		 * If the dtrace module is loaded but not attached, or if
15038 		 * there isn't a meta provider registered to deal with
15039 		 * these provider descriptions, we need to postpone creating
15040 		 * the actual providers until later.
15041 		 */
15042 
15043 		if (help->dthps_next == NULL && help->dthps_prev == NULL &&
15044 		    dtrace_deferred_pid != help) {
15045 			help->dthps_deferred = 1;
15046 			help->dthps_pid = p->p_pid;
15047 			help->dthps_next = dtrace_deferred_pid;
15048 			help->dthps_prev = NULL;
15049 			if (dtrace_deferred_pid != NULL)
15050 				dtrace_deferred_pid->dthps_prev = help;
15051 			dtrace_deferred_pid = help;
15052 		}
15053 
15054 		mutex_exit(&dtrace_lock);
15055 
15056 	} else if (dofhp != NULL) {
15057 		/*
15058 		 * If the dtrace module is loaded and we have a particular
15059 		 * helper provider description, pass that off to the
15060 		 * meta provider.
15061 		 */
15062 
15063 		mutex_exit(&dtrace_lock);
15064 
15065 		dtrace_helper_provide(dofhp, p->p_pid);
15066 
15067 	} else {
15068 		/*
15069 		 * Otherwise, just pass all the helper provider descriptions
15070 		 * off to the meta provider.
15071 		 */
15072 
15073 		int i;
15074 		mutex_exit(&dtrace_lock);
15075 
15076 		for (i = 0; i < help->dthps_nprovs; i++) {
15077 			dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov,
15078 			    p->p_pid);
15079 		}
15080 	}
15081 
15082 	mutex_exit(&dtrace_meta_lock);
15083 }
15084 
15085 static int
15086 dtrace_helper_provider_add(dof_helper_t *dofhp, int gen)
15087 {
15088 	dtrace_helpers_t *help;
15089 	dtrace_helper_provider_t *hprov, **tmp_provs;
15090 	uint_t tmp_maxprovs, i;
15091 
15092 	ASSERT(MUTEX_HELD(&dtrace_lock));
15093 
15094 	help = curproc->p_dtrace_helpers;
15095 	ASSERT(help != NULL);
15096 
15097 	/*
15098 	 * If we already have dtrace_helper_providers_max helper providers,
15099 	 * we'll refuse to add a new one.
15100 	 */
15101 	if (help->dthps_nprovs >= dtrace_helper_providers_max)
15102 		return (ENOSPC);
15103 
15104 	/*
15105 	 * Check to make sure this isn't a duplicate.
15106 	 */
15107 	for (i = 0; i < help->dthps_nprovs; i++) {
15108 		if (dofhp->dofhp_addr ==
15109 		    help->dthps_provs[i]->dthp_prov.dofhp_addr)
15110 			return (EALREADY);
15111 	}
15112 
15113 	hprov = kmem_zalloc(sizeof (dtrace_helper_provider_t), KM_SLEEP);
15114 	hprov->dthp_prov = *dofhp;
15115 	hprov->dthp_ref = 1;
15116 	hprov->dthp_generation = gen;
15117 
15118 	/*
15119 	 * Allocate a bigger table for helper providers if it's already full.
15120 	 */
15121 	if (help->dthps_maxprovs == help->dthps_nprovs) {
15122 		tmp_maxprovs = help->dthps_maxprovs;
15123 		tmp_provs = help->dthps_provs;
15124 
15125 		if (help->dthps_maxprovs == 0)
15126 			help->dthps_maxprovs = 2;
15127 		else
15128 			help->dthps_maxprovs *= 2;
15129 		if (help->dthps_maxprovs > dtrace_helper_providers_max)
15130 			help->dthps_maxprovs = dtrace_helper_providers_max;
15131 
15132 		ASSERT(tmp_maxprovs < help->dthps_maxprovs);
15133 
15134 		help->dthps_provs = kmem_zalloc(help->dthps_maxprovs *
15135 		    sizeof (dtrace_helper_provider_t *), KM_SLEEP);
15136 
15137 		if (tmp_provs != NULL) {
15138 			bcopy(tmp_provs, help->dthps_provs, tmp_maxprovs *
15139 			    sizeof (dtrace_helper_provider_t *));
15140 			kmem_free(tmp_provs, tmp_maxprovs *
15141 			    sizeof (dtrace_helper_provider_t *));
15142 		}
15143 	}
15144 
15145 	help->dthps_provs[help->dthps_nprovs] = hprov;
15146 	help->dthps_nprovs++;
15147 
15148 	return (0);
15149 }
15150 
15151 static void
15152 dtrace_helper_provider_destroy(dtrace_helper_provider_t *hprov)
15153 {
15154 	mutex_enter(&dtrace_lock);
15155 
15156 	if (--hprov->dthp_ref == 0) {
15157 		dof_hdr_t *dof;
15158 		mutex_exit(&dtrace_lock);
15159 		dof = (dof_hdr_t *)(uintptr_t)hprov->dthp_prov.dofhp_dof;
15160 		dtrace_dof_destroy(dof);
15161 		kmem_free(hprov, sizeof (dtrace_helper_provider_t));
15162 	} else {
15163 		mutex_exit(&dtrace_lock);
15164 	}
15165 }
15166 
15167 static int
15168 dtrace_helper_provider_validate(dof_hdr_t *dof, dof_sec_t *sec)
15169 {
15170 	uintptr_t daddr = (uintptr_t)dof;
15171 	dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec;
15172 	dof_provider_t *provider;
15173 	dof_probe_t *probe;
15174 	uint8_t *arg;
15175 	char *strtab, *typestr;
15176 	dof_stridx_t typeidx;
15177 	size_t typesz;
15178 	uint_t nprobes, j, k;
15179 
15180 	ASSERT(sec->dofs_type == DOF_SECT_PROVIDER);
15181 
15182 	if (sec->dofs_offset & (sizeof (uint_t) - 1)) {
15183 		dtrace_dof_error(dof, "misaligned section offset");
15184 		return (-1);
15185 	}
15186 
15187 	/*
15188 	 * The section needs to be large enough to contain the DOF provider
15189 	 * structure appropriate for the given version.
15190 	 */
15191 	if (sec->dofs_size <
15192 	    ((dof->dofh_ident[DOF_ID_VERSION] == DOF_VERSION_1) ?
15193 	    offsetof(dof_provider_t, dofpv_prenoffs) :
15194 	    sizeof (dof_provider_t))) {
15195 		dtrace_dof_error(dof, "provider section too small");
15196 		return (-1);
15197 	}
15198 
15199 	provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
15200 	str_sec = dtrace_dof_sect(dof, DOF_SECT_STRTAB, provider->dofpv_strtab);
15201 	prb_sec = dtrace_dof_sect(dof, DOF_SECT_PROBES, provider->dofpv_probes);
15202 	arg_sec = dtrace_dof_sect(dof, DOF_SECT_PRARGS, provider->dofpv_prargs);
15203 	off_sec = dtrace_dof_sect(dof, DOF_SECT_PROFFS, provider->dofpv_proffs);
15204 
15205 	if (str_sec == NULL || prb_sec == NULL ||
15206 	    arg_sec == NULL || off_sec == NULL)
15207 		return (-1);
15208 
15209 	enoff_sec = NULL;
15210 
15211 	if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 &&
15212 	    provider->dofpv_prenoffs != DOF_SECT_NONE &&
15213 	    (enoff_sec = dtrace_dof_sect(dof, DOF_SECT_PRENOFFS,
15214 	    provider->dofpv_prenoffs)) == NULL)
15215 		return (-1);
15216 
15217 	strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
15218 
15219 	if (provider->dofpv_name >= str_sec->dofs_size ||
15220 	    strlen(strtab + provider->dofpv_name) >= DTRACE_PROVNAMELEN) {
15221 		dtrace_dof_error(dof, "invalid provider name");
15222 		return (-1);
15223 	}
15224 
15225 	if (prb_sec->dofs_entsize == 0 ||
15226 	    prb_sec->dofs_entsize > prb_sec->dofs_size) {
15227 		dtrace_dof_error(dof, "invalid entry size");
15228 		return (-1);
15229 	}
15230 
15231 	if (prb_sec->dofs_entsize & (sizeof (uintptr_t) - 1)) {
15232 		dtrace_dof_error(dof, "misaligned entry size");
15233 		return (-1);
15234 	}
15235 
15236 	if (off_sec->dofs_entsize != sizeof (uint32_t)) {
15237 		dtrace_dof_error(dof, "invalid entry size");
15238 		return (-1);
15239 	}
15240 
15241 	if (off_sec->dofs_offset & (sizeof (uint32_t) - 1)) {
15242 		dtrace_dof_error(dof, "misaligned section offset");
15243 		return (-1);
15244 	}
15245 
15246 	if (arg_sec->dofs_entsize != sizeof (uint8_t)) {
15247 		dtrace_dof_error(dof, "invalid entry size");
15248 		return (-1);
15249 	}
15250 
15251 	arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset);
15252 
15253 	nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize;
15254 
15255 	/*
15256 	 * Take a pass through the probes to check for errors.
15257 	 */
15258 	for (j = 0; j < nprobes; j++) {
15259 		probe = (dof_probe_t *)(uintptr_t)(daddr +
15260 		    prb_sec->dofs_offset + j * prb_sec->dofs_entsize);
15261 
15262 		if (probe->dofpr_func >= str_sec->dofs_size) {
15263 			dtrace_dof_error(dof, "invalid function name");
15264 			return (-1);
15265 		}
15266 
15267 		if (strlen(strtab + probe->dofpr_func) >= DTRACE_FUNCNAMELEN) {
15268 			dtrace_dof_error(dof, "function name too long");
15269 			return (-1);
15270 		}
15271 
15272 		if (probe->dofpr_name >= str_sec->dofs_size ||
15273 		    strlen(strtab + probe->dofpr_name) >= DTRACE_NAMELEN) {
15274 			dtrace_dof_error(dof, "invalid probe name");
15275 			return (-1);
15276 		}
15277 
15278 		/*
15279 		 * The offset count must not wrap the index, and the offsets
15280 		 * must also not overflow the section's data.
15281 		 */
15282 		if (probe->dofpr_offidx + probe->dofpr_noffs <
15283 		    probe->dofpr_offidx ||
15284 		    (probe->dofpr_offidx + probe->dofpr_noffs) *
15285 		    off_sec->dofs_entsize > off_sec->dofs_size) {
15286 			dtrace_dof_error(dof, "invalid probe offset");
15287 			return (-1);
15288 		}
15289 
15290 		if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1) {
15291 			/*
15292 			 * If there's no is-enabled offset section, make sure
15293 			 * there aren't any is-enabled offsets. Otherwise
15294 			 * perform the same checks as for probe offsets
15295 			 * (immediately above).
15296 			 */
15297 			if (enoff_sec == NULL) {
15298 				if (probe->dofpr_enoffidx != 0 ||
15299 				    probe->dofpr_nenoffs != 0) {
15300 					dtrace_dof_error(dof, "is-enabled "
15301 					    "offsets with null section");
15302 					return (-1);
15303 				}
15304 			} else if (probe->dofpr_enoffidx +
15305 			    probe->dofpr_nenoffs < probe->dofpr_enoffidx ||
15306 			    (probe->dofpr_enoffidx + probe->dofpr_nenoffs) *
15307 			    enoff_sec->dofs_entsize > enoff_sec->dofs_size) {
15308 				dtrace_dof_error(dof, "invalid is-enabled "
15309 				    "offset");
15310 				return (-1);
15311 			}
15312 
15313 			if (probe->dofpr_noffs + probe->dofpr_nenoffs == 0) {
15314 				dtrace_dof_error(dof, "zero probe and "
15315 				    "is-enabled offsets");
15316 				return (-1);
15317 			}
15318 		} else if (probe->dofpr_noffs == 0) {
15319 			dtrace_dof_error(dof, "zero probe offsets");
15320 			return (-1);
15321 		}
15322 
15323 		if (probe->dofpr_argidx + probe->dofpr_xargc <
15324 		    probe->dofpr_argidx ||
15325 		    (probe->dofpr_argidx + probe->dofpr_xargc) *
15326 		    arg_sec->dofs_entsize > arg_sec->dofs_size) {
15327 			dtrace_dof_error(dof, "invalid args");
15328 			return (-1);
15329 		}
15330 
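		/*
		 * The argument types are stored as consecutive
		 * NUL-terminated strings in the string table.  Walk the
		 * native argument types, checking each against the bounds
		 * of the string section and DTRACE_ARGTYPELEN.
		 */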
15331 		typeidx = probe->dofpr_nargv;
15332 		typestr = strtab + probe->dofpr_nargv;
15333 		for (k = 0; k < probe->dofpr_nargc; k++) {
15334 			if (typeidx >= str_sec->dofs_size) {
15335 				dtrace_dof_error(dof, "bad "
15336 				    "native argument type");
15337 				return (-1);
15338 			}
15339 
15340 			typesz = strlen(typestr) + 1;
15341 			if (typesz > DTRACE_ARGTYPELEN) {
15342 				dtrace_dof_error(dof, "native "
15343 				    "argument type too long");
15344 				return (-1);
15345 			}
15346 			typeidx += typesz;
15347 			typestr += typesz;
15348 		}
15349 
15350 		typeidx = probe->dofpr_xargv;
15351 		typestr = strtab + probe->dofpr_xargv;
15352 		for (k = 0; k < probe->dofpr_xargc; k++) {
15353 			if (arg[probe->dofpr_argidx + k] > probe->dofpr_nargc) {
15354 				dtrace_dof_error(dof, "bad "
15355 				    "native argument index");
15356 				return (-1);
15357 			}
15358 
15359 			if (typeidx >= str_sec->dofs_size) {
15360 				dtrace_dof_error(dof, "bad "
15361 				    "translated argument type");
15362 				return (-1);
15363 			}
15364 
15365 			typesz = strlen(typestr) + 1;
15366 			if (typesz > DTRACE_ARGTYPELEN) {
15367 				dtrace_dof_error(dof, "translated argument "
15368 				    "type too long");
15369 				return (-1);
15370 			}
15371 
15372 			typeidx += typesz;
15373 			typestr += typesz;
15374 		}
15375 	}
15376 
15377 	return (0);
15378 }
15379 
15380 static int
15381 dtrace_helper_slurp(dof_hdr_t *dof, dof_helper_t *dhp)
15382 {
15383 	dtrace_helpers_t *help;
15384 	dtrace_vstate_t *vstate;
15385 	dtrace_enabling_t *enab = NULL;
15386 	int i, gen, rv, nhelpers = 0, nprovs = 0, destroy = 1;
15387 	uintptr_t daddr = (uintptr_t)dof;
15388 
15389 	ASSERT(MUTEX_HELD(&dtrace_lock));
15390 
15391 	if ((help = curproc->p_dtrace_helpers) == NULL)
15392 		help = dtrace_helpers_create(curproc);
15393 
15394 	vstate = &help->dthps_vstate;
15395 
15396 	if ((rv = dtrace_dof_slurp(dof, vstate, NULL, &enab,
15397 	    dhp != NULL ? dhp->dofhp_addr : 0, B_FALSE)) != 0) {
15398 		dtrace_dof_destroy(dof);
15399 		return (rv);
15400 	}
15401 
15402 	/*
15403 	 * Look for helper providers and validate their descriptions.
15404 	 */
15405 	if (dhp != NULL) {
15406 		for (i = 0; i < dof->dofh_secnum; i++) {
15407 			dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
15408 			    dof->dofh_secoff + i * dof->dofh_secsize);
15409 
15410 			if (sec->dofs_type != DOF_SECT_PROVIDER)
15411 				continue;
15412 
15413 			if (dtrace_helper_provider_validate(dof, sec) != 0) {
15414 				dtrace_enabling_destroy(enab);
15415 				dtrace_dof_destroy(dof);
15416 				return (-1);
15417 			}
15418 
15419 			nprovs++;
15420 		}
15421 	}
15422 
15423 	/*
15424 	 * Now we need to walk through the ECB descriptions in the enabling.
15425 	 */
15426 	for (i = 0; i < enab->dten_ndesc; i++) {
15427 		dtrace_ecbdesc_t *ep = enab->dten_desc[i];
15428 		dtrace_probedesc_t *desc = &ep->dted_probe;
15429 
15430 		if (strcmp(desc->dtpd_provider, "dtrace") != 0)
15431 			continue;
15432 
15433 		if (strcmp(desc->dtpd_mod, "helper") != 0)
15434 			continue;
15435 
15436 		if (strcmp(desc->dtpd_func, "ustack") != 0)
15437 			continue;
15438 
15439 		if ((rv = dtrace_helper_action_add(DTRACE_HELPER_ACTION_USTACK,
15440 		    ep)) != 0) {
15441 			/*
15442 			 * Adding this helper action failed -- we are now going
15443 			 * to rip out the entire generation and return failure.
15444 			 */
15445 			(void) dtrace_helper_destroygen(help->dthps_generation);
15446 			dtrace_enabling_destroy(enab);
15447 			dtrace_dof_destroy(dof);
15448 			return (-1);
15449 		}
15450 
15451 		nhelpers++;
15452 	}
15453 
15454 	if (nhelpers < enab->dten_ndesc)
15455 		dtrace_dof_error(dof, "unmatched helpers");
15456 
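	/*
	 * The helpers (and any helper providers) added above were tagged
	 * with the current generation; return that value -- bumping the
	 * counter for the next slurp -- so the caller can later remove
	 * exactly this batch via dtrace_helper_destroygen().
	 */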
15457 	gen = help->dthps_generation++;
15458 	dtrace_enabling_destroy(enab);
15459 
15460 	if (dhp != NULL && nprovs > 0) {
15461 		/*
15462 		 * Now that this is in-kernel, we change the sense of the
15463 		 * members:  dofhp_dof denotes the in-kernel copy of the DOF
15464 		 * and dofhp_addr denotes the address at user-level.
15465 		 */
15466 		dhp->dofhp_addr = dhp->dofhp_dof;
15467 		dhp->dofhp_dof = (uint64_t)(uintptr_t)dof;
15468 
15469 		if (dtrace_helper_provider_add(dhp, gen) == 0) {
15470 			mutex_exit(&dtrace_lock);
15471 			dtrace_helper_provider_register(curproc, help, dhp);
15472 			mutex_enter(&dtrace_lock);
15473 
15474 			destroy = 0;
15475 		}
15476 	}
15477 
15478 	if (destroy)
15479 		dtrace_dof_destroy(dof);
15480 
15481 	return (gen);
15482 }
15483 
15484 static dtrace_helpers_t *
15485 dtrace_helpers_create(proc_t *p)
15486 {
15487 	dtrace_helpers_t *help;
15488 
15489 	ASSERT(MUTEX_HELD(&dtrace_lock));
15490 	ASSERT(p->p_dtrace_helpers == NULL);
15491 
15492 	help = kmem_zalloc(sizeof (dtrace_helpers_t), KM_SLEEP);
15493 	help->dthps_actions = kmem_zalloc(sizeof (dtrace_helper_action_t *) *
15494 	    DTRACE_NHELPER_ACTIONS, KM_SLEEP);
15495 
15496 	p->p_dtrace_helpers = help;
15497 	dtrace_helpers++;
15498 
15499 	return (help);
15500 }
15501 
15502 static void
15503 dtrace_helpers_destroy(proc_t *p)
15504 {
15505 	dtrace_helpers_t *help;
15506 	dtrace_vstate_t *vstate;
15507 	int i;
15508 
15509 	mutex_enter(&dtrace_lock);
15510 
15511 	ASSERT(p->p_dtrace_helpers != NULL);
15512 	ASSERT(dtrace_helpers > 0);
15513 
15514 	help = p->p_dtrace_helpers;
15515 	vstate = &help->dthps_vstate;
15516 
15517 	/*
15518 	 * We're now going to lose the help from this process.
15519 	 */
15520 	p->p_dtrace_helpers = NULL;
15521 	if (p == curproc) {
15522 		dtrace_sync();
15523 	} else {
15524 		/*
15525 		 * It is sometimes necessary to clean up dtrace helpers from
15526 		 * an incomplete child process as part of a failed fork
15527 		 * operation.  In such situations, a dtrace_sync() call should
15528 		 * be unnecessary as the process should be devoid of threads,
15529 		 * much less any in probe context.
15530 		 */
15531 		VERIFY(p->p_stat == SIDL);
15532 	}
15533 
15534 	/*
15535 	 * Destroy the helper actions.
15536 	 */
15537 	for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
15538 		dtrace_helper_action_t *h, *next;
15539 
15540 		for (h = help->dthps_actions[i]; h != NULL; h = next) {
15541 			next = h->dtha_next;
15542 			dtrace_helper_action_destroy(h, vstate);
15544 		}
15545 	}
15546 
15547 	mutex_exit(&dtrace_lock);
15548 
15549 	/*
15550 	 * Destroy the helper providers.
15551 	 */
15552 	if (help->dthps_maxprovs > 0) {
15553 		mutex_enter(&dtrace_meta_lock);
15554 		if (dtrace_meta_pid != NULL) {
15555 			ASSERT(dtrace_deferred_pid == NULL);
15556 
15557 			for (i = 0; i < help->dthps_nprovs; i++) {
15558 				dtrace_helper_provider_remove(
15559 				    &help->dthps_provs[i]->dthp_prov, p->p_pid);
15560 			}
15561 		} else {
15562 			mutex_enter(&dtrace_lock);
15563 			ASSERT(help->dthps_deferred == 0 ||
15564 			    help->dthps_next != NULL ||
15565 			    help->dthps_prev != NULL ||
15566 			    help == dtrace_deferred_pid);
15567 
15568 			/*
15569 			 * Remove the helper from the deferred list.
15570 			 */
15571 			if (help->dthps_next != NULL)
15572 				help->dthps_next->dthps_prev = help->dthps_prev;
15573 			if (help->dthps_prev != NULL)
15574 				help->dthps_prev->dthps_next = help->dthps_next;
15575 			if (dtrace_deferred_pid == help) {
15576 				dtrace_deferred_pid = help->dthps_next;
15577 				ASSERT(help->dthps_prev == NULL);
15578 			}
15579 
15580 			mutex_exit(&dtrace_lock);
15581 		}
15582 
15583 		mutex_exit(&dtrace_meta_lock);
15584 
15585 		for (i = 0; i < help->dthps_nprovs; i++) {
15586 			dtrace_helper_provider_destroy(help->dthps_provs[i]);
15587 		}
15588 
15589 		kmem_free(help->dthps_provs, help->dthps_maxprovs *
15590 		    sizeof (dtrace_helper_provider_t *));
15591 	}
15592 
15593 	mutex_enter(&dtrace_lock);
15594 
15595 	dtrace_vstate_fini(&help->dthps_vstate);
15596 	kmem_free(help->dthps_actions,
15597 	    sizeof (dtrace_helper_action_t *) * DTRACE_NHELPER_ACTIONS);
15598 	kmem_free(help, sizeof (dtrace_helpers_t));
15599 
15600 	--dtrace_helpers;
15601 	mutex_exit(&dtrace_lock);
15602 }
15603 
15604 static void
15605 dtrace_helpers_duplicate(proc_t *from, proc_t *to)
15606 {
15607 	dtrace_helpers_t *help, *newhelp;
15608 	dtrace_helper_action_t *helper, *new, *last;
15609 	dtrace_difo_t *dp;
15610 	dtrace_vstate_t *vstate;
15611 	int i, j, sz, hasprovs = 0;
15612 
15613 	mutex_enter(&dtrace_lock);
15614 	ASSERT(from->p_dtrace_helpers != NULL);
15615 	ASSERT(dtrace_helpers > 0);
15616 
15617 	help = from->p_dtrace_helpers;
15618 	newhelp = dtrace_helpers_create(to);
15619 	ASSERT(to->p_dtrace_helpers != NULL);
15620 
15621 	newhelp->dthps_generation = help->dthps_generation;
15622 	vstate = &newhelp->dthps_vstate;
15623 
15624 	/*
15625 	 * Duplicate the helper actions.
15626 	 */
15627 	for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
15628 		if ((helper = help->dthps_actions[i]) == NULL)
15629 			continue;
15630 
15631 		for (last = NULL; helper != NULL; helper = helper->dtha_next) {
15632 			new = kmem_zalloc(sizeof (dtrace_helper_action_t),
15633 			    KM_SLEEP);
15634 			new->dtha_generation = helper->dtha_generation;
15635 
15636 			if ((dp = helper->dtha_predicate) != NULL) {
15637 				dp = dtrace_difo_duplicate(dp, vstate);
15638 				new->dtha_predicate = dp;
15639 			}
15640 
15641 			new->dtha_nactions = helper->dtha_nactions;
15642 			sz = sizeof (dtrace_difo_t *) * new->dtha_nactions;
15643 			new->dtha_actions = kmem_alloc(sz, KM_SLEEP);
15644 
15645 			for (j = 0; j < new->dtha_nactions; j++) {
15646 				dtrace_difo_t *dp = helper->dtha_actions[j];
15647 
15648 				ASSERT(dp != NULL);
15649 				dp = dtrace_difo_duplicate(dp, vstate);
15650 				new->dtha_actions[j] = dp;
15651 			}
15652 
15653 			if (last != NULL) {
15654 				last->dtha_next = new;
15655 			} else {
15656 				newhelp->dthps_actions[i] = new;
15657 			}
15658 
15659 			last = new;
15660 		}
15661 	}
15662 
15663 	/*
15664 	 * Duplicate the helper providers and register them with the
15665 	 * DTrace framework.
15666 	 */
15667 	if (help->dthps_nprovs > 0) {
15668 		newhelp->dthps_nprovs = help->dthps_nprovs;
15669 		newhelp->dthps_maxprovs = help->dthps_nprovs;
15670 		newhelp->dthps_provs = kmem_alloc(newhelp->dthps_nprovs *
15671 		    sizeof (dtrace_helper_provider_t *), KM_SLEEP);
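		/*
		 * The helper provider descriptions are shared by reference
		 * rather than copied:  we bump each dthp_ref here, and
		 * dtrace_helper_provider_destroy() frees a description only
		 * when its last reference is dropped.
		 */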
15672 		for (i = 0; i < newhelp->dthps_nprovs; i++) {
15673 			newhelp->dthps_provs[i] = help->dthps_provs[i];
15674 			newhelp->dthps_provs[i]->dthp_ref++;
15675 		}
15676 
15677 		hasprovs = 1;
15678 	}
15679 
15680 	mutex_exit(&dtrace_lock);
15681 
15682 	if (hasprovs)
15683 		dtrace_helper_provider_register(to, newhelp, NULL);
15684 }
15685 
15686 /*
15687  * DTrace Hook Functions
15688  */
15689 static void
15690 dtrace_module_loaded(struct modctl *ctl)
15691 {
15692 	dtrace_provider_t *prv;
15693 
15694 	mutex_enter(&dtrace_provider_lock);
15695 	mutex_enter(&mod_lock);
15696 
15697 	ASSERT(ctl->mod_busy);
15698 
15699 	/*
15700 	 * We're going to call each provider's per-module provide operation
15701 	 * specifying only this module.
15702 	 */
15703 	for (prv = dtrace_provider; prv != NULL; prv = prv->dtpv_next)
15704 		prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl);
15705 
15706 	mutex_exit(&mod_lock);
15707 	mutex_exit(&dtrace_provider_lock);
15708 
15709 	/*
15710 	 * If we have any retained enablings, we need to match against them.
15711 	 * Enabling probes requires that cpu_lock be held, and we cannot hold
15712 	 * cpu_lock here -- it is legal for cpu_lock to be held when loading a
15713 	 * module.  (In particular, this happens when loading scheduling
15714 	 * classes.)  So if we have any retained enablings, we need to dispatch
15715 	 * our task queue to do the match for us.
15716 	 */
15717 	mutex_enter(&dtrace_lock);
15718 
15719 	if (dtrace_retained == NULL) {
15720 		mutex_exit(&dtrace_lock);
15721 		return;
15722 	}
15723 
15724 	(void) taskq_dispatch(dtrace_taskq,
15725 	    (task_func_t *)dtrace_enabling_matchall, NULL, TQ_SLEEP);
15726 
15727 	mutex_exit(&dtrace_lock);
15728 
15729 	/*
15730 	 * And now, for a little heuristic sleaze:  in general, we want to
15731 	 * match modules as soon as they load.  However, we cannot guarantee
15732 	 * this, because it would lead us to the lock ordering violation
15733 	 * outlined above.  The common case, of course, is that cpu_lock is
15734 	 * _not_ held -- so we delay here for a clock tick, hoping that that's
15735 	 * long enough for the task queue to do its work.  If it's not, it's
15736 	 * not a serious problem -- it just means that the module that we
15737 	 * just loaded may not be immediately instrumentable.
15738 	 */
15739 	delay(1);
15740 }
15741 
15742 static void
15743 dtrace_module_unloaded(struct modctl *ctl)
15744 {
15745 	dtrace_probe_t template, *probe, *first, *next;
15746 	dtrace_provider_t *prov;
15747 
15748 	template.dtpr_mod = ctl->mod_modname;
15749 
15750 	mutex_enter(&dtrace_provider_lock);
15751 	mutex_enter(&mod_lock);
15752 	mutex_enter(&dtrace_lock);
15753 
15754 	if (dtrace_bymod == NULL) {
15755 		/*
15756 		 * The DTrace module is loaded (obviously) but not attached;
15757 		 * we don't have any work to do.
15758 		 */
15759 		mutex_exit(&dtrace_provider_lock);
15760 		mutex_exit(&mod_lock);
15761 		mutex_exit(&dtrace_lock);
15762 		return;
15763 	}
15764 
15765 	for (probe = first = dtrace_hash_lookup(dtrace_bymod, &template);
15766 	    probe != NULL; probe = probe->dtpr_nextmod) {
15767 		if (probe->dtpr_ecb != NULL) {
15768 			mutex_exit(&dtrace_provider_lock);
15769 			mutex_exit(&mod_lock);
15770 			mutex_exit(&dtrace_lock);
15771 
15772 			/*
15773 			 * This shouldn't _actually_ be possible -- we're
15774 			 * unloading a module that has an enabled probe in it.
15775 			 * (It's normally up to the provider to make sure that
15776 			 * this can't happen.)  However, because dtps_enable()
15777 			 * doesn't have a failure mode, there can be an
15778 			 * enable/unload race.  Upshot:  we don't want to
15779 			 * assert, but we're not going to disable the
15780 			 * probe, either.
15781 			 */
15782 			if (dtrace_err_verbose) {
15783 				cmn_err(CE_WARN, "unloaded module '%s' had "
15784 				    "enabled probes", ctl->mod_modname);
15785 			}
15786 
15787 			return;
15788 		}
15789 	}
15790 
15791 	probe = first;
15792 
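	/*
	 * Unhook each of the module's probes from the hash chains and the
	 * probe array, chaining them onto a private list (reusing
	 * dtpr_nextmod) to be destroyed after the dtrace_sync() below.
	 */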
15793 	for (first = NULL; probe != NULL; probe = next) {
15794 		ASSERT(dtrace_probes[probe->dtpr_id - 1] == probe);
15795 
15796 		dtrace_probes[probe->dtpr_id - 1] = NULL;
15797 
15798 		next = probe->dtpr_nextmod;
15799 		dtrace_hash_remove(dtrace_bymod, probe);
15800 		dtrace_hash_remove(dtrace_byfunc, probe);
15801 		dtrace_hash_remove(dtrace_byname, probe);
15802 
15803 		if (first == NULL) {
15804 			first = probe;
15805 			probe->dtpr_nextmod = NULL;
15806 		} else {
15807 			probe->dtpr_nextmod = first;
15808 			first = probe;
15809 		}
15810 	}
15811 
15812 	/*
15813 	 * We've removed all of the module's probes from the hash chains and
15814 	 * from the probe array.  Now issue a dtrace_sync() to be sure that
15815 	 * everyone has cleared out from any probe array processing.
15816 	 */
15817 	dtrace_sync();
15818 
15819 	for (probe = first; probe != NULL; probe = first) {
15820 		first = probe->dtpr_nextmod;
15821 		prov = probe->dtpr_provider;
15822 		prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, probe->dtpr_id,
15823 		    probe->dtpr_arg);
15824 		kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1);
15825 		kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1);
15826 		kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1);
15827 		vmem_free(dtrace_arena, (void *)(uintptr_t)probe->dtpr_id, 1);
15828 		kmem_free(probe, sizeof (dtrace_probe_t));
15829 	}
15830 
15831 	mutex_exit(&dtrace_lock);
15832 	mutex_exit(&mod_lock);
15833 	mutex_exit(&dtrace_provider_lock);
15834 }
15835 
15836 void
15837 dtrace_suspend(void)
15838 {
15839 	dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_suspend));
15840 }
15841 
15842 void
15843 dtrace_resume(void)
15844 {
15845 	dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_resume));
15846 }
15847 
15848 static int
15849 dtrace_cpu_setup(cpu_setup_t what, processorid_t cpu)
15850 {
15851 	ASSERT(MUTEX_HELD(&cpu_lock));
15852 	mutex_enter(&dtrace_lock);
15853 
15854 	switch (what) {
15855 	case CPU_CONFIG: {
15856 		dtrace_state_t *state;
15857 		dtrace_optval_t *opt, rs, c;
15858 
15859 		/*
15860 		 * For now, we only allocate a new buffer for anonymous state.
15861 		 */
15862 		if ((state = dtrace_anon.dta_state) == NULL)
15863 			break;
15864 
15865 		if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE)
15866 			break;
15867 
15868 		opt = state->dts_options;
15869 		c = opt[DTRACEOPT_CPU];
15870 
15871 		if (c != DTRACE_CPUALL && c != DTRACEOPT_UNSET && c != cpu)
15872 			break;
15873 
15874 		/*
15875 		 * Regardless of what the actual policy is, we're going to
15876 		 * temporarily set our resize policy to be manual.  We're
15877 		 * also going to temporarily set our CPU option to denote
15878 		 * the newly configured CPU.
15879 		 */
15880 		rs = opt[DTRACEOPT_BUFRESIZE];
15881 		opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_MANUAL;
15882 		opt[DTRACEOPT_CPU] = (dtrace_optval_t)cpu;
15883 
15884 		(void) dtrace_state_buffers(state);
15885 
15886 		opt[DTRACEOPT_BUFRESIZE] = rs;
15887 		opt[DTRACEOPT_CPU] = c;
15888 
15889 		break;
15890 	}
15891 
15892 	case CPU_UNCONFIG:
15893 		/*
15894 		 * We don't free the buffer in the CPU_UNCONFIG case.  (The
15895 		 * buffer will be freed when the consumer exits.)
15896 		 */
15897 		break;
15898 
15899 	default:
15900 		break;
15901 	}
15902 
15903 	mutex_exit(&dtrace_lock);
15904 	return (0);
15905 }
15906 
15907 static void
15908 dtrace_cpu_setup_initial(processorid_t cpu)
15909 {
15910 	(void) dtrace_cpu_setup(CPU_CONFIG, cpu);
15911 }
15912 
15913 static void
15914 dtrace_toxrange_add(uintptr_t base, uintptr_t limit)
15915 {
15916 	if (dtrace_toxranges >= dtrace_toxranges_max) {
15917 		int osize, nsize;
15918 		dtrace_toxrange_t *range;
15919 
15920 		osize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t);
15921 
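		/*
		 * Grow the toxic range table geometrically:  a single entry
		 * to start with, doubling each time the table fills, and
		 * copying any existing ranges into the new allocation.
		 */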
15922 		if (osize == 0) {
15923 			ASSERT(dtrace_toxrange == NULL);
15924 			ASSERT(dtrace_toxranges_max == 0);
15925 			dtrace_toxranges_max = 1;
15926 		} else {
15927 			dtrace_toxranges_max <<= 1;
15928 		}
15929 
15930 		nsize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t);
15931 		range = kmem_zalloc(nsize, KM_SLEEP);
15932 
15933 		if (dtrace_toxrange != NULL) {
15934 			ASSERT(osize != 0);
15935 			bcopy(dtrace_toxrange, range, osize);
15936 			kmem_free(dtrace_toxrange, osize);
15937 		}
15938 
15939 		dtrace_toxrange = range;
15940 	}
15941 
15942 	ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_base == NULL);
15943 	ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_limit == NULL);
15944 
15945 	dtrace_toxrange[dtrace_toxranges].dtt_base = base;
15946 	dtrace_toxrange[dtrace_toxranges].dtt_limit = limit;
15947 	dtrace_toxranges++;
15948 }
15949 
15950 static void
15951 dtrace_getf_barrier()
15952 {
15953 	/*
15954 	 * When we have unprivileged (that is, non-DTRACE_CRV_KERNEL) enablings
15955 	 * that contain calls to getf(), this routine will be called on every
15956 	 * closef() before either the underlying vnode is released or the
15957 	 * file_t itself is freed.  By the time we are here, it is essential
15958 	 * that the file_t can no longer be accessed from a call to getf()
15959 	 * in probe context -- that assures that a dtrace_sync() can be used
15960 	 * to clear out any enablings referring to the old structures.
15961 	 */
15962 	if (curthread->t_procp->p_zone->zone_dtrace_getf != 0 ||
15963 	    kcred->cr_zone->zone_dtrace_getf != 0)
15964 		dtrace_sync();
15965 }
15966 
15967 /*
15968  * DTrace Driver Cookbook Functions
15969  */
15970 /*ARGSUSED*/
15971 static int
15972 dtrace_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
15973 {
15974 	dtrace_provider_id_t id;
15975 	dtrace_state_t *state = NULL;
15976 	dtrace_enabling_t *enab;
15977 
15978 	mutex_enter(&cpu_lock);
15979 	mutex_enter(&dtrace_provider_lock);
15980 	mutex_enter(&dtrace_lock);
15981 
15982 	if (ddi_soft_state_init(&dtrace_softstate,
15983 	    sizeof (dtrace_state_t), 0) != 0) {
15984 		cmn_err(CE_NOTE, "/dev/dtrace failed to initialize soft state");
15985 		mutex_exit(&cpu_lock);
15986 		mutex_exit(&dtrace_provider_lock);
15987 		mutex_exit(&dtrace_lock);
15988 		return (DDI_FAILURE);
15989 	}
15990 
15991 	if (ddi_create_minor_node(devi, DTRACEMNR_DTRACE, S_IFCHR,
15992 	    DTRACEMNRN_DTRACE, DDI_PSEUDO, NULL) == DDI_FAILURE ||
15993 	    ddi_create_minor_node(devi, DTRACEMNR_HELPER, S_IFCHR,
15994 	    DTRACEMNRN_HELPER, DDI_PSEUDO, NULL) == DDI_FAILURE) {
15995 		cmn_err(CE_NOTE, "/dev/dtrace couldn't create minor nodes");
15996 		ddi_remove_minor_node(devi, NULL);
15997 		ddi_soft_state_fini(&dtrace_softstate);
15998 		mutex_exit(&cpu_lock);
15999 		mutex_exit(&dtrace_provider_lock);
16000 		mutex_exit(&dtrace_lock);
16001 		return (DDI_FAILURE);
16002 	}
16003 
16004 	ddi_report_dev(devi);
16005 	dtrace_devi = devi;
16006 
16007 	dtrace_modload = dtrace_module_loaded;
16008 	dtrace_modunload = dtrace_module_unloaded;
16009 	dtrace_cpu_init = dtrace_cpu_setup_initial;
16010 	dtrace_helpers_cleanup = dtrace_helpers_destroy;
16011 	dtrace_helpers_fork = dtrace_helpers_duplicate;
16012 	dtrace_cpustart_init = dtrace_suspend;
16013 	dtrace_cpustart_fini = dtrace_resume;
16014 	dtrace_debugger_init = dtrace_suspend;
16015 	dtrace_debugger_fini = dtrace_resume;
16016 
16017 	register_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);
16018 
16019 	ASSERT(MUTEX_HELD(&cpu_lock));
16020 
16021 	dtrace_arena = vmem_create("dtrace", (void *)1, UINT32_MAX, 1,
16022 	    NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
16023 	dtrace_minor = vmem_create("dtrace_minor", (void *)DTRACEMNRN_CLONE,
16024 	    UINT32_MAX - DTRACEMNRN_CLONE, 1, NULL, NULL, NULL, 0,
16025 	    VM_SLEEP | VMC_IDENTIFIER);
16026 	dtrace_taskq = taskq_create("dtrace_taskq", 1, maxclsyspri,
16027 	    1, INT_MAX, 0);
16028 
16029 	dtrace_state_cache = kmem_cache_create("dtrace_state_cache",
16030 	    sizeof (dtrace_dstate_percpu_t) * NCPU, DTRACE_STATE_ALIGN,
16031 	    NULL, NULL, NULL, NULL, NULL, 0);
16032 
16033 	ASSERT(MUTEX_HELD(&cpu_lock));
16034 	dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod),
16035 	    offsetof(dtrace_probe_t, dtpr_nextmod),
16036 	    offsetof(dtrace_probe_t, dtpr_prevmod));
16037 
16038 	dtrace_byfunc = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_func),
16039 	    offsetof(dtrace_probe_t, dtpr_nextfunc),
16040 	    offsetof(dtrace_probe_t, dtpr_prevfunc));
16041 
16042 	dtrace_byname = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_name),
16043 	    offsetof(dtrace_probe_t, dtpr_nextname),
16044 	    offsetof(dtrace_probe_t, dtpr_prevname));
16045 
16046 	if (dtrace_retain_max < 1) {
16047 		cmn_err(CE_WARN, "illegal value (%lu) for dtrace_retain_max; "
16048 		    "setting to 1", dtrace_retain_max);
16049 		dtrace_retain_max = 1;
16050 	}
16051 
16052 	/*
16053 	 * Now discover our toxic ranges.
16054 	 */
16055 	dtrace_toxic_ranges(dtrace_toxrange_add);
16056 
16057 	/*
16058 	 * Before we register ourselves as a provider to our own framework,
16059 	 * we would like to assert that dtrace_provider is NULL -- but that's
16060 	 * not true if we were loaded as a dependency of a DTrace provider.
16061 	 * Once we've registered, we can assert that dtrace_provider is our
16062 	 * pseudo provider.
16063 	 */
16064 	(void) dtrace_register("dtrace", &dtrace_provider_attr,
16065 	    DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id);
16066 
16067 	ASSERT(dtrace_provider != NULL);
16068 	ASSERT((dtrace_provider_id_t)dtrace_provider == id);
16069 
16070 	dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t)
16071 	    dtrace_provider, NULL, NULL, "BEGIN", 0, NULL);
16072 	dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t)
16073 	    dtrace_provider, NULL, NULL, "END", 0, NULL);
16074 	dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t)
16075 	    dtrace_provider, NULL, NULL, "ERROR", 1, NULL);
16076 
16077 	dtrace_anon_property();
16078 	mutex_exit(&cpu_lock);
16079 
16080 	/*
16081 	 * If there are already providers, we must ask them to provide their
16082 	 * probes, and then match any anonymous enabling against them.  Note
16083 	 * that there should be no other retained enablings at this time:
16084 	 * the only retained enablings at this time should be the anonymous
16085 	 * enabling.
16086 	 */
16087 	if (dtrace_anon.dta_enabling != NULL) {
16088 		ASSERT(dtrace_retained == dtrace_anon.dta_enabling);
16089 
16090 		dtrace_enabling_provide(NULL);
16091 		state = dtrace_anon.dta_state;
16092 
16093 		/*
16094 		 * We couldn't hold cpu_lock across the above call to
16095 		 * dtrace_enabling_provide(), but we must hold it to actually
16096 		 * enable the probes.  We have to drop all of our locks, pick
16097 		 * up cpu_lock, and regain our locks before matching the
16098 		 * retained anonymous enabling.
16099 		 */
16100 		mutex_exit(&dtrace_lock);
16101 		mutex_exit(&dtrace_provider_lock);
16102 
16103 		mutex_enter(&cpu_lock);
16104 		mutex_enter(&dtrace_provider_lock);
16105 		mutex_enter(&dtrace_lock);
16106 
16107 		if ((enab = dtrace_anon.dta_enabling) != NULL)
16108 			(void) dtrace_enabling_match(enab, NULL);
16109 
16110 		mutex_exit(&cpu_lock);
16111 	}
16112 
16113 	mutex_exit(&dtrace_lock);
16114 	mutex_exit(&dtrace_provider_lock);
16115 
16116 	if (state != NULL) {
16117 		/*
16118 		 * If we created any anonymous state, set it going now.
16119 		 */
16120 		(void) dtrace_state_go(state, &dtrace_anon.dta_beganon);
16121 	}
16122 
16123 	return (DDI_SUCCESS);
16124 }
16125 
16126 /*ARGSUSED*/
16127 static int
16128 dtrace_open(dev_t *devp, int flag, int otyp, cred_t *cred_p)
16129 {
16130 	dtrace_state_t *state;
16131 	uint32_t priv;
16132 	uid_t uid;
16133 	zoneid_t zoneid;
16134 
16135 	if (getminor(*devp) == DTRACEMNRN_HELPER)
16136 		return (0);
16137 
16138 	/*
16139 	 * If this wasn't an open with the "helper" minor, then it must be
16140 	 * the "dtrace" minor.
16141 	 */
16142 	if (getminor(*devp) != DTRACEMNRN_DTRACE)
16143 		return (ENXIO);
16144 
16145 	/*
16146 	 * If no DTRACE_PRIV_* bits are set in the credential, then the
16147 	 * caller lacks sufficient permission to do anything with DTrace.
16148 	 */
16149 	dtrace_cred2priv(cred_p, &priv, &uid, &zoneid);
16150 	if (priv == DTRACE_PRIV_NONE)
16151 		return (EACCES);
16152 
16153 	/*
16154 	 * Ask all providers to provide all their probes.
16155 	 */
16156 	mutex_enter(&dtrace_provider_lock);
16157 	dtrace_probe_provide(NULL, NULL);
16158 	mutex_exit(&dtrace_provider_lock);
16159 
16160 	mutex_enter(&cpu_lock);
16161 	mutex_enter(&dtrace_lock);
16162 	dtrace_opens++;
16163 	dtrace_membar_producer();
16164 
16165 	/*
16166 	 * If the kernel debugger is active (that is, if the kernel debugger
16167 	 * modified text in some way), we won't allow the open.
16168 	 */
16169 	if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) {
16170 		dtrace_opens--;
16171 		mutex_exit(&cpu_lock);
16172 		mutex_exit(&dtrace_lock);
16173 		return (EBUSY);
16174 	}
16175 
16176 	if (dtrace_helptrace_enable && dtrace_helptrace_buffer == NULL) {
16177 		/*
16178 		 * If DTrace helper tracing is enabled, we need to allocate the
16179 		 * trace buffer and initialize the values.
16180 		 */
16181 		dtrace_helptrace_buffer =
16182 		    kmem_zalloc(dtrace_helptrace_bufsize, KM_SLEEP);
16183 		dtrace_helptrace_next = 0;
16184 		dtrace_helptrace_wrapped = 0;
16185 		dtrace_helptrace_enable = 0;
16186 	}
16187 
16188 	state = dtrace_state_create(devp, cred_p);
16189 	mutex_exit(&cpu_lock);
16190 
16191 	if (state == NULL) {
16192 		if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL)
16193 			(void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
16194 		mutex_exit(&dtrace_lock);
16195 		return (EAGAIN);
16196 	}
16197 
16198 	mutex_exit(&dtrace_lock);
16199 
16200 	return (0);
16201 }
16202 
16203 /*ARGSUSED*/
16204 static int
16205 dtrace_close(dev_t dev, int flag, int otyp, cred_t *cred_p)
16206 {
16207 	minor_t minor = getminor(dev);
16208 	dtrace_state_t *state;
16209 	dtrace_helptrace_t *buf = NULL;
16210 
16211 	if (minor == DTRACEMNRN_HELPER)
16212 		return (0);
16213 
16214 	state = ddi_get_soft_state(dtrace_softstate, minor);
16215 
16216 	mutex_enter(&cpu_lock);
16217 	mutex_enter(&dtrace_lock);
16218 
16219 	if (state->dts_anon) {
16220 		/*
16221 		 * There is anonymous state. Destroy that first.
16222 		 */
16223 		ASSERT(dtrace_anon.dta_state == NULL);
16224 		dtrace_state_destroy(state->dts_anon);
16225 	}
16226 
16227 	if (dtrace_helptrace_disable) {
16228 		/*
16229 		 * If we have been told to disable helper tracing, set the
16230 		 * buffer to NULL before calling into dtrace_state_destroy();
16231 		 * we take advantage of its dtrace_sync() to know that no
16232 		 * CPU is in probe context with enabled helper tracing
16233 		 * after it returns.
16234 		 */
16235 		buf = dtrace_helptrace_buffer;
16236 		dtrace_helptrace_buffer = NULL;
16237 	}
16238 
16239 	dtrace_state_destroy(state);
16240 	ASSERT(dtrace_opens > 0);
16241 
16242 	/*
16243 	 * Only relinquish control of the kernel debugger interface when there
16244 	 * are no consumers and no anonymous enablings.
16245 	 */
16246 	if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL)
16247 		(void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
16248 
16249 	if (buf != NULL) {
16250 		kmem_free(buf, dtrace_helptrace_bufsize);
16251 		dtrace_helptrace_disable = 0;
16252 	}
16253 
16254 	mutex_exit(&dtrace_lock);
16255 	mutex_exit(&cpu_lock);
16256 
16257 	return (0);
16258 }
16259 
16260 /*ARGSUSED*/
16261 static int
16262 dtrace_ioctl_helper(int cmd, intptr_t arg, int *rv)
16263 {
16264 	int rval;
16265 	dof_helper_t help, *dhp = NULL;
16266 
16267 	switch (cmd) {
16268 	case DTRACEHIOC_ADDDOF:
16269 		if (copyin((void *)arg, &help, sizeof (help)) != 0) {
16270 			dtrace_dof_error(NULL, "failed to copyin DOF helper");
16271 			return (EFAULT);
16272 		}
16273 
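		/*
		 * The copied-in dof_helper_t contains a pointer to the DOF
		 * itself; point arg at that DOF and fall through to share
		 * the DTRACEHIOC_ADD handling below.
		 */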
16274 		dhp = &help;
16275 		arg = (intptr_t)help.dofhp_dof;
16276 		/*FALLTHROUGH*/
16277 
16278 	case DTRACEHIOC_ADD: {
16279 		dof_hdr_t *dof = dtrace_dof_copyin(arg, &rval);
16280 
16281 		if (dof == NULL)
16282 			return (rval);
16283 
16284 		mutex_enter(&dtrace_lock);
16285 
16286 		/*
16287 		 * dtrace_helper_slurp() takes responsibility for the dof --
16288 		 * it may free it now or it may save it and free it later.
16289 		 */
16290 		if ((rval = dtrace_helper_slurp(dof, dhp)) != -1) {
16291 			*rv = rval;
16292 			rval = 0;
16293 		} else {
16294 			rval = EINVAL;
16295 		}
16296 
16297 		mutex_exit(&dtrace_lock);
16298 		return (rval);
16299 	}
16300 
16301 	case DTRACEHIOC_REMOVE: {
16302 		mutex_enter(&dtrace_lock);
16303 		rval = dtrace_helper_destroygen(arg);
16304 		mutex_exit(&dtrace_lock);
16305 
16306 		return (rval);
16307 	}
16308 
16309 	default:
16310 		break;
16311 	}
16312 
16313 	return (ENOTTY);
16314 }
16315 
16316 /*ARGSUSED*/
16317 static int
16318 dtrace_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv)
16319 {
16320 	minor_t minor = getminor(dev);
16321 	dtrace_state_t *state;
16322 	int rval;
16323 
16324 	if (minor == DTRACEMNRN_HELPER)
16325 		return (dtrace_ioctl_helper(cmd, arg, rv));
16326 
16327 	state = ddi_get_soft_state(dtrace_softstate, minor);
16328 
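	/*
	 * If this consumer has grabbed the anonymous state, all operations
	 * are redirected to that state.
	 */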
16329 	if (state->dts_anon) {
16330 		ASSERT(dtrace_anon.dta_state == NULL);
16331 		state = state->dts_anon;
16332 	}
16333 
16334 	switch (cmd) {
16335 	case DTRACEIOC_PROVIDER: {
16336 		dtrace_providerdesc_t pvd;
16337 		dtrace_provider_t *pvp;
16338 
16339 		if (copyin((void *)arg, &pvd, sizeof (pvd)) != 0)
16340 			return (EFAULT);
16341 
16342 		pvd.dtvd_name[DTRACE_PROVNAMELEN - 1] = '\0';
16343 		mutex_enter(&dtrace_provider_lock);
16344 
16345 		for (pvp = dtrace_provider; pvp != NULL; pvp = pvp->dtpv_next) {
16346 			if (strcmp(pvp->dtpv_name, pvd.dtvd_name) == 0)
16347 				break;
16348 		}
16349 
16350 		mutex_exit(&dtrace_provider_lock);
16351 
16352 		if (pvp == NULL)
16353 			return (ESRCH);
16354 
16355 		bcopy(&pvp->dtpv_priv, &pvd.dtvd_priv, sizeof (dtrace_ppriv_t));
16356 		bcopy(&pvp->dtpv_attr, &pvd.dtvd_attr, sizeof (dtrace_pattr_t));
16357 		if (copyout(&pvd, (void *)arg, sizeof (pvd)) != 0)
16358 			return (EFAULT);
16359 
16360 		return (0);
16361 	}
16362 
16363 	case DTRACEIOC_EPROBE: {
16364 		dtrace_eprobedesc_t epdesc;
16365 		dtrace_ecb_t *ecb;
16366 		dtrace_action_t *act;
16367 		void *buf;
16368 		size_t size;
16369 		uintptr_t dest;
16370 		int nrecs;
16371 
16372 		if (copyin((void *)arg, &epdesc, sizeof (epdesc)) != 0)
16373 			return (EFAULT);
16374 
16375 		mutex_enter(&dtrace_lock);
16376 
16377 		if ((ecb = dtrace_epid2ecb(state, epdesc.dtepd_epid)) == NULL) {
16378 			mutex_exit(&dtrace_lock);
16379 			return (EINVAL);
16380 		}
16381 
16382 		if (ecb->dte_probe == NULL) {
16383 			mutex_exit(&dtrace_lock);
16384 			return (EINVAL);
16385 		}
16386 
16387 		epdesc.dtepd_probeid = ecb->dte_probe->dtpr_id;
16388 		epdesc.dtepd_uarg = ecb->dte_uarg;
16389 		epdesc.dtepd_size = ecb->dte_size;
16390 
16391 		nrecs = epdesc.dtepd_nrecs;
16392 		epdesc.dtepd_nrecs = 0;
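		/*
		 * Count the records that will be copied out; aggregating
		 * actions and actions within an aggregation tuple are
		 * skipped.
		 */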
16393 		for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
16394 			if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple)
16395 				continue;
16396 
16397 			epdesc.dtepd_nrecs++;
16398 		}
16399 
16400 		/*
16401 		 * Now that we have the size, we need to allocate a temporary
16402 		 * buffer in which to store the complete description.  We need
16403 		 * the temporary buffer to be able to drop dtrace_lock
16404 		 * across the copyout(), below.
16405 		 */
16406 		size = sizeof (dtrace_eprobedesc_t) +
16407 		    (epdesc.dtepd_nrecs * sizeof (dtrace_recdesc_t));
16408 
16409 		buf = kmem_alloc(size, KM_SLEEP);
16410 		dest = (uintptr_t)buf;
16411 
16412 		bcopy(&epdesc, (void *)dest, sizeof (epdesc));
16413 		dest += offsetof(dtrace_eprobedesc_t, dtepd_rec[0]);
16414 
16415 		for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
16416 			if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple)
16417 				continue;
16418 
16419 			if (nrecs-- == 0)
16420 				break;
16421 
16422 			bcopy(&act->dta_rec, (void *)dest,
16423 			    sizeof (dtrace_recdesc_t));
16424 			dest += sizeof (dtrace_recdesc_t);
16425 		}
16426 
16427 		mutex_exit(&dtrace_lock);
16428 
16429 		if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) {
16430 			kmem_free(buf, size);
16431 			return (EFAULT);
16432 		}
16433 
16434 		kmem_free(buf, size);
16435 		return (0);
16436 	}
16437 
16438 	case DTRACEIOC_AGGDESC: {
16439 		dtrace_aggdesc_t aggdesc;
16440 		dtrace_action_t *act;
16441 		dtrace_aggregation_t *agg;
16442 		int nrecs;
16443 		uint32_t offs;
16444 		dtrace_recdesc_t *lrec;
16445 		void *buf;
16446 		size_t size;
16447 		uintptr_t dest;
16448 
16449 		if (copyin((void *)arg, &aggdesc, sizeof (aggdesc)) != 0)
16450 			return (EFAULT);
16451 
16452 		mutex_enter(&dtrace_lock);
16453 
16454 		if ((agg = dtrace_aggid2agg(state, aggdesc.dtagd_id)) == NULL) {
16455 			mutex_exit(&dtrace_lock);
16456 			return (EINVAL);
16457 		}
16458 
16459 		aggdesc.dtagd_epid = agg->dtag_ecb->dte_epid;
16460 
16461 		nrecs = aggdesc.dtagd_nrecs;
16462 		aggdesc.dtagd_nrecs = 0;
16463 
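		/*
		 * The size of the aggregation description spans from the
		 * aggregation's base offset to the end of its last record.
		 */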
16464 		offs = agg->dtag_base;
16465 		lrec = &agg->dtag_action.dta_rec;
16466 		aggdesc.dtagd_size = lrec->dtrd_offset + lrec->dtrd_size - offs;
16467 
16468 		for (act = agg->dtag_first; ; act = act->dta_next) {
16469 			ASSERT(act->dta_intuple ||
16470 			    DTRACEACT_ISAGG(act->dta_kind));
16471 
16472 			/*
16473 			 * If this action has a record size of zero, it
16474 			 * denotes an argument to the aggregating action.
16475 			 * Because the presence of this record doesn't (or
16476 			 * shouldn't) affect the way the data is interpreted,
16477 			 * we don't copy it out to save user-level the
16478 		 * we don't copy it out, sparing user level the
16479 		 * confusion of dealing with a zero-length record.
16480 			if (act->dta_rec.dtrd_size == 0) {
16481 				ASSERT(agg->dtag_hasarg);
16482 				continue;
16483 			}
16484 
16485 			aggdesc.dtagd_nrecs++;
16486 
16487 			if (act == &agg->dtag_action)
16488 				break;
16489 		}
16490 
16491 		/*
16492 		 * Now that we have the size, we need to allocate a temporary
16493 		 * buffer in which to store the complete description.  We need
16494 		 * the temporary buffer to be able to drop dtrace_lock
16495 		 * across the copyout(), below.
16496 		 */
16497 		size = sizeof (dtrace_aggdesc_t) +
16498 		    (aggdesc.dtagd_nrecs * sizeof (dtrace_recdesc_t));
16499 
16500 		buf = kmem_alloc(size, KM_SLEEP);
16501 		dest = (uintptr_t)buf;
16502 
16503 		bcopy(&aggdesc, (void *)dest, sizeof (aggdesc));
16504 		dest += offsetof(dtrace_aggdesc_t, dtagd_rec[0]);
16505 
16506 		for (act = agg->dtag_first; ; act = act->dta_next) {
16507 			dtrace_recdesc_t rec = act->dta_rec;
16508 
16509 			/*
16510 			 * See the comment in the above loop for why we pass
16511 			 * over zero-length records.
16512 			 */
16513 			if (rec.dtrd_size == 0) {
16514 				ASSERT(agg->dtag_hasarg);
16515 				continue;
16516 			}
16517 
16518 			if (nrecs-- == 0)
16519 				break;
16520 
16521 			rec.dtrd_offset -= offs;
16522 			bcopy(&rec, (void *)dest, sizeof (rec));
16523 			dest += sizeof (dtrace_recdesc_t);
16524 
16525 			if (act == &agg->dtag_action)
16526 				break;
16527 		}
16528 
16529 		mutex_exit(&dtrace_lock);
16530 
16531 		if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) {
16532 			kmem_free(buf, size);
16533 			return (EFAULT);
16534 		}
16535 
16536 		kmem_free(buf, size);
16537 		return (0);
16538 	}
16539 
16540 	case DTRACEIOC_ENABLE: {
16541 		dof_hdr_t *dof;
16542 		dtrace_enabling_t *enab = NULL;
16543 		dtrace_vstate_t *vstate;
16544 		int err = 0;
16545 
16546 		*rv = 0;
16547 
16548 		/*
16549 		 * If a NULL argument has been passed, we take this as our
16550 		 * cue to reevaluate our enablings.
16551 		 */
16552 		if (arg == NULL) {
16553 			dtrace_enabling_matchall();
16554 
16555 			return (0);
16556 		}
16557 
16558 		if ((dof = dtrace_dof_copyin(arg, &rval)) == NULL)
16559 			return (rval);
16560 
16561 		mutex_enter(&cpu_lock);
16562 		mutex_enter(&dtrace_lock);
16563 		vstate = &state->dts_vstate;
16564 
16565 		if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) {
16566 			mutex_exit(&dtrace_lock);
16567 			mutex_exit(&cpu_lock);
16568 			dtrace_dof_destroy(dof);
16569 			return (EBUSY);
16570 		}
16571 
16572 		if (dtrace_dof_slurp(dof, vstate, cr, &enab, 0, B_TRUE) != 0) {
16573 			mutex_exit(&dtrace_lock);
16574 			mutex_exit(&cpu_lock);
16575 			dtrace_dof_destroy(dof);
16576 			return (EINVAL);
16577 		}
16578 
16579 		if ((rval = dtrace_dof_options(dof, state)) != 0) {
16580 			dtrace_enabling_destroy(enab);
16581 			mutex_exit(&dtrace_lock);
16582 			mutex_exit(&cpu_lock);
16583 			dtrace_dof_destroy(dof);
16584 			return (rval);
16585 		}
16586 
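		/*
		 * If the enabling matched successfully, retain it so that it
		 * may be matched again later; otherwise destroy it.
		 */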
16587 		if ((err = dtrace_enabling_match(enab, rv)) == 0) {
16588 			err = dtrace_enabling_retain(enab);
16589 		} else {
16590 			dtrace_enabling_destroy(enab);
16591 		}
16592 
16593 		mutex_exit(&cpu_lock);
16594 		mutex_exit(&dtrace_lock);
16595 		dtrace_dof_destroy(dof);
16596 
16597 		return (err);
16598 	}
16599 
16600 	case DTRACEIOC_REPLICATE: {
16601 		dtrace_repldesc_t desc;
16602 		dtrace_probedesc_t *match = &desc.dtrpd_match;
16603 		dtrace_probedesc_t *create = &desc.dtrpd_create;
16604 		int err;
16605 
16606 		if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
16607 			return (EFAULT);
16608 
16609 		match->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
16610 		match->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
16611 		match->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
16612 		match->dtpd_name[DTRACE_NAMELEN - 1] = '\0';
16613 
16614 		create->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
16615 		create->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
16616 		create->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
16617 		create->dtpd_name[DTRACE_NAMELEN - 1] = '\0';
16618 
16619 		mutex_enter(&dtrace_lock);
16620 		err = dtrace_enabling_replicate(state, match, create);
16621 		mutex_exit(&dtrace_lock);
16622 
16623 		return (err);
16624 	}
16625 
16626 	case DTRACEIOC_PROBEMATCH:
16627 	case DTRACEIOC_PROBES: {
16628 		dtrace_probe_t *probe = NULL;
16629 		dtrace_probedesc_t desc;
16630 		dtrace_probekey_t pkey;
16631 		dtrace_id_t i;
16632 		int m = 0;
16633 		uint32_t priv;
16634 		uid_t uid;
16635 		zoneid_t zoneid;
16636 
16637 		if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
16638 			return (EFAULT);
16639 
16640 		desc.dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
16641 		desc.dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
16642 		desc.dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
16643 		desc.dtpd_name[DTRACE_NAMELEN - 1] = '\0';
16644 
16645 		/*
16646 		 * Before we attempt to match this probe, we want to give
16647 		 * all providers the opportunity to provide it.
16648 		 */
16649 		if (desc.dtpd_id == DTRACE_IDNONE) {
16650 			mutex_enter(&dtrace_provider_lock);
16651 			dtrace_probe_provide(&desc, NULL);
16652 			mutex_exit(&dtrace_provider_lock);
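			/*
			 * DTRACE_IDNONE is zero; bump the identifier so that
			 * the search below begins at the first probe.
			 */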
16653 			desc.dtpd_id++;
16654 		}
16655 
16656 		if (cmd == DTRACEIOC_PROBEMATCH)  {
16657 			dtrace_probekey(&desc, &pkey);
16658 			pkey.dtpk_id = DTRACE_IDNONE;
16659 		}
16660 
16661 		dtrace_cred2priv(cr, &priv, &uid, &zoneid);
16662 
16663 		mutex_enter(&dtrace_lock);
16664 
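		/*
		 * Probe identifiers are 1-based, so dtrace_probes[] is
		 * indexed by (id - 1).  Walk forward from the requested
		 * identifier to the first probe that matches the key (for
		 * PROBEMATCH) or that the caller's privileges permit it to
		 * see (for PROBES).
		 */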
16665 		if (cmd == DTRACEIOC_PROBEMATCH) {
16666 			for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) {
16667 				if ((probe = dtrace_probes[i - 1]) != NULL &&
16668 				    (m = dtrace_match_probe(probe, &pkey,
16669 				    priv, uid, zoneid)) != 0)
16670 					break;
16671 			}
16672 
16673 			if (m < 0) {
16674 				mutex_exit(&dtrace_lock);
16675 				return (EINVAL);
16676 			}
16677 
16678 		} else {
16679 			for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) {
16680 				if ((probe = dtrace_probes[i - 1]) != NULL &&
16681 				    dtrace_match_priv(probe, priv, uid, zoneid))
16682 					break;
16683 			}
16684 		}
16685 
16686 		if (probe == NULL) {
16687 			mutex_exit(&dtrace_lock);
16688 			return (ESRCH);
16689 		}
16690 
16691 		dtrace_probe_description(probe, &desc);
16692 		mutex_exit(&dtrace_lock);
16693 
16694 		if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
16695 			return (EFAULT);
16696 
16697 		return (0);
16698 	}
16699 
16700 	case DTRACEIOC_PROBEARG: {
16701 		dtrace_argdesc_t desc;
16702 		dtrace_probe_t *probe;
16703 		dtrace_provider_t *prov;
16704 
16705 		if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
16706 			return (EFAULT);
16707 
16708 		if (desc.dtargd_id == DTRACE_IDNONE)
16709 			return (EINVAL);
16710 
16711 		if (desc.dtargd_ndx == DTRACE_ARGNONE)
16712 			return (EINVAL);
16713 
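		/*
		 * dtrace_lock is needed only to look up the probe and is
		 * dropped before calling into the provider; the provider
		 * lock and mod_lock remain held across the getargdesc()
		 * call.
		 */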
16714 		mutex_enter(&dtrace_provider_lock);
16715 		mutex_enter(&mod_lock);
16716 		mutex_enter(&dtrace_lock);
16717 
16718 		if (desc.dtargd_id > dtrace_nprobes) {
16719 			mutex_exit(&dtrace_lock);
16720 			mutex_exit(&mod_lock);
16721 			mutex_exit(&dtrace_provider_lock);
16722 			return (EINVAL);
16723 		}
16724 
16725 		if ((probe = dtrace_probes[desc.dtargd_id - 1]) == NULL) {
16726 			mutex_exit(&dtrace_lock);
16727 			mutex_exit(&mod_lock);
16728 			mutex_exit(&dtrace_provider_lock);
16729 			return (EINVAL);
16730 		}
16731 
16732 		mutex_exit(&dtrace_lock);
16733 
16734 		prov = probe->dtpr_provider;
16735 
16736 		if (prov->dtpv_pops.dtps_getargdesc == NULL) {
16737 			/*
16738 			 * There isn't any typed information for this probe.
16739 			 * Set the argument number to DTRACE_ARGNONE.
16740 			 */
16741 			desc.dtargd_ndx = DTRACE_ARGNONE;
16742 		} else {
16743 			desc.dtargd_native[0] = '\0';
16744 			desc.dtargd_xlate[0] = '\0';
16745 			desc.dtargd_mapping = desc.dtargd_ndx;
16746 
16747 			prov->dtpv_pops.dtps_getargdesc(prov->dtpv_arg,
16748 			    probe->dtpr_id, probe->dtpr_arg, &desc);
16749 		}
16750 
16751 		mutex_exit(&mod_lock);
16752 		mutex_exit(&dtrace_provider_lock);
16753 
16754 		if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
16755 			return (EFAULT);
16756 
16757 		return (0);
16758 	}
16759 
16760 	case DTRACEIOC_GO: {
16761 		processorid_t cpuid;
16762 		rval = dtrace_state_go(state, &cpuid);
16763 
16764 		if (rval != 0)
16765 			return (rval);
16766 
16767 		if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0)
16768 			return (EFAULT);
16769 
16770 		return (0);
16771 	}
16772 
16773 	case DTRACEIOC_STOP: {
16774 		processorid_t cpuid;
16775 
16776 		mutex_enter(&dtrace_lock);
16777 		rval = dtrace_state_stop(state, &cpuid);
16778 		mutex_exit(&dtrace_lock);
16779 
16780 		if (rval != 0)
16781 			return (rval);
16782 
16783 		if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0)
16784 			return (EFAULT);
16785 
16786 		return (0);
16787 	}
16788 
16789 	case DTRACEIOC_DOFGET: {
16790 		dof_hdr_t hdr, *dof;
16791 		uint64_t len;
16792 
16793 		if (copyin((void *)arg, &hdr, sizeof (hdr)) != 0)
16794 			return (EFAULT);
16795 
16796 		mutex_enter(&dtrace_lock);
16797 		dof = dtrace_dof_create(state);
16798 		mutex_exit(&dtrace_lock);
16799 
16800 		len = MIN(hdr.dofh_loadsz, dof->dofh_loadsz);
16801 		rval = copyout(dof, (void *)arg, len);
16802 		dtrace_dof_destroy(dof);
16803 
16804 		return (rval == 0 ? 0 : EFAULT);
16805 	}
16806 
16807 	case DTRACEIOC_AGGSNAP:
16808 	case DTRACEIOC_BUFSNAP: {
16809 		dtrace_bufdesc_t desc;
16810 		caddr_t cached;
16811 		dtrace_buffer_t *buf;
16812 
16813 		if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
16814 			return (EFAULT);
16815 
16816 		if (desc.dtbd_cpu < 0 || desc.dtbd_cpu >= NCPU)
16817 			return (EINVAL);
16818 
16819 		mutex_enter(&dtrace_lock);
16820 
16821 		if (cmd == DTRACEIOC_BUFSNAP) {
16822 			buf = &state->dts_buffer[desc.dtbd_cpu];
16823 		} else {
16824 			buf = &state->dts_aggbuffer[desc.dtbd_cpu];
16825 		}
16826 
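		/*
		 * Ring and fill buffers are never switched; they may only be
		 * copied out once tracing has stopped, and they are marked
		 * consumed once they have been read.
		 */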
16827 		if (buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL)) {
16828 			size_t sz = buf->dtb_offset;
16829 
16830 			if (state->dts_activity != DTRACE_ACTIVITY_STOPPED) {
16831 				mutex_exit(&dtrace_lock);
16832 				return (EBUSY);
16833 			}
16834 
16835 			/*
16836 			 * If this buffer has already been consumed, we're
16837 			 * going to indicate that there's nothing left here
16838 			 * to consume.
16839 			 */
16840 			if (buf->dtb_flags & DTRACEBUF_CONSUMED) {
16841 				mutex_exit(&dtrace_lock);
16842 
16843 				desc.dtbd_size = 0;
16844 				desc.dtbd_drops = 0;
16845 				desc.dtbd_errors = 0;
16846 				desc.dtbd_oldest = 0;
16847 				sz = sizeof (desc);
16848 
16849 				if (copyout(&desc, (void *)arg, sz) != 0)
16850 					return (EFAULT);
16851 
16852 				return (0);
16853 			}
16854 
16855 			/*
16856 			 * If this is a ring buffer that has wrapped, we want
16857 			 * to copy the whole thing out.
16858 			 */
16859 			if (buf->dtb_flags & DTRACEBUF_WRAPPED) {
16860 				dtrace_buffer_polish(buf);
16861 				sz = buf->dtb_size;
16862 			}
16863 
16864 			if (copyout(buf->dtb_tomax, desc.dtbd_data, sz) != 0) {
16865 				mutex_exit(&dtrace_lock);
16866 				return (EFAULT);
16867 			}
16868 
16869 			desc.dtbd_size = sz;
16870 			desc.dtbd_drops = buf->dtb_drops;
16871 			desc.dtbd_errors = buf->dtb_errors;
16872 			desc.dtbd_oldest = buf->dtb_xamot_offset;
16873 			desc.dtbd_timestamp = dtrace_gethrtime();
16874 
16875 			mutex_exit(&dtrace_lock);
16876 
16877 			if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
16878 				return (EFAULT);
16879 
16880 			buf->dtb_flags |= DTRACEBUF_CONSUMED;
16881 
16882 			return (0);
16883 		}
16884 
16885 		if (buf->dtb_tomax == NULL) {
16886 			ASSERT(buf->dtb_xamot == NULL);
16887 			mutex_exit(&dtrace_lock);
16888 			return (ENOENT);
16889 		}
16890 
16891 		cached = buf->dtb_tomax;
16892 		ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));
16893 
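		/*
		 * Cross call to the target CPU to switch the active and
		 * inactive buffers; the pre-switch active buffer (cached
		 * above) becomes the snapshot that is copied out below.
		 */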
16894 		dtrace_xcall(desc.dtbd_cpu,
16895 		    (dtrace_xcall_t)dtrace_buffer_switch, buf);
16896 
16897 		state->dts_errors += buf->dtb_xamot_errors;
16898 
16899 		/*
16900 		 * If the buffers did not actually switch, then the cross call
16901 		 * did not take place -- presumably because the given CPU is
16902 		 * not in the ready set.  If this is the case, we'll return
16903 		 * ENOENT.
16904 		 */
16905 		if (buf->dtb_tomax == cached) {
16906 			ASSERT(buf->dtb_xamot != cached);
16907 			mutex_exit(&dtrace_lock);
16908 			return (ENOENT);
16909 		}
16910 
16911 		ASSERT(cached == buf->dtb_xamot);
16912 
16913 		/*
16914 		 * We have our snapshot; now copy it out.
16915 		 */
16916 		if (copyout(buf->dtb_xamot, desc.dtbd_data,
16917 		    buf->dtb_xamot_offset) != 0) {
16918 			mutex_exit(&dtrace_lock);
16919 			return (EFAULT);
16920 		}
16921 
16922 		desc.dtbd_size = buf->dtb_xamot_offset;
16923 		desc.dtbd_drops = buf->dtb_xamot_drops;
16924 		desc.dtbd_errors = buf->dtb_xamot_errors;
16925 		desc.dtbd_oldest = 0;
16926 		desc.dtbd_timestamp = buf->dtb_switched;
16927 
16928 		mutex_exit(&dtrace_lock);
16929 
16930 		/*
16931 		 * Finally, copy out the buffer description.
16932 		 */
16933 		if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
16934 			return (EFAULT);
16935 
16936 		return (0);
16937 	}
16938 
16939 	case DTRACEIOC_CONF: {
16940 		dtrace_conf_t conf;
16941 
16942 		bzero(&conf, sizeof (conf));
16943 		conf.dtc_difversion = DIF_VERSION;
16944 		conf.dtc_difintregs = DIF_DIR_NREGS;
16945 		conf.dtc_diftupregs = DIF_DTR_NREGS;
16946 		conf.dtc_ctfmodel = CTF_MODEL_NATIVE;
16947 
16948 		if (copyout(&conf, (void *)arg, sizeof (conf)) != 0)
16949 			return (EFAULT);
16950 
16951 		return (0);
16952 	}
16953 
16954 	case DTRACEIOC_STATUS: {
16955 		dtrace_status_t stat;
16956 		dtrace_dstate_t *dstate;
16957 		int i, j;
16958 		uint64_t nerrs;
16959 
16960 		/*
16961 		 * See the comment in dtrace_state_deadman() for the reason
16962 		 * for setting dts_laststatus to INT64_MAX before setting
16963 		 * it to the correct value.
16964 		 */
16965 		state->dts_laststatus = INT64_MAX;
16966 		dtrace_membar_producer();
16967 		state->dts_laststatus = dtrace_gethrtime();
16968 
16969 		bzero(&stat, sizeof (stat));
16970 
16971 		mutex_enter(&dtrace_lock);
16972 
16973 		if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) {
16974 			mutex_exit(&dtrace_lock);
16975 			return (ENOENT);
16976 		}
16977 
16978 		if (state->dts_activity == DTRACE_ACTIVITY_DRAINING)
16979 			stat.dtst_exiting = 1;
16980 
16981 		nerrs = state->dts_errors;
16982 		dstate = &state->dts_vstate.dtvs_dynvars;
16983 
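		/*
		 * Sum the per-CPU dynamic variable drop counts, speculation
		 * drops and buffer error counts, and note any principal
		 * buffers that have filled.
		 */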
16984 		for (i = 0; i < NCPU; i++) {
16985 			dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[i];
16986 
16987 			stat.dtst_dyndrops += dcpu->dtdsc_drops;
16988 			stat.dtst_dyndrops_dirty += dcpu->dtdsc_dirty_drops;
16989 			stat.dtst_dyndrops_rinsing += dcpu->dtdsc_rinsing_drops;
16990 
16991 			if (state->dts_buffer[i].dtb_flags & DTRACEBUF_FULL)
16992 				stat.dtst_filled++;
16993 
16994 			nerrs += state->dts_buffer[i].dtb_errors;
16995 
16996 			for (j = 0; j < state->dts_nspeculations; j++) {
16997 				dtrace_speculation_t *spec;
16998 				dtrace_buffer_t *buf;
16999 
17000 				spec = &state->dts_speculations[j];
17001 				buf = &spec->dtsp_buffer[i];
17002 				stat.dtst_specdrops += buf->dtb_xamot_drops;
17003 			}
17004 		}
17005 
17006 		stat.dtst_specdrops_busy = state->dts_speculations_busy;
17007 		stat.dtst_specdrops_unavail = state->dts_speculations_unavail;
17008 		stat.dtst_stkstroverflows = state->dts_stkstroverflows;
17009 		stat.dtst_dblerrors = state->dts_dblerrors;
17010 		stat.dtst_killed =
17011 		    (state->dts_activity == DTRACE_ACTIVITY_KILLED);
17012 		stat.dtst_errors = nerrs;
17013 
17014 		mutex_exit(&dtrace_lock);
17015 
17016 		if (copyout(&stat, (void *)arg, sizeof (stat)) != 0)
17017 			return (EFAULT);
17018 
17019 		return (0);
17020 	}
17021 
17022 	case DTRACEIOC_FORMAT: {
17023 		dtrace_fmtdesc_t fmt;
17024 		char *str;
17025 		int len;
17026 
17027 		if (copyin((void *)arg, &fmt, sizeof (fmt)) != 0)
17028 			return (EFAULT);
17029 
17030 		mutex_enter(&dtrace_lock);
17031 
17032 		if (fmt.dtfd_format == 0 ||
17033 		    fmt.dtfd_format > state->dts_nformats) {
17034 			mutex_exit(&dtrace_lock);
17035 			return (EINVAL);
17036 		}
17037 
17038 		/*
17039 		 * Format strings are allocated contiguously and they are
17040 		 * never freed; if a format index is less than the number
17041 		 * of formats, we can assert that the format map is non-NULL
17042 		 * and that the format for the specified index is non-NULL.
17043 		 */
17044 		ASSERT(state->dts_formats != NULL);
17045 		str = state->dts_formats[fmt.dtfd_format - 1];
17046 		ASSERT(str != NULL);
17047 
17048 		len = strlen(str) + 1;
17049 
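		/*
		 * If the caller's buffer is too small, return the required
		 * length (in dtfd_length) rather than the format string
		 * itself.
		 */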
17050 		if (len > fmt.dtfd_length) {
17051 			fmt.dtfd_length = len;
17052 
17053 			if (copyout(&fmt, (void *)arg, sizeof (fmt)) != 0) {
17054 				mutex_exit(&dtrace_lock);
17055 				return (EINVAL);
17056 			}
17057 		} else {
17058 			if (copyout(str, fmt.dtfd_string, len) != 0) {
17059 				mutex_exit(&dtrace_lock);
17060 				return (EINVAL);
17061 			}
17062 		}
17063 
17064 		mutex_exit(&dtrace_lock);
17065 		return (0);
17066 	}
17067 
17068 	default:
17069 		break;
17070 	}
17071 
17072 	return (ENOTTY);
17073 }
17074 
17075 /*ARGSUSED*/
17076 static int
17077 dtrace_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
17078 {
17079 	dtrace_state_t *state;
17080 
17081 	switch (cmd) {
17082 	case DDI_DETACH:
17083 		break;
17084 
17085 	case DDI_SUSPEND:
17086 		return (DDI_SUCCESS);
17087 
17088 	default:
17089 		return (DDI_FAILURE);
17090 	}
17091 
17092 	mutex_enter(&cpu_lock);
17093 	mutex_enter(&dtrace_provider_lock);
17094 	mutex_enter(&dtrace_lock);
17095 
17096 	ASSERT(dtrace_opens == 0);
17097 
17098 	if (dtrace_helpers > 0) {
17099 		mutex_exit(&dtrace_provider_lock);
17100 		mutex_exit(&dtrace_lock);
17101 		mutex_exit(&cpu_lock);
17102 		return (DDI_FAILURE);
17103 	}
17104 
17105 	if (dtrace_unregister((dtrace_provider_id_t)dtrace_provider) != 0) {
17106 		mutex_exit(&dtrace_provider_lock);
17107 		mutex_exit(&dtrace_lock);
17108 		mutex_exit(&cpu_lock);
17109 		return (DDI_FAILURE);
17110 	}
17111 
17112 	dtrace_provider = NULL;
17113 
17114 	if ((state = dtrace_anon_grab()) != NULL) {
17115 		/*
17116 		 * If there were ECBs on this state, the provider should
17117 		 * not have been allowed to detach; assert that there are
17118 		 * none.
17119 		 */
17120 		ASSERT(state->dts_necbs == 0);
17121 		dtrace_state_destroy(state);
17122 
17123 		/*
17124 		 * If we're being detached with anonymous state, we need to
17125 		 * indicate to the kernel debugger that DTrace is now inactive.
17126 		 */
17127 		(void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
17128 	}
17129 
17130 	bzero(&dtrace_anon, sizeof (dtrace_anon_t));
17131 	unregister_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);
17132 	dtrace_cpu_init = NULL;
17133 	dtrace_helpers_cleanup = NULL;
17134 	dtrace_helpers_fork = NULL;
17135 	dtrace_cpustart_init = NULL;
17136 	dtrace_cpustart_fini = NULL;
17137 	dtrace_debugger_init = NULL;
17138 	dtrace_debugger_fini = NULL;
17139 	dtrace_modload = NULL;
17140 	dtrace_modunload = NULL;
17141 
17142 	ASSERT(dtrace_getf == 0);
17143 	ASSERT(dtrace_closef == NULL);
17144 
17145 	mutex_exit(&cpu_lock);
17146 
17147 	kmem_free(dtrace_probes, dtrace_nprobes * sizeof (dtrace_probe_t *));
17148 	dtrace_probes = NULL;
17149 	dtrace_nprobes = 0;
17150 
17151 	dtrace_hash_destroy(dtrace_bymod);
17152 	dtrace_hash_destroy(dtrace_byfunc);
17153 	dtrace_hash_destroy(dtrace_byname);
17154 	dtrace_bymod = NULL;
17155 	dtrace_byfunc = NULL;
17156 	dtrace_byname = NULL;
17157 
17158 	kmem_cache_destroy(dtrace_state_cache);
17159 	vmem_destroy(dtrace_minor);
17160 	vmem_destroy(dtrace_arena);
17161 
17162 	if (dtrace_toxrange != NULL) {
17163 		kmem_free(dtrace_toxrange,
17164 		    dtrace_toxranges_max * sizeof (dtrace_toxrange_t));
17165 		dtrace_toxrange = NULL;
17166 		dtrace_toxranges = 0;
17167 		dtrace_toxranges_max = 0;
17168 	}
17169 
17170 	ddi_remove_minor_node(dtrace_devi, NULL);
17171 	dtrace_devi = NULL;
17172 
17173 	ddi_soft_state_fini(&dtrace_softstate);
17174 
17175 	ASSERT(dtrace_vtime_references == 0);
17176 	ASSERT(dtrace_opens == 0);
17177 	ASSERT(dtrace_retained == NULL);
17178 
17179 	mutex_exit(&dtrace_lock);
17180 	mutex_exit(&dtrace_provider_lock);
17181 
17182 	/*
17183 	 * We don't destroy the task queue until after we have dropped our
17184 	 * locks (taskq_destroy() may block on running tasks).  To prevent
17185 	 * attempting to do work after we have effectively detached but before
17186 	 * the task queue has been destroyed, all tasks dispatched via the
17187 	 * task queue must check that DTrace is still attached before
17188 	 * performing any operation.
17189 	 */
17190 	taskq_destroy(dtrace_taskq);
17191 	dtrace_taskq = NULL;
17192 
17193 	return (DDI_SUCCESS);
17194 }
17195 
17196 /*ARGSUSED*/
17197 static int
17198 dtrace_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
17199 {
17200 	int error;
17201 
17202 	switch (infocmd) {
17203 	case DDI_INFO_DEVT2DEVINFO:
17204 		*result = (void *)dtrace_devi;
17205 		error = DDI_SUCCESS;
17206 		break;
17207 	case DDI_INFO_DEVT2INSTANCE:
17208 		*result = (void *)0;
17209 		error = DDI_SUCCESS;
17210 		break;
17211 	default:
17212 		error = DDI_FAILURE;
17213 	}
17214 	return (error);
17215 }
17216 
17217 static struct cb_ops dtrace_cb_ops = {
17218 	dtrace_open,		/* open */
17219 	dtrace_close,		/* close */
17220 	nulldev,		/* strategy */
17221 	nulldev,		/* print */
17222 	nodev,			/* dump */
17223 	nodev,			/* read */
17224 	nodev,			/* write */
17225 	dtrace_ioctl,		/* ioctl */
17226 	nodev,			/* devmap */
17227 	nodev,			/* mmap */
17228 	nodev,			/* segmap */
17229 	nochpoll,		/* poll */
17230 	ddi_prop_op,		/* cb_prop_op */
17231 	0,			/* streamtab  */
17232 	D_NEW | D_MP		/* Driver compatibility flag */
17233 };
17234 
17235 static struct dev_ops dtrace_ops = {
17236 	DEVO_REV,		/* devo_rev */
17237 	0,			/* refcnt */
17238 	dtrace_info,		/* get_dev_info */
17239 	nulldev,		/* identify */
17240 	nulldev,		/* probe */
17241 	dtrace_attach,		/* attach */
17242 	dtrace_detach,		/* detach */
17243 	nodev,			/* reset */
17244 	&dtrace_cb_ops,		/* driver operations */
17245 	NULL,			/* bus operations */
17246 	nodev,			/* dev power */
17247 	ddi_quiesce_not_needed,		/* quiesce */
17248 };
17249 
17250 static struct modldrv modldrv = {
17251 	&mod_driverops,		/* module type (this is a pseudo driver) */
17252 	"Dynamic Tracing",	/* name of module */
17253 	&dtrace_ops,		/* driver ops */
17254 };
17255 
17256 static struct modlinkage modlinkage = {
17257 	MODREV_1,
17258 	(void *)&modldrv,
17259 	NULL
17260 };
17261 
17262 int
17263 _init(void)
17264 {
17265 	return (mod_install(&modlinkage));
17266 }
17267 
17268 int
17269 _info(struct modinfo *modinfop)
17270 {
17271 	return (mod_info(&modlinkage, modinfop));
17272 }
17273 
17274 int
17275 _fini(void)
17276 {
17277 	return (mod_remove(&modlinkage));
17278 }
17279