1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <stdlib.h>
30 #include <strings.h>
31 #include <errno.h>
32 #include <unistd.h>
33 #include <dt_impl.h>
34 #include <assert.h>
35 #include <alloca.h>
36 #include <limits.h>
37 
#define	DTRACE_AHASHSIZE	32779		/* big 'ol prime */

/*
 * Because qsort(3C) does not allow an argument to be passed to a comparison
 * function, the variables that affect comparison must regrettably be global;
 * they are protected by a global static lock, dt_qsort_lock.
 */
static pthread_mutex_t dt_qsort_lock = PTHREAD_MUTEX_INITIALIZER;

static int dt_revsort;		/* non-zero: reverse the sort order */
static int dt_keysort;		/* non-zero: sort on keys instead of values */
static int dt_keypos;		/* tuple position of the primary sort key */

/*
 * Comparison results that honor the requested sort direction:  when
 * dt_revsort is set, "less than" and "greater than" exchange meanings.
 */
#define	DT_LESSTHAN	(dt_revsort == 0 ? -1 : 1)
#define	DT_GREATERTHAN	(dt_revsort == 0 ? 1 : -1)
53 
54 static void
55 dt_aggregate_count(int64_t *existing, int64_t *new, size_t size)
56 {
57 	int i;
58 
59 	for (i = 0; i < size / sizeof (int64_t); i++)
60 		existing[i] = existing[i] + new[i];
61 }
62 
63 static int
64 dt_aggregate_countcmp(int64_t *lhs, int64_t *rhs)
65 {
66 	int64_t lvar = *lhs;
67 	int64_t rvar = *rhs;
68 
69 	if (lvar < rvar)
70 		return (DT_LESSTHAN);
71 
72 	if (lvar > rvar)
73 		return (DT_GREATERTHAN);
74 
75 	return (0);
76 }
77 
78 /*ARGSUSED*/
79 static void
80 dt_aggregate_min(int64_t *existing, int64_t *new, size_t size)
81 {
82 	if (*new < *existing)
83 		*existing = *new;
84 }
85 
86 /*ARGSUSED*/
87 static void
88 dt_aggregate_max(int64_t *existing, int64_t *new, size_t size)
89 {
90 	if (*new > *existing)
91 		*existing = *new;
92 }
93 
94 static int
95 dt_aggregate_averagecmp(int64_t *lhs, int64_t *rhs)
96 {
97 	int64_t lavg = lhs[0] ? (lhs[1] / lhs[0]) : 0;
98 	int64_t ravg = rhs[0] ? (rhs[1] / rhs[0]) : 0;
99 
100 	if (lavg < ravg)
101 		return (DT_LESSTHAN);
102 
103 	if (lavg > ravg)
104 		return (DT_GREATERTHAN);
105 
106 	return (0);
107 }
108 
/*ARGSUSED*/
/*
 * Aggregating action for lquantize():  element-wise addition of bucket
 * counts.  The first word of the data is the lquantize() argument
 * (encoding base/step/levels); it is skipped in the existing data and
 * accounted for in the new data by the "+ 1" when indexing.
 */
static void
dt_aggregate_lquantize(int64_t *existing, int64_t *new, size_t size)
{
	int64_t arg = *existing++;
	uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg);
	int i;

	/*
	 * There are levels + 2 buckets in total:  underflow, the levels
	 * themselves, and overflow.
	 */
	for (i = 0; i <= levels + 1; i++)
		existing[i] = existing[i] + new[i + 1];
}
120 
/*
 * Returns the weighted sum of an lquantize() aggregation:  each bucket's
 * count multiplied by a representative value for the bucket (base - 1
 * for the underflow bucket, base + 1 for the overflow bucket).  Computed
 * in long double to reduce the risk of overflow.
 */
static long double
dt_aggregate_lquantizedsum(int64_t *lquanta)
{
	int64_t arg = *lquanta++;	/* first word encodes base/step/levels */
	int32_t base = DTRACE_LQUANTIZE_BASE(arg);
	uint16_t step = DTRACE_LQUANTIZE_STEP(arg);
	uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg), i;
	long double total = (long double)lquanta[0] * (long double)(base - 1);

	for (i = 0; i < levels; base += step, i++)
		total += (long double)lquanta[i + 1] * (long double)base;

	return (total + (long double)lquanta[levels + 1] *
	    (long double)(base + 1));
}
136 
/*
 * Returns the count in the lquantize() bucket containing the value zero,
 * or 0 if zero lies outside the quantization range entirely.
 */
static int64_t
dt_aggregate_lquantizedzero(int64_t *lquanta)
{
	int64_t arg = *lquanta++;
	int32_t base = DTRACE_LQUANTIZE_BASE(arg);
	uint16_t step = DTRACE_LQUANTIZE_STEP(arg);
	uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg), i;

	/*
	 * Zero falls in the underflow bucket (values below base).
	 */
	if (base - 1 == 0)
		return (lquanta[0]);

	for (i = 0; i < levels; base += step, i++) {
		if (base != 0)
			continue;

		return (lquanta[i + 1]);
	}

	/*
	 * Zero falls in the overflow bucket (values above the range).
	 */
	if (base + 1 == 0)
		return (lquanta[levels + 1]);

	return (0);
}
160 
161 static int
162 dt_aggregate_lquantizedcmp(int64_t *lhs, int64_t *rhs)
163 {
164 	long double lsum = dt_aggregate_lquantizedsum(lhs);
165 	long double rsum = dt_aggregate_lquantizedsum(rhs);
166 	int64_t lzero, rzero;
167 
168 	if (lsum < rsum)
169 		return (DT_LESSTHAN);
170 
171 	if (lsum > rsum)
172 		return (DT_GREATERTHAN);
173 
174 	/*
175 	 * If they're both equal, then we will compare based on the weights at
176 	 * zero.  If the weights at zero are equal (or if zero is not within
177 	 * the range of the linear quantization), then this will be judged a
178 	 * tie and will be resolved based on the key comparison.
179 	 */
180 	lzero = dt_aggregate_lquantizedzero(lhs);
181 	rzero = dt_aggregate_lquantizedzero(rhs);
182 
183 	if (lzero < rzero)
184 		return (DT_LESSTHAN);
185 
186 	if (lzero > rzero)
187 		return (DT_GREATERTHAN);
188 
189 	return (0);
190 }
191 
192 static int
193 dt_aggregate_quantizedcmp(int64_t *lhs, int64_t *rhs)
194 {
195 	int nbuckets = DTRACE_QUANTIZE_NBUCKETS, i;
196 	long double ltotal = 0, rtotal = 0;
197 	int64_t lzero, rzero;
198 
199 	for (i = 0; i < nbuckets; i++) {
200 		int64_t bucketval = DTRACE_QUANTIZE_BUCKETVAL(i);
201 
202 		if (bucketval == 0) {
203 			lzero = lhs[i];
204 			rzero = rhs[i];
205 		}
206 
207 		ltotal += (long double)bucketval * (long double)lhs[i];
208 		rtotal += (long double)bucketval * (long double)rhs[i];
209 	}
210 
211 	if (ltotal < rtotal)
212 		return (DT_LESSTHAN);
213 
214 	if (ltotal > rtotal)
215 		return (DT_GREATERTHAN);
216 
217 	/*
218 	 * If they're both equal, then we will compare based on the weights at
219 	 * zero.  If the weights at zero are equal, then this will be judged a
220 	 * tie and will be resolved based on the key comparison.
221 	 */
222 	if (lzero < rzero)
223 		return (DT_LESSTHAN);
224 
225 	if (lzero > rzero)
226 		return (DT_GREATERTHAN);
227 
228 	return (0);
229 }
230 
/*
 * Normalize a user-level symbol key in place:  data[0] is the pid and
 * data[1] the address; the address is replaced with the start address
 * (st_value) of its enclosing symbol when the process can be grabbed
 * and the lookup succeeds.  On any failure the address is left as-is.
 */
static void
dt_aggregate_usym(dtrace_hdl_t *dtp, uint64_t *data)
{
	uint64_t pid = data[0];
	uint64_t *pc = &data[1];
	struct ps_prochandle *P;
	GElf_Sym sym;

	/*
	 * With a vectored (e.g. postmortem) open there is no live process
	 * to interrogate, so leave the address untouched.
	 */
	if (dtp->dt_vector != NULL)
		return;

	if ((P = dt_proc_grab(dtp, pid, PGRAB_RDONLY | PGRAB_FORCE, 0)) == NULL)
		return;

	dt_proc_lock(dtp, P);

	if (Plookup_by_addr(P, *pc, NULL, 0, &sym) == 0)
		*pc = sym.st_value;

	dt_proc_unlock(dtp, P);
	dt_proc_release(dtp, P);
}
253 
/*
 * Normalize a user-level module key in place:  data[0] is the pid and
 * data[1] the address; the address is replaced with the base address
 * (pr_vaddr) of its containing mapping when the process can be grabbed
 * and the mapping found.  On any failure the address is left as-is.
 */
static void
dt_aggregate_umod(dtrace_hdl_t *dtp, uint64_t *data)
{
	uint64_t pid = data[0];
	uint64_t *pc = &data[1];
	struct ps_prochandle *P;
	const prmap_t *map;

	/*
	 * As with dt_aggregate_usym(), a vectored open has no live process
	 * to interrogate.
	 */
	if (dtp->dt_vector != NULL)
		return;

	if ((P = dt_proc_grab(dtp, pid, PGRAB_RDONLY | PGRAB_FORCE, 0)) == NULL)
		return;

	dt_proc_lock(dtp, P);

	if ((map = Paddr_to_map(P, *pc)) != NULL)
		*pc = map->pr_vaddr;

	dt_proc_unlock(dtp, P);
	dt_proc_release(dtp, P);
}
276 
/*
 * Normalize a kernel symbol address in place:  replace *pc with the
 * start address (st_value) of the containing symbol if the lookup
 * succeeds; otherwise leave it unchanged.
 */
static void
dt_aggregate_sym(dtrace_hdl_t *dtp, uint64_t *data)
{
	GElf_Sym sym;
	uint64_t *pc = data;

	if (dtrace_lookup_by_addr(dtp, *pc, &sym, NULL) == 0)
		*pc = sym.st_value;
}
286 
/*
 * Normalize a kernel module address in place:  replace *pc with the
 * text base address of the module whose text range contains it, if any.
 */
static void
dt_aggregate_mod(dtrace_hdl_t *dtp, uint64_t *data)
{
	uint64_t *pc = data;
	dt_module_t *dmp;

	if (dtp->dt_vector != NULL) {
		/*
		 * We don't have a way of just getting the module for a
		 * vectored open, and it doesn't seem to be worth defining
		 * one.  This means that use of mod() won't get true
		 * aggregation in the postmortem case (some modules may
		 * appear more than once in aggregation output).  It seems
		 * unlikely that anyone will ever notice or care...
		 */
		return;
	}

	for (dmp = dt_list_next(&dtp->dt_modlist); dmp != NULL;
	    dmp = dt_list_next(dmp)) {
		/*
		 * The unsigned subtraction doubles as a range check:  it is
		 * only less than the text size when dm_text_va <= *pc and
		 * *pc lies within the module's text segment.
		 */
		if (*pc - dmp->dm_text_va < dmp->dm_text_size) {
			*pc = dmp->dm_text_va;
			return;
		}
	}
}
313 
/*
 * Returns the aggregation variable ID for a hash entry, caching it in
 * the aggregation description on first retrieval.
 */
static dtrace_aggvarid_t
dt_aggregate_aggvarid(dt_ahashent_t *ent)
{
	dtrace_aggdesc_t *agg = ent->dtahe_data.dtada_desc;
	caddr_t data = ent->dtahe_data.dtada_data;
	dtrace_recdesc_t *rec = agg->dtagd_rec;

	/*
	 * First, we'll check the variable ID in the aggdesc.  If it's valid,
	 * we'll return it.  If not, we'll use the compiler-generated ID
	 * present as the first record.
	 */
	if (agg->dtagd_varid != DTRACE_AGGVARIDNONE)
		return (agg->dtagd_varid);

	agg->dtagd_varid = *((dtrace_aggvarid_t *)(uintptr_t)(data +
	    rec->dtrd_offset));

	return (agg->dtagd_varid);
}
334 
335 
/*
 * Snapshot the aggregation buffer for the specified CPU and merge its
 * records into the aggregation hash:  records whose key matches an
 * existing entry are folded in via the entry's aggregating function;
 * unmatched records get new hash entries (with per-CPU copies when
 * DTRACE_A_PERCPU is set).  Returns 0 on success, or -1 with the
 * dtrace errno set on failure.
 */
static int
dt_aggregate_snap_cpu(dtrace_hdl_t *dtp, processorid_t cpu)
{
	dtrace_epid_t id;
	uint64_t hashval;
	size_t offs, roffs, size, ndx;
	int i, j, rval;
	caddr_t addr, data;
	dtrace_recdesc_t *rec;
	dt_aggregate_t *agp = &dtp->dt_aggregate;
	dtrace_aggdesc_t *agg;
	dt_ahash_t *hash = &agp->dtat_hash;
	dt_ahashent_t *h;
	dtrace_bufdesc_t b = agp->dtat_buf, *buf = &b;
	dtrace_aggdata_t *aggdata;
	int flags = agp->dtat_flags;

	buf->dtbd_cpu = cpu;

	if (dt_ioctl(dtp, DTRACEIOC_AGGSNAP, buf) == -1) {
		if (errno == ENOENT) {
			/*
			 * If that failed with ENOENT, it may be because the
			 * CPU was unconfigured.  This is okay; we'll just
			 * do nothing but return success.
			 */
			return (0);
		}

		return (dt_set_errno(dtp, errno));
	}

	if (buf->dtbd_drops != 0) {
		if (dt_handle_cpudrop(dtp, cpu,
		    DTRACEDROP_AGGREGATION, buf->dtbd_drops) == -1)
			return (-1);
	}

	if (buf->dtbd_size == 0)
		return (0);

	if (hash->dtah_hash == NULL) {
		size_t size;

		/*
		 * First record ever seen:  lazily allocate and zero the
		 * hash table itself.
		 */
		hash->dtah_size = DTRACE_AHASHSIZE;
		size = hash->dtah_size * sizeof (dt_ahashent_t *);

		if ((hash->dtah_hash = malloc(size)) == NULL)
			return (dt_set_errno(dtp, EDT_NOMEM));

		bzero(hash->dtah_hash, size);
	}

	for (offs = 0; offs < buf->dtbd_size; ) {
		/*
		 * We're guaranteed to have an ID.
		 */
		id = *((dtrace_epid_t *)((uintptr_t)buf->dtbd_data +
		    (uintptr_t)offs));

		if (id == DTRACE_AGGIDNONE) {
			/*
			 * This is filler to assure proper alignment of the
			 * next record; we simply ignore it.
			 */
			offs += sizeof (id);
			continue;
		}

		if ((rval = dt_aggid_lookup(dtp, id, &agg)) != 0)
			return (rval);

		addr = buf->dtbd_data + offs;
		size = agg->dtagd_size;
		hashval = 0;

		/*
		 * Normalize any symbol/module addresses within the key
		 * records (so equivalent keys hash identically), then hash
		 * the key bytes.  The final record holds the aggregated
		 * value and is deliberately excluded from the hash.
		 */
		for (j = 0; j < agg->dtagd_nrecs - 1; j++) {
			rec = &agg->dtagd_rec[j];
			roffs = rec->dtrd_offset;

			switch (rec->dtrd_action) {
			case DTRACEACT_USYM:
				dt_aggregate_usym(dtp,
				    /* LINTED - alignment */
				    (uint64_t *)&addr[roffs]);
				break;

			case DTRACEACT_UMOD:
				dt_aggregate_umod(dtp,
				    /* LINTED - alignment */
				    (uint64_t *)&addr[roffs]);
				break;

			case DTRACEACT_SYM:
				/* LINTED - alignment */
				dt_aggregate_sym(dtp, (uint64_t *)&addr[roffs]);
				break;

			case DTRACEACT_MOD:
				/* LINTED - alignment */
				dt_aggregate_mod(dtp, (uint64_t *)&addr[roffs]);
				break;

			default:
				break;
			}

			for (i = 0; i < rec->dtrd_size; i++)
				hashval += addr[roffs + i];
		}

		ndx = hashval % hash->dtah_size;

		/*
		 * Walk the hash chain looking for an entry whose key bytes
		 * match exactly.
		 */
		for (h = hash->dtah_hash[ndx]; h != NULL; h = h->dtahe_next) {
			if (h->dtahe_hashval != hashval)
				continue;

			if (h->dtahe_size != size)
				continue;

			aggdata = &h->dtahe_data;
			data = aggdata->dtada_data;

			for (j = 0; j < agg->dtagd_nrecs - 1; j++) {
				rec = &agg->dtagd_rec[j];
				roffs = rec->dtrd_offset;

				for (i = 0; i < rec->dtrd_size; i++)
					if (addr[roffs + i] != data[roffs + i])
						goto hashnext;
			}

			/*
			 * We found it.  Now we need to apply the aggregating
			 * action on the data here.
			 */
			rec = &agg->dtagd_rec[agg->dtagd_nrecs - 1];
			roffs = rec->dtrd_offset;
			/* LINTED - alignment */
			h->dtahe_aggregate((int64_t *)&data[roffs],
			    /* LINTED - alignment */
			    (int64_t *)&addr[roffs], rec->dtrd_size);

			/*
			 * If we're keeping per CPU data, apply the aggregating
			 * action there as well.
			 */
			if (aggdata->dtada_percpu != NULL) {
				data = aggdata->dtada_percpu[cpu];

				/* LINTED - alignment */
				h->dtahe_aggregate((int64_t *)data,
				    /* LINTED - alignment */
				    (int64_t *)&addr[roffs], rec->dtrd_size);
			}

			goto bufnext;
hashnext:
			continue;
		}

		/*
		 * If we're here, we couldn't find an entry for this record.
		 */
		if ((h = malloc(sizeof (dt_ahashent_t))) == NULL)
			return (dt_set_errno(dtp, EDT_NOMEM));
		bzero(h, sizeof (dt_ahashent_t));
		aggdata = &h->dtahe_data;

		if ((aggdata->dtada_data = malloc(size)) == NULL) {
			free(h);
			return (dt_set_errno(dtp, EDT_NOMEM));
		}

		bcopy(addr, aggdata->dtada_data, size);
		aggdata->dtada_size = size;
		aggdata->dtada_desc = agg;
		aggdata->dtada_handle = dtp;
		(void) dt_epid_lookup(dtp, agg->dtagd_epid,
		    &aggdata->dtada_edesc, &aggdata->dtada_pdesc);
		aggdata->dtada_normal = 1;

		h->dtahe_hashval = hashval;
		h->dtahe_size = size;
		(void) dt_aggregate_aggvarid(h);

		rec = &agg->dtagd_rec[agg->dtagd_nrecs - 1];

		if (flags & DTRACE_A_PERCPU) {
			int max_cpus = agp->dtat_maxcpu;
			caddr_t *percpu = malloc(max_cpus * sizeof (caddr_t));

			if (percpu == NULL) {
				free(aggdata->dtada_data);
				free(h);
				return (dt_set_errno(dtp, EDT_NOMEM));
			}

			for (j = 0; j < max_cpus; j++) {
				percpu[j] = malloc(rec->dtrd_size);

				if (percpu[j] == NULL) {
					while (--j >= 0)
						free(percpu[j]);

					free(aggdata->dtada_data);
					free(h);
					return (dt_set_errno(dtp, EDT_NOMEM));
				}

				/*
				 * Only the snapshotted CPU starts with the
				 * observed value; all others start zeroed.
				 */
				if (j == cpu) {
					bcopy(&addr[rec->dtrd_offset],
					    percpu[j], rec->dtrd_size);
				} else {
					bzero(percpu[j], rec->dtrd_size);
				}
			}

			aggdata->dtada_percpu = percpu;
		}

		/*
		 * Select the aggregating function matching the action.
		 */
		switch (rec->dtrd_action) {
		case DTRACEAGG_MIN:
			h->dtahe_aggregate = dt_aggregate_min;
			break;

		case DTRACEAGG_MAX:
			h->dtahe_aggregate = dt_aggregate_max;
			break;

		case DTRACEAGG_LQUANTIZE:
			h->dtahe_aggregate = dt_aggregate_lquantize;
			break;

		case DTRACEAGG_COUNT:
		case DTRACEAGG_SUM:
		case DTRACEAGG_AVG:
		case DTRACEAGG_QUANTIZE:
			h->dtahe_aggregate = dt_aggregate_count;
			break;

		default:
			/*
			 * NOTE(review):  this error path leaks h,
			 * aggdata->dtada_data and any per-CPU buffers
			 * allocated above -- worth fixing separately.
			 */
			return (dt_set_errno(dtp, EDT_BADAGG));
		}

		/*
		 * Link the new entry onto the front of its hash chain and
		 * onto the front of the list of all entries.
		 */
		if (hash->dtah_hash[ndx] != NULL)
			hash->dtah_hash[ndx]->dtahe_prev = h;

		h->dtahe_next = hash->dtah_hash[ndx];
		hash->dtah_hash[ndx] = h;

		if (hash->dtah_all != NULL)
			hash->dtah_all->dtahe_prevall = h;

		h->dtahe_nextall = hash->dtah_all;
		hash->dtah_all = h;
bufnext:
		offs += agg->dtagd_size;
	}

	return (0);
}
598 
599 int
600 dtrace_aggregate_snap(dtrace_hdl_t *dtp)
601 {
602 	int i, rval;
603 	dt_aggregate_t *agp = &dtp->dt_aggregate;
604 	hrtime_t now = gethrtime();
605 	dtrace_optval_t interval = dtp->dt_options[DTRACEOPT_AGGRATE];
606 
607 	if (dtp->dt_lastagg != 0) {
608 		if (now - dtp->dt_lastagg < interval)
609 			return (0);
610 
611 		dtp->dt_lastagg += interval;
612 	} else {
613 		dtp->dt_lastagg = now;
614 	}
615 
616 	if (!dtp->dt_active)
617 		return (dt_set_errno(dtp, EINVAL));
618 
619 	if (agp->dtat_buf.dtbd_size == 0)
620 		return (0);
621 
622 	for (i = 0; i < agp->dtat_ncpus; i++) {
623 		if (rval = dt_aggregate_snap_cpu(dtp, agp->dtat_cpus[i]))
624 			return (rval);
625 	}
626 
627 	return (0);
628 }
629 
630 static int
631 dt_aggregate_hashcmp(const void *lhs, const void *rhs)
632 {
633 	dt_ahashent_t *lh = *((dt_ahashent_t **)lhs);
634 	dt_ahashent_t *rh = *((dt_ahashent_t **)rhs);
635 	dtrace_aggdesc_t *lagg = lh->dtahe_data.dtada_desc;
636 	dtrace_aggdesc_t *ragg = rh->dtahe_data.dtada_desc;
637 
638 	if (lagg->dtagd_nrecs < ragg->dtagd_nrecs)
639 		return (DT_LESSTHAN);
640 
641 	if (lagg->dtagd_nrecs > ragg->dtagd_nrecs)
642 		return (DT_GREATERTHAN);
643 
644 	return (0);
645 }
646 
647 static int
648 dt_aggregate_varcmp(const void *lhs, const void *rhs)
649 {
650 	dt_ahashent_t *lh = *((dt_ahashent_t **)lhs);
651 	dt_ahashent_t *rh = *((dt_ahashent_t **)rhs);
652 	dtrace_aggvarid_t lid, rid;
653 
654 	lid = dt_aggregate_aggvarid(lh);
655 	rid = dt_aggregate_aggvarid(rh);
656 
657 	if (lid < rid)
658 		return (DT_LESSTHAN);
659 
660 	if (lid > rid)
661 		return (DT_GREATERTHAN);
662 
663 	return (0);
664 }
665 
/*
 * Compare two aggregation elements by their tuple keys (all records but
 * the final, aggregated one), beginning at the key position selected by
 * the "aggsortkeypos" option and wrapping around the remaining key
 * components.  Relies on dt_aggregate_hashcmp() to guarantee both
 * elements have the same number of records.
 */
static int
dt_aggregate_keycmp(const void *lhs, const void *rhs)
{
	dt_ahashent_t *lh = *((dt_ahashent_t **)lhs);
	dt_ahashent_t *rh = *((dt_ahashent_t **)rhs);
	dtrace_aggdesc_t *lagg = lh->dtahe_data.dtada_desc;
	dtrace_aggdesc_t *ragg = rh->dtahe_data.dtada_desc;
	dtrace_recdesc_t *lrec, *rrec;
	char *ldata, *rdata;
	int rval, i, j, keypos, nrecs;

	if ((rval = dt_aggregate_hashcmp(lhs, rhs)) != 0)
		return (rval);

	nrecs = lagg->dtagd_nrecs - 1;
	assert(nrecs == ragg->dtagd_nrecs - 1);

	/*
	 * If the requested key position is beyond the available key
	 * components, fall back to position 0.
	 */
	keypos = dt_keypos + 1 >= nrecs ? 0 : dt_keypos;

	for (i = 1; i < nrecs; i++) {
		uint64_t lval, rval;	/* NB: shadows the int rval above */
		int ndx = i + keypos;

		/*
		 * Wrap around to record 1 (record 0 is the aggregation
		 * variable ID) once we run off the end of the tuple.
		 */
		if (ndx >= nrecs)
			ndx = ndx - nrecs + 1;

		lrec = &lagg->dtagd_rec[ndx];
		rrec = &ragg->dtagd_rec[ndx];

		ldata = lh->dtahe_data.dtada_data + lrec->dtrd_offset;
		rdata = rh->dtahe_data.dtada_data + rrec->dtrd_offset;

		if (lrec->dtrd_size < rrec->dtrd_size)
			return (DT_LESSTHAN);

		if (lrec->dtrd_size > rrec->dtrd_size)
			return (DT_GREATERTHAN);

		/*
		 * Scalar-sized records compare numerically; anything else
		 * is handled in the default case below.
		 */
		switch (lrec->dtrd_size) {
		case sizeof (uint64_t):
			/* LINTED - alignment */
			lval = *((uint64_t *)ldata);
			/* LINTED - alignment */
			rval = *((uint64_t *)rdata);
			break;

		case sizeof (uint32_t):
			/* LINTED - alignment */
			lval = *((uint32_t *)ldata);
			/* LINTED - alignment */
			rval = *((uint32_t *)rdata);
			break;

		case sizeof (uint16_t):
			/* LINTED - alignment */
			lval = *((uint16_t *)ldata);
			/* LINTED - alignment */
			rval = *((uint16_t *)rdata);
			break;

		case sizeof (uint8_t):
			lval = *((uint8_t *)ldata);
			rval = *((uint8_t *)rdata);
			break;

		default:
			switch (lrec->dtrd_action) {
			case DTRACEACT_UMOD:
			case DTRACEACT_UADDR:
			case DTRACEACT_USYM:
				/*
				 * These records are a (pid, address) pair
				 * of 64-bit words; compare them in order.
				 */
				for (j = 0; j < 2; j++) {
					/* LINTED - alignment */
					lval = ((uint64_t *)ldata)[j];
					/* LINTED - alignment */
					rval = ((uint64_t *)rdata)[j];

					if (lval < rval)
						return (DT_LESSTHAN);

					if (lval > rval)
						return (DT_GREATERTHAN);
				}

				break;

			default:
				/*
				 * Anything else (e.g. strings) is compared
				 * byte-wise.
				 */
				for (j = 0; j < lrec->dtrd_size; j++) {
					lval = ((uint8_t *)ldata)[j];
					rval = ((uint8_t *)rdata)[j];

					if (lval < rval)
						return (DT_LESSTHAN);

					if (lval > rval)
						return (DT_GREATERTHAN);
				}
			}

			continue;
		}

		if (lval < rval)
			return (DT_LESSTHAN);

		if (lval > rval)
			return (DT_GREATERTHAN);
	}

	return (0);
}
776 
/*
 * Compare two aggregation elements by their aggregated values, first
 * confirming that the two elements' record layouts agree, then
 * dispatching to the comparison routine appropriate to the aggregating
 * action.
 */
static int
dt_aggregate_valcmp(const void *lhs, const void *rhs)
{
	dt_ahashent_t *lh = *((dt_ahashent_t **)lhs);
	dt_ahashent_t *rh = *((dt_ahashent_t **)rhs);
	dtrace_aggdesc_t *lagg = lh->dtahe_data.dtada_desc;
	dtrace_aggdesc_t *ragg = rh->dtahe_data.dtada_desc;
	caddr_t ldata = lh->dtahe_data.dtada_data;
	caddr_t rdata = rh->dtahe_data.dtada_data;
	dtrace_recdesc_t *lrec, *rrec;
	int64_t *laddr, *raddr;
	int rval, i;

	if ((rval = dt_aggregate_hashcmp(lhs, rhs)) != 0)
		return (rval);

	if (lagg->dtagd_nrecs > ragg->dtagd_nrecs)
		return (DT_GREATERTHAN);

	if (lagg->dtagd_nrecs < ragg->dtagd_nrecs)
		return (DT_LESSTHAN);

	/*
	 * Unlike layouts sort by offset/action before any value is read.
	 */
	for (i = 0; i < lagg->dtagd_nrecs; i++) {
		lrec = &lagg->dtagd_rec[i];
		rrec = &ragg->dtagd_rec[i];

		if (lrec->dtrd_offset < rrec->dtrd_offset)
			return (DT_LESSTHAN);

		if (lrec->dtrd_offset > rrec->dtrd_offset)
			return (DT_GREATERTHAN);

		if (lrec->dtrd_action < rrec->dtrd_action)
			return (DT_LESSTHAN);

		if (lrec->dtrd_action > rrec->dtrd_action)
			return (DT_GREATERTHAN);
	}

	/*
	 * After the loop, lrec/rrec refer to the final record of each
	 * element -- the aggregated value itself.
	 */
	laddr = (int64_t *)(uintptr_t)(ldata + lrec->dtrd_offset);
	raddr = (int64_t *)(uintptr_t)(rdata + rrec->dtrd_offset);

	switch (lrec->dtrd_action) {
	case DTRACEAGG_AVG:
		rval = dt_aggregate_averagecmp(laddr, raddr);
		break;

	case DTRACEAGG_QUANTIZE:
		rval = dt_aggregate_quantizedcmp(laddr, raddr);
		break;

	case DTRACEAGG_LQUANTIZE:
		rval = dt_aggregate_lquantizedcmp(laddr, raddr);
		break;

	case DTRACEAGG_COUNT:
	case DTRACEAGG_SUM:
	case DTRACEAGG_MIN:
	case DTRACEAGG_MAX:
		rval = dt_aggregate_countcmp(laddr, raddr);
		break;

	default:
		/*
		 * NOTE(review):  presumably unreachable for a valid
		 * aggregation; if asserts are compiled out (NDEBUG), rval
		 * would be returned uninitialized here -- confirm.
		 */
		assert(0);
	}

	return (rval);
}
845 
/*
 * Compare by value, breaking ties by comparing the keys.
 */
static int
dt_aggregate_valkeycmp(const void *lhs, const void *rhs)
{
	int rval = dt_aggregate_valcmp(lhs, rhs);

	/*
	 * If the values for the two aggregation elements are equal, the
	 * keys themselves serve as the tie-breaker (the key layouts are
	 * already known to match).
	 */
	if (rval == 0)
		rval = dt_aggregate_keycmp(lhs, rhs);

	return (rval);
}
861 
/*
 * Compare by key, breaking ties by aggregation variable ID.
 */
static int
dt_aggregate_keyvarcmp(const void *lhs, const void *rhs)
{
	int rval = dt_aggregate_keycmp(lhs, rhs);

	if (rval == 0)
		rval = dt_aggregate_varcmp(lhs, rhs);

	return (rval);
}
872 
/*
 * Compare by aggregation variable ID, breaking ties by key.
 */
static int
dt_aggregate_varkeycmp(const void *lhs, const void *rhs)
{
	int rval = dt_aggregate_varcmp(lhs, rhs);

	if (rval == 0)
		rval = dt_aggregate_keycmp(lhs, rhs);

	return (rval);
}
883 
/*
 * Compare by value (with key tie-break), then by variable ID.
 */
static int
dt_aggregate_valvarcmp(const void *lhs, const void *rhs)
{
	int rval = dt_aggregate_valkeycmp(lhs, rhs);

	if (rval == 0)
		rval = dt_aggregate_varcmp(lhs, rhs);

	return (rval);
}
894 
/*
 * Compare by variable ID, then by value (with key tie-break).
 */
static int
dt_aggregate_varvalcmp(const void *lhs, const void *rhs)
{
	int rval = dt_aggregate_varcmp(lhs, rhs);

	if (rval == 0)
		rval = dt_aggregate_valkeycmp(lhs, rhs);

	return (rval);
}
905 
/*
 * Reverse-sorted variant of dt_aggregate_keyvarcmp():  exchange operands.
 */
static int
dt_aggregate_keyvarrevcmp(const void *lhs, const void *rhs)
{
	int rval = dt_aggregate_keyvarcmp(rhs, lhs);

	return (rval);
}
911 
/*
 * Reverse-sorted variant of dt_aggregate_varkeycmp():  exchange operands.
 */
static int
dt_aggregate_varkeyrevcmp(const void *lhs, const void *rhs)
{
	int rval = dt_aggregate_varkeycmp(rhs, lhs);

	return (rval);
}
917 
/*
 * Reverse-sorted variant of dt_aggregate_valvarcmp():  exchange operands.
 */
static int
dt_aggregate_valvarrevcmp(const void *lhs, const void *rhs)
{
	int rval = dt_aggregate_valvarcmp(rhs, lhs);

	return (rval);
}
923 
/*
 * Reverse-sorted variant of dt_aggregate_varvalcmp():  exchange operands.
 */
static int
dt_aggregate_varvalrevcmp(const void *lhs, const void *rhs)
{
	int rval = dt_aggregate_varvalcmp(rhs, lhs);

	return (rval);
}
929 
/*
 * Compare two "bundles" -- NULL-terminated arrays of aggregation
 * elements grouped together (e.g. for printa() with multiple
 * aggregations).  Each bundle is laid out as its values followed by a
 * representative key as the last element.
 */
static int
dt_aggregate_bundlecmp(const void *lhs, const void *rhs)
{
	dt_ahashent_t **lh = *((dt_ahashent_t ***)lhs);
	dt_ahashent_t **rh = *((dt_ahashent_t ***)rhs);
	int i, rval;

	if (dt_keysort) {
		/*
		 * If we're sorting on keys, we need to scan until we find the
		 * last entry -- that's the representative key.  (The order of
		 * the bundle is values followed by key to accommodate the
		 * default behavior of sorting by value.)  If the keys are
		 * equal, we'll fall into the value comparison loop, below.
		 */
		for (i = 0; lh[i + 1] != NULL; i++)
			continue;

		assert(i != 0);
		assert(rh[i + 1] == NULL);

		if ((rval = dt_aggregate_keycmp(&lh[i], &rh[i])) != 0)
			return (rval);
	}

	for (i = 0; ; i++) {
		if (lh[i + 1] == NULL) {
			/*
			 * All of the values are equal; if we're sorting on
			 * keys, then we're only here because the keys were
			 * found to be equal and these records are therefore
			 * equal.  If we're not sorting on keys, we'll use the
			 * key comparison from the representative key as the
			 * tie-breaker.
			 */
			if (dt_keysort)
				return (0);

			assert(i != 0);
			assert(rh[i + 1] == NULL);
			return (dt_aggregate_keycmp(&lh[i], &rh[i]));
		} else {
			if ((rval = dt_aggregate_valcmp(&lh[i], &rh[i])) != 0)
				return (rval);
		}
	}
}
977 
978 int
979 dt_aggregate_go(dtrace_hdl_t *dtp)
980 {
981 	dt_aggregate_t *agp = &dtp->dt_aggregate;
982 	dtrace_optval_t size, cpu;
983 	dtrace_bufdesc_t *buf = &agp->dtat_buf;
984 	int rval, i;
985 
986 	assert(agp->dtat_maxcpu == 0);
987 	assert(agp->dtat_ncpu == 0);
988 	assert(agp->dtat_cpus == NULL);
989 
990 	agp->dtat_maxcpu = dt_sysconf(dtp, _SC_CPUID_MAX) + 1;
991 	agp->dtat_ncpu = dt_sysconf(dtp, _SC_NPROCESSORS_MAX);
992 	agp->dtat_cpus = malloc(agp->dtat_ncpu * sizeof (processorid_t));
993 
994 	if (agp->dtat_cpus == NULL)
995 		return (dt_set_errno(dtp, EDT_NOMEM));
996 
997 	/*
998 	 * Use the aggregation buffer size as reloaded from the kernel.
999 	 */
1000 	size = dtp->dt_options[DTRACEOPT_AGGSIZE];
1001 
1002 	rval = dtrace_getopt(dtp, "aggsize", &size);
1003 	assert(rval == 0);
1004 
1005 	if (size == 0 || size == DTRACEOPT_UNSET)
1006 		return (0);
1007 
1008 	buf = &agp->dtat_buf;
1009 	buf->dtbd_size = size;
1010 
1011 	if ((buf->dtbd_data = malloc(buf->dtbd_size)) == NULL)
1012 		return (dt_set_errno(dtp, EDT_NOMEM));
1013 
1014 	/*
1015 	 * Now query for the CPUs enabled.
1016 	 */
1017 	rval = dtrace_getopt(dtp, "cpu", &cpu);
1018 	assert(rval == 0 && cpu != DTRACEOPT_UNSET);
1019 
1020 	if (cpu != DTRACE_CPUALL) {
1021 		assert(cpu < agp->dtat_ncpu);
1022 		agp->dtat_cpus[agp->dtat_ncpus++] = (processorid_t)cpu;
1023 
1024 		return (0);
1025 	}
1026 
1027 	agp->dtat_ncpus = 0;
1028 	for (i = 0; i < agp->dtat_maxcpu; i++) {
1029 		if (dt_status(dtp, i) == -1)
1030 			continue;
1031 
1032 		agp->dtat_cpus[agp->dtat_ncpus++] = i;
1033 	}
1034 
1035 	return (0);
1036 }
1037 
/*
 * Apply the value returned by an aggregation walk callback to the given
 * hash entry:  continue as-is, clear the aggregated data, adjust
 * normalization, remove and free the entry entirely, or fail.  Returns
 * 0 on success, -1 (with the dtrace errno set) on error.
 */
static int
dt_aggwalk_rval(dtrace_hdl_t *dtp, dt_ahashent_t *h, int rval)
{
	dt_aggregate_t *agp = &dtp->dt_aggregate;
	dtrace_aggdata_t *data;
	dtrace_aggdesc_t *aggdesc;
	dtrace_recdesc_t *rec;
	int i;

	switch (rval) {
	case DTRACE_AGGWALK_NEXT:
		break;

	case DTRACE_AGGWALK_CLEAR: {
		uint32_t size, offs = 0;

		aggdesc = h->dtahe_data.dtada_desc;
		rec = &aggdesc->dtagd_rec[aggdesc->dtagd_nrecs - 1];
		size = rec->dtrd_size;
		data = &h->dtahe_data;

		/*
		 * For lquantize(), preserve the leading word that encodes
		 * the quantization parameters; only zero the buckets.
		 */
		if (rec->dtrd_action == DTRACEAGG_LQUANTIZE) {
			offs = sizeof (uint64_t);
			size -= sizeof (uint64_t);
		}

		bzero(&data->dtada_data[rec->dtrd_offset] + offs, size);

		if (data->dtada_percpu == NULL)
			break;

		for (i = 0; i < dtp->dt_aggregate.dtat_maxcpu; i++)
			bzero(data->dtada_percpu[i] + offs, size);
		break;
	}

	case DTRACE_AGGWALK_ERROR:
		/*
		 * We assume that errno is already set in this case.
		 */
		return (dt_set_errno(dtp, errno));

	case DTRACE_AGGWALK_ABORT:
		return (dt_set_errno(dtp, EDT_DIRABORT));

	case DTRACE_AGGWALK_DENORMALIZE:
		h->dtahe_data.dtada_normal = 1;
		return (0);

	case DTRACE_AGGWALK_NORMALIZE:
		/*
		 * A normal of 0 indicates the callback failed to set one;
		 * reset it and report the bad return value.
		 */
		if (h->dtahe_data.dtada_normal == 0) {
			h->dtahe_data.dtada_normal = 1;
			return (dt_set_errno(dtp, EDT_BADRVAL));
		}

		return (0);

	case DTRACE_AGGWALK_REMOVE: {
		dtrace_aggdata_t *aggdata = &h->dtahe_data;
		int i, max_cpus = agp->dtat_maxcpu;

		/*
		 * First, remove this hash entry from its hash chain.
		 */
		if (h->dtahe_prev != NULL) {
			h->dtahe_prev->dtahe_next = h->dtahe_next;
		} else {
			dt_ahash_t *hash = &agp->dtat_hash;
			size_t ndx = h->dtahe_hashval % hash->dtah_size;

			assert(hash->dtah_hash[ndx] == h);
			hash->dtah_hash[ndx] = h->dtahe_next;
		}

		if (h->dtahe_next != NULL)
			h->dtahe_next->dtahe_prev = h->dtahe_prev;

		/*
		 * Now remove it from the list of all hash entries.
		 */
		if (h->dtahe_prevall != NULL) {
			h->dtahe_prevall->dtahe_nextall = h->dtahe_nextall;
		} else {
			dt_ahash_t *hash = &agp->dtat_hash;

			assert(hash->dtah_all == h);
			hash->dtah_all = h->dtahe_nextall;
		}

		if (h->dtahe_nextall != NULL)
			h->dtahe_nextall->dtahe_prevall = h->dtahe_prevall;

		/*
		 * We're unlinked.  We can safely destroy the data.
		 */
		if (aggdata->dtada_percpu != NULL) {
			for (i = 0; i < max_cpus; i++)
				free(aggdata->dtada_percpu[i]);
			free(aggdata->dtada_percpu);
		}

		free(aggdata->dtada_data);
		free(h);

		return (0);
	}

	default:
		return (dt_set_errno(dtp, EDT_BADRVAL));
	}

	return (0);
}
1151 
1152 void
1153 dt_aggregate_qsort(dtrace_hdl_t *dtp, void *base, size_t nel, size_t width,
1154     int (*compar)(const void *, const void *))
1155 {
1156 	int rev = dt_revsort, key = dt_keysort, keypos = dt_keypos;
1157 	dtrace_optval_t keyposopt = dtp->dt_options[DTRACEOPT_AGGSORTKEYPOS];
1158 
1159 	dt_revsort = (dtp->dt_options[DTRACEOPT_AGGSORTREV] != DTRACEOPT_UNSET);
1160 	dt_keysort = (dtp->dt_options[DTRACEOPT_AGGSORTKEY] != DTRACEOPT_UNSET);
1161 
1162 	if (keyposopt != DTRACEOPT_UNSET && keyposopt <= INT_MAX) {
1163 		dt_keypos = (int)keyposopt;
1164 	} else {
1165 		dt_keypos = 0;
1166 	}
1167 
1168 	if (compar == NULL) {
1169 		if (!dt_keysort) {
1170 			compar = dt_aggregate_varvalcmp;
1171 		} else {
1172 			compar = dt_aggregate_varkeycmp;
1173 		}
1174 	}
1175 
1176 	qsort(base, nel, width, compar);
1177 
1178 	dt_revsort = rev;
1179 	dt_keysort = key;
1180 	dt_keypos = keypos;
1181 }
1182 
1183 int
1184 dtrace_aggregate_walk(dtrace_hdl_t *dtp, dtrace_aggregate_f *func, void *arg)
1185 {
1186 	dt_ahashent_t *h, *next;
1187 	dt_ahash_t *hash = &dtp->dt_aggregate.dtat_hash;
1188 
1189 	for (h = hash->dtah_all; h != NULL; h = next) {
1190 		/*
1191 		 * dt_aggwalk_rval() can potentially remove the current hash
1192 		 * entry; we need to load the next hash entry before calling
1193 		 * into it.
1194 		 */
1195 		next = h->dtahe_nextall;
1196 
1197 		if (dt_aggwalk_rval(dtp, h, func(&h->dtahe_data, arg)) == -1)
1198 			return (-1);
1199 	}
1200 
1201 	return (0);
1202 }
1203 
/*
 * Walk all aggregation elements in sorted order:  gather every hash
 * entry into an array, sort it while holding dt_qsort_lock (the
 * comparison functions read global sort state), and invoke the callback
 * on each element in turn.  Returns 0 on success, -1 on error.
 */
static int
dt_aggregate_walk_sorted(dtrace_hdl_t *dtp,
    dtrace_aggregate_f *func, void *arg,
    int (*sfunc)(const void *, const void *))
{
	dt_aggregate_t *agp = &dtp->dt_aggregate;
	dt_ahashent_t *h, **sorted;
	dt_ahash_t *hash = &agp->dtat_hash;
	size_t i, nentries = 0;

	for (h = hash->dtah_all; h != NULL; h = h->dtahe_nextall)
		nentries++;

	/*
	 * NOTE(review):  when the aggregate is empty (nentries == 0),
	 * this presumably relies on dt_alloc(dtp, 0) returning non-NULL;
	 * confirm it cannot trigger a spurious failure here.
	 */
	sorted = dt_alloc(dtp, nentries * sizeof (dt_ahashent_t *));

	if (sorted == NULL)
		return (-1);

	for (h = hash->dtah_all, i = 0; h != NULL; h = h->dtahe_nextall)
		sorted[i++] = h;

	(void) pthread_mutex_lock(&dt_qsort_lock);

	if (sfunc == NULL) {
		dt_aggregate_qsort(dtp, sorted, nentries,
		    sizeof (dt_ahashent_t *), NULL);
	} else {
		/*
		 * If we've been explicitly passed a sorting function,
		 * we'll use that -- ignoring the values of the "aggsortrev",
		 * "aggsortkey" and "aggsortkeypos" options.
		 */
		qsort(sorted, nentries, sizeof (dt_ahashent_t *), sfunc);
	}

	(void) pthread_mutex_unlock(&dt_qsort_lock);

	for (i = 0; i < nentries; i++) {
		h = sorted[i];

		if (dt_aggwalk_rval(dtp, h, func(&h->dtahe_data, arg)) == -1) {
			dt_free(dtp, sorted);
			return (-1);
		}
	}

	dt_free(dtp, sorted);
	return (0);
}
1253 
/*
 * Walk the aggregations in the default sorted order, honoring the
 * "aggsortrev", "aggsortkey" and "aggsortkeypos" options.
 */
int
dtrace_aggregate_walk_sorted(dtrace_hdl_t *dtp,
    dtrace_aggregate_f *func, void *arg)
{
	return (dt_aggregate_walk_sorted(dtp, func, arg, NULL));
}
1260 
/*
 * Walk the aggregations sorted via dt_aggregate_varkeycmp(), ignoring the
 * sorting options.
 */
int
dtrace_aggregate_walk_keysorted(dtrace_hdl_t *dtp,
    dtrace_aggregate_f *func, void *arg)
{
	return (dt_aggregate_walk_sorted(dtp, func,
	    arg, dt_aggregate_varkeycmp));
}
1268 
/*
 * Walk the aggregations sorted via dt_aggregate_varvalcmp(), ignoring the
 * sorting options.
 */
int
dtrace_aggregate_walk_valsorted(dtrace_hdl_t *dtp,
    dtrace_aggregate_f *func, void *arg)
{
	return (dt_aggregate_walk_sorted(dtp, func,
	    arg, dt_aggregate_varvalcmp));
}
1276 
/*
 * Walk the aggregations sorted via dt_aggregate_keyvarcmp(), ignoring the
 * sorting options.
 */
int
dtrace_aggregate_walk_keyvarsorted(dtrace_hdl_t *dtp,
    dtrace_aggregate_f *func, void *arg)
{
	return (dt_aggregate_walk_sorted(dtp, func,
	    arg, dt_aggregate_keyvarcmp));
}
1284 
/*
 * Walk the aggregations sorted via dt_aggregate_valvarcmp(), ignoring the
 * sorting options.
 */
int
dtrace_aggregate_walk_valvarsorted(dtrace_hdl_t *dtp,
    dtrace_aggregate_f *func, void *arg)
{
	return (dt_aggregate_walk_sorted(dtp, func,
	    arg, dt_aggregate_valvarcmp));
}
1292 
/*
 * Walk the aggregations sorted via dt_aggregate_varkeyrevcmp(), ignoring
 * the sorting options.
 */
int
dtrace_aggregate_walk_keyrevsorted(dtrace_hdl_t *dtp,
    dtrace_aggregate_f *func, void *arg)
{
	return (dt_aggregate_walk_sorted(dtp, func,
	    arg, dt_aggregate_varkeyrevcmp));
}
1300 
/*
 * Walk the aggregations sorted via dt_aggregate_varvalrevcmp(), ignoring
 * the sorting options.
 */
int
dtrace_aggregate_walk_valrevsorted(dtrace_hdl_t *dtp,
    dtrace_aggregate_f *func, void *arg)
{
	return (dt_aggregate_walk_sorted(dtp, func,
	    arg, dt_aggregate_varvalrevcmp));
}
1308 
/*
 * Walk the aggregations sorted via dt_aggregate_keyvarrevcmp(), ignoring
 * the sorting options.
 */
int
dtrace_aggregate_walk_keyvarrevsorted(dtrace_hdl_t *dtp,
    dtrace_aggregate_f *func, void *arg)
{
	return (dt_aggregate_walk_sorted(dtp, func,
	    arg, dt_aggregate_keyvarrevcmp));
}
1316 
/*
 * Walk the aggregations sorted via dt_aggregate_valvarrevcmp(), ignoring
 * the sorting options.
 */
int
dtrace_aggregate_walk_valvarrevsorted(dtrace_hdl_t *dtp,
    dtrace_aggregate_f *func, void *arg)
{
	return (dt_aggregate_walk_sorted(dtp, func,
	    arg, dt_aggregate_valvarrevcmp));
}
1324 
/*
 * Walk the aggregation data for the specified aggregation variable IDs as
 * joined tuples:  entries are grouped into bundles by distinct key, with
 * one dtrace_aggdata_t per requested variable per bundle (zero-filled data
 * standing in for variables that have no value for a given key), and func
 * is invoked once per bundle.  In the array passed to func, data[0] is a
 * representative entry for the key and data[1 .. naggvars] correspond to
 * aggvars[], adjusted for the "aggsortpos" option.  Returns 0 on success
 * or -1 on failure (with the error code set on the handle).
 */
int
dtrace_aggregate_walk_joined(dtrace_hdl_t *dtp, dtrace_aggvarid_t *aggvars,
    int naggvars, dtrace_aggregate_walk_joined_f *func, void *arg)
{
	dt_aggregate_t *agp = &dtp->dt_aggregate;
	dt_ahashent_t *h, **sorted = NULL, ***bundle, **nbundle;
	const dtrace_aggdata_t **data;
	dt_ahashent_t *zaggdata = NULL;
	dt_ahash_t *hash = &agp->dtat_hash;
	size_t nentries = 0, nbundles = 0, start, zsize = 0, bundlesize;
	dtrace_aggvarid_t max = 0, aggvar;
	int rval = -1, *map, *remap = NULL;
	int i, j;
	dtrace_optval_t sortpos = dtp->dt_options[DTRACEOPT_AGGSORTPOS];

	/*
	 * If the sorting position is greater than the number of aggregation
	 * variable IDs, we silently set it to 0.
	 */
	if (sortpos == DTRACEOPT_UNSET || sortpos >= naggvars)
		sortpos = 0;

	/*
	 * First we need to translate the specified aggregation variable IDs
	 * into a linear map that will allow us to translate an aggregation
	 * variable ID into its position in the specified aggvars.
	 */
	for (i = 0; i < naggvars; i++) {
		if (aggvars[i] == DTRACE_AGGVARIDNONE || aggvars[i] < 0)
			return (dt_set_errno(dtp, EDT_BADAGGVAR));

		if (aggvars[i] > max)
			max = aggvars[i];
	}

	/* map[id] is zero for "not requested", else 1 + sort-adjusted slot */
	if ((map = dt_zalloc(dtp, (max + 1) * sizeof (int))) == NULL)
		return (-1);

	zaggdata = dt_zalloc(dtp, naggvars * sizeof (dt_ahashent_t));

	if (zaggdata == NULL)
		goto out;

	for (i = 0; i < naggvars; i++) {
		/* rotate by sortpos so slot 0 is the variable to sort on */
		int ndx = i + sortpos;

		if (ndx >= naggvars)
			ndx -= naggvars;

		aggvar = aggvars[ndx];
		assert(aggvar <= max);

		if (map[aggvar]) {
			/*
			 * We have an aggregation variable that is present
			 * more than once in the array of aggregation
			 * variables.  While it's unclear why one might want
			 * to do this, it's legal.  To support this construct,
			 * we will allocate a remap that will indicate the
			 * position from which this aggregation variable
			 * should be pulled.  (That is, where the remap will
			 * map from one position to another.)
			 */
			if (remap == NULL) {
				remap = dt_zalloc(dtp, naggvars * sizeof (int));

				if (remap == NULL)
					goto out;
			}

			/*
			 * Given that the variable is already present, assert
			 * that following through the mapping and adjusting
			 * for the sort position yields the same aggregation
			 * variable ID.
			 */
			assert(aggvars[(map[aggvar] - 1 + sortpos) %
			    naggvars] == aggvars[ndx]);

			remap[i] = map[aggvar];
			continue;
		}

		map[aggvar] = i + 1;
	}

	/*
	 * We need to take two passes over the data to size our allocation, so
	 * we'll use the first pass to also fill in the zero-filled data to be
	 * used to properly format a zero-valued aggregation.
	 */
	for (h = hash->dtah_all; h != NULL; h = h->dtahe_nextall) {
		dtrace_aggvarid_t id;
		int ndx;

		if ((id = dt_aggregate_aggvarid(h)) > max || !(ndx = map[id]))
			continue;

		if (zaggdata[ndx - 1].dtahe_size == 0) {
			zaggdata[ndx - 1].dtahe_size = h->dtahe_size;
			zaggdata[ndx - 1].dtahe_data = h->dtahe_data;
		}

		nentries++;
	}

	if (nentries == 0) {
		/*
		 * We couldn't find any entries; there is nothing else to do.
		 */
		rval = 0;
		goto out;
	}

	/*
	 * Before we sort the data, we're going to look for any holes in our
	 * zero-filled data.  This will occur if an aggregation variable that
	 * we are being asked to print has not yet been assigned the result of
	 * any aggregating action for _any_ tuple.  The issue becomes that we
	 * would like a zero value to be printed for all columns for this
	 * aggregation, but without any record description, we don't know the
	 * aggregating action that corresponds to the aggregation variable.  To
	 * try to find a match, we're simply going to lookup aggregation IDs
	 * (which are guaranteed to be contiguous and to start from 1), looking
	 * for the specified aggregation variable ID.  If we find a match,
	 * we'll use that.  If we iterate over all aggregation IDs and don't
	 * find a match, then we must be an anonymous enabling.  (Anonymous
	 * enablings can't currently derive either aggregation variable IDs or
	 * aggregation variable names given only an aggregation ID.)  In this
	 * obscure case (anonymous enabling, multiple aggregation printa() with
	 * some aggregations not represented for any tuple), our defined
	 * behavior is that the zero will be printed in the format of the first
	 * aggregation variable that contains any non-zero value.
	 */
	for (i = 0; i < naggvars; i++) {
		if (zaggdata[i].dtahe_size == 0) {
			dtrace_aggvarid_t aggvar;

			aggvar = aggvars[(i - sortpos + naggvars) % naggvars];
			assert(zaggdata[i].dtahe_data.dtada_data == NULL);

			for (j = DTRACE_AGGIDNONE + 1; ; j++) {
				dtrace_aggdesc_t *agg;
				dtrace_aggdata_t *aggdata;

				if (dt_aggid_lookup(dtp, j, &agg) != 0)
					break;

				if (agg->dtagd_varid != aggvar)
					continue;

				/*
				 * We have our description -- now we need to
				 * cons up the zaggdata entry for it.
				 */
				aggdata = &zaggdata[i].dtahe_data;
				aggdata->dtada_size = agg->dtagd_size;
				aggdata->dtada_desc = agg;
				aggdata->dtada_handle = dtp;
				(void) dt_epid_lookup(dtp, agg->dtagd_epid,
				    &aggdata->dtada_edesc,
				    &aggdata->dtada_pdesc);
				aggdata->dtada_normal = 1;
				zaggdata[i].dtahe_hashval = 0;
				zaggdata[i].dtahe_size = agg->dtagd_size;
				break;
			}

			if (zaggdata[i].dtahe_size == 0) {
				caddr_t data;

				/*
				 * We couldn't find this aggregation, meaning
				 * that we have never seen it before for any
				 * tuple _and_ this is an anonymous enabling.
				 * That is, we're in the obscure case outlined
				 * above.  In this case, our defined behavior
				 * is to format the data in the format of the
				 * first non-zero aggregation -- of which, of
				 * course, we know there to be at least one
				 * (or nentries would have been zero).
				 */
				for (j = 0; j < naggvars; j++) {
					if (zaggdata[j].dtahe_size != 0)
						break;
				}

				assert(j < naggvars);
				zaggdata[i] = zaggdata[j];

				/* local used only to feed the assertion */
				data = zaggdata[i].dtahe_data.dtada_data;
				assert(data != NULL);
			}
		}
	}

	/*
	 * Now we need to allocate our zero-filled data for use for
	 * aggregations that don't have a value corresponding to a given key.
	 */
	for (i = 0; i < naggvars; i++) {
		dtrace_aggdata_t *aggdata = &zaggdata[i].dtahe_data;
		dtrace_aggdesc_t *aggdesc = aggdata->dtada_desc;
		dtrace_recdesc_t *rec;
		uint64_t larg;
		caddr_t zdata;

		zsize = zaggdata[i].dtahe_size;
		assert(zsize != 0);

		if ((zdata = dt_zalloc(dtp, zsize)) == NULL) {
			/*
			 * If we failed to allocated some zero-filled data, we
			 * need to zero out the remaining dtada_data pointers
			 * to prevent the wrong data from being freed below.
			 */
			for (j = i; j < naggvars; j++)
				zaggdata[j].dtahe_data.dtada_data = NULL;
			goto out;
		}

		aggvar = aggvars[(i - sortpos + naggvars) % naggvars];

		/*
		 * First, the easy bit.  To maintain compatibility with
		 * consumers that pull the compiler-generated ID out of the
		 * data, we put that ID at the top of the zero-filled data.
		 */
		rec = &aggdesc->dtagd_rec[0];
		/* LINTED - alignment */
		*((dtrace_aggvarid_t *)(zdata + rec->dtrd_offset)) = aggvar;

		rec = &aggdesc->dtagd_rec[aggdesc->dtagd_nrecs - 1];

		/*
		 * Now for the more complicated part.  If (and only if) this
		 * is an lquantize() aggregating action, zero-filled data is
		 * not equivalent to an empty record:  we must also get the
		 * parameters for the lquantize().
		 */
		if (rec->dtrd_action == DTRACEAGG_LQUANTIZE) {
			if (aggdata->dtada_data != NULL) {
				/*
				 * The easier case here is if we actually have
				 * some prototype data -- in which case we
				 * manually dig it out of the aggregation
				 * record.
				 */
				/* LINTED - alignment */
				larg = *((uint64_t *)(aggdata->dtada_data +
				    rec->dtrd_offset));
			} else {
				/*
				 * We don't have any prototype data.  As a
				 * result, we know that we _do_ have the
				 * compiler-generated information.  (If this
				 * were an anonymous enabling, all of our
				 * zero-filled data would have prototype data
				 * -- either directly or indirectly.) So as
				 * gross as it is, we'll grovel around in the
				 * compiler-generated information to find the
				 * lquantize() parameters.
				 */
				dtrace_stmtdesc_t *sdp;
				dt_ident_t *aid;
				dt_idsig_t *isp;

				sdp = (dtrace_stmtdesc_t *)(uintptr_t)
				    aggdesc->dtagd_rec[0].dtrd_uarg;
				aid = sdp->dtsd_aggdata;
				isp = (dt_idsig_t *)aid->di_data;
				assert(isp->dis_auxinfo != 0);
				larg = isp->dis_auxinfo;
			}

			/* LINTED - alignment */
			*((uint64_t *)(zdata + rec->dtrd_offset)) = larg;
		}

		aggdata->dtada_data = zdata;
	}

	/*
	 * Now that we've dealt with setting up our zero-filled data, we can
	 * allocate our sorted array, and take another pass over the data to
	 * fill it.
	 */
	sorted = dt_alloc(dtp, nentries * sizeof (dt_ahashent_t *));

	if (sorted == NULL)
		goto out;

	for (h = hash->dtah_all, i = 0; h != NULL; h = h->dtahe_nextall) {
		dtrace_aggvarid_t id;

		if ((id = dt_aggregate_aggvarid(h)) > max || !map[id])
			continue;

		sorted[i++] = h;
	}

	assert(i == nentries);

	/*
	 * We've loaded our array; now we need to sort by value to allow us
	 * to create bundles of like value.  We're going to acquire the
	 * dt_qsort_lock here, and hold it across all of our subsequent
	 * comparison and sorting.
	 */
	(void) pthread_mutex_lock(&dt_qsort_lock);

	qsort(sorted, nentries, sizeof (dt_ahashent_t *),
	    dt_aggregate_keyvarcmp);

	/*
	 * Now we need to go through and create bundles.  Because the number
	 * of bundles is bounded by the size of the sorted array, we're going
	 * to reuse the underlying storage.  And note that "bundle" is an
	 * array of pointers to arrays of pointers to dt_ahashent_t -- making
	 * its type (regrettably) "dt_ahashent_t ***".  (Regrettable because
	 * '*' -- like '_' and 'X' -- should never appear in triplicate in
	 * an ideal world.)
	 */
	bundle = (dt_ahashent_t ***)sorted;

	for (i = 1, start = 0; i <= nentries; i++) {
		if (i < nentries &&
		    dt_aggregate_keycmp(&sorted[i], &sorted[i - 1]) == 0)
			continue;

		/*
		 * We have a bundle boundary.  Everything from start to
		 * (i - 1) belongs in one bundle.
		 */
		assert(i - start <= naggvars);

		/*
		 * Slots 0 .. (naggvars - 1) hold the per-variable entries,
		 * slot naggvars holds a representative entry for the key,
		 * and the final slot is left NULL by dt_zalloc() --
		 * presumably as a terminator for bundle consumers; confirm
		 * against dt_aggregate_bundlecmp().
		 */
		bundlesize = (naggvars + 2) * sizeof (dt_ahashent_t *);

		if ((nbundle = dt_zalloc(dtp, bundlesize)) == NULL) {
			(void) pthread_mutex_unlock(&dt_qsort_lock);
			goto out;
		}

		for (j = start; j < i; j++) {
			dtrace_aggvarid_t id = dt_aggregate_aggvarid(sorted[j]);

			assert(id <= max);
			assert(map[id] != 0);
			assert(map[id] - 1 < naggvars);
			assert(nbundle[map[id] - 1] == NULL);
			nbundle[map[id] - 1] = sorted[j];

			if (nbundle[naggvars] == NULL)
				nbundle[naggvars] = sorted[j];
		}

		for (j = 0; j < naggvars; j++) {
			if (nbundle[j] != NULL)
				continue;

			/*
			 * Before we assume that this aggregation variable
			 * isn't present (and fall back to using the
			 * zero-filled data allocated earlier), check the
			 * remap.  If we have a remapping, we'll drop it in
			 * here.  Note that we might be remapping an
			 * aggregation variable that isn't present for this
			 * key; in this case, the aggregation data that we
			 * copy will point to the zeroed data.
			 */
			if (remap != NULL && remap[j]) {
				assert(remap[j] - 1 < j);
				assert(nbundle[remap[j] - 1] != NULL);
				nbundle[j] = nbundle[remap[j] - 1];
			} else {
				nbundle[j] = &zaggdata[j];
			}
		}

		bundle[nbundles++] = nbundle;
		start = i;
	}

	/*
	 * Now we need to re-sort based on the first value.
	 */
	dt_aggregate_qsort(dtp, bundle, nbundles, sizeof (dt_ahashent_t **),
	    dt_aggregate_bundlecmp);

	(void) pthread_mutex_unlock(&dt_qsort_lock);

	/*
	 * We're done!  Now we just need to go back over the sorted bundles,
	 * calling the function.
	 */
	data = alloca((naggvars + 1) * sizeof (dtrace_aggdata_t *));

	for (i = 0; i < nbundles; i++) {
		for (j = 0; j < naggvars; j++)
			data[j + 1] = NULL;

		for (j = 0; j < naggvars; j++) {
			/* undo the sortpos rotation applied when mapping */
			int ndx = j - sortpos;

			if (ndx < 0)
				ndx += naggvars;

			assert(bundle[i][ndx] != NULL);
			data[j + 1] = &bundle[i][ndx]->dtahe_data;
		}

		for (j = 0; j < naggvars; j++)
			assert(data[j + 1] != NULL);

		/*
		 * The representative key is the last element in the bundle.
		 * Assert that we have one, and then set it to be the first
		 * element of data.
		 */
		assert(bundle[i][j] != NULL);
		data[0] = &bundle[i][j]->dtahe_data;

		if ((rval = func(data, naggvars + 1, arg)) == -1)
			goto out;
	}

	rval = 0;
out:
	for (i = 0; i < nbundles; i++)
		dt_free(dtp, bundle[i]);

	if (zaggdata != NULL) {
		for (i = 0; i < naggvars; i++)
			dt_free(dtp, zaggdata[i].dtahe_data.dtada_data);
	}

	dt_free(dtp, zaggdata);
	dt_free(dtp, sorted);
	dt_free(dtp, remap);
	dt_free(dtp, map);

	return (rval);
}
1767 
1768 int
1769 dtrace_aggregate_print(dtrace_hdl_t *dtp, FILE *fp,
1770     dtrace_aggregate_walk_f *func)
1771 {
1772 	dt_print_aggdata_t pd;
1773 
1774 	pd.dtpa_dtp = dtp;
1775 	pd.dtpa_fp = fp;
1776 	pd.dtpa_allunprint = 1;
1777 
1778 	if (func == NULL)
1779 		func = dtrace_aggregate_walk_sorted;
1780 
1781 	if ((*func)(dtp, dt_print_agg, &pd) == -1)
1782 		return (dt_set_errno(dtp, dtp->dt_errno));
1783 
1784 	return (0);
1785 }
1786 
1787 void
1788 dtrace_aggregate_clear(dtrace_hdl_t *dtp)
1789 {
1790 	dt_aggregate_t *agp = &dtp->dt_aggregate;
1791 	dt_ahash_t *hash = &agp->dtat_hash;
1792 	dt_ahashent_t *h;
1793 	dtrace_aggdata_t *data;
1794 	dtrace_aggdesc_t *aggdesc;
1795 	dtrace_recdesc_t *rec;
1796 	int i, max_cpus = agp->dtat_maxcpu;
1797 
1798 	for (h = hash->dtah_all; h != NULL; h = h->dtahe_nextall) {
1799 		aggdesc = h->dtahe_data.dtada_desc;
1800 		rec = &aggdesc->dtagd_rec[aggdesc->dtagd_nrecs - 1];
1801 		data = &h->dtahe_data;
1802 
1803 		bzero(&data->dtada_data[rec->dtrd_offset], rec->dtrd_size);
1804 
1805 		if (data->dtada_percpu == NULL)
1806 			continue;
1807 
1808 		for (i = 0; i < max_cpus; i++)
1809 			bzero(data->dtada_percpu[i], rec->dtrd_size);
1810 	}
1811 }
1812 
1813 void
1814 dt_aggregate_destroy(dtrace_hdl_t *dtp)
1815 {
1816 	dt_aggregate_t *agp = &dtp->dt_aggregate;
1817 	dt_ahash_t *hash = &agp->dtat_hash;
1818 	dt_ahashent_t *h, *next;
1819 	dtrace_aggdata_t *aggdata;
1820 	int i, max_cpus = agp->dtat_maxcpu;
1821 
1822 	if (hash->dtah_hash == NULL) {
1823 		assert(hash->dtah_all == NULL);
1824 	} else {
1825 		free(hash->dtah_hash);
1826 
1827 		for (h = hash->dtah_all; h != NULL; h = next) {
1828 			next = h->dtahe_nextall;
1829 
1830 			aggdata = &h->dtahe_data;
1831 
1832 			if (aggdata->dtada_percpu != NULL) {
1833 				for (i = 0; i < max_cpus; i++)
1834 					free(aggdata->dtada_percpu[i]);
1835 				free(aggdata->dtada_percpu);
1836 			}
1837 
1838 			free(aggdata->dtada_data);
1839 			free(h);
1840 		}
1841 
1842 		hash->dtah_hash = NULL;
1843 		hash->dtah_all = NULL;
1844 		hash->dtah_size = 0;
1845 	}
1846 
1847 	free(agp->dtat_buf.dtbd_data);
1848 	free(agp->dtat_cpus);
1849 }
1850