17c478bd9Sstevel@tonic-gate /*
27c478bd9Sstevel@tonic-gate  * CDDL HEADER START
37c478bd9Sstevel@tonic-gate  *
47c478bd9Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
5789d94c2Sjwadams  * Common Development and Distribution License (the "License").
6789d94c2Sjwadams  * You may not use this file except in compliance with the License.
77c478bd9Sstevel@tonic-gate  *
87c478bd9Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
97c478bd9Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
107c478bd9Sstevel@tonic-gate  * See the License for the specific language governing permissions
117c478bd9Sstevel@tonic-gate  * and limitations under the License.
127c478bd9Sstevel@tonic-gate  *
137c478bd9Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
147c478bd9Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
157c478bd9Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
167c478bd9Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
177c478bd9Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
187c478bd9Sstevel@tonic-gate  *
197c478bd9Sstevel@tonic-gate  * CDDL HEADER END
207c478bd9Sstevel@tonic-gate  */
217c478bd9Sstevel@tonic-gate /*
220c3b83b1SJonathan Adams  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
237c478bd9Sstevel@tonic-gate  * Use is subject to license terms.
247c478bd9Sstevel@tonic-gate  */
257c478bd9Sstevel@tonic-gate 
26d7dba7e5SBryan Cantrill /*
27*4f364e7cSRobert Mustacchi  * Copyright 2012 Joyent, Inc.  All rights reserved.
2822ce0148SMatthew Ahrens  * Copyright (c) 2013 by Delphix. All rights reserved.
29d7dba7e5SBryan Cantrill  */
30d7dba7e5SBryan Cantrill 
317c478bd9Sstevel@tonic-gate #include "umem.h"
327c478bd9Sstevel@tonic-gate 
337c478bd9Sstevel@tonic-gate #include <sys/vmem_impl_user.h>
347c478bd9Sstevel@tonic-gate #include <umem_impl.h>
357c478bd9Sstevel@tonic-gate 
367c478bd9Sstevel@tonic-gate #include <alloca.h>
37789d94c2Sjwadams #include <limits.h>
384a1c2431SJonathan Adams #include <mdb/mdb_whatis.h>
39*4f364e7cSRobert Mustacchi #include <thr_uberdata.h>
40*4f364e7cSRobert Mustacchi #include <stdio.h>
417c478bd9Sstevel@tonic-gate 
427c478bd9Sstevel@tonic-gate #include "misc.h"
43789d94c2Sjwadams #include "leaky.h"
44087e1372Stomee #include "dist.h"
457c478bd9Sstevel@tonic-gate 
467c478bd9Sstevel@tonic-gate #include "umem_pagesize.h"
477c478bd9Sstevel@tonic-gate 
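/*
 * Flags for the umem/bufctl walkers (see umem_walk_init_common()), selecting
 * which buffers are visited (allocated vs. free) and how they are reported
 * (raw buffers vs. bufctls, hashed vs. unhashed caches).
 */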
487c478bd9Sstevel@tonic-gate #define	UM_ALLOCATED		0x1
497c478bd9Sstevel@tonic-gate #define	UM_FREE			0x2
507c478bd9Sstevel@tonic-gate #define	UM_BUFCTL		0x4
517c478bd9Sstevel@tonic-gate #define	UM_HASH			0x8
527c478bd9Sstevel@tonic-gate 
53789d94c2Sjwadams int umem_ready;
547c478bd9Sstevel@tonic-gate 
55789d94c2Sjwadams static int umem_stack_depth_warned;
56789d94c2Sjwadams static uint32_t umem_max_ncpus;
577c478bd9Sstevel@tonic-gate uint32_t umem_stack_depth;
58789d94c2Sjwadams 
597c478bd9Sstevel@tonic-gate size_t umem_pagesize;
607c478bd9Sstevel@tonic-gate 
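/*
 * Read the target's value of the named variable into our local variable of
 * the same name; evaluates to non-zero (after warning) if the read fails.
 */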
617c478bd9Sstevel@tonic-gate #define	UMEM_READVAR(var)				\
627c478bd9Sstevel@tonic-gate 	(umem_readvar(&(var), #var) == -1 &&		\
63789d94c2Sjwadams 	    (mdb_warn("failed to read "#var), 1))
647c478bd9Sstevel@tonic-gate 
657c478bd9Sstevel@tonic-gate int
66789d94c2Sjwadams umem_update_variables(void)
677c478bd9Sstevel@tonic-gate {
687c478bd9Sstevel@tonic-gate 	size_t pagesize;
697c478bd9Sstevel@tonic-gate 
707c478bd9Sstevel@tonic-gate 	/*
71789d94c2Sjwadams 	 * Figure out which type of umem is being used; if it's not there
72789d94c2Sjwadams 	 * yet, succeed quietly.
737c478bd9Sstevel@tonic-gate 	 */
74789d94c2Sjwadams 	if (umem_set_standalone() == -1) {
75789d94c2Sjwadams 		umem_ready = 0;
76789d94c2Sjwadams 		return (0);		/* umem not there yet */
77789d94c2Sjwadams 	}
787c478bd9Sstevel@tonic-gate 
79789d94c2Sjwadams 	/*
80789d94c2Sjwadams 	 * Solaris 9 used a different name for umem_max_ncpus.  It's
81789d94c2Sjwadams 	 * cheap backwards compatibility to check for both names.
82789d94c2Sjwadams 	 */
83789d94c2Sjwadams 	if (umem_readvar(&umem_max_ncpus, "umem_max_ncpus") == -1 &&
84789d94c2Sjwadams 	    umem_readvar(&umem_max_ncpus, "max_ncpus") == -1) {
85789d94c2Sjwadams 		mdb_warn("unable to read umem_max_ncpus or max_ncpus");
86789d94c2Sjwadams 		return (-1);
87789d94c2Sjwadams 	}
88789d94c2Sjwadams 	if (UMEM_READVAR(umem_ready))
897c478bd9Sstevel@tonic-gate 		return (-1);
907c478bd9Sstevel@tonic-gate 	if (UMEM_READVAR(umem_stack_depth))
917c478bd9Sstevel@tonic-gate 		return (-1);
927c478bd9Sstevel@tonic-gate 	if (UMEM_READVAR(pagesize))
937c478bd9Sstevel@tonic-gate 		return (-1);
947c478bd9Sstevel@tonic-gate 
957c478bd9Sstevel@tonic-gate 	if (umem_stack_depth > UMEM_MAX_STACK_DEPTH) {
96789d94c2Sjwadams 		if (umem_stack_depth_warned == 0) {
97789d94c2Sjwadams 			mdb_warn("umem_stack_depth corrupted (%d > %d)\n",
98789d94c2Sjwadams 			    umem_stack_depth, UMEM_MAX_STACK_DEPTH);
99789d94c2Sjwadams 			umem_stack_depth_warned = 1;
100789d94c2Sjwadams 		}
1017c478bd9Sstevel@tonic-gate 		umem_stack_depth = 0;
1027c478bd9Sstevel@tonic-gate 	}
103789d94c2Sjwadams 
104789d94c2Sjwadams 	umem_pagesize = pagesize;
105789d94c2Sjwadams 
1067c478bd9Sstevel@tonic-gate 	return (0);
1077c478bd9Sstevel@tonic-gate }
1087c478bd9Sstevel@tonic-gate 
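/*
 * Per-thread cache (ptc) walkers.  When a cache is cached per-thread
 * (UMF_PTC), its cached free buffers are strung through the buffers
 * themselves, rooted at a tm_roots[] slot in each thread's ulwp_t.  The
 * walker's argument is the byte offset of that slot; with no address it
 * layers on the "ulwp" walker to visit every thread, and with an address it
 * walks the list rooted in a single ulwp_t.  For example, assuming 32 is one
 * of the umem_alloc_sizes, "::walk umem_ptc_32" visits every per-thread
 * cached 32-byte buffer.
 */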
109*4f364e7cSRobert Mustacchi static int
110*4f364e7cSRobert Mustacchi umem_ptc_walk_init(mdb_walk_state_t *wsp)
111*4f364e7cSRobert Mustacchi {
112*4f364e7cSRobert Mustacchi 	if (wsp->walk_addr == NULL) {
113*4f364e7cSRobert Mustacchi 		if (mdb_layered_walk("ulwp", wsp) == -1) {
114*4f364e7cSRobert Mustacchi 			mdb_warn("couldn't walk 'ulwp'");
115*4f364e7cSRobert Mustacchi 			return (WALK_ERR);
116*4f364e7cSRobert Mustacchi 		}
117*4f364e7cSRobert Mustacchi 	}
118*4f364e7cSRobert Mustacchi 
119*4f364e7cSRobert Mustacchi 	return (WALK_NEXT);
120*4f364e7cSRobert Mustacchi }
121*4f364e7cSRobert Mustacchi 
122*4f364e7cSRobert Mustacchi static int
123*4f364e7cSRobert Mustacchi umem_ptc_walk_step(mdb_walk_state_t *wsp)
124*4f364e7cSRobert Mustacchi {
125*4f364e7cSRobert Mustacchi 	uintptr_t this;
126*4f364e7cSRobert Mustacchi 	int rval;
127*4f364e7cSRobert Mustacchi 
128*4f364e7cSRobert Mustacchi 	if (wsp->walk_layer != NULL) {
129*4f364e7cSRobert Mustacchi 		this = (uintptr_t)((ulwp_t *)wsp->walk_layer)->ul_self +
130*4f364e7cSRobert Mustacchi 		    (uintptr_t)wsp->walk_arg;
131*4f364e7cSRobert Mustacchi 	} else {
132*4f364e7cSRobert Mustacchi 		this = wsp->walk_addr + (uintptr_t)wsp->walk_arg;
133*4f364e7cSRobert Mustacchi 	}
134*4f364e7cSRobert Mustacchi 
135*4f364e7cSRobert Mustacchi 	for (;;) {
136*4f364e7cSRobert Mustacchi 		if (mdb_vread(&this, sizeof (void *), this) == -1) {
137*4f364e7cSRobert Mustacchi 			mdb_warn("couldn't read ptc buffer at %p", this);
138*4f364e7cSRobert Mustacchi 			return (WALK_ERR);
139*4f364e7cSRobert Mustacchi 		}
140*4f364e7cSRobert Mustacchi 
141*4f364e7cSRobert Mustacchi 		if (this == NULL)
142*4f364e7cSRobert Mustacchi 			break;
143*4f364e7cSRobert Mustacchi 
144*4f364e7cSRobert Mustacchi 		rval = wsp->walk_callback(this, &this, wsp->walk_cbdata);
145*4f364e7cSRobert Mustacchi 
146*4f364e7cSRobert Mustacchi 		if (rval != WALK_NEXT)
147*4f364e7cSRobert Mustacchi 			return (rval);
148*4f364e7cSRobert Mustacchi 	}
149*4f364e7cSRobert Mustacchi 
150*4f364e7cSRobert Mustacchi 	return (wsp->walk_layer != NULL ? WALK_NEXT : WALK_DONE);
151*4f364e7cSRobert Mustacchi }
152*4f364e7cSRobert Mustacchi 
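/*
 * Add a walker for each umem cache, named after the cache itself.  For
 * caches that are cached per-thread (UMF_PTC), also add a umem_ptc_<size>
 * walker whose argument is the offset of the corresponding tm_roots[] slot
 * in the ulwp_t; the sizes argument is the zero-terminated umem_alloc_sizes
 * array read by umem_statechange_cb().
 */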
1537c478bd9Sstevel@tonic-gate /*ARGSUSED*/
154789d94c2Sjwadams static int
155*4f364e7cSRobert Mustacchi umem_init_walkers(uintptr_t addr, const umem_cache_t *c, int *sizes)
1567c478bd9Sstevel@tonic-gate {
1577c478bd9Sstevel@tonic-gate 	mdb_walker_t w;
1587c478bd9Sstevel@tonic-gate 	char descr[64];
159*4f364e7cSRobert Mustacchi 	char name[64];
160*4f364e7cSRobert Mustacchi 	int i;
1617c478bd9Sstevel@tonic-gate 
1627c478bd9Sstevel@tonic-gate 	(void) mdb_snprintf(descr, sizeof (descr),
1637c478bd9Sstevel@tonic-gate 	    "walk the %s cache", c->cache_name);
1647c478bd9Sstevel@tonic-gate 
1657c478bd9Sstevel@tonic-gate 	w.walk_name = c->cache_name;
1667c478bd9Sstevel@tonic-gate 	w.walk_descr = descr;
1677c478bd9Sstevel@tonic-gate 	w.walk_init = umem_walk_init;
1687c478bd9Sstevel@tonic-gate 	w.walk_step = umem_walk_step;
1697c478bd9Sstevel@tonic-gate 	w.walk_fini = umem_walk_fini;
1707c478bd9Sstevel@tonic-gate 	w.walk_init_arg = (void *)addr;
1717c478bd9Sstevel@tonic-gate 
1727c478bd9Sstevel@tonic-gate 	if (mdb_add_walker(&w) == -1)
1737c478bd9Sstevel@tonic-gate 		mdb_warn("failed to add %s walker", c->cache_name);
1747c478bd9Sstevel@tonic-gate 
175*4f364e7cSRobert Mustacchi 	if (!(c->cache_flags & UMF_PTC))
176*4f364e7cSRobert Mustacchi 		return (WALK_NEXT);
177*4f364e7cSRobert Mustacchi 
178*4f364e7cSRobert Mustacchi 	/*
179*4f364e7cSRobert Mustacchi 	 * For the per-thread cache walker, the walk argument is the byte
180*4f364e7cSRobert Mustacchi 	 * offset of the matching tm_roots[] slot within the ulwp_t.
181*4f364e7cSRobert Mustacchi 	 */
182*4f364e7cSRobert Mustacchi 	for (i = 0; sizes[i] != 0; i++) {
183*4f364e7cSRobert Mustacchi 		if (sizes[i] == c->cache_bufsize)
184*4f364e7cSRobert Mustacchi 			break;
185*4f364e7cSRobert Mustacchi 	}
186*4f364e7cSRobert Mustacchi 
187*4f364e7cSRobert Mustacchi 	if (sizes[i] == 0) {
188*4f364e7cSRobert Mustacchi 		mdb_warn("cache %s is cached per-thread, but could not find "
189*4f364e7cSRobert Mustacchi 		    "size in umem_alloc_sizes\n", c->cache_name);
190*4f364e7cSRobert Mustacchi 		return (WALK_NEXT);
191*4f364e7cSRobert Mustacchi 	}
192*4f364e7cSRobert Mustacchi 
193*4f364e7cSRobert Mustacchi 	if (i >= NTMEMBASE) {
194*4f364e7cSRobert Mustacchi 		mdb_warn("index for %s (%d) exceeds root slots (%d)\n",
195*4f364e7cSRobert Mustacchi 		    c->cache_name, i, NTMEMBASE);
196*4f364e7cSRobert Mustacchi 		return (WALK_NEXT);
197*4f364e7cSRobert Mustacchi 	}
198*4f364e7cSRobert Mustacchi 
199*4f364e7cSRobert Mustacchi 	(void) mdb_snprintf(name, sizeof (name),
200*4f364e7cSRobert Mustacchi 	    "umem_ptc_%d", c->cache_bufsize);
201*4f364e7cSRobert Mustacchi 	(void) mdb_snprintf(descr, sizeof (descr),
202*4f364e7cSRobert Mustacchi 	    "walk the per-thread cache for %s", c->cache_name);
203*4f364e7cSRobert Mustacchi 
204*4f364e7cSRobert Mustacchi 	w.walk_name = name;
205*4f364e7cSRobert Mustacchi 	w.walk_descr = descr;
206*4f364e7cSRobert Mustacchi 	w.walk_init = umem_ptc_walk_init;
207*4f364e7cSRobert Mustacchi 	w.walk_step = umem_ptc_walk_step;
208*4f364e7cSRobert Mustacchi 	w.walk_fini = NULL;
209*4f364e7cSRobert Mustacchi 	w.walk_init_arg = (void *)offsetof(ulwp_t, ul_tmem.tm_roots[i]);
210*4f364e7cSRobert Mustacchi 
211*4f364e7cSRobert Mustacchi 	if (mdb_add_walker(&w) == -1)
212*4f364e7cSRobert Mustacchi 		mdb_warn("failed to add %s walker", w.walk_name);
213*4f364e7cSRobert Mustacchi 
2147c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
2157c478bd9Sstevel@tonic-gate }
2167c478bd9Sstevel@tonic-gate 
217789d94c2Sjwadams /*ARGSUSED*/
218789d94c2Sjwadams static void
219789d94c2Sjwadams umem_statechange_cb(void *arg)
220789d94c2Sjwadams {
221789d94c2Sjwadams 	static int been_ready = 0;
222*4f364e7cSRobert Mustacchi 	GElf_Sym sym;
223*4f364e7cSRobert Mustacchi 	int *sizes;
224789d94c2Sjwadams 
225789d94c2Sjwadams #ifndef _KMDB
226789d94c2Sjwadams 	leaky_cleanup(1);	/* state changes invalidate leaky state */
227789d94c2Sjwadams #endif
228789d94c2Sjwadams 
229789d94c2Sjwadams 	if (umem_update_variables() == -1)
230789d94c2Sjwadams 		return;
231789d94c2Sjwadams 
232789d94c2Sjwadams 	if (been_ready)
233789d94c2Sjwadams 		return;
234789d94c2Sjwadams 
235789d94c2Sjwadams 	if (umem_ready != UMEM_READY)
236789d94c2Sjwadams 		return;
237789d94c2Sjwadams 
238789d94c2Sjwadams 	been_ready = 1;
239*4f364e7cSRobert Mustacchi 
240*4f364e7cSRobert Mustacchi 	/*
241*4f364e7cSRobert Mustacchi 	 * In order to determine the tm_roots offset of any cache that is
242*4f364e7cSRobert Mustacchi 	 * cached per-thread, we need to have the umem_alloc_sizes array.
243*4f364e7cSRobert Mustacchi 	 * Read this array, ensuring that it is zero-terminated.
244*4f364e7cSRobert Mustacchi 	 */
245*4f364e7cSRobert Mustacchi 	if (umem_lookup_by_name("umem_alloc_sizes", &sym) == -1) {
246*4f364e7cSRobert Mustacchi 		mdb_warn("unable to lookup 'umem_alloc_sizes'");
247*4f364e7cSRobert Mustacchi 		return;
248*4f364e7cSRobert Mustacchi 	}
249*4f364e7cSRobert Mustacchi 
250*4f364e7cSRobert Mustacchi 	sizes = mdb_zalloc(sym.st_size + sizeof (int), UM_SLEEP | UM_GC);
251*4f364e7cSRobert Mustacchi 
252*4f364e7cSRobert Mustacchi 	if (mdb_vread(sizes, sym.st_size, (uintptr_t)sym.st_value) == -1) {
253*4f364e7cSRobert Mustacchi 		mdb_warn("couldn't read 'umem_alloc_sizes'");
254*4f364e7cSRobert Mustacchi 		return;
255*4f364e7cSRobert Mustacchi 	}
256*4f364e7cSRobert Mustacchi 
257*4f364e7cSRobert Mustacchi 	(void) mdb_walk("umem_cache", (mdb_walk_cb_t)umem_init_walkers, sizes);
258789d94c2Sjwadams }
259789d94c2Sjwadams 
2607c478bd9Sstevel@tonic-gate int
2617c478bd9Sstevel@tonic-gate umem_abort_messages(void)
2627c478bd9Sstevel@tonic-gate {
2637c478bd9Sstevel@tonic-gate 	char *umem_error_buffer;
2647c478bd9Sstevel@tonic-gate 	uint_t umem_error_begin;
2657c478bd9Sstevel@tonic-gate 	GElf_Sym sym;
2667c478bd9Sstevel@tonic-gate 	size_t bufsize;
2677c478bd9Sstevel@tonic-gate 
2687c478bd9Sstevel@tonic-gate 	if (UMEM_READVAR(umem_error_begin))
2697c478bd9Sstevel@tonic-gate 		return (DCMD_ERR);
2707c478bd9Sstevel@tonic-gate 
2717c478bd9Sstevel@tonic-gate 	if (umem_lookup_by_name("umem_error_buffer", &sym) == -1) {
2727c478bd9Sstevel@tonic-gate 		mdb_warn("unable to look up umem_error_buffer");
2737c478bd9Sstevel@tonic-gate 		return (DCMD_ERR);
2747c478bd9Sstevel@tonic-gate 	}
2757c478bd9Sstevel@tonic-gate 
2767c478bd9Sstevel@tonic-gate 	bufsize = (size_t)sym.st_size;
2777c478bd9Sstevel@tonic-gate 
2787c478bd9Sstevel@tonic-gate 	umem_error_buffer = mdb_alloc(bufsize+1, UM_SLEEP | UM_GC);
2797c478bd9Sstevel@tonic-gate 
2807c478bd9Sstevel@tonic-gate 	if (mdb_vread(umem_error_buffer, bufsize, (uintptr_t)sym.st_value)
2817c478bd9Sstevel@tonic-gate 	    != bufsize) {
2827c478bd9Sstevel@tonic-gate 		mdb_warn("unable to read umem_error_buffer");
2837c478bd9Sstevel@tonic-gate 		return (DCMD_ERR);
2847c478bd9Sstevel@tonic-gate 	}
2857c478bd9Sstevel@tonic-gate 	/* put a zero after the end of the buffer to simplify printing */
2867c478bd9Sstevel@tonic-gate 	umem_error_buffer[bufsize] = 0;
2877c478bd9Sstevel@tonic-gate 
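	/*
	 * The error buffer is circular, with umem_error_begin marking the
	 * wrap point.  If it is at the start, the buffer is already in
	 * order; otherwise print the older data (from the wrap point to
	 * the end) followed by the newer data at the front.
	 */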
2887c478bd9Sstevel@tonic-gate 	if ((umem_error_begin % bufsize) == 0)
2897c478bd9Sstevel@tonic-gate 		mdb_printf("%s\n", umem_error_buffer);
2907c478bd9Sstevel@tonic-gate 	else {
2917c478bd9Sstevel@tonic-gate 		umem_error_buffer[(umem_error_begin % bufsize) - 1] = 0;
2927c478bd9Sstevel@tonic-gate 		mdb_printf("%s%s\n",
2937c478bd9Sstevel@tonic-gate 		    &umem_error_buffer[umem_error_begin % bufsize],
2947c478bd9Sstevel@tonic-gate 		    umem_error_buffer);
2957c478bd9Sstevel@tonic-gate 	}
2967c478bd9Sstevel@tonic-gate 
2977c478bd9Sstevel@tonic-gate 	return (DCMD_OK);
2987c478bd9Sstevel@tonic-gate }
2997c478bd9Sstevel@tonic-gate 
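/*
 * Print the name and total size of a umem debugging log (the header's chunk
 * size times its chunk count), using megabyte or kilobyte units when the
 * size divides evenly; logs which are not configured are skipped.
 */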
3007c478bd9Sstevel@tonic-gate static void
3017c478bd9Sstevel@tonic-gate umem_log_status(const char *name, umem_log_header_t *val)
3027c478bd9Sstevel@tonic-gate {
3037c478bd9Sstevel@tonic-gate 	umem_log_header_t my_lh;
3047c478bd9Sstevel@tonic-gate 	uintptr_t pos = (uintptr_t)val;
3057c478bd9Sstevel@tonic-gate 	size_t size;
3067c478bd9Sstevel@tonic-gate 
3077c478bd9Sstevel@tonic-gate 	if (pos == NULL)
3087c478bd9Sstevel@tonic-gate 		return;
3097c478bd9Sstevel@tonic-gate 
3107c478bd9Sstevel@tonic-gate 	if (mdb_vread(&my_lh, sizeof (umem_log_header_t), pos) == -1) {
3117c478bd9Sstevel@tonic-gate 		mdb_warn("\nunable to read umem_%s_log pointer %p",
3127c478bd9Sstevel@tonic-gate 		    name, pos);
3137c478bd9Sstevel@tonic-gate 		return;
3147c478bd9Sstevel@tonic-gate 	}
3157c478bd9Sstevel@tonic-gate 
3167c478bd9Sstevel@tonic-gate 	size = my_lh.lh_chunksize * my_lh.lh_nchunks;
3177c478bd9Sstevel@tonic-gate 
3187c478bd9Sstevel@tonic-gate 	if (size % (1024 * 1024) == 0)
3197c478bd9Sstevel@tonic-gate 		mdb_printf("%s=%dm ", name, size / (1024 * 1024));
3207c478bd9Sstevel@tonic-gate 	else if (size % 1024 == 0)
3217c478bd9Sstevel@tonic-gate 		mdb_printf("%s=%dk ", name, size / 1024);
3227c478bd9Sstevel@tonic-gate 	else
3237c478bd9Sstevel@tonic-gate 		mdb_printf("%s=%d ", name, size);
3247c478bd9Sstevel@tonic-gate }
3257c478bd9Sstevel@tonic-gate 
3267c478bd9Sstevel@tonic-gate typedef struct umem_debug_flags {
3277c478bd9Sstevel@tonic-gate 	const char	*udf_name;
3287c478bd9Sstevel@tonic-gate 	uint_t		udf_flags;
3297c478bd9Sstevel@tonic-gate 	uint_t		udf_clear;	/* if 0, uses udf_flags */
3307c478bd9Sstevel@tonic-gate } umem_debug_flags_t;
3317c478bd9Sstevel@tonic-gate 
3327c478bd9Sstevel@tonic-gate umem_debug_flags_t umem_status_flags[] = {
3337c478bd9Sstevel@tonic-gate 	{ "random",	UMF_RANDOMIZE,	UMF_RANDOM },
3347c478bd9Sstevel@tonic-gate 	{ "default",	UMF_AUDIT | UMF_DEADBEEF | UMF_REDZONE | UMF_CONTENTS },
3357c478bd9Sstevel@tonic-gate 	{ "audit",	UMF_AUDIT },
3367c478bd9Sstevel@tonic-gate 	{ "guards",	UMF_DEADBEEF | UMF_REDZONE },
3377c478bd9Sstevel@tonic-gate 	{ "nosignal",	UMF_CHECKSIGNAL },
3387c478bd9Sstevel@tonic-gate 	{ "firewall",	UMF_FIREWALL },
3397c478bd9Sstevel@tonic-gate 	{ "lite",	UMF_LITE },
3407c478bd9Sstevel@tonic-gate 	{ NULL }
3417c478bd9Sstevel@tonic-gate };
3427c478bd9Sstevel@tonic-gate 
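/*
 * Status dcmd: report whether libumem is present and ready in the target,
 * its concurrency level, the size and state of its debugging logs, and any
 * messages left in its abort buffer.
 */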
3437c478bd9Sstevel@tonic-gate /*ARGSUSED*/
3447c478bd9Sstevel@tonic-gate int
3457c478bd9Sstevel@tonic-gate umem_status(uintptr_t addr, uint_t flags, int ac, const mdb_arg_t *argv)
3467c478bd9Sstevel@tonic-gate {
3477c478bd9Sstevel@tonic-gate 	int umem_logging;
3487c478bd9Sstevel@tonic-gate 
3497c478bd9Sstevel@tonic-gate 	umem_log_header_t *umem_transaction_log;
3507c478bd9Sstevel@tonic-gate 	umem_log_header_t *umem_content_log;
3517c478bd9Sstevel@tonic-gate 	umem_log_header_t *umem_failure_log;
3527c478bd9Sstevel@tonic-gate 	umem_log_header_t *umem_slab_log;
3537c478bd9Sstevel@tonic-gate 
3547c478bd9Sstevel@tonic-gate 	mdb_printf("Status:\t\t%s\n",
3557c478bd9Sstevel@tonic-gate 	    umem_ready == UMEM_READY_INIT_FAILED ? "initialization failed" :
3567c478bd9Sstevel@tonic-gate 	    umem_ready == UMEM_READY_STARTUP ? "uninitialized" :
3577c478bd9Sstevel@tonic-gate 	    umem_ready == UMEM_READY_INITING ? "initialization in progress" :
3587c478bd9Sstevel@tonic-gate 	    umem_ready == UMEM_READY ? "ready and active" :
359789d94c2Sjwadams 	    umem_ready == 0 ? "not loaded into address space" :
3607c478bd9Sstevel@tonic-gate 	    "unknown (umem_ready invalid)");
3617c478bd9Sstevel@tonic-gate 
362789d94c2Sjwadams 	if (umem_ready == 0)
363789d94c2Sjwadams 		return (DCMD_OK);
364789d94c2Sjwadams 
3657c478bd9Sstevel@tonic-gate 	mdb_printf("Concurrency:\t%d\n", umem_max_ncpus);
3667c478bd9Sstevel@tonic-gate 
3677c478bd9Sstevel@tonic-gate 	if (UMEM_READVAR(umem_logging))
3687c478bd9Sstevel@tonic-gate 		goto err;
3697c478bd9Sstevel@tonic-gate 	if (UMEM_READVAR(umem_transaction_log))
3707c478bd9Sstevel@tonic-gate 		goto err;
3717c478bd9Sstevel@tonic-gate 	if (UMEM_READVAR(umem_content_log))
3727c478bd9Sstevel@tonic-gate 		goto err;
3737c478bd9Sstevel@tonic-gate 	if (UMEM_READVAR(umem_failure_log))
3747c478bd9Sstevel@tonic-gate 		goto err;
3757c478bd9Sstevel@tonic-gate 	if (UMEM_READVAR(umem_slab_log))
3767c478bd9Sstevel@tonic-gate 		goto err;
3777c478bd9Sstevel@tonic-gate 
3787c478bd9Sstevel@tonic-gate 	mdb_printf("Logs:\t\t");
3797c478bd9Sstevel@tonic-gate 	umem_log_status("transaction", umem_transaction_log);
3807c478bd9Sstevel@tonic-gate 	umem_log_status("content", umem_content_log);
3817c478bd9Sstevel@tonic-gate 	umem_log_status("fail", umem_failure_log);
3827c478bd9Sstevel@tonic-gate 	umem_log_status("slab", umem_slab_log);
3837c478bd9Sstevel@tonic-gate 	if (!umem_logging)
3847c478bd9Sstevel@tonic-gate 		mdb_printf("(inactive)");
3857c478bd9Sstevel@tonic-gate 	mdb_printf("\n");
3867c478bd9Sstevel@tonic-gate 
3877c478bd9Sstevel@tonic-gate 	mdb_printf("Message buffer:\n");
3887c478bd9Sstevel@tonic-gate 	return (umem_abort_messages());
3897c478bd9Sstevel@tonic-gate 
3907c478bd9Sstevel@tonic-gate err:
3917c478bd9Sstevel@tonic-gate 	mdb_printf("Message buffer:\n");
3927c478bd9Sstevel@tonic-gate 	(void) umem_abort_messages();
3937c478bd9Sstevel@tonic-gate 	return (DCMD_ERR);
3947c478bd9Sstevel@tonic-gate }
3957c478bd9Sstevel@tonic-gate 
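/*
 * umem_cache walker: umem caches are kept on a circular list anchored at
 * umem_null_cache, so we record the anchor's address and follow cache_next
 * pointers until we return to it.
 */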
3967c478bd9Sstevel@tonic-gate typedef struct {
3977c478bd9Sstevel@tonic-gate 	uintptr_t ucw_first;
3987c478bd9Sstevel@tonic-gate 	uintptr_t ucw_current;
3997c478bd9Sstevel@tonic-gate } umem_cache_walk_t;
4007c478bd9Sstevel@tonic-gate 
4017c478bd9Sstevel@tonic-gate int
4027c478bd9Sstevel@tonic-gate umem_cache_walk_init(mdb_walk_state_t *wsp)
4037c478bd9Sstevel@tonic-gate {
4047c478bd9Sstevel@tonic-gate 	umem_cache_walk_t *ucw;
4057c478bd9Sstevel@tonic-gate 	umem_cache_t c;
4067c478bd9Sstevel@tonic-gate 	uintptr_t cp;
4077c478bd9Sstevel@tonic-gate 	GElf_Sym sym;
4087c478bd9Sstevel@tonic-gate 
4097c478bd9Sstevel@tonic-gate 	if (umem_lookup_by_name("umem_null_cache", &sym) == -1) {
4107c478bd9Sstevel@tonic-gate 		mdb_warn("couldn't find umem_null_cache");
4117c478bd9Sstevel@tonic-gate 		return (WALK_ERR);
4127c478bd9Sstevel@tonic-gate 	}
4137c478bd9Sstevel@tonic-gate 
4147c478bd9Sstevel@tonic-gate 	cp = (uintptr_t)sym.st_value;
4157c478bd9Sstevel@tonic-gate 
4167c478bd9Sstevel@tonic-gate 	if (mdb_vread(&c, sizeof (umem_cache_t), cp) == -1) {
4177c478bd9Sstevel@tonic-gate 		mdb_warn("couldn't read cache at %p", cp);
4187c478bd9Sstevel@tonic-gate 		return (WALK_ERR);
4197c478bd9Sstevel@tonic-gate 	}
4207c478bd9Sstevel@tonic-gate 
4217c478bd9Sstevel@tonic-gate 	ucw = mdb_alloc(sizeof (umem_cache_walk_t), UM_SLEEP);
4227c478bd9Sstevel@tonic-gate 
4237c478bd9Sstevel@tonic-gate 	ucw->ucw_first = cp;
4247c478bd9Sstevel@tonic-gate 	ucw->ucw_current = (uintptr_t)c.cache_next;
4257c478bd9Sstevel@tonic-gate 	wsp->walk_data = ucw;
4267c478bd9Sstevel@tonic-gate 
4277c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
4287c478bd9Sstevel@tonic-gate }
4297c478bd9Sstevel@tonic-gate 
4307c478bd9Sstevel@tonic-gate int
4317c478bd9Sstevel@tonic-gate umem_cache_walk_step(mdb_walk_state_t *wsp)
4327c478bd9Sstevel@tonic-gate {
4337c478bd9Sstevel@tonic-gate 	umem_cache_walk_t *ucw = wsp->walk_data;
4347c478bd9Sstevel@tonic-gate 	umem_cache_t c;
4357c478bd9Sstevel@tonic-gate 	int status;
4367c478bd9Sstevel@tonic-gate 
4377c478bd9Sstevel@tonic-gate 	if (mdb_vread(&c, sizeof (umem_cache_t), ucw->ucw_current) == -1) {
4387c478bd9Sstevel@tonic-gate 		mdb_warn("couldn't read cache at %p", ucw->ucw_current);
4397c478bd9Sstevel@tonic-gate 		return (WALK_DONE);
4407c478bd9Sstevel@tonic-gate 	}
4417c478bd9Sstevel@tonic-gate 
4427c478bd9Sstevel@tonic-gate 	status = wsp->walk_callback(ucw->ucw_current, &c, wsp->walk_cbdata);
4437c478bd9Sstevel@tonic-gate 
4447c478bd9Sstevel@tonic-gate 	if ((ucw->ucw_current = (uintptr_t)c.cache_next) == ucw->ucw_first)
4457c478bd9Sstevel@tonic-gate 		return (WALK_DONE);
4467c478bd9Sstevel@tonic-gate 
4477c478bd9Sstevel@tonic-gate 	return (status);
4487c478bd9Sstevel@tonic-gate }
4497c478bd9Sstevel@tonic-gate 
4507c478bd9Sstevel@tonic-gate void
4517c478bd9Sstevel@tonic-gate umem_cache_walk_fini(mdb_walk_state_t *wsp)
4527c478bd9Sstevel@tonic-gate {
4537c478bd9Sstevel@tonic-gate 	umem_cache_walk_t *ucw = wsp->walk_data;
4547c478bd9Sstevel@tonic-gate 	mdb_free(ucw, sizeof (umem_cache_walk_t));
4557c478bd9Sstevel@tonic-gate }
4567c478bd9Sstevel@tonic-gate 
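/*
 * umem_cpu walker: visit each of the umem_max_ncpus umem_cpu_t structures
 * in the target's umem_cpus array.
 */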
4577c478bd9Sstevel@tonic-gate typedef struct {
4587c478bd9Sstevel@tonic-gate 	umem_cpu_t *ucw_cpus;
4597c478bd9Sstevel@tonic-gate 	uint32_t ucw_current;
4607c478bd9Sstevel@tonic-gate 	uint32_t ucw_max;
4617c478bd9Sstevel@tonic-gate } umem_cpu_walk_state_t;
4627c478bd9Sstevel@tonic-gate 
4637c478bd9Sstevel@tonic-gate int
4647c478bd9Sstevel@tonic-gate umem_cpu_walk_init(mdb_walk_state_t *wsp)
4657c478bd9Sstevel@tonic-gate {
4667c478bd9Sstevel@tonic-gate 	umem_cpu_t *umem_cpus;
4677c478bd9Sstevel@tonic-gate 
4687c478bd9Sstevel@tonic-gate 	umem_cpu_walk_state_t *ucw;
4697c478bd9Sstevel@tonic-gate 
4707c478bd9Sstevel@tonic-gate 	if (umem_readvar(&umem_cpus, "umem_cpus") == -1) {
4717c478bd9Sstevel@tonic-gate 		mdb_warn("failed to read 'umem_cpus'");
4727c478bd9Sstevel@tonic-gate 		return (WALK_ERR);
4737c478bd9Sstevel@tonic-gate 	}
4747c478bd9Sstevel@tonic-gate 
4757c478bd9Sstevel@tonic-gate 	ucw = mdb_alloc(sizeof (*ucw), UM_SLEEP);
4767c478bd9Sstevel@tonic-gate 
4777c478bd9Sstevel@tonic-gate 	ucw->ucw_cpus = umem_cpus;
4787c478bd9Sstevel@tonic-gate 	ucw->ucw_current = 0;
4797c478bd9Sstevel@tonic-gate 	ucw->ucw_max = umem_max_ncpus;
4807c478bd9Sstevel@tonic-gate 
4817c478bd9Sstevel@tonic-gate 	wsp->walk_data = ucw;
4827c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
4837c478bd9Sstevel@tonic-gate }
4847c478bd9Sstevel@tonic-gate 
4857c478bd9Sstevel@tonic-gate int
4867c478bd9Sstevel@tonic-gate umem_cpu_walk_step(mdb_walk_state_t *wsp)
4877c478bd9Sstevel@tonic-gate {
4887c478bd9Sstevel@tonic-gate 	umem_cpu_t cpu;
4897c478bd9Sstevel@tonic-gate 	umem_cpu_walk_state_t *ucw = wsp->walk_data;
4907c478bd9Sstevel@tonic-gate 
4917c478bd9Sstevel@tonic-gate 	uintptr_t caddr;
4927c478bd9Sstevel@tonic-gate 
4937c478bd9Sstevel@tonic-gate 	if (ucw->ucw_current >= ucw->ucw_max)
4947c478bd9Sstevel@tonic-gate 		return (WALK_DONE);
4957c478bd9Sstevel@tonic-gate 
4967c478bd9Sstevel@tonic-gate 	caddr = (uintptr_t)&(ucw->ucw_cpus[ucw->ucw_current]);
4977c478bd9Sstevel@tonic-gate 
4987c478bd9Sstevel@tonic-gate 	if (mdb_vread(&cpu, sizeof (umem_cpu_t), caddr) == -1) {
4997c478bd9Sstevel@tonic-gate 		mdb_warn("failed to read cpu %d", ucw->ucw_current);
5007c478bd9Sstevel@tonic-gate 		return (WALK_ERR);
5017c478bd9Sstevel@tonic-gate 	}
5027c478bd9Sstevel@tonic-gate 
5037c478bd9Sstevel@tonic-gate 	ucw->ucw_current++;
5047c478bd9Sstevel@tonic-gate 
5057c478bd9Sstevel@tonic-gate 	return (wsp->walk_callback(caddr, &cpu, wsp->walk_cbdata));
5067c478bd9Sstevel@tonic-gate }
5077c478bd9Sstevel@tonic-gate 
5087c478bd9Sstevel@tonic-gate void
5097c478bd9Sstevel@tonic-gate umem_cpu_walk_fini(mdb_walk_state_t *wsp)
5107c478bd9Sstevel@tonic-gate {
5117c478bd9Sstevel@tonic-gate 	umem_cpu_walk_state_t *ucw = wsp->walk_data;
5127c478bd9Sstevel@tonic-gate 
5137c478bd9Sstevel@tonic-gate 	mdb_free(ucw, sizeof (*ucw));
5147c478bd9Sstevel@tonic-gate }
5157c478bd9Sstevel@tonic-gate 
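/*
 * umem_cpu_cache walker: layered on the umem_cpu walker, this visits the
 * given cache's umem_cpu_cache_t for each CPU, found at the CPU's
 * cpu_cache_offset from the cache address.
 */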
5167c478bd9Sstevel@tonic-gate int
5177c478bd9Sstevel@tonic-gate umem_cpu_cache_walk_init(mdb_walk_state_t *wsp)
5187c478bd9Sstevel@tonic-gate {
5197c478bd9Sstevel@tonic-gate 	if (wsp->walk_addr == NULL) {
5207c478bd9Sstevel@tonic-gate 		mdb_warn("umem_cpu_cache doesn't support global walks");
5217c478bd9Sstevel@tonic-gate 		return (WALK_ERR);
5227c478bd9Sstevel@tonic-gate 	}
5237c478bd9Sstevel@tonic-gate 
5247c478bd9Sstevel@tonic-gate 	if (mdb_layered_walk("umem_cpu", wsp) == -1) {
5257c478bd9Sstevel@tonic-gate 		mdb_warn("couldn't walk 'umem_cpu'");
5267c478bd9Sstevel@tonic-gate 		return (WALK_ERR);
5277c478bd9Sstevel@tonic-gate 	}
5287c478bd9Sstevel@tonic-gate 
5297c478bd9Sstevel@tonic-gate 	wsp->walk_data = (void *)wsp->walk_addr;
5307c478bd9Sstevel@tonic-gate 
5317c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
5327c478bd9Sstevel@tonic-gate }
5337c478bd9Sstevel@tonic-gate 
5347c478bd9Sstevel@tonic-gate int
5357c478bd9Sstevel@tonic-gate umem_cpu_cache_walk_step(mdb_walk_state_t *wsp)
5367c478bd9Sstevel@tonic-gate {
5377c478bd9Sstevel@tonic-gate 	uintptr_t caddr = (uintptr_t)wsp->walk_data;
5387c478bd9Sstevel@tonic-gate 	const umem_cpu_t *cpu = wsp->walk_layer;
5397c478bd9Sstevel@tonic-gate 	umem_cpu_cache_t cc;
5407c478bd9Sstevel@tonic-gate 
5417c478bd9Sstevel@tonic-gate 	caddr += cpu->cpu_cache_offset;
5427c478bd9Sstevel@tonic-gate 
5437c478bd9Sstevel@tonic-gate 	if (mdb_vread(&cc, sizeof (umem_cpu_cache_t), caddr) == -1) {
5447c478bd9Sstevel@tonic-gate 		mdb_warn("couldn't read umem_cpu_cache at %p", caddr);
5457c478bd9Sstevel@tonic-gate 		return (WALK_ERR);
5467c478bd9Sstevel@tonic-gate 	}
5477c478bd9Sstevel@tonic-gate 
5487c478bd9Sstevel@tonic-gate 	return (wsp->walk_callback(caddr, &cc, wsp->walk_cbdata));
5497c478bd9Sstevel@tonic-gate }
5507c478bd9Sstevel@tonic-gate 
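/*
 * umem_slab walkers: a cache's slabs form a circular list anchored at its
 * embedded cache_nullslab, so we stash the anchor's address in walk_data and
 * follow slab_next pointers until we come back around.  The "partial"
 * variant starts at cache_freelist instead, so that only slabs which still
 * have free buffers (plus, if there are none, the last complete slab) are
 * visited.
 */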
5517c478bd9Sstevel@tonic-gate int
5527c478bd9Sstevel@tonic-gate umem_slab_walk_init(mdb_walk_state_t *wsp)
5537c478bd9Sstevel@tonic-gate {
5547c478bd9Sstevel@tonic-gate 	uintptr_t caddr = wsp->walk_addr;
5557c478bd9Sstevel@tonic-gate 	umem_cache_t c;
5567c478bd9Sstevel@tonic-gate 
5577c478bd9Sstevel@tonic-gate 	if (caddr == NULL) {
5587c478bd9Sstevel@tonic-gate 		mdb_warn("umem_slab doesn't support global walks\n");
5597c478bd9Sstevel@tonic-gate 		return (WALK_ERR);
5607c478bd9Sstevel@tonic-gate 	}
5617c478bd9Sstevel@tonic-gate 
5627c478bd9Sstevel@tonic-gate 	if (mdb_vread(&c, sizeof (c), caddr) == -1) {
5637c478bd9Sstevel@tonic-gate 		mdb_warn("couldn't read umem_cache at %p", caddr);
5647c478bd9Sstevel@tonic-gate 		return (WALK_ERR);
5657c478bd9Sstevel@tonic-gate 	}
5667c478bd9Sstevel@tonic-gate 
5677c478bd9Sstevel@tonic-gate 	wsp->walk_data =
5687c478bd9Sstevel@tonic-gate 	    (void *)(caddr + offsetof(umem_cache_t, cache_nullslab));
5697c478bd9Sstevel@tonic-gate 	wsp->walk_addr = (uintptr_t)c.cache_nullslab.slab_next;
5707c478bd9Sstevel@tonic-gate 
5717c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
5727c478bd9Sstevel@tonic-gate }
5737c478bd9Sstevel@tonic-gate 
5747c478bd9Sstevel@tonic-gate int
5757c478bd9Sstevel@tonic-gate umem_slab_walk_partial_init(mdb_walk_state_t *wsp)
5767c478bd9Sstevel@tonic-gate {
5777c478bd9Sstevel@tonic-gate 	uintptr_t caddr = wsp->walk_addr;
5787c478bd9Sstevel@tonic-gate 	umem_cache_t c;
5797c478bd9Sstevel@tonic-gate 
5807c478bd9Sstevel@tonic-gate 	if (caddr == NULL) {
5817c478bd9Sstevel@tonic-gate 		mdb_warn("umem_slab_partial doesn't support global walks\n");
5827c478bd9Sstevel@tonic-gate 		return (WALK_ERR);
5837c478bd9Sstevel@tonic-gate 	}
5847c478bd9Sstevel@tonic-gate 
5857c478bd9Sstevel@tonic-gate 	if (mdb_vread(&c, sizeof (c), caddr) == -1) {
5867c478bd9Sstevel@tonic-gate 		mdb_warn("couldn't read umem_cache at %p", caddr);
5877c478bd9Sstevel@tonic-gate 		return (WALK_ERR);
5887c478bd9Sstevel@tonic-gate 	}
5897c478bd9Sstevel@tonic-gate 
5907c478bd9Sstevel@tonic-gate 	wsp->walk_data =
5917c478bd9Sstevel@tonic-gate 	    (void *)(caddr + offsetof(umem_cache_t, cache_nullslab));
5927c478bd9Sstevel@tonic-gate 	wsp->walk_addr = (uintptr_t)c.cache_freelist;
5937c478bd9Sstevel@tonic-gate 
5947c478bd9Sstevel@tonic-gate 	/*
5957c478bd9Sstevel@tonic-gate 	 * Some consumers (umem_walk_step(), in particular) require at
5967c478bd9Sstevel@tonic-gate 	 * least one callback if there are any buffers in the cache.  So
5977c478bd9Sstevel@tonic-gate 	 * if there are *no* partial slabs, report the last full slab, if
5987c478bd9Sstevel@tonic-gate 	 * any.
5997c478bd9Sstevel@tonic-gate 	 *
6007c478bd9Sstevel@tonic-gate 	 * Yes, this is ugly, but it's cleaner than the other possibilities.
6017c478bd9Sstevel@tonic-gate 	 */
6027c478bd9Sstevel@tonic-gate 	if ((uintptr_t)wsp->walk_data == wsp->walk_addr)
6037c478bd9Sstevel@tonic-gate 		wsp->walk_addr = (uintptr_t)c.cache_nullslab.slab_prev;
6047c478bd9Sstevel@tonic-gate 
6057c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
6067c478bd9Sstevel@tonic-gate }
6077c478bd9Sstevel@tonic-gate 
6087c478bd9Sstevel@tonic-gate int
6097c478bd9Sstevel@tonic-gate umem_slab_walk_step(mdb_walk_state_t *wsp)
6107c478bd9Sstevel@tonic-gate {
6117c478bd9Sstevel@tonic-gate 	umem_slab_t s;
6127c478bd9Sstevel@tonic-gate 	uintptr_t addr = wsp->walk_addr;
6137c478bd9Sstevel@tonic-gate 	uintptr_t saddr = (uintptr_t)wsp->walk_data;
6147c478bd9Sstevel@tonic-gate 	uintptr_t caddr = saddr - offsetof(umem_cache_t, cache_nullslab);
6157c478bd9Sstevel@tonic-gate 
6167c478bd9Sstevel@tonic-gate 	if (addr == saddr)
6177c478bd9Sstevel@tonic-gate 		return (WALK_DONE);
6187c478bd9Sstevel@tonic-gate 
6197c478bd9Sstevel@tonic-gate 	if (mdb_vread(&s, sizeof (s), addr) == -1) {
6207c478bd9Sstevel@tonic-gate 		mdb_warn("failed to read slab at %p", wsp->walk_addr);
6217c478bd9Sstevel@tonic-gate 		return (WALK_ERR);
6227c478bd9Sstevel@tonic-gate 	}
6237c478bd9Sstevel@tonic-gate 
6247c478bd9Sstevel@tonic-gate 	if ((uintptr_t)s.slab_cache != caddr) {
6257c478bd9Sstevel@tonic-gate 		mdb_warn("slab %p isn't in cache %p (in cache %p)\n",
6267c478bd9Sstevel@tonic-gate 		    addr, caddr, s.slab_cache);
6277c478bd9Sstevel@tonic-gate 		return (WALK_ERR);
6287c478bd9Sstevel@tonic-gate 	}
6297c478bd9Sstevel@tonic-gate 
6307c478bd9Sstevel@tonic-gate 	wsp->walk_addr = (uintptr_t)s.slab_next;
6317c478bd9Sstevel@tonic-gate 
6327c478bd9Sstevel@tonic-gate 	return (wsp->walk_callback(addr, &s, wsp->walk_cbdata));
6337c478bd9Sstevel@tonic-gate }
6347c478bd9Sstevel@tonic-gate 
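/*
 * ::umem_cache dcmd: with no address, apply itself to every cache via the
 * umem_cache walker; with an address, print a one-line summary of that
 * cache's flags, creation flags, buffer size, and total buffer count.
 */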
6357c478bd9Sstevel@tonic-gate int
6367c478bd9Sstevel@tonic-gate umem_cache(uintptr_t addr, uint_t flags, int ac, const mdb_arg_t *argv)
6377c478bd9Sstevel@tonic-gate {
6387c478bd9Sstevel@tonic-gate 	umem_cache_t c;
6397c478bd9Sstevel@tonic-gate 
6407c478bd9Sstevel@tonic-gate 	if (!(flags & DCMD_ADDRSPEC)) {
6417c478bd9Sstevel@tonic-gate 		if (mdb_walk_dcmd("umem_cache", "umem_cache", ac, argv) == -1) {
6427c478bd9Sstevel@tonic-gate 			mdb_warn("can't walk umem_cache");
6437c478bd9Sstevel@tonic-gate 			return (DCMD_ERR);
6447c478bd9Sstevel@tonic-gate 		}
6457c478bd9Sstevel@tonic-gate 		return (DCMD_OK);
6467c478bd9Sstevel@tonic-gate 	}
6477c478bd9Sstevel@tonic-gate 
6487c478bd9Sstevel@tonic-gate 	if (DCMD_HDRSPEC(flags))
6497c478bd9Sstevel@tonic-gate 		mdb_printf("%-?s %-25s %4s %8s %8s %8s\n", "ADDR", "NAME",
6507c478bd9Sstevel@tonic-gate 		    "FLAG", "CFLAG", "BUFSIZE", "BUFTOTL");
6517c478bd9Sstevel@tonic-gate 
6527c478bd9Sstevel@tonic-gate 	if (mdb_vread(&c, sizeof (c), addr) == -1) {
6537c478bd9Sstevel@tonic-gate 		mdb_warn("couldn't read umem_cache at %p", addr);
6547c478bd9Sstevel@tonic-gate 		return (DCMD_ERR);
6557c478bd9Sstevel@tonic-gate 	}
6567c478bd9Sstevel@tonic-gate 
6577c478bd9Sstevel@tonic-gate 	mdb_printf("%0?p %-25s %04x %08x %8ld %8lld\n", addr, c.cache_name,
6587c478bd9Sstevel@tonic-gate 	    c.cache_flags, c.cache_cflags, c.cache_bufsize, c.cache_buftotal);
6597c478bd9Sstevel@tonic-gate 
6607c478bd9Sstevel@tonic-gate 	return (DCMD_OK);
6617c478bd9Sstevel@tonic-gate }
6627c478bd9Sstevel@tonic-gate 
6637c478bd9Sstevel@tonic-gate static int
6647c478bd9Sstevel@tonic-gate addrcmp(const void *lhs, const void *rhs)
6657c478bd9Sstevel@tonic-gate {
6667c478bd9Sstevel@tonic-gate 	uintptr_t p1 = *((uintptr_t *)lhs);
6677c478bd9Sstevel@tonic-gate 	uintptr_t p2 = *((uintptr_t *)rhs);
6687c478bd9Sstevel@tonic-gate 
6697c478bd9Sstevel@tonic-gate 	if (p1 < p2)
6707c478bd9Sstevel@tonic-gate 		return (-1);
6717c478bd9Sstevel@tonic-gate 	if (p1 > p2)
6727c478bd9Sstevel@tonic-gate 		return (1);
6737c478bd9Sstevel@tonic-gate 	return (0);
6747c478bd9Sstevel@tonic-gate }
6757c478bd9Sstevel@tonic-gate 
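/*
 * Compare two audit bufctls by timestamp, sorting the most recent first.
 */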
6767c478bd9Sstevel@tonic-gate static int
6777c478bd9Sstevel@tonic-gate bufctlcmp(const umem_bufctl_audit_t **lhs, const umem_bufctl_audit_t **rhs)
6787c478bd9Sstevel@tonic-gate {
6797c478bd9Sstevel@tonic-gate 	const umem_bufctl_audit_t *bcp1 = *lhs;
6807c478bd9Sstevel@tonic-gate 	const umem_bufctl_audit_t *bcp2 = *rhs;
6817c478bd9Sstevel@tonic-gate 
6827c478bd9Sstevel@tonic-gate 	if (bcp1->bc_timestamp > bcp2->bc_timestamp)
6837c478bd9Sstevel@tonic-gate 		return (-1);
6847c478bd9Sstevel@tonic-gate 
6857c478bd9Sstevel@tonic-gate 	if (bcp1->bc_timestamp < bcp2->bc_timestamp)
6867c478bd9Sstevel@tonic-gate 		return (1);
6877c478bd9Sstevel@tonic-gate 
6887c478bd9Sstevel@tonic-gate 	return (0);
6897c478bd9Sstevel@tonic-gate }
6907c478bd9Sstevel@tonic-gate 
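/*
 * umem_hash walker: snapshot a cache's hash table into local memory, then
 * step through each bucket's chain of bufctls.
 */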
6917c478bd9Sstevel@tonic-gate typedef struct umem_hash_walk {
6927c478bd9Sstevel@tonic-gate 	uintptr_t *umhw_table;
6937c478bd9Sstevel@tonic-gate 	size_t umhw_nelems;
6947c478bd9Sstevel@tonic-gate 	size_t umhw_pos;
6957c478bd9Sstevel@tonic-gate 	umem_bufctl_t umhw_cur;
6967c478bd9Sstevel@tonic-gate } umem_hash_walk_t;
6977c478bd9Sstevel@tonic-gate 
6987c478bd9Sstevel@tonic-gate int
6997c478bd9Sstevel@tonic-gate umem_hash_walk_init(mdb_walk_state_t *wsp)
7007c478bd9Sstevel@tonic-gate {
7017c478bd9Sstevel@tonic-gate 	umem_hash_walk_t *umhw;
7027c478bd9Sstevel@tonic-gate 	uintptr_t *hash;
7037c478bd9Sstevel@tonic-gate 	umem_cache_t c;
7047c478bd9Sstevel@tonic-gate 	uintptr_t haddr, addr = wsp->walk_addr;
7057c478bd9Sstevel@tonic-gate 	size_t nelems;
7067c478bd9Sstevel@tonic-gate 	size_t hsize;
7077c478bd9Sstevel@tonic-gate 
7087c478bd9Sstevel@tonic-gate 	if (addr == NULL) {
7097c478bd9Sstevel@tonic-gate 		mdb_warn("umem_hash doesn't support global walks\n");
7107c478bd9Sstevel@tonic-gate 		return (WALK_ERR);
7117c478bd9Sstevel@tonic-gate 	}
7127c478bd9Sstevel@tonic-gate 
7137c478bd9Sstevel@tonic-gate 	if (mdb_vread(&c, sizeof (c), addr) == -1) {
7147c478bd9Sstevel@tonic-gate 		mdb_warn("couldn't read cache at addr %p", addr);
7157c478bd9Sstevel@tonic-gate 		return (WALK_ERR);
7167c478bd9Sstevel@tonic-gate 	}
7177c478bd9Sstevel@tonic-gate 
7187c478bd9Sstevel@tonic-gate 	if (!(c.cache_flags & UMF_HASH)) {
7197c478bd9Sstevel@tonic-gate 		mdb_warn("cache %p doesn't have a hash table\n", addr);
7207c478bd9Sstevel@tonic-gate 		return (WALK_DONE);		/* nothing to do */
7217c478bd9Sstevel@tonic-gate 	}
7227c478bd9Sstevel@tonic-gate 
7237c478bd9Sstevel@tonic-gate 	umhw = mdb_zalloc(sizeof (umem_hash_walk_t), UM_SLEEP);
7247c478bd9Sstevel@tonic-gate 	umhw->umhw_cur.bc_next = NULL;
7257c478bd9Sstevel@tonic-gate 	umhw->umhw_pos = 0;
7267c478bd9Sstevel@tonic-gate 
7277c478bd9Sstevel@tonic-gate 	umhw->umhw_nelems = nelems = c.cache_hash_mask + 1;
7287c478bd9Sstevel@tonic-gate 	hsize = nelems * sizeof (uintptr_t);
7297c478bd9Sstevel@tonic-gate 	haddr = (uintptr_t)c.cache_hash_table;
7307c478bd9Sstevel@tonic-gate 
7317c478bd9Sstevel@tonic-gate 	umhw->umhw_table = hash = mdb_alloc(hsize, UM_SLEEP);
7327c478bd9Sstevel@tonic-gate 	if (mdb_vread(hash, hsize, haddr) == -1) {
7337c478bd9Sstevel@tonic-gate 		mdb_warn("failed to read hash table at %p", haddr);
7347c478bd9Sstevel@tonic-gate 		mdb_free(hash, hsize);
7357c478bd9Sstevel@tonic-gate 		mdb_free(umhw, sizeof (umem_hash_walk_t));
7367c478bd9Sstevel@tonic-gate 		return (WALK_ERR);
7377c478bd9Sstevel@tonic-gate 	}
7387c478bd9Sstevel@tonic-gate 
7397c478bd9Sstevel@tonic-gate 	wsp->walk_data = umhw;
7407c478bd9Sstevel@tonic-gate 
7417c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
7427c478bd9Sstevel@tonic-gate }
7437c478bd9Sstevel@tonic-gate 
7447c478bd9Sstevel@tonic-gate int
7457c478bd9Sstevel@tonic-gate umem_hash_walk_step(mdb_walk_state_t *wsp)
7467c478bd9Sstevel@tonic-gate {
7477c478bd9Sstevel@tonic-gate 	umem_hash_walk_t *umhw = wsp->walk_data;
7487c478bd9Sstevel@tonic-gate 	uintptr_t addr = NULL;
7497c478bd9Sstevel@tonic-gate 
7507c478bd9Sstevel@tonic-gate 	if ((addr = (uintptr_t)umhw->umhw_cur.bc_next) == NULL) {
7517c478bd9Sstevel@tonic-gate 		while (umhw->umhw_pos < umhw->umhw_nelems) {
7527c478bd9Sstevel@tonic-gate 			if ((addr = umhw->umhw_table[umhw->umhw_pos++]) != NULL)
7537c478bd9Sstevel@tonic-gate 				break;
7547c478bd9Sstevel@tonic-gate 		}
7557c478bd9Sstevel@tonic-gate 	}
7567c478bd9Sstevel@tonic-gate 	if (addr == NULL)
7577c478bd9Sstevel@tonic-gate 		return (WALK_DONE);
7587c478bd9Sstevel@tonic-gate 
7597c478bd9Sstevel@tonic-gate 	if (mdb_vread(&umhw->umhw_cur, sizeof (umem_bufctl_t), addr) == -1) {
7607c478bd9Sstevel@tonic-gate 		mdb_warn("couldn't read umem_bufctl_t at addr %p", addr);
7617c478bd9Sstevel@tonic-gate 		return (WALK_ERR);
7627c478bd9Sstevel@tonic-gate 	}
7637c478bd9Sstevel@tonic-gate 
7647c478bd9Sstevel@tonic-gate 	return (wsp->walk_callback(addr, &umhw->umhw_cur, wsp->walk_cbdata));
7657c478bd9Sstevel@tonic-gate }
7667c478bd9Sstevel@tonic-gate 
7677c478bd9Sstevel@tonic-gate void
7687c478bd9Sstevel@tonic-gate umem_hash_walk_fini(mdb_walk_state_t *wsp)
7697c478bd9Sstevel@tonic-gate {
7707c478bd9Sstevel@tonic-gate 	umem_hash_walk_t *umhw = wsp->walk_data;
7717c478bd9Sstevel@tonic-gate 
7727c478bd9Sstevel@tonic-gate 	if (umhw == NULL)
7737c478bd9Sstevel@tonic-gate 		return;
7747c478bd9Sstevel@tonic-gate 
7757c478bd9Sstevel@tonic-gate 	mdb_free(umhw->umhw_table, umhw->umhw_nelems * sizeof (uintptr_t));
7767c478bd9Sstevel@tonic-gate 	mdb_free(umhw, sizeof (umem_hash_walk_t));
7777c478bd9Sstevel@tonic-gate }
7787c478bd9Sstevel@tonic-gate 
7797c478bd9Sstevel@tonic-gate /*
7807c478bd9Sstevel@tonic-gate  * Find the address of the bufctl structure for the address 'buf' in cache
7817c478bd9Sstevel@tonic-gate  * 'cp', which is at address caddr, and place it in *out.
7827c478bd9Sstevel@tonic-gate  */
7837c478bd9Sstevel@tonic-gate static int
7847c478bd9Sstevel@tonic-gate umem_hash_lookup(umem_cache_t *cp, uintptr_t caddr, void *buf, uintptr_t *out)
7857c478bd9Sstevel@tonic-gate {
7867c478bd9Sstevel@tonic-gate 	uintptr_t bucket = (uintptr_t)UMEM_HASH(cp, buf);
7877c478bd9Sstevel@tonic-gate 	umem_bufctl_t *bcp;
7887c478bd9Sstevel@tonic-gate 	umem_bufctl_t bc;
7897c478bd9Sstevel@tonic-gate 
7907c478bd9Sstevel@tonic-gate 	if (mdb_vread(&bcp, sizeof (umem_bufctl_t *), bucket) == -1) {
7917c478bd9Sstevel@tonic-gate 		mdb_warn("unable to read hash bucket for %p in cache %p",
7927c478bd9Sstevel@tonic-gate 		    buf, caddr);
7937c478bd9Sstevel@tonic-gate 		return (-1);
7947c478bd9Sstevel@tonic-gate 	}
7957c478bd9Sstevel@tonic-gate 
7967c478bd9Sstevel@tonic-gate 	while (bcp != NULL) {
7977c478bd9Sstevel@tonic-gate 		if (mdb_vread(&bc, sizeof (umem_bufctl_t),
7987c478bd9Sstevel@tonic-gate 		    (uintptr_t)bcp) == -1) {
7997c478bd9Sstevel@tonic-gate 			mdb_warn("unable to read bufctl at %p", bcp);
8007c478bd9Sstevel@tonic-gate 			return (-1);
8017c478bd9Sstevel@tonic-gate 		}
8027c478bd9Sstevel@tonic-gate 		if (bc.bc_addr == buf) {
8037c478bd9Sstevel@tonic-gate 			*out = (uintptr_t)bcp;
8047c478bd9Sstevel@tonic-gate 			return (0);
8057c478bd9Sstevel@tonic-gate 		}
8067c478bd9Sstevel@tonic-gate 		bcp = bc.bc_next;
8077c478bd9Sstevel@tonic-gate 	}
8087c478bd9Sstevel@tonic-gate 
8097c478bd9Sstevel@tonic-gate 	mdb_warn("unable to find bufctl for %p in cache %p\n", buf, caddr);
8107c478bd9Sstevel@tonic-gate 	return (-1);
8117c478bd9Sstevel@tonic-gate }
8127c478bd9Sstevel@tonic-gate 
8137c478bd9Sstevel@tonic-gate int
8147c478bd9Sstevel@tonic-gate umem_get_magsize(const umem_cache_t *cp)
8157c478bd9Sstevel@tonic-gate {
8167c478bd9Sstevel@tonic-gate 	uintptr_t addr = (uintptr_t)cp->cache_magtype;
8177c478bd9Sstevel@tonic-gate 	GElf_Sym mt_sym;
8187c478bd9Sstevel@tonic-gate 	umem_magtype_t mt;
8197c478bd9Sstevel@tonic-gate 	int res;
8207c478bd9Sstevel@tonic-gate 
8217c478bd9Sstevel@tonic-gate 	/*
8227c478bd9Sstevel@tonic-gate 	 * if cpu 0 has a non-zero magsize, it must be correct.  caches
8237c478bd9Sstevel@tonic-gate 	 * with UMF_NOMAGAZINE have disabled their magazine layers, so
8247c478bd9Sstevel@tonic-gate 	 * it is okay to return 0 for them.
8257c478bd9Sstevel@tonic-gate 	 */
8267c478bd9Sstevel@tonic-gate 	if ((res = cp->cache_cpu[0].cc_magsize) != 0 ||
8277c478bd9Sstevel@tonic-gate 	    (cp->cache_flags & UMF_NOMAGAZINE))
8287c478bd9Sstevel@tonic-gate 		return (res);
8297c478bd9Sstevel@tonic-gate 
830789d94c2Sjwadams 	if (umem_lookup_by_name("umem_magtype", &mt_sym) == -1) {
8317c478bd9Sstevel@tonic-gate 		mdb_warn("unable to read 'umem_magtype'");
8327c478bd9Sstevel@tonic-gate 	} else if (addr < mt_sym.st_value ||
8337c478bd9Sstevel@tonic-gate 	    addr + sizeof (mt) - 1 > mt_sym.st_value + mt_sym.st_size - 1 ||
8347c478bd9Sstevel@tonic-gate 	    ((addr - mt_sym.st_value) % sizeof (mt)) != 0) {
8357c478bd9Sstevel@tonic-gate 		mdb_warn("cache '%s' has invalid magtype pointer (%p)\n",
8367c478bd9Sstevel@tonic-gate 		    cp->cache_name, addr);
8377c478bd9Sstevel@tonic-gate 		return (0);
8387c478bd9Sstevel@tonic-gate 	}
8397c478bd9Sstevel@tonic-gate 	if (mdb_vread(&mt, sizeof (mt), addr) == -1) {
8407c478bd9Sstevel@tonic-gate 		mdb_warn("unable to read magtype at %a", addr);
8417c478bd9Sstevel@tonic-gate 		return (0);
8427c478bd9Sstevel@tonic-gate 	}
8437c478bd9Sstevel@tonic-gate 	return (mt.mt_magsize);
8447c478bd9Sstevel@tonic-gate }
8457c478bd9Sstevel@tonic-gate 
8467c478bd9Sstevel@tonic-gate /*ARGSUSED*/
8477c478bd9Sstevel@tonic-gate static int
8487c478bd9Sstevel@tonic-gate umem_estimate_slab(uintptr_t addr, const umem_slab_t *sp, size_t *est)
8497c478bd9Sstevel@tonic-gate {
8507c478bd9Sstevel@tonic-gate 	*est -= (sp->slab_chunks - sp->slab_refcnt);
8517c478bd9Sstevel@tonic-gate 
8527c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
8537c478bd9Sstevel@tonic-gate }
8547c478bd9Sstevel@tonic-gate 
8557c478bd9Sstevel@tonic-gate /*
8567c478bd9Sstevel@tonic-gate  * Returns an upper bound on the number of allocated buffers in a given
8577c478bd9Sstevel@tonic-gate  * cache.
8587c478bd9Sstevel@tonic-gate  */
8597c478bd9Sstevel@tonic-gate size_t
8607c478bd9Sstevel@tonic-gate umem_estimate_allocated(uintptr_t addr, const umem_cache_t *cp)
8617c478bd9Sstevel@tonic-gate {
8627c478bd9Sstevel@tonic-gate 	int magsize;
8637c478bd9Sstevel@tonic-gate 	size_t cache_est;
8647c478bd9Sstevel@tonic-gate 
8657c478bd9Sstevel@tonic-gate 	cache_est = cp->cache_buftotal;
8667c478bd9Sstevel@tonic-gate 
8677c478bd9Sstevel@tonic-gate 	(void) mdb_pwalk("umem_slab_partial",
8687c478bd9Sstevel@tonic-gate 	    (mdb_walk_cb_t)umem_estimate_slab, &cache_est, addr);
8697c478bd9Sstevel@tonic-gate 
8707c478bd9Sstevel@tonic-gate 	if ((magsize = umem_get_magsize(cp)) != 0) {
8717c478bd9Sstevel@tonic-gate 		size_t mag_est = cp->cache_full.ml_total * magsize;
8727c478bd9Sstevel@tonic-gate 
8737c478bd9Sstevel@tonic-gate 		if (cache_est >= mag_est) {
8747c478bd9Sstevel@tonic-gate 			cache_est -= mag_est;
8757c478bd9Sstevel@tonic-gate 		} else {
8767c478bd9Sstevel@tonic-gate 			mdb_warn("cache %p's magazine layer holds more buffers "
8777c478bd9Sstevel@tonic-gate 			    "than the slab layer.\n", addr);
8787c478bd9Sstevel@tonic-gate 		}
8797c478bd9Sstevel@tonic-gate 	}
8807c478bd9Sstevel@tonic-gate 	return (cache_est);
8817c478bd9Sstevel@tonic-gate }
8827c478bd9Sstevel@tonic-gate 
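/*
 * Read the magazine at ump into mp and append its first "rounds" buffer
 * pointers to maglist, bailing out if the magmax fudge-factor limit is
 * reached.  Relies on ump, mp, maglist, magcnt, magmax, and i being in
 * scope at the expansion site.
 */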
8837c478bd9Sstevel@tonic-gate #define	READMAG_ROUNDS(rounds) { \
8847c478bd9Sstevel@tonic-gate 	if (mdb_vread(mp, magbsize, (uintptr_t)ump) == -1) { \
8857c478bd9Sstevel@tonic-gate 		mdb_warn("couldn't read magazine at %p", ump); \
8867c478bd9Sstevel@tonic-gate 		goto fail; \
8877c478bd9Sstevel@tonic-gate 	} \
8887c478bd9Sstevel@tonic-gate 	for (i = 0; i < rounds; i++) { \
8897c478bd9Sstevel@tonic-gate 		maglist[magcnt++] = mp->mag_round[i]; \
8907c478bd9Sstevel@tonic-gate 		if (magcnt == magmax) { \
8917c478bd9Sstevel@tonic-gate 			mdb_warn("%d magazines exceeds fudge factor\n", \
8927c478bd9Sstevel@tonic-gate 			    magcnt); \
8937c478bd9Sstevel@tonic-gate 			goto fail; \
8947c478bd9Sstevel@tonic-gate 		} \
8957c478bd9Sstevel@tonic-gate 	} \
8967c478bd9Sstevel@tonic-gate }
8977c478bd9Sstevel@tonic-gate 
898*4f364e7cSRobert Mustacchi static int
899789d94c2Sjwadams umem_read_magazines(umem_cache_t *cp, uintptr_t addr,
900*4f364e7cSRobert Mustacchi     void ***maglistp, size_t *magcntp, size_t *magmaxp)
9017c478bd9Sstevel@tonic-gate {
9027c478bd9Sstevel@tonic-gate 	umem_magazine_t *ump, *mp;
9037c478bd9Sstevel@tonic-gate 	void **maglist = NULL;
9047c478bd9Sstevel@tonic-gate 	int i, cpu;
9057c478bd9Sstevel@tonic-gate 	size_t magsize, magmax, magbsize;
9067c478bd9Sstevel@tonic-gate 	size_t magcnt = 0;
9077c478bd9Sstevel@tonic-gate 
9087c478bd9Sstevel@tonic-gate 	/*
9097c478bd9Sstevel@tonic-gate 	 * Read the magtype out of the cache, after verifying the pointer's
9107c478bd9Sstevel@tonic-gate 	 * correctness.
9117c478bd9Sstevel@tonic-gate 	 */
9127c478bd9Sstevel@tonic-gate 	magsize = umem_get_magsize(cp);
913789d94c2Sjwadams 	if (magsize == 0) {
914789d94c2Sjwadams 		*maglistp = NULL;
915789d94c2Sjwadams 		*magcntp = 0;
916789d94c2Sjwadams 		*magmaxp = 0;
917*4f364e7cSRobert Mustacchi 		return (0);
918789d94c2Sjwadams 	}
9197c478bd9Sstevel@tonic-gate 
9207c478bd9Sstevel@tonic-gate 	/*
9217c478bd9Sstevel@tonic-gate 	 * There are several places where we need to go buffer hunting:
9227c478bd9Sstevel@tonic-gate 	 * the per-CPU loaded magazine, the per-CPU spare full magazine,
9237c478bd9Sstevel@tonic-gate 	 * and the full magazine list in the depot.
9247c478bd9Sstevel@tonic-gate 	 *
9257c478bd9Sstevel@tonic-gate 	 * For an upper bound on the number of buffers in the magazine
9267c478bd9Sstevel@tonic-gate 	 * layer, we have the number of magazines on the cache_full
9277c478bd9Sstevel@tonic-gate 	 * list plus at most two magazines per CPU (the loaded and the
9287c478bd9Sstevel@tonic-gate 	 * spare).  Toss in 100 magazines as a fudge factor in case this
9297c478bd9Sstevel@tonic-gate 	 * is live (the number "100" comes from the same fudge factor in
9307c478bd9Sstevel@tonic-gate 	 * crash(1M)).
9317c478bd9Sstevel@tonic-gate 	 */
932789d94c2Sjwadams 	magmax = (cp->cache_full.ml_total + 2 * umem_max_ncpus + 100) * magsize;
9337c478bd9Sstevel@tonic-gate 	magbsize = offsetof(umem_magazine_t, mag_round[magsize]);
9347c478bd9Sstevel@tonic-gate 
9357c478bd9Sstevel@tonic-gate 	if (magbsize >= PAGESIZE / 2) {
9367c478bd9Sstevel@tonic-gate 		mdb_warn("magazine size for cache %p unreasonable (%x)\n",
9377c478bd9Sstevel@tonic-gate 		    addr, magbsize);
938*4f364e7cSRobert Mustacchi 		return (-1);
9397c478bd9Sstevel@tonic-gate 	}
9407c478bd9Sstevel@tonic-gate 
941*4f364e7cSRobert Mustacchi 	maglist = mdb_alloc(magmax * sizeof (void *), UM_SLEEP);
942*4f364e7cSRobert Mustacchi 	mp = mdb_alloc(magbsize, UM_SLEEP);
9437c478bd9Sstevel@tonic-gate 	if (mp == NULL || maglist == NULL)
9447c478bd9Sstevel@tonic-gate 		goto fail;
9457c478bd9Sstevel@tonic-gate 
9467c478bd9Sstevel@tonic-gate 	/*
9477c478bd9Sstevel@tonic-gate 	 * First up: the magazines in the depot (i.e. on the cache_full list).
9487c478bd9Sstevel@tonic-gate 	 */
9497c478bd9Sstevel@tonic-gate 	for (ump = cp->cache_full.ml_list; ump != NULL; ) {
9507c478bd9Sstevel@tonic-gate 		READMAG_ROUNDS(magsize);
9517c478bd9Sstevel@tonic-gate 		ump = mp->mag_next;
9527c478bd9Sstevel@tonic-gate 
9537c478bd9Sstevel@tonic-gate 		if (ump == cp->cache_full.ml_list)
9547c478bd9Sstevel@tonic-gate 			break; /* cache_full list loop detected */
9557c478bd9Sstevel@tonic-gate 	}
9567c478bd9Sstevel@tonic-gate 
9577c478bd9Sstevel@tonic-gate 	dprintf(("cache_full list done\n"));
9587c478bd9Sstevel@tonic-gate 
9597c478bd9Sstevel@tonic-gate 	/*
9607c478bd9Sstevel@tonic-gate 	 * Now whip through the CPUs, snagging the loaded magazines
9617c478bd9Sstevel@tonic-gate 	 * and full spares.
9627c478bd9Sstevel@tonic-gate 	 */
963789d94c2Sjwadams 	for (cpu = 0; cpu < umem_max_ncpus; cpu++) {
9647c478bd9Sstevel@tonic-gate 		umem_cpu_cache_t *ccp = &cp->cache_cpu[cpu];
9657c478bd9Sstevel@tonic-gate 
9667c478bd9Sstevel@tonic-gate 		dprintf(("reading cpu cache %p\n",
9677c478bd9Sstevel@tonic-gate 		    (uintptr_t)ccp - (uintptr_t)cp + addr));
9687c478bd9Sstevel@tonic-gate 
9697c478bd9Sstevel@tonic-gate 		if (ccp->cc_rounds > 0 &&
9707c478bd9Sstevel@tonic-gate 		    (ump = ccp->cc_loaded) != NULL) {
9717c478bd9Sstevel@tonic-gate 			dprintf(("reading %d loaded rounds\n", ccp->cc_rounds));
9727c478bd9Sstevel@tonic-gate 			READMAG_ROUNDS(ccp->cc_rounds);
9737c478bd9Sstevel@tonic-gate 		}
9747c478bd9Sstevel@tonic-gate 
9757c478bd9Sstevel@tonic-gate 		if (ccp->cc_prounds > 0 &&
9767c478bd9Sstevel@tonic-gate 		    (ump = ccp->cc_ploaded) != NULL) {
9777c478bd9Sstevel@tonic-gate 			dprintf(("reading %d previously loaded rounds\n",
9787c478bd9Sstevel@tonic-gate 			    ccp->cc_prounds));
9797c478bd9Sstevel@tonic-gate 			READMAG_ROUNDS(ccp->cc_prounds);
9807c478bd9Sstevel@tonic-gate 		}
9817c478bd9Sstevel@tonic-gate 	}
9827c478bd9Sstevel@tonic-gate 
9837c478bd9Sstevel@tonic-gate 	dprintf(("magazine layer: %d buffers\n", magcnt));
9847c478bd9Sstevel@tonic-gate 
985*4f364e7cSRobert Mustacchi 	mdb_free(mp, magbsize);
9867c478bd9Sstevel@tonic-gate 
9877c478bd9Sstevel@tonic-gate 	*maglistp = maglist;
9887c478bd9Sstevel@tonic-gate 	*magcntp = magcnt;
9897c478bd9Sstevel@tonic-gate 	*magmaxp = magmax;
9907c478bd9Sstevel@tonic-gate 
991*4f364e7cSRobert Mustacchi 	return (0);
9927c478bd9Sstevel@tonic-gate 
9937c478bd9Sstevel@tonic-gate fail:
994*4f364e7cSRobert Mustacchi 	if (mp)
995*4f364e7cSRobert Mustacchi 		mdb_free(mp, magbsize);
996*4f364e7cSRobert Mustacchi 	if (maglist)
997*4f364e7cSRobert Mustacchi 		mdb_free(maglist, magmax * sizeof (void *));
998*4f364e7cSRobert Mustacchi 
999*4f364e7cSRobert Mustacchi 	return (-1);
1000*4f364e7cSRobert Mustacchi }
1001*4f364e7cSRobert Mustacchi 
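/*
 * Gather the buffers currently sitting in per-thread caches by running the
 * cache's umem_ptc_<bufsize> walker; umem_read_ptc_walk_buf() accumulates
 * the addresses into an array that is doubled in size as needed.
 */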
1002*4f364e7cSRobert Mustacchi typedef struct umem_read_ptc_walk {
1003*4f364e7cSRobert Mustacchi 	void **urpw_buf;
1004*4f364e7cSRobert Mustacchi 	size_t urpw_cnt;
1005*4f364e7cSRobert Mustacchi 	size_t urpw_max;
1006*4f364e7cSRobert Mustacchi } umem_read_ptc_walk_t;
1007*4f364e7cSRobert Mustacchi 
1008*4f364e7cSRobert Mustacchi /*ARGSUSED*/
1009*4f364e7cSRobert Mustacchi static int
1010*4f364e7cSRobert Mustacchi umem_read_ptc_walk_buf(uintptr_t addr,
1011*4f364e7cSRobert Mustacchi     const void *ignored, umem_read_ptc_walk_t *urpw)
1012*4f364e7cSRobert Mustacchi {
1013*4f364e7cSRobert Mustacchi 	if (urpw->urpw_cnt == urpw->urpw_max) {
1014*4f364e7cSRobert Mustacchi 		size_t nmax = urpw->urpw_max ? (urpw->urpw_max << 1) : 1;
1015*4f364e7cSRobert Mustacchi 		void **new = mdb_zalloc(nmax * sizeof (void *), UM_SLEEP);
1016*4f364e7cSRobert Mustacchi 
1017*4f364e7cSRobert Mustacchi 		if (nmax > 1) {
1018*4f364e7cSRobert Mustacchi 			size_t osize = urpw->urpw_max * sizeof (void *);
1019*4f364e7cSRobert Mustacchi 			bcopy(urpw->urpw_buf, new, osize);
1020*4f364e7cSRobert Mustacchi 			mdb_free(urpw->urpw_buf, osize);
1021*4f364e7cSRobert Mustacchi 		}
1022*4f364e7cSRobert Mustacchi 
1023*4f364e7cSRobert Mustacchi 		urpw->urpw_buf = new;
1024*4f364e7cSRobert Mustacchi 		urpw->urpw_max = nmax;
1025*4f364e7cSRobert Mustacchi 	}
1026*4f364e7cSRobert Mustacchi 
1027*4f364e7cSRobert Mustacchi 	urpw->urpw_buf[urpw->urpw_cnt++] = (void *)addr;
1028*4f364e7cSRobert Mustacchi 
1029*4f364e7cSRobert Mustacchi 	return (WALK_NEXT);
1030*4f364e7cSRobert Mustacchi }
1031*4f364e7cSRobert Mustacchi 
1032*4f364e7cSRobert Mustacchi static int
1033*4f364e7cSRobert Mustacchi umem_read_ptc(umem_cache_t *cp,
1034*4f364e7cSRobert Mustacchi     void ***buflistp, size_t *bufcntp, size_t *bufmaxp)
1035*4f364e7cSRobert Mustacchi {
1036*4f364e7cSRobert Mustacchi 	umem_read_ptc_walk_t urpw;
1037*4f364e7cSRobert Mustacchi 	char walk[60];
1038*4f364e7cSRobert Mustacchi 	int rval;
1039*4f364e7cSRobert Mustacchi 
1040*4f364e7cSRobert Mustacchi 	if (!(cp->cache_flags & UMF_PTC))
1041*4f364e7cSRobert Mustacchi 		return (0);
1042*4f364e7cSRobert Mustacchi 
1043*4f364e7cSRobert Mustacchi 	(void) snprintf(walk, sizeof (walk), "umem_ptc_%d", cp->cache_bufsize);
1044*4f364e7cSRobert Mustacchi 
1045*4f364e7cSRobert Mustacchi 	urpw.urpw_buf = *buflistp;
1046*4f364e7cSRobert Mustacchi 	urpw.urpw_cnt = *bufcntp;
1047*4f364e7cSRobert Mustacchi 	urpw.urpw_max = *bufmaxp;
1048*4f364e7cSRobert Mustacchi 
1049*4f364e7cSRobert Mustacchi 	if ((rval = mdb_walk(walk,
1050*4f364e7cSRobert Mustacchi 	    (mdb_walk_cb_t)umem_read_ptc_walk_buf, &urpw)) == -1) {
1051*4f364e7cSRobert Mustacchi 		mdb_warn("couldn't walk %s", walk);
10527c478bd9Sstevel@tonic-gate 	}
1053*4f364e7cSRobert Mustacchi 
1054*4f364e7cSRobert Mustacchi 	*buflistp = urpw.urpw_buf;
1055*4f364e7cSRobert Mustacchi 	*bufcntp = urpw.urpw_cnt;
1056*4f364e7cSRobert Mustacchi 	*bufmaxp = urpw.urpw_max;
1057*4f364e7cSRobert Mustacchi 
1058*4f364e7cSRobert Mustacchi 	return (rval);
10597c478bd9Sstevel@tonic-gate }
10607c478bd9Sstevel@tonic-gate 
10617c478bd9Sstevel@tonic-gate static int
10627c478bd9Sstevel@tonic-gate umem_walk_callback(mdb_walk_state_t *wsp, uintptr_t buf)
10637c478bd9Sstevel@tonic-gate {
10647c478bd9Sstevel@tonic-gate 	return (wsp->walk_callback(buf, NULL, wsp->walk_cbdata));
10657c478bd9Sstevel@tonic-gate }
10667c478bd9Sstevel@tonic-gate 
10677c478bd9Sstevel@tonic-gate static int
10687c478bd9Sstevel@tonic-gate bufctl_walk_callback(umem_cache_t *cp, mdb_walk_state_t *wsp, uintptr_t buf)
10697c478bd9Sstevel@tonic-gate {
10707c478bd9Sstevel@tonic-gate 	umem_bufctl_audit_t *b;
10717c478bd9Sstevel@tonic-gate 	UMEM_LOCAL_BUFCTL_AUDIT(&b);
10727c478bd9Sstevel@tonic-gate 
10737c478bd9Sstevel@tonic-gate 	/*
10747c478bd9Sstevel@tonic-gate 	 * if UMF_AUDIT is not set, we know that we're looking at a
10757c478bd9Sstevel@tonic-gate 	 * umem_bufctl_t.
10767c478bd9Sstevel@tonic-gate 	 */
10777c478bd9Sstevel@tonic-gate 	if (!(cp->cache_flags & UMF_AUDIT) ||
10787c478bd9Sstevel@tonic-gate 	    mdb_vread(b, UMEM_BUFCTL_AUDIT_SIZE, buf) == -1) {
10797c478bd9Sstevel@tonic-gate 		(void) memset(b, 0, UMEM_BUFCTL_AUDIT_SIZE);
10807c478bd9Sstevel@tonic-gate 		if (mdb_vread(b, sizeof (umem_bufctl_t), buf) == -1) {
10817c478bd9Sstevel@tonic-gate 			mdb_warn("unable to read bufctl at %p", buf);
10827c478bd9Sstevel@tonic-gate 			return (WALK_ERR);
10837c478bd9Sstevel@tonic-gate 		}
10847c478bd9Sstevel@tonic-gate 	}
10857c478bd9Sstevel@tonic-gate 
10867c478bd9Sstevel@tonic-gate 	return (wsp->walk_callback(buf, b, wsp->walk_cbdata));
10877c478bd9Sstevel@tonic-gate }
10887c478bd9Sstevel@tonic-gate 
10897c478bd9Sstevel@tonic-gate typedef struct umem_walk {
10907c478bd9Sstevel@tonic-gate 	int umw_type;
10917c478bd9Sstevel@tonic-gate 
1092d7dba7e5SBryan Cantrill 	uintptr_t umw_addr;		/* cache address */
10937c478bd9Sstevel@tonic-gate 	umem_cache_t *umw_cp;
10947c478bd9Sstevel@tonic-gate 	size_t umw_csize;
10957c478bd9Sstevel@tonic-gate 
10967c478bd9Sstevel@tonic-gate 	/*
10977c478bd9Sstevel@tonic-gate 	 * magazine layer
10987c478bd9Sstevel@tonic-gate 	 */
10997c478bd9Sstevel@tonic-gate 	void **umw_maglist;
11007c478bd9Sstevel@tonic-gate 	size_t umw_max;
11017c478bd9Sstevel@tonic-gate 	size_t umw_count;
11027c478bd9Sstevel@tonic-gate 	size_t umw_pos;
11037c478bd9Sstevel@tonic-gate 
11047c478bd9Sstevel@tonic-gate 	/*
11057c478bd9Sstevel@tonic-gate 	 * slab layer
11067c478bd9Sstevel@tonic-gate 	 */
11077c478bd9Sstevel@tonic-gate 	char *umw_valid;	/* to keep track of freed buffers */
11087c478bd9Sstevel@tonic-gate 	char *umw_ubase;	/* buffer for slab data */
11097c478bd9Sstevel@tonic-gate } umem_walk_t;
11107c478bd9Sstevel@tonic-gate 
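/*
 * Common initialization for the umem, freemem, bufctl, and freectl walks.
 * Everything cached outside the slab layer (magazines plus any per-thread
 * caches) is snapshotted up front; the remaining work is handed to a
 * layered walk:  the hash table for allocated buffers in UMF_HASH caches,
 * all slabs for other allocated walks, and only the partial slabs when
 * walking freed buffers.
 */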
11117c478bd9Sstevel@tonic-gate static int
11127c478bd9Sstevel@tonic-gate umem_walk_init_common(mdb_walk_state_t *wsp, int type)
11137c478bd9Sstevel@tonic-gate {
11147c478bd9Sstevel@tonic-gate 	umem_walk_t *umw;
1115789d94c2Sjwadams 	int csize;
11167c478bd9Sstevel@tonic-gate 	umem_cache_t *cp;
1117789d94c2Sjwadams 	size_t vm_quantum;
11187c478bd9Sstevel@tonic-gate 
11197c478bd9Sstevel@tonic-gate 	size_t magmax, magcnt;
11207c478bd9Sstevel@tonic-gate 	void **maglist = NULL;
11217c478bd9Sstevel@tonic-gate 	uint_t chunksize, slabsize;
11227c478bd9Sstevel@tonic-gate 	int status = WALK_ERR;
11237c478bd9Sstevel@tonic-gate 	uintptr_t addr = wsp->walk_addr;
11247c478bd9Sstevel@tonic-gate 	const char *layered;
11257c478bd9Sstevel@tonic-gate 
11267c478bd9Sstevel@tonic-gate 	type &= ~UM_HASH;
11277c478bd9Sstevel@tonic-gate 
11287c478bd9Sstevel@tonic-gate 	if (addr == NULL) {
11297c478bd9Sstevel@tonic-gate 		mdb_warn("umem walk doesn't support global walks\n");
11307c478bd9Sstevel@tonic-gate 		return (WALK_ERR);
11317c478bd9Sstevel@tonic-gate 	}
11327c478bd9Sstevel@tonic-gate 
11337c478bd9Sstevel@tonic-gate 	dprintf(("walking %p\n", addr));
11347c478bd9Sstevel@tonic-gate 
11357c478bd9Sstevel@tonic-gate 	/*
1136789d94c2Sjwadams 	 * The number of "cpus" determines how large the cache is.
11377c478bd9Sstevel@tonic-gate 	 */
1138789d94c2Sjwadams 	csize = UMEM_CACHE_SIZE(umem_max_ncpus);
11397c478bd9Sstevel@tonic-gate 	cp = mdb_alloc(csize, UM_SLEEP);
11407c478bd9Sstevel@tonic-gate 
11417c478bd9Sstevel@tonic-gate 	if (mdb_vread(cp, csize, addr) == -1) {
11427c478bd9Sstevel@tonic-gate 		mdb_warn("couldn't read cache at addr %p", addr);
11437c478bd9Sstevel@tonic-gate 		goto out2;
11447c478bd9Sstevel@tonic-gate 	}
11457c478bd9Sstevel@tonic-gate 
1146789d94c2Sjwadams 	/*
1147789d94c2Sjwadams 	 * It's easy for someone to hand us an invalid cache address.
1148789d94c2Sjwadams 	 * Unfortunately, it is hard for this walker to survive an
1149789d94c2Sjwadams 	 * invalid cache cleanly.  So we make sure that:
1150789d94c2Sjwadams 	 *
1151789d94c2Sjwadams 	 *	1. the vmem arena for the cache is readable,
1152789d94c2Sjwadams 	 *	2. the vmem arena's quantum is a power of 2,
1153789d94c2Sjwadams 	 *	3. our slabsize is a multiple of the quantum, and
1154789d94c2Sjwadams 	 *	4. our chunksize is >0 and less than our slabsize.
1155789d94c2Sjwadams 	 */
1156789d94c2Sjwadams 	if (mdb_vread(&vm_quantum, sizeof (vm_quantum),
1157789d94c2Sjwadams 	    (uintptr_t)&cp->cache_arena->vm_quantum) == -1 ||
1158789d94c2Sjwadams 	    vm_quantum == 0 ||
1159789d94c2Sjwadams 	    (vm_quantum & (vm_quantum - 1)) != 0 ||
1160789d94c2Sjwadams 	    cp->cache_slabsize < vm_quantum ||
1161789d94c2Sjwadams 	    P2PHASE(cp->cache_slabsize, vm_quantum) != 0 ||
1162789d94c2Sjwadams 	    cp->cache_chunksize == 0 ||
1163789d94c2Sjwadams 	    cp->cache_chunksize > cp->cache_slabsize) {
1164789d94c2Sjwadams 		mdb_warn("%p is not a valid umem_cache_t\n", addr);
1165789d94c2Sjwadams 		goto out2;
1166789d94c2Sjwadams 	}
1167789d94c2Sjwadams 
11687c478bd9Sstevel@tonic-gate 	dprintf(("buf total is %d\n", cp->cache_buftotal));
11697c478bd9Sstevel@tonic-gate 
11707c478bd9Sstevel@tonic-gate 	if (cp->cache_buftotal == 0) {
11717c478bd9Sstevel@tonic-gate 		mdb_free(cp, csize);
11727c478bd9Sstevel@tonic-gate 		return (WALK_DONE);
11737c478bd9Sstevel@tonic-gate 	}
11747c478bd9Sstevel@tonic-gate 
11757c478bd9Sstevel@tonic-gate 	/*
11767c478bd9Sstevel@tonic-gate 	 * If they ask for bufctls, but it's a small-slab cache,
11777c478bd9Sstevel@tonic-gate 	 * there is nothing to report.
11787c478bd9Sstevel@tonic-gate 	 */
11797c478bd9Sstevel@tonic-gate 	if ((type & UM_BUFCTL) && !(cp->cache_flags & UMF_HASH)) {
11807c478bd9Sstevel@tonic-gate 		dprintf(("bufctl requested, not UMF_HASH (flags: %p)\n",
11817c478bd9Sstevel@tonic-gate 		    cp->cache_flags));
11827c478bd9Sstevel@tonic-gate 		mdb_free(cp, csize);
11837c478bd9Sstevel@tonic-gate 		return (WALK_DONE);
11847c478bd9Sstevel@tonic-gate 	}
11857c478bd9Sstevel@tonic-gate 
11867c478bd9Sstevel@tonic-gate 	/*
11877c478bd9Sstevel@tonic-gate 	 * Read in the contents of the magazine layer
11887c478bd9Sstevel@tonic-gate 	 */
1189*4f364e7cSRobert Mustacchi 	if (umem_read_magazines(cp, addr, &maglist, &magcnt, &magmax) != 0)
1190*4f364e7cSRobert Mustacchi 		goto out2;
1191*4f364e7cSRobert Mustacchi 
1192*4f364e7cSRobert Mustacchi 	/*
1193*4f364e7cSRobert Mustacchi 	 * Read in the contents of the per-thread caches, if any
1194*4f364e7cSRobert Mustacchi 	 */
1195*4f364e7cSRobert Mustacchi 	if (umem_read_ptc(cp, &maglist, &magcnt, &magmax) != 0)
11967c478bd9Sstevel@tonic-gate 		goto out2;
11977c478bd9Sstevel@tonic-gate 
11987c478bd9Sstevel@tonic-gate 	/*
1199*4f364e7cSRobert Mustacchi 	 * We have all of the buffers from the magazines and from the
1200*4f364e7cSRobert Mustacchi 	 * per-thread cache (if any);  if we are walking allocated buffers,
1201*4f364e7cSRobert Mustacchi 	 * sort them so we can bsearch them later.
12027c478bd9Sstevel@tonic-gate 	 */
12037c478bd9Sstevel@tonic-gate 	if (type & UM_ALLOCATED)
12047c478bd9Sstevel@tonic-gate 		qsort(maglist, magcnt, sizeof (void *), addrcmp);
12057c478bd9Sstevel@tonic-gate 
12067c478bd9Sstevel@tonic-gate 	wsp->walk_data = umw = mdb_zalloc(sizeof (umem_walk_t), UM_SLEEP);
12077c478bd9Sstevel@tonic-gate 
12087c478bd9Sstevel@tonic-gate 	umw->umw_type = type;
12097c478bd9Sstevel@tonic-gate 	umw->umw_addr = addr;
12107c478bd9Sstevel@tonic-gate 	umw->umw_cp = cp;
12117c478bd9Sstevel@tonic-gate 	umw->umw_csize = csize;
12127c478bd9Sstevel@tonic-gate 	umw->umw_maglist = maglist;
12137c478bd9Sstevel@tonic-gate 	umw->umw_max = magmax;
12147c478bd9Sstevel@tonic-gate 	umw->umw_count = magcnt;
12157c478bd9Sstevel@tonic-gate 	umw->umw_pos = 0;
12167c478bd9Sstevel@tonic-gate 
12177c478bd9Sstevel@tonic-gate 	/*
12187c478bd9Sstevel@tonic-gate 	 * When walking allocated buffers in a UMF_HASH cache, we walk the
12197c478bd9Sstevel@tonic-gate 	 * hash table instead of the slab layer.
12207c478bd9Sstevel@tonic-gate 	 */
12217c478bd9Sstevel@tonic-gate 	if ((cp->cache_flags & UMF_HASH) && (type & UM_ALLOCATED)) {
12227c478bd9Sstevel@tonic-gate 		layered = "umem_hash";
12237c478bd9Sstevel@tonic-gate 
12247c478bd9Sstevel@tonic-gate 		umw->umw_type |= UM_HASH;
12257c478bd9Sstevel@tonic-gate 	} else {
12267c478bd9Sstevel@tonic-gate 		/*
12277c478bd9Sstevel@tonic-gate 		 * If we are walking freed buffers, we only need the
12287c478bd9Sstevel@tonic-gate 		 * magazine layer plus the partially allocated slabs.
12297c478bd9Sstevel@tonic-gate 		 * To walk allocated buffers, we need all of the slabs.
12307c478bd9Sstevel@tonic-gate 		 */
12317c478bd9Sstevel@tonic-gate 		if (type & UM_ALLOCATED)
12327c478bd9Sstevel@tonic-gate 			layered = "umem_slab";
12337c478bd9Sstevel@tonic-gate 		else
12347c478bd9Sstevel@tonic-gate 			layered = "umem_slab_partial";
12357c478bd9Sstevel@tonic-gate 
12367c478bd9Sstevel@tonic-gate 		/*
12377c478bd9Sstevel@tonic-gate 		 * for small-slab caches, we read in the entire slab.  For
12387c478bd9Sstevel@tonic-gate 		 * freed buffers, we can just walk the freelist.  For
12397c478bd9Sstevel@tonic-gate 		 * allocated buffers, we use a 'valid' array to track
12407c478bd9Sstevel@tonic-gate 		 * the freed buffers.
12417c478bd9Sstevel@tonic-gate 		 */
12427c478bd9Sstevel@tonic-gate 		if (!(cp->cache_flags & UMF_HASH)) {
12437c478bd9Sstevel@tonic-gate 			chunksize = cp->cache_chunksize;
12447c478bd9Sstevel@tonic-gate 			slabsize = cp->cache_slabsize;
12457c478bd9Sstevel@tonic-gate 
12467c478bd9Sstevel@tonic-gate 			umw->umw_ubase = mdb_alloc(slabsize +
12477c478bd9Sstevel@tonic-gate 			    sizeof (umem_bufctl_t), UM_SLEEP);
12487c478bd9Sstevel@tonic-gate 
12497c478bd9Sstevel@tonic-gate 			if (type & UM_ALLOCATED)
12507c478bd9Sstevel@tonic-gate 				umw->umw_valid =
12517c478bd9Sstevel@tonic-gate 				    mdb_alloc(slabsize / chunksize, UM_SLEEP);
12527c478bd9Sstevel@tonic-gate 		}
12537c478bd9Sstevel@tonic-gate 	}
12547c478bd9Sstevel@tonic-gate 
12557c478bd9Sstevel@tonic-gate 	status = WALK_NEXT;
12567c478bd9Sstevel@tonic-gate 
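	/*
	 * The layered walk arranges for our step function to be invoked
	 * once per object of the underlying walk (a hashed bufctl or a
	 * slab), with that object available as wsp->walk_layer.
	 */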
12577c478bd9Sstevel@tonic-gate 	if (mdb_layered_walk(layered, wsp) == -1) {
12587c478bd9Sstevel@tonic-gate 		mdb_warn("unable to start layered '%s' walk", layered);
12597c478bd9Sstevel@tonic-gate 		status = WALK_ERR;
12607c478bd9Sstevel@tonic-gate 	}
12617c478bd9Sstevel@tonic-gate 
12627c478bd9Sstevel@tonic-gate out1:
12637c478bd9Sstevel@tonic-gate 	if (status == WALK_ERR) {
12647c478bd9Sstevel@tonic-gate 		if (umw->umw_valid)
12657c478bd9Sstevel@tonic-gate 			mdb_free(umw->umw_valid, slabsize / chunksize);
12667c478bd9Sstevel@tonic-gate 
12677c478bd9Sstevel@tonic-gate 		if (umw->umw_ubase)
12687c478bd9Sstevel@tonic-gate 			mdb_free(umw->umw_ubase, slabsize +
12697c478bd9Sstevel@tonic-gate 			    sizeof (umem_bufctl_t));
12707c478bd9Sstevel@tonic-gate 
1271789d94c2Sjwadams 		if (umw->umw_maglist)
1272789d94c2Sjwadams 			mdb_free(umw->umw_maglist, umw->umw_max *
1273789d94c2Sjwadams 			    sizeof (uintptr_t));
1274789d94c2Sjwadams 
12757c478bd9Sstevel@tonic-gate 		mdb_free(umw, sizeof (umem_walk_t));
12767c478bd9Sstevel@tonic-gate 		wsp->walk_data = NULL;
12777c478bd9Sstevel@tonic-gate 	}
12787c478bd9Sstevel@tonic-gate 
12797c478bd9Sstevel@tonic-gate out2:
12807c478bd9Sstevel@tonic-gate 	if (status == WALK_ERR)
12817c478bd9Sstevel@tonic-gate 		mdb_free(cp, csize);
12827c478bd9Sstevel@tonic-gate 
12837c478bd9Sstevel@tonic-gate 	return (status);
12847c478bd9Sstevel@tonic-gate }
12857c478bd9Sstevel@tonic-gate 
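/*
 * umem_walk_step() sees one hashed bufctl or one slab per call, courtesy of
 * the layered walk set up above.  For UM_HASH walks we simply screen out
 * buffers that are really sitting in a magazine; otherwise we report the
 * magazine contents once (for freed walks), read the slab in, and walk its
 * freelist to decide which chunks to report.
 */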
12867c478bd9Sstevel@tonic-gate int
12877c478bd9Sstevel@tonic-gate umem_walk_step(mdb_walk_state_t *wsp)
12887c478bd9Sstevel@tonic-gate {
12897c478bd9Sstevel@tonic-gate 	umem_walk_t *umw = wsp->walk_data;
12907c478bd9Sstevel@tonic-gate 	int type = umw->umw_type;
12917c478bd9Sstevel@tonic-gate 	umem_cache_t *cp = umw->umw_cp;
12927c478bd9Sstevel@tonic-gate 
12937c478bd9Sstevel@tonic-gate 	void **maglist = umw->umw_maglist;
12947c478bd9Sstevel@tonic-gate 	int magcnt = umw->umw_count;
12957c478bd9Sstevel@tonic-gate 
12967c478bd9Sstevel@tonic-gate 	uintptr_t chunksize, slabsize;
12977c478bd9Sstevel@tonic-gate 	uintptr_t addr;
12987c478bd9Sstevel@tonic-gate 	const umem_slab_t *sp;
12997c478bd9Sstevel@tonic-gate 	const umem_bufctl_t *bcp;
13007c478bd9Sstevel@tonic-gate 	umem_bufctl_t bc;
13017c478bd9Sstevel@tonic-gate 
13027c478bd9Sstevel@tonic-gate 	int chunks;
13037c478bd9Sstevel@tonic-gate 	char *kbase;
13047c478bd9Sstevel@tonic-gate 	void *buf;
13057c478bd9Sstevel@tonic-gate 	int i, ret;
13067c478bd9Sstevel@tonic-gate 
13077c478bd9Sstevel@tonic-gate 	char *valid, *ubase;
13087c478bd9Sstevel@tonic-gate 
13097c478bd9Sstevel@tonic-gate 	/*
13107c478bd9Sstevel@tonic-gate 	 * first, handle the 'umem_hash' layered walk case
13117c478bd9Sstevel@tonic-gate 	 */
13127c478bd9Sstevel@tonic-gate 	if (type & UM_HASH) {
13137c478bd9Sstevel@tonic-gate 		/*
13147c478bd9Sstevel@tonic-gate 		 * We have a buffer which has been allocated out of the
13157c478bd9Sstevel@tonic-gate 		 * global layer. We need to make sure that it's not
13167c478bd9Sstevel@tonic-gate 		 * actually sitting in a magazine before we report it as
13177c478bd9Sstevel@tonic-gate 		 * an allocated buffer.
13187c478bd9Sstevel@tonic-gate 		 */
13197c478bd9Sstevel@tonic-gate 		buf = ((const umem_bufctl_t *)wsp->walk_layer)->bc_addr;
13207c478bd9Sstevel@tonic-gate 
13217c478bd9Sstevel@tonic-gate 		if (magcnt > 0 &&
13227c478bd9Sstevel@tonic-gate 		    bsearch(&buf, maglist, magcnt, sizeof (void *),
13237c478bd9Sstevel@tonic-gate 		    addrcmp) != NULL)
13247c478bd9Sstevel@tonic-gate 			return (WALK_NEXT);
13257c478bd9Sstevel@tonic-gate 
13267c478bd9Sstevel@tonic-gate 		if (type & UM_BUFCTL)
13277c478bd9Sstevel@tonic-gate 			return (bufctl_walk_callback(cp, wsp, wsp->walk_addr));
13287c478bd9Sstevel@tonic-gate 
13297c478bd9Sstevel@tonic-gate 		return (umem_walk_callback(wsp, (uintptr_t)buf));
13307c478bd9Sstevel@tonic-gate 	}
13317c478bd9Sstevel@tonic-gate 
13327c478bd9Sstevel@tonic-gate 	ret = WALK_NEXT;
13337c478bd9Sstevel@tonic-gate 
13347c478bd9Sstevel@tonic-gate 	addr = umw->umw_addr;
13357c478bd9Sstevel@tonic-gate 
13367c478bd9Sstevel@tonic-gate 	/*
13377c478bd9Sstevel@tonic-gate 	 * If we're walking freed buffers, report everything in the
13387c478bd9Sstevel@tonic-gate 	 * magazine layer before processing the first slab.
13397c478bd9Sstevel@tonic-gate 	 */
13407c478bd9Sstevel@tonic-gate 	if ((type & UM_FREE) && magcnt != 0) {
13417c478bd9Sstevel@tonic-gate 		umw->umw_count = 0;		/* only do this once */
13427c478bd9Sstevel@tonic-gate 		for (i = 0; i < magcnt; i++) {
13437c478bd9Sstevel@tonic-gate 			buf = maglist[i];
13447c478bd9Sstevel@tonic-gate 
13457c478bd9Sstevel@tonic-gate 			if (type & UM_BUFCTL) {
13467c478bd9Sstevel@tonic-gate 				uintptr_t out;
13477c478bd9Sstevel@tonic-gate 
13487c478bd9Sstevel@tonic-gate 				if (cp->cache_flags & UMF_BUFTAG) {
13497c478bd9Sstevel@tonic-gate 					umem_buftag_t *btp;
13507c478bd9Sstevel@tonic-gate 					umem_buftag_t tag;
13517c478bd9Sstevel@tonic-gate 
13527c478bd9Sstevel@tonic-gate 					/* LINTED - alignment */
13537c478bd9Sstevel@tonic-gate 					btp = UMEM_BUFTAG(cp, buf);
13547c478bd9Sstevel@tonic-gate 					if (mdb_vread(&tag, sizeof (tag),
13557c478bd9Sstevel@tonic-gate 					    (uintptr_t)btp) == -1) {
13567c478bd9Sstevel@tonic-gate 						mdb_warn("reading buftag for "
13577c478bd9Sstevel@tonic-gate 						    "%p at %p", buf, btp);
13587c478bd9Sstevel@tonic-gate 						continue;
13597c478bd9Sstevel@tonic-gate 					}
13607c478bd9Sstevel@tonic-gate 					out = (uintptr_t)tag.bt_bufctl;
13617c478bd9Sstevel@tonic-gate 				} else {
13627c478bd9Sstevel@tonic-gate 					if (umem_hash_lookup(cp, addr, buf,
13637c478bd9Sstevel@tonic-gate 					    &out) == -1)
13647c478bd9Sstevel@tonic-gate 						continue;
13657c478bd9Sstevel@tonic-gate 				}
13667c478bd9Sstevel@tonic-gate 				ret = bufctl_walk_callback(cp, wsp, out);
13677c478bd9Sstevel@tonic-gate 			} else {
13687c478bd9Sstevel@tonic-gate 				ret = umem_walk_callback(wsp, (uintptr_t)buf);
13697c478bd9Sstevel@tonic-gate 			}
13707c478bd9Sstevel@tonic-gate 
13717c478bd9Sstevel@tonic-gate 			if (ret != WALK_NEXT)
13727c478bd9Sstevel@tonic-gate 				return (ret);
13737c478bd9Sstevel@tonic-gate 		}
13747c478bd9Sstevel@tonic-gate 	}
13757c478bd9Sstevel@tonic-gate 
13767c478bd9Sstevel@tonic-gate 	/*
13777c478bd9Sstevel@tonic-gate 	 * Handle the buffers in the current slab
13787c478bd9Sstevel@tonic-gate 	 */
13797c478bd9Sstevel@tonic-gate 	chunksize = cp->cache_chunksize;
13807c478bd9Sstevel@tonic-gate 	slabsize = cp->cache_slabsize;
13817c478bd9Sstevel@tonic-gate 
13827c478bd9Sstevel@tonic-gate 	sp = wsp->walk_layer;
13837c478bd9Sstevel@tonic-gate 	chunks = sp->slab_chunks;
13847c478bd9Sstevel@tonic-gate 	kbase = sp->slab_base;
13857c478bd9Sstevel@tonic-gate 
13867c478bd9Sstevel@tonic-gate 	dprintf(("kbase is %p\n", kbase));
13877c478bd9Sstevel@tonic-gate 
13887c478bd9Sstevel@tonic-gate 	if (!(cp->cache_flags & UMF_HASH)) {
13897c478bd9Sstevel@tonic-gate 		valid = umw->umw_valid;
13907c478bd9Sstevel@tonic-gate 		ubase = umw->umw_ubase;
13917c478bd9Sstevel@tonic-gate 
13927c478bd9Sstevel@tonic-gate 		if (mdb_vread(ubase, chunks * chunksize,
13937c478bd9Sstevel@tonic-gate 		    (uintptr_t)kbase) == -1) {
13947c478bd9Sstevel@tonic-gate 			mdb_warn("failed to read slab contents at %p", kbase);
13957c478bd9Sstevel@tonic-gate 			return (WALK_ERR);
13967c478bd9Sstevel@tonic-gate 		}
13977c478bd9Sstevel@tonic-gate 
13987c478bd9Sstevel@tonic-gate 		/*
13997c478bd9Sstevel@tonic-gate 		 * Set up the valid map as fully allocated -- we'll punch
14007c478bd9Sstevel@tonic-gate 		 * out the freelist.
14017c478bd9Sstevel@tonic-gate 		 */
14027c478bd9Sstevel@tonic-gate 		if (type & UM_ALLOCATED)
14037c478bd9Sstevel@tonic-gate 			(void) memset(valid, 1, chunks);
14047c478bd9Sstevel@tonic-gate 	} else {
14057c478bd9Sstevel@tonic-gate 		valid = NULL;
14067c478bd9Sstevel@tonic-gate 		ubase = NULL;
14077c478bd9Sstevel@tonic-gate 	}
14087c478bd9Sstevel@tonic-gate 
14097c478bd9Sstevel@tonic-gate 	/*
14107c478bd9Sstevel@tonic-gate 	 * walk the slab's freelist
14117c478bd9Sstevel@tonic-gate 	 */
14127c478bd9Sstevel@tonic-gate 	bcp = sp->slab_head;
14137c478bd9Sstevel@tonic-gate 
14147c478bd9Sstevel@tonic-gate 	dprintf(("refcnt is %d; chunks is %d\n", sp->slab_refcnt, chunks));
14157c478bd9Sstevel@tonic-gate 
14167c478bd9Sstevel@tonic-gate 	/*
14177c478bd9Sstevel@tonic-gate 	 * since we could be in the middle of allocating a buffer,
14187c478bd9Sstevel@tonic-gate 	 * our refcnt could be one higher than it ought to be.  So we
14197c478bd9Sstevel@tonic-gate 	 * check one further on the freelist than the count allows.
14207c478bd9Sstevel@tonic-gate 	 */
14217c478bd9Sstevel@tonic-gate 	for (i = sp->slab_refcnt; i <= chunks; i++) {
14227c478bd9Sstevel@tonic-gate 		uint_t ndx;
14237c478bd9Sstevel@tonic-gate 
14247c478bd9Sstevel@tonic-gate 		dprintf(("bcp is %p\n", bcp));
14257c478bd9Sstevel@tonic-gate 
14267c478bd9Sstevel@tonic-gate 		if (bcp == NULL) {
14277c478bd9Sstevel@tonic-gate 			if (i == chunks)
14287c478bd9Sstevel@tonic-gate 				break;
14297c478bd9Sstevel@tonic-gate 			mdb_warn(
14307c478bd9Sstevel@tonic-gate 			    "slab %p in cache %p freelist too short by %d\n",
14317c478bd9Sstevel@tonic-gate 			    sp, addr, chunks - i);
14327c478bd9Sstevel@tonic-gate 			break;
14337c478bd9Sstevel@tonic-gate 		}
14347c478bd9Sstevel@tonic-gate 
14357c478bd9Sstevel@tonic-gate 		if (cp->cache_flags & UMF_HASH) {
14367c478bd9Sstevel@tonic-gate 			if (mdb_vread(&bc, sizeof (bc), (uintptr_t)bcp) == -1) {
14377c478bd9Sstevel@tonic-gate 				mdb_warn("failed to read bufctl ptr at %p",
14387c478bd9Sstevel@tonic-gate 				    bcp);
14397c478bd9Sstevel@tonic-gate 				break;
14407c478bd9Sstevel@tonic-gate 			}
14417c478bd9Sstevel@tonic-gate 			buf = bc.bc_addr;
14427c478bd9Sstevel@tonic-gate 		} else {
14437c478bd9Sstevel@tonic-gate 			/*
1444d7dba7e5SBryan Cantrill 			 * Otherwise the buffer is (or should be) in the slab
1445d7dba7e5SBryan Cantrill 			 * that we've read in; determine its offset in the
1446d7dba7e5SBryan Cantrill 			 * slab, validate that it's not corrupt, and add to
1447d7dba7e5SBryan Cantrill 			 * our base address to find the umem_bufctl_t.  (Note
1448d7dba7e5SBryan Cantrill 			 * that we don't need to add the size of the bufctl
1449d7dba7e5SBryan Cantrill 			 * to our offset calculation because of the slop that's
1450d7dba7e5SBryan Cantrill 			 * allocated for the buffer at ubase.)
14517c478bd9Sstevel@tonic-gate 			 */
1452d7dba7e5SBryan Cantrill 			uintptr_t offs = (uintptr_t)bcp - (uintptr_t)kbase;
1453d7dba7e5SBryan Cantrill 
1454d7dba7e5SBryan Cantrill 			if (offs > chunks * chunksize) {
1455d7dba7e5SBryan Cantrill 				mdb_warn("found corrupt bufctl ptr %p"
1456d7dba7e5SBryan Cantrill 				    " in slab %p in cache %p\n", bcp,
1457d7dba7e5SBryan Cantrill 				    wsp->walk_addr, addr);
1458d7dba7e5SBryan Cantrill 				break;
1459d7dba7e5SBryan Cantrill 			}
14607c478bd9Sstevel@tonic-gate 
1461d7dba7e5SBryan Cantrill 			bc = *((umem_bufctl_t *)((uintptr_t)ubase + offs));
14627c478bd9Sstevel@tonic-gate 			buf = UMEM_BUF(cp, bcp);
14637c478bd9Sstevel@tonic-gate 		}
14647c478bd9Sstevel@tonic-gate 
14657c478bd9Sstevel@tonic-gate 		ndx = ((uintptr_t)buf - (uintptr_t)kbase) / chunksize;
14667c478bd9Sstevel@tonic-gate 
14677c478bd9Sstevel@tonic-gate 		if (ndx > slabsize / cp->cache_bufsize) {
14687c478bd9Sstevel@tonic-gate 			/*
14697c478bd9Sstevel@tonic-gate 			 * This is very wrong; we have managed to find
14707c478bd9Sstevel@tonic-gate 			 * a buffer in the slab which shouldn't
14717c478bd9Sstevel@tonic-gate 			 * actually be here.  Emit a warning, and
14727c478bd9Sstevel@tonic-gate 			 * try to continue.
14737c478bd9Sstevel@tonic-gate 			 */
14747c478bd9Sstevel@tonic-gate 			mdb_warn("buf %p is out of range for "
14757c478bd9Sstevel@tonic-gate 			    "slab %p, cache %p\n", buf, sp, addr);
14767c478bd9Sstevel@tonic-gate 		} else if (type & UM_ALLOCATED) {
14777c478bd9Sstevel@tonic-gate 			/*
14787c478bd9Sstevel@tonic-gate 			 * we have found a buffer on the slab's freelist;
14797c478bd9Sstevel@tonic-gate 			 * clear its entry
14807c478bd9Sstevel@tonic-gate 			 */
14817c478bd9Sstevel@tonic-gate 			valid[ndx] = 0;
14827c478bd9Sstevel@tonic-gate 		} else {
14837c478bd9Sstevel@tonic-gate 			/*
14847c478bd9Sstevel@tonic-gate 			 * Report this freed buffer
14857c478bd9Sstevel@tonic-gate 			 */
14867c478bd9Sstevel@tonic-gate 			if (type & UM_BUFCTL) {
14877c478bd9Sstevel@tonic-gate 				ret = bufctl_walk_callback(cp, wsp,
14887c478bd9Sstevel@tonic-gate 				    (uintptr_t)bcp);
14897c478bd9Sstevel@tonic-gate 			} else {
14907c478bd9Sstevel@tonic-gate 				ret = umem_walk_callback(wsp, (uintptr_t)buf);
14917c478bd9Sstevel@tonic-gate 			}
14927c478bd9Sstevel@tonic-gate 			if (ret != WALK_NEXT)
14937c478bd9Sstevel@tonic-gate 				return (ret);
14947c478bd9Sstevel@tonic-gate 		}
14957c478bd9Sstevel@tonic-gate 
14967c478bd9Sstevel@tonic-gate 		bcp = bc.bc_next;
14977c478bd9Sstevel@tonic-gate 	}
14987c478bd9Sstevel@tonic-gate 
14997c478bd9Sstevel@tonic-gate 	if (bcp != NULL) {
15007c478bd9Sstevel@tonic-gate 		dprintf(("slab %p in cache %p freelist too long (%p)\n",
15017c478bd9Sstevel@tonic-gate 		    sp, addr, bcp));
15027c478bd9Sstevel@tonic-gate 	}
15037c478bd9Sstevel@tonic-gate 
15047c478bd9Sstevel@tonic-gate 	/*
15057c478bd9Sstevel@tonic-gate 	 * If we are walking freed buffers, the loop above handled reporting
15067c478bd9Sstevel@tonic-gate 	 * them.
15077c478bd9Sstevel@tonic-gate 	 */
15087c478bd9Sstevel@tonic-gate 	if (type & UM_FREE)
15097c478bd9Sstevel@tonic-gate 		return (WALK_NEXT);
15107c478bd9Sstevel@tonic-gate 
15117c478bd9Sstevel@tonic-gate 	if (type & UM_BUFCTL) {
15127c478bd9Sstevel@tonic-gate 		mdb_warn("impossible situation: small-slab UM_BUFCTL walk for "
15137c478bd9Sstevel@tonic-gate 		    "cache %p\n", addr);
15147c478bd9Sstevel@tonic-gate 		return (WALK_ERR);
15157c478bd9Sstevel@tonic-gate 	}
15167c478bd9Sstevel@tonic-gate 
15177c478bd9Sstevel@tonic-gate 	/*
15187c478bd9Sstevel@tonic-gate 	 * Report allocated buffers, skipping buffers in the magazine layer.
15197c478bd9Sstevel@tonic-gate 	 * We only get this far for small-slab caches.
15207c478bd9Sstevel@tonic-gate 	 */
15217c478bd9Sstevel@tonic-gate 	for (i = 0; ret == WALK_NEXT && i < chunks; i++) {
15227c478bd9Sstevel@tonic-gate 		buf = (char *)kbase + i * chunksize;
15237c478bd9Sstevel@tonic-gate 
15247c478bd9Sstevel@tonic-gate 		if (!valid[i])
15257c478bd9Sstevel@tonic-gate 			continue;		/* on slab freelist */
15267c478bd9Sstevel@tonic-gate 
15277c478bd9Sstevel@tonic-gate 		if (magcnt > 0 &&
15287c478bd9Sstevel@tonic-gate 		    bsearch(&buf, maglist, magcnt, sizeof (void *),
15297c478bd9Sstevel@tonic-gate 		    addrcmp) != NULL)
15307c478bd9Sstevel@tonic-gate 			continue;		/* in magazine layer */
15317c478bd9Sstevel@tonic-gate 
15327c478bd9Sstevel@tonic-gate 		ret = umem_walk_callback(wsp, (uintptr_t)buf);
15337c478bd9Sstevel@tonic-gate 	}
15347c478bd9Sstevel@tonic-gate 	return (ret);
15357c478bd9Sstevel@tonic-gate }
15367c478bd9Sstevel@tonic-gate 
15377c478bd9Sstevel@tonic-gate void
15387c478bd9Sstevel@tonic-gate umem_walk_fini(mdb_walk_state_t *wsp)
15397c478bd9Sstevel@tonic-gate {
15407c478bd9Sstevel@tonic-gate 	umem_walk_t *umw = wsp->walk_data;
15417c478bd9Sstevel@tonic-gate 	uintptr_t chunksize;
15427c478bd9Sstevel@tonic-gate 	uintptr_t slabsize;
15437c478bd9Sstevel@tonic-gate 
15447c478bd9Sstevel@tonic-gate 	if (umw == NULL)
15457c478bd9Sstevel@tonic-gate 		return;
15467c478bd9Sstevel@tonic-gate 
15477c478bd9Sstevel@tonic-gate 	if (umw->umw_maglist != NULL)
15487c478bd9Sstevel@tonic-gate 		mdb_free(umw->umw_maglist, umw->umw_max * sizeof (void *));
15497c478bd9Sstevel@tonic-gate 
15507c478bd9Sstevel@tonic-gate 	chunksize = umw->umw_cp->cache_chunksize;
15517c478bd9Sstevel@tonic-gate 	slabsize = umw->umw_cp->cache_slabsize;
15527c478bd9Sstevel@tonic-gate 
15537c478bd9Sstevel@tonic-gate 	if (umw->umw_valid != NULL)
15547c478bd9Sstevel@tonic-gate 		mdb_free(umw->umw_valid, slabsize / chunksize);
15557c478bd9Sstevel@tonic-gate 	if (umw->umw_ubase != NULL)
15567c478bd9Sstevel@tonic-gate 		mdb_free(umw->umw_ubase, slabsize + sizeof (umem_bufctl_t));
15577c478bd9Sstevel@tonic-gate 
15587c478bd9Sstevel@tonic-gate 	mdb_free(umw->umw_cp, umw->umw_csize);
15597c478bd9Sstevel@tonic-gate 	mdb_free(umw, sizeof (umem_walk_t));
15607c478bd9Sstevel@tonic-gate }
15617c478bd9Sstevel@tonic-gate 
15627c478bd9Sstevel@tonic-gate /*ARGSUSED*/
15637c478bd9Sstevel@tonic-gate static int
15647c478bd9Sstevel@tonic-gate umem_walk_all(uintptr_t addr, const umem_cache_t *c, mdb_walk_state_t *wsp)
15657c478bd9Sstevel@tonic-gate {
15667c478bd9Sstevel@tonic-gate 	/*
15677c478bd9Sstevel@tonic-gate 	 * Buffers allocated from NOTOUCH caches can also show up as freed
15687c478bd9Sstevel@tonic-gate 	 * memory in other caches.  This can be a little confusing, so we
15697c478bd9Sstevel@tonic-gate 	 * don't walk NOTOUCH caches when walking all caches (thereby assuring
15707c478bd9Sstevel@tonic-gate 	 * that "::walk umem" and "::walk freemem" yield disjoint output).
15717c478bd9Sstevel@tonic-gate 	 */
15727c478bd9Sstevel@tonic-gate 	if (c->cache_cflags & UMC_NOTOUCH)
15737c478bd9Sstevel@tonic-gate 		return (WALK_NEXT);
15747c478bd9Sstevel@tonic-gate 
15757c478bd9Sstevel@tonic-gate 	if (mdb_pwalk(wsp->walk_data, wsp->walk_callback,
15767c478bd9Sstevel@tonic-gate 	    wsp->walk_cbdata, addr) == -1)
15777c478bd9Sstevel@tonic-gate 		return (WALK_DONE);
15787c478bd9Sstevel@tonic-gate 
15797c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
15807c478bd9Sstevel@tonic-gate }
15817c478bd9Sstevel@tonic-gate 
15827c478bd9Sstevel@tonic-gate #define	UMEM_WALK_ALL(name, wsp) { \
15837c478bd9Sstevel@tonic-gate 	wsp->walk_data = (name); \
15847c478bd9Sstevel@tonic-gate 	if (mdb_walk("umem_cache", (mdb_walk_cb_t)umem_walk_all, wsp) == -1) \
15857c478bd9Sstevel@tonic-gate 		return (WALK_ERR); \
15867c478bd9Sstevel@tonic-gate 	return (WALK_DONE); \
15877c478bd9Sstevel@tonic-gate }
15887c478bd9Sstevel@tonic-gate 
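/*
 * Walk entry points.  With no starting address, each walk is applied to
 * every (non-NOTOUCH) cache via the "umem_cache" walk; given a cache
 * address, only that cache is walked.  From mdb, for example, "::walk umem"
 * visits every allocated buffer, while "<cache addr>::walk freemem" visits
 * the free buffers of a single cache.
 */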
15897c478bd9Sstevel@tonic-gate int
15907c478bd9Sstevel@tonic-gate umem_walk_init(mdb_walk_state_t *wsp)
15917c478bd9Sstevel@tonic-gate {
15927c478bd9Sstevel@tonic-gate 	if (wsp->walk_arg != NULL)
15937c478bd9Sstevel@tonic-gate 		wsp->walk_addr = (uintptr_t)wsp->walk_arg;
15947c478bd9Sstevel@tonic-gate 
15957c478bd9Sstevel@tonic-gate 	if (wsp->walk_addr == NULL)
15967c478bd9Sstevel@tonic-gate 		UMEM_WALK_ALL("umem", wsp);
15977c478bd9Sstevel@tonic-gate 	return (umem_walk_init_common(wsp, UM_ALLOCATED));
15987c478bd9Sstevel@tonic-gate }
15997c478bd9Sstevel@tonic-gate 
16007c478bd9Sstevel@tonic-gate int
16017c478bd9Sstevel@tonic-gate bufctl_walk_init(mdb_walk_state_t *wsp)
16027c478bd9Sstevel@tonic-gate {
16037c478bd9Sstevel@tonic-gate 	if (wsp->walk_addr == NULL)
16047c478bd9Sstevel@tonic-gate 		UMEM_WALK_ALL("bufctl", wsp);
16057c478bd9Sstevel@tonic-gate 	return (umem_walk_init_common(wsp, UM_ALLOCATED | UM_BUFCTL));
16067c478bd9Sstevel@tonic-gate }
16077c478bd9Sstevel@tonic-gate 
16087c478bd9Sstevel@tonic-gate int
16097c478bd9Sstevel@tonic-gate freemem_walk_init(mdb_walk_state_t *wsp)
16107c478bd9Sstevel@tonic-gate {
16117c478bd9Sstevel@tonic-gate 	if (wsp->walk_addr == NULL)
16127c478bd9Sstevel@tonic-gate 		UMEM_WALK_ALL("freemem", wsp);
16137c478bd9Sstevel@tonic-gate 	return (umem_walk_init_common(wsp, UM_FREE));
16147c478bd9Sstevel@tonic-gate }
16157c478bd9Sstevel@tonic-gate 
16167c478bd9Sstevel@tonic-gate int
16177c478bd9Sstevel@tonic-gate freectl_walk_init(mdb_walk_state_t *wsp)
16187c478bd9Sstevel@tonic-gate {
16197c478bd9Sstevel@tonic-gate 	if (wsp->walk_addr == NULL)
16207c478bd9Sstevel@tonic-gate 		UMEM_WALK_ALL("freectl", wsp);
16217c478bd9Sstevel@tonic-gate 	return (umem_walk_init_common(wsp, UM_FREE | UM_BUFCTL));
16227c478bd9Sstevel@tonic-gate }
16237c478bd9Sstevel@tonic-gate 
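/*
 * The bufctl_history walker starts from one bufctl (in a UMF_AUDIT cache)
 * and follows its bc_lastlog chain backwards through the transaction log,
 * yielding successively older records for the same buffer.  The walk stops
 * when a record no longer matches the original buffer, cache, and slab, or
 * when the timestamps stop decreasing (which guards against loops).
 */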
16247c478bd9Sstevel@tonic-gate typedef struct bufctl_history_walk {
16257c478bd9Sstevel@tonic-gate 	void		*bhw_next;
16267c478bd9Sstevel@tonic-gate 	umem_cache_t	*bhw_cache;
16277c478bd9Sstevel@tonic-gate 	umem_slab_t	*bhw_slab;
16287c478bd9Sstevel@tonic-gate 	hrtime_t	bhw_timestamp;
16297c478bd9Sstevel@tonic-gate } bufctl_history_walk_t;
16307c478bd9Sstevel@tonic-gate 
16317c478bd9Sstevel@tonic-gate int
16327c478bd9Sstevel@tonic-gate bufctl_history_walk_init(mdb_walk_state_t *wsp)
16337c478bd9Sstevel@tonic-gate {
16347c478bd9Sstevel@tonic-gate 	bufctl_history_walk_t *bhw;
16357c478bd9Sstevel@tonic-gate 	umem_bufctl_audit_t bc;
16367c478bd9Sstevel@tonic-gate 	umem_bufctl_audit_t bcn;
16377c478bd9Sstevel@tonic-gate 
16387c478bd9Sstevel@tonic-gate 	if (wsp->walk_addr == NULL) {
16397c478bd9Sstevel@tonic-gate 		mdb_warn("bufctl_history walk doesn't support global walks\n");
16407c478bd9Sstevel@tonic-gate 		return (WALK_ERR);
16417c478bd9Sstevel@tonic-gate 	}
16427c478bd9Sstevel@tonic-gate 
16437c478bd9Sstevel@tonic-gate 	if (mdb_vread(&bc, sizeof (bc), wsp->walk_addr) == -1) {
16447c478bd9Sstevel@tonic-gate 		mdb_warn("unable to read bufctl at %p", wsp->walk_addr);
16457c478bd9Sstevel@tonic-gate 		return (WALK_ERR);
16467c478bd9Sstevel@tonic-gate 	}
16477c478bd9Sstevel@tonic-gate 
16487c478bd9Sstevel@tonic-gate 	bhw = mdb_zalloc(sizeof (*bhw), UM_SLEEP);
16497c478bd9Sstevel@tonic-gate 	bhw->bhw_timestamp = 0;
16507c478bd9Sstevel@tonic-gate 	bhw->bhw_cache = bc.bc_cache;
16517c478bd9Sstevel@tonic-gate 	bhw->bhw_slab = bc.bc_slab;
16527c478bd9Sstevel@tonic-gate 
16537c478bd9Sstevel@tonic-gate 	/*
16547c478bd9Sstevel@tonic-gate 	 * sometimes the first log entry matches the base bufctl;  in that
16557c478bd9Sstevel@tonic-gate 	 * case, skip the base bufctl.
16567c478bd9Sstevel@tonic-gate 	 */
16577c478bd9Sstevel@tonic-gate 	if (bc.bc_lastlog != NULL &&
16587c478bd9Sstevel@tonic-gate 	    mdb_vread(&bcn, sizeof (bcn), (uintptr_t)bc.bc_lastlog) != -1 &&
16597c478bd9Sstevel@tonic-gate 	    bc.bc_addr == bcn.bc_addr &&
16607c478bd9Sstevel@tonic-gate 	    bc.bc_cache == bcn.bc_cache &&
16617c478bd9Sstevel@tonic-gate 	    bc.bc_slab == bcn.bc_slab &&
16627c478bd9Sstevel@tonic-gate 	    bc.bc_timestamp == bcn.bc_timestamp &&
16637c478bd9Sstevel@tonic-gate 	    bc.bc_thread == bcn.bc_thread)
16647c478bd9Sstevel@tonic-gate 		bhw->bhw_next = bc.bc_lastlog;
16657c478bd9Sstevel@tonic-gate 	else
16667c478bd9Sstevel@tonic-gate 		bhw->bhw_next = (void *)wsp->walk_addr;
16677c478bd9Sstevel@tonic-gate 
16687c478bd9Sstevel@tonic-gate 	wsp->walk_addr = (uintptr_t)bc.bc_addr;
16697c478bd9Sstevel@tonic-gate 	wsp->walk_data = bhw;
16707c478bd9Sstevel@tonic-gate 
16717c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
16727c478bd9Sstevel@tonic-gate }
16737c478bd9Sstevel@tonic-gate 
16747c478bd9Sstevel@tonic-gate int
16757c478bd9Sstevel@tonic-gate bufctl_history_walk_step(mdb_walk_state_t *wsp)
16767c478bd9Sstevel@tonic-gate {
16777c478bd9Sstevel@tonic-gate 	bufctl_history_walk_t *bhw = wsp->walk_data;
16787c478bd9Sstevel@tonic-gate 	uintptr_t addr = (uintptr_t)bhw->bhw_next;
16797c478bd9Sstevel@tonic-gate 	uintptr_t baseaddr = wsp->walk_addr;
16807c478bd9Sstevel@tonic-gate 	umem_bufctl_audit_t *b;
16817c478bd9Sstevel@tonic-gate 	UMEM_LOCAL_BUFCTL_AUDIT(&b);
16827c478bd9Sstevel@tonic-gate 
16837c478bd9Sstevel@tonic-gate 	if (addr == NULL)
16847c478bd9Sstevel@tonic-gate 		return (WALK_DONE);
16857c478bd9Sstevel@tonic-gate 
16867c478bd9Sstevel@tonic-gate 	if (mdb_vread(b, UMEM_BUFCTL_AUDIT_SIZE, addr) == -1) {
16877c478bd9Sstevel@tonic-gate 		mdb_warn("unable to read bufctl at %p", bhw->bhw_next);
16887c478bd9Sstevel@tonic-gate 		return (WALK_ERR);
16897c478bd9Sstevel@tonic-gate 	}
16907c478bd9Sstevel@tonic-gate 
16917c478bd9Sstevel@tonic-gate 	/*
16927c478bd9Sstevel@tonic-gate 	 * The bufctl is only valid if the address, cache, and slab are
16937c478bd9Sstevel@tonic-gate 	 * correct.  We also check that the timestamp is decreasing, to
16947c478bd9Sstevel@tonic-gate 	 * prevent infinite loops.
16957c478bd9Sstevel@tonic-gate 	 */
16967c478bd9Sstevel@tonic-gate 	if ((uintptr_t)b->bc_addr != baseaddr ||
16977c478bd9Sstevel@tonic-gate 	    b->bc_cache != bhw->bhw_cache ||
16987c478bd9Sstevel@tonic-gate 	    b->bc_slab != bhw->bhw_slab ||
16997c478bd9Sstevel@tonic-gate 	    (bhw->bhw_timestamp != 0 && b->bc_timestamp >= bhw->bhw_timestamp))
17007c478bd9Sstevel@tonic-gate 		return (WALK_DONE);
17017c478bd9Sstevel@tonic-gate 
17027c478bd9Sstevel@tonic-gate 	bhw->bhw_next = b->bc_lastlog;
17037c478bd9Sstevel@tonic-gate 	bhw->bhw_timestamp = b->bc_timestamp;
17047c478bd9Sstevel@tonic-gate 
17057c478bd9Sstevel@tonic-gate 	return (wsp->walk_callback(addr, b, wsp->walk_cbdata));
17067c478bd9Sstevel@tonic-gate }
17077c478bd9Sstevel@tonic-gate 
17087c478bd9Sstevel@tonic-gate void
17097c478bd9Sstevel@tonic-gate bufctl_history_walk_fini(mdb_walk_state_t *wsp)
17107c478bd9Sstevel@tonic-gate {
17117c478bd9Sstevel@tonic-gate 	bufctl_history_walk_t *bhw = wsp->walk_data;
17127c478bd9Sstevel@tonic-gate 
17137c478bd9Sstevel@tonic-gate 	mdb_free(bhw, sizeof (*bhw));
17147c478bd9Sstevel@tonic-gate }
17157c478bd9Sstevel@tonic-gate 
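/*
 * The umem_log walker reads a umem_log_header_t (by default the one named
 * by umem_transaction_log), copies the whole log into the debugger, builds
 * an array of pointers to every record, and sorts it with bufctlcmp so the
 * records are visited in timestamp order.  Each record's address is
 * translated back into the target's address space before the callback runs.
 */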
17167c478bd9Sstevel@tonic-gate typedef struct umem_log_walk {
17177c478bd9Sstevel@tonic-gate 	umem_bufctl_audit_t *ulw_base;
17187c478bd9Sstevel@tonic-gate 	umem_bufctl_audit_t **ulw_sorted;
17197c478bd9Sstevel@tonic-gate 	umem_log_header_t ulw_lh;
17207c478bd9Sstevel@tonic-gate 	size_t ulw_size;
17217c478bd9Sstevel@tonic-gate 	size_t ulw_maxndx;
17227c478bd9Sstevel@tonic-gate 	size_t ulw_ndx;
17237c478bd9Sstevel@tonic-gate } umem_log_walk_t;
17247c478bd9Sstevel@tonic-gate 
17257c478bd9Sstevel@tonic-gate int
17267c478bd9Sstevel@tonic-gate umem_log_walk_init(mdb_walk_state_t *wsp)
17277c478bd9Sstevel@tonic-gate {
17287c478bd9Sstevel@tonic-gate 	uintptr_t lp = wsp->walk_addr;
17297c478bd9Sstevel@tonic-gate 	umem_log_walk_t *ulw;
17307c478bd9Sstevel@tonic-gate 	umem_log_header_t *lhp;
17317c478bd9Sstevel@tonic-gate 	int maxndx, i, j, k;
17327c478bd9Sstevel@tonic-gate 
17337c478bd9Sstevel@tonic-gate 	/*
17347c478bd9Sstevel@tonic-gate 	 * By default (global walk), walk the umem_transaction_log.  Otherwise
17357c478bd9Sstevel@tonic-gate 	 * read the log whose umem_log_header_t is stored at walk_addr.
17367c478bd9Sstevel@tonic-gate 	 */
17377c478bd9Sstevel@tonic-gate 	if (lp == NULL && umem_readvar(&lp, "umem_transaction_log") == -1) {
17387c478bd9Sstevel@tonic-gate 		mdb_warn("failed to read 'umem_transaction_log'");
17397c478bd9Sstevel@tonic-gate 		return (WALK_ERR);
17407c478bd9Sstevel@tonic-gate 	}
17417c478bd9Sstevel@tonic-gate 
17427c478bd9Sstevel@tonic-gate 	if (lp == NULL) {
17437c478bd9Sstevel@tonic-gate 		mdb_warn("log is disabled\n");
17447c478bd9Sstevel@tonic-gate 		return (WALK_ERR);
17457c478bd9Sstevel@tonic-gate 	}
17467c478bd9Sstevel@tonic-gate 
17477c478bd9Sstevel@tonic-gate 	ulw = mdb_zalloc(sizeof (umem_log_walk_t), UM_SLEEP);
17487c478bd9Sstevel@tonic-gate 	lhp = &ulw->ulw_lh;
17497c478bd9Sstevel@tonic-gate 
17507c478bd9Sstevel@tonic-gate 	if (mdb_vread(lhp, sizeof (umem_log_header_t), lp) == -1) {
17517c478bd9Sstevel@tonic-gate 		mdb_warn("failed to read log header at %p", lp);
17527c478bd9Sstevel@tonic-gate 		mdb_free(ulw, sizeof (umem_log_walk_t));
17537c478bd9Sstevel@tonic-gate 		return (WALK_ERR);
17547c478bd9Sstevel@tonic-gate 	}
17557c478bd9Sstevel@tonic-gate 
17567c478bd9Sstevel@tonic-gate 	ulw->ulw_size = lhp->lh_chunksize * lhp->lh_nchunks;
17577c478bd9Sstevel@tonic-gate 	ulw->ulw_base = mdb_alloc(ulw->ulw_size, UM_SLEEP);
17587c478bd9Sstevel@tonic-gate 	maxndx = lhp->lh_chunksize / UMEM_BUFCTL_AUDIT_SIZE - 1;
17597c478bd9Sstevel@tonic-gate 
17607c478bd9Sstevel@tonic-gate 	if (mdb_vread(ulw->ulw_base, ulw->ulw_size,
17617c478bd9Sstevel@tonic-gate 	    (uintptr_t)lhp->lh_base) == -1) {
17627c478bd9Sstevel@tonic-gate 		mdb_warn("failed to read log at base %p", lhp->lh_base);
17637c478bd9Sstevel@tonic-gate 		mdb_free(ulw->ulw_base, ulw->ulw_size);
17647c478bd9Sstevel@tonic-gate 		mdb_free(ulw, sizeof (umem_log_walk_t));
17657c478bd9Sstevel@tonic-gate 		return (WALK_ERR);
17667c478bd9Sstevel@tonic-gate 	}
17677c478bd9Sstevel@tonic-gate 
17687c478bd9Sstevel@tonic-gate 	ulw->ulw_sorted = mdb_alloc(maxndx * lhp->lh_nchunks *
17697c478bd9Sstevel@tonic-gate 	    sizeof (umem_bufctl_audit_t *), UM_SLEEP);
17707c478bd9Sstevel@tonic-gate 
17717c478bd9Sstevel@tonic-gate 	for (i = 0, k = 0; i < lhp->lh_nchunks; i++) {
17727c478bd9Sstevel@tonic-gate 		caddr_t chunk = (caddr_t)
17737c478bd9Sstevel@tonic-gate 		    ((uintptr_t)ulw->ulw_base + i * lhp->lh_chunksize);
17747c478bd9Sstevel@tonic-gate 
17757c478bd9Sstevel@tonic-gate 		for (j = 0; j < maxndx; j++) {
17767c478bd9Sstevel@tonic-gate 			/* LINTED align */
17777c478bd9Sstevel@tonic-gate 			ulw->ulw_sorted[k++] = (umem_bufctl_audit_t *)chunk;
17787c478bd9Sstevel@tonic-gate 			chunk += UMEM_BUFCTL_AUDIT_SIZE;
17797c478bd9Sstevel@tonic-gate 		}
17807c478bd9Sstevel@tonic-gate 	}
17817c478bd9Sstevel@tonic-gate 
17827c478bd9Sstevel@tonic-gate 	qsort(ulw->ulw_sorted, k, sizeof (umem_bufctl_audit_t *),
17837c478bd9Sstevel@tonic-gate 	    (int(*)(const void *, const void *))bufctlcmp);
17847c478bd9Sstevel@tonic-gate 
17857c478bd9Sstevel@tonic-gate 	ulw->ulw_maxndx = k;
17867c478bd9Sstevel@tonic-gate 	wsp->walk_data = ulw;
17877c478bd9Sstevel@tonic-gate 
17887c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
17897c478bd9Sstevel@tonic-gate }
17907c478bd9Sstevel@tonic-gate 
17917c478bd9Sstevel@tonic-gate int
17927c478bd9Sstevel@tonic-gate umem_log_walk_step(mdb_walk_state_t *wsp)
17937c478bd9Sstevel@tonic-gate {
17947c478bd9Sstevel@tonic-gate 	umem_log_walk_t *ulw = wsp->walk_data;
17957c478bd9Sstevel@tonic-gate 	umem_bufctl_audit_t *bcp;
17967c478bd9Sstevel@tonic-gate 
17977c478bd9Sstevel@tonic-gate 	if (ulw->ulw_ndx == ulw->ulw_maxndx)
17987c478bd9Sstevel@tonic-gate 		return (WALK_DONE);
17997c478bd9Sstevel@tonic-gate 
18007c478bd9Sstevel@tonic-gate 	bcp = ulw->ulw_sorted[ulw->ulw_ndx++];
18017c478bd9Sstevel@tonic-gate 
18027c478bd9Sstevel@tonic-gate 	return (wsp->walk_callback((uintptr_t)bcp - (uintptr_t)ulw->ulw_base +
18037c478bd9Sstevel@tonic-gate 	    (uintptr_t)ulw->ulw_lh.lh_base, bcp, wsp->walk_cbdata));
18047c478bd9Sstevel@tonic-gate }
18057c478bd9Sstevel@tonic-gate 
18067c478bd9Sstevel@tonic-gate void
18077c478bd9Sstevel@tonic-gate umem_log_walk_fini(mdb_walk_state_t *wsp)
18087c478bd9Sstevel@tonic-gate {
18097c478bd9Sstevel@tonic-gate 	umem_log_walk_t *ulw = wsp->walk_data;
18107c478bd9Sstevel@tonic-gate 
18117c478bd9Sstevel@tonic-gate 	mdb_free(ulw->ulw_base, ulw->ulw_size);
18127c478bd9Sstevel@tonic-gate 	mdb_free(ulw->ulw_sorted, ulw->ulw_maxndx *
18137c478bd9Sstevel@tonic-gate 	    sizeof (umem_bufctl_audit_t *));
18147c478bd9Sstevel@tonic-gate 	mdb_free(ulw, sizeof (umem_log_walk_t));
18157c478bd9Sstevel@tonic-gate }
18167c478bd9Sstevel@tonic-gate 
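/*
 * allocdby/freedby:  given a thread identifier as the walk address, collect
 * every audited bufctl (or freectl) whose bc_thread matches, across all
 * caches, and present them most-recent-first (see allocdby_cmp).  The
 * allocdby and freedby dcmds below print one line per record, showing the
 * first stack frame that isn't a umem_* function.
 */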
18177c478bd9Sstevel@tonic-gate typedef struct allocdby_bufctl {
18187c478bd9Sstevel@tonic-gate 	uintptr_t abb_addr;
18197c478bd9Sstevel@tonic-gate 	hrtime_t abb_ts;
18207c478bd9Sstevel@tonic-gate } allocdby_bufctl_t;
18217c478bd9Sstevel@tonic-gate 
18227c478bd9Sstevel@tonic-gate typedef struct allocdby_walk {
18237c478bd9Sstevel@tonic-gate 	const char *abw_walk;
18247c478bd9Sstevel@tonic-gate 	uintptr_t abw_thread;
18257c478bd9Sstevel@tonic-gate 	size_t abw_nbufs;
18267c478bd9Sstevel@tonic-gate 	size_t abw_size;
18277c478bd9Sstevel@tonic-gate 	allocdby_bufctl_t *abw_buf;
18287c478bd9Sstevel@tonic-gate 	size_t abw_ndx;
18297c478bd9Sstevel@tonic-gate } allocdby_walk_t;
18307c478bd9Sstevel@tonic-gate 
18317c478bd9Sstevel@tonic-gate int
18327c478bd9Sstevel@tonic-gate allocdby_walk_bufctl(uintptr_t addr, const umem_bufctl_audit_t *bcp,
18337c478bd9Sstevel@tonic-gate     allocdby_walk_t *abw)
18347c478bd9Sstevel@tonic-gate {
18357c478bd9Sstevel@tonic-gate 	if ((uintptr_t)bcp->bc_thread != abw->abw_thread)
18367c478bd9Sstevel@tonic-gate 		return (WALK_NEXT);
18377c478bd9Sstevel@tonic-gate 
18387c478bd9Sstevel@tonic-gate 	if (abw->abw_nbufs == abw->abw_size) {
18397c478bd9Sstevel@tonic-gate 		allocdby_bufctl_t *buf;
18407c478bd9Sstevel@tonic-gate 		size_t oldsize = sizeof (allocdby_bufctl_t) * abw->abw_size;
18417c478bd9Sstevel@tonic-gate 
18427c478bd9Sstevel@tonic-gate 		buf = mdb_zalloc(oldsize << 1, UM_SLEEP);
18437c478bd9Sstevel@tonic-gate 
18447c478bd9Sstevel@tonic-gate 		bcopy(abw->abw_buf, buf, oldsize);
18457c478bd9Sstevel@tonic-gate 		mdb_free(abw->abw_buf, oldsize);
18467c478bd9Sstevel@tonic-gate 
18477c478bd9Sstevel@tonic-gate 		abw->abw_size <<= 1;
18487c478bd9Sstevel@tonic-gate 		abw->abw_buf = buf;
18497c478bd9Sstevel@tonic-gate 	}
18507c478bd9Sstevel@tonic-gate 
18517c478bd9Sstevel@tonic-gate 	abw->abw_buf[abw->abw_nbufs].abb_addr = addr;
18527c478bd9Sstevel@tonic-gate 	abw->abw_buf[abw->abw_nbufs].abb_ts = bcp->bc_timestamp;
18537c478bd9Sstevel@tonic-gate 	abw->abw_nbufs++;
18547c478bd9Sstevel@tonic-gate 
18557c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
18567c478bd9Sstevel@tonic-gate }
18577c478bd9Sstevel@tonic-gate 
18587c478bd9Sstevel@tonic-gate /*ARGSUSED*/
18597c478bd9Sstevel@tonic-gate int
18607c478bd9Sstevel@tonic-gate allocdby_walk_cache(uintptr_t addr, const umem_cache_t *c, allocdby_walk_t *abw)
18617c478bd9Sstevel@tonic-gate {
18627c478bd9Sstevel@tonic-gate 	if (mdb_pwalk(abw->abw_walk, (mdb_walk_cb_t)allocdby_walk_bufctl,
18637c478bd9Sstevel@tonic-gate 	    abw, addr) == -1) {
18647c478bd9Sstevel@tonic-gate 		mdb_warn("couldn't walk bufctl for cache %p", addr);
18657c478bd9Sstevel@tonic-gate 		return (WALK_DONE);
18667c478bd9Sstevel@tonic-gate 	}
18677c478bd9Sstevel@tonic-gate 
18687c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
18697c478bd9Sstevel@tonic-gate }
18707c478bd9Sstevel@tonic-gate 
18717c478bd9Sstevel@tonic-gate static int
18727c478bd9Sstevel@tonic-gate allocdby_cmp(const allocdby_bufctl_t *lhs, const allocdby_bufctl_t *rhs)
18737c478bd9Sstevel@tonic-gate {
18747c478bd9Sstevel@tonic-gate 	if (lhs->abb_ts < rhs->abb_ts)
18757c478bd9Sstevel@tonic-gate 		return (1);
18767c478bd9Sstevel@tonic-gate 	if (lhs->abb_ts > rhs->abb_ts)
18777c478bd9Sstevel@tonic-gate 		return (-1);
18787c478bd9Sstevel@tonic-gate 	return (0);
18797c478bd9Sstevel@tonic-gate }
18807c478bd9Sstevel@tonic-gate 
18817c478bd9Sstevel@tonic-gate static int
18827c478bd9Sstevel@tonic-gate allocdby_walk_init_common(mdb_walk_state_t *wsp, const char *walk)
18837c478bd9Sstevel@tonic-gate {
18847c478bd9Sstevel@tonic-gate 	allocdby_walk_t *abw;
18857c478bd9Sstevel@tonic-gate 
18867c478bd9Sstevel@tonic-gate 	if (wsp->walk_addr == NULL) {
18877c478bd9Sstevel@tonic-gate 		mdb_warn("allocdby walk doesn't support global walks\n");
18887c478bd9Sstevel@tonic-gate 		return (WALK_ERR);
18897c478bd9Sstevel@tonic-gate 	}
18907c478bd9Sstevel@tonic-gate 
18917c478bd9Sstevel@tonic-gate 	abw = mdb_zalloc(sizeof (allocdby_walk_t), UM_SLEEP);
18927c478bd9Sstevel@tonic-gate 
18937c478bd9Sstevel@tonic-gate 	abw->abw_thread = wsp->walk_addr;
18947c478bd9Sstevel@tonic-gate 	abw->abw_walk = walk;
18957c478bd9Sstevel@tonic-gate 	abw->abw_size = 128;	/* something reasonable */
18967c478bd9Sstevel@tonic-gate 	abw->abw_buf =
18977c478bd9Sstevel@tonic-gate 	    mdb_zalloc(abw->abw_size * sizeof (allocdby_bufctl_t), UM_SLEEP);
18987c478bd9Sstevel@tonic-gate 
18997c478bd9Sstevel@tonic-gate 	wsp->walk_data = abw;
19007c478bd9Sstevel@tonic-gate 
19017c478bd9Sstevel@tonic-gate 	if (mdb_walk("umem_cache",
19027c478bd9Sstevel@tonic-gate 	    (mdb_walk_cb_t)allocdby_walk_cache, abw) == -1) {
19037c478bd9Sstevel@tonic-gate 		mdb_warn("couldn't walk umem_cache");
19047c478bd9Sstevel@tonic-gate 		allocdby_walk_fini(wsp);
19057c478bd9Sstevel@tonic-gate 		return (WALK_ERR);
19067c478bd9Sstevel@tonic-gate 	}
19077c478bd9Sstevel@tonic-gate 
19087c478bd9Sstevel@tonic-gate 	qsort(abw->abw_buf, abw->abw_nbufs, sizeof (allocdby_bufctl_t),
19097c478bd9Sstevel@tonic-gate 	    (int(*)(const void *, const void *))allocdby_cmp);
19107c478bd9Sstevel@tonic-gate 
19117c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
19127c478bd9Sstevel@tonic-gate }
19137c478bd9Sstevel@tonic-gate 
19147c478bd9Sstevel@tonic-gate int
19157c478bd9Sstevel@tonic-gate allocdby_walk_init(mdb_walk_state_t *wsp)
19167c478bd9Sstevel@tonic-gate {
19177c478bd9Sstevel@tonic-gate 	return (allocdby_walk_init_common(wsp, "bufctl"));
19187c478bd9Sstevel@tonic-gate }
19197c478bd9Sstevel@tonic-gate 
19207c478bd9Sstevel@tonic-gate int
19217c478bd9Sstevel@tonic-gate freedby_walk_init(mdb_walk_state_t *wsp)
19227c478bd9Sstevel@tonic-gate {
19237c478bd9Sstevel@tonic-gate 	return (allocdby_walk_init_common(wsp, "freectl"));
19247c478bd9Sstevel@tonic-gate }
19257c478bd9Sstevel@tonic-gate 
19267c478bd9Sstevel@tonic-gate int
19277c478bd9Sstevel@tonic-gate allocdby_walk_step(mdb_walk_state_t *wsp)
19287c478bd9Sstevel@tonic-gate {
19297c478bd9Sstevel@tonic-gate 	allocdby_walk_t *abw = wsp->walk_data;
19307c478bd9Sstevel@tonic-gate 	uintptr_t addr;
19317c478bd9Sstevel@tonic-gate 	umem_bufctl_audit_t *bcp;
19327c478bd9Sstevel@tonic-gate 	UMEM_LOCAL_BUFCTL_AUDIT(&bcp);
19337c478bd9Sstevel@tonic-gate 
19347c478bd9Sstevel@tonic-gate 	if (abw->abw_ndx == abw->abw_nbufs)
19357c478bd9Sstevel@tonic-gate 		return (WALK_DONE);
19367c478bd9Sstevel@tonic-gate 
19377c478bd9Sstevel@tonic-gate 	addr = abw->abw_buf[abw->abw_ndx++].abb_addr;
19387c478bd9Sstevel@tonic-gate 
19397c478bd9Sstevel@tonic-gate 	if (mdb_vread(bcp, UMEM_BUFCTL_AUDIT_SIZE, addr) == -1) {
19407c478bd9Sstevel@tonic-gate 		mdb_warn("couldn't read bufctl at %p", addr);
19417c478bd9Sstevel@tonic-gate 		return (WALK_DONE);
19427c478bd9Sstevel@tonic-gate 	}
19437c478bd9Sstevel@tonic-gate 
19447c478bd9Sstevel@tonic-gate 	return (wsp->walk_callback(addr, bcp, wsp->walk_cbdata));
19457c478bd9Sstevel@tonic-gate }
19467c478bd9Sstevel@tonic-gate 
19477c478bd9Sstevel@tonic-gate void
19487c478bd9Sstevel@tonic-gate allocdby_walk_fini(mdb_walk_state_t *wsp)
19497c478bd9Sstevel@tonic-gate {
19507c478bd9Sstevel@tonic-gate 	allocdby_walk_t *abw = wsp->walk_data;
19517c478bd9Sstevel@tonic-gate 
19527c478bd9Sstevel@tonic-gate 	mdb_free(abw->abw_buf, sizeof (allocdby_bufctl_t) * abw->abw_size);
19537c478bd9Sstevel@tonic-gate 	mdb_free(abw, sizeof (allocdby_walk_t));
19547c478bd9Sstevel@tonic-gate }
19557c478bd9Sstevel@tonic-gate 
19567c478bd9Sstevel@tonic-gate /*ARGSUSED*/
19577c478bd9Sstevel@tonic-gate int
19587c478bd9Sstevel@tonic-gate allocdby_walk(uintptr_t addr, const umem_bufctl_audit_t *bcp, void *ignored)
19597c478bd9Sstevel@tonic-gate {
19607c478bd9Sstevel@tonic-gate 	char c[MDB_SYM_NAMLEN];
19617c478bd9Sstevel@tonic-gate 	GElf_Sym sym;
19627c478bd9Sstevel@tonic-gate 	int i;
19637c478bd9Sstevel@tonic-gate 
19647c478bd9Sstevel@tonic-gate 	mdb_printf("%0?p %12llx ", addr, bcp->bc_timestamp);
19657c478bd9Sstevel@tonic-gate 	for (i = 0; i < bcp->bc_depth; i++) {
19667c478bd9Sstevel@tonic-gate 		if (mdb_lookup_by_addr(bcp->bc_stack[i],
19677c478bd9Sstevel@tonic-gate 		    MDB_SYM_FUZZY, c, sizeof (c), &sym) == -1)
19687c478bd9Sstevel@tonic-gate 			continue;
19697c478bd9Sstevel@tonic-gate 		if (is_umem_sym(c, "umem_"))
19707c478bd9Sstevel@tonic-gate 			continue;
19717c478bd9Sstevel@tonic-gate 		mdb_printf("%s+0x%lx",
19727c478bd9Sstevel@tonic-gate 		    c, bcp->bc_stack[i] - (uintptr_t)sym.st_value);
19737c478bd9Sstevel@tonic-gate 		break;
19747c478bd9Sstevel@tonic-gate 	}
19757c478bd9Sstevel@tonic-gate 	mdb_printf("\n");
19767c478bd9Sstevel@tonic-gate 
19777c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
19787c478bd9Sstevel@tonic-gate }
19797c478bd9Sstevel@tonic-gate 
19807c478bd9Sstevel@tonic-gate static int
19817c478bd9Sstevel@tonic-gate allocdby_common(uintptr_t addr, uint_t flags, const char *w)
19827c478bd9Sstevel@tonic-gate {
19837c478bd9Sstevel@tonic-gate 	if (!(flags & DCMD_ADDRSPEC))
19847c478bd9Sstevel@tonic-gate 		return (DCMD_USAGE);
19857c478bd9Sstevel@tonic-gate 
19867c478bd9Sstevel@tonic-gate 	mdb_printf("%-?s %12s %s\n", "BUFCTL", "TIMESTAMP", "CALLER");
19877c478bd9Sstevel@tonic-gate 
19887c478bd9Sstevel@tonic-gate 	if (mdb_pwalk(w, (mdb_walk_cb_t)allocdby_walk, NULL, addr) == -1) {
19897c478bd9Sstevel@tonic-gate 		mdb_warn("can't walk '%s' for %p", w, addr);
19907c478bd9Sstevel@tonic-gate 		return (DCMD_ERR);
19917c478bd9Sstevel@tonic-gate 	}
19927c478bd9Sstevel@tonic-gate 
19937c478bd9Sstevel@tonic-gate 	return (DCMD_OK);
19947c478bd9Sstevel@tonic-gate }
19957c478bd9Sstevel@tonic-gate 
19967c478bd9Sstevel@tonic-gate /*ARGSUSED*/
19977c478bd9Sstevel@tonic-gate int
19987c478bd9Sstevel@tonic-gate allocdby(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
19997c478bd9Sstevel@tonic-gate {
20007c478bd9Sstevel@tonic-gate 	return (allocdby_common(addr, flags, "allocdby"));
20017c478bd9Sstevel@tonic-gate }
20027c478bd9Sstevel@tonic-gate 
20037c478bd9Sstevel@tonic-gate /*ARGSUSED*/
20047c478bd9Sstevel@tonic-gate int
20057c478bd9Sstevel@tonic-gate freedby(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
20067c478bd9Sstevel@tonic-gate {
20077c478bd9Sstevel@tonic-gate 	return (allocdby_common(addr, flags, "freedby"));
20087c478bd9Sstevel@tonic-gate }
20097c478bd9Sstevel@tonic-gate 
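/*
 * What follows is libumem's support for the generic mdb_whatis framework:
 * each candidate address is matched against the buffers of every cache
 * (using either the umem/freemem or bufctl/freectl walks, depending on the
 * cache flags and the -b option) and against vmem arena segments, and every
 * hit is reported along with the cache or arena it came from.
 */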
20104a1c2431SJonathan Adams typedef struct whatis_info {
20114a1c2431SJonathan Adams 	mdb_whatis_t *wi_w;
20124a1c2431SJonathan Adams 	const umem_cache_t *wi_cache;
20134a1c2431SJonathan Adams 	const vmem_t *wi_vmem;
20144a1c2431SJonathan Adams 	vmem_t *wi_msb_arena;
20154a1c2431SJonathan Adams 	size_t wi_slab_size;
20164a1c2431SJonathan Adams 	int wi_slab_found;
20174a1c2431SJonathan Adams 	uint_t wi_freemem;
20184a1c2431SJonathan Adams } whatis_info_t;
20190c3b83b1SJonathan Adams 
20200c3b83b1SJonathan Adams /* call one of our dcmd functions with "-v" and the provided address */
20210c3b83b1SJonathan Adams static void
20220c3b83b1SJonathan Adams whatis_call_printer(mdb_dcmd_f *dcmd, uintptr_t addr)
20230c3b83b1SJonathan Adams {
20240c3b83b1SJonathan Adams 	mdb_arg_t a;
20250c3b83b1SJonathan Adams 	a.a_type = MDB_TYPE_STRING;
20260c3b83b1SJonathan Adams 	a.a_un.a_str = "-v";
20270c3b83b1SJonathan Adams 
20284a1c2431SJonathan Adams 	mdb_printf(":\n");
20290c3b83b1SJonathan Adams 	(void) (*dcmd)(addr, DCMD_ADDRSPEC, 1, &a);
20300c3b83b1SJonathan Adams }
20310c3b83b1SJonathan Adams 
20327c478bd9Sstevel@tonic-gate static void
20334a1c2431SJonathan Adams whatis_print_umem(whatis_info_t *wi, uintptr_t maddr, uintptr_t addr,
20344a1c2431SJonathan Adams     uintptr_t baddr)
20357c478bd9Sstevel@tonic-gate {
20364a1c2431SJonathan Adams 	mdb_whatis_t *w = wi->wi_w;
20374a1c2431SJonathan Adams 	const umem_cache_t *cp = wi->wi_cache;
20384a1c2431SJonathan Adams 	int quiet = (mdb_whatis_flags(w) & WHATIS_QUIET);
20397c478bd9Sstevel@tonic-gate 
20404a1c2431SJonathan Adams 	int call_printer = (!quiet && (cp->cache_flags & UMF_AUDIT));
20417c478bd9Sstevel@tonic-gate 
20424a1c2431SJonathan Adams 	mdb_whatis_report_object(w, maddr, addr, "");
20437c478bd9Sstevel@tonic-gate 
20444a1c2431SJonathan Adams 	if (baddr != 0 && !call_printer)
20454a1c2431SJonathan Adams 		mdb_printf("bufctl %p ", baddr);
20467c478bd9Sstevel@tonic-gate 
20474a1c2431SJonathan Adams 	mdb_printf("%s from %s",
20484a1c2431SJonathan Adams 	    (wi->wi_freemem == FALSE) ? "allocated" : "freed", cp->cache_name);
20497c478bd9Sstevel@tonic-gate 
20504a1c2431SJonathan Adams 	if (call_printer && baddr != 0) {
20514a1c2431SJonathan Adams 		whatis_call_printer(bufctl, baddr);
20524a1c2431SJonathan Adams 		return;
20537c478bd9Sstevel@tonic-gate 	}
20544a1c2431SJonathan Adams 	mdb_printf("\n");
20554a1c2431SJonathan Adams }
20567c478bd9Sstevel@tonic-gate 
20574a1c2431SJonathan Adams /*ARGSUSED*/
20584a1c2431SJonathan Adams static int
20594a1c2431SJonathan Adams whatis_walk_umem(uintptr_t addr, void *ignored, whatis_info_t *wi)
20604a1c2431SJonathan Adams {
20614a1c2431SJonathan Adams 	mdb_whatis_t *w = wi->wi_w;
20620c3b83b1SJonathan Adams 
20634a1c2431SJonathan Adams 	uintptr_t cur;
20644a1c2431SJonathan Adams 	size_t size = wi->wi_cache->cache_bufsize;
20650c3b83b1SJonathan Adams 
20664a1c2431SJonathan Adams 	while (mdb_whatis_match(w, addr, size, &cur))
20674a1c2431SJonathan Adams 		whatis_print_umem(wi, cur, addr, NULL);
20680c3b83b1SJonathan Adams 
20694a1c2431SJonathan Adams 	return (WHATIS_WALKRET(w));
20707c478bd9Sstevel@tonic-gate }
20717c478bd9Sstevel@tonic-gate 
20727c478bd9Sstevel@tonic-gate /*ARGSUSED*/
20737c478bd9Sstevel@tonic-gate static int
20744a1c2431SJonathan Adams whatis_walk_bufctl(uintptr_t baddr, const umem_bufctl_t *bcp, whatis_info_t *wi)
20757c478bd9Sstevel@tonic-gate {
20764a1c2431SJonathan Adams 	mdb_whatis_t *w = wi->wi_w;
20777c478bd9Sstevel@tonic-gate 
20784a1c2431SJonathan Adams 	uintptr_t cur;
20794a1c2431SJonathan Adams 	uintptr_t addr = (uintptr_t)bcp->bc_addr;
20804a1c2431SJonathan Adams 	size_t size = wi->wi_cache->cache_bufsize;
20814a1c2431SJonathan Adams 
20824a1c2431SJonathan Adams 	while (mdb_whatis_match(w, addr, size, &cur))
20834a1c2431SJonathan Adams 		whatis_print_umem(wi, cur, addr, baddr);
20844a1c2431SJonathan Adams 
20854a1c2431SJonathan Adams 	return (WHATIS_WALKRET(w));
20867c478bd9Sstevel@tonic-gate }
20877c478bd9Sstevel@tonic-gate 
20884a1c2431SJonathan Adams 
20897c478bd9Sstevel@tonic-gate static int
20904a1c2431SJonathan Adams whatis_walk_seg(uintptr_t addr, const vmem_seg_t *vs, whatis_info_t *wi)
20917c478bd9Sstevel@tonic-gate {
20924a1c2431SJonathan Adams 	mdb_whatis_t *w = wi->wi_w;
20934a1c2431SJonathan Adams 
20944a1c2431SJonathan Adams 	size_t size = vs->vs_end - vs->vs_start;
20954a1c2431SJonathan Adams 	uintptr_t cur;
20964a1c2431SJonathan Adams 
20974a1c2431SJonathan Adams 	/* We're not interested in anything but alloc and free segments */
20984a1c2431SJonathan Adams 	if (vs->vs_type != VMEM_ALLOC && vs->vs_type != VMEM_FREE)
20997c478bd9Sstevel@tonic-gate 		return (WALK_NEXT);
21007c478bd9Sstevel@tonic-gate 
21014a1c2431SJonathan Adams 	while (mdb_whatis_match(w, vs->vs_start, size, &cur)) {
21024a1c2431SJonathan Adams 		mdb_whatis_report_object(w, cur, vs->vs_start, "");
21037c478bd9Sstevel@tonic-gate 
21044a1c2431SJonathan Adams 		/*
21054a1c2431SJonathan Adams 		 * If we're not printing it separately, provide the vmem_seg
21064a1c2431SJonathan Adams 		 * pointer if it has a stack trace.
21074a1c2431SJonathan Adams 		 */
21084a1c2431SJonathan Adams 		if ((mdb_whatis_flags(w) & WHATIS_QUIET) &&
21094a1c2431SJonathan Adams 		    ((mdb_whatis_flags(w) & WHATIS_BUFCTL) != 0 ||
21104a1c2431SJonathan Adams 		    (vs->vs_type == VMEM_ALLOC && vs->vs_depth != 0))) {
21114a1c2431SJonathan Adams 			mdb_printf("vmem_seg %p ", addr);
21124a1c2431SJonathan Adams 		}
21137c478bd9Sstevel@tonic-gate 
21144a1c2431SJonathan Adams 		mdb_printf("%s from %s vmem arena",
21154a1c2431SJonathan Adams 		    (vs->vs_type == VMEM_ALLOC) ? "allocated" : "freed",
21164a1c2431SJonathan Adams 		    wi->wi_vmem->vm_name);
21170c3b83b1SJonathan Adams 
21184a1c2431SJonathan Adams 		if (!(mdb_whatis_flags(w) & WHATIS_QUIET))
21194a1c2431SJonathan Adams 			whatis_call_printer(vmem_seg, addr);
21204a1c2431SJonathan Adams 		else
21214a1c2431SJonathan Adams 			mdb_printf("\n");
21224a1c2431SJonathan Adams 	}
21237c478bd9Sstevel@tonic-gate 
21244a1c2431SJonathan Adams 	return (WHATIS_WALKRET(w));
21257c478bd9Sstevel@tonic-gate }
21267c478bd9Sstevel@tonic-gate 
21277c478bd9Sstevel@tonic-gate static int
21284a1c2431SJonathan Adams whatis_walk_vmem(uintptr_t addr, const vmem_t *vmem, whatis_info_t *wi)
21297c478bd9Sstevel@tonic-gate {
21304a1c2431SJonathan Adams 	mdb_whatis_t *w = wi->wi_w;
21317c478bd9Sstevel@tonic-gate 	const char *nm = vmem->vm_name;
21324a1c2431SJonathan Adams 	wi->wi_vmem = vmem;
21337c478bd9Sstevel@tonic-gate 
21344a1c2431SJonathan Adams 	if (mdb_whatis_flags(w) & WHATIS_VERBOSE)
21357c478bd9Sstevel@tonic-gate 		mdb_printf("Searching vmem arena %s...\n", nm);
21367c478bd9Sstevel@tonic-gate 
21374a1c2431SJonathan Adams 	if (mdb_pwalk("vmem_seg",
21384a1c2431SJonathan Adams 	    (mdb_walk_cb_t)whatis_walk_seg, wi, addr) == -1) {
21397c478bd9Sstevel@tonic-gate 		mdb_warn("can't walk vmem seg for %p", addr);
21407c478bd9Sstevel@tonic-gate 		return (WALK_NEXT);
21417c478bd9Sstevel@tonic-gate 	}
21427c478bd9Sstevel@tonic-gate 
21434a1c2431SJonathan Adams 	return (WHATIS_WALKRET(w));
21447c478bd9Sstevel@tonic-gate }
21457c478bd9Sstevel@tonic-gate 
21467c478bd9Sstevel@tonic-gate /*ARGSUSED*/
21477c478bd9Sstevel@tonic-gate static int
21484a1c2431SJonathan Adams whatis_walk_slab(uintptr_t saddr, const umem_slab_t *sp, whatis_info_t *wi)
21497c478bd9Sstevel@tonic-gate {
21504a1c2431SJonathan Adams 	mdb_whatis_t *w = wi->wi_w;
21517c478bd9Sstevel@tonic-gate 
21524a1c2431SJonathan Adams 	/* It must overlap with the slab data, or it's not interesting */
21534a1c2431SJonathan Adams 	if (mdb_whatis_overlaps(w,
21544a1c2431SJonathan Adams 	    (uintptr_t)sp->slab_base, wi->wi_slab_size)) {
21554a1c2431SJonathan Adams 		wi->wi_slab_found++;
21564a1c2431SJonathan Adams 		return (WALK_DONE);
21574a1c2431SJonathan Adams 	}
21584a1c2431SJonathan Adams 	return (WALK_NEXT);
21597c478bd9Sstevel@tonic-gate }
21607c478bd9Sstevel@tonic-gate 
21617c478bd9Sstevel@tonic-gate static int
21624a1c2431SJonathan Adams whatis_walk_cache(uintptr_t addr, const umem_cache_t *c, whatis_info_t *wi)
21637c478bd9Sstevel@tonic-gate {
21644a1c2431SJonathan Adams 	mdb_whatis_t *w = wi->wi_w;
21657c478bd9Sstevel@tonic-gate 	char *walk, *freewalk;
21667c478bd9Sstevel@tonic-gate 	mdb_walk_cb_t func;
21674a1c2431SJonathan Adams 	int do_bufctl;
21687c478bd9Sstevel@tonic-gate 
21694a1c2431SJonathan Adams 	/* Override the '-b' flag as necessary */
21704a1c2431SJonathan Adams 	if (!(c->cache_flags & UMF_HASH))
21714a1c2431SJonathan Adams 		do_bufctl = FALSE;	/* no bufctls to walk */
21724a1c2431SJonathan Adams 	else if (c->cache_flags & UMF_AUDIT)
21734a1c2431SJonathan Adams 		do_bufctl = TRUE;	/* we always want debugging info */
21744a1c2431SJonathan Adams 	else
21754a1c2431SJonathan Adams 		do_bufctl = ((mdb_whatis_flags(w) & WHATIS_BUFCTL) != 0);
21764a1c2431SJonathan Adams 
21774a1c2431SJonathan Adams 	if (do_bufctl) {
21787c478bd9Sstevel@tonic-gate 		walk = "bufctl";
21797c478bd9Sstevel@tonic-gate 		freewalk = "freectl";
21807c478bd9Sstevel@tonic-gate 		func = (mdb_walk_cb_t)whatis_walk_bufctl;
21810c3b83b1SJonathan Adams 	} else {
21820c3b83b1SJonathan Adams 		walk = "umem";
21830c3b83b1SJonathan Adams 		freewalk = "freemem";
21840c3b83b1SJonathan Adams 		func = (mdb_walk_cb_t)whatis_walk_umem;
21857c478bd9Sstevel@tonic-gate 	}
21867c478bd9Sstevel@tonic-gate 
21874a1c2431SJonathan Adams 	wi->wi_cache = c;
21884a1c2431SJonathan Adams 
21894a1c2431SJonathan Adams 	if (mdb_whatis_flags(w) & WHATIS_VERBOSE)
21907c478bd9Sstevel@tonic-gate 		mdb_printf("Searching %s...\n", c->cache_name);
21917c478bd9Sstevel@tonic-gate 
21924a1c2431SJonathan Adams 	/*
21934a1c2431SJonathan Adams 	 * If more than two buffers live on each slab, figure out if we're
21944a1c2431SJonathan Adams 	 * interested in anything in any slab before doing the more expensive
21954a1c2431SJonathan Adams 	 * umem/freemem (bufctl/freectl) walkers.
21964a1c2431SJonathan Adams 	 */
21974a1c2431SJonathan Adams 	wi->wi_slab_size = c->cache_slabsize - c->cache_maxcolor;
21984a1c2431SJonathan Adams 	if (!(c->cache_flags & UMF_HASH))
21994a1c2431SJonathan Adams 		wi->wi_slab_size -= sizeof (umem_slab_t);
22004a1c2431SJonathan Adams 
22014a1c2431SJonathan Adams 	if ((wi->wi_slab_size / c->cache_chunksize) > 2) {
22024a1c2431SJonathan Adams 		wi->wi_slab_found = 0;
22034a1c2431SJonathan Adams 		if (mdb_pwalk("umem_slab", (mdb_walk_cb_t)whatis_walk_slab, wi,
22044a1c2431SJonathan Adams 		    addr) == -1) {
22054a1c2431SJonathan Adams 			mdb_warn("can't find umem_slab walker");
22064a1c2431SJonathan Adams 			return (WALK_DONE);
22074a1c2431SJonathan Adams 		}
22084a1c2431SJonathan Adams 		if (wi->wi_slab_found == 0)
22094a1c2431SJonathan Adams 			return (WALK_NEXT);
22104a1c2431SJonathan Adams 	}
22117c478bd9Sstevel@tonic-gate 
22124a1c2431SJonathan Adams 	wi->wi_freemem = FALSE;
22134a1c2431SJonathan Adams 	if (mdb_pwalk(walk, func, wi, addr) == -1) {
22147c478bd9Sstevel@tonic-gate 		mdb_warn("can't find %s walker", walk);
22157c478bd9Sstevel@tonic-gate 		return (WALK_DONE);
22167c478bd9Sstevel@tonic-gate 	}
22177c478bd9Sstevel@tonic-gate 
22184a1c2431SJonathan Adams 	if (mdb_whatis_done(w))
22197c478bd9Sstevel@tonic-gate 		return (WALK_DONE);
22207c478bd9Sstevel@tonic-gate 
22217c478bd9Sstevel@tonic-gate 	/*
22227c478bd9Sstevel@tonic-gate 	 * We have searched for allocated memory; now search for freed memory.
22237c478bd9Sstevel@tonic-gate 	 */
22244a1c2431SJonathan Adams 	if (mdb_whatis_flags(w) & WHATIS_VERBOSE)
22257c478bd9Sstevel@tonic-gate 		mdb_printf("Searching %s for free memory...\n", c->cache_name);
22267c478bd9Sstevel@tonic-gate 
22274a1c2431SJonathan Adams 	wi->wi_freemem = TRUE;
22287c478bd9Sstevel@tonic-gate 
22294a1c2431SJonathan Adams 	if (mdb_pwalk(freewalk, func, wi, addr) == -1) {
22307c478bd9Sstevel@tonic-gate 		mdb_warn("can't find %s walker", freewalk);
22317c478bd9Sstevel@tonic-gate 		return (WALK_DONE);
22327c478bd9Sstevel@tonic-gate 	}
22337c478bd9Sstevel@tonic-gate 
22344a1c2431SJonathan Adams 	return (WHATIS_WALKRET(w));
22357c478bd9Sstevel@tonic-gate }
22367c478bd9Sstevel@tonic-gate 
22377c478bd9Sstevel@tonic-gate static int
22384a1c2431SJonathan Adams whatis_walk_touch(uintptr_t addr, const umem_cache_t *c, whatis_info_t *wi)
22397c478bd9Sstevel@tonic-gate {
22404a1c2431SJonathan Adams 	if (c->cache_arena == wi->wi_msb_arena ||
22414a1c2431SJonathan Adams 	    (c->cache_cflags & UMC_NOTOUCH))
22427c478bd9Sstevel@tonic-gate 		return (WALK_NEXT);
22437c478bd9Sstevel@tonic-gate 
22444a1c2431SJonathan Adams 	return (whatis_walk_cache(addr, c, wi));
22457c478bd9Sstevel@tonic-gate }
22467c478bd9Sstevel@tonic-gate 
22477c478bd9Sstevel@tonic-gate static int
22484a1c2431SJonathan Adams whatis_walk_metadata(uintptr_t addr, const umem_cache_t *c, whatis_info_t *wi)
22497c478bd9Sstevel@tonic-gate {
22504a1c2431SJonathan Adams 	if (c->cache_arena != wi->wi_msb_arena)
22517c478bd9Sstevel@tonic-gate 		return (WALK_NEXT);
22527c478bd9Sstevel@tonic-gate 
22534a1c2431SJonathan Adams 	return (whatis_walk_cache(addr, c, wi));
22547c478bd9Sstevel@tonic-gate }
22557c478bd9Sstevel@tonic-gate 
22564a1c2431SJonathan Adams static int
22574a1c2431SJonathan Adams whatis_walk_notouch(uintptr_t addr, const umem_cache_t *c, whatis_info_t *wi)
22587c478bd9Sstevel@tonic-gate {
22594a1c2431SJonathan Adams 	if (c->cache_arena == wi->wi_msb_arena ||
22604a1c2431SJonathan Adams 	    !(c->cache_cflags & UMC_NOTOUCH))
22614a1c2431SJonathan Adams 		return (WALK_NEXT);
22627c478bd9Sstevel@tonic-gate 
22634a1c2431SJonathan Adams 	return (whatis_walk_cache(addr, c, wi));
22644a1c2431SJonathan Adams }
22657c478bd9Sstevel@tonic-gate 
22664a1c2431SJonathan Adams /*ARGSUSED*/
22674a1c2431SJonathan Adams static int
22684a1c2431SJonathan Adams whatis_run_umem(mdb_whatis_t *w, void *ignored)
22694a1c2431SJonathan Adams {
22704a1c2431SJonathan Adams 	whatis_info_t wi;
22714a1c2431SJonathan Adams 
22724a1c2431SJonathan Adams 	bzero(&wi, sizeof (wi));
22734a1c2431SJonathan Adams 	wi.wi_w = w;
22747c478bd9Sstevel@tonic-gate 
22754a1c2431SJonathan Adams 	/* umem's metadata is allocated from the umem_internal_arena */
227622ce0148SMatthew Ahrens 	if (umem_readvar(&wi.wi_msb_arena, "umem_internal_arena") == -1)
22774a1c2431SJonathan Adams 		mdb_warn("unable to readvar \"umem_internal_arena\"");
22787c478bd9Sstevel@tonic-gate 
22797c478bd9Sstevel@tonic-gate 	/*
22804a1c2431SJonathan Adams 	 * We process umem caches in the following order:
22814a1c2431SJonathan Adams 	 *
22824a1c2431SJonathan Adams 	 *	non-UMC_NOTOUCH, non-metadata	(typically the most interesting)
22834a1c2431SJonathan Adams 	 *	metadata			(can be huge with UMF_AUDIT)
22844a1c2431SJonathan Adams 	 *	UMC_NOTOUCH, non-metadata	(see umem_walk_all())
22857c478bd9Sstevel@tonic-gate 	 */
22864a1c2431SJonathan Adams 	if (mdb_walk("umem_cache", (mdb_walk_cb_t)whatis_walk_touch,
22874a1c2431SJonathan Adams 	    &wi) == -1 ||
22884a1c2431SJonathan Adams 	    mdb_walk("umem_cache", (mdb_walk_cb_t)whatis_walk_metadata,
22894a1c2431SJonathan Adams 	    &wi) == -1 ||
22904a1c2431SJonathan Adams 	    mdb_walk("umem_cache", (mdb_walk_cb_t)whatis_walk_notouch,
22914a1c2431SJonathan Adams 	    &wi) == -1) {
22927c478bd9Sstevel@tonic-gate 		mdb_warn("couldn't find umem_cache walker");
22934a1c2431SJonathan Adams 		return (1);
22947c478bd9Sstevel@tonic-gate 	}
22954a1c2431SJonathan Adams 	return (0);
22964a1c2431SJonathan Adams }
22977c478bd9Sstevel@tonic-gate 
22984a1c2431SJonathan Adams /*ARGSUSED*/
22994a1c2431SJonathan Adams static int
23004a1c2431SJonathan Adams whatis_run_vmem(mdb_whatis_t *w, void *ignored)
23014a1c2431SJonathan Adams {
23024a1c2431SJonathan Adams 	whatis_info_t wi;
23037c478bd9Sstevel@tonic-gate 
23044a1c2431SJonathan Adams 	bzero(&wi, sizeof (wi));
23054a1c2431SJonathan Adams 	wi.wi_w = w;
23067c478bd9Sstevel@tonic-gate 
23077c478bd9Sstevel@tonic-gate 	if (mdb_walk("vmem_postfix",
23084a1c2431SJonathan Adams 	    (mdb_walk_cb_t)whatis_walk_vmem, &wi) == -1) {
23097c478bd9Sstevel@tonic-gate 		mdb_warn("couldn't find vmem_postfix walker");
23104a1c2431SJonathan Adams 		return (1);
23114a1c2431SJonathan Adams 	}
23124a1c2431SJonathan Adams 	return (0);
23134a1c2431SJonathan Adams }
23144a1c2431SJonathan Adams 
23154a1c2431SJonathan Adams int
23164a1c2431SJonathan Adams umem_init(void)
23174a1c2431SJonathan Adams {
23184a1c2431SJonathan Adams 	mdb_walker_t w = {
23194a1c2431SJonathan Adams 		"umem_cache", "walk list of umem caches", umem_cache_walk_init,
23204a1c2431SJonathan Adams 		umem_cache_walk_step, umem_cache_walk_fini
23214a1c2431SJonathan Adams 	};
23224a1c2431SJonathan Adams 
23234a1c2431SJonathan Adams 	if (mdb_add_walker(&w) == -1) {
23244a1c2431SJonathan Adams 		mdb_warn("failed to add umem_cache walker");
23254a1c2431SJonathan Adams 		return (-1);
23267c478bd9Sstevel@tonic-gate 	}
23277c478bd9Sstevel@tonic-gate 
23284a1c2431SJonathan Adams 	if (umem_update_variables() == -1)
23294a1c2431SJonathan Adams 		return (-1);
23307c478bd9Sstevel@tonic-gate 
23314a1c2431SJonathan Adams 	/* install a callback so that our variables are always up-to-date */
23324a1c2431SJonathan Adams 	(void) mdb_callback_add(MDB_CALLBACK_STCHG, umem_statechange_cb, NULL);
23334a1c2431SJonathan Adams 	umem_statechange_cb(NULL);
23344a1c2431SJonathan Adams 
23354a1c2431SJonathan Adams 	/*
23364a1c2431SJonathan Adams 	 * Register our ::whatis callbacks.
23374a1c2431SJonathan Adams 	 */
23384a1c2431SJonathan Adams 	mdb_whatis_register("umem", whatis_run_umem, NULL,
23394a1c2431SJonathan Adams 	    WHATIS_PRIO_ALLOCATOR, WHATIS_REG_NO_ID);
23404a1c2431SJonathan Adams 	mdb_whatis_register("vmem", whatis_run_vmem, NULL,
23414a1c2431SJonathan Adams 	    WHATIS_PRIO_ALLOCATOR, WHATIS_REG_NO_ID);
23424a1c2431SJonathan Adams 
23434a1c2431SJonathan Adams 	return (0);
23447c478bd9Sstevel@tonic-gate }
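
/*
 * Illustrative use of the ::whatis integration registered above (the address
 * is a placeholder):
 *
 *	> addr::whatis
 *
 * The "umem" callback searches the umem caches in the order described in
 * whatis_run_umem(), reporting the buffer (and, when available, its bufctl)
 * that contains the address; the "vmem" callback reports any vmem arena
 * segment that covers it.
 */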
23457c478bd9Sstevel@tonic-gate 
23467c478bd9Sstevel@tonic-gate typedef struct umem_log_cpu {
23477c478bd9Sstevel@tonic-gate 	uintptr_t umc_low;
23487c478bd9Sstevel@tonic-gate 	uintptr_t umc_high;
23497c478bd9Sstevel@tonic-gate } umem_log_cpu_t;
23507c478bd9Sstevel@tonic-gate 
23517c478bd9Sstevel@tonic-gate int
23527c478bd9Sstevel@tonic-gate umem_log_walk(uintptr_t addr, const umem_bufctl_audit_t *b, umem_log_cpu_t *umc)
23537c478bd9Sstevel@tonic-gate {
23547c478bd9Sstevel@tonic-gate 	int i;
23557c478bd9Sstevel@tonic-gate 
23567c478bd9Sstevel@tonic-gate 	for (i = 0; i < umem_max_ncpus; i++) {
23577c478bd9Sstevel@tonic-gate 		if (addr >= umc[i].umc_low && addr < umc[i].umc_high)
23587c478bd9Sstevel@tonic-gate 			break;
23597c478bd9Sstevel@tonic-gate 	}
23607c478bd9Sstevel@tonic-gate 
23617c478bd9Sstevel@tonic-gate 	if (i == umem_max_ncpus)
23627c478bd9Sstevel@tonic-gate 		mdb_printf("   ");
23637c478bd9Sstevel@tonic-gate 	else
23647c478bd9Sstevel@tonic-gate 		mdb_printf("%3d", i);
23657c478bd9Sstevel@tonic-gate 
23667c478bd9Sstevel@tonic-gate 	mdb_printf(" %0?p %0?p %16llx %0?p\n", addr, b->bc_addr,
23677c478bd9Sstevel@tonic-gate 	    b->bc_timestamp, b->bc_thread);
23687c478bd9Sstevel@tonic-gate 
23697c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
23707c478bd9Sstevel@tonic-gate }
23717c478bd9Sstevel@tonic-gate 
23727c478bd9Sstevel@tonic-gate /*ARGSUSED*/
23737c478bd9Sstevel@tonic-gate int
23747c478bd9Sstevel@tonic-gate umem_log(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
23757c478bd9Sstevel@tonic-gate {
23767c478bd9Sstevel@tonic-gate 	umem_log_header_t lh;
23777c478bd9Sstevel@tonic-gate 	umem_cpu_log_header_t clh;
23787c478bd9Sstevel@tonic-gate 	uintptr_t lhp, clhp;
23797c478bd9Sstevel@tonic-gate 	umem_log_cpu_t *umc;
23807c478bd9Sstevel@tonic-gate 	int i;
23817c478bd9Sstevel@tonic-gate 
23827c478bd9Sstevel@tonic-gate 	if (umem_readvar(&lhp, "umem_transaction_log") == -1) {
23837c478bd9Sstevel@tonic-gate 		mdb_warn("failed to read 'umem_transaction_log'");
23847c478bd9Sstevel@tonic-gate 		return (DCMD_ERR);
23857c478bd9Sstevel@tonic-gate 	}
23867c478bd9Sstevel@tonic-gate 
23877c478bd9Sstevel@tonic-gate 	if (lhp == NULL) {
23887c478bd9Sstevel@tonic-gate 		mdb_warn("no umem transaction log\n");
23897c478bd9Sstevel@tonic-gate 		return (DCMD_ERR);
23907c478bd9Sstevel@tonic-gate 	}
23917c478bd9Sstevel@tonic-gate 
23927c478bd9Sstevel@tonic-gate 	if (mdb_vread(&lh, sizeof (umem_log_header_t), lhp) == -1) {
23937c478bd9Sstevel@tonic-gate 		mdb_warn("failed to read log header at %p", lhp);
23947c478bd9Sstevel@tonic-gate 		return (DCMD_ERR);
23957c478bd9Sstevel@tonic-gate 	}
23967c478bd9Sstevel@tonic-gate 
23977c478bd9Sstevel@tonic-gate 	clhp = lhp + ((uintptr_t)&lh.lh_cpu[0] - (uintptr_t)&lh);
23987c478bd9Sstevel@tonic-gate 
23997c478bd9Sstevel@tonic-gate 	umc = mdb_zalloc(sizeof (umem_log_cpu_t) * umem_max_ncpus,
24007c478bd9Sstevel@tonic-gate 	    UM_SLEEP | UM_GC);
24017c478bd9Sstevel@tonic-gate 
24027c478bd9Sstevel@tonic-gate 	for (i = 0; i < umem_max_ncpus; i++) {
24037c478bd9Sstevel@tonic-gate 		if (mdb_vread(&clh, sizeof (clh), clhp) == -1) {
24047c478bd9Sstevel@tonic-gate 			mdb_warn("cannot read cpu %d's log header at %p",
24057c478bd9Sstevel@tonic-gate 			    i, clhp);
24067c478bd9Sstevel@tonic-gate 			return (DCMD_ERR);
24077c478bd9Sstevel@tonic-gate 		}
24087c478bd9Sstevel@tonic-gate 
24097c478bd9Sstevel@tonic-gate 		umc[i].umc_low = clh.clh_chunk * lh.lh_chunksize +
24107c478bd9Sstevel@tonic-gate 		    (uintptr_t)lh.lh_base;
24117c478bd9Sstevel@tonic-gate 		umc[i].umc_high = (uintptr_t)clh.clh_current;
24127c478bd9Sstevel@tonic-gate 
24137c478bd9Sstevel@tonic-gate 		clhp += sizeof (umem_cpu_log_header_t);
24147c478bd9Sstevel@tonic-gate 	}
24157c478bd9Sstevel@tonic-gate 
24167c478bd9Sstevel@tonic-gate 	if (DCMD_HDRSPEC(flags)) {
24177c478bd9Sstevel@tonic-gate 		mdb_printf("%3s %-?s %-?s %16s %-?s\n", "CPU", "ADDR",
24187c478bd9Sstevel@tonic-gate 		    "BUFADDR", "TIMESTAMP", "THREAD");
24197c478bd9Sstevel@tonic-gate 	}
24207c478bd9Sstevel@tonic-gate 
24217c478bd9Sstevel@tonic-gate 	/*
24227c478bd9Sstevel@tonic-gate 	 * If we have been passed an address, we'll just print out that
24237c478bd9Sstevel@tonic-gate 	 * log entry.
24247c478bd9Sstevel@tonic-gate 	 */
24257c478bd9Sstevel@tonic-gate 	if (flags & DCMD_ADDRSPEC) {
24267c478bd9Sstevel@tonic-gate 		umem_bufctl_audit_t *bp;
24277c478bd9Sstevel@tonic-gate 		UMEM_LOCAL_BUFCTL_AUDIT(&bp);
24287c478bd9Sstevel@tonic-gate 
24297c478bd9Sstevel@tonic-gate 		if (mdb_vread(bp, UMEM_BUFCTL_AUDIT_SIZE, addr) == -1) {
24307c478bd9Sstevel@tonic-gate 			mdb_warn("failed to read bufctl at %p", addr);
24317c478bd9Sstevel@tonic-gate 			return (DCMD_ERR);
24327c478bd9Sstevel@tonic-gate 		}
24337c478bd9Sstevel@tonic-gate 
24347c478bd9Sstevel@tonic-gate 		(void) umem_log_walk(addr, bp, umc);
24357c478bd9Sstevel@tonic-gate 
24367c478bd9Sstevel@tonic-gate 		return (DCMD_OK);
24377c478bd9Sstevel@tonic-gate 	}
24387c478bd9Sstevel@tonic-gate 
24397c478bd9Sstevel@tonic-gate 	if (mdb_walk("umem_log", (mdb_walk_cb_t)umem_log_walk, umc) == -1) {
24407c478bd9Sstevel@tonic-gate 		mdb_warn("can't find umem log walker");
24417c478bd9Sstevel@tonic-gate 		return (DCMD_ERR);
24427c478bd9Sstevel@tonic-gate 	}
24437c478bd9Sstevel@tonic-gate 
24447c478bd9Sstevel@tonic-gate 	return (DCMD_OK);
24457c478bd9Sstevel@tonic-gate }
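
/*
 * Illustrative usage, assuming this dcmd is registered as ::umem_log and the
 * target was run with umem transaction logging enabled ("addr" is a
 * placeholder for a bufctl address):
 *
 *	> ::umem_log			(dump the whole transaction log)
 *	> addr::umem_log		(decode a single bufctl audit record)
 *
 * Each output line shows the CPU whose log chunk contains the record (blank
 * if none does), the bufctl address, the buffer address, the timestamp, and
 * the allocating thread, matching the header printed by the dcmd.
 */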
24467c478bd9Sstevel@tonic-gate 
24477c478bd9Sstevel@tonic-gate typedef struct bufctl_history_cb {
24487c478bd9Sstevel@tonic-gate 	int		bhc_flags;
24497c478bd9Sstevel@tonic-gate 	int		bhc_argc;
24507c478bd9Sstevel@tonic-gate 	const mdb_arg_t	*bhc_argv;
24517c478bd9Sstevel@tonic-gate 	int		bhc_ret;
24527c478bd9Sstevel@tonic-gate } bufctl_history_cb_t;
24537c478bd9Sstevel@tonic-gate 
24547c478bd9Sstevel@tonic-gate /*ARGSUSED*/
24557c478bd9Sstevel@tonic-gate static int
24567c478bd9Sstevel@tonic-gate bufctl_history_callback(uintptr_t addr, const void *ign, void *arg)
24577c478bd9Sstevel@tonic-gate {
24587c478bd9Sstevel@tonic-gate 	bufctl_history_cb_t *bhc = arg;
24597c478bd9Sstevel@tonic-gate 
24607c478bd9Sstevel@tonic-gate 	bhc->bhc_ret =
24617c478bd9Sstevel@tonic-gate 	    bufctl(addr, bhc->bhc_flags, bhc->bhc_argc, bhc->bhc_argv);
24627c478bd9Sstevel@tonic-gate 
24637c478bd9Sstevel@tonic-gate 	bhc->bhc_flags &= ~DCMD_LOOPFIRST;
24647c478bd9Sstevel@tonic-gate 
24657c478bd9Sstevel@tonic-gate 	return ((bhc->bhc_ret == DCMD_OK)? WALK_NEXT : WALK_DONE);
24667c478bd9Sstevel@tonic-gate }
24677c478bd9Sstevel@tonic-gate 
24687c478bd9Sstevel@tonic-gate void
24697c478bd9Sstevel@tonic-gate bufctl_help(void)
24707c478bd9Sstevel@tonic-gate {
24717c478bd9Sstevel@tonic-gate 	mdb_printf("%s\n",
24727c478bd9Sstevel@tonic-gate "Display the contents of umem_bufctl_audit_ts, with optional filtering.\n");
24737c478bd9Sstevel@tonic-gate 	mdb_dec_indent(2);
24747c478bd9Sstevel@tonic-gate 	mdb_printf("%<b>OPTIONS%</b>\n");
24757c478bd9Sstevel@tonic-gate 	mdb_inc_indent(2);
24767c478bd9Sstevel@tonic-gate 	mdb_printf("%s",
24777c478bd9Sstevel@tonic-gate "  -v    Display the full content of the bufctl, including its stack trace\n"
24787c478bd9Sstevel@tonic-gate "  -h    retrieve the bufctl's transaction history, if available\n"
24797c478bd9Sstevel@tonic-gate "  -a addr\n"
24807c478bd9Sstevel@tonic-gate "        filter out bufctls not involving the buffer at addr\n"
24817c478bd9Sstevel@tonic-gate "  -c caller\n"
24827c478bd9Sstevel@tonic-gate "        filter out bufctls without the function/PC in their stack trace\n"
24837c478bd9Sstevel@tonic-gate "  -e earliest\n"
24847c478bd9Sstevel@tonic-gate "        filter out bufctls timestamped before earliest\n"
24857c478bd9Sstevel@tonic-gate "  -l latest\n"
24867c478bd9Sstevel@tonic-gate "        filter out bufctls timestamped after latest\n"
24877c478bd9Sstevel@tonic-gate "  -t thread\n"
24887c478bd9Sstevel@tonic-gate "        filter out bufctls not involving thread\n");
24897c478bd9Sstevel@tonic-gate }
24907c478bd9Sstevel@tonic-gate 
24917c478bd9Sstevel@tonic-gate int
24927c478bd9Sstevel@tonic-gate bufctl(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
24937c478bd9Sstevel@tonic-gate {
24947c478bd9Sstevel@tonic-gate 	uint_t verbose = FALSE;
24957c478bd9Sstevel@tonic-gate 	uint_t history = FALSE;
24967c478bd9Sstevel@tonic-gate 	uint_t in_history = FALSE;
24977c478bd9Sstevel@tonic-gate 	uintptr_t caller = NULL, thread = NULL;
24987c478bd9Sstevel@tonic-gate 	uintptr_t laddr, haddr, baddr = NULL;
24997c478bd9Sstevel@tonic-gate 	hrtime_t earliest = 0, latest = 0;
25007c478bd9Sstevel@tonic-gate 	int i, depth;
25017c478bd9Sstevel@tonic-gate 	char c[MDB_SYM_NAMLEN];
25027c478bd9Sstevel@tonic-gate 	GElf_Sym sym;
25037c478bd9Sstevel@tonic-gate 	umem_bufctl_audit_t *bcp;
25047c478bd9Sstevel@tonic-gate 	UMEM_LOCAL_BUFCTL_AUDIT(&bcp);
25057c478bd9Sstevel@tonic-gate 
25067c478bd9Sstevel@tonic-gate 	if (mdb_getopts(argc, argv,
25077c478bd9Sstevel@tonic-gate 	    'v', MDB_OPT_SETBITS, TRUE, &verbose,
25087c478bd9Sstevel@tonic-gate 	    'h', MDB_OPT_SETBITS, TRUE, &history,
25097c478bd9Sstevel@tonic-gate 	    'H', MDB_OPT_SETBITS, TRUE, &in_history,		/* internal */
25107c478bd9Sstevel@tonic-gate 	    'c', MDB_OPT_UINTPTR, &caller,
25117c478bd9Sstevel@tonic-gate 	    't', MDB_OPT_UINTPTR, &thread,
25127c478bd9Sstevel@tonic-gate 	    'e', MDB_OPT_UINT64, &earliest,
25137c478bd9Sstevel@tonic-gate 	    'l', MDB_OPT_UINT64, &latest,
25147c478bd9Sstevel@tonic-gate 	    'a', MDB_OPT_UINTPTR, &baddr, NULL) != argc)
25157c478bd9Sstevel@tonic-gate 		return (DCMD_USAGE);
25167c478bd9Sstevel@tonic-gate 
25177c478bd9Sstevel@tonic-gate 	if (!(flags & DCMD_ADDRSPEC))
25187c478bd9Sstevel@tonic-gate 		return (DCMD_USAGE);
25197c478bd9Sstevel@tonic-gate 
25207c478bd9Sstevel@tonic-gate 	if (in_history && !history)
25217c478bd9Sstevel@tonic-gate 		return (DCMD_USAGE);
25227c478bd9Sstevel@tonic-gate 
25237c478bd9Sstevel@tonic-gate 	if (history && !in_history) {
25247c478bd9Sstevel@tonic-gate 		mdb_arg_t *nargv = mdb_zalloc(sizeof (*nargv) * (argc + 1),
25257c478bd9Sstevel@tonic-gate 		    UM_SLEEP | UM_GC);
25267c478bd9Sstevel@tonic-gate 		bufctl_history_cb_t bhc;
25277c478bd9Sstevel@tonic-gate 
25287c478bd9Sstevel@tonic-gate 		nargv[0].a_type = MDB_TYPE_STRING;
25297c478bd9Sstevel@tonic-gate 		nargv[0].a_un.a_str = "-H";		/* prevent recursion */
25307c478bd9Sstevel@tonic-gate 
25317c478bd9Sstevel@tonic-gate 		for (i = 0; i < argc; i++)
25327c478bd9Sstevel@tonic-gate 			nargv[i + 1] = argv[i];
25337c478bd9Sstevel@tonic-gate 
25347c478bd9Sstevel@tonic-gate 		/*
25357c478bd9Sstevel@tonic-gate 		 * When in history mode, we treat each element as if it
25367c478bd9Sstevel@tonic-gate 		 * were in a separate loop, so that the headers group
25377c478bd9Sstevel@tonic-gate 		 * bufctls with similar histories.
25387c478bd9Sstevel@tonic-gate 		 */
25397c478bd9Sstevel@tonic-gate 		bhc.bhc_flags = flags | DCMD_LOOP | DCMD_LOOPFIRST;
25407c478bd9Sstevel@tonic-gate 		bhc.bhc_argc = argc + 1;
25417c478bd9Sstevel@tonic-gate 		bhc.bhc_argv = nargv;
25427c478bd9Sstevel@tonic-gate 		bhc.bhc_ret = DCMD_OK;
25437c478bd9Sstevel@tonic-gate 
25447c478bd9Sstevel@tonic-gate 		if (mdb_pwalk("bufctl_history", bufctl_history_callback, &bhc,
25457c478bd9Sstevel@tonic-gate 		    addr) == -1) {
25467c478bd9Sstevel@tonic-gate 			mdb_warn("unable to walk bufctl_history");
25477c478bd9Sstevel@tonic-gate 			return (DCMD_ERR);
25487c478bd9Sstevel@tonic-gate 		}
25497c478bd9Sstevel@tonic-gate 
25507c478bd9Sstevel@tonic-gate 		if (bhc.bhc_ret == DCMD_OK && !(flags & DCMD_PIPE_OUT))
25517c478bd9Sstevel@tonic-gate 			mdb_printf("\n");
25527c478bd9Sstevel@tonic-gate 
25537c478bd9Sstevel@tonic-gate 		return (bhc.bhc_ret);
25547c478bd9Sstevel@tonic-gate 	}
25557c478bd9Sstevel@tonic-gate 
25567c478bd9Sstevel@tonic-gate 	if (DCMD_HDRSPEC(flags) && !(flags & DCMD_PIPE_OUT)) {
25577c478bd9Sstevel@tonic-gate 		if (verbose) {
25587c478bd9Sstevel@tonic-gate 			mdb_printf("%16s %16s %16s %16s\n"
25597c478bd9Sstevel@tonic-gate 			    "%<u>%16s %16s %16s %16s%</u>\n",
25607c478bd9Sstevel@tonic-gate 			    "ADDR", "BUFADDR", "TIMESTAMP", "THREAD",
25617c478bd9Sstevel@tonic-gate 			    "", "CACHE", "LASTLOG", "CONTENTS");
25627c478bd9Sstevel@tonic-gate 		} else {
25637c478bd9Sstevel@tonic-gate 			mdb_printf("%<u>%-?s %-?s %-12s %5s %s%</u>\n",
25647c478bd9Sstevel@tonic-gate 			    "ADDR", "BUFADDR", "TIMESTAMP", "THRD", "CALLER");
25657c478bd9Sstevel@tonic-gate 		}
25667c478bd9Sstevel@tonic-gate 	}
25677c478bd9Sstevel@tonic-gate 
25687c478bd9Sstevel@tonic-gate 	if (mdb_vread(bcp, UMEM_BUFCTL_AUDIT_SIZE, addr) == -1) {
25697c478bd9Sstevel@tonic-gate 		mdb_warn("couldn't read bufctl at %p", addr);
25707c478bd9Sstevel@tonic-gate 		return (DCMD_ERR);
25717c478bd9Sstevel@tonic-gate 	}
25727c478bd9Sstevel@tonic-gate 
25737c478bd9Sstevel@tonic-gate 	/*
25747c478bd9Sstevel@tonic-gate 	 * Guard against bogus bc_depth in case the bufctl is corrupt or
25757c478bd9Sstevel@tonic-gate 	 * the address does not really refer to a bufctl.
25767c478bd9Sstevel@tonic-gate 	 */
25777c478bd9Sstevel@tonic-gate 	depth = MIN(bcp->bc_depth, umem_stack_depth);
25787c478bd9Sstevel@tonic-gate 
25797c478bd9Sstevel@tonic-gate 	if (caller != NULL) {
25807c478bd9Sstevel@tonic-gate 		laddr = caller;
25817c478bd9Sstevel@tonic-gate 		haddr = caller + sizeof (caller);
25827c478bd9Sstevel@tonic-gate 
25837c478bd9Sstevel@tonic-gate 		if (mdb_lookup_by_addr(caller, MDB_SYM_FUZZY, c, sizeof (c),
25847c478bd9Sstevel@tonic-gate 		    &sym) != -1 && caller == (uintptr_t)sym.st_value) {
25857c478bd9Sstevel@tonic-gate 			/*
25867c478bd9Sstevel@tonic-gate 			 * We were provided an exact symbol value; any
25877c478bd9Sstevel@tonic-gate 			 * address in the function is valid.
25887c478bd9Sstevel@tonic-gate 			 */
25897c478bd9Sstevel@tonic-gate 			laddr = (uintptr_t)sym.st_value;
25907c478bd9Sstevel@tonic-gate 			haddr = (uintptr_t)sym.st_value + sym.st_size;
25917c478bd9Sstevel@tonic-gate 		}
25927c478bd9Sstevel@tonic-gate 
25937c478bd9Sstevel@tonic-gate 		for (i = 0; i < depth; i++)
25947c478bd9Sstevel@tonic-gate 			if (bcp->bc_stack[i] >= laddr &&
25957c478bd9Sstevel@tonic-gate 			    bcp->bc_stack[i] < haddr)
25967c478bd9Sstevel@tonic-gate 				break;
25977c478bd9Sstevel@tonic-gate 
25987c478bd9Sstevel@tonic-gate 		if (i == depth)
25997c478bd9Sstevel@tonic-gate 			return (DCMD_OK);
26007c478bd9Sstevel@tonic-gate 	}
26017c478bd9Sstevel@tonic-gate 
26027c478bd9Sstevel@tonic-gate 	if (thread != NULL && (uintptr_t)bcp->bc_thread != thread)
26037c478bd9Sstevel@tonic-gate 		return (DCMD_OK);
26047c478bd9Sstevel@tonic-gate 
26057c478bd9Sstevel@tonic-gate 	if (earliest != 0 && bcp->bc_timestamp < earliest)
26067c478bd9Sstevel@tonic-gate 		return (DCMD_OK);
26077c478bd9Sstevel@tonic-gate 
26087c478bd9Sstevel@tonic-gate 	if (latest != 0 && bcp->bc_timestamp > latest)
26097c478bd9Sstevel@tonic-gate 		return (DCMD_OK);
26107c478bd9Sstevel@tonic-gate 
26117c478bd9Sstevel@tonic-gate 	if (baddr != 0 && (uintptr_t)bcp->bc_addr != baddr)
26127c478bd9Sstevel@tonic-gate 		return (DCMD_OK);
26137c478bd9Sstevel@tonic-gate 
26147c478bd9Sstevel@tonic-gate 	if (flags & DCMD_PIPE_OUT) {
26157c478bd9Sstevel@tonic-gate 		mdb_printf("%#r\n", addr);
26167c478bd9Sstevel@tonic-gate 		return (DCMD_OK);
26177c478bd9Sstevel@tonic-gate 	}
26187c478bd9Sstevel@tonic-gate 
26197c478bd9Sstevel@tonic-gate 	if (verbose) {
26207c478bd9Sstevel@tonic-gate 		mdb_printf(
26217c478bd9Sstevel@tonic-gate 		    "%<b>%16p%</b> %16p %16llx %16d\n"
26227c478bd9Sstevel@tonic-gate 		    "%16s %16p %16p %16p\n",
26237c478bd9Sstevel@tonic-gate 		    addr, bcp->bc_addr, bcp->bc_timestamp, bcp->bc_thread,
26247c478bd9Sstevel@tonic-gate 		    "", bcp->bc_cache, bcp->bc_lastlog, bcp->bc_contents);
26257c478bd9Sstevel@tonic-gate 
26267c478bd9Sstevel@tonic-gate 		mdb_inc_indent(17);
26277c478bd9Sstevel@tonic-gate 		for (i = 0; i < depth; i++)
26287c478bd9Sstevel@tonic-gate 			mdb_printf("%a\n", bcp->bc_stack[i]);
26297c478bd9Sstevel@tonic-gate 		mdb_dec_indent(17);
26307c478bd9Sstevel@tonic-gate 		mdb_printf("\n");
26317c478bd9Sstevel@tonic-gate 	} else {
26327c478bd9Sstevel@tonic-gate 		mdb_printf("%0?p %0?p %12llx %5d", addr, bcp->bc_addr,
26337c478bd9Sstevel@tonic-gate 		    bcp->bc_timestamp, bcp->bc_thread);
26347c478bd9Sstevel@tonic-gate 
26357c478bd9Sstevel@tonic-gate 		for (i = 0; i < depth; i++) {
26367c478bd9Sstevel@tonic-gate 			if (mdb_lookup_by_addr(bcp->bc_stack[i],
26377c478bd9Sstevel@tonic-gate 			    MDB_SYM_FUZZY, c, sizeof (c), &sym) == -1)
26387c478bd9Sstevel@tonic-gate 				continue;
26397c478bd9Sstevel@tonic-gate 			if (is_umem_sym(c, "umem_"))
26407c478bd9Sstevel@tonic-gate 				continue;
26417c478bd9Sstevel@tonic-gate 			mdb_printf(" %a\n", bcp->bc_stack[i]);
26427c478bd9Sstevel@tonic-gate 			break;
26437c478bd9Sstevel@tonic-gate 		}
26447c478bd9Sstevel@tonic-gate 
26457c478bd9Sstevel@tonic-gate 		if (i >= depth)
26467c478bd9Sstevel@tonic-gate 			mdb_printf("\n");
26477c478bd9Sstevel@tonic-gate 	}
26487c478bd9Sstevel@tonic-gate 
26497c478bd9Sstevel@tonic-gate 	return (DCMD_OK);
26507c478bd9Sstevel@tonic-gate }
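
/*
 * Example invocations (option semantics are documented in bufctl_help()
 * above; "addr" and "umem_free" are placeholders for a bufctl address and a
 * function of interest):
 *
 *	> addr::bufctl -v
 *	> addr::bufctl -h -c umem_free
 *
 * The first prints the full audit record, including its saved stack trace;
 * the second walks the bufctl's transaction history and shows only entries
 * whose stack trace passes through umem_free.
 */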
26517c478bd9Sstevel@tonic-gate 
26527c478bd9Sstevel@tonic-gate /*ARGSUSED*/
26537c478bd9Sstevel@tonic-gate int
26547c478bd9Sstevel@tonic-gate bufctl_audit(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
26557c478bd9Sstevel@tonic-gate {
26567c478bd9Sstevel@tonic-gate 	mdb_arg_t a;
26577c478bd9Sstevel@tonic-gate 
26587c478bd9Sstevel@tonic-gate 	if (!(flags & DCMD_ADDRSPEC))
26597c478bd9Sstevel@tonic-gate 		return (DCMD_USAGE);
26607c478bd9Sstevel@tonic-gate 
26617c478bd9Sstevel@tonic-gate 	if (argc != 0)
26627c478bd9Sstevel@tonic-gate 		return (DCMD_USAGE);
26637c478bd9Sstevel@tonic-gate 
26647c478bd9Sstevel@tonic-gate 	a.a_type = MDB_TYPE_STRING;
26657c478bd9Sstevel@tonic-gate 	a.a_un.a_str = "-v";
26667c478bd9Sstevel@tonic-gate 
26677c478bd9Sstevel@tonic-gate 	return (bufctl(addr, flags, 1, &a));
26687c478bd9Sstevel@tonic-gate }
26697c478bd9Sstevel@tonic-gate 
26707c478bd9Sstevel@tonic-gate typedef struct umem_verify {
26717c478bd9Sstevel@tonic-gate 	uint64_t *umv_buf;		/* buffer to read cache contents into */
26727c478bd9Sstevel@tonic-gate 	size_t umv_size;		/* number of bytes in umv_buf */
26737c478bd9Sstevel@tonic-gate 	int umv_corruption;		/* > 0 if corruption found. */
26747c478bd9Sstevel@tonic-gate 	int umv_besilent;		/* if set, don't report corruption sites */
26757c478bd9Sstevel@tonic-gate 	struct umem_cache umv_cache;	/* the cache we're operating on */
26767c478bd9Sstevel@tonic-gate } umem_verify_t;
26777c478bd9Sstevel@tonic-gate 
26787c478bd9Sstevel@tonic-gate /*
26797c478bd9Sstevel@tonic-gate  * verify_pattern()
26807c478bd9Sstevel@tonic-gate  *	verify that buf is filled with the pattern pat.
26817c478bd9Sstevel@tonic-gate  */
26827c478bd9Sstevel@tonic-gate static int64_t
26837c478bd9Sstevel@tonic-gate verify_pattern(uint64_t *buf_arg, size_t size, uint64_t pat)
26847c478bd9Sstevel@tonic-gate {
26857c478bd9Sstevel@tonic-gate 	/*LINTED*/
26867c478bd9Sstevel@tonic-gate 	uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
26877c478bd9Sstevel@tonic-gate 	uint64_t *buf;
26887c478bd9Sstevel@tonic-gate 
26897c478bd9Sstevel@tonic-gate 	for (buf = buf_arg; buf < bufend; buf++)
26907c478bd9Sstevel@tonic-gate 		if (*buf != pat)
26917c478bd9Sstevel@tonic-gate 			return ((uintptr_t)buf - (uintptr_t)buf_arg);
26927c478bd9Sstevel@tonic-gate 	return (-1);
26937c478bd9Sstevel@tonic-gate }
26947c478bd9Sstevel@tonic-gate 
26957c478bd9Sstevel@tonic-gate /*
26967c478bd9Sstevel@tonic-gate  * verify_buftag()
26977c478bd9Sstevel@tonic-gate  *	verify that btp->bt_bxstat == (bcp ^ pat)
26987c478bd9Sstevel@tonic-gate  */
26997c478bd9Sstevel@tonic-gate static int
27007c478bd9Sstevel@tonic-gate verify_buftag(umem_buftag_t *btp, uintptr_t pat)
27017c478bd9Sstevel@tonic-gate {
27027c478bd9Sstevel@tonic-gate 	return (btp->bt_bxstat == ((intptr_t)btp->bt_bufctl ^ pat) ? 0 : -1);
27037c478bd9Sstevel@tonic-gate }
27047c478bd9Sstevel@tonic-gate 
27057c478bd9Sstevel@tonic-gate /*
27067c478bd9Sstevel@tonic-gate  * verify_free()
27077c478bd9Sstevel@tonic-gate  *	verify the integrity of a free block of memory by checking
27087c478bd9Sstevel@tonic-gate  *	that it is filled with 0xdeadbeef and that its buftag is sane.
27097c478bd9Sstevel@tonic-gate  */
27107c478bd9Sstevel@tonic-gate /*ARGSUSED1*/
27117c478bd9Sstevel@tonic-gate static int
27127c478bd9Sstevel@tonic-gate verify_free(uintptr_t addr, const void *data, void *private)
27137c478bd9Sstevel@tonic-gate {
27147c478bd9Sstevel@tonic-gate 	umem_verify_t *umv = (umem_verify_t *)private;
27157c478bd9Sstevel@tonic-gate 	uint64_t *buf = umv->umv_buf;	/* buf to validate */
27167c478bd9Sstevel@tonic-gate 	int64_t corrupt;		/* corruption offset */
27177c478bd9Sstevel@tonic-gate 	umem_buftag_t *buftagp;		/* ptr to buftag */
27187c478bd9Sstevel@tonic-gate 	umem_cache_t *cp = &umv->umv_cache;
27197c478bd9Sstevel@tonic-gate 	int besilent = umv->umv_besilent;
27207c478bd9Sstevel@tonic-gate 
27217c478bd9Sstevel@tonic-gate 	/*LINTED*/
27227c478bd9Sstevel@tonic-gate 	buftagp = UMEM_BUFTAG(cp, buf);
27237c478bd9Sstevel@tonic-gate 
27247c478bd9Sstevel@tonic-gate 	/*
27257c478bd9Sstevel@tonic-gate 	 * Read the buffer to check.
27267c478bd9Sstevel@tonic-gate 	 */
27277c478bd9Sstevel@tonic-gate 	if (mdb_vread(buf, umv->umv_size, addr) == -1) {
27287c478bd9Sstevel@tonic-gate 		if (!besilent)
27297c478bd9Sstevel@tonic-gate 			mdb_warn("couldn't read %p", addr);
27307c478bd9Sstevel@tonic-gate 		return (WALK_NEXT);
27317c478bd9Sstevel@tonic-gate 	}
27327c478bd9Sstevel@tonic-gate 
27337c478bd9Sstevel@tonic-gate 	if ((corrupt = verify_pattern(buf, cp->cache_verify,
27347c478bd9Sstevel@tonic-gate 	    UMEM_FREE_PATTERN)) >= 0) {
27357c478bd9Sstevel@tonic-gate 		if (!besilent)
27367c478bd9Sstevel@tonic-gate 			mdb_printf("buffer %p (free) seems corrupted, at %p\n",
27377c478bd9Sstevel@tonic-gate 			    addr, (uintptr_t)addr + corrupt);
27387c478bd9Sstevel@tonic-gate 		goto corrupt;
27397c478bd9Sstevel@tonic-gate 	}
27407c478bd9Sstevel@tonic-gate 
27417c478bd9Sstevel@tonic-gate 	if ((cp->cache_flags & UMF_HASH) &&
27427c478bd9Sstevel@tonic-gate 	    buftagp->bt_redzone != UMEM_REDZONE_PATTERN) {
27437c478bd9Sstevel@tonic-gate 		if (!besilent)
27447c478bd9Sstevel@tonic-gate 			mdb_printf("buffer %p (free) seems to "
27457c478bd9Sstevel@tonic-gate 			    "have a corrupt redzone pattern\n", addr);
27467c478bd9Sstevel@tonic-gate 		goto corrupt;
27477c478bd9Sstevel@tonic-gate 	}
27487c478bd9Sstevel@tonic-gate 
27497c478bd9Sstevel@tonic-gate 	/*
27507c478bd9Sstevel@tonic-gate 	 * confirm bufctl pointer integrity.
27517c478bd9Sstevel@tonic-gate 	 */
27527c478bd9Sstevel@tonic-gate 	if (verify_buftag(buftagp, UMEM_BUFTAG_FREE) == -1) {
27537c478bd9Sstevel@tonic-gate 		if (!besilent)
27547c478bd9Sstevel@tonic-gate 			mdb_printf("buffer %p (free) has a corrupt "
27557c478bd9Sstevel@tonic-gate 			    "buftag\n", addr);
27567c478bd9Sstevel@tonic-gate 		goto corrupt;
27577c478bd9Sstevel@tonic-gate 	}
27587c478bd9Sstevel@tonic-gate 
27597c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
27607c478bd9Sstevel@tonic-gate corrupt:
27617c478bd9Sstevel@tonic-gate 	umv->umv_corruption++;
27627c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
27637c478bd9Sstevel@tonic-gate }
27647c478bd9Sstevel@tonic-gate 
27657c478bd9Sstevel@tonic-gate /*
27667c478bd9Sstevel@tonic-gate  * verify_alloc()
27677c478bd9Sstevel@tonic-gate  *	Verify that the buftag of an allocated buffer makes sense with respect
27687c478bd9Sstevel@tonic-gate  *	to the buffer.
27697c478bd9Sstevel@tonic-gate  */
27707c478bd9Sstevel@tonic-gate /*ARGSUSED1*/
27717c478bd9Sstevel@tonic-gate static int
27727c478bd9Sstevel@tonic-gate verify_alloc(uintptr_t addr, const void *data, void *private)
27737c478bd9Sstevel@tonic-gate {
27747c478bd9Sstevel@tonic-gate 	umem_verify_t *umv = (umem_verify_t *)private;
27757c478bd9Sstevel@tonic-gate 	umem_cache_t *cp = &umv->umv_cache;
27767c478bd9Sstevel@tonic-gate 	uint64_t *buf = umv->umv_buf;	/* buf to validate */
27777c478bd9Sstevel@tonic-gate 	/*LINTED*/
27787c478bd9Sstevel@tonic-gate 	umem_buftag_t *buftagp = UMEM_BUFTAG(cp, buf);
27797c478bd9Sstevel@tonic-gate 	uint32_t *ip = (uint32_t *)buftagp;
27807c478bd9Sstevel@tonic-gate 	uint8_t *bp = (uint8_t *)buf;
27817c478bd9Sstevel@tonic-gate 	int looks_ok = 0, size_ok = 1;	/* flags for finding corruption */
27827c478bd9Sstevel@tonic-gate 	int besilent = umv->umv_besilent;
27837c478bd9Sstevel@tonic-gate 
27847c478bd9Sstevel@tonic-gate 	/*
27857c478bd9Sstevel@tonic-gate 	 * Read the buffer to check.
27867c478bd9Sstevel@tonic-gate 	 */
27877c478bd9Sstevel@tonic-gate 	if (mdb_vread(buf, umv->umv_size, addr) == -1) {
27887c478bd9Sstevel@tonic-gate 		if (!besilent)
27897c478bd9Sstevel@tonic-gate 			mdb_warn("couldn't read %p", addr);
27907c478bd9Sstevel@tonic-gate 		return (WALK_NEXT);
27917c478bd9Sstevel@tonic-gate 	}
27927c478bd9Sstevel@tonic-gate 
27937c478bd9Sstevel@tonic-gate 	/*
27947c478bd9Sstevel@tonic-gate 	 * There are two cases to handle:
27957c478bd9Sstevel@tonic-gate 	 * 1. If the buf was alloc'd using umem_cache_alloc, it will have
27967c478bd9Sstevel@tonic-gate 	 *    0xfeedfacefeedface at the end of it
27977c478bd9Sstevel@tonic-gate 	 * 2. If the buf was alloc'd using umem_alloc, it will have
27987c478bd9Sstevel@tonic-gate 	 *    0xbb just past the end of the region in use.  At the buftag,
27997c478bd9Sstevel@tonic-gate 	 *    it will have 0xfeedface (or, if the whole buffer is in use,
28007c478bd9Sstevel@tonic-gate 	 *    0xfeedface & bb000000 or 0xfeedfacf & 000000bb depending on
28017c478bd9Sstevel@tonic-gate 	 *    endianness), followed by 32 bits containing the offset of the
28027c478bd9Sstevel@tonic-gate 	 *    0xbb byte in the buffer.
28037c478bd9Sstevel@tonic-gate 	 *
28047c478bd9Sstevel@tonic-gate 	 * Finally, the two 32-bit words that comprise the second half of the
28057c478bd9Sstevel@tonic-gate 	 * buftag should xor to UMEM_BUFTAG_ALLOC
28067c478bd9Sstevel@tonic-gate 	 */
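
	/*
	 * Restated as a decision table (a paraphrase of the cases above, not
	 * an additional check):
	 *
	 *	bt_redzone == UMEM_REDZONE_PATTERN		 -> looks_ok
	 *	ip[1] is not a valid size encoding		 -> !size_ok
	 *	bp[UMEM_SIZE_DECODE(ip[1])] == UMEM_REDZONE_BYTE -> looks_ok
	 *	otherwise					 -> !size_ok
	 */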
28077c478bd9Sstevel@tonic-gate 
28087c478bd9Sstevel@tonic-gate 	if (buftagp->bt_redzone == UMEM_REDZONE_PATTERN)
28097c478bd9Sstevel@tonic-gate 		looks_ok = 1;
28107c478bd9Sstevel@tonic-gate 	else if (!UMEM_SIZE_VALID(ip[1]))
28117c478bd9Sstevel@tonic-gate 		size_ok = 0;
28127c478bd9Sstevel@tonic-gate 	else if (bp[UMEM_SIZE_DECODE(ip[1])] == UMEM_REDZONE_BYTE)
28137c478bd9Sstevel@tonic-gate 		looks_ok = 1;
28147c478bd9Sstevel@tonic-gate 	else
28157c478bd9Sstevel@tonic-gate 		size_ok = 0;
28167c478bd9Sstevel@tonic-gate 
28177c478bd9Sstevel@tonic-gate 	if (!size_ok) {
28187c478bd9Sstevel@tonic-gate 		if (!besilent)
28197c478bd9Sstevel@tonic-gate 			mdb_printf("buffer %p (allocated) has a corrupt "
28207c478bd9Sstevel@tonic-gate 			    "redzone size encoding\n", addr);
28217c478bd9Sstevel@tonic-gate 		goto corrupt;
28227c478bd9Sstevel@tonic-gate 	}
28237c478bd9Sstevel@tonic-gate 
28247c478bd9Sstevel@tonic-gate 	if (!looks_ok) {
28257c478bd9Sstevel@tonic-gate 		if (!besilent)
28267c478bd9Sstevel@tonic-gate 			mdb_printf("buffer %p (allocated) has a corrupt "
28277c478bd9Sstevel@tonic-gate 			    "redzone signature\n", addr);
28287c478bd9Sstevel@tonic-gate 		goto corrupt;
28297c478bd9Sstevel@tonic-gate 	}
28307c478bd9Sstevel@tonic-gate 
28317c478bd9Sstevel@tonic-gate 	if (verify_buftag(buftagp, UMEM_BUFTAG_ALLOC) == -1) {
28327c478bd9Sstevel@tonic-gate 		if (!besilent)
28337c478bd9Sstevel@tonic-gate 			mdb_printf("buffer %p (allocated) has a "
28347c478bd9Sstevel@tonic-gate 			    "corrupt buftag\n", addr);
28357c478bd9Sstevel@tonic-gate 		goto corrupt;
28367c478bd9Sstevel@tonic-gate 	}
28377c478bd9Sstevel@tonic-gate 
28387c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
28397c478bd9Sstevel@tonic-gate corrupt:
28407c478bd9Sstevel@tonic-gate 	umv->umv_corruption++;
28417c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
28427c478bd9Sstevel@tonic-gate }
28437c478bd9Sstevel@tonic-gate 
28447c478bd9Sstevel@tonic-gate /*ARGSUSED2*/
28457c478bd9Sstevel@tonic-gate int
28467c478bd9Sstevel@tonic-gate umem_verify(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
28477c478bd9Sstevel@tonic-gate {
28487c478bd9Sstevel@tonic-gate 	if (flags & DCMD_ADDRSPEC) {
28497c478bd9Sstevel@tonic-gate 		int check_alloc = 0, check_free = 0;
28507c478bd9Sstevel@tonic-gate 		umem_verify_t umv;
28517c478bd9Sstevel@tonic-gate 
28527c478bd9Sstevel@tonic-gate 		if (mdb_vread(&umv.umv_cache, sizeof (umv.umv_cache),
28537c478bd9Sstevel@tonic-gate 		    addr) == -1) {
28547c478bd9Sstevel@tonic-gate 			mdb_warn("couldn't read umem_cache %p", addr);
28557c478bd9Sstevel@tonic-gate 			return (DCMD_ERR);
28567c478bd9Sstevel@tonic-gate 		}
28577c478bd9Sstevel@tonic-gate 
28587c478bd9Sstevel@tonic-gate 		umv.umv_size = umv.umv_cache.cache_buftag +
28597c478bd9Sstevel@tonic-gate 		    sizeof (umem_buftag_t);
28607c478bd9Sstevel@tonic-gate 		umv.umv_buf = mdb_alloc(umv.umv_size, UM_SLEEP | UM_GC);
28617c478bd9Sstevel@tonic-gate 		umv.umv_corruption = 0;
28627c478bd9Sstevel@tonic-gate 
28637c478bd9Sstevel@tonic-gate 		if ((umv.umv_cache.cache_flags & UMF_REDZONE)) {
28647c478bd9Sstevel@tonic-gate 			check_alloc = 1;
28657c478bd9Sstevel@tonic-gate 			if (umv.umv_cache.cache_flags & UMF_DEADBEEF)
28667c478bd9Sstevel@tonic-gate 				check_free = 1;
28677c478bd9Sstevel@tonic-gate 		} else {
28687c478bd9Sstevel@tonic-gate 			if (!(flags & DCMD_LOOP)) {
28697c478bd9Sstevel@tonic-gate 				mdb_warn("cache %p (%s) does not have "
28707c478bd9Sstevel@tonic-gate 				    "redzone checking enabled\n", addr,
28717c478bd9Sstevel@tonic-gate 				    umv.umv_cache.cache_name);
28727c478bd9Sstevel@tonic-gate 			}
28737c478bd9Sstevel@tonic-gate 			return (DCMD_ERR);
28747c478bd9Sstevel@tonic-gate 		}
28757c478bd9Sstevel@tonic-gate 
28767c478bd9Sstevel@tonic-gate 		if (flags & DCMD_LOOP) {
28777c478bd9Sstevel@tonic-gate 			/*
28787c478bd9Sstevel@tonic-gate 			 * table mode, don't print out every corrupt buffer
28797c478bd9Sstevel@tonic-gate 			 */
28807c478bd9Sstevel@tonic-gate 			umv.umv_besilent = 1;
28817c478bd9Sstevel@tonic-gate 		} else {
28827c478bd9Sstevel@tonic-gate 			mdb_printf("Summary for cache '%s'\n",
28837c478bd9Sstevel@tonic-gate 			    umv.umv_cache.cache_name);
28847c478bd9Sstevel@tonic-gate 			mdb_inc_indent(2);
28857c478bd9Sstevel@tonic-gate 			umv.umv_besilent = 0;
28867c478bd9Sstevel@tonic-gate 		}
28877c478bd9Sstevel@tonic-gate 
28887c478bd9Sstevel@tonic-gate 		if (check_alloc)
28897c478bd9Sstevel@tonic-gate 			(void) mdb_pwalk("umem", verify_alloc, &umv, addr);
28907c478bd9Sstevel@tonic-gate 		if (check_free)
28917c478bd9Sstevel@tonic-gate 			(void) mdb_pwalk("freemem", verify_free, &umv, addr);
28927c478bd9Sstevel@tonic-gate 
28937c478bd9Sstevel@tonic-gate 		if (flags & DCMD_LOOP) {
28947c478bd9Sstevel@tonic-gate 			if (umv.umv_corruption == 0) {
28957c478bd9Sstevel@tonic-gate 				mdb_printf("%-*s %?p clean\n",
28967c478bd9Sstevel@tonic-gate 				    UMEM_CACHE_NAMELEN,
28977c478bd9Sstevel@tonic-gate 				    umv.umv_cache.cache_name, addr);
28987c478bd9Sstevel@tonic-gate 			} else {
28997c478bd9Sstevel@tonic-gate 				char *s = "";	/* optional s in "buffer[s]" */
29007c478bd9Sstevel@tonic-gate 				if (umv.umv_corruption > 1)
29017c478bd9Sstevel@tonic-gate 					s = "s";
29027c478bd9Sstevel@tonic-gate 
29037c478bd9Sstevel@tonic-gate 				mdb_printf("%-*s %?p %d corrupt buffer%s\n",
29047c478bd9Sstevel@tonic-gate 				    UMEM_CACHE_NAMELEN,
29057c478bd9Sstevel@tonic-gate 				    umv.umv_cache.cache_name, addr,
29067c478bd9Sstevel@tonic-gate 				    umv.umv_corruption, s);
29077c478bd9Sstevel@tonic-gate 			}
29087c478bd9Sstevel@tonic-gate 		} else {
29097c478bd9Sstevel@tonic-gate 			/*
29107c478bd9Sstevel@tonic-gate 			 * This is the more verbose mode, when the user has
29117c478bd9Sstevel@tonic-gate 			 * typed addr::umem_verify.  If the cache was clean,
29127c478bd9Sstevel@tonic-gate 			 * nothing will have been printed yet, so say something.
29137c478bd9Sstevel@tonic-gate 			 */
29147c478bd9Sstevel@tonic-gate 			if (umv.umv_corruption == 0)
29157c478bd9Sstevel@tonic-gate 				mdb_printf("clean\n");
29167c478bd9Sstevel@tonic-gate 
29177c478bd9Sstevel@tonic-gate 			mdb_dec_indent(2);
29187c478bd9Sstevel@tonic-gate 		}
29197c478bd9Sstevel@tonic-gate 	} else {
29207c478bd9Sstevel@tonic-gate 		/*
29217c478bd9Sstevel@tonic-gate 		 * If the user didn't specify a cache to verify, we'll walk all
29227c478bd9Sstevel@tonic-gate 		 * umem_cache's, specifying ourself as a callback for each...
29237c478bd9Sstevel@tonic-gate 		 * this is the equivalent of '::walk umem_cache .::umem_verify'
29247c478bd9Sstevel@tonic-gate 		 */
29257c478bd9Sstevel@tonic-gate 		mdb_printf("%<u>%-*s %-?s %-20s%</u>\n", UMEM_CACHE_NAMELEN,
29267c478bd9Sstevel@tonic-gate 		    "Cache Name", "Addr", "Cache Integrity");
29277c478bd9Sstevel@tonic-gate 		(void) (mdb_walk_dcmd("umem_cache", "umem_verify", 0, NULL));
29287c478bd9Sstevel@tonic-gate 	}
29297c478bd9Sstevel@tonic-gate 
29307c478bd9Sstevel@tonic-gate 	return (DCMD_OK);
29317c478bd9Sstevel@tonic-gate }
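
/*
 * Illustrative usage, assuming this dcmd is registered as ::umem_verify and
 * the target process enabled redzone checking (UMF_REDZONE), e.g. via the
 * UMEM_DEBUG environment variable:
 *
 *	> ::umem_verify			(one summary line per cache)
 *	> addr::umem_verify		(detailed report for one cache)
 *
 * Without an address, the dcmd drives itself over every umem cache via
 * mdb_walk_dcmd(), printing "clean" or a corrupt-buffer count for each.
 */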
29327c478bd9Sstevel@tonic-gate 
29337c478bd9Sstevel@tonic-gate typedef struct vmem_node {
29347c478bd9Sstevel@tonic-gate 	struct vmem_node *vn_next;
29357c478bd9Sstevel@tonic-gate 	struct vmem_node *vn_parent;
29367c478bd9Sstevel@tonic-gate 	struct vmem_node *vn_sibling;
29377c478bd9Sstevel@tonic-gate 	struct vmem_node *vn_children;
29387c478bd9Sstevel@tonic-gate 	uintptr_t vn_addr;
29397c478bd9Sstevel@tonic-gate 	int vn_marked;
29407c478bd9Sstevel@tonic-gate 	vmem_t vn_vmem;
29417c478bd9Sstevel@tonic-gate } vmem_node_t;
29427c478bd9Sstevel@tonic-gate 
29437c478bd9Sstevel@tonic-gate typedef struct vmem_walk {
29447c478bd9Sstevel@tonic-gate 	vmem_node_t *vw_root;
29457c478bd9Sstevel@tonic-gate 	vmem_node_t *vw_current;
29467c478bd9Sstevel@tonic-gate } vmem_walk_t;
29477c478bd9Sstevel@tonic-gate 
29487c478bd9Sstevel@tonic-gate int
29497c478bd9Sstevel@tonic-gate vmem_walk_init(mdb_walk_state_t *wsp)
29507c478bd9Sstevel@tonic-gate {
29517c478bd9Sstevel@tonic-gate 	uintptr_t vaddr, paddr;
29527c478bd9Sstevel@tonic-gate 	vmem_node_t *head = NULL, *root = NULL, *current = NULL, *parent, *vp;
29537c478bd9Sstevel@tonic-gate 	vmem_walk_t *vw;
29547c478bd9Sstevel@tonic-gate 
29557c478bd9Sstevel@tonic-gate 	if (umem_readvar(&vaddr, "vmem_list") == -1) {
29567c478bd9Sstevel@tonic-gate 		mdb_warn("couldn't read 'vmem_list'");
29577c478bd9Sstevel@tonic-gate 		return (WALK_ERR);
29587c478bd9Sstevel@tonic-gate 	}
29597c478bd9Sstevel@tonic-gate 
29607c478bd9Sstevel@tonic-gate 	while (vaddr != NULL) {
29617c478bd9Sstevel@tonic-gate 		vp = mdb_zalloc(sizeof (vmem_node_t), UM_SLEEP);
29627c478bd9Sstevel@tonic-gate 		vp->vn_addr = vaddr;
29637c478bd9Sstevel@tonic-gate 		vp->vn_next = head;
29647c478bd9Sstevel@tonic-gate 		head = vp;
29657c478bd9Sstevel@tonic-gate 
29667c478bd9Sstevel@tonic-gate 		if (vaddr == wsp->walk_addr)
29677c478bd9Sstevel@tonic-gate 			current = vp;
29687c478bd9Sstevel@tonic-gate 
29697c478bd9Sstevel@tonic-gate 		if (mdb_vread(&vp->vn_vmem, sizeof (vmem_t), vaddr) == -1) {
29707c478bd9Sstevel@tonic-gate 			mdb_warn("couldn't read vmem_t at %p", vaddr);
29717c478bd9Sstevel@tonic-gate 			goto err;
29727c478bd9Sstevel@tonic-gate 		}
29737c478bd9Sstevel@tonic-gate 
29747c478bd9Sstevel@tonic-gate 		vaddr = (uintptr_t)vp->vn_vmem.vm_next;
29757c478bd9Sstevel@tonic-gate 	}
29767c478bd9Sstevel@tonic-gate 
29777c478bd9Sstevel@tonic-gate 	for (vp = head; vp != NULL; vp = vp->vn_next) {
29787c478bd9Sstevel@tonic-gate 
29797c478bd9Sstevel@tonic-gate 		if ((paddr = (uintptr_t)vp->vn_vmem.vm_source) == NULL) {
29807c478bd9Sstevel@tonic-gate 			vp->vn_sibling = root;
29817c478bd9Sstevel@tonic-gate 			root = vp;
29827c478bd9Sstevel@tonic-gate 			continue;
29837c478bd9Sstevel@tonic-gate 		}
29847c478bd9Sstevel@tonic-gate 
29857c478bd9Sstevel@tonic-gate 		for (parent = head; parent != NULL; parent = parent->vn_next) {
29867c478bd9Sstevel@tonic-gate 			if (parent->vn_addr != paddr)
29877c478bd9Sstevel@tonic-gate 				continue;
29887c478bd9Sstevel@tonic-gate 			vp->vn_sibling = parent->vn_children;
29897c478bd9Sstevel@tonic-gate 			parent->vn_children = vp;
29907c478bd9Sstevel@tonic-gate 			vp->vn_parent = parent;
29917c478bd9Sstevel@tonic-gate 			break;
29927c478bd9Sstevel@tonic-gate 		}
29937c478bd9Sstevel@tonic-gate 
29947c478bd9Sstevel@tonic-gate 		if (parent == NULL) {
29957c478bd9Sstevel@tonic-gate 			mdb_warn("couldn't find %p's parent (%p)\n",
29967c478bd9Sstevel@tonic-gate 			    vp->vn_addr, paddr);
29977c478bd9Sstevel@tonic-gate 			goto err;
29987c478bd9Sstevel@tonic-gate 		}
29997c478bd9Sstevel@tonic-gate 	}
30007c478bd9Sstevel@tonic-gate 
30017c478bd9Sstevel@tonic-gate 	vw = mdb_zalloc(sizeof (vmem_walk_t), UM_SLEEP);
30027c478bd9Sstevel@tonic-gate 	vw->vw_root = root;
30037c478bd9Sstevel@tonic-gate 
30047c478bd9Sstevel@tonic-gate 	if (current != NULL)
30057c478bd9Sstevel@tonic-gate 		vw->vw_current = current;
30067c478bd9Sstevel@tonic-gate 	else
30077c478bd9Sstevel@tonic-gate 		vw->vw_current = root;
30087c478bd9Sstevel@tonic-gate 
30097c478bd9Sstevel@tonic-gate 	wsp->walk_data = vw;
30107c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
30117c478bd9Sstevel@tonic-gate err:
30127c478bd9Sstevel@tonic-gate 	for (vp = head; head != NULL; vp = head) {
30137c478bd9Sstevel@tonic-gate 		head = vp->vn_next;
30147c478bd9Sstevel@tonic-gate 		mdb_free(vp, sizeof (vmem_node_t));
30157c478bd9Sstevel@tonic-gate 	}
30167c478bd9Sstevel@tonic-gate 
30177c478bd9Sstevel@tonic-gate 	return (WALK_ERR);
30187c478bd9Sstevel@tonic-gate }
30197c478bd9Sstevel@tonic-gate 
30207c478bd9Sstevel@tonic-gate int
30217c478bd9Sstevel@tonic-gate vmem_walk_step(mdb_walk_state_t *wsp)
30227c478bd9Sstevel@tonic-gate {
30237c478bd9Sstevel@tonic-gate 	vmem_walk_t *vw = wsp->walk_data;
30247c478bd9Sstevel@tonic-gate 	vmem_node_t *vp;
30257c478bd9Sstevel@tonic-gate 	int rval;
30267c478bd9Sstevel@tonic-gate 
30277c478bd9Sstevel@tonic-gate 	if ((vp = vw->vw_current) == NULL)
30287c478bd9Sstevel@tonic-gate 		return (WALK_DONE);
30297c478bd9Sstevel@tonic-gate 
30307c478bd9Sstevel@tonic-gate 	rval = wsp->walk_callback(vp->vn_addr, &vp->vn_vmem, wsp->walk_cbdata);
30317c478bd9Sstevel@tonic-gate 
30327c478bd9Sstevel@tonic-gate 	if (vp->vn_children != NULL) {
30337c478bd9Sstevel@tonic-gate 		vw->vw_current = vp->vn_children;
30347c478bd9Sstevel@tonic-gate 		return (rval);
30357c478bd9Sstevel@tonic-gate 	}
30367c478bd9Sstevel@tonic-gate 
30377c478bd9Sstevel@tonic-gate 	do {
30387c478bd9Sstevel@tonic-gate 		vw->vw_current = vp->vn_sibling;
30397c478bd9Sstevel@tonic-gate 		vp = vp->vn_parent;
30407c478bd9Sstevel@tonic-gate 	} while (vw->vw_current == NULL && vp != NULL);
30417c478bd9Sstevel@tonic-gate 
30427c478bd9Sstevel@tonic-gate 	return (rval);
30437c478bd9Sstevel@tonic-gate }
30447c478bd9Sstevel@tonic-gate 
30457c478bd9Sstevel@tonic-gate /*
30467c478bd9Sstevel@tonic-gate  * The "vmem_postfix" walk walks the vmem arenas in post-fix order; all
30477c478bd9Sstevel@tonic-gate  * children are visited before their parent.  We perform the postfix walk
30487c478bd9Sstevel@tonic-gate  * iteratively (rather than recursively) to allow mdb to regain control
30497c478bd9Sstevel@tonic-gate  * after each callback.
30507c478bd9Sstevel@tonic-gate  */
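/*
 * For illustration (hypothetical arena names): if arenas "a" and "b" both
 * import from "c", and "c" in turn imports from the root arena "r",
 *
 *	r
 *	`-- c
 *	    |-- a
 *	    `-- b
 *
 * then the postfix walk visits a and b before c, and c before r, so every
 * arena is seen only after all of the arenas that import from it.
 */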
30517c478bd9Sstevel@tonic-gate int
30527c478bd9Sstevel@tonic-gate vmem_postfix_walk_step(mdb_walk_state_t *wsp)
30537c478bd9Sstevel@tonic-gate {
30547c478bd9Sstevel@tonic-gate 	vmem_walk_t *vw = wsp->walk_data;
30557c478bd9Sstevel@tonic-gate 	vmem_node_t *vp = vw->vw_current;
30567c478bd9Sstevel@tonic-gate 	int rval;
30577c478bd9Sstevel@tonic-gate 
30587c478bd9Sstevel@tonic-gate 	/*
30597c478bd9Sstevel@tonic-gate 	 * If this node is marked, then we know that we have already visited
30607c478bd9Sstevel@tonic-gate 	 * all of its children.  If the node has any siblings, they need to
30617c478bd9Sstevel@tonic-gate 	 * be visited next; otherwise, we need to visit the parent.  Note
30627c478bd9Sstevel@tonic-gate 	 * that vp->vn_marked will only be zero on the first invocation of
30637c478bd9Sstevel@tonic-gate 	 * the step function.
30647c478bd9Sstevel@tonic-gate 	 */
30657c478bd9Sstevel@tonic-gate 	if (vp->vn_marked) {
30667c478bd9Sstevel@tonic-gate 		if (vp->vn_sibling != NULL)
30677c478bd9Sstevel@tonic-gate 			vp = vp->vn_sibling;
30687c478bd9Sstevel@tonic-gate 		else if (vp->vn_parent != NULL)
30697c478bd9Sstevel@tonic-gate 			vp = vp->vn_parent;
30707c478bd9Sstevel@tonic-gate 		else {
30717c478bd9Sstevel@tonic-gate 			/*
30727c478bd9Sstevel@tonic-gate 			 * We have neither a parent, nor a sibling, and we
30737c478bd9Sstevel@tonic-gate 			 * have already been visited; we're done.
30747c478bd9Sstevel@tonic-gate 			 */
30757c478bd9Sstevel@tonic-gate 			return (WALK_DONE);
30767c478bd9Sstevel@tonic-gate 		}
30777c478bd9Sstevel@tonic-gate 	}
30787c478bd9Sstevel@tonic-gate 
30797c478bd9Sstevel@tonic-gate 	/*
30807c478bd9Sstevel@tonic-gate 	 * Before we visit this node, visit its children.
30817c478bd9Sstevel@tonic-gate 	 */
30827c478bd9Sstevel@tonic-gate 	while (vp->vn_children != NULL && !vp->vn_children->vn_marked)
30837c478bd9Sstevel@tonic-gate 		vp = vp->vn_children;
30847c478bd9Sstevel@tonic-gate 
30857c478bd9Sstevel@tonic-gate 	vp->vn_marked = 1;
30867c478bd9Sstevel@tonic-gate 	vw->vw_current = vp;
30877c478bd9Sstevel@tonic-gate 	rval = wsp->walk_callback(vp->vn_addr, &vp->vn_vmem, wsp->walk_cbdata);
30887c478bd9Sstevel@tonic-gate 
30897c478bd9Sstevel@tonic-gate 	return (rval);
30907c478bd9Sstevel@tonic-gate }
30917c478bd9Sstevel@tonic-gate 
30927c478bd9Sstevel@tonic-gate void
30937c478bd9Sstevel@tonic-gate vmem_walk_fini(mdb_walk_state_t *wsp)
30947c478bd9Sstevel@tonic-gate {
30957c478bd9Sstevel@tonic-gate 	vmem_walk_t *vw = wsp->walk_data;
30967c478bd9Sstevel@tonic-gate 	vmem_node_t *root = vw->vw_root;
30977c478bd9Sstevel@tonic-gate 	int done;
30987c478bd9Sstevel@tonic-gate 
30997c478bd9Sstevel@tonic-gate 	if (root == NULL)
31007c478bd9Sstevel@tonic-gate 		return;
31017c478bd9Sstevel@tonic-gate 
31027c478bd9Sstevel@tonic-gate 	if ((vw->vw_root = root->vn_children) != NULL)
31037c478bd9Sstevel@tonic-gate 		vmem_walk_fini(wsp);
31047c478bd9Sstevel@tonic-gate 
31057c478bd9Sstevel@tonic-gate 	vw->vw_root = root->vn_sibling;
31067c478bd9Sstevel@tonic-gate 	done = (root->vn_sibling == NULL && root->vn_parent == NULL);
31077c478bd9Sstevel@tonic-gate 	mdb_free(root, sizeof (vmem_node_t));
31087c478bd9Sstevel@tonic-gate 
31097c478bd9Sstevel@tonic-gate 	if (done) {
31107c478bd9Sstevel@tonic-gate 		mdb_free(vw, sizeof (vmem_walk_t));
31117c478bd9Sstevel@tonic-gate 	} else {
31127c478bd9Sstevel@tonic-gate 		vmem_walk_fini(wsp);
31137c478bd9Sstevel@tonic-gate 	}
31147c478bd9Sstevel@tonic-gate }
31157c478bd9Sstevel@tonic-gate 
31167c478bd9Sstevel@tonic-gate typedef struct vmem_seg_walk {
31177c478bd9Sstevel@tonic-gate 	uint8_t vsw_type;
31187c478bd9Sstevel@tonic-gate 	uintptr_t vsw_start;
31197c478bd9Sstevel@tonic-gate 	uintptr_t vsw_current;
31207c478bd9Sstevel@tonic-gate } vmem_seg_walk_t;
31217c478bd9Sstevel@tonic-gate 
31227c478bd9Sstevel@tonic-gate /*ARGSUSED*/
31237c478bd9Sstevel@tonic-gate int
31247c478bd9Sstevel@tonic-gate vmem_seg_walk_common_init(mdb_walk_state_t *wsp, uint8_t type, char *name)
31257c478bd9Sstevel@tonic-gate {
31267c478bd9Sstevel@tonic-gate 	vmem_seg_walk_t *vsw;
31277c478bd9Sstevel@tonic-gate 
31287c478bd9Sstevel@tonic-gate 	if (wsp->walk_addr == NULL) {
31297c478bd9Sstevel@tonic-gate 		mdb_warn("vmem_%s does not support global walks\n", name);
31307c478bd9Sstevel@tonic-gate 		return (WALK_ERR);
31317c478bd9Sstevel@tonic-gate 	}
31327c478bd9Sstevel@tonic-gate 
31337c478bd9Sstevel@tonic-gate 	wsp->walk_data = vsw = mdb_alloc(sizeof (vmem_seg_walk_t), UM_SLEEP);
31347c478bd9Sstevel@tonic-gate 
31357c478bd9Sstevel@tonic-gate 	vsw->vsw_type = type;
31367c478bd9Sstevel@tonic-gate 	vsw->vsw_start = wsp->walk_addr + OFFSETOF(vmem_t, vm_seg0);
31377c478bd9Sstevel@tonic-gate 	vsw->vsw_current = vsw->vsw_start;
31387c478bd9Sstevel@tonic-gate 
31397c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
31407c478bd9Sstevel@tonic-gate }
31417c478bd9Sstevel@tonic-gate 
31427c478bd9Sstevel@tonic-gate /*
31437c478bd9Sstevel@tonic-gate  * vmem segments can't have type 0 (this should be added to vmem_impl.h).
31447c478bd9Sstevel@tonic-gate  */
31457c478bd9Sstevel@tonic-gate #define	VMEM_NONE	0
31467c478bd9Sstevel@tonic-gate 
31477c478bd9Sstevel@tonic-gate int
31487c478bd9Sstevel@tonic-gate vmem_alloc_walk_init(mdb_walk_state_t *wsp)
31497c478bd9Sstevel@tonic-gate {
31507c478bd9Sstevel@tonic-gate 	return (vmem_seg_walk_common_init(wsp, VMEM_ALLOC, "alloc"));
31517c478bd9Sstevel@tonic-gate }
31527c478bd9Sstevel@tonic-gate 
31537c478bd9Sstevel@tonic-gate int
31547c478bd9Sstevel@tonic-gate vmem_free_walk_init(mdb_walk_state_t *wsp)
31557c478bd9Sstevel@tonic-gate {
31567c478bd9Sstevel@tonic-gate 	return (vmem_seg_walk_common_init(wsp, VMEM_FREE, "free"));
31577c478bd9Sstevel@tonic-gate }
31587c478bd9Sstevel@tonic-gate 
31597c478bd9Sstevel@tonic-gate int
31607c478bd9Sstevel@tonic-gate vmem_span_walk_init(mdb_walk_state_t *wsp)
31617c478bd9Sstevel@tonic-gate {
31627c478bd9Sstevel@tonic-gate 	return (vmem_seg_walk_common_init(wsp, VMEM_SPAN, "span"));
31637c478bd9Sstevel@tonic-gate }
31647c478bd9Sstevel@tonic-gate 
31657c478bd9Sstevel@tonic-gate int
31667c478bd9Sstevel@tonic-gate vmem_seg_walk_init(mdb_walk_state_t *wsp)
31677c478bd9Sstevel@tonic-gate {
31687c478bd9Sstevel@tonic-gate 	return (vmem_seg_walk_common_init(wsp, VMEM_NONE, "seg"));
31697c478bd9Sstevel@tonic-gate }
31707c478bd9Sstevel@tonic-gate 
31717c478bd9Sstevel@tonic-gate int
31727c478bd9Sstevel@tonic-gate vmem_seg_walk_step(mdb_walk_state_t *wsp)
31737c478bd9Sstevel@tonic-gate {
31747c478bd9Sstevel@tonic-gate 	vmem_seg_t seg;
31757c478bd9Sstevel@tonic-gate 	vmem_seg_walk_t *vsw = wsp->walk_data;
31767c478bd9Sstevel@tonic-gate 	uintptr_t addr = vsw->vsw_current;
31777c478bd9Sstevel@tonic-gate 	static size_t seg_size = 0;
31787c478bd9Sstevel@tonic-gate 	int rval;
31797c478bd9Sstevel@tonic-gate 
31807c478bd9Sstevel@tonic-gate 	if (!seg_size) {
31817c478bd9Sstevel@tonic-gate 		if (umem_readvar(&seg_size, "vmem_seg_size") == -1) {
31827c478bd9Sstevel@tonic-gate 			mdb_warn("failed to read 'vmem_seg_size'");
31837c478bd9Sstevel@tonic-gate 			seg_size = sizeof (vmem_seg_t);
31847c478bd9Sstevel@tonic-gate 		}
31857c478bd9Sstevel@tonic-gate 	}
31867c478bd9Sstevel@tonic-gate 
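	/*
	 * vmem_seg_size may be smaller than sizeof (vmem_seg_t) if the
	 * target is not recording the debug fields (thread, timestamp and
	 * stack) at the end of the structure; zero the tail of our local
	 * copy so the callback never sees stale data for those fields.
	 */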
31877c478bd9Sstevel@tonic-gate 	if (seg_size < sizeof (seg))
31887c478bd9Sstevel@tonic-gate 		bzero((caddr_t)&seg + seg_size, sizeof (seg) - seg_size);
31897c478bd9Sstevel@tonic-gate 
31907c478bd9Sstevel@tonic-gate 	if (mdb_vread(&seg, seg_size, addr) == -1) {
31917c478bd9Sstevel@tonic-gate 		mdb_warn("couldn't read vmem_seg at %p", addr);
31927c478bd9Sstevel@tonic-gate 		return (WALK_ERR);
31937c478bd9Sstevel@tonic-gate 	}
31947c478bd9Sstevel@tonic-gate 
31957c478bd9Sstevel@tonic-gate 	vsw->vsw_current = (uintptr_t)seg.vs_anext;
31967c478bd9Sstevel@tonic-gate 	if (vsw->vsw_type != VMEM_NONE && seg.vs_type != vsw->vsw_type) {
31977c478bd9Sstevel@tonic-gate 		rval = WALK_NEXT;
31987c478bd9Sstevel@tonic-gate 	} else {
31997c478bd9Sstevel@tonic-gate 		rval = wsp->walk_callback(addr, &seg, wsp->walk_cbdata);
32007c478bd9Sstevel@tonic-gate 	}
32017c478bd9Sstevel@tonic-gate 
32027c478bd9Sstevel@tonic-gate 	if (vsw->vsw_current == vsw->vsw_start)
32037c478bd9Sstevel@tonic-gate 		return (WALK_DONE);
32047c478bd9Sstevel@tonic-gate 
32057c478bd9Sstevel@tonic-gate 	return (rval);
32067c478bd9Sstevel@tonic-gate }
32077c478bd9Sstevel@tonic-gate 
32087c478bd9Sstevel@tonic-gate void
32097c478bd9Sstevel@tonic-gate vmem_seg_walk_fini(mdb_walk_state_t *wsp)
32107c478bd9Sstevel@tonic-gate {
32117c478bd9Sstevel@tonic-gate 	vmem_seg_walk_t *vsw = wsp->walk_data;
32127c478bd9Sstevel@tonic-gate 
32137c478bd9Sstevel@tonic-gate 	mdb_free(vsw, sizeof (vmem_seg_walk_t));
32147c478bd9Sstevel@tonic-gate }
32157c478bd9Sstevel@tonic-gate 
32167c478bd9Sstevel@tonic-gate #define	VMEM_NAMEWIDTH	22
32177c478bd9Sstevel@tonic-gate 
32187c478bd9Sstevel@tonic-gate int
32197c478bd9Sstevel@tonic-gate vmem(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
32207c478bd9Sstevel@tonic-gate {
32217c478bd9Sstevel@tonic-gate 	vmem_t v, parent;
32227c478bd9Sstevel@tonic-gate 	uintptr_t paddr;
32237c478bd9Sstevel@tonic-gate 	int ident = 0;
32247c478bd9Sstevel@tonic-gate 	char c[VMEM_NAMEWIDTH];
32257c478bd9Sstevel@tonic-gate 
32267c478bd9Sstevel@tonic-gate 	if (!(flags & DCMD_ADDRSPEC)) {
32277c478bd9Sstevel@tonic-gate 		if (mdb_walk_dcmd("vmem", "vmem", argc, argv) == -1) {
32287c478bd9Sstevel@tonic-gate 			mdb_warn("can't walk vmem");
32297c478bd9Sstevel@tonic-gate 			return (DCMD_ERR);
32307c478bd9Sstevel@tonic-gate 		}
32317c478bd9Sstevel@tonic-gate 		return (DCMD_OK);
32327c478bd9Sstevel@tonic-gate 	}
32337c478bd9Sstevel@tonic-gate 
32347c478bd9Sstevel@tonic-gate 	if (DCMD_HDRSPEC(flags))
32357c478bd9Sstevel@tonic-gate 		mdb_printf("%-?s %-*s %10s %12s %9s %5s\n",
32367c478bd9Sstevel@tonic-gate 		    "ADDR", VMEM_NAMEWIDTH, "NAME", "INUSE",
32377c478bd9Sstevel@tonic-gate 		    "TOTAL", "SUCCEED", "FAIL");
32387c478bd9Sstevel@tonic-gate 
32397c478bd9Sstevel@tonic-gate 	if (mdb_vread(&v, sizeof (v), addr) == -1) {
32407c478bd9Sstevel@tonic-gate 		mdb_warn("couldn't read vmem at %p", addr);
32417c478bd9Sstevel@tonic-gate 		return (DCMD_ERR);
32427c478bd9Sstevel@tonic-gate 	}
32437c478bd9Sstevel@tonic-gate 
32447c478bd9Sstevel@tonic-gate 	for (paddr = (uintptr_t)v.vm_source; paddr != NULL; ident += 2) {
32457c478bd9Sstevel@tonic-gate 		if (mdb_vread(&parent, sizeof (parent), paddr) == -1) {
32467c478bd9Sstevel@tonic-gate 			mdb_warn("couldn't trace %p's ancestry", addr);
32477c478bd9Sstevel@tonic-gate 			ident = 0;
32487c478bd9Sstevel@tonic-gate 			break;
32497c478bd9Sstevel@tonic-gate 		}
32507c478bd9Sstevel@tonic-gate 		paddr = (uintptr_t)parent.vm_source;
32517c478bd9Sstevel@tonic-gate 	}
32527c478bd9Sstevel@tonic-gate 
32537c478bd9Sstevel@tonic-gate 	(void) mdb_snprintf(c, VMEM_NAMEWIDTH, "%*s%s", ident, "", v.vm_name);
32547c478bd9Sstevel@tonic-gate 
32557c478bd9Sstevel@tonic-gate 	mdb_printf("%0?p %-*s %10llu %12llu %9llu %5llu\n",
32567c478bd9Sstevel@tonic-gate 	    addr, VMEM_NAMEWIDTH, c,
32577c478bd9Sstevel@tonic-gate 	    v.vm_kstat.vk_mem_inuse, v.vm_kstat.vk_mem_total,
32587c478bd9Sstevel@tonic-gate 	    v.vm_kstat.vk_alloc, v.vm_kstat.vk_fail);
32597c478bd9Sstevel@tonic-gate 
32607c478bd9Sstevel@tonic-gate 	return (DCMD_OK);
32617c478bd9Sstevel@tonic-gate }
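
/*
 * Example usage (a sketch; arena addresses and names depend on the target):
 * with no address, ::vmem walks all arenas and indents each arena's name
 * under its vm_source parent; with an address, it reports a single arena.
 *
 *	> ::vmem
 *	> <arena-addr>::vmem
 */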
32627c478bd9Sstevel@tonic-gate 
32637c478bd9Sstevel@tonic-gate void
32647c478bd9Sstevel@tonic-gate vmem_seg_help(void)
32657c478bd9Sstevel@tonic-gate {
32667c478bd9Sstevel@tonic-gate 	mdb_printf("%s\n",
32677c478bd9Sstevel@tonic-gate "Display the contents of vmem_seg_ts, with optional filtering.\n"
32687c478bd9Sstevel@tonic-gate "\n"
32697c478bd9Sstevel@tonic-gate "A vmem_seg_t represents a range of addresses (or arbitrary numbers)\n"
32707c478bd9Sstevel@tonic-gate "making up a single chunk of data.  Only ALLOC segments have debugging\n"
32717c478bd9Sstevel@tonic-gate "information.\n");
32727c478bd9Sstevel@tonic-gate 	mdb_dec_indent(2);
32737c478bd9Sstevel@tonic-gate 	mdb_printf("%<b>OPTIONS%</b>\n");
32747c478bd9Sstevel@tonic-gate 	mdb_inc_indent(2);
32757c478bd9Sstevel@tonic-gate 	mdb_printf("%s",
32767c478bd9Sstevel@tonic-gate "  -v    Display the full content of the vmem_seg, including its stack trace\n"
32777c478bd9Sstevel@tonic-gate "  -s    report the size of the segment, instead of the end address\n"
32787c478bd9Sstevel@tonic-gate "  -c caller\n"
32797c478bd9Sstevel@tonic-gate "        filter out segments without the function/PC in their stack trace\n"
32807c478bd9Sstevel@tonic-gate "  -e earliest\n"
32817c478bd9Sstevel@tonic-gate "        filter out segments timestamped before earliest\n"
32827c478bd9Sstevel@tonic-gate "  -l latest\n"
32837c478bd9Sstevel@tonic-gate "        filter out segments timestamped after latest\n"
32847c478bd9Sstevel@tonic-gate "  -m minsize\n"
32857c478bd9Sstevel@tonic-gate "        filter out segments smaller than minsize\n"
32867c478bd9Sstevel@tonic-gate "  -M maxsize\n"
32877c478bd9Sstevel@tonic-gate "        filter out segments larger than maxsize\n"
32887c478bd9Sstevel@tonic-gate "  -t thread\n"
32897c478bd9Sstevel@tonic-gate "        filter out segments not involving thread\n"
32907c478bd9Sstevel@tonic-gate "  -T type\n"
32917c478bd9Sstevel@tonic-gate "        filter out segments not of type 'type'\n"
32927c478bd9Sstevel@tonic-gate "        type is one of: ALLOC/FREE/SPAN/ROTOR/WALKER\n");
32937c478bd9Sstevel@tonic-gate }
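
/*
 * Example invocation (the arena address below is a placeholder): list only
 * the ALLOC segments of an arena that are at least 8k in size, reporting
 * sizes rather than end addresses:
 *
 *	> <vmem-addr>::walk vmem_seg | ::vmem_seg -s -T ALLOC -m 0x2000
 */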
32947c478bd9Sstevel@tonic-gate 
32957c478bd9Sstevel@tonic-gate 
32967c478bd9Sstevel@tonic-gate /*ARGSUSED*/
32977c478bd9Sstevel@tonic-gate int
32987c478bd9Sstevel@tonic-gate vmem_seg(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
32997c478bd9Sstevel@tonic-gate {
33007c478bd9Sstevel@tonic-gate 	vmem_seg_t vs;
33017c478bd9Sstevel@tonic-gate 	uintptr_t *stk = vs.vs_stack;
33027c478bd9Sstevel@tonic-gate 	uintptr_t sz;
33037c478bd9Sstevel@tonic-gate 	uint8_t t;
33047c478bd9Sstevel@tonic-gate 	const char *type = NULL;
33057c478bd9Sstevel@tonic-gate 	GElf_Sym sym;
33067c478bd9Sstevel@tonic-gate 	char c[MDB_SYM_NAMLEN];
33077c478bd9Sstevel@tonic-gate 	int no_debug;
33087c478bd9Sstevel@tonic-gate 	int i;
33097c478bd9Sstevel@tonic-gate 	int depth;
33107c478bd9Sstevel@tonic-gate 	uintptr_t laddr, haddr;
33117c478bd9Sstevel@tonic-gate 
33127c478bd9Sstevel@tonic-gate 	uintptr_t caller = NULL, thread = NULL;
33137c478bd9Sstevel@tonic-gate 	uintptr_t minsize = 0, maxsize = 0;
33147c478bd9Sstevel@tonic-gate 
33157c478bd9Sstevel@tonic-gate 	hrtime_t earliest = 0, latest = 0;
33167c478bd9Sstevel@tonic-gate 
33177c478bd9Sstevel@tonic-gate 	uint_t size = 0;
33187c478bd9Sstevel@tonic-gate 	uint_t verbose = 0;
33197c478bd9Sstevel@tonic-gate 
33207c478bd9Sstevel@tonic-gate 	if (!(flags & DCMD_ADDRSPEC))
33217c478bd9Sstevel@tonic-gate 		return (DCMD_USAGE);
33227c478bd9Sstevel@tonic-gate 
33237c478bd9Sstevel@tonic-gate 	if (mdb_getopts(argc, argv,
33247c478bd9Sstevel@tonic-gate 	    'c', MDB_OPT_UINTPTR, &caller,
33257c478bd9Sstevel@tonic-gate 	    'e', MDB_OPT_UINT64, &earliest,
33267c478bd9Sstevel@tonic-gate 	    'l', MDB_OPT_UINT64, &latest,
33277c478bd9Sstevel@tonic-gate 	    's', MDB_OPT_SETBITS, TRUE, &size,
33287c478bd9Sstevel@tonic-gate 	    'm', MDB_OPT_UINTPTR, &minsize,
33297c478bd9Sstevel@tonic-gate 	    'M', MDB_OPT_UINTPTR, &maxsize,
33307c478bd9Sstevel@tonic-gate 	    't', MDB_OPT_UINTPTR, &thread,
33317c478bd9Sstevel@tonic-gate 	    'T', MDB_OPT_STR, &type,
33327c478bd9Sstevel@tonic-gate 	    'v', MDB_OPT_SETBITS, TRUE, &verbose,
33337c478bd9Sstevel@tonic-gate 	    NULL) != argc)
33347c478bd9Sstevel@tonic-gate 		return (DCMD_USAGE);
33357c478bd9Sstevel@tonic-gate 
33367c478bd9Sstevel@tonic-gate 	if (DCMD_HDRSPEC(flags) && !(flags & DCMD_PIPE_OUT)) {
33377c478bd9Sstevel@tonic-gate 		if (verbose) {
33387c478bd9Sstevel@tonic-gate 			mdb_printf("%16s %4s %16s %16s %16s\n"
33397c478bd9Sstevel@tonic-gate 			    "%<u>%16s %4s %16s %16s %16s%</u>\n",
33407c478bd9Sstevel@tonic-gate 			    "ADDR", "TYPE", "START", "END", "SIZE",
33417c478bd9Sstevel@tonic-gate 			    "", "", "THREAD", "TIMESTAMP", "");
33427c478bd9Sstevel@tonic-gate 		} else {
33437c478bd9Sstevel@tonic-gate 			mdb_printf("%?s %4s %?s %?s %s\n", "ADDR", "TYPE",
33447c478bd9Sstevel@tonic-gate 			    "START", size ? "SIZE" : "END", "WHO");
33457c478bd9Sstevel@tonic-gate 		}
33467c478bd9Sstevel@tonic-gate 	}
33477c478bd9Sstevel@tonic-gate 
33487c478bd9Sstevel@tonic-gate 	if (mdb_vread(&vs, sizeof (vs), addr) == -1) {
33497c478bd9Sstevel@tonic-gate 		mdb_warn("couldn't read vmem_seg at %p", addr);
33507c478bd9Sstevel@tonic-gate 		return (DCMD_ERR);
33517c478bd9Sstevel@tonic-gate 	}
33527c478bd9Sstevel@tonic-gate 
33537c478bd9Sstevel@tonic-gate 	if (type != NULL) {
33547c478bd9Sstevel@tonic-gate 		if (strcmp(type, "ALLC") == 0 || strcmp(type, "ALLOC") == 0)
33557c478bd9Sstevel@tonic-gate 			t = VMEM_ALLOC;
33567c478bd9Sstevel@tonic-gate 		else if (strcmp(type, "FREE") == 0)
33577c478bd9Sstevel@tonic-gate 			t = VMEM_FREE;
33587c478bd9Sstevel@tonic-gate 		else if (strcmp(type, "SPAN") == 0)
33597c478bd9Sstevel@tonic-gate 			t = VMEM_SPAN;
33607c478bd9Sstevel@tonic-gate 		else if (strcmp(type, "ROTR") == 0 ||
33617c478bd9Sstevel@tonic-gate 		    strcmp(type, "ROTOR") == 0)
33627c478bd9Sstevel@tonic-gate 			t = VMEM_ROTOR;
33637c478bd9Sstevel@tonic-gate 		else if (strcmp(type, "WLKR") == 0 ||
33647c478bd9Sstevel@tonic-gate 		    strcmp(type, "WALKER") == 0)
33657c478bd9Sstevel@tonic-gate 			t = VMEM_WALKER;
33667c478bd9Sstevel@tonic-gate 		else {
33677c478bd9Sstevel@tonic-gate 			mdb_warn("\"%s\" is not a recognized vmem_seg type\n",
33687c478bd9Sstevel@tonic-gate 			    type);
33697c478bd9Sstevel@tonic-gate 			return (DCMD_ERR);
33707c478bd9Sstevel@tonic-gate 		}
33717c478bd9Sstevel@tonic-gate 
33727c478bd9Sstevel@tonic-gate 		if (vs.vs_type != t)
33737c478bd9Sstevel@tonic-gate 			return (DCMD_OK);
33747c478bd9Sstevel@tonic-gate 	}
33757c478bd9Sstevel@tonic-gate 
33767c478bd9Sstevel@tonic-gate 	sz = vs.vs_end - vs.vs_start;
33777c478bd9Sstevel@tonic-gate 
33787c478bd9Sstevel@tonic-gate 	if (minsize != 0 && sz < minsize)
33797c478bd9Sstevel@tonic-gate 		return (DCMD_OK);
33807c478bd9Sstevel@tonic-gate 
33817c478bd9Sstevel@tonic-gate 	if (maxsize != 0 && sz > maxsize)
33827c478bd9Sstevel@tonic-gate 		return (DCMD_OK);
33837c478bd9Sstevel@tonic-gate 
33847c478bd9Sstevel@tonic-gate 	t = vs.vs_type;
33857c478bd9Sstevel@tonic-gate 	depth = vs.vs_depth;
33867c478bd9Sstevel@tonic-gate 
33877c478bd9Sstevel@tonic-gate 	/*
33887c478bd9Sstevel@tonic-gate 	 * debug info, when present, is only accurate for VMEM_ALLOC segments
33897c478bd9Sstevel@tonic-gate 	 */
33907c478bd9Sstevel@tonic-gate 	no_debug = (t != VMEM_ALLOC) ||
33917c478bd9Sstevel@tonic-gate 	    (depth == 0 || depth > VMEM_STACK_DEPTH);
33927c478bd9Sstevel@tonic-gate 
33937c478bd9Sstevel@tonic-gate 	if (no_debug) {
33947c478bd9Sstevel@tonic-gate 		if (caller != NULL || thread != NULL || earliest != 0 ||
33957c478bd9Sstevel@tonic-gate 		    latest != 0)
33967c478bd9Sstevel@tonic-gate 			return (DCMD_OK);		/* not enough info */
33977c478bd9Sstevel@tonic-gate 	} else {
33987c478bd9Sstevel@tonic-gate 		if (caller != NULL) {
33997c478bd9Sstevel@tonic-gate 			laddr = caller;
34007c478bd9Sstevel@tonic-gate 			haddr = caller + sizeof (caller);
34017c478bd9Sstevel@tonic-gate 
34027c478bd9Sstevel@tonic-gate 			if (mdb_lookup_by_addr(caller, MDB_SYM_FUZZY, c,
34037c478bd9Sstevel@tonic-gate 			    sizeof (c), &sym) != -1 &&
34047c478bd9Sstevel@tonic-gate 			    caller == (uintptr_t)sym.st_value) {
34057c478bd9Sstevel@tonic-gate 				/*
34067c478bd9Sstevel@tonic-gate 				 * We were provided an exact symbol value; any
34077c478bd9Sstevel@tonic-gate 				 * address in the function is valid.
34087c478bd9Sstevel@tonic-gate 				 */
34097c478bd9Sstevel@tonic-gate 				laddr = (uintptr_t)sym.st_value;
34107c478bd9Sstevel@tonic-gate 				haddr = (uintptr_t)sym.st_value + sym.st_size;
34117c478bd9Sstevel@tonic-gate 			}
34127c478bd9Sstevel@tonic-gate 
34137c478bd9Sstevel@tonic-gate 			for (i = 0; i < depth; i++)
34147c478bd9Sstevel@tonic-gate 				if (vs.vs_stack[i] >= laddr &&
34157c478bd9Sstevel@tonic-gate 				    vs.vs_stack[i] < haddr)
34167c478bd9Sstevel@tonic-gate 					break;
34177c478bd9Sstevel@tonic-gate 
34187c478bd9Sstevel@tonic-gate 			if (i == depth)
34197c478bd9Sstevel@tonic-gate 				return (DCMD_OK);
34207c478bd9Sstevel@tonic-gate 		}
34217c478bd9Sstevel@tonic-gate 
34227c478bd9Sstevel@tonic-gate 		if (thread != NULL && (uintptr_t)vs.vs_thread != thread)
34237c478bd9Sstevel@tonic-gate 			return (DCMD_OK);
34247c478bd9Sstevel@tonic-gate 
34257c478bd9Sstevel@tonic-gate 		if (earliest != 0 && vs.vs_timestamp < earliest)
34267c478bd9Sstevel@tonic-gate 			return (DCMD_OK);
34277c478bd9Sstevel@tonic-gate 
34287c478bd9Sstevel@tonic-gate 		if (latest != 0 && vs.vs_timestamp > latest)
34297c478bd9Sstevel@tonic-gate 			return (DCMD_OK);
34307c478bd9Sstevel@tonic-gate 	}
34317c478bd9Sstevel@tonic-gate 
34327c478bd9Sstevel@tonic-gate 	type = (t == VMEM_ALLOC ? "ALLC" :
34337c478bd9Sstevel@tonic-gate 	    t == VMEM_FREE ? "FREE" :
34347c478bd9Sstevel@tonic-gate 	    t == VMEM_SPAN ? "SPAN" :
34357c478bd9Sstevel@tonic-gate 	    t == VMEM_ROTOR ? "ROTR" :
34367c478bd9Sstevel@tonic-gate 	    t == VMEM_WALKER ? "WLKR" :
34377c478bd9Sstevel@tonic-gate 	    "????");
34387c478bd9Sstevel@tonic-gate 
34397c478bd9Sstevel@tonic-gate 	if (flags & DCMD_PIPE_OUT) {
34407c478bd9Sstevel@tonic-gate 		mdb_printf("%#r\n", addr);
34417c478bd9Sstevel@tonic-gate 		return (DCMD_OK);
34427c478bd9Sstevel@tonic-gate 	}
34437c478bd9Sstevel@tonic-gate 
34447c478bd9Sstevel@tonic-gate 	if (verbose) {
34457c478bd9Sstevel@tonic-gate 		mdb_printf("%<b>%16p%</b> %4s %16p %16p %16d\n",
34467c478bd9Sstevel@tonic-gate 		    addr, type, vs.vs_start, vs.vs_end, sz);
34477c478bd9Sstevel@tonic-gate 
34487c478bd9Sstevel@tonic-gate 		if (no_debug)
34497c478bd9Sstevel@tonic-gate 			return (DCMD_OK);
34507c478bd9Sstevel@tonic-gate 
34517c478bd9Sstevel@tonic-gate 		mdb_printf("%16s %4s %16d %16llx\n",
34527c478bd9Sstevel@tonic-gate 		    "", "", vs.vs_thread, vs.vs_timestamp);
34537c478bd9Sstevel@tonic-gate 
34547c478bd9Sstevel@tonic-gate 		mdb_inc_indent(17);
34557c478bd9Sstevel@tonic-gate 		for (i = 0; i < depth; i++) {
34567c478bd9Sstevel@tonic-gate 			mdb_printf("%a\n", stk[i]);
34577c478bd9Sstevel@tonic-gate 		}
34587c478bd9Sstevel@tonic-gate 		mdb_dec_indent(17);
34597c478bd9Sstevel@tonic-gate 		mdb_printf("\n");
34607c478bd9Sstevel@tonic-gate 	} else {
34617c478bd9Sstevel@tonic-gate 		mdb_printf("%0?p %4s %0?p %0?p", addr, type,
34627c478bd9Sstevel@tonic-gate 		    vs.vs_start, size ? sz : vs.vs_end);
34637c478bd9Sstevel@tonic-gate 
34647c478bd9Sstevel@tonic-gate 		if (no_debug) {
34657c478bd9Sstevel@tonic-gate 			mdb_printf("\n");
34667c478bd9Sstevel@tonic-gate 			return (DCMD_OK);
34677c478bd9Sstevel@tonic-gate 		}
34687c478bd9Sstevel@tonic-gate 
34697c478bd9Sstevel@tonic-gate 		for (i = 0; i < depth; i++) {
34707c478bd9Sstevel@tonic-gate 			if (mdb_lookup_by_addr(stk[i], MDB_SYM_FUZZY,
34717c478bd9Sstevel@tonic-gate 			    c, sizeof (c), &sym) == -1)
34727c478bd9Sstevel@tonic-gate 				continue;
34737c478bd9Sstevel@tonic-gate 			if (is_umem_sym(c, "vmem_"))
34747c478bd9Sstevel@tonic-gate 				continue;
34757c478bd9Sstevel@tonic-gate 			break;
34767c478bd9Sstevel@tonic-gate 		}
34777c478bd9Sstevel@tonic-gate 		mdb_printf(" %a\n", stk[i]);
34787c478bd9Sstevel@tonic-gate 	}
34797c478bd9Sstevel@tonic-gate 	return (DCMD_OK);
34807c478bd9Sstevel@tonic-gate }
34817c478bd9Sstevel@tonic-gate 
34827c478bd9Sstevel@tonic-gate /*ARGSUSED*/
34837c478bd9Sstevel@tonic-gate static int
34847c478bd9Sstevel@tonic-gate showbc(uintptr_t addr, const umem_bufctl_audit_t *bcp, hrtime_t *newest)
34857c478bd9Sstevel@tonic-gate {
34867c478bd9Sstevel@tonic-gate 	char name[UMEM_CACHE_NAMELEN + 1];
34877c478bd9Sstevel@tonic-gate 	hrtime_t delta;
34887c478bd9Sstevel@tonic-gate 	int i, depth;
34897c478bd9Sstevel@tonic-gate 
34907c478bd9Sstevel@tonic-gate 	if (bcp->bc_timestamp == 0)
34917c478bd9Sstevel@tonic-gate 		return (WALK_DONE);
34927c478bd9Sstevel@tonic-gate 
34937c478bd9Sstevel@tonic-gate 	if (*newest == 0)
34947c478bd9Sstevel@tonic-gate 		*newest = bcp->bc_timestamp;
34957c478bd9Sstevel@tonic-gate 
34967c478bd9Sstevel@tonic-gate 	delta = *newest - bcp->bc_timestamp;
34977c478bd9Sstevel@tonic-gate 	depth = MIN(bcp->bc_depth, umem_stack_depth);
34987c478bd9Sstevel@tonic-gate 
34997c478bd9Sstevel@tonic-gate 	if (mdb_readstr(name, sizeof (name), (uintptr_t)
35007c478bd9Sstevel@tonic-gate 	    &bcp->bc_cache->cache_name) <= 0)
35017c478bd9Sstevel@tonic-gate 		(void) mdb_snprintf(name, sizeof (name), "%a", bcp->bc_cache);
35027c478bd9Sstevel@tonic-gate 
35037c478bd9Sstevel@tonic-gate 	mdb_printf("\nT-%lld.%09lld  addr=%p  %s\n",
35047c478bd9Sstevel@tonic-gate 	    delta / NANOSEC, delta % NANOSEC, bcp->bc_addr, name);
35057c478bd9Sstevel@tonic-gate 
35067c478bd9Sstevel@tonic-gate 	for (i = 0; i < depth; i++)
35077c478bd9Sstevel@tonic-gate 		mdb_printf("\t %a\n", bcp->bc_stack[i]);
35087c478bd9Sstevel@tonic-gate 
35097c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
35107c478bd9Sstevel@tonic-gate }
35117c478bd9Sstevel@tonic-gate 
35127c478bd9Sstevel@tonic-gate int
35137c478bd9Sstevel@tonic-gate umalog(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
35147c478bd9Sstevel@tonic-gate {
35157c478bd9Sstevel@tonic-gate 	const char *logname = "umem_transaction_log";
35167c478bd9Sstevel@tonic-gate 	hrtime_t newest = 0;
35177c478bd9Sstevel@tonic-gate 
35187c478bd9Sstevel@tonic-gate 	if ((flags & DCMD_ADDRSPEC) || argc > 1)
35197c478bd9Sstevel@tonic-gate 		return (DCMD_USAGE);
35207c478bd9Sstevel@tonic-gate 
35217c478bd9Sstevel@tonic-gate 	if (argc > 0) {
35227c478bd9Sstevel@tonic-gate 		if (argv->a_type != MDB_TYPE_STRING)
35237c478bd9Sstevel@tonic-gate 			return (DCMD_USAGE);
35247c478bd9Sstevel@tonic-gate 		if (strcmp(argv->a_un.a_str, "fail") == 0)
35257c478bd9Sstevel@tonic-gate 			logname = "umem_failure_log";
35267c478bd9Sstevel@tonic-gate 		else if (strcmp(argv->a_un.a_str, "slab") == 0)
35277c478bd9Sstevel@tonic-gate 			logname = "umem_slab_log";
35287c478bd9Sstevel@tonic-gate 		else
35297c478bd9Sstevel@tonic-gate 			return (DCMD_USAGE);
35307c478bd9Sstevel@tonic-gate 	}
35317c478bd9Sstevel@tonic-gate 
35327c478bd9Sstevel@tonic-gate 	if (umem_readvar(&addr, logname) == -1) {
35337c478bd9Sstevel@tonic-gate 		mdb_warn("failed to read %s log header pointer", logname);
35347c478bd9Sstevel@tonic-gate 		return (DCMD_ERR);
35357c478bd9Sstevel@tonic-gate 	}
35367c478bd9Sstevel@tonic-gate 
35377c478bd9Sstevel@tonic-gate 	if (mdb_pwalk("umem_log", (mdb_walk_cb_t)showbc, &newest, addr) == -1) {
35387c478bd9Sstevel@tonic-gate 		mdb_warn("failed to walk umem log");
35397c478bd9Sstevel@tonic-gate 		return (DCMD_ERR);
35407c478bd9Sstevel@tonic-gate 	}
35417c478bd9Sstevel@tonic-gate 
35427c478bd9Sstevel@tonic-gate 	return (DCMD_OK);
35437c478bd9Sstevel@tonic-gate }
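
/*
 * Example: display the umem failure log (if logging is enabled in the
 * target); entries are printed from newest to oldest, with timestamps
 * shown as "T-sec.nsec" offsets from the most recent record:
 *
 *	> ::umalog fail
 */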
35447c478bd9Sstevel@tonic-gate 
35457c478bd9Sstevel@tonic-gate /*
35467c478bd9Sstevel@tonic-gate  * As the final lure for die-hard crash(1M) users, we provide ::umausers here.
35477c478bd9Sstevel@tonic-gate  * The first piece is a structure which we use to accumulate umem_cache_t
35487c478bd9Sstevel@tonic-gate  * addresses of interest.  umc_add() is used as a callback for the umem_cache
35497c478bd9Sstevel@tonic-gate  * walker; we either add all caches, or ones named explicitly as arguments.
35507c478bd9Sstevel@tonic-gate  */
35517c478bd9Sstevel@tonic-gate 
35527c478bd9Sstevel@tonic-gate typedef struct umclist {
35537c478bd9Sstevel@tonic-gate 	const char *umc_name;			/* Name to match (or NULL) */
35547c478bd9Sstevel@tonic-gate 	uintptr_t *umc_caches;			/* List of umem_cache_t addrs */
35557c478bd9Sstevel@tonic-gate 	int umc_nelems;				/* Num entries in umc_caches */
35567c478bd9Sstevel@tonic-gate 	int umc_size;				/* Size of umc_caches array */
35577c478bd9Sstevel@tonic-gate } umclist_t;
35587c478bd9Sstevel@tonic-gate 
35597c478bd9Sstevel@tonic-gate static int
35607c478bd9Sstevel@tonic-gate umc_add(uintptr_t addr, const umem_cache_t *cp, umclist_t *umc)
35617c478bd9Sstevel@tonic-gate {
35627c478bd9Sstevel@tonic-gate 	void *p;
35637c478bd9Sstevel@tonic-gate 	int s;
35647c478bd9Sstevel@tonic-gate 
35657c478bd9Sstevel@tonic-gate 	if (umc->umc_name == NULL ||
35667c478bd9Sstevel@tonic-gate 	    strcmp(cp->cache_name, umc->umc_name) == 0) {
35677c478bd9Sstevel@tonic-gate 		/*
35687c478bd9Sstevel@tonic-gate 		 * If we have a match, grow our array (if necessary), and then
35697c478bd9Sstevel@tonic-gate 		 * add the virtual address of the matching cache to our list.
35707c478bd9Sstevel@tonic-gate 		 */
35717c478bd9Sstevel@tonic-gate 		if (umc->umc_nelems >= umc->umc_size) {
35727c478bd9Sstevel@tonic-gate 			s = umc->umc_size ? umc->umc_size * 2 : 256;
35737c478bd9Sstevel@tonic-gate 			p = mdb_alloc(sizeof (uintptr_t) * s, UM_SLEEP | UM_GC);
35747c478bd9Sstevel@tonic-gate 
35757c478bd9Sstevel@tonic-gate 			bcopy(umc->umc_caches, p,
35767c478bd9Sstevel@tonic-gate 			    sizeof (uintptr_t) * umc->umc_size);
35777c478bd9Sstevel@tonic-gate 
35787c478bd9Sstevel@tonic-gate 			umc->umc_caches = p;
35797c478bd9Sstevel@tonic-gate 			umc->umc_size = s;
35807c478bd9Sstevel@tonic-gate 		}
35817c478bd9Sstevel@tonic-gate 
35827c478bd9Sstevel@tonic-gate 		umc->umc_caches[umc->umc_nelems++] = addr;
35837c478bd9Sstevel@tonic-gate 		return (umc->umc_name ? WALK_DONE : WALK_NEXT);
35847c478bd9Sstevel@tonic-gate 	}
35857c478bd9Sstevel@tonic-gate 
35867c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
35877c478bd9Sstevel@tonic-gate }
35887c478bd9Sstevel@tonic-gate 
35897c478bd9Sstevel@tonic-gate /*
35907c478bd9Sstevel@tonic-gate  * The second piece of ::umausers is a hash table of allocations.  Each
35917c478bd9Sstevel@tonic-gate  * allocation owner is identified by its stack trace and data_size.  We then
35927c478bd9Sstevel@tonic-gate  * track the total bytes of all such allocations, and the number of allocations
35937c478bd9Sstevel@tonic-gate  * to report at the end.  Once we have a list of caches, we walk through the
35947c478bd9Sstevel@tonic-gate  * allocated bufctls of each, and update our hash table accordingly.
35957c478bd9Sstevel@tonic-gate  */
35967c478bd9Sstevel@tonic-gate 
35977c478bd9Sstevel@tonic-gate typedef struct umowner {
35987c478bd9Sstevel@tonic-gate 	struct umowner *umo_head;		/* First hash elt in bucket */
35997c478bd9Sstevel@tonic-gate 	struct umowner *umo_next;		/* Next hash elt in chain */
36007c478bd9Sstevel@tonic-gate 	size_t umo_signature;			/* Hash table signature */
36017c478bd9Sstevel@tonic-gate 	uint_t umo_num;				/* Number of allocations */
36027c478bd9Sstevel@tonic-gate 	size_t umo_data_size;			/* Size of each allocation */
36037c478bd9Sstevel@tonic-gate 	size_t umo_total_size;			/* Total bytes of allocation */
36047c478bd9Sstevel@tonic-gate 	int umo_depth;				/* Depth of stack trace */
36057c478bd9Sstevel@tonic-gate 	uintptr_t *umo_stack;			/* Stack trace */
36067c478bd9Sstevel@tonic-gate } umowner_t;
36077c478bd9Sstevel@tonic-gate 
36087c478bd9Sstevel@tonic-gate typedef struct umusers {
36097c478bd9Sstevel@tonic-gate 	const umem_cache_t *umu_cache;		/* Current umem cache */
36107c478bd9Sstevel@tonic-gate 	umowner_t *umu_hash;			/* Hash table of owners */
36117c478bd9Sstevel@tonic-gate 	uintptr_t *umu_stacks;			/* stacks for owners */
36127c478bd9Sstevel@tonic-gate 	int umu_nelems;				/* Number of entries in use */
36137c478bd9Sstevel@tonic-gate 	int umu_size;				/* Total number of entries */
36147c478bd9Sstevel@tonic-gate } umusers_t;
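
/*
 * The hash table is grown in power-of-two steps (1024, 2048, ...), so a
 * bucket can be selected with a simple mask, (signature & (umu_size - 1)),
 * instead of a division.
 */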
36157c478bd9Sstevel@tonic-gate 
36167c478bd9Sstevel@tonic-gate static void
36177c478bd9Sstevel@tonic-gate umu_add(umusers_t *umu, const umem_bufctl_audit_t *bcp,
36187c478bd9Sstevel@tonic-gate     size_t size, size_t data_size)
36197c478bd9Sstevel@tonic-gate {
36207c478bd9Sstevel@tonic-gate 	int i, depth = MIN(bcp->bc_depth, umem_stack_depth);
36217c478bd9Sstevel@tonic-gate 	size_t bucket, signature = data_size;
36227c478bd9Sstevel@tonic-gate 	umowner_t *umo, *umoend;
36237c478bd9Sstevel@tonic-gate 
36247c478bd9Sstevel@tonic-gate 	/*
36257c478bd9Sstevel@tonic-gate 	 * If the hash table is full, double its size and rehash everything.
36267c478bd9Sstevel@tonic-gate 	 */
36277c478bd9Sstevel@tonic-gate 	if (umu->umu_nelems >= umu->umu_size) {
36287c478bd9Sstevel@tonic-gate 		int s = umu->umu_size ? umu->umu_size * 2 : 1024;
36297c478bd9Sstevel@tonic-gate 		size_t umowner_size = sizeof (umowner_t);
36307c478bd9Sstevel@tonic-gate 		size_t trace_size = umem_stack_depth * sizeof (uintptr_t);
36317c478bd9Sstevel@tonic-gate 		uintptr_t *new_stacks;
36327c478bd9Sstevel@tonic-gate 
36337c478bd9Sstevel@tonic-gate 		umo = mdb_alloc(umowner_size * s, UM_SLEEP | UM_GC);
36347c478bd9Sstevel@tonic-gate 		new_stacks = mdb_alloc(trace_size * s, UM_SLEEP | UM_GC);
36357c478bd9Sstevel@tonic-gate 
36367c478bd9Sstevel@tonic-gate 		bcopy(umu->umu_hash, umo, umowner_size * umu->umu_size);
36377c478bd9Sstevel@tonic-gate 		bcopy(umu->umu_stacks, new_stacks, trace_size * umu->umu_size);
36387c478bd9Sstevel@tonic-gate 		umu->umu_hash = umo;
36397c478bd9Sstevel@tonic-gate 		umu->umu_stacks = new_stacks;
36407c478bd9Sstevel@tonic-gate 		umu->umu_size = s;
36417c478bd9Sstevel@tonic-gate 
36427c478bd9Sstevel@tonic-gate 		umoend = umu->umu_hash + umu->umu_size;
36437c478bd9Sstevel@tonic-gate 		for (umo = umu->umu_hash; umo < umoend; umo++) {
36447c478bd9Sstevel@tonic-gate 			umo->umo_head = NULL;
36457c478bd9Sstevel@tonic-gate 			umo->umo_stack = &umu->umu_stacks[
36467c478bd9Sstevel@tonic-gate 			    umem_stack_depth * (umo - umu->umu_hash)];
36477c478bd9Sstevel@tonic-gate 		}
36487c478bd9Sstevel@tonic-gate 
36497c478bd9Sstevel@tonic-gate 		umoend = umu->umu_hash + umu->umu_nelems;
36507c478bd9Sstevel@tonic-gate 		for (umo = umu->umu_hash; umo < umoend; umo++) {
36517c478bd9Sstevel@tonic-gate 			bucket = umo->umo_signature & (umu->umu_size - 1);
36527c478bd9Sstevel@tonic-gate 			umo->umo_next = umu->umu_hash[bucket].umo_head;
36537c478bd9Sstevel@tonic-gate 			umu->umu_hash[bucket].umo_head = umo;
36547c478bd9Sstevel@tonic-gate 		}
36557c478bd9Sstevel@tonic-gate 	}
36567c478bd9Sstevel@tonic-gate 
36577c478bd9Sstevel@tonic-gate 	/*
36587c478bd9Sstevel@tonic-gate 	 * Finish computing the hash signature from the stack trace, and then
36597c478bd9Sstevel@tonic-gate 	 * see if the owner is in the hash table.  If so, update our stats.
36607c478bd9Sstevel@tonic-gate 	 */
36617c478bd9Sstevel@tonic-gate 	for (i = 0; i < depth; i++)
36627c478bd9Sstevel@tonic-gate 		signature += bcp->bc_stack[i];
36637c478bd9Sstevel@tonic-gate 
36647c478bd9Sstevel@tonic-gate 	bucket = signature & (umu->umu_size - 1);
36657c478bd9Sstevel@tonic-gate 
36667c478bd9Sstevel@tonic-gate 	for (umo = umu->umu_hash[bucket].umo_head; umo; umo = umo->umo_next) {
36677c478bd9Sstevel@tonic-gate 		if (umo->umo_signature == signature) {
36687c478bd9Sstevel@tonic-gate 			size_t difference = 0;
36697c478bd9Sstevel@tonic-gate 
36707c478bd9Sstevel@tonic-gate 			difference |= umo->umo_data_size - data_size;
36717c478bd9Sstevel@tonic-gate 			difference |= umo->umo_depth - depth;
36727c478bd9Sstevel@tonic-gate 
36737c478bd9Sstevel@tonic-gate 			for (i = 0; i < depth; i++) {
36747c478bd9Sstevel@tonic-gate 				difference |= umo->umo_stack[i] -
36757c478bd9Sstevel@tonic-gate 				    bcp->bc_stack[i];
36767c478bd9Sstevel@tonic-gate 			}
36777c478bd9Sstevel@tonic-gate 
36787c478bd9Sstevel@tonic-gate 			if (difference == 0) {
36797c478bd9Sstevel@tonic-gate 				umo->umo_total_size += size;
36807c478bd9Sstevel@tonic-gate 				umo->umo_num++;
36817c478bd9Sstevel@tonic-gate 				return;
36827c478bd9Sstevel@tonic-gate 			}
36837c478bd9Sstevel@tonic-gate 		}
36847c478bd9Sstevel@tonic-gate 	}
36857c478bd9Sstevel@tonic-gate 
36867c478bd9Sstevel@tonic-gate 	/*
36877c478bd9Sstevel@tonic-gate 	 * If the owner is not yet hashed, grab the next element and fill it
36887c478bd9Sstevel@tonic-gate 	 * in based on the allocation information.
36897c478bd9Sstevel@tonic-gate 	 */
36907c478bd9Sstevel@tonic-gate 	umo = &umu->umu_hash[umu->umu_nelems++];
36917c478bd9Sstevel@tonic-gate 	umo->umo_next = umu->umu_hash[bucket].umo_head;
36927c478bd9Sstevel@tonic-gate 	umu->umu_hash[bucket].umo_head = umo;
36937c478bd9Sstevel@tonic-gate 
36947c478bd9Sstevel@tonic-gate 	umo->umo_signature = signature;
36957c478bd9Sstevel@tonic-gate 	umo->umo_num = 1;
36967c478bd9Sstevel@tonic-gate 	umo->umo_data_size = data_size;
36977c478bd9Sstevel@tonic-gate 	umo->umo_total_size = size;
36987c478bd9Sstevel@tonic-gate 	umo->umo_depth = depth;
36997c478bd9Sstevel@tonic-gate 
37007c478bd9Sstevel@tonic-gate 	for (i = 0; i < depth; i++)
37017c478bd9Sstevel@tonic-gate 		umo->umo_stack[i] = bcp->bc_stack[i];
37027c478bd9Sstevel@tonic-gate }
37037c478bd9Sstevel@tonic-gate 
37047c478bd9Sstevel@tonic-gate /*
37057c478bd9Sstevel@tonic-gate  * When ::umausers is invoked without the -f flag, we simply update our hash
37067c478bd9Sstevel@tonic-gate  * table with the information from each allocated bufctl.
37077c478bd9Sstevel@tonic-gate  */
37087c478bd9Sstevel@tonic-gate /*ARGSUSED*/
37097c478bd9Sstevel@tonic-gate static int
37107c478bd9Sstevel@tonic-gate umause1(uintptr_t addr, const umem_bufctl_audit_t *bcp, umusers_t *umu)
37117c478bd9Sstevel@tonic-gate {
37127c478bd9Sstevel@tonic-gate 	const umem_cache_t *cp = umu->umu_cache;
37137c478bd9Sstevel@tonic-gate 
37147c478bd9Sstevel@tonic-gate 	umu_add(umu, bcp, cp->cache_bufsize, cp->cache_bufsize);
37157c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
37167c478bd9Sstevel@tonic-gate }
37177c478bd9Sstevel@tonic-gate 
37187c478bd9Sstevel@tonic-gate /*
37197c478bd9Sstevel@tonic-gate  * When ::umausers is invoked with the -f flag, we print out the information
37207c478bd9Sstevel@tonic-gate  * for each bufctl as well as updating the hash table.
37217c478bd9Sstevel@tonic-gate  */
37227c478bd9Sstevel@tonic-gate static int
37237c478bd9Sstevel@tonic-gate umause2(uintptr_t addr, const umem_bufctl_audit_t *bcp, umusers_t *umu)
37247c478bd9Sstevel@tonic-gate {
37257c478bd9Sstevel@tonic-gate 	int i, depth = MIN(bcp->bc_depth, umem_stack_depth);
37267c478bd9Sstevel@tonic-gate 	const umem_cache_t *cp = umu->umu_cache;
37277c478bd9Sstevel@tonic-gate 
37287c478bd9Sstevel@tonic-gate 	mdb_printf("size %d, addr %p, thread %p, cache %s\n",
37297c478bd9Sstevel@tonic-gate 	    cp->cache_bufsize, addr, bcp->bc_thread, cp->cache_name);
37307c478bd9Sstevel@tonic-gate 
37317c478bd9Sstevel@tonic-gate 	for (i = 0; i < depth; i++)
37327c478bd9Sstevel@tonic-gate 		mdb_printf("\t %a\n", bcp->bc_stack[i]);
37337c478bd9Sstevel@tonic-gate 
37347c478bd9Sstevel@tonic-gate 	umu_add(umu, bcp, cp->cache_bufsize, cp->cache_bufsize);
37357c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
37367c478bd9Sstevel@tonic-gate }
37377c478bd9Sstevel@tonic-gate 
37387c478bd9Sstevel@tonic-gate /*
37397c478bd9Sstevel@tonic-gate  * We sort our results in descending order of total bytes allocated.
37407c478bd9Sstevel@tonic-gate  */
37417c478bd9Sstevel@tonic-gate static int
37427c478bd9Sstevel@tonic-gate umownercmp(const void *lp, const void *rp)
37437c478bd9Sstevel@tonic-gate {
37447c478bd9Sstevel@tonic-gate 	const umowner_t *lhs = lp;
37457c478bd9Sstevel@tonic-gate 	const umowner_t *rhs = rp;
37467c478bd9Sstevel@tonic-gate 
37477c478bd9Sstevel@tonic-gate 	return ((rhs->umo_total_size > lhs->umo_total_size) -
	    (rhs->umo_total_size < lhs->umo_total_size));
37487c478bd9Sstevel@tonic-gate }
37497c478bd9Sstevel@tonic-gate 
37507c478bd9Sstevel@tonic-gate /*
37517c478bd9Sstevel@tonic-gate  * The main engine of ::umausers is relatively straightforward: First we
37527c478bd9Sstevel@tonic-gate  * accumulate our list of umem_cache_t addresses into the umclist_t. Next we
37537c478bd9Sstevel@tonic-gate  * iterate over the allocated bufctls of each cache in the list.  Finally,
37547c478bd9Sstevel@tonic-gate  * we sort and print our results.
37557c478bd9Sstevel@tonic-gate  */
37567c478bd9Sstevel@tonic-gate /*ARGSUSED*/
37577c478bd9Sstevel@tonic-gate int
37587c478bd9Sstevel@tonic-gate umausers(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
37597c478bd9Sstevel@tonic-gate {
37607c478bd9Sstevel@tonic-gate 	int mem_threshold = 8192;	/* Minimum # bytes for printing */
37617c478bd9Sstevel@tonic-gate 	int cnt_threshold = 100;	/* Minimum # blocks for printing */
37627c478bd9Sstevel@tonic-gate 	int audited_caches = 0;		/* Number of UMF_AUDIT caches found */
37637c478bd9Sstevel@tonic-gate 	int do_all_caches = 1;		/* Do all caches (no arguments) */
37647c478bd9Sstevel@tonic-gate 	int opt_e = FALSE;		/* Include "small" users */
37657c478bd9Sstevel@tonic-gate 	int opt_f = FALSE;		/* Print stack traces */
37667c478bd9Sstevel@tonic-gate 
37677c478bd9Sstevel@tonic-gate 	mdb_walk_cb_t callback = (mdb_walk_cb_t)umause1;
37687c478bd9Sstevel@tonic-gate 	umowner_t *umo, *umoend;
37697c478bd9Sstevel@tonic-gate 	int i, oelems;
37707c478bd9Sstevel@tonic-gate 
37717c478bd9Sstevel@tonic-gate 	umclist_t umc;
37727c478bd9Sstevel@tonic-gate 	umusers_t umu;
37737c478bd9Sstevel@tonic-gate 
37747c478bd9Sstevel@tonic-gate 	if (flags & DCMD_ADDRSPEC)
37757c478bd9Sstevel@tonic-gate 		return (DCMD_USAGE);
37767c478bd9Sstevel@tonic-gate 
37777c478bd9Sstevel@tonic-gate 	bzero(&umc, sizeof (umc));
37787c478bd9Sstevel@tonic-gate 	bzero(&umu, sizeof (umu));
37797c478bd9Sstevel@tonic-gate 
37807c478bd9Sstevel@tonic-gate 	while ((i = mdb_getopts(argc, argv,
37817c478bd9Sstevel@tonic-gate 	    'e', MDB_OPT_SETBITS, TRUE, &opt_e,
37827c478bd9Sstevel@tonic-gate 	    'f', MDB_OPT_SETBITS, TRUE, &opt_f, NULL)) != argc) {
37837c478bd9Sstevel@tonic-gate 
37847c478bd9Sstevel@tonic-gate 		argv += i;	/* skip past options we just processed */
37857c478bd9Sstevel@tonic-gate 		argc -= i;	/* adjust argc */
37867c478bd9Sstevel@tonic-gate 
37877c478bd9Sstevel@tonic-gate 		if (argv->a_type != MDB_TYPE_STRING || *argv->a_un.a_str == '-')
37887c478bd9Sstevel@tonic-gate 			return (DCMD_USAGE);
37897c478bd9Sstevel@tonic-gate 
37907c478bd9Sstevel@tonic-gate 		oelems = umc.umc_nelems;
37917c478bd9Sstevel@tonic-gate 		umc.umc_name = argv->a_un.a_str;
37927c478bd9Sstevel@tonic-gate 		(void) mdb_walk("umem_cache", (mdb_walk_cb_t)umc_add, &umc);
37937c478bd9Sstevel@tonic-gate 
37947c478bd9Sstevel@tonic-gate 		if (umc.umc_nelems == oelems) {
37957c478bd9Sstevel@tonic-gate 			mdb_warn("unknown umem cache: %s\n", umc.umc_name);
37967c478bd9Sstevel@tonic-gate 			return (DCMD_ERR);
37977c478bd9Sstevel@tonic-gate 		}
37987c478bd9Sstevel@tonic-gate 
37997c478bd9Sstevel@tonic-gate 		do_all_caches = 0;
38007c478bd9Sstevel@tonic-gate 		argv++;
38017c478bd9Sstevel@tonic-gate 		argc--;
38027c478bd9Sstevel@tonic-gate 	}
38037c478bd9Sstevel@tonic-gate 
38047c478bd9Sstevel@tonic-gate 	if (opt_e)
38057c478bd9Sstevel@tonic-gate 		mem_threshold = cnt_threshold = 0;
38067c478bd9Sstevel@tonic-gate 
38077c478bd9Sstevel@tonic-gate 	if (opt_f)
38087c478bd9Sstevel@tonic-gate 		callback = (mdb_walk_cb_t)umause2;
38097c478bd9Sstevel@tonic-gate 
38107c478bd9Sstevel@tonic-gate 	if (do_all_caches) {
38117c478bd9Sstevel@tonic-gate 		umc.umc_name = NULL; /* match all cache names */
38127c478bd9Sstevel@tonic-gate 		(void) mdb_walk("umem_cache", (mdb_walk_cb_t)umc_add, &umc);
38137c478bd9Sstevel@tonic-gate 	}
38147c478bd9Sstevel@tonic-gate 
38157c478bd9Sstevel@tonic-gate 	for (i = 0; i < umc.umc_nelems; i++) {
38167c478bd9Sstevel@tonic-gate 		uintptr_t cp = umc.umc_caches[i];
38177c478bd9Sstevel@tonic-gate 		umem_cache_t c;
38187c478bd9Sstevel@tonic-gate 
38197c478bd9Sstevel@tonic-gate 		if (mdb_vread(&c, sizeof (c), cp) == -1) {
38207c478bd9Sstevel@tonic-gate 			mdb_warn("failed to read cache at %p", cp);
38217c478bd9Sstevel@tonic-gate 			continue;
38227c478bd9Sstevel@tonic-gate 		}
38237c478bd9Sstevel@tonic-gate 
38247c478bd9Sstevel@tonic-gate 		if (!(c.cache_flags & UMF_AUDIT)) {
38257c478bd9Sstevel@tonic-gate 			if (!do_all_caches) {
38267c478bd9Sstevel@tonic-gate 				mdb_warn("UMF_AUDIT is not enabled for %s\n",
38277c478bd9Sstevel@tonic-gate 				    c.cache_name);
38287c478bd9Sstevel@tonic-gate 			}
38297c478bd9Sstevel@tonic-gate 			continue;
38307c478bd9Sstevel@tonic-gate 		}
38317c478bd9Sstevel@tonic-gate 
38327c478bd9Sstevel@tonic-gate 		umu.umu_cache = &c;
38337c478bd9Sstevel@tonic-gate 		(void) mdb_pwalk("bufctl", callback, &umu, cp);
38347c478bd9Sstevel@tonic-gate 		audited_caches++;
38357c478bd9Sstevel@tonic-gate 	}
38367c478bd9Sstevel@tonic-gate 
38377c478bd9Sstevel@tonic-gate 	if (audited_caches == 0 && do_all_caches) {
38387c478bd9Sstevel@tonic-gate 		mdb_warn("UMF_AUDIT is not enabled for any caches\n");
38397c478bd9Sstevel@tonic-gate 		return (DCMD_ERR);
38407c478bd9Sstevel@tonic-gate 	}
38417c478bd9Sstevel@tonic-gate 
38427c478bd9Sstevel@tonic-gate 	qsort(umu.umu_hash, umu.umu_nelems, sizeof (umowner_t), umownercmp);
38437c478bd9Sstevel@tonic-gate 	umoend = umu.umu_hash + umu.umu_nelems;
38447c478bd9Sstevel@tonic-gate 
38457c478bd9Sstevel@tonic-gate 	for (umo = umu.umu_hash; umo < umoend; umo++) {
38467c478bd9Sstevel@tonic-gate 		if (umo->umo_total_size < mem_threshold &&
38477c478bd9Sstevel@tonic-gate 		    umo->umo_num < cnt_threshold)
38487c478bd9Sstevel@tonic-gate 			continue;
38497c478bd9Sstevel@tonic-gate 		mdb_printf("%lu bytes for %u allocations with data size %lu:\n",
38507c478bd9Sstevel@tonic-gate 		    umo->umo_total_size, umo->umo_num, umo->umo_data_size);
38517c478bd9Sstevel@tonic-gate 		for (i = 0; i < umo->umo_depth; i++)
38527c478bd9Sstevel@tonic-gate 			mdb_printf("\t %a\n", umo->umo_stack[i]);
38537c478bd9Sstevel@tonic-gate 	}
38547c478bd9Sstevel@tonic-gate 
38557c478bd9Sstevel@tonic-gate 	return (DCMD_OK);
38567c478bd9Sstevel@tonic-gate }
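
/*
 * Example (the cache name is illustrative; the target must have auditing
 * enabled, e.g. via UMEM_DEBUG=audit, for bufctl stack traces to exist):
 *
 *	> ::umausers -f umem_alloc_256
 *
 * summarizes outstanding allocations from the umem_alloc_256 cache and,
 * because of -f, also prints each allocated bufctl as it is encountered.
 */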
3857789d94c2Sjwadams 
3858789d94c2Sjwadams struct malloc_data {
3859789d94c2Sjwadams 	uint32_t malloc_size;
3860789d94c2Sjwadams 	uint32_t malloc_stat; /* == UMEM_MALLOC_ENCODE(state, malloc_size) */
3861789d94c2Sjwadams };
3862789d94c2Sjwadams 
3863789d94c2Sjwadams #ifdef _LP64
3864789d94c2Sjwadams #define	UMI_MAX_BUCKET		(UMEM_MAXBUF - 2*sizeof (struct malloc_data))
3865789d94c2Sjwadams #else
3866789d94c2Sjwadams #define	UMI_MAX_BUCKET		(UMEM_MAXBUF - sizeof (struct malloc_data))
3867789d94c2Sjwadams #endif
3868789d94c2Sjwadams 
3869789d94c2Sjwadams typedef struct umem_malloc_info {
3870789d94c2Sjwadams 	size_t um_total;	/* total allocated buffers */
3871789d94c2Sjwadams 	size_t um_malloc;	/* malloc buffers */
3872789d94c2Sjwadams 	size_t um_malloc_size;	/* sum of malloc buffer sizes */
3873789d94c2Sjwadams 	size_t um_malloc_overhead; /* sum of in-chunk overheads */
3874789d94c2Sjwadams 
3875789d94c2Sjwadams 	umem_cache_t *um_cp;
3876789d94c2Sjwadams 
3877789d94c2Sjwadams 	uint_t *um_bucket;
3878789d94c2Sjwadams } umem_malloc_info_t;
3879789d94c2Sjwadams 
3880789d94c2Sjwadams static void
3881789d94c2Sjwadams umem_malloc_print_dist(uint_t *um_bucket, size_t minmalloc, size_t maxmalloc,
3882789d94c2Sjwadams     size_t maxbuckets, size_t minbucketsize, int geometric)
3883789d94c2Sjwadams {
38843893cb7fStomee 	uint64_t um_malloc;
3885789d94c2Sjwadams 	int minb = -1;
3886789d94c2Sjwadams 	int maxb = -1;
3887789d94c2Sjwadams 	int buckets;
3888789d94c2Sjwadams 	int nbucks;
3889789d94c2Sjwadams 	int i;
3890789d94c2Sjwadams 	int b;
3891789d94c2Sjwadams 	const int *distarray;
3892789d94c2Sjwadams 
3893789d94c2Sjwadams 	minb = (int)minmalloc;
3894789d94c2Sjwadams 	maxb = (int)maxmalloc;
3895789d94c2Sjwadams 
3896789d94c2Sjwadams 	nbucks = buckets = maxb - minb + 1;
3897789d94c2Sjwadams 
3898789d94c2Sjwadams 	um_malloc = 0;
3899789d94c2Sjwadams 	for (b = minb; b <= maxb; b++)
3900789d94c2Sjwadams 		um_malloc += um_bucket[b];
3901789d94c2Sjwadams 
3902789d94c2Sjwadams 	if (maxbuckets != 0)
3903789d94c2Sjwadams 		buckets = MIN(buckets, maxbuckets);
3904789d94c2Sjwadams 
3905789d94c2Sjwadams 	if (minbucketsize > 1) {
3906789d94c2Sjwadams 		buckets = MIN(buckets, nbucks/minbucketsize);
3907789d94c2Sjwadams 		if (buckets == 0) {
3908789d94c2Sjwadams 			buckets = 1;
3909789d94c2Sjwadams 			minbucketsize = nbucks;
3910789d94c2Sjwadams 		}
3911789d94c2Sjwadams 	}
3912789d94c2Sjwadams 
3913789d94c2Sjwadams 	if (geometric)
3914087e1372Stomee 		distarray = dist_geometric(buckets, minb, maxb, minbucketsize);
3915789d94c2Sjwadams 	else
3916087e1372Stomee 		distarray = dist_linear(buckets, minb, maxb);
3917789d94c2Sjwadams 
3918087e1372Stomee 	dist_print_header("malloc size", 11, "count");
3919789d94c2Sjwadams 	for (i = 0; i < buckets; i++) {
3920087e1372Stomee 		dist_print_bucket(distarray, i, um_bucket, um_malloc, 11);
3921789d94c2Sjwadams 	}
3922789d94c2Sjwadams 	mdb_printf("\n");
3923789d94c2Sjwadams }
3924789d94c2Sjwadams 
3925789d94c2Sjwadams /*
3926789d94c2Sjwadams  * A malloc()ed buffer looks like:
3927789d94c2Sjwadams  *
3928789d94c2Sjwadams  *	<----------- mi.malloc_size --->
3929789d94c2Sjwadams  *	<----------- cp.cache_bufsize ------------------>
3930789d94c2Sjwadams  *	<----------- cp.cache_chunksize -------------------------------->
3931789d94c2Sjwadams  *	+-------+-----------------------+---------------+---------------+
3932789d94c2Sjwadams  *	|/tag///| mallocsz		|/round-off/////|/debug info////|
3933789d94c2Sjwadams  *	+-------+---------------------------------------+---------------+
3934789d94c2Sjwadams  *		<-- usable space ------>
3935789d94c2Sjwadams  *
3936789d94c2Sjwadams  * mallocsz is the argument to malloc(3C).
3937789d94c2Sjwadams  * mi.malloc_size is the actual size passed to umem_alloc(), which
3938789d94c2Sjwadams  * is rounded up to the smallest available cache size, which is
3939789d94c2Sjwadams  * cache_bufsize.  If there is debugging or alignment overhead in
3940789d94c2Sjwadams  * the cache, that is reflected in a larger cache_chunksize.
3941789d94c2Sjwadams  *
3942789d94c2Sjwadams  * The tag at the beginning of the buffer is either 8 or 16 bytes long,
3943789d94c2Sjwadams  * depending upon the ISA's alignment requirements.  For 32-bit allocations,
3944789d94c2Sjwadams  * it is always an 8-byte tag.  For 64-bit allocations larger than 8 bytes,
3945789d94c2Sjwadams  * the tag has 8 bytes of padding before it.
3946789d94c2Sjwadams  *
3947789d94c2Sjwadams  * 32-bit buffers, and 64-bit buffers <= 8 bytes:
3948789d94c2Sjwadams  *	+-------+-------+--------- ...
3949789d94c2Sjwadams  *	|/size//|/stat//| mallocsz ...
3950789d94c2Sjwadams  *	+-------+-------+--------- ...
3951789d94c2Sjwadams  *			^
3952789d94c2Sjwadams  *			pointer returned from malloc(3C)
3953789d94c2Sjwadams  *
3954789d94c2Sjwadams  * 64-bit buffers > 8 bytes:
3955789d94c2Sjwadams  *	+---------------+-------+-------+--------- ...
3956789d94c2Sjwadams  *	|/padding///////|/size//|/stat//| mallocsz ...
3957789d94c2Sjwadams  *	+---------------+-------+-------+--------- ...
3958789d94c2Sjwadams  *					^
3959789d94c2Sjwadams  *					pointer returned from malloc(3C)
3960789d94c2Sjwadams  *
3961789d94c2Sjwadams  * The "size" field is "malloc_size", which is mallocsz + the padding.
3962789d94c2Sjwadams  * The "stat" field is derived from malloc_size, and functions as a
3963789d94c2Sjwadams  * validation that this buffer is actually from malloc(3C).
3964789d94c2Sjwadams  */
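
/*
 * A worked example (assuming the default umem_alloc_sizes table): a 32-bit
 * malloc(100) adds the 8-byte tag, giving a malloc_size of 108, which
 * umem_alloc() satisfies from the 112-byte cache.  The usable space is
 * still 100 bytes, and the remaining 4 bytes appear as round-off overhead
 * within the chunk.
 */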
3965789d94c2Sjwadams /*ARGSUSED*/
3966789d94c2Sjwadams static int
3967789d94c2Sjwadams um_umem_buffer_cb(uintptr_t addr, void *buf, umem_malloc_info_t *ump)
3968789d94c2Sjwadams {
3969789d94c2Sjwadams 	struct malloc_data md;
3970789d94c2Sjwadams 	size_t m_addr = addr;
3971789d94c2Sjwadams 	size_t overhead = sizeof (md);
3972789d94c2Sjwadams 	size_t mallocsz;
3973789d94c2Sjwadams 
3974789d94c2Sjwadams 	ump->um_total++;
3975789d94c2Sjwadams 
3976789d94c2Sjwadams #ifdef _LP64
3977789d94c2Sjwadams 	if (ump->um_cp->cache_bufsize > UMEM_SECOND_ALIGN) {
3978789d94c2Sjwadams 		m_addr += overhead;
3979789d94c2Sjwadams 		overhead += sizeof (md);
3980789d94c2Sjwadams 	}
3981789d94c2Sjwadams #endif
3982789d94c2Sjwadams 
3983789d94c2Sjwadams 	if (mdb_vread(&md, sizeof (md), m_addr) == -1) {
3984789d94c2Sjwadams 		mdb_warn("unable to read malloc header at %p", m_addr);
3985789d94c2Sjwadams 		return (WALK_NEXT);
3986789d94c2Sjwadams 	}
3987789d94c2Sjwadams 
3988789d94c2Sjwadams 	switch (UMEM_MALLOC_DECODE(md.malloc_stat, md.malloc_size)) {
3989789d94c2Sjwadams 	case MALLOC_MAGIC:
3990789d94c2Sjwadams #ifdef _LP64
3991789d94c2Sjwadams 	case MALLOC_SECOND_MAGIC:
3992789d94c2Sjwadams #endif
3993789d94c2Sjwadams 		mallocsz = md.malloc_size - overhead;
3994789d94c2Sjwadams 
3995789d94c2Sjwadams 		ump->um_malloc++;
3996789d94c2Sjwadams 		ump->um_malloc_size += mallocsz;
3997789d94c2Sjwadams 		ump->um_malloc_overhead += overhead;
3998789d94c2Sjwadams 
3999789d94c2Sjwadams 		/* include round-off and debug overhead */
4000789d94c2Sjwadams 		ump->um_malloc_overhead +=
4001789d94c2Sjwadams 		    ump->um_cp->cache_chunksize - md.malloc_size;
4002789d94c2Sjwadams 
4003789d94c2Sjwadams 		if (ump->um_bucket != NULL && mallocsz <= UMI_MAX_BUCKET)
4004789d94c2Sjwadams 			ump->um_bucket[mallocsz]++;
4005789d94c2Sjwadams 
4006789d94c2Sjwadams 		break;
4007789d94c2Sjwadams 	default:
4008789d94c2Sjwadams 		break;
4009789d94c2Sjwadams 	}
4010789d94c2Sjwadams 
4011789d94c2Sjwadams 	return (WALK_NEXT);
4012789d94c2Sjwadams }
4013789d94c2Sjwadams 
4014789d94c2Sjwadams int
4015789d94c2Sjwadams get_umem_alloc_sizes(int **out, size_t *out_num)
4016789d94c2Sjwadams {
4017789d94c2Sjwadams 	GElf_Sym sym;
4018789d94c2Sjwadams 
4019789d94c2Sjwadams 	if (umem_lookup_by_name("umem_alloc_sizes", &sym) == -1) {
4020789d94c2Sjwadams 		mdb_warn("unable to look up umem_alloc_sizes");
4021789d94c2Sjwadams 		return (-1);
4022789d94c2Sjwadams 	}
4023789d94c2Sjwadams 
4024789d94c2Sjwadams 	*out = mdb_alloc(sym.st_size, UM_SLEEP | UM_GC);
4025789d94c2Sjwadams 	*out_num = sym.st_size / sizeof (int);
4026789d94c2Sjwadams 
4027789d94c2Sjwadams 	if (mdb_vread(*out, sym.st_size, sym.st_value) == -1) {
4028789d94c2Sjwadams 		mdb_warn("unable to read umem_alloc_sizes (%p)", sym.st_value);
4029789d94c2Sjwadams 		*out = NULL;
4030789d94c2Sjwadams 		return (-1);
4031789d94c2Sjwadams 	}
4032789d94c2Sjwadams 
4033789d94c2Sjwadams 	return (0);
4034789d94c2Sjwadams }
4035789d94c2Sjwadams 
4036789d94c2Sjwadams 
4037789d94c2Sjwadams static int
4038789d94c2Sjwadams um_umem_cache_cb(uintptr_t addr, umem_cache_t *cp, umem_malloc_info_t *ump)
4039789d94c2Sjwadams {
4040789d94c2Sjwadams 	if (strncmp(cp->cache_name, "umem_alloc_", strlen("umem_alloc_")) != 0)
4041789d94c2Sjwadams 		return (WALK_NEXT);
4042789d94c2Sjwadams 
4043789d94c2Sjwadams 	ump->um_cp = cp;
4044789d94c2Sjwadams 
4045789d94c2Sjwadams 	if (mdb_pwalk("umem", (mdb_walk_cb_t)um_umem_buffer_cb, ump, addr) ==
4046789d94c2Sjwadams 	    -1) {
4047789d94c2Sjwadams 		mdb_warn("can't walk 'umem' for cache %p", addr);
4048789d94c2Sjwadams 		return (WALK_ERR);
4049789d94c2Sjwadams 	}
4050789d94c2Sjwadams 
4051789d94c2Sjwadams 	return (WALK_NEXT);
4052789d94c2Sjwadams }
4053789d94c2Sjwadams 
4054789d94c2Sjwadams void
4055789d94c2Sjwadams umem_malloc_dist_help(void)
4056789d94c2Sjwadams {
4057789d94c2Sjwadams 	mdb_printf("%s\n",
4058789d94c2Sjwadams 	    "report distribution of outstanding malloc()s");
4059789d94c2Sjwadams 	mdb_dec_indent(2);
4060789d94c2Sjwadams 	mdb_printf("%<b>OPTIONS%</b>\n");
4061789d94c2Sjwadams 	mdb_inc_indent(2);
4062789d94c2Sjwadams 	mdb_printf("%s",
4063789d94c2Sjwadams "  -b maxbins\n"
4064789d94c2Sjwadams "        Use at most maxbins bins for the data\n"
4065789d94c2Sjwadams "  -B minbinsize\n"
4066789d94c2Sjwadams "        Make the bins at least minbinsize bytes apart\n"
4067789d94c2Sjwadams "  -d    dump the raw data out, without binning\n"
4068789d94c2Sjwadams "  -g    use geometric binning instead of linear binning\n");
4069789d94c2Sjwadams }
4070789d94c2Sjwadams 
4071789d94c2Sjwadams /*ARGSUSED*/
4072789d94c2Sjwadams int
4073789d94c2Sjwadams umem_malloc_dist(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
4074789d94c2Sjwadams {
4075789d94c2Sjwadams 	umem_malloc_info_t mi;
4076789d94c2Sjwadams 	uint_t geometric = 0;
4077789d94c2Sjwadams 	uint_t dump = 0;
4078789d94c2Sjwadams 	size_t maxbuckets = 0;
4079789d94c2Sjwadams 	size_t minbucketsize = 0;
4080789d94c2Sjwadams 
4081789d94c2Sjwadams 	size_t minalloc = 0;
4082789d94c2Sjwadams 	size_t maxalloc = UMI_MAX_BUCKET;
4083789d94c2Sjwadams 
4084789d94c2Sjwadams 	if (flags & DCMD_ADDRSPEC)
4085789d94c2Sjwadams 		return (DCMD_USAGE);
4086789d94c2Sjwadams 
4087789d94c2Sjwadams 	if (mdb_getopts(argc, argv,
4088789d94c2Sjwadams 	    'd', MDB_OPT_SETBITS, TRUE, &dump,
4089789d94c2Sjwadams 	    'g', MDB_OPT_SETBITS, TRUE, &geometric,
4090789d94c2Sjwadams 	    'b', MDB_OPT_UINTPTR, &maxbuckets,
4091789d94c2Sjwadams 	    'B', MDB_OPT_UINTPTR, &minbucketsize,
4092789d94c2Sjwadams 	    0) != argc)
4093789d94c2Sjwadams 		return (DCMD_USAGE);
4094789d94c2Sjwadams 
4095789d94c2Sjwadams 	bzero(&mi, sizeof (mi));
4096789d94c2Sjwadams 	mi.um_bucket = mdb_zalloc((UMI_MAX_BUCKET + 1) * sizeof (*mi.um_bucket),
4097789d94c2Sjwadams 	    UM_SLEEP | UM_GC);
4098789d94c2Sjwadams 
4099789d94c2Sjwadams 	if (mdb_walk("umem_cache", (mdb_walk_cb_t)um_umem_cache_cb,
4100789d94c2Sjwadams 	    &mi) == -1) {
4101789d94c2Sjwadams 		mdb_warn("unable to walk 'umem_cache'");
4102789d94c2Sjwadams 		return (DCMD_ERR);
4103789d94c2Sjwadams 	}
4104789d94c2Sjwadams 
4105789d94c2Sjwadams 	if (dump) {
4106789d94c2Sjwadams 		int i;
4107789d94c2Sjwadams 		for (i = minalloc; i <= maxalloc; i++)
4108789d94c2Sjwadams 			mdb_printf("%d\t%d\n", i, mi.um_bucket[i]);
4109789d94c2Sjwadams 
4110789d94c2Sjwadams 		return (DCMD_OK);
4111789d94c2Sjwadams 	}
4112789d94c2Sjwadams 
4113789d94c2Sjwadams 	umem_malloc_print_dist(mi.um_bucket, minalloc, maxalloc,
4114789d94c2Sjwadams 	    maxbuckets, minbucketsize, geometric);
4115789d94c2Sjwadams 
4116789d94c2Sjwadams 	return (DCMD_OK);
4117789d94c2Sjwadams }
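
/*
 * Example: bin the sizes of all outstanding malloc()s geometrically, using
 * at most 20 bins:
 *
 *	> ::umem_malloc_dist -g -b 20
 */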
4118789d94c2Sjwadams 
4119789d94c2Sjwadams void
4120789d94c2Sjwadams umem_malloc_info_help(void)
4121789d94c2Sjwadams {
4122789d94c2Sjwadams 	mdb_printf("%s\n",
4123789d94c2Sjwadams 	    "report information about malloc()s by cache");
4124789d94c2Sjwadams 	mdb_dec_indent(2);
4125789d94c2Sjwadams 	mdb_printf("%<b>OPTIONS%</b>\n");
4126789d94c2Sjwadams 	mdb_inc_indent(2);
4127789d94c2Sjwadams 	mdb_printf("%s",
4128789d94c2Sjwadams "  -b maxbins\n"
4129789d94c2Sjwadams "        Use at most maxbins bins for the data\n"
4130789d94c2Sjwadams "  -B minbinsize\n"
4131789d94c2Sjwadams "        Make the bins at least minbinsize bytes apart\n"
4132789d94c2Sjwadams "  -d    dump the raw distribution data without binning\n"
4133789d94c2Sjwadams #ifndef _KMDB
4134789d94c2Sjwadams "  -g    use geometric binning instead of linear binning\n"
4135789d94c2Sjwadams #endif
4136789d94c2Sjwadams 	    "");
4137789d94c2Sjwadams }
4138789d94c2Sjwadams int
4139789d94c2Sjwadams umem_malloc_info(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
4140789d94c2Sjwadams {
4141789d94c2Sjwadams 	umem_cache_t c;
4142789d94c2Sjwadams 	umem_malloc_info_t mi;
4143789d94c2Sjwadams 
4144789d94c2Sjwadams 	int skip = 0;
4145789d94c2Sjwadams 
4146789d94c2Sjwadams 	size_t maxmalloc;
4147789d94c2Sjwadams 	size_t overhead;
4148789d94c2Sjwadams 	size_t allocated;
4149789d94c2Sjwadams 	size_t avg_malloc;
4150789d94c2Sjwadams 	size_t overhead_pct;	/* overhead in tenths of a percent */
4151789d94c2Sjwadams 
4152789d94c2Sjwadams 	uint_t verbose = 0;
4153789d94c2Sjwadams 	uint_t dump = 0;
4154789d94c2Sjwadams 	uint_t geometric = 0;
4155789d94c2Sjwadams 	size_t maxbuckets = 0;
4156789d94c2Sjwadams 	size_t minbucketsize = 0;
4157789d94c2Sjwadams 
4158789d94c2Sjwadams 	int *alloc_sizes;
4159789d94c2Sjwadams 	int idx;
4160789d94c2Sjwadams 	size_t num;
4161789d94c2Sjwadams 	size_t minmalloc;
4162789d94c2Sjwadams 
4163789d94c2Sjwadams 	if (mdb_getopts(argc, argv,
4164789d94c2Sjwadams 	    'd', MDB_OPT_SETBITS, TRUE, &dump,
4165789d94c2Sjwadams 	    'g', MDB_OPT_SETBITS, TRUE, &geometric,
4166789d94c2Sjwadams 	    'b', MDB_OPT_UINTPTR, &maxbuckets,
4167789d94c2Sjwadams 	    'B', MDB_OPT_UINTPTR, &minbucketsize,
4168789d94c2Sjwadams 	    0) != argc)
4169789d94c2Sjwadams 		return (DCMD_USAGE);
4170789d94c2Sjwadams 
4171789d94c2Sjwadams 	if (dump || geometric || (maxbuckets != 0) || (minbucketsize != 0))
4172789d94c2Sjwadams 		verbose = 1;
4173789d94c2Sjwadams 
4174789d94c2Sjwadams 	if (!(flags & DCMD_ADDRSPEC)) {
4175789d94c2Sjwadams 		if (mdb_walk_dcmd("umem_cache", "umem_malloc_info",
4176789d94c2Sjwadams 		    argc, argv) == -1) {
4177789d94c2Sjwadams 			mdb_warn("can't walk umem_cache");
4178789d94c2Sjwadams 			return (DCMD_ERR);
4179789d94c2Sjwadams 		}
4180789d94c2Sjwadams 		return (DCMD_OK);
4181789d94c2Sjwadams 	}
4182789d94c2Sjwadams 
4183789d94c2Sjwadams 	if (mdb_vread(&c, sizeof (c), addr) == -1) {
4184789d94c2Sjwadams 		mdb_warn("unable to read cache at %p", addr);
4185789d94c2Sjwadams 		return (DCMD_ERR);
4186789d94c2Sjwadams 	}
4187789d94c2Sjwadams 
4188789d94c2Sjwadams 	if (strncmp(c.cache_name, "umem_alloc_", strlen("umem_alloc_")) != 0) {
4189789d94c2Sjwadams 		if (!(flags & DCMD_LOOP))
4190789d94c2Sjwadams 			mdb_warn("umem_malloc_info: cache \"%s\" is not used "
4191789d94c2Sjwadams 			    "by malloc()\n", c.cache_name);
4192789d94c2Sjwadams 		skip = 1;
4193789d94c2Sjwadams 	}
4194789d94c2Sjwadams 
4195789d94c2Sjwadams 	/*
4196789d94c2Sjwadams 	 * normally, print the header only the first time.  In verbose mode,
4197789d94c2Sjwadams 	 * print the header on every non-skipped buffer
4198789d94c2Sjwadams 	 */
4199789d94c2Sjwadams 	if ((!verbose && DCMD_HDRSPEC(flags)) || (verbose && !skip))
4200789d94c2Sjwadams 		mdb_printf("%<ul>%-?s %6s %6s %8s %8s %10s %10s %6s%</ul>\n",
4201789d94c2Sjwadams 		    "CACHE", "BUFSZ", "MAXMAL",
4202789d94c2Sjwadams 		    "BUFMALLC", "AVG_MAL", "MALLOCED", "OVERHEAD", "%OVER");
4203789d94c2Sjwadams 
4204789d94c2Sjwadams 	if (skip)
4205789d94c2Sjwadams 		return (DCMD_OK);
4206789d94c2Sjwadams 
4207789d94c2Sjwadams 	maxmalloc = c.cache_bufsize - sizeof (struct malloc_data);
4208789d94c2Sjwadams #ifdef _LP64
4209789d94c2Sjwadams 	if (c.cache_bufsize > UMEM_SECOND_ALIGN)
4210789d94c2Sjwadams 		maxmalloc -= sizeof (struct malloc_data);
4211789d94c2Sjwadams #endif
4212789d94c2Sjwadams 
4213789d94c2Sjwadams 	bzero(&mi, sizeof (mi));
4214789d94c2Sjwadams 	mi.um_cp = &c;
4215789d94c2Sjwadams 	if (verbose)
4216789d94c2Sjwadams 		mi.um_bucket =
4217789d94c2Sjwadams 		    mdb_zalloc((UMI_MAX_BUCKET + 1) * sizeof (*mi.um_bucket),
4218789d94c2Sjwadams 		    UM_SLEEP | UM_GC);
4219789d94c2Sjwadams 
4220789d94c2Sjwadams 	if (mdb_pwalk("umem", (mdb_walk_cb_t)um_umem_buffer_cb, &mi, addr) ==
4221789d94c2Sjwadams 	    -1) {
4222789d94c2Sjwadams 		mdb_warn("can't walk 'umem'");
4223789d94c2Sjwadams 		return (DCMD_ERR);
4224789d94c2Sjwadams 	}
4225789d94c2Sjwadams 
4226789d94c2Sjwadams 	overhead = mi.um_malloc_overhead;
4227789d94c2Sjwadams 	allocated = mi.um_malloc_size;
4228789d94c2Sjwadams 
4229789d94c2Sjwadams 	/* do integer round off for the average */
4230789d94c2Sjwadams 	if (mi.um_malloc != 0)
4231789d94c2Sjwadams 		avg_malloc = (allocated + (mi.um_malloc - 1)/2) / mi.um_malloc;
4232789d94c2Sjwadams 	else
4233789d94c2Sjwadams 		avg_malloc = 0;
4234789d94c2Sjwadams 
4235789d94c2Sjwadams 	/*
4236789d94c2Sjwadams 	 * include per-slab overhead
4237789d94c2Sjwadams 	 *
4238789d94c2Sjwadams 	 * Each slab in a given cache is the same size, and has the same
4239789d94c2Sjwadams 	 * number of chunks in it;  we read in the first slab on the
4240789d94c2Sjwadams 	 * slab list to get the number of chunks for all slabs.  To
4241789d94c2Sjwadams 	 * compute the per-slab overhead, we just subtract the chunk usage
4242789d94c2Sjwadams 	 * from the slabsize:
4243789d94c2Sjwadams 	 *
4244789d94c2Sjwadams 	 * +------------+-------+-------+ ... --+-------+-------+-------+
4245789d94c2Sjwadams 	 * |////////////|	|	| ...	|	|///////|///////|
4246789d94c2Sjwadams 	 * |////color///| chunk	| chunk	| ...	| chunk	|/color/|/slab//|
4247789d94c2Sjwadams 	 * |////////////|	|	| ...	|	|///////|///////|
4248789d94c2Sjwadams 	 * +------------+-------+-------+ ... --+-------+-------+-------+
4249789d94c2Sjwadams 	 * |		\_______chunksize * chunks_____/		|
4250789d94c2Sjwadams 	 * \__________________________slabsize__________________________/
4251789d94c2Sjwadams 	 *
4252789d94c2Sjwadams 	 * For UMF_HASH caches, there is an additional source of overhead:
4253789d94c2Sjwadams 	 * the external umem_slab_t and per-chunk bufctl structures.  We
4254789d94c2Sjwadams 	 * include those in our per-slab overhead.
4255789d94c2Sjwadams 	 *
4256789d94c2Sjwadams 	 * Once we have a number for the per-slab overhead, we estimate
4257789d94c2Sjwadams 	 * the actual overhead by treating the malloc()ed buffers as if
4258789d94c2Sjwadams 	 * they were densely packed:
4259789d94c2Sjwadams 	 *
4260789d94c2Sjwadams 	 *	additional overhead = (# mallocs) * (per-slab) / (chunks);
4261789d94c2Sjwadams 	 *
4262789d94c2Sjwadams 	 * carefully ordering the multiply before the divide, to avoid
4263789d94c2Sjwadams 	 * round-off error.
4264789d94c2Sjwadams 	 */
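	/*
	 * For illustration with hypothetical numbers: an 8192-byte slab
	 * holding 51 chunks of 160 bytes leaves 8192 - 51 * 160 = 32 bytes
	 * of per-slab overhead; if 1000 of this cache's buffers were
	 * malloc()ed, that would add 32 * 1000 / 51 = 627 bytes here.
	 */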
4265789d94c2Sjwadams 	if (mi.um_malloc != 0) {
4266789d94c2Sjwadams 		umem_slab_t slab;
4267789d94c2Sjwadams 		uintptr_t saddr = (uintptr_t)c.cache_nullslab.slab_next;
4268789d94c2Sjwadams 
4269789d94c2Sjwadams 		if (mdb_vread(&slab, sizeof (slab), saddr) == -1) {
4270789d94c2Sjwadams 			mdb_warn("unable to read slab at %p\n", saddr);
4271789d94c2Sjwadams 		} else {
4272789d94c2Sjwadams 			long chunks = slab.slab_chunks;
4273789d94c2Sjwadams 			if (chunks != 0 && c.cache_chunksize != 0 &&
4274789d94c2Sjwadams 			    chunks <= c.cache_slabsize / c.cache_chunksize) {
4275789d94c2Sjwadams 				uintmax_t perslab =
4276789d94c2Sjwadams 				    c.cache_slabsize -
4277789d94c2Sjwadams 				    (c.cache_chunksize * chunks);
4278789d94c2Sjwadams 
4279789d94c2Sjwadams 				if (c.cache_flags & UMF_HASH) {
4280789d94c2Sjwadams 					perslab += sizeof (umem_slab_t) +
4281789d94c2Sjwadams 					    chunks *
4282789d94c2Sjwadams 					    ((c.cache_flags & UMF_AUDIT) ?
4283789d94c2Sjwadams 					    sizeof (umem_bufctl_audit_t) :
4284789d94c2Sjwadams 					    sizeof (umem_bufctl_t));
4285789d94c2Sjwadams 				}
4286789d94c2Sjwadams 				overhead +=
4287789d94c2Sjwadams 				    (perslab * (uintmax_t)mi.um_malloc)/chunks;
4288789d94c2Sjwadams 			} else {
4289789d94c2Sjwadams 				mdb_warn("invalid #chunks (%ld) in slab %p\n",
4290789d94c2Sjwadams 				    chunks, saddr);
4291789d94c2Sjwadams 			}
4292789d94c2Sjwadams 		}
4293789d94c2Sjwadams 	}
4294789d94c2Sjwadams 
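	/*
	 * overhead_pct is kept in tenths of a percent so that the summary
	 * line can show one decimal place; with hypothetical numbers,
	 * overhead = 212 on allocated = 1000 gives 212, printed as "21.2%".
	 */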
4295789d94c2Sjwadams 	if (allocated != 0)
4296789d94c2Sjwadams 		overhead_pct = (1000ULL * overhead) / allocated;
4297789d94c2Sjwadams 	else
4298789d94c2Sjwadams 		overhead_pct = 0;
4299789d94c2Sjwadams 
4300789d94c2Sjwadams 	mdb_printf("%0?p %6ld %6ld %8ld %8ld %10ld %10ld %3ld.%01ld%%\n",
4301789d94c2Sjwadams 	    addr, c.cache_bufsize, maxmalloc,
4302789d94c2Sjwadams 	    mi.um_malloc, avg_malloc, allocated, overhead,
4303789d94c2Sjwadams 	    overhead_pct / 10, overhead_pct % 10);
4304789d94c2Sjwadams 
4305789d94c2Sjwadams 	if (!verbose)
4306789d94c2Sjwadams 		return (DCMD_OK);
4307789d94c2Sjwadams 
4308789d94c2Sjwadams 	if (!dump)
4309789d94c2Sjwadams 		mdb_printf("\n");
4310789d94c2Sjwadams 
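	/*
	 * To know which request sizes land in this cache, locate its buffer
	 * size in the 0-terminated umem_alloc_sizes[] array; the preceding
	 * entry bounds the smallest request this cache serves.
	 */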
4311789d94c2Sjwadams 	if (get_umem_alloc_sizes(&alloc_sizes, &num) == -1)
4312789d94c2Sjwadams 		return (DCMD_ERR);
4313789d94c2Sjwadams 
4314789d94c2Sjwadams 	for (idx = 0; idx < num; idx++) {
4315789d94c2Sjwadams 		if (alloc_sizes[idx] == c.cache_bufsize)
4316789d94c2Sjwadams 			break;
4317789d94c2Sjwadams 		if (alloc_sizes[idx] == 0) {
4318789d94c2Sjwadams 			idx = num;	/* 0-terminated array */
4319789d94c2Sjwadams 			break;
4320789d94c2Sjwadams 		}
4321789d94c2Sjwadams 	}
4322789d94c2Sjwadams 	if (idx == num) {
4323789d94c2Sjwadams 		mdb_warn(
4324789d94c2Sjwadams 		    "cache %p's size (%d) not in umem_alloc_sizes\n",
4325789d94c2Sjwadams 		    addr, c.cache_bufsize);
4326789d94c2Sjwadams 		return (DCMD_ERR);
4327789d94c2Sjwadams 	}
4328789d94c2Sjwadams 
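	/*
	 * minmalloc is the smallest request that maps to this cache: one
	 * byte more than the largest payload the next-smaller cache can
	 * hold (its buffer size minus the malloc_data tag(s)), or 0 for
	 * the smallest malloc cache.
	 */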
4329789d94c2Sjwadams 	minmalloc = (idx == 0)? 0 : alloc_sizes[idx - 1];
4330789d94c2Sjwadams 	if (minmalloc > 0) {
4331789d94c2Sjwadams #ifdef _LP64
4332789d94c2Sjwadams 		if (minmalloc > UMEM_SECOND_ALIGN)
4333789d94c2Sjwadams 			minmalloc -= sizeof (struct malloc_data);
4334789d94c2Sjwadams #endif
4335789d94c2Sjwadams 		minmalloc -= sizeof (struct malloc_data);
4336789d94c2Sjwadams 		minmalloc += 1;
4337789d94c2Sjwadams 	}
4338789d94c2Sjwadams 
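	/*
	 * Either dump the raw (size, count) pairs one per line, or hand
	 * the buckets to umem_malloc_print_dist() to print a distribution.
	 */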
4339789d94c2Sjwadams 	if (dump) {
4340789d94c2Sjwadams 		for (idx = minmalloc; idx <= maxmalloc; idx++)
4341789d94c2Sjwadams 			mdb_printf("%d\t%d\n", idx, mi.um_bucket[idx]);
4342789d94c2Sjwadams 		mdb_printf("\n");
4343789d94c2Sjwadams 	} else {
4344789d94c2Sjwadams 		umem_malloc_print_dist(mi.um_bucket, minmalloc, maxmalloc,
4345789d94c2Sjwadams 		    maxbuckets, minbucketsize, geometric);
4346789d94c2Sjwadams 	}
4347789d94c2Sjwadams 
4348789d94c2Sjwadams 	return (DCMD_OK);
4349789d94c2Sjwadams }
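
/*
 * A minimal usage sketch (a hypothetical mdb session; the cache address and
 * the -d option letter are assumptions based on the getopts handling above,
 * not output of a real run):
 *
 *	> fe8a4030::umem_malloc_info		summary line for one cache
 *	> fe8a4030::umem_malloc_info -d		raw size/count dump
 *	> ::umem_malloc_info			every malloc cache
 */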
4350