17c478bd9Sstevel@tonic-gate /*
27c478bd9Sstevel@tonic-gate  * CDDL HEADER START
37c478bd9Sstevel@tonic-gate  *
47c478bd9Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
5789d94c2Sjwadams  * Common Development and Distribution License (the "License").
6789d94c2Sjwadams  * You may not use this file except in compliance with the License.
77c478bd9Sstevel@tonic-gate  *
87c478bd9Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
97c478bd9Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
107c478bd9Sstevel@tonic-gate  * See the License for the specific language governing permissions
117c478bd9Sstevel@tonic-gate  * and limitations under the License.
127c478bd9Sstevel@tonic-gate  *
137c478bd9Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
147c478bd9Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
157c478bd9Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
167c478bd9Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
177c478bd9Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
187c478bd9Sstevel@tonic-gate  *
197c478bd9Sstevel@tonic-gate  * CDDL HEADER END
207c478bd9Sstevel@tonic-gate  */
217c478bd9Sstevel@tonic-gate /*
22*b5fca8f8Stomee  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
237c478bd9Sstevel@tonic-gate  * Use is subject to license terms.
247c478bd9Sstevel@tonic-gate  */
257c478bd9Sstevel@tonic-gate 
267c478bd9Sstevel@tonic-gate #pragma ident	"%Z%%M%	%I%	%E% SMI"
277c478bd9Sstevel@tonic-gate 
287c478bd9Sstevel@tonic-gate #include <mdb/mdb_param.h>
297c478bd9Sstevel@tonic-gate #include <mdb/mdb_modapi.h>
307c478bd9Sstevel@tonic-gate #include <mdb/mdb_ctf.h>
317c478bd9Sstevel@tonic-gate #include <sys/cpuvar.h>
327c478bd9Sstevel@tonic-gate #include <sys/kmem_impl.h>
337c478bd9Sstevel@tonic-gate #include <sys/vmem_impl.h>
347c478bd9Sstevel@tonic-gate #include <sys/machelf.h>
357c478bd9Sstevel@tonic-gate #include <sys/modctl.h>
367c478bd9Sstevel@tonic-gate #include <sys/kobj.h>
377c478bd9Sstevel@tonic-gate #include <sys/panic.h>
387c478bd9Sstevel@tonic-gate #include <sys/stack.h>
397c478bd9Sstevel@tonic-gate #include <sys/sysmacros.h>
407c478bd9Sstevel@tonic-gate #include <vm/page.h>
417c478bd9Sstevel@tonic-gate 
42*b5fca8f8Stomee #include "avl.h"
43*b5fca8f8Stomee #include "combined.h"
44087e1372Stomee #include "dist.h"
457c478bd9Sstevel@tonic-gate #include "kmem.h"
46789d94c2Sjwadams #include "leaky.h"
47*b5fca8f8Stomee #include "list.h"
487c478bd9Sstevel@tonic-gate 
497c478bd9Sstevel@tonic-gate #define	dprintf(x) if (mdb_debug_level) { \
507c478bd9Sstevel@tonic-gate 	mdb_printf("kmem debug: ");  \
517c478bd9Sstevel@tonic-gate 	/*CSTYLED*/\
527c478bd9Sstevel@tonic-gate 	mdb_printf x ;\
537c478bd9Sstevel@tonic-gate }
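/*
 * dprintf() takes its argument list wrapped in an extra set of parentheses
 * so the whole list can be forwarded to mdb_printf(), e.g.
 *
 *	dprintf(("reading %d loaded rounds\n", ccp->cc_rounds));
 *
 * Messages are emitted only after mdb_debug_level has been toggled on via
 * the kmem_debug dcmd below.
 */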
547c478bd9Sstevel@tonic-gate 
557c478bd9Sstevel@tonic-gate #define	KM_ALLOCATED		0x01
567c478bd9Sstevel@tonic-gate #define	KM_FREE			0x02
577c478bd9Sstevel@tonic-gate #define	KM_BUFCTL		0x04
587c478bd9Sstevel@tonic-gate #define	KM_CONSTRUCTED		0x08	/* only constructed free buffers */
597c478bd9Sstevel@tonic-gate #define	KM_HASH			0x10
607c478bd9Sstevel@tonic-gate 
617c478bd9Sstevel@tonic-gate static int mdb_debug_level = 0;
627c478bd9Sstevel@tonic-gate 
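/*
 * Intended as a kmem_cache walk callback: registers a per-cache walker named
 * after the cache itself, so that "::walk <cache name>" enumerates that
 * cache's buffers via kmem_walk_init/kmem_walk_step/kmem_walk_fini.
 */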
637c478bd9Sstevel@tonic-gate /*ARGSUSED*/
647c478bd9Sstevel@tonic-gate static int
657c478bd9Sstevel@tonic-gate kmem_init_walkers(uintptr_t addr, const kmem_cache_t *c, void *ignored)
667c478bd9Sstevel@tonic-gate {
677c478bd9Sstevel@tonic-gate 	mdb_walker_t w;
687c478bd9Sstevel@tonic-gate 	char descr[64];
697c478bd9Sstevel@tonic-gate 
707c478bd9Sstevel@tonic-gate 	(void) mdb_snprintf(descr, sizeof (descr),
717c478bd9Sstevel@tonic-gate 	    "walk the %s cache", c->cache_name);
727c478bd9Sstevel@tonic-gate 
737c478bd9Sstevel@tonic-gate 	w.walk_name = c->cache_name;
747c478bd9Sstevel@tonic-gate 	w.walk_descr = descr;
757c478bd9Sstevel@tonic-gate 	w.walk_init = kmem_walk_init;
767c478bd9Sstevel@tonic-gate 	w.walk_step = kmem_walk_step;
777c478bd9Sstevel@tonic-gate 	w.walk_fini = kmem_walk_fini;
787c478bd9Sstevel@tonic-gate 	w.walk_init_arg = (void *)addr;
797c478bd9Sstevel@tonic-gate 
807c478bd9Sstevel@tonic-gate 	if (mdb_add_walker(&w) == -1)
817c478bd9Sstevel@tonic-gate 		mdb_warn("failed to add %s walker", c->cache_name);
827c478bd9Sstevel@tonic-gate 
837c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
847c478bd9Sstevel@tonic-gate }
857c478bd9Sstevel@tonic-gate 
867c478bd9Sstevel@tonic-gate /*ARGSUSED*/
877c478bd9Sstevel@tonic-gate int
887c478bd9Sstevel@tonic-gate kmem_debug(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
897c478bd9Sstevel@tonic-gate {
907c478bd9Sstevel@tonic-gate 	mdb_debug_level ^= 1;
917c478bd9Sstevel@tonic-gate 
927c478bd9Sstevel@tonic-gate 	mdb_printf("kmem: debugging is now %s\n",
937c478bd9Sstevel@tonic-gate 	    mdb_debug_level ? "on" : "off");
947c478bd9Sstevel@tonic-gate 
957c478bd9Sstevel@tonic-gate 	return (DCMD_OK);
967c478bd9Sstevel@tonic-gate }
977c478bd9Sstevel@tonic-gate 
987c478bd9Sstevel@tonic-gate int
997c478bd9Sstevel@tonic-gate kmem_cache_walk_init(mdb_walk_state_t *wsp)
1007c478bd9Sstevel@tonic-gate {
1017c478bd9Sstevel@tonic-gate 	GElf_Sym sym;
1027c478bd9Sstevel@tonic-gate 
103*b5fca8f8Stomee 	if (mdb_lookup_by_name("kmem_caches", &sym) == -1) {
104*b5fca8f8Stomee 		mdb_warn("couldn't find kmem_caches");
1057c478bd9Sstevel@tonic-gate 		return (WALK_ERR);
1067c478bd9Sstevel@tonic-gate 	}
1077c478bd9Sstevel@tonic-gate 
108*b5fca8f8Stomee 	wsp->walk_addr = (uintptr_t)sym.st_value;
1097c478bd9Sstevel@tonic-gate 
110*b5fca8f8Stomee 	return (list_walk_init_named(wsp, "cache list", "cache"));
1117c478bd9Sstevel@tonic-gate }
1127c478bd9Sstevel@tonic-gate 
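/*
 * The kmem_cpu_cache walker layers on the "cpu" walker: the cache address
 * supplied by the caller is stashed in walk_data, and each CPU's
 * cpu_cache_offset is added to it to locate that CPU's kmem_cpu_cache_t.
 */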
1137c478bd9Sstevel@tonic-gate int
1147c478bd9Sstevel@tonic-gate kmem_cpu_cache_walk_init(mdb_walk_state_t *wsp)
1157c478bd9Sstevel@tonic-gate {
1167c478bd9Sstevel@tonic-gate 	if (wsp->walk_addr == NULL) {
1177c478bd9Sstevel@tonic-gate 		mdb_warn("kmem_cpu_cache doesn't support global walks");
1187c478bd9Sstevel@tonic-gate 		return (WALK_ERR);
1197c478bd9Sstevel@tonic-gate 	}
1207c478bd9Sstevel@tonic-gate 
1217c478bd9Sstevel@tonic-gate 	if (mdb_layered_walk("cpu", wsp) == -1) {
1227c478bd9Sstevel@tonic-gate 		mdb_warn("couldn't walk 'cpu'");
1237c478bd9Sstevel@tonic-gate 		return (WALK_ERR);
1247c478bd9Sstevel@tonic-gate 	}
1257c478bd9Sstevel@tonic-gate 
1267c478bd9Sstevel@tonic-gate 	wsp->walk_data = (void *)wsp->walk_addr;
1277c478bd9Sstevel@tonic-gate 
1287c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
1297c478bd9Sstevel@tonic-gate }
1307c478bd9Sstevel@tonic-gate 
1317c478bd9Sstevel@tonic-gate int
1327c478bd9Sstevel@tonic-gate kmem_cpu_cache_walk_step(mdb_walk_state_t *wsp)
1337c478bd9Sstevel@tonic-gate {
1347c478bd9Sstevel@tonic-gate 	uintptr_t caddr = (uintptr_t)wsp->walk_data;
1357c478bd9Sstevel@tonic-gate 	const cpu_t *cpu = wsp->walk_layer;
1367c478bd9Sstevel@tonic-gate 	kmem_cpu_cache_t cc;
1377c478bd9Sstevel@tonic-gate 
1387c478bd9Sstevel@tonic-gate 	caddr += cpu->cpu_cache_offset;
1397c478bd9Sstevel@tonic-gate 
1407c478bd9Sstevel@tonic-gate 	if (mdb_vread(&cc, sizeof (kmem_cpu_cache_t), caddr) == -1) {
1417c478bd9Sstevel@tonic-gate 		mdb_warn("couldn't read kmem_cpu_cache at %p", caddr);
1427c478bd9Sstevel@tonic-gate 		return (WALK_ERR);
1437c478bd9Sstevel@tonic-gate 	}
1447c478bd9Sstevel@tonic-gate 
1457c478bd9Sstevel@tonic-gate 	return (wsp->walk_callback(caddr, &cc, wsp->walk_cbdata));
1467c478bd9Sstevel@tonic-gate }
1477c478bd9Sstevel@tonic-gate 
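/*
 * Sanity-check callbacks handed to the checked list/AVL slab walks below.
 * Each one first verifies that the slab being visited actually belongs to
 * the expected cache before applying any further checks.
 */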
148*b5fca8f8Stomee static int
149*b5fca8f8Stomee kmem_slab_check(void *p, uintptr_t saddr, void *arg)
150*b5fca8f8Stomee {
151*b5fca8f8Stomee 	kmem_slab_t *sp = p;
152*b5fca8f8Stomee 	uintptr_t caddr = (uintptr_t)arg;
153*b5fca8f8Stomee 	if ((uintptr_t)sp->slab_cache != caddr) {
154*b5fca8f8Stomee 		mdb_warn("slab %p isn't in cache %p (in cache %p)\n",
155*b5fca8f8Stomee 		    saddr, caddr, sp->slab_cache);
156*b5fca8f8Stomee 		return (-1);
157*b5fca8f8Stomee 	}
158*b5fca8f8Stomee 
159*b5fca8f8Stomee 	return (0);
160*b5fca8f8Stomee }
161*b5fca8f8Stomee 
162*b5fca8f8Stomee static int
163*b5fca8f8Stomee kmem_partial_slab_check(void *p, uintptr_t saddr, void *arg)
164*b5fca8f8Stomee {
165*b5fca8f8Stomee 	kmem_slab_t *sp = p;
166*b5fca8f8Stomee 
167*b5fca8f8Stomee 	int rc = kmem_slab_check(p, saddr, arg);
168*b5fca8f8Stomee 	if (rc != 0) {
169*b5fca8f8Stomee 		return (rc);
170*b5fca8f8Stomee 	}
171*b5fca8f8Stomee 
172*b5fca8f8Stomee 	if (!KMEM_SLAB_IS_PARTIAL(sp)) {
173*b5fca8f8Stomee 		mdb_warn("slab %p is not a partial slab\n", saddr);
174*b5fca8f8Stomee 		return (-1);
175*b5fca8f8Stomee 	}
176*b5fca8f8Stomee 
177*b5fca8f8Stomee 	return (0);
178*b5fca8f8Stomee }
179*b5fca8f8Stomee 
180*b5fca8f8Stomee static int
181*b5fca8f8Stomee kmem_complete_slab_check(void *p, uintptr_t saddr, void *arg)
182*b5fca8f8Stomee {
183*b5fca8f8Stomee 	kmem_slab_t *sp = p;
184*b5fca8f8Stomee 
185*b5fca8f8Stomee 	int rc = kmem_slab_check(p, saddr, arg);
186*b5fca8f8Stomee 	if (rc != 0) {
187*b5fca8f8Stomee 		return (rc);
188*b5fca8f8Stomee 	}
189*b5fca8f8Stomee 
190*b5fca8f8Stomee 	if (!KMEM_SLAB_IS_ALL_USED(sp)) {
191*b5fca8f8Stomee 		mdb_warn("slab %p is not completely allocated\n", saddr);
192*b5fca8f8Stomee 		return (-1);
193*b5fca8f8Stomee 	}
194*b5fca8f8Stomee 
195*b5fca8f8Stomee 	return (0);
196*b5fca8f8Stomee }
197*b5fca8f8Stomee 
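/*
 * kmem_nth_slab_check() lets at most kns_nslabs slabs through before
 * returning non-zero, which is intended to end the checked list walk early.
 * kmem_first_complete_slab_walk_init() below uses it with kns_nslabs = 1 to
 * report only the first complete slab.
 */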
198*b5fca8f8Stomee typedef struct {
199*b5fca8f8Stomee 	uintptr_t kns_cache_addr;
200*b5fca8f8Stomee 	int kns_nslabs;
201*b5fca8f8Stomee } kmem_nth_slab_t;
202*b5fca8f8Stomee 
203*b5fca8f8Stomee static int
204*b5fca8f8Stomee kmem_nth_slab_check(void *p, uintptr_t saddr, void *arg)
205*b5fca8f8Stomee {
206*b5fca8f8Stomee 	kmem_nth_slab_t *chkp = arg;
207*b5fca8f8Stomee 
208*b5fca8f8Stomee 	int rc = kmem_slab_check(p, saddr, (void *)chkp->kns_cache_addr);
209*b5fca8f8Stomee 	if (rc != 0) {
210*b5fca8f8Stomee 		return (rc);
211*b5fca8f8Stomee 	}
212*b5fca8f8Stomee 
213*b5fca8f8Stomee 	return (chkp->kns_nslabs-- == 0 ? 1 : 0);
214*b5fca8f8Stomee }
215*b5fca8f8Stomee 
216*b5fca8f8Stomee static int
217*b5fca8f8Stomee kmem_complete_slab_walk_init(mdb_walk_state_t *wsp)
218*b5fca8f8Stomee {
219*b5fca8f8Stomee 	uintptr_t caddr = wsp->walk_addr;
220*b5fca8f8Stomee 
221*b5fca8f8Stomee 	wsp->walk_addr = (uintptr_t)(caddr +
222*b5fca8f8Stomee 	    offsetof(kmem_cache_t, cache_complete_slabs));
223*b5fca8f8Stomee 
224*b5fca8f8Stomee 	return (list_walk_init_checked(wsp, "slab list", "slab",
225*b5fca8f8Stomee 	    kmem_complete_slab_check, (void *)caddr));
226*b5fca8f8Stomee }
227*b5fca8f8Stomee 
228*b5fca8f8Stomee static int
229*b5fca8f8Stomee kmem_partial_slab_walk_init(mdb_walk_state_t *wsp)
230*b5fca8f8Stomee {
231*b5fca8f8Stomee 	uintptr_t caddr = wsp->walk_addr;
232*b5fca8f8Stomee 
233*b5fca8f8Stomee 	wsp->walk_addr = (uintptr_t)(caddr +
234*b5fca8f8Stomee 	    offsetof(kmem_cache_t, cache_partial_slabs));
235*b5fca8f8Stomee 
236*b5fca8f8Stomee 	return (avl_walk_init_checked(wsp, "slab list", "slab",
237*b5fca8f8Stomee 	    kmem_partial_slab_check, (void *)caddr));
238*b5fca8f8Stomee }
239*b5fca8f8Stomee 
2407c478bd9Sstevel@tonic-gate int
2417c478bd9Sstevel@tonic-gate kmem_slab_walk_init(mdb_walk_state_t *wsp)
2427c478bd9Sstevel@tonic-gate {
2437c478bd9Sstevel@tonic-gate 	uintptr_t caddr = wsp->walk_addr;
2447c478bd9Sstevel@tonic-gate 
2457c478bd9Sstevel@tonic-gate 	if (caddr == NULL) {
2467c478bd9Sstevel@tonic-gate 		mdb_warn("kmem_slab doesn't support global walks\n");
2477c478bd9Sstevel@tonic-gate 		return (WALK_ERR);
2487c478bd9Sstevel@tonic-gate 	}
2497c478bd9Sstevel@tonic-gate 
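	/*
	 * A cache's slabs live in two places: completely allocated slabs on
	 * the cache_complete_slabs list and partially allocated slabs in the
	 * cache_partial_slabs AVL tree.  Walk the complete list first, then
	 * the partial tree, as a single combined walk.
	 */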
250*b5fca8f8Stomee 	combined_walk_init(wsp);
251*b5fca8f8Stomee 	combined_walk_add(wsp,
252*b5fca8f8Stomee 	    kmem_complete_slab_walk_init, list_walk_step, list_walk_fini);
253*b5fca8f8Stomee 	combined_walk_add(wsp,
254*b5fca8f8Stomee 	    kmem_partial_slab_walk_init, avl_walk_step, avl_walk_fini);
2557c478bd9Sstevel@tonic-gate 
2567c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
2577c478bd9Sstevel@tonic-gate }
2587c478bd9Sstevel@tonic-gate 
259*b5fca8f8Stomee static int
260*b5fca8f8Stomee kmem_first_complete_slab_walk_init(mdb_walk_state_t *wsp)
261*b5fca8f8Stomee {
262*b5fca8f8Stomee 	uintptr_t caddr = wsp->walk_addr;
263*b5fca8f8Stomee 	kmem_nth_slab_t *chk;
264*b5fca8f8Stomee 
265*b5fca8f8Stomee 	chk = mdb_alloc(sizeof (kmem_nth_slab_t),
266*b5fca8f8Stomee 	    UM_SLEEP | UM_GC);
267*b5fca8f8Stomee 	chk->kns_cache_addr = caddr;
268*b5fca8f8Stomee 	chk->kns_nslabs = 1;
269*b5fca8f8Stomee 	wsp->walk_addr = (uintptr_t)(caddr +
270*b5fca8f8Stomee 	    offsetof(kmem_cache_t, cache_complete_slabs));
271*b5fca8f8Stomee 
272*b5fca8f8Stomee 	return (list_walk_init_checked(wsp, "slab list", "slab",
273*b5fca8f8Stomee 	    kmem_nth_slab_check, chk));
274*b5fca8f8Stomee }
275*b5fca8f8Stomee 
2767c478bd9Sstevel@tonic-gate int
2777c478bd9Sstevel@tonic-gate kmem_slab_walk_partial_init(mdb_walk_state_t *wsp)
2787c478bd9Sstevel@tonic-gate {
2797c478bd9Sstevel@tonic-gate 	uintptr_t caddr = wsp->walk_addr;
2807c478bd9Sstevel@tonic-gate 	kmem_cache_t c;
2817c478bd9Sstevel@tonic-gate 
2827c478bd9Sstevel@tonic-gate 	if (caddr == NULL) {
2837c478bd9Sstevel@tonic-gate 		mdb_warn("kmem_slab_partial doesn't support global walks\n");
2847c478bd9Sstevel@tonic-gate 		return (WALK_ERR);
2857c478bd9Sstevel@tonic-gate 	}
2867c478bd9Sstevel@tonic-gate 
2877c478bd9Sstevel@tonic-gate 	if (mdb_vread(&c, sizeof (c), caddr) == -1) {
2887c478bd9Sstevel@tonic-gate 		mdb_warn("couldn't read kmem_cache at %p", caddr);
2897c478bd9Sstevel@tonic-gate 		return (WALK_ERR);
2907c478bd9Sstevel@tonic-gate 	}
2917c478bd9Sstevel@tonic-gate 
292*b5fca8f8Stomee 	combined_walk_init(wsp);
2937c478bd9Sstevel@tonic-gate 
2947c478bd9Sstevel@tonic-gate 	/*
2957c478bd9Sstevel@tonic-gate 	 * Some consumers (kmem_walk_step(), in particular) require at
2967c478bd9Sstevel@tonic-gate 	 * least one callback if there are any buffers in the cache.  So
297*b5fca8f8Stomee 	 * if there are *no* partial slabs, report the first full slab, if
2987c478bd9Sstevel@tonic-gate 	 * any.
2997c478bd9Sstevel@tonic-gate 	 *
3007c478bd9Sstevel@tonic-gate 	 * Yes, this is ugly, but it's cleaner than the other possibilities.
3017c478bd9Sstevel@tonic-gate 	 */
302*b5fca8f8Stomee 	if (c.cache_partial_slabs.avl_numnodes == 0) {
303*b5fca8f8Stomee 		combined_walk_add(wsp, kmem_first_complete_slab_walk_init,
304*b5fca8f8Stomee 		    list_walk_step, list_walk_fini);
305*b5fca8f8Stomee 	} else {
306*b5fca8f8Stomee 		combined_walk_add(wsp, kmem_partial_slab_walk_init,
307*b5fca8f8Stomee 		    avl_walk_step, avl_walk_fini);
3087c478bd9Sstevel@tonic-gate 	}
3097c478bd9Sstevel@tonic-gate 
310*b5fca8f8Stomee 	return (WALK_NEXT);
3117c478bd9Sstevel@tonic-gate }
3127c478bd9Sstevel@tonic-gate 
3137c478bd9Sstevel@tonic-gate int
3147c478bd9Sstevel@tonic-gate kmem_cache(uintptr_t addr, uint_t flags, int ac, const mdb_arg_t *argv)
3157c478bd9Sstevel@tonic-gate {
3167c478bd9Sstevel@tonic-gate 	kmem_cache_t c;
317*b5fca8f8Stomee 	const char *filter = NULL;
318*b5fca8f8Stomee 
319*b5fca8f8Stomee 	if (mdb_getopts(ac, argv,
320*b5fca8f8Stomee 	    'n', MDB_OPT_STR, &filter,
321*b5fca8f8Stomee 	    NULL) != ac) {
322*b5fca8f8Stomee 		return (DCMD_USAGE);
323*b5fca8f8Stomee 	}
3247c478bd9Sstevel@tonic-gate 
3257c478bd9Sstevel@tonic-gate 	if (!(flags & DCMD_ADDRSPEC)) {
3267c478bd9Sstevel@tonic-gate 		if (mdb_walk_dcmd("kmem_cache", "kmem_cache", ac, argv) == -1) {
3277c478bd9Sstevel@tonic-gate 			mdb_warn("can't walk kmem_cache");
3287c478bd9Sstevel@tonic-gate 			return (DCMD_ERR);
3297c478bd9Sstevel@tonic-gate 		}
3307c478bd9Sstevel@tonic-gate 		return (DCMD_OK);
3317c478bd9Sstevel@tonic-gate 	}
3327c478bd9Sstevel@tonic-gate 
3337c478bd9Sstevel@tonic-gate 	if (DCMD_HDRSPEC(flags))
3347c478bd9Sstevel@tonic-gate 		mdb_printf("%-?s %-25s %4s %6s %8s %8s\n", "ADDR", "NAME",
3357c478bd9Sstevel@tonic-gate 		    "FLAG", "CFLAG", "BUFSIZE", "BUFTOTL");
3367c478bd9Sstevel@tonic-gate 
3377c478bd9Sstevel@tonic-gate 	if (mdb_vread(&c, sizeof (c), addr) == -1) {
3387c478bd9Sstevel@tonic-gate 		mdb_warn("couldn't read kmem_cache at %p", addr);
3397c478bd9Sstevel@tonic-gate 		return (DCMD_ERR);
3407c478bd9Sstevel@tonic-gate 	}
3417c478bd9Sstevel@tonic-gate 
342*b5fca8f8Stomee 	if ((filter != NULL) && (strstr(c.cache_name, filter) == NULL))
343*b5fca8f8Stomee 		return (DCMD_OK);
344*b5fca8f8Stomee 
3457c478bd9Sstevel@tonic-gate 	mdb_printf("%0?p %-25s %04x %06x %8ld %8lld\n", addr, c.cache_name,
3467c478bd9Sstevel@tonic-gate 	    c.cache_flags, c.cache_cflags, c.cache_bufsize, c.cache_buftotal);
3477c478bd9Sstevel@tonic-gate 
3487c478bd9Sstevel@tonic-gate 	return (DCMD_OK);
3497c478bd9Sstevel@tonic-gate }
3507c478bd9Sstevel@tonic-gate 
351*b5fca8f8Stomee void
352*b5fca8f8Stomee kmem_cache_help(void)
353*b5fca8f8Stomee {
354*b5fca8f8Stomee 	mdb_printf("%s", "Print kernel memory caches.\n\n");
355*b5fca8f8Stomee 	mdb_dec_indent(2);
356*b5fca8f8Stomee 	mdb_printf("%<b>OPTIONS%</b>\n");
357*b5fca8f8Stomee 	mdb_inc_indent(2);
358*b5fca8f8Stomee 	mdb_printf("%s",
359*b5fca8f8Stomee "  -n name\n"
360*b5fca8f8Stomee "        name of kmem cache (or matching partial name)\n"
361*b5fca8f8Stomee "\n"
362*b5fca8f8Stomee "Column\tDescription\n"
363*b5fca8f8Stomee "\n"
364*b5fca8f8Stomee "ADDR\t\taddress of kmem cache\n"
365*b5fca8f8Stomee "NAME\t\tname of kmem cache\n"
366*b5fca8f8Stomee "FLAG\t\tvarious cache state flags\n"
367*b5fca8f8Stomee "CFLAG\t\tcache creation flags\n"
368*b5fca8f8Stomee "BUFSIZE\tobject size in bytes\n"
369*b5fca8f8Stomee "BUFTOTL\tcurrent total buffers in cache (allocated and free)\n");
370*b5fca8f8Stomee }
3713893cb7fStomee 
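/*
 * Print a distribution of allocated buffers per slab for a single cache.
 * The first row is reserved for completely allocated slabs; the remaining
 * bucket ranges are printed in descending order (see the comments below) so
 * that fragmentation is visible at a glance.
 */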
3723893cb7fStomee #define	LABEL_WIDTH	11
3733893cb7fStomee static void
3743893cb7fStomee kmem_slabs_print_dist(uint_t *ks_bucket, size_t buffers_per_slab,
3753893cb7fStomee     size_t maxbuckets, size_t minbucketsize)
3763893cb7fStomee {
3773893cb7fStomee 	uint64_t total;
3783893cb7fStomee 	int buckets;
3793893cb7fStomee 	int i;
3803893cb7fStomee 	const int *distarray;
3813893cb7fStomee 	int complete[2];
3823893cb7fStomee 
3833893cb7fStomee 	buckets = buffers_per_slab;
3843893cb7fStomee 
3853893cb7fStomee 	total = 0;
3863893cb7fStomee 	for (i = 0; i <= buffers_per_slab; i++)
3873893cb7fStomee 		total += ks_bucket[i];
3883893cb7fStomee 
3893893cb7fStomee 	if (maxbuckets > 1)
3903893cb7fStomee 		buckets = MIN(buckets, maxbuckets);
3913893cb7fStomee 
3923893cb7fStomee 	if (minbucketsize > 1) {
3933893cb7fStomee 		/*
3943893cb7fStomee 		 * minbucketsize does not apply to the first bucket reserved
3953893cb7fStomee 		 * for completely allocated slabs
3963893cb7fStomee 		 */
3973893cb7fStomee 		buckets = MIN(buckets, 1 + ((buffers_per_slab - 1) /
3983893cb7fStomee 		    minbucketsize));
3993893cb7fStomee 		if ((buckets < 2) && (buffers_per_slab > 1)) {
4003893cb7fStomee 			buckets = 2;
4013893cb7fStomee 			minbucketsize = (buffers_per_slab - 1);
4023893cb7fStomee 		}
4033893cb7fStomee 	}
4043893cb7fStomee 
4053893cb7fStomee 	/*
4063893cb7fStomee 	 * The first printed bucket is reserved for completely allocated slabs.
4073893cb7fStomee 	 * Passing (buckets - 1) excludes that bucket from the generated
4083893cb7fStomee 	 * distribution, since we're handling it as a special case.
4093893cb7fStomee 	 */
4103893cb7fStomee 	complete[0] = buffers_per_slab;
4113893cb7fStomee 	complete[1] = buffers_per_slab + 1;
412087e1372Stomee 	distarray = dist_linear(buckets - 1, 1, buffers_per_slab - 1);
4133893cb7fStomee 
4143893cb7fStomee 	mdb_printf("%*s\n", LABEL_WIDTH, "Allocated");
415087e1372Stomee 	dist_print_header("Buffers", LABEL_WIDTH, "Slabs");
4163893cb7fStomee 
417087e1372Stomee 	dist_print_bucket(complete, 0, ks_bucket, total, LABEL_WIDTH);
4183893cb7fStomee 	/*
4193893cb7fStomee 	 * Print bucket ranges in descending order after the first bucket for
4203893cb7fStomee 	 * completely allocated slabs, so a person can see immediately whether
4213893cb7fStomee 	 * or not there is fragmentation without having to scan possibly
4223893cb7fStomee 	 * multiple screens of output. Starting at (buckets - 2) excludes the
4233893cb7fStomee 	 * extra terminating bucket.
4243893cb7fStomee 	 */
4253893cb7fStomee 	for (i = buckets - 2; i >= 0; i--) {
426087e1372Stomee 		dist_print_bucket(distarray, i, ks_bucket, total, LABEL_WIDTH);
4273893cb7fStomee 	}
4283893cb7fStomee 	mdb_printf("\n");
4293893cb7fStomee }
4303893cb7fStomee #undef LABEL_WIDTH
4313893cb7fStomee 
4323893cb7fStomee /*ARGSUSED*/
4333893cb7fStomee static int
4343893cb7fStomee kmem_first_slab(uintptr_t addr, const kmem_slab_t *sp, boolean_t *is_slab)
4353893cb7fStomee {
4363893cb7fStomee 	*is_slab = B_TRUE;
4373893cb7fStomee 	return (WALK_DONE);
4383893cb7fStomee }
4393893cb7fStomee 
4403893cb7fStomee /*ARGSUSED*/
4413893cb7fStomee static int
4423893cb7fStomee kmem_first_partial_slab(uintptr_t addr, const kmem_slab_t *sp,
4433893cb7fStomee     boolean_t *is_slab)
4443893cb7fStomee {
4453893cb7fStomee 	/*
446*b5fca8f8Stomee 	 * The "kmem_slab_partial" walker reports the first full slab if there
4473893cb7fStomee 	 * are no partial slabs (for the sake of consumers that require at least
4483893cb7fStomee 	 * one callback if there are any buffers in the cache).
4493893cb7fStomee 	 */
450*b5fca8f8Stomee 	*is_slab = KMEM_SLAB_IS_PARTIAL(sp);
4513893cb7fStomee 	return (WALK_DONE);
4523893cb7fStomee }
4533893cb7fStomee 
454*b5fca8f8Stomee typedef struct kmem_slab_usage {
455*b5fca8f8Stomee 	int ksu_refcnt;			/* count of allocated buffers on slab */
456*b5fca8f8Stomee 	boolean_t ksu_nomove;		/* slab marked non-reclaimable */
457*b5fca8f8Stomee } kmem_slab_usage_t;
458*b5fca8f8Stomee 
459*b5fca8f8Stomee typedef struct kmem_slab_stats {
460*b5fca8f8Stomee 	const kmem_cache_t *ks_cp;
461*b5fca8f8Stomee 	int ks_slabs;			/* slabs in cache */
462*b5fca8f8Stomee 	int ks_partial_slabs;		/* partially allocated slabs in cache */
463*b5fca8f8Stomee 	uint64_t ks_unused_buffers;	/* total unused buffers in cache */
464*b5fca8f8Stomee 	int ks_max_buffers_per_slab;	/* max buffers per slab */
465*b5fca8f8Stomee 	int ks_usage_len;		/* ks_usage array length */
466*b5fca8f8Stomee 	kmem_slab_usage_t *ks_usage;	/* partial slab usage */
467*b5fca8f8Stomee 	uint_t *ks_bucket;		/* slab usage distribution */
468*b5fca8f8Stomee } kmem_slab_stats_t;
469*b5fca8f8Stomee 
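/*
 * Per-slab callback used by kmem_slabs(): counts slabs, bins each slab by
 * its allocated-buffer count in ks_bucket, and records each partial slab's
 * usage in ks_usage (grown geometrically as needed) for the -v output.
 */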
4703893cb7fStomee /*ARGSUSED*/
4713893cb7fStomee static int
4723893cb7fStomee kmem_slablist_stat(uintptr_t addr, const kmem_slab_t *sp,
4733893cb7fStomee     kmem_slab_stats_t *ks)
4743893cb7fStomee {
4753893cb7fStomee 	kmem_slab_usage_t *ksu;
4763893cb7fStomee 	long unused;
4773893cb7fStomee 
4783893cb7fStomee 	ks->ks_slabs++;
4793893cb7fStomee 	ks->ks_bucket[sp->slab_refcnt]++;
4803893cb7fStomee 
4813893cb7fStomee 	unused = (sp->slab_chunks - sp->slab_refcnt);
4823893cb7fStomee 	if (unused == 0) {
4833893cb7fStomee 		return (WALK_NEXT);
4843893cb7fStomee 	}
4853893cb7fStomee 
4863893cb7fStomee 	ks->ks_partial_slabs++;
4873893cb7fStomee 	ks->ks_unused_buffers += unused;
4883893cb7fStomee 
4893893cb7fStomee 	if (ks->ks_partial_slabs > ks->ks_usage_len) {
4903893cb7fStomee 		kmem_slab_usage_t *usage;
4913893cb7fStomee 		int len = ks->ks_usage_len;
4923893cb7fStomee 
4933893cb7fStomee 		len = (len == 0 ? 16 : len * 2);
4943893cb7fStomee 		usage = mdb_zalloc(len * sizeof (kmem_slab_usage_t), UM_SLEEP);
4953893cb7fStomee 		if (ks->ks_usage != NULL) {
4963893cb7fStomee 			bcopy(ks->ks_usage, usage,
4973893cb7fStomee 			    ks->ks_usage_len * sizeof (kmem_slab_usage_t));
4983893cb7fStomee 			mdb_free(ks->ks_usage,
4993893cb7fStomee 			    ks->ks_usage_len * sizeof (kmem_slab_usage_t));
5003893cb7fStomee 		}
5013893cb7fStomee 		ks->ks_usage = usage;
5023893cb7fStomee 		ks->ks_usage_len = len;
5033893cb7fStomee 	}
5043893cb7fStomee 
5053893cb7fStomee 	ksu = &ks->ks_usage[ks->ks_partial_slabs - 1];
5063893cb7fStomee 	ksu->ksu_refcnt = sp->slab_refcnt;
507*b5fca8f8Stomee 	ksu->ksu_nomove = (sp->slab_flags & KMEM_SLAB_NOMOVE);
5083893cb7fStomee 	return (WALK_NEXT);
5093893cb7fStomee }
5103893cb7fStomee 
5113893cb7fStomee static void
5123893cb7fStomee kmem_slabs_header()
5133893cb7fStomee {
5143893cb7fStomee 	mdb_printf("%-25s %8s %8s %9s %9s %6s\n",
5153893cb7fStomee 	    "", "", "Partial", "", "Unused", "");
5163893cb7fStomee 	mdb_printf("%-25s %8s %8s %9s %9s %6s\n",
5173893cb7fStomee 	    "Cache Name", "Slabs", "Slabs", "Buffers", "Buffers", "Waste");
5183893cb7fStomee 	mdb_printf("%-25s %8s %8s %9s %9s %6s\n",
5193893cb7fStomee 	    "-------------------------", "--------", "--------", "---------",
5203893cb7fStomee 	    "---------", "------");
5213893cb7fStomee }
5223893cb7fStomee 
5233893cb7fStomee int
5243893cb7fStomee kmem_slabs(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
5253893cb7fStomee {
5263893cb7fStomee 	kmem_cache_t c;
5273893cb7fStomee 	kmem_slab_stats_t stats;
5283893cb7fStomee 	mdb_walk_cb_t cb;
5293893cb7fStomee 	int pct;
5303893cb7fStomee 	int tenths_pct;
5313893cb7fStomee 	size_t maxbuckets = 1;
5323893cb7fStomee 	size_t minbucketsize = 0;
5333893cb7fStomee 	const char *filter = NULL;
534*b5fca8f8Stomee 	const char *name = NULL;
5353893cb7fStomee 	uint_t opt_v = FALSE;
536*b5fca8f8Stomee 	boolean_t buckets = B_FALSE;
5373893cb7fStomee 	boolean_t skip = B_FALSE;
5383893cb7fStomee 
5393893cb7fStomee 	if (mdb_getopts(argc, argv,
5403893cb7fStomee 	    'B', MDB_OPT_UINTPTR, &minbucketsize,
5413893cb7fStomee 	    'b', MDB_OPT_UINTPTR, &maxbuckets,
5423893cb7fStomee 	    'n', MDB_OPT_STR, &filter,
543*b5fca8f8Stomee 	    'N', MDB_OPT_STR, &name,
5443893cb7fStomee 	    'v', MDB_OPT_SETBITS, TRUE, &opt_v,
5453893cb7fStomee 	    NULL) != argc) {
5463893cb7fStomee 		return (DCMD_USAGE);
5473893cb7fStomee 	}
5483893cb7fStomee 
549*b5fca8f8Stomee 	if ((maxbuckets != 1) || (minbucketsize != 0)) {
550*b5fca8f8Stomee 		buckets = B_TRUE;
5513893cb7fStomee 	}
5523893cb7fStomee 
5533893cb7fStomee 	if (!(flags & DCMD_ADDRSPEC)) {
5543893cb7fStomee 		if (mdb_walk_dcmd("kmem_cache", "kmem_slabs", argc,
5553893cb7fStomee 		    argv) == -1) {
5563893cb7fStomee 			mdb_warn("can't walk kmem_cache");
5573893cb7fStomee 			return (DCMD_ERR);
5583893cb7fStomee 		}
5593893cb7fStomee 		return (DCMD_OK);
5603893cb7fStomee 	}
5613893cb7fStomee 
5623893cb7fStomee 	if (mdb_vread(&c, sizeof (c), addr) == -1) {
5633893cb7fStomee 		mdb_warn("couldn't read kmem_cache at %p", addr);
5643893cb7fStomee 		return (DCMD_ERR);
5653893cb7fStomee 	}
5663893cb7fStomee 
567*b5fca8f8Stomee 	if (name == NULL) {
568*b5fca8f8Stomee 		skip = ((filter != NULL) &&
569*b5fca8f8Stomee 		    (strstr(c.cache_name, filter) == NULL));
570*b5fca8f8Stomee 	} else if (filter == NULL) {
571*b5fca8f8Stomee 		skip = (strcmp(c.cache_name, name) != 0);
572*b5fca8f8Stomee 	} else {
573*b5fca8f8Stomee 		/* match either -n or -N */
574*b5fca8f8Stomee 		skip = ((strcmp(c.cache_name, name) != 0) &&
575*b5fca8f8Stomee 		    (strstr(c.cache_name, filter) == NULL));
5763893cb7fStomee 	}
5773893cb7fStomee 
578*b5fca8f8Stomee 	if (!(opt_v || buckets) && DCMD_HDRSPEC(flags)) {
5793893cb7fStomee 		kmem_slabs_header();
580*b5fca8f8Stomee 	} else if ((opt_v || buckets) && !skip) {
5813893cb7fStomee 		if (DCMD_HDRSPEC(flags)) {
5823893cb7fStomee 			kmem_slabs_header();
5833893cb7fStomee 		} else {
5843893cb7fStomee 			boolean_t is_slab = B_FALSE;
5853893cb7fStomee 			const char *walker_name;
5863893cb7fStomee 			if (opt_v) {
5873893cb7fStomee 				cb = (mdb_walk_cb_t)kmem_first_partial_slab;
5883893cb7fStomee 				walker_name = "kmem_slab_partial";
5893893cb7fStomee 			} else {
5903893cb7fStomee 				cb = (mdb_walk_cb_t)kmem_first_slab;
5913893cb7fStomee 				walker_name = "kmem_slab";
5923893cb7fStomee 			}
5933893cb7fStomee 			(void) mdb_pwalk(walker_name, cb, &is_slab, addr);
5943893cb7fStomee 			if (is_slab) {
5953893cb7fStomee 				kmem_slabs_header();
5963893cb7fStomee 			}
5973893cb7fStomee 		}
5983893cb7fStomee 	}
5993893cb7fStomee 
6003893cb7fStomee 	if (skip) {
6013893cb7fStomee 		return (DCMD_OK);
6023893cb7fStomee 	}
6033893cb7fStomee 
6043893cb7fStomee 	bzero(&stats, sizeof (kmem_slab_stats_t));
605*b5fca8f8Stomee 	stats.ks_cp = &c;
606*b5fca8f8Stomee 	stats.ks_max_buffers_per_slab = c.cache_maxchunks;
607*b5fca8f8Stomee 	/* +1 to include a zero bucket */
608*b5fca8f8Stomee 	stats.ks_bucket = mdb_zalloc((stats.ks_max_buffers_per_slab + 1) *
609*b5fca8f8Stomee 	    sizeof (*stats.ks_bucket), UM_SLEEP);
6103893cb7fStomee 	cb = (mdb_walk_cb_t)kmem_slablist_stat;
6113893cb7fStomee 	(void) mdb_pwalk("kmem_slab", cb, &stats, addr);
6123893cb7fStomee 
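	/*
	 * Compute the waste percentage (unused buffers as a fraction of all
	 * buffers) to a tenth of a percent.  For example, 37 unused out of
	 * 500 total buffers gives n = 370000, pct = 740 (hundredths of a
	 * percent), tenths_pct = 4, and finally pct = 7, printed as "7.4%".
	 */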
6133893cb7fStomee 	if (c.cache_buftotal == 0) {
6143893cb7fStomee 		pct = 0;
6153893cb7fStomee 		tenths_pct = 0;
6163893cb7fStomee 	} else {
6173893cb7fStomee 		uint64_t n = stats.ks_unused_buffers * 10000;
6183893cb7fStomee 		pct = (int)(n / c.cache_buftotal);
6193893cb7fStomee 		tenths_pct = pct - ((pct / 100) * 100);
6203893cb7fStomee 		tenths_pct = (tenths_pct + 5) / 10; /* round nearest tenth */
6213893cb7fStomee 		if (tenths_pct == 10) {
6223893cb7fStomee 			pct += 100;
6233893cb7fStomee 			tenths_pct = 0;
6243893cb7fStomee 		}
6253893cb7fStomee 	}
6263893cb7fStomee 
6273893cb7fStomee 	pct /= 100;
6283893cb7fStomee 	mdb_printf("%-25s %8d %8d %9lld %9lld %3d.%1d%%\n", c.cache_name,
6293893cb7fStomee 	    stats.ks_slabs, stats.ks_partial_slabs, c.cache_buftotal,
6303893cb7fStomee 	    stats.ks_unused_buffers, pct, tenths_pct);
6313893cb7fStomee 
6323893cb7fStomee 	if (maxbuckets == 0) {
633*b5fca8f8Stomee 		maxbuckets = stats.ks_max_buffers_per_slab;
6343893cb7fStomee 	}
6353893cb7fStomee 
6363893cb7fStomee 	if (((maxbuckets > 1) || (minbucketsize > 0)) &&
6373893cb7fStomee 	    (stats.ks_slabs > 0)) {
6383893cb7fStomee 		mdb_printf("\n");
6393893cb7fStomee 		kmem_slabs_print_dist(stats.ks_bucket,
640*b5fca8f8Stomee 		    stats.ks_max_buffers_per_slab, maxbuckets, minbucketsize);
641*b5fca8f8Stomee 	}
642*b5fca8f8Stomee 
643*b5fca8f8Stomee 	mdb_free(stats.ks_bucket, (stats.ks_max_buffers_per_slab + 1) *
644*b5fca8f8Stomee 	    sizeof (*stats.ks_bucket));
645*b5fca8f8Stomee 
646*b5fca8f8Stomee 	if (!opt_v) {
647*b5fca8f8Stomee 		return (DCMD_OK);
6483893cb7fStomee 	}
6493893cb7fStomee 
6503893cb7fStomee 	if (opt_v && (stats.ks_partial_slabs > 0)) {
6513893cb7fStomee 		int i;
6523893cb7fStomee 		kmem_slab_usage_t *ksu;
6533893cb7fStomee 
6543893cb7fStomee 		mdb_printf("  %d complete, %d partial",
6553893cb7fStomee 		    (stats.ks_slabs - stats.ks_partial_slabs),
6563893cb7fStomee 		    stats.ks_partial_slabs);
6573893cb7fStomee 		if (stats.ks_partial_slabs > 0) {
658*b5fca8f8Stomee 			mdb_printf(" (%d):", stats.ks_max_buffers_per_slab);
6593893cb7fStomee 		}
6603893cb7fStomee 		for (i = 0; i < stats.ks_partial_slabs; i++) {
6613893cb7fStomee 			ksu = &stats.ks_usage[i];
662*b5fca8f8Stomee 			if (ksu->ksu_nomove) {
663*b5fca8f8Stomee 				const char *symbol = "*";
664*b5fca8f8Stomee 				mdb_printf(" %d%s", ksu->ksu_refcnt, symbol);
665*b5fca8f8Stomee 			} else {
666*b5fca8f8Stomee 				mdb_printf(" %d", ksu->ksu_refcnt);
667*b5fca8f8Stomee 			}
6683893cb7fStomee 		}
6693893cb7fStomee 		mdb_printf("\n\n");
6703893cb7fStomee 	}
6713893cb7fStomee 
6723893cb7fStomee 	if (stats.ks_usage_len > 0) {
6733893cb7fStomee 		mdb_free(stats.ks_usage,
6743893cb7fStomee 		    stats.ks_usage_len * sizeof (kmem_slab_usage_t));
6753893cb7fStomee 	}
6763893cb7fStomee 
6773893cb7fStomee 	return (DCMD_OK);
6783893cb7fStomee }
6793893cb7fStomee 
6803893cb7fStomee void
6813893cb7fStomee kmem_slabs_help(void)
6823893cb7fStomee {
683*b5fca8f8Stomee 	mdb_printf("%s",
684*b5fca8f8Stomee "Display slab usage per kmem cache.\n\n");
6853893cb7fStomee 	mdb_dec_indent(2);
6863893cb7fStomee 	mdb_printf("%<b>OPTIONS%</b>\n");
6873893cb7fStomee 	mdb_inc_indent(2);
6883893cb7fStomee 	mdb_printf("%s",
6893893cb7fStomee "  -n name\n"
6903893cb7fStomee "        name of kmem cache (or matching partial name)\n"
691*b5fca8f8Stomee "  -N name\n"
692*b5fca8f8Stomee "        exact name of kmem cache\n"
6933893cb7fStomee "  -b maxbins\n"
6943893cb7fStomee "        Print a distribution of allocated buffers per slab using at\n"
6953893cb7fStomee "        most maxbins bins. The first bin is reserved for completely\n"
6963893cb7fStomee "        allocated slabs. Setting maxbins to zero (-b 0) has the same\n"
6973893cb7fStomee "        effect as specifying the maximum allocated buffers per slab\n"
6983893cb7fStomee "        or setting minbinsize to 1 (-B 1).\n"
6993893cb7fStomee "  -B minbinsize\n"
7003893cb7fStomee "        Print a distribution of allocated buffers per slab, making\n"
7013893cb7fStomee "        all bins (except the first, reserved for completely allocated\n"
7023893cb7fStomee "        slabs) at least minbinsize buffers apart.\n"
7033893cb7fStomee "  -v    verbose output: List the allocated buffer count of each partial\n"
7043893cb7fStomee "        slab on the free list in order from front to back to show how\n"
7053893cb7fStomee "        closely the slabs are ordered by usage. For example\n"
7063893cb7fStomee "\n"
7073893cb7fStomee "          10 complete, 3 partial (8): 7 3 1\n"
7083893cb7fStomee "\n"
7093893cb7fStomee "        means there are thirteen slabs with eight buffers each, including\n"
7103893cb7fStomee "        three partially allocated slabs with fewer than all eight buffers\n"
7113893cb7fStomee "        allocated.\n"
7123893cb7fStomee "\n"
7133893cb7fStomee "        Buffer allocations are always from the front of the partial slab\n"
7143893cb7fStomee "        list. When a buffer is freed from a completely used slab, that\n"
7153893cb7fStomee "        slab is added to the front of the partial slab list. Assuming\n"
7163893cb7fStomee "        that all buffers are equally likely to be freed soon, the\n"
7173893cb7fStomee "        desired order of partial slabs is most-used at the front of the\n"
7183893cb7fStomee "        list and least-used at the back (as in the example above).\n"
7193893cb7fStomee "        However, if a slab contains an allocated buffer that will not\n"
7203893cb7fStomee "        soon be freed, it would be better for that slab to be at the\n"
721*b5fca8f8Stomee "        front where all of its buffers can be allocated. Taking a slab\n"
722*b5fca8f8Stomee "        off the partial slab list (either with all buffers freed or all\n"
723*b5fca8f8Stomee "        buffers allocated) reduces cache fragmentation.\n"
724*b5fca8f8Stomee "\n"
725*b5fca8f8Stomee "        A slab's allocated buffer count representing a partial slab (9 in\n"
726*b5fca8f8Stomee "        the example below) may be marked as follows:\n"
727*b5fca8f8Stomee "\n"
728*b5fca8f8Stomee "        9*   An asterisk indicates that kmem has marked the slab non-\n"
729*b5fca8f8Stomee "        reclaimable because the kmem client refused to move one of the\n"
730*b5fca8f8Stomee "        slab's buffers. Since kmem does not expect to completely free the\n"
731*b5fca8f8Stomee "        slab, it moves it to the front of the list in the hope of\n"
732*b5fca8f8Stomee "        completely allocating it instead. A slab marked with an asterisk\n"
733*b5fca8f8Stomee "        stays marked for as long as it remains on the partial slab list.\n"
7343893cb7fStomee "\n"
7353893cb7fStomee "Column\t\tDescription\n"
7363893cb7fStomee "\n"
7373893cb7fStomee "Cache Name\t\tname of kmem cache\n"
7383893cb7fStomee "Slabs\t\t\ttotal slab count\n"
7393893cb7fStomee "Partial Slabs\t\tcount of partially allocated slabs on the free list\n"
7403893cb7fStomee "Buffers\t\ttotal buffer count (Slabs * (buffers per slab))\n"
7413893cb7fStomee "Unused Buffers\tcount of unallocated buffers across all partial slabs\n"
7423893cb7fStomee "Waste\t\t\t(Unused Buffers / Buffers) does not include space\n"
7433893cb7fStomee "\t\t\t  for accounting structures (debug mode), slab\n"
7443893cb7fStomee "\t\t\t  coloring (incremental small offsets to stagger\n"
7453893cb7fStomee "\t\t\t  buffer alignment), or the per-CPU magazine layer\n");
7463893cb7fStomee }
7473893cb7fStomee 
7487c478bd9Sstevel@tonic-gate static int
7497c478bd9Sstevel@tonic-gate addrcmp(const void *lhs, const void *rhs)
7507c478bd9Sstevel@tonic-gate {
7517c478bd9Sstevel@tonic-gate 	uintptr_t p1 = *((uintptr_t *)lhs);
7527c478bd9Sstevel@tonic-gate 	uintptr_t p2 = *((uintptr_t *)rhs);
7537c478bd9Sstevel@tonic-gate 
7547c478bd9Sstevel@tonic-gate 	if (p1 < p2)
7557c478bd9Sstevel@tonic-gate 		return (-1);
7567c478bd9Sstevel@tonic-gate 	if (p1 > p2)
7577c478bd9Sstevel@tonic-gate 		return (1);
7587c478bd9Sstevel@tonic-gate 	return (0);
7597c478bd9Sstevel@tonic-gate }
7607c478bd9Sstevel@tonic-gate 
7617c478bd9Sstevel@tonic-gate static int
7627c478bd9Sstevel@tonic-gate bufctlcmp(const kmem_bufctl_audit_t **lhs, const kmem_bufctl_audit_t **rhs)
7637c478bd9Sstevel@tonic-gate {
7647c478bd9Sstevel@tonic-gate 	const kmem_bufctl_audit_t *bcp1 = *lhs;
7657c478bd9Sstevel@tonic-gate 	const kmem_bufctl_audit_t *bcp2 = *rhs;
7667c478bd9Sstevel@tonic-gate 
7677c478bd9Sstevel@tonic-gate 	if (bcp1->bc_timestamp > bcp2->bc_timestamp)
7687c478bd9Sstevel@tonic-gate 		return (-1);
7697c478bd9Sstevel@tonic-gate 
7707c478bd9Sstevel@tonic-gate 	if (bcp1->bc_timestamp < bcp2->bc_timestamp)
7717c478bd9Sstevel@tonic-gate 		return (1);
7727c478bd9Sstevel@tonic-gate 
7737c478bd9Sstevel@tonic-gate 	return (0);
7747c478bd9Sstevel@tonic-gate }
7757c478bd9Sstevel@tonic-gate 
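/*
 * The kmem_hash walker snapshots a cache's bufctl hash table into local
 * memory and then follows each non-empty hash chain, invoking the callback
 * on every kmem_bufctl_t it finds.
 */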
7767c478bd9Sstevel@tonic-gate typedef struct kmem_hash_walk {
7777c478bd9Sstevel@tonic-gate 	uintptr_t *kmhw_table;
7787c478bd9Sstevel@tonic-gate 	size_t kmhw_nelems;
7797c478bd9Sstevel@tonic-gate 	size_t kmhw_pos;
7807c478bd9Sstevel@tonic-gate 	kmem_bufctl_t kmhw_cur;
7817c478bd9Sstevel@tonic-gate } kmem_hash_walk_t;
7827c478bd9Sstevel@tonic-gate 
7837c478bd9Sstevel@tonic-gate int
7847c478bd9Sstevel@tonic-gate kmem_hash_walk_init(mdb_walk_state_t *wsp)
7857c478bd9Sstevel@tonic-gate {
7867c478bd9Sstevel@tonic-gate 	kmem_hash_walk_t *kmhw;
7877c478bd9Sstevel@tonic-gate 	uintptr_t *hash;
7887c478bd9Sstevel@tonic-gate 	kmem_cache_t c;
7897c478bd9Sstevel@tonic-gate 	uintptr_t haddr, addr = wsp->walk_addr;
7907c478bd9Sstevel@tonic-gate 	size_t nelems;
7917c478bd9Sstevel@tonic-gate 	size_t hsize;
7927c478bd9Sstevel@tonic-gate 
7937c478bd9Sstevel@tonic-gate 	if (addr == NULL) {
7947c478bd9Sstevel@tonic-gate 		mdb_warn("kmem_hash doesn't support global walks\n");
7957c478bd9Sstevel@tonic-gate 		return (WALK_ERR);
7967c478bd9Sstevel@tonic-gate 	}
7977c478bd9Sstevel@tonic-gate 
7987c478bd9Sstevel@tonic-gate 	if (mdb_vread(&c, sizeof (c), addr) == -1) {
7997c478bd9Sstevel@tonic-gate 		mdb_warn("couldn't read cache at addr %p", addr);
8007c478bd9Sstevel@tonic-gate 		return (WALK_ERR);
8017c478bd9Sstevel@tonic-gate 	}
8027c478bd9Sstevel@tonic-gate 
8037c478bd9Sstevel@tonic-gate 	if (!(c.cache_flags & KMF_HASH)) {
8047c478bd9Sstevel@tonic-gate 		mdb_warn("cache %p doesn't have a hash table\n", addr);
8057c478bd9Sstevel@tonic-gate 		return (WALK_DONE);		/* nothing to do */
8067c478bd9Sstevel@tonic-gate 	}
8077c478bd9Sstevel@tonic-gate 
8087c478bd9Sstevel@tonic-gate 	kmhw = mdb_zalloc(sizeof (kmem_hash_walk_t), UM_SLEEP);
8097c478bd9Sstevel@tonic-gate 	kmhw->kmhw_cur.bc_next = NULL;
8107c478bd9Sstevel@tonic-gate 	kmhw->kmhw_pos = 0;
8117c478bd9Sstevel@tonic-gate 
8127c478bd9Sstevel@tonic-gate 	kmhw->kmhw_nelems = nelems = c.cache_hash_mask + 1;
8137c478bd9Sstevel@tonic-gate 	hsize = nelems * sizeof (uintptr_t);
8147c478bd9Sstevel@tonic-gate 	haddr = (uintptr_t)c.cache_hash_table;
8157c478bd9Sstevel@tonic-gate 
8167c478bd9Sstevel@tonic-gate 	kmhw->kmhw_table = hash = mdb_alloc(hsize, UM_SLEEP);
8177c478bd9Sstevel@tonic-gate 	if (mdb_vread(hash, hsize, haddr) == -1) {
8187c478bd9Sstevel@tonic-gate 		mdb_warn("failed to read hash table at %p", haddr);
8197c478bd9Sstevel@tonic-gate 		mdb_free(hash, hsize);
8207c478bd9Sstevel@tonic-gate 		mdb_free(kmhw, sizeof (kmem_hash_walk_t));
8217c478bd9Sstevel@tonic-gate 		return (WALK_ERR);
8227c478bd9Sstevel@tonic-gate 	}
8237c478bd9Sstevel@tonic-gate 
8247c478bd9Sstevel@tonic-gate 	wsp->walk_data = kmhw;
8257c478bd9Sstevel@tonic-gate 
8267c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
8277c478bd9Sstevel@tonic-gate }
8287c478bd9Sstevel@tonic-gate 
8297c478bd9Sstevel@tonic-gate int
8307c478bd9Sstevel@tonic-gate kmem_hash_walk_step(mdb_walk_state_t *wsp)
8317c478bd9Sstevel@tonic-gate {
8327c478bd9Sstevel@tonic-gate 	kmem_hash_walk_t *kmhw = wsp->walk_data;
8337c478bd9Sstevel@tonic-gate 	uintptr_t addr = NULL;
8347c478bd9Sstevel@tonic-gate 
8357c478bd9Sstevel@tonic-gate 	if ((addr = (uintptr_t)kmhw->kmhw_cur.bc_next) == NULL) {
8367c478bd9Sstevel@tonic-gate 		while (kmhw->kmhw_pos < kmhw->kmhw_nelems) {
8377c478bd9Sstevel@tonic-gate 			if ((addr = kmhw->kmhw_table[kmhw->kmhw_pos++]) != NULL)
8387c478bd9Sstevel@tonic-gate 				break;
8397c478bd9Sstevel@tonic-gate 		}
8407c478bd9Sstevel@tonic-gate 	}
8417c478bd9Sstevel@tonic-gate 	if (addr == NULL)
8427c478bd9Sstevel@tonic-gate 		return (WALK_DONE);
8437c478bd9Sstevel@tonic-gate 
8447c478bd9Sstevel@tonic-gate 	if (mdb_vread(&kmhw->kmhw_cur, sizeof (kmem_bufctl_t), addr) == -1) {
8457c478bd9Sstevel@tonic-gate 		mdb_warn("couldn't read kmem_bufctl_t at addr %p", addr);
8467c478bd9Sstevel@tonic-gate 		return (WALK_ERR);
8477c478bd9Sstevel@tonic-gate 	}
8487c478bd9Sstevel@tonic-gate 
8497c478bd9Sstevel@tonic-gate 	return (wsp->walk_callback(addr, &kmhw->kmhw_cur, wsp->walk_cbdata));
8507c478bd9Sstevel@tonic-gate }
8517c478bd9Sstevel@tonic-gate 
8527c478bd9Sstevel@tonic-gate void
8537c478bd9Sstevel@tonic-gate kmem_hash_walk_fini(mdb_walk_state_t *wsp)
8547c478bd9Sstevel@tonic-gate {
8557c478bd9Sstevel@tonic-gate 	kmem_hash_walk_t *kmhw = wsp->walk_data;
8567c478bd9Sstevel@tonic-gate 
8577c478bd9Sstevel@tonic-gate 	if (kmhw == NULL)
8587c478bd9Sstevel@tonic-gate 		return;
8597c478bd9Sstevel@tonic-gate 
8607c478bd9Sstevel@tonic-gate 	mdb_free(kmhw->kmhw_table, kmhw->kmhw_nelems * sizeof (uintptr_t));
8617c478bd9Sstevel@tonic-gate 	mdb_free(kmhw, sizeof (kmem_hash_walk_t));
8627c478bd9Sstevel@tonic-gate }
8637c478bd9Sstevel@tonic-gate 
8647c478bd9Sstevel@tonic-gate /*
8657c478bd9Sstevel@tonic-gate  * Find the address of the bufctl structure for the address 'buf' in cache
8667c478bd9Sstevel@tonic-gate  * 'cp', which is at address caddr, and place it in *out.
8677c478bd9Sstevel@tonic-gate  */
8687c478bd9Sstevel@tonic-gate static int
8697c478bd9Sstevel@tonic-gate kmem_hash_lookup(kmem_cache_t *cp, uintptr_t caddr, void *buf, uintptr_t *out)
8707c478bd9Sstevel@tonic-gate {
8717c478bd9Sstevel@tonic-gate 	uintptr_t bucket = (uintptr_t)KMEM_HASH(cp, buf);
8727c478bd9Sstevel@tonic-gate 	kmem_bufctl_t *bcp;
8737c478bd9Sstevel@tonic-gate 	kmem_bufctl_t bc;
8747c478bd9Sstevel@tonic-gate 
8757c478bd9Sstevel@tonic-gate 	if (mdb_vread(&bcp, sizeof (kmem_bufctl_t *), bucket) == -1) {
8767c478bd9Sstevel@tonic-gate 		mdb_warn("unable to read hash bucket for %p in cache %p",
8777c478bd9Sstevel@tonic-gate 		    buf, caddr);
8787c478bd9Sstevel@tonic-gate 		return (-1);
8797c478bd9Sstevel@tonic-gate 	}
8807c478bd9Sstevel@tonic-gate 
8817c478bd9Sstevel@tonic-gate 	while (bcp != NULL) {
8827c478bd9Sstevel@tonic-gate 		if (mdb_vread(&bc, sizeof (kmem_bufctl_t),
8837c478bd9Sstevel@tonic-gate 		    (uintptr_t)bcp) == -1) {
8847c478bd9Sstevel@tonic-gate 			mdb_warn("unable to read bufctl at %p", bcp);
8857c478bd9Sstevel@tonic-gate 			return (-1);
8867c478bd9Sstevel@tonic-gate 		}
8877c478bd9Sstevel@tonic-gate 		if (bc.bc_addr == buf) {
8887c478bd9Sstevel@tonic-gate 			*out = (uintptr_t)bcp;
8897c478bd9Sstevel@tonic-gate 			return (0);
8907c478bd9Sstevel@tonic-gate 		}
8917c478bd9Sstevel@tonic-gate 		bcp = bc.bc_next;
8927c478bd9Sstevel@tonic-gate 	}
8937c478bd9Sstevel@tonic-gate 
8947c478bd9Sstevel@tonic-gate 	mdb_warn("unable to find bufctl for %p in cache %p\n", buf, caddr);
8957c478bd9Sstevel@tonic-gate 	return (-1);
8967c478bd9Sstevel@tonic-gate }
8977c478bd9Sstevel@tonic-gate 
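/*
 * Return the magazine size (rounds per magazine) for a cache, or 0 if the
 * cache has no magazine layer.  The magtype pointer is validated against
 * the target's kmem_magtype array before it is dereferenced.
 */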
8987c478bd9Sstevel@tonic-gate int
8997c478bd9Sstevel@tonic-gate kmem_get_magsize(const kmem_cache_t *cp)
9007c478bd9Sstevel@tonic-gate {
9017c478bd9Sstevel@tonic-gate 	uintptr_t addr = (uintptr_t)cp->cache_magtype;
9027c478bd9Sstevel@tonic-gate 	GElf_Sym mt_sym;
9037c478bd9Sstevel@tonic-gate 	kmem_magtype_t mt;
9047c478bd9Sstevel@tonic-gate 	int res;
9057c478bd9Sstevel@tonic-gate 
9067c478bd9Sstevel@tonic-gate 	/*
9077c478bd9Sstevel@tonic-gate 	 * if cpu 0 has a non-zero magsize, it must be correct.  caches
9087c478bd9Sstevel@tonic-gate 	 * with KMF_NOMAGAZINE have disabled their magazine layers, so
9097c478bd9Sstevel@tonic-gate 	 * it is okay to return 0 for them.
9107c478bd9Sstevel@tonic-gate 	 */
9117c478bd9Sstevel@tonic-gate 	if ((res = cp->cache_cpu[0].cc_magsize) != 0 ||
9127c478bd9Sstevel@tonic-gate 	    (cp->cache_flags & KMF_NOMAGAZINE))
9137c478bd9Sstevel@tonic-gate 		return (res);
9147c478bd9Sstevel@tonic-gate 
9157c478bd9Sstevel@tonic-gate 	if (mdb_lookup_by_name("kmem_magtype", &mt_sym) == -1) {
9167c478bd9Sstevel@tonic-gate 		mdb_warn("unable to read 'kmem_magtype'");
9177c478bd9Sstevel@tonic-gate 	} else if (addr < mt_sym.st_value ||
9187c478bd9Sstevel@tonic-gate 	    addr + sizeof (mt) - 1 > mt_sym.st_value + mt_sym.st_size - 1 ||
9197c478bd9Sstevel@tonic-gate 	    ((addr - mt_sym.st_value) % sizeof (mt)) != 0) {
9207c478bd9Sstevel@tonic-gate 		mdb_warn("cache '%s' has invalid magtype pointer (%p)\n",
9217c478bd9Sstevel@tonic-gate 		    cp->cache_name, addr);
9227c478bd9Sstevel@tonic-gate 		return (0);
9237c478bd9Sstevel@tonic-gate 	}
9247c478bd9Sstevel@tonic-gate 	if (mdb_vread(&mt, sizeof (mt), addr) == -1) {
9257c478bd9Sstevel@tonic-gate 		mdb_warn("unable to read magtype at %a", addr);
9267c478bd9Sstevel@tonic-gate 		return (0);
9277c478bd9Sstevel@tonic-gate 	}
9287c478bd9Sstevel@tonic-gate 	return (mt.mt_magsize);
9297c478bd9Sstevel@tonic-gate }
9307c478bd9Sstevel@tonic-gate 
9317c478bd9Sstevel@tonic-gate /*ARGSUSED*/
9327c478bd9Sstevel@tonic-gate static int
9337c478bd9Sstevel@tonic-gate kmem_estimate_slab(uintptr_t addr, const kmem_slab_t *sp, size_t *est)
9347c478bd9Sstevel@tonic-gate {
9357c478bd9Sstevel@tonic-gate 	*est -= (sp->slab_chunks - sp->slab_refcnt);
9367c478bd9Sstevel@tonic-gate 
9377c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
9387c478bd9Sstevel@tonic-gate }
9397c478bd9Sstevel@tonic-gate 
9407c478bd9Sstevel@tonic-gate /*
9417c478bd9Sstevel@tonic-gate  * Returns an upper bound on the number of allocated buffers in a given
9427c478bd9Sstevel@tonic-gate  * cache.
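 * The estimate starts from cache_buftotal, subtracts the unused buffers on
 * each partial slab, and then subtracts the buffers held in full magazines
 * in the depot (cache_full.ml_total * magsize).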
9437c478bd9Sstevel@tonic-gate  */
9447c478bd9Sstevel@tonic-gate size_t
9457c478bd9Sstevel@tonic-gate kmem_estimate_allocated(uintptr_t addr, const kmem_cache_t *cp)
9467c478bd9Sstevel@tonic-gate {
9477c478bd9Sstevel@tonic-gate 	int magsize;
9487c478bd9Sstevel@tonic-gate 	size_t cache_est;
9497c478bd9Sstevel@tonic-gate 
9507c478bd9Sstevel@tonic-gate 	cache_est = cp->cache_buftotal;
9517c478bd9Sstevel@tonic-gate 
9527c478bd9Sstevel@tonic-gate 	(void) mdb_pwalk("kmem_slab_partial",
9537c478bd9Sstevel@tonic-gate 	    (mdb_walk_cb_t)kmem_estimate_slab, &cache_est, addr);
9547c478bd9Sstevel@tonic-gate 
9557c478bd9Sstevel@tonic-gate 	if ((magsize = kmem_get_magsize(cp)) != 0) {
9567c478bd9Sstevel@tonic-gate 		size_t mag_est = cp->cache_full.ml_total * magsize;
9577c478bd9Sstevel@tonic-gate 
9587c478bd9Sstevel@tonic-gate 		if (cache_est >= mag_est) {
9597c478bd9Sstevel@tonic-gate 			cache_est -= mag_est;
9607c478bd9Sstevel@tonic-gate 		} else {
9617c478bd9Sstevel@tonic-gate 			mdb_warn("cache %p's magazine layer holds more buffers "
9627c478bd9Sstevel@tonic-gate 			    "than the slab layer.\n", addr);
9637c478bd9Sstevel@tonic-gate 		}
9647c478bd9Sstevel@tonic-gate 	}
9657c478bd9Sstevel@tonic-gate 	return (cache_est);
9667c478bd9Sstevel@tonic-gate }
9677c478bd9Sstevel@tonic-gate 
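/*
 * READMAG_ROUNDS reads the magazine currently pointed to by kmp into mp and
 * appends its first 'rounds' round pointers to maglist, bailing out to the
 * fail label if the magmax fudge-factor limit is ever exceeded.
 */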
9687c478bd9Sstevel@tonic-gate #define	READMAG_ROUNDS(rounds) { \
9697c478bd9Sstevel@tonic-gate 	if (mdb_vread(mp, magbsize, (uintptr_t)kmp) == -1) { \
9707c478bd9Sstevel@tonic-gate 		mdb_warn("couldn't read magazine at %p", kmp); \
9717c478bd9Sstevel@tonic-gate 		goto fail; \
9727c478bd9Sstevel@tonic-gate 	} \
9737c478bd9Sstevel@tonic-gate 	for (i = 0; i < rounds; i++) { \
9747c478bd9Sstevel@tonic-gate 		maglist[magcnt++] = mp->mag_round[i]; \
9757c478bd9Sstevel@tonic-gate 		if (magcnt == magmax) { \
9767c478bd9Sstevel@tonic-gate 			mdb_warn("%d magazines exceeds fudge factor\n", \
9777c478bd9Sstevel@tonic-gate 			    magcnt); \
9787c478bd9Sstevel@tonic-gate 			goto fail; \
9797c478bd9Sstevel@tonic-gate 		} \
9807c478bd9Sstevel@tonic-gate 	} \
9817c478bd9Sstevel@tonic-gate }
9827c478bd9Sstevel@tonic-gate 
9837c478bd9Sstevel@tonic-gate int
9847c478bd9Sstevel@tonic-gate kmem_read_magazines(kmem_cache_t *cp, uintptr_t addr, int ncpus,
9857c478bd9Sstevel@tonic-gate     void ***maglistp, size_t *magcntp, size_t *magmaxp, int alloc_flags)
9867c478bd9Sstevel@tonic-gate {
9877c478bd9Sstevel@tonic-gate 	kmem_magazine_t *kmp, *mp;
9887c478bd9Sstevel@tonic-gate 	void **maglist = NULL;
9897c478bd9Sstevel@tonic-gate 	int i, cpu;
9907c478bd9Sstevel@tonic-gate 	size_t magsize, magmax, magbsize;
9917c478bd9Sstevel@tonic-gate 	size_t magcnt = 0;
9927c478bd9Sstevel@tonic-gate 
9937c478bd9Sstevel@tonic-gate 	/*
9947c478bd9Sstevel@tonic-gate 	 * Read the magtype out of the cache, after verifying the pointer's
9957c478bd9Sstevel@tonic-gate 	 * correctness.
9967c478bd9Sstevel@tonic-gate 	 */
9977c478bd9Sstevel@tonic-gate 	magsize = kmem_get_magsize(cp);
998789d94c2Sjwadams 	if (magsize == 0) {
999789d94c2Sjwadams 		*maglistp = NULL;
1000789d94c2Sjwadams 		*magcntp = 0;
1001789d94c2Sjwadams 		*magmaxp = 0;
1002789d94c2Sjwadams 		return (WALK_NEXT);
1003789d94c2Sjwadams 	}
10047c478bd9Sstevel@tonic-gate 
10057c478bd9Sstevel@tonic-gate 	/*
10067c478bd9Sstevel@tonic-gate 	 * There are several places where we need to go buffer hunting:
10077c478bd9Sstevel@tonic-gate 	 * the per-CPU loaded magazine, the per-CPU spare full magazine,
10087c478bd9Sstevel@tonic-gate 	 * and the full magazine list in the depot.
10097c478bd9Sstevel@tonic-gate 	 *
10107c478bd9Sstevel@tonic-gate 	 * For an upper bound on the number of buffers in the magazine
10117c478bd9Sstevel@tonic-gate 	 * layer, we have the number of magazines on the cache_full
10127c478bd9Sstevel@tonic-gate 	 * list plus at most two magazines per CPU (the loaded and the
10137c478bd9Sstevel@tonic-gate 	 * spare).  Toss in 100 magazines as a fudge factor in case this
10147c478bd9Sstevel@tonic-gate 	 * is live (the number "100" comes from the same fudge factor in
10157c478bd9Sstevel@tonic-gate 	 * crash(1M)).
10167c478bd9Sstevel@tonic-gate 	 */
10177c478bd9Sstevel@tonic-gate 	magmax = (cp->cache_full.ml_total + 2 * ncpus + 100) * magsize;
10187c478bd9Sstevel@tonic-gate 	magbsize = offsetof(kmem_magazine_t, mag_round[magsize]);
10197c478bd9Sstevel@tonic-gate 
10207c478bd9Sstevel@tonic-gate 	if (magbsize >= PAGESIZE / 2) {
10217c478bd9Sstevel@tonic-gate 		mdb_warn("magazine size for cache %p unreasonable (%x)\n",
10227c478bd9Sstevel@tonic-gate 		    addr, magbsize);
1023789d94c2Sjwadams 		return (WALK_ERR);
10247c478bd9Sstevel@tonic-gate 	}
10257c478bd9Sstevel@tonic-gate 
10267c478bd9Sstevel@tonic-gate 	maglist = mdb_alloc(magmax * sizeof (void *), alloc_flags);
10277c478bd9Sstevel@tonic-gate 	mp = mdb_alloc(magbsize, alloc_flags);
10287c478bd9Sstevel@tonic-gate 	if (mp == NULL || maglist == NULL)
10297c478bd9Sstevel@tonic-gate 		goto fail;
10307c478bd9Sstevel@tonic-gate 
10317c478bd9Sstevel@tonic-gate 	/*
10327c478bd9Sstevel@tonic-gate 	 * First up: the magazines in the depot (i.e. on the cache_full list).
10337c478bd9Sstevel@tonic-gate 	 */
10347c478bd9Sstevel@tonic-gate 	for (kmp = cp->cache_full.ml_list; kmp != NULL; ) {
10357c478bd9Sstevel@tonic-gate 		READMAG_ROUNDS(magsize);
10367c478bd9Sstevel@tonic-gate 		kmp = mp->mag_next;
10377c478bd9Sstevel@tonic-gate 
10387c478bd9Sstevel@tonic-gate 		if (kmp == cp->cache_full.ml_list)
10397c478bd9Sstevel@tonic-gate 			break; /* cache_full list loop detected */
10407c478bd9Sstevel@tonic-gate 	}
10417c478bd9Sstevel@tonic-gate 
10427c478bd9Sstevel@tonic-gate 	dprintf(("cache_full list done\n"));
10437c478bd9Sstevel@tonic-gate 
10447c478bd9Sstevel@tonic-gate 	/*
10457c478bd9Sstevel@tonic-gate 	 * Now whip through the CPUs, snagging the loaded magazines
10467c478bd9Sstevel@tonic-gate 	 * and full spares.
10477c478bd9Sstevel@tonic-gate 	 */
10487c478bd9Sstevel@tonic-gate 	for (cpu = 0; cpu < ncpus; cpu++) {
10497c478bd9Sstevel@tonic-gate 		kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu];
10507c478bd9Sstevel@tonic-gate 
10517c478bd9Sstevel@tonic-gate 		dprintf(("reading cpu cache %p\n",
10527c478bd9Sstevel@tonic-gate 		    (uintptr_t)ccp - (uintptr_t)cp + addr));
10537c478bd9Sstevel@tonic-gate 
10547c478bd9Sstevel@tonic-gate 		if (ccp->cc_rounds > 0 &&
10557c478bd9Sstevel@tonic-gate 		    (kmp = ccp->cc_loaded) != NULL) {
10567c478bd9Sstevel@tonic-gate 			dprintf(("reading %d loaded rounds\n", ccp->cc_rounds));
10577c478bd9Sstevel@tonic-gate 			READMAG_ROUNDS(ccp->cc_rounds);
10587c478bd9Sstevel@tonic-gate 		}
10597c478bd9Sstevel@tonic-gate 
10607c478bd9Sstevel@tonic-gate 		if (ccp->cc_prounds > 0 &&
10617c478bd9Sstevel@tonic-gate 		    (kmp = ccp->cc_ploaded) != NULL) {
10627c478bd9Sstevel@tonic-gate 			dprintf(("reading %d previously loaded rounds\n",
10637c478bd9Sstevel@tonic-gate 			    ccp->cc_prounds));
10647c478bd9Sstevel@tonic-gate 			READMAG_ROUNDS(ccp->cc_prounds);
10657c478bd9Sstevel@tonic-gate 		}
10667c478bd9Sstevel@tonic-gate 	}
10677c478bd9Sstevel@tonic-gate 
10687c478bd9Sstevel@tonic-gate 	dprintf(("magazine layer: %d buffers\n", magcnt));
10697c478bd9Sstevel@tonic-gate 
10707c478bd9Sstevel@tonic-gate 	if (!(alloc_flags & UM_GC))
10717c478bd9Sstevel@tonic-gate 		mdb_free(mp, magbsize);
10727c478bd9Sstevel@tonic-gate 
10737c478bd9Sstevel@tonic-gate 	*maglistp = maglist;
10747c478bd9Sstevel@tonic-gate 	*magcntp = magcnt;
10757c478bd9Sstevel@tonic-gate 	*magmaxp = magmax;
10767c478bd9Sstevel@tonic-gate 
10777c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
10787c478bd9Sstevel@tonic-gate 
10797c478bd9Sstevel@tonic-gate fail:
10807c478bd9Sstevel@tonic-gate 	if (!(alloc_flags & UM_GC)) {
10817c478bd9Sstevel@tonic-gate 		if (mp)
10827c478bd9Sstevel@tonic-gate 			mdb_free(mp, magbsize);
10837c478bd9Sstevel@tonic-gate 		if (maglist)
10847c478bd9Sstevel@tonic-gate 			mdb_free(maglist, magmax * sizeof (void *));
10857c478bd9Sstevel@tonic-gate 	}
10867c478bd9Sstevel@tonic-gate 	return (WALK_ERR);
10877c478bd9Sstevel@tonic-gate }
10887c478bd9Sstevel@tonic-gate 
10897c478bd9Sstevel@tonic-gate static int
10907c478bd9Sstevel@tonic-gate kmem_walk_callback(mdb_walk_state_t *wsp, uintptr_t buf)
10917c478bd9Sstevel@tonic-gate {
10927c478bd9Sstevel@tonic-gate 	return (wsp->walk_callback(buf, NULL, wsp->walk_cbdata));
10937c478bd9Sstevel@tonic-gate }
10947c478bd9Sstevel@tonic-gate 
10957c478bd9Sstevel@tonic-gate static int
10967c478bd9Sstevel@tonic-gate bufctl_walk_callback(kmem_cache_t *cp, mdb_walk_state_t *wsp, uintptr_t buf)
10977c478bd9Sstevel@tonic-gate {
10987c478bd9Sstevel@tonic-gate 	kmem_bufctl_audit_t b;
10997c478bd9Sstevel@tonic-gate 
11007c478bd9Sstevel@tonic-gate 	/*
11017c478bd9Sstevel@tonic-gate 	 * if KMF_AUDIT is not set, we know that we're looking at a
11027c478bd9Sstevel@tonic-gate 	 * kmem_bufctl_t.
11037c478bd9Sstevel@tonic-gate 	 */
11047c478bd9Sstevel@tonic-gate 	if (!(cp->cache_flags & KMF_AUDIT) ||
11057c478bd9Sstevel@tonic-gate 	    mdb_vread(&b, sizeof (kmem_bufctl_audit_t), buf) == -1) {
11067c478bd9Sstevel@tonic-gate 		(void) memset(&b, 0, sizeof (b));
11077c478bd9Sstevel@tonic-gate 		if (mdb_vread(&b, sizeof (kmem_bufctl_t), buf) == -1) {
11087c478bd9Sstevel@tonic-gate 			mdb_warn("unable to read bufctl at %p", buf);
11097c478bd9Sstevel@tonic-gate 			return (WALK_ERR);
11107c478bd9Sstevel@tonic-gate 		}
11117c478bd9Sstevel@tonic-gate 	}
11127c478bd9Sstevel@tonic-gate 
11137c478bd9Sstevel@tonic-gate 	return (wsp->walk_callback(buf, &b, wsp->walk_cbdata));
11147c478bd9Sstevel@tonic-gate }
11157c478bd9Sstevel@tonic-gate 
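/*
 * State for the per-cache kmem/bufctl walkers: a local copy of the cache,
 * the buffers found in the magazine layer (kmw_maglist), and the per-slab
 * bookkeeping used to report allocated and/or free buffers.
 */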
11167c478bd9Sstevel@tonic-gate typedef struct kmem_walk {
11177c478bd9Sstevel@tonic-gate 	int kmw_type;
11187c478bd9Sstevel@tonic-gate 
11197c478bd9Sstevel@tonic-gate 	uintptr_t kmw_addr;		/* cache address */
11207c478bd9Sstevel@tonic-gate 	kmem_cache_t *kmw_cp;
11217c478bd9Sstevel@tonic-gate 	size_t kmw_csize;
11227c478bd9Sstevel@tonic-gate 
11237c478bd9Sstevel@tonic-gate 	/*
11247c478bd9Sstevel@tonic-gate 	 * magazine layer
11257c478bd9Sstevel@tonic-gate 	 */
11267c478bd9Sstevel@tonic-gate 	void **kmw_maglist;
11277c478bd9Sstevel@tonic-gate 	size_t kmw_max;
11287c478bd9Sstevel@tonic-gate 	size_t kmw_count;
11297c478bd9Sstevel@tonic-gate 	size_t kmw_pos;
11307c478bd9Sstevel@tonic-gate 
11317c478bd9Sstevel@tonic-gate 	/*
11327c478bd9Sstevel@tonic-gate 	 * slab layer
11337c478bd9Sstevel@tonic-gate 	 */
11347c478bd9Sstevel@tonic-gate 	char *kmw_valid;	/* to keep track of freed buffers */
11357c478bd9Sstevel@tonic-gate 	char *kmw_ubase;	/* buffer for slab data */
11367c478bd9Sstevel@tonic-gate } kmem_walk_t;
11377c478bd9Sstevel@tonic-gate 
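/*
 * Overview of the kmem walk (a summary of the code below, not a formal
 * design note): kmem_walk_init_common() validates the cache, snapshots
 * the magazine layer into kmw_maglist, and then starts a layered walk --
 * "kmem_hash" for allocated buffers in hashed caches, "kmem_slab" or
 * "kmem_slab_partial" otherwise.  kmem_walk_step() reconciles each hash
 * entry or slab against the magazine snapshot, so buffers sitting in
 * magazines are reported as free rather than allocated.
 */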
11387c478bd9Sstevel@tonic-gate static int
11397c478bd9Sstevel@tonic-gate kmem_walk_init_common(mdb_walk_state_t *wsp, int type)
11407c478bd9Sstevel@tonic-gate {
11417c478bd9Sstevel@tonic-gate 	kmem_walk_t *kmw;
11427c478bd9Sstevel@tonic-gate 	int ncpus, csize;
11437c478bd9Sstevel@tonic-gate 	kmem_cache_t *cp;
1144789d94c2Sjwadams 	size_t vm_quantum;
11457c478bd9Sstevel@tonic-gate 
11467c478bd9Sstevel@tonic-gate 	size_t magmax, magcnt;
11477c478bd9Sstevel@tonic-gate 	void **maglist = NULL;
11487c478bd9Sstevel@tonic-gate 	uint_t chunksize, slabsize;
11497c478bd9Sstevel@tonic-gate 	int status = WALK_ERR;
11507c478bd9Sstevel@tonic-gate 	uintptr_t addr = wsp->walk_addr;
11517c478bd9Sstevel@tonic-gate 	const char *layered;
11527c478bd9Sstevel@tonic-gate 
11537c478bd9Sstevel@tonic-gate 	type &= ~KM_HASH;
11547c478bd9Sstevel@tonic-gate 
11557c478bd9Sstevel@tonic-gate 	if (addr == NULL) {
11567c478bd9Sstevel@tonic-gate 		mdb_warn("kmem walk doesn't support global walks\n");
11577c478bd9Sstevel@tonic-gate 		return (WALK_ERR);
11587c478bd9Sstevel@tonic-gate 	}
11597c478bd9Sstevel@tonic-gate 
11607c478bd9Sstevel@tonic-gate 	dprintf(("walking %p\n", addr));
11617c478bd9Sstevel@tonic-gate 
11627c478bd9Sstevel@tonic-gate 	/*
11637c478bd9Sstevel@tonic-gate 	 * First we need to figure out how many CPUs are configured in the
11647c478bd9Sstevel@tonic-gate 	 * system to know how much to slurp out.
11657c478bd9Sstevel@tonic-gate 	 */
11667c478bd9Sstevel@tonic-gate 	mdb_readvar(&ncpus, "max_ncpus");
11677c478bd9Sstevel@tonic-gate 
11687c478bd9Sstevel@tonic-gate 	csize = KMEM_CACHE_SIZE(ncpus);
11697c478bd9Sstevel@tonic-gate 	cp = mdb_alloc(csize, UM_SLEEP);
11707c478bd9Sstevel@tonic-gate 
11717c478bd9Sstevel@tonic-gate 	if (mdb_vread(cp, csize, addr) == -1) {
11727c478bd9Sstevel@tonic-gate 		mdb_warn("couldn't read cache at addr %p", addr);
11737c478bd9Sstevel@tonic-gate 		goto out2;
11747c478bd9Sstevel@tonic-gate 	}
11757c478bd9Sstevel@tonic-gate 
1176789d94c2Sjwadams 	/*
1177789d94c2Sjwadams 	 * It's easy for someone to hand us an invalid cache address.
1178789d94c2Sjwadams 	 * Unfortunately, it is hard for this walker to survive an
1179789d94c2Sjwadams 	 * invalid cache cleanly.  So we make sure that:
1180789d94c2Sjwadams 	 *
1181789d94c2Sjwadams 	 *	1. the vmem arena for the cache is readable,
1182789d94c2Sjwadams 	 *	2. the vmem arena's quantum is a power of 2,
1183789d94c2Sjwadams 	 *	3. our slabsize is a multiple of the quantum, and
1184789d94c2Sjwadams 	 *	4. our chunksize is >0 and less than our slabsize.
1185789d94c2Sjwadams 	 */
1186789d94c2Sjwadams 	if (mdb_vread(&vm_quantum, sizeof (vm_quantum),
1187789d94c2Sjwadams 	    (uintptr_t)&cp->cache_arena->vm_quantum) == -1 ||
1188789d94c2Sjwadams 	    vm_quantum == 0 ||
1189789d94c2Sjwadams 	    (vm_quantum & (vm_quantum - 1)) != 0 ||
1190789d94c2Sjwadams 	    cp->cache_slabsize < vm_quantum ||
1191789d94c2Sjwadams 	    P2PHASE(cp->cache_slabsize, vm_quantum) != 0 ||
1192789d94c2Sjwadams 	    cp->cache_chunksize == 0 ||
1193789d94c2Sjwadams 	    cp->cache_chunksize > cp->cache_slabsize) {
1194789d94c2Sjwadams 		mdb_warn("%p is not a valid kmem_cache_t\n", addr);
1195789d94c2Sjwadams 		goto out2;
1196789d94c2Sjwadams 	}
1197789d94c2Sjwadams 
11987c478bd9Sstevel@tonic-gate 	dprintf(("buf total is %d\n", cp->cache_buftotal));
11997c478bd9Sstevel@tonic-gate 
12007c478bd9Sstevel@tonic-gate 	if (cp->cache_buftotal == 0) {
12017c478bd9Sstevel@tonic-gate 		mdb_free(cp, csize);
12027c478bd9Sstevel@tonic-gate 		return (WALK_DONE);
12037c478bd9Sstevel@tonic-gate 	}
12047c478bd9Sstevel@tonic-gate 
12057c478bd9Sstevel@tonic-gate 	/*
12067c478bd9Sstevel@tonic-gate 	 * If they ask for bufctls, but it's a small-slab cache,
12077c478bd9Sstevel@tonic-gate 	 * there is nothing to report.
12087c478bd9Sstevel@tonic-gate 	 */
12097c478bd9Sstevel@tonic-gate 	if ((type & KM_BUFCTL) && !(cp->cache_flags & KMF_HASH)) {
12107c478bd9Sstevel@tonic-gate 		dprintf(("bufctl requested, not KMF_HASH (flags: %p)\n",
12117c478bd9Sstevel@tonic-gate 		    cp->cache_flags));
12127c478bd9Sstevel@tonic-gate 		mdb_free(cp, csize);
12137c478bd9Sstevel@tonic-gate 		return (WALK_DONE);
12147c478bd9Sstevel@tonic-gate 	}
12157c478bd9Sstevel@tonic-gate 
12167c478bd9Sstevel@tonic-gate 	/*
12177c478bd9Sstevel@tonic-gate 	 * If they want constructed buffers, but there's no constructor or
12187c478bd9Sstevel@tonic-gate 	 * the cache has DEADBEEF checking enabled, there is nothing to report.
12197c478bd9Sstevel@tonic-gate 	 */
12207c478bd9Sstevel@tonic-gate 	if ((type & KM_CONSTRUCTED) && (!(type & KM_FREE) ||
12217c478bd9Sstevel@tonic-gate 	    cp->cache_constructor == NULL ||
12227c478bd9Sstevel@tonic-gate 	    (cp->cache_flags & (KMF_DEADBEEF | KMF_LITE)) == KMF_DEADBEEF)) {
12237c478bd9Sstevel@tonic-gate 		mdb_free(cp, csize);
12247c478bd9Sstevel@tonic-gate 		return (WALK_DONE);
12257c478bd9Sstevel@tonic-gate 	}
12267c478bd9Sstevel@tonic-gate 
12277c478bd9Sstevel@tonic-gate 	/*
12287c478bd9Sstevel@tonic-gate 	 * Read in the contents of the magazine layer
12297c478bd9Sstevel@tonic-gate 	 */
12307c478bd9Sstevel@tonic-gate 	if (kmem_read_magazines(cp, addr, ncpus, &maglist, &magcnt,
12317c478bd9Sstevel@tonic-gate 	    &magmax, UM_SLEEP) == WALK_ERR)
12327c478bd9Sstevel@tonic-gate 		goto out2;
12337c478bd9Sstevel@tonic-gate 
12347c478bd9Sstevel@tonic-gate 	/*
12357c478bd9Sstevel@tonic-gate 	 * We have all of the buffers from the magazines;  if we are walking
12367c478bd9Sstevel@tonic-gate 	 * allocated buffers, sort them so we can bsearch them later.
12377c478bd9Sstevel@tonic-gate 	 */
12387c478bd9Sstevel@tonic-gate 	if (type & KM_ALLOCATED)
12397c478bd9Sstevel@tonic-gate 		qsort(maglist, magcnt, sizeof (void *), addrcmp);
12407c478bd9Sstevel@tonic-gate 
12417c478bd9Sstevel@tonic-gate 	wsp->walk_data = kmw = mdb_zalloc(sizeof (kmem_walk_t), UM_SLEEP);
12427c478bd9Sstevel@tonic-gate 
12437c478bd9Sstevel@tonic-gate 	kmw->kmw_type = type;
12447c478bd9Sstevel@tonic-gate 	kmw->kmw_addr = addr;
12457c478bd9Sstevel@tonic-gate 	kmw->kmw_cp = cp;
12467c478bd9Sstevel@tonic-gate 	kmw->kmw_csize = csize;
12477c478bd9Sstevel@tonic-gate 	kmw->kmw_maglist = maglist;
12487c478bd9Sstevel@tonic-gate 	kmw->kmw_max = magmax;
12497c478bd9Sstevel@tonic-gate 	kmw->kmw_count = magcnt;
12507c478bd9Sstevel@tonic-gate 	kmw->kmw_pos = 0;
12517c478bd9Sstevel@tonic-gate 
12527c478bd9Sstevel@tonic-gate 	/*
12537c478bd9Sstevel@tonic-gate 	 * When walking allocated buffers in a KMF_HASH cache, we walk the
12547c478bd9Sstevel@tonic-gate 	 * hash table instead of the slab layer.
12557c478bd9Sstevel@tonic-gate 	 */
12567c478bd9Sstevel@tonic-gate 	if ((cp->cache_flags & KMF_HASH) && (type & KM_ALLOCATED)) {
12577c478bd9Sstevel@tonic-gate 		layered = "kmem_hash";
12587c478bd9Sstevel@tonic-gate 
12597c478bd9Sstevel@tonic-gate 		kmw->kmw_type |= KM_HASH;
12607c478bd9Sstevel@tonic-gate 	} else {
12617c478bd9Sstevel@tonic-gate 		/*
12627c478bd9Sstevel@tonic-gate 		 * If we are walking freed buffers, we only need the
12637c478bd9Sstevel@tonic-gate 		 * magazine layer plus the partially allocated slabs.
12647c478bd9Sstevel@tonic-gate 		 * To walk allocated buffers, we need all of the slabs.
12657c478bd9Sstevel@tonic-gate 		 */
12667c478bd9Sstevel@tonic-gate 		if (type & KM_ALLOCATED)
12677c478bd9Sstevel@tonic-gate 			layered = "kmem_slab";
12687c478bd9Sstevel@tonic-gate 		else
12697c478bd9Sstevel@tonic-gate 			layered = "kmem_slab_partial";
12707c478bd9Sstevel@tonic-gate 
12717c478bd9Sstevel@tonic-gate 		/*
12727c478bd9Sstevel@tonic-gate 		 * for small-slab caches, we read in the entire slab.  For
12737c478bd9Sstevel@tonic-gate 		 * freed buffers, we can just walk the freelist.  For
12747c478bd9Sstevel@tonic-gate 		 * allocated buffers, we use a 'valid' array to track
12757c478bd9Sstevel@tonic-gate 		 * the freed buffers.
12767c478bd9Sstevel@tonic-gate 		 */
12777c478bd9Sstevel@tonic-gate 		if (!(cp->cache_flags & KMF_HASH)) {
12787c478bd9Sstevel@tonic-gate 			chunksize = cp->cache_chunksize;
12797c478bd9Sstevel@tonic-gate 			slabsize = cp->cache_slabsize;
12807c478bd9Sstevel@tonic-gate 
12817c478bd9Sstevel@tonic-gate 			kmw->kmw_ubase = mdb_alloc(slabsize +
12827c478bd9Sstevel@tonic-gate 			    sizeof (kmem_bufctl_t), UM_SLEEP);
12837c478bd9Sstevel@tonic-gate 
12847c478bd9Sstevel@tonic-gate 			if (type & KM_ALLOCATED)
12857c478bd9Sstevel@tonic-gate 				kmw->kmw_valid =
12867c478bd9Sstevel@tonic-gate 				    mdb_alloc(slabsize / chunksize, UM_SLEEP);
12877c478bd9Sstevel@tonic-gate 		}
12887c478bd9Sstevel@tonic-gate 	}
12897c478bd9Sstevel@tonic-gate 
12907c478bd9Sstevel@tonic-gate 	status = WALK_NEXT;
12917c478bd9Sstevel@tonic-gate 
12927c478bd9Sstevel@tonic-gate 	if (mdb_layered_walk(layered, wsp) == -1) {
12937c478bd9Sstevel@tonic-gate 		mdb_warn("unable to start layered '%s' walk", layered);
12947c478bd9Sstevel@tonic-gate 		status = WALK_ERR;
12957c478bd9Sstevel@tonic-gate 	}
12967c478bd9Sstevel@tonic-gate 
12977c478bd9Sstevel@tonic-gate out1:
12987c478bd9Sstevel@tonic-gate 	if (status == WALK_ERR) {
12997c478bd9Sstevel@tonic-gate 		if (kmw->kmw_valid)
13007c478bd9Sstevel@tonic-gate 			mdb_free(kmw->kmw_valid, slabsize / chunksize);
13017c478bd9Sstevel@tonic-gate 
13027c478bd9Sstevel@tonic-gate 		if (kmw->kmw_ubase)
13037c478bd9Sstevel@tonic-gate 			mdb_free(kmw->kmw_ubase, slabsize +
13047c478bd9Sstevel@tonic-gate 			    sizeof (kmem_bufctl_t));
13057c478bd9Sstevel@tonic-gate 
1306789d94c2Sjwadams 		if (kmw->kmw_maglist)
1307789d94c2Sjwadams 			mdb_free(kmw->kmw_maglist,
1308789d94c2Sjwadams 			    kmw->kmw_max * sizeof (uintptr_t));
1309789d94c2Sjwadams 
13107c478bd9Sstevel@tonic-gate 		mdb_free(kmw, sizeof (kmem_walk_t));
13117c478bd9Sstevel@tonic-gate 		wsp->walk_data = NULL;
13127c478bd9Sstevel@tonic-gate 	}
13137c478bd9Sstevel@tonic-gate 
13147c478bd9Sstevel@tonic-gate out2:
13157c478bd9Sstevel@tonic-gate 	if (status == WALK_ERR)
13167c478bd9Sstevel@tonic-gate 		mdb_free(cp, csize);
13177c478bd9Sstevel@tonic-gate 
13187c478bd9Sstevel@tonic-gate 	return (status);
13197c478bd9Sstevel@tonic-gate }
13207c478bd9Sstevel@tonic-gate 
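/*
 * Notes on the per-slab bookkeeping below (small-slab caches only): the
 * slab contents are copied into kmw_ubase so the embedded kmem_bufctl_t
 * freelist can be followed without further target reads, and kmw_valid
 * is a byte-per-chunk map that starts out all-allocated and has freelist
 * entries punched out before the remaining (allocated) chunks are
 * reported.
 */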
13217c478bd9Sstevel@tonic-gate int
13227c478bd9Sstevel@tonic-gate kmem_walk_step(mdb_walk_state_t *wsp)
13237c478bd9Sstevel@tonic-gate {
13247c478bd9Sstevel@tonic-gate 	kmem_walk_t *kmw = wsp->walk_data;
13257c478bd9Sstevel@tonic-gate 	int type = kmw->kmw_type;
13267c478bd9Sstevel@tonic-gate 	kmem_cache_t *cp = kmw->kmw_cp;
13277c478bd9Sstevel@tonic-gate 
13287c478bd9Sstevel@tonic-gate 	void **maglist = kmw->kmw_maglist;
13297c478bd9Sstevel@tonic-gate 	int magcnt = kmw->kmw_count;
13307c478bd9Sstevel@tonic-gate 
13317c478bd9Sstevel@tonic-gate 	uintptr_t chunksize, slabsize;
13327c478bd9Sstevel@tonic-gate 	uintptr_t addr;
13337c478bd9Sstevel@tonic-gate 	const kmem_slab_t *sp;
13347c478bd9Sstevel@tonic-gate 	const kmem_bufctl_t *bcp;
13357c478bd9Sstevel@tonic-gate 	kmem_bufctl_t bc;
13367c478bd9Sstevel@tonic-gate 
13377c478bd9Sstevel@tonic-gate 	int chunks;
13387c478bd9Sstevel@tonic-gate 	char *kbase;
13397c478bd9Sstevel@tonic-gate 	void *buf;
13407c478bd9Sstevel@tonic-gate 	int i, ret;
13417c478bd9Sstevel@tonic-gate 
13427c478bd9Sstevel@tonic-gate 	char *valid, *ubase;
13437c478bd9Sstevel@tonic-gate 
13447c478bd9Sstevel@tonic-gate 	/*
13457c478bd9Sstevel@tonic-gate 	 * first, handle the 'kmem_hash' layered walk case
13467c478bd9Sstevel@tonic-gate 	 */
13477c478bd9Sstevel@tonic-gate 	if (type & KM_HASH) {
13487c478bd9Sstevel@tonic-gate 		/*
13497c478bd9Sstevel@tonic-gate 		 * We have a buffer which has been allocated out of the
13507c478bd9Sstevel@tonic-gate 		 * global layer. We need to make sure that it's not
13517c478bd9Sstevel@tonic-gate 		 * actually sitting in a magazine before we report it as
13527c478bd9Sstevel@tonic-gate 		 * an allocated buffer.
13537c478bd9Sstevel@tonic-gate 		 */
13547c478bd9Sstevel@tonic-gate 		buf = ((const kmem_bufctl_t *)wsp->walk_layer)->bc_addr;
13557c478bd9Sstevel@tonic-gate 
13567c478bd9Sstevel@tonic-gate 		if (magcnt > 0 &&
13577c478bd9Sstevel@tonic-gate 		    bsearch(&buf, maglist, magcnt, sizeof (void *),
13587c478bd9Sstevel@tonic-gate 		    addrcmp) != NULL)
13597c478bd9Sstevel@tonic-gate 			return (WALK_NEXT);
13607c478bd9Sstevel@tonic-gate 
13617c478bd9Sstevel@tonic-gate 		if (type & KM_BUFCTL)
13627c478bd9Sstevel@tonic-gate 			return (bufctl_walk_callback(cp, wsp, wsp->walk_addr));
13637c478bd9Sstevel@tonic-gate 
13647c478bd9Sstevel@tonic-gate 		return (kmem_walk_callback(wsp, (uintptr_t)buf));
13657c478bd9Sstevel@tonic-gate 	}
13667c478bd9Sstevel@tonic-gate 
13677c478bd9Sstevel@tonic-gate 	ret = WALK_NEXT;
13687c478bd9Sstevel@tonic-gate 
13697c478bd9Sstevel@tonic-gate 	addr = kmw->kmw_addr;
13707c478bd9Sstevel@tonic-gate 
13717c478bd9Sstevel@tonic-gate 	/*
13727c478bd9Sstevel@tonic-gate 	 * If we're walking freed buffers, report everything in the
13737c478bd9Sstevel@tonic-gate 	 * magazine layer before processing the first slab.
13747c478bd9Sstevel@tonic-gate 	 */
13757c478bd9Sstevel@tonic-gate 	if ((type & KM_FREE) && magcnt != 0) {
13767c478bd9Sstevel@tonic-gate 		kmw->kmw_count = 0;		/* only do this once */
13777c478bd9Sstevel@tonic-gate 		for (i = 0; i < magcnt; i++) {
13787c478bd9Sstevel@tonic-gate 			buf = maglist[i];
13797c478bd9Sstevel@tonic-gate 
13807c478bd9Sstevel@tonic-gate 			if (type & KM_BUFCTL) {
13817c478bd9Sstevel@tonic-gate 				uintptr_t out;
13827c478bd9Sstevel@tonic-gate 
13837c478bd9Sstevel@tonic-gate 				if (cp->cache_flags & KMF_BUFTAG) {
13847c478bd9Sstevel@tonic-gate 					kmem_buftag_t *btp;
13857c478bd9Sstevel@tonic-gate 					kmem_buftag_t tag;
13867c478bd9Sstevel@tonic-gate 
13877c478bd9Sstevel@tonic-gate 					/* LINTED - alignment */
13887c478bd9Sstevel@tonic-gate 					btp = KMEM_BUFTAG(cp, buf);
13897c478bd9Sstevel@tonic-gate 					if (mdb_vread(&tag, sizeof (tag),
13907c478bd9Sstevel@tonic-gate 					    (uintptr_t)btp) == -1) {
13917c478bd9Sstevel@tonic-gate 						mdb_warn("reading buftag for "
13927c478bd9Sstevel@tonic-gate 						    "%p at %p", buf, btp);
13937c478bd9Sstevel@tonic-gate 						continue;
13947c478bd9Sstevel@tonic-gate 					}
13957c478bd9Sstevel@tonic-gate 					out = (uintptr_t)tag.bt_bufctl;
13967c478bd9Sstevel@tonic-gate 				} else {
13977c478bd9Sstevel@tonic-gate 					if (kmem_hash_lookup(cp, addr, buf,
13987c478bd9Sstevel@tonic-gate 					    &out) == -1)
13997c478bd9Sstevel@tonic-gate 						continue;
14007c478bd9Sstevel@tonic-gate 				}
14017c478bd9Sstevel@tonic-gate 				ret = bufctl_walk_callback(cp, wsp, out);
14027c478bd9Sstevel@tonic-gate 			} else {
14037c478bd9Sstevel@tonic-gate 				ret = kmem_walk_callback(wsp, (uintptr_t)buf);
14047c478bd9Sstevel@tonic-gate 			}
14057c478bd9Sstevel@tonic-gate 
14067c478bd9Sstevel@tonic-gate 			if (ret != WALK_NEXT)
14077c478bd9Sstevel@tonic-gate 				return (ret);
14087c478bd9Sstevel@tonic-gate 		}
14097c478bd9Sstevel@tonic-gate 	}
14107c478bd9Sstevel@tonic-gate 
14117c478bd9Sstevel@tonic-gate 	/*
14127c478bd9Sstevel@tonic-gate 	 * If they want constructed buffers, we're finished, since the
14137c478bd9Sstevel@tonic-gate 	 * magazine layer holds them all.
14147c478bd9Sstevel@tonic-gate 	 */
14157c478bd9Sstevel@tonic-gate 	if (type & KM_CONSTRUCTED)
14167c478bd9Sstevel@tonic-gate 		return (WALK_DONE);
14177c478bd9Sstevel@tonic-gate 
14187c478bd9Sstevel@tonic-gate 	/*
14197c478bd9Sstevel@tonic-gate 	 * Handle the buffers in the current slab
14207c478bd9Sstevel@tonic-gate 	 */
14217c478bd9Sstevel@tonic-gate 	chunksize = cp->cache_chunksize;
14227c478bd9Sstevel@tonic-gate 	slabsize = cp->cache_slabsize;
14237c478bd9Sstevel@tonic-gate 
14247c478bd9Sstevel@tonic-gate 	sp = wsp->walk_layer;
14257c478bd9Sstevel@tonic-gate 	chunks = sp->slab_chunks;
14267c478bd9Sstevel@tonic-gate 	kbase = sp->slab_base;
14277c478bd9Sstevel@tonic-gate 
14287c478bd9Sstevel@tonic-gate 	dprintf(("kbase is %p\n", kbase));
14297c478bd9Sstevel@tonic-gate 
14307c478bd9Sstevel@tonic-gate 	if (!(cp->cache_flags & KMF_HASH)) {
14317c478bd9Sstevel@tonic-gate 		valid = kmw->kmw_valid;
14327c478bd9Sstevel@tonic-gate 		ubase = kmw->kmw_ubase;
14337c478bd9Sstevel@tonic-gate 
14347c478bd9Sstevel@tonic-gate 		if (mdb_vread(ubase, chunks * chunksize,
14357c478bd9Sstevel@tonic-gate 		    (uintptr_t)kbase) == -1) {
14367c478bd9Sstevel@tonic-gate 			mdb_warn("failed to read slab contents at %p", kbase);
14377c478bd9Sstevel@tonic-gate 			return (WALK_ERR);
14387c478bd9Sstevel@tonic-gate 		}
14397c478bd9Sstevel@tonic-gate 
14407c478bd9Sstevel@tonic-gate 		/*
14417c478bd9Sstevel@tonic-gate 		 * Set up the valid map as fully allocated -- we'll punch
14427c478bd9Sstevel@tonic-gate 		 * out the freelist.
14437c478bd9Sstevel@tonic-gate 		 */
14447c478bd9Sstevel@tonic-gate 		if (type & KM_ALLOCATED)
14457c478bd9Sstevel@tonic-gate 			(void) memset(valid, 1, chunks);
14467c478bd9Sstevel@tonic-gate 	} else {
14477c478bd9Sstevel@tonic-gate 		valid = NULL;
14487c478bd9Sstevel@tonic-gate 		ubase = NULL;
14497c478bd9Sstevel@tonic-gate 	}
14507c478bd9Sstevel@tonic-gate 
14517c478bd9Sstevel@tonic-gate 	/*
14527c478bd9Sstevel@tonic-gate 	 * walk the slab's freelist
14537c478bd9Sstevel@tonic-gate 	 */
14547c478bd9Sstevel@tonic-gate 	bcp = sp->slab_head;
14557c478bd9Sstevel@tonic-gate 
14567c478bd9Sstevel@tonic-gate 	dprintf(("refcnt is %d; chunks is %d\n", sp->slab_refcnt, chunks));
14577c478bd9Sstevel@tonic-gate 
14587c478bd9Sstevel@tonic-gate 	/*
14597c478bd9Sstevel@tonic-gate 	 * since we could be in the middle of allocating a buffer,
14607c478bd9Sstevel@tonic-gate 	 * our refcnt could be one higher than it ought to be.  So we
14617c478bd9Sstevel@tonic-gate 	 * check one further on the freelist than the count allows.
14627c478bd9Sstevel@tonic-gate 	 */
14637c478bd9Sstevel@tonic-gate 	for (i = sp->slab_refcnt; i <= chunks; i++) {
14647c478bd9Sstevel@tonic-gate 		uint_t ndx;
14657c478bd9Sstevel@tonic-gate 
14667c478bd9Sstevel@tonic-gate 		dprintf(("bcp is %p\n", bcp));
14677c478bd9Sstevel@tonic-gate 
14687c478bd9Sstevel@tonic-gate 		if (bcp == NULL) {
14697c478bd9Sstevel@tonic-gate 			if (i == chunks)
14707c478bd9Sstevel@tonic-gate 				break;
14717c478bd9Sstevel@tonic-gate 			mdb_warn(
14727c478bd9Sstevel@tonic-gate 			    "slab %p in cache %p freelist too short by %d\n",
14737c478bd9Sstevel@tonic-gate 			    sp, addr, chunks - i);
14747c478bd9Sstevel@tonic-gate 			break;
14757c478bd9Sstevel@tonic-gate 		}
14767c478bd9Sstevel@tonic-gate 
14777c478bd9Sstevel@tonic-gate 		if (cp->cache_flags & KMF_HASH) {
14787c478bd9Sstevel@tonic-gate 			if (mdb_vread(&bc, sizeof (bc), (uintptr_t)bcp) == -1) {
14797c478bd9Sstevel@tonic-gate 				mdb_warn("failed to read bufctl ptr at %p",
14807c478bd9Sstevel@tonic-gate 				    bcp);
14817c478bd9Sstevel@tonic-gate 				break;
14827c478bd9Sstevel@tonic-gate 			}
14837c478bd9Sstevel@tonic-gate 			buf = bc.bc_addr;
14847c478bd9Sstevel@tonic-gate 		} else {
14857c478bd9Sstevel@tonic-gate 			/*
14867c478bd9Sstevel@tonic-gate 			 * Otherwise the buffer is in the slab which
14877c478bd9Sstevel@tonic-gate 			 * we've read in;  we just need to determine
14887c478bd9Sstevel@tonic-gate 			 * its offset in the slab to find the
14897c478bd9Sstevel@tonic-gate 			 * kmem_bufctl_t.
14907c478bd9Sstevel@tonic-gate 			 */
14917c478bd9Sstevel@tonic-gate 			bc = *((kmem_bufctl_t *)
14927c478bd9Sstevel@tonic-gate 			    ((uintptr_t)bcp - (uintptr_t)kbase +
14937c478bd9Sstevel@tonic-gate 			    (uintptr_t)ubase));
14947c478bd9Sstevel@tonic-gate 
14957c478bd9Sstevel@tonic-gate 			buf = KMEM_BUF(cp, bcp);
14967c478bd9Sstevel@tonic-gate 		}
14977c478bd9Sstevel@tonic-gate 
14987c478bd9Sstevel@tonic-gate 		ndx = ((uintptr_t)buf - (uintptr_t)kbase) / chunksize;
14997c478bd9Sstevel@tonic-gate 
15007c478bd9Sstevel@tonic-gate 		if (ndx > slabsize / cp->cache_bufsize) {
15017c478bd9Sstevel@tonic-gate 			/*
15027c478bd9Sstevel@tonic-gate 			 * This is very wrong; we have managed to find
15037c478bd9Sstevel@tonic-gate 			 * a buffer in the slab which shouldn't
15047c478bd9Sstevel@tonic-gate 			 * actually be here.  Emit a warning, and
15057c478bd9Sstevel@tonic-gate 			 * try to continue.
15067c478bd9Sstevel@tonic-gate 			 */
15077c478bd9Sstevel@tonic-gate 			mdb_warn("buf %p is out of range for "
15087c478bd9Sstevel@tonic-gate 			    "slab %p, cache %p\n", buf, sp, addr);
15097c478bd9Sstevel@tonic-gate 		} else if (type & KM_ALLOCATED) {
15107c478bd9Sstevel@tonic-gate 			/*
15117c478bd9Sstevel@tonic-gate 			 * we have found a buffer on the slab's freelist;
15127c478bd9Sstevel@tonic-gate 			 * clear its entry
15137c478bd9Sstevel@tonic-gate 			 */
15147c478bd9Sstevel@tonic-gate 			valid[ndx] = 0;
15157c478bd9Sstevel@tonic-gate 		} else {
15167c478bd9Sstevel@tonic-gate 			/*
15177c478bd9Sstevel@tonic-gate 			 * Report this freed buffer
15187c478bd9Sstevel@tonic-gate 			 */
15197c478bd9Sstevel@tonic-gate 			if (type & KM_BUFCTL) {
15207c478bd9Sstevel@tonic-gate 				ret = bufctl_walk_callback(cp, wsp,
15217c478bd9Sstevel@tonic-gate 				    (uintptr_t)bcp);
15227c478bd9Sstevel@tonic-gate 			} else {
15237c478bd9Sstevel@tonic-gate 				ret = kmem_walk_callback(wsp, (uintptr_t)buf);
15247c478bd9Sstevel@tonic-gate 			}
15257c478bd9Sstevel@tonic-gate 			if (ret != WALK_NEXT)
15267c478bd9Sstevel@tonic-gate 				return (ret);
15277c478bd9Sstevel@tonic-gate 		}
15287c478bd9Sstevel@tonic-gate 
15297c478bd9Sstevel@tonic-gate 		bcp = bc.bc_next;
15307c478bd9Sstevel@tonic-gate 	}
15317c478bd9Sstevel@tonic-gate 
15327c478bd9Sstevel@tonic-gate 	if (bcp != NULL) {
15337c478bd9Sstevel@tonic-gate 		dprintf(("slab %p in cache %p freelist too long (%p)\n",
15347c478bd9Sstevel@tonic-gate 		    sp, addr, bcp));
15357c478bd9Sstevel@tonic-gate 	}
15367c478bd9Sstevel@tonic-gate 
15377c478bd9Sstevel@tonic-gate 	/*
15387c478bd9Sstevel@tonic-gate 	 * If we are walking freed buffers, the loop above handled reporting
15397c478bd9Sstevel@tonic-gate 	 * them.
15407c478bd9Sstevel@tonic-gate 	 */
15417c478bd9Sstevel@tonic-gate 	if (type & KM_FREE)
15427c478bd9Sstevel@tonic-gate 		return (WALK_NEXT);
15437c478bd9Sstevel@tonic-gate 
15447c478bd9Sstevel@tonic-gate 	if (type & KM_BUFCTL) {
15457c478bd9Sstevel@tonic-gate 		mdb_warn("impossible situation: small-slab KM_BUFCTL walk for "
15467c478bd9Sstevel@tonic-gate 		    "cache %p\n", addr);
15477c478bd9Sstevel@tonic-gate 		return (WALK_ERR);
15487c478bd9Sstevel@tonic-gate 	}
15497c478bd9Sstevel@tonic-gate 
15507c478bd9Sstevel@tonic-gate 	/*
15517c478bd9Sstevel@tonic-gate 	 * Report allocated buffers, skipping buffers in the magazine layer.
15527c478bd9Sstevel@tonic-gate 	 * We only get this far for small-slab caches.
15537c478bd9Sstevel@tonic-gate 	 */
15547c478bd9Sstevel@tonic-gate 	for (i = 0; ret == WALK_NEXT && i < chunks; i++) {
15557c478bd9Sstevel@tonic-gate 		buf = (char *)kbase + i * chunksize;
15567c478bd9Sstevel@tonic-gate 
15577c478bd9Sstevel@tonic-gate 		if (!valid[i])
15587c478bd9Sstevel@tonic-gate 			continue;		/* on slab freelist */
15597c478bd9Sstevel@tonic-gate 
15607c478bd9Sstevel@tonic-gate 		if (magcnt > 0 &&
15617c478bd9Sstevel@tonic-gate 		    bsearch(&buf, maglist, magcnt, sizeof (void *),
15627c478bd9Sstevel@tonic-gate 		    addrcmp) != NULL)
15637c478bd9Sstevel@tonic-gate 			continue;		/* in magazine layer */
15647c478bd9Sstevel@tonic-gate 
15657c478bd9Sstevel@tonic-gate 		ret = kmem_walk_callback(wsp, (uintptr_t)buf);
15667c478bd9Sstevel@tonic-gate 	}
15677c478bd9Sstevel@tonic-gate 	return (ret);
15687c478bd9Sstevel@tonic-gate }
15697c478bd9Sstevel@tonic-gate 
15707c478bd9Sstevel@tonic-gate void
15717c478bd9Sstevel@tonic-gate kmem_walk_fini(mdb_walk_state_t *wsp)
15727c478bd9Sstevel@tonic-gate {
15737c478bd9Sstevel@tonic-gate 	kmem_walk_t *kmw = wsp->walk_data;
15747c478bd9Sstevel@tonic-gate 	uintptr_t chunksize;
15757c478bd9Sstevel@tonic-gate 	uintptr_t slabsize;
15767c478bd9Sstevel@tonic-gate 
15777c478bd9Sstevel@tonic-gate 	if (kmw == NULL)
15787c478bd9Sstevel@tonic-gate 		return;
15797c478bd9Sstevel@tonic-gate 
15807c478bd9Sstevel@tonic-gate 	if (kmw->kmw_maglist != NULL)
15817c478bd9Sstevel@tonic-gate 		mdb_free(kmw->kmw_maglist, kmw->kmw_max * sizeof (void *));
15827c478bd9Sstevel@tonic-gate 
15837c478bd9Sstevel@tonic-gate 	chunksize = kmw->kmw_cp->cache_chunksize;
15847c478bd9Sstevel@tonic-gate 	slabsize = kmw->kmw_cp->cache_slabsize;
15857c478bd9Sstevel@tonic-gate 
15867c478bd9Sstevel@tonic-gate 	if (kmw->kmw_valid != NULL)
15877c478bd9Sstevel@tonic-gate 		mdb_free(kmw->kmw_valid, slabsize / chunksize);
15887c478bd9Sstevel@tonic-gate 	if (kmw->kmw_ubase != NULL)
15897c478bd9Sstevel@tonic-gate 		mdb_free(kmw->kmw_ubase, slabsize + sizeof (kmem_bufctl_t));
15907c478bd9Sstevel@tonic-gate 
15917c478bd9Sstevel@tonic-gate 	mdb_free(kmw->kmw_cp, kmw->kmw_csize);
15927c478bd9Sstevel@tonic-gate 	mdb_free(kmw, sizeof (kmem_walk_t));
15937c478bd9Sstevel@tonic-gate }
15947c478bd9Sstevel@tonic-gate 
15957c478bd9Sstevel@tonic-gate /*ARGSUSED*/
15967c478bd9Sstevel@tonic-gate static int
15977c478bd9Sstevel@tonic-gate kmem_walk_all(uintptr_t addr, const kmem_cache_t *c, mdb_walk_state_t *wsp)
15987c478bd9Sstevel@tonic-gate {
15997c478bd9Sstevel@tonic-gate 	/*
16007c478bd9Sstevel@tonic-gate 	 * Buffers allocated from NOTOUCH caches can also show up as freed
16017c478bd9Sstevel@tonic-gate 	 * memory in other caches.  This can be a little confusing, so we
16027c478bd9Sstevel@tonic-gate 	 * don't walk NOTOUCH caches when walking all caches (thereby assuring
16037c478bd9Sstevel@tonic-gate 	 * that "::walk kmem" and "::walk freemem" yield disjoint output).
16047c478bd9Sstevel@tonic-gate 	 */
16057c478bd9Sstevel@tonic-gate 	if (c->cache_cflags & KMC_NOTOUCH)
16067c478bd9Sstevel@tonic-gate 		return (WALK_NEXT);
16077c478bd9Sstevel@tonic-gate 
16087c478bd9Sstevel@tonic-gate 	if (mdb_pwalk(wsp->walk_data, wsp->walk_callback,
16097c478bd9Sstevel@tonic-gate 	    wsp->walk_cbdata, addr) == -1)
16107c478bd9Sstevel@tonic-gate 		return (WALK_DONE);
16117c478bd9Sstevel@tonic-gate 
16127c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
16137c478bd9Sstevel@tonic-gate }
16147c478bd9Sstevel@tonic-gate 
16157c478bd9Sstevel@tonic-gate #define	KMEM_WALK_ALL(name, wsp) { \
16167c478bd9Sstevel@tonic-gate 	wsp->walk_data = (name); \
16177c478bd9Sstevel@tonic-gate 	if (mdb_walk("kmem_cache", (mdb_walk_cb_t)kmem_walk_all, wsp) == -1) \
16187c478bd9Sstevel@tonic-gate 		return (WALK_ERR); \
16197c478bd9Sstevel@tonic-gate 	return (WALK_DONE); \
16207c478bd9Sstevel@tonic-gate }
16217c478bd9Sstevel@tonic-gate 
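/*
 * Walker entry points.  With no starting address, each walker uses
 * KMEM_WALK_ALL to visit every kmem cache (skipping KMC_NOTOUCH caches,
 * per kmem_walk_all() above); with a cache address, it walks just that
 * cache.  Illustrative mdb usage (addresses are placeholders):
 *
 *	> <cache addr>::walk kmem		allocated buffers
 *	> <cache addr>::walk freemem		free buffers
 *	> <cache addr>::walk bufctl		allocated bufctls (KMF_HASH caches only)
 */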
16227c478bd9Sstevel@tonic-gate int
16237c478bd9Sstevel@tonic-gate kmem_walk_init(mdb_walk_state_t *wsp)
16247c478bd9Sstevel@tonic-gate {
16257c478bd9Sstevel@tonic-gate 	if (wsp->walk_arg != NULL)
16267c478bd9Sstevel@tonic-gate 		wsp->walk_addr = (uintptr_t)wsp->walk_arg;
16277c478bd9Sstevel@tonic-gate 
16287c478bd9Sstevel@tonic-gate 	if (wsp->walk_addr == NULL)
16297c478bd9Sstevel@tonic-gate 		KMEM_WALK_ALL("kmem", wsp);
16307c478bd9Sstevel@tonic-gate 	return (kmem_walk_init_common(wsp, KM_ALLOCATED));
16317c478bd9Sstevel@tonic-gate }
16327c478bd9Sstevel@tonic-gate 
16337c478bd9Sstevel@tonic-gate int
16347c478bd9Sstevel@tonic-gate bufctl_walk_init(mdb_walk_state_t *wsp)
16357c478bd9Sstevel@tonic-gate {
16367c478bd9Sstevel@tonic-gate 	if (wsp->walk_addr == NULL)
16377c478bd9Sstevel@tonic-gate 		KMEM_WALK_ALL("bufctl", wsp);
16387c478bd9Sstevel@tonic-gate 	return (kmem_walk_init_common(wsp, KM_ALLOCATED | KM_BUFCTL));
16397c478bd9Sstevel@tonic-gate }
16407c478bd9Sstevel@tonic-gate 
16417c478bd9Sstevel@tonic-gate int
16427c478bd9Sstevel@tonic-gate freemem_walk_init(mdb_walk_state_t *wsp)
16437c478bd9Sstevel@tonic-gate {
16447c478bd9Sstevel@tonic-gate 	if (wsp->walk_addr == NULL)
16457c478bd9Sstevel@tonic-gate 		KMEM_WALK_ALL("freemem", wsp);
16467c478bd9Sstevel@tonic-gate 	return (kmem_walk_init_common(wsp, KM_FREE));
16477c478bd9Sstevel@tonic-gate }
16487c478bd9Sstevel@tonic-gate 
16497c478bd9Sstevel@tonic-gate int
16507c478bd9Sstevel@tonic-gate freemem_constructed_walk_init(mdb_walk_state_t *wsp)
16517c478bd9Sstevel@tonic-gate {
16527c478bd9Sstevel@tonic-gate 	if (wsp->walk_addr == NULL)
16537c478bd9Sstevel@tonic-gate 		KMEM_WALK_ALL("freemem_constructed", wsp);
16547c478bd9Sstevel@tonic-gate 	return (kmem_walk_init_common(wsp, KM_FREE | KM_CONSTRUCTED));
16557c478bd9Sstevel@tonic-gate }
16567c478bd9Sstevel@tonic-gate 
16577c478bd9Sstevel@tonic-gate int
16587c478bd9Sstevel@tonic-gate freectl_walk_init(mdb_walk_state_t *wsp)
16597c478bd9Sstevel@tonic-gate {
16607c478bd9Sstevel@tonic-gate 	if (wsp->walk_addr == NULL)
16617c478bd9Sstevel@tonic-gate 		KMEM_WALK_ALL("freectl", wsp);
16627c478bd9Sstevel@tonic-gate 	return (kmem_walk_init_common(wsp, KM_FREE | KM_BUFCTL));
16637c478bd9Sstevel@tonic-gate }
16647c478bd9Sstevel@tonic-gate 
16657c478bd9Sstevel@tonic-gate int
16667c478bd9Sstevel@tonic-gate freectl_constructed_walk_init(mdb_walk_state_t *wsp)
16677c478bd9Sstevel@tonic-gate {
16687c478bd9Sstevel@tonic-gate 	if (wsp->walk_addr == NULL)
16697c478bd9Sstevel@tonic-gate 		KMEM_WALK_ALL("freectl_constructed", wsp);
16707c478bd9Sstevel@tonic-gate 	return (kmem_walk_init_common(wsp,
16717c478bd9Sstevel@tonic-gate 	    KM_FREE | KM_BUFCTL | KM_CONSTRUCTED));
16727c478bd9Sstevel@tonic-gate }
16737c478bd9Sstevel@tonic-gate 
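/*
 * The bufctl_history walk starts from a kmem_bufctl_audit_t address and
 * follows its bc_lastlog chain back through the transaction log.  It
 * stops when a logged entry no longer refers to the same buffer, cache,
 * and slab, or when timestamps stop decreasing (which would indicate a
 * recycled log entry).  Illustrative usage:
 *
 *	> <bufctl addr>::walk bufctl_history
 */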
16747c478bd9Sstevel@tonic-gate typedef struct bufctl_history_walk {
16757c478bd9Sstevel@tonic-gate 	void		*bhw_next;
16767c478bd9Sstevel@tonic-gate 	kmem_cache_t	*bhw_cache;
16777c478bd9Sstevel@tonic-gate 	kmem_slab_t	*bhw_slab;
16787c478bd9Sstevel@tonic-gate 	hrtime_t	bhw_timestamp;
16797c478bd9Sstevel@tonic-gate } bufctl_history_walk_t;
16807c478bd9Sstevel@tonic-gate 
16817c478bd9Sstevel@tonic-gate int
16827c478bd9Sstevel@tonic-gate bufctl_history_walk_init(mdb_walk_state_t *wsp)
16837c478bd9Sstevel@tonic-gate {
16847c478bd9Sstevel@tonic-gate 	bufctl_history_walk_t *bhw;
16857c478bd9Sstevel@tonic-gate 	kmem_bufctl_audit_t bc;
16867c478bd9Sstevel@tonic-gate 	kmem_bufctl_audit_t bcn;
16877c478bd9Sstevel@tonic-gate 
16887c478bd9Sstevel@tonic-gate 	if (wsp->walk_addr == NULL) {
16897c478bd9Sstevel@tonic-gate 		mdb_warn("bufctl_history walk doesn't support global walks\n");
16907c478bd9Sstevel@tonic-gate 		return (WALK_ERR);
16917c478bd9Sstevel@tonic-gate 	}
16927c478bd9Sstevel@tonic-gate 
16937c478bd9Sstevel@tonic-gate 	if (mdb_vread(&bc, sizeof (bc), wsp->walk_addr) == -1) {
16947c478bd9Sstevel@tonic-gate 		mdb_warn("unable to read bufctl at %p", wsp->walk_addr);
16957c478bd9Sstevel@tonic-gate 		return (WALK_ERR);
16967c478bd9Sstevel@tonic-gate 	}
16977c478bd9Sstevel@tonic-gate 
16987c478bd9Sstevel@tonic-gate 	bhw = mdb_zalloc(sizeof (*bhw), UM_SLEEP);
16997c478bd9Sstevel@tonic-gate 	bhw->bhw_timestamp = 0;
17007c478bd9Sstevel@tonic-gate 	bhw->bhw_cache = bc.bc_cache;
17017c478bd9Sstevel@tonic-gate 	bhw->bhw_slab = bc.bc_slab;
17027c478bd9Sstevel@tonic-gate 
17037c478bd9Sstevel@tonic-gate 	/*
17047c478bd9Sstevel@tonic-gate 	 * sometimes the first log entry matches the base bufctl;  in that
17057c478bd9Sstevel@tonic-gate 	 * case, skip the base bufctl.
17067c478bd9Sstevel@tonic-gate 	 */
17077c478bd9Sstevel@tonic-gate 	if (bc.bc_lastlog != NULL &&
17087c478bd9Sstevel@tonic-gate 	    mdb_vread(&bcn, sizeof (bcn), (uintptr_t)bc.bc_lastlog) != -1 &&
17097c478bd9Sstevel@tonic-gate 	    bc.bc_addr == bcn.bc_addr &&
17107c478bd9Sstevel@tonic-gate 	    bc.bc_cache == bcn.bc_cache &&
17117c478bd9Sstevel@tonic-gate 	    bc.bc_slab == bcn.bc_slab &&
17127c478bd9Sstevel@tonic-gate 	    bc.bc_timestamp == bcn.bc_timestamp &&
17137c478bd9Sstevel@tonic-gate 	    bc.bc_thread == bcn.bc_thread)
17147c478bd9Sstevel@tonic-gate 		bhw->bhw_next = bc.bc_lastlog;
17157c478bd9Sstevel@tonic-gate 	else
17167c478bd9Sstevel@tonic-gate 		bhw->bhw_next = (void *)wsp->walk_addr;
17177c478bd9Sstevel@tonic-gate 
17187c478bd9Sstevel@tonic-gate 	wsp->walk_addr = (uintptr_t)bc.bc_addr;
17197c478bd9Sstevel@tonic-gate 	wsp->walk_data = bhw;
17207c478bd9Sstevel@tonic-gate 
17217c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
17227c478bd9Sstevel@tonic-gate }
17237c478bd9Sstevel@tonic-gate 
17247c478bd9Sstevel@tonic-gate int
17257c478bd9Sstevel@tonic-gate bufctl_history_walk_step(mdb_walk_state_t *wsp)
17267c478bd9Sstevel@tonic-gate {
17277c478bd9Sstevel@tonic-gate 	bufctl_history_walk_t *bhw = wsp->walk_data;
17287c478bd9Sstevel@tonic-gate 	uintptr_t addr = (uintptr_t)bhw->bhw_next;
17297c478bd9Sstevel@tonic-gate 	uintptr_t baseaddr = wsp->walk_addr;
17307c478bd9Sstevel@tonic-gate 	kmem_bufctl_audit_t bc;
17317c478bd9Sstevel@tonic-gate 
17327c478bd9Sstevel@tonic-gate 	if (addr == NULL)
17337c478bd9Sstevel@tonic-gate 		return (WALK_DONE);
17347c478bd9Sstevel@tonic-gate 
17357c478bd9Sstevel@tonic-gate 	if (mdb_vread(&bc, sizeof (bc), addr) == -1) {
17367c478bd9Sstevel@tonic-gate 		mdb_warn("unable to read bufctl at %p", bhw->bhw_next);
17377c478bd9Sstevel@tonic-gate 		return (WALK_ERR);
17387c478bd9Sstevel@tonic-gate 	}
17397c478bd9Sstevel@tonic-gate 
17407c478bd9Sstevel@tonic-gate 	/*
17417c478bd9Sstevel@tonic-gate 	 * The bufctl is only valid if the address, cache, and slab are
17427c478bd9Sstevel@tonic-gate 	 * correct.  We also check that the timestamp is decreasing, to
17437c478bd9Sstevel@tonic-gate 	 * prevent infinite loops.
17447c478bd9Sstevel@tonic-gate 	 */
17457c478bd9Sstevel@tonic-gate 	if ((uintptr_t)bc.bc_addr != baseaddr ||
17467c478bd9Sstevel@tonic-gate 	    bc.bc_cache != bhw->bhw_cache ||
17477c478bd9Sstevel@tonic-gate 	    bc.bc_slab != bhw->bhw_slab ||
17487c478bd9Sstevel@tonic-gate 	    (bhw->bhw_timestamp != 0 && bc.bc_timestamp >= bhw->bhw_timestamp))
17497c478bd9Sstevel@tonic-gate 		return (WALK_DONE);
17507c478bd9Sstevel@tonic-gate 
17517c478bd9Sstevel@tonic-gate 	bhw->bhw_next = bc.bc_lastlog;
17527c478bd9Sstevel@tonic-gate 	bhw->bhw_timestamp = bc.bc_timestamp;
17537c478bd9Sstevel@tonic-gate 
17547c478bd9Sstevel@tonic-gate 	return (wsp->walk_callback(addr, &bc, wsp->walk_cbdata));
17557c478bd9Sstevel@tonic-gate }
17567c478bd9Sstevel@tonic-gate 
17577c478bd9Sstevel@tonic-gate void
17587c478bd9Sstevel@tonic-gate bufctl_history_walk_fini(mdb_walk_state_t *wsp)
17597c478bd9Sstevel@tonic-gate {
17607c478bd9Sstevel@tonic-gate 	bufctl_history_walk_t *bhw = wsp->walk_data;
17617c478bd9Sstevel@tonic-gate 
17627c478bd9Sstevel@tonic-gate 	mdb_free(bhw, sizeof (*bhw));
17637c478bd9Sstevel@tonic-gate }
17647c478bd9Sstevel@tonic-gate 
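/*
 * The kmem log walk reads an entire transaction log (by default the one
 * named by the kmem_transaction_log variable), collects a pointer to
 * every kmem_bufctl_audit_t slot in every chunk, sorts them with
 * bufctlcmp() (defined elsewhere in this module), and hands the sorted
 * entries to the callback one at a time, translating each local pointer
 * back to its address within the target's log.
 */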
17657c478bd9Sstevel@tonic-gate typedef struct kmem_log_walk {
17667c478bd9Sstevel@tonic-gate 	kmem_bufctl_audit_t *klw_base;
17677c478bd9Sstevel@tonic-gate 	kmem_bufctl_audit_t **klw_sorted;
17687c478bd9Sstevel@tonic-gate 	kmem_log_header_t klw_lh;
17697c478bd9Sstevel@tonic-gate 	size_t klw_size;
17707c478bd9Sstevel@tonic-gate 	size_t klw_maxndx;
17717c478bd9Sstevel@tonic-gate 	size_t klw_ndx;
17727c478bd9Sstevel@tonic-gate } kmem_log_walk_t;
17737c478bd9Sstevel@tonic-gate 
17747c478bd9Sstevel@tonic-gate int
17757c478bd9Sstevel@tonic-gate kmem_log_walk_init(mdb_walk_state_t *wsp)
17767c478bd9Sstevel@tonic-gate {
17777c478bd9Sstevel@tonic-gate 	uintptr_t lp = wsp->walk_addr;
17787c478bd9Sstevel@tonic-gate 	kmem_log_walk_t *klw;
17797c478bd9Sstevel@tonic-gate 	kmem_log_header_t *lhp;
17807c478bd9Sstevel@tonic-gate 	int maxndx, i, j, k;
17817c478bd9Sstevel@tonic-gate 
17827c478bd9Sstevel@tonic-gate 	/*
17837c478bd9Sstevel@tonic-gate 	 * By default (global walk), walk the kmem_transaction_log.  Otherwise
17847c478bd9Sstevel@tonic-gate 	 * read the log whose kmem_log_header_t is stored at walk_addr.
17857c478bd9Sstevel@tonic-gate 	 */
17867c478bd9Sstevel@tonic-gate 	if (lp == NULL && mdb_readvar(&lp, "kmem_transaction_log") == -1) {
17877c478bd9Sstevel@tonic-gate 		mdb_warn("failed to read 'kmem_transaction_log'");
17887c478bd9Sstevel@tonic-gate 		return (WALK_ERR);
17897c478bd9Sstevel@tonic-gate 	}
17907c478bd9Sstevel@tonic-gate 
17917c478bd9Sstevel@tonic-gate 	if (lp == NULL) {
17927c478bd9Sstevel@tonic-gate 		mdb_warn("log is disabled\n");
17937c478bd9Sstevel@tonic-gate 		return (WALK_ERR);
17947c478bd9Sstevel@tonic-gate 	}
17957c478bd9Sstevel@tonic-gate 
17967c478bd9Sstevel@tonic-gate 	klw = mdb_zalloc(sizeof (kmem_log_walk_t), UM_SLEEP);
17977c478bd9Sstevel@tonic-gate 	lhp = &klw->klw_lh;
17987c478bd9Sstevel@tonic-gate 
17997c478bd9Sstevel@tonic-gate 	if (mdb_vread(lhp, sizeof (kmem_log_header_t), lp) == -1) {
18007c478bd9Sstevel@tonic-gate 		mdb_warn("failed to read log header at %p", lp);
18017c478bd9Sstevel@tonic-gate 		mdb_free(klw, sizeof (kmem_log_walk_t));
18027c478bd9Sstevel@tonic-gate 		return (WALK_ERR);
18037c478bd9Sstevel@tonic-gate 	}
18047c478bd9Sstevel@tonic-gate 
18057c478bd9Sstevel@tonic-gate 	klw->klw_size = lhp->lh_chunksize * lhp->lh_nchunks;
18067c478bd9Sstevel@tonic-gate 	klw->klw_base = mdb_alloc(klw->klw_size, UM_SLEEP);
18077c478bd9Sstevel@tonic-gate 	maxndx = lhp->lh_chunksize / sizeof (kmem_bufctl_audit_t) - 1;
18087c478bd9Sstevel@tonic-gate 
18097c478bd9Sstevel@tonic-gate 	if (mdb_vread(klw->klw_base, klw->klw_size,
18107c478bd9Sstevel@tonic-gate 	    (uintptr_t)lhp->lh_base) == -1) {
18117c478bd9Sstevel@tonic-gate 		mdb_warn("failed to read log at base %p", lhp->lh_base);
18127c478bd9Sstevel@tonic-gate 		mdb_free(klw->klw_base, klw->klw_size);
18137c478bd9Sstevel@tonic-gate 		mdb_free(klw, sizeof (kmem_log_walk_t));
18147c478bd9Sstevel@tonic-gate 		return (WALK_ERR);
18157c478bd9Sstevel@tonic-gate 	}
18167c478bd9Sstevel@tonic-gate 
18177c478bd9Sstevel@tonic-gate 	klw->klw_sorted = mdb_alloc(maxndx * lhp->lh_nchunks *
18187c478bd9Sstevel@tonic-gate 	    sizeof (kmem_bufctl_audit_t *), UM_SLEEP);
18197c478bd9Sstevel@tonic-gate 
18207c478bd9Sstevel@tonic-gate 	for (i = 0, k = 0; i < lhp->lh_nchunks; i++) {
18217c478bd9Sstevel@tonic-gate 		kmem_bufctl_audit_t *chunk = (kmem_bufctl_audit_t *)
18227c478bd9Sstevel@tonic-gate 		    ((uintptr_t)klw->klw_base + i * lhp->lh_chunksize);
18237c478bd9Sstevel@tonic-gate 
18247c478bd9Sstevel@tonic-gate 		for (j = 0; j < maxndx; j++)
18257c478bd9Sstevel@tonic-gate 			klw->klw_sorted[k++] = &chunk[j];
18267c478bd9Sstevel@tonic-gate 	}
18277c478bd9Sstevel@tonic-gate 
18287c478bd9Sstevel@tonic-gate 	qsort(klw->klw_sorted, k, sizeof (kmem_bufctl_audit_t *),
18297c478bd9Sstevel@tonic-gate 	    (int(*)(const void *, const void *))bufctlcmp);
18307c478bd9Sstevel@tonic-gate 
18317c478bd9Sstevel@tonic-gate 	klw->klw_maxndx = k;
18327c478bd9Sstevel@tonic-gate 	wsp->walk_data = klw;
18337c478bd9Sstevel@tonic-gate 
18347c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
18357c478bd9Sstevel@tonic-gate }
18367c478bd9Sstevel@tonic-gate 
18377c478bd9Sstevel@tonic-gate int
18387c478bd9Sstevel@tonic-gate kmem_log_walk_step(mdb_walk_state_t *wsp)
18397c478bd9Sstevel@tonic-gate {
18407c478bd9Sstevel@tonic-gate 	kmem_log_walk_t *klw = wsp->walk_data;
18417c478bd9Sstevel@tonic-gate 	kmem_bufctl_audit_t *bcp;
18427c478bd9Sstevel@tonic-gate 
18437c478bd9Sstevel@tonic-gate 	if (klw->klw_ndx == klw->klw_maxndx)
18447c478bd9Sstevel@tonic-gate 		return (WALK_DONE);
18457c478bd9Sstevel@tonic-gate 
18467c478bd9Sstevel@tonic-gate 	bcp = klw->klw_sorted[klw->klw_ndx++];
18477c478bd9Sstevel@tonic-gate 
18487c478bd9Sstevel@tonic-gate 	return (wsp->walk_callback((uintptr_t)bcp - (uintptr_t)klw->klw_base +
18497c478bd9Sstevel@tonic-gate 	    (uintptr_t)klw->klw_lh.lh_base, bcp, wsp->walk_cbdata));
18507c478bd9Sstevel@tonic-gate }
18517c478bd9Sstevel@tonic-gate 
18527c478bd9Sstevel@tonic-gate void
18537c478bd9Sstevel@tonic-gate kmem_log_walk_fini(mdb_walk_state_t *wsp)
18547c478bd9Sstevel@tonic-gate {
18557c478bd9Sstevel@tonic-gate 	kmem_log_walk_t *klw = wsp->walk_data;
18567c478bd9Sstevel@tonic-gate 
18577c478bd9Sstevel@tonic-gate 	mdb_free(klw->klw_base, klw->klw_size);
18587c478bd9Sstevel@tonic-gate 	mdb_free(klw->klw_sorted, klw->klw_maxndx *
18597c478bd9Sstevel@tonic-gate 	    sizeof (kmem_bufctl_audit_t *));
18607c478bd9Sstevel@tonic-gate 	mdb_free(klw, sizeof (kmem_log_walk_t));
18617c478bd9Sstevel@tonic-gate }
18627c478bd9Sstevel@tonic-gate 
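/*
 * allocdby/freedby: given a thread address, gather every bufctl whose
 * bc_thread matches by running the "bufctl" (or "freectl") walk over
 * every cache, sort the matches by timestamp (allocdby_cmp() puts
 * larger, i.e. more recent, timestamps first), and report them in that
 * order.  Illustrative usage:
 *
 *	> <thread addr>::allocdby
 */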
18637c478bd9Sstevel@tonic-gate typedef struct allocdby_bufctl {
18647c478bd9Sstevel@tonic-gate 	uintptr_t abb_addr;
18657c478bd9Sstevel@tonic-gate 	hrtime_t abb_ts;
18667c478bd9Sstevel@tonic-gate } allocdby_bufctl_t;
18677c478bd9Sstevel@tonic-gate 
18687c478bd9Sstevel@tonic-gate typedef struct allocdby_walk {
18697c478bd9Sstevel@tonic-gate 	const char *abw_walk;
18707c478bd9Sstevel@tonic-gate 	uintptr_t abw_thread;
18717c478bd9Sstevel@tonic-gate 	size_t abw_nbufs;
18727c478bd9Sstevel@tonic-gate 	size_t abw_size;
18737c478bd9Sstevel@tonic-gate 	allocdby_bufctl_t *abw_buf;
18747c478bd9Sstevel@tonic-gate 	size_t abw_ndx;
18757c478bd9Sstevel@tonic-gate } allocdby_walk_t;
18767c478bd9Sstevel@tonic-gate 
18777c478bd9Sstevel@tonic-gate int
18787c478bd9Sstevel@tonic-gate allocdby_walk_bufctl(uintptr_t addr, const kmem_bufctl_audit_t *bcp,
18797c478bd9Sstevel@tonic-gate     allocdby_walk_t *abw)
18807c478bd9Sstevel@tonic-gate {
18817c478bd9Sstevel@tonic-gate 	if ((uintptr_t)bcp->bc_thread != abw->abw_thread)
18827c478bd9Sstevel@tonic-gate 		return (WALK_NEXT);
18837c478bd9Sstevel@tonic-gate 
18847c478bd9Sstevel@tonic-gate 	if (abw->abw_nbufs == abw->abw_size) {
18857c478bd9Sstevel@tonic-gate 		allocdby_bufctl_t *buf;
18867c478bd9Sstevel@tonic-gate 		size_t oldsize = sizeof (allocdby_bufctl_t) * abw->abw_size;
18877c478bd9Sstevel@tonic-gate 
18887c478bd9Sstevel@tonic-gate 		buf = mdb_zalloc(oldsize << 1, UM_SLEEP);
18897c478bd9Sstevel@tonic-gate 
18907c478bd9Sstevel@tonic-gate 		bcopy(abw->abw_buf, buf, oldsize);
18917c478bd9Sstevel@tonic-gate 		mdb_free(abw->abw_buf, oldsize);
18927c478bd9Sstevel@tonic-gate 
18937c478bd9Sstevel@tonic-gate 		abw->abw_size <<= 1;
18947c478bd9Sstevel@tonic-gate 		abw->abw_buf = buf;
18957c478bd9Sstevel@tonic-gate 	}
18967c478bd9Sstevel@tonic-gate 
18977c478bd9Sstevel@tonic-gate 	abw->abw_buf[abw->abw_nbufs].abb_addr = addr;
18987c478bd9Sstevel@tonic-gate 	abw->abw_buf[abw->abw_nbufs].abb_ts = bcp->bc_timestamp;
18997c478bd9Sstevel@tonic-gate 	abw->abw_nbufs++;
19007c478bd9Sstevel@tonic-gate 
19017c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
19027c478bd9Sstevel@tonic-gate }
19037c478bd9Sstevel@tonic-gate 
19047c478bd9Sstevel@tonic-gate /*ARGSUSED*/
19057c478bd9Sstevel@tonic-gate int
19067c478bd9Sstevel@tonic-gate allocdby_walk_cache(uintptr_t addr, const kmem_cache_t *c, allocdby_walk_t *abw)
19077c478bd9Sstevel@tonic-gate {
19087c478bd9Sstevel@tonic-gate 	if (mdb_pwalk(abw->abw_walk, (mdb_walk_cb_t)allocdby_walk_bufctl,
19097c478bd9Sstevel@tonic-gate 	    abw, addr) == -1) {
19107c478bd9Sstevel@tonic-gate 		mdb_warn("couldn't walk bufctl for cache %p", addr);
19117c478bd9Sstevel@tonic-gate 		return (WALK_DONE);
19127c478bd9Sstevel@tonic-gate 	}
19137c478bd9Sstevel@tonic-gate 
19147c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
19157c478bd9Sstevel@tonic-gate }
19167c478bd9Sstevel@tonic-gate 
19177c478bd9Sstevel@tonic-gate static int
19187c478bd9Sstevel@tonic-gate allocdby_cmp(const allocdby_bufctl_t *lhs, const allocdby_bufctl_t *rhs)
19197c478bd9Sstevel@tonic-gate {
19207c478bd9Sstevel@tonic-gate 	if (lhs->abb_ts < rhs->abb_ts)
19217c478bd9Sstevel@tonic-gate 		return (1);
19227c478bd9Sstevel@tonic-gate 	if (lhs->abb_ts > rhs->abb_ts)
19237c478bd9Sstevel@tonic-gate 		return (-1);
19247c478bd9Sstevel@tonic-gate 	return (0);
19257c478bd9Sstevel@tonic-gate }
19267c478bd9Sstevel@tonic-gate 
19277c478bd9Sstevel@tonic-gate static int
19287c478bd9Sstevel@tonic-gate allocdby_walk_init_common(mdb_walk_state_t *wsp, const char *walk)
19297c478bd9Sstevel@tonic-gate {
19307c478bd9Sstevel@tonic-gate 	allocdby_walk_t *abw;
19317c478bd9Sstevel@tonic-gate 
19327c478bd9Sstevel@tonic-gate 	if (wsp->walk_addr == NULL) {
19337c478bd9Sstevel@tonic-gate 		mdb_warn("allocdby walk doesn't support global walks\n");
19347c478bd9Sstevel@tonic-gate 		return (WALK_ERR);
19357c478bd9Sstevel@tonic-gate 	}
19367c478bd9Sstevel@tonic-gate 
19377c478bd9Sstevel@tonic-gate 	abw = mdb_zalloc(sizeof (allocdby_walk_t), UM_SLEEP);
19387c478bd9Sstevel@tonic-gate 
19397c478bd9Sstevel@tonic-gate 	abw->abw_thread = wsp->walk_addr;
19407c478bd9Sstevel@tonic-gate 	abw->abw_walk = walk;
19417c478bd9Sstevel@tonic-gate 	abw->abw_size = 128;	/* something reasonable */
19427c478bd9Sstevel@tonic-gate 	abw->abw_buf =
19437c478bd9Sstevel@tonic-gate 	    mdb_zalloc(abw->abw_size * sizeof (allocdby_bufctl_t), UM_SLEEP);
19447c478bd9Sstevel@tonic-gate 
19457c478bd9Sstevel@tonic-gate 	wsp->walk_data = abw;
19467c478bd9Sstevel@tonic-gate 
19477c478bd9Sstevel@tonic-gate 	if (mdb_walk("kmem_cache",
19487c478bd9Sstevel@tonic-gate 	    (mdb_walk_cb_t)allocdby_walk_cache, abw) == -1) {
19497c478bd9Sstevel@tonic-gate 		mdb_warn("couldn't walk kmem_cache");
19507c478bd9Sstevel@tonic-gate 		allocdby_walk_fini(wsp);
19517c478bd9Sstevel@tonic-gate 		return (WALK_ERR);
19527c478bd9Sstevel@tonic-gate 	}
19537c478bd9Sstevel@tonic-gate 
19547c478bd9Sstevel@tonic-gate 	qsort(abw->abw_buf, abw->abw_nbufs, sizeof (allocdby_bufctl_t),
19557c478bd9Sstevel@tonic-gate 	    (int(*)(const void *, const void *))allocdby_cmp);
19567c478bd9Sstevel@tonic-gate 
19577c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
19587c478bd9Sstevel@tonic-gate }
19597c478bd9Sstevel@tonic-gate 
19607c478bd9Sstevel@tonic-gate int
19617c478bd9Sstevel@tonic-gate allocdby_walk_init(mdb_walk_state_t *wsp)
19627c478bd9Sstevel@tonic-gate {
19637c478bd9Sstevel@tonic-gate 	return (allocdby_walk_init_common(wsp, "bufctl"));
19647c478bd9Sstevel@tonic-gate }
19657c478bd9Sstevel@tonic-gate 
19667c478bd9Sstevel@tonic-gate int
19677c478bd9Sstevel@tonic-gate freedby_walk_init(mdb_walk_state_t *wsp)
19687c478bd9Sstevel@tonic-gate {
19697c478bd9Sstevel@tonic-gate 	return (allocdby_walk_init_common(wsp, "freectl"));
19707c478bd9Sstevel@tonic-gate }
19717c478bd9Sstevel@tonic-gate 
19727c478bd9Sstevel@tonic-gate int
19737c478bd9Sstevel@tonic-gate allocdby_walk_step(mdb_walk_state_t *wsp)
19747c478bd9Sstevel@tonic-gate {
19757c478bd9Sstevel@tonic-gate 	allocdby_walk_t *abw = wsp->walk_data;
19767c478bd9Sstevel@tonic-gate 	kmem_bufctl_audit_t bc;
19777c478bd9Sstevel@tonic-gate 	uintptr_t addr;
19787c478bd9Sstevel@tonic-gate 
19797c478bd9Sstevel@tonic-gate 	if (abw->abw_ndx == abw->abw_nbufs)
19807c478bd9Sstevel@tonic-gate 		return (WALK_DONE);
19817c478bd9Sstevel@tonic-gate 
19827c478bd9Sstevel@tonic-gate 	addr = abw->abw_buf[abw->abw_ndx++].abb_addr;
19837c478bd9Sstevel@tonic-gate 
19847c478bd9Sstevel@tonic-gate 	if (mdb_vread(&bc, sizeof (bc), addr) == -1) {
19857c478bd9Sstevel@tonic-gate 		mdb_warn("couldn't read bufctl at %p", addr);
19867c478bd9Sstevel@tonic-gate 		return (WALK_DONE);
19877c478bd9Sstevel@tonic-gate 	}
19887c478bd9Sstevel@tonic-gate 
19897c478bd9Sstevel@tonic-gate 	return (wsp->walk_callback(addr, &bc, wsp->walk_cbdata));
19907c478bd9Sstevel@tonic-gate }
19917c478bd9Sstevel@tonic-gate 
19927c478bd9Sstevel@tonic-gate void
19937c478bd9Sstevel@tonic-gate allocdby_walk_fini(mdb_walk_state_t *wsp)
19947c478bd9Sstevel@tonic-gate {
19957c478bd9Sstevel@tonic-gate 	allocdby_walk_t *abw = wsp->walk_data;
19967c478bd9Sstevel@tonic-gate 
19977c478bd9Sstevel@tonic-gate 	mdb_free(abw->abw_buf, sizeof (allocdby_bufctl_t) * abw->abw_size);
19987c478bd9Sstevel@tonic-gate 	mdb_free(abw, sizeof (allocdby_walk_t));
19997c478bd9Sstevel@tonic-gate }
20007c478bd9Sstevel@tonic-gate 
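/*
 * Print one line per bufctl: its address, its timestamp, and the first
 * stack frame that does not resolve to a kmem_* function, so the output
 * points at the allocating (or freeing) caller rather than at the
 * allocator itself.
 */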
20017c478bd9Sstevel@tonic-gate /*ARGSUSED*/
20027c478bd9Sstevel@tonic-gate int
20037c478bd9Sstevel@tonic-gate allocdby_walk(uintptr_t addr, const kmem_bufctl_audit_t *bcp, void *ignored)
20047c478bd9Sstevel@tonic-gate {
20057c478bd9Sstevel@tonic-gate 	char c[MDB_SYM_NAMLEN];
20067c478bd9Sstevel@tonic-gate 	GElf_Sym sym;
20077c478bd9Sstevel@tonic-gate 	int i;
20087c478bd9Sstevel@tonic-gate 
20097c478bd9Sstevel@tonic-gate 	mdb_printf("%0?p %12llx ", addr, bcp->bc_timestamp);
20107c478bd9Sstevel@tonic-gate 	for (i = 0; i < bcp->bc_depth; i++) {
20117c478bd9Sstevel@tonic-gate 		if (mdb_lookup_by_addr(bcp->bc_stack[i],
20127c478bd9Sstevel@tonic-gate 		    MDB_SYM_FUZZY, c, sizeof (c), &sym) == -1)
20137c478bd9Sstevel@tonic-gate 			continue;
20147c478bd9Sstevel@tonic-gate 		if (strncmp(c, "kmem_", 5) == 0)
20157c478bd9Sstevel@tonic-gate 			continue;
20167c478bd9Sstevel@tonic-gate 		mdb_printf("%s+0x%lx",
20177c478bd9Sstevel@tonic-gate 		    c, bcp->bc_stack[i] - (uintptr_t)sym.st_value);
20187c478bd9Sstevel@tonic-gate 		break;
20197c478bd9Sstevel@tonic-gate 	}
20207c478bd9Sstevel@tonic-gate 	mdb_printf("\n");
20217c478bd9Sstevel@tonic-gate 
20227c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
20237c478bd9Sstevel@tonic-gate }
20247c478bd9Sstevel@tonic-gate 
20257c478bd9Sstevel@tonic-gate static int
20267c478bd9Sstevel@tonic-gate allocdby_common(uintptr_t addr, uint_t flags, const char *w)
20277c478bd9Sstevel@tonic-gate {
20287c478bd9Sstevel@tonic-gate 	if (!(flags & DCMD_ADDRSPEC))
20297c478bd9Sstevel@tonic-gate 		return (DCMD_USAGE);
20307c478bd9Sstevel@tonic-gate 
20317c478bd9Sstevel@tonic-gate 	mdb_printf("%-?s %12s %s\n", "BUFCTL", "TIMESTAMP", "CALLER");
20327c478bd9Sstevel@tonic-gate 
20337c478bd9Sstevel@tonic-gate 	if (mdb_pwalk(w, (mdb_walk_cb_t)allocdby_walk, NULL, addr) == -1) {
20347c478bd9Sstevel@tonic-gate 		mdb_warn("can't walk '%s' for %p", w, addr);
20357c478bd9Sstevel@tonic-gate 		return (DCMD_ERR);
20367c478bd9Sstevel@tonic-gate 	}
20377c478bd9Sstevel@tonic-gate 
20387c478bd9Sstevel@tonic-gate 	return (DCMD_OK);
20397c478bd9Sstevel@tonic-gate }
20407c478bd9Sstevel@tonic-gate 
20417c478bd9Sstevel@tonic-gate /*ARGSUSED*/
20427c478bd9Sstevel@tonic-gate int
20437c478bd9Sstevel@tonic-gate allocdby(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
20447c478bd9Sstevel@tonic-gate {
20457c478bd9Sstevel@tonic-gate 	return (allocdby_common(addr, flags, "allocdby"));
20467c478bd9Sstevel@tonic-gate }
20477c478bd9Sstevel@tonic-gate 
20487c478bd9Sstevel@tonic-gate /*ARGSUSED*/
20497c478bd9Sstevel@tonic-gate int
20507c478bd9Sstevel@tonic-gate freedby(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
20517c478bd9Sstevel@tonic-gate {
20527c478bd9Sstevel@tonic-gate 	return (allocdby_common(addr, flags, "freedby"));
20537c478bd9Sstevel@tonic-gate }
20547c478bd9Sstevel@tonic-gate 
20557c478bd9Sstevel@tonic-gate /*
20567c478bd9Sstevel@tonic-gate  * Return a string describing the address in relation to the given thread's
20577c478bd9Sstevel@tonic-gate  * stack.
20587c478bd9Sstevel@tonic-gate  *
20597c478bd9Sstevel@tonic-gate  * - If the thread state is TS_FREE, return " (inactive interrupt thread)".
20607c478bd9Sstevel@tonic-gate  *
20617c478bd9Sstevel@tonic-gate  * - If the address is above the stack pointer, return an empty string
20627c478bd9Sstevel@tonic-gate  *   signifying that the address is active.
20637c478bd9Sstevel@tonic-gate  *
20647c478bd9Sstevel@tonic-gate  * - If the address is below the stack pointer, and the thread is not on proc,
20657c478bd9Sstevel@tonic-gate  *   return " (below sp)".
20667c478bd9Sstevel@tonic-gate  *
20677c478bd9Sstevel@tonic-gate  * - If the address is below the stack pointer, and the thread is on proc,
20687c478bd9Sstevel@tonic-gate  *   return " (possibly below sp)".  Depending on context, we may or may not
20697c478bd9Sstevel@tonic-gate  *   have an accurate t_sp.
20707c478bd9Sstevel@tonic-gate  */
20717c478bd9Sstevel@tonic-gate static const char *
20727c478bd9Sstevel@tonic-gate stack_active(const kthread_t *t, uintptr_t addr)
20737c478bd9Sstevel@tonic-gate {
20747c478bd9Sstevel@tonic-gate 	uintptr_t panicstk;
20757c478bd9Sstevel@tonic-gate 	GElf_Sym sym;
20767c478bd9Sstevel@tonic-gate 
20777c478bd9Sstevel@tonic-gate 	if (t->t_state == TS_FREE)
20787c478bd9Sstevel@tonic-gate 		return (" (inactive interrupt thread)");
20797c478bd9Sstevel@tonic-gate 
20807c478bd9Sstevel@tonic-gate 	/*
20817c478bd9Sstevel@tonic-gate 	 * Check to see if we're on the panic stack.  If so, ignore t_sp, as it
20827c478bd9Sstevel@tonic-gate 	 * no longer relates to the thread's real stack.
20837c478bd9Sstevel@tonic-gate 	 */
20847c478bd9Sstevel@tonic-gate 	if (mdb_lookup_by_name("panic_stack", &sym) == 0) {
20857c478bd9Sstevel@tonic-gate 		panicstk = (uintptr_t)sym.st_value;
20867c478bd9Sstevel@tonic-gate 
20877c478bd9Sstevel@tonic-gate 		if (t->t_sp >= panicstk && t->t_sp < panicstk + PANICSTKSIZE)
20887c478bd9Sstevel@tonic-gate 			return ("");
20897c478bd9Sstevel@tonic-gate 	}
20907c478bd9Sstevel@tonic-gate 
20917c478bd9Sstevel@tonic-gate 	if (addr >= t->t_sp + STACK_BIAS)
20927c478bd9Sstevel@tonic-gate 		return ("");
20937c478bd9Sstevel@tonic-gate 
20947c478bd9Sstevel@tonic-gate 	if (t->t_state == TS_ONPROC)
20957c478bd9Sstevel@tonic-gate 		return (" (possibly below sp)");
20967c478bd9Sstevel@tonic-gate 
20977c478bd9Sstevel@tonic-gate 	return (" (below sp)");
20987c478bd9Sstevel@tonic-gate }
20997c478bd9Sstevel@tonic-gate 
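/*
 * State for the ::whatis search; the dcmd that fills it in appears later
 * in this file.  The flags roughly mirror the dcmd's options: w_verbose
 * narrates the search, w_all reports every match instead of stopping at
 * the first, w_bufctl always prints bufctl/vmem_seg pointers, and
 * w_idspace searches identifier arenas rather than memory arenas.
 */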
21007c478bd9Sstevel@tonic-gate typedef struct whatis {
21017c478bd9Sstevel@tonic-gate 	uintptr_t w_addr;
21027c478bd9Sstevel@tonic-gate 	const kmem_cache_t *w_cache;
21037c478bd9Sstevel@tonic-gate 	const vmem_t *w_vmem;
21047c478bd9Sstevel@tonic-gate 	size_t w_slab_align;
21057c478bd9Sstevel@tonic-gate 	int w_slab_found;
21067c478bd9Sstevel@tonic-gate 	int w_found;
21077c478bd9Sstevel@tonic-gate 	int w_kmem_lite_count;
21087c478bd9Sstevel@tonic-gate 	uint_t w_verbose;
21097c478bd9Sstevel@tonic-gate 	uint_t w_freemem;
21107c478bd9Sstevel@tonic-gate 	uint_t w_all;
21117c478bd9Sstevel@tonic-gate 	uint_t w_bufctl;
21127c478bd9Sstevel@tonic-gate 	uint_t w_idspace;
21137c478bd9Sstevel@tonic-gate } whatis_t;
21147c478bd9Sstevel@tonic-gate 
21157c478bd9Sstevel@tonic-gate static void
21167c478bd9Sstevel@tonic-gate whatis_print_kmem(uintptr_t addr, uintptr_t baddr, whatis_t *w)
21177c478bd9Sstevel@tonic-gate {
21187c478bd9Sstevel@tonic-gate 	/* LINTED pointer cast may result in improper alignment */
21197c478bd9Sstevel@tonic-gate 	uintptr_t btaddr = (uintptr_t)KMEM_BUFTAG(w->w_cache, addr);
21207c478bd9Sstevel@tonic-gate 	intptr_t stat;
21217c478bd9Sstevel@tonic-gate 	int count = 0;
21227c478bd9Sstevel@tonic-gate 	int i;
21237c478bd9Sstevel@tonic-gate 	pc_t callers[16];
21247c478bd9Sstevel@tonic-gate 
21257c478bd9Sstevel@tonic-gate 	if (w->w_cache->cache_flags & KMF_REDZONE) {
21267c478bd9Sstevel@tonic-gate 		kmem_buftag_t bt;
21277c478bd9Sstevel@tonic-gate 
21287c478bd9Sstevel@tonic-gate 		if (mdb_vread(&bt, sizeof (bt), btaddr) == -1)
21297c478bd9Sstevel@tonic-gate 			goto done;
21307c478bd9Sstevel@tonic-gate 
21317c478bd9Sstevel@tonic-gate 		stat = (intptr_t)bt.bt_bufctl ^ bt.bt_bxstat;
21327c478bd9Sstevel@tonic-gate 
21337c478bd9Sstevel@tonic-gate 		if (stat != KMEM_BUFTAG_ALLOC && stat != KMEM_BUFTAG_FREE)
21347c478bd9Sstevel@tonic-gate 			goto done;
21357c478bd9Sstevel@tonic-gate 
21367c478bd9Sstevel@tonic-gate 		/*
21377c478bd9Sstevel@tonic-gate 		 * provide the bufctl ptr if it has useful information
21387c478bd9Sstevel@tonic-gate 		 */
21397c478bd9Sstevel@tonic-gate 		if (baddr == 0 && (w->w_cache->cache_flags & KMF_AUDIT))
21407c478bd9Sstevel@tonic-gate 			baddr = (uintptr_t)bt.bt_bufctl;
21417c478bd9Sstevel@tonic-gate 
21427c478bd9Sstevel@tonic-gate 		if (w->w_cache->cache_flags & KMF_LITE) {
21437c478bd9Sstevel@tonic-gate 			count = w->w_kmem_lite_count;
21447c478bd9Sstevel@tonic-gate 
21457c478bd9Sstevel@tonic-gate 			if (count * sizeof (pc_t) > sizeof (callers))
21467c478bd9Sstevel@tonic-gate 				count = 0;
21477c478bd9Sstevel@tonic-gate 
21487c478bd9Sstevel@tonic-gate 			if (count > 0 &&
21497c478bd9Sstevel@tonic-gate 			    mdb_vread(callers, count * sizeof (pc_t),
21507c478bd9Sstevel@tonic-gate 			    btaddr +
21517c478bd9Sstevel@tonic-gate 			    offsetof(kmem_buftag_lite_t, bt_history)) == -1)
21527c478bd9Sstevel@tonic-gate 				count = 0;
21537c478bd9Sstevel@tonic-gate 
21547c478bd9Sstevel@tonic-gate 			/*
21557c478bd9Sstevel@tonic-gate 			 * skip unused callers
21567c478bd9Sstevel@tonic-gate 			 */
21577c478bd9Sstevel@tonic-gate 			while (count > 0 && callers[count - 1] ==
21587c478bd9Sstevel@tonic-gate 			    (pc_t)KMEM_UNINITIALIZED_PATTERN)
21597c478bd9Sstevel@tonic-gate 				count--;
21607c478bd9Sstevel@tonic-gate 		}
21617c478bd9Sstevel@tonic-gate 	}
21627c478bd9Sstevel@tonic-gate 
21637c478bd9Sstevel@tonic-gate done:
21647c478bd9Sstevel@tonic-gate 	if (baddr == 0)
21657c478bd9Sstevel@tonic-gate 		mdb_printf("%p is %p+%p, %s from %s\n",
21667c478bd9Sstevel@tonic-gate 		    w->w_addr, addr, w->w_addr - addr,
21677c478bd9Sstevel@tonic-gate 		    w->w_freemem == FALSE ? "allocated" : "freed",
21687c478bd9Sstevel@tonic-gate 		    w->w_cache->cache_name);
21697c478bd9Sstevel@tonic-gate 	else
21707c478bd9Sstevel@tonic-gate 		mdb_printf("%p is %p+%p, bufctl %p %s from %s\n",
21717c478bd9Sstevel@tonic-gate 		    w->w_addr, addr, w->w_addr - addr, baddr,
21727c478bd9Sstevel@tonic-gate 		    w->w_freemem == FALSE ? "allocated" : "freed",
21737c478bd9Sstevel@tonic-gate 		    w->w_cache->cache_name);
21747c478bd9Sstevel@tonic-gate 
21757c478bd9Sstevel@tonic-gate 	if (count > 0) {
21767c478bd9Sstevel@tonic-gate 		mdb_inc_indent(8);
21777c478bd9Sstevel@tonic-gate 		mdb_printf("recent caller%s: %a%s", (count != 1)? "s":"",
21787c478bd9Sstevel@tonic-gate 		    callers[0], (count != 1)? ", ":"\n");
21797c478bd9Sstevel@tonic-gate 		for (i = 1; i < count; i++)
21807c478bd9Sstevel@tonic-gate 			mdb_printf("%a%s", callers[i],
21817c478bd9Sstevel@tonic-gate 			    (i + 1 < count)? ", ":"\n");
21827c478bd9Sstevel@tonic-gate 		mdb_dec_indent(8);
21837c478bd9Sstevel@tonic-gate 	}
21847c478bd9Sstevel@tonic-gate }
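
/*
 * Example of the output produced above (addresses and symbols are
 * hypothetical), for a buffer in a cache with KMF_LITE caller history:
 *
 *	ffffff01abcd1234 is ffffff01abcd1200+34, allocated from kmem_alloc_256
 *	        recent callers: genunix`allocb+0x78, genunix`kmem_alloc+0x4b
 */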
21857c478bd9Sstevel@tonic-gate 
21867c478bd9Sstevel@tonic-gate /*ARGSUSED*/
21877c478bd9Sstevel@tonic-gate static int
21887c478bd9Sstevel@tonic-gate whatis_walk_kmem(uintptr_t addr, void *ignored, whatis_t *w)
21897c478bd9Sstevel@tonic-gate {
21907c478bd9Sstevel@tonic-gate 	if (w->w_addr < addr || w->w_addr >= addr + w->w_cache->cache_bufsize)
21917c478bd9Sstevel@tonic-gate 		return (WALK_NEXT);
21927c478bd9Sstevel@tonic-gate 
21937c478bd9Sstevel@tonic-gate 	whatis_print_kmem(addr, 0, w);
21947c478bd9Sstevel@tonic-gate 	w->w_found++;
21957c478bd9Sstevel@tonic-gate 	return (w->w_all == TRUE ? WALK_NEXT : WALK_DONE);
21967c478bd9Sstevel@tonic-gate }
21977c478bd9Sstevel@tonic-gate 
21987c478bd9Sstevel@tonic-gate static int
21997c478bd9Sstevel@tonic-gate whatis_walk_seg(uintptr_t addr, const vmem_seg_t *vs, whatis_t *w)
22007c478bd9Sstevel@tonic-gate {
22017c478bd9Sstevel@tonic-gate 	if (w->w_addr < vs->vs_start || w->w_addr >= vs->vs_end)
22027c478bd9Sstevel@tonic-gate 		return (WALK_NEXT);
22037c478bd9Sstevel@tonic-gate 
22047c478bd9Sstevel@tonic-gate 	mdb_printf("%p is %p+%p ", w->w_addr,
22057c478bd9Sstevel@tonic-gate 	    vs->vs_start, w->w_addr - vs->vs_start);
22067c478bd9Sstevel@tonic-gate 
22077c478bd9Sstevel@tonic-gate 	/*
22087c478bd9Sstevel@tonic-gate 	 * Always provide the vmem_seg pointer if it has a stack trace.
22097c478bd9Sstevel@tonic-gate 	 */
22107c478bd9Sstevel@tonic-gate 	if (w->w_bufctl == TRUE ||
22117c478bd9Sstevel@tonic-gate 	    (vs->vs_type == VMEM_ALLOC && vs->vs_depth != 0)) {
22127c478bd9Sstevel@tonic-gate 		mdb_printf("(vmem_seg %p) ", addr);
22137c478bd9Sstevel@tonic-gate 	}
22147c478bd9Sstevel@tonic-gate 
22157c478bd9Sstevel@tonic-gate 	mdb_printf("%sfrom %s vmem arena\n", w->w_freemem == TRUE ?
22167c478bd9Sstevel@tonic-gate 	    "freed " : "", w->w_vmem->vm_name);
22177c478bd9Sstevel@tonic-gate 
22187c478bd9Sstevel@tonic-gate 	w->w_found++;
22197c478bd9Sstevel@tonic-gate 	return (w->w_all == TRUE ? WALK_NEXT : WALK_DONE);
22207c478bd9Sstevel@tonic-gate }
22217c478bd9Sstevel@tonic-gate 
22227c478bd9Sstevel@tonic-gate static int
22237c478bd9Sstevel@tonic-gate whatis_walk_vmem(uintptr_t addr, const vmem_t *vmem, whatis_t *w)
22247c478bd9Sstevel@tonic-gate {
22257c478bd9Sstevel@tonic-gate 	const char *nm = vmem->vm_name;
22267c478bd9Sstevel@tonic-gate 	w->w_vmem = vmem;
22277c478bd9Sstevel@tonic-gate 	w->w_freemem = FALSE;
22287c478bd9Sstevel@tonic-gate 
22297c478bd9Sstevel@tonic-gate 	if (((vmem->vm_cflags & VMC_IDENTIFIER) != 0) ^ w->w_idspace)
22307c478bd9Sstevel@tonic-gate 		return (WALK_NEXT);
22317c478bd9Sstevel@tonic-gate 
22327c478bd9Sstevel@tonic-gate 	if (w->w_verbose)
22337c478bd9Sstevel@tonic-gate 		mdb_printf("Searching vmem arena %s...\n", nm);
22347c478bd9Sstevel@tonic-gate 
22357c478bd9Sstevel@tonic-gate 	if (mdb_pwalk("vmem_alloc",
22367c478bd9Sstevel@tonic-gate 	    (mdb_walk_cb_t)whatis_walk_seg, w, addr) == -1) {
22377c478bd9Sstevel@tonic-gate 		mdb_warn("can't walk vmem seg for %p", addr);
22387c478bd9Sstevel@tonic-gate 		return (WALK_NEXT);
22397c478bd9Sstevel@tonic-gate 	}
22407c478bd9Sstevel@tonic-gate 
22417c478bd9Sstevel@tonic-gate 	if (w->w_found && w->w_all == FALSE)
22427c478bd9Sstevel@tonic-gate 		return (WALK_DONE);
22437c478bd9Sstevel@tonic-gate 
22447c478bd9Sstevel@tonic-gate 	if (w->w_verbose)
22457c478bd9Sstevel@tonic-gate 		mdb_printf("Searching vmem arena %s for free virtual...\n", nm);
22467c478bd9Sstevel@tonic-gate 
22477c478bd9Sstevel@tonic-gate 	w->w_freemem = TRUE;
22487c478bd9Sstevel@tonic-gate 
22497c478bd9Sstevel@tonic-gate 	if (mdb_pwalk("vmem_free",
22507c478bd9Sstevel@tonic-gate 	    (mdb_walk_cb_t)whatis_walk_seg, w, addr) == -1) {
22517c478bd9Sstevel@tonic-gate 		mdb_warn("can't walk vmem seg for %p", addr);
22527c478bd9Sstevel@tonic-gate 		return (WALK_NEXT);
22537c478bd9Sstevel@tonic-gate 	}
22547c478bd9Sstevel@tonic-gate 
22557c478bd9Sstevel@tonic-gate 	return (w->w_found && w->w_all == FALSE ? WALK_DONE : WALK_NEXT);
22567c478bd9Sstevel@tonic-gate }
22577c478bd9Sstevel@tonic-gate 
22587c478bd9Sstevel@tonic-gate /*ARGSUSED*/
22597c478bd9Sstevel@tonic-gate static int
22607c478bd9Sstevel@tonic-gate whatis_walk_bufctl(uintptr_t baddr, const kmem_bufctl_t *bcp, whatis_t *w)
22617c478bd9Sstevel@tonic-gate {
22627c478bd9Sstevel@tonic-gate 	uintptr_t addr;
22637c478bd9Sstevel@tonic-gate 
22647c478bd9Sstevel@tonic-gate 	if (bcp == NULL)
22657c478bd9Sstevel@tonic-gate 		return (WALK_NEXT);
22667c478bd9Sstevel@tonic-gate 
22677c478bd9Sstevel@tonic-gate 	addr = (uintptr_t)bcp->bc_addr;
22687c478bd9Sstevel@tonic-gate 
22697c478bd9Sstevel@tonic-gate 	if (w->w_addr < addr || w->w_addr >= addr + w->w_cache->cache_bufsize)
22707c478bd9Sstevel@tonic-gate 		return (WALK_NEXT);
22717c478bd9Sstevel@tonic-gate 
22727c478bd9Sstevel@tonic-gate 	whatis_print_kmem(addr, baddr, w);
22737c478bd9Sstevel@tonic-gate 	w->w_found++;
22747c478bd9Sstevel@tonic-gate 	return (w->w_all == TRUE ? WALK_NEXT : WALK_DONE);
22757c478bd9Sstevel@tonic-gate }
22767c478bd9Sstevel@tonic-gate 
22777c478bd9Sstevel@tonic-gate /*ARGSUSED*/
22787c478bd9Sstevel@tonic-gate static int
22797c478bd9Sstevel@tonic-gate whatis_walk_slab(uintptr_t saddr, const kmem_slab_t *sp, whatis_t *w)
22807c478bd9Sstevel@tonic-gate {
22817c478bd9Sstevel@tonic-gate 	uintptr_t base = P2ALIGN((uintptr_t)sp->slab_base, w->w_slab_align);
22827c478bd9Sstevel@tonic-gate 
22837c478bd9Sstevel@tonic-gate 	if ((w->w_addr - base) >= w->w_cache->cache_slabsize)
22847c478bd9Sstevel@tonic-gate 		return (WALK_NEXT);
22857c478bd9Sstevel@tonic-gate 
22867c478bd9Sstevel@tonic-gate 	w->w_slab_found++;
22877c478bd9Sstevel@tonic-gate 	return (WALK_DONE);
22887c478bd9Sstevel@tonic-gate }
22897c478bd9Sstevel@tonic-gate 
22907c478bd9Sstevel@tonic-gate static int
22917c478bd9Sstevel@tonic-gate whatis_walk_cache(uintptr_t addr, const kmem_cache_t *c, whatis_t *w)
22927c478bd9Sstevel@tonic-gate {
22937c478bd9Sstevel@tonic-gate 	char *walk, *freewalk;
22947c478bd9Sstevel@tonic-gate 	mdb_walk_cb_t func;
22957c478bd9Sstevel@tonic-gate 	vmem_t *vmp = c->cache_arena;
22967c478bd9Sstevel@tonic-gate 
22977c478bd9Sstevel@tonic-gate 	if (((c->cache_flags & VMC_IDENTIFIER) != 0) ^ w->w_idspace)
22987c478bd9Sstevel@tonic-gate 		return (WALK_NEXT);
22997c478bd9Sstevel@tonic-gate 
23007c478bd9Sstevel@tonic-gate 	if (w->w_bufctl == FALSE) {
23017c478bd9Sstevel@tonic-gate 		walk = "kmem";
23027c478bd9Sstevel@tonic-gate 		freewalk = "freemem";
23037c478bd9Sstevel@tonic-gate 		func = (mdb_walk_cb_t)whatis_walk_kmem;
23047c478bd9Sstevel@tonic-gate 	} else {
23057c478bd9Sstevel@tonic-gate 		walk = "bufctl";
23067c478bd9Sstevel@tonic-gate 		freewalk = "freectl";
23077c478bd9Sstevel@tonic-gate 		func = (mdb_walk_cb_t)whatis_walk_bufctl;
23087c478bd9Sstevel@tonic-gate 	}
23097c478bd9Sstevel@tonic-gate 
23107c478bd9Sstevel@tonic-gate 	w->w_cache = c;
23117c478bd9Sstevel@tonic-gate 
23127c478bd9Sstevel@tonic-gate 	if (w->w_verbose)
23137c478bd9Sstevel@tonic-gate 		mdb_printf("Searching %s's slabs...\n", c->cache_name);
23147c478bd9Sstevel@tonic-gate 
23157c478bd9Sstevel@tonic-gate 	/*
23167c478bd9Sstevel@tonic-gate 	 * Verify that the address is in one of the cache's slabs.  If not,
23177c478bd9Sstevel@tonic-gate 	 * we can skip the more expensive walkers.  (this is purely a
23187c478bd9Sstevel@tonic-gate 	 * heuristic -- as long as there are no false-negatives, we'll be fine)
23197c478bd9Sstevel@tonic-gate 	 *
23207c478bd9Sstevel@tonic-gate 	 * We try to get the cache's arena's quantum, since to accurately
23217c478bd9Sstevel@tonic-gate 	 * get the base of a slab, you have to align it to the quantum.  If
23227c478bd9Sstevel@tonic-gate 	 * it doesn't look sensible, we fall back to not aligning.
23237c478bd9Sstevel@tonic-gate 	 */
23247c478bd9Sstevel@tonic-gate 	if (mdb_vread(&w->w_slab_align, sizeof (w->w_slab_align),
23257c478bd9Sstevel@tonic-gate 	    (uintptr_t)&vmp->vm_quantum) == -1) {
23267c478bd9Sstevel@tonic-gate 		mdb_warn("unable to read %p->cache_arena->vm_quantum", c);
23277c478bd9Sstevel@tonic-gate 		w->w_slab_align = 1;
23287c478bd9Sstevel@tonic-gate 	}
23297c478bd9Sstevel@tonic-gate 
23307c478bd9Sstevel@tonic-gate 	if ((c->cache_slabsize < w->w_slab_align) || w->w_slab_align == 0 ||
23317c478bd9Sstevel@tonic-gate 	    (w->w_slab_align & (w->w_slab_align - 1))) {
23327c478bd9Sstevel@tonic-gate 		mdb_warn("%p's arena has invalid quantum (0x%p)\n", c,
23337c478bd9Sstevel@tonic-gate 		    w->w_slab_align);
23347c478bd9Sstevel@tonic-gate 		w->w_slab_align = 1;
23357c478bd9Sstevel@tonic-gate 	}
23367c478bd9Sstevel@tonic-gate 
23377c478bd9Sstevel@tonic-gate 	w->w_slab_found = 0;
23387c478bd9Sstevel@tonic-gate 	if (mdb_pwalk("kmem_slab", (mdb_walk_cb_t)whatis_walk_slab, w,
23397c478bd9Sstevel@tonic-gate 	    addr) == -1) {
23407c478bd9Sstevel@tonic-gate 		mdb_warn("can't find kmem_slab walker");
23417c478bd9Sstevel@tonic-gate 		return (WALK_DONE);
23427c478bd9Sstevel@tonic-gate 	}
23437c478bd9Sstevel@tonic-gate 	if (w->w_slab_found == 0)
23447c478bd9Sstevel@tonic-gate 		return (WALK_NEXT);
23457c478bd9Sstevel@tonic-gate 
23467c478bd9Sstevel@tonic-gate 	if (c->cache_flags & KMF_LITE) {
23477c478bd9Sstevel@tonic-gate 		if (mdb_readvar(&w->w_kmem_lite_count,
23487c478bd9Sstevel@tonic-gate 		    "kmem_lite_count") == -1 || w->w_kmem_lite_count > 16)
23497c478bd9Sstevel@tonic-gate 			w->w_kmem_lite_count = 0;
23507c478bd9Sstevel@tonic-gate 	}
23517c478bd9Sstevel@tonic-gate 
23527c478bd9Sstevel@tonic-gate 	if (w->w_verbose)
23537c478bd9Sstevel@tonic-gate 		mdb_printf("Searching %s...\n", c->cache_name);
23547c478bd9Sstevel@tonic-gate 
23557c478bd9Sstevel@tonic-gate 	w->w_freemem = FALSE;
23567c478bd9Sstevel@tonic-gate 
23577c478bd9Sstevel@tonic-gate 	if (mdb_pwalk(walk, func, w, addr) == -1) {
23587c478bd9Sstevel@tonic-gate 		mdb_warn("can't find %s walker", walk);
23597c478bd9Sstevel@tonic-gate 		return (WALK_DONE);
23607c478bd9Sstevel@tonic-gate 	}
23617c478bd9Sstevel@tonic-gate 
23627c478bd9Sstevel@tonic-gate 	if (w->w_found && w->w_all == FALSE)
23637c478bd9Sstevel@tonic-gate 		return (WALK_DONE);
23647c478bd9Sstevel@tonic-gate 
23657c478bd9Sstevel@tonic-gate 	/*
23667c478bd9Sstevel@tonic-gate 	 * We have searched for allocated memory; now search for freed memory.
23677c478bd9Sstevel@tonic-gate 	 */
23687c478bd9Sstevel@tonic-gate 	if (w->w_verbose)
23697c478bd9Sstevel@tonic-gate 		mdb_printf("Searching %s for free memory...\n", c->cache_name);
23707c478bd9Sstevel@tonic-gate 
23717c478bd9Sstevel@tonic-gate 	w->w_freemem = TRUE;
23727c478bd9Sstevel@tonic-gate 
23737c478bd9Sstevel@tonic-gate 	if (mdb_pwalk(freewalk, func, w, addr) == -1) {
23747c478bd9Sstevel@tonic-gate 		mdb_warn("can't find %s walker", freewalk);
23757c478bd9Sstevel@tonic-gate 		return (WALK_DONE);
23767c478bd9Sstevel@tonic-gate 	}
23777c478bd9Sstevel@tonic-gate 
23787c478bd9Sstevel@tonic-gate 	return (w->w_found && w->w_all == FALSE ? WALK_DONE : WALK_NEXT);
23797c478bd9Sstevel@tonic-gate }
23807c478bd9Sstevel@tonic-gate 
23817c478bd9Sstevel@tonic-gate static int
23827c478bd9Sstevel@tonic-gate whatis_walk_touch(uintptr_t addr, const kmem_cache_t *c, whatis_t *w)
23837c478bd9Sstevel@tonic-gate {
23847c478bd9Sstevel@tonic-gate 	if (c->cache_cflags & KMC_NOTOUCH)
23857c478bd9Sstevel@tonic-gate 		return (WALK_NEXT);
23867c478bd9Sstevel@tonic-gate 
23877c478bd9Sstevel@tonic-gate 	return (whatis_walk_cache(addr, c, w));
23887c478bd9Sstevel@tonic-gate }
23897c478bd9Sstevel@tonic-gate 
23907c478bd9Sstevel@tonic-gate static int
23917c478bd9Sstevel@tonic-gate whatis_walk_notouch(uintptr_t addr, const kmem_cache_t *c, whatis_t *w)
23927c478bd9Sstevel@tonic-gate {
23937c478bd9Sstevel@tonic-gate 	if (!(c->cache_cflags & KMC_NOTOUCH))
23947c478bd9Sstevel@tonic-gate 		return (WALK_NEXT);
23957c478bd9Sstevel@tonic-gate 
23967c478bd9Sstevel@tonic-gate 	return (whatis_walk_cache(addr, c, w));
23977c478bd9Sstevel@tonic-gate }
23987c478bd9Sstevel@tonic-gate 
23997c478bd9Sstevel@tonic-gate static int
24007c478bd9Sstevel@tonic-gate whatis_walk_thread(uintptr_t addr, const kthread_t *t, whatis_t *w)
24017c478bd9Sstevel@tonic-gate {
24027c478bd9Sstevel@tonic-gate 	/*
24037c478bd9Sstevel@tonic-gate 	 * Often, one calls ::whatis on an address from a thread structure.
24047c478bd9Sstevel@tonic-gate 	 * We use this opportunity to short-circuit this case...
24057c478bd9Sstevel@tonic-gate 	 */
24067c478bd9Sstevel@tonic-gate 	if (w->w_addr >= addr && w->w_addr < addr + sizeof (kthread_t)) {
24077c478bd9Sstevel@tonic-gate 		mdb_printf("%p is %p+%p, allocated as a thread structure\n",
24087c478bd9Sstevel@tonic-gate 		    w->w_addr, addr, w->w_addr - addr);
24097c478bd9Sstevel@tonic-gate 		w->w_found++;
24107c478bd9Sstevel@tonic-gate 		return (w->w_all == TRUE ? WALK_NEXT : WALK_DONE);
24117c478bd9Sstevel@tonic-gate 	}
24127c478bd9Sstevel@tonic-gate 
24137c478bd9Sstevel@tonic-gate 	if (w->w_addr < (uintptr_t)t->t_stkbase ||
24147c478bd9Sstevel@tonic-gate 	    w->w_addr > (uintptr_t)t->t_stk)
24157c478bd9Sstevel@tonic-gate 		return (WALK_NEXT);
24167c478bd9Sstevel@tonic-gate 
24177c478bd9Sstevel@tonic-gate 	if (t->t_stkbase == NULL)
24187c478bd9Sstevel@tonic-gate 		return (WALK_NEXT);
24197c478bd9Sstevel@tonic-gate 
24207c478bd9Sstevel@tonic-gate 	mdb_printf("%p is in thread %p's stack%s\n", w->w_addr, addr,
24217c478bd9Sstevel@tonic-gate 	    stack_active(t, w->w_addr));
24227c478bd9Sstevel@tonic-gate 
24237c478bd9Sstevel@tonic-gate 	w->w_found++;
24247c478bd9Sstevel@tonic-gate 	return (w->w_all == TRUE ? WALK_NEXT : WALK_DONE);
24257c478bd9Sstevel@tonic-gate }
24267c478bd9Sstevel@tonic-gate 
24277c478bd9Sstevel@tonic-gate static int
24287c478bd9Sstevel@tonic-gate whatis_walk_modctl(uintptr_t addr, const struct modctl *m, whatis_t *w)
24297c478bd9Sstevel@tonic-gate {
24307c478bd9Sstevel@tonic-gate 	struct module mod;
24317c478bd9Sstevel@tonic-gate 	char name[MODMAXNAMELEN], *where;
24327c478bd9Sstevel@tonic-gate 	char c[MDB_SYM_NAMLEN];
24337c478bd9Sstevel@tonic-gate 	Shdr shdr;
24347c478bd9Sstevel@tonic-gate 	GElf_Sym sym;
24357c478bd9Sstevel@tonic-gate 
24367c478bd9Sstevel@tonic-gate 	if (m->mod_mp == NULL)
24377c478bd9Sstevel@tonic-gate 		return (WALK_NEXT);
24387c478bd9Sstevel@tonic-gate 
24397c478bd9Sstevel@tonic-gate 	if (mdb_vread(&mod, sizeof (mod), (uintptr_t)m->mod_mp) == -1) {
24407c478bd9Sstevel@tonic-gate 		mdb_warn("couldn't read modctl %p's module", addr);
24417c478bd9Sstevel@tonic-gate 		return (WALK_NEXT);
24427c478bd9Sstevel@tonic-gate 	}
24437c478bd9Sstevel@tonic-gate 
24447c478bd9Sstevel@tonic-gate 	if (w->w_addr >= (uintptr_t)mod.text &&
24457c478bd9Sstevel@tonic-gate 	    w->w_addr < (uintptr_t)mod.text + mod.text_size) {
24467c478bd9Sstevel@tonic-gate 		where = "text segment";
24477c478bd9Sstevel@tonic-gate 		goto found;
24487c478bd9Sstevel@tonic-gate 	}
24497c478bd9Sstevel@tonic-gate 
24507c478bd9Sstevel@tonic-gate 	if (w->w_addr >= (uintptr_t)mod.data &&
24517c478bd9Sstevel@tonic-gate 	    w->w_addr < (uintptr_t)mod.data + mod.data_size) {
24527c478bd9Sstevel@tonic-gate 		where = "data segment";
24537c478bd9Sstevel@tonic-gate 		goto found;
24547c478bd9Sstevel@tonic-gate 	}
24557c478bd9Sstevel@tonic-gate 
24567c478bd9Sstevel@tonic-gate 	if (w->w_addr >= (uintptr_t)mod.bss &&
24577c478bd9Sstevel@tonic-gate 	    w->w_addr < (uintptr_t)mod.bss + mod.bss_size) {
24587c478bd9Sstevel@tonic-gate 		where = "bss";
24597c478bd9Sstevel@tonic-gate 		goto found;
24607c478bd9Sstevel@tonic-gate 	}
24617c478bd9Sstevel@tonic-gate 
24627c478bd9Sstevel@tonic-gate 	if (mdb_vread(&shdr, sizeof (shdr), (uintptr_t)mod.symhdr) == -1) {
24637c478bd9Sstevel@tonic-gate 		mdb_warn("couldn't read symbol header for %p's module", addr);
24647c478bd9Sstevel@tonic-gate 		return (WALK_NEXT);
24657c478bd9Sstevel@tonic-gate 	}
24667c478bd9Sstevel@tonic-gate 
24677c478bd9Sstevel@tonic-gate 	if (w->w_addr >= (uintptr_t)mod.symtbl && w->w_addr <
24687c478bd9Sstevel@tonic-gate 	    (uintptr_t)mod.symtbl + (uintptr_t)mod.nsyms * shdr.sh_entsize) {
24697c478bd9Sstevel@tonic-gate 		where = "symtab";
24707c478bd9Sstevel@tonic-gate 		goto found;
24717c478bd9Sstevel@tonic-gate 	}
24727c478bd9Sstevel@tonic-gate 
24737c478bd9Sstevel@tonic-gate 	if (w->w_addr >= (uintptr_t)mod.symspace &&
24747c478bd9Sstevel@tonic-gate 	    w->w_addr < (uintptr_t)mod.symspace + (uintptr_t)mod.symsize) {
24757c478bd9Sstevel@tonic-gate 		where = "symspace";
24767c478bd9Sstevel@tonic-gate 		goto found;
24777c478bd9Sstevel@tonic-gate 	}
24787c478bd9Sstevel@tonic-gate 
24797c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
24807c478bd9Sstevel@tonic-gate 
24817c478bd9Sstevel@tonic-gate found:
24827c478bd9Sstevel@tonic-gate 	if (mdb_readstr(name, sizeof (name), (uintptr_t)m->mod_modname) == -1)
24837c478bd9Sstevel@tonic-gate 		(void) mdb_snprintf(name, sizeof (name), "0x%p", addr);
24847c478bd9Sstevel@tonic-gate 
24857c478bd9Sstevel@tonic-gate 	mdb_printf("%p is ", w->w_addr);
24867c478bd9Sstevel@tonic-gate 
24877c478bd9Sstevel@tonic-gate 	/*
24887c478bd9Sstevel@tonic-gate 	 * If we found this address in a module, then there's a chance that
24897c478bd9Sstevel@tonic-gate 	 * it's actually a named symbol.  Try the symbol lookup.
24907c478bd9Sstevel@tonic-gate 	 */
24917c478bd9Sstevel@tonic-gate 	if (mdb_lookup_by_addr(w->w_addr, MDB_SYM_FUZZY, c, sizeof (c),
24927c478bd9Sstevel@tonic-gate 	    &sym) != -1 && w->w_addr >= (uintptr_t)sym.st_value &&
24937c478bd9Sstevel@tonic-gate 	    w->w_addr < (uintptr_t)sym.st_value + sym.st_size) {
24947c478bd9Sstevel@tonic-gate 		mdb_printf("%s+%lx ", c, w->w_addr - (uintptr_t)sym.st_value);
24957c478bd9Sstevel@tonic-gate 	}
24967c478bd9Sstevel@tonic-gate 
24977c478bd9Sstevel@tonic-gate 	mdb_printf("in %s's %s\n", name, where);
24987c478bd9Sstevel@tonic-gate 
24997c478bd9Sstevel@tonic-gate 	w->w_found++;
25007c478bd9Sstevel@tonic-gate 	return (w->w_all == TRUE ? WALK_NEXT : WALK_DONE);
25017c478bd9Sstevel@tonic-gate }
25027c478bd9Sstevel@tonic-gate 
25037c478bd9Sstevel@tonic-gate /*ARGSUSED*/
25047c478bd9Sstevel@tonic-gate static int
25057c478bd9Sstevel@tonic-gate whatis_walk_page(uintptr_t addr, const void *ignored, whatis_t *w)
25067c478bd9Sstevel@tonic-gate {
25077c478bd9Sstevel@tonic-gate 	static int machsize = 0;
25087c478bd9Sstevel@tonic-gate 	mdb_ctf_id_t id;
25097c478bd9Sstevel@tonic-gate 
25107c478bd9Sstevel@tonic-gate 	if (machsize == 0) {
25117c478bd9Sstevel@tonic-gate 		if (mdb_ctf_lookup_by_name("unix`page_t", &id) == 0)
25127c478bd9Sstevel@tonic-gate 			machsize = mdb_ctf_type_size(id);
25137c478bd9Sstevel@tonic-gate 		else {
25147c478bd9Sstevel@tonic-gate 			mdb_warn("could not get size of page_t");
25157c478bd9Sstevel@tonic-gate 			machsize = sizeof (page_t);
25167c478bd9Sstevel@tonic-gate 		}
25177c478bd9Sstevel@tonic-gate 	}
25187c478bd9Sstevel@tonic-gate 
25197c478bd9Sstevel@tonic-gate 	if (w->w_addr < addr || w->w_addr >= addr + machsize)
25207c478bd9Sstevel@tonic-gate 		return (WALK_NEXT);
25217c478bd9Sstevel@tonic-gate 
25227c478bd9Sstevel@tonic-gate 	mdb_printf("%p is %p+%p, allocated as a page structure\n",
25237c478bd9Sstevel@tonic-gate 	    w->w_addr, addr, w->w_addr - addr);
25247c478bd9Sstevel@tonic-gate 
25257c478bd9Sstevel@tonic-gate 	w->w_found++;
25267c478bd9Sstevel@tonic-gate 	return (w->w_all == TRUE ? WALK_NEXT : WALK_DONE);
25277c478bd9Sstevel@tonic-gate }
25287c478bd9Sstevel@tonic-gate 
25297c478bd9Sstevel@tonic-gate int
25307c478bd9Sstevel@tonic-gate whatis(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
25317c478bd9Sstevel@tonic-gate {
25327c478bd9Sstevel@tonic-gate 	whatis_t w;
25337c478bd9Sstevel@tonic-gate 
25347c478bd9Sstevel@tonic-gate 	if (!(flags & DCMD_ADDRSPEC))
25357c478bd9Sstevel@tonic-gate 		return (DCMD_USAGE);
25367c478bd9Sstevel@tonic-gate 
25377c478bd9Sstevel@tonic-gate 	w.w_verbose = FALSE;
25387c478bd9Sstevel@tonic-gate 	w.w_bufctl = FALSE;
25397c478bd9Sstevel@tonic-gate 	w.w_all = FALSE;
25407c478bd9Sstevel@tonic-gate 	w.w_idspace = FALSE;
25417c478bd9Sstevel@tonic-gate 
25427c478bd9Sstevel@tonic-gate 	if (mdb_getopts(argc, argv,
25437c478bd9Sstevel@tonic-gate 	    'v', MDB_OPT_SETBITS, TRUE, &w.w_verbose,
25447c478bd9Sstevel@tonic-gate 	    'a', MDB_OPT_SETBITS, TRUE, &w.w_all,
25457c478bd9Sstevel@tonic-gate 	    'i', MDB_OPT_SETBITS, TRUE, &w.w_idspace,
25467c478bd9Sstevel@tonic-gate 	    'b', MDB_OPT_SETBITS, TRUE, &w.w_bufctl, NULL) != argc)
25477c478bd9Sstevel@tonic-gate 		return (DCMD_USAGE);
25487c478bd9Sstevel@tonic-gate 
25497c478bd9Sstevel@tonic-gate 	w.w_addr = addr;
25507c478bd9Sstevel@tonic-gate 	w.w_found = 0;
25517c478bd9Sstevel@tonic-gate 
25527c478bd9Sstevel@tonic-gate 	if (w.w_verbose)
25537c478bd9Sstevel@tonic-gate 		mdb_printf("Searching modules...\n");
25547c478bd9Sstevel@tonic-gate 
25557c478bd9Sstevel@tonic-gate 	if (!w.w_idspace) {
25567c478bd9Sstevel@tonic-gate 		if (mdb_walk("modctl", (mdb_walk_cb_t)whatis_walk_modctl, &w)
25577c478bd9Sstevel@tonic-gate 		    == -1) {
25587c478bd9Sstevel@tonic-gate 			mdb_warn("couldn't find modctl walker");
25597c478bd9Sstevel@tonic-gate 			return (DCMD_ERR);
25607c478bd9Sstevel@tonic-gate 		}
25617c478bd9Sstevel@tonic-gate 
25627c478bd9Sstevel@tonic-gate 		if (w.w_found && w.w_all == FALSE)
25637c478bd9Sstevel@tonic-gate 			return (DCMD_OK);
25647c478bd9Sstevel@tonic-gate 
25657c478bd9Sstevel@tonic-gate 		/*
25667c478bd9Sstevel@tonic-gate 		 * Now search all thread stacks.  Yes, this is a little weak; we
25677c478bd9Sstevel@tonic-gate 		 * can save a lot of work by first checking to see if the
25687c478bd9Sstevel@tonic-gate 		 * address is in segkp vs. segkmem.  But hey, computers are
25697c478bd9Sstevel@tonic-gate 		 * fast.
25707c478bd9Sstevel@tonic-gate 		 */
25717c478bd9Sstevel@tonic-gate 		if (w.w_verbose)
25727c478bd9Sstevel@tonic-gate 			mdb_printf("Searching threads...\n");
25737c478bd9Sstevel@tonic-gate 
25747c478bd9Sstevel@tonic-gate 		if (mdb_walk("thread", (mdb_walk_cb_t)whatis_walk_thread, &w)
25757c478bd9Sstevel@tonic-gate 		    == -1) {
25767c478bd9Sstevel@tonic-gate 			mdb_warn("couldn't find thread walker");
25777c478bd9Sstevel@tonic-gate 			return (DCMD_ERR);
25787c478bd9Sstevel@tonic-gate 		}
25797c478bd9Sstevel@tonic-gate 
25807c478bd9Sstevel@tonic-gate 		if (w.w_found && w.w_all == FALSE)
25817c478bd9Sstevel@tonic-gate 			return (DCMD_OK);
25827c478bd9Sstevel@tonic-gate 
25837c478bd9Sstevel@tonic-gate 		if (w.w_verbose)
25847c478bd9Sstevel@tonic-gate 			mdb_printf("Searching page structures...\n");
25857c478bd9Sstevel@tonic-gate 
25867c478bd9Sstevel@tonic-gate 		if (mdb_walk("page", (mdb_walk_cb_t)whatis_walk_page, &w)
25877c478bd9Sstevel@tonic-gate 		    == -1) {
25887c478bd9Sstevel@tonic-gate 			mdb_warn("couldn't find page walker");
25897c478bd9Sstevel@tonic-gate 			return (DCMD_ERR);
25907c478bd9Sstevel@tonic-gate 		}
25917c478bd9Sstevel@tonic-gate 
25927c478bd9Sstevel@tonic-gate 		if (w.w_found && w.w_all == FALSE)
25937c478bd9Sstevel@tonic-gate 			return (DCMD_OK);
25947c478bd9Sstevel@tonic-gate 	}
25957c478bd9Sstevel@tonic-gate 
25967c478bd9Sstevel@tonic-gate 	if (mdb_walk("kmem_cache",
25977c478bd9Sstevel@tonic-gate 	    (mdb_walk_cb_t)whatis_walk_touch, &w) == -1) {
25987c478bd9Sstevel@tonic-gate 		mdb_warn("couldn't find kmem_cache walker");
25997c478bd9Sstevel@tonic-gate 		return (DCMD_ERR);
26007c478bd9Sstevel@tonic-gate 	}
26017c478bd9Sstevel@tonic-gate 
26027c478bd9Sstevel@tonic-gate 	if (w.w_found && w.w_all == FALSE)
26037c478bd9Sstevel@tonic-gate 		return (DCMD_OK);
26047c478bd9Sstevel@tonic-gate 
26057c478bd9Sstevel@tonic-gate 	if (mdb_walk("kmem_cache",
26067c478bd9Sstevel@tonic-gate 	    (mdb_walk_cb_t)whatis_walk_notouch, &w) == -1) {
26077c478bd9Sstevel@tonic-gate 		mdb_warn("couldn't find kmem_cache walker");
26087c478bd9Sstevel@tonic-gate 		return (DCMD_ERR);
26097c478bd9Sstevel@tonic-gate 	}
26107c478bd9Sstevel@tonic-gate 
26117c478bd9Sstevel@tonic-gate 	if (w.w_found && w.w_all == FALSE)
26127c478bd9Sstevel@tonic-gate 		return (DCMD_OK);
26137c478bd9Sstevel@tonic-gate 
26147c478bd9Sstevel@tonic-gate 	if (mdb_walk("vmem_postfix",
26157c478bd9Sstevel@tonic-gate 	    (mdb_walk_cb_t)whatis_walk_vmem, &w) == -1) {
26167c478bd9Sstevel@tonic-gate 		mdb_warn("couldn't find vmem_postfix walker");
26177c478bd9Sstevel@tonic-gate 		return (DCMD_ERR);
26187c478bd9Sstevel@tonic-gate 	}
26197c478bd9Sstevel@tonic-gate 
26207c478bd9Sstevel@tonic-gate 	if (w.w_found == 0)
26217c478bd9Sstevel@tonic-gate 		mdb_printf("%p is unknown\n", addr);
26227c478bd9Sstevel@tonic-gate 
26237c478bd9Sstevel@tonic-gate 	return (DCMD_OK);
26247c478bd9Sstevel@tonic-gate }
26257c478bd9Sstevel@tonic-gate 
26267c478bd9Sstevel@tonic-gate void
26277c478bd9Sstevel@tonic-gate whatis_help(void)
26287c478bd9Sstevel@tonic-gate {
26297c478bd9Sstevel@tonic-gate 	mdb_printf(
26307c478bd9Sstevel@tonic-gate 	    "Given a virtual address, attempt to determine where it came\n"
26317c478bd9Sstevel@tonic-gate 	    "from.\n"
26327c478bd9Sstevel@tonic-gate 	    "\n"
26337c478bd9Sstevel@tonic-gate 	    "\t-v\tVerbose output; display caches/arenas/etc as they are\n"
26347c478bd9Sstevel@tonic-gate 	    "\t\tsearched\n"
26357c478bd9Sstevel@tonic-gate 	    "\t-a\tFind all possible sources.  Default behavior is to stop at\n"
26367c478bd9Sstevel@tonic-gate 	    "\t\tthe first (most specific) source.\n"
26377c478bd9Sstevel@tonic-gate 	    "\t-i\tSearch only identifier arenas and caches.  By default\n"
26387c478bd9Sstevel@tonic-gate 	    "\t\tthese are ignored.\n"
26397c478bd9Sstevel@tonic-gate 	    "\t-b\tReport bufctls and vmem_segs for matches in kmem and vmem,\n"
26407c478bd9Sstevel@tonic-gate 	    "\t\trespectively.  Warning: if the buffer exists, but does not\n"
26417c478bd9Sstevel@tonic-gate 	    "\t\thave a bufctl, it will not be reported.\n");
26427c478bd9Sstevel@tonic-gate }
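
/*
 * Illustrative ::whatis usage (addresses are hypothetical; the output line
 * follows the format printed by whatis_print_kmem() above):
 *
 *	> ffffff01abcd1234::whatis
 *	ffffff01abcd1234 is ffffff01abcd1200+34, allocated from kmem_alloc_256
 *
 * With -b the matching bufctl (or vmem_seg) address is reported as well,
 * with -a the search continues past the first match, and with -v each
 * module, thread, cache and arena is announced as it is searched.
 */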
26437c478bd9Sstevel@tonic-gate 
26447c478bd9Sstevel@tonic-gate typedef struct kmem_log_cpu {
26457c478bd9Sstevel@tonic-gate 	uintptr_t kmc_low;
26467c478bd9Sstevel@tonic-gate 	uintptr_t kmc_high;
26477c478bd9Sstevel@tonic-gate } kmem_log_cpu_t;
26487c478bd9Sstevel@tonic-gate 
26497c478bd9Sstevel@tonic-gate typedef struct kmem_log_data {
26507c478bd9Sstevel@tonic-gate 	uintptr_t kmd_addr;
26517c478bd9Sstevel@tonic-gate 	kmem_log_cpu_t *kmd_cpu;
26527c478bd9Sstevel@tonic-gate } kmem_log_data_t;
26537c478bd9Sstevel@tonic-gate 
26547c478bd9Sstevel@tonic-gate int
26557c478bd9Sstevel@tonic-gate kmem_log_walk(uintptr_t addr, const kmem_bufctl_audit_t *b,
26567c478bd9Sstevel@tonic-gate     kmem_log_data_t *kmd)
26577c478bd9Sstevel@tonic-gate {
26587c478bd9Sstevel@tonic-gate 	int i;
26597c478bd9Sstevel@tonic-gate 	kmem_log_cpu_t *kmc = kmd->kmd_cpu;
26607c478bd9Sstevel@tonic-gate 	size_t bufsize;
26617c478bd9Sstevel@tonic-gate 
26627c478bd9Sstevel@tonic-gate 	for (i = 0; i < NCPU; i++) {
26637c478bd9Sstevel@tonic-gate 		if (addr >= kmc[i].kmc_low && addr < kmc[i].kmc_high)
26647c478bd9Sstevel@tonic-gate 			break;
26657c478bd9Sstevel@tonic-gate 	}
26667c478bd9Sstevel@tonic-gate 
26677c478bd9Sstevel@tonic-gate 	if (kmd->kmd_addr) {
26687c478bd9Sstevel@tonic-gate 		if (b->bc_cache == NULL)
26697c478bd9Sstevel@tonic-gate 			return (WALK_NEXT);
26707c478bd9Sstevel@tonic-gate 
26717c478bd9Sstevel@tonic-gate 		if (mdb_vread(&bufsize, sizeof (bufsize),
26727c478bd9Sstevel@tonic-gate 		    (uintptr_t)&b->bc_cache->cache_bufsize) == -1) {
26737c478bd9Sstevel@tonic-gate 			mdb_warn(
26747c478bd9Sstevel@tonic-gate 			    "failed to read cache_bufsize for cache at %p",
26757c478bd9Sstevel@tonic-gate 			    b->bc_cache);
26767c478bd9Sstevel@tonic-gate 			return (WALK_ERR);
26777c478bd9Sstevel@tonic-gate 		}
26787c478bd9Sstevel@tonic-gate 
26797c478bd9Sstevel@tonic-gate 		if (kmd->kmd_addr < (uintptr_t)b->bc_addr ||
26807c478bd9Sstevel@tonic-gate 		    kmd->kmd_addr >= (uintptr_t)b->bc_addr + bufsize)
26817c478bd9Sstevel@tonic-gate 			return (WALK_NEXT);
26827c478bd9Sstevel@tonic-gate 	}
26837c478bd9Sstevel@tonic-gate 
26847c478bd9Sstevel@tonic-gate 	if (i == NCPU)
26857c478bd9Sstevel@tonic-gate 		mdb_printf("   ");
26867c478bd9Sstevel@tonic-gate 	else
26877c478bd9Sstevel@tonic-gate 		mdb_printf("%3d", i);
26887c478bd9Sstevel@tonic-gate 
26897c478bd9Sstevel@tonic-gate 	mdb_printf(" %0?p %0?p %16llx %0?p\n", addr, b->bc_addr,
26907c478bd9Sstevel@tonic-gate 	    b->bc_timestamp, b->bc_thread);
26917c478bd9Sstevel@tonic-gate 
26927c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
26937c478bd9Sstevel@tonic-gate }
26947c478bd9Sstevel@tonic-gate 
26957c478bd9Sstevel@tonic-gate /*ARGSUSED*/
26967c478bd9Sstevel@tonic-gate int
26977c478bd9Sstevel@tonic-gate kmem_log(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
26987c478bd9Sstevel@tonic-gate {
26997c478bd9Sstevel@tonic-gate 	kmem_log_header_t lh;
27007c478bd9Sstevel@tonic-gate 	kmem_cpu_log_header_t clh;
27017c478bd9Sstevel@tonic-gate 	uintptr_t lhp, clhp;
27027c478bd9Sstevel@tonic-gate 	int ncpus;
27037c478bd9Sstevel@tonic-gate 	uintptr_t *cpu;
27047c478bd9Sstevel@tonic-gate 	GElf_Sym sym;
27057c478bd9Sstevel@tonic-gate 	kmem_log_cpu_t *kmc;
27067c478bd9Sstevel@tonic-gate 	int i;
27077c478bd9Sstevel@tonic-gate 	kmem_log_data_t kmd;
27087c478bd9Sstevel@tonic-gate 	uint_t opt_b = FALSE;
27097c478bd9Sstevel@tonic-gate 
27107c478bd9Sstevel@tonic-gate 	if (mdb_getopts(argc, argv,
27117c478bd9Sstevel@tonic-gate 	    'b', MDB_OPT_SETBITS, TRUE, &opt_b, NULL) != argc)
27127c478bd9Sstevel@tonic-gate 		return (DCMD_USAGE);
27137c478bd9Sstevel@tonic-gate 
27147c478bd9Sstevel@tonic-gate 	if (mdb_readvar(&lhp, "kmem_transaction_log") == -1) {
27157c478bd9Sstevel@tonic-gate 		mdb_warn("failed to read 'kmem_transaction_log'");
27167c478bd9Sstevel@tonic-gate 		return (DCMD_ERR);
27177c478bd9Sstevel@tonic-gate 	}
27187c478bd9Sstevel@tonic-gate 
27197c478bd9Sstevel@tonic-gate 	if (lhp == NULL) {
27207c478bd9Sstevel@tonic-gate 		mdb_warn("no kmem transaction log\n");
27217c478bd9Sstevel@tonic-gate 		return (DCMD_ERR);
27227c478bd9Sstevel@tonic-gate 	}
27237c478bd9Sstevel@tonic-gate 
27247c478bd9Sstevel@tonic-gate 	mdb_readvar(&ncpus, "ncpus");
27257c478bd9Sstevel@tonic-gate 
27267c478bd9Sstevel@tonic-gate 	if (mdb_vread(&lh, sizeof (kmem_log_header_t), lhp) == -1) {
27277c478bd9Sstevel@tonic-gate 		mdb_warn("failed to read log header at %p", lhp);
27287c478bd9Sstevel@tonic-gate 		return (DCMD_ERR);
27297c478bd9Sstevel@tonic-gate 	}
27307c478bd9Sstevel@tonic-gate 
27317c478bd9Sstevel@tonic-gate 	clhp = lhp + ((uintptr_t)&lh.lh_cpu[0] - (uintptr_t)&lh);
27327c478bd9Sstevel@tonic-gate 
27337c478bd9Sstevel@tonic-gate 	cpu = mdb_alloc(sizeof (uintptr_t) * NCPU, UM_SLEEP | UM_GC);
27347c478bd9Sstevel@tonic-gate 
27357c478bd9Sstevel@tonic-gate 	if (mdb_lookup_by_name("cpu", &sym) == -1) {
27367c478bd9Sstevel@tonic-gate 		mdb_warn("couldn't find 'cpu' array");
27377c478bd9Sstevel@tonic-gate 		return (DCMD_ERR);
27387c478bd9Sstevel@tonic-gate 	}
27397c478bd9Sstevel@tonic-gate 
27407c478bd9Sstevel@tonic-gate 	if (sym.st_size != NCPU * sizeof (uintptr_t)) {
27417c478bd9Sstevel@tonic-gate 		mdb_warn("expected 'cpu' to be of size %d; found %d\n",
27427c478bd9Sstevel@tonic-gate 		    NCPU * sizeof (uintptr_t), sym.st_size);
27437c478bd9Sstevel@tonic-gate 		return (DCMD_ERR);
27447c478bd9Sstevel@tonic-gate 	}
27457c478bd9Sstevel@tonic-gate 
27467c478bd9Sstevel@tonic-gate 	if (mdb_vread(cpu, sym.st_size, (uintptr_t)sym.st_value) == -1) {
27477c478bd9Sstevel@tonic-gate 		mdb_warn("failed to read cpu array at %p", sym.st_value);
27487c478bd9Sstevel@tonic-gate 		return (DCMD_ERR);
27497c478bd9Sstevel@tonic-gate 	}
27507c478bd9Sstevel@tonic-gate 
27517c478bd9Sstevel@tonic-gate 	kmc = mdb_zalloc(sizeof (kmem_log_cpu_t) * NCPU, UM_SLEEP | UM_GC);
27527c478bd9Sstevel@tonic-gate 	kmd.kmd_addr = NULL;
27537c478bd9Sstevel@tonic-gate 	kmd.kmd_cpu = kmc;
27547c478bd9Sstevel@tonic-gate 
27557c478bd9Sstevel@tonic-gate 	for (i = 0; i < NCPU; i++) {
27567c478bd9Sstevel@tonic-gate 
27577c478bd9Sstevel@tonic-gate 		if (cpu[i] == NULL)
27587c478bd9Sstevel@tonic-gate 			continue;
27597c478bd9Sstevel@tonic-gate 
27607c478bd9Sstevel@tonic-gate 		if (mdb_vread(&clh, sizeof (clh), clhp) == -1) {
27617c478bd9Sstevel@tonic-gate 			mdb_warn("cannot read cpu %d's log header at %p",
27627c478bd9Sstevel@tonic-gate 			    i, clhp);
27637c478bd9Sstevel@tonic-gate 			return (DCMD_ERR);
27647c478bd9Sstevel@tonic-gate 		}
27657c478bd9Sstevel@tonic-gate 
27667c478bd9Sstevel@tonic-gate 		kmc[i].kmc_low = clh.clh_chunk * lh.lh_chunksize +
27677c478bd9Sstevel@tonic-gate 		    (uintptr_t)lh.lh_base;
27687c478bd9Sstevel@tonic-gate 		kmc[i].kmc_high = (uintptr_t)clh.clh_current;
27697c478bd9Sstevel@tonic-gate 
27707c478bd9Sstevel@tonic-gate 		clhp += sizeof (kmem_cpu_log_header_t);
27717c478bd9Sstevel@tonic-gate 	}
27727c478bd9Sstevel@tonic-gate 
27737c478bd9Sstevel@tonic-gate 	mdb_printf("%3s %-?s %-?s %16s %-?s\n", "CPU", "ADDR", "BUFADDR",
27747c478bd9Sstevel@tonic-gate 	    "TIMESTAMP", "THREAD");
27757c478bd9Sstevel@tonic-gate 
27767c478bd9Sstevel@tonic-gate 	/*
27777c478bd9Sstevel@tonic-gate 	 * If we have been passed an address, print out only log entries
27787c478bd9Sstevel@tonic-gate 	 * corresponding to that address.  With -b, the address is treated as a
27797c478bd9Sstevel@tonic-gate 	 * buffer address; otherwise it is interpreted as a bufctl.
27807c478bd9Sstevel@tonic-gate 	 */
27817c478bd9Sstevel@tonic-gate 	if (flags & DCMD_ADDRSPEC) {
27827c478bd9Sstevel@tonic-gate 		kmem_bufctl_audit_t b;
27837c478bd9Sstevel@tonic-gate 
27847c478bd9Sstevel@tonic-gate 		if (opt_b) {
27857c478bd9Sstevel@tonic-gate 			kmd.kmd_addr = addr;
27867c478bd9Sstevel@tonic-gate 		} else {
27877c478bd9Sstevel@tonic-gate 			if (mdb_vread(&b,
27887c478bd9Sstevel@tonic-gate 			    sizeof (kmem_bufctl_audit_t), addr) == -1) {
27897c478bd9Sstevel@tonic-gate 				mdb_warn("failed to read bufctl at %p", addr);
27907c478bd9Sstevel@tonic-gate 				return (DCMD_ERR);
27917c478bd9Sstevel@tonic-gate 			}
27927c478bd9Sstevel@tonic-gate 
27937c478bd9Sstevel@tonic-gate 			(void) kmem_log_walk(addr, &b, &kmd);
27947c478bd9Sstevel@tonic-gate 
27957c478bd9Sstevel@tonic-gate 			return (DCMD_OK);
27967c478bd9Sstevel@tonic-gate 		}
27977c478bd9Sstevel@tonic-gate 	}
27987c478bd9Sstevel@tonic-gate 
27997c478bd9Sstevel@tonic-gate 	if (mdb_walk("kmem_log", (mdb_walk_cb_t)kmem_log_walk, &kmd) == -1) {
28007c478bd9Sstevel@tonic-gate 		mdb_warn("can't find kmem log walker");
28017c478bd9Sstevel@tonic-gate 		return (DCMD_ERR);
28027c478bd9Sstevel@tonic-gate 	}
28037c478bd9Sstevel@tonic-gate 
28047c478bd9Sstevel@tonic-gate 	return (DCMD_OK);
28057c478bd9Sstevel@tonic-gate }
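
/*
 * Illustrative ::kmem_log usage (addresses are hypothetical).  The column
 * layout matches the header printed above; the dcmd warns if the kernel has
 * no kmem transaction log:
 *
 *	> ::kmem_log
 *	CPU ADDR             BUFADDR          TIMESTAMP        THREAD
 *	  0 ffffff01aa000040 ffffff01abcd1200     49e9f6281037 ffffff0190b3c400
 *	...
 *
 *	> ffffff01aa000040::kmem_log
 *		(the single log entry for that bufctl)
 *
 *	> ffffff01abcd1234::kmem_log -b
 *		(all log entries whose buffer contains that address)
 */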
28067c478bd9Sstevel@tonic-gate 
28077c478bd9Sstevel@tonic-gate typedef struct bufctl_history_cb {
28087c478bd9Sstevel@tonic-gate 	int		bhc_flags;
28097c478bd9Sstevel@tonic-gate 	int		bhc_argc;
28107c478bd9Sstevel@tonic-gate 	const mdb_arg_t	*bhc_argv;
28117c478bd9Sstevel@tonic-gate 	int		bhc_ret;
28127c478bd9Sstevel@tonic-gate } bufctl_history_cb_t;
28137c478bd9Sstevel@tonic-gate 
28147c478bd9Sstevel@tonic-gate /*ARGSUSED*/
28157c478bd9Sstevel@tonic-gate static int
28167c478bd9Sstevel@tonic-gate bufctl_history_callback(uintptr_t addr, const void *ign, void *arg)
28177c478bd9Sstevel@tonic-gate {
28187c478bd9Sstevel@tonic-gate 	bufctl_history_cb_t *bhc = arg;
28197c478bd9Sstevel@tonic-gate 
28207c478bd9Sstevel@tonic-gate 	bhc->bhc_ret =
28217c478bd9Sstevel@tonic-gate 	    bufctl(addr, bhc->bhc_flags, bhc->bhc_argc, bhc->bhc_argv);
28227c478bd9Sstevel@tonic-gate 
28237c478bd9Sstevel@tonic-gate 	bhc->bhc_flags &= ~DCMD_LOOPFIRST;
28247c478bd9Sstevel@tonic-gate 
28257c478bd9Sstevel@tonic-gate 	return ((bhc->bhc_ret == DCMD_OK)? WALK_NEXT : WALK_DONE);
28267c478bd9Sstevel@tonic-gate }
28277c478bd9Sstevel@tonic-gate 
28287c478bd9Sstevel@tonic-gate void
28297c478bd9Sstevel@tonic-gate bufctl_help(void)
28307c478bd9Sstevel@tonic-gate {
2831*b5fca8f8Stomee 	mdb_printf("%s",
2832*b5fca8f8Stomee "Display the contents of kmem_bufctl_audit_ts, with optional filtering.\n\n");
28337c478bd9Sstevel@tonic-gate 	mdb_dec_indent(2);
28347c478bd9Sstevel@tonic-gate 	mdb_printf("%<b>OPTIONS%</b>\n");
28357c478bd9Sstevel@tonic-gate 	mdb_inc_indent(2);
28367c478bd9Sstevel@tonic-gate 	mdb_printf("%s",
28377c478bd9Sstevel@tonic-gate "  -v    Display the full content of the bufctl, including its stack trace\n"
28387c478bd9Sstevel@tonic-gate "  -h    Retrieve the bufctl's transaction history, if available\n"
28397c478bd9Sstevel@tonic-gate "  -a addr\n"
28407c478bd9Sstevel@tonic-gate "        filter out bufctls not involving the buffer at addr\n"
28417c478bd9Sstevel@tonic-gate "  -c caller\n"
28427c478bd9Sstevel@tonic-gate "        filter out bufctls without the function/PC in their stack trace\n"
28437c478bd9Sstevel@tonic-gate "  -e earliest\n"
28447c478bd9Sstevel@tonic-gate "        filter out bufctls timestamped before earliest\n"
28457c478bd9Sstevel@tonic-gate "  -l latest\n"
28467c478bd9Sstevel@tonic-gate "        filter out bufctls timestamped after latest\n"
28477c478bd9Sstevel@tonic-gate "  -t thread\n"
28487c478bd9Sstevel@tonic-gate "        filter out bufctls not involving thread\n");
28497c478bd9Sstevel@tonic-gate }
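
/*
 * Illustrative ::bufctl usage (addresses are hypothetical;
 * ffffff0190a28008 stands in for a kmem_cache address):
 *
 *	> ffffff01aa000040::bufctl -v
 *		(the full bufctl, including its stack trace)
 *
 *	> ffffff0190a28008::walk bufctl | ::bufctl -a ffffff01abcd1200
 *		(all bufctls recorded for the buffer at ffffff01abcd1200)
 *
 *	> ffffff01aa000040::bufctl -h
 *		(the bufctl's transaction history, via the bufctl_history
 *		walker used above)
 */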
28507c478bd9Sstevel@tonic-gate 
28517c478bd9Sstevel@tonic-gate int
28527c478bd9Sstevel@tonic-gate bufctl(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
28537c478bd9Sstevel@tonic-gate {
28547c478bd9Sstevel@tonic-gate 	kmem_bufctl_audit_t bc;
28557c478bd9Sstevel@tonic-gate 	uint_t verbose = FALSE;
28567c478bd9Sstevel@tonic-gate 	uint_t history = FALSE;
28577c478bd9Sstevel@tonic-gate 	uint_t in_history = FALSE;
28587c478bd9Sstevel@tonic-gate 	uintptr_t caller = NULL, thread = NULL;
28597c478bd9Sstevel@tonic-gate 	uintptr_t laddr, haddr, baddr = NULL;
28607c478bd9Sstevel@tonic-gate 	hrtime_t earliest = 0, latest = 0;
28617c478bd9Sstevel@tonic-gate 	int i, depth;
28627c478bd9Sstevel@tonic-gate 	char c[MDB_SYM_NAMLEN];
28637c478bd9Sstevel@tonic-gate 	GElf_Sym sym;
28647c478bd9Sstevel@tonic-gate 
28657c478bd9Sstevel@tonic-gate 	if (mdb_getopts(argc, argv,
28667c478bd9Sstevel@tonic-gate 	    'v', MDB_OPT_SETBITS, TRUE, &verbose,
28677c478bd9Sstevel@tonic-gate 	    'h', MDB_OPT_SETBITS, TRUE, &history,
28687c478bd9Sstevel@tonic-gate 	    'H', MDB_OPT_SETBITS, TRUE, &in_history,		/* internal */
28697c478bd9Sstevel@tonic-gate 	    'c', MDB_OPT_UINTPTR, &caller,
28707c478bd9Sstevel@tonic-gate 	    't', MDB_OPT_UINTPTR, &thread,
28717c478bd9Sstevel@tonic-gate 	    'e', MDB_OPT_UINT64, &earliest,
28727c478bd9Sstevel@tonic-gate 	    'l', MDB_OPT_UINT64, &latest,
28737c478bd9Sstevel@tonic-gate 	    'a', MDB_OPT_UINTPTR, &baddr, NULL) != argc)
28747c478bd9Sstevel@tonic-gate 		return (DCMD_USAGE);
28757c478bd9Sstevel@tonic-gate 
28767c478bd9Sstevel@tonic-gate 	if (!(flags & DCMD_ADDRSPEC))
28777c478bd9Sstevel@tonic-gate 		return (DCMD_USAGE);
28787c478bd9Sstevel@tonic-gate 
28797c478bd9Sstevel@tonic-gate 	if (in_history && !history)
28807c478bd9Sstevel@tonic-gate 		return (DCMD_USAGE);
28817c478bd9Sstevel@tonic-gate 
28827c478bd9Sstevel@tonic-gate 	if (history && !in_history) {
28837c478bd9Sstevel@tonic-gate 		mdb_arg_t *nargv = mdb_zalloc(sizeof (*nargv) * (argc + 1),
28847c478bd9Sstevel@tonic-gate 		    UM_SLEEP | UM_GC);
28857c478bd9Sstevel@tonic-gate 		bufctl_history_cb_t bhc;
28867c478bd9Sstevel@tonic-gate 
28877c478bd9Sstevel@tonic-gate 		nargv[0].a_type = MDB_TYPE_STRING;
28887c478bd9Sstevel@tonic-gate 		nargv[0].a_un.a_str = "-H";		/* prevent recursion */
28897c478bd9Sstevel@tonic-gate 
28907c478bd9Sstevel@tonic-gate 		for (i = 0; i < argc; i++)
28917c478bd9Sstevel@tonic-gate 			nargv[i + 1] = argv[i];
28927c478bd9Sstevel@tonic-gate 
28937c478bd9Sstevel@tonic-gate 		/*
28947c478bd9Sstevel@tonic-gate 		 * When in history mode, we treat each element as if it
28957c478bd9Sstevel@tonic-gate 		 * were in a separate loop, so that the headers group
28967c478bd9Sstevel@tonic-gate 		 * bufctls with similar histories.
28977c478bd9Sstevel@tonic-gate 		 */
28987c478bd9Sstevel@tonic-gate 		bhc.bhc_flags = flags | DCMD_LOOP | DCMD_LOOPFIRST;
28997c478bd9Sstevel@tonic-gate 		bhc.bhc_argc = argc + 1;
29007c478bd9Sstevel@tonic-gate 		bhc.bhc_argv = nargv;
29017c478bd9Sstevel@tonic-gate 		bhc.bhc_ret = DCMD_OK;
29027c478bd9Sstevel@tonic-gate 
29037c478bd9Sstevel@tonic-gate 		if (mdb_pwalk("bufctl_history", bufctl_history_callback, &bhc,
29047c478bd9Sstevel@tonic-gate 		    addr) == -1) {
29057c478bd9Sstevel@tonic-gate 			mdb_warn("unable to walk bufctl_history");
29067c478bd9Sstevel@tonic-gate 			return (DCMD_ERR);
29077c478bd9Sstevel@tonic-gate 		}
29087c478bd9Sstevel@tonic-gate 
29097c478bd9Sstevel@tonic-gate 		if (bhc.bhc_ret == DCMD_OK && !(flags & DCMD_PIPE_OUT))
29107c478bd9Sstevel@tonic-gate 			mdb_printf("\n");
29117c478bd9Sstevel@tonic-gate 
29127c478bd9Sstevel@tonic-gate 		return (bhc.bhc_ret);
29137c478bd9Sstevel@tonic-gate 	}
29147c478bd9Sstevel@tonic-gate 
29157c478bd9Sstevel@tonic-gate 	if (DCMD_HDRSPEC(flags) && !(flags & DCMD_PIPE_OUT)) {
29167c478bd9Sstevel@tonic-gate 		if (verbose) {
29177c478bd9Sstevel@tonic-gate 			mdb_printf("%16s %16s %16s %16s\n"
29187c478bd9Sstevel@tonic-gate 			    "%<u>%16s %16s %16s %16s%</u>\n",
29197c478bd9Sstevel@tonic-gate 			    "ADDR", "BUFADDR", "TIMESTAMP", "THREAD",
29207c478bd9Sstevel@tonic-gate 			    "", "CACHE", "LASTLOG", "CONTENTS");
29217c478bd9Sstevel@tonic-gate 		} else {
29227c478bd9Sstevel@tonic-gate 			mdb_printf("%<u>%-?s %-?s %-12s %-?s %s%</u>\n",
29237c478bd9Sstevel@tonic-gate 			    "ADDR", "BUFADDR", "TIMESTAMP", "THREAD", "CALLER");
29247c478bd9Sstevel@tonic-gate 		}
29257c478bd9Sstevel@tonic-gate 	}
29267c478bd9Sstevel@tonic-gate 
29277c478bd9Sstevel@tonic-gate 	if (mdb_vread(&bc, sizeof (bc), addr) == -1) {
29287c478bd9Sstevel@tonic-gate 		mdb_warn("couldn't read bufctl at %p", addr);
29297c478bd9Sstevel@tonic-gate 		return (DCMD_ERR);
29307c478bd9Sstevel@tonic-gate 	}
29317c478bd9Sstevel@tonic-gate 
29327c478bd9Sstevel@tonic-gate 	/*
29337c478bd9Sstevel@tonic-gate 	 * Guard against bogus bc_depth in case the bufctl is corrupt or
29347c478bd9Sstevel@tonic-gate 	 * the address does not really refer to a bufctl.
29357c478bd9Sstevel@tonic-gate 	 */
29367c478bd9Sstevel@tonic-gate 	depth = MIN(bc.bc_depth, KMEM_STACK_DEPTH);
29377c478bd9Sstevel@tonic-gate 
29387c478bd9Sstevel@tonic-gate 	if (caller != NULL) {
29397c478bd9Sstevel@tonic-gate 		laddr = caller;
29407c478bd9Sstevel@tonic-gate 		haddr = caller + sizeof (caller);
29417c478bd9Sstevel@tonic-gate 
29427c478bd9Sstevel@tonic-gate 		if (mdb_lookup_by_addr(caller, MDB_SYM_FUZZY, c, sizeof (c),
29437c478bd9Sstevel@tonic-gate 		    &sym) != -1 && caller == (uintptr_t)sym.st_value) {
29447c478bd9Sstevel@tonic-gate 			/*
29457c478bd9Sstevel@tonic-gate 			 * We were provided an exact symbol value; any
29467c478bd9Sstevel@tonic-gate 			 * address in the function is valid.
29477c478bd9Sstevel@tonic-gate 			 */
29487c478bd9Sstevel@tonic-gate 			laddr = (uintptr_t)sym.st_value;
29497c478bd9Sstevel@tonic-gate 			haddr = (uintptr_t)sym.st_value + sym.st_size;
29507c478bd9Sstevel@tonic-gate 		}
29517c478bd9Sstevel@tonic-gate 
29527c478bd9Sstevel@tonic-gate 		for (i = 0; i < depth; i++)
29537c478bd9Sstevel@tonic-gate 			if (bc.bc_stack[i] >= laddr && bc.bc_stack[i] < haddr)
29547c478bd9Sstevel@tonic-gate 				break;
29557c478bd9Sstevel@tonic-gate 
29567c478bd9Sstevel@tonic-gate 		if (i == depth)
29577c478bd9Sstevel@tonic-gate 			return (DCMD_OK);
29587c478bd9Sstevel@tonic-gate 	}
29597c478bd9Sstevel@tonic-gate 
29607c478bd9Sstevel@tonic-gate 	if (thread != NULL && (uintptr_t)bc.bc_thread != thread)
29617c478bd9Sstevel@tonic-gate 		return (DCMD_OK);
29627c478bd9Sstevel@tonic-gate 
29637c478bd9Sstevel@tonic-gate 	if (earliest != 0 && bc.bc_timestamp < earliest)
29647c478bd9Sstevel@tonic-gate 		return (DCMD_OK);
29657c478bd9Sstevel@tonic-gate 
29667c478bd9Sstevel@tonic-gate 	if (latest != 0 && bc.bc_timestamp > latest)
29677c478bd9Sstevel@tonic-gate 		return (DCMD_OK);
29687c478bd9Sstevel@tonic-gate 
29697c478bd9Sstevel@tonic-gate 	if (baddr != 0 && (uintptr_t)bc.bc_addr != baddr)
29707c478bd9Sstevel@tonic-gate 		return (DCMD_OK);
29717c478bd9Sstevel@tonic-gate 
29727c478bd9Sstevel@tonic-gate 	if (flags & DCMD_PIPE_OUT) {
29737c478bd9Sstevel@tonic-gate 		mdb_printf("%#lr\n", addr);
29747c478bd9Sstevel@tonic-gate 		return (DCMD_OK);
29757c478bd9Sstevel@tonic-gate 	}
29767c478bd9Sstevel@tonic-gate 
29777c478bd9Sstevel@tonic-gate 	if (verbose) {
29787c478bd9Sstevel@tonic-gate 		mdb_printf(
29797c478bd9Sstevel@tonic-gate 		    "%<b>%16p%</b> %16p %16llx %16p\n"
29807c478bd9Sstevel@tonic-gate 		    "%16s %16p %16p %16p\n",
29817c478bd9Sstevel@tonic-gate 		    addr, bc.bc_addr, bc.bc_timestamp, bc.bc_thread,
29827c478bd9Sstevel@tonic-gate 		    "", bc.bc_cache, bc.bc_lastlog, bc.bc_contents);
29837c478bd9Sstevel@tonic-gate 
29847c478bd9Sstevel@tonic-gate 		mdb_inc_indent(17);
29857c478bd9Sstevel@tonic-gate 		for (i = 0; i < depth; i++)
29867c478bd9Sstevel@tonic-gate 			mdb_printf("%a\n", bc.bc_stack[i]);
29877c478bd9Sstevel@tonic-gate 		mdb_dec_indent(17);
29887c478bd9Sstevel@tonic-gate 		mdb_printf("\n");
29897c478bd9Sstevel@tonic-gate 	} else {
29907c478bd9Sstevel@tonic-gate 		mdb_printf("%0?p %0?p %12llx %0?p", addr, bc.bc_addr,
29917c478bd9Sstevel@tonic-gate 		    bc.bc_timestamp, bc.bc_thread);
29927c478bd9Sstevel@tonic-gate 
29937c478bd9Sstevel@tonic-gate 		for (i = 0; i < depth; i++) {
29947c478bd9Sstevel@tonic-gate 			if (mdb_lookup_by_addr(bc.bc_stack[i],
29957c478bd9Sstevel@tonic-gate 			    MDB_SYM_FUZZY, c, sizeof (c), &sym) == -1)
29967c478bd9Sstevel@tonic-gate 				continue;
29977c478bd9Sstevel@tonic-gate 			if (strncmp(c, "kmem_", 5) == 0)
29987c478bd9Sstevel@tonic-gate 				continue;
29997c478bd9Sstevel@tonic-gate 			mdb_printf(" %a\n", bc.bc_stack[i]);
30007c478bd9Sstevel@tonic-gate 			break;
30017c478bd9Sstevel@tonic-gate 		}
30027c478bd9Sstevel@tonic-gate 
30037c478bd9Sstevel@tonic-gate 		if (i >= depth)
30047c478bd9Sstevel@tonic-gate 			mdb_printf("\n");
30057c478bd9Sstevel@tonic-gate 	}
30067c478bd9Sstevel@tonic-gate 
30077c478bd9Sstevel@tonic-gate 	return (DCMD_OK);
30087c478bd9Sstevel@tonic-gate }
30097c478bd9Sstevel@tonic-gate 
30107c478bd9Sstevel@tonic-gate typedef struct kmem_verify {
30117c478bd9Sstevel@tonic-gate 	uint64_t *kmv_buf;		/* buffer to read cache contents into */
30127c478bd9Sstevel@tonic-gate 	size_t kmv_size;		/* number of bytes in kmv_buf */
30137c478bd9Sstevel@tonic-gate 	int kmv_corruption;		/* > 0 if corruption found. */
30147c478bd9Sstevel@tonic-gate 	int kmv_besilent;		/* if set, don't report corruption sites */
30157c478bd9Sstevel@tonic-gate 	struct kmem_cache kmv_cache;	/* the cache we're operating on */
30167c478bd9Sstevel@tonic-gate } kmem_verify_t;
30177c478bd9Sstevel@tonic-gate 
30187c478bd9Sstevel@tonic-gate /*
30197c478bd9Sstevel@tonic-gate  * verify_pattern()
30207c478bd9Sstevel@tonic-gate  * 	verify that buf is filled with the pattern pat.
30217c478bd9Sstevel@tonic-gate  */
30227c478bd9Sstevel@tonic-gate static int64_t
30237c478bd9Sstevel@tonic-gate verify_pattern(uint64_t *buf_arg, size_t size, uint64_t pat)
30247c478bd9Sstevel@tonic-gate {
30257c478bd9Sstevel@tonic-gate 	/*LINTED*/
30267c478bd9Sstevel@tonic-gate 	uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
30277c478bd9Sstevel@tonic-gate 	uint64_t *buf;
30287c478bd9Sstevel@tonic-gate 
30297c478bd9Sstevel@tonic-gate 	for (buf = buf_arg; buf < bufend; buf++)
30307c478bd9Sstevel@tonic-gate 		if (*buf != pat)
30317c478bd9Sstevel@tonic-gate 			return ((uintptr_t)buf - (uintptr_t)buf_arg);
30327c478bd9Sstevel@tonic-gate 	return (-1);
30337c478bd9Sstevel@tonic-gate }
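
/*
 * For example, verify_pattern(buf, 256, KMEM_FREE_PATTERN) returns -1 when
 * all 32 64-bit words of a 256-byte buffer read 0xdeadbeef (repeated), and
 * otherwise returns the byte offset of the first mismatching word.
 */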
30347c478bd9Sstevel@tonic-gate 
30357c478bd9Sstevel@tonic-gate /*
30367c478bd9Sstevel@tonic-gate  * verify_buftag()
30377c478bd9Sstevel@tonic-gate  *	verify that btp->bt_bxstat == (bcp ^ pat)
30387c478bd9Sstevel@tonic-gate  */
30397c478bd9Sstevel@tonic-gate static int
30407c478bd9Sstevel@tonic-gate verify_buftag(kmem_buftag_t *btp, uintptr_t pat)
30417c478bd9Sstevel@tonic-gate {
30427c478bd9Sstevel@tonic-gate 	return (btp->bt_bxstat == ((intptr_t)btp->bt_bufctl ^ pat) ? 0 : -1);
30437c478bd9Sstevel@tonic-gate }
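
/*
 * A short sketch of the invariant being checked: when a debug buffer is
 * handed out, the kernel stores (intptr_t)bt_bufctl ^ KMEM_BUFTAG_ALLOC in
 * bt_bxstat, so verify_buftag(btp, KMEM_BUFTAG_ALLOC) xors bt_bufctl back
 * in and expects the pattern to survive.  whatis_print_kmem() above makes
 * the same computation (bt_bufctl ^ bt_bxstat) to classify a buffer as
 * allocated or free.
 */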
30447c478bd9Sstevel@tonic-gate 
30457c478bd9Sstevel@tonic-gate /*
30467c478bd9Sstevel@tonic-gate  * verify_free()
30477c478bd9Sstevel@tonic-gate  * 	verify the integrity of a free block of memory by checking
30487c478bd9Sstevel@tonic-gate  * 	that it is filled with 0xdeadbeef and that its buftag is sane.
30497c478bd9Sstevel@tonic-gate  */
30507c478bd9Sstevel@tonic-gate /*ARGSUSED1*/
30517c478bd9Sstevel@tonic-gate static int
30527c478bd9Sstevel@tonic-gate verify_free(uintptr_t addr, const void *data, void *private)
30537c478bd9Sstevel@tonic-gate {
30547c478bd9Sstevel@tonic-gate 	kmem_verify_t *kmv = (kmem_verify_t *)private;
30557c478bd9Sstevel@tonic-gate 	uint64_t *buf = kmv->kmv_buf;	/* buf to validate */
30567c478bd9Sstevel@tonic-gate 	int64_t corrupt;		/* corruption offset */
30577c478bd9Sstevel@tonic-gate 	kmem_buftag_t *buftagp;		/* ptr to buftag */
30587c478bd9Sstevel@tonic-gate 	kmem_cache_t *cp = &kmv->kmv_cache;
30597c478bd9Sstevel@tonic-gate 	int besilent = kmv->kmv_besilent;
30607c478bd9Sstevel@tonic-gate 
30617c478bd9Sstevel@tonic-gate 	/*LINTED*/
30627c478bd9Sstevel@tonic-gate 	buftagp = KMEM_BUFTAG(cp, buf);
30637c478bd9Sstevel@tonic-gate 
30647c478bd9Sstevel@tonic-gate 	/*
30657c478bd9Sstevel@tonic-gate 	 * Read the buffer to check.
30667c478bd9Sstevel@tonic-gate 	 */
30677c478bd9Sstevel@tonic-gate 	if (mdb_vread(buf, kmv->kmv_size, addr) == -1) {
30687c478bd9Sstevel@tonic-gate 		if (!besilent)
30697c478bd9Sstevel@tonic-gate 			mdb_warn("couldn't read %p", addr);
30707c478bd9Sstevel@tonic-gate 		return (WALK_NEXT);
30717c478bd9Sstevel@tonic-gate 	}
30727c478bd9Sstevel@tonic-gate 
30737c478bd9Sstevel@tonic-gate 	if ((corrupt = verify_pattern(buf, cp->cache_verify,
30747c478bd9Sstevel@tonic-gate 	    KMEM_FREE_PATTERN)) >= 0) {
30757c478bd9Sstevel@tonic-gate 		if (!besilent)
30767c478bd9Sstevel@tonic-gate 			mdb_printf("buffer %p (free) seems corrupted, at %p\n",
30777c478bd9Sstevel@tonic-gate 			    addr, (uintptr_t)addr + corrupt);
30787c478bd9Sstevel@tonic-gate 		goto corrupt;
30797c478bd9Sstevel@tonic-gate 	}
30807c478bd9Sstevel@tonic-gate 	/*
30817c478bd9Sstevel@tonic-gate 	 * When KMF_LITE is set, buftagp->bt_redzone is used to hold
30827c478bd9Sstevel@tonic-gate 	 * the first bytes of the buffer, hence we cannot check for red
30837c478bd9Sstevel@tonic-gate 	 * zone corruption.
30847c478bd9Sstevel@tonic-gate 	 */
30857c478bd9Sstevel@tonic-gate 	if ((cp->cache_flags & (KMF_HASH | KMF_LITE)) == KMF_HASH &&
30867c478bd9Sstevel@tonic-gate 	    buftagp->bt_redzone != KMEM_REDZONE_PATTERN) {
30877c478bd9Sstevel@tonic-gate 		if (!besilent)
30887c478bd9Sstevel@tonic-gate 			mdb_printf("buffer %p (free) seems to "
30897c478bd9Sstevel@tonic-gate 			    "have a corrupt redzone pattern\n", addr);
30907c478bd9Sstevel@tonic-gate 		goto corrupt;
30917c478bd9Sstevel@tonic-gate 	}
30927c478bd9Sstevel@tonic-gate 
30937c478bd9Sstevel@tonic-gate 	/*
30947c478bd9Sstevel@tonic-gate 	 * confirm bufctl pointer integrity.
30957c478bd9Sstevel@tonic-gate 	 */
30967c478bd9Sstevel@tonic-gate 	if (verify_buftag(buftagp, KMEM_BUFTAG_FREE) == -1) {
30977c478bd9Sstevel@tonic-gate 		if (!besilent)
30987c478bd9Sstevel@tonic-gate 			mdb_printf("buffer %p (free) has a corrupt "
30997c478bd9Sstevel@tonic-gate 			    "buftag\n", addr);
31007c478bd9Sstevel@tonic-gate 		goto corrupt;
31017c478bd9Sstevel@tonic-gate 	}
31027c478bd9Sstevel@tonic-gate 
31037c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
31047c478bd9Sstevel@tonic-gate corrupt:
31057c478bd9Sstevel@tonic-gate 	kmv->kmv_corruption++;
31067c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
31077c478bd9Sstevel@tonic-gate }
31087c478bd9Sstevel@tonic-gate 
31097c478bd9Sstevel@tonic-gate /*
31107c478bd9Sstevel@tonic-gate  * verify_alloc()
31117c478bd9Sstevel@tonic-gate  * 	Verify that the buftag of an allocated buffer makes sense with respect
31127c478bd9Sstevel@tonic-gate  * 	to the buffer.
31137c478bd9Sstevel@tonic-gate  */
31147c478bd9Sstevel@tonic-gate /*ARGSUSED1*/
31157c478bd9Sstevel@tonic-gate static int
31167c478bd9Sstevel@tonic-gate verify_alloc(uintptr_t addr, const void *data, void *private)
31177c478bd9Sstevel@tonic-gate {
31187c478bd9Sstevel@tonic-gate 	kmem_verify_t *kmv = (kmem_verify_t *)private;
31197c478bd9Sstevel@tonic-gate 	kmem_cache_t *cp = &kmv->kmv_cache;
31207c478bd9Sstevel@tonic-gate 	uint64_t *buf = kmv->kmv_buf;	/* buf to validate */
31217c478bd9Sstevel@tonic-gate 	/*LINTED*/
31227c478bd9Sstevel@tonic-gate 	kmem_buftag_t *buftagp = KMEM_BUFTAG(cp, buf);
31237c478bd9Sstevel@tonic-gate 	uint32_t *ip = (uint32_t *)buftagp;
31247c478bd9Sstevel@tonic-gate 	uint8_t *bp = (uint8_t *)buf;
31257c478bd9Sstevel@tonic-gate 	int looks_ok = 0, size_ok = 1;	/* flags for finding corruption */
31267c478bd9Sstevel@tonic-gate 	int besilent = kmv->kmv_besilent;
31277c478bd9Sstevel@tonic-gate 
31287c478bd9Sstevel@tonic-gate 	/*
31297c478bd9Sstevel@tonic-gate 	 * Read the buffer to check.
31307c478bd9Sstevel@tonic-gate 	 */
31317c478bd9Sstevel@tonic-gate 	if (mdb_vread(buf, kmv->kmv_size, addr) == -1) {
31327c478bd9Sstevel@tonic-gate 		if (!besilent)
31337c478bd9Sstevel@tonic-gate 			mdb_warn("couldn't read %p", addr);
31347c478bd9Sstevel@tonic-gate 		return (WALK_NEXT);
31357c478bd9Sstevel@tonic-gate 	}
31367c478bd9Sstevel@tonic-gate 
31377c478bd9Sstevel@tonic-gate 	/*
31387c478bd9Sstevel@tonic-gate 	 * There are two cases to handle:
31397c478bd9Sstevel@tonic-gate 	 * 1. If the buf was alloc'd using kmem_cache_alloc, it will have
31407c478bd9Sstevel@tonic-gate 	 *    0xfeedfacefeedface at the end of it
31417c478bd9Sstevel@tonic-gate 	 * 2. If the buf was alloc'd using kmem_alloc, it will have
31427c478bd9Sstevel@tonic-gate 	 *    0xbb just past the end of the region in use.  At the buftag,
31437c478bd9Sstevel@tonic-gate 	 *    it will have 0xfeedface (or, if the whole buffer is in use,
31447c478bd9Sstevel@tonic-gate 	 *    0xfeedface & bb000000 or 0xfeedfacf & 000000bb depending on
31457c478bd9Sstevel@tonic-gate 	 *    endianness), followed by 32 bits containing the offset of the
31467c478bd9Sstevel@tonic-gate 	 *    0xbb byte in the buffer.
31477c478bd9Sstevel@tonic-gate 	 *
31487c478bd9Sstevel@tonic-gate 	 * Finally, the two 32-bit words that comprise the second half of the
31497c478bd9Sstevel@tonic-gate 	 * buftag should xor to KMEM_BUFTAG_ALLOC
31507c478bd9Sstevel@tonic-gate 	 */
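	/*
	 * For instance (sizes are illustrative): a kmem_alloc(100) buffer
	 * should have bp[100] == 0xbb (KMEM_REDZONE_BYTE) and
	 * KMEM_SIZE_DECODE(ip[1]) == 100, which is what the checks below
	 * accept in case 2.
	 */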
31517c478bd9Sstevel@tonic-gate 
31527c478bd9Sstevel@tonic-gate 	if (buftagp->bt_redzone == KMEM_REDZONE_PATTERN)
31537c478bd9Sstevel@tonic-gate 		looks_ok = 1;
31547c478bd9Sstevel@tonic-gate 	else if (!KMEM_SIZE_VALID(ip[1]))
31557c478bd9Sstevel@tonic-gate 		size_ok = 0;
31567c478bd9Sstevel@tonic-gate 	else if (bp[KMEM_SIZE_DECODE(ip[1])] == KMEM_REDZONE_BYTE)
31577c478bd9Sstevel@tonic-gate 		looks_ok = 1;
31587c478bd9Sstevel@tonic-gate 	else
31597c478bd9Sstevel@tonic-gate 		size_ok = 0;
31607c478bd9Sstevel@tonic-gate 
31617c478bd9Sstevel@tonic-gate 	if (!size_ok) {
31627c478bd9Sstevel@tonic-gate 		if (!besilent)
31637c478bd9Sstevel@tonic-gate 			mdb_printf("buffer %p (allocated) has a corrupt "
31647c478bd9Sstevel@tonic-gate 			    "redzone size encoding\n", addr);
31657c478bd9Sstevel@tonic-gate 		goto corrupt;
31667c478bd9Sstevel@tonic-gate 	}
31677c478bd9Sstevel@tonic-gate 
31687c478bd9Sstevel@tonic-gate 	if (!looks_ok) {
31697c478bd9Sstevel@tonic-gate 		if (!besilent)
31707c478bd9Sstevel@tonic-gate 			mdb_printf("buffer %p (allocated) has a corrupt "
31717c478bd9Sstevel@tonic-gate 			    "redzone signature\n", addr);
31727c478bd9Sstevel@tonic-gate 		goto corrupt;
31737c478bd9Sstevel@tonic-gate 	}
31747c478bd9Sstevel@tonic-gate 
31757c478bd9Sstevel@tonic-gate 	if (verify_buftag(buftagp, KMEM_BUFTAG_ALLOC) == -1) {
31767c478bd9Sstevel@tonic-gate 		if (!besilent)
31777c478bd9Sstevel@tonic-gate 			mdb_printf("buffer %p (allocated) has a "
31787c478bd9Sstevel@tonic-gate 			    "corrupt buftag\n", addr);
31797c478bd9Sstevel@tonic-gate 		goto corrupt;
31807c478bd9Sstevel@tonic-gate 	}
31817c478bd9Sstevel@tonic-gate 
31827c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
31837c478bd9Sstevel@tonic-gate corrupt:
31847c478bd9Sstevel@tonic-gate 	kmv->kmv_corruption++;
31857c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
31867c478bd9Sstevel@tonic-gate }
31877c478bd9Sstevel@tonic-gate 
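/*
 * Typical ::kmem_verify invocations (addresses are hypothetical):
 *
 *	> ffffff0190a28008::kmem_verify
 *	Summary for cache 'kmem_alloc_256'
 *	  buffer ffffff01abcd1200 (allocated) has a corrupt redzone signature
 *	  ...
 *
 *	> ::walk kmem_cache | ::kmem_verify
 *	kmem_alloc_8                     ffffff0190a10008 clean
 *	kmem_alloc_16                    ffffff0190a12008 1 corrupt buffer
 *	...
 *
 * In the looping (table) form the per-buffer messages are suppressed and
 * only the per-cache summary line is printed; a cache without redzone
 * checking produces a warning in the single-cache form and is silently
 * skipped in the table form.
 */
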
31887c478bd9Sstevel@tonic-gate /*ARGSUSED2*/
31897c478bd9Sstevel@tonic-gate int
31907c478bd9Sstevel@tonic-gate kmem_verify(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
31917c478bd9Sstevel@tonic-gate {
31927c478bd9Sstevel@tonic-gate 	if (flags & DCMD_ADDRSPEC) {
31937c478bd9Sstevel@tonic-gate 		int check_alloc = 0, check_free = 0;
31947c478bd9Sstevel@tonic-gate 		kmem_verify_t kmv;
31957c478bd9Sstevel@tonic-gate 
31967c478bd9Sstevel@tonic-gate 		if (mdb_vread(&kmv.kmv_cache, sizeof (kmv.kmv_cache),
31977c478bd9Sstevel@tonic-gate 		    addr) == -1) {
31987c478bd9Sstevel@tonic-gate 			mdb_warn("couldn't read kmem_cache %p", addr);
31997c478bd9Sstevel@tonic-gate 			return (DCMD_ERR);
32007c478bd9Sstevel@tonic-gate 		}
32017c478bd9Sstevel@tonic-gate 
32027c478bd9Sstevel@tonic-gate 		kmv.kmv_size = kmv.kmv_cache.cache_buftag +
32037c478bd9Sstevel@tonic-gate 		    sizeof (kmem_buftag_t);
32047c478bd9Sstevel@tonic-gate 		kmv.kmv_buf = mdb_alloc(kmv.kmv_size, UM_SLEEP | UM_GC);
32057c478bd9Sstevel@tonic-gate 		kmv.kmv_corruption = 0;
32067c478bd9Sstevel@tonic-gate 
32077c478bd9Sstevel@tonic-gate 		if ((kmv.kmv_cache.cache_flags & KMF_REDZONE)) {
32087c478bd9Sstevel@tonic-gate 			check_alloc = 1;
32097c478bd9Sstevel@tonic-gate 			if (kmv.kmv_cache.cache_flags & KMF_DEADBEEF)
32107c478bd9Sstevel@tonic-gate 				check_free = 1;
32117c478bd9Sstevel@tonic-gate 		} else {
32127c478bd9Sstevel@tonic-gate 			if (!(flags & DCMD_LOOP)) {
32137c478bd9Sstevel@tonic-gate 				mdb_warn("cache %p (%s) does not have "
32147c478bd9Sstevel@tonic-gate 				    "redzone checking enabled\n", addr,
32157c478bd9Sstevel@tonic-gate 				    kmv.kmv_cache.cache_name);
32167c478bd9Sstevel@tonic-gate 			}
32177c478bd9Sstevel@tonic-gate 			return (DCMD_ERR);
32187c478bd9Sstevel@tonic-gate 		}
32197c478bd9Sstevel@tonic-gate 
32207c478bd9Sstevel@tonic-gate 		if (flags & DCMD_LOOP) {
32217c478bd9Sstevel@tonic-gate 			/*
32227c478bd9Sstevel@tonic-gate 			 * table mode, don't print out every corrupt buffer
32237c478bd9Sstevel@tonic-gate 			 */
32247c478bd9Sstevel@tonic-gate 			kmv.kmv_besilent = 1;
32257c478bd9Sstevel@tonic-gate 		} else {
32267c478bd9Sstevel@tonic-gate 			mdb_printf("Summary for cache '%s'\n",
32277c478bd9Sstevel@tonic-gate 			    kmv.kmv_cache.cache_name);
32287c478bd9Sstevel@tonic-gate 			mdb_inc_indent(2);
32297c478bd9Sstevel@tonic-gate 			kmv.kmv_besilent = 0;
32307c478bd9Sstevel@tonic-gate 		}
32317c478bd9Sstevel@tonic-gate 
32327c478bd9Sstevel@tonic-gate 		if (check_alloc)
32337c478bd9Sstevel@tonic-gate 			(void) mdb_pwalk("kmem", verify_alloc, &kmv, addr);
32347c478bd9Sstevel@tonic-gate 		if (check_free)
32357c478bd9Sstevel@tonic-gate 			(void) mdb_pwalk("freemem", verify_free, &kmv, addr);
32367c478bd9Sstevel@tonic-gate 
32377c478bd9Sstevel@tonic-gate 		if (flags & DCMD_LOOP) {
32387c478bd9Sstevel@tonic-gate 			if (kmv.kmv_corruption == 0) {
32397c478bd9Sstevel@tonic-gate 				mdb_printf("%-*s %?p clean\n",
32407c478bd9Sstevel@tonic-gate 				    KMEM_CACHE_NAMELEN,
32417c478bd9Sstevel@tonic-gate 				    kmv.kmv_cache.cache_name, addr);
32427c478bd9Sstevel@tonic-gate 			} else {
32437c478bd9Sstevel@tonic-gate 				char *s = "";	/* optional s in "buffer[s]" */
32447c478bd9Sstevel@tonic-gate 				if (kmv.kmv_corruption > 1)
32457c478bd9Sstevel@tonic-gate 					s = "s";
32467c478bd9Sstevel@tonic-gate 
32477c478bd9Sstevel@tonic-gate 				mdb_printf("%-*s %?p %d corrupt buffer%s\n",
32487c478bd9Sstevel@tonic-gate 				    KMEM_CACHE_NAMELEN,
32497c478bd9Sstevel@tonic-gate 				    kmv.kmv_cache.cache_name, addr,
32507c478bd9Sstevel@tonic-gate 				    kmv.kmv_corruption, s);
32517c478bd9Sstevel@tonic-gate 			}
32527c478bd9Sstevel@tonic-gate 		} else {
32537c478bd9Sstevel@tonic-gate 			/*
32547c478bd9Sstevel@tonic-gate 			 * This is the more verbose mode, when the user has
32557c478bd9Sstevel@tonic-gate 			 * typed addr::kmem_verify.  If the cache was clean,
32567c478bd9Sstevel@tonic-gate 			 * nothing will have been printed yet, so say something.
32577c478bd9Sstevel@tonic-gate 			 */
32587c478bd9Sstevel@tonic-gate 			if (kmv.kmv_corruption == 0)
32597c478bd9Sstevel@tonic-gate 				mdb_printf("clean\n");
32607c478bd9Sstevel@tonic-gate 
32617c478bd9Sstevel@tonic-gate 			mdb_dec_indent(2);
32627c478bd9Sstevel@tonic-gate 		}
32637c478bd9Sstevel@tonic-gate 	} else {
32647c478bd9Sstevel@tonic-gate 		/*
32657c478bd9Sstevel@tonic-gate 		 * If the user didn't specify a cache to verify, we'll walk all
32667c478bd9Sstevel@tonic-gate 		 * kmem_cache's, specifying ourself as a callback for each...
32677c478bd9Sstevel@tonic-gate 		 * this is the equivalent of '::walk kmem_cache .::kmem_verify'
32687c478bd9Sstevel@tonic-gate 		 */
32697c478bd9Sstevel@tonic-gate 		mdb_printf("%<u>%-*s %-?s %-20s%</u>\n", KMEM_CACHE_NAMELEN,
32707c478bd9Sstevel@tonic-gate 		    "Cache Name", "Addr", "Cache Integrity");
32717c478bd9Sstevel@tonic-gate 		(void) (mdb_walk_dcmd("kmem_cache", "kmem_verify", 0, NULL));
32727c478bd9Sstevel@tonic-gate 	}
32737c478bd9Sstevel@tonic-gate 
32747c478bd9Sstevel@tonic-gate 	return (DCMD_OK);
32757c478bd9Sstevel@tonic-gate }
32767c478bd9Sstevel@tonic-gate 
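/*
 * The vmem walkers snapshot every arena on vmem_list into a vmem_node_t and
 * link the nodes into a tree keyed by vm_source, so that importing arenas
 * hang off the arena they import from.
 */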
32777c478bd9Sstevel@tonic-gate typedef struct vmem_node {
32787c478bd9Sstevel@tonic-gate 	struct vmem_node *vn_next;
32797c478bd9Sstevel@tonic-gate 	struct vmem_node *vn_parent;
32807c478bd9Sstevel@tonic-gate 	struct vmem_node *vn_sibling;
32817c478bd9Sstevel@tonic-gate 	struct vmem_node *vn_children;
32827c478bd9Sstevel@tonic-gate 	uintptr_t vn_addr;
32837c478bd9Sstevel@tonic-gate 	int vn_marked;
32847c478bd9Sstevel@tonic-gate 	vmem_t vn_vmem;
32857c478bd9Sstevel@tonic-gate } vmem_node_t;
32867c478bd9Sstevel@tonic-gate 
32877c478bd9Sstevel@tonic-gate typedef struct vmem_walk {
32887c478bd9Sstevel@tonic-gate 	vmem_node_t *vw_root;
32897c478bd9Sstevel@tonic-gate 	vmem_node_t *vw_current;
32907c478bd9Sstevel@tonic-gate } vmem_walk_t;
32917c478bd9Sstevel@tonic-gate 
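/*
 * Read the global vmem_list, snapshot each arena, and build the parent/child
 * tree described above.  If the walk was given a starting arena, begin
 * there; otherwise begin with the root arenas.
 */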
32927c478bd9Sstevel@tonic-gate int
32937c478bd9Sstevel@tonic-gate vmem_walk_init(mdb_walk_state_t *wsp)
32947c478bd9Sstevel@tonic-gate {
32957c478bd9Sstevel@tonic-gate 	uintptr_t vaddr, paddr;
32967c478bd9Sstevel@tonic-gate 	vmem_node_t *head = NULL, *root = NULL, *current = NULL, *parent, *vp;
32977c478bd9Sstevel@tonic-gate 	vmem_walk_t *vw;
32987c478bd9Sstevel@tonic-gate 
32997c478bd9Sstevel@tonic-gate 	if (mdb_readvar(&vaddr, "vmem_list") == -1) {
33007c478bd9Sstevel@tonic-gate 		mdb_warn("couldn't read 'vmem_list'");
33017c478bd9Sstevel@tonic-gate 		return (WALK_ERR);
33027c478bd9Sstevel@tonic-gate 	}
33037c478bd9Sstevel@tonic-gate 
33047c478bd9Sstevel@tonic-gate 	while (vaddr != NULL) {
33057c478bd9Sstevel@tonic-gate 		vp = mdb_zalloc(sizeof (vmem_node_t), UM_SLEEP);
33067c478bd9Sstevel@tonic-gate 		vp->vn_addr = vaddr;
33077c478bd9Sstevel@tonic-gate 		vp->vn_next = head;
33087c478bd9Sstevel@tonic-gate 		head = vp;
33097c478bd9Sstevel@tonic-gate 
33107c478bd9Sstevel@tonic-gate 		if (vaddr == wsp->walk_addr)
33117c478bd9Sstevel@tonic-gate 			current = vp;
33127c478bd9Sstevel@tonic-gate 
33137c478bd9Sstevel@tonic-gate 		if (mdb_vread(&vp->vn_vmem, sizeof (vmem_t), vaddr) == -1) {
33147c478bd9Sstevel@tonic-gate 			mdb_warn("couldn't read vmem_t at %p", vaddr);
33157c478bd9Sstevel@tonic-gate 			goto err;
33167c478bd9Sstevel@tonic-gate 		}
33177c478bd9Sstevel@tonic-gate 
33187c478bd9Sstevel@tonic-gate 		vaddr = (uintptr_t)vp->vn_vmem.vm_next;
33197c478bd9Sstevel@tonic-gate 	}
33207c478bd9Sstevel@tonic-gate 
33217c478bd9Sstevel@tonic-gate 	for (vp = head; vp != NULL; vp = vp->vn_next) {
33227c478bd9Sstevel@tonic-gate 
33237c478bd9Sstevel@tonic-gate 		if ((paddr = (uintptr_t)vp->vn_vmem.vm_source) == NULL) {
33247c478bd9Sstevel@tonic-gate 			vp->vn_sibling = root;
33257c478bd9Sstevel@tonic-gate 			root = vp;
33267c478bd9Sstevel@tonic-gate 			continue;
33277c478bd9Sstevel@tonic-gate 		}
33287c478bd9Sstevel@tonic-gate 
33297c478bd9Sstevel@tonic-gate 		for (parent = head; parent != NULL; parent = parent->vn_next) {
33307c478bd9Sstevel@tonic-gate 			if (parent->vn_addr != paddr)
33317c478bd9Sstevel@tonic-gate 				continue;
33327c478bd9Sstevel@tonic-gate 			vp->vn_sibling = parent->vn_children;
33337c478bd9Sstevel@tonic-gate 			parent->vn_children = vp;
33347c478bd9Sstevel@tonic-gate 			vp->vn_parent = parent;
33357c478bd9Sstevel@tonic-gate 			break;
33367c478bd9Sstevel@tonic-gate 		}
33377c478bd9Sstevel@tonic-gate 
33387c478bd9Sstevel@tonic-gate 		if (parent == NULL) {
33397c478bd9Sstevel@tonic-gate 			mdb_warn("couldn't find %p's parent (%p)\n",
33407c478bd9Sstevel@tonic-gate 			    vp->vn_addr, paddr);
33417c478bd9Sstevel@tonic-gate 			goto err;
33427c478bd9Sstevel@tonic-gate 		}
33437c478bd9Sstevel@tonic-gate 	}
33447c478bd9Sstevel@tonic-gate 
33457c478bd9Sstevel@tonic-gate 	vw = mdb_zalloc(sizeof (vmem_walk_t), UM_SLEEP);
33467c478bd9Sstevel@tonic-gate 	vw->vw_root = root;
33477c478bd9Sstevel@tonic-gate 
33487c478bd9Sstevel@tonic-gate 	if (current != NULL)
33497c478bd9Sstevel@tonic-gate 		vw->vw_current = current;
33507c478bd9Sstevel@tonic-gate 	else
33517c478bd9Sstevel@tonic-gate 		vw->vw_current = root;
33527c478bd9Sstevel@tonic-gate 
33537c478bd9Sstevel@tonic-gate 	wsp->walk_data = vw;
33547c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
33557c478bd9Sstevel@tonic-gate err:
33567c478bd9Sstevel@tonic-gate 	for (vp = head; head != NULL; vp = head) {
33577c478bd9Sstevel@tonic-gate 		head = vp->vn_next;
33587c478bd9Sstevel@tonic-gate 		mdb_free(vp, sizeof (vmem_node_t));
33597c478bd9Sstevel@tonic-gate 	}
33607c478bd9Sstevel@tonic-gate 
33617c478bd9Sstevel@tonic-gate 	return (WALK_ERR);
33627c478bd9Sstevel@tonic-gate }
33637c478bd9Sstevel@tonic-gate 
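/*
 * Pre-order step: visit the current arena, then descend into its children.
 * Once a subtree is exhausted, move on to the next sibling, climbing back
 * toward the roots as necessary.
 */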
33647c478bd9Sstevel@tonic-gate int
33657c478bd9Sstevel@tonic-gate vmem_walk_step(mdb_walk_state_t *wsp)
33667c478bd9Sstevel@tonic-gate {
33677c478bd9Sstevel@tonic-gate 	vmem_walk_t *vw = wsp->walk_data;
33687c478bd9Sstevel@tonic-gate 	vmem_node_t *vp;
33697c478bd9Sstevel@tonic-gate 	int rval;
33707c478bd9Sstevel@tonic-gate 
33717c478bd9Sstevel@tonic-gate 	if ((vp = vw->vw_current) == NULL)
33727c478bd9Sstevel@tonic-gate 		return (WALK_DONE);
33737c478bd9Sstevel@tonic-gate 
33747c478bd9Sstevel@tonic-gate 	rval = wsp->walk_callback(vp->vn_addr, &vp->vn_vmem, wsp->walk_cbdata);
33757c478bd9Sstevel@tonic-gate 
33767c478bd9Sstevel@tonic-gate 	if (vp->vn_children != NULL) {
33777c478bd9Sstevel@tonic-gate 		vw->vw_current = vp->vn_children;
33787c478bd9Sstevel@tonic-gate 		return (rval);
33797c478bd9Sstevel@tonic-gate 	}
33807c478bd9Sstevel@tonic-gate 
33817c478bd9Sstevel@tonic-gate 	do {
33827c478bd9Sstevel@tonic-gate 		vw->vw_current = vp->vn_sibling;
33837c478bd9Sstevel@tonic-gate 		vp = vp->vn_parent;
33847c478bd9Sstevel@tonic-gate 	} while (vw->vw_current == NULL && vp != NULL);
33857c478bd9Sstevel@tonic-gate 
33867c478bd9Sstevel@tonic-gate 	return (rval);
33877c478bd9Sstevel@tonic-gate }
33887c478bd9Sstevel@tonic-gate 
33897c478bd9Sstevel@tonic-gate /*
33907c478bd9Sstevel@tonic-gate  * The "vmem_postfix" walk walks the vmem arenas in post-fix order; all
33917c478bd9Sstevel@tonic-gate  * children are visited before their parent.  We perform the postfix walk
33927c478bd9Sstevel@tonic-gate  * iteratively (rather than recursively) to allow mdb to regain control
33937c478bd9Sstevel@tonic-gate  * after each callback.
33947c478bd9Sstevel@tonic-gate  */
33957c478bd9Sstevel@tonic-gate int
33967c478bd9Sstevel@tonic-gate vmem_postfix_walk_step(mdb_walk_state_t *wsp)
33977c478bd9Sstevel@tonic-gate {
33987c478bd9Sstevel@tonic-gate 	vmem_walk_t *vw = wsp->walk_data;
33997c478bd9Sstevel@tonic-gate 	vmem_node_t *vp = vw->vw_current;
34007c478bd9Sstevel@tonic-gate 	int rval;
34017c478bd9Sstevel@tonic-gate 
34027c478bd9Sstevel@tonic-gate 	/*
34037c478bd9Sstevel@tonic-gate 	 * If this node is marked, then we know that we have already visited
34047c478bd9Sstevel@tonic-gate 	 * all of its children.  If the node has any siblings, they need to
34057c478bd9Sstevel@tonic-gate 	 * be visited next; otherwise, we need to visit the parent.  Note
34067c478bd9Sstevel@tonic-gate 	 * that vp->vn_marked will only be zero on the first invocation of
34077c478bd9Sstevel@tonic-gate 	 * the step function.
34087c478bd9Sstevel@tonic-gate 	 */
34097c478bd9Sstevel@tonic-gate 	if (vp->vn_marked) {
34107c478bd9Sstevel@tonic-gate 		if (vp->vn_sibling != NULL)
34117c478bd9Sstevel@tonic-gate 			vp = vp->vn_sibling;
34127c478bd9Sstevel@tonic-gate 		else if (vp->vn_parent != NULL)
34137c478bd9Sstevel@tonic-gate 			vp = vp->vn_parent;
34147c478bd9Sstevel@tonic-gate 		else {
34157c478bd9Sstevel@tonic-gate 			/*
34167c478bd9Sstevel@tonic-gate 			 * We have neither a parent, nor a sibling, and we
34177c478bd9Sstevel@tonic-gate 			 * have already been visited; we're done.
34187c478bd9Sstevel@tonic-gate 			 */
34197c478bd9Sstevel@tonic-gate 			return (WALK_DONE);
34207c478bd9Sstevel@tonic-gate 		}
34217c478bd9Sstevel@tonic-gate 	}
34227c478bd9Sstevel@tonic-gate 
34237c478bd9Sstevel@tonic-gate 	/*
34247c478bd9Sstevel@tonic-gate 	 * Before we visit this node, visit its children.
34257c478bd9Sstevel@tonic-gate 	 */
34267c478bd9Sstevel@tonic-gate 	while (vp->vn_children != NULL && !vp->vn_children->vn_marked)
34277c478bd9Sstevel@tonic-gate 		vp = vp->vn_children;
34287c478bd9Sstevel@tonic-gate 
34297c478bd9Sstevel@tonic-gate 	vp->vn_marked = 1;
34307c478bd9Sstevel@tonic-gate 	vw->vw_current = vp;
34317c478bd9Sstevel@tonic-gate 	rval = wsp->walk_callback(vp->vn_addr, &vp->vn_vmem, wsp->walk_cbdata);
34327c478bd9Sstevel@tonic-gate 
34337c478bd9Sstevel@tonic-gate 	return (rval);
34347c478bd9Sstevel@tonic-gate }
34357c478bd9Sstevel@tonic-gate 
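/*
 * Tear down the node tree built by vmem_walk_init, recursing through
 * children and siblings; the walk state itself is freed with the last node.
 */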
34367c478bd9Sstevel@tonic-gate void
34377c478bd9Sstevel@tonic-gate vmem_walk_fini(mdb_walk_state_t *wsp)
34387c478bd9Sstevel@tonic-gate {
34397c478bd9Sstevel@tonic-gate 	vmem_walk_t *vw = wsp->walk_data;
34407c478bd9Sstevel@tonic-gate 	vmem_node_t *root = vw->vw_root;
34417c478bd9Sstevel@tonic-gate 	int done;
34427c478bd9Sstevel@tonic-gate 
34437c478bd9Sstevel@tonic-gate 	if (root == NULL)
34447c478bd9Sstevel@tonic-gate 		return;
34457c478bd9Sstevel@tonic-gate 
34467c478bd9Sstevel@tonic-gate 	if ((vw->vw_root = root->vn_children) != NULL)
34477c478bd9Sstevel@tonic-gate 		vmem_walk_fini(wsp);
34487c478bd9Sstevel@tonic-gate 
34497c478bd9Sstevel@tonic-gate 	vw->vw_root = root->vn_sibling;
34507c478bd9Sstevel@tonic-gate 	done = (root->vn_sibling == NULL && root->vn_parent == NULL);
34517c478bd9Sstevel@tonic-gate 	mdb_free(root, sizeof (vmem_node_t));
34527c478bd9Sstevel@tonic-gate 
34537c478bd9Sstevel@tonic-gate 	if (done) {
34547c478bd9Sstevel@tonic-gate 		mdb_free(vw, sizeof (vmem_walk_t));
34557c478bd9Sstevel@tonic-gate 	} else {
34567c478bd9Sstevel@tonic-gate 		vmem_walk_fini(wsp);
34577c478bd9Sstevel@tonic-gate 	}
34587c478bd9Sstevel@tonic-gate }
34597c478bd9Sstevel@tonic-gate 
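/*
 * The vmem_seg walkers traverse an arena's circular segment list, starting
 * from the arena's vm_seg0 marker, optionally filtering on segment type.
 */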
34607c478bd9Sstevel@tonic-gate typedef struct vmem_seg_walk {
34617c478bd9Sstevel@tonic-gate 	uint8_t vsw_type;
34627c478bd9Sstevel@tonic-gate 	uintptr_t vsw_start;
34637c478bd9Sstevel@tonic-gate 	uintptr_t vsw_current;
34647c478bd9Sstevel@tonic-gate } vmem_seg_walk_t;
34657c478bd9Sstevel@tonic-gate 
34667c478bd9Sstevel@tonic-gate /*ARGSUSED*/
34677c478bd9Sstevel@tonic-gate int
34687c478bd9Sstevel@tonic-gate vmem_seg_walk_common_init(mdb_walk_state_t *wsp, uint8_t type, char *name)
34697c478bd9Sstevel@tonic-gate {
34707c478bd9Sstevel@tonic-gate 	vmem_seg_walk_t *vsw;
34717c478bd9Sstevel@tonic-gate 
34727c478bd9Sstevel@tonic-gate 	if (wsp->walk_addr == NULL) {
34737c478bd9Sstevel@tonic-gate 		mdb_warn("vmem_%s does not support global walks\n", name);
34747c478bd9Sstevel@tonic-gate 		return (WALK_ERR);
34757c478bd9Sstevel@tonic-gate 	}
34767c478bd9Sstevel@tonic-gate 
34777c478bd9Sstevel@tonic-gate 	wsp->walk_data = vsw = mdb_alloc(sizeof (vmem_seg_walk_t), UM_SLEEP);
34787c478bd9Sstevel@tonic-gate 
34797c478bd9Sstevel@tonic-gate 	vsw->vsw_type = type;
34807c478bd9Sstevel@tonic-gate 	vsw->vsw_start = wsp->walk_addr + offsetof(vmem_t, vm_seg0);
34817c478bd9Sstevel@tonic-gate 	vsw->vsw_current = vsw->vsw_start;
34827c478bd9Sstevel@tonic-gate 
34837c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
34847c478bd9Sstevel@tonic-gate }
34857c478bd9Sstevel@tonic-gate 
34867c478bd9Sstevel@tonic-gate /*
34877c478bd9Sstevel@tonic-gate  * vmem segments can't have type 0 (this should be added to vmem_impl.h).
34887c478bd9Sstevel@tonic-gate  */
34897c478bd9Sstevel@tonic-gate #define	VMEM_NONE	0
34907c478bd9Sstevel@tonic-gate 
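/*
 * Per-type walker entry points; each seeds the common walk with the segment
 * type to report ("vmem_seg" uses VMEM_NONE to accept every type).
 */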
34917c478bd9Sstevel@tonic-gate int
34927c478bd9Sstevel@tonic-gate vmem_alloc_walk_init(mdb_walk_state_t *wsp)
34937c478bd9Sstevel@tonic-gate {
34947c478bd9Sstevel@tonic-gate 	return (vmem_seg_walk_common_init(wsp, VMEM_ALLOC, "alloc"));
34957c478bd9Sstevel@tonic-gate }
34967c478bd9Sstevel@tonic-gate 
34977c478bd9Sstevel@tonic-gate int
34987c478bd9Sstevel@tonic-gate vmem_free_walk_init(mdb_walk_state_t *wsp)
34997c478bd9Sstevel@tonic-gate {
35007c478bd9Sstevel@tonic-gate 	return (vmem_seg_walk_common_init(wsp, VMEM_FREE, "free"));
35017c478bd9Sstevel@tonic-gate }
35027c478bd9Sstevel@tonic-gate 
35037c478bd9Sstevel@tonic-gate int
35047c478bd9Sstevel@tonic-gate vmem_span_walk_init(mdb_walk_state_t *wsp)
35057c478bd9Sstevel@tonic-gate {
35067c478bd9Sstevel@tonic-gate 	return (vmem_seg_walk_common_init(wsp, VMEM_SPAN, "span"));
35077c478bd9Sstevel@tonic-gate }
35087c478bd9Sstevel@tonic-gate 
35097c478bd9Sstevel@tonic-gate int
35107c478bd9Sstevel@tonic-gate vmem_seg_walk_init(mdb_walk_state_t *wsp)
35117c478bd9Sstevel@tonic-gate {
35127c478bd9Sstevel@tonic-gate 	return (vmem_seg_walk_common_init(wsp, VMEM_NONE, "seg"));
35137c478bd9Sstevel@tonic-gate }
35147c478bd9Sstevel@tonic-gate 
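/*
 * Read the current segment (zero-filling any tail beyond the target's
 * vmem_seg_size), call back only for segments of the requested type, and
 * stop once the list wraps back around to vm_seg0.
 */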
35157c478bd9Sstevel@tonic-gate int
35167c478bd9Sstevel@tonic-gate vmem_seg_walk_step(mdb_walk_state_t *wsp)
35177c478bd9Sstevel@tonic-gate {
35187c478bd9Sstevel@tonic-gate 	vmem_seg_t seg;
35197c478bd9Sstevel@tonic-gate 	vmem_seg_walk_t *vsw = wsp->walk_data;
35207c478bd9Sstevel@tonic-gate 	uintptr_t addr = vsw->vsw_current;
35217c478bd9Sstevel@tonic-gate 	static size_t seg_size = 0;
35227c478bd9Sstevel@tonic-gate 	int rval;
35237c478bd9Sstevel@tonic-gate 
35247c478bd9Sstevel@tonic-gate 	if (!seg_size) {
35257c478bd9Sstevel@tonic-gate 		if (mdb_readvar(&seg_size, "vmem_seg_size") == -1) {
35267c478bd9Sstevel@tonic-gate 			mdb_warn("failed to read 'vmem_seg_size'");
35277c478bd9Sstevel@tonic-gate 			seg_size = sizeof (vmem_seg_t);
35287c478bd9Sstevel@tonic-gate 		}
35297c478bd9Sstevel@tonic-gate 	}
35307c478bd9Sstevel@tonic-gate 
35317c478bd9Sstevel@tonic-gate 	if (seg_size < sizeof (seg))
35327c478bd9Sstevel@tonic-gate 		bzero((caddr_t)&seg + seg_size, sizeof (seg) - seg_size);
35337c478bd9Sstevel@tonic-gate 
35347c478bd9Sstevel@tonic-gate 	if (mdb_vread(&seg, seg_size, addr) == -1) {
35357c478bd9Sstevel@tonic-gate 		mdb_warn("couldn't read vmem_seg at %p", addr);
35367c478bd9Sstevel@tonic-gate 		return (WALK_ERR);
35377c478bd9Sstevel@tonic-gate 	}
35387c478bd9Sstevel@tonic-gate 
35397c478bd9Sstevel@tonic-gate 	vsw->vsw_current = (uintptr_t)seg.vs_anext;
35407c478bd9Sstevel@tonic-gate 	if (vsw->vsw_type != VMEM_NONE && seg.vs_type != vsw->vsw_type) {
35417c478bd9Sstevel@tonic-gate 		rval = WALK_NEXT;
35427c478bd9Sstevel@tonic-gate 	} else {
35437c478bd9Sstevel@tonic-gate 		rval = wsp->walk_callback(addr, &seg, wsp->walk_cbdata);
35447c478bd9Sstevel@tonic-gate 	}
35457c478bd9Sstevel@tonic-gate 
35467c478bd9Sstevel@tonic-gate 	if (vsw->vsw_current == vsw->vsw_start)
35477c478bd9Sstevel@tonic-gate 		return (WALK_DONE);
35487c478bd9Sstevel@tonic-gate 
35497c478bd9Sstevel@tonic-gate 	return (rval);
35507c478bd9Sstevel@tonic-gate }
35517c478bd9Sstevel@tonic-gate 
35527c478bd9Sstevel@tonic-gate void
35537c478bd9Sstevel@tonic-gate vmem_seg_walk_fini(mdb_walk_state_t *wsp)
35547c478bd9Sstevel@tonic-gate {
35557c478bd9Sstevel@tonic-gate 	vmem_seg_walk_t *vsw = wsp->walk_data;
35567c478bd9Sstevel@tonic-gate 
35577c478bd9Sstevel@tonic-gate 	mdb_free(vsw, sizeof (vmem_seg_walk_t));
35587c478bd9Sstevel@tonic-gate }
35597c478bd9Sstevel@tonic-gate 
35607c478bd9Sstevel@tonic-gate #define	VMEM_NAMEWIDTH	22
35617c478bd9Sstevel@tonic-gate 
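/*
 * ::vmem - print a one-line summary of each vmem arena: in-use and total
 * bytes, plus the allocation success and failure counts from its kstats.
 * Each arena's name is indented two spaces per level of importing, so the
 * output reads as a tree.  Without an address, every arena is printed.
 */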
35627c478bd9Sstevel@tonic-gate int
35637c478bd9Sstevel@tonic-gate vmem(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
35647c478bd9Sstevel@tonic-gate {
35657c478bd9Sstevel@tonic-gate 	vmem_t v, parent;
35667c478bd9Sstevel@tonic-gate 	vmem_kstat_t *vkp = &v.vm_kstat;
35677c478bd9Sstevel@tonic-gate 	uintptr_t paddr;
35687c478bd9Sstevel@tonic-gate 	int ident = 0;
35697c478bd9Sstevel@tonic-gate 	char c[VMEM_NAMEWIDTH];
35707c478bd9Sstevel@tonic-gate 
35717c478bd9Sstevel@tonic-gate 	if (!(flags & DCMD_ADDRSPEC)) {
35727c478bd9Sstevel@tonic-gate 		if (mdb_walk_dcmd("vmem", "vmem", argc, argv) == -1) {
35737c478bd9Sstevel@tonic-gate 			mdb_warn("can't walk vmem");
35747c478bd9Sstevel@tonic-gate 			return (DCMD_ERR);
35757c478bd9Sstevel@tonic-gate 		}
35767c478bd9Sstevel@tonic-gate 		return (DCMD_OK);
35777c478bd9Sstevel@tonic-gate 	}
35787c478bd9Sstevel@tonic-gate 
35797c478bd9Sstevel@tonic-gate 	if (DCMD_HDRSPEC(flags))
35807c478bd9Sstevel@tonic-gate 		mdb_printf("%-?s %-*s %10s %12s %9s %5s\n",
35817c478bd9Sstevel@tonic-gate 		    "ADDR", VMEM_NAMEWIDTH, "NAME", "INUSE",
35827c478bd9Sstevel@tonic-gate 		    "TOTAL", "SUCCEED", "FAIL");
35837c478bd9Sstevel@tonic-gate 
35847c478bd9Sstevel@tonic-gate 	if (mdb_vread(&v, sizeof (v), addr) == -1) {
35857c478bd9Sstevel@tonic-gate 		mdb_warn("couldn't read vmem at %p", addr);
35867c478bd9Sstevel@tonic-gate 		return (DCMD_ERR);
35877c478bd9Sstevel@tonic-gate 	}
35887c478bd9Sstevel@tonic-gate 
35897c478bd9Sstevel@tonic-gate 	for (paddr = (uintptr_t)v.vm_source; paddr != NULL; ident += 2) {
35907c478bd9Sstevel@tonic-gate 		if (mdb_vread(&parent, sizeof (parent), paddr) == -1) {
35917c478bd9Sstevel@tonic-gate 			mdb_warn("couldn't trace %p's ancestry", addr);
35927c478bd9Sstevel@tonic-gate 			ident = 0;
35937c478bd9Sstevel@tonic-gate 			break;
35947c478bd9Sstevel@tonic-gate 		}
35957c478bd9Sstevel@tonic-gate 		paddr = (uintptr_t)parent.vm_source;
35967c478bd9Sstevel@tonic-gate 	}
35977c478bd9Sstevel@tonic-gate 
35987c478bd9Sstevel@tonic-gate 	(void) mdb_snprintf(c, VMEM_NAMEWIDTH, "%*s%s", ident, "", v.vm_name);
35997c478bd9Sstevel@tonic-gate 
36007c478bd9Sstevel@tonic-gate 	mdb_printf("%0?p %-*s %10llu %12llu %9llu %5llu\n",
36017c478bd9Sstevel@tonic-gate 	    addr, VMEM_NAMEWIDTH, c,
36027c478bd9Sstevel@tonic-gate 	    vkp->vk_mem_inuse.value.ui64, vkp->vk_mem_total.value.ui64,
36037c478bd9Sstevel@tonic-gate 	    vkp->vk_alloc.value.ui64, vkp->vk_fail.value.ui64);
36047c478bd9Sstevel@tonic-gate 
36057c478bd9Sstevel@tonic-gate 	return (DCMD_OK);
36067c478bd9Sstevel@tonic-gate }
36077c478bd9Sstevel@tonic-gate 
36087c478bd9Sstevel@tonic-gate void
36097c478bd9Sstevel@tonic-gate vmem_seg_help(void)
36107c478bd9Sstevel@tonic-gate {
3611*b5fca8f8Stomee 	mdb_printf("%s",
3612*b5fca8f8Stomee "Display the contents of vmem_seg_ts, with optional filtering.\n\n"
36137c478bd9Sstevel@tonic-gate "\n"
36147c478bd9Sstevel@tonic-gate "A vmem_seg_t represents a range of addresses (or arbitrary numbers),\n"
36157c478bd9Sstevel@tonic-gate "representing a single chunk of data.  Only ALLOC segments have debugging\n"
36167c478bd9Sstevel@tonic-gate "information.\n");
36177c478bd9Sstevel@tonic-gate 	mdb_dec_indent(2);
36187c478bd9Sstevel@tonic-gate 	mdb_printf("%<b>OPTIONS%</b>\n");
36197c478bd9Sstevel@tonic-gate 	mdb_inc_indent(2);
36207c478bd9Sstevel@tonic-gate 	mdb_printf("%s",
36217c478bd9Sstevel@tonic-gate "  -v    Display the full content of the vmem_seg, including its stack trace\n"
36227c478bd9Sstevel@tonic-gate "  -s    report the size of the segment, instead of the end address\n"
36237c478bd9Sstevel@tonic-gate "  -c caller\n"
36247c478bd9Sstevel@tonic-gate "        filter out segments without the function/PC in their stack trace\n"
36257c478bd9Sstevel@tonic-gate "  -e earliest\n"
36267c478bd9Sstevel@tonic-gate "        filter out segments timestamped before earliest\n"
36277c478bd9Sstevel@tonic-gate "  -l latest\n"
36287c478bd9Sstevel@tonic-gate "        filter out segments timestamped after latest\n"
36297c478bd9Sstevel@tonic-gate "  -m minsize\n"
36307c478bd9Sstevel@tonic-gate "        filter out segments smaller than minsize\n"
36317c478bd9Sstevel@tonic-gate "  -M maxsize\n"
36327c478bd9Sstevel@tonic-gate "        filter out segments larger than maxsize\n"
36337c478bd9Sstevel@tonic-gate "  -t thread\n"
36347c478bd9Sstevel@tonic-gate "        filter out segments not involving thread\n"
36357c478bd9Sstevel@tonic-gate "  -T type\n"
36367c478bd9Sstevel@tonic-gate "        filter out segments not of type 'type'\n"
36377c478bd9Sstevel@tonic-gate "        type is one of: ALLOC/FREE/SPAN/ROTOR/WALKER\n");
36387c478bd9Sstevel@tonic-gate }
36397c478bd9Sstevel@tonic-gate 
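/*
 * ::vmem_seg - display a single vmem_seg_t, subject to the filters described
 * in vmem_seg_help() above.  It is typically applied to the output of a
 * segment walker, e.g. 'addr::walk vmem_alloc | ::vmem_seg -v'.  When its
 * own output is piped onward, only the segment address is emitted.
 */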
36407c478bd9Sstevel@tonic-gate /*ARGSUSED*/
36417c478bd9Sstevel@tonic-gate int
36427c478bd9Sstevel@tonic-gate vmem_seg(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
36437c478bd9Sstevel@tonic-gate {
36447c478bd9Sstevel@tonic-gate 	vmem_seg_t vs;
36457c478bd9Sstevel@tonic-gate 	pc_t *stk = vs.vs_stack;
36467c478bd9Sstevel@tonic-gate 	uintptr_t sz;
36477c478bd9Sstevel@tonic-gate 	uint8_t t;
36487c478bd9Sstevel@tonic-gate 	const char *type = NULL;
36497c478bd9Sstevel@tonic-gate 	GElf_Sym sym;
36507c478bd9Sstevel@tonic-gate 	char c[MDB_SYM_NAMLEN];
36517c478bd9Sstevel@tonic-gate 	int no_debug;
36527c478bd9Sstevel@tonic-gate 	int i;
36537c478bd9Sstevel@tonic-gate 	int depth;
36547c478bd9Sstevel@tonic-gate 	uintptr_t laddr, haddr;
36557c478bd9Sstevel@tonic-gate 
36567c478bd9Sstevel@tonic-gate 	uintptr_t caller = NULL, thread = NULL;
36577c478bd9Sstevel@tonic-gate 	uintptr_t minsize = 0, maxsize = 0;
36587c478bd9Sstevel@tonic-gate 
36597c478bd9Sstevel@tonic-gate 	hrtime_t earliest = 0, latest = 0;
36607c478bd9Sstevel@tonic-gate 
36617c478bd9Sstevel@tonic-gate 	uint_t size = 0;
36627c478bd9Sstevel@tonic-gate 	uint_t verbose = 0;
36637c478bd9Sstevel@tonic-gate 
36647c478bd9Sstevel@tonic-gate 	if (!(flags & DCMD_ADDRSPEC))
36657c478bd9Sstevel@tonic-gate 		return (DCMD_USAGE);
36667c478bd9Sstevel@tonic-gate 
36677c478bd9Sstevel@tonic-gate 	if (mdb_getopts(argc, argv,
36687c478bd9Sstevel@tonic-gate 	    'c', MDB_OPT_UINTPTR, &caller,
36697c478bd9Sstevel@tonic-gate 	    'e', MDB_OPT_UINT64, &earliest,
36707c478bd9Sstevel@tonic-gate 	    'l', MDB_OPT_UINT64, &latest,
36717c478bd9Sstevel@tonic-gate 	    's', MDB_OPT_SETBITS, TRUE, &size,
36727c478bd9Sstevel@tonic-gate 	    'm', MDB_OPT_UINTPTR, &minsize,
36737c478bd9Sstevel@tonic-gate 	    'M', MDB_OPT_UINTPTR, &maxsize,
36747c478bd9Sstevel@tonic-gate 	    't', MDB_OPT_UINTPTR, &thread,
36757c478bd9Sstevel@tonic-gate 	    'T', MDB_OPT_STR, &type,
36767c478bd9Sstevel@tonic-gate 	    'v', MDB_OPT_SETBITS, TRUE, &verbose,
36777c478bd9Sstevel@tonic-gate 	    NULL) != argc)
36787c478bd9Sstevel@tonic-gate 		return (DCMD_USAGE);
36797c478bd9Sstevel@tonic-gate 
36807c478bd9Sstevel@tonic-gate 	if (DCMD_HDRSPEC(flags) && !(flags & DCMD_PIPE_OUT)) {
36817c478bd9Sstevel@tonic-gate 		if (verbose) {
36827c478bd9Sstevel@tonic-gate 			mdb_printf("%16s %4s %16s %16s %16s\n"
36837c478bd9Sstevel@tonic-gate 			    "%<u>%16s %4s %16s %16s %16s%</u>\n",
36847c478bd9Sstevel@tonic-gate 			    "ADDR", "TYPE", "START", "END", "SIZE",
36857c478bd9Sstevel@tonic-gate 			    "", "", "THREAD", "TIMESTAMP", "");
36867c478bd9Sstevel@tonic-gate 		} else {
36877c478bd9Sstevel@tonic-gate 			mdb_printf("%?s %4s %?s %?s %s\n", "ADDR", "TYPE",
36887c478bd9Sstevel@tonic-gate 			    "START", size? "SIZE" : "END", "WHO");
36897c478bd9Sstevel@tonic-gate 		}
36907c478bd9Sstevel@tonic-gate 	}
36917c478bd9Sstevel@tonic-gate 
36927c478bd9Sstevel@tonic-gate 	if (mdb_vread(&vs, sizeof (vs), addr) == -1) {
36937c478bd9Sstevel@tonic-gate 		mdb_warn("couldn't read vmem_seg at %p", addr);
36947c478bd9Sstevel@tonic-gate 		return (DCMD_ERR);
36957c478bd9Sstevel@tonic-gate 	}
36967c478bd9Sstevel@tonic-gate 
36977c478bd9Sstevel@tonic-gate 	if (type != NULL) {
36987c478bd9Sstevel@tonic-gate 		if (strcmp(type, "ALLC") == 0 || strcmp(type, "ALLOC") == 0)
36997c478bd9Sstevel@tonic-gate 			t = VMEM_ALLOC;
37007c478bd9Sstevel@tonic-gate 		else if (strcmp(type, "FREE") == 0)
37017c478bd9Sstevel@tonic-gate 			t = VMEM_FREE;
37027c478bd9Sstevel@tonic-gate 		else if (strcmp(type, "SPAN") == 0)
37037c478bd9Sstevel@tonic-gate 			t = VMEM_SPAN;
37047c478bd9Sstevel@tonic-gate 		else if (strcmp(type, "ROTR") == 0 ||
37057c478bd9Sstevel@tonic-gate 		    strcmp(type, "ROTOR") == 0)
37067c478bd9Sstevel@tonic-gate 			t = VMEM_ROTOR;
37077c478bd9Sstevel@tonic-gate 		else if (strcmp(type, "WLKR") == 0 ||
37087c478bd9Sstevel@tonic-gate 		    strcmp(type, "WALKER") == 0)
37097c478bd9Sstevel@tonic-gate 			t = VMEM_WALKER;
37107c478bd9Sstevel@tonic-gate 		else {
37117c478bd9Sstevel@tonic-gate 			mdb_warn("\"%s\" is not a recognized vmem_seg type\n",
37127c478bd9Sstevel@tonic-gate 			    type);
37137c478bd9Sstevel@tonic-gate 			return (DCMD_ERR);
37147c478bd9Sstevel@tonic-gate 		}
37157c478bd9Sstevel@tonic-gate 
37167c478bd9Sstevel@tonic-gate 		if (vs.vs_type != t)
37177c478bd9Sstevel@tonic-gate 			return (DCMD_OK);
37187c478bd9Sstevel@tonic-gate 	}
37197c478bd9Sstevel@tonic-gate 
37207c478bd9Sstevel@tonic-gate 	sz = vs.vs_end - vs.vs_start;
37217c478bd9Sstevel@tonic-gate 
37227c478bd9Sstevel@tonic-gate 	if (minsize != 0 && sz < minsize)
37237c478bd9Sstevel@tonic-gate 		return (DCMD_OK);
37247c478bd9Sstevel@tonic-gate 
37257c478bd9Sstevel@tonic-gate 	if (maxsize != 0 && sz > maxsize)
37267c478bd9Sstevel@tonic-gate 		return (DCMD_OK);
37277c478bd9Sstevel@tonic-gate 
37287c478bd9Sstevel@tonic-gate 	t = vs.vs_type;
37297c478bd9Sstevel@tonic-gate 	depth = vs.vs_depth;
37307c478bd9Sstevel@tonic-gate 
37317c478bd9Sstevel@tonic-gate 	/*
37327c478bd9Sstevel@tonic-gate 	 * debug info, when present, is only accurate for VMEM_ALLOC segments
37337c478bd9Sstevel@tonic-gate 	 */
37347c478bd9Sstevel@tonic-gate 	no_debug = (t != VMEM_ALLOC) ||
37357c478bd9Sstevel@tonic-gate 	    (depth == 0 || depth > VMEM_STACK_DEPTH);
37367c478bd9Sstevel@tonic-gate 
37377c478bd9Sstevel@tonic-gate 	if (no_debug) {
37387c478bd9Sstevel@tonic-gate 		if (caller != NULL || thread != NULL || earliest != 0 ||
37397c478bd9Sstevel@tonic-gate 		    latest != 0)
37407c478bd9Sstevel@tonic-gate 			return (DCMD_OK);		/* not enough info */
37417c478bd9Sstevel@tonic-gate 	} else {
37427c478bd9Sstevel@tonic-gate 		if (caller != NULL) {
37437c478bd9Sstevel@tonic-gate 			laddr = caller;
37447c478bd9Sstevel@tonic-gate 			haddr = caller + sizeof (caller);
37457c478bd9Sstevel@tonic-gate 
37467c478bd9Sstevel@tonic-gate 			if (mdb_lookup_by_addr(caller, MDB_SYM_FUZZY, c,
37477c478bd9Sstevel@tonic-gate 			    sizeof (c), &sym) != -1 &&
37487c478bd9Sstevel@tonic-gate 			    caller == (uintptr_t)sym.st_value) {
37497c478bd9Sstevel@tonic-gate 				/*
37507c478bd9Sstevel@tonic-gate 				 * We were provided an exact symbol value; any
37517c478bd9Sstevel@tonic-gate 				 * address in the function is valid.
37527c478bd9Sstevel@tonic-gate 				 */
37537c478bd9Sstevel@tonic-gate 				laddr = (uintptr_t)sym.st_value;
37547c478bd9Sstevel@tonic-gate 				haddr = (uintptr_t)sym.st_value + sym.st_size;
37557c478bd9Sstevel@tonic-gate 			}
37567c478bd9Sstevel@tonic-gate 
37577c478bd9Sstevel@tonic-gate 			for (i = 0; i < depth; i++)
37587c478bd9Sstevel@tonic-gate 				if (vs.vs_stack[i] >= laddr &&
37597c478bd9Sstevel@tonic-gate 				    vs.vs_stack[i] < haddr)
37607c478bd9Sstevel@tonic-gate 					break;
37617c478bd9Sstevel@tonic-gate 
37627c478bd9Sstevel@tonic-gate 			if (i == depth)
37637c478bd9Sstevel@tonic-gate 				return (DCMD_OK);
37647c478bd9Sstevel@tonic-gate 		}
37657c478bd9Sstevel@tonic-gate 
37667c478bd9Sstevel@tonic-gate 		if (thread != NULL && (uintptr_t)vs.vs_thread != thread)
37677c478bd9Sstevel@tonic-gate 			return (DCMD_OK);
37687c478bd9Sstevel@tonic-gate 
37697c478bd9Sstevel@tonic-gate 		if (earliest != 0 && vs.vs_timestamp < earliest)
37707c478bd9Sstevel@tonic-gate 			return (DCMD_OK);
37717c478bd9Sstevel@tonic-gate 
37727c478bd9Sstevel@tonic-gate 		if (latest != 0 && vs.vs_timestamp > latest)
37737c478bd9Sstevel@tonic-gate 			return (DCMD_OK);
37747c478bd9Sstevel@tonic-gate 	}
37757c478bd9Sstevel@tonic-gate 
37767c478bd9Sstevel@tonic-gate 	type = (t == VMEM_ALLOC ? "ALLC" :
37777c478bd9Sstevel@tonic-gate 	    t == VMEM_FREE ? "FREE" :
37787c478bd9Sstevel@tonic-gate 	    t == VMEM_SPAN ? "SPAN" :
37797c478bd9Sstevel@tonic-gate 	    t == VMEM_ROTOR ? "ROTR" :
37807c478bd9Sstevel@tonic-gate 	    t == VMEM_WALKER ? "WLKR" :
37817c478bd9Sstevel@tonic-gate 	    "????");
37827c478bd9Sstevel@tonic-gate 
37837c478bd9Sstevel@tonic-gate 	if (flags & DCMD_PIPE_OUT) {
37847c478bd9Sstevel@tonic-gate 		mdb_printf("%#lr\n", addr);
37857c478bd9Sstevel@tonic-gate 		return (DCMD_OK);
37867c478bd9Sstevel@tonic-gate 	}
37877c478bd9Sstevel@tonic-gate 
37887c478bd9Sstevel@tonic-gate 	if (verbose) {
37897c478bd9Sstevel@tonic-gate 		mdb_printf("%<b>%16p%</b> %4s %16p %16p %16d\n",
37907c478bd9Sstevel@tonic-gate 		    addr, type, vs.vs_start, vs.vs_end, sz);
37917c478bd9Sstevel@tonic-gate 
37927c478bd9Sstevel@tonic-gate 		if (no_debug)
37937c478bd9Sstevel@tonic-gate 			return (DCMD_OK);
37947c478bd9Sstevel@tonic-gate 
37957c478bd9Sstevel@tonic-gate 		mdb_printf("%16s %4s %16p %16llx\n",
37967c478bd9Sstevel@tonic-gate 		    "", "", vs.vs_thread, vs.vs_timestamp);
37977c478bd9Sstevel@tonic-gate 
37987c478bd9Sstevel@tonic-gate 		mdb_inc_indent(17);
37997c478bd9Sstevel@tonic-gate 		for (i = 0; i < depth; i++) {
38007c478bd9Sstevel@tonic-gate 			mdb_printf("%a\n", stk[i]);
38017c478bd9Sstevel@tonic-gate 		}
38027c478bd9Sstevel@tonic-gate 		mdb_dec_indent(17);
38037c478bd9Sstevel@tonic-gate 		mdb_printf("\n");
38047c478bd9Sstevel@tonic-gate 	} else {
38057c478bd9Sstevel@tonic-gate 		mdb_printf("%0?p %4s %0?p %0?p", addr, type,
38067c478bd9Sstevel@tonic-gate 		    vs.vs_start, size? sz : vs.vs_end);
38077c478bd9Sstevel@tonic-gate 
38087c478bd9Sstevel@tonic-gate 		if (no_debug) {
38097c478bd9Sstevel@tonic-gate 			mdb_printf("\n");
38107c478bd9Sstevel@tonic-gate 			return (DCMD_OK);
38117c478bd9Sstevel@tonic-gate 		}
38127c478bd9Sstevel@tonic-gate 
38137c478bd9Sstevel@tonic-gate 		for (i = 0; i < depth; i++) {
38147c478bd9Sstevel@tonic-gate 			if (mdb_lookup_by_addr(stk[i], MDB_SYM_FUZZY,
38157c478bd9Sstevel@tonic-gate 			    c, sizeof (c), &sym) == -1)
38167c478bd9Sstevel@tonic-gate 				continue;
38177c478bd9Sstevel@tonic-gate 			if (strncmp(c, "vmem_", 5) == 0)
38187c478bd9Sstevel@tonic-gate 				continue;
38197c478bd9Sstevel@tonic-gate 			break;
38207c478bd9Sstevel@tonic-gate 		}
38217c478bd9Sstevel@tonic-gate 		mdb_printf(" %a\n", stk[i]);
38227c478bd9Sstevel@tonic-gate 	}
38237c478bd9Sstevel@tonic-gate 	return (DCMD_OK);
38247c478bd9Sstevel@tonic-gate }
38257c478bd9Sstevel@tonic-gate 
38267c478bd9Sstevel@tonic-gate typedef struct kmalog_data {
38277c478bd9Sstevel@tonic-gate 	uintptr_t	kma_addr;
38287c478bd9Sstevel@tonic-gate 	hrtime_t	kma_newest;
38297c478bd9Sstevel@tonic-gate } kmalog_data_t;
38307c478bd9Sstevel@tonic-gate 
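/*
 * Callback for ::kmalog: print one transaction log entry, showing its age
 * relative to the newest entry seen, the buffer address, the cache name, and
 * the recorded stack trace.  If an address of interest was supplied, entries
 * whose buffer does not contain that address are skipped.
 */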
38317c478bd9Sstevel@tonic-gate /*ARGSUSED*/
38327c478bd9Sstevel@tonic-gate static int
38337c478bd9Sstevel@tonic-gate showbc(uintptr_t addr, const kmem_bufctl_audit_t *bcp, kmalog_data_t *kma)
38347c478bd9Sstevel@tonic-gate {
38357c478bd9Sstevel@tonic-gate 	char name[KMEM_CACHE_NAMELEN + 1];
38367c478bd9Sstevel@tonic-gate 	hrtime_t delta;
38377c478bd9Sstevel@tonic-gate 	int i, depth;
38387c478bd9Sstevel@tonic-gate 	size_t bufsize;
38397c478bd9Sstevel@tonic-gate 
38407c478bd9Sstevel@tonic-gate 	if (bcp->bc_timestamp == 0)
38417c478bd9Sstevel@tonic-gate 		return (WALK_DONE);
38427c478bd9Sstevel@tonic-gate 
38437c478bd9Sstevel@tonic-gate 	if (kma->kma_newest == 0)
38447c478bd9Sstevel@tonic-gate 		kma->kma_newest = bcp->bc_timestamp;
38457c478bd9Sstevel@tonic-gate 
38467c478bd9Sstevel@tonic-gate 	if (kma->kma_addr) {
38477c478bd9Sstevel@tonic-gate 		if (mdb_vread(&bufsize, sizeof (bufsize),
38487c478bd9Sstevel@tonic-gate 		    (uintptr_t)&bcp->bc_cache->cache_bufsize) == -1) {
38497c478bd9Sstevel@tonic-gate 			mdb_warn(
38507c478bd9Sstevel@tonic-gate 			    "failed to read cache_bufsize for cache at %p",
38517c478bd9Sstevel@tonic-gate 			    bcp->bc_cache);
38527c478bd9Sstevel@tonic-gate 			return (WALK_ERR);
38537c478bd9Sstevel@tonic-gate 		}
38547c478bd9Sstevel@tonic-gate 
38557c478bd9Sstevel@tonic-gate 		if (kma->kma_addr < (uintptr_t)bcp->bc_addr ||
38567c478bd9Sstevel@tonic-gate 		    kma->kma_addr >= (uintptr_t)bcp->bc_addr + bufsize)
38577c478bd9Sstevel@tonic-gate 			return (WALK_NEXT);
38587c478bd9Sstevel@tonic-gate 	}
38597c478bd9Sstevel@tonic-gate 
38607c478bd9Sstevel@tonic-gate 	delta = kma->kma_newest - bcp->bc_timestamp;
38617c478bd9Sstevel@tonic-gate 	depth = MIN(bcp->bc_depth, KMEM_STACK_DEPTH);
38627c478bd9Sstevel@tonic-gate 
38637c478bd9Sstevel@tonic-gate 	if (mdb_readstr(name, sizeof (name), (uintptr_t)
38647c478bd9Sstevel@tonic-gate 	    &bcp->bc_cache->cache_name) <= 0)
38657c478bd9Sstevel@tonic-gate 		(void) mdb_snprintf(name, sizeof (name), "%a", bcp->bc_cache);
38667c478bd9Sstevel@tonic-gate 
38677c478bd9Sstevel@tonic-gate 	mdb_printf("\nT-%lld.%09lld  addr=%p  %s\n",
38687c478bd9Sstevel@tonic-gate 	    delta / NANOSEC, delta % NANOSEC, bcp->bc_addr, name);
38697c478bd9Sstevel@tonic-gate 
38707c478bd9Sstevel@tonic-gate 	for (i = 0; i < depth; i++)
38717c478bd9Sstevel@tonic-gate 		mdb_printf("\t %a\n", bcp->bc_stack[i]);
38727c478bd9Sstevel@tonic-gate 
38737c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
38747c478bd9Sstevel@tonic-gate }
38757c478bd9Sstevel@tonic-gate 
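/*
 * ::kmalog [ fail | slab ] - display the kmem transaction log (or, with an
 * argument, the failure or slab log) one entry at a time via showbc.  With
 * an address, only transactions on the buffer containing that address are
 * shown.
 */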
38767c478bd9Sstevel@tonic-gate int
38777c478bd9Sstevel@tonic-gate kmalog(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
38787c478bd9Sstevel@tonic-gate {
38797c478bd9Sstevel@tonic-gate 	const char *logname = "kmem_transaction_log";
38807c478bd9Sstevel@tonic-gate 	kmalog_data_t kma;
38817c478bd9Sstevel@tonic-gate 
38827c478bd9Sstevel@tonic-gate 	if (argc > 1)
38837c478bd9Sstevel@tonic-gate 		return (DCMD_USAGE);
38847c478bd9Sstevel@tonic-gate 
38857c478bd9Sstevel@tonic-gate 	kma.kma_newest = 0;
38867c478bd9Sstevel@tonic-gate 	if (flags & DCMD_ADDRSPEC)
38877c478bd9Sstevel@tonic-gate 		kma.kma_addr = addr;
38887c478bd9Sstevel@tonic-gate 	else
38897c478bd9Sstevel@tonic-gate 		kma.kma_addr = NULL;
38907c478bd9Sstevel@tonic-gate 
38917c478bd9Sstevel@tonic-gate 	if (argc > 0) {
38927c478bd9Sstevel@tonic-gate 		if (argv->a_type != MDB_TYPE_STRING)
38937c478bd9Sstevel@tonic-gate 			return (DCMD_USAGE);
38947c478bd9Sstevel@tonic-gate 		if (strcmp(argv->a_un.a_str, "fail") == 0)
38957c478bd9Sstevel@tonic-gate 			logname = "kmem_failure_log";
38967c478bd9Sstevel@tonic-gate 		else if (strcmp(argv->a_un.a_str, "slab") == 0)
38977c478bd9Sstevel@tonic-gate 			logname = "kmem_slab_log";
38987c478bd9Sstevel@tonic-gate 		else
38997c478bd9Sstevel@tonic-gate 			return (DCMD_USAGE);
39007c478bd9Sstevel@tonic-gate 	}
39017c478bd9Sstevel@tonic-gate 
39027c478bd9Sstevel@tonic-gate 	if (mdb_readvar(&addr, logname) == -1) {
39037c478bd9Sstevel@tonic-gate 		mdb_warn("failed to read %s log header pointer", logname);
39047c478bd9Sstevel@tonic-gate 		return (DCMD_ERR);
39057c478bd9Sstevel@tonic-gate 	}
39067c478bd9Sstevel@tonic-gate 
39077c478bd9Sstevel@tonic-gate 	if (mdb_pwalk("kmem_log", (mdb_walk_cb_t)showbc, &kma, addr) == -1) {
39087c478bd9Sstevel@tonic-gate 		mdb_warn("failed to walk kmem log");
39097c478bd9Sstevel@tonic-gate 		return (DCMD_ERR);
39107c478bd9Sstevel@tonic-gate 	}
39117c478bd9Sstevel@tonic-gate 
39127c478bd9Sstevel@tonic-gate 	return (DCMD_OK);
39137c478bd9Sstevel@tonic-gate }
39147c478bd9Sstevel@tonic-gate 
39157c478bd9Sstevel@tonic-gate /*
39167c478bd9Sstevel@tonic-gate  * As the final lure for die-hard crash(1M) users, we provide ::kmausers here.
39177c478bd9Sstevel@tonic-gate  * The first piece is a structure which we use to accumulate kmem_cache_t
39187c478bd9Sstevel@tonic-gate  * addresses of interest.  The kmc_add is used as a callback for the kmem_cache
39197c478bd9Sstevel@tonic-gate  * walker; we either add all caches, or ones named explicitly as arguments.
39207c478bd9Sstevel@tonic-gate  */
39217c478bd9Sstevel@tonic-gate 
39227c478bd9Sstevel@tonic-gate typedef struct kmclist {
39237c478bd9Sstevel@tonic-gate 	const char *kmc_name;			/* Name to match (or NULL) */
39247c478bd9Sstevel@tonic-gate 	uintptr_t *kmc_caches;			/* List of kmem_cache_t addrs */
39257c478bd9Sstevel@tonic-gate 	int kmc_nelems;				/* Num entries in kmc_caches */
39267c478bd9Sstevel@tonic-gate 	int kmc_size;				/* Size of kmc_caches array */
39277c478bd9Sstevel@tonic-gate } kmclist_t;
39287c478bd9Sstevel@tonic-gate 
39297c478bd9Sstevel@tonic-gate static int
39307c478bd9Sstevel@tonic-gate kmc_add(uintptr_t addr, const kmem_cache_t *cp, kmclist_t *kmc)
39317c478bd9Sstevel@tonic-gate {
39327c478bd9Sstevel@tonic-gate 	void *p;
39337c478bd9Sstevel@tonic-gate 	int s;
39347c478bd9Sstevel@tonic-gate 
39357c478bd9Sstevel@tonic-gate 	if (kmc->kmc_name == NULL ||
39367c478bd9Sstevel@tonic-gate 	    strcmp(cp->cache_name, kmc->kmc_name) == 0) {
39377c478bd9Sstevel@tonic-gate 		/*
39387c478bd9Sstevel@tonic-gate 		 * If we have a match, grow our array (if necessary), and then
39397c478bd9Sstevel@tonic-gate 		 * add the virtual address of the matching cache to our list.
39407c478bd9Sstevel@tonic-gate 		 */
39417c478bd9Sstevel@tonic-gate 		if (kmc->kmc_nelems >= kmc->kmc_size) {
39427c478bd9Sstevel@tonic-gate 			s = kmc->kmc_size ? kmc->kmc_size * 2 : 256;
39437c478bd9Sstevel@tonic-gate 			p = mdb_alloc(sizeof (uintptr_t) * s, UM_SLEEP | UM_GC);
39447c478bd9Sstevel@tonic-gate 
39457c478bd9Sstevel@tonic-gate 			bcopy(kmc->kmc_caches, p,
39467c478bd9Sstevel@tonic-gate 			    sizeof (uintptr_t) * kmc->kmc_size);
39477c478bd9Sstevel@tonic-gate 
39487c478bd9Sstevel@tonic-gate 			kmc->kmc_caches = p;
39497c478bd9Sstevel@tonic-gate 			kmc->kmc_size = s;
39507c478bd9Sstevel@tonic-gate 		}
39517c478bd9Sstevel@tonic-gate 
39527c478bd9Sstevel@tonic-gate 		kmc->kmc_caches[kmc->kmc_nelems++] = addr;
39537c478bd9Sstevel@tonic-gate 		return (kmc->kmc_name ? WALK_DONE : WALK_NEXT);
39547c478bd9Sstevel@tonic-gate 	}
39557c478bd9Sstevel@tonic-gate 
39567c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
39577c478bd9Sstevel@tonic-gate }
39587c478bd9Sstevel@tonic-gate 
39597c478bd9Sstevel@tonic-gate /*
39607c478bd9Sstevel@tonic-gate  * The second piece of ::kmausers is a hash table of allocations.  Each
39617c478bd9Sstevel@tonic-gate  * allocation owner is identified by its stack trace and data_size.  We then
39627c478bd9Sstevel@tonic-gate  * track the total bytes of all such allocations, and the number of allocations
39637c478bd9Sstevel@tonic-gate  * to report at the end.  Once we have a list of caches, we walk through the
39647c478bd9Sstevel@tonic-gate  * allocated bufctls of each, and update our hash table accordingly.
39657c478bd9Sstevel@tonic-gate  */
39667c478bd9Sstevel@tonic-gate 
39677c478bd9Sstevel@tonic-gate typedef struct kmowner {
39687c478bd9Sstevel@tonic-gate 	struct kmowner *kmo_head;		/* First hash elt in bucket */
39697c478bd9Sstevel@tonic-gate 	struct kmowner *kmo_next;		/* Next hash elt in chain */
39707c478bd9Sstevel@tonic-gate 	size_t kmo_signature;			/* Hash table signature */
39717c478bd9Sstevel@tonic-gate 	uint_t kmo_num;				/* Number of allocations */
39727c478bd9Sstevel@tonic-gate 	size_t kmo_data_size;			/* Size of each allocation */
39737c478bd9Sstevel@tonic-gate 	size_t kmo_total_size;			/* Total bytes of allocation */
39747c478bd9Sstevel@tonic-gate 	int kmo_depth;				/* Depth of stack trace */
39757c478bd9Sstevel@tonic-gate 	uintptr_t kmo_stack[KMEM_STACK_DEPTH];	/* Stack trace */
39767c478bd9Sstevel@tonic-gate } kmowner_t;
39777c478bd9Sstevel@tonic-gate 
39787c478bd9Sstevel@tonic-gate typedef struct kmusers {
39797c478bd9Sstevel@tonic-gate 	uintptr_t kmu_addr;			/* address of interest */
39807c478bd9Sstevel@tonic-gate 	const kmem_cache_t *kmu_cache;		/* Current kmem cache */
39817c478bd9Sstevel@tonic-gate 	kmowner_t *kmu_hash;			/* Hash table of owners */
39827c478bd9Sstevel@tonic-gate 	int kmu_nelems;				/* Number of entries in use */
39837c478bd9Sstevel@tonic-gate 	int kmu_size;				/* Total number of entries */
39847c478bd9Sstevel@tonic-gate } kmusers_t;
39857c478bd9Sstevel@tonic-gate 
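/*
 * Record one allocation in the ::kmausers hash table.  Owners are keyed by
 * data size plus the PCs of the stack trace; a matching owner's totals are
 * updated, otherwise a fresh entry is hashed in (doubling the table first if
 * it is full).
 */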
39867c478bd9Sstevel@tonic-gate static void
39877c478bd9Sstevel@tonic-gate kmu_add(kmusers_t *kmu, const kmem_bufctl_audit_t *bcp,
39887c478bd9Sstevel@tonic-gate     size_t size, size_t data_size)
39897c478bd9Sstevel@tonic-gate {
39907c478bd9Sstevel@tonic-gate 	int i, depth = MIN(bcp->bc_depth, KMEM_STACK_DEPTH);
39917c478bd9Sstevel@tonic-gate 	size_t bucket, signature = data_size;
39927c478bd9Sstevel@tonic-gate 	kmowner_t *kmo, *kmoend;
39937c478bd9Sstevel@tonic-gate 
39947c478bd9Sstevel@tonic-gate 	/*
39957c478bd9Sstevel@tonic-gate 	 * If the hash table is full, double its size and rehash everything.
39967c478bd9Sstevel@tonic-gate 	 */
39977c478bd9Sstevel@tonic-gate 	if (kmu->kmu_nelems >= kmu->kmu_size) {
39987c478bd9Sstevel@tonic-gate 		int s = kmu->kmu_size ? kmu->kmu_size * 2 : 1024;
39997c478bd9Sstevel@tonic-gate 
40007c478bd9Sstevel@tonic-gate 		kmo = mdb_alloc(sizeof (kmowner_t) * s, UM_SLEEP | UM_GC);
40017c478bd9Sstevel@tonic-gate 		bcopy(kmu->kmu_hash, kmo, sizeof (kmowner_t) * kmu->kmu_size);
40027c478bd9Sstevel@tonic-gate 		kmu->kmu_hash = kmo;
40037c478bd9Sstevel@tonic-gate 		kmu->kmu_size = s;
40047c478bd9Sstevel@tonic-gate 
40057c478bd9Sstevel@tonic-gate 		kmoend = kmu->kmu_hash + kmu->kmu_size;
40067c478bd9Sstevel@tonic-gate 		for (kmo = kmu->kmu_hash; kmo < kmoend; kmo++)
40077c478bd9Sstevel@tonic-gate 			kmo->kmo_head = NULL;
40087c478bd9Sstevel@tonic-gate 
40097c478bd9Sstevel@tonic-gate 		kmoend = kmu->kmu_hash + kmu->kmu_nelems;
40107c478bd9Sstevel@tonic-gate 		for (kmo = kmu->kmu_hash; kmo < kmoend; kmo++) {
40117c478bd9Sstevel@tonic-gate 			bucket = kmo->kmo_signature & (kmu->kmu_size - 1);
40127c478bd9Sstevel@tonic-gate 			kmo->kmo_next = kmu->kmu_hash[bucket].kmo_head;
40137c478bd9Sstevel@tonic-gate 			kmu->kmu_hash[bucket].kmo_head = kmo;
40147c478bd9Sstevel@tonic-gate 		}
40157c478bd9Sstevel@tonic-gate 	}
40167c478bd9Sstevel@tonic-gate 
40177c478bd9Sstevel@tonic-gate 	/*
40187c478bd9Sstevel@tonic-gate 	 * Finish computing the hash signature from the stack trace, and then
40197c478bd9Sstevel@tonic-gate 	 * see if the owner is in the hash table.  If so, update our stats.
40207c478bd9Sstevel@tonic-gate 	 */
40217c478bd9Sstevel@tonic-gate 	for (i = 0; i < depth; i++)
40227c478bd9Sstevel@tonic-gate 		signature += bcp->bc_stack[i];
40237c478bd9Sstevel@tonic-gate 
40247c478bd9Sstevel@tonic-gate 	bucket = signature & (kmu->kmu_size - 1);
40257c478bd9Sstevel@tonic-gate 
40267c478bd9Sstevel@tonic-gate 	for (kmo = kmu->kmu_hash[bucket].kmo_head; kmo; kmo = kmo->kmo_next) {
40277c478bd9Sstevel@tonic-gate 		if (kmo->kmo_signature == signature) {
40287c478bd9Sstevel@tonic-gate 			size_t difference = 0;
40297c478bd9Sstevel@tonic-gate 
40307c478bd9Sstevel@tonic-gate 			difference |= kmo->kmo_data_size - data_size;
40317c478bd9Sstevel@tonic-gate 			difference |= kmo->kmo_depth - depth;
40327c478bd9Sstevel@tonic-gate 
40337c478bd9Sstevel@tonic-gate 			for (i = 0; i < depth; i++) {
40347c478bd9Sstevel@tonic-gate 				difference |= kmo->kmo_stack[i] -
40357c478bd9Sstevel@tonic-gate 				    bcp->bc_stack[i];
40367c478bd9Sstevel@tonic-gate 			}
40377c478bd9Sstevel@tonic-gate 
40387c478bd9Sstevel@tonic-gate 			if (difference == 0) {
40397c478bd9Sstevel@tonic-gate 				kmo->kmo_total_size += size;
40407c478bd9Sstevel@tonic-gate 				kmo->kmo_num++;
40417c478bd9Sstevel@tonic-gate 				return;
40427c478bd9Sstevel@tonic-gate 			}
40437c478bd9Sstevel@tonic-gate 		}
40447c478bd9Sstevel@tonic-gate 	}
40457c478bd9Sstevel@tonic-gate 
40467c478bd9Sstevel@tonic-gate 	/*
40477c478bd9Sstevel@tonic-gate 	 * If the owner is not yet hashed, grab the next element and fill it
40487c478bd9Sstevel@tonic-gate 	 * in based on the allocation information.
40497c478bd9Sstevel@tonic-gate 	 */
40507c478bd9Sstevel@tonic-gate 	kmo = &kmu->kmu_hash[kmu->kmu_nelems++];
40517c478bd9Sstevel@tonic-gate 	kmo->kmo_next = kmu->kmu_hash[bucket].kmo_head;
40527c478bd9Sstevel@tonic-gate 	kmu->kmu_hash[bucket].kmo_head = kmo;
40537c478bd9Sstevel@tonic-gate 
40547c478bd9Sstevel@tonic-gate 	kmo->kmo_signature = signature;
40557c478bd9Sstevel@tonic-gate 	kmo->kmo_num = 1;
40567c478bd9Sstevel@tonic-gate 	kmo->kmo_data_size = data_size;
40577c478bd9Sstevel@tonic-gate 	kmo->kmo_total_size = size;
40587c478bd9Sstevel@tonic-gate 	kmo->kmo_depth = depth;
40597c478bd9Sstevel@tonic-gate 
40607c478bd9Sstevel@tonic-gate 	for (i = 0; i < depth; i++)
40617c478bd9Sstevel@tonic-gate 		kmo->kmo_stack[i] = bcp->bc_stack[i];
40627c478bd9Sstevel@tonic-gate }
40637c478bd9Sstevel@tonic-gate 
40647c478bd9Sstevel@tonic-gate /*
40657c478bd9Sstevel@tonic-gate  * When ::kmausers is invoked without the -f flag, we simply update our hash
40667c478bd9Sstevel@tonic-gate  * table with the information from each allocated bufctl.
40677c478bd9Sstevel@tonic-gate  */
40687c478bd9Sstevel@tonic-gate /*ARGSUSED*/
40697c478bd9Sstevel@tonic-gate static int
40707c478bd9Sstevel@tonic-gate kmause1(uintptr_t addr, const kmem_bufctl_audit_t *bcp, kmusers_t *kmu)
40717c478bd9Sstevel@tonic-gate {
40727c478bd9Sstevel@tonic-gate 	const kmem_cache_t *cp = kmu->kmu_cache;
40737c478bd9Sstevel@tonic-gate 
40747c478bd9Sstevel@tonic-gate 	kmu_add(kmu, bcp, cp->cache_bufsize, cp->cache_bufsize);
40757c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
40767c478bd9Sstevel@tonic-gate }
40777c478bd9Sstevel@tonic-gate 
40787c478bd9Sstevel@tonic-gate /*
40797c478bd9Sstevel@tonic-gate  * When ::kmausers is invoked with the -f flag, we print out the information
40807c478bd9Sstevel@tonic-gate  * for each bufctl as well as updating the hash table.
40817c478bd9Sstevel@tonic-gate  */
40827c478bd9Sstevel@tonic-gate static int
40837c478bd9Sstevel@tonic-gate kmause2(uintptr_t addr, const kmem_bufctl_audit_t *bcp, kmusers_t *kmu)
40847c478bd9Sstevel@tonic-gate {
40857c478bd9Sstevel@tonic-gate 	int i, depth = MIN(bcp->bc_depth, KMEM_STACK_DEPTH);
40867c478bd9Sstevel@tonic-gate 	const kmem_cache_t *cp = kmu->kmu_cache;
40877c478bd9Sstevel@tonic-gate 	kmem_bufctl_t bufctl;
40887c478bd9Sstevel@tonic-gate 
40897c478bd9Sstevel@tonic-gate 	if (kmu->kmu_addr) {
40907c478bd9Sstevel@tonic-gate 		if (mdb_vread(&bufctl, sizeof (bufctl),  addr) == -1)
40917c478bd9Sstevel@tonic-gate 			mdb_warn("couldn't read bufctl at %p", addr);
40927c478bd9Sstevel@tonic-gate 		else if (kmu->kmu_addr < (uintptr_t)bufctl.bc_addr ||
40937c478bd9Sstevel@tonic-gate 		    kmu->kmu_addr >= (uintptr_t)bufctl.bc_addr +
40947c478bd9Sstevel@tonic-gate 		    cp->cache_bufsize)
40957c478bd9Sstevel@tonic-gate 			return (WALK_NEXT);
40967c478bd9Sstevel@tonic-gate 	}
40977c478bd9Sstevel@tonic-gate 
40987c478bd9Sstevel@tonic-gate 	mdb_printf("size %d, addr %p, thread %p, cache %s\n",
40997c478bd9Sstevel@tonic-gate 	    cp->cache_bufsize, addr, bcp->bc_thread, cp->cache_name);
41007c478bd9Sstevel@tonic-gate 
41017c478bd9Sstevel@tonic-gate 	for (i = 0; i < depth; i++)
41027c478bd9Sstevel@tonic-gate 		mdb_printf("\t %a\n", bcp->bc_stack[i]);
41037c478bd9Sstevel@tonic-gate 
41047c478bd9Sstevel@tonic-gate 	kmu_add(kmu, bcp, cp->cache_bufsize, cp->cache_bufsize);
41057c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
41067c478bd9Sstevel@tonic-gate }
41077c478bd9Sstevel@tonic-gate 
41087c478bd9Sstevel@tonic-gate /*
41097c478bd9Sstevel@tonic-gate  * We sort our results by allocation size before printing them.
41107c478bd9Sstevel@tonic-gate  */
41117c478bd9Sstevel@tonic-gate static int
41127c478bd9Sstevel@tonic-gate kmownercmp(const void *lp, const void *rp)
41137c478bd9Sstevel@tonic-gate {
41147c478bd9Sstevel@tonic-gate 	const kmowner_t *lhs = lp;
41157c478bd9Sstevel@tonic-gate 	const kmowner_t *rhs = rp;
41167c478bd9Sstevel@tonic-gate 
41177c478bd9Sstevel@tonic-gate 	return (rhs->kmo_total_size - lhs->kmo_total_size);
41187c478bd9Sstevel@tonic-gate }
41197c478bd9Sstevel@tonic-gate 
41207c478bd9Sstevel@tonic-gate /*
41217c478bd9Sstevel@tonic-gate  * The main engine of ::kmausers is relatively straightforward: First we
41227c478bd9Sstevel@tonic-gate  * accumulate our list of kmem_cache_t addresses into the kmclist_t. Next we
41237c478bd9Sstevel@tonic-gate  * iterate over the allocated bufctls of each cache in the list.  Finally,
41247c478bd9Sstevel@tonic-gate  * we sort and print our results.
41257c478bd9Sstevel@tonic-gate  */
41267c478bd9Sstevel@tonic-gate /*ARGSUSED*/
41277c478bd9Sstevel@tonic-gate int
41287c478bd9Sstevel@tonic-gate kmausers(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
41297c478bd9Sstevel@tonic-gate {
41307c478bd9Sstevel@tonic-gate 	int mem_threshold = 8192;	/* Minimum # bytes for printing */
41317c478bd9Sstevel@tonic-gate 	int cnt_threshold = 100;	/* Minimum # blocks for printing */
41327c478bd9Sstevel@tonic-gate 	int audited_caches = 0;		/* Number of KMF_AUDIT caches found */
41337c478bd9Sstevel@tonic-gate 	int do_all_caches = 1;		/* Do all caches (no arguments) */
41347c478bd9Sstevel@tonic-gate 	int opt_e = FALSE;		/* Include "small" users */
41357c478bd9Sstevel@tonic-gate 	int opt_f = FALSE;		/* Print stack traces */
41367c478bd9Sstevel@tonic-gate 
41377c478bd9Sstevel@tonic-gate 	mdb_walk_cb_t callback = (mdb_walk_cb_t)kmause1;
41387c478bd9Sstevel@tonic-gate 	kmowner_t *kmo, *kmoend;
41397c478bd9Sstevel@tonic-gate 	int i, oelems;
41407c478bd9Sstevel@tonic-gate 
41417c478bd9Sstevel@tonic-gate 	kmclist_t kmc;
41427c478bd9Sstevel@tonic-gate 	kmusers_t kmu;
41437c478bd9Sstevel@tonic-gate 
41447c478bd9Sstevel@tonic-gate 	bzero(&kmc, sizeof (kmc));
41457c478bd9Sstevel@tonic-gate 	bzero(&kmu, sizeof (kmu));
41467c478bd9Sstevel@tonic-gate 
41477c478bd9Sstevel@tonic-gate 	while ((i = mdb_getopts(argc, argv,
41487c478bd9Sstevel@tonic-gate 	    'e', MDB_OPT_SETBITS, TRUE, &opt_e,
41497c478bd9Sstevel@tonic-gate 	    'f', MDB_OPT_SETBITS, TRUE, &opt_f, NULL)) != argc) {
41507c478bd9Sstevel@tonic-gate 
41517c478bd9Sstevel@tonic-gate 		argv += i;	/* skip past options we just processed */
41527c478bd9Sstevel@tonic-gate 		argc -= i;	/* adjust argc */
41537c478bd9Sstevel@tonic-gate 
41547c478bd9Sstevel@tonic-gate 		if (argv->a_type != MDB_TYPE_STRING || *argv->a_un.a_str == '-')
41557c478bd9Sstevel@tonic-gate 			return (DCMD_USAGE);
41567c478bd9Sstevel@tonic-gate 
41577c478bd9Sstevel@tonic-gate 		oelems = kmc.kmc_nelems;
41587c478bd9Sstevel@tonic-gate 		kmc.kmc_name = argv->a_un.a_str;
41597c478bd9Sstevel@tonic-gate 		(void) mdb_walk("kmem_cache", (mdb_walk_cb_t)kmc_add, &kmc);
41607c478bd9Sstevel@tonic-gate 
41617c478bd9Sstevel@tonic-gate 		if (kmc.kmc_nelems == oelems) {
41627c478bd9Sstevel@tonic-gate 			mdb_warn("unknown kmem cache: %s\n", kmc.kmc_name);
41637c478bd9Sstevel@tonic-gate 			return (DCMD_ERR);
41647c478bd9Sstevel@tonic-gate 		}
41657c478bd9Sstevel@tonic-gate 
41667c478bd9Sstevel@tonic-gate 		do_all_caches = 0;
41677c478bd9Sstevel@tonic-gate 		argv++;
41687c478bd9Sstevel@tonic-gate 		argc--;
41697c478bd9Sstevel@tonic-gate 	}
41707c478bd9Sstevel@tonic-gate 
41717c478bd9Sstevel@tonic-gate 	if (flags & DCMD_ADDRSPEC) {
41727c478bd9Sstevel@tonic-gate 		opt_f = TRUE;
41737c478bd9Sstevel@tonic-gate 		kmu.kmu_addr = addr;
41747c478bd9Sstevel@tonic-gate 	} else {
41757c478bd9Sstevel@tonic-gate 		kmu.kmu_addr = NULL;
41767c478bd9Sstevel@tonic-gate 	}
41777c478bd9Sstevel@tonic-gate 
41787c478bd9Sstevel@tonic-gate 	if (opt_e)
41797c478bd9Sstevel@tonic-gate 		mem_threshold = cnt_threshold = 0;
41807c478bd9Sstevel@tonic-gate 
41817c478bd9Sstevel@tonic-gate 	if (opt_f)
41827c478bd9Sstevel@tonic-gate 		callback = (mdb_walk_cb_t)kmause2;
41837c478bd9Sstevel@tonic-gate 
41847c478bd9Sstevel@tonic-gate 	if (do_all_caches) {
41857c478bd9Sstevel@tonic-gate 		kmc.kmc_name = NULL; /* match all cache names */
41867c478bd9Sstevel@tonic-gate 		(void) mdb_walk("kmem_cache", (mdb_walk_cb_t)kmc_add, &kmc);
41877c478bd9Sstevel@tonic-gate 	}
41887c478bd9Sstevel@tonic-gate 
41897c478bd9Sstevel@tonic-gate 	for (i = 0; i < kmc.kmc_nelems; i++) {
41907c478bd9Sstevel@tonic-gate 		uintptr_t cp = kmc.kmc_caches[i];
41917c478bd9Sstevel@tonic-gate 		kmem_cache_t c;
41927c478bd9Sstevel@tonic-gate 
41937c478bd9Sstevel@tonic-gate 		if (mdb_vread(&c, sizeof (c), cp) == -1) {
41947c478bd9Sstevel@tonic-gate 			mdb_warn("failed to read cache at %p", cp);
41957c478bd9Sstevel@tonic-gate 			continue;
41967c478bd9Sstevel@tonic-gate 		}
41977c478bd9Sstevel@tonic-gate 
41987c478bd9Sstevel@tonic-gate 		if (!(c.cache_flags & KMF_AUDIT)) {
41997c478bd9Sstevel@tonic-gate 			if (!do_all_caches) {
42007c478bd9Sstevel@tonic-gate 				mdb_warn("KMF_AUDIT is not enabled for %s\n",
42017c478bd9Sstevel@tonic-gate 				    c.cache_name);
42027c478bd9Sstevel@tonic-gate 			}
42037c478bd9Sstevel@tonic-gate 			continue;
42047c478bd9Sstevel@tonic-gate 		}
42057c478bd9Sstevel@tonic-gate 
42067c478bd9Sstevel@tonic-gate 		kmu.kmu_cache = &c;
42077c478bd9Sstevel@tonic-gate 		(void) mdb_pwalk("bufctl", callback, &kmu, cp);
42087c478bd9Sstevel@tonic-gate 		audited_caches++;
42097c478bd9Sstevel@tonic-gate 	}
42107c478bd9Sstevel@tonic-gate 
42117c478bd9Sstevel@tonic-gate 	if (audited_caches == 0 && do_all_caches) {
42127c478bd9Sstevel@tonic-gate 		mdb_warn("KMF_AUDIT is not enabled for any caches\n");
42137c478bd9Sstevel@tonic-gate 		return (DCMD_ERR);
42147c478bd9Sstevel@tonic-gate 	}
42157c478bd9Sstevel@tonic-gate 
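	/*
	 * Sort the per-stack totals (see kmownercmp()) and print each one
	 * that meets at least one of the reporting thresholds.
	 */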
42167c478bd9Sstevel@tonic-gate 	qsort(kmu.kmu_hash, kmu.kmu_nelems, sizeof (kmowner_t), kmownercmp);
42177c478bd9Sstevel@tonic-gate 	kmoend = kmu.kmu_hash + kmu.kmu_nelems;
42187c478bd9Sstevel@tonic-gate 
42197c478bd9Sstevel@tonic-gate 	for (kmo = kmu.kmu_hash; kmo < kmoend; kmo++) {
42207c478bd9Sstevel@tonic-gate 		if (kmo->kmo_total_size < mem_threshold &&
42217c478bd9Sstevel@tonic-gate 		    kmo->kmo_num < cnt_threshold)
42227c478bd9Sstevel@tonic-gate 			continue;
42237c478bd9Sstevel@tonic-gate 		mdb_printf("%lu bytes for %u allocations with data size %lu:\n",
42247c478bd9Sstevel@tonic-gate 		    kmo->kmo_total_size, kmo->kmo_num, kmo->kmo_data_size);
42257c478bd9Sstevel@tonic-gate 		for (i = 0; i < kmo->kmo_depth; i++)
42267c478bd9Sstevel@tonic-gate 			mdb_printf("\t %a\n", kmo->kmo_stack[i]);
42277c478bd9Sstevel@tonic-gate 	}
42287c478bd9Sstevel@tonic-gate 
42297c478bd9Sstevel@tonic-gate 	return (DCMD_OK);
42307c478bd9Sstevel@tonic-gate }
42317c478bd9Sstevel@tonic-gate 
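/*
 * Example ::kmausers invocations (the cache name and address are
 * illustrative):
 *
 *	> ::kmausers			largest users, all audited caches
 *	> ::kmausers -e kmem_alloc_256	every user of the kmem_alloc_256 cache
 *	> addr::kmausers		only allocations that include addr
 */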
42327c478bd9Sstevel@tonic-gate void
42337c478bd9Sstevel@tonic-gate kmausers_help(void)
42347c478bd9Sstevel@tonic-gate {
42357c478bd9Sstevel@tonic-gate 	mdb_printf(
42367c478bd9Sstevel@tonic-gate 	    "Displays the largest users of the kmem allocator, sorted by\n"
42377c478bd9Sstevel@tonic-gate 	    "stack trace.  If one or more caches are specified, only those\n"
42387c478bd9Sstevel@tonic-gate 	    "caches are searched; by default, all caches are searched.  If\n"
42397c478bd9Sstevel@tonic-gate 	    "an address is specified, only those allocations that include\n"
42407c478bd9Sstevel@tonic-gate 	    "the given address are displayed.  Specifying an address implies\n"
42417c478bd9Sstevel@tonic-gate 	    "-f.\n"
42427c478bd9Sstevel@tonic-gate 	    "\n"
42437c478bd9Sstevel@tonic-gate 	    "\t-e\tInclude all users, not just the largest\n"
42447c478bd9Sstevel@tonic-gate 	    "\t-f\tDisplay individual allocations.  By default, users are\n"
42457c478bd9Sstevel@tonic-gate 	    "\t\tgrouped by stack trace\n");
42467c478bd9Sstevel@tonic-gate }
42477c478bd9Sstevel@tonic-gate 
42487c478bd9Sstevel@tonic-gate static int
42497c478bd9Sstevel@tonic-gate kmem_ready_check(void)
42507c478bd9Sstevel@tonic-gate {
42517c478bd9Sstevel@tonic-gate 	int ready;
42527c478bd9Sstevel@tonic-gate 
42537c478bd9Sstevel@tonic-gate 	if (mdb_readvar(&ready, "kmem_ready") < 0)
42547c478bd9Sstevel@tonic-gate 		return (-1); /* errno is set for us */
42557c478bd9Sstevel@tonic-gate 
42567c478bd9Sstevel@tonic-gate 	return (ready);
42577c478bd9Sstevel@tonic-gate }
42587c478bd9Sstevel@tonic-gate 
42597c478bd9Sstevel@tonic-gate /*ARGSUSED*/
42607c478bd9Sstevel@tonic-gate static void
4261789d94c2Sjwadams kmem_statechange_cb(void *arg)
42627c478bd9Sstevel@tonic-gate {
4263789d94c2Sjwadams 	static int been_ready = 0;
4264789d94c2Sjwadams 
4265789d94c2Sjwadams 	leaky_cleanup(1);	/* state changes invalidate leaky state */
4266789d94c2Sjwadams 
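	/*
	 * The per-cache walkers only need to be created once, and only
	 * after kmem has become ready.
	 */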
4267789d94c2Sjwadams 	if (been_ready)
42687c478bd9Sstevel@tonic-gate 		return;
42697c478bd9Sstevel@tonic-gate 
4270789d94c2Sjwadams 	if (kmem_ready_check() <= 0)
4271789d94c2Sjwadams 		return;
42727c478bd9Sstevel@tonic-gate 
4273789d94c2Sjwadams 	been_ready = 1;
42747c478bd9Sstevel@tonic-gate 	(void) mdb_walk("kmem_cache", (mdb_walk_cb_t)kmem_init_walkers, NULL);
42757c478bd9Sstevel@tonic-gate }
42767c478bd9Sstevel@tonic-gate 
42777c478bd9Sstevel@tonic-gate void
42787c478bd9Sstevel@tonic-gate kmem_init(void)
42797c478bd9Sstevel@tonic-gate {
42807c478bd9Sstevel@tonic-gate 	mdb_walker_t w = {
42817c478bd9Sstevel@tonic-gate 		"kmem_cache", "walk list of kmem caches", kmem_cache_walk_init,
4282*b5fca8f8Stomee 		list_walk_step, list_walk_fini
42837c478bd9Sstevel@tonic-gate 	};
42847c478bd9Sstevel@tonic-gate 
42857c478bd9Sstevel@tonic-gate 	/*
42867c478bd9Sstevel@tonic-gate 	 * If kmem is ready, we need to invoke the kmem_cache walker
42877c478bd9Sstevel@tonic-gate 	 * immediately to initialize the per-cache walkers.  Walkers in the
42887c478bd9Sstevel@tonic-gate 	 * linkage structure won't be ready until _mdb_init returns, so we
42897c478bd9Sstevel@tonic-gate 	 * add this one manually.  We also register a state-change callback
42907c478bd9Sstevel@tonic-gate 	 * (and invoke it once right away) so that if kmem isn't ready yet,
42917c478bd9Sstevel@tonic-gate 	 * cache walking is deferred until it is.
42927c478bd9Sstevel@tonic-gate 	 */
42937c478bd9Sstevel@tonic-gate 	if (mdb_add_walker(&w) != 0) {
42947c478bd9Sstevel@tonic-gate 		mdb_warn("failed to add kmem_cache walker");
42957c478bd9Sstevel@tonic-gate 		return;
42967c478bd9Sstevel@tonic-gate 	}
42977c478bd9Sstevel@tonic-gate 
4298789d94c2Sjwadams 	(void) mdb_callback_add(MDB_CALLBACK_STCHG, kmem_statechange_cb, NULL);
4299789d94c2Sjwadams 	kmem_statechange_cb(NULL);
43007c478bd9Sstevel@tonic-gate }
43017c478bd9Sstevel@tonic-gate 
43027c478bd9Sstevel@tonic-gate typedef struct whatthread {
43037c478bd9Sstevel@tonic-gate 	uintptr_t	wt_target;
43047c478bd9Sstevel@tonic-gate 	int		wt_verbose;
43057c478bd9Sstevel@tonic-gate } whatthread_t;
43067c478bd9Sstevel@tonic-gate 
43077c478bd9Sstevel@tonic-gate static int
43087c478bd9Sstevel@tonic-gate whatthread_walk_thread(uintptr_t addr, const kthread_t *t, whatthread_t *w)
43097c478bd9Sstevel@tonic-gate {
43107c478bd9Sstevel@tonic-gate 	uintptr_t current, data;
43117c478bd9Sstevel@tonic-gate 
43127c478bd9Sstevel@tonic-gate 	if (t->t_stkbase == NULL)
43137c478bd9Sstevel@tonic-gate 		return (WALK_NEXT);
43147c478bd9Sstevel@tonic-gate 
43157c478bd9Sstevel@tonic-gate 	/*
43167c478bd9Sstevel@tonic-gate 	 * Warn about swapped out threads, but drive on anyway
43177c478bd9Sstevel@tonic-gate 	 */
43187c478bd9Sstevel@tonic-gate 	if (!(t->t_schedflag & TS_LOAD)) {
43197c478bd9Sstevel@tonic-gate 		mdb_warn("thread %p's stack swapped out\n", addr);
43207c478bd9Sstevel@tonic-gate 		return (WALK_NEXT);
43217c478bd9Sstevel@tonic-gate 	}
43227c478bd9Sstevel@tonic-gate 
43237c478bd9Sstevel@tonic-gate 	/*
43247c478bd9Sstevel@tonic-gate 	 * Search the thread's stack for the given pointer.  Note that it would
43257c478bd9Sstevel@tonic-gate 	 * be more efficient to follow ::kgrep's lead and read in page-sized
43267c478bd9Sstevel@tonic-gate 	 * chunks, but this routine is already fast and simple.
43277c478bd9Sstevel@tonic-gate 	 */
43287c478bd9Sstevel@tonic-gate 	for (current = (uintptr_t)t->t_stkbase; current < (uintptr_t)t->t_stk;
43297c478bd9Sstevel@tonic-gate 	    current += sizeof (uintptr_t)) {
43307c478bd9Sstevel@tonic-gate 		if (mdb_vread(&data, sizeof (data), current) == -1) {
43317c478bd9Sstevel@tonic-gate 			mdb_warn("couldn't read thread %p's stack at %p",
43327c478bd9Sstevel@tonic-gate 			    addr, current);
43337c478bd9Sstevel@tonic-gate 			return (WALK_ERR);
43347c478bd9Sstevel@tonic-gate 		}
43357c478bd9Sstevel@tonic-gate 
43367c478bd9Sstevel@tonic-gate 		if (data == w->wt_target) {
43377c478bd9Sstevel@tonic-gate 			if (w->wt_verbose) {
43387c478bd9Sstevel@tonic-gate 				mdb_printf("%p in thread %p's stack%s\n",
43397c478bd9Sstevel@tonic-gate 				    current, addr, stack_active(t, current));
43407c478bd9Sstevel@tonic-gate 			} else {
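				/*
				 * In non-verbose mode each matching thread
				 * is reported only once; stop scanning this
				 * thread's stack after the first hit.
				 */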
43417c478bd9Sstevel@tonic-gate 				mdb_printf("%#lr\n", addr);
43427c478bd9Sstevel@tonic-gate 				return (WALK_NEXT);
43437c478bd9Sstevel@tonic-gate 			}
43447c478bd9Sstevel@tonic-gate 		}
43457c478bd9Sstevel@tonic-gate 	}
43467c478bd9Sstevel@tonic-gate 
43477c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
43487c478bd9Sstevel@tonic-gate }
43497c478bd9Sstevel@tonic-gate 
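/*
 * addr::whatthread [-v]
 *
 * Print each kernel thread whose stack contains the value addr.  By default
 * only the thread pointer is printed (once per matching thread); with -v,
 * every matching stack location is shown as well.
 */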
43507c478bd9Sstevel@tonic-gate int
43517c478bd9Sstevel@tonic-gate whatthread(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
43527c478bd9Sstevel@tonic-gate {
43537c478bd9Sstevel@tonic-gate 	whatthread_t w;
43547c478bd9Sstevel@tonic-gate 
43557c478bd9Sstevel@tonic-gate 	if (!(flags & DCMD_ADDRSPEC))
43567c478bd9Sstevel@tonic-gate 		return (DCMD_USAGE);
43577c478bd9Sstevel@tonic-gate 
43587c478bd9Sstevel@tonic-gate 	w.wt_verbose = FALSE;
43597c478bd9Sstevel@tonic-gate 	w.wt_target = addr;
43607c478bd9Sstevel@tonic-gate 
43617c478bd9Sstevel@tonic-gate 	if (mdb_getopts(argc, argv,
43627c478bd9Sstevel@tonic-gate 	    'v', MDB_OPT_SETBITS, TRUE, &w.wt_verbose, NULL) != argc)
43637c478bd9Sstevel@tonic-gate 		return (DCMD_USAGE);
43647c478bd9Sstevel@tonic-gate 
43657c478bd9Sstevel@tonic-gate 	if (mdb_walk("thread", (mdb_walk_cb_t)whatthread_walk_thread, &w)
43667c478bd9Sstevel@tonic-gate 	    == -1) {
43677c478bd9Sstevel@tonic-gate 		mdb_warn("couldn't walk threads");
43687c478bd9Sstevel@tonic-gate 		return (DCMD_ERR);
43697c478bd9Sstevel@tonic-gate 	}
43707c478bd9Sstevel@tonic-gate 
43717c478bd9Sstevel@tonic-gate 	return (DCMD_OK);
43727c478bd9Sstevel@tonic-gate }