xref: /illumos-gate/usr/src/cmd/mdb/common/modules/genunix/leaky_subr.c (revision 7c478bd95313f5f23a4c958a745db2134aa0324)
1*7c478bd9Sstevel@tonic-gate /*
2*7c478bd9Sstevel@tonic-gate  * CDDL HEADER START
3*7c478bd9Sstevel@tonic-gate  *
4*7c478bd9Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
5*7c478bd9Sstevel@tonic-gate  * Common Development and Distribution License, Version 1.0 only
6*7c478bd9Sstevel@tonic-gate  * (the "License").  You may not use this file except in compliance
7*7c478bd9Sstevel@tonic-gate  * with the License.
8*7c478bd9Sstevel@tonic-gate  *
9*7c478bd9Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10*7c478bd9Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
11*7c478bd9Sstevel@tonic-gate  * See the License for the specific language governing permissions
12*7c478bd9Sstevel@tonic-gate  * and limitations under the License.
13*7c478bd9Sstevel@tonic-gate  *
14*7c478bd9Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
15*7c478bd9Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16*7c478bd9Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
17*7c478bd9Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
18*7c478bd9Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
19*7c478bd9Sstevel@tonic-gate  *
20*7c478bd9Sstevel@tonic-gate  * CDDL HEADER END
21*7c478bd9Sstevel@tonic-gate  */
22*7c478bd9Sstevel@tonic-gate /*
23*7c478bd9Sstevel@tonic-gate  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24*7c478bd9Sstevel@tonic-gate  * Use is subject to license terms.
25*7c478bd9Sstevel@tonic-gate  */
26*7c478bd9Sstevel@tonic-gate 
27*7c478bd9Sstevel@tonic-gate #pragma ident	"%Z%%M%	%I%	%E% SMI"
28*7c478bd9Sstevel@tonic-gate 
29*7c478bd9Sstevel@tonic-gate #include <mdb/mdb_param.h>
30*7c478bd9Sstevel@tonic-gate #include <mdb/mdb_modapi.h>
31*7c478bd9Sstevel@tonic-gate 
32*7c478bd9Sstevel@tonic-gate #include <sys/fs/ufs_inode.h>
33*7c478bd9Sstevel@tonic-gate #include <sys/kmem_impl.h>
34*7c478bd9Sstevel@tonic-gate #include <sys/vmem_impl.h>
35*7c478bd9Sstevel@tonic-gate #include <sys/modctl.h>
36*7c478bd9Sstevel@tonic-gate #include <sys/kobj.h>
37*7c478bd9Sstevel@tonic-gate #include <sys/kobj_impl.h>
38*7c478bd9Sstevel@tonic-gate #include <vm/seg_vn.h>
39*7c478bd9Sstevel@tonic-gate #include <vm/as.h>
40*7c478bd9Sstevel@tonic-gate #include <vm/seg_map.h>
41*7c478bd9Sstevel@tonic-gate #include <mdb/mdb_ctf.h>
42*7c478bd9Sstevel@tonic-gate 
43*7c478bd9Sstevel@tonic-gate #include "kmem.h"
44*7c478bd9Sstevel@tonic-gate #include "leaky_impl.h"
45*7c478bd9Sstevel@tonic-gate 
46*7c478bd9Sstevel@tonic-gate /*
47*7c478bd9Sstevel@tonic-gate  * This file defines the genunix target for leaky.c.  There are three types
48*7c478bd9Sstevel@tonic-gate  * of buffers in the kernel's heap:  TYPE_VMEM, for kmem_oversize allocations,
49*7c478bd9Sstevel@tonic-gate  * TYPE_KMEM, for kmem_cache_alloc() allocations with bufctl_audit_ts, and
50*7c478bd9Sstevel@tonic-gate  * TYPE_CACHE, for kmem_cache_alloc() allocation without bufctl_audit_ts.
51*7c478bd9Sstevel@tonic-gate  *
52*7c478bd9Sstevel@tonic-gate  * See "leaky_impl.h" for the target interface definition.
53*7c478bd9Sstevel@tonic-gate  */
54*7c478bd9Sstevel@tonic-gate 
/*
 * Leak types passed to leaky_add_leak():
 */
#define	TYPE_VMEM	0		/* lkb_data is the vmem_seg's size */
#define	TYPE_CACHE	1		/* lkb_cid is the bufctl's cache */
#define	TYPE_KMEM	2		/* lkb_cid is the bufctl's cache */

/*
 * The low bits of a leak_mtab_t's lkm_bufctl encode what its pointer
 * portion refers to:
 */
#define	LKM_CTL_BUFCTL	0	/* normal allocation, PTR is bufctl */
#define	LKM_CTL_VMSEG	1	/* oversize allocation, PTR is vmem_seg_t */
#define	LKM_CTL_CACHE	2	/* normal alloc, non-debug, PTR is cache */
#define	LKM_CTL_MASK	3L

/* pack/unpack a (pointer, type) pair into a single lkm_bufctl word */
#define	LKM_CTL(ptr, type)	(LKM_CTLPTR(ptr) | (type))
#define	LKM_CTLPTR(ctl)		((uintptr_t)(ctl) & ~(LKM_CTL_MASK))
#define	LKM_CTLTYPE(ctl)	((uintptr_t)(ctl) &  (LKM_CTL_MASK))

static int kmem_lite_count = 0;	/* cache of the kernel's kmem_lite_count */
69*7c478bd9Sstevel@tonic-gate 
70*7c478bd9Sstevel@tonic-gate /*ARGSUSED*/
71*7c478bd9Sstevel@tonic-gate static int
72*7c478bd9Sstevel@tonic-gate leaky_mtab(uintptr_t addr, const kmem_bufctl_audit_t *bcp, leak_mtab_t **lmp)
73*7c478bd9Sstevel@tonic-gate {
74*7c478bd9Sstevel@tonic-gate 	leak_mtab_t *lm = (*lmp)++;
75*7c478bd9Sstevel@tonic-gate 
76*7c478bd9Sstevel@tonic-gate 	lm->lkm_base = (uintptr_t)bcp->bc_addr;
77*7c478bd9Sstevel@tonic-gate 	lm->lkm_bufctl = LKM_CTL(addr, LKM_CTL_BUFCTL);
78*7c478bd9Sstevel@tonic-gate 
79*7c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
80*7c478bd9Sstevel@tonic-gate }
81*7c478bd9Sstevel@tonic-gate 
82*7c478bd9Sstevel@tonic-gate /*ARGSUSED*/
83*7c478bd9Sstevel@tonic-gate static int
84*7c478bd9Sstevel@tonic-gate leaky_mtab_addr(uintptr_t addr, void *ignored, leak_mtab_t **lmp)
85*7c478bd9Sstevel@tonic-gate {
86*7c478bd9Sstevel@tonic-gate 	leak_mtab_t *lm = (*lmp)++;
87*7c478bd9Sstevel@tonic-gate 
88*7c478bd9Sstevel@tonic-gate 	lm->lkm_base = addr;
89*7c478bd9Sstevel@tonic-gate 
90*7c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
91*7c478bd9Sstevel@tonic-gate }
92*7c478bd9Sstevel@tonic-gate 
93*7c478bd9Sstevel@tonic-gate static int
94*7c478bd9Sstevel@tonic-gate leaky_seg(uintptr_t addr, const vmem_seg_t *seg, leak_mtab_t **lmp)
95*7c478bd9Sstevel@tonic-gate {
96*7c478bd9Sstevel@tonic-gate 	leak_mtab_t *lm = (*lmp)++;
97*7c478bd9Sstevel@tonic-gate 
98*7c478bd9Sstevel@tonic-gate 	lm->lkm_base = seg->vs_start;
99*7c478bd9Sstevel@tonic-gate 	lm->lkm_limit = seg->vs_end;
100*7c478bd9Sstevel@tonic-gate 	lm->lkm_bufctl = LKM_CTL(addr, LKM_CTL_VMSEG);
101*7c478bd9Sstevel@tonic-gate 
102*7c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
103*7c478bd9Sstevel@tonic-gate }
104*7c478bd9Sstevel@tonic-gate 
105*7c478bd9Sstevel@tonic-gate static int
106*7c478bd9Sstevel@tonic-gate leaky_vmem_interested(const vmem_t *vmem)
107*7c478bd9Sstevel@tonic-gate {
108*7c478bd9Sstevel@tonic-gate 	if (strcmp(vmem->vm_name, "kmem_oversize") != 0 &&
109*7c478bd9Sstevel@tonic-gate 	    strcmp(vmem->vm_name, "static_alloc") != 0)
110*7c478bd9Sstevel@tonic-gate 		return (0);
111*7c478bd9Sstevel@tonic-gate 	return (1);
112*7c478bd9Sstevel@tonic-gate }
113*7c478bd9Sstevel@tonic-gate 
114*7c478bd9Sstevel@tonic-gate static int
115*7c478bd9Sstevel@tonic-gate leaky_vmem(uintptr_t addr, const vmem_t *vmem, leak_mtab_t **lmp)
116*7c478bd9Sstevel@tonic-gate {
117*7c478bd9Sstevel@tonic-gate 	if (!leaky_vmem_interested(vmem))
118*7c478bd9Sstevel@tonic-gate 		return (WALK_NEXT);
119*7c478bd9Sstevel@tonic-gate 
120*7c478bd9Sstevel@tonic-gate 	if (mdb_pwalk("vmem_alloc", (mdb_walk_cb_t)leaky_seg, lmp, addr) == -1)
121*7c478bd9Sstevel@tonic-gate 		mdb_warn("can't walk vmem_alloc for kmem_oversize (%p)", addr);
122*7c478bd9Sstevel@tonic-gate 
123*7c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
124*7c478bd9Sstevel@tonic-gate }
125*7c478bd9Sstevel@tonic-gate 
126*7c478bd9Sstevel@tonic-gate /*ARGSUSED*/
127*7c478bd9Sstevel@tonic-gate static int
128*7c478bd9Sstevel@tonic-gate leaky_estimate_vmem(uintptr_t addr, const vmem_t *vmem, size_t *est)
129*7c478bd9Sstevel@tonic-gate {
130*7c478bd9Sstevel@tonic-gate 	if (!leaky_vmem_interested(vmem))
131*7c478bd9Sstevel@tonic-gate 		return (WALK_NEXT);
132*7c478bd9Sstevel@tonic-gate 
133*7c478bd9Sstevel@tonic-gate 	*est += (int)(vmem->vm_kstat.vk_alloc.value.ui64 -
134*7c478bd9Sstevel@tonic-gate 	    vmem->vm_kstat.vk_free.value.ui64);
135*7c478bd9Sstevel@tonic-gate 
136*7c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
137*7c478bd9Sstevel@tonic-gate }
138*7c478bd9Sstevel@tonic-gate 
139*7c478bd9Sstevel@tonic-gate static int
140*7c478bd9Sstevel@tonic-gate leaky_interested(const kmem_cache_t *c)
141*7c478bd9Sstevel@tonic-gate {
142*7c478bd9Sstevel@tonic-gate 	vmem_t vmem;
143*7c478bd9Sstevel@tonic-gate 
144*7c478bd9Sstevel@tonic-gate 	/*
145*7c478bd9Sstevel@tonic-gate 	 * ignore HAT-related caches that happen to derive from kmem_default
146*7c478bd9Sstevel@tonic-gate 	 */
147*7c478bd9Sstevel@tonic-gate 	if (strcmp(c->cache_name, "sfmmu1_cache") == 0 ||
148*7c478bd9Sstevel@tonic-gate 	    strcmp(c->cache_name, "sf_hment_cache") == 0 ||
149*7c478bd9Sstevel@tonic-gate 	    strcmp(c->cache_name, "pa_hment_cache") == 0)
150*7c478bd9Sstevel@tonic-gate 		return (0);
151*7c478bd9Sstevel@tonic-gate 
152*7c478bd9Sstevel@tonic-gate 	if (mdb_vread(&vmem, sizeof (vmem), (uintptr_t)c->cache_arena) == -1) {
153*7c478bd9Sstevel@tonic-gate 		mdb_warn("cannot read arena %p for cache '%s'",
154*7c478bd9Sstevel@tonic-gate 		    (uintptr_t)c->cache_arena, c->cache_name);
155*7c478bd9Sstevel@tonic-gate 		return (0);
156*7c478bd9Sstevel@tonic-gate 	}
157*7c478bd9Sstevel@tonic-gate 
158*7c478bd9Sstevel@tonic-gate 	/*
159*7c478bd9Sstevel@tonic-gate 	 * If this cache isn't allocating from the kmem_default,
160*7c478bd9Sstevel@tonic-gate 	 * kmem_firewall, or static vmem arenas, we're not interested.
161*7c478bd9Sstevel@tonic-gate 	 */
162*7c478bd9Sstevel@tonic-gate 	if (strcmp(vmem.vm_name, "kmem_default") != 0 &&
163*7c478bd9Sstevel@tonic-gate 	    strcmp(vmem.vm_name, "kmem_firewall") != 0 &&
164*7c478bd9Sstevel@tonic-gate 	    strcmp(vmem.vm_name, "static") != 0)
165*7c478bd9Sstevel@tonic-gate 		return (0);
166*7c478bd9Sstevel@tonic-gate 
167*7c478bd9Sstevel@tonic-gate 	return (1);
168*7c478bd9Sstevel@tonic-gate }
169*7c478bd9Sstevel@tonic-gate 
170*7c478bd9Sstevel@tonic-gate static int
171*7c478bd9Sstevel@tonic-gate leaky_estimate(uintptr_t addr, const kmem_cache_t *c, size_t *est)
172*7c478bd9Sstevel@tonic-gate {
173*7c478bd9Sstevel@tonic-gate 	if (!leaky_interested(c))
174*7c478bd9Sstevel@tonic-gate 		return (WALK_NEXT);
175*7c478bd9Sstevel@tonic-gate 
176*7c478bd9Sstevel@tonic-gate 	*est += kmem_estimate_allocated(addr, c);
177*7c478bd9Sstevel@tonic-gate 
178*7c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
179*7c478bd9Sstevel@tonic-gate }
180*7c478bd9Sstevel@tonic-gate 
181*7c478bd9Sstevel@tonic-gate /*ARGSUSED*/
182*7c478bd9Sstevel@tonic-gate static int
183*7c478bd9Sstevel@tonic-gate leaky_cache(uintptr_t addr, const kmem_cache_t *c, leak_mtab_t **lmp)
184*7c478bd9Sstevel@tonic-gate {
185*7c478bd9Sstevel@tonic-gate 	leak_mtab_t *lm = *lmp;
186*7c478bd9Sstevel@tonic-gate 	mdb_walk_cb_t cb;
187*7c478bd9Sstevel@tonic-gate 	const char *walk;
188*7c478bd9Sstevel@tonic-gate 	int audit = (c->cache_flags & KMF_AUDIT);
189*7c478bd9Sstevel@tonic-gate 
190*7c478bd9Sstevel@tonic-gate 	if (!leaky_interested(c))
191*7c478bd9Sstevel@tonic-gate 		return (WALK_NEXT);
192*7c478bd9Sstevel@tonic-gate 
193*7c478bd9Sstevel@tonic-gate 	if (audit) {
194*7c478bd9Sstevel@tonic-gate 		walk = "bufctl";
195*7c478bd9Sstevel@tonic-gate 		cb = (mdb_walk_cb_t)leaky_mtab;
196*7c478bd9Sstevel@tonic-gate 	} else {
197*7c478bd9Sstevel@tonic-gate 		walk = "kmem";
198*7c478bd9Sstevel@tonic-gate 		cb = (mdb_walk_cb_t)leaky_mtab_addr;
199*7c478bd9Sstevel@tonic-gate 	}
200*7c478bd9Sstevel@tonic-gate 	if (mdb_pwalk(walk, cb, lmp, addr) == -1) {
201*7c478bd9Sstevel@tonic-gate 		mdb_warn("can't walk kmem for cache %p (%s)", addr,
202*7c478bd9Sstevel@tonic-gate 		    c->cache_name);
203*7c478bd9Sstevel@tonic-gate 		return (WALK_DONE);
204*7c478bd9Sstevel@tonic-gate 	}
205*7c478bd9Sstevel@tonic-gate 
206*7c478bd9Sstevel@tonic-gate 	for (; lm < *lmp; lm++) {
207*7c478bd9Sstevel@tonic-gate 		lm->lkm_limit = lm->lkm_base + c->cache_bufsize;
208*7c478bd9Sstevel@tonic-gate 		if (!audit)
209*7c478bd9Sstevel@tonic-gate 			lm->lkm_bufctl = LKM_CTL(addr, LKM_CTL_CACHE);
210*7c478bd9Sstevel@tonic-gate 	}
211*7c478bd9Sstevel@tonic-gate 
212*7c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
213*7c478bd9Sstevel@tonic-gate }
214*7c478bd9Sstevel@tonic-gate 
215*7c478bd9Sstevel@tonic-gate /*ARGSUSED*/
216*7c478bd9Sstevel@tonic-gate static int
217*7c478bd9Sstevel@tonic-gate leaky_scan_buffer(uintptr_t addr, const void *ignored, const kmem_cache_t *c)
218*7c478bd9Sstevel@tonic-gate {
219*7c478bd9Sstevel@tonic-gate 	leaky_grep(addr, c->cache_bufsize);
220*7c478bd9Sstevel@tonic-gate 
221*7c478bd9Sstevel@tonic-gate 	/*
222*7c478bd9Sstevel@tonic-gate 	 * free, constructed KMF_LITE buffers keep their first uint64_t in
223*7c478bd9Sstevel@tonic-gate 	 * their buftag's redzone.
224*7c478bd9Sstevel@tonic-gate 	 */
225*7c478bd9Sstevel@tonic-gate 	if (c->cache_flags & KMF_LITE) {
226*7c478bd9Sstevel@tonic-gate 		/* LINTED alignment */
227*7c478bd9Sstevel@tonic-gate 		kmem_buftag_t *btp = KMEM_BUFTAG(c, addr);
228*7c478bd9Sstevel@tonic-gate 		leaky_grep((uintptr_t)&btp->bt_redzone,
229*7c478bd9Sstevel@tonic-gate 		    sizeof (btp->bt_redzone));
230*7c478bd9Sstevel@tonic-gate 	}
231*7c478bd9Sstevel@tonic-gate 
232*7c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
233*7c478bd9Sstevel@tonic-gate }
234*7c478bd9Sstevel@tonic-gate 
235*7c478bd9Sstevel@tonic-gate /*ARGSUSED*/
236*7c478bd9Sstevel@tonic-gate static int
237*7c478bd9Sstevel@tonic-gate leaky_scan_cache(uintptr_t addr, const kmem_cache_t *c, void *ignored)
238*7c478bd9Sstevel@tonic-gate {
239*7c478bd9Sstevel@tonic-gate 	if (!leaky_interested(c))
240*7c478bd9Sstevel@tonic-gate 		return (WALK_NEXT);
241*7c478bd9Sstevel@tonic-gate 
242*7c478bd9Sstevel@tonic-gate 	/*
243*7c478bd9Sstevel@tonic-gate 	 * Scan all of the free, constructed buffers, since they may have
244*7c478bd9Sstevel@tonic-gate 	 * pointers to allocated objects.
245*7c478bd9Sstevel@tonic-gate 	 */
246*7c478bd9Sstevel@tonic-gate 	if (mdb_pwalk("freemem_constructed",
247*7c478bd9Sstevel@tonic-gate 	    (mdb_walk_cb_t)leaky_scan_buffer, (void *)c, addr) == -1) {
248*7c478bd9Sstevel@tonic-gate 		mdb_warn("can't walk freemem_constructed for cache %p (%s)",
249*7c478bd9Sstevel@tonic-gate 		    addr, c->cache_name);
250*7c478bd9Sstevel@tonic-gate 		return (WALK_DONE);
251*7c478bd9Sstevel@tonic-gate 	}
252*7c478bd9Sstevel@tonic-gate 
253*7c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
254*7c478bd9Sstevel@tonic-gate }
255*7c478bd9Sstevel@tonic-gate 
256*7c478bd9Sstevel@tonic-gate /*ARGSUSED*/
257*7c478bd9Sstevel@tonic-gate static int
258*7c478bd9Sstevel@tonic-gate leaky_modctl(uintptr_t addr, const struct modctl *m, int *ignored)
259*7c478bd9Sstevel@tonic-gate {
260*7c478bd9Sstevel@tonic-gate 	struct module mod;
261*7c478bd9Sstevel@tonic-gate 	char name[MODMAXNAMELEN];
262*7c478bd9Sstevel@tonic-gate 
263*7c478bd9Sstevel@tonic-gate 	if (m->mod_mp == NULL)
264*7c478bd9Sstevel@tonic-gate 		return (WALK_NEXT);
265*7c478bd9Sstevel@tonic-gate 
266*7c478bd9Sstevel@tonic-gate 	if (mdb_vread(&mod, sizeof (mod), (uintptr_t)m->mod_mp) == -1) {
267*7c478bd9Sstevel@tonic-gate 		mdb_warn("couldn't read modctl %p's module", addr);
268*7c478bd9Sstevel@tonic-gate 		return (WALK_NEXT);
269*7c478bd9Sstevel@tonic-gate 	}
270*7c478bd9Sstevel@tonic-gate 
271*7c478bd9Sstevel@tonic-gate 	if (mdb_readstr(name, sizeof (name), (uintptr_t)m->mod_modname) == -1)
272*7c478bd9Sstevel@tonic-gate 		(void) mdb_snprintf(name, sizeof (name), "0x%p", addr);
273*7c478bd9Sstevel@tonic-gate 
274*7c478bd9Sstevel@tonic-gate 	leaky_grep((uintptr_t)m->mod_mp, sizeof (struct module));
275*7c478bd9Sstevel@tonic-gate 	leaky_grep((uintptr_t)mod.data, mod.data_size);
276*7c478bd9Sstevel@tonic-gate 	leaky_grep((uintptr_t)mod.bss, mod.bss_size);
277*7c478bd9Sstevel@tonic-gate 
278*7c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
279*7c478bd9Sstevel@tonic-gate }
280*7c478bd9Sstevel@tonic-gate 
281*7c478bd9Sstevel@tonic-gate static int
282*7c478bd9Sstevel@tonic-gate leaky_thread(uintptr_t addr, const kthread_t *t, unsigned long *pagesize)
283*7c478bd9Sstevel@tonic-gate {
284*7c478bd9Sstevel@tonic-gate 	uintptr_t size, base = (uintptr_t)t->t_stkbase;
285*7c478bd9Sstevel@tonic-gate 	uintptr_t stk = (uintptr_t)t->t_stk;
286*7c478bd9Sstevel@tonic-gate 
287*7c478bd9Sstevel@tonic-gate 	/*
288*7c478bd9Sstevel@tonic-gate 	 * If this thread isn't in memory, we can't look at its stack.  This
289*7c478bd9Sstevel@tonic-gate 	 * may result in false positives, so we print a warning.
290*7c478bd9Sstevel@tonic-gate 	 */
291*7c478bd9Sstevel@tonic-gate 	if (!(t->t_schedflag & TS_LOAD)) {
292*7c478bd9Sstevel@tonic-gate 		mdb_printf("findleaks: thread %p's stack swapped out; "
293*7c478bd9Sstevel@tonic-gate 		    "false positives possible\n", addr);
294*7c478bd9Sstevel@tonic-gate 		return (WALK_NEXT);
295*7c478bd9Sstevel@tonic-gate 	}
296*7c478bd9Sstevel@tonic-gate 
297*7c478bd9Sstevel@tonic-gate 	if (t->t_state != TS_FREE)
298*7c478bd9Sstevel@tonic-gate 		leaky_grep(base, stk - base);
299*7c478bd9Sstevel@tonic-gate 
300*7c478bd9Sstevel@tonic-gate 	/*
301*7c478bd9Sstevel@tonic-gate 	 * There is always gunk hanging out between t_stk and the page
302*7c478bd9Sstevel@tonic-gate 	 * boundary.  If this thread structure wasn't kmem allocated,
303*7c478bd9Sstevel@tonic-gate 	 * this will include the thread structure itself.  If the thread
304*7c478bd9Sstevel@tonic-gate 	 * _is_ kmem allocated, we'll be able to get to it via allthreads.
305*7c478bd9Sstevel@tonic-gate 	 */
306*7c478bd9Sstevel@tonic-gate 	size = *pagesize - (stk & (*pagesize - 1));
307*7c478bd9Sstevel@tonic-gate 
308*7c478bd9Sstevel@tonic-gate 	leaky_grep(stk, size);
309*7c478bd9Sstevel@tonic-gate 
310*7c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
311*7c478bd9Sstevel@tonic-gate }
312*7c478bd9Sstevel@tonic-gate 
313*7c478bd9Sstevel@tonic-gate /*ARGSUSED*/
314*7c478bd9Sstevel@tonic-gate static int
315*7c478bd9Sstevel@tonic-gate leaky_kstat(uintptr_t addr, vmem_seg_t *seg, void *ignored)
316*7c478bd9Sstevel@tonic-gate {
317*7c478bd9Sstevel@tonic-gate 	leaky_grep(seg->vs_start, seg->vs_end - seg->vs_start);
318*7c478bd9Sstevel@tonic-gate 
319*7c478bd9Sstevel@tonic-gate 	return (WALK_NEXT);
320*7c478bd9Sstevel@tonic-gate }
321*7c478bd9Sstevel@tonic-gate 
322*7c478bd9Sstevel@tonic-gate static void
323*7c478bd9Sstevel@tonic-gate leaky_kludge(void)
324*7c478bd9Sstevel@tonic-gate {
325*7c478bd9Sstevel@tonic-gate 	GElf_Sym sym;
326*7c478bd9Sstevel@tonic-gate 	mdb_ctf_id_t id, rid;
327*7c478bd9Sstevel@tonic-gate 
328*7c478bd9Sstevel@tonic-gate 	int max_mem_nodes;
329*7c478bd9Sstevel@tonic-gate 	uintptr_t *counters;
330*7c478bd9Sstevel@tonic-gate 	size_t ncounters;
331*7c478bd9Sstevel@tonic-gate 	ssize_t hwpm_size;
332*7c478bd9Sstevel@tonic-gate 	int idx;
333*7c478bd9Sstevel@tonic-gate 
334*7c478bd9Sstevel@tonic-gate 	/*
335*7c478bd9Sstevel@tonic-gate 	 * Because of DR, the page counters (which live in the kmem64 segment)
336*7c478bd9Sstevel@tonic-gate 	 * can point into kmem_alloc()ed memory.  The "page_counters" array
337*7c478bd9Sstevel@tonic-gate 	 * is multi-dimensional, and each entry points to an array of
338*7c478bd9Sstevel@tonic-gate 	 * "hw_page_map_t"s which is "max_mem_nodes" in length.
339*7c478bd9Sstevel@tonic-gate 	 *
340*7c478bd9Sstevel@tonic-gate 	 * To keep this from having too much grotty knowledge of internals,
341*7c478bd9Sstevel@tonic-gate 	 * we use CTF data to get the size of the structure.  For simplicity,
342*7c478bd9Sstevel@tonic-gate 	 * we treat the page_counters array as a flat array of pointers, and
343*7c478bd9Sstevel@tonic-gate 	 * use its size to determine how much to scan.  Unused entries will
344*7c478bd9Sstevel@tonic-gate 	 * be NULL.
345*7c478bd9Sstevel@tonic-gate 	 */
346*7c478bd9Sstevel@tonic-gate 	if (mdb_lookup_by_name("page_counters", &sym) == -1) {
347*7c478bd9Sstevel@tonic-gate 		mdb_warn("unable to lookup page_counters");
348*7c478bd9Sstevel@tonic-gate 		return;
349*7c478bd9Sstevel@tonic-gate 	}
350*7c478bd9Sstevel@tonic-gate 
351*7c478bd9Sstevel@tonic-gate 	if (mdb_readvar(&max_mem_nodes, "max_mem_nodes") == -1) {
352*7c478bd9Sstevel@tonic-gate 		mdb_warn("unable to read max_mem_nodes");
353*7c478bd9Sstevel@tonic-gate 		return;
354*7c478bd9Sstevel@tonic-gate 	}
355*7c478bd9Sstevel@tonic-gate 
356*7c478bd9Sstevel@tonic-gate 	if (mdb_ctf_lookup_by_name("unix`hw_page_map_t", &id) == -1 ||
357*7c478bd9Sstevel@tonic-gate 	    mdb_ctf_type_resolve(id, &rid) == -1 ||
358*7c478bd9Sstevel@tonic-gate 	    (hwpm_size = mdb_ctf_type_size(rid)) < 0) {
359*7c478bd9Sstevel@tonic-gate 		mdb_warn("unable to lookup unix`hw_page_map_t");
360*7c478bd9Sstevel@tonic-gate 		return;
361*7c478bd9Sstevel@tonic-gate 	}
362*7c478bd9Sstevel@tonic-gate 
363*7c478bd9Sstevel@tonic-gate 	counters = mdb_alloc(sym.st_size, UM_SLEEP | UM_GC);
364*7c478bd9Sstevel@tonic-gate 
365*7c478bd9Sstevel@tonic-gate 	if (mdb_vread(counters, sym.st_size, (uintptr_t)sym.st_value) == -1) {
366*7c478bd9Sstevel@tonic-gate 		mdb_warn("unable to read page_counters");
367*7c478bd9Sstevel@tonic-gate 		return;
368*7c478bd9Sstevel@tonic-gate 	}
369*7c478bd9Sstevel@tonic-gate 
370*7c478bd9Sstevel@tonic-gate 	ncounters = sym.st_size / sizeof (counters);
371*7c478bd9Sstevel@tonic-gate 
372*7c478bd9Sstevel@tonic-gate 	for (idx = 0; idx < ncounters; idx++) {
373*7c478bd9Sstevel@tonic-gate 		uintptr_t addr = counters[idx];
374*7c478bd9Sstevel@tonic-gate 		if (addr != 0)
375*7c478bd9Sstevel@tonic-gate 			leaky_grep(addr, hwpm_size * max_mem_nodes);
376*7c478bd9Sstevel@tonic-gate 	}
377*7c478bd9Sstevel@tonic-gate }
378*7c478bd9Sstevel@tonic-gate 
379*7c478bd9Sstevel@tonic-gate int
380*7c478bd9Sstevel@tonic-gate leaky_subr_estimate(size_t *estp)
381*7c478bd9Sstevel@tonic-gate {
382*7c478bd9Sstevel@tonic-gate 	uintptr_t panicstr;
383*7c478bd9Sstevel@tonic-gate 	int state;
384*7c478bd9Sstevel@tonic-gate 
385*7c478bd9Sstevel@tonic-gate 	if ((state = mdb_get_state()) == MDB_STATE_RUNNING) {
386*7c478bd9Sstevel@tonic-gate 		mdb_warn("findleaks: can only be run on a system "
387*7c478bd9Sstevel@tonic-gate 		    "dump or under kmdb; see dumpadm(1M)\n");
388*7c478bd9Sstevel@tonic-gate 		return (DCMD_ERR);
389*7c478bd9Sstevel@tonic-gate 	}
390*7c478bd9Sstevel@tonic-gate 
391*7c478bd9Sstevel@tonic-gate 	if (mdb_readvar(&panicstr, "panicstr") == -1) {
392*7c478bd9Sstevel@tonic-gate 		mdb_warn("can't read variable 'panicstr'");
393*7c478bd9Sstevel@tonic-gate 		return (DCMD_ERR);
394*7c478bd9Sstevel@tonic-gate 	}
395*7c478bd9Sstevel@tonic-gate 
396*7c478bd9Sstevel@tonic-gate 	if (state != MDB_STATE_STOPPED && panicstr == NULL) {
397*7c478bd9Sstevel@tonic-gate 		mdb_warn("findleaks: cannot be run on a live dump.\n");
398*7c478bd9Sstevel@tonic-gate 		return (DCMD_ERR);
399*7c478bd9Sstevel@tonic-gate 	}
400*7c478bd9Sstevel@tonic-gate 
401*7c478bd9Sstevel@tonic-gate 	if (mdb_walk("kmem_cache", (mdb_walk_cb_t)leaky_estimate, estp) == -1) {
402*7c478bd9Sstevel@tonic-gate 		mdb_warn("couldn't walk 'kmem_cache'");
403*7c478bd9Sstevel@tonic-gate 		return (DCMD_ERR);
404*7c478bd9Sstevel@tonic-gate 	}
405*7c478bd9Sstevel@tonic-gate 
406*7c478bd9Sstevel@tonic-gate 	if (*estp == 0) {
407*7c478bd9Sstevel@tonic-gate 		mdb_warn("findleaks: no buffers found\n");
408*7c478bd9Sstevel@tonic-gate 		return (DCMD_ERR);
409*7c478bd9Sstevel@tonic-gate 	}
410*7c478bd9Sstevel@tonic-gate 
411*7c478bd9Sstevel@tonic-gate 	if (mdb_walk("vmem", (mdb_walk_cb_t)leaky_estimate_vmem, estp) == -1) {
412*7c478bd9Sstevel@tonic-gate 		mdb_warn("couldn't walk 'vmem'");
413*7c478bd9Sstevel@tonic-gate 		return (DCMD_ERR);
414*7c478bd9Sstevel@tonic-gate 	}
415*7c478bd9Sstevel@tonic-gate 
416*7c478bd9Sstevel@tonic-gate 	return (DCMD_OK);
417*7c478bd9Sstevel@tonic-gate }
418*7c478bd9Sstevel@tonic-gate 
419*7c478bd9Sstevel@tonic-gate int
420*7c478bd9Sstevel@tonic-gate leaky_subr_fill(leak_mtab_t **lmpp)
421*7c478bd9Sstevel@tonic-gate {
422*7c478bd9Sstevel@tonic-gate 	if (mdb_walk("vmem", (mdb_walk_cb_t)leaky_vmem, lmpp) == -1) {
423*7c478bd9Sstevel@tonic-gate 		mdb_warn("couldn't walk 'vmem'");
424*7c478bd9Sstevel@tonic-gate 		return (DCMD_ERR);
425*7c478bd9Sstevel@tonic-gate 	}
426*7c478bd9Sstevel@tonic-gate 
427*7c478bd9Sstevel@tonic-gate 	if (mdb_walk("kmem_cache", (mdb_walk_cb_t)leaky_cache, lmpp) == -1) {
428*7c478bd9Sstevel@tonic-gate 		mdb_warn("couldn't walk 'kmem_cache'");
429*7c478bd9Sstevel@tonic-gate 		return (DCMD_ERR);
430*7c478bd9Sstevel@tonic-gate 	}
431*7c478bd9Sstevel@tonic-gate 
432*7c478bd9Sstevel@tonic-gate 	if (mdb_readvar(&kmem_lite_count, "kmem_lite_count") == -1) {
433*7c478bd9Sstevel@tonic-gate 		mdb_warn("couldn't read 'kmem_lite_count'");
434*7c478bd9Sstevel@tonic-gate 		kmem_lite_count = 0;
435*7c478bd9Sstevel@tonic-gate 	} else if (kmem_lite_count > 16) {
436*7c478bd9Sstevel@tonic-gate 		mdb_warn("kmem_lite_count nonsensical, ignored\n");
437*7c478bd9Sstevel@tonic-gate 		kmem_lite_count = 0;
438*7c478bd9Sstevel@tonic-gate 	}
439*7c478bd9Sstevel@tonic-gate 
440*7c478bd9Sstevel@tonic-gate 	return (DCMD_OK);
441*7c478bd9Sstevel@tonic-gate }
442*7c478bd9Sstevel@tonic-gate 
443*7c478bd9Sstevel@tonic-gate int
444*7c478bd9Sstevel@tonic-gate leaky_subr_run(void)
445*7c478bd9Sstevel@tonic-gate {
446*7c478bd9Sstevel@tonic-gate 	unsigned long ps;
447*7c478bd9Sstevel@tonic-gate 	uintptr_t kstat_arena;
448*7c478bd9Sstevel@tonic-gate 
449*7c478bd9Sstevel@tonic-gate 	if (mdb_readvar(&ps, "_pagesize") == -1) {
450*7c478bd9Sstevel@tonic-gate 		mdb_warn("couldn't read '_pagesize'");
451*7c478bd9Sstevel@tonic-gate 		return (DCMD_ERR);
452*7c478bd9Sstevel@tonic-gate 	}
453*7c478bd9Sstevel@tonic-gate 
454*7c478bd9Sstevel@tonic-gate 	leaky_kludge();
455*7c478bd9Sstevel@tonic-gate 
456*7c478bd9Sstevel@tonic-gate 	if (mdb_walk("kmem_cache", (mdb_walk_cb_t)leaky_scan_cache,
457*7c478bd9Sstevel@tonic-gate 	    NULL) == -1) {
458*7c478bd9Sstevel@tonic-gate 		mdb_warn("couldn't walk 'kmem_cache'");
459*7c478bd9Sstevel@tonic-gate 		return (DCMD_ERR);
460*7c478bd9Sstevel@tonic-gate 	}
461*7c478bd9Sstevel@tonic-gate 
462*7c478bd9Sstevel@tonic-gate 	if (mdb_walk("modctl", (mdb_walk_cb_t)leaky_modctl, NULL) == -1) {
463*7c478bd9Sstevel@tonic-gate 		mdb_warn("couldn't walk 'modctl'");
464*7c478bd9Sstevel@tonic-gate 		return (DCMD_ERR);
465*7c478bd9Sstevel@tonic-gate 	}
466*7c478bd9Sstevel@tonic-gate 
467*7c478bd9Sstevel@tonic-gate 	if (mdb_walk("thread", (mdb_walk_cb_t)leaky_thread, &ps) == -1) {
468*7c478bd9Sstevel@tonic-gate 		mdb_warn("couldn't walk 'thread'");
469*7c478bd9Sstevel@tonic-gate 		return (DCMD_ERR);
470*7c478bd9Sstevel@tonic-gate 	}
471*7c478bd9Sstevel@tonic-gate 
472*7c478bd9Sstevel@tonic-gate 	if (mdb_walk("deathrow", (mdb_walk_cb_t)leaky_thread, &ps) == -1) {
473*7c478bd9Sstevel@tonic-gate 		mdb_warn("couldn't walk 'deathrow'");
474*7c478bd9Sstevel@tonic-gate 		return (DCMD_ERR);
475*7c478bd9Sstevel@tonic-gate 	}
476*7c478bd9Sstevel@tonic-gate 
477*7c478bd9Sstevel@tonic-gate 	if (mdb_readvar(&kstat_arena, "kstat_arena") == -1) {
478*7c478bd9Sstevel@tonic-gate 		mdb_warn("couldn't read 'kstat_arena'");
479*7c478bd9Sstevel@tonic-gate 		return (DCMD_ERR);
480*7c478bd9Sstevel@tonic-gate 	}
481*7c478bd9Sstevel@tonic-gate 
482*7c478bd9Sstevel@tonic-gate 	if (mdb_pwalk("vmem_alloc", (mdb_walk_cb_t)leaky_kstat,
483*7c478bd9Sstevel@tonic-gate 	    NULL, kstat_arena) == -1) {
484*7c478bd9Sstevel@tonic-gate 		mdb_warn("couldn't walk kstat vmem arena");
485*7c478bd9Sstevel@tonic-gate 		return (DCMD_ERR);
486*7c478bd9Sstevel@tonic-gate 	}
487*7c478bd9Sstevel@tonic-gate 
488*7c478bd9Sstevel@tonic-gate 	return (DCMD_OK);
489*7c478bd9Sstevel@tonic-gate }
490*7c478bd9Sstevel@tonic-gate 
491*7c478bd9Sstevel@tonic-gate void
492*7c478bd9Sstevel@tonic-gate leaky_subr_add_leak(leak_mtab_t *lmp)
493*7c478bd9Sstevel@tonic-gate {
494*7c478bd9Sstevel@tonic-gate 	uintptr_t addr = LKM_CTLPTR(lmp->lkm_bufctl);
495*7c478bd9Sstevel@tonic-gate 	size_t depth;
496*7c478bd9Sstevel@tonic-gate 
497*7c478bd9Sstevel@tonic-gate 	switch (LKM_CTLTYPE(lmp->lkm_bufctl)) {
498*7c478bd9Sstevel@tonic-gate 	case LKM_CTL_VMSEG: {
499*7c478bd9Sstevel@tonic-gate 		vmem_seg_t vs;
500*7c478bd9Sstevel@tonic-gate 
501*7c478bd9Sstevel@tonic-gate 		if (mdb_vread(&vs, sizeof (vs), addr) == -1) {
502*7c478bd9Sstevel@tonic-gate 			mdb_warn("couldn't read leaked vmem_seg at addr %p",
503*7c478bd9Sstevel@tonic-gate 			    addr);
504*7c478bd9Sstevel@tonic-gate 			return;
505*7c478bd9Sstevel@tonic-gate 		}
506*7c478bd9Sstevel@tonic-gate 		depth = MIN(vs.vs_depth, VMEM_STACK_DEPTH);
507*7c478bd9Sstevel@tonic-gate 
508*7c478bd9Sstevel@tonic-gate 		leaky_add_leak(TYPE_VMEM, addr, vs.vs_start, vs.vs_timestamp,
509*7c478bd9Sstevel@tonic-gate 		    vs.vs_stack, depth, 0, (vs.vs_end - vs.vs_start));
510*7c478bd9Sstevel@tonic-gate 		break;
511*7c478bd9Sstevel@tonic-gate 	}
512*7c478bd9Sstevel@tonic-gate 	case LKM_CTL_BUFCTL: {
513*7c478bd9Sstevel@tonic-gate 		kmem_bufctl_audit_t bc;
514*7c478bd9Sstevel@tonic-gate 
515*7c478bd9Sstevel@tonic-gate 		if (mdb_vread(&bc, sizeof (bc), addr) == -1) {
516*7c478bd9Sstevel@tonic-gate 			mdb_warn("couldn't read leaked bufctl at addr %p",
517*7c478bd9Sstevel@tonic-gate 			    addr);
518*7c478bd9Sstevel@tonic-gate 			return;
519*7c478bd9Sstevel@tonic-gate 		}
520*7c478bd9Sstevel@tonic-gate 
521*7c478bd9Sstevel@tonic-gate 		depth = MIN(bc.bc_depth, KMEM_STACK_DEPTH);
522*7c478bd9Sstevel@tonic-gate 
523*7c478bd9Sstevel@tonic-gate 		/*
524*7c478bd9Sstevel@tonic-gate 		 * The top of the stack will be kmem_cache_alloc+offset.
525*7c478bd9Sstevel@tonic-gate 		 * Since the offset in kmem_cache_alloc() isn't interesting
526*7c478bd9Sstevel@tonic-gate 		 * we skip that frame for the purposes of uniquifying stacks.
527*7c478bd9Sstevel@tonic-gate 		 *
528*7c478bd9Sstevel@tonic-gate 		 * We also use the cache pointer as the leaks's cid, to
529*7c478bd9Sstevel@tonic-gate 		 * prevent the coalescing of leaks from different caches.
530*7c478bd9Sstevel@tonic-gate 		 */
531*7c478bd9Sstevel@tonic-gate 		if (depth > 0)
532*7c478bd9Sstevel@tonic-gate 			depth--;
533*7c478bd9Sstevel@tonic-gate 		leaky_add_leak(TYPE_KMEM, addr, (uintptr_t)bc.bc_addr,
534*7c478bd9Sstevel@tonic-gate 		    bc.bc_timestamp, bc.bc_stack + 1, depth,
535*7c478bd9Sstevel@tonic-gate 		    (uintptr_t)bc.bc_cache, 0);
536*7c478bd9Sstevel@tonic-gate 		break;
537*7c478bd9Sstevel@tonic-gate 	}
538*7c478bd9Sstevel@tonic-gate 	case LKM_CTL_CACHE: {
539*7c478bd9Sstevel@tonic-gate 		kmem_cache_t cache;
540*7c478bd9Sstevel@tonic-gate 		kmem_buftag_lite_t bt;
541*7c478bd9Sstevel@tonic-gate 		pc_t caller;
542*7c478bd9Sstevel@tonic-gate 		int depth = 0;
543*7c478bd9Sstevel@tonic-gate 
544*7c478bd9Sstevel@tonic-gate 		/*
545*7c478bd9Sstevel@tonic-gate 		 * For KMF_LITE caches, we can get the allocation PC
546*7c478bd9Sstevel@tonic-gate 		 * out of the buftag structure.
547*7c478bd9Sstevel@tonic-gate 		 */
548*7c478bd9Sstevel@tonic-gate 		if (mdb_vread(&cache, sizeof (cache), addr) != -1 &&
549*7c478bd9Sstevel@tonic-gate 		    (cache.cache_flags & KMF_LITE) &&
550*7c478bd9Sstevel@tonic-gate 		    kmem_lite_count > 0 &&
551*7c478bd9Sstevel@tonic-gate 		    mdb_vread(&bt, sizeof (bt),
552*7c478bd9Sstevel@tonic-gate 		    /* LINTED alignment */
553*7c478bd9Sstevel@tonic-gate 		    (uintptr_t)KMEM_BUFTAG(&cache, lmp->lkm_base)) != -1) {
554*7c478bd9Sstevel@tonic-gate 			caller = bt.bt_history[0];
555*7c478bd9Sstevel@tonic-gate 			depth = 1;
556*7c478bd9Sstevel@tonic-gate 		}
557*7c478bd9Sstevel@tonic-gate 		leaky_add_leak(TYPE_CACHE, lmp->lkm_base, lmp->lkm_base, 0,
558*7c478bd9Sstevel@tonic-gate 		    &caller, depth, addr, addr);
559*7c478bd9Sstevel@tonic-gate 		break;
560*7c478bd9Sstevel@tonic-gate 	}
561*7c478bd9Sstevel@tonic-gate 	default:
562*7c478bd9Sstevel@tonic-gate 		mdb_warn("internal error: invalid leak_bufctl_t\n");
563*7c478bd9Sstevel@tonic-gate 		break;
564*7c478bd9Sstevel@tonic-gate 	}
565*7c478bd9Sstevel@tonic-gate }
566*7c478bd9Sstevel@tonic-gate 
567*7c478bd9Sstevel@tonic-gate static void
568*7c478bd9Sstevel@tonic-gate leaky_subr_caller(const pc_t *stack, uint_t depth, char *buf, uintptr_t *pcp)
569*7c478bd9Sstevel@tonic-gate {
570*7c478bd9Sstevel@tonic-gate 	int i;
571*7c478bd9Sstevel@tonic-gate 	GElf_Sym sym;
572*7c478bd9Sstevel@tonic-gate 	uintptr_t pc = 0;
573*7c478bd9Sstevel@tonic-gate 
574*7c478bd9Sstevel@tonic-gate 	buf[0] = 0;
575*7c478bd9Sstevel@tonic-gate 
576*7c478bd9Sstevel@tonic-gate 	for (i = 0; i < depth; i++) {
577*7c478bd9Sstevel@tonic-gate 		pc = stack[i];
578*7c478bd9Sstevel@tonic-gate 
579*7c478bd9Sstevel@tonic-gate 		if (mdb_lookup_by_addr(pc,
580*7c478bd9Sstevel@tonic-gate 		    MDB_SYM_FUZZY, buf, MDB_SYM_NAMLEN, &sym) == -1)
581*7c478bd9Sstevel@tonic-gate 			continue;
582*7c478bd9Sstevel@tonic-gate 		if (strncmp(buf, "kmem_", 5) == 0)
583*7c478bd9Sstevel@tonic-gate 			continue;
584*7c478bd9Sstevel@tonic-gate 		if (strncmp(buf, "vmem_", 5) == 0)
585*7c478bd9Sstevel@tonic-gate 			continue;
586*7c478bd9Sstevel@tonic-gate 		*pcp = pc;
587*7c478bd9Sstevel@tonic-gate 
588*7c478bd9Sstevel@tonic-gate 		return;
589*7c478bd9Sstevel@tonic-gate 	}
590*7c478bd9Sstevel@tonic-gate 
591*7c478bd9Sstevel@tonic-gate 	/*
592*7c478bd9Sstevel@tonic-gate 	 * We're only here if the entire call chain begins with "kmem_";
593*7c478bd9Sstevel@tonic-gate 	 * this shouldn't happen, but we'll just use the last caller.
594*7c478bd9Sstevel@tonic-gate 	 */
595*7c478bd9Sstevel@tonic-gate 	*pcp = pc;
596*7c478bd9Sstevel@tonic-gate }
597*7c478bd9Sstevel@tonic-gate 
598*7c478bd9Sstevel@tonic-gate int
599*7c478bd9Sstevel@tonic-gate leaky_subr_bufctl_cmp(const leak_bufctl_t *lhs, const leak_bufctl_t *rhs)
600*7c478bd9Sstevel@tonic-gate {
601*7c478bd9Sstevel@tonic-gate 	char lbuf[MDB_SYM_NAMLEN], rbuf[MDB_SYM_NAMLEN];
602*7c478bd9Sstevel@tonic-gate 	uintptr_t lcaller, rcaller;
603*7c478bd9Sstevel@tonic-gate 	int rval;
604*7c478bd9Sstevel@tonic-gate 
605*7c478bd9Sstevel@tonic-gate 	leaky_subr_caller(lhs->lkb_stack, lhs->lkb_depth, lbuf, &lcaller);
606*7c478bd9Sstevel@tonic-gate 	leaky_subr_caller(rhs->lkb_stack, lhs->lkb_depth, rbuf, &rcaller);
607*7c478bd9Sstevel@tonic-gate 
608*7c478bd9Sstevel@tonic-gate 	if (rval = strcmp(lbuf, rbuf))
609*7c478bd9Sstevel@tonic-gate 		return (rval);
610*7c478bd9Sstevel@tonic-gate 
611*7c478bd9Sstevel@tonic-gate 	if (lcaller < rcaller)
612*7c478bd9Sstevel@tonic-gate 		return (-1);
613*7c478bd9Sstevel@tonic-gate 
614*7c478bd9Sstevel@tonic-gate 	if (lcaller > rcaller)
615*7c478bd9Sstevel@tonic-gate 		return (1);
616*7c478bd9Sstevel@tonic-gate 
617*7c478bd9Sstevel@tonic-gate 	if (lhs->lkb_data < rhs->lkb_data)
618*7c478bd9Sstevel@tonic-gate 		return (-1);
619*7c478bd9Sstevel@tonic-gate 
620*7c478bd9Sstevel@tonic-gate 	if (lhs->lkb_data > rhs->lkb_data)
621*7c478bd9Sstevel@tonic-gate 		return (1);
622*7c478bd9Sstevel@tonic-gate 
623*7c478bd9Sstevel@tonic-gate 	return (0);
624*7c478bd9Sstevel@tonic-gate }
625*7c478bd9Sstevel@tonic-gate 
626*7c478bd9Sstevel@tonic-gate /*
627*7c478bd9Sstevel@tonic-gate  * Global state variables used by the leaky_subr_dump_* routines.  Note that
628*7c478bd9Sstevel@tonic-gate  * they are carefully cleared before use.
629*7c478bd9Sstevel@tonic-gate  */
630*7c478bd9Sstevel@tonic-gate static int lk_vmem_seen;
631*7c478bd9Sstevel@tonic-gate static int lk_cache_seen;
632*7c478bd9Sstevel@tonic-gate static int lk_kmem_seen;
633*7c478bd9Sstevel@tonic-gate static size_t lk_ttl;
634*7c478bd9Sstevel@tonic-gate static size_t lk_bytes;
635*7c478bd9Sstevel@tonic-gate 
636*7c478bd9Sstevel@tonic-gate void
637*7c478bd9Sstevel@tonic-gate leaky_subr_dump_start(int type)
638*7c478bd9Sstevel@tonic-gate {
639*7c478bd9Sstevel@tonic-gate 	switch (type) {
640*7c478bd9Sstevel@tonic-gate 	case TYPE_VMEM:
641*7c478bd9Sstevel@tonic-gate 		lk_vmem_seen = 0;
642*7c478bd9Sstevel@tonic-gate 		break;
643*7c478bd9Sstevel@tonic-gate 	case TYPE_CACHE:
644*7c478bd9Sstevel@tonic-gate 		lk_cache_seen = 0;
645*7c478bd9Sstevel@tonic-gate 		break;
646*7c478bd9Sstevel@tonic-gate 	case TYPE_KMEM:
647*7c478bd9Sstevel@tonic-gate 		lk_kmem_seen = 0;
648*7c478bd9Sstevel@tonic-gate 		break;
649*7c478bd9Sstevel@tonic-gate 	default:
650*7c478bd9Sstevel@tonic-gate 		break;
651*7c478bd9Sstevel@tonic-gate 	}
652*7c478bd9Sstevel@tonic-gate 
653*7c478bd9Sstevel@tonic-gate 	lk_ttl = 0;
654*7c478bd9Sstevel@tonic-gate 	lk_bytes = 0;
655*7c478bd9Sstevel@tonic-gate }
656*7c478bd9Sstevel@tonic-gate 
657*7c478bd9Sstevel@tonic-gate void
658*7c478bd9Sstevel@tonic-gate leaky_subr_dump(const leak_bufctl_t *lkb, int verbose)
659*7c478bd9Sstevel@tonic-gate {
660*7c478bd9Sstevel@tonic-gate 	const leak_bufctl_t *cur;
661*7c478bd9Sstevel@tonic-gate 	kmem_cache_t cache;
662*7c478bd9Sstevel@tonic-gate 	size_t min, max, size;
663*7c478bd9Sstevel@tonic-gate 	char sz[30];
664*7c478bd9Sstevel@tonic-gate 	char c[MDB_SYM_NAMLEN];
665*7c478bd9Sstevel@tonic-gate 	uintptr_t caller;
666*7c478bd9Sstevel@tonic-gate 
667*7c478bd9Sstevel@tonic-gate 	if (verbose) {
668*7c478bd9Sstevel@tonic-gate 		lk_ttl = 0;
669*7c478bd9Sstevel@tonic-gate 		lk_bytes = 0;
670*7c478bd9Sstevel@tonic-gate 	}
671*7c478bd9Sstevel@tonic-gate 
672*7c478bd9Sstevel@tonic-gate 	switch (lkb->lkb_type) {
673*7c478bd9Sstevel@tonic-gate 	case TYPE_VMEM:
674*7c478bd9Sstevel@tonic-gate 		if (!verbose && !lk_vmem_seen) {
675*7c478bd9Sstevel@tonic-gate 			lk_vmem_seen = 1;
676*7c478bd9Sstevel@tonic-gate 			mdb_printf("%-16s %7s %?s %s\n",
677*7c478bd9Sstevel@tonic-gate 			    "BYTES", "LEAKED", "VMEM_SEG", "CALLER");
678*7c478bd9Sstevel@tonic-gate 		}
679*7c478bd9Sstevel@tonic-gate 
680*7c478bd9Sstevel@tonic-gate 		min = max = lkb->lkb_data;
681*7c478bd9Sstevel@tonic-gate 
682*7c478bd9Sstevel@tonic-gate 		for (cur = lkb; cur != NULL; cur = cur->lkb_next) {
683*7c478bd9Sstevel@tonic-gate 			size = cur->lkb_data;
684*7c478bd9Sstevel@tonic-gate 
685*7c478bd9Sstevel@tonic-gate 			if (size < min)
686*7c478bd9Sstevel@tonic-gate 				min = size;
687*7c478bd9Sstevel@tonic-gate 			if (size > max)
688*7c478bd9Sstevel@tonic-gate 				max = size;
689*7c478bd9Sstevel@tonic-gate 
690*7c478bd9Sstevel@tonic-gate 			lk_ttl++;
691*7c478bd9Sstevel@tonic-gate 			lk_bytes += size;
692*7c478bd9Sstevel@tonic-gate 		}
693*7c478bd9Sstevel@tonic-gate 
694*7c478bd9Sstevel@tonic-gate 		if (min == max)
695*7c478bd9Sstevel@tonic-gate 			(void) mdb_snprintf(sz, sizeof (sz), "%ld", min);
696*7c478bd9Sstevel@tonic-gate 		else
697*7c478bd9Sstevel@tonic-gate 			(void) mdb_snprintf(sz, sizeof (sz), "%ld-%ld",
698*7c478bd9Sstevel@tonic-gate 			    min, max);
699*7c478bd9Sstevel@tonic-gate 
700*7c478bd9Sstevel@tonic-gate 		if (!verbose) {
701*7c478bd9Sstevel@tonic-gate 			leaky_subr_caller(lkb->lkb_stack, lkb->lkb_depth,
702*7c478bd9Sstevel@tonic-gate 			    c, &caller);
703*7c478bd9Sstevel@tonic-gate 
704*7c478bd9Sstevel@tonic-gate 			if (caller != 0) {
705*7c478bd9Sstevel@tonic-gate 				(void) mdb_snprintf(c, sizeof (c),
706*7c478bd9Sstevel@tonic-gate 				    "%a", caller);
707*7c478bd9Sstevel@tonic-gate 			} else {
708*7c478bd9Sstevel@tonic-gate 				(void) mdb_snprintf(c, sizeof (c),
709*7c478bd9Sstevel@tonic-gate 				    "%s", "?");
710*7c478bd9Sstevel@tonic-gate 			}
711*7c478bd9Sstevel@tonic-gate 			mdb_printf("%-16s %7d %?p %s\n", sz, lkb->lkb_dups + 1,
712*7c478bd9Sstevel@tonic-gate 			    lkb->lkb_addr, c);
713*7c478bd9Sstevel@tonic-gate 		} else {
714*7c478bd9Sstevel@tonic-gate 			mdb_arg_t v;
715*7c478bd9Sstevel@tonic-gate 
716*7c478bd9Sstevel@tonic-gate 			if (lk_ttl == 1)
717*7c478bd9Sstevel@tonic-gate 				mdb_printf("kmem_oversize leak: 1 vmem_seg, "
718*7c478bd9Sstevel@tonic-gate 				    "%ld bytes\n", lk_bytes);
719*7c478bd9Sstevel@tonic-gate 			else
720*7c478bd9Sstevel@tonic-gate 				mdb_printf("kmem_oversize leak: %d vmem_segs, "
721*7c478bd9Sstevel@tonic-gate 				    "%s bytes each, %ld bytes total\n",
722*7c478bd9Sstevel@tonic-gate 				    lk_ttl, sz, lk_bytes);
723*7c478bd9Sstevel@tonic-gate 
724*7c478bd9Sstevel@tonic-gate 			v.a_type = MDB_TYPE_STRING;
725*7c478bd9Sstevel@tonic-gate 			v.a_un.a_str = "-v";
726*7c478bd9Sstevel@tonic-gate 
727*7c478bd9Sstevel@tonic-gate 			if (mdb_call_dcmd("vmem_seg", lkb->lkb_addr,
728*7c478bd9Sstevel@tonic-gate 			    DCMD_ADDRSPEC, 1, &v) == -1) {
729*7c478bd9Sstevel@tonic-gate 				mdb_warn("'%p::vmem_seg -v' failed",
730*7c478bd9Sstevel@tonic-gate 				    lkb->lkb_addr);
731*7c478bd9Sstevel@tonic-gate 			}
732*7c478bd9Sstevel@tonic-gate 		}
733*7c478bd9Sstevel@tonic-gate 		return;
734*7c478bd9Sstevel@tonic-gate 
735*7c478bd9Sstevel@tonic-gate 	case TYPE_CACHE:
736*7c478bd9Sstevel@tonic-gate 		if (!verbose && !lk_cache_seen) {
737*7c478bd9Sstevel@tonic-gate 			lk_cache_seen = 1;
738*7c478bd9Sstevel@tonic-gate 			if (lk_vmem_seen)
739*7c478bd9Sstevel@tonic-gate 				mdb_printf("\n");
740*7c478bd9Sstevel@tonic-gate 			mdb_printf("%-?s %7s %?s %s\n",
741*7c478bd9Sstevel@tonic-gate 			    "CACHE", "LEAKED", "BUFFER", "CALLER");
742*7c478bd9Sstevel@tonic-gate 		}
743*7c478bd9Sstevel@tonic-gate 
744*7c478bd9Sstevel@tonic-gate 		if (mdb_vread(&cache, sizeof (cache), lkb->lkb_data) == -1) {
745*7c478bd9Sstevel@tonic-gate 			/*
746*7c478bd9Sstevel@tonic-gate 			 * This _really_ shouldn't happen; we shouldn't
747*7c478bd9Sstevel@tonic-gate 			 * have been able to get this far if this
748*7c478bd9Sstevel@tonic-gate 			 * cache wasn't readable.
749*7c478bd9Sstevel@tonic-gate 			 */
750*7c478bd9Sstevel@tonic-gate 			mdb_warn("can't read cache %p for leaked "
751*7c478bd9Sstevel@tonic-gate 			    "buffer %p", lkb->lkb_data, lkb->lkb_addr);
752*7c478bd9Sstevel@tonic-gate 			return;
753*7c478bd9Sstevel@tonic-gate 		}
754*7c478bd9Sstevel@tonic-gate 
755*7c478bd9Sstevel@tonic-gate 		lk_ttl += lkb->lkb_dups + 1;
756*7c478bd9Sstevel@tonic-gate 		lk_bytes += (lkb->lkb_dups + 1) * cache.cache_bufsize;
757*7c478bd9Sstevel@tonic-gate 
758*7c478bd9Sstevel@tonic-gate 		caller = (lkb->lkb_depth == 0) ? 0 : lkb->lkb_stack[0];
759*7c478bd9Sstevel@tonic-gate 		if (caller != 0) {
760*7c478bd9Sstevel@tonic-gate 			(void) mdb_snprintf(c, sizeof (c), "%a", caller);
761*7c478bd9Sstevel@tonic-gate 		} else {
762*7c478bd9Sstevel@tonic-gate 			(void) mdb_snprintf(c, sizeof (c),
763*7c478bd9Sstevel@tonic-gate 			    "%s", (verbose) ? "" : "?");
764*7c478bd9Sstevel@tonic-gate 		}
765*7c478bd9Sstevel@tonic-gate 
766*7c478bd9Sstevel@tonic-gate 		if (!verbose) {
767*7c478bd9Sstevel@tonic-gate 			mdb_printf("%0?p %7d %0?p %s\n", lkb->lkb_cid,
768*7c478bd9Sstevel@tonic-gate 			    lkb->lkb_dups + 1, lkb->lkb_addr, c);
769*7c478bd9Sstevel@tonic-gate 		} else {
770*7c478bd9Sstevel@tonic-gate 			if (lk_ttl == 1)
771*7c478bd9Sstevel@tonic-gate 				mdb_printf("%s leak: 1 buffer, %ld bytes,\n",
772*7c478bd9Sstevel@tonic-gate 				    cache.cache_name, lk_bytes);
773*7c478bd9Sstevel@tonic-gate 			else
774*7c478bd9Sstevel@tonic-gate 				mdb_printf("%s leak: %d buffers, "
775*7c478bd9Sstevel@tonic-gate 				    "%ld bytes each, %ld bytes total,\n",
776*7c478bd9Sstevel@tonic-gate 				    cache.cache_name, lk_ttl,
777*7c478bd9Sstevel@tonic-gate 				    cache.cache_bufsize, lk_bytes);
778*7c478bd9Sstevel@tonic-gate 
779*7c478bd9Sstevel@tonic-gate 			mdb_printf("    sample addr %p%s%s\n",
780*7c478bd9Sstevel@tonic-gate 			    lkb->lkb_addr, (caller == 0) ? "" : ", caller ", c);
781*7c478bd9Sstevel@tonic-gate 		}
782*7c478bd9Sstevel@tonic-gate 		return;
783*7c478bd9Sstevel@tonic-gate 
784*7c478bd9Sstevel@tonic-gate 	case TYPE_KMEM:
785*7c478bd9Sstevel@tonic-gate 		if (!verbose && !lk_kmem_seen) {
786*7c478bd9Sstevel@tonic-gate 			lk_kmem_seen = 1;
787*7c478bd9Sstevel@tonic-gate 			if (lk_vmem_seen || lk_cache_seen)
788*7c478bd9Sstevel@tonic-gate 				mdb_printf("\n");
789*7c478bd9Sstevel@tonic-gate 			mdb_printf("%-?s %7s %?s %s\n",
790*7c478bd9Sstevel@tonic-gate 			    "CACHE", "LEAKED", "BUFCTL", "CALLER");
791*7c478bd9Sstevel@tonic-gate 		}
792*7c478bd9Sstevel@tonic-gate 
793*7c478bd9Sstevel@tonic-gate 		if (mdb_vread(&cache, sizeof (cache), lkb->lkb_cid) == -1) {
794*7c478bd9Sstevel@tonic-gate 			/*
795*7c478bd9Sstevel@tonic-gate 			 * This _really_ shouldn't happen; we shouldn't
796*7c478bd9Sstevel@tonic-gate 			 * have been able to get this far if this
797*7c478bd9Sstevel@tonic-gate 			 * cache wasn't readable.
798*7c478bd9Sstevel@tonic-gate 			 */
799*7c478bd9Sstevel@tonic-gate 			mdb_warn("can't read cache %p for leaked "
800*7c478bd9Sstevel@tonic-gate 			    "bufctl %p", lkb->lkb_cid, lkb->lkb_addr);
801*7c478bd9Sstevel@tonic-gate 			return;
802*7c478bd9Sstevel@tonic-gate 		}
803*7c478bd9Sstevel@tonic-gate 
804*7c478bd9Sstevel@tonic-gate 		lk_ttl += lkb->lkb_dups + 1;
805*7c478bd9Sstevel@tonic-gate 		lk_bytes += (lkb->lkb_dups + 1) * cache.cache_bufsize;
806*7c478bd9Sstevel@tonic-gate 
807*7c478bd9Sstevel@tonic-gate 		if (!verbose) {
808*7c478bd9Sstevel@tonic-gate 			leaky_subr_caller(lkb->lkb_stack, lkb->lkb_depth,
809*7c478bd9Sstevel@tonic-gate 			    c, &caller);
810*7c478bd9Sstevel@tonic-gate 
811*7c478bd9Sstevel@tonic-gate 			if (caller != 0) {
812*7c478bd9Sstevel@tonic-gate 				(void) mdb_snprintf(c, sizeof (c),
813*7c478bd9Sstevel@tonic-gate 				    "%a", caller);
814*7c478bd9Sstevel@tonic-gate 			} else {
815*7c478bd9Sstevel@tonic-gate 				(void) mdb_snprintf(c, sizeof (c),
816*7c478bd9Sstevel@tonic-gate 				    "%s", "?");
817*7c478bd9Sstevel@tonic-gate 			}
818*7c478bd9Sstevel@tonic-gate 			mdb_printf("%0?p %7d %0?p %s\n", lkb->lkb_cid,
819*7c478bd9Sstevel@tonic-gate 			    lkb->lkb_dups + 1, lkb->lkb_addr, c);
820*7c478bd9Sstevel@tonic-gate 		} else {
821*7c478bd9Sstevel@tonic-gate 			mdb_arg_t v;
822*7c478bd9Sstevel@tonic-gate 
823*7c478bd9Sstevel@tonic-gate 			if (lk_ttl == 1)
824*7c478bd9Sstevel@tonic-gate 				mdb_printf("%s leak: 1 buffer, %ld bytes\n",
825*7c478bd9Sstevel@tonic-gate 				    cache.cache_name, lk_bytes);
826*7c478bd9Sstevel@tonic-gate 			else
827*7c478bd9Sstevel@tonic-gate 				mdb_printf("%s leak: %d buffers, "
828*7c478bd9Sstevel@tonic-gate 				    "%ld bytes each, %ld bytes total\n",
829*7c478bd9Sstevel@tonic-gate 				    cache.cache_name, lk_ttl,
830*7c478bd9Sstevel@tonic-gate 				    cache.cache_bufsize, lk_bytes);
831*7c478bd9Sstevel@tonic-gate 
832*7c478bd9Sstevel@tonic-gate 			v.a_type = MDB_TYPE_STRING;
833*7c478bd9Sstevel@tonic-gate 			v.a_un.a_str = "-v";
834*7c478bd9Sstevel@tonic-gate 
835*7c478bd9Sstevel@tonic-gate 			if (mdb_call_dcmd("bufctl", lkb->lkb_addr,
836*7c478bd9Sstevel@tonic-gate 			    DCMD_ADDRSPEC, 1, &v) == -1) {
837*7c478bd9Sstevel@tonic-gate 				mdb_warn("'%p::bufctl -v' failed",
838*7c478bd9Sstevel@tonic-gate 				    lkb->lkb_addr);
839*7c478bd9Sstevel@tonic-gate 			}
840*7c478bd9Sstevel@tonic-gate 		}
841*7c478bd9Sstevel@tonic-gate 		return;
842*7c478bd9Sstevel@tonic-gate 
843*7c478bd9Sstevel@tonic-gate 	default:
844*7c478bd9Sstevel@tonic-gate 		return;
845*7c478bd9Sstevel@tonic-gate 	}
846*7c478bd9Sstevel@tonic-gate }
847*7c478bd9Sstevel@tonic-gate 
848*7c478bd9Sstevel@tonic-gate void
849*7c478bd9Sstevel@tonic-gate leaky_subr_dump_end(int type)
850*7c478bd9Sstevel@tonic-gate {
851*7c478bd9Sstevel@tonic-gate 	int i;
852*7c478bd9Sstevel@tonic-gate 	int width;
853*7c478bd9Sstevel@tonic-gate 	const char *leaks;
854*7c478bd9Sstevel@tonic-gate 
855*7c478bd9Sstevel@tonic-gate 	switch (type) {
856*7c478bd9Sstevel@tonic-gate 	case TYPE_VMEM:
857*7c478bd9Sstevel@tonic-gate 		if (!lk_vmem_seen)
858*7c478bd9Sstevel@tonic-gate 			return;
859*7c478bd9Sstevel@tonic-gate 
860*7c478bd9Sstevel@tonic-gate 		width = 16;
861*7c478bd9Sstevel@tonic-gate 		leaks = "kmem_oversize leak";
862*7c478bd9Sstevel@tonic-gate 		break;
863*7c478bd9Sstevel@tonic-gate 
864*7c478bd9Sstevel@tonic-gate 	case TYPE_CACHE:
865*7c478bd9Sstevel@tonic-gate 		if (!lk_cache_seen)
866*7c478bd9Sstevel@tonic-gate 			return;
867*7c478bd9Sstevel@tonic-gate 
868*7c478bd9Sstevel@tonic-gate 		width = sizeof (uintptr_t) * 2;
869*7c478bd9Sstevel@tonic-gate 		leaks = "buffer";
870*7c478bd9Sstevel@tonic-gate 		break;
871*7c478bd9Sstevel@tonic-gate 
872*7c478bd9Sstevel@tonic-gate 	case TYPE_KMEM:
873*7c478bd9Sstevel@tonic-gate 		if (!lk_kmem_seen)
874*7c478bd9Sstevel@tonic-gate 			return;
875*7c478bd9Sstevel@tonic-gate 
876*7c478bd9Sstevel@tonic-gate 		width = sizeof (uintptr_t) * 2;
877*7c478bd9Sstevel@tonic-gate 		leaks = "buffer";
878*7c478bd9Sstevel@tonic-gate 		break;
879*7c478bd9Sstevel@tonic-gate 
880*7c478bd9Sstevel@tonic-gate 	default:
881*7c478bd9Sstevel@tonic-gate 		return;
882*7c478bd9Sstevel@tonic-gate 	}
883*7c478bd9Sstevel@tonic-gate 
884*7c478bd9Sstevel@tonic-gate 	for (i = 0; i < 72; i++)
885*7c478bd9Sstevel@tonic-gate 		mdb_printf("-");
886*7c478bd9Sstevel@tonic-gate 	mdb_printf("\n%*s %7ld %s%s, %ld byte%s\n",
887*7c478bd9Sstevel@tonic-gate 	    width, "Total", lk_ttl, leaks, (lk_ttl == 1) ? "" : "s",
888*7c478bd9Sstevel@tonic-gate 	    lk_bytes, (lk_bytes == 1) ? "" : "s");
889*7c478bd9Sstevel@tonic-gate }
890*7c478bd9Sstevel@tonic-gate 
891*7c478bd9Sstevel@tonic-gate int
892*7c478bd9Sstevel@tonic-gate leaky_subr_invoke_callback(const leak_bufctl_t *lkb, mdb_walk_cb_t cb,
893*7c478bd9Sstevel@tonic-gate     void *cbdata)
894*7c478bd9Sstevel@tonic-gate {
895*7c478bd9Sstevel@tonic-gate 	kmem_bufctl_audit_t bc;
896*7c478bd9Sstevel@tonic-gate 	vmem_seg_t vs;
897*7c478bd9Sstevel@tonic-gate 
898*7c478bd9Sstevel@tonic-gate 	switch (lkb->lkb_type) {
899*7c478bd9Sstevel@tonic-gate 	case TYPE_VMEM:
900*7c478bd9Sstevel@tonic-gate 		if (mdb_vread(&vs, sizeof (vs), lkb->lkb_addr) == -1) {
901*7c478bd9Sstevel@tonic-gate 			mdb_warn("unable to read vmem_seg at %p",
902*7c478bd9Sstevel@tonic-gate 			    lkb->lkb_addr);
903*7c478bd9Sstevel@tonic-gate 			return (WALK_NEXT);
904*7c478bd9Sstevel@tonic-gate 		}
905*7c478bd9Sstevel@tonic-gate 		return (cb(lkb->lkb_addr, &vs, cbdata));
906*7c478bd9Sstevel@tonic-gate 
907*7c478bd9Sstevel@tonic-gate 	case TYPE_CACHE:
908*7c478bd9Sstevel@tonic-gate 		return (cb(lkb->lkb_addr, NULL, cbdata));
909*7c478bd9Sstevel@tonic-gate 
910*7c478bd9Sstevel@tonic-gate 	case TYPE_KMEM:
911*7c478bd9Sstevel@tonic-gate 		if (mdb_vread(&bc, sizeof (bc), lkb->lkb_addr) == -1) {
912*7c478bd9Sstevel@tonic-gate 			mdb_warn("unable to read bufctl at %p",
913*7c478bd9Sstevel@tonic-gate 			    lkb->lkb_addr);
914*7c478bd9Sstevel@tonic-gate 			return (WALK_NEXT);
915*7c478bd9Sstevel@tonic-gate 		}
916*7c478bd9Sstevel@tonic-gate 		return (cb(lkb->lkb_addr, &bc, cbdata));
917*7c478bd9Sstevel@tonic-gate 	default:
918*7c478bd9Sstevel@tonic-gate 		return (WALK_NEXT);
919*7c478bd9Sstevel@tonic-gate 	}
920*7c478bd9Sstevel@tonic-gate }
921