/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "umem.h"

#include <sys/vmem_impl_user.h>
#include <umem_impl.h>

#include <alloca.h>
#include <libproc.h>
#include <stdio.h>
#include <string.h>
#include <sys/stack.h>

#include "leaky_impl.h"
#include "misc.h"
#include "proc_kludges.h"

#include "umem_pagesize.h"

/*
 * This file defines the libumem target for ../genunix/leaky.c.
 *
 * See ../genunix/leaky_impl.h for the target interface definition.
 */

/*
 * leaky_subr_dump_start()/_end() depend on the ordering of TYPE_VMEM,
 * TYPE_MMAP and TYPE_SBRK.
 */
#define	TYPE_MMAP	0		/* lkb_data is the size */
#define	TYPE_SBRK	1		/* lkb_data is the size */
#define	TYPE_VMEM	2		/* lkb_data is the vmem_seg's size */
#define	TYPE_CACHE	3		/* lkb_cid is the bufctl's cache */
#define	TYPE_UMEM	4		/* lkb_cid is the bufctl's cache */

#define	LKM_CTL_BUFCTL	0	/* normal allocation, PTR is bufctl */
#define	LKM_CTL_VMSEG	1	/* oversize allocation, PTR is vmem_seg_t */
#define	LKM_CTL_MEMORY	2	/* non-umem mmap or brk, PTR is region start */
#define	LKM_CTL_CACHE	3	/* normal alloc, non-debug, PTR is cache */
#define	LKM_CTL_MASK	3L

/*
 * create a lkm_bufctl from a pointer and a type
 */
#define	LKM_CTL(ptr, type)	(LKM_CTLPTR(ptr) | (type))
#define	LKM_CTLPTR(ctl)		((uintptr_t)(ctl) & ~(LKM_CTL_MASK))
#define	LKM_CTLTYPE(ctl)	((uintptr_t)(ctl) &  (LKM_CTL_MASK))

static uintptr_t leak_brkbase;
static uintptr_t leak_brksize;

#define	LEAKY_INBRK(ptr) \
	(((uintptr_t)(ptr) - leak_brkbase) < leak_brksize)

typedef struct leaky_seg_info {
	uintptr_t ls_start;
	uintptr_t ls_end;
} leaky_seg_info_t;

typedef struct leaky_maps {
	leaky_seg_info_t	*lm_segs;
	uintptr_t		lm_seg_count;
	uintptr_t		lm_seg_max;

	pstatus_t		*lm_pstatus;

	leak_mtab_t		**lm_lmp;
} leaky_maps_t;

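/*
 * Walker callback for audited caches:  record the buffer address and its
 * bufctl in the next mtab entry.  The limit is filled in later by
 * leaky_cache(), which knows the cache's buffer size.
 */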
/*ARGSUSED*/
static int
leaky_mtab(uintptr_t addr, const umem_bufctl_audit_t *bcp, leak_mtab_t **lmp)
{
	leak_mtab_t *lm = (*lmp)++;

	lm->lkm_base = (uintptr_t)bcp->bc_addr;
	lm->lkm_bufctl = LKM_CTL(addr, LKM_CTL_BUFCTL);

	return (WALK_NEXT);
}

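/*
 * Walker callback for non-audit caches:  only the buffer address is
 * known; leaky_cache() fills in the limit and tags the entry with the
 * cache.
 */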
/*ARGSUSED*/
static int
leaky_mtab_addr(uintptr_t addr, void *ignored, leak_mtab_t **lmp)
{
	leak_mtab_t *lm = (*lmp)++;

	lm->lkm_base = addr;

	return (WALK_NEXT);
}

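/*
 * Record an allocated vmem segment as [vs_start, vs_end), tagged with
 * the vmem_seg_t's address so its allocation stack can be retrieved
 * later.
 */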
static int
leaky_seg(uintptr_t addr, const vmem_seg_t *seg, leak_mtab_t **lmp)
{
	leak_mtab_t *lm = (*lmp)++;

	lm->lkm_base = seg->vs_start;
	lm->lkm_limit = seg->vs_end;
	lm->lkm_bufctl = LKM_CTL(addr, LKM_CTL_VMSEG);
	return (WALK_NEXT);
}

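/*
 * Oversize and memalign allocations come straight from vmem rather than
 * from a cache, so walk the allocated segments of those two arenas.
 */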
static int
leaky_vmem(uintptr_t addr, const vmem_t *vmem, leak_mtab_t **lmp)
{
	if (strcmp(vmem->vm_name, "umem_oversize") != 0 &&
	    strcmp(vmem->vm_name, "umem_memalign") != 0)
		return (WALK_NEXT);

	if (mdb_pwalk("vmem_alloc", (mdb_walk_cb_t)leaky_seg, lmp, addr) == -1)
		mdb_warn("can't walk vmem_alloc for %s (%p)", vmem->vm_name,
		    addr);

	return (WALK_NEXT);
}

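/*
 * Estimate the number of allocated oversize/memalign segments from the
 * arena kstats (allocations minus frees).
 */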
/*ARGSUSED*/
static int
leaky_estimate_vmem(uintptr_t addr, const vmem_t *vmem, size_t *est)
{
	if (strcmp(vmem->vm_name, "umem_oversize") != 0 &&
	    strcmp(vmem->vm_name, "umem_memalign") != 0)
		return (WALK_NEXT);

	*est += (int)(vmem->vm_kstat.vk_alloc - vmem->vm_kstat.vk_free);

	return (WALK_NEXT);
}

static int
leaky_seg_cmp(const void *l, const void *r)
{
	const leaky_seg_info_t *lhs = (const leaky_seg_info_t *)l;
	const leaky_seg_info_t *rhs = (const leaky_seg_info_t *)r;

	if (lhs->ls_start < rhs->ls_start)
		return (-1);
	if (lhs->ls_start > rhs->ls_start)
		return (1);

	return (0);
}

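/*
 * Binary search a sorted segment list for the segment containing addr;
 * returns its index, or -1 if addr falls in no segment.
 */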
static ssize_t
leaky_seg_search(uintptr_t addr, leaky_seg_info_t *listp, unsigned count)
{
	ssize_t left = 0, right = count - 1, guess;

	while (right >= left) {
		guess = (right + left) >> 1;

		if (addr < listp[guess].ls_start) {
			right = guess - 1;
			continue;
		}

		if (addr >= listp[guess].ls_end) {
			left = guess + 1;
			continue;
		}

		return (guess);
	}

	return (-1);
}

/*ARGSUSED*/
static int
leaky_count(uintptr_t addr, void *unused, size_t *total)
{
	++*total;

	return (WALK_NEXT);
}

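/*
 * Record each non-empty vmem span in lmp->lm_segs, bailing out if more
 * spans appear than leaky_count() originally found.
 */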
/*ARGSUSED*/
static int
leaky_read_segs(uintptr_t addr, const vmem_seg_t *seg, leaky_maps_t *lmp)
{
	leaky_seg_info_t *my_si = lmp->lm_segs + lmp->lm_seg_count;

	if (seg->vs_start == seg->vs_end && seg->vs_start == 0)
		return (WALK_NEXT);

	if (lmp->lm_seg_count++ >= lmp->lm_seg_max)
		return (WALK_ERR);

	my_si->ls_start = seg->vs_start;
	my_si->ls_end = seg->vs_end;

	return (WALK_NEXT);
}

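/*
 * Create an mtab entry for each anonymous mapping that umem knows
 * nothing about; the qualifying conditions are spelled out below.
 */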
/* ARGSUSED */
static int
leaky_process_anon_mappings(uintptr_t ignored, const prmap_t *pmp,
    leaky_maps_t *lmp)
{
	uintptr_t start = pmp->pr_vaddr;
	uintptr_t end = pmp->pr_vaddr + pmp->pr_size;

	leak_mtab_t *lm;
	pstatus_t *Psp = lmp->lm_pstatus;

	uintptr_t brk_start = Psp->pr_brkbase;
	uintptr_t brk_end = Psp->pr_brkbase + Psp->pr_brksize;

	int has_brk = 0;
	int in_vmem = 0;

	/*
	 * This checks if there is any overlap between the segment and the brk.
	 */
	if (end > brk_start && start < brk_end)
		has_brk = 1;

	if (leaky_seg_search(start, lmp->lm_segs, lmp->lm_seg_count) != -1)
		in_vmem = 1;

	/*
	 * We only want anonymous, mmapped memory.  That means:
	 *
	 * 1. Must be read-write
	 * 2. Cannot be shared
	 * 3. Cannot have backing
	 * 4. Cannot be in the brk
	 * 5. Cannot be part of the vmem heap.
	 */
	if ((pmp->pr_mflags & (MA_READ | MA_WRITE)) == (MA_READ | MA_WRITE) &&
	    (pmp->pr_mflags & MA_SHARED) == 0 &&
	    (pmp->pr_mapname[0] == 0) &&
	    !has_brk &&
	    !in_vmem) {
		dprintf(("mmapped region: [%p, %p)\n", start, end));
		lm = (*lmp->lm_lmp)++;
		lm->lkm_base = start;
		lm->lkm_limit = end;
		lm->lkm_bufctl = LKM_CTL(pmp->pr_vaddr, LKM_CTL_MEMORY);
	}

	return (WALK_NEXT);
}

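/*
 * The heap's vmem spans normally cover the brk, but memory obtained by
 * calling sbrk(2) directly will not appear in any span.  Add mtab
 * entries for the portions of [brkbase, brkend) which no heap span
 * covers.
 */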
static void
leaky_handle_sbrk(leaky_maps_t *lmp)
{
	uintptr_t brkbase = lmp->lm_pstatus->pr_brkbase;
	uintptr_t brkend = brkbase + lmp->lm_pstatus->pr_brksize;

	leak_mtab_t *lm;

	leaky_seg_info_t *segs = lmp->lm_segs;

	int x, first = -1, last = -1;

	dprintf(("brk: [%p, %p)\n", brkbase, brkend));

	for (x = 0; x < lmp->lm_seg_count; x++) {
		if (segs[x].ls_start >= brkbase && segs[x].ls_end <= brkend) {
			if (first == -1)
				first = x;
			last = x;
		}
	}

	if (brkbase == brkend) {
		dprintf(("empty brk -- do nothing\n"));
	} else if (first == -1) {
		dprintf(("adding [%p, %p) whole brk\n", brkbase, brkend));

		lm = (*lmp->lm_lmp)++;
		lm->lkm_base = brkbase;
		lm->lkm_limit = brkend;
		lm->lkm_bufctl = LKM_CTL(brkbase, LKM_CTL_MEMORY);
	} else {
		uintptr_t curbrk = P2ROUNDUP(brkbase, umem_pagesize);

		if (curbrk != segs[first].ls_start) {
			dprintf(("adding [%p, %p) in brk, before first seg\n",
			    brkbase, segs[first].ls_start));

			lm = (*lmp->lm_lmp)++;
			lm->lkm_base = brkbase;
			lm->lkm_limit = segs[first].ls_start;
			lm->lkm_bufctl = LKM_CTL(brkbase, LKM_CTL_MEMORY);

			curbrk = segs[first].ls_start;

		} else if (curbrk != brkbase) {
			dprintf(("ignore [%p, %p) -- realign\n", brkbase,
			    curbrk));
		}

		for (x = first; x <= last; x++) {
			if (curbrk < segs[x].ls_start) {
				dprintf(("adding [%p, %p) in brk\n", curbrk,
				    segs[x].ls_start));

				lm = (*lmp->lm_lmp)++;
				lm->lkm_base = curbrk;
				lm->lkm_limit = segs[x].ls_start;
				lm->lkm_bufctl = LKM_CTL(curbrk,
				    LKM_CTL_MEMORY);
			}
			curbrk = segs[x].ls_end;
		}

		if (curbrk < brkend) {
			dprintf(("adding [%p, %p) in brk, after last seg\n",
			    curbrk, brkend));

			lm = (*lmp->lm_lmp)++;
			lm->lkm_base = curbrk;
			lm->lkm_limit = brkend;
			lm->lkm_bufctl = LKM_CTL(curbrk, LKM_CTL_MEMORY);
		}
	}
}

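/*
 * Find anonymous memory which is not under umem's control, since it can
 * both contain references and leak.  We read the target's pstatus, walk
 * to the root of heap_arena's source chain, collect and sort its spans,
 * and then create mtab entries for qualifying anonymous mappings and for
 * brk space outside those spans.
 */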
static int
leaky_handle_anon_mappings(leak_mtab_t **lmp)
{
	leaky_maps_t		lm;

	vmem_t *heap_arena;
	vmem_t *vm_next;
	vmem_t *heap_top;
	vmem_t vmem;

	pstatus_t Ps;

	if (mdb_get_xdata("pstatus", &Ps, sizeof (Ps)) == -1) {
		mdb_warn("couldn't read pstatus xdata");
		return (DCMD_ERR);
	}
	lm.lm_pstatus = &Ps;

	leak_brkbase = Ps.pr_brkbase;
	leak_brksize = Ps.pr_brksize;

	if (umem_readvar(&heap_arena, "heap_arena") == -1) {
		mdb_warn("couldn't read heap_arena");
		return (DCMD_ERR);
	}

	if (heap_arena == NULL) {
		mdb_warn("heap_arena is NULL.\n");
		return (DCMD_ERR);
	}

	for (vm_next = heap_arena; vm_next != NULL; vm_next = vmem.vm_source) {
		if (mdb_vread(&vmem, sizeof (vmem), (uintptr_t)vm_next) == -1) {
			mdb_warn("couldn't read vmem at %p", vm_next);
			return (DCMD_ERR);
		}
		heap_top = vm_next;
	}

	lm.lm_seg_count = 0;
	lm.lm_seg_max = 0;

	if (mdb_pwalk("vmem_span", (mdb_walk_cb_t)leaky_count,
	    &lm.lm_seg_max, (uintptr_t)heap_top) == -1) {
		mdb_warn("couldn't walk vmem_span for vmem %p", heap_top);
		return (DCMD_ERR);
	}
	lm.lm_segs = mdb_alloc(lm.lm_seg_max * sizeof (*lm.lm_segs),
	    UM_SLEEP | UM_GC);

	if (mdb_pwalk("vmem_span", (mdb_walk_cb_t)leaky_read_segs, &lm,
	    (uintptr_t)heap_top) == -1) {
		mdb_warn("couldn't walk vmem_span for vmem %p",
		    heap_top);
		return (DCMD_ERR);
	}

	if (lm.lm_seg_count > lm.lm_seg_max) {
		mdb_warn("segment list for vmem %p grew\n", heap_top);
		return (DCMD_ERR);
	}

	qsort(lm.lm_segs, lm.lm_seg_count, sizeof (*lm.lm_segs), leaky_seg_cmp);

	lm.lm_lmp = lmp;

	prockludge_add_walkers();

	if (mdb_walk(KLUDGE_MAPWALK_NAME,
	    (mdb_walk_cb_t)leaky_process_anon_mappings, &lm) == -1) {
		mdb_warn("Couldn't walk "KLUDGE_MAPWALK_NAME);
		prockludge_remove_walkers();
		return (DCMD_ERR);
	}

	prockludge_remove_walkers();
	leaky_handle_sbrk(&lm);

	return (DCMD_OK);
}

static int
leaky_interested(const umem_cache_t *c)
{
	vmem_t vmem;

	if (mdb_vread(&vmem, sizeof (vmem), (uintptr_t)c->cache_arena) == -1) {
		mdb_warn("cannot read arena %p for cache '%s'",
		    (uintptr_t)c->cache_arena, c->cache_name);
		return (0);
	}

	/*
	 * If this cache isn't allocating from either the umem_default or
	 * umem_firewall vmem arena, we're not interested.
	 */
	if (strcmp(vmem.vm_name, "umem_default") != 0 &&
	    strcmp(vmem.vm_name, "umem_firewall") != 0) {
		dprintf(("Skipping cache '%s' with arena '%s'\n",
		    c->cache_name, vmem.vm_name));
		return (0);
	}

	return (1);
}

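/*
 * Add the estimated number of allocated buffers in the cache to *est.
 */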
/*ARGSUSED*/
static int
leaky_estimate(uintptr_t addr, const umem_cache_t *c, size_t *est)
{
	if (!leaky_interested(c))
		return (WALK_NEXT);

	*est += umem_estimate_allocated(addr, c);

	return (WALK_NEXT);
}

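/*
 * Create mtab entries for a cache's allocated buffers.  Audited caches
 * are walked by bufctl; for non-audit caches we walk the buffers
 * themselves and tag each entry with the cache so leaks can still be
 * grouped by cache.
 */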
/*ARGSUSED*/
static int
leaky_cache(uintptr_t addr, const umem_cache_t *c, leak_mtab_t **lmp)
{
	leak_mtab_t *lm = *lmp;
	mdb_walk_cb_t cb;
	const char *walk;
	int audit = (c->cache_flags & UMF_AUDIT);

	if (!leaky_interested(c))
		return (WALK_NEXT);

	if (audit) {
		walk = "bufctl";
		cb = (mdb_walk_cb_t)leaky_mtab;
	} else {
		walk = "umem";
		cb = (mdb_walk_cb_t)leaky_mtab_addr;
	}
	if (mdb_pwalk(walk, cb, lmp, addr) == -1) {
		mdb_warn("can't walk umem for cache %p (%s)", addr,
		    c->cache_name);
		return (WALK_DONE);
	}

	for (; lm < *lmp; lm++) {
		lm->lkm_limit = lm->lkm_base + c->cache_bufsize;
		if (!audit)
			lm->lkm_bufctl = LKM_CTL(addr, LKM_CTL_CACHE);
	}
	return (WALK_NEXT);
}

static char *map_head = "%-?s  %?s  %-10s used reason\n";
static char *map_fmt  = "[%?p,%?p) %-10s ";
#define	BACKING_LEN 10 /* must match the third field's width in map_fmt */

static void
leaky_mappings_header(void)
{
	dprintf((map_head, "mapping", "", "backing"));
}

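/*
 * Classify a mapping and leaky_grep() it if it may legitimately hold
 * references to allocated buffers:  bss up to brkbase, a.out data, and
 * library data/bss.  Read-only mappings, the brk, the stack, libumem
 * itself, and anonymous mappings are all skipped.
 */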
/* ARGSUSED */
static int
leaky_grep_mappings(uintptr_t ignored, const prmap_t *pmp,
    const pstatus_t *Psp)
{
	const char *map_libname_ptr;
	char db_mp_name[BACKING_LEN+1];

	map_libname_ptr = strrchr(pmp->pr_mapname, '/');
	if (map_libname_ptr != NULL)
		map_libname_ptr++;
	else
		map_libname_ptr = pmp->pr_mapname;

	strlcpy(db_mp_name, map_libname_ptr, sizeof (db_mp_name));

	dprintf((map_fmt, pmp->pr_vaddr, (char *)pmp->pr_vaddr + pmp->pr_size,
	    db_mp_name));

#define	USE(rsn)	dprintf_cont(("yes  %s\n", (rsn)))
#define	IGNORE(rsn)	dprintf_cont(("no   %s\n", (rsn)))

	if (!(pmp->pr_mflags & MA_WRITE) || !(pmp->pr_mflags & MA_READ)) {
		IGNORE("read-only");
	} else if (pmp->pr_vaddr <= Psp->pr_brkbase &&
	    pmp->pr_vaddr + pmp->pr_size > Psp->pr_brkbase) {
		USE("bss");			/* grab up to brkbase */
		leaky_grep(pmp->pr_vaddr, Psp->pr_brkbase - pmp->pr_vaddr);
	} else if (pmp->pr_vaddr >= Psp->pr_brkbase &&
	    pmp->pr_vaddr < Psp->pr_brkbase + Psp->pr_brksize) {
		IGNORE("in brk");
	} else if (pmp->pr_vaddr == Psp->pr_stkbase &&
	    pmp->pr_size == Psp->pr_stksize) {
		IGNORE("stack");
	} else if (0 == strcmp(map_libname_ptr, "a.out")) {
		USE("a.out data");
		leaky_grep(pmp->pr_vaddr, pmp->pr_size);
	} else if (0 == strncmp(map_libname_ptr, "libumem.so", 10)) {
		IGNORE("part of umem");
	} else if (pmp->pr_mapname[0] != 0) {
		USE("lib data");		/* library data/bss */
		leaky_grep(pmp->pr_vaddr, pmp->pr_size);
	} else if ((pmp->pr_mflags & MA_ANON) && pmp->pr_mapname[0] == 0) {
		IGNORE("anon");
	} else {
		IGNORE("");		/* default to ignoring */
	}

#undef	USE
#undef	IGNORE

	return (WALK_NEXT);
}

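/*
 * LWPs are handled in two passes:  leaky_mark_lwp() marks the memory
 * containing each stack pointer, and leaky_process_lwp() then greps
 * every register as well as the live portion of each stack.
 */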
/*ARGSUSED*/
static int
leaky_mark_lwp(void *ignored, const lwpstatus_t *lwp)
{
	leaky_mark_ptr(lwp->pr_reg[R_SP] + STACK_BIAS);
	return (0);
}

/*ARGSUSED*/
static int
leaky_process_lwp(void *ignored, const lwpstatus_t *lwp)
{
	const uintptr_t *regs = (const uintptr_t *)&lwp->pr_reg;
	int i;
	uintptr_t sp;
	uintptr_t addr;
	size_t size;

	for (i = 0; i < R_SP; i++)
		leaky_grep_ptr(regs[i]);

	sp = regs[i++] + STACK_BIAS;
	if (leaky_lookup_marked(sp, &addr, &size))
		leaky_grep(sp, size - (sp - addr));

	for (; i < NPRGREG; i++)
		leaky_grep_ptr(regs[i]);

	return (0);
}

/*
 * Handles processing various proc-related things:
 * 1. calls leaky_mark_lwp() and leaky_process_lwp() on each LWP
 * 2. leaky_greps the bss/data of libraries and of a.out.
 */
static int
leaky_process_proc(void)
{
	pstatus_t Ps;
	struct ps_prochandle *Pr;

	if (mdb_get_xdata("pstatus", &Ps, sizeof (Ps)) == -1) {
		mdb_warn("couldn't read pstatus xdata");
		return (DCMD_ERR);
	}

	dprintf(("pstatus says:\n"));
	dprintf(("\tbrk: base %p size %p\n",
	    Ps.pr_brkbase, Ps.pr_brksize));
	dprintf(("\tstk: base %p size %p\n",
	    Ps.pr_stkbase, Ps.pr_stksize));

	if (mdb_get_xdata("pshandle", &Pr, sizeof (Pr)) == -1) {
		mdb_warn("couldn't read pshandle xdata");
		return (DCMD_ERR);
	}

	if (Plwp_iter(Pr, leaky_mark_lwp, NULL) != 0) {
		mdb_warn("findleaks: Failed to iterate lwps\n");
		return (DCMD_ERR);
	}

	if (Plwp_iter(Pr, leaky_process_lwp, NULL) != 0) {
		mdb_warn("findleaks: Failed to iterate lwps\n");
		return (DCMD_ERR);
	}

	prockludge_add_walkers();

	leaky_mappings_header();

	if (mdb_walk(KLUDGE_MAPWALK_NAME, (mdb_walk_cb_t)leaky_grep_mappings,
	    &Ps) == -1) {
		mdb_warn("Couldn't walk "KLUDGE_MAPWALK_NAME);
		prockludge_remove_walkers();
		return (-1);
	}

	prockludge_remove_walkers();

	return (0);
}

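/*
 * Given a leak's stack, report its caller:  the first frame which does
 * not resolve to libumem.so itself.
 */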
static void
leaky_subr_caller(const uintptr_t *stack, uint_t depth, char *buf,
    uintptr_t *pcp)
{
	int i;
	GElf_Sym sym;
	uintptr_t pc = 0;

	buf[0] = 0;

	for (i = 0; i < depth; i++) {
		pc = stack[i];

		if (mdb_lookup_by_addr(pc,
		    MDB_SYM_FUZZY, buf, MDB_SYM_NAMLEN, &sym) == -1)
			continue;
		if (strncmp(buf, "libumem.so", 10) == 0)
			continue;

		*pcp = pc;
		return;
	}

	/*
	 * We're only here if the entire call chain is in libumem.so;
	 * this shouldn't happen, but we'll just use the last caller.
	 */
	*pcp = pc;
}

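/*
 * Comparison routine for coalesced leaks:  order by caller symbol name,
 * then caller address, then by the leak's lkb_data (size or cache,
 * depending on type), so related leaks sort together in the dump.
 */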
int
leaky_subr_bufctl_cmp(const leak_bufctl_t *lhs, const leak_bufctl_t *rhs)
{
	char lbuf[MDB_SYM_NAMLEN], rbuf[MDB_SYM_NAMLEN];
	uintptr_t lcaller, rcaller;
	int rval;

	leaky_subr_caller(lhs->lkb_stack, lhs->lkb_depth, lbuf, &lcaller);
	leaky_subr_caller(rhs->lkb_stack, rhs->lkb_depth, rbuf, &rcaller);

	if ((rval = strcmp(lbuf, rbuf)) != 0)
		return (rval);

	if (lcaller < rcaller)
		return (-1);

	if (lcaller > rcaller)
		return (1);

	if (lhs->lkb_data < rhs->lkb_data)
		return (-1);

	if (lhs->lkb_data > rhs->lkb_data)
		return (1);

	return (0);
}

/*ARGSUSED*/
int
leaky_subr_estimate(size_t *estp)
{
	if (umem_ready == 0) {
		mdb_warn(
		    "findleaks: umem is not loaded in the address space\n");
		return (DCMD_ERR);
	}

	if (umem_ready == UMEM_READY_INIT_FAILED) {
		mdb_warn("findleaks: umem initialization failed -- no "
		    "possible leaks.\n");
		return (DCMD_ERR);
	}

	if (umem_ready != UMEM_READY) {
		mdb_warn("findleaks: No allocations have occurred -- no "
		    "possible leaks.\n");
		return (DCMD_ERR);
	}

	if (mdb_walk("umem_cache", (mdb_walk_cb_t)leaky_estimate, estp) == -1) {
		mdb_warn("couldn't walk 'umem_cache'");
		return (DCMD_ERR);
	}

	if (mdb_walk("vmem", (mdb_walk_cb_t)leaky_estimate_vmem, estp) == -1) {
		mdb_warn("couldn't walk 'vmem'");
		return (DCMD_ERR);
	}

	if (*estp == 0) {
		mdb_warn("findleaks: No allocated buffers found.\n");
		return (DCMD_ERR);
	}

	prockludge_add_walkers();

	if (mdb_walk(KLUDGE_MAPWALK_NAME, (mdb_walk_cb_t)leaky_count,
	    estp) == -1) {
		mdb_warn("Couldn't walk "KLUDGE_MAPWALK_NAME);
		prockludge_remove_walkers();
		return (DCMD_ERR);
	}

	prockludge_remove_walkers();

	return (DCMD_OK);
}

int
leaky_subr_fill(leak_mtab_t **lmpp)
{
	if (leaky_handle_anon_mappings(lmpp) != DCMD_OK) {
		mdb_warn("unable to process mappings\n");
		return (DCMD_ERR);
	}

	if (mdb_walk("vmem", (mdb_walk_cb_t)leaky_vmem, lmpp) == -1) {
		mdb_warn("couldn't walk 'vmem'");
		return (DCMD_ERR);
	}

	if (mdb_walk("umem_cache", (mdb_walk_cb_t)leaky_cache, lmpp) == -1) {
		mdb_warn("couldn't walk 'umem_cache'");
		return (DCMD_ERR);
	}

	return (DCMD_OK);
}

int
leaky_subr_run(void)
{
	if (leaky_process_proc() == DCMD_ERR) {
		mdb_warn("failed to process proc");
		return (DCMD_ERR);
	}
	return (DCMD_OK);
}

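/*
 * Translate a leaked mtab entry into a leak_bufctl_t based on its
 * LKM_CTL() tag:  audited bufctls and vmem_segs carry their allocation
 * stacks; raw memory regions become sbrk(2) or mmap(2) leaks; and
 * non-debug cache buffers are identified by cache alone.
 */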
void
leaky_subr_add_leak(leak_mtab_t *lmp)
{
	uintptr_t addr = LKM_CTLPTR(lmp->lkm_bufctl);
	uint_t depth;

	vmem_seg_t vs;
	umem_bufctl_audit_t *bcp;
	UMEM_LOCAL_BUFCTL_AUDIT(&bcp);

	switch (LKM_CTLTYPE(lmp->lkm_bufctl)) {
	case LKM_CTL_BUFCTL:
		if (mdb_vread(bcp, UMEM_BUFCTL_AUDIT_SIZE, addr) == -1) {
			mdb_warn("couldn't read leaked bufctl at addr %p",
			    addr);
			return;
		}

		depth = MIN(bcp->bc_depth, umem_stack_depth);

		/*
		 * The top of the stack will be in umem_cache_alloc().
		 * Since the offset in umem_cache_alloc() isn't interesting
		 * we skip that frame for the purposes of uniquifying stacks.
		 *
		 * Also, we use the cache pointer as the leak's cid, to
		 * prevent the coalescing of leaks from different caches.
		 */
		if (depth > 0)
			depth--;
		leaky_add_leak(TYPE_UMEM, addr, (uintptr_t)bcp->bc_addr,
		    bcp->bc_timestamp, bcp->bc_stack + 1, depth,
		    (uintptr_t)bcp->bc_cache, (uintptr_t)bcp->bc_cache);
		break;
	case LKM_CTL_VMSEG:
		if (mdb_vread(&vs, sizeof (vs), addr) == -1) {
			mdb_warn("couldn't read leaked vmem_seg at addr %p",
			    addr);
			return;
		}
		depth = MIN(vs.vs_depth, VMEM_STACK_DEPTH);

		leaky_add_leak(TYPE_VMEM, addr, vs.vs_start, vs.vs_timestamp,
		    vs.vs_stack, depth, 0, (vs.vs_end - vs.vs_start));
		break;
	case LKM_CTL_MEMORY:
		if (LEAKY_INBRK(addr))
			leaky_add_leak(TYPE_SBRK, addr, addr, 0, NULL, 0, 0,
			    lmp->lkm_limit - addr);
		else
			leaky_add_leak(TYPE_MMAP, addr, addr, 0, NULL, 0, 0,
			    lmp->lkm_limit - addr);
		break;
	case LKM_CTL_CACHE:
		leaky_add_leak(TYPE_CACHE, lmp->lkm_base, lmp->lkm_base, 0,
		    NULL, 0, addr, addr);
		break;
	default:
		mdb_warn("internal error:  invalid leak_bufctl_t\n");
		break;
	}
}

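/*
 * State for a dump pass, shared by leaky_subr_dump_start(),
 * leaky_subr_dump(), and leaky_subr_dump_end().
 */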
static int lk_vmem_seen;
static int lk_cache_seen;
static int lk_umem_seen;
static size_t lk_ttl;
static size_t lk_bytes;

void
leaky_subr_dump_start(int type)
{
	switch (type) {
	case TYPE_MMAP:
		lk_vmem_seen = 0;
		break;

	case TYPE_SBRK:
	case TYPE_VMEM:
		return;			/* don't zero counts */

	case TYPE_CACHE:
		lk_cache_seen = 0;
		break;

	case TYPE_UMEM:
		lk_umem_seen = 0;
		break;

	default:
		break;
	}

	lk_ttl = 0;
	lk_bytes = 0;
}

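/*
 * Print a coalesced leak and its duplicates.  TYPE_MMAP, TYPE_SBRK and
 * TYPE_VMEM share one table (hence the ordering dependency noted above);
 * TYPE_CACHE and TYPE_UMEM each print their own header on first use.
 */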
void
leaky_subr_dump(const leak_bufctl_t *lkb, int verbose)
{
	const leak_bufctl_t *cur;
	umem_cache_t cache;
	size_t min, max, size;
	char sz[30];
	char c[MDB_SYM_NAMLEN];
	uintptr_t caller;
	const char *nm, *nm_lc;
	uint8_t type = lkb->lkb_type;

	if (verbose) {
		lk_ttl = 0;
		lk_bytes = 0;
	} else if (!lk_vmem_seen && (type == TYPE_VMEM || type == TYPE_MMAP ||
	    type == TYPE_SBRK)) {
		lk_vmem_seen = 1;
		mdb_printf("%-16s %7s %?s %s\n",
		    "BYTES", "LEAKED", "VMEM_SEG", "CALLER");
	}

	switch (lkb->lkb_type) {
	case TYPE_MMAP:
	case TYPE_SBRK:
		nm = (lkb->lkb_type == TYPE_MMAP) ? "MMAP" : "SBRK";
		nm_lc = (lkb->lkb_type == TYPE_MMAP) ? "mmap(2)" : "sbrk(2)";

		for (; lkb != NULL; lkb = lkb->lkb_next) {
			if (!verbose)
				mdb_printf("%-16d %7d %?p %s\n", lkb->lkb_data,
				    lkb->lkb_dups + 1, lkb->lkb_addr, nm);
			else
				mdb_printf("%s leak: [%p, %p), %ld bytes\n",
				    nm_lc, lkb->lkb_addr,
				    lkb->lkb_addr + lkb->lkb_data,
				    lkb->lkb_data);
			lk_ttl++;
			lk_bytes += lkb->lkb_data;
		}
		return;

	case TYPE_VMEM:
		min = max = lkb->lkb_data;

		for (cur = lkb; cur != NULL; cur = cur->lkb_next) {
			size = cur->lkb_data;

			if (size < min)
				min = size;
			if (size > max)
				max = size;

			lk_ttl++;
			lk_bytes += size;
		}

		if (min == max)
			(void) mdb_snprintf(sz, sizeof (sz), "%ld", min);
		else
			(void) mdb_snprintf(sz, sizeof (sz), "%ld-%ld",
			    min, max);

		if (!verbose) {
			leaky_subr_caller(lkb->lkb_stack, lkb->lkb_depth,
			    c, &caller);

			mdb_printf("%-16s %7d %?p %a\n", sz, lkb->lkb_dups + 1,
			    lkb->lkb_addr, caller);
		} else {
			mdb_arg_t v;

			if (lk_ttl == 1)
				mdb_printf("umem_oversize leak: 1 vmem_seg, "
				    "%ld bytes\n", lk_bytes);
			else
				mdb_printf("umem_oversize leak: %d vmem_segs, "
				    "%s bytes each, %ld bytes total\n",
				    lk_ttl, sz, lk_bytes);

			v.a_type = MDB_TYPE_STRING;
			v.a_un.a_str = "-v";

			if (mdb_call_dcmd("vmem_seg", lkb->lkb_addr,
			    DCMD_ADDRSPEC, 1, &v) == -1) {
				mdb_warn("'%p::vmem_seg -v' failed",
				    lkb->lkb_addr);
			}
		}
		return;

	case TYPE_CACHE:
		if (!lk_cache_seen) {
			lk_cache_seen = 1;
			if (lk_vmem_seen)
				mdb_printf("\n");
			mdb_printf("%-?s %7s %?s %s\n",
			    "CACHE", "LEAKED", "BUFFER", "CALLER");
		}

		if (mdb_vread(&cache, sizeof (cache), lkb->lkb_data) == -1) {
			/*
			 * This _really_ shouldn't happen; we shouldn't
			 * have been able to get this far if this
			 * cache wasn't readable.
			 */
			mdb_warn("can't read cache %p for leaked "
			    "buffer %p", lkb->lkb_data, lkb->lkb_addr);
			return;
		}

		lk_ttl += lkb->lkb_dups + 1;
		lk_bytes += (lkb->lkb_dups + 1) * cache.cache_bufsize;

		caller = (lkb->lkb_depth == 0) ? 0 : lkb->lkb_stack[0];
		if (caller != 0) {
			(void) mdb_snprintf(c, sizeof (c), "%a", caller);
		} else {
			(void) mdb_snprintf(c, sizeof (c), "%s",
			    (verbose) ? "" : "?");
		}

		if (!verbose) {
			mdb_printf("%0?p %7d %0?p %s\n", lkb->lkb_cid,
			    lkb->lkb_dups + 1, lkb->lkb_addr, c);
		} else {
			if (lk_ttl == 1)
				mdb_printf("%s leak: 1 buffer, %ld bytes,\n",
				    cache.cache_name, lk_bytes);
			else
				mdb_printf("%s leak: %d buffers, "
				    "%ld bytes each, %ld bytes total,\n",
				    cache.cache_name, lk_ttl,
				    cache.cache_bufsize, lk_bytes);
			mdb_printf("    %s%s%ssample addr %p\n",
			    (caller == 0) ? "" : "caller ", c,
			    (caller == 0) ? "" : ", ", lkb->lkb_addr);
		}
		return;

	case TYPE_UMEM:
		if (!lk_umem_seen) {
			lk_umem_seen = 1;
			if (lk_vmem_seen || lk_cache_seen)
				mdb_printf("\n");
			mdb_printf("%-?s %7s %?s %s\n",
			    "CACHE", "LEAKED", "BUFCTL", "CALLER");
		}
		if (mdb_vread(&cache, sizeof (cache), lkb->lkb_data) == -1) {
			/*
			 * This _really_ shouldn't happen; we shouldn't
			 * have been able to get this far if this
			 * cache wasn't readable.
			 */
			mdb_warn("can't read cache %p for leaked "
			    "bufctl %p", lkb->lkb_data, lkb->lkb_addr);
			return;
		}

		lk_ttl += lkb->lkb_dups + 1;
		lk_bytes += (lkb->lkb_dups + 1) * cache.cache_bufsize;

		if (!verbose) {
			leaky_subr_caller(lkb->lkb_stack, lkb->lkb_depth, c,
			    &caller);

			mdb_printf("%0?p %7d %0?p %a\n", lkb->lkb_data,
			    lkb->lkb_dups + 1, lkb->lkb_addr, caller);
		} else {
			mdb_arg_t v;

			if (lk_ttl == 1)
				mdb_printf("%s leak: 1 buffer, %ld bytes\n",
				    cache.cache_name, lk_bytes);
			else
				mdb_printf("%s leak: %d buffers, "
				    "%ld bytes each, %ld bytes total\n",
				    cache.cache_name, lk_ttl,
				    cache.cache_bufsize, lk_bytes);

			v.a_type = MDB_TYPE_STRING;
			v.a_un.a_str = "-v";

			if (mdb_call_dcmd("bufctl", lkb->lkb_addr,
			    DCMD_ADDRSPEC, 1, &v) == -1) {
				mdb_warn("'%p::bufctl -v' failed",
				    lkb->lkb_addr);
			}
		}
		return;

	default:
		return;
	}
}

void
leaky_subr_dump_end(int type)
{
	int i;
	int width;
	const char *leak;

	switch (type) {
	case TYPE_VMEM:
		if (!lk_vmem_seen)
			return;

		width = 16;
		leak = "oversized leak";
		break;

	case TYPE_CACHE:
		if (!lk_cache_seen)
			return;

		width = sizeof (uintptr_t) * 2;
		leak = "buffer";
		break;

	case TYPE_UMEM:
		if (!lk_umem_seen)
			return;

		width = sizeof (uintptr_t) * 2;
		leak = "buffer";
		break;

	default:
		return;
	}

	for (i = 0; i < 72; i++)
		mdb_printf("-");
	mdb_printf("\n%*s %7ld %s%s, %ld byte%s\n",
	    width, "Total", lk_ttl, leak, (lk_ttl == 1) ? "" : "s",
	    lk_bytes, (lk_bytes == 1) ? "" : "s");
}

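/*
 * Pass the leak's underlying structure to cb:  the vmem_seg_t for vmem
 * leaks, the audit bufctl for umem leaks, and NULL for everything else.
 */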
int
leaky_subr_invoke_callback(const leak_bufctl_t *lkb, mdb_walk_cb_t cb,
    void *cbdata)
{
	vmem_seg_t vs;
	umem_bufctl_audit_t *bcp;
	UMEM_LOCAL_BUFCTL_AUDIT(&bcp);

	switch (lkb->lkb_type) {
	case TYPE_VMEM:
		if (mdb_vread(&vs, sizeof (vs), lkb->lkb_addr) == -1) {
			mdb_warn("unable to read vmem_seg at %p",
			    lkb->lkb_addr);
			return (WALK_NEXT);
		}
		return (cb(lkb->lkb_addr, &vs, cbdata));

	case TYPE_UMEM:
		if (mdb_vread(bcp, UMEM_BUFCTL_AUDIT_SIZE,
		    lkb->lkb_addr) == -1) {
			mdb_warn("unable to read bufctl at %p",
			    lkb->lkb_addr);
			return (WALK_NEXT);
		}
		return (cb(lkb->lkb_addr, bcp, cbdata));

	default:
		return (cb(lkb->lkb_addr, NULL, cbdata));
	}
}