17c478bd9Sstevel@tonic-gate /* 27c478bd9Sstevel@tonic-gate * CDDL HEADER START 37c478bd9Sstevel@tonic-gate * 47c478bd9Sstevel@tonic-gate * The contents of this file are subject to the terms of the 5789d94c2Sjwadams * Common Development and Distribution License (the "License"). 6789d94c2Sjwadams * You may not use this file except in compliance with the License. 77c478bd9Sstevel@tonic-gate * 87c478bd9Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 97c478bd9Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing. 107c478bd9Sstevel@tonic-gate * See the License for the specific language governing permissions 117c478bd9Sstevel@tonic-gate * and limitations under the License. 127c478bd9Sstevel@tonic-gate * 137c478bd9Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each 147c478bd9Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 157c478bd9Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the 167c478bd9Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying 177c478bd9Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner] 187c478bd9Sstevel@tonic-gate * 197c478bd9Sstevel@tonic-gate * CDDL HEADER END 207c478bd9Sstevel@tonic-gate */ 217c478bd9Sstevel@tonic-gate /* 22*346799e8SJonathan W Adams * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 237c478bd9Sstevel@tonic-gate * Use is subject to license terms. 
247c478bd9Sstevel@tonic-gate */ 257c478bd9Sstevel@tonic-gate 267c478bd9Sstevel@tonic-gate #include <mdb/mdb_param.h> 277c478bd9Sstevel@tonic-gate #include <mdb/mdb_modapi.h> 287c478bd9Sstevel@tonic-gate #include <mdb/mdb_ctf.h> 297c478bd9Sstevel@tonic-gate #include <sys/cpuvar.h> 307c478bd9Sstevel@tonic-gate #include <sys/kmem_impl.h> 317c478bd9Sstevel@tonic-gate #include <sys/vmem_impl.h> 327c478bd9Sstevel@tonic-gate #include <sys/machelf.h> 337c478bd9Sstevel@tonic-gate #include <sys/modctl.h> 347c478bd9Sstevel@tonic-gate #include <sys/kobj.h> 357c478bd9Sstevel@tonic-gate #include <sys/panic.h> 367c478bd9Sstevel@tonic-gate #include <sys/stack.h> 377c478bd9Sstevel@tonic-gate #include <sys/sysmacros.h> 387c478bd9Sstevel@tonic-gate #include <vm/page.h> 397c478bd9Sstevel@tonic-gate 40b5fca8f8Stomee #include "avl.h" 41b5fca8f8Stomee #include "combined.h" 42087e1372Stomee #include "dist.h" 437c478bd9Sstevel@tonic-gate #include "kmem.h" 44b5fca8f8Stomee #include "list.h" 457c478bd9Sstevel@tonic-gate 467c478bd9Sstevel@tonic-gate #define dprintf(x) if (mdb_debug_level) { \ 477c478bd9Sstevel@tonic-gate mdb_printf("kmem debug: "); \ 487c478bd9Sstevel@tonic-gate /*CSTYLED*/\ 497c478bd9Sstevel@tonic-gate mdb_printf x ;\ 507c478bd9Sstevel@tonic-gate } 517c478bd9Sstevel@tonic-gate 527c478bd9Sstevel@tonic-gate #define KM_ALLOCATED 0x01 537c478bd9Sstevel@tonic-gate #define KM_FREE 0x02 547c478bd9Sstevel@tonic-gate #define KM_BUFCTL 0x04 557c478bd9Sstevel@tonic-gate #define KM_CONSTRUCTED 0x08 /* only constructed free buffers */ 567c478bd9Sstevel@tonic-gate #define KM_HASH 0x10 577c478bd9Sstevel@tonic-gate 587c478bd9Sstevel@tonic-gate static int mdb_debug_level = 0; 597c478bd9Sstevel@tonic-gate 607c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 617c478bd9Sstevel@tonic-gate static int 627c478bd9Sstevel@tonic-gate kmem_init_walkers(uintptr_t addr, const kmem_cache_t *c, void *ignored) 637c478bd9Sstevel@tonic-gate { 647c478bd9Sstevel@tonic-gate mdb_walker_t w; 657c478bd9Sstevel@tonic-gate char 
descr[64]; 667c478bd9Sstevel@tonic-gate 677c478bd9Sstevel@tonic-gate (void) mdb_snprintf(descr, sizeof (descr), 687c478bd9Sstevel@tonic-gate "walk the %s cache", c->cache_name); 697c478bd9Sstevel@tonic-gate 707c478bd9Sstevel@tonic-gate w.walk_name = c->cache_name; 717c478bd9Sstevel@tonic-gate w.walk_descr = descr; 727c478bd9Sstevel@tonic-gate w.walk_init = kmem_walk_init; 737c478bd9Sstevel@tonic-gate w.walk_step = kmem_walk_step; 747c478bd9Sstevel@tonic-gate w.walk_fini = kmem_walk_fini; 757c478bd9Sstevel@tonic-gate w.walk_init_arg = (void *)addr; 767c478bd9Sstevel@tonic-gate 777c478bd9Sstevel@tonic-gate if (mdb_add_walker(&w) == -1) 787c478bd9Sstevel@tonic-gate mdb_warn("failed to add %s walker", c->cache_name); 797c478bd9Sstevel@tonic-gate 807c478bd9Sstevel@tonic-gate return (WALK_NEXT); 817c478bd9Sstevel@tonic-gate } 827c478bd9Sstevel@tonic-gate 837c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 847c478bd9Sstevel@tonic-gate int 857c478bd9Sstevel@tonic-gate kmem_debug(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 867c478bd9Sstevel@tonic-gate { 877c478bd9Sstevel@tonic-gate mdb_debug_level ^= 1; 887c478bd9Sstevel@tonic-gate 897c478bd9Sstevel@tonic-gate mdb_printf("kmem: debugging is now %s\n", 907c478bd9Sstevel@tonic-gate mdb_debug_level ? 
"on" : "off"); 917c478bd9Sstevel@tonic-gate 927c478bd9Sstevel@tonic-gate return (DCMD_OK); 937c478bd9Sstevel@tonic-gate } 947c478bd9Sstevel@tonic-gate 957c478bd9Sstevel@tonic-gate int 967c478bd9Sstevel@tonic-gate kmem_cache_walk_init(mdb_walk_state_t *wsp) 977c478bd9Sstevel@tonic-gate { 987c478bd9Sstevel@tonic-gate GElf_Sym sym; 997c478bd9Sstevel@tonic-gate 100b5fca8f8Stomee if (mdb_lookup_by_name("kmem_caches", &sym) == -1) { 101b5fca8f8Stomee mdb_warn("couldn't find kmem_caches"); 1027c478bd9Sstevel@tonic-gate return (WALK_ERR); 1037c478bd9Sstevel@tonic-gate } 1047c478bd9Sstevel@tonic-gate 105b5fca8f8Stomee wsp->walk_addr = (uintptr_t)sym.st_value; 1067c478bd9Sstevel@tonic-gate 107b5fca8f8Stomee return (list_walk_init_named(wsp, "cache list", "cache")); 1087c478bd9Sstevel@tonic-gate } 1097c478bd9Sstevel@tonic-gate 1107c478bd9Sstevel@tonic-gate int 1117c478bd9Sstevel@tonic-gate kmem_cpu_cache_walk_init(mdb_walk_state_t *wsp) 1127c478bd9Sstevel@tonic-gate { 1137c478bd9Sstevel@tonic-gate if (wsp->walk_addr == NULL) { 1147c478bd9Sstevel@tonic-gate mdb_warn("kmem_cpu_cache doesn't support global walks"); 1157c478bd9Sstevel@tonic-gate return (WALK_ERR); 1167c478bd9Sstevel@tonic-gate } 1177c478bd9Sstevel@tonic-gate 1187c478bd9Sstevel@tonic-gate if (mdb_layered_walk("cpu", wsp) == -1) { 1197c478bd9Sstevel@tonic-gate mdb_warn("couldn't walk 'cpu'"); 1207c478bd9Sstevel@tonic-gate return (WALK_ERR); 1217c478bd9Sstevel@tonic-gate } 1227c478bd9Sstevel@tonic-gate 1237c478bd9Sstevel@tonic-gate wsp->walk_data = (void *)wsp->walk_addr; 1247c478bd9Sstevel@tonic-gate 1257c478bd9Sstevel@tonic-gate return (WALK_NEXT); 1267c478bd9Sstevel@tonic-gate } 1277c478bd9Sstevel@tonic-gate 1287c478bd9Sstevel@tonic-gate int 1297c478bd9Sstevel@tonic-gate kmem_cpu_cache_walk_step(mdb_walk_state_t *wsp) 1307c478bd9Sstevel@tonic-gate { 1317c478bd9Sstevel@tonic-gate uintptr_t caddr = (uintptr_t)wsp->walk_data; 1327c478bd9Sstevel@tonic-gate const cpu_t *cpu = wsp->walk_layer; 
1337c478bd9Sstevel@tonic-gate kmem_cpu_cache_t cc; 1347c478bd9Sstevel@tonic-gate 1357c478bd9Sstevel@tonic-gate caddr += cpu->cpu_cache_offset; 1367c478bd9Sstevel@tonic-gate 1377c478bd9Sstevel@tonic-gate if (mdb_vread(&cc, sizeof (kmem_cpu_cache_t), caddr) == -1) { 1387c478bd9Sstevel@tonic-gate mdb_warn("couldn't read kmem_cpu_cache at %p", caddr); 1397c478bd9Sstevel@tonic-gate return (WALK_ERR); 1407c478bd9Sstevel@tonic-gate } 1417c478bd9Sstevel@tonic-gate 1427c478bd9Sstevel@tonic-gate return (wsp->walk_callback(caddr, &cc, wsp->walk_cbdata)); 1437c478bd9Sstevel@tonic-gate } 1447c478bd9Sstevel@tonic-gate 145b5fca8f8Stomee static int 146b5fca8f8Stomee kmem_slab_check(void *p, uintptr_t saddr, void *arg) 147b5fca8f8Stomee { 148b5fca8f8Stomee kmem_slab_t *sp = p; 149b5fca8f8Stomee uintptr_t caddr = (uintptr_t)arg; 150b5fca8f8Stomee if ((uintptr_t)sp->slab_cache != caddr) { 151b5fca8f8Stomee mdb_warn("slab %p isn't in cache %p (in cache %p)\n", 152b5fca8f8Stomee saddr, caddr, sp->slab_cache); 153b5fca8f8Stomee return (-1); 154b5fca8f8Stomee } 155b5fca8f8Stomee 156b5fca8f8Stomee return (0); 157b5fca8f8Stomee } 158b5fca8f8Stomee 159b5fca8f8Stomee static int 160b5fca8f8Stomee kmem_partial_slab_check(void *p, uintptr_t saddr, void *arg) 161b5fca8f8Stomee { 162b5fca8f8Stomee kmem_slab_t *sp = p; 163b5fca8f8Stomee 164b5fca8f8Stomee int rc = kmem_slab_check(p, saddr, arg); 165b5fca8f8Stomee if (rc != 0) { 166b5fca8f8Stomee return (rc); 167b5fca8f8Stomee } 168b5fca8f8Stomee 169b5fca8f8Stomee if (!KMEM_SLAB_IS_PARTIAL(sp)) { 170b5fca8f8Stomee mdb_warn("slab %p is not a partial slab\n", saddr); 171b5fca8f8Stomee return (-1); 172b5fca8f8Stomee } 173b5fca8f8Stomee 174b5fca8f8Stomee return (0); 175b5fca8f8Stomee } 176b5fca8f8Stomee 177b5fca8f8Stomee static int 178b5fca8f8Stomee kmem_complete_slab_check(void *p, uintptr_t saddr, void *arg) 179b5fca8f8Stomee { 180b5fca8f8Stomee kmem_slab_t *sp = p; 181b5fca8f8Stomee 182b5fca8f8Stomee int rc = kmem_slab_check(p, saddr, arg); 
183b5fca8f8Stomee if (rc != 0) { 184b5fca8f8Stomee return (rc); 185b5fca8f8Stomee } 186b5fca8f8Stomee 187b5fca8f8Stomee if (!KMEM_SLAB_IS_ALL_USED(sp)) { 188b5fca8f8Stomee mdb_warn("slab %p is not completely allocated\n", saddr); 189b5fca8f8Stomee return (-1); 190b5fca8f8Stomee } 191b5fca8f8Stomee 192b5fca8f8Stomee return (0); 193b5fca8f8Stomee } 194b5fca8f8Stomee 195b5fca8f8Stomee typedef struct { 196b5fca8f8Stomee uintptr_t kns_cache_addr; 197b5fca8f8Stomee int kns_nslabs; 198b5fca8f8Stomee } kmem_nth_slab_t; 199b5fca8f8Stomee 200b5fca8f8Stomee static int 201b5fca8f8Stomee kmem_nth_slab_check(void *p, uintptr_t saddr, void *arg) 202b5fca8f8Stomee { 203b5fca8f8Stomee kmem_nth_slab_t *chkp = arg; 204b5fca8f8Stomee 205b5fca8f8Stomee int rc = kmem_slab_check(p, saddr, (void *)chkp->kns_cache_addr); 206b5fca8f8Stomee if (rc != 0) { 207b5fca8f8Stomee return (rc); 208b5fca8f8Stomee } 209b5fca8f8Stomee 210b5fca8f8Stomee return (chkp->kns_nslabs-- == 0 ? 1 : 0); 211b5fca8f8Stomee } 212b5fca8f8Stomee 213b5fca8f8Stomee static int 214b5fca8f8Stomee kmem_complete_slab_walk_init(mdb_walk_state_t *wsp) 215b5fca8f8Stomee { 216b5fca8f8Stomee uintptr_t caddr = wsp->walk_addr; 217b5fca8f8Stomee 218b5fca8f8Stomee wsp->walk_addr = (uintptr_t)(caddr + 219b5fca8f8Stomee offsetof(kmem_cache_t, cache_complete_slabs)); 220b5fca8f8Stomee 221b5fca8f8Stomee return (list_walk_init_checked(wsp, "slab list", "slab", 222b5fca8f8Stomee kmem_complete_slab_check, (void *)caddr)); 223b5fca8f8Stomee } 224b5fca8f8Stomee 225b5fca8f8Stomee static int 226b5fca8f8Stomee kmem_partial_slab_walk_init(mdb_walk_state_t *wsp) 227b5fca8f8Stomee { 228b5fca8f8Stomee uintptr_t caddr = wsp->walk_addr; 229b5fca8f8Stomee 230b5fca8f8Stomee wsp->walk_addr = (uintptr_t)(caddr + 231b5fca8f8Stomee offsetof(kmem_cache_t, cache_partial_slabs)); 232b5fca8f8Stomee 233b5fca8f8Stomee return (avl_walk_init_checked(wsp, "slab list", "slab", 234b5fca8f8Stomee kmem_partial_slab_check, (void *)caddr)); 235b5fca8f8Stomee } 
236b5fca8f8Stomee 2377c478bd9Sstevel@tonic-gate int 2387c478bd9Sstevel@tonic-gate kmem_slab_walk_init(mdb_walk_state_t *wsp) 2397c478bd9Sstevel@tonic-gate { 2407c478bd9Sstevel@tonic-gate uintptr_t caddr = wsp->walk_addr; 2417c478bd9Sstevel@tonic-gate 2427c478bd9Sstevel@tonic-gate if (caddr == NULL) { 2437c478bd9Sstevel@tonic-gate mdb_warn("kmem_slab doesn't support global walks\n"); 2447c478bd9Sstevel@tonic-gate return (WALK_ERR); 2457c478bd9Sstevel@tonic-gate } 2467c478bd9Sstevel@tonic-gate 247b5fca8f8Stomee combined_walk_init(wsp); 248b5fca8f8Stomee combined_walk_add(wsp, 249b5fca8f8Stomee kmem_complete_slab_walk_init, list_walk_step, list_walk_fini); 250b5fca8f8Stomee combined_walk_add(wsp, 251b5fca8f8Stomee kmem_partial_slab_walk_init, avl_walk_step, avl_walk_fini); 2527c478bd9Sstevel@tonic-gate 2537c478bd9Sstevel@tonic-gate return (WALK_NEXT); 2547c478bd9Sstevel@tonic-gate } 2557c478bd9Sstevel@tonic-gate 256b5fca8f8Stomee static int 257b5fca8f8Stomee kmem_first_complete_slab_walk_init(mdb_walk_state_t *wsp) 258b5fca8f8Stomee { 259b5fca8f8Stomee uintptr_t caddr = wsp->walk_addr; 260b5fca8f8Stomee kmem_nth_slab_t *chk; 261b5fca8f8Stomee 262b5fca8f8Stomee chk = mdb_alloc(sizeof (kmem_nth_slab_t), 263b5fca8f8Stomee UM_SLEEP | UM_GC); 264b5fca8f8Stomee chk->kns_cache_addr = caddr; 265b5fca8f8Stomee chk->kns_nslabs = 1; 266b5fca8f8Stomee wsp->walk_addr = (uintptr_t)(caddr + 267b5fca8f8Stomee offsetof(kmem_cache_t, cache_complete_slabs)); 268b5fca8f8Stomee 269b5fca8f8Stomee return (list_walk_init_checked(wsp, "slab list", "slab", 270b5fca8f8Stomee kmem_nth_slab_check, chk)); 271b5fca8f8Stomee } 272b5fca8f8Stomee 2737c478bd9Sstevel@tonic-gate int 2747c478bd9Sstevel@tonic-gate kmem_slab_walk_partial_init(mdb_walk_state_t *wsp) 2757c478bd9Sstevel@tonic-gate { 2767c478bd9Sstevel@tonic-gate uintptr_t caddr = wsp->walk_addr; 2777c478bd9Sstevel@tonic-gate kmem_cache_t c; 2787c478bd9Sstevel@tonic-gate 2797c478bd9Sstevel@tonic-gate if (caddr == NULL) { 
2807c478bd9Sstevel@tonic-gate mdb_warn("kmem_slab_partial doesn't support global walks\n"); 2817c478bd9Sstevel@tonic-gate return (WALK_ERR); 2827c478bd9Sstevel@tonic-gate } 2837c478bd9Sstevel@tonic-gate 2847c478bd9Sstevel@tonic-gate if (mdb_vread(&c, sizeof (c), caddr) == -1) { 2857c478bd9Sstevel@tonic-gate mdb_warn("couldn't read kmem_cache at %p", caddr); 2867c478bd9Sstevel@tonic-gate return (WALK_ERR); 2877c478bd9Sstevel@tonic-gate } 2887c478bd9Sstevel@tonic-gate 289b5fca8f8Stomee combined_walk_init(wsp); 2907c478bd9Sstevel@tonic-gate 2917c478bd9Sstevel@tonic-gate /* 2927c478bd9Sstevel@tonic-gate * Some consumers (umem_walk_step(), in particular) require at 2937c478bd9Sstevel@tonic-gate * least one callback if there are any buffers in the cache. So 294b5fca8f8Stomee * if there are *no* partial slabs, report the first full slab, if 2957c478bd9Sstevel@tonic-gate * any. 2967c478bd9Sstevel@tonic-gate * 2977c478bd9Sstevel@tonic-gate * Yes, this is ugly, but it's cleaner than the other possibilities. 
2987c478bd9Sstevel@tonic-gate */ 299b5fca8f8Stomee if (c.cache_partial_slabs.avl_numnodes == 0) { 300b5fca8f8Stomee combined_walk_add(wsp, kmem_first_complete_slab_walk_init, 301b5fca8f8Stomee list_walk_step, list_walk_fini); 302b5fca8f8Stomee } else { 303b5fca8f8Stomee combined_walk_add(wsp, kmem_partial_slab_walk_init, 304b5fca8f8Stomee avl_walk_step, avl_walk_fini); 3057c478bd9Sstevel@tonic-gate } 3067c478bd9Sstevel@tonic-gate 307b5fca8f8Stomee return (WALK_NEXT); 3087c478bd9Sstevel@tonic-gate } 3097c478bd9Sstevel@tonic-gate 3107c478bd9Sstevel@tonic-gate int 3117c478bd9Sstevel@tonic-gate kmem_cache(uintptr_t addr, uint_t flags, int ac, const mdb_arg_t *argv) 3127c478bd9Sstevel@tonic-gate { 3137c478bd9Sstevel@tonic-gate kmem_cache_t c; 314b5fca8f8Stomee const char *filter = NULL; 315b5fca8f8Stomee 316b5fca8f8Stomee if (mdb_getopts(ac, argv, 317b5fca8f8Stomee 'n', MDB_OPT_STR, &filter, 318b5fca8f8Stomee NULL) != ac) { 319b5fca8f8Stomee return (DCMD_USAGE); 320b5fca8f8Stomee } 3217c478bd9Sstevel@tonic-gate 3227c478bd9Sstevel@tonic-gate if (!(flags & DCMD_ADDRSPEC)) { 3237c478bd9Sstevel@tonic-gate if (mdb_walk_dcmd("kmem_cache", "kmem_cache", ac, argv) == -1) { 3247c478bd9Sstevel@tonic-gate mdb_warn("can't walk kmem_cache"); 3257c478bd9Sstevel@tonic-gate return (DCMD_ERR); 3267c478bd9Sstevel@tonic-gate } 3277c478bd9Sstevel@tonic-gate return (DCMD_OK); 3287c478bd9Sstevel@tonic-gate } 3297c478bd9Sstevel@tonic-gate 3307c478bd9Sstevel@tonic-gate if (DCMD_HDRSPEC(flags)) 3317c478bd9Sstevel@tonic-gate mdb_printf("%-?s %-25s %4s %6s %8s %8s\n", "ADDR", "NAME", 3327c478bd9Sstevel@tonic-gate "FLAG", "CFLAG", "BUFSIZE", "BUFTOTL"); 3337c478bd9Sstevel@tonic-gate 3347c478bd9Sstevel@tonic-gate if (mdb_vread(&c, sizeof (c), addr) == -1) { 3357c478bd9Sstevel@tonic-gate mdb_warn("couldn't read kmem_cache at %p", addr); 3367c478bd9Sstevel@tonic-gate return (DCMD_ERR); 3377c478bd9Sstevel@tonic-gate } 3387c478bd9Sstevel@tonic-gate 339b5fca8f8Stomee if ((filter != NULL) && 
(strstr(c.cache_name, filter) == NULL)) 340b5fca8f8Stomee return (DCMD_OK); 341b5fca8f8Stomee 3427c478bd9Sstevel@tonic-gate mdb_printf("%0?p %-25s %04x %06x %8ld %8lld\n", addr, c.cache_name, 3437c478bd9Sstevel@tonic-gate c.cache_flags, c.cache_cflags, c.cache_bufsize, c.cache_buftotal); 3447c478bd9Sstevel@tonic-gate 3457c478bd9Sstevel@tonic-gate return (DCMD_OK); 3467c478bd9Sstevel@tonic-gate } 3477c478bd9Sstevel@tonic-gate 348b5fca8f8Stomee void 349b5fca8f8Stomee kmem_cache_help(void) 350b5fca8f8Stomee { 351b5fca8f8Stomee mdb_printf("%s", "Print kernel memory caches.\n\n"); 352b5fca8f8Stomee mdb_dec_indent(2); 353b5fca8f8Stomee mdb_printf("%<b>OPTIONS%</b>\n"); 354b5fca8f8Stomee mdb_inc_indent(2); 355b5fca8f8Stomee mdb_printf("%s", 356b5fca8f8Stomee " -n name\n" 357b5fca8f8Stomee " name of kmem cache (or matching partial name)\n" 358b5fca8f8Stomee "\n" 359b5fca8f8Stomee "Column\tDescription\n" 360b5fca8f8Stomee "\n" 361b5fca8f8Stomee "ADDR\t\taddress of kmem cache\n" 362b5fca8f8Stomee "NAME\t\tname of kmem cache\n" 363b5fca8f8Stomee "FLAG\t\tvarious cache state flags\n" 364b5fca8f8Stomee "CFLAG\t\tcache creation flags\n" 365b5fca8f8Stomee "BUFSIZE\tobject size in bytes\n" 366b5fca8f8Stomee "BUFTOTL\tcurrent total buffers in cache (allocated and free)\n"); 367b5fca8f8Stomee } 3683893cb7fStomee 3693893cb7fStomee #define LABEL_WIDTH 11 3703893cb7fStomee static void 3713893cb7fStomee kmem_slabs_print_dist(uint_t *ks_bucket, size_t buffers_per_slab, 3723893cb7fStomee size_t maxbuckets, size_t minbucketsize) 3733893cb7fStomee { 3743893cb7fStomee uint64_t total; 3753893cb7fStomee int buckets; 3763893cb7fStomee int i; 3773893cb7fStomee const int *distarray; 3783893cb7fStomee int complete[2]; 3793893cb7fStomee 3803893cb7fStomee buckets = buffers_per_slab; 3813893cb7fStomee 3823893cb7fStomee total = 0; 3833893cb7fStomee for (i = 0; i <= buffers_per_slab; i++) 3843893cb7fStomee total += ks_bucket[i]; 3853893cb7fStomee 3863893cb7fStomee if (maxbuckets > 1) 3873893cb7fStomee 
buckets = MIN(buckets, maxbuckets); 3883893cb7fStomee 3893893cb7fStomee if (minbucketsize > 1) { 3903893cb7fStomee /* 3913893cb7fStomee * minbucketsize does not apply to the first bucket reserved 3923893cb7fStomee * for completely allocated slabs 3933893cb7fStomee */ 3943893cb7fStomee buckets = MIN(buckets, 1 + ((buffers_per_slab - 1) / 3953893cb7fStomee minbucketsize)); 3963893cb7fStomee if ((buckets < 2) && (buffers_per_slab > 1)) { 3973893cb7fStomee buckets = 2; 3983893cb7fStomee minbucketsize = (buffers_per_slab - 1); 3993893cb7fStomee } 4003893cb7fStomee } 4013893cb7fStomee 4023893cb7fStomee /* 4033893cb7fStomee * The first printed bucket is reserved for completely allocated slabs. 4043893cb7fStomee * Passing (buckets - 1) excludes that bucket from the generated 4053893cb7fStomee * distribution, since we're handling it as a special case. 4063893cb7fStomee */ 4073893cb7fStomee complete[0] = buffers_per_slab; 4083893cb7fStomee complete[1] = buffers_per_slab + 1; 409087e1372Stomee distarray = dist_linear(buckets - 1, 1, buffers_per_slab - 1); 4103893cb7fStomee 4113893cb7fStomee mdb_printf("%*s\n", LABEL_WIDTH, "Allocated"); 412087e1372Stomee dist_print_header("Buffers", LABEL_WIDTH, "Slabs"); 4133893cb7fStomee 414087e1372Stomee dist_print_bucket(complete, 0, ks_bucket, total, LABEL_WIDTH); 4153893cb7fStomee /* 4163893cb7fStomee * Print bucket ranges in descending order after the first bucket for 4173893cb7fStomee * completely allocated slabs, so a person can see immediately whether 4183893cb7fStomee * or not there is fragmentation without having to scan possibly 4193893cb7fStomee * multiple screens of output. Starting at (buckets - 2) excludes the 4203893cb7fStomee * extra terminating bucket. 
4213893cb7fStomee */ 4223893cb7fStomee for (i = buckets - 2; i >= 0; i--) { 423087e1372Stomee dist_print_bucket(distarray, i, ks_bucket, total, LABEL_WIDTH); 4243893cb7fStomee } 4253893cb7fStomee mdb_printf("\n"); 4263893cb7fStomee } 4273893cb7fStomee #undef LABEL_WIDTH 4283893cb7fStomee 4293893cb7fStomee /*ARGSUSED*/ 4303893cb7fStomee static int 4313893cb7fStomee kmem_first_slab(uintptr_t addr, const kmem_slab_t *sp, boolean_t *is_slab) 4323893cb7fStomee { 4333893cb7fStomee *is_slab = B_TRUE; 4343893cb7fStomee return (WALK_DONE); 4353893cb7fStomee } 4363893cb7fStomee 4373893cb7fStomee /*ARGSUSED*/ 4383893cb7fStomee static int 4393893cb7fStomee kmem_first_partial_slab(uintptr_t addr, const kmem_slab_t *sp, 4403893cb7fStomee boolean_t *is_slab) 4413893cb7fStomee { 4423893cb7fStomee /* 443b5fca8f8Stomee * The "kmem_partial_slab" walker reports the first full slab if there 4443893cb7fStomee * are no partial slabs (for the sake of consumers that require at least 4453893cb7fStomee * one callback if there are any buffers in the cache). 
4463893cb7fStomee */ 447b5fca8f8Stomee *is_slab = KMEM_SLAB_IS_PARTIAL(sp); 4483893cb7fStomee return (WALK_DONE); 4493893cb7fStomee } 4503893cb7fStomee 451b5fca8f8Stomee typedef struct kmem_slab_usage { 452b5fca8f8Stomee int ksu_refcnt; /* count of allocated buffers on slab */ 453b5fca8f8Stomee boolean_t ksu_nomove; /* slab marked non-reclaimable */ 454b5fca8f8Stomee } kmem_slab_usage_t; 455b5fca8f8Stomee 456b5fca8f8Stomee typedef struct kmem_slab_stats { 457b5fca8f8Stomee const kmem_cache_t *ks_cp; 458b5fca8f8Stomee int ks_slabs; /* slabs in cache */ 459b5fca8f8Stomee int ks_partial_slabs; /* partially allocated slabs in cache */ 460b5fca8f8Stomee uint64_t ks_unused_buffers; /* total unused buffers in cache */ 461b5fca8f8Stomee int ks_max_buffers_per_slab; /* max buffers per slab */ 462b5fca8f8Stomee int ks_usage_len; /* ks_usage array length */ 463b5fca8f8Stomee kmem_slab_usage_t *ks_usage; /* partial slab usage */ 464b5fca8f8Stomee uint_t *ks_bucket; /* slab usage distribution */ 465b5fca8f8Stomee } kmem_slab_stats_t; 466b5fca8f8Stomee 4673893cb7fStomee /*ARGSUSED*/ 4683893cb7fStomee static int 4693893cb7fStomee kmem_slablist_stat(uintptr_t addr, const kmem_slab_t *sp, 4703893cb7fStomee kmem_slab_stats_t *ks) 4713893cb7fStomee { 4723893cb7fStomee kmem_slab_usage_t *ksu; 4733893cb7fStomee long unused; 4743893cb7fStomee 4753893cb7fStomee ks->ks_slabs++; 4763893cb7fStomee ks->ks_bucket[sp->slab_refcnt]++; 4773893cb7fStomee 4783893cb7fStomee unused = (sp->slab_chunks - sp->slab_refcnt); 4793893cb7fStomee if (unused == 0) { 4803893cb7fStomee return (WALK_NEXT); 4813893cb7fStomee } 4823893cb7fStomee 4833893cb7fStomee ks->ks_partial_slabs++; 4843893cb7fStomee ks->ks_unused_buffers += unused; 4853893cb7fStomee 4863893cb7fStomee if (ks->ks_partial_slabs > ks->ks_usage_len) { 4873893cb7fStomee kmem_slab_usage_t *usage; 4883893cb7fStomee int len = ks->ks_usage_len; 4893893cb7fStomee 4903893cb7fStomee len = (len == 0 ? 
16 : len * 2); 4913893cb7fStomee usage = mdb_zalloc(len * sizeof (kmem_slab_usage_t), UM_SLEEP); 4923893cb7fStomee if (ks->ks_usage != NULL) { 4933893cb7fStomee bcopy(ks->ks_usage, usage, 4943893cb7fStomee ks->ks_usage_len * sizeof (kmem_slab_usage_t)); 4953893cb7fStomee mdb_free(ks->ks_usage, 4963893cb7fStomee ks->ks_usage_len * sizeof (kmem_slab_usage_t)); 4973893cb7fStomee } 4983893cb7fStomee ks->ks_usage = usage; 4993893cb7fStomee ks->ks_usage_len = len; 5003893cb7fStomee } 5013893cb7fStomee 5023893cb7fStomee ksu = &ks->ks_usage[ks->ks_partial_slabs - 1]; 5033893cb7fStomee ksu->ksu_refcnt = sp->slab_refcnt; 504b5fca8f8Stomee ksu->ksu_nomove = (sp->slab_flags & KMEM_SLAB_NOMOVE); 5053893cb7fStomee return (WALK_NEXT); 5063893cb7fStomee } 5073893cb7fStomee 5083893cb7fStomee static void 5093893cb7fStomee kmem_slabs_header() 5103893cb7fStomee { 5113893cb7fStomee mdb_printf("%-25s %8s %8s %9s %9s %6s\n", 5123893cb7fStomee "", "", "Partial", "", "Unused", ""); 5133893cb7fStomee mdb_printf("%-25s %8s %8s %9s %9s %6s\n", 5143893cb7fStomee "Cache Name", "Slabs", "Slabs", "Buffers", "Buffers", "Waste"); 5153893cb7fStomee mdb_printf("%-25s %8s %8s %9s %9s %6s\n", 5163893cb7fStomee "-------------------------", "--------", "--------", "---------", 5173893cb7fStomee "---------", "------"); 5183893cb7fStomee } 5193893cb7fStomee 5203893cb7fStomee int 5213893cb7fStomee kmem_slabs(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 5223893cb7fStomee { 5233893cb7fStomee kmem_cache_t c; 5243893cb7fStomee kmem_slab_stats_t stats; 5253893cb7fStomee mdb_walk_cb_t cb; 5263893cb7fStomee int pct; 5273893cb7fStomee int tenths_pct; 5283893cb7fStomee size_t maxbuckets = 1; 5293893cb7fStomee size_t minbucketsize = 0; 5303893cb7fStomee const char *filter = NULL; 531b5fca8f8Stomee const char *name = NULL; 5323893cb7fStomee uint_t opt_v = FALSE; 533b5fca8f8Stomee boolean_t buckets = B_FALSE; 5343893cb7fStomee boolean_t skip = B_FALSE; 5353893cb7fStomee 5363893cb7fStomee if 
(mdb_getopts(argc, argv, 5373893cb7fStomee 'B', MDB_OPT_UINTPTR, &minbucketsize, 5383893cb7fStomee 'b', MDB_OPT_UINTPTR, &maxbuckets, 5393893cb7fStomee 'n', MDB_OPT_STR, &filter, 540b5fca8f8Stomee 'N', MDB_OPT_STR, &name, 5413893cb7fStomee 'v', MDB_OPT_SETBITS, TRUE, &opt_v, 5423893cb7fStomee NULL) != argc) { 5433893cb7fStomee return (DCMD_USAGE); 5443893cb7fStomee } 5453893cb7fStomee 546b5fca8f8Stomee if ((maxbuckets != 1) || (minbucketsize != 0)) { 547b5fca8f8Stomee buckets = B_TRUE; 5483893cb7fStomee } 5493893cb7fStomee 5503893cb7fStomee if (!(flags & DCMD_ADDRSPEC)) { 5513893cb7fStomee if (mdb_walk_dcmd("kmem_cache", "kmem_slabs", argc, 5523893cb7fStomee argv) == -1) { 5533893cb7fStomee mdb_warn("can't walk kmem_cache"); 5543893cb7fStomee return (DCMD_ERR); 5553893cb7fStomee } 5563893cb7fStomee return (DCMD_OK); 5573893cb7fStomee } 5583893cb7fStomee 5593893cb7fStomee if (mdb_vread(&c, sizeof (c), addr) == -1) { 5603893cb7fStomee mdb_warn("couldn't read kmem_cache at %p", addr); 5613893cb7fStomee return (DCMD_ERR); 5623893cb7fStomee } 5633893cb7fStomee 564b5fca8f8Stomee if (name == NULL) { 565b5fca8f8Stomee skip = ((filter != NULL) && 566b5fca8f8Stomee (strstr(c.cache_name, filter) == NULL)); 567b5fca8f8Stomee } else if (filter == NULL) { 568b5fca8f8Stomee skip = (strcmp(c.cache_name, name) != 0); 569b5fca8f8Stomee } else { 570b5fca8f8Stomee /* match either -n or -N */ 571b5fca8f8Stomee skip = ((strcmp(c.cache_name, name) != 0) && 572b5fca8f8Stomee (strstr(c.cache_name, filter) == NULL)); 5733893cb7fStomee } 5743893cb7fStomee 575b5fca8f8Stomee if (!(opt_v || buckets) && DCMD_HDRSPEC(flags)) { 5763893cb7fStomee kmem_slabs_header(); 577b5fca8f8Stomee } else if ((opt_v || buckets) && !skip) { 5783893cb7fStomee if (DCMD_HDRSPEC(flags)) { 5793893cb7fStomee kmem_slabs_header(); 5803893cb7fStomee } else { 5813893cb7fStomee boolean_t is_slab = B_FALSE; 5823893cb7fStomee const char *walker_name; 5833893cb7fStomee if (opt_v) { 5843893cb7fStomee cb = 
(mdb_walk_cb_t)kmem_first_partial_slab; 5853893cb7fStomee walker_name = "kmem_slab_partial"; 5863893cb7fStomee } else { 5873893cb7fStomee cb = (mdb_walk_cb_t)kmem_first_slab; 5883893cb7fStomee walker_name = "kmem_slab"; 5893893cb7fStomee } 5903893cb7fStomee (void) mdb_pwalk(walker_name, cb, &is_slab, addr); 5913893cb7fStomee if (is_slab) { 5923893cb7fStomee kmem_slabs_header(); 5933893cb7fStomee } 5943893cb7fStomee } 5953893cb7fStomee } 5963893cb7fStomee 5973893cb7fStomee if (skip) { 5983893cb7fStomee return (DCMD_OK); 5993893cb7fStomee } 6003893cb7fStomee 6013893cb7fStomee bzero(&stats, sizeof (kmem_slab_stats_t)); 602b5fca8f8Stomee stats.ks_cp = &c; 603b5fca8f8Stomee stats.ks_max_buffers_per_slab = c.cache_maxchunks; 604b5fca8f8Stomee /* +1 to include a zero bucket */ 605b5fca8f8Stomee stats.ks_bucket = mdb_zalloc((stats.ks_max_buffers_per_slab + 1) * 606b5fca8f8Stomee sizeof (*stats.ks_bucket), UM_SLEEP); 6073893cb7fStomee cb = (mdb_walk_cb_t)kmem_slablist_stat; 6083893cb7fStomee (void) mdb_pwalk("kmem_slab", cb, &stats, addr); 6093893cb7fStomee 6103893cb7fStomee if (c.cache_buftotal == 0) { 6113893cb7fStomee pct = 0; 6123893cb7fStomee tenths_pct = 0; 6133893cb7fStomee } else { 6143893cb7fStomee uint64_t n = stats.ks_unused_buffers * 10000; 6153893cb7fStomee pct = (int)(n / c.cache_buftotal); 6163893cb7fStomee tenths_pct = pct - ((pct / 100) * 100); 6173893cb7fStomee tenths_pct = (tenths_pct + 5) / 10; /* round nearest tenth */ 6183893cb7fStomee if (tenths_pct == 10) { 6193893cb7fStomee pct += 100; 6203893cb7fStomee tenths_pct = 0; 6213893cb7fStomee } 6223893cb7fStomee } 6233893cb7fStomee 6243893cb7fStomee pct /= 100; 6253893cb7fStomee mdb_printf("%-25s %8d %8d %9lld %9lld %3d.%1d%%\n", c.cache_name, 6263893cb7fStomee stats.ks_slabs, stats.ks_partial_slabs, c.cache_buftotal, 6273893cb7fStomee stats.ks_unused_buffers, pct, tenths_pct); 6283893cb7fStomee 6293893cb7fStomee if (maxbuckets == 0) { 630b5fca8f8Stomee maxbuckets = stats.ks_max_buffers_per_slab; 
6313893cb7fStomee } 6323893cb7fStomee 6333893cb7fStomee if (((maxbuckets > 1) || (minbucketsize > 0)) && 6343893cb7fStomee (stats.ks_slabs > 0)) { 6353893cb7fStomee mdb_printf("\n"); 6363893cb7fStomee kmem_slabs_print_dist(stats.ks_bucket, 637b5fca8f8Stomee stats.ks_max_buffers_per_slab, maxbuckets, minbucketsize); 638b5fca8f8Stomee } 639b5fca8f8Stomee 640b5fca8f8Stomee mdb_free(stats.ks_bucket, (stats.ks_max_buffers_per_slab + 1) * 641b5fca8f8Stomee sizeof (*stats.ks_bucket)); 642b5fca8f8Stomee 643b5fca8f8Stomee if (!opt_v) { 644b5fca8f8Stomee return (DCMD_OK); 6453893cb7fStomee } 6463893cb7fStomee 6473893cb7fStomee if (opt_v && (stats.ks_partial_slabs > 0)) { 6483893cb7fStomee int i; 6493893cb7fStomee kmem_slab_usage_t *ksu; 6503893cb7fStomee 6513893cb7fStomee mdb_printf(" %d complete, %d partial", 6523893cb7fStomee (stats.ks_slabs - stats.ks_partial_slabs), 6533893cb7fStomee stats.ks_partial_slabs); 6543893cb7fStomee if (stats.ks_partial_slabs > 0) { 655b5fca8f8Stomee mdb_printf(" (%d):", stats.ks_max_buffers_per_slab); 6563893cb7fStomee } 6573893cb7fStomee for (i = 0; i < stats.ks_partial_slabs; i++) { 6583893cb7fStomee ksu = &stats.ks_usage[i]; 659b5fca8f8Stomee if (ksu->ksu_nomove) { 660b5fca8f8Stomee const char *symbol = "*"; 661b5fca8f8Stomee mdb_printf(" %d%s", ksu->ksu_refcnt, symbol); 662b5fca8f8Stomee } else { 663b5fca8f8Stomee mdb_printf(" %d", ksu->ksu_refcnt); 664b5fca8f8Stomee } 6653893cb7fStomee } 6663893cb7fStomee mdb_printf("\n\n"); 6673893cb7fStomee } 6683893cb7fStomee 6693893cb7fStomee if (stats.ks_usage_len > 0) { 6703893cb7fStomee mdb_free(stats.ks_usage, 6713893cb7fStomee stats.ks_usage_len * sizeof (kmem_slab_usage_t)); 6723893cb7fStomee } 6733893cb7fStomee 6743893cb7fStomee return (DCMD_OK); 6753893cb7fStomee } 6763893cb7fStomee 6773893cb7fStomee void 6783893cb7fStomee kmem_slabs_help(void) 6793893cb7fStomee { 680b5fca8f8Stomee mdb_printf("%s", 681b5fca8f8Stomee "Display slab usage per kmem cache.\n\n"); 6823893cb7fStomee 
mdb_dec_indent(2); 6833893cb7fStomee mdb_printf("%<b>OPTIONS%</b>\n"); 6843893cb7fStomee mdb_inc_indent(2); 6853893cb7fStomee mdb_printf("%s", 6863893cb7fStomee " -n name\n" 6873893cb7fStomee " name of kmem cache (or matching partial name)\n" 688b5fca8f8Stomee " -N name\n" 689b5fca8f8Stomee " exact name of kmem cache\n" 6903893cb7fStomee " -b maxbins\n" 6913893cb7fStomee " Print a distribution of allocated buffers per slab using at\n" 6923893cb7fStomee " most maxbins bins. The first bin is reserved for completely\n" 6933893cb7fStomee " allocated slabs. Setting maxbins to zero (-b 0) has the same\n" 6943893cb7fStomee " effect as specifying the maximum allocated buffers per slab\n" 6953893cb7fStomee " or setting minbinsize to 1 (-B 1).\n" 6963893cb7fStomee " -B minbinsize\n" 6973893cb7fStomee " Print a distribution of allocated buffers per slab, making\n" 6983893cb7fStomee " all bins (except the first, reserved for completely allocated\n" 6993893cb7fStomee " slabs) at least minbinsize buffers apart.\n" 7003893cb7fStomee " -v verbose output: List the allocated buffer count of each partial\n" 7013893cb7fStomee " slab on the free list in order from front to back to show how\n" 7023893cb7fStomee " closely the slabs are ordered by usage. For example\n" 7033893cb7fStomee "\n" 7043893cb7fStomee " 10 complete, 3 partial (8): 7 3 1\n" 7053893cb7fStomee "\n" 7063893cb7fStomee " means there are thirteen slabs with eight buffers each, including\n" 7073893cb7fStomee " three partially allocated slabs with less than all eight buffers\n" 7083893cb7fStomee " allocated.\n" 7093893cb7fStomee "\n" 7103893cb7fStomee " Buffer allocations are always from the front of the partial slab\n" 7113893cb7fStomee " list. When a buffer is freed from a completely used slab, that\n" 7123893cb7fStomee " slab is added to the front of the partial slab list. 
Assuming\n" 7133893cb7fStomee " that all buffers are equally likely to be freed soon, the\n" 7143893cb7fStomee " desired order of partial slabs is most-used at the front of the\n" 7153893cb7fStomee " list and least-used at the back (as in the example above).\n" 7163893cb7fStomee " However, if a slab contains an allocated buffer that will not\n" 7173893cb7fStomee " soon be freed, it would be better for that slab to be at the\n" 718b5fca8f8Stomee " front where all of its buffers can be allocated. Taking a slab\n" 719b5fca8f8Stomee " off the partial slab list (either with all buffers freed or all\n" 720b5fca8f8Stomee " buffers allocated) reduces cache fragmentation.\n" 721b5fca8f8Stomee "\n" 722b5fca8f8Stomee " A slab's allocated buffer count representing a partial slab (9 in\n" 723b5fca8f8Stomee " the example below) may be marked as follows:\n" 724b5fca8f8Stomee "\n" 725b5fca8f8Stomee " 9* An asterisk indicates that kmem has marked the slab non-\n" 726b5fca8f8Stomee " reclaimable because the kmem client refused to move one of the\n" 727b5fca8f8Stomee " slab's buffers. Since kmem does not expect to completely free the\n" 728b5fca8f8Stomee " slab, it moves it to the front of the list in the hope of\n" 729b5fca8f8Stomee " completely allocating it instead. 
/*
 * qsort(3C)/bsearch(3C) comparator for arrays of buffer addresses
 * stored as uintptr_t values: orders ascending, returning -1, 0, or 1
 * as *lhs is below, equal to, or above *rhs.
 */
static int
addrcmp(const void *lhs, const void *rhs)
{
	const uintptr_t a = *(const uintptr_t *)lhs;
	const uintptr_t b = *(const uintptr_t *)rhs;

	if (a == b)
		return (0);
	return (a < b ? -1 : 1);
}
/*
 * Comparator for sorting bufctl audit records by timestamp, most
 * recent first (descending bc_timestamp).
 */
static int
bufctlcmp(const kmem_bufctl_audit_t **lhs, const kmem_bufctl_audit_t **rhs)
{
	const kmem_bufctl_audit_t *bcp1 = *lhs;
	const kmem_bufctl_audit_t *bcp2 = *rhs;

	if (bcp1->bc_timestamp > bcp2->bc_timestamp)
		return (-1);

	if (bcp1->bc_timestamp < bcp2->bc_timestamp)
		return (1);

	return (0);
}

/*
 * State for the "kmem_hash" walker, which visits every bufctl in a
 * cache's bufctl hash table.
 */
typedef struct kmem_hash_walk {
	uintptr_t *kmhw_table;		/* local copy of the bucket array */
	size_t kmhw_nelems;		/* number of buckets in kmhw_table */
	size_t kmhw_pos;		/* next bucket index to examine */
	kmem_bufctl_t kmhw_cur;		/* copy of the current bufctl */
} kmem_hash_walk_t;

/*
 * Initialize a walk over the bufctl hash table of the kmem cache at
 * wsp->walk_addr.  The cache must have KMF_HASH set (small-slab caches
 * keep no hash table).  The entire bucket array is copied out of the
 * target up front; chains are followed lazily in the step function.
 * Returns WALK_NEXT on success, WALK_DONE if there is nothing to walk,
 * WALK_ERR on failure.
 */
int
kmem_hash_walk_init(mdb_walk_state_t *wsp)
{
	kmem_hash_walk_t *kmhw;
	uintptr_t *hash;
	kmem_cache_t c;
	uintptr_t haddr, addr = wsp->walk_addr;
	size_t nelems;
	size_t hsize;

	if (addr == NULL) {
		mdb_warn("kmem_hash doesn't support global walks\n");
		return (WALK_ERR);
	}

	if (mdb_vread(&c, sizeof (c), addr) == -1) {
		mdb_warn("couldn't read cache at addr %p", addr);
		return (WALK_ERR);
	}

	if (!(c.cache_flags & KMF_HASH)) {
		mdb_warn("cache %p doesn't have a hash table\n", addr);
		return (WALK_DONE);		/* nothing to do */
	}

	kmhw = mdb_zalloc(sizeof (kmem_hash_walk_t), UM_SLEEP);
	kmhw->kmhw_cur.bc_next = NULL;
	kmhw->kmhw_pos = 0;

	/* cache_hash_mask is the bucket count minus one */
	kmhw->kmhw_nelems = nelems = c.cache_hash_mask + 1;
	hsize = nelems * sizeof (uintptr_t);
	haddr = (uintptr_t)c.cache_hash_table;

	kmhw->kmhw_table = hash = mdb_alloc(hsize, UM_SLEEP);
	if (mdb_vread(hash, hsize, haddr) == -1) {
		mdb_warn("failed to read hash table at %p", haddr);
		mdb_free(hash, hsize);
		mdb_free(kmhw, sizeof (kmem_hash_walk_t));
		return (WALK_ERR);
	}

	wsp->walk_data = kmhw;

	return (WALK_NEXT);
}

/*
 * Step to the next bufctl: follow the current chain via bc_next, and
 * when the chain is exhausted, advance to the next non-empty bucket.
 * Invokes the walk callback with the bufctl's target address and a
 * local copy of its contents.
 */
int
kmem_hash_walk_step(mdb_walk_state_t *wsp)
{
	kmem_hash_walk_t *kmhw = wsp->walk_data;
	uintptr_t addr = NULL;

	if ((addr = (uintptr_t)kmhw->kmhw_cur.bc_next) == NULL) {
		while (kmhw->kmhw_pos < kmhw->kmhw_nelems) {
			if ((addr = kmhw->kmhw_table[kmhw->kmhw_pos++]) != NULL)
				break;
		}
	}
	if (addr == NULL)
		return (WALK_DONE);

	if (mdb_vread(&kmhw->kmhw_cur, sizeof (kmem_bufctl_t), addr) == -1) {
		mdb_warn("couldn't read kmem_bufctl_t at addr %p", addr);
		return (WALK_ERR);
	}

	return (wsp->walk_callback(addr, &kmhw->kmhw_cur, wsp->walk_cbdata));
}
/*
 * Release the resources acquired by kmem_hash_walk_init().
 */
void
kmem_hash_walk_fini(mdb_walk_state_t *wsp)
{
	kmem_hash_walk_t *kmhw = wsp->walk_data;

	if (kmhw == NULL)
		return;

	mdb_free(kmhw->kmhw_table, kmhw->kmhw_nelems * sizeof (uintptr_t));
	mdb_free(kmhw, sizeof (kmem_hash_walk_t));
}

/*
 * Find the address of the bufctl structure for the address 'buf' in cache
 * 'cp', which is at address caddr, and place it in *out.
 * Returns 0 on success, -1 (with a warning) on any read failure or if
 * no matching bufctl is found on the chain.
 */
static int
kmem_hash_lookup(kmem_cache_t *cp, uintptr_t caddr, void *buf, uintptr_t *out)
{
	uintptr_t bucket = (uintptr_t)KMEM_HASH(cp, buf);
	kmem_bufctl_t *bcp;
	kmem_bufctl_t bc;

	/* read the head pointer of buf's hash chain out of the target */
	if (mdb_vread(&bcp, sizeof (kmem_bufctl_t *), bucket) == -1) {
		mdb_warn("unable to read hash bucket for %p in cache %p",
		    buf, caddr);
		return (-1);
	}

	/* chase the chain until a bufctl whose bc_addr matches buf */
	while (bcp != NULL) {
		if (mdb_vread(&bc, sizeof (kmem_bufctl_t),
		    (uintptr_t)bcp) == -1) {
			mdb_warn("unable to read bufctl at %p", bcp);
			return (-1);
		}
		if (bc.bc_addr == buf) {
			*out = (uintptr_t)bcp;
			return (0);
		}
		bcp = bc.bc_next;
	}

	mdb_warn("unable to find bufctl for %p in cache %p\n", buf, caddr);
	return (-1);
}

/*
 * Return the magazine size (rounds per magazine) for cache 'cp', or 0
 * for caches with no magazine layer or an invalid magtype pointer.
 */
int
kmem_get_magsize(const kmem_cache_t *cp)
{
	uintptr_t addr = (uintptr_t)cp->cache_magtype;
	GElf_Sym mt_sym;
	kmem_magtype_t mt;
	int res;

	/*
	 * if cpu 0 has a non-zero magsize, it must be correct.  caches
	 * with KMF_NOMAGAZINE have disabled their magazine layers, so
	 * it is okay to return 0 for them.
	 */
	if ((res = cp->cache_cpu[0].cc_magsize) != 0 ||
	    (cp->cache_flags & KMF_NOMAGAZINE))
		return (res);

	/*
	 * Verify that the magtype pointer lies within, and is properly
	 * aligned inside, the kernel's kmem_magtype[] array before we
	 * dereference it.
	 */
	if (mdb_lookup_by_name("kmem_magtype", &mt_sym) == -1) {
		mdb_warn("unable to read 'kmem_magtype'");
	} else if (addr < mt_sym.st_value ||
	    addr + sizeof (mt) - 1 > mt_sym.st_value + mt_sym.st_size - 1 ||
	    ((addr - mt_sym.st_value) % sizeof (mt)) != 0) {
		mdb_warn("cache '%s' has invalid magtype pointer (%p)\n",
		    cp->cache_name, addr);
		return (0);
	}
	if (mdb_vread(&mt, sizeof (mt), addr) == -1) {
		mdb_warn("unable to read magtype at %a", addr);
		return (0);
	}
	return (mt.mt_magsize);
}
/*
 * Per-slab callback for kmem_estimate_allocated(): subtract this
 * partial slab's free chunk count from the running estimate.
 */
/*ARGSUSED*/
static int
kmem_estimate_slab(uintptr_t addr, const kmem_slab_t *sp, size_t *est)
{
	*est -= (sp->slab_chunks - sp->slab_refcnt);

	return (WALK_NEXT);
}

/*
 * Returns an upper bound on the number of allocated buffers in a given
 * cache.  Starts from cache_buftotal, subtracts the free buffers on
 * partial slabs, then subtracts the buffers held in full depot
 * magazines (when the cache has a magazine layer).
 */
size_t
kmem_estimate_allocated(uintptr_t addr, const kmem_cache_t *cp)
{
	int magsize;
	size_t cache_est;

	cache_est = cp->cache_buftotal;

	(void) mdb_pwalk("kmem_slab_partial",
	    (mdb_walk_cb_t)kmem_estimate_slab, &cache_est, addr);

	if ((magsize = kmem_get_magsize(cp)) != 0) {
		size_t mag_est = cp->cache_full.ml_total * magsize;

		if (cache_est >= mag_est) {
			cache_est -= mag_est;
		} else {
			/* inconsistent snapshot (e.g. live system) */
			mdb_warn("cache %p's magazine layer holds more buffers "
			    "than the slab layer.\n", addr);
		}
	}
	return (cache_est);
}

/*
 * Copy 'rounds' buffer pointers out of the magazine at target address
 * kmp into maglist[], bailing to 'fail' on a read error or when the
 * magmax fudge-factor limit is hit.  NOTE: relies on kmp, mp, magbsize,
 * maglist, magcnt, magmax, i, and a 'fail' label in the caller's scope,
 * and evaluates 'rounds' more than once.
 */
#define	READMAG_ROUNDS(rounds) { \
	if (mdb_vread(mp, magbsize, (uintptr_t)kmp) == -1) { \
		mdb_warn("couldn't read magazine at %p", kmp); \
		goto fail; \
	} \
	for (i = 0; i < rounds; i++) { \
		maglist[magcnt++] = mp->mag_round[i]; \
		if (magcnt == magmax) { \
			mdb_warn("%d magazines exceeds fudge factor\n", \
			    magcnt); \
			goto fail; \
		} \
	} \
}
/*
 * Gather every buffer pointer held in cache cp's magazine layer (depot
 * full list, per-CPU loaded magazines, and per-CPU spares) into a
 * freshly allocated array.  On success returns WALK_NEXT and fills in
 * *maglistp (the array), *magcntp (entries used), and *magmaxp (array
 * capacity, for the later mdb_free).  Returns WALK_ERR on failure.
 * 'addr' is the cache's target address (used only for diagnostics);
 * alloc_flags is passed to mdb_alloc (UM_SLEEP or UM_GC).
 */
int
kmem_read_magazines(kmem_cache_t *cp, uintptr_t addr, int ncpus,
    void ***maglistp, size_t *magcntp, size_t *magmaxp, int alloc_flags)
{
	kmem_magazine_t *kmp, *mp;
	void **maglist = NULL;
	int i, cpu;
	size_t magsize, magmax, magbsize;
	size_t magcnt = 0;

	/*
	 * Read the magtype out of the cache, after verifying the pointer's
	 * correctness.
	 */
	magsize = kmem_get_magsize(cp);
	if (magsize == 0) {
		/* no magazine layer: report an empty list */
		*maglistp = NULL;
		*magcntp = 0;
		*magmaxp = 0;
		return (WALK_NEXT);
	}

	/*
	 * There are several places where we need to go buffer hunting:
	 * the per-CPU loaded magazine, the per-CPU spare full magazine,
	 * and the full magazine list in the depot.
	 *
	 * For an upper bound on the number of buffers in the magazine
	 * layer, we have the number of magazines on the cache_full
	 * list plus at most two magazines per CPU (the loaded and the
	 * spare).  Toss in 100 magazines as a fudge factor in case this
	 * is live (the number "100" comes from the same fudge factor in
	 * crash(1M)).
	 */
	magmax = (cp->cache_full.ml_total + 2 * ncpus + 100) * magsize;
	magbsize = offsetof(kmem_magazine_t, mag_round[magsize]);

	if (magbsize >= PAGESIZE / 2) {
		mdb_warn("magazine size for cache %p unreasonable (%x)\n",
		    addr, magbsize);
		return (WALK_ERR);
	}

	maglist = mdb_alloc(magmax * sizeof (void *), alloc_flags);
	mp = mdb_alloc(magbsize, alloc_flags);
	if (mp == NULL || maglist == NULL)
		goto fail;

	/*
	 * First up: the magazines in the depot (i.e. on the cache_full list).
	 */
	for (kmp = cp->cache_full.ml_list; kmp != NULL; ) {
		READMAG_ROUNDS(magsize);
		kmp = mp->mag_next;

		if (kmp == cp->cache_full.ml_list)
			break; /* cache_full list loop detected */
	}

	dprintf(("cache_full list done\n"));

	/*
	 * Now whip through the CPUs, snagging the loaded magazines
	 * and full spares.
	 */
	for (cpu = 0; cpu < ncpus; cpu++) {
		kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu];

		dprintf(("reading cpu cache %p\n",
		    (uintptr_t)ccp - (uintptr_t)cp + addr));

		if (ccp->cc_rounds > 0 &&
		    (kmp = ccp->cc_loaded) != NULL) {
			dprintf(("reading %d loaded rounds\n", ccp->cc_rounds));
			READMAG_ROUNDS(ccp->cc_rounds);
		}

		if (ccp->cc_prounds > 0 &&
		    (kmp = ccp->cc_ploaded) != NULL) {
			dprintf(("reading %d previously loaded rounds\n",
			    ccp->cc_prounds));
			READMAG_ROUNDS(ccp->cc_prounds);
		}
	}

	dprintf(("magazine layer: %d buffers\n", magcnt));

	/* under UM_GC, mdb's garbage collector reclaims mp for us */
	if (!(alloc_flags & UM_GC))
		mdb_free(mp, magbsize);

	*maglistp = maglist;
	*magcntp = magcnt;
	*magmaxp = magmax;

	return (WALK_NEXT);

fail:
	if (!(alloc_flags & UM_GC)) {
		if (mp)
			mdb_free(mp, magbsize);
		if (maglist)
			mdb_free(maglist, magmax * sizeof (void *));
	}
	return (WALK_ERR);
}
10837c478bd9Sstevel@tonic-gate return (WALK_ERR); 10847c478bd9Sstevel@tonic-gate } 10857c478bd9Sstevel@tonic-gate 10867c478bd9Sstevel@tonic-gate static int 10877c478bd9Sstevel@tonic-gate kmem_walk_callback(mdb_walk_state_t *wsp, uintptr_t buf) 10887c478bd9Sstevel@tonic-gate { 10897c478bd9Sstevel@tonic-gate return (wsp->walk_callback(buf, NULL, wsp->walk_cbdata)); 10907c478bd9Sstevel@tonic-gate } 10917c478bd9Sstevel@tonic-gate 10927c478bd9Sstevel@tonic-gate static int 10937c478bd9Sstevel@tonic-gate bufctl_walk_callback(kmem_cache_t *cp, mdb_walk_state_t *wsp, uintptr_t buf) 10947c478bd9Sstevel@tonic-gate { 10957c478bd9Sstevel@tonic-gate kmem_bufctl_audit_t b; 10967c478bd9Sstevel@tonic-gate 10977c478bd9Sstevel@tonic-gate /* 10987c478bd9Sstevel@tonic-gate * if KMF_AUDIT is not set, we know that we're looking at a 10997c478bd9Sstevel@tonic-gate * kmem_bufctl_t. 11007c478bd9Sstevel@tonic-gate */ 11017c478bd9Sstevel@tonic-gate if (!(cp->cache_flags & KMF_AUDIT) || 11027c478bd9Sstevel@tonic-gate mdb_vread(&b, sizeof (kmem_bufctl_audit_t), buf) == -1) { 11037c478bd9Sstevel@tonic-gate (void) memset(&b, 0, sizeof (b)); 11047c478bd9Sstevel@tonic-gate if (mdb_vread(&b, sizeof (kmem_bufctl_t), buf) == -1) { 11057c478bd9Sstevel@tonic-gate mdb_warn("unable to read bufctl at %p", buf); 11067c478bd9Sstevel@tonic-gate return (WALK_ERR); 11077c478bd9Sstevel@tonic-gate } 11087c478bd9Sstevel@tonic-gate } 11097c478bd9Sstevel@tonic-gate 11107c478bd9Sstevel@tonic-gate return (wsp->walk_callback(buf, &b, wsp->walk_cbdata)); 11117c478bd9Sstevel@tonic-gate } 11127c478bd9Sstevel@tonic-gate 11137c478bd9Sstevel@tonic-gate typedef struct kmem_walk { 11147c478bd9Sstevel@tonic-gate int kmw_type; 11157c478bd9Sstevel@tonic-gate 11167c478bd9Sstevel@tonic-gate int kmw_addr; /* cache address */ 11177c478bd9Sstevel@tonic-gate kmem_cache_t *kmw_cp; 11187c478bd9Sstevel@tonic-gate size_t kmw_csize; 11197c478bd9Sstevel@tonic-gate 11207c478bd9Sstevel@tonic-gate /* 11217c478bd9Sstevel@tonic-gate * magazine 
layer 11227c478bd9Sstevel@tonic-gate */ 11237c478bd9Sstevel@tonic-gate void **kmw_maglist; 11247c478bd9Sstevel@tonic-gate size_t kmw_max; 11257c478bd9Sstevel@tonic-gate size_t kmw_count; 11267c478bd9Sstevel@tonic-gate size_t kmw_pos; 11277c478bd9Sstevel@tonic-gate 11287c478bd9Sstevel@tonic-gate /* 11297c478bd9Sstevel@tonic-gate * slab layer 11307c478bd9Sstevel@tonic-gate */ 11317c478bd9Sstevel@tonic-gate char *kmw_valid; /* to keep track of freed buffers */ 11327c478bd9Sstevel@tonic-gate char *kmw_ubase; /* buffer for slab data */ 11337c478bd9Sstevel@tonic-gate } kmem_walk_t; 11347c478bd9Sstevel@tonic-gate 11357c478bd9Sstevel@tonic-gate static int 11367c478bd9Sstevel@tonic-gate kmem_walk_init_common(mdb_walk_state_t *wsp, int type) 11377c478bd9Sstevel@tonic-gate { 11387c478bd9Sstevel@tonic-gate kmem_walk_t *kmw; 11397c478bd9Sstevel@tonic-gate int ncpus, csize; 11407c478bd9Sstevel@tonic-gate kmem_cache_t *cp; 1141789d94c2Sjwadams size_t vm_quantum; 11427c478bd9Sstevel@tonic-gate 11437c478bd9Sstevel@tonic-gate size_t magmax, magcnt; 11447c478bd9Sstevel@tonic-gate void **maglist = NULL; 11457c478bd9Sstevel@tonic-gate uint_t chunksize, slabsize; 11467c478bd9Sstevel@tonic-gate int status = WALK_ERR; 11477c478bd9Sstevel@tonic-gate uintptr_t addr = wsp->walk_addr; 11487c478bd9Sstevel@tonic-gate const char *layered; 11497c478bd9Sstevel@tonic-gate 11507c478bd9Sstevel@tonic-gate type &= ~KM_HASH; 11517c478bd9Sstevel@tonic-gate 11527c478bd9Sstevel@tonic-gate if (addr == NULL) { 11537c478bd9Sstevel@tonic-gate mdb_warn("kmem walk doesn't support global walks\n"); 11547c478bd9Sstevel@tonic-gate return (WALK_ERR); 11557c478bd9Sstevel@tonic-gate } 11567c478bd9Sstevel@tonic-gate 11577c478bd9Sstevel@tonic-gate dprintf(("walking %p\n", addr)); 11587c478bd9Sstevel@tonic-gate 11597c478bd9Sstevel@tonic-gate /* 11607c478bd9Sstevel@tonic-gate * First we need to figure out how many CPUs are configured in the 11617c478bd9Sstevel@tonic-gate * system to know how much to slurp out. 
/*
 * Common initialization for the kmem/freemem/bufctl family of walkers.
 * 'type' is a combination of KM_* flags selecting allocated vs. free
 * buffers, bufctls vs. raw buffers, and constructed-only free buffers.
 * Reads the cache at wsp->walk_addr, sanity-checks it, snapshots the
 * magazine layer, and starts the appropriate layered walk ("kmem_hash",
 * "kmem_slab", or "kmem_slab_partial").  Returns WALK_NEXT on success,
 * WALK_DONE when there is nothing to walk, WALK_ERR on failure.
 */
static int
kmem_walk_init_common(mdb_walk_state_t *wsp, int type)
{
	kmem_walk_t *kmw;
	int ncpus, csize;
	kmem_cache_t *cp;
	size_t vm_quantum;

	size_t magmax, magcnt;
	void **maglist = NULL;
	uint_t chunksize, slabsize;
	int status = WALK_ERR;
	uintptr_t addr = wsp->walk_addr;
	const char *layered;

	/* KM_HASH is an internal flag; it is set below as needed */
	type &= ~KM_HASH;

	if (addr == NULL) {
		mdb_warn("kmem walk doesn't support global walks\n");
		return (WALK_ERR);
	}

	dprintf(("walking %p\n", addr));

	/*
	 * First we need to figure out how many CPUs are configured in the
	 * system to know how much to slurp out.
	 */
	mdb_readvar(&ncpus, "max_ncpus");

	csize = KMEM_CACHE_SIZE(ncpus);
	cp = mdb_alloc(csize, UM_SLEEP);

	if (mdb_vread(cp, csize, addr) == -1) {
		mdb_warn("couldn't read cache at addr %p", addr);
		goto out2;
	}

	/*
	 * It's easy for someone to hand us an invalid cache address.
	 * Unfortunately, it is hard for this walker to survive an
	 * invalid cache cleanly.  So we make sure that:
	 *
	 *	1. the vmem arena for the cache is readable,
	 *	2. the vmem arena's quantum is a power of 2,
	 *	3. our slabsize is a multiple of the quantum, and
	 *	4. our chunksize is >0 and less than our slabsize.
	 */
	if (mdb_vread(&vm_quantum, sizeof (vm_quantum),
	    (uintptr_t)&cp->cache_arena->vm_quantum) == -1 ||
	    vm_quantum == 0 ||
	    (vm_quantum & (vm_quantum - 1)) != 0 ||
	    cp->cache_slabsize < vm_quantum ||
	    P2PHASE(cp->cache_slabsize, vm_quantum) != 0 ||
	    cp->cache_chunksize == 0 ||
	    cp->cache_chunksize > cp->cache_slabsize) {
		mdb_warn("%p is not a valid kmem_cache_t\n", addr);
		goto out2;
	}

	dprintf(("buf total is %d\n", cp->cache_buftotal));

	if (cp->cache_buftotal == 0) {
		mdb_free(cp, csize);
		return (WALK_DONE);
	}

	/*
	 * If they ask for bufctls, but it's a small-slab cache,
	 * there is nothing to report.
	 */
	if ((type & KM_BUFCTL) && !(cp->cache_flags & KMF_HASH)) {
		dprintf(("bufctl requested, not KMF_HASH (flags: %p)\n",
		    cp->cache_flags));
		mdb_free(cp, csize);
		return (WALK_DONE);
	}

	/*
	 * If they want constructed buffers, but there's no constructor or
	 * the cache has DEADBEEF checking enabled, there is nothing to report.
	 */
	if ((type & KM_CONSTRUCTED) && (!(type & KM_FREE) ||
	    cp->cache_constructor == NULL ||
	    (cp->cache_flags & (KMF_DEADBEEF | KMF_LITE)) == KMF_DEADBEEF)) {
		mdb_free(cp, csize);
		return (WALK_DONE);
	}

	/*
	 * Read in the contents of the magazine layer
	 */
	if (kmem_read_magazines(cp, addr, ncpus, &maglist, &magcnt,
	    &magmax, UM_SLEEP) == WALK_ERR)
		goto out2;

	/*
	 * We have all of the buffers from the magazines; if we are walking
	 * allocated buffers, sort them so we can bsearch them later.
	 */
	if (type & KM_ALLOCATED)
		qsort(maglist, magcnt, sizeof (void *), addrcmp);

	wsp->walk_data = kmw = mdb_zalloc(sizeof (kmem_walk_t), UM_SLEEP);

	kmw->kmw_type = type;
	kmw->kmw_addr = addr;
	kmw->kmw_cp = cp;
	kmw->kmw_csize = csize;
	kmw->kmw_maglist = maglist;
	kmw->kmw_max = magmax;
	kmw->kmw_count = magcnt;
	kmw->kmw_pos = 0;

	/*
	 * When walking allocated buffers in a KMF_HASH cache, we walk the
	 * hash table instead of the slab layer.
	 */
	if ((cp->cache_flags & KMF_HASH) && (type & KM_ALLOCATED)) {
		layered = "kmem_hash";

		kmw->kmw_type |= KM_HASH;
	} else {
		/*
		 * If we are walking freed buffers, we only need the
		 * magazine layer plus the partially allocated slabs.
		 * To walk allocated buffers, we need all of the slabs.
		 */
		if (type & KM_ALLOCATED)
			layered = "kmem_slab";
		else
			layered = "kmem_slab_partial";

		/*
		 * for small-slab caches, we read in the entire slab.  For
		 * freed buffers, we can just walk the freelist.  For
		 * allocated buffers, we use a 'valid' array to track
		 * the freed buffers.
		 */
		if (!(cp->cache_flags & KMF_HASH)) {
			chunksize = cp->cache_chunksize;
			slabsize = cp->cache_slabsize;

			kmw->kmw_ubase = mdb_alloc(slabsize +
			    sizeof (kmem_bufctl_t), UM_SLEEP);

			if (type & KM_ALLOCATED)
				kmw->kmw_valid =
				    mdb_alloc(slabsize / chunksize, UM_SLEEP);
		}
	}

	status = WALK_NEXT;

	if (mdb_layered_walk(layered, wsp) == -1) {
		mdb_warn("unable to start layered '%s' walk", layered);
		status = WALK_ERR;
	}

	/* NOTE(review): out1 appears to be unreferenced; kept as-is */
out1:
	if (status == WALK_ERR) {
		/* tear down everything allocated since walk_data was set */
		if (kmw->kmw_valid)
			mdb_free(kmw->kmw_valid, slabsize / chunksize);

		if (kmw->kmw_ubase)
			mdb_free(kmw->kmw_ubase, slabsize +
			    sizeof (kmem_bufctl_t));

		if (kmw->kmw_maglist)
			mdb_free(kmw->kmw_maglist,
			    kmw->kmw_max * sizeof (uintptr_t));

		mdb_free(kmw, sizeof (kmem_walk_t));
		wsp->walk_data = NULL;
	}

out2:
	if (status == WALK_ERR)
		mdb_free(cp, csize);

	return (status);
}
13087c478bd9Sstevel@tonic-gate wsp->walk_data = NULL; 13097c478bd9Sstevel@tonic-gate } 13107c478bd9Sstevel@tonic-gate 13117c478bd9Sstevel@tonic-gate out2: 13127c478bd9Sstevel@tonic-gate if (status == WALK_ERR) 13137c478bd9Sstevel@tonic-gate mdb_free(cp, csize); 13147c478bd9Sstevel@tonic-gate 13157c478bd9Sstevel@tonic-gate return (status); 13167c478bd9Sstevel@tonic-gate } 13177c478bd9Sstevel@tonic-gate 13187c478bd9Sstevel@tonic-gate int 13197c478bd9Sstevel@tonic-gate kmem_walk_step(mdb_walk_state_t *wsp) 13207c478bd9Sstevel@tonic-gate { 13217c478bd9Sstevel@tonic-gate kmem_walk_t *kmw = wsp->walk_data; 13227c478bd9Sstevel@tonic-gate int type = kmw->kmw_type; 13237c478bd9Sstevel@tonic-gate kmem_cache_t *cp = kmw->kmw_cp; 13247c478bd9Sstevel@tonic-gate 13257c478bd9Sstevel@tonic-gate void **maglist = kmw->kmw_maglist; 13267c478bd9Sstevel@tonic-gate int magcnt = kmw->kmw_count; 13277c478bd9Sstevel@tonic-gate 13287c478bd9Sstevel@tonic-gate uintptr_t chunksize, slabsize; 13297c478bd9Sstevel@tonic-gate uintptr_t addr; 13307c478bd9Sstevel@tonic-gate const kmem_slab_t *sp; 13317c478bd9Sstevel@tonic-gate const kmem_bufctl_t *bcp; 13327c478bd9Sstevel@tonic-gate kmem_bufctl_t bc; 13337c478bd9Sstevel@tonic-gate 13347c478bd9Sstevel@tonic-gate int chunks; 13357c478bd9Sstevel@tonic-gate char *kbase; 13367c478bd9Sstevel@tonic-gate void *buf; 13377c478bd9Sstevel@tonic-gate int i, ret; 13387c478bd9Sstevel@tonic-gate 13397c478bd9Sstevel@tonic-gate char *valid, *ubase; 13407c478bd9Sstevel@tonic-gate 13417c478bd9Sstevel@tonic-gate /* 13427c478bd9Sstevel@tonic-gate * first, handle the 'kmem_hash' layered walk case 13437c478bd9Sstevel@tonic-gate */ 13447c478bd9Sstevel@tonic-gate if (type & KM_HASH) { 13457c478bd9Sstevel@tonic-gate /* 13467c478bd9Sstevel@tonic-gate * We have a buffer which has been allocated out of the 13477c478bd9Sstevel@tonic-gate * global layer. 
We need to make sure that it's not 13487c478bd9Sstevel@tonic-gate * actually sitting in a magazine before we report it as 13497c478bd9Sstevel@tonic-gate * an allocated buffer. 13507c478bd9Sstevel@tonic-gate */ 13517c478bd9Sstevel@tonic-gate buf = ((const kmem_bufctl_t *)wsp->walk_layer)->bc_addr; 13527c478bd9Sstevel@tonic-gate 13537c478bd9Sstevel@tonic-gate if (magcnt > 0 && 13547c478bd9Sstevel@tonic-gate bsearch(&buf, maglist, magcnt, sizeof (void *), 13557c478bd9Sstevel@tonic-gate addrcmp) != NULL) 13567c478bd9Sstevel@tonic-gate return (WALK_NEXT); 13577c478bd9Sstevel@tonic-gate 13587c478bd9Sstevel@tonic-gate if (type & KM_BUFCTL) 13597c478bd9Sstevel@tonic-gate return (bufctl_walk_callback(cp, wsp, wsp->walk_addr)); 13607c478bd9Sstevel@tonic-gate 13617c478bd9Sstevel@tonic-gate return (kmem_walk_callback(wsp, (uintptr_t)buf)); 13627c478bd9Sstevel@tonic-gate } 13637c478bd9Sstevel@tonic-gate 13647c478bd9Sstevel@tonic-gate ret = WALK_NEXT; 13657c478bd9Sstevel@tonic-gate 13667c478bd9Sstevel@tonic-gate addr = kmw->kmw_addr; 13677c478bd9Sstevel@tonic-gate 13687c478bd9Sstevel@tonic-gate /* 13697c478bd9Sstevel@tonic-gate * If we're walking freed buffers, report everything in the 13707c478bd9Sstevel@tonic-gate * magazine layer before processing the first slab. 
13717c478bd9Sstevel@tonic-gate */ 13727c478bd9Sstevel@tonic-gate if ((type & KM_FREE) && magcnt != 0) { 13737c478bd9Sstevel@tonic-gate kmw->kmw_count = 0; /* only do this once */ 13747c478bd9Sstevel@tonic-gate for (i = 0; i < magcnt; i++) { 13757c478bd9Sstevel@tonic-gate buf = maglist[i]; 13767c478bd9Sstevel@tonic-gate 13777c478bd9Sstevel@tonic-gate if (type & KM_BUFCTL) { 13787c478bd9Sstevel@tonic-gate uintptr_t out; 13797c478bd9Sstevel@tonic-gate 13807c478bd9Sstevel@tonic-gate if (cp->cache_flags & KMF_BUFTAG) { 13817c478bd9Sstevel@tonic-gate kmem_buftag_t *btp; 13827c478bd9Sstevel@tonic-gate kmem_buftag_t tag; 13837c478bd9Sstevel@tonic-gate 13847c478bd9Sstevel@tonic-gate /* LINTED - alignment */ 13857c478bd9Sstevel@tonic-gate btp = KMEM_BUFTAG(cp, buf); 13867c478bd9Sstevel@tonic-gate if (mdb_vread(&tag, sizeof (tag), 13877c478bd9Sstevel@tonic-gate (uintptr_t)btp) == -1) { 13887c478bd9Sstevel@tonic-gate mdb_warn("reading buftag for " 13897c478bd9Sstevel@tonic-gate "%p at %p", buf, btp); 13907c478bd9Sstevel@tonic-gate continue; 13917c478bd9Sstevel@tonic-gate } 13927c478bd9Sstevel@tonic-gate out = (uintptr_t)tag.bt_bufctl; 13937c478bd9Sstevel@tonic-gate } else { 13947c478bd9Sstevel@tonic-gate if (kmem_hash_lookup(cp, addr, buf, 13957c478bd9Sstevel@tonic-gate &out) == -1) 13967c478bd9Sstevel@tonic-gate continue; 13977c478bd9Sstevel@tonic-gate } 13987c478bd9Sstevel@tonic-gate ret = bufctl_walk_callback(cp, wsp, out); 13997c478bd9Sstevel@tonic-gate } else { 14007c478bd9Sstevel@tonic-gate ret = kmem_walk_callback(wsp, (uintptr_t)buf); 14017c478bd9Sstevel@tonic-gate } 14027c478bd9Sstevel@tonic-gate 14037c478bd9Sstevel@tonic-gate if (ret != WALK_NEXT) 14047c478bd9Sstevel@tonic-gate return (ret); 14057c478bd9Sstevel@tonic-gate } 14067c478bd9Sstevel@tonic-gate } 14077c478bd9Sstevel@tonic-gate 14087c478bd9Sstevel@tonic-gate /* 14097c478bd9Sstevel@tonic-gate * If they want constructed buffers, we're finished, since the 14107c478bd9Sstevel@tonic-gate * magazine layer holds 
them all. 14117c478bd9Sstevel@tonic-gate */ 14127c478bd9Sstevel@tonic-gate if (type & KM_CONSTRUCTED) 14137c478bd9Sstevel@tonic-gate return (WALK_DONE); 14147c478bd9Sstevel@tonic-gate 14157c478bd9Sstevel@tonic-gate /* 14167c478bd9Sstevel@tonic-gate * Handle the buffers in the current slab 14177c478bd9Sstevel@tonic-gate */ 14187c478bd9Sstevel@tonic-gate chunksize = cp->cache_chunksize; 14197c478bd9Sstevel@tonic-gate slabsize = cp->cache_slabsize; 14207c478bd9Sstevel@tonic-gate 14217c478bd9Sstevel@tonic-gate sp = wsp->walk_layer; 14227c478bd9Sstevel@tonic-gate chunks = sp->slab_chunks; 14237c478bd9Sstevel@tonic-gate kbase = sp->slab_base; 14247c478bd9Sstevel@tonic-gate 14257c478bd9Sstevel@tonic-gate dprintf(("kbase is %p\n", kbase)); 14267c478bd9Sstevel@tonic-gate 14277c478bd9Sstevel@tonic-gate if (!(cp->cache_flags & KMF_HASH)) { 14287c478bd9Sstevel@tonic-gate valid = kmw->kmw_valid; 14297c478bd9Sstevel@tonic-gate ubase = kmw->kmw_ubase; 14307c478bd9Sstevel@tonic-gate 14317c478bd9Sstevel@tonic-gate if (mdb_vread(ubase, chunks * chunksize, 14327c478bd9Sstevel@tonic-gate (uintptr_t)kbase) == -1) { 14337c478bd9Sstevel@tonic-gate mdb_warn("failed to read slab contents at %p", kbase); 14347c478bd9Sstevel@tonic-gate return (WALK_ERR); 14357c478bd9Sstevel@tonic-gate } 14367c478bd9Sstevel@tonic-gate 14377c478bd9Sstevel@tonic-gate /* 14387c478bd9Sstevel@tonic-gate * Set up the valid map as fully allocated -- we'll punch 14397c478bd9Sstevel@tonic-gate * out the freelist. 
14407c478bd9Sstevel@tonic-gate */ 14417c478bd9Sstevel@tonic-gate if (type & KM_ALLOCATED) 14427c478bd9Sstevel@tonic-gate (void) memset(valid, 1, chunks); 14437c478bd9Sstevel@tonic-gate } else { 14447c478bd9Sstevel@tonic-gate valid = NULL; 14457c478bd9Sstevel@tonic-gate ubase = NULL; 14467c478bd9Sstevel@tonic-gate } 14477c478bd9Sstevel@tonic-gate 14487c478bd9Sstevel@tonic-gate /* 14497c478bd9Sstevel@tonic-gate * walk the slab's freelist 14507c478bd9Sstevel@tonic-gate */ 14517c478bd9Sstevel@tonic-gate bcp = sp->slab_head; 14527c478bd9Sstevel@tonic-gate 14537c478bd9Sstevel@tonic-gate dprintf(("refcnt is %d; chunks is %d\n", sp->slab_refcnt, chunks)); 14547c478bd9Sstevel@tonic-gate 14557c478bd9Sstevel@tonic-gate /* 14567c478bd9Sstevel@tonic-gate * since we could be in the middle of allocating a buffer, 14577c478bd9Sstevel@tonic-gate * our refcnt could be one higher than it aught. So we 14587c478bd9Sstevel@tonic-gate * check one further on the freelist than the count allows. 14597c478bd9Sstevel@tonic-gate */ 14607c478bd9Sstevel@tonic-gate for (i = sp->slab_refcnt; i <= chunks; i++) { 14617c478bd9Sstevel@tonic-gate uint_t ndx; 14627c478bd9Sstevel@tonic-gate 14637c478bd9Sstevel@tonic-gate dprintf(("bcp is %p\n", bcp)); 14647c478bd9Sstevel@tonic-gate 14657c478bd9Sstevel@tonic-gate if (bcp == NULL) { 14667c478bd9Sstevel@tonic-gate if (i == chunks) 14677c478bd9Sstevel@tonic-gate break; 14687c478bd9Sstevel@tonic-gate mdb_warn( 14697c478bd9Sstevel@tonic-gate "slab %p in cache %p freelist too short by %d\n", 14707c478bd9Sstevel@tonic-gate sp, addr, chunks - i); 14717c478bd9Sstevel@tonic-gate break; 14727c478bd9Sstevel@tonic-gate } 14737c478bd9Sstevel@tonic-gate 14747c478bd9Sstevel@tonic-gate if (cp->cache_flags & KMF_HASH) { 14757c478bd9Sstevel@tonic-gate if (mdb_vread(&bc, sizeof (bc), (uintptr_t)bcp) == -1) { 14767c478bd9Sstevel@tonic-gate mdb_warn("failed to read bufctl ptr at %p", 14777c478bd9Sstevel@tonic-gate bcp); 14787c478bd9Sstevel@tonic-gate break; 
14797c478bd9Sstevel@tonic-gate } 14807c478bd9Sstevel@tonic-gate buf = bc.bc_addr; 14817c478bd9Sstevel@tonic-gate } else { 14827c478bd9Sstevel@tonic-gate /* 14837c478bd9Sstevel@tonic-gate * Otherwise the buffer is in the slab which 14847c478bd9Sstevel@tonic-gate * we've read in; we just need to determine 14857c478bd9Sstevel@tonic-gate * its offset in the slab to find the 14867c478bd9Sstevel@tonic-gate * kmem_bufctl_t. 14877c478bd9Sstevel@tonic-gate */ 14887c478bd9Sstevel@tonic-gate bc = *((kmem_bufctl_t *) 14897c478bd9Sstevel@tonic-gate ((uintptr_t)bcp - (uintptr_t)kbase + 14907c478bd9Sstevel@tonic-gate (uintptr_t)ubase)); 14917c478bd9Sstevel@tonic-gate 14927c478bd9Sstevel@tonic-gate buf = KMEM_BUF(cp, bcp); 14937c478bd9Sstevel@tonic-gate } 14947c478bd9Sstevel@tonic-gate 14957c478bd9Sstevel@tonic-gate ndx = ((uintptr_t)buf - (uintptr_t)kbase) / chunksize; 14967c478bd9Sstevel@tonic-gate 14977c478bd9Sstevel@tonic-gate if (ndx > slabsize / cp->cache_bufsize) { 14987c478bd9Sstevel@tonic-gate /* 14997c478bd9Sstevel@tonic-gate * This is very wrong; we have managed to find 15007c478bd9Sstevel@tonic-gate * a buffer in the slab which shouldn't 15017c478bd9Sstevel@tonic-gate * actually be here. Emit a warning, and 15027c478bd9Sstevel@tonic-gate * try to continue. 
15037c478bd9Sstevel@tonic-gate */ 15047c478bd9Sstevel@tonic-gate mdb_warn("buf %p is out of range for " 15057c478bd9Sstevel@tonic-gate "slab %p, cache %p\n", buf, sp, addr); 15067c478bd9Sstevel@tonic-gate } else if (type & KM_ALLOCATED) { 15077c478bd9Sstevel@tonic-gate /* 15087c478bd9Sstevel@tonic-gate * we have found a buffer on the slab's freelist; 15097c478bd9Sstevel@tonic-gate * clear its entry 15107c478bd9Sstevel@tonic-gate */ 15117c478bd9Sstevel@tonic-gate valid[ndx] = 0; 15127c478bd9Sstevel@tonic-gate } else { 15137c478bd9Sstevel@tonic-gate /* 15147c478bd9Sstevel@tonic-gate * Report this freed buffer 15157c478bd9Sstevel@tonic-gate */ 15167c478bd9Sstevel@tonic-gate if (type & KM_BUFCTL) { 15177c478bd9Sstevel@tonic-gate ret = bufctl_walk_callback(cp, wsp, 15187c478bd9Sstevel@tonic-gate (uintptr_t)bcp); 15197c478bd9Sstevel@tonic-gate } else { 15207c478bd9Sstevel@tonic-gate ret = kmem_walk_callback(wsp, (uintptr_t)buf); 15217c478bd9Sstevel@tonic-gate } 15227c478bd9Sstevel@tonic-gate if (ret != WALK_NEXT) 15237c478bd9Sstevel@tonic-gate return (ret); 15247c478bd9Sstevel@tonic-gate } 15257c478bd9Sstevel@tonic-gate 15267c478bd9Sstevel@tonic-gate bcp = bc.bc_next; 15277c478bd9Sstevel@tonic-gate } 15287c478bd9Sstevel@tonic-gate 15297c478bd9Sstevel@tonic-gate if (bcp != NULL) { 15307c478bd9Sstevel@tonic-gate dprintf(("slab %p in cache %p freelist too long (%p)\n", 15317c478bd9Sstevel@tonic-gate sp, addr, bcp)); 15327c478bd9Sstevel@tonic-gate } 15337c478bd9Sstevel@tonic-gate 15347c478bd9Sstevel@tonic-gate /* 15357c478bd9Sstevel@tonic-gate * If we are walking freed buffers, the loop above handled reporting 15367c478bd9Sstevel@tonic-gate * them. 
15377c478bd9Sstevel@tonic-gate */ 15387c478bd9Sstevel@tonic-gate if (type & KM_FREE) 15397c478bd9Sstevel@tonic-gate return (WALK_NEXT); 15407c478bd9Sstevel@tonic-gate 15417c478bd9Sstevel@tonic-gate if (type & KM_BUFCTL) { 15427c478bd9Sstevel@tonic-gate mdb_warn("impossible situation: small-slab KM_BUFCTL walk for " 15437c478bd9Sstevel@tonic-gate "cache %p\n", addr); 15447c478bd9Sstevel@tonic-gate return (WALK_ERR); 15457c478bd9Sstevel@tonic-gate } 15467c478bd9Sstevel@tonic-gate 15477c478bd9Sstevel@tonic-gate /* 15487c478bd9Sstevel@tonic-gate * Report allocated buffers, skipping buffers in the magazine layer. 15497c478bd9Sstevel@tonic-gate * We only get this far for small-slab caches. 15507c478bd9Sstevel@tonic-gate */ 15517c478bd9Sstevel@tonic-gate for (i = 0; ret == WALK_NEXT && i < chunks; i++) { 15527c478bd9Sstevel@tonic-gate buf = (char *)kbase + i * chunksize; 15537c478bd9Sstevel@tonic-gate 15547c478bd9Sstevel@tonic-gate if (!valid[i]) 15557c478bd9Sstevel@tonic-gate continue; /* on slab freelist */ 15567c478bd9Sstevel@tonic-gate 15577c478bd9Sstevel@tonic-gate if (magcnt > 0 && 15587c478bd9Sstevel@tonic-gate bsearch(&buf, maglist, magcnt, sizeof (void *), 15597c478bd9Sstevel@tonic-gate addrcmp) != NULL) 15607c478bd9Sstevel@tonic-gate continue; /* in magazine layer */ 15617c478bd9Sstevel@tonic-gate 15627c478bd9Sstevel@tonic-gate ret = kmem_walk_callback(wsp, (uintptr_t)buf); 15637c478bd9Sstevel@tonic-gate } 15647c478bd9Sstevel@tonic-gate return (ret); 15657c478bd9Sstevel@tonic-gate } 15667c478bd9Sstevel@tonic-gate 15677c478bd9Sstevel@tonic-gate void 15687c478bd9Sstevel@tonic-gate kmem_walk_fini(mdb_walk_state_t *wsp) 15697c478bd9Sstevel@tonic-gate { 15707c478bd9Sstevel@tonic-gate kmem_walk_t *kmw = wsp->walk_data; 15717c478bd9Sstevel@tonic-gate uintptr_t chunksize; 15727c478bd9Sstevel@tonic-gate uintptr_t slabsize; 15737c478bd9Sstevel@tonic-gate 15747c478bd9Sstevel@tonic-gate if (kmw == NULL) 15757c478bd9Sstevel@tonic-gate return; 15767c478bd9Sstevel@tonic-gate 
15777c478bd9Sstevel@tonic-gate if (kmw->kmw_maglist != NULL) 15787c478bd9Sstevel@tonic-gate mdb_free(kmw->kmw_maglist, kmw->kmw_max * sizeof (void *)); 15797c478bd9Sstevel@tonic-gate 15807c478bd9Sstevel@tonic-gate chunksize = kmw->kmw_cp->cache_chunksize; 15817c478bd9Sstevel@tonic-gate slabsize = kmw->kmw_cp->cache_slabsize; 15827c478bd9Sstevel@tonic-gate 15837c478bd9Sstevel@tonic-gate if (kmw->kmw_valid != NULL) 15847c478bd9Sstevel@tonic-gate mdb_free(kmw->kmw_valid, slabsize / chunksize); 15857c478bd9Sstevel@tonic-gate if (kmw->kmw_ubase != NULL) 15867c478bd9Sstevel@tonic-gate mdb_free(kmw->kmw_ubase, slabsize + sizeof (kmem_bufctl_t)); 15877c478bd9Sstevel@tonic-gate 15887c478bd9Sstevel@tonic-gate mdb_free(kmw->kmw_cp, kmw->kmw_csize); 15897c478bd9Sstevel@tonic-gate mdb_free(kmw, sizeof (kmem_walk_t)); 15907c478bd9Sstevel@tonic-gate } 15917c478bd9Sstevel@tonic-gate 15927c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 15937c478bd9Sstevel@tonic-gate static int 15947c478bd9Sstevel@tonic-gate kmem_walk_all(uintptr_t addr, const kmem_cache_t *c, mdb_walk_state_t *wsp) 15957c478bd9Sstevel@tonic-gate { 15967c478bd9Sstevel@tonic-gate /* 15977c478bd9Sstevel@tonic-gate * Buffers allocated from NOTOUCH caches can also show up as freed 15987c478bd9Sstevel@tonic-gate * memory in other caches. This can be a little confusing, so we 15997c478bd9Sstevel@tonic-gate * don't walk NOTOUCH caches when walking all caches (thereby assuring 16007c478bd9Sstevel@tonic-gate * that "::walk kmem" and "::walk freemem" yield disjoint output). 
16017c478bd9Sstevel@tonic-gate */ 16027c478bd9Sstevel@tonic-gate if (c->cache_cflags & KMC_NOTOUCH) 16037c478bd9Sstevel@tonic-gate return (WALK_NEXT); 16047c478bd9Sstevel@tonic-gate 16057c478bd9Sstevel@tonic-gate if (mdb_pwalk(wsp->walk_data, wsp->walk_callback, 16067c478bd9Sstevel@tonic-gate wsp->walk_cbdata, addr) == -1) 16077c478bd9Sstevel@tonic-gate return (WALK_DONE); 16087c478bd9Sstevel@tonic-gate 16097c478bd9Sstevel@tonic-gate return (WALK_NEXT); 16107c478bd9Sstevel@tonic-gate } 16117c478bd9Sstevel@tonic-gate 16127c478bd9Sstevel@tonic-gate #define KMEM_WALK_ALL(name, wsp) { \ 16137c478bd9Sstevel@tonic-gate wsp->walk_data = (name); \ 16147c478bd9Sstevel@tonic-gate if (mdb_walk("kmem_cache", (mdb_walk_cb_t)kmem_walk_all, wsp) == -1) \ 16157c478bd9Sstevel@tonic-gate return (WALK_ERR); \ 16167c478bd9Sstevel@tonic-gate return (WALK_DONE); \ 16177c478bd9Sstevel@tonic-gate } 16187c478bd9Sstevel@tonic-gate 16197c478bd9Sstevel@tonic-gate int 16207c478bd9Sstevel@tonic-gate kmem_walk_init(mdb_walk_state_t *wsp) 16217c478bd9Sstevel@tonic-gate { 16227c478bd9Sstevel@tonic-gate if (wsp->walk_arg != NULL) 16237c478bd9Sstevel@tonic-gate wsp->walk_addr = (uintptr_t)wsp->walk_arg; 16247c478bd9Sstevel@tonic-gate 16257c478bd9Sstevel@tonic-gate if (wsp->walk_addr == NULL) 16267c478bd9Sstevel@tonic-gate KMEM_WALK_ALL("kmem", wsp); 16277c478bd9Sstevel@tonic-gate return (kmem_walk_init_common(wsp, KM_ALLOCATED)); 16287c478bd9Sstevel@tonic-gate } 16297c478bd9Sstevel@tonic-gate 16307c478bd9Sstevel@tonic-gate int 16317c478bd9Sstevel@tonic-gate bufctl_walk_init(mdb_walk_state_t *wsp) 16327c478bd9Sstevel@tonic-gate { 16337c478bd9Sstevel@tonic-gate if (wsp->walk_addr == NULL) 16347c478bd9Sstevel@tonic-gate KMEM_WALK_ALL("bufctl", wsp); 16357c478bd9Sstevel@tonic-gate return (kmem_walk_init_common(wsp, KM_ALLOCATED | KM_BUFCTL)); 16367c478bd9Sstevel@tonic-gate } 16377c478bd9Sstevel@tonic-gate 16387c478bd9Sstevel@tonic-gate int 16397c478bd9Sstevel@tonic-gate 
freemem_walk_init(mdb_walk_state_t *wsp) 16407c478bd9Sstevel@tonic-gate { 16417c478bd9Sstevel@tonic-gate if (wsp->walk_addr == NULL) 16427c478bd9Sstevel@tonic-gate KMEM_WALK_ALL("freemem", wsp); 16437c478bd9Sstevel@tonic-gate return (kmem_walk_init_common(wsp, KM_FREE)); 16447c478bd9Sstevel@tonic-gate } 16457c478bd9Sstevel@tonic-gate 16467c478bd9Sstevel@tonic-gate int 16477c478bd9Sstevel@tonic-gate freemem_constructed_walk_init(mdb_walk_state_t *wsp) 16487c478bd9Sstevel@tonic-gate { 16497c478bd9Sstevel@tonic-gate if (wsp->walk_addr == NULL) 16507c478bd9Sstevel@tonic-gate KMEM_WALK_ALL("freemem_constructed", wsp); 16517c478bd9Sstevel@tonic-gate return (kmem_walk_init_common(wsp, KM_FREE | KM_CONSTRUCTED)); 16527c478bd9Sstevel@tonic-gate } 16537c478bd9Sstevel@tonic-gate 16547c478bd9Sstevel@tonic-gate int 16557c478bd9Sstevel@tonic-gate freectl_walk_init(mdb_walk_state_t *wsp) 16567c478bd9Sstevel@tonic-gate { 16577c478bd9Sstevel@tonic-gate if (wsp->walk_addr == NULL) 16587c478bd9Sstevel@tonic-gate KMEM_WALK_ALL("freectl", wsp); 16597c478bd9Sstevel@tonic-gate return (kmem_walk_init_common(wsp, KM_FREE | KM_BUFCTL)); 16607c478bd9Sstevel@tonic-gate } 16617c478bd9Sstevel@tonic-gate 16627c478bd9Sstevel@tonic-gate int 16637c478bd9Sstevel@tonic-gate freectl_constructed_walk_init(mdb_walk_state_t *wsp) 16647c478bd9Sstevel@tonic-gate { 16657c478bd9Sstevel@tonic-gate if (wsp->walk_addr == NULL) 16667c478bd9Sstevel@tonic-gate KMEM_WALK_ALL("freectl_constructed", wsp); 16677c478bd9Sstevel@tonic-gate return (kmem_walk_init_common(wsp, 16687c478bd9Sstevel@tonic-gate KM_FREE | KM_BUFCTL | KM_CONSTRUCTED)); 16697c478bd9Sstevel@tonic-gate } 16707c478bd9Sstevel@tonic-gate 16717c478bd9Sstevel@tonic-gate typedef struct bufctl_history_walk { 16727c478bd9Sstevel@tonic-gate void *bhw_next; 16737c478bd9Sstevel@tonic-gate kmem_cache_t *bhw_cache; 16747c478bd9Sstevel@tonic-gate kmem_slab_t *bhw_slab; 16757c478bd9Sstevel@tonic-gate hrtime_t bhw_timestamp; 16767c478bd9Sstevel@tonic-gate } 
bufctl_history_walk_t; 16777c478bd9Sstevel@tonic-gate 16787c478bd9Sstevel@tonic-gate int 16797c478bd9Sstevel@tonic-gate bufctl_history_walk_init(mdb_walk_state_t *wsp) 16807c478bd9Sstevel@tonic-gate { 16817c478bd9Sstevel@tonic-gate bufctl_history_walk_t *bhw; 16827c478bd9Sstevel@tonic-gate kmem_bufctl_audit_t bc; 16837c478bd9Sstevel@tonic-gate kmem_bufctl_audit_t bcn; 16847c478bd9Sstevel@tonic-gate 16857c478bd9Sstevel@tonic-gate if (wsp->walk_addr == NULL) { 16867c478bd9Sstevel@tonic-gate mdb_warn("bufctl_history walk doesn't support global walks\n"); 16877c478bd9Sstevel@tonic-gate return (WALK_ERR); 16887c478bd9Sstevel@tonic-gate } 16897c478bd9Sstevel@tonic-gate 16907c478bd9Sstevel@tonic-gate if (mdb_vread(&bc, sizeof (bc), wsp->walk_addr) == -1) { 16917c478bd9Sstevel@tonic-gate mdb_warn("unable to read bufctl at %p", wsp->walk_addr); 16927c478bd9Sstevel@tonic-gate return (WALK_ERR); 16937c478bd9Sstevel@tonic-gate } 16947c478bd9Sstevel@tonic-gate 16957c478bd9Sstevel@tonic-gate bhw = mdb_zalloc(sizeof (*bhw), UM_SLEEP); 16967c478bd9Sstevel@tonic-gate bhw->bhw_timestamp = 0; 16977c478bd9Sstevel@tonic-gate bhw->bhw_cache = bc.bc_cache; 16987c478bd9Sstevel@tonic-gate bhw->bhw_slab = bc.bc_slab; 16997c478bd9Sstevel@tonic-gate 17007c478bd9Sstevel@tonic-gate /* 17017c478bd9Sstevel@tonic-gate * sometimes the first log entry matches the base bufctl; in that 17027c478bd9Sstevel@tonic-gate * case, skip the base bufctl. 
17037c478bd9Sstevel@tonic-gate */ 17047c478bd9Sstevel@tonic-gate if (bc.bc_lastlog != NULL && 17057c478bd9Sstevel@tonic-gate mdb_vread(&bcn, sizeof (bcn), (uintptr_t)bc.bc_lastlog) != -1 && 17067c478bd9Sstevel@tonic-gate bc.bc_addr == bcn.bc_addr && 17077c478bd9Sstevel@tonic-gate bc.bc_cache == bcn.bc_cache && 17087c478bd9Sstevel@tonic-gate bc.bc_slab == bcn.bc_slab && 17097c478bd9Sstevel@tonic-gate bc.bc_timestamp == bcn.bc_timestamp && 17107c478bd9Sstevel@tonic-gate bc.bc_thread == bcn.bc_thread) 17117c478bd9Sstevel@tonic-gate bhw->bhw_next = bc.bc_lastlog; 17127c478bd9Sstevel@tonic-gate else 17137c478bd9Sstevel@tonic-gate bhw->bhw_next = (void *)wsp->walk_addr; 17147c478bd9Sstevel@tonic-gate 17157c478bd9Sstevel@tonic-gate wsp->walk_addr = (uintptr_t)bc.bc_addr; 17167c478bd9Sstevel@tonic-gate wsp->walk_data = bhw; 17177c478bd9Sstevel@tonic-gate 17187c478bd9Sstevel@tonic-gate return (WALK_NEXT); 17197c478bd9Sstevel@tonic-gate } 17207c478bd9Sstevel@tonic-gate 17217c478bd9Sstevel@tonic-gate int 17227c478bd9Sstevel@tonic-gate bufctl_history_walk_step(mdb_walk_state_t *wsp) 17237c478bd9Sstevel@tonic-gate { 17247c478bd9Sstevel@tonic-gate bufctl_history_walk_t *bhw = wsp->walk_data; 17257c478bd9Sstevel@tonic-gate uintptr_t addr = (uintptr_t)bhw->bhw_next; 17267c478bd9Sstevel@tonic-gate uintptr_t baseaddr = wsp->walk_addr; 17277c478bd9Sstevel@tonic-gate kmem_bufctl_audit_t bc; 17287c478bd9Sstevel@tonic-gate 17297c478bd9Sstevel@tonic-gate if (addr == NULL) 17307c478bd9Sstevel@tonic-gate return (WALK_DONE); 17317c478bd9Sstevel@tonic-gate 17327c478bd9Sstevel@tonic-gate if (mdb_vread(&bc, sizeof (bc), addr) == -1) { 17337c478bd9Sstevel@tonic-gate mdb_warn("unable to read bufctl at %p", bhw->bhw_next); 17347c478bd9Sstevel@tonic-gate return (WALK_ERR); 17357c478bd9Sstevel@tonic-gate } 17367c478bd9Sstevel@tonic-gate 17377c478bd9Sstevel@tonic-gate /* 17387c478bd9Sstevel@tonic-gate * The bufctl is only valid if the address, cache, and slab are 17397c478bd9Sstevel@tonic-gate * 
correct. We also check that the timestamp is decreasing, to 17407c478bd9Sstevel@tonic-gate * prevent infinite loops. 17417c478bd9Sstevel@tonic-gate */ 17427c478bd9Sstevel@tonic-gate if ((uintptr_t)bc.bc_addr != baseaddr || 17437c478bd9Sstevel@tonic-gate bc.bc_cache != bhw->bhw_cache || 17447c478bd9Sstevel@tonic-gate bc.bc_slab != bhw->bhw_slab || 17457c478bd9Sstevel@tonic-gate (bhw->bhw_timestamp != 0 && bc.bc_timestamp >= bhw->bhw_timestamp)) 17467c478bd9Sstevel@tonic-gate return (WALK_DONE); 17477c478bd9Sstevel@tonic-gate 17487c478bd9Sstevel@tonic-gate bhw->bhw_next = bc.bc_lastlog; 17497c478bd9Sstevel@tonic-gate bhw->bhw_timestamp = bc.bc_timestamp; 17507c478bd9Sstevel@tonic-gate 17517c478bd9Sstevel@tonic-gate return (wsp->walk_callback(addr, &bc, wsp->walk_cbdata)); 17527c478bd9Sstevel@tonic-gate } 17537c478bd9Sstevel@tonic-gate 17547c478bd9Sstevel@tonic-gate void 17557c478bd9Sstevel@tonic-gate bufctl_history_walk_fini(mdb_walk_state_t *wsp) 17567c478bd9Sstevel@tonic-gate { 17577c478bd9Sstevel@tonic-gate bufctl_history_walk_t *bhw = wsp->walk_data; 17587c478bd9Sstevel@tonic-gate 17597c478bd9Sstevel@tonic-gate mdb_free(bhw, sizeof (*bhw)); 17607c478bd9Sstevel@tonic-gate } 17617c478bd9Sstevel@tonic-gate 17627c478bd9Sstevel@tonic-gate typedef struct kmem_log_walk { 17637c478bd9Sstevel@tonic-gate kmem_bufctl_audit_t *klw_base; 17647c478bd9Sstevel@tonic-gate kmem_bufctl_audit_t **klw_sorted; 17657c478bd9Sstevel@tonic-gate kmem_log_header_t klw_lh; 17667c478bd9Sstevel@tonic-gate size_t klw_size; 17677c478bd9Sstevel@tonic-gate size_t klw_maxndx; 17687c478bd9Sstevel@tonic-gate size_t klw_ndx; 17697c478bd9Sstevel@tonic-gate } kmem_log_walk_t; 17707c478bd9Sstevel@tonic-gate 17717c478bd9Sstevel@tonic-gate int 17727c478bd9Sstevel@tonic-gate kmem_log_walk_init(mdb_walk_state_t *wsp) 17737c478bd9Sstevel@tonic-gate { 17747c478bd9Sstevel@tonic-gate uintptr_t lp = wsp->walk_addr; 17757c478bd9Sstevel@tonic-gate kmem_log_walk_t *klw; 17767c478bd9Sstevel@tonic-gate 
kmem_log_header_t *lhp; 17777c478bd9Sstevel@tonic-gate int maxndx, i, j, k; 17787c478bd9Sstevel@tonic-gate 17797c478bd9Sstevel@tonic-gate /* 17807c478bd9Sstevel@tonic-gate * By default (global walk), walk the kmem_transaction_log. Otherwise 17817c478bd9Sstevel@tonic-gate * read the log whose kmem_log_header_t is stored at walk_addr. 17827c478bd9Sstevel@tonic-gate */ 17837c478bd9Sstevel@tonic-gate if (lp == NULL && mdb_readvar(&lp, "kmem_transaction_log") == -1) { 17847c478bd9Sstevel@tonic-gate mdb_warn("failed to read 'kmem_transaction_log'"); 17857c478bd9Sstevel@tonic-gate return (WALK_ERR); 17867c478bd9Sstevel@tonic-gate } 17877c478bd9Sstevel@tonic-gate 17887c478bd9Sstevel@tonic-gate if (lp == NULL) { 17897c478bd9Sstevel@tonic-gate mdb_warn("log is disabled\n"); 17907c478bd9Sstevel@tonic-gate return (WALK_ERR); 17917c478bd9Sstevel@tonic-gate } 17927c478bd9Sstevel@tonic-gate 17937c478bd9Sstevel@tonic-gate klw = mdb_zalloc(sizeof (kmem_log_walk_t), UM_SLEEP); 17947c478bd9Sstevel@tonic-gate lhp = &klw->klw_lh; 17957c478bd9Sstevel@tonic-gate 17967c478bd9Sstevel@tonic-gate if (mdb_vread(lhp, sizeof (kmem_log_header_t), lp) == -1) { 17977c478bd9Sstevel@tonic-gate mdb_warn("failed to read log header at %p", lp); 17987c478bd9Sstevel@tonic-gate mdb_free(klw, sizeof (kmem_log_walk_t)); 17997c478bd9Sstevel@tonic-gate return (WALK_ERR); 18007c478bd9Sstevel@tonic-gate } 18017c478bd9Sstevel@tonic-gate 18027c478bd9Sstevel@tonic-gate klw->klw_size = lhp->lh_chunksize * lhp->lh_nchunks; 18037c478bd9Sstevel@tonic-gate klw->klw_base = mdb_alloc(klw->klw_size, UM_SLEEP); 18047c478bd9Sstevel@tonic-gate maxndx = lhp->lh_chunksize / sizeof (kmem_bufctl_audit_t) - 1; 18057c478bd9Sstevel@tonic-gate 18067c478bd9Sstevel@tonic-gate if (mdb_vread(klw->klw_base, klw->klw_size, 18077c478bd9Sstevel@tonic-gate (uintptr_t)lhp->lh_base) == -1) { 18087c478bd9Sstevel@tonic-gate mdb_warn("failed to read log at base %p", lhp->lh_base); 18097c478bd9Sstevel@tonic-gate mdb_free(klw->klw_base, 
klw->klw_size); 18107c478bd9Sstevel@tonic-gate mdb_free(klw, sizeof (kmem_log_walk_t)); 18117c478bd9Sstevel@tonic-gate return (WALK_ERR); 18127c478bd9Sstevel@tonic-gate } 18137c478bd9Sstevel@tonic-gate 18147c478bd9Sstevel@tonic-gate klw->klw_sorted = mdb_alloc(maxndx * lhp->lh_nchunks * 18157c478bd9Sstevel@tonic-gate sizeof (kmem_bufctl_audit_t *), UM_SLEEP); 18167c478bd9Sstevel@tonic-gate 18177c478bd9Sstevel@tonic-gate for (i = 0, k = 0; i < lhp->lh_nchunks; i++) { 18187c478bd9Sstevel@tonic-gate kmem_bufctl_audit_t *chunk = (kmem_bufctl_audit_t *) 18197c478bd9Sstevel@tonic-gate ((uintptr_t)klw->klw_base + i * lhp->lh_chunksize); 18207c478bd9Sstevel@tonic-gate 18217c478bd9Sstevel@tonic-gate for (j = 0; j < maxndx; j++) 18227c478bd9Sstevel@tonic-gate klw->klw_sorted[k++] = &chunk[j]; 18237c478bd9Sstevel@tonic-gate } 18247c478bd9Sstevel@tonic-gate 18257c478bd9Sstevel@tonic-gate qsort(klw->klw_sorted, k, sizeof (kmem_bufctl_audit_t *), 18267c478bd9Sstevel@tonic-gate (int(*)(const void *, const void *))bufctlcmp); 18277c478bd9Sstevel@tonic-gate 18287c478bd9Sstevel@tonic-gate klw->klw_maxndx = k; 18297c478bd9Sstevel@tonic-gate wsp->walk_data = klw; 18307c478bd9Sstevel@tonic-gate 18317c478bd9Sstevel@tonic-gate return (WALK_NEXT); 18327c478bd9Sstevel@tonic-gate } 18337c478bd9Sstevel@tonic-gate 18347c478bd9Sstevel@tonic-gate int 18357c478bd9Sstevel@tonic-gate kmem_log_walk_step(mdb_walk_state_t *wsp) 18367c478bd9Sstevel@tonic-gate { 18377c478bd9Sstevel@tonic-gate kmem_log_walk_t *klw = wsp->walk_data; 18387c478bd9Sstevel@tonic-gate kmem_bufctl_audit_t *bcp; 18397c478bd9Sstevel@tonic-gate 18407c478bd9Sstevel@tonic-gate if (klw->klw_ndx == klw->klw_maxndx) 18417c478bd9Sstevel@tonic-gate return (WALK_DONE); 18427c478bd9Sstevel@tonic-gate 18437c478bd9Sstevel@tonic-gate bcp = klw->klw_sorted[klw->klw_ndx++]; 18447c478bd9Sstevel@tonic-gate 18457c478bd9Sstevel@tonic-gate return (wsp->walk_callback((uintptr_t)bcp - (uintptr_t)klw->klw_base + 18467c478bd9Sstevel@tonic-gate 
(uintptr_t)klw->klw_lh.lh_base, bcp, wsp->walk_cbdata)); 18477c478bd9Sstevel@tonic-gate } 18487c478bd9Sstevel@tonic-gate 18497c478bd9Sstevel@tonic-gate void 18507c478bd9Sstevel@tonic-gate kmem_log_walk_fini(mdb_walk_state_t *wsp) 18517c478bd9Sstevel@tonic-gate { 18527c478bd9Sstevel@tonic-gate kmem_log_walk_t *klw = wsp->walk_data; 18537c478bd9Sstevel@tonic-gate 18547c478bd9Sstevel@tonic-gate mdb_free(klw->klw_base, klw->klw_size); 18557c478bd9Sstevel@tonic-gate mdb_free(klw->klw_sorted, klw->klw_maxndx * 18567c478bd9Sstevel@tonic-gate sizeof (kmem_bufctl_audit_t *)); 18577c478bd9Sstevel@tonic-gate mdb_free(klw, sizeof (kmem_log_walk_t)); 18587c478bd9Sstevel@tonic-gate } 18597c478bd9Sstevel@tonic-gate 18607c478bd9Sstevel@tonic-gate typedef struct allocdby_bufctl { 18617c478bd9Sstevel@tonic-gate uintptr_t abb_addr; 18627c478bd9Sstevel@tonic-gate hrtime_t abb_ts; 18637c478bd9Sstevel@tonic-gate } allocdby_bufctl_t; 18647c478bd9Sstevel@tonic-gate 18657c478bd9Sstevel@tonic-gate typedef struct allocdby_walk { 18667c478bd9Sstevel@tonic-gate const char *abw_walk; 18677c478bd9Sstevel@tonic-gate uintptr_t abw_thread; 18687c478bd9Sstevel@tonic-gate size_t abw_nbufs; 18697c478bd9Sstevel@tonic-gate size_t abw_size; 18707c478bd9Sstevel@tonic-gate allocdby_bufctl_t *abw_buf; 18717c478bd9Sstevel@tonic-gate size_t abw_ndx; 18727c478bd9Sstevel@tonic-gate } allocdby_walk_t; 18737c478bd9Sstevel@tonic-gate 18747c478bd9Sstevel@tonic-gate int 18757c478bd9Sstevel@tonic-gate allocdby_walk_bufctl(uintptr_t addr, const kmem_bufctl_audit_t *bcp, 18767c478bd9Sstevel@tonic-gate allocdby_walk_t *abw) 18777c478bd9Sstevel@tonic-gate { 18787c478bd9Sstevel@tonic-gate if ((uintptr_t)bcp->bc_thread != abw->abw_thread) 18797c478bd9Sstevel@tonic-gate return (WALK_NEXT); 18807c478bd9Sstevel@tonic-gate 18817c478bd9Sstevel@tonic-gate if (abw->abw_nbufs == abw->abw_size) { 18827c478bd9Sstevel@tonic-gate allocdby_bufctl_t *buf; 18837c478bd9Sstevel@tonic-gate size_t oldsize = sizeof (allocdby_bufctl_t) * 
abw->abw_size; 18847c478bd9Sstevel@tonic-gate 18857c478bd9Sstevel@tonic-gate buf = mdb_zalloc(oldsize << 1, UM_SLEEP); 18867c478bd9Sstevel@tonic-gate 18877c478bd9Sstevel@tonic-gate bcopy(abw->abw_buf, buf, oldsize); 18887c478bd9Sstevel@tonic-gate mdb_free(abw->abw_buf, oldsize); 18897c478bd9Sstevel@tonic-gate 18907c478bd9Sstevel@tonic-gate abw->abw_size <<= 1; 18917c478bd9Sstevel@tonic-gate abw->abw_buf = buf; 18927c478bd9Sstevel@tonic-gate } 18937c478bd9Sstevel@tonic-gate 18947c478bd9Sstevel@tonic-gate abw->abw_buf[abw->abw_nbufs].abb_addr = addr; 18957c478bd9Sstevel@tonic-gate abw->abw_buf[abw->abw_nbufs].abb_ts = bcp->bc_timestamp; 18967c478bd9Sstevel@tonic-gate abw->abw_nbufs++; 18977c478bd9Sstevel@tonic-gate 18987c478bd9Sstevel@tonic-gate return (WALK_NEXT); 18997c478bd9Sstevel@tonic-gate } 19007c478bd9Sstevel@tonic-gate 19017c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 19027c478bd9Sstevel@tonic-gate int 19037c478bd9Sstevel@tonic-gate allocdby_walk_cache(uintptr_t addr, const kmem_cache_t *c, allocdby_walk_t *abw) 19047c478bd9Sstevel@tonic-gate { 19057c478bd9Sstevel@tonic-gate if (mdb_pwalk(abw->abw_walk, (mdb_walk_cb_t)allocdby_walk_bufctl, 19067c478bd9Sstevel@tonic-gate abw, addr) == -1) { 19077c478bd9Sstevel@tonic-gate mdb_warn("couldn't walk bufctl for cache %p", addr); 19087c478bd9Sstevel@tonic-gate return (WALK_DONE); 19097c478bd9Sstevel@tonic-gate } 19107c478bd9Sstevel@tonic-gate 19117c478bd9Sstevel@tonic-gate return (WALK_NEXT); 19127c478bd9Sstevel@tonic-gate } 19137c478bd9Sstevel@tonic-gate 19147c478bd9Sstevel@tonic-gate static int 19157c478bd9Sstevel@tonic-gate allocdby_cmp(const allocdby_bufctl_t *lhs, const allocdby_bufctl_t *rhs) 19167c478bd9Sstevel@tonic-gate { 19177c478bd9Sstevel@tonic-gate if (lhs->abb_ts < rhs->abb_ts) 19187c478bd9Sstevel@tonic-gate return (1); 19197c478bd9Sstevel@tonic-gate if (lhs->abb_ts > rhs->abb_ts) 19207c478bd9Sstevel@tonic-gate return (-1); 19217c478bd9Sstevel@tonic-gate return (0); 19227c478bd9Sstevel@tonic-gate } 
19237c478bd9Sstevel@tonic-gate 19247c478bd9Sstevel@tonic-gate static int 19257c478bd9Sstevel@tonic-gate allocdby_walk_init_common(mdb_walk_state_t *wsp, const char *walk) 19267c478bd9Sstevel@tonic-gate { 19277c478bd9Sstevel@tonic-gate allocdby_walk_t *abw; 19287c478bd9Sstevel@tonic-gate 19297c478bd9Sstevel@tonic-gate if (wsp->walk_addr == NULL) { 19307c478bd9Sstevel@tonic-gate mdb_warn("allocdby walk doesn't support global walks\n"); 19317c478bd9Sstevel@tonic-gate return (WALK_ERR); 19327c478bd9Sstevel@tonic-gate } 19337c478bd9Sstevel@tonic-gate 19347c478bd9Sstevel@tonic-gate abw = mdb_zalloc(sizeof (allocdby_walk_t), UM_SLEEP); 19357c478bd9Sstevel@tonic-gate 19367c478bd9Sstevel@tonic-gate abw->abw_thread = wsp->walk_addr; 19377c478bd9Sstevel@tonic-gate abw->abw_walk = walk; 19387c478bd9Sstevel@tonic-gate abw->abw_size = 128; /* something reasonable */ 19397c478bd9Sstevel@tonic-gate abw->abw_buf = 19407c478bd9Sstevel@tonic-gate mdb_zalloc(abw->abw_size * sizeof (allocdby_bufctl_t), UM_SLEEP); 19417c478bd9Sstevel@tonic-gate 19427c478bd9Sstevel@tonic-gate wsp->walk_data = abw; 19437c478bd9Sstevel@tonic-gate 19447c478bd9Sstevel@tonic-gate if (mdb_walk("kmem_cache", 19457c478bd9Sstevel@tonic-gate (mdb_walk_cb_t)allocdby_walk_cache, abw) == -1) { 19467c478bd9Sstevel@tonic-gate mdb_warn("couldn't walk kmem_cache"); 19477c478bd9Sstevel@tonic-gate allocdby_walk_fini(wsp); 19487c478bd9Sstevel@tonic-gate return (WALK_ERR); 19497c478bd9Sstevel@tonic-gate } 19507c478bd9Sstevel@tonic-gate 19517c478bd9Sstevel@tonic-gate qsort(abw->abw_buf, abw->abw_nbufs, sizeof (allocdby_bufctl_t), 19527c478bd9Sstevel@tonic-gate (int(*)(const void *, const void *))allocdby_cmp); 19537c478bd9Sstevel@tonic-gate 19547c478bd9Sstevel@tonic-gate return (WALK_NEXT); 19557c478bd9Sstevel@tonic-gate } 19567c478bd9Sstevel@tonic-gate 19577c478bd9Sstevel@tonic-gate int 19587c478bd9Sstevel@tonic-gate allocdby_walk_init(mdb_walk_state_t *wsp) 19597c478bd9Sstevel@tonic-gate { 19607c478bd9Sstevel@tonic-gate 
return (allocdby_walk_init_common(wsp, "bufctl")); 19617c478bd9Sstevel@tonic-gate } 19627c478bd9Sstevel@tonic-gate 19637c478bd9Sstevel@tonic-gate int 19647c478bd9Sstevel@tonic-gate freedby_walk_init(mdb_walk_state_t *wsp) 19657c478bd9Sstevel@tonic-gate { 19667c478bd9Sstevel@tonic-gate return (allocdby_walk_init_common(wsp, "freectl")); 19677c478bd9Sstevel@tonic-gate } 19687c478bd9Sstevel@tonic-gate 19697c478bd9Sstevel@tonic-gate int 19707c478bd9Sstevel@tonic-gate allocdby_walk_step(mdb_walk_state_t *wsp) 19717c478bd9Sstevel@tonic-gate { 19727c478bd9Sstevel@tonic-gate allocdby_walk_t *abw = wsp->walk_data; 19737c478bd9Sstevel@tonic-gate kmem_bufctl_audit_t bc; 19747c478bd9Sstevel@tonic-gate uintptr_t addr; 19757c478bd9Sstevel@tonic-gate 19767c478bd9Sstevel@tonic-gate if (abw->abw_ndx == abw->abw_nbufs) 19777c478bd9Sstevel@tonic-gate return (WALK_DONE); 19787c478bd9Sstevel@tonic-gate 19797c478bd9Sstevel@tonic-gate addr = abw->abw_buf[abw->abw_ndx++].abb_addr; 19807c478bd9Sstevel@tonic-gate 19817c478bd9Sstevel@tonic-gate if (mdb_vread(&bc, sizeof (bc), addr) == -1) { 19827c478bd9Sstevel@tonic-gate mdb_warn("couldn't read bufctl at %p", addr); 19837c478bd9Sstevel@tonic-gate return (WALK_DONE); 19847c478bd9Sstevel@tonic-gate } 19857c478bd9Sstevel@tonic-gate 19867c478bd9Sstevel@tonic-gate return (wsp->walk_callback(addr, &bc, wsp->walk_cbdata)); 19877c478bd9Sstevel@tonic-gate } 19887c478bd9Sstevel@tonic-gate 19897c478bd9Sstevel@tonic-gate void 19907c478bd9Sstevel@tonic-gate allocdby_walk_fini(mdb_walk_state_t *wsp) 19917c478bd9Sstevel@tonic-gate { 19927c478bd9Sstevel@tonic-gate allocdby_walk_t *abw = wsp->walk_data; 19937c478bd9Sstevel@tonic-gate 19947c478bd9Sstevel@tonic-gate mdb_free(abw->abw_buf, sizeof (allocdby_bufctl_t) * abw->abw_size); 19957c478bd9Sstevel@tonic-gate mdb_free(abw, sizeof (allocdby_walk_t)); 19967c478bd9Sstevel@tonic-gate } 19977c478bd9Sstevel@tonic-gate 19987c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 19997c478bd9Sstevel@tonic-gate int 
20007c478bd9Sstevel@tonic-gate allocdby_walk(uintptr_t addr, const kmem_bufctl_audit_t *bcp, void *ignored) 20017c478bd9Sstevel@tonic-gate { 20027c478bd9Sstevel@tonic-gate char c[MDB_SYM_NAMLEN]; 20037c478bd9Sstevel@tonic-gate GElf_Sym sym; 20047c478bd9Sstevel@tonic-gate int i; 20057c478bd9Sstevel@tonic-gate 20067c478bd9Sstevel@tonic-gate mdb_printf("%0?p %12llx ", addr, bcp->bc_timestamp); 20077c478bd9Sstevel@tonic-gate for (i = 0; i < bcp->bc_depth; i++) { 20087c478bd9Sstevel@tonic-gate if (mdb_lookup_by_addr(bcp->bc_stack[i], 20097c478bd9Sstevel@tonic-gate MDB_SYM_FUZZY, c, sizeof (c), &sym) == -1) 20107c478bd9Sstevel@tonic-gate continue; 20117c478bd9Sstevel@tonic-gate if (strncmp(c, "kmem_", 5) == 0) 20127c478bd9Sstevel@tonic-gate continue; 20137c478bd9Sstevel@tonic-gate mdb_printf("%s+0x%lx", 20147c478bd9Sstevel@tonic-gate c, bcp->bc_stack[i] - (uintptr_t)sym.st_value); 20157c478bd9Sstevel@tonic-gate break; 20167c478bd9Sstevel@tonic-gate } 20177c478bd9Sstevel@tonic-gate mdb_printf("\n"); 20187c478bd9Sstevel@tonic-gate 20197c478bd9Sstevel@tonic-gate return (WALK_NEXT); 20207c478bd9Sstevel@tonic-gate } 20217c478bd9Sstevel@tonic-gate 20227c478bd9Sstevel@tonic-gate static int 20237c478bd9Sstevel@tonic-gate allocdby_common(uintptr_t addr, uint_t flags, const char *w) 20247c478bd9Sstevel@tonic-gate { 20257c478bd9Sstevel@tonic-gate if (!(flags & DCMD_ADDRSPEC)) 20267c478bd9Sstevel@tonic-gate return (DCMD_USAGE); 20277c478bd9Sstevel@tonic-gate 20287c478bd9Sstevel@tonic-gate mdb_printf("%-?s %12s %s\n", "BUFCTL", "TIMESTAMP", "CALLER"); 20297c478bd9Sstevel@tonic-gate 20307c478bd9Sstevel@tonic-gate if (mdb_pwalk(w, (mdb_walk_cb_t)allocdby_walk, NULL, addr) == -1) { 20317c478bd9Sstevel@tonic-gate mdb_warn("can't walk '%s' for %p", w, addr); 20327c478bd9Sstevel@tonic-gate return (DCMD_ERR); 20337c478bd9Sstevel@tonic-gate } 20347c478bd9Sstevel@tonic-gate 20357c478bd9Sstevel@tonic-gate return (DCMD_OK); 20367c478bd9Sstevel@tonic-gate } 20377c478bd9Sstevel@tonic-gate 
/*
 * ::allocdby dcmd - show bufctls allocated by the given thread.
 */
/*ARGSUSED*/
int
allocdby(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	return (allocdby_common(addr, flags, "allocdby"));
}

/*
 * ::freedby dcmd - show bufctls freed by the given thread.
 */
/*ARGSUSED*/
int
freedby(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	return (allocdby_common(addr, flags, "freedby"));
}

/*
 * Return a string describing the address in relation to the given thread's
 * stack.
 *
 * - If the thread state is TS_FREE, return " (inactive interrupt thread)".
 *
 * - If the address is above the stack pointer, return an empty string
 *   signifying that the address is active.
 *
 * - If the address is below the stack pointer, and the thread is not on proc,
 *   return " (below sp)".
 *
 * - If the address is below the stack pointer, and the thread is on proc,
 *   return " (possibly below sp)".  Depending on context, we may or may not
 *   have an accurate t_sp.
 */
static const char *
stack_active(const kthread_t *t, uintptr_t addr)
{
	uintptr_t panicstk;
	GElf_Sym sym;

	if (t->t_state == TS_FREE)
		return (" (inactive interrupt thread)");

	/*
	 * Check to see if we're on the panic stack.  If so, ignore t_sp, as it
	 * no longer relates to the thread's real stack.
	 */
	if (mdb_lookup_by_name("panic_stack", &sym) == 0) {
		panicstk = (uintptr_t)sym.st_value;

		if (t->t_sp >= panicstk && t->t_sp < panicstk + PANICSTKSIZE)
			return ("");
	}

	if (addr >= t->t_sp + STACK_BIAS)
		return ("");

	if (t->t_state == TS_ONPROC)
		return (" (possibly below sp)");

	return (" (below sp)");
}

/*
 * Shared state for the ::whatis dcmd and all of its whatis_walk_* callbacks.
 */
typedef struct whatis {
	uintptr_t w_addr;		/* address being identified */
	const kmem_cache_t *w_cache;	/* cache currently being searched */
	const vmem_t *w_vmem;		/* arena currently being searched */
	size_t w_slab_align;		/* arena quantum, for slab alignment */
	int w_slab_found;		/* set when addr falls in a slab */
	int w_found;			/* number of matches reported */
	int w_kmem_lite_count;		/* callers recorded per KMF_LITE tag */
	uint_t w_verbose;		/* -v: narrate the search */
	uint_t w_freemem;		/* currently searching freed memory */
	uint_t w_all;			/* -a: report all matches */
	uint_t w_bufctl;		/* -b: walk bufctls, print bufctl ptr */
	uint_t w_idspace;		/* -i: search identifier spaces only */
} whatis_t;

/*
 * Report a match inside a kmem buffer at addr (bufctl baddr, or 0 if
 * unknown).  If the cache has redzone/audit/lite debugging enabled, also
 * dig the bufctl pointer and recent caller PCs out of the buftag.
 */
static void
whatis_print_kmem(uintptr_t addr, uintptr_t baddr, whatis_t *w)
{
	/* LINTED pointer cast may result in improper alignment */
	uintptr_t btaddr = (uintptr_t)KMEM_BUFTAG(w->w_cache, addr);
	intptr_t stat;
	int count = 0;
	int i;
	pc_t callers[16];

	if (w->w_cache->cache_flags & KMF_REDZONE) {
		kmem_buftag_t bt;

		if (mdb_vread(&bt, sizeof (bt), btaddr) == -1)
			goto done;

		/* bt_bufctl ^ bt_bxstat encodes the buffer's alloc state */
		stat = (intptr_t)bt.bt_bufctl ^ bt.bt_bxstat;

		if (stat != KMEM_BUFTAG_ALLOC && stat != KMEM_BUFTAG_FREE)
			goto done;

		/*
		 * provide the bufctl ptr if it has useful information
		 */
		if (baddr == 0 && (w->w_cache->cache_flags & KMF_AUDIT))
			baddr = (uintptr_t)bt.bt_bufctl;

		if (w->w_cache->cache_flags & KMF_LITE) {
			count = w->w_kmem_lite_count;

			if (count * sizeof (pc_t) > sizeof (callers))
				count = 0;

			if (count > 0 &&
			    mdb_vread(callers, count * sizeof (pc_t),
			    btaddr +
			    offsetof(kmem_buftag_lite_t, bt_history)) == -1)
				count = 0;

			/*
			 * skip unused callers
			 */
			while (count > 0 && callers[count - 1] ==
			    (pc_t)KMEM_UNINITIALIZED_PATTERN)
				count--;
		}
	}

done:
	if (baddr == 0)
		mdb_printf("%p is %p+%p, %s from %s\n",
		    w->w_addr, addr, w->w_addr - addr,
		    w->w_freemem == FALSE ? "allocated" : "freed",
		    w->w_cache->cache_name);
	else
		mdb_printf("%p is %p+%p, bufctl %p %s from %s\n",
		    w->w_addr, addr, w->w_addr - addr, baddr,
		    w->w_freemem == FALSE ? "allocated" : "freed",
		    w->w_cache->cache_name);

	/* print the recorded caller history, if any */
	if (count > 0) {
		mdb_inc_indent(8);
		mdb_printf("recent caller%s: %a%s", (count != 1)? "s":"",
		    callers[0], (count != 1)? ", ":"\n");
		for (i = 1; i < count; i++)
			mdb_printf("%a%s", callers[i],
			    (i + 1 < count)? ", ":"\n");
		mdb_dec_indent(8);
	}
}

/*
 * kmem/freemem walk callback: report the buffer if it contains w_addr.
 */
/*ARGSUSED*/
static int
whatis_walk_kmem(uintptr_t addr, void *ignored, whatis_t *w)
{
	if (w->w_addr < addr || w->w_addr >= addr + w->w_cache->cache_bufsize)
		return (WALK_NEXT);

	whatis_print_kmem(addr, 0, w);
	w->w_found++;
	return (w->w_all == TRUE ? WALK_NEXT : WALK_DONE);
}

/*
 * vmem_alloc/vmem_free seg walk callback: report the segment if it
 * contains w_addr.
 */
static int
whatis_walk_seg(uintptr_t addr, const vmem_seg_t *vs, whatis_t *w)
{
	if (w->w_addr < vs->vs_start || w->w_addr >= vs->vs_end)
		return (WALK_NEXT);

	mdb_printf("%p is %p+%p ", w->w_addr,
	    vs->vs_start, w->w_addr - vs->vs_start);

	/*
	 * Always provide the vmem_seg pointer if it has a stack trace.
	 */
	if (w->w_bufctl == TRUE ||
	    (vs->vs_type == VMEM_ALLOC && vs->vs_depth != 0)) {
		mdb_printf("(vmem_seg %p) ", addr);
	}

	mdb_printf("%sfrom %s vmem arena\n", w->w_freemem == TRUE ?
	    "freed " : "", w->w_vmem->vm_name);

	w->w_found++;
	return (w->w_all == TRUE ? WALK_NEXT : WALK_DONE);
}

/*
 * vmem walk callback: search one arena's allocated, then free, segments
 * for w_addr.  Arenas are filtered by identifier-ness against w_idspace.
 */
static int
whatis_walk_vmem(uintptr_t addr, const vmem_t *vmem, whatis_t *w)
{
	const char *nm = vmem->vm_name;
	w->w_vmem = vmem;
	w->w_freemem = FALSE;

	/* skip arenas whose VMC_IDENTIFIER-ness doesn't match -i */
	if (((vmem->vm_cflags & VMC_IDENTIFIER) != 0) ^ w->w_idspace)
		return (WALK_NEXT);

	if (w->w_verbose)
		mdb_printf("Searching vmem arena %s...\n", nm);

	if (mdb_pwalk("vmem_alloc",
	    (mdb_walk_cb_t)whatis_walk_seg, w, addr) == -1) {
		mdb_warn("can't walk vmem seg for %p", addr);
		return (WALK_NEXT);
	}

	if (w->w_found && w->w_all == FALSE)
		return (WALK_DONE);

	if (w->w_verbose)
		mdb_printf("Searching vmem arena %s for free virtual...\n", nm);

	w->w_freemem = TRUE;

	if (mdb_pwalk("vmem_free",
	    (mdb_walk_cb_t)whatis_walk_seg, w, addr) == -1) {
		mdb_warn("can't walk vmem seg for %p", addr);
		return (WALK_NEXT);
	}

	return (w->w_found && w->w_all == FALSE ? WALK_DONE : WALK_NEXT);
}

/*
 * bufctl/freectl walk callback: like whatis_walk_kmem, but the bufctl
 * address is known, so it can be printed directly.
 */
/*ARGSUSED*/
static int
whatis_walk_bufctl(uintptr_t baddr, const kmem_bufctl_t *bcp, whatis_t *w)
{
	uintptr_t addr;

	if (bcp == NULL)
		return (WALK_NEXT);

	addr = (uintptr_t)bcp->bc_addr;

	if (w->w_addr < addr || w->w_addr >= addr + w->w_cache->cache_bufsize)
		return (WALK_NEXT);

	whatis_print_kmem(addr, baddr, w);
	w->w_found++;
	return (w->w_all == TRUE ? WALK_NEXT : WALK_DONE);
}

/*
 * kmem_slab walk callback: set w_slab_found and stop as soon as a slab
 * containing w_addr is seen.  The slab base is aligned down to the
 * arena quantum (w_slab_align) before the containment test.
 */
/*ARGSUSED*/
static int
whatis_walk_slab(uintptr_t saddr, const kmem_slab_t *sp, whatis_t *w)
{
	uintptr_t base = P2ALIGN((uintptr_t)sp->slab_base, w->w_slab_align);

	if ((w->w_addr - base) >= w->w_cache->cache_slabsize)
		return (WALK_NEXT);

	w->w_slab_found++;
	return (WALK_DONE);
}

/*
 * kmem_cache walk callback: search one cache for w_addr.  First a cheap
 * slab-containment check, then the full buffer (or bufctl) walks over
 * allocated and then freed memory.
 */
static int
whatis_walk_cache(uintptr_t addr, const kmem_cache_t *c, whatis_t *w)
{
	char *walk, *freewalk;
	mdb_walk_cb_t func;
	vmem_t *vmp = c->cache_arena;

	/*
	 * NOTE(review): VMC_IDENTIFIER is a vmem creation flag, yet it is
	 * tested against cache_flags here (cf. whatis_walk_vmem, which
	 * tests vm_cflags) -- presumably the flag values line up so that
	 * identifier caches are detected; confirm before changing.
	 */
	if (((c->cache_flags & VMC_IDENTIFIER) != 0) ^ w->w_idspace)
		return (WALK_NEXT);

	if (w->w_bufctl == FALSE) {
		walk = "kmem";
		freewalk = "freemem";
		func = (mdb_walk_cb_t)whatis_walk_kmem;
	} else {
		walk = "bufctl";
		freewalk = "freectl";
		func = (mdb_walk_cb_t)whatis_walk_bufctl;
	}

	w->w_cache = c;

	if (w->w_verbose)
		mdb_printf("Searching %s's slabs...\n", c->cache_name);

	/*
	 * Verify that the address is in one of the cache's slabs.  If not,
	 * we can skip the more expensive walkers.  (this is purely a
	 * heuristic -- as long as there are no false-negatives, we'll be fine)
	 *
	 * We try to get the cache's arena's quantum, since to accurately
	 * get the base of a slab, you have to align it to the quantum.  If
	 * it doesn't look sensible, we fall back to not aligning.
	 */
	if (mdb_vread(&w->w_slab_align, sizeof (w->w_slab_align),
	    (uintptr_t)&vmp->vm_quantum) == -1) {
		mdb_warn("unable to read %p->cache_arena->vm_quantum", c);
		w->w_slab_align = 1;
	}

	/* quantum must be a nonzero power of two no larger than a slab */
	if ((c->cache_slabsize < w->w_slab_align) || w->w_slab_align == 0 ||
	    (w->w_slab_align & (w->w_slab_align - 1))) {
		mdb_warn("%p's arena has invalid quantum (0x%p)\n", c,
		    w->w_slab_align);
		w->w_slab_align = 1;
	}

	w->w_slab_found = 0;
	if (mdb_pwalk("kmem_slab", (mdb_walk_cb_t)whatis_walk_slab, w,
	    addr) == -1) {
		mdb_warn("can't find kmem_slab walker");
		return (WALK_DONE);
	}
	if (w->w_slab_found == 0)
		return (WALK_NEXT);

	if (c->cache_flags & KMF_LITE) {
		if (mdb_readvar(&w->w_kmem_lite_count,
		    "kmem_lite_count") == -1 || w->w_kmem_lite_count > 16)
			w->w_kmem_lite_count = 0;
	}

	if (w->w_verbose)
		mdb_printf("Searching %s...\n", c->cache_name);

	w->w_freemem = FALSE;

	if (mdb_pwalk(walk, func, w, addr) == -1) {
		mdb_warn("can't find %s walker", walk);
		return (WALK_DONE);
	}

	if (w->w_found && w->w_all == FALSE)
		return (WALK_DONE);

	/*
	 * We have searched for allocated memory; now search for freed memory.
	 */
	if (w->w_verbose)
		mdb_printf("Searching %s for free memory...\n", c->cache_name);

	w->w_freemem = TRUE;

	if (mdb_pwalk(freewalk, func, w, addr) == -1) {
		mdb_warn("can't find %s walker", freewalk);
		return (WALK_DONE);
	}

	return (w->w_found && w->w_all == FALSE ? WALK_DONE : WALK_NEXT);
}

/*
 * First pass over caches: only those likely to hold ordinary data
 * (skip KMC_NOTOUCH caches).
 */
static int
whatis_walk_touch(uintptr_t addr, const kmem_cache_t *c, whatis_t *w)
{
	if (c->cache_cflags & KMC_NOTOUCH)
		return (WALK_NEXT);

	return (whatis_walk_cache(addr, c, w));
}

/*
 * Second pass over caches: only the KMC_NOTOUCH ones skipped above.
 */
static int
whatis_walk_notouch(uintptr_t addr, const kmem_cache_t *c, whatis_t *w)
{
	if (!(c->cache_cflags & KMC_NOTOUCH))
		return (WALK_NEXT);

	return (whatis_walk_cache(addr, c, w))
;
}

/*
 * thread walk callback: report w_addr if it lies inside the kthread_t
 * itself or within the thread's stack [t_stkbase, t_stk].
 */
static int
whatis_walk_thread(uintptr_t addr, const kthread_t *t, whatis_t *w)
{
	/*
	 * Often, one calls ::whatis on an address from a thread structure.
	 * We use this opportunity to short circuit this case...
	 */
	if (w->w_addr >= addr && w->w_addr < addr + sizeof (kthread_t)) {
		mdb_printf("%p is %p+%p, allocated as a thread structure\n",
		    w->w_addr, addr, w->w_addr - addr);
		w->w_found++;
		return (w->w_all == TRUE ? WALK_NEXT : WALK_DONE);
	}

	if (w->w_addr < (uintptr_t)t->t_stkbase ||
	    w->w_addr > (uintptr_t)t->t_stk)
		return (WALK_NEXT);

	/* threads with no stack can match the range test spuriously */
	if (t->t_stkbase == NULL)
		return (WALK_NEXT);

	mdb_printf("%p is in thread %p's stack%s\n", w->w_addr, addr,
	    stack_active(t, w->w_addr));

	w->w_found++;
	return (w->w_all == TRUE ? WALK_NEXT : WALK_DONE);
}

/*
 * modctl walk callback: report w_addr if it falls within one of the
 * module's segments (text, data, bss, symtab, symspace), and resolve it
 * to a named symbol when possible.
 */
static int
whatis_walk_modctl(uintptr_t addr, const struct modctl *m, whatis_t *w)
{
	struct module mod;
	char name[MODMAXNAMELEN], *where;
	char c[MDB_SYM_NAMLEN];
	Shdr shdr;
	GElf_Sym sym;

	if (m->mod_mp == NULL)
		return (WALK_NEXT);

	if (mdb_vread(&mod, sizeof (mod), (uintptr_t)m->mod_mp) == -1) {
		mdb_warn("couldn't read modctl %p's module", addr);
		return (WALK_NEXT);
	}

	if (w->w_addr >= (uintptr_t)mod.text &&
	    w->w_addr < (uintptr_t)mod.text + mod.text_size) {
		where = "text segment";
		goto found;
	}

	if (w->w_addr >= (uintptr_t)mod.data &&
	    w->w_addr < (uintptr_t)mod.data + mod.data_size) {
		where = "data segment";
		goto found;
	}

	if (w->w_addr >= (uintptr_t)mod.bss &&
	    w->w_addr < (uintptr_t)mod.bss + mod.bss_size) {
		where = "bss";
		goto found;
	}

	if (mdb_vread(&shdr, sizeof (shdr), (uintptr_t)mod.symhdr) == -1) {
		mdb_warn("couldn't read symbol header for %p's module", addr);
		return (WALK_NEXT);
	}

	if (w->w_addr >= (uintptr_t)mod.symtbl && w->w_addr <
	    (uintptr_t)mod.symtbl + (uintptr_t)mod.nsyms * shdr.sh_entsize) {
		where = "symtab";
		goto found;
	}

	if (w->w_addr >= (uintptr_t)mod.symspace &&
	    w->w_addr < (uintptr_t)mod.symspace + (uintptr_t)mod.symsize) {
		where = "symspace";
		goto found;
	}

	return (WALK_NEXT);

found:
	if (mdb_readstr(name, sizeof (name), (uintptr_t)m->mod_modname) == -1)
		(void) mdb_snprintf(name, sizeof (name), "0x%p", addr);

	mdb_printf("%p is ", w->w_addr);

	/*
	 * If we found this address in a module, then there's a chance that
	 * it's actually a named symbol.  Try the symbol lookup.
	 */
	if (mdb_lookup_by_addr(w->w_addr, MDB_SYM_FUZZY, c, sizeof (c),
	    &sym) != -1 && w->w_addr >= (uintptr_t)sym.st_value &&
	    w->w_addr < (uintptr_t)sym.st_value + sym.st_size) {
		mdb_printf("%s+%lx ", c, w->w_addr - (uintptr_t)sym.st_value);
	}

	mdb_printf("in %s's %s\n", name, where);

	w->w_found++;
	return (w->w_all == TRUE ? WALK_NEXT : WALK_DONE);
}

/*
 * page walk callback: report w_addr if it falls within a page_t.  The
 * real page_t size is taken from the target's CTF (it is machine-
 * dependent), falling back to our compiled-in sizeof (page_t).
 */
/*ARGSUSED*/
static int
whatis_walk_page(uintptr_t addr, const void *ignored, whatis_t *w)
{
	static int machsize = 0;
	mdb_ctf_id_t id;

	if (machsize == 0) {
		if (mdb_ctf_lookup_by_name("unix`page_t", &id) == 0)
			machsize = mdb_ctf_type_size(id);
		else {
			mdb_warn("could not get size of page_t");
			machsize = sizeof (page_t);
		}
	}

	if (w->w_addr < addr || w->w_addr >= addr + machsize)
		return (WALK_NEXT);
25187c478bd9Sstevel@tonic-gate 25197c478bd9Sstevel@tonic-gate mdb_printf("%p is %p+%p, allocated as a page structure\n", 25207c478bd9Sstevel@tonic-gate w->w_addr, addr, w->w_addr - addr); 25217c478bd9Sstevel@tonic-gate 25227c478bd9Sstevel@tonic-gate w->w_found++; 25237c478bd9Sstevel@tonic-gate return (w->w_all == TRUE ? WALK_NEXT : WALK_DONE); 25247c478bd9Sstevel@tonic-gate } 25257c478bd9Sstevel@tonic-gate 25267c478bd9Sstevel@tonic-gate int 25277c478bd9Sstevel@tonic-gate whatis(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 25287c478bd9Sstevel@tonic-gate { 25297c478bd9Sstevel@tonic-gate whatis_t w; 25307c478bd9Sstevel@tonic-gate 25317c478bd9Sstevel@tonic-gate if (!(flags & DCMD_ADDRSPEC)) 25327c478bd9Sstevel@tonic-gate return (DCMD_USAGE); 25337c478bd9Sstevel@tonic-gate 25347c478bd9Sstevel@tonic-gate w.w_verbose = FALSE; 25357c478bd9Sstevel@tonic-gate w.w_bufctl = FALSE; 25367c478bd9Sstevel@tonic-gate w.w_all = FALSE; 25377c478bd9Sstevel@tonic-gate w.w_idspace = FALSE; 25387c478bd9Sstevel@tonic-gate 25397c478bd9Sstevel@tonic-gate if (mdb_getopts(argc, argv, 25407c478bd9Sstevel@tonic-gate 'v', MDB_OPT_SETBITS, TRUE, &w.w_verbose, 25417c478bd9Sstevel@tonic-gate 'a', MDB_OPT_SETBITS, TRUE, &w.w_all, 25427c478bd9Sstevel@tonic-gate 'i', MDB_OPT_SETBITS, TRUE, &w.w_idspace, 25437c478bd9Sstevel@tonic-gate 'b', MDB_OPT_SETBITS, TRUE, &w.w_bufctl, NULL) != argc) 25447c478bd9Sstevel@tonic-gate return (DCMD_USAGE); 25457c478bd9Sstevel@tonic-gate 25467c478bd9Sstevel@tonic-gate w.w_addr = addr; 25477c478bd9Sstevel@tonic-gate w.w_found = 0; 25487c478bd9Sstevel@tonic-gate 25497c478bd9Sstevel@tonic-gate if (w.w_verbose) 25507c478bd9Sstevel@tonic-gate mdb_printf("Searching modules...\n"); 25517c478bd9Sstevel@tonic-gate 25527c478bd9Sstevel@tonic-gate if (!w.w_idspace) { 25537c478bd9Sstevel@tonic-gate if (mdb_walk("modctl", (mdb_walk_cb_t)whatis_walk_modctl, &w) 25547c478bd9Sstevel@tonic-gate == -1) { 25557c478bd9Sstevel@tonic-gate mdb_warn("couldn't find modctl 
walker"); 25567c478bd9Sstevel@tonic-gate return (DCMD_ERR); 25577c478bd9Sstevel@tonic-gate } 25587c478bd9Sstevel@tonic-gate 25597c478bd9Sstevel@tonic-gate if (w.w_found && w.w_all == FALSE) 25607c478bd9Sstevel@tonic-gate return (DCMD_OK); 25617c478bd9Sstevel@tonic-gate 25627c478bd9Sstevel@tonic-gate /* 25637c478bd9Sstevel@tonic-gate * Now search all thread stacks. Yes, this is a little weak; we 25647c478bd9Sstevel@tonic-gate * can save a lot of work by first checking to see if the 25657c478bd9Sstevel@tonic-gate * address is in segkp vs. segkmem. But hey, computers are 25667c478bd9Sstevel@tonic-gate * fast. 25677c478bd9Sstevel@tonic-gate */ 25687c478bd9Sstevel@tonic-gate if (w.w_verbose) 25697c478bd9Sstevel@tonic-gate mdb_printf("Searching threads...\n"); 25707c478bd9Sstevel@tonic-gate 25717c478bd9Sstevel@tonic-gate if (mdb_walk("thread", (mdb_walk_cb_t)whatis_walk_thread, &w) 25727c478bd9Sstevel@tonic-gate == -1) { 25737c478bd9Sstevel@tonic-gate mdb_warn("couldn't find thread walker"); 25747c478bd9Sstevel@tonic-gate return (DCMD_ERR); 25757c478bd9Sstevel@tonic-gate } 25767c478bd9Sstevel@tonic-gate 25777c478bd9Sstevel@tonic-gate if (w.w_found && w.w_all == FALSE) 25787c478bd9Sstevel@tonic-gate return (DCMD_OK); 25797c478bd9Sstevel@tonic-gate 25807c478bd9Sstevel@tonic-gate if (w.w_verbose) 25817c478bd9Sstevel@tonic-gate mdb_printf("Searching page structures...\n"); 25827c478bd9Sstevel@tonic-gate 25837c478bd9Sstevel@tonic-gate if (mdb_walk("page", (mdb_walk_cb_t)whatis_walk_page, &w) 25847c478bd9Sstevel@tonic-gate == -1) { 25857c478bd9Sstevel@tonic-gate mdb_warn("couldn't find page walker"); 25867c478bd9Sstevel@tonic-gate return (DCMD_ERR); 25877c478bd9Sstevel@tonic-gate } 25887c478bd9Sstevel@tonic-gate 25897c478bd9Sstevel@tonic-gate if (w.w_found && w.w_all == FALSE) 25907c478bd9Sstevel@tonic-gate return (DCMD_OK); 25917c478bd9Sstevel@tonic-gate } 25927c478bd9Sstevel@tonic-gate 25937c478bd9Sstevel@tonic-gate if (mdb_walk("kmem_cache", 25947c478bd9Sstevel@tonic-gate 
(mdb_walk_cb_t)whatis_walk_touch, &w) == -1) { 25957c478bd9Sstevel@tonic-gate mdb_warn("couldn't find kmem_cache walker"); 25967c478bd9Sstevel@tonic-gate return (DCMD_ERR); 25977c478bd9Sstevel@tonic-gate } 25987c478bd9Sstevel@tonic-gate 25997c478bd9Sstevel@tonic-gate if (w.w_found && w.w_all == FALSE) 26007c478bd9Sstevel@tonic-gate return (DCMD_OK); 26017c478bd9Sstevel@tonic-gate 26027c478bd9Sstevel@tonic-gate if (mdb_walk("kmem_cache", 26037c478bd9Sstevel@tonic-gate (mdb_walk_cb_t)whatis_walk_notouch, &w) == -1) { 26047c478bd9Sstevel@tonic-gate mdb_warn("couldn't find kmem_cache walker"); 26057c478bd9Sstevel@tonic-gate return (DCMD_ERR); 26067c478bd9Sstevel@tonic-gate } 26077c478bd9Sstevel@tonic-gate 26087c478bd9Sstevel@tonic-gate if (w.w_found && w.w_all == FALSE) 26097c478bd9Sstevel@tonic-gate return (DCMD_OK); 26107c478bd9Sstevel@tonic-gate 26117c478bd9Sstevel@tonic-gate if (mdb_walk("vmem_postfix", 26127c478bd9Sstevel@tonic-gate (mdb_walk_cb_t)whatis_walk_vmem, &w) == -1) { 26137c478bd9Sstevel@tonic-gate mdb_warn("couldn't find vmem_postfix walker"); 26147c478bd9Sstevel@tonic-gate return (DCMD_ERR); 26157c478bd9Sstevel@tonic-gate } 26167c478bd9Sstevel@tonic-gate 26177c478bd9Sstevel@tonic-gate if (w.w_found == 0) 26187c478bd9Sstevel@tonic-gate mdb_printf("%p is unknown\n", addr); 26197c478bd9Sstevel@tonic-gate 26207c478bd9Sstevel@tonic-gate return (DCMD_OK); 26217c478bd9Sstevel@tonic-gate } 26227c478bd9Sstevel@tonic-gate 26237c478bd9Sstevel@tonic-gate void 26247c478bd9Sstevel@tonic-gate whatis_help(void) 26257c478bd9Sstevel@tonic-gate { 26267c478bd9Sstevel@tonic-gate mdb_printf( 26277c478bd9Sstevel@tonic-gate "Given a virtual address, attempt to determine where it came\n" 26287c478bd9Sstevel@tonic-gate "from.\n" 26297c478bd9Sstevel@tonic-gate "\n" 26307c478bd9Sstevel@tonic-gate "\t-v\tVerbose output; display caches/arenas/etc as they are\n" 26317c478bd9Sstevel@tonic-gate "\t\tsearched\n" 26327c478bd9Sstevel@tonic-gate "\t-a\tFind all possible sources. 
/*
 * Help text for the ::whatis dcmd, describing its -v/-a/-i/-b options.
 */
void
whatis_help(void)
{
	mdb_printf(
	    "Given a virtual address, attempt to determine where it came\n"
	    "from.\n"
	    "\n"
	    "\t-v\tVerbose output; display caches/arenas/etc as they are\n"
	    "\t\tsearched\n"
	    "\t-a\tFind all possible sources. Default behavior is to stop at\n"
	    "\t\tthe first (most specific) source.\n"
	    "\t-i\tSearch only identifier arenas and caches. By default\n"
	    "\t\tthese are ignored.\n"
	    "\t-b\tReport bufctls and vmem_segs for matches in kmem and vmem,\n"
	    "\t\trespectively. Warning: if the buffer exists, but does not\n"
	    "\t\thave a bufctl, it will not be reported.\n");
}
26697c478bd9Sstevel@tonic-gate (uintptr_t)&b->bc_cache->cache_bufsize) == -1) { 26707c478bd9Sstevel@tonic-gate mdb_warn( 26717c478bd9Sstevel@tonic-gate "failed to read cache_bufsize for cache at %p", 26727c478bd9Sstevel@tonic-gate b->bc_cache); 26737c478bd9Sstevel@tonic-gate return (WALK_ERR); 26747c478bd9Sstevel@tonic-gate } 26757c478bd9Sstevel@tonic-gate 26767c478bd9Sstevel@tonic-gate if (kmd->kmd_addr < (uintptr_t)b->bc_addr || 26777c478bd9Sstevel@tonic-gate kmd->kmd_addr >= (uintptr_t)b->bc_addr + bufsize) 26787c478bd9Sstevel@tonic-gate return (WALK_NEXT); 26797c478bd9Sstevel@tonic-gate } 26807c478bd9Sstevel@tonic-gate 26817c478bd9Sstevel@tonic-gate if (i == NCPU) 26827c478bd9Sstevel@tonic-gate mdb_printf(" "); 26837c478bd9Sstevel@tonic-gate else 26847c478bd9Sstevel@tonic-gate mdb_printf("%3d", i); 26857c478bd9Sstevel@tonic-gate 26867c478bd9Sstevel@tonic-gate mdb_printf(" %0?p %0?p %16llx %0?p\n", addr, b->bc_addr, 26877c478bd9Sstevel@tonic-gate b->bc_timestamp, b->bc_thread); 26887c478bd9Sstevel@tonic-gate 26897c478bd9Sstevel@tonic-gate return (WALK_NEXT); 26907c478bd9Sstevel@tonic-gate } 26917c478bd9Sstevel@tonic-gate 26927c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 26937c478bd9Sstevel@tonic-gate int 26947c478bd9Sstevel@tonic-gate kmem_log(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 26957c478bd9Sstevel@tonic-gate { 26967c478bd9Sstevel@tonic-gate kmem_log_header_t lh; 26977c478bd9Sstevel@tonic-gate kmem_cpu_log_header_t clh; 26987c478bd9Sstevel@tonic-gate uintptr_t lhp, clhp; 26997c478bd9Sstevel@tonic-gate int ncpus; 27007c478bd9Sstevel@tonic-gate uintptr_t *cpu; 27017c478bd9Sstevel@tonic-gate GElf_Sym sym; 27027c478bd9Sstevel@tonic-gate kmem_log_cpu_t *kmc; 27037c478bd9Sstevel@tonic-gate int i; 27047c478bd9Sstevel@tonic-gate kmem_log_data_t kmd; 27057c478bd9Sstevel@tonic-gate uint_t opt_b = FALSE; 27067c478bd9Sstevel@tonic-gate 27077c478bd9Sstevel@tonic-gate if (mdb_getopts(argc, argv, 27087c478bd9Sstevel@tonic-gate 'b', MDB_OPT_SETBITS, TRUE, 
&opt_b, NULL) != argc) 27097c478bd9Sstevel@tonic-gate return (DCMD_USAGE); 27107c478bd9Sstevel@tonic-gate 27117c478bd9Sstevel@tonic-gate if (mdb_readvar(&lhp, "kmem_transaction_log") == -1) { 27127c478bd9Sstevel@tonic-gate mdb_warn("failed to read 'kmem_transaction_log'"); 27137c478bd9Sstevel@tonic-gate return (DCMD_ERR); 27147c478bd9Sstevel@tonic-gate } 27157c478bd9Sstevel@tonic-gate 27167c478bd9Sstevel@tonic-gate if (lhp == NULL) { 27177c478bd9Sstevel@tonic-gate mdb_warn("no kmem transaction log\n"); 27187c478bd9Sstevel@tonic-gate return (DCMD_ERR); 27197c478bd9Sstevel@tonic-gate } 27207c478bd9Sstevel@tonic-gate 27217c478bd9Sstevel@tonic-gate mdb_readvar(&ncpus, "ncpus"); 27227c478bd9Sstevel@tonic-gate 27237c478bd9Sstevel@tonic-gate if (mdb_vread(&lh, sizeof (kmem_log_header_t), lhp) == -1) { 27247c478bd9Sstevel@tonic-gate mdb_warn("failed to read log header at %p", lhp); 27257c478bd9Sstevel@tonic-gate return (DCMD_ERR); 27267c478bd9Sstevel@tonic-gate } 27277c478bd9Sstevel@tonic-gate 27287c478bd9Sstevel@tonic-gate clhp = lhp + ((uintptr_t)&lh.lh_cpu[0] - (uintptr_t)&lh); 27297c478bd9Sstevel@tonic-gate 27307c478bd9Sstevel@tonic-gate cpu = mdb_alloc(sizeof (uintptr_t) * NCPU, UM_SLEEP | UM_GC); 27317c478bd9Sstevel@tonic-gate 27327c478bd9Sstevel@tonic-gate if (mdb_lookup_by_name("cpu", &sym) == -1) { 27337c478bd9Sstevel@tonic-gate mdb_warn("couldn't find 'cpu' array"); 27347c478bd9Sstevel@tonic-gate return (DCMD_ERR); 27357c478bd9Sstevel@tonic-gate } 27367c478bd9Sstevel@tonic-gate 27377c478bd9Sstevel@tonic-gate if (sym.st_size != NCPU * sizeof (uintptr_t)) { 27387c478bd9Sstevel@tonic-gate mdb_warn("expected 'cpu' to be of size %d; found %d\n", 27397c478bd9Sstevel@tonic-gate NCPU * sizeof (uintptr_t), sym.st_size); 27407c478bd9Sstevel@tonic-gate return (DCMD_ERR); 27417c478bd9Sstevel@tonic-gate } 27427c478bd9Sstevel@tonic-gate 27437c478bd9Sstevel@tonic-gate if (mdb_vread(cpu, sym.st_size, (uintptr_t)sym.st_value) == -1) { 27447c478bd9Sstevel@tonic-gate 
mdb_warn("failed to read cpu array at %p", sym.st_value); 27457c478bd9Sstevel@tonic-gate return (DCMD_ERR); 27467c478bd9Sstevel@tonic-gate } 27477c478bd9Sstevel@tonic-gate 27487c478bd9Sstevel@tonic-gate kmc = mdb_zalloc(sizeof (kmem_log_cpu_t) * NCPU, UM_SLEEP | UM_GC); 27497c478bd9Sstevel@tonic-gate kmd.kmd_addr = NULL; 27507c478bd9Sstevel@tonic-gate kmd.kmd_cpu = kmc; 27517c478bd9Sstevel@tonic-gate 27527c478bd9Sstevel@tonic-gate for (i = 0; i < NCPU; i++) { 27537c478bd9Sstevel@tonic-gate 27547c478bd9Sstevel@tonic-gate if (cpu[i] == NULL) 27557c478bd9Sstevel@tonic-gate continue; 27567c478bd9Sstevel@tonic-gate 27577c478bd9Sstevel@tonic-gate if (mdb_vread(&clh, sizeof (clh), clhp) == -1) { 27587c478bd9Sstevel@tonic-gate mdb_warn("cannot read cpu %d's log header at %p", 27597c478bd9Sstevel@tonic-gate i, clhp); 27607c478bd9Sstevel@tonic-gate return (DCMD_ERR); 27617c478bd9Sstevel@tonic-gate } 27627c478bd9Sstevel@tonic-gate 27637c478bd9Sstevel@tonic-gate kmc[i].kmc_low = clh.clh_chunk * lh.lh_chunksize + 27647c478bd9Sstevel@tonic-gate (uintptr_t)lh.lh_base; 27657c478bd9Sstevel@tonic-gate kmc[i].kmc_high = (uintptr_t)clh.clh_current; 27667c478bd9Sstevel@tonic-gate 27677c478bd9Sstevel@tonic-gate clhp += sizeof (kmem_cpu_log_header_t); 27687c478bd9Sstevel@tonic-gate } 27697c478bd9Sstevel@tonic-gate 27707c478bd9Sstevel@tonic-gate mdb_printf("%3s %-?s %-?s %16s %-?s\n", "CPU", "ADDR", "BUFADDR", 27717c478bd9Sstevel@tonic-gate "TIMESTAMP", "THREAD"); 27727c478bd9Sstevel@tonic-gate 27737c478bd9Sstevel@tonic-gate /* 27747c478bd9Sstevel@tonic-gate * If we have been passed an address, print out only log entries 27757c478bd9Sstevel@tonic-gate * corresponding to that address. If opt_b is specified, then interpret 27767c478bd9Sstevel@tonic-gate * the address as a bufctl. 
27777c478bd9Sstevel@tonic-gate */ 27787c478bd9Sstevel@tonic-gate if (flags & DCMD_ADDRSPEC) { 27797c478bd9Sstevel@tonic-gate kmem_bufctl_audit_t b; 27807c478bd9Sstevel@tonic-gate 27817c478bd9Sstevel@tonic-gate if (opt_b) { 27827c478bd9Sstevel@tonic-gate kmd.kmd_addr = addr; 27837c478bd9Sstevel@tonic-gate } else { 27847c478bd9Sstevel@tonic-gate if (mdb_vread(&b, 27857c478bd9Sstevel@tonic-gate sizeof (kmem_bufctl_audit_t), addr) == -1) { 27867c478bd9Sstevel@tonic-gate mdb_warn("failed to read bufctl at %p", addr); 27877c478bd9Sstevel@tonic-gate return (DCMD_ERR); 27887c478bd9Sstevel@tonic-gate } 27897c478bd9Sstevel@tonic-gate 27907c478bd9Sstevel@tonic-gate (void) kmem_log_walk(addr, &b, &kmd); 27917c478bd9Sstevel@tonic-gate 27927c478bd9Sstevel@tonic-gate return (DCMD_OK); 27937c478bd9Sstevel@tonic-gate } 27947c478bd9Sstevel@tonic-gate } 27957c478bd9Sstevel@tonic-gate 27967c478bd9Sstevel@tonic-gate if (mdb_walk("kmem_log", (mdb_walk_cb_t)kmem_log_walk, &kmd) == -1) { 27977c478bd9Sstevel@tonic-gate mdb_warn("can't find kmem log walker"); 27987c478bd9Sstevel@tonic-gate return (DCMD_ERR); 27997c478bd9Sstevel@tonic-gate } 28007c478bd9Sstevel@tonic-gate 28017c478bd9Sstevel@tonic-gate return (DCMD_OK); 28027c478bd9Sstevel@tonic-gate } 28037c478bd9Sstevel@tonic-gate 28047c478bd9Sstevel@tonic-gate typedef struct bufctl_history_cb { 28057c478bd9Sstevel@tonic-gate int bhc_flags; 28067c478bd9Sstevel@tonic-gate int bhc_argc; 28077c478bd9Sstevel@tonic-gate const mdb_arg_t *bhc_argv; 28087c478bd9Sstevel@tonic-gate int bhc_ret; 28097c478bd9Sstevel@tonic-gate } bufctl_history_cb_t; 28107c478bd9Sstevel@tonic-gate 28117c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 28127c478bd9Sstevel@tonic-gate static int 28137c478bd9Sstevel@tonic-gate bufctl_history_callback(uintptr_t addr, const void *ign, void *arg) 28147c478bd9Sstevel@tonic-gate { 28157c478bd9Sstevel@tonic-gate bufctl_history_cb_t *bhc = arg; 28167c478bd9Sstevel@tonic-gate 28177c478bd9Sstevel@tonic-gate bhc->bhc_ret = 
/*
 * Walker callback used by ::bufctl -h: invoke ::bufctl on each element of
 * a bufctl's transaction history, clearing DCMD_LOOPFIRST after the first
 * element so only one header is printed per history.  Stops the walk on
 * the first non-DCMD_OK return, which is propagated via bhc_ret.
 */
/*ARGSUSED*/
static int
bufctl_history_callback(uintptr_t addr, const void *ign, void *arg)
{
	bufctl_history_cb_t *bhc = arg;

	bhc->bhc_ret =
	    bufctl(addr, bhc->bhc_flags, bhc->bhc_argc, bhc->bhc_argv);

	bhc->bhc_flags &= ~DCMD_LOOPFIRST;

	return ((bhc->bhc_ret == DCMD_OK)? WALK_NEXT : WALK_DONE);
}

/*
 * Help text for the ::bufctl dcmd, describing its filtering options.
 */
void
bufctl_help(void)
{
	mdb_printf("%s",
"Display the contents of kmem_bufctl_audit_ts, with optional filtering.\n\n");
	mdb_dec_indent(2);
	mdb_printf("%<b>OPTIONS%</b>\n");
	mdb_inc_indent(2);
	mdb_printf("%s",
"  -v    Display the full content of the bufctl, including its stack trace\n"
"  -h    retrieve the bufctl's transaction history, if available\n"
"  -a addr\n"
"        filter out bufctls not involving the buffer at addr\n"
"  -c caller\n"
"        filter out bufctls without the function/PC in their stack trace\n"
"  -e earliest\n"
"        filter out bufctls timestamped before earliest\n"
"  -l latest\n"
"        filter out bufctls timestamped after latest\n"
"  -t thread\n"
"        filter out bufctls not involving thread\n");
}
/*
 * ::bufctl dcmd: display a kmem_bufctl_audit_t, with optional filtering.
 *
 * Filters (-a buffer address, -c caller PC/function, -t thread,
 * -e/-l timestamp bounds) cause non-matching bufctls to be silently
 * skipped (DCMD_OK with no output).  -h re-invokes this dcmd over the
 * bufctl's transaction history via the bufctl_history walker; the
 * internal -H flag marks such nested invocations to prevent recursion.
 * In pipe-out mode only the bufctl address is emitted.
 */
int
bufctl(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	kmem_bufctl_audit_t bc;
	uint_t verbose = FALSE;
	uint_t history = FALSE;
	uint_t in_history = FALSE;
	uintptr_t caller = NULL, thread = NULL;
	uintptr_t laddr, haddr, baddr = NULL;
	hrtime_t earliest = 0, latest = 0;
	int i, depth;
	char c[MDB_SYM_NAMLEN];
	GElf_Sym sym;

	if (mdb_getopts(argc, argv,
	    'v', MDB_OPT_SETBITS, TRUE, &verbose,
	    'h', MDB_OPT_SETBITS, TRUE, &history,
	    'H', MDB_OPT_SETBITS, TRUE, &in_history,	/* internal */
	    'c', MDB_OPT_UINTPTR, &caller,
	    't', MDB_OPT_UINTPTR, &thread,
	    'e', MDB_OPT_UINT64, &earliest,
	    'l', MDB_OPT_UINT64, &latest,
	    'a', MDB_OPT_UINTPTR, &baddr, NULL) != argc)
		return (DCMD_USAGE);

	if (!(flags & DCMD_ADDRSPEC))
		return (DCMD_USAGE);

	/* -H is only valid as an internal recursion marker under -h. */
	if (in_history && !history)
		return (DCMD_USAGE);

	if (history && !in_history) {
		/* Re-invoke ourselves (with -H prepended) over the history. */
		mdb_arg_t *nargv = mdb_zalloc(sizeof (*nargv) * (argc + 1),
		    UM_SLEEP | UM_GC);
		bufctl_history_cb_t bhc;

		nargv[0].a_type = MDB_TYPE_STRING;
		nargv[0].a_un.a_str = "-H";	/* prevent recursion */

		for (i = 0; i < argc; i++)
			nargv[i + 1] = argv[i];

		/*
		 * When in history mode, we treat each element as if it
		 * were in a seperate loop, so that the headers group
		 * bufctls with similar histories.
		 */
		bhc.bhc_flags = flags | DCMD_LOOP | DCMD_LOOPFIRST;
		bhc.bhc_argc = argc + 1;
		bhc.bhc_argv = nargv;
		bhc.bhc_ret = DCMD_OK;

		if (mdb_pwalk("bufctl_history", bufctl_history_callback, &bhc,
		    addr) == -1) {
			mdb_warn("unable to walk bufctl_history");
			return (DCMD_ERR);
		}

		if (bhc.bhc_ret == DCMD_OK && !(flags & DCMD_PIPE_OUT))
			mdb_printf("\n");

		return (bhc.bhc_ret);
	}

	if (DCMD_HDRSPEC(flags) && !(flags & DCMD_PIPE_OUT)) {
		if (verbose) {
			mdb_printf("%16s %16s %16s %16s\n"
			    "%<u>%16s %16s %16s %16s%</u>\n",
			    "ADDR", "BUFADDR", "TIMESTAMP", "THREAD",
			    "", "CACHE", "LASTLOG", "CONTENTS");
		} else {
			mdb_printf("%<u>%-?s %-?s %-12s %-?s %s%</u>\n",
			    "ADDR", "BUFADDR", "TIMESTAMP", "THREAD", "CALLER");
		}
	}

	if (mdb_vread(&bc, sizeof (bc), addr) == -1) {
		mdb_warn("couldn't read bufctl at %p", addr);
		return (DCMD_ERR);
	}

	/*
	 * Guard against bogus bc_depth in case the bufctl is corrupt or
	 * the address does not really refer to a bufctl.
	 */
	depth = MIN(bc.bc_depth, KMEM_STACK_DEPTH);

	if (caller != NULL) {
		laddr = caller;
		haddr = caller + sizeof (caller);

		if (mdb_lookup_by_addr(caller, MDB_SYM_FUZZY, c, sizeof (c),
		    &sym) != -1 && caller == (uintptr_t)sym.st_value) {
			/*
			 * We were provided an exact symbol value; any
			 * address in the function is valid.
			 */
			laddr = (uintptr_t)sym.st_value;
			haddr = (uintptr_t)sym.st_value + sym.st_size;
		}

		/* Skip this bufctl unless some stack PC is in [laddr, haddr) */
		for (i = 0; i < depth; i++)
			if (bc.bc_stack[i] >= laddr && bc.bc_stack[i] < haddr)
				break;

		if (i == depth)
			return (DCMD_OK);
	}

	if (thread != NULL && (uintptr_t)bc.bc_thread != thread)
		return (DCMD_OK);

	if (earliest != 0 && bc.bc_timestamp < earliest)
		return (DCMD_OK);

	if (latest != 0 && bc.bc_timestamp > latest)
		return (DCMD_OK);

	if (baddr != 0 && (uintptr_t)bc.bc_addr != baddr)
		return (DCMD_OK);

	if (flags & DCMD_PIPE_OUT) {
		mdb_printf("%#lr\n", addr);
		return (DCMD_OK);
	}

	if (verbose) {
		mdb_printf(
		    "%<b>%16p%</b> %16p %16llx %16p\n"
		    "%16s %16p %16p %16p\n",
		    addr, bc.bc_addr, bc.bc_timestamp, bc.bc_thread,
		    "", bc.bc_cache, bc.bc_lastlog, bc.bc_contents);

		mdb_inc_indent(17);
		for (i = 0; i < depth; i++)
			mdb_printf("%a\n", bc.bc_stack[i]);
		mdb_dec_indent(17);
		mdb_printf("\n");
	} else {
		mdb_printf("%0?p %0?p %12llx %0?p", addr, bc.bc_addr,
		    bc.bc_timestamp, bc.bc_thread);

		/* CALLER column: first stack frame not inside kmem itself. */
		for (i = 0; i < depth; i++) {
			if (mdb_lookup_by_addr(bc.bc_stack[i],
			    MDB_SYM_FUZZY, c, sizeof (c), &sym) == -1)
				continue;
			if (strncmp(c, "kmem_", 5) == 0)
				continue;
			mdb_printf(" %a\n", bc.bc_stack[i]);
			break;
		}

		if (i >= depth)
			mdb_printf("\n");
	}

	return (DCMD_OK);
}
30187c478bd9Sstevel@tonic-gate */ 30197c478bd9Sstevel@tonic-gate static int64_t 30207c478bd9Sstevel@tonic-gate verify_pattern(uint64_t *buf_arg, size_t size, uint64_t pat) 30217c478bd9Sstevel@tonic-gate { 30227c478bd9Sstevel@tonic-gate /*LINTED*/ 30237c478bd9Sstevel@tonic-gate uint64_t *bufend = (uint64_t *)((char *)buf_arg + size); 30247c478bd9Sstevel@tonic-gate uint64_t *buf; 30257c478bd9Sstevel@tonic-gate 30267c478bd9Sstevel@tonic-gate for (buf = buf_arg; buf < bufend; buf++) 30277c478bd9Sstevel@tonic-gate if (*buf != pat) 30287c478bd9Sstevel@tonic-gate return ((uintptr_t)buf - (uintptr_t)buf_arg); 30297c478bd9Sstevel@tonic-gate return (-1); 30307c478bd9Sstevel@tonic-gate } 30317c478bd9Sstevel@tonic-gate 30327c478bd9Sstevel@tonic-gate /* 30337c478bd9Sstevel@tonic-gate * verify_buftag() 30347c478bd9Sstevel@tonic-gate * verify that btp->bt_bxstat == (bcp ^ pat) 30357c478bd9Sstevel@tonic-gate */ 30367c478bd9Sstevel@tonic-gate static int 30377c478bd9Sstevel@tonic-gate verify_buftag(kmem_buftag_t *btp, uintptr_t pat) 30387c478bd9Sstevel@tonic-gate { 30397c478bd9Sstevel@tonic-gate return (btp->bt_bxstat == ((intptr_t)btp->bt_bufctl ^ pat) ? 0 : -1); 30407c478bd9Sstevel@tonic-gate } 30417c478bd9Sstevel@tonic-gate 30427c478bd9Sstevel@tonic-gate /* 30437c478bd9Sstevel@tonic-gate * verify_free() 30447c478bd9Sstevel@tonic-gate * verify the integrity of a free block of memory by checking 30457c478bd9Sstevel@tonic-gate * that it is filled with 0xdeadbeef and that its buftag is sane. 
30467c478bd9Sstevel@tonic-gate */ 30477c478bd9Sstevel@tonic-gate /*ARGSUSED1*/ 30487c478bd9Sstevel@tonic-gate static int 30497c478bd9Sstevel@tonic-gate verify_free(uintptr_t addr, const void *data, void *private) 30507c478bd9Sstevel@tonic-gate { 30517c478bd9Sstevel@tonic-gate kmem_verify_t *kmv = (kmem_verify_t *)private; 30527c478bd9Sstevel@tonic-gate uint64_t *buf = kmv->kmv_buf; /* buf to validate */ 30537c478bd9Sstevel@tonic-gate int64_t corrupt; /* corruption offset */ 30547c478bd9Sstevel@tonic-gate kmem_buftag_t *buftagp; /* ptr to buftag */ 30557c478bd9Sstevel@tonic-gate kmem_cache_t *cp = &kmv->kmv_cache; 30567c478bd9Sstevel@tonic-gate int besilent = kmv->kmv_besilent; 30577c478bd9Sstevel@tonic-gate 30587c478bd9Sstevel@tonic-gate /*LINTED*/ 30597c478bd9Sstevel@tonic-gate buftagp = KMEM_BUFTAG(cp, buf); 30607c478bd9Sstevel@tonic-gate 30617c478bd9Sstevel@tonic-gate /* 30627c478bd9Sstevel@tonic-gate * Read the buffer to check. 30637c478bd9Sstevel@tonic-gate */ 30647c478bd9Sstevel@tonic-gate if (mdb_vread(buf, kmv->kmv_size, addr) == -1) { 30657c478bd9Sstevel@tonic-gate if (!besilent) 30667c478bd9Sstevel@tonic-gate mdb_warn("couldn't read %p", addr); 30677c478bd9Sstevel@tonic-gate return (WALK_NEXT); 30687c478bd9Sstevel@tonic-gate } 30697c478bd9Sstevel@tonic-gate 30707c478bd9Sstevel@tonic-gate if ((corrupt = verify_pattern(buf, cp->cache_verify, 30717c478bd9Sstevel@tonic-gate KMEM_FREE_PATTERN)) >= 0) { 30727c478bd9Sstevel@tonic-gate if (!besilent) 30737c478bd9Sstevel@tonic-gate mdb_printf("buffer %p (free) seems corrupted, at %p\n", 30747c478bd9Sstevel@tonic-gate addr, (uintptr_t)addr + corrupt); 30757c478bd9Sstevel@tonic-gate goto corrupt; 30767c478bd9Sstevel@tonic-gate } 30777c478bd9Sstevel@tonic-gate /* 30787c478bd9Sstevel@tonic-gate * When KMF_LITE is set, buftagp->bt_redzone is used to hold 30797c478bd9Sstevel@tonic-gate * the first bytes of the buffer, hence we cannot check for red 30807c478bd9Sstevel@tonic-gate * zone corruption. 
30817c478bd9Sstevel@tonic-gate */ 30827c478bd9Sstevel@tonic-gate if ((cp->cache_flags & (KMF_HASH | KMF_LITE)) == KMF_HASH && 30837c478bd9Sstevel@tonic-gate buftagp->bt_redzone != KMEM_REDZONE_PATTERN) { 30847c478bd9Sstevel@tonic-gate if (!besilent) 30857c478bd9Sstevel@tonic-gate mdb_printf("buffer %p (free) seems to " 30867c478bd9Sstevel@tonic-gate "have a corrupt redzone pattern\n", addr); 30877c478bd9Sstevel@tonic-gate goto corrupt; 30887c478bd9Sstevel@tonic-gate } 30897c478bd9Sstevel@tonic-gate 30907c478bd9Sstevel@tonic-gate /* 30917c478bd9Sstevel@tonic-gate * confirm bufctl pointer integrity. 30927c478bd9Sstevel@tonic-gate */ 30937c478bd9Sstevel@tonic-gate if (verify_buftag(buftagp, KMEM_BUFTAG_FREE) == -1) { 30947c478bd9Sstevel@tonic-gate if (!besilent) 30957c478bd9Sstevel@tonic-gate mdb_printf("buffer %p (free) has a corrupt " 30967c478bd9Sstevel@tonic-gate "buftag\n", addr); 30977c478bd9Sstevel@tonic-gate goto corrupt; 30987c478bd9Sstevel@tonic-gate } 30997c478bd9Sstevel@tonic-gate 31007c478bd9Sstevel@tonic-gate return (WALK_NEXT); 31017c478bd9Sstevel@tonic-gate corrupt: 31027c478bd9Sstevel@tonic-gate kmv->kmv_corruption++; 31037c478bd9Sstevel@tonic-gate return (WALK_NEXT); 31047c478bd9Sstevel@tonic-gate } 31057c478bd9Sstevel@tonic-gate 31067c478bd9Sstevel@tonic-gate /* 31077c478bd9Sstevel@tonic-gate * verify_alloc() 31087c478bd9Sstevel@tonic-gate * Verify that the buftag of an allocated buffer makes sense with respect 31097c478bd9Sstevel@tonic-gate * to the buffer. 
31107c478bd9Sstevel@tonic-gate */ 31117c478bd9Sstevel@tonic-gate /*ARGSUSED1*/ 31127c478bd9Sstevel@tonic-gate static int 31137c478bd9Sstevel@tonic-gate verify_alloc(uintptr_t addr, const void *data, void *private) 31147c478bd9Sstevel@tonic-gate { 31157c478bd9Sstevel@tonic-gate kmem_verify_t *kmv = (kmem_verify_t *)private; 31167c478bd9Sstevel@tonic-gate kmem_cache_t *cp = &kmv->kmv_cache; 31177c478bd9Sstevel@tonic-gate uint64_t *buf = kmv->kmv_buf; /* buf to validate */ 31187c478bd9Sstevel@tonic-gate /*LINTED*/ 31197c478bd9Sstevel@tonic-gate kmem_buftag_t *buftagp = KMEM_BUFTAG(cp, buf); 31207c478bd9Sstevel@tonic-gate uint32_t *ip = (uint32_t *)buftagp; 31217c478bd9Sstevel@tonic-gate uint8_t *bp = (uint8_t *)buf; 31227c478bd9Sstevel@tonic-gate int looks_ok = 0, size_ok = 1; /* flags for finding corruption */ 31237c478bd9Sstevel@tonic-gate int besilent = kmv->kmv_besilent; 31247c478bd9Sstevel@tonic-gate 31257c478bd9Sstevel@tonic-gate /* 31267c478bd9Sstevel@tonic-gate * Read the buffer to check. 31277c478bd9Sstevel@tonic-gate */ 31287c478bd9Sstevel@tonic-gate if (mdb_vread(buf, kmv->kmv_size, addr) == -1) { 31297c478bd9Sstevel@tonic-gate if (!besilent) 31307c478bd9Sstevel@tonic-gate mdb_warn("couldn't read %p", addr); 31317c478bd9Sstevel@tonic-gate return (WALK_NEXT); 31327c478bd9Sstevel@tonic-gate } 31337c478bd9Sstevel@tonic-gate 31347c478bd9Sstevel@tonic-gate /* 31357c478bd9Sstevel@tonic-gate * There are two cases to handle: 31367c478bd9Sstevel@tonic-gate * 1. If the buf was alloc'd using kmem_cache_alloc, it will have 31377c478bd9Sstevel@tonic-gate * 0xfeedfacefeedface at the end of it 31387c478bd9Sstevel@tonic-gate * 2. If the buf was alloc'd using kmem_alloc, it will have 31397c478bd9Sstevel@tonic-gate * 0xbb just past the end of the region in use. 
At the buftag, 31407c478bd9Sstevel@tonic-gate * it will have 0xfeedface (or, if the whole buffer is in use, 31417c478bd9Sstevel@tonic-gate * 0xfeedface & bb000000 or 0xfeedfacf & 000000bb depending on 31427c478bd9Sstevel@tonic-gate * endianness), followed by 32 bits containing the offset of the 31437c478bd9Sstevel@tonic-gate * 0xbb byte in the buffer. 31447c478bd9Sstevel@tonic-gate * 31457c478bd9Sstevel@tonic-gate * Finally, the two 32-bit words that comprise the second half of the 31467c478bd9Sstevel@tonic-gate * buftag should xor to KMEM_BUFTAG_ALLOC 31477c478bd9Sstevel@tonic-gate */ 31487c478bd9Sstevel@tonic-gate 31497c478bd9Sstevel@tonic-gate if (buftagp->bt_redzone == KMEM_REDZONE_PATTERN) 31507c478bd9Sstevel@tonic-gate looks_ok = 1; 31517c478bd9Sstevel@tonic-gate else if (!KMEM_SIZE_VALID(ip[1])) 31527c478bd9Sstevel@tonic-gate size_ok = 0; 31537c478bd9Sstevel@tonic-gate else if (bp[KMEM_SIZE_DECODE(ip[1])] == KMEM_REDZONE_BYTE) 31547c478bd9Sstevel@tonic-gate looks_ok = 1; 31557c478bd9Sstevel@tonic-gate else 31567c478bd9Sstevel@tonic-gate size_ok = 0; 31577c478bd9Sstevel@tonic-gate 31587c478bd9Sstevel@tonic-gate if (!size_ok) { 31597c478bd9Sstevel@tonic-gate if (!besilent) 31607c478bd9Sstevel@tonic-gate mdb_printf("buffer %p (allocated) has a corrupt " 31617c478bd9Sstevel@tonic-gate "redzone size encoding\n", addr); 31627c478bd9Sstevel@tonic-gate goto corrupt; 31637c478bd9Sstevel@tonic-gate } 31647c478bd9Sstevel@tonic-gate 31657c478bd9Sstevel@tonic-gate if (!looks_ok) { 31667c478bd9Sstevel@tonic-gate if (!besilent) 31677c478bd9Sstevel@tonic-gate mdb_printf("buffer %p (allocated) has a corrupt " 31687c478bd9Sstevel@tonic-gate "redzone signature\n", addr); 31697c478bd9Sstevel@tonic-gate goto corrupt; 31707c478bd9Sstevel@tonic-gate } 31717c478bd9Sstevel@tonic-gate 31727c478bd9Sstevel@tonic-gate if (verify_buftag(buftagp, KMEM_BUFTAG_ALLOC) == -1) { 31737c478bd9Sstevel@tonic-gate if (!besilent) 31747c478bd9Sstevel@tonic-gate mdb_printf("buffer %p (allocated) has a 
" 31757c478bd9Sstevel@tonic-gate "corrupt buftag\n", addr); 31767c478bd9Sstevel@tonic-gate goto corrupt; 31777c478bd9Sstevel@tonic-gate } 31787c478bd9Sstevel@tonic-gate 31797c478bd9Sstevel@tonic-gate return (WALK_NEXT); 31807c478bd9Sstevel@tonic-gate corrupt: 31817c478bd9Sstevel@tonic-gate kmv->kmv_corruption++; 31827c478bd9Sstevel@tonic-gate return (WALK_NEXT); 31837c478bd9Sstevel@tonic-gate } 31847c478bd9Sstevel@tonic-gate 31857c478bd9Sstevel@tonic-gate /*ARGSUSED2*/ 31867c478bd9Sstevel@tonic-gate int 31877c478bd9Sstevel@tonic-gate kmem_verify(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 31887c478bd9Sstevel@tonic-gate { 31897c478bd9Sstevel@tonic-gate if (flags & DCMD_ADDRSPEC) { 31907c478bd9Sstevel@tonic-gate int check_alloc = 0, check_free = 0; 31917c478bd9Sstevel@tonic-gate kmem_verify_t kmv; 31927c478bd9Sstevel@tonic-gate 31937c478bd9Sstevel@tonic-gate if (mdb_vread(&kmv.kmv_cache, sizeof (kmv.kmv_cache), 31947c478bd9Sstevel@tonic-gate addr) == -1) { 31957c478bd9Sstevel@tonic-gate mdb_warn("couldn't read kmem_cache %p", addr); 31967c478bd9Sstevel@tonic-gate return (DCMD_ERR); 31977c478bd9Sstevel@tonic-gate } 31987c478bd9Sstevel@tonic-gate 31997c478bd9Sstevel@tonic-gate kmv.kmv_size = kmv.kmv_cache.cache_buftag + 32007c478bd9Sstevel@tonic-gate sizeof (kmem_buftag_t); 32017c478bd9Sstevel@tonic-gate kmv.kmv_buf = mdb_alloc(kmv.kmv_size, UM_SLEEP | UM_GC); 32027c478bd9Sstevel@tonic-gate kmv.kmv_corruption = 0; 32037c478bd9Sstevel@tonic-gate 32047c478bd9Sstevel@tonic-gate if ((kmv.kmv_cache.cache_flags & KMF_REDZONE)) { 32057c478bd9Sstevel@tonic-gate check_alloc = 1; 32067c478bd9Sstevel@tonic-gate if (kmv.kmv_cache.cache_flags & KMF_DEADBEEF) 32077c478bd9Sstevel@tonic-gate check_free = 1; 32087c478bd9Sstevel@tonic-gate } else { 32097c478bd9Sstevel@tonic-gate if (!(flags & DCMD_LOOP)) { 32107c478bd9Sstevel@tonic-gate mdb_warn("cache %p (%s) does not have " 32117c478bd9Sstevel@tonic-gate "redzone checking enabled\n", addr, 
32127c478bd9Sstevel@tonic-gate kmv.kmv_cache.cache_name); 32137c478bd9Sstevel@tonic-gate } 32147c478bd9Sstevel@tonic-gate return (DCMD_ERR); 32157c478bd9Sstevel@tonic-gate } 32167c478bd9Sstevel@tonic-gate 32177c478bd9Sstevel@tonic-gate if (flags & DCMD_LOOP) { 32187c478bd9Sstevel@tonic-gate /* 32197c478bd9Sstevel@tonic-gate * table mode, don't print out every corrupt buffer 32207c478bd9Sstevel@tonic-gate */ 32217c478bd9Sstevel@tonic-gate kmv.kmv_besilent = 1; 32227c478bd9Sstevel@tonic-gate } else { 32237c478bd9Sstevel@tonic-gate mdb_printf("Summary for cache '%s'\n", 32247c478bd9Sstevel@tonic-gate kmv.kmv_cache.cache_name); 32257c478bd9Sstevel@tonic-gate mdb_inc_indent(2); 32267c478bd9Sstevel@tonic-gate kmv.kmv_besilent = 0; 32277c478bd9Sstevel@tonic-gate } 32287c478bd9Sstevel@tonic-gate 32297c478bd9Sstevel@tonic-gate if (check_alloc) 32307c478bd9Sstevel@tonic-gate (void) mdb_pwalk("kmem", verify_alloc, &kmv, addr); 32317c478bd9Sstevel@tonic-gate if (check_free) 32327c478bd9Sstevel@tonic-gate (void) mdb_pwalk("freemem", verify_free, &kmv, addr); 32337c478bd9Sstevel@tonic-gate 32347c478bd9Sstevel@tonic-gate if (flags & DCMD_LOOP) { 32357c478bd9Sstevel@tonic-gate if (kmv.kmv_corruption == 0) { 32367c478bd9Sstevel@tonic-gate mdb_printf("%-*s %?p clean\n", 32377c478bd9Sstevel@tonic-gate KMEM_CACHE_NAMELEN, 32387c478bd9Sstevel@tonic-gate kmv.kmv_cache.cache_name, addr); 32397c478bd9Sstevel@tonic-gate } else { 32407c478bd9Sstevel@tonic-gate char *s = ""; /* optional s in "buffer[s]" */ 32417c478bd9Sstevel@tonic-gate if (kmv.kmv_corruption > 1) 32427c478bd9Sstevel@tonic-gate s = "s"; 32437c478bd9Sstevel@tonic-gate 32447c478bd9Sstevel@tonic-gate mdb_printf("%-*s %?p %d corrupt buffer%s\n", 32457c478bd9Sstevel@tonic-gate KMEM_CACHE_NAMELEN, 32467c478bd9Sstevel@tonic-gate kmv.kmv_cache.cache_name, addr, 32477c478bd9Sstevel@tonic-gate kmv.kmv_corruption, s); 32487c478bd9Sstevel@tonic-gate } 32497c478bd9Sstevel@tonic-gate } else { 32507c478bd9Sstevel@tonic-gate /* 
32517c478bd9Sstevel@tonic-gate * This is the more verbose mode, when the user has 32527c478bd9Sstevel@tonic-gate * type addr::kmem_verify. If the cache was clean, 32537c478bd9Sstevel@tonic-gate * nothing will have yet been printed. So say something. 32547c478bd9Sstevel@tonic-gate */ 32557c478bd9Sstevel@tonic-gate if (kmv.kmv_corruption == 0) 32567c478bd9Sstevel@tonic-gate mdb_printf("clean\n"); 32577c478bd9Sstevel@tonic-gate 32587c478bd9Sstevel@tonic-gate mdb_dec_indent(2); 32597c478bd9Sstevel@tonic-gate } 32607c478bd9Sstevel@tonic-gate } else { 32617c478bd9Sstevel@tonic-gate /* 32627c478bd9Sstevel@tonic-gate * If the user didn't specify a cache to verify, we'll walk all 32637c478bd9Sstevel@tonic-gate * kmem_cache's, specifying ourself as a callback for each... 32647c478bd9Sstevel@tonic-gate * this is the equivalent of '::walk kmem_cache .::kmem_verify' 32657c478bd9Sstevel@tonic-gate */ 32667c478bd9Sstevel@tonic-gate mdb_printf("%<u>%-*s %-?s %-20s%</b>\n", KMEM_CACHE_NAMELEN, 32677c478bd9Sstevel@tonic-gate "Cache Name", "Addr", "Cache Integrity"); 32687c478bd9Sstevel@tonic-gate (void) (mdb_walk_dcmd("kmem_cache", "kmem_verify", 0, NULL)); 32697c478bd9Sstevel@tonic-gate } 32707c478bd9Sstevel@tonic-gate 32717c478bd9Sstevel@tonic-gate return (DCMD_OK); 32727c478bd9Sstevel@tonic-gate } 32737c478bd9Sstevel@tonic-gate 32747c478bd9Sstevel@tonic-gate typedef struct vmem_node { 32757c478bd9Sstevel@tonic-gate struct vmem_node *vn_next; 32767c478bd9Sstevel@tonic-gate struct vmem_node *vn_parent; 32777c478bd9Sstevel@tonic-gate struct vmem_node *vn_sibling; 32787c478bd9Sstevel@tonic-gate struct vmem_node *vn_children; 32797c478bd9Sstevel@tonic-gate uintptr_t vn_addr; 32807c478bd9Sstevel@tonic-gate int vn_marked; 32817c478bd9Sstevel@tonic-gate vmem_t vn_vmem; 32827c478bd9Sstevel@tonic-gate } vmem_node_t; 32837c478bd9Sstevel@tonic-gate 32847c478bd9Sstevel@tonic-gate typedef struct vmem_walk { 32857c478bd9Sstevel@tonic-gate vmem_node_t *vw_root; 32867c478bd9Sstevel@tonic-gate 
vmem_node_t *vw_current; 32877c478bd9Sstevel@tonic-gate } vmem_walk_t; 32887c478bd9Sstevel@tonic-gate 32897c478bd9Sstevel@tonic-gate int 32907c478bd9Sstevel@tonic-gate vmem_walk_init(mdb_walk_state_t *wsp) 32917c478bd9Sstevel@tonic-gate { 32927c478bd9Sstevel@tonic-gate uintptr_t vaddr, paddr; 32937c478bd9Sstevel@tonic-gate vmem_node_t *head = NULL, *root = NULL, *current = NULL, *parent, *vp; 32947c478bd9Sstevel@tonic-gate vmem_walk_t *vw; 32957c478bd9Sstevel@tonic-gate 32967c478bd9Sstevel@tonic-gate if (mdb_readvar(&vaddr, "vmem_list") == -1) { 32977c478bd9Sstevel@tonic-gate mdb_warn("couldn't read 'vmem_list'"); 32987c478bd9Sstevel@tonic-gate return (WALK_ERR); 32997c478bd9Sstevel@tonic-gate } 33007c478bd9Sstevel@tonic-gate 33017c478bd9Sstevel@tonic-gate while (vaddr != NULL) { 33027c478bd9Sstevel@tonic-gate vp = mdb_zalloc(sizeof (vmem_node_t), UM_SLEEP); 33037c478bd9Sstevel@tonic-gate vp->vn_addr = vaddr; 33047c478bd9Sstevel@tonic-gate vp->vn_next = head; 33057c478bd9Sstevel@tonic-gate head = vp; 33067c478bd9Sstevel@tonic-gate 33077c478bd9Sstevel@tonic-gate if (vaddr == wsp->walk_addr) 33087c478bd9Sstevel@tonic-gate current = vp; 33097c478bd9Sstevel@tonic-gate 33107c478bd9Sstevel@tonic-gate if (mdb_vread(&vp->vn_vmem, sizeof (vmem_t), vaddr) == -1) { 33117c478bd9Sstevel@tonic-gate mdb_warn("couldn't read vmem_t at %p", vaddr); 33127c478bd9Sstevel@tonic-gate goto err; 33137c478bd9Sstevel@tonic-gate } 33147c478bd9Sstevel@tonic-gate 33157c478bd9Sstevel@tonic-gate vaddr = (uintptr_t)vp->vn_vmem.vm_next; 33167c478bd9Sstevel@tonic-gate } 33177c478bd9Sstevel@tonic-gate 33187c478bd9Sstevel@tonic-gate for (vp = head; vp != NULL; vp = vp->vn_next) { 33197c478bd9Sstevel@tonic-gate 33207c478bd9Sstevel@tonic-gate if ((paddr = (uintptr_t)vp->vn_vmem.vm_source) == NULL) { 33217c478bd9Sstevel@tonic-gate vp->vn_sibling = root; 33227c478bd9Sstevel@tonic-gate root = vp; 33237c478bd9Sstevel@tonic-gate continue; 33247c478bd9Sstevel@tonic-gate } 33257c478bd9Sstevel@tonic-gate 
33267c478bd9Sstevel@tonic-gate for (parent = head; parent != NULL; parent = parent->vn_next) { 33277c478bd9Sstevel@tonic-gate if (parent->vn_addr != paddr) 33287c478bd9Sstevel@tonic-gate continue; 33297c478bd9Sstevel@tonic-gate vp->vn_sibling = parent->vn_children; 33307c478bd9Sstevel@tonic-gate parent->vn_children = vp; 33317c478bd9Sstevel@tonic-gate vp->vn_parent = parent; 33327c478bd9Sstevel@tonic-gate break; 33337c478bd9Sstevel@tonic-gate } 33347c478bd9Sstevel@tonic-gate 33357c478bd9Sstevel@tonic-gate if (parent == NULL) { 33367c478bd9Sstevel@tonic-gate mdb_warn("couldn't find %p's parent (%p)\n", 33377c478bd9Sstevel@tonic-gate vp->vn_addr, paddr); 33387c478bd9Sstevel@tonic-gate goto err; 33397c478bd9Sstevel@tonic-gate } 33407c478bd9Sstevel@tonic-gate } 33417c478bd9Sstevel@tonic-gate 33427c478bd9Sstevel@tonic-gate vw = mdb_zalloc(sizeof (vmem_walk_t), UM_SLEEP); 33437c478bd9Sstevel@tonic-gate vw->vw_root = root; 33447c478bd9Sstevel@tonic-gate 33457c478bd9Sstevel@tonic-gate if (current != NULL) 33467c478bd9Sstevel@tonic-gate vw->vw_current = current; 33477c478bd9Sstevel@tonic-gate else 33487c478bd9Sstevel@tonic-gate vw->vw_current = root; 33497c478bd9Sstevel@tonic-gate 33507c478bd9Sstevel@tonic-gate wsp->walk_data = vw; 33517c478bd9Sstevel@tonic-gate return (WALK_NEXT); 33527c478bd9Sstevel@tonic-gate err: 33537c478bd9Sstevel@tonic-gate for (vp = head; head != NULL; vp = head) { 33547c478bd9Sstevel@tonic-gate head = vp->vn_next; 33557c478bd9Sstevel@tonic-gate mdb_free(vp, sizeof (vmem_node_t)); 33567c478bd9Sstevel@tonic-gate } 33577c478bd9Sstevel@tonic-gate 33587c478bd9Sstevel@tonic-gate return (WALK_ERR); 33597c478bd9Sstevel@tonic-gate } 33607c478bd9Sstevel@tonic-gate 33617c478bd9Sstevel@tonic-gate int 33627c478bd9Sstevel@tonic-gate vmem_walk_step(mdb_walk_state_t *wsp) 33637c478bd9Sstevel@tonic-gate { 33647c478bd9Sstevel@tonic-gate vmem_walk_t *vw = wsp->walk_data; 33657c478bd9Sstevel@tonic-gate vmem_node_t *vp; 33667c478bd9Sstevel@tonic-gate int rval; 
33677c478bd9Sstevel@tonic-gate 33687c478bd9Sstevel@tonic-gate if ((vp = vw->vw_current) == NULL) 33697c478bd9Sstevel@tonic-gate return (WALK_DONE); 33707c478bd9Sstevel@tonic-gate 33717c478bd9Sstevel@tonic-gate rval = wsp->walk_callback(vp->vn_addr, &vp->vn_vmem, wsp->walk_cbdata); 33727c478bd9Sstevel@tonic-gate 33737c478bd9Sstevel@tonic-gate if (vp->vn_children != NULL) { 33747c478bd9Sstevel@tonic-gate vw->vw_current = vp->vn_children; 33757c478bd9Sstevel@tonic-gate return (rval); 33767c478bd9Sstevel@tonic-gate } 33777c478bd9Sstevel@tonic-gate 33787c478bd9Sstevel@tonic-gate do { 33797c478bd9Sstevel@tonic-gate vw->vw_current = vp->vn_sibling; 33807c478bd9Sstevel@tonic-gate vp = vp->vn_parent; 33817c478bd9Sstevel@tonic-gate } while (vw->vw_current == NULL && vp != NULL); 33827c478bd9Sstevel@tonic-gate 33837c478bd9Sstevel@tonic-gate return (rval); 33847c478bd9Sstevel@tonic-gate } 33857c478bd9Sstevel@tonic-gate 33867c478bd9Sstevel@tonic-gate /* 33877c478bd9Sstevel@tonic-gate * The "vmem_postfix" walk walks the vmem arenas in post-fix order; all 33887c478bd9Sstevel@tonic-gate * children are visited before their parent. We perform the postfix walk 33897c478bd9Sstevel@tonic-gate * iteratively (rather than recursively) to allow mdb to regain control 33907c478bd9Sstevel@tonic-gate * after each callback. 33917c478bd9Sstevel@tonic-gate */ 33927c478bd9Sstevel@tonic-gate int 33937c478bd9Sstevel@tonic-gate vmem_postfix_walk_step(mdb_walk_state_t *wsp) 33947c478bd9Sstevel@tonic-gate { 33957c478bd9Sstevel@tonic-gate vmem_walk_t *vw = wsp->walk_data; 33967c478bd9Sstevel@tonic-gate vmem_node_t *vp = vw->vw_current; 33977c478bd9Sstevel@tonic-gate int rval; 33987c478bd9Sstevel@tonic-gate 33997c478bd9Sstevel@tonic-gate /* 34007c478bd9Sstevel@tonic-gate * If this node is marked, then we know that we have already visited 34017c478bd9Sstevel@tonic-gate * all of its children. 
If the node has any siblings, they need to 34027c478bd9Sstevel@tonic-gate * be visited next; otherwise, we need to visit the parent. Note 34037c478bd9Sstevel@tonic-gate * that vp->vn_marked will only be zero on the first invocation of 34047c478bd9Sstevel@tonic-gate * the step function. 34057c478bd9Sstevel@tonic-gate */ 34067c478bd9Sstevel@tonic-gate if (vp->vn_marked) { 34077c478bd9Sstevel@tonic-gate if (vp->vn_sibling != NULL) 34087c478bd9Sstevel@tonic-gate vp = vp->vn_sibling; 34097c478bd9Sstevel@tonic-gate else if (vp->vn_parent != NULL) 34107c478bd9Sstevel@tonic-gate vp = vp->vn_parent; 34117c478bd9Sstevel@tonic-gate else { 34127c478bd9Sstevel@tonic-gate /* 34137c478bd9Sstevel@tonic-gate * We have neither a parent, nor a sibling, and we 34147c478bd9Sstevel@tonic-gate * have already been visited; we're done. 34157c478bd9Sstevel@tonic-gate */ 34167c478bd9Sstevel@tonic-gate return (WALK_DONE); 34177c478bd9Sstevel@tonic-gate } 34187c478bd9Sstevel@tonic-gate } 34197c478bd9Sstevel@tonic-gate 34207c478bd9Sstevel@tonic-gate /* 34217c478bd9Sstevel@tonic-gate * Before we visit this node, visit its children. 
34227c478bd9Sstevel@tonic-gate */ 34237c478bd9Sstevel@tonic-gate while (vp->vn_children != NULL && !vp->vn_children->vn_marked) 34247c478bd9Sstevel@tonic-gate vp = vp->vn_children; 34257c478bd9Sstevel@tonic-gate 34267c478bd9Sstevel@tonic-gate vp->vn_marked = 1; 34277c478bd9Sstevel@tonic-gate vw->vw_current = vp; 34287c478bd9Sstevel@tonic-gate rval = wsp->walk_callback(vp->vn_addr, &vp->vn_vmem, wsp->walk_cbdata); 34297c478bd9Sstevel@tonic-gate 34307c478bd9Sstevel@tonic-gate return (rval); 34317c478bd9Sstevel@tonic-gate } 34327c478bd9Sstevel@tonic-gate 34337c478bd9Sstevel@tonic-gate void 34347c478bd9Sstevel@tonic-gate vmem_walk_fini(mdb_walk_state_t *wsp) 34357c478bd9Sstevel@tonic-gate { 34367c478bd9Sstevel@tonic-gate vmem_walk_t *vw = wsp->walk_data; 34377c478bd9Sstevel@tonic-gate vmem_node_t *root = vw->vw_root; 34387c478bd9Sstevel@tonic-gate int done; 34397c478bd9Sstevel@tonic-gate 34407c478bd9Sstevel@tonic-gate if (root == NULL) 34417c478bd9Sstevel@tonic-gate return; 34427c478bd9Sstevel@tonic-gate 34437c478bd9Sstevel@tonic-gate if ((vw->vw_root = root->vn_children) != NULL) 34447c478bd9Sstevel@tonic-gate vmem_walk_fini(wsp); 34457c478bd9Sstevel@tonic-gate 34467c478bd9Sstevel@tonic-gate vw->vw_root = root->vn_sibling; 34477c478bd9Sstevel@tonic-gate done = (root->vn_sibling == NULL && root->vn_parent == NULL); 34487c478bd9Sstevel@tonic-gate mdb_free(root, sizeof (vmem_node_t)); 34497c478bd9Sstevel@tonic-gate 34507c478bd9Sstevel@tonic-gate if (done) { 34517c478bd9Sstevel@tonic-gate mdb_free(vw, sizeof (vmem_walk_t)); 34527c478bd9Sstevel@tonic-gate } else { 34537c478bd9Sstevel@tonic-gate vmem_walk_fini(wsp); 34547c478bd9Sstevel@tonic-gate } 34557c478bd9Sstevel@tonic-gate } 34567c478bd9Sstevel@tonic-gate 34577c478bd9Sstevel@tonic-gate typedef struct vmem_seg_walk { 34587c478bd9Sstevel@tonic-gate uint8_t vsw_type; 34597c478bd9Sstevel@tonic-gate uintptr_t vsw_start; 34607c478bd9Sstevel@tonic-gate uintptr_t vsw_current; 34617c478bd9Sstevel@tonic-gate } vmem_seg_walk_t; 
34627c478bd9Sstevel@tonic-gate 34637c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 34647c478bd9Sstevel@tonic-gate int 34657c478bd9Sstevel@tonic-gate vmem_seg_walk_common_init(mdb_walk_state_t *wsp, uint8_t type, char *name) 34667c478bd9Sstevel@tonic-gate { 34677c478bd9Sstevel@tonic-gate vmem_seg_walk_t *vsw; 34687c478bd9Sstevel@tonic-gate 34697c478bd9Sstevel@tonic-gate if (wsp->walk_addr == NULL) { 34707c478bd9Sstevel@tonic-gate mdb_warn("vmem_%s does not support global walks\n", name); 34717c478bd9Sstevel@tonic-gate return (WALK_ERR); 34727c478bd9Sstevel@tonic-gate } 34737c478bd9Sstevel@tonic-gate 34747c478bd9Sstevel@tonic-gate wsp->walk_data = vsw = mdb_alloc(sizeof (vmem_seg_walk_t), UM_SLEEP); 34757c478bd9Sstevel@tonic-gate 34767c478bd9Sstevel@tonic-gate vsw->vsw_type = type; 34777c478bd9Sstevel@tonic-gate vsw->vsw_start = wsp->walk_addr + offsetof(vmem_t, vm_seg0); 34787c478bd9Sstevel@tonic-gate vsw->vsw_current = vsw->vsw_start; 34797c478bd9Sstevel@tonic-gate 34807c478bd9Sstevel@tonic-gate return (WALK_NEXT); 34817c478bd9Sstevel@tonic-gate } 34827c478bd9Sstevel@tonic-gate 34837c478bd9Sstevel@tonic-gate /* 34847c478bd9Sstevel@tonic-gate * vmem segments can't have type 0 (this should be added to vmem_impl.h). 
34857c478bd9Sstevel@tonic-gate */ 34867c478bd9Sstevel@tonic-gate #define VMEM_NONE 0 34877c478bd9Sstevel@tonic-gate 34887c478bd9Sstevel@tonic-gate int 34897c478bd9Sstevel@tonic-gate vmem_alloc_walk_init(mdb_walk_state_t *wsp) 34907c478bd9Sstevel@tonic-gate { 34917c478bd9Sstevel@tonic-gate return (vmem_seg_walk_common_init(wsp, VMEM_ALLOC, "alloc")); 34927c478bd9Sstevel@tonic-gate } 34937c478bd9Sstevel@tonic-gate 34947c478bd9Sstevel@tonic-gate int 34957c478bd9Sstevel@tonic-gate vmem_free_walk_init(mdb_walk_state_t *wsp) 34967c478bd9Sstevel@tonic-gate { 34977c478bd9Sstevel@tonic-gate return (vmem_seg_walk_common_init(wsp, VMEM_FREE, "free")); 34987c478bd9Sstevel@tonic-gate } 34997c478bd9Sstevel@tonic-gate 35007c478bd9Sstevel@tonic-gate int 35017c478bd9Sstevel@tonic-gate vmem_span_walk_init(mdb_walk_state_t *wsp) 35027c478bd9Sstevel@tonic-gate { 35037c478bd9Sstevel@tonic-gate return (vmem_seg_walk_common_init(wsp, VMEM_SPAN, "span")); 35047c478bd9Sstevel@tonic-gate } 35057c478bd9Sstevel@tonic-gate 35067c478bd9Sstevel@tonic-gate int 35077c478bd9Sstevel@tonic-gate vmem_seg_walk_init(mdb_walk_state_t *wsp) 35087c478bd9Sstevel@tonic-gate { 35097c478bd9Sstevel@tonic-gate return (vmem_seg_walk_common_init(wsp, VMEM_NONE, "seg")); 35107c478bd9Sstevel@tonic-gate } 35117c478bd9Sstevel@tonic-gate 35127c478bd9Sstevel@tonic-gate int 35137c478bd9Sstevel@tonic-gate vmem_seg_walk_step(mdb_walk_state_t *wsp) 35147c478bd9Sstevel@tonic-gate { 35157c478bd9Sstevel@tonic-gate vmem_seg_t seg; 35167c478bd9Sstevel@tonic-gate vmem_seg_walk_t *vsw = wsp->walk_data; 35177c478bd9Sstevel@tonic-gate uintptr_t addr = vsw->vsw_current; 35187c478bd9Sstevel@tonic-gate static size_t seg_size = 0; 35197c478bd9Sstevel@tonic-gate int rval; 35207c478bd9Sstevel@tonic-gate 35217c478bd9Sstevel@tonic-gate if (!seg_size) { 35227c478bd9Sstevel@tonic-gate if (mdb_readvar(&seg_size, "vmem_seg_size") == -1) { 35237c478bd9Sstevel@tonic-gate mdb_warn("failed to read 'vmem_seg_size'"); 35247c478bd9Sstevel@tonic-gate 
seg_size = sizeof (vmem_seg_t); 35257c478bd9Sstevel@tonic-gate } 35267c478bd9Sstevel@tonic-gate } 35277c478bd9Sstevel@tonic-gate 35287c478bd9Sstevel@tonic-gate if (seg_size < sizeof (seg)) 35297c478bd9Sstevel@tonic-gate bzero((caddr_t)&seg + seg_size, sizeof (seg) - seg_size); 35307c478bd9Sstevel@tonic-gate 35317c478bd9Sstevel@tonic-gate if (mdb_vread(&seg, seg_size, addr) == -1) { 35327c478bd9Sstevel@tonic-gate mdb_warn("couldn't read vmem_seg at %p", addr); 35337c478bd9Sstevel@tonic-gate return (WALK_ERR); 35347c478bd9Sstevel@tonic-gate } 35357c478bd9Sstevel@tonic-gate 35367c478bd9Sstevel@tonic-gate vsw->vsw_current = (uintptr_t)seg.vs_anext; 35377c478bd9Sstevel@tonic-gate if (vsw->vsw_type != VMEM_NONE && seg.vs_type != vsw->vsw_type) { 35387c478bd9Sstevel@tonic-gate rval = WALK_NEXT; 35397c478bd9Sstevel@tonic-gate } else { 35407c478bd9Sstevel@tonic-gate rval = wsp->walk_callback(addr, &seg, wsp->walk_cbdata); 35417c478bd9Sstevel@tonic-gate } 35427c478bd9Sstevel@tonic-gate 35437c478bd9Sstevel@tonic-gate if (vsw->vsw_current == vsw->vsw_start) 35447c478bd9Sstevel@tonic-gate return (WALK_DONE); 35457c478bd9Sstevel@tonic-gate 35467c478bd9Sstevel@tonic-gate return (rval); 35477c478bd9Sstevel@tonic-gate } 35487c478bd9Sstevel@tonic-gate 35497c478bd9Sstevel@tonic-gate void 35507c478bd9Sstevel@tonic-gate vmem_seg_walk_fini(mdb_walk_state_t *wsp) 35517c478bd9Sstevel@tonic-gate { 35527c478bd9Sstevel@tonic-gate vmem_seg_walk_t *vsw = wsp->walk_data; 35537c478bd9Sstevel@tonic-gate 35547c478bd9Sstevel@tonic-gate mdb_free(vsw, sizeof (vmem_seg_walk_t)); 35557c478bd9Sstevel@tonic-gate } 35567c478bd9Sstevel@tonic-gate 35577c478bd9Sstevel@tonic-gate #define VMEM_NAMEWIDTH 22 35587c478bd9Sstevel@tonic-gate 35597c478bd9Sstevel@tonic-gate int 35607c478bd9Sstevel@tonic-gate vmem(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 35617c478bd9Sstevel@tonic-gate { 35627c478bd9Sstevel@tonic-gate vmem_t v, parent; 35637c478bd9Sstevel@tonic-gate vmem_kstat_t *vkp = 
&v.vm_kstat; 35647c478bd9Sstevel@tonic-gate uintptr_t paddr; 35657c478bd9Sstevel@tonic-gate int ident = 0; 35667c478bd9Sstevel@tonic-gate char c[VMEM_NAMEWIDTH]; 35677c478bd9Sstevel@tonic-gate 35687c478bd9Sstevel@tonic-gate if (!(flags & DCMD_ADDRSPEC)) { 35697c478bd9Sstevel@tonic-gate if (mdb_walk_dcmd("vmem", "vmem", argc, argv) == -1) { 35707c478bd9Sstevel@tonic-gate mdb_warn("can't walk vmem"); 35717c478bd9Sstevel@tonic-gate return (DCMD_ERR); 35727c478bd9Sstevel@tonic-gate } 35737c478bd9Sstevel@tonic-gate return (DCMD_OK); 35747c478bd9Sstevel@tonic-gate } 35757c478bd9Sstevel@tonic-gate 35767c478bd9Sstevel@tonic-gate if (DCMD_HDRSPEC(flags)) 35777c478bd9Sstevel@tonic-gate mdb_printf("%-?s %-*s %10s %12s %9s %5s\n", 35787c478bd9Sstevel@tonic-gate "ADDR", VMEM_NAMEWIDTH, "NAME", "INUSE", 35797c478bd9Sstevel@tonic-gate "TOTAL", "SUCCEED", "FAIL"); 35807c478bd9Sstevel@tonic-gate 35817c478bd9Sstevel@tonic-gate if (mdb_vread(&v, sizeof (v), addr) == -1) { 35827c478bd9Sstevel@tonic-gate mdb_warn("couldn't read vmem at %p", addr); 35837c478bd9Sstevel@tonic-gate return (DCMD_ERR); 35847c478bd9Sstevel@tonic-gate } 35857c478bd9Sstevel@tonic-gate 35867c478bd9Sstevel@tonic-gate for (paddr = (uintptr_t)v.vm_source; paddr != NULL; ident += 2) { 35877c478bd9Sstevel@tonic-gate if (mdb_vread(&parent, sizeof (parent), paddr) == -1) { 35887c478bd9Sstevel@tonic-gate mdb_warn("couldn't trace %p's ancestry", addr); 35897c478bd9Sstevel@tonic-gate ident = 0; 35907c478bd9Sstevel@tonic-gate break; 35917c478bd9Sstevel@tonic-gate } 35927c478bd9Sstevel@tonic-gate paddr = (uintptr_t)parent.vm_source; 35937c478bd9Sstevel@tonic-gate } 35947c478bd9Sstevel@tonic-gate 35957c478bd9Sstevel@tonic-gate (void) mdb_snprintf(c, VMEM_NAMEWIDTH, "%*s%s", ident, "", v.vm_name); 35967c478bd9Sstevel@tonic-gate 35977c478bd9Sstevel@tonic-gate mdb_printf("%0?p %-*s %10llu %12llu %9llu %5llu\n", 35987c478bd9Sstevel@tonic-gate addr, VMEM_NAMEWIDTH, c, 35997c478bd9Sstevel@tonic-gate vkp->vk_mem_inuse.value.ui64, 
vkp->vk_mem_total.value.ui64, 36007c478bd9Sstevel@tonic-gate vkp->vk_alloc.value.ui64, vkp->vk_fail.value.ui64); 36017c478bd9Sstevel@tonic-gate 36027c478bd9Sstevel@tonic-gate return (DCMD_OK); 36037c478bd9Sstevel@tonic-gate } 36047c478bd9Sstevel@tonic-gate 36057c478bd9Sstevel@tonic-gate void 36067c478bd9Sstevel@tonic-gate vmem_seg_help(void) 36077c478bd9Sstevel@tonic-gate { 3608b5fca8f8Stomee mdb_printf("%s", 3609b5fca8f8Stomee "Display the contents of vmem_seg_ts, with optional filtering.\n\n" 36107c478bd9Sstevel@tonic-gate "\n" 36117c478bd9Sstevel@tonic-gate "A vmem_seg_t represents a range of addresses (or arbitrary numbers),\n" 36127c478bd9Sstevel@tonic-gate "representing a single chunk of data. Only ALLOC segments have debugging\n" 36137c478bd9Sstevel@tonic-gate "information.\n"); 36147c478bd9Sstevel@tonic-gate mdb_dec_indent(2); 36157c478bd9Sstevel@tonic-gate mdb_printf("%<b>OPTIONS%</b>\n"); 36167c478bd9Sstevel@tonic-gate mdb_inc_indent(2); 36177c478bd9Sstevel@tonic-gate mdb_printf("%s", 36187c478bd9Sstevel@tonic-gate " -v Display the full content of the vmem_seg, including its stack trace\n" 36197c478bd9Sstevel@tonic-gate " -s report the size of the segment, instead of the end address\n" 36207c478bd9Sstevel@tonic-gate " -c caller\n" 36217c478bd9Sstevel@tonic-gate " filter out segments without the function/PC in their stack trace\n" 36227c478bd9Sstevel@tonic-gate " -e earliest\n" 36237c478bd9Sstevel@tonic-gate " filter out segments timestamped before earliest\n" 36247c478bd9Sstevel@tonic-gate " -l latest\n" 36257c478bd9Sstevel@tonic-gate " filter out segments timestamped after latest\n" 36267c478bd9Sstevel@tonic-gate " -m minsize\n" 36277c478bd9Sstevel@tonic-gate " filer out segments smaller than minsize\n" 36287c478bd9Sstevel@tonic-gate " -M maxsize\n" 36297c478bd9Sstevel@tonic-gate " filer out segments larger than maxsize\n" 36307c478bd9Sstevel@tonic-gate " -t thread\n" 36317c478bd9Sstevel@tonic-gate " filter out segments not involving thread\n" 
36327c478bd9Sstevel@tonic-gate " -T type\n" 36337c478bd9Sstevel@tonic-gate " filter out segments not of type 'type'\n" 36347c478bd9Sstevel@tonic-gate " type is one of: ALLOC/FREE/SPAN/ROTOR/WALKER\n"); 36357c478bd9Sstevel@tonic-gate } 36367c478bd9Sstevel@tonic-gate 36377c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 36387c478bd9Sstevel@tonic-gate int 36397c478bd9Sstevel@tonic-gate vmem_seg(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 36407c478bd9Sstevel@tonic-gate { 36417c478bd9Sstevel@tonic-gate vmem_seg_t vs; 36427c478bd9Sstevel@tonic-gate pc_t *stk = vs.vs_stack; 36437c478bd9Sstevel@tonic-gate uintptr_t sz; 36447c478bd9Sstevel@tonic-gate uint8_t t; 36457c478bd9Sstevel@tonic-gate const char *type = NULL; 36467c478bd9Sstevel@tonic-gate GElf_Sym sym; 36477c478bd9Sstevel@tonic-gate char c[MDB_SYM_NAMLEN]; 36487c478bd9Sstevel@tonic-gate int no_debug; 36497c478bd9Sstevel@tonic-gate int i; 36507c478bd9Sstevel@tonic-gate int depth; 36517c478bd9Sstevel@tonic-gate uintptr_t laddr, haddr; 36527c478bd9Sstevel@tonic-gate 36537c478bd9Sstevel@tonic-gate uintptr_t caller = NULL, thread = NULL; 36547c478bd9Sstevel@tonic-gate uintptr_t minsize = 0, maxsize = 0; 36557c478bd9Sstevel@tonic-gate 36567c478bd9Sstevel@tonic-gate hrtime_t earliest = 0, latest = 0; 36577c478bd9Sstevel@tonic-gate 36587c478bd9Sstevel@tonic-gate uint_t size = 0; 36597c478bd9Sstevel@tonic-gate uint_t verbose = 0; 36607c478bd9Sstevel@tonic-gate 36617c478bd9Sstevel@tonic-gate if (!(flags & DCMD_ADDRSPEC)) 36627c478bd9Sstevel@tonic-gate return (DCMD_USAGE); 36637c478bd9Sstevel@tonic-gate 36647c478bd9Sstevel@tonic-gate if (mdb_getopts(argc, argv, 36657c478bd9Sstevel@tonic-gate 'c', MDB_OPT_UINTPTR, &caller, 36667c478bd9Sstevel@tonic-gate 'e', MDB_OPT_UINT64, &earliest, 36677c478bd9Sstevel@tonic-gate 'l', MDB_OPT_UINT64, &latest, 36687c478bd9Sstevel@tonic-gate 's', MDB_OPT_SETBITS, TRUE, &size, 36697c478bd9Sstevel@tonic-gate 'm', MDB_OPT_UINTPTR, &minsize, 36707c478bd9Sstevel@tonic-gate 'M', 
MDB_OPT_UINTPTR, &maxsize, 36717c478bd9Sstevel@tonic-gate 't', MDB_OPT_UINTPTR, &thread, 36727c478bd9Sstevel@tonic-gate 'T', MDB_OPT_STR, &type, 36737c478bd9Sstevel@tonic-gate 'v', MDB_OPT_SETBITS, TRUE, &verbose, 36747c478bd9Sstevel@tonic-gate NULL) != argc) 36757c478bd9Sstevel@tonic-gate return (DCMD_USAGE); 36767c478bd9Sstevel@tonic-gate 36777c478bd9Sstevel@tonic-gate if (DCMD_HDRSPEC(flags) && !(flags & DCMD_PIPE_OUT)) { 36787c478bd9Sstevel@tonic-gate if (verbose) { 36797c478bd9Sstevel@tonic-gate mdb_printf("%16s %4s %16s %16s %16s\n" 36807c478bd9Sstevel@tonic-gate "%<u>%16s %4s %16s %16s %16s%</u>\n", 36817c478bd9Sstevel@tonic-gate "ADDR", "TYPE", "START", "END", "SIZE", 36827c478bd9Sstevel@tonic-gate "", "", "THREAD", "TIMESTAMP", ""); 36837c478bd9Sstevel@tonic-gate } else { 36847c478bd9Sstevel@tonic-gate mdb_printf("%?s %4s %?s %?s %s\n", "ADDR", "TYPE", 36857c478bd9Sstevel@tonic-gate "START", size? "SIZE" : "END", "WHO"); 36867c478bd9Sstevel@tonic-gate } 36877c478bd9Sstevel@tonic-gate } 36887c478bd9Sstevel@tonic-gate 36897c478bd9Sstevel@tonic-gate if (mdb_vread(&vs, sizeof (vs), addr) == -1) { 36907c478bd9Sstevel@tonic-gate mdb_warn("couldn't read vmem_seg at %p", addr); 36917c478bd9Sstevel@tonic-gate return (DCMD_ERR); 36927c478bd9Sstevel@tonic-gate } 36937c478bd9Sstevel@tonic-gate 36947c478bd9Sstevel@tonic-gate if (type != NULL) { 36957c478bd9Sstevel@tonic-gate if (strcmp(type, "ALLC") == 0 || strcmp(type, "ALLOC") == 0) 36967c478bd9Sstevel@tonic-gate t = VMEM_ALLOC; 36977c478bd9Sstevel@tonic-gate else if (strcmp(type, "FREE") == 0) 36987c478bd9Sstevel@tonic-gate t = VMEM_FREE; 36997c478bd9Sstevel@tonic-gate else if (strcmp(type, "SPAN") == 0) 37007c478bd9Sstevel@tonic-gate t = VMEM_SPAN; 37017c478bd9Sstevel@tonic-gate else if (strcmp(type, "ROTR") == 0 || 37027c478bd9Sstevel@tonic-gate strcmp(type, "ROTOR") == 0) 37037c478bd9Sstevel@tonic-gate t = VMEM_ROTOR; 37047c478bd9Sstevel@tonic-gate else if (strcmp(type, "WLKR") == 0 || 
37057c478bd9Sstevel@tonic-gate strcmp(type, "WALKER") == 0) 37067c478bd9Sstevel@tonic-gate t = VMEM_WALKER; 37077c478bd9Sstevel@tonic-gate else { 37087c478bd9Sstevel@tonic-gate mdb_warn("\"%s\" is not a recognized vmem_seg type\n", 37097c478bd9Sstevel@tonic-gate type); 37107c478bd9Sstevel@tonic-gate return (DCMD_ERR); 37117c478bd9Sstevel@tonic-gate } 37127c478bd9Sstevel@tonic-gate 37137c478bd9Sstevel@tonic-gate if (vs.vs_type != t) 37147c478bd9Sstevel@tonic-gate return (DCMD_OK); 37157c478bd9Sstevel@tonic-gate } 37167c478bd9Sstevel@tonic-gate 37177c478bd9Sstevel@tonic-gate sz = vs.vs_end - vs.vs_start; 37187c478bd9Sstevel@tonic-gate 37197c478bd9Sstevel@tonic-gate if (minsize != 0 && sz < minsize) 37207c478bd9Sstevel@tonic-gate return (DCMD_OK); 37217c478bd9Sstevel@tonic-gate 37227c478bd9Sstevel@tonic-gate if (maxsize != 0 && sz > maxsize) 37237c478bd9Sstevel@tonic-gate return (DCMD_OK); 37247c478bd9Sstevel@tonic-gate 37257c478bd9Sstevel@tonic-gate t = vs.vs_type; 37267c478bd9Sstevel@tonic-gate depth = vs.vs_depth; 37277c478bd9Sstevel@tonic-gate 37287c478bd9Sstevel@tonic-gate /* 37297c478bd9Sstevel@tonic-gate * debug info, when present, is only accurate for VMEM_ALLOC segments 37307c478bd9Sstevel@tonic-gate */ 37317c478bd9Sstevel@tonic-gate no_debug = (t != VMEM_ALLOC) || 37327c478bd9Sstevel@tonic-gate (depth == 0 || depth > VMEM_STACK_DEPTH); 37337c478bd9Sstevel@tonic-gate 37347c478bd9Sstevel@tonic-gate if (no_debug) { 37357c478bd9Sstevel@tonic-gate if (caller != NULL || thread != NULL || earliest != 0 || 37367c478bd9Sstevel@tonic-gate latest != 0) 37377c478bd9Sstevel@tonic-gate return (DCMD_OK); /* not enough info */ 37387c478bd9Sstevel@tonic-gate } else { 37397c478bd9Sstevel@tonic-gate if (caller != NULL) { 37407c478bd9Sstevel@tonic-gate laddr = caller; 37417c478bd9Sstevel@tonic-gate haddr = caller + sizeof (caller); 37427c478bd9Sstevel@tonic-gate 37437c478bd9Sstevel@tonic-gate if (mdb_lookup_by_addr(caller, MDB_SYM_FUZZY, c, 37447c478bd9Sstevel@tonic-gate sizeof 
(c), &sym) != -1 && 37457c478bd9Sstevel@tonic-gate caller == (uintptr_t)sym.st_value) { 37467c478bd9Sstevel@tonic-gate /* 37477c478bd9Sstevel@tonic-gate * We were provided an exact symbol value; any 37487c478bd9Sstevel@tonic-gate * address in the function is valid. 37497c478bd9Sstevel@tonic-gate */ 37507c478bd9Sstevel@tonic-gate laddr = (uintptr_t)sym.st_value; 37517c478bd9Sstevel@tonic-gate haddr = (uintptr_t)sym.st_value + sym.st_size; 37527c478bd9Sstevel@tonic-gate } 37537c478bd9Sstevel@tonic-gate 37547c478bd9Sstevel@tonic-gate for (i = 0; i < depth; i++) 37557c478bd9Sstevel@tonic-gate if (vs.vs_stack[i] >= laddr && 37567c478bd9Sstevel@tonic-gate vs.vs_stack[i] < haddr) 37577c478bd9Sstevel@tonic-gate break; 37587c478bd9Sstevel@tonic-gate 37597c478bd9Sstevel@tonic-gate if (i == depth) 37607c478bd9Sstevel@tonic-gate return (DCMD_OK); 37617c478bd9Sstevel@tonic-gate } 37627c478bd9Sstevel@tonic-gate 37637c478bd9Sstevel@tonic-gate if (thread != NULL && (uintptr_t)vs.vs_thread != thread) 37647c478bd9Sstevel@tonic-gate return (DCMD_OK); 37657c478bd9Sstevel@tonic-gate 37667c478bd9Sstevel@tonic-gate if (earliest != 0 && vs.vs_timestamp < earliest) 37677c478bd9Sstevel@tonic-gate return (DCMD_OK); 37687c478bd9Sstevel@tonic-gate 37697c478bd9Sstevel@tonic-gate if (latest != 0 && vs.vs_timestamp > latest) 37707c478bd9Sstevel@tonic-gate return (DCMD_OK); 37717c478bd9Sstevel@tonic-gate } 37727c478bd9Sstevel@tonic-gate 37737c478bd9Sstevel@tonic-gate type = (t == VMEM_ALLOC ? "ALLC" : 37747c478bd9Sstevel@tonic-gate t == VMEM_FREE ? "FREE" : 37757c478bd9Sstevel@tonic-gate t == VMEM_SPAN ? "SPAN" : 37767c478bd9Sstevel@tonic-gate t == VMEM_ROTOR ? "ROTR" : 37777c478bd9Sstevel@tonic-gate t == VMEM_WALKER ? 
"WLKR" : 37787c478bd9Sstevel@tonic-gate "????"); 37797c478bd9Sstevel@tonic-gate 37807c478bd9Sstevel@tonic-gate if (flags & DCMD_PIPE_OUT) { 37817c478bd9Sstevel@tonic-gate mdb_printf("%#lr\n", addr); 37827c478bd9Sstevel@tonic-gate return (DCMD_OK); 37837c478bd9Sstevel@tonic-gate } 37847c478bd9Sstevel@tonic-gate 37857c478bd9Sstevel@tonic-gate if (verbose) { 37867c478bd9Sstevel@tonic-gate mdb_printf("%<b>%16p%</b> %4s %16p %16p %16d\n", 37877c478bd9Sstevel@tonic-gate addr, type, vs.vs_start, vs.vs_end, sz); 37887c478bd9Sstevel@tonic-gate 37897c478bd9Sstevel@tonic-gate if (no_debug) 37907c478bd9Sstevel@tonic-gate return (DCMD_OK); 37917c478bd9Sstevel@tonic-gate 37927c478bd9Sstevel@tonic-gate mdb_printf("%16s %4s %16p %16llx\n", 37937c478bd9Sstevel@tonic-gate "", "", vs.vs_thread, vs.vs_timestamp); 37947c478bd9Sstevel@tonic-gate 37957c478bd9Sstevel@tonic-gate mdb_inc_indent(17); 37967c478bd9Sstevel@tonic-gate for (i = 0; i < depth; i++) { 37977c478bd9Sstevel@tonic-gate mdb_printf("%a\n", stk[i]); 37987c478bd9Sstevel@tonic-gate } 37997c478bd9Sstevel@tonic-gate mdb_dec_indent(17); 38007c478bd9Sstevel@tonic-gate mdb_printf("\n"); 38017c478bd9Sstevel@tonic-gate } else { 38027c478bd9Sstevel@tonic-gate mdb_printf("%0?p %4s %0?p %0?p", addr, type, 38037c478bd9Sstevel@tonic-gate vs.vs_start, size? 
sz : vs.vs_end); 38047c478bd9Sstevel@tonic-gate 38057c478bd9Sstevel@tonic-gate if (no_debug) { 38067c478bd9Sstevel@tonic-gate mdb_printf("\n"); 38077c478bd9Sstevel@tonic-gate return (DCMD_OK); 38087c478bd9Sstevel@tonic-gate } 38097c478bd9Sstevel@tonic-gate 38107c478bd9Sstevel@tonic-gate for (i = 0; i < depth; i++) { 38117c478bd9Sstevel@tonic-gate if (mdb_lookup_by_addr(stk[i], MDB_SYM_FUZZY, 38127c478bd9Sstevel@tonic-gate c, sizeof (c), &sym) == -1) 38137c478bd9Sstevel@tonic-gate continue; 38147c478bd9Sstevel@tonic-gate if (strncmp(c, "vmem_", 5) == 0) 38157c478bd9Sstevel@tonic-gate continue; 38167c478bd9Sstevel@tonic-gate break; 38177c478bd9Sstevel@tonic-gate } 38187c478bd9Sstevel@tonic-gate mdb_printf(" %a\n", stk[i]); 38197c478bd9Sstevel@tonic-gate } 38207c478bd9Sstevel@tonic-gate return (DCMD_OK); 38217c478bd9Sstevel@tonic-gate } 38227c478bd9Sstevel@tonic-gate 38237c478bd9Sstevel@tonic-gate typedef struct kmalog_data { 38247c478bd9Sstevel@tonic-gate uintptr_t kma_addr; 38257c478bd9Sstevel@tonic-gate hrtime_t kma_newest; 38267c478bd9Sstevel@tonic-gate } kmalog_data_t; 38277c478bd9Sstevel@tonic-gate 38287c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 38297c478bd9Sstevel@tonic-gate static int 38307c478bd9Sstevel@tonic-gate showbc(uintptr_t addr, const kmem_bufctl_audit_t *bcp, kmalog_data_t *kma) 38317c478bd9Sstevel@tonic-gate { 38327c478bd9Sstevel@tonic-gate char name[KMEM_CACHE_NAMELEN + 1]; 38337c478bd9Sstevel@tonic-gate hrtime_t delta; 38347c478bd9Sstevel@tonic-gate int i, depth; 38357c478bd9Sstevel@tonic-gate size_t bufsize; 38367c478bd9Sstevel@tonic-gate 38377c478bd9Sstevel@tonic-gate if (bcp->bc_timestamp == 0) 38387c478bd9Sstevel@tonic-gate return (WALK_DONE); 38397c478bd9Sstevel@tonic-gate 38407c478bd9Sstevel@tonic-gate if (kma->kma_newest == 0) 38417c478bd9Sstevel@tonic-gate kma->kma_newest = bcp->bc_timestamp; 38427c478bd9Sstevel@tonic-gate 38437c478bd9Sstevel@tonic-gate if (kma->kma_addr) { 38447c478bd9Sstevel@tonic-gate if (mdb_vread(&bufsize, sizeof 
(bufsize), 38457c478bd9Sstevel@tonic-gate (uintptr_t)&bcp->bc_cache->cache_bufsize) == -1) { 38467c478bd9Sstevel@tonic-gate mdb_warn( 38477c478bd9Sstevel@tonic-gate "failed to read cache_bufsize for cache at %p", 38487c478bd9Sstevel@tonic-gate bcp->bc_cache); 38497c478bd9Sstevel@tonic-gate return (WALK_ERR); 38507c478bd9Sstevel@tonic-gate } 38517c478bd9Sstevel@tonic-gate 38527c478bd9Sstevel@tonic-gate if (kma->kma_addr < (uintptr_t)bcp->bc_addr || 38537c478bd9Sstevel@tonic-gate kma->kma_addr >= (uintptr_t)bcp->bc_addr + bufsize) 38547c478bd9Sstevel@tonic-gate return (WALK_NEXT); 38557c478bd9Sstevel@tonic-gate } 38567c478bd9Sstevel@tonic-gate 38577c478bd9Sstevel@tonic-gate delta = kma->kma_newest - bcp->bc_timestamp; 38587c478bd9Sstevel@tonic-gate depth = MIN(bcp->bc_depth, KMEM_STACK_DEPTH); 38597c478bd9Sstevel@tonic-gate 38607c478bd9Sstevel@tonic-gate if (mdb_readstr(name, sizeof (name), (uintptr_t) 38617c478bd9Sstevel@tonic-gate &bcp->bc_cache->cache_name) <= 0) 38627c478bd9Sstevel@tonic-gate (void) mdb_snprintf(name, sizeof (name), "%a", bcp->bc_cache); 38637c478bd9Sstevel@tonic-gate 38647c478bd9Sstevel@tonic-gate mdb_printf("\nT-%lld.%09lld addr=%p %s\n", 38657c478bd9Sstevel@tonic-gate delta / NANOSEC, delta % NANOSEC, bcp->bc_addr, name); 38667c478bd9Sstevel@tonic-gate 38677c478bd9Sstevel@tonic-gate for (i = 0; i < depth; i++) 38687c478bd9Sstevel@tonic-gate mdb_printf("\t %a\n", bcp->bc_stack[i]); 38697c478bd9Sstevel@tonic-gate 38707c478bd9Sstevel@tonic-gate return (WALK_NEXT); 38717c478bd9Sstevel@tonic-gate } 38727c478bd9Sstevel@tonic-gate 38737c478bd9Sstevel@tonic-gate int 38747c478bd9Sstevel@tonic-gate kmalog(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 38757c478bd9Sstevel@tonic-gate { 38767c478bd9Sstevel@tonic-gate const char *logname = "kmem_transaction_log"; 38777c478bd9Sstevel@tonic-gate kmalog_data_t kma; 38787c478bd9Sstevel@tonic-gate 38797c478bd9Sstevel@tonic-gate if (argc > 1) 38807c478bd9Sstevel@tonic-gate return (DCMD_USAGE); 
38817c478bd9Sstevel@tonic-gate 38827c478bd9Sstevel@tonic-gate kma.kma_newest = 0; 38837c478bd9Sstevel@tonic-gate if (flags & DCMD_ADDRSPEC) 38847c478bd9Sstevel@tonic-gate kma.kma_addr = addr; 38857c478bd9Sstevel@tonic-gate else 38867c478bd9Sstevel@tonic-gate kma.kma_addr = NULL; 38877c478bd9Sstevel@tonic-gate 38887c478bd9Sstevel@tonic-gate if (argc > 0) { 38897c478bd9Sstevel@tonic-gate if (argv->a_type != MDB_TYPE_STRING) 38907c478bd9Sstevel@tonic-gate return (DCMD_USAGE); 38917c478bd9Sstevel@tonic-gate if (strcmp(argv->a_un.a_str, "fail") == 0) 38927c478bd9Sstevel@tonic-gate logname = "kmem_failure_log"; 38937c478bd9Sstevel@tonic-gate else if (strcmp(argv->a_un.a_str, "slab") == 0) 38947c478bd9Sstevel@tonic-gate logname = "kmem_slab_log"; 38957c478bd9Sstevel@tonic-gate else 38967c478bd9Sstevel@tonic-gate return (DCMD_USAGE); 38977c478bd9Sstevel@tonic-gate } 38987c478bd9Sstevel@tonic-gate 38997c478bd9Sstevel@tonic-gate if (mdb_readvar(&addr, logname) == -1) { 39007c478bd9Sstevel@tonic-gate mdb_warn("failed to read %s log header pointer"); 39017c478bd9Sstevel@tonic-gate return (DCMD_ERR); 39027c478bd9Sstevel@tonic-gate } 39037c478bd9Sstevel@tonic-gate 39047c478bd9Sstevel@tonic-gate if (mdb_pwalk("kmem_log", (mdb_walk_cb_t)showbc, &kma, addr) == -1) { 39057c478bd9Sstevel@tonic-gate mdb_warn("failed to walk kmem log"); 39067c478bd9Sstevel@tonic-gate return (DCMD_ERR); 39077c478bd9Sstevel@tonic-gate } 39087c478bd9Sstevel@tonic-gate 39097c478bd9Sstevel@tonic-gate return (DCMD_OK); 39107c478bd9Sstevel@tonic-gate } 39117c478bd9Sstevel@tonic-gate 39127c478bd9Sstevel@tonic-gate /* 39137c478bd9Sstevel@tonic-gate * As the final lure for die-hard crash(1M) users, we provide ::kmausers here. 39147c478bd9Sstevel@tonic-gate * The first piece is a structure which we use to accumulate kmem_cache_t 39157c478bd9Sstevel@tonic-gate * addresses of interest. 
The kmc_add is used as a callback for the kmem_cache 39167c478bd9Sstevel@tonic-gate * walker; we either add all caches, or ones named explicitly as arguments. 39177c478bd9Sstevel@tonic-gate */ 39187c478bd9Sstevel@tonic-gate 39197c478bd9Sstevel@tonic-gate typedef struct kmclist { 39207c478bd9Sstevel@tonic-gate const char *kmc_name; /* Name to match (or NULL) */ 39217c478bd9Sstevel@tonic-gate uintptr_t *kmc_caches; /* List of kmem_cache_t addrs */ 39227c478bd9Sstevel@tonic-gate int kmc_nelems; /* Num entries in kmc_caches */ 39237c478bd9Sstevel@tonic-gate int kmc_size; /* Size of kmc_caches array */ 39247c478bd9Sstevel@tonic-gate } kmclist_t; 39257c478bd9Sstevel@tonic-gate 39267c478bd9Sstevel@tonic-gate static int 39277c478bd9Sstevel@tonic-gate kmc_add(uintptr_t addr, const kmem_cache_t *cp, kmclist_t *kmc) 39287c478bd9Sstevel@tonic-gate { 39297c478bd9Sstevel@tonic-gate void *p; 39307c478bd9Sstevel@tonic-gate int s; 39317c478bd9Sstevel@tonic-gate 39327c478bd9Sstevel@tonic-gate if (kmc->kmc_name == NULL || 39337c478bd9Sstevel@tonic-gate strcmp(cp->cache_name, kmc->kmc_name) == 0) { 39347c478bd9Sstevel@tonic-gate /* 39357c478bd9Sstevel@tonic-gate * If we have a match, grow our array (if necessary), and then 39367c478bd9Sstevel@tonic-gate * add the virtual address of the matching cache to our list. 39377c478bd9Sstevel@tonic-gate */ 39387c478bd9Sstevel@tonic-gate if (kmc->kmc_nelems >= kmc->kmc_size) { 39397c478bd9Sstevel@tonic-gate s = kmc->kmc_size ? 
kmc->kmc_size * 2 : 256; 39407c478bd9Sstevel@tonic-gate p = mdb_alloc(sizeof (uintptr_t) * s, UM_SLEEP | UM_GC); 39417c478bd9Sstevel@tonic-gate 39427c478bd9Sstevel@tonic-gate bcopy(kmc->kmc_caches, p, 39437c478bd9Sstevel@tonic-gate sizeof (uintptr_t) * kmc->kmc_size); 39447c478bd9Sstevel@tonic-gate 39457c478bd9Sstevel@tonic-gate kmc->kmc_caches = p; 39467c478bd9Sstevel@tonic-gate kmc->kmc_size = s; 39477c478bd9Sstevel@tonic-gate } 39487c478bd9Sstevel@tonic-gate 39497c478bd9Sstevel@tonic-gate kmc->kmc_caches[kmc->kmc_nelems++] = addr; 39507c478bd9Sstevel@tonic-gate return (kmc->kmc_name ? WALK_DONE : WALK_NEXT); 39517c478bd9Sstevel@tonic-gate } 39527c478bd9Sstevel@tonic-gate 39537c478bd9Sstevel@tonic-gate return (WALK_NEXT); 39547c478bd9Sstevel@tonic-gate } 39557c478bd9Sstevel@tonic-gate 39567c478bd9Sstevel@tonic-gate /* 39577c478bd9Sstevel@tonic-gate * The second piece of ::kmausers is a hash table of allocations. Each 39587c478bd9Sstevel@tonic-gate * allocation owner is identified by its stack trace and data_size. We then 39597c478bd9Sstevel@tonic-gate * track the total bytes of all such allocations, and the number of allocations 39607c478bd9Sstevel@tonic-gate * to report at the end. Once we have a list of caches, we walk through the 39617c478bd9Sstevel@tonic-gate * allocated bufctls of each, and update our hash table accordingly. 
39627c478bd9Sstevel@tonic-gate */ 39637c478bd9Sstevel@tonic-gate 39647c478bd9Sstevel@tonic-gate typedef struct kmowner { 39657c478bd9Sstevel@tonic-gate struct kmowner *kmo_head; /* First hash elt in bucket */ 39667c478bd9Sstevel@tonic-gate struct kmowner *kmo_next; /* Next hash elt in chain */ 39677c478bd9Sstevel@tonic-gate size_t kmo_signature; /* Hash table signature */ 39687c478bd9Sstevel@tonic-gate uint_t kmo_num; /* Number of allocations */ 39697c478bd9Sstevel@tonic-gate size_t kmo_data_size; /* Size of each allocation */ 39707c478bd9Sstevel@tonic-gate size_t kmo_total_size; /* Total bytes of allocation */ 39717c478bd9Sstevel@tonic-gate int kmo_depth; /* Depth of stack trace */ 39727c478bd9Sstevel@tonic-gate uintptr_t kmo_stack[KMEM_STACK_DEPTH]; /* Stack trace */ 39737c478bd9Sstevel@tonic-gate } kmowner_t; 39747c478bd9Sstevel@tonic-gate 39757c478bd9Sstevel@tonic-gate typedef struct kmusers { 39767c478bd9Sstevel@tonic-gate uintptr_t kmu_addr; /* address of interest */ 39777c478bd9Sstevel@tonic-gate const kmem_cache_t *kmu_cache; /* Current kmem cache */ 39787c478bd9Sstevel@tonic-gate kmowner_t *kmu_hash; /* Hash table of owners */ 39797c478bd9Sstevel@tonic-gate int kmu_nelems; /* Number of entries in use */ 39807c478bd9Sstevel@tonic-gate int kmu_size; /* Total number of entries */ 39817c478bd9Sstevel@tonic-gate } kmusers_t; 39827c478bd9Sstevel@tonic-gate 39837c478bd9Sstevel@tonic-gate static void 39847c478bd9Sstevel@tonic-gate kmu_add(kmusers_t *kmu, const kmem_bufctl_audit_t *bcp, 39857c478bd9Sstevel@tonic-gate size_t size, size_t data_size) 39867c478bd9Sstevel@tonic-gate { 39877c478bd9Sstevel@tonic-gate int i, depth = MIN(bcp->bc_depth, KMEM_STACK_DEPTH); 39887c478bd9Sstevel@tonic-gate size_t bucket, signature = data_size; 39897c478bd9Sstevel@tonic-gate kmowner_t *kmo, *kmoend; 39907c478bd9Sstevel@tonic-gate 39917c478bd9Sstevel@tonic-gate /* 39927c478bd9Sstevel@tonic-gate * If the hash table is full, double its size and rehash everything. 
39937c478bd9Sstevel@tonic-gate */ 39947c478bd9Sstevel@tonic-gate if (kmu->kmu_nelems >= kmu->kmu_size) { 39957c478bd9Sstevel@tonic-gate int s = kmu->kmu_size ? kmu->kmu_size * 2 : 1024; 39967c478bd9Sstevel@tonic-gate 39977c478bd9Sstevel@tonic-gate kmo = mdb_alloc(sizeof (kmowner_t) * s, UM_SLEEP | UM_GC); 39987c478bd9Sstevel@tonic-gate bcopy(kmu->kmu_hash, kmo, sizeof (kmowner_t) * kmu->kmu_size); 39997c478bd9Sstevel@tonic-gate kmu->kmu_hash = kmo; 40007c478bd9Sstevel@tonic-gate kmu->kmu_size = s; 40017c478bd9Sstevel@tonic-gate 40027c478bd9Sstevel@tonic-gate kmoend = kmu->kmu_hash + kmu->kmu_size; 40037c478bd9Sstevel@tonic-gate for (kmo = kmu->kmu_hash; kmo < kmoend; kmo++) 40047c478bd9Sstevel@tonic-gate kmo->kmo_head = NULL; 40057c478bd9Sstevel@tonic-gate 40067c478bd9Sstevel@tonic-gate kmoend = kmu->kmu_hash + kmu->kmu_nelems; 40077c478bd9Sstevel@tonic-gate for (kmo = kmu->kmu_hash; kmo < kmoend; kmo++) { 40087c478bd9Sstevel@tonic-gate bucket = kmo->kmo_signature & (kmu->kmu_size - 1); 40097c478bd9Sstevel@tonic-gate kmo->kmo_next = kmu->kmu_hash[bucket].kmo_head; 40107c478bd9Sstevel@tonic-gate kmu->kmu_hash[bucket].kmo_head = kmo; 40117c478bd9Sstevel@tonic-gate } 40127c478bd9Sstevel@tonic-gate } 40137c478bd9Sstevel@tonic-gate 40147c478bd9Sstevel@tonic-gate /* 40157c478bd9Sstevel@tonic-gate * Finish computing the hash signature from the stack trace, and then 40167c478bd9Sstevel@tonic-gate * see if the owner is in the hash table. If so, update our stats. 
40177c478bd9Sstevel@tonic-gate */ 40187c478bd9Sstevel@tonic-gate for (i = 0; i < depth; i++) 40197c478bd9Sstevel@tonic-gate signature += bcp->bc_stack[i]; 40207c478bd9Sstevel@tonic-gate 40217c478bd9Sstevel@tonic-gate bucket = signature & (kmu->kmu_size - 1); 40227c478bd9Sstevel@tonic-gate 40237c478bd9Sstevel@tonic-gate for (kmo = kmu->kmu_hash[bucket].kmo_head; kmo; kmo = kmo->kmo_next) { 40247c478bd9Sstevel@tonic-gate if (kmo->kmo_signature == signature) { 40257c478bd9Sstevel@tonic-gate size_t difference = 0; 40267c478bd9Sstevel@tonic-gate 40277c478bd9Sstevel@tonic-gate difference |= kmo->kmo_data_size - data_size; 40287c478bd9Sstevel@tonic-gate difference |= kmo->kmo_depth - depth; 40297c478bd9Sstevel@tonic-gate 40307c478bd9Sstevel@tonic-gate for (i = 0; i < depth; i++) { 40317c478bd9Sstevel@tonic-gate difference |= kmo->kmo_stack[i] - 40327c478bd9Sstevel@tonic-gate bcp->bc_stack[i]; 40337c478bd9Sstevel@tonic-gate } 40347c478bd9Sstevel@tonic-gate 40357c478bd9Sstevel@tonic-gate if (difference == 0) { 40367c478bd9Sstevel@tonic-gate kmo->kmo_total_size += size; 40377c478bd9Sstevel@tonic-gate kmo->kmo_num++; 40387c478bd9Sstevel@tonic-gate return; 40397c478bd9Sstevel@tonic-gate } 40407c478bd9Sstevel@tonic-gate } 40417c478bd9Sstevel@tonic-gate } 40427c478bd9Sstevel@tonic-gate 40437c478bd9Sstevel@tonic-gate /* 40447c478bd9Sstevel@tonic-gate * If the owner is not yet hashed, grab the next element and fill it 40457c478bd9Sstevel@tonic-gate * in based on the allocation information. 
40467c478bd9Sstevel@tonic-gate */ 40477c478bd9Sstevel@tonic-gate kmo = &kmu->kmu_hash[kmu->kmu_nelems++]; 40487c478bd9Sstevel@tonic-gate kmo->kmo_next = kmu->kmu_hash[bucket].kmo_head; 40497c478bd9Sstevel@tonic-gate kmu->kmu_hash[bucket].kmo_head = kmo; 40507c478bd9Sstevel@tonic-gate 40517c478bd9Sstevel@tonic-gate kmo->kmo_signature = signature; 40527c478bd9Sstevel@tonic-gate kmo->kmo_num = 1; 40537c478bd9Sstevel@tonic-gate kmo->kmo_data_size = data_size; 40547c478bd9Sstevel@tonic-gate kmo->kmo_total_size = size; 40557c478bd9Sstevel@tonic-gate kmo->kmo_depth = depth; 40567c478bd9Sstevel@tonic-gate 40577c478bd9Sstevel@tonic-gate for (i = 0; i < depth; i++) 40587c478bd9Sstevel@tonic-gate kmo->kmo_stack[i] = bcp->bc_stack[i]; 40597c478bd9Sstevel@tonic-gate } 40607c478bd9Sstevel@tonic-gate 40617c478bd9Sstevel@tonic-gate /* 40627c478bd9Sstevel@tonic-gate * When ::kmausers is invoked without the -f flag, we simply update our hash 40637c478bd9Sstevel@tonic-gate * table with the information from each allocated bufctl. 40647c478bd9Sstevel@tonic-gate */ 40657c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 40667c478bd9Sstevel@tonic-gate static int 40677c478bd9Sstevel@tonic-gate kmause1(uintptr_t addr, const kmem_bufctl_audit_t *bcp, kmusers_t *kmu) 40687c478bd9Sstevel@tonic-gate { 40697c478bd9Sstevel@tonic-gate const kmem_cache_t *cp = kmu->kmu_cache; 40707c478bd9Sstevel@tonic-gate 40717c478bd9Sstevel@tonic-gate kmu_add(kmu, bcp, cp->cache_bufsize, cp->cache_bufsize); 40727c478bd9Sstevel@tonic-gate return (WALK_NEXT); 40737c478bd9Sstevel@tonic-gate } 40747c478bd9Sstevel@tonic-gate 40757c478bd9Sstevel@tonic-gate /* 40767c478bd9Sstevel@tonic-gate * When ::kmausers is invoked with the -f flag, we print out the information 40777c478bd9Sstevel@tonic-gate * for each bufctl as well as updating the hash table. 
40787c478bd9Sstevel@tonic-gate */ 40797c478bd9Sstevel@tonic-gate static int 40807c478bd9Sstevel@tonic-gate kmause2(uintptr_t addr, const kmem_bufctl_audit_t *bcp, kmusers_t *kmu) 40817c478bd9Sstevel@tonic-gate { 40827c478bd9Sstevel@tonic-gate int i, depth = MIN(bcp->bc_depth, KMEM_STACK_DEPTH); 40837c478bd9Sstevel@tonic-gate const kmem_cache_t *cp = kmu->kmu_cache; 40847c478bd9Sstevel@tonic-gate kmem_bufctl_t bufctl; 40857c478bd9Sstevel@tonic-gate 40867c478bd9Sstevel@tonic-gate if (kmu->kmu_addr) { 40877c478bd9Sstevel@tonic-gate if (mdb_vread(&bufctl, sizeof (bufctl), addr) == -1) 40887c478bd9Sstevel@tonic-gate mdb_warn("couldn't read bufctl at %p", addr); 40897c478bd9Sstevel@tonic-gate else if (kmu->kmu_addr < (uintptr_t)bufctl.bc_addr || 40907c478bd9Sstevel@tonic-gate kmu->kmu_addr >= (uintptr_t)bufctl.bc_addr + 40917c478bd9Sstevel@tonic-gate cp->cache_bufsize) 40927c478bd9Sstevel@tonic-gate return (WALK_NEXT); 40937c478bd9Sstevel@tonic-gate } 40947c478bd9Sstevel@tonic-gate 40957c478bd9Sstevel@tonic-gate mdb_printf("size %d, addr %p, thread %p, cache %s\n", 40967c478bd9Sstevel@tonic-gate cp->cache_bufsize, addr, bcp->bc_thread, cp->cache_name); 40977c478bd9Sstevel@tonic-gate 40987c478bd9Sstevel@tonic-gate for (i = 0; i < depth; i++) 40997c478bd9Sstevel@tonic-gate mdb_printf("\t %a\n", bcp->bc_stack[i]); 41007c478bd9Sstevel@tonic-gate 41017c478bd9Sstevel@tonic-gate kmu_add(kmu, bcp, cp->cache_bufsize, cp->cache_bufsize); 41027c478bd9Sstevel@tonic-gate return (WALK_NEXT); 41037c478bd9Sstevel@tonic-gate } 41047c478bd9Sstevel@tonic-gate 41057c478bd9Sstevel@tonic-gate /* 41067c478bd9Sstevel@tonic-gate * We sort our results by allocation size before printing them. 
41077c478bd9Sstevel@tonic-gate */ 41087c478bd9Sstevel@tonic-gate static int 41097c478bd9Sstevel@tonic-gate kmownercmp(const void *lp, const void *rp) 41107c478bd9Sstevel@tonic-gate { 41117c478bd9Sstevel@tonic-gate const kmowner_t *lhs = lp; 41127c478bd9Sstevel@tonic-gate const kmowner_t *rhs = rp; 41137c478bd9Sstevel@tonic-gate 41147c478bd9Sstevel@tonic-gate return (rhs->kmo_total_size - lhs->kmo_total_size); 41157c478bd9Sstevel@tonic-gate } 41167c478bd9Sstevel@tonic-gate 41177c478bd9Sstevel@tonic-gate /* 41187c478bd9Sstevel@tonic-gate * The main engine of ::kmausers is relatively straightforward: First we 41197c478bd9Sstevel@tonic-gate * accumulate our list of kmem_cache_t addresses into the kmclist_t. Next we 41207c478bd9Sstevel@tonic-gate * iterate over the allocated bufctls of each cache in the list. Finally, 41217c478bd9Sstevel@tonic-gate * we sort and print our results. 41227c478bd9Sstevel@tonic-gate */ 41237c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 41247c478bd9Sstevel@tonic-gate int 41257c478bd9Sstevel@tonic-gate kmausers(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 41267c478bd9Sstevel@tonic-gate { 41277c478bd9Sstevel@tonic-gate int mem_threshold = 8192; /* Minimum # bytes for printing */ 41287c478bd9Sstevel@tonic-gate int cnt_threshold = 100; /* Minimum # blocks for printing */ 41297c478bd9Sstevel@tonic-gate int audited_caches = 0; /* Number of KMF_AUDIT caches found */ 41307c478bd9Sstevel@tonic-gate int do_all_caches = 1; /* Do all caches (no arguments) */ 41317c478bd9Sstevel@tonic-gate int opt_e = FALSE; /* Include "small" users */ 41327c478bd9Sstevel@tonic-gate int opt_f = FALSE; /* Print stack traces */ 41337c478bd9Sstevel@tonic-gate 41347c478bd9Sstevel@tonic-gate mdb_walk_cb_t callback = (mdb_walk_cb_t)kmause1; 41357c478bd9Sstevel@tonic-gate kmowner_t *kmo, *kmoend; 41367c478bd9Sstevel@tonic-gate int i, oelems; 41377c478bd9Sstevel@tonic-gate 41387c478bd9Sstevel@tonic-gate kmclist_t kmc; 41397c478bd9Sstevel@tonic-gate kmusers_t kmu; 
41407c478bd9Sstevel@tonic-gate 41417c478bd9Sstevel@tonic-gate bzero(&kmc, sizeof (kmc)); 41427c478bd9Sstevel@tonic-gate bzero(&kmu, sizeof (kmu)); 41437c478bd9Sstevel@tonic-gate 41447c478bd9Sstevel@tonic-gate while ((i = mdb_getopts(argc, argv, 41457c478bd9Sstevel@tonic-gate 'e', MDB_OPT_SETBITS, TRUE, &opt_e, 41467c478bd9Sstevel@tonic-gate 'f', MDB_OPT_SETBITS, TRUE, &opt_f, NULL)) != argc) { 41477c478bd9Sstevel@tonic-gate 41487c478bd9Sstevel@tonic-gate argv += i; /* skip past options we just processed */ 41497c478bd9Sstevel@tonic-gate argc -= i; /* adjust argc */ 41507c478bd9Sstevel@tonic-gate 41517c478bd9Sstevel@tonic-gate if (argv->a_type != MDB_TYPE_STRING || *argv->a_un.a_str == '-') 41527c478bd9Sstevel@tonic-gate return (DCMD_USAGE); 41537c478bd9Sstevel@tonic-gate 41547c478bd9Sstevel@tonic-gate oelems = kmc.kmc_nelems; 41557c478bd9Sstevel@tonic-gate kmc.kmc_name = argv->a_un.a_str; 41567c478bd9Sstevel@tonic-gate (void) mdb_walk("kmem_cache", (mdb_walk_cb_t)kmc_add, &kmc); 41577c478bd9Sstevel@tonic-gate 41587c478bd9Sstevel@tonic-gate if (kmc.kmc_nelems == oelems) { 41597c478bd9Sstevel@tonic-gate mdb_warn("unknown kmem cache: %s\n", kmc.kmc_name); 41607c478bd9Sstevel@tonic-gate return (DCMD_ERR); 41617c478bd9Sstevel@tonic-gate } 41627c478bd9Sstevel@tonic-gate 41637c478bd9Sstevel@tonic-gate do_all_caches = 0; 41647c478bd9Sstevel@tonic-gate argv++; 41657c478bd9Sstevel@tonic-gate argc--; 41667c478bd9Sstevel@tonic-gate } 41677c478bd9Sstevel@tonic-gate 41687c478bd9Sstevel@tonic-gate if (flags & DCMD_ADDRSPEC) { 41697c478bd9Sstevel@tonic-gate opt_f = TRUE; 41707c478bd9Sstevel@tonic-gate kmu.kmu_addr = addr; 41717c478bd9Sstevel@tonic-gate } else { 41727c478bd9Sstevel@tonic-gate kmu.kmu_addr = NULL; 41737c478bd9Sstevel@tonic-gate } 41747c478bd9Sstevel@tonic-gate 41757c478bd9Sstevel@tonic-gate if (opt_e) 41767c478bd9Sstevel@tonic-gate mem_threshold = cnt_threshold = 0; 41777c478bd9Sstevel@tonic-gate 41787c478bd9Sstevel@tonic-gate if (opt_f) 
41797c478bd9Sstevel@tonic-gate callback = (mdb_walk_cb_t)kmause2; 41807c478bd9Sstevel@tonic-gate 41817c478bd9Sstevel@tonic-gate if (do_all_caches) { 41827c478bd9Sstevel@tonic-gate kmc.kmc_name = NULL; /* match all cache names */ 41837c478bd9Sstevel@tonic-gate (void) mdb_walk("kmem_cache", (mdb_walk_cb_t)kmc_add, &kmc); 41847c478bd9Sstevel@tonic-gate } 41857c478bd9Sstevel@tonic-gate 41867c478bd9Sstevel@tonic-gate for (i = 0; i < kmc.kmc_nelems; i++) { 41877c478bd9Sstevel@tonic-gate uintptr_t cp = kmc.kmc_caches[i]; 41887c478bd9Sstevel@tonic-gate kmem_cache_t c; 41897c478bd9Sstevel@tonic-gate 41907c478bd9Sstevel@tonic-gate if (mdb_vread(&c, sizeof (c), cp) == -1) { 41917c478bd9Sstevel@tonic-gate mdb_warn("failed to read cache at %p", cp); 41927c478bd9Sstevel@tonic-gate continue; 41937c478bd9Sstevel@tonic-gate } 41947c478bd9Sstevel@tonic-gate 41957c478bd9Sstevel@tonic-gate if (!(c.cache_flags & KMF_AUDIT)) { 41967c478bd9Sstevel@tonic-gate if (!do_all_caches) { 41977c478bd9Sstevel@tonic-gate mdb_warn("KMF_AUDIT is not enabled for %s\n", 41987c478bd9Sstevel@tonic-gate c.cache_name); 41997c478bd9Sstevel@tonic-gate } 42007c478bd9Sstevel@tonic-gate continue; 42017c478bd9Sstevel@tonic-gate } 42027c478bd9Sstevel@tonic-gate 42037c478bd9Sstevel@tonic-gate kmu.kmu_cache = &c; 42047c478bd9Sstevel@tonic-gate (void) mdb_pwalk("bufctl", callback, &kmu, cp); 42057c478bd9Sstevel@tonic-gate audited_caches++; 42067c478bd9Sstevel@tonic-gate } 42077c478bd9Sstevel@tonic-gate 42087c478bd9Sstevel@tonic-gate if (audited_caches == 0 && do_all_caches) { 42097c478bd9Sstevel@tonic-gate mdb_warn("KMF_AUDIT is not enabled for any caches\n"); 42107c478bd9Sstevel@tonic-gate return (DCMD_ERR); 42117c478bd9Sstevel@tonic-gate } 42127c478bd9Sstevel@tonic-gate 42137c478bd9Sstevel@tonic-gate qsort(kmu.kmu_hash, kmu.kmu_nelems, sizeof (kmowner_t), kmownercmp); 42147c478bd9Sstevel@tonic-gate kmoend = kmu.kmu_hash + kmu.kmu_nelems; 42157c478bd9Sstevel@tonic-gate 42167c478bd9Sstevel@tonic-gate for (kmo = 
kmu.kmu_hash; kmo < kmoend; kmo++) { 42177c478bd9Sstevel@tonic-gate if (kmo->kmo_total_size < mem_threshold && 42187c478bd9Sstevel@tonic-gate kmo->kmo_num < cnt_threshold) 42197c478bd9Sstevel@tonic-gate continue; 42207c478bd9Sstevel@tonic-gate mdb_printf("%lu bytes for %u allocations with data size %lu:\n", 42217c478bd9Sstevel@tonic-gate kmo->kmo_total_size, kmo->kmo_num, kmo->kmo_data_size); 42227c478bd9Sstevel@tonic-gate for (i = 0; i < kmo->kmo_depth; i++) 42237c478bd9Sstevel@tonic-gate mdb_printf("\t %a\n", kmo->kmo_stack[i]); 42247c478bd9Sstevel@tonic-gate } 42257c478bd9Sstevel@tonic-gate 42267c478bd9Sstevel@tonic-gate return (DCMD_OK); 42277c478bd9Sstevel@tonic-gate } 42287c478bd9Sstevel@tonic-gate 42297c478bd9Sstevel@tonic-gate void 42307c478bd9Sstevel@tonic-gate kmausers_help(void) 42317c478bd9Sstevel@tonic-gate { 42327c478bd9Sstevel@tonic-gate mdb_printf( 42337c478bd9Sstevel@tonic-gate "Displays the largest users of the kmem allocator, sorted by \n" 42347c478bd9Sstevel@tonic-gate "trace. If one or more caches is specified, only those caches\n" 42357c478bd9Sstevel@tonic-gate "will be searched. By default, all caches are searched. If an\n" 42367c478bd9Sstevel@tonic-gate "address is specified, then only those allocations which include\n" 42377c478bd9Sstevel@tonic-gate "the given address are displayed. Specifying an address implies\n" 42387c478bd9Sstevel@tonic-gate "-f.\n" 42397c478bd9Sstevel@tonic-gate "\n" 42407c478bd9Sstevel@tonic-gate "\t-e\tInclude all users, not just the largest\n" 42417c478bd9Sstevel@tonic-gate "\t-f\tDisplay individual allocations. 
By default, users are\n" 42427c478bd9Sstevel@tonic-gate "\t\tgrouped by stack\n"); 42437c478bd9Sstevel@tonic-gate } 42447c478bd9Sstevel@tonic-gate 42457c478bd9Sstevel@tonic-gate static int 42467c478bd9Sstevel@tonic-gate kmem_ready_check(void) 42477c478bd9Sstevel@tonic-gate { 42487c478bd9Sstevel@tonic-gate int ready; 42497c478bd9Sstevel@tonic-gate 42507c478bd9Sstevel@tonic-gate if (mdb_readvar(&ready, "kmem_ready") < 0) 42517c478bd9Sstevel@tonic-gate return (-1); /* errno is set for us */ 42527c478bd9Sstevel@tonic-gate 42537c478bd9Sstevel@tonic-gate return (ready); 42547c478bd9Sstevel@tonic-gate } 42557c478bd9Sstevel@tonic-gate 4256*346799e8SJonathan W Adams void 4257*346799e8SJonathan W Adams kmem_statechange(void) 42587c478bd9Sstevel@tonic-gate { 4259789d94c2Sjwadams static int been_ready = 0; 4260789d94c2Sjwadams 4261789d94c2Sjwadams if (been_ready) 42627c478bd9Sstevel@tonic-gate return; 42637c478bd9Sstevel@tonic-gate 4264789d94c2Sjwadams if (kmem_ready_check() <= 0) 4265789d94c2Sjwadams return; 42667c478bd9Sstevel@tonic-gate 4267789d94c2Sjwadams been_ready = 1; 42687c478bd9Sstevel@tonic-gate (void) mdb_walk("kmem_cache", (mdb_walk_cb_t)kmem_init_walkers, NULL); 42697c478bd9Sstevel@tonic-gate } 42707c478bd9Sstevel@tonic-gate 42717c478bd9Sstevel@tonic-gate void 42727c478bd9Sstevel@tonic-gate kmem_init(void) 42737c478bd9Sstevel@tonic-gate { 42747c478bd9Sstevel@tonic-gate mdb_walker_t w = { 42757c478bd9Sstevel@tonic-gate "kmem_cache", "walk list of kmem caches", kmem_cache_walk_init, 4276b5fca8f8Stomee list_walk_step, list_walk_fini 42777c478bd9Sstevel@tonic-gate }; 42787c478bd9Sstevel@tonic-gate 42797c478bd9Sstevel@tonic-gate /* 42807c478bd9Sstevel@tonic-gate * If kmem is ready, we'll need to invoke the kmem_cache walker 42817c478bd9Sstevel@tonic-gate * immediately. Walkers in the linkage structure won't be ready until 42827c478bd9Sstevel@tonic-gate * _mdb_init returns, so we'll need to add this one manually. 
If kmem 42837c478bd9Sstevel@tonic-gate * is ready, we'll use the walker to initialize the caches. If kmem 42847c478bd9Sstevel@tonic-gate * isn't ready, we'll register a callback that will allow us to defer 42857c478bd9Sstevel@tonic-gate * cache walking until it is. 42867c478bd9Sstevel@tonic-gate */ 42877c478bd9Sstevel@tonic-gate if (mdb_add_walker(&w) != 0) { 42887c478bd9Sstevel@tonic-gate mdb_warn("failed to add kmem_cache walker"); 42897c478bd9Sstevel@tonic-gate return; 42907c478bd9Sstevel@tonic-gate } 42917c478bd9Sstevel@tonic-gate 4292*346799e8SJonathan W Adams kmem_statechange(); 42937c478bd9Sstevel@tonic-gate } 42947c478bd9Sstevel@tonic-gate 42957c478bd9Sstevel@tonic-gate typedef struct whatthread { 42967c478bd9Sstevel@tonic-gate uintptr_t wt_target; 42977c478bd9Sstevel@tonic-gate int wt_verbose; 42987c478bd9Sstevel@tonic-gate } whatthread_t; 42997c478bd9Sstevel@tonic-gate 43007c478bd9Sstevel@tonic-gate static int 43017c478bd9Sstevel@tonic-gate whatthread_walk_thread(uintptr_t addr, const kthread_t *t, whatthread_t *w) 43027c478bd9Sstevel@tonic-gate { 43037c478bd9Sstevel@tonic-gate uintptr_t current, data; 43047c478bd9Sstevel@tonic-gate 43057c478bd9Sstevel@tonic-gate if (t->t_stkbase == NULL) 43067c478bd9Sstevel@tonic-gate return (WALK_NEXT); 43077c478bd9Sstevel@tonic-gate 43087c478bd9Sstevel@tonic-gate /* 43097c478bd9Sstevel@tonic-gate * Warn about swapped out threads, but drive on anyway 43107c478bd9Sstevel@tonic-gate */ 43117c478bd9Sstevel@tonic-gate if (!(t->t_schedflag & TS_LOAD)) { 43127c478bd9Sstevel@tonic-gate mdb_warn("thread %p's stack swapped out\n", addr); 43137c478bd9Sstevel@tonic-gate return (WALK_NEXT); 43147c478bd9Sstevel@tonic-gate } 43157c478bd9Sstevel@tonic-gate 43167c478bd9Sstevel@tonic-gate /* 43177c478bd9Sstevel@tonic-gate * Search the thread's stack for the given pointer. 
Note that it would 43187c478bd9Sstevel@tonic-gate * be more efficient to follow ::kgrep's lead and read in page-sized 43197c478bd9Sstevel@tonic-gate * chunks, but this routine is already fast and simple. 43207c478bd9Sstevel@tonic-gate */ 43217c478bd9Sstevel@tonic-gate for (current = (uintptr_t)t->t_stkbase; current < (uintptr_t)t->t_stk; 43227c478bd9Sstevel@tonic-gate current += sizeof (uintptr_t)) { 43237c478bd9Sstevel@tonic-gate if (mdb_vread(&data, sizeof (data), current) == -1) { 43247c478bd9Sstevel@tonic-gate mdb_warn("couldn't read thread %p's stack at %p", 43257c478bd9Sstevel@tonic-gate addr, current); 43267c478bd9Sstevel@tonic-gate return (WALK_ERR); 43277c478bd9Sstevel@tonic-gate } 43287c478bd9Sstevel@tonic-gate 43297c478bd9Sstevel@tonic-gate if (data == w->wt_target) { 43307c478bd9Sstevel@tonic-gate if (w->wt_verbose) { 43317c478bd9Sstevel@tonic-gate mdb_printf("%p in thread %p's stack%s\n", 43327c478bd9Sstevel@tonic-gate current, addr, stack_active(t, current)); 43337c478bd9Sstevel@tonic-gate } else { 43347c478bd9Sstevel@tonic-gate mdb_printf("%#lr\n", addr); 43357c478bd9Sstevel@tonic-gate return (WALK_NEXT); 43367c478bd9Sstevel@tonic-gate } 43377c478bd9Sstevel@tonic-gate } 43387c478bd9Sstevel@tonic-gate } 43397c478bd9Sstevel@tonic-gate 43407c478bd9Sstevel@tonic-gate return (WALK_NEXT); 43417c478bd9Sstevel@tonic-gate } 43427c478bd9Sstevel@tonic-gate 43437c478bd9Sstevel@tonic-gate int 43447c478bd9Sstevel@tonic-gate whatthread(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 43457c478bd9Sstevel@tonic-gate { 43467c478bd9Sstevel@tonic-gate whatthread_t w; 43477c478bd9Sstevel@tonic-gate 43487c478bd9Sstevel@tonic-gate if (!(flags & DCMD_ADDRSPEC)) 43497c478bd9Sstevel@tonic-gate return (DCMD_USAGE); 43507c478bd9Sstevel@tonic-gate 43517c478bd9Sstevel@tonic-gate w.wt_verbose = FALSE; 43527c478bd9Sstevel@tonic-gate w.wt_target = addr; 43537c478bd9Sstevel@tonic-gate 43547c478bd9Sstevel@tonic-gate if (mdb_getopts(argc, argv, 
43557c478bd9Sstevel@tonic-gate 'v', MDB_OPT_SETBITS, TRUE, &w.wt_verbose, NULL) != argc) 43567c478bd9Sstevel@tonic-gate return (DCMD_USAGE); 43577c478bd9Sstevel@tonic-gate 43587c478bd9Sstevel@tonic-gate if (mdb_walk("thread", (mdb_walk_cb_t)whatthread_walk_thread, &w) 43597c478bd9Sstevel@tonic-gate == -1) { 43607c478bd9Sstevel@tonic-gate mdb_warn("couldn't walk threads"); 43617c478bd9Sstevel@tonic-gate return (DCMD_ERR); 43627c478bd9Sstevel@tonic-gate } 43637c478bd9Sstevel@tonic-gate 43647c478bd9Sstevel@tonic-gate return (DCMD_OK); 43657c478bd9Sstevel@tonic-gate } 4366