/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2018, Joyent, Inc.
 * Copyright (c) 2013, 2015 by Delphix. All rights reserved.
29d7dba7e5SBryan Cantrill */ 30d7dba7e5SBryan Cantrill 317c478bd9Sstevel@tonic-gate #include "umem.h" 327c478bd9Sstevel@tonic-gate 337c478bd9Sstevel@tonic-gate #include <sys/vmem_impl_user.h> 347c478bd9Sstevel@tonic-gate #include <umem_impl.h> 357c478bd9Sstevel@tonic-gate 367c478bd9Sstevel@tonic-gate #include <alloca.h> 37789d94c2Sjwadams #include <limits.h> 384a1c2431SJonathan Adams #include <mdb/mdb_whatis.h> 394f364e7cSRobert Mustacchi #include <thr_uberdata.h> 407c478bd9Sstevel@tonic-gate 417c478bd9Sstevel@tonic-gate #include "misc.h" 42789d94c2Sjwadams #include "leaky.h" 43087e1372Stomee #include "dist.h" 447c478bd9Sstevel@tonic-gate 457c478bd9Sstevel@tonic-gate #include "umem_pagesize.h" 467c478bd9Sstevel@tonic-gate 477c478bd9Sstevel@tonic-gate #define UM_ALLOCATED 0x1 487c478bd9Sstevel@tonic-gate #define UM_FREE 0x2 497c478bd9Sstevel@tonic-gate #define UM_BUFCTL 0x4 507c478bd9Sstevel@tonic-gate #define UM_HASH 0x8 517c478bd9Sstevel@tonic-gate 52789d94c2Sjwadams int umem_ready; 537c478bd9Sstevel@tonic-gate 54789d94c2Sjwadams static int umem_stack_depth_warned; 55789d94c2Sjwadams static uint32_t umem_max_ncpus; 567c478bd9Sstevel@tonic-gate uint32_t umem_stack_depth; 57789d94c2Sjwadams 587c478bd9Sstevel@tonic-gate size_t umem_pagesize; 597c478bd9Sstevel@tonic-gate 607c478bd9Sstevel@tonic-gate #define UMEM_READVAR(var) \ 617c478bd9Sstevel@tonic-gate (umem_readvar(&(var), #var) == -1 && \ 62789d94c2Sjwadams (mdb_warn("failed to read "#var), 1)) 637c478bd9Sstevel@tonic-gate 647c478bd9Sstevel@tonic-gate int 65789d94c2Sjwadams umem_update_variables(void) 667c478bd9Sstevel@tonic-gate { 677c478bd9Sstevel@tonic-gate size_t pagesize; 687c478bd9Sstevel@tonic-gate 697c478bd9Sstevel@tonic-gate /* 70789d94c2Sjwadams * Figure out which type of umem is being used; if it's not there 71789d94c2Sjwadams * yet, succeed quietly. 
727c478bd9Sstevel@tonic-gate */ 73789d94c2Sjwadams if (umem_set_standalone() == -1) { 74789d94c2Sjwadams umem_ready = 0; 75789d94c2Sjwadams return (0); /* umem not there yet */ 76789d94c2Sjwadams } 777c478bd9Sstevel@tonic-gate 78789d94c2Sjwadams /* 79789d94c2Sjwadams * Solaris 9 used a different name for umem_max_ncpus. It's 80789d94c2Sjwadams * cheap backwards compatibility to check for both names. 81789d94c2Sjwadams */ 82789d94c2Sjwadams if (umem_readvar(&umem_max_ncpus, "umem_max_ncpus") == -1 && 83789d94c2Sjwadams umem_readvar(&umem_max_ncpus, "max_ncpus") == -1) { 84789d94c2Sjwadams mdb_warn("unable to read umem_max_ncpus or max_ncpus"); 85789d94c2Sjwadams return (-1); 86789d94c2Sjwadams } 87789d94c2Sjwadams if (UMEM_READVAR(umem_ready)) 887c478bd9Sstevel@tonic-gate return (-1); 897c478bd9Sstevel@tonic-gate if (UMEM_READVAR(umem_stack_depth)) 907c478bd9Sstevel@tonic-gate return (-1); 917c478bd9Sstevel@tonic-gate if (UMEM_READVAR(pagesize)) 927c478bd9Sstevel@tonic-gate return (-1); 937c478bd9Sstevel@tonic-gate 947c478bd9Sstevel@tonic-gate if (umem_stack_depth > UMEM_MAX_STACK_DEPTH) { 95789d94c2Sjwadams if (umem_stack_depth_warned == 0) { 96789d94c2Sjwadams mdb_warn("umem_stack_depth corrupted (%d > %d)\n", 97789d94c2Sjwadams umem_stack_depth, UMEM_MAX_STACK_DEPTH); 98789d94c2Sjwadams umem_stack_depth_warned = 1; 99789d94c2Sjwadams } 1007c478bd9Sstevel@tonic-gate umem_stack_depth = 0; 1017c478bd9Sstevel@tonic-gate } 102789d94c2Sjwadams 103789d94c2Sjwadams umem_pagesize = pagesize; 104789d94c2Sjwadams 1057c478bd9Sstevel@tonic-gate return (0); 1067c478bd9Sstevel@tonic-gate } 1077c478bd9Sstevel@tonic-gate 1084f364e7cSRobert Mustacchi static int 1094f364e7cSRobert Mustacchi umem_ptc_walk_init(mdb_walk_state_t *wsp) 1104f364e7cSRobert Mustacchi { 111*892ad162SToomas Soome if (wsp->walk_addr == 0) { 1124f364e7cSRobert Mustacchi if (mdb_layered_walk("ulwp", wsp) == -1) { 1134f364e7cSRobert Mustacchi mdb_warn("couldn't walk 'ulwp'"); 1144f364e7cSRobert Mustacchi return 
(WALK_ERR); 1154f364e7cSRobert Mustacchi } 1164f364e7cSRobert Mustacchi } 1174f364e7cSRobert Mustacchi 1184f364e7cSRobert Mustacchi return (WALK_NEXT); 1194f364e7cSRobert Mustacchi } 1204f364e7cSRobert Mustacchi 1214f364e7cSRobert Mustacchi static int 1224f364e7cSRobert Mustacchi umem_ptc_walk_step(mdb_walk_state_t *wsp) 1234f364e7cSRobert Mustacchi { 1244f364e7cSRobert Mustacchi uintptr_t this; 1254f364e7cSRobert Mustacchi int rval; 1264f364e7cSRobert Mustacchi 1274f364e7cSRobert Mustacchi if (wsp->walk_layer != NULL) { 1284f364e7cSRobert Mustacchi this = (uintptr_t)((ulwp_t *)wsp->walk_layer)->ul_self + 1294f364e7cSRobert Mustacchi (uintptr_t)wsp->walk_arg; 1304f364e7cSRobert Mustacchi } else { 1314f364e7cSRobert Mustacchi this = wsp->walk_addr + (uintptr_t)wsp->walk_arg; 1324f364e7cSRobert Mustacchi } 1334f364e7cSRobert Mustacchi 1344f364e7cSRobert Mustacchi for (;;) { 1354f364e7cSRobert Mustacchi if (mdb_vread(&this, sizeof (void *), this) == -1) { 1364f364e7cSRobert Mustacchi mdb_warn("couldn't read ptc buffer at %p", this); 1374f364e7cSRobert Mustacchi return (WALK_ERR); 1384f364e7cSRobert Mustacchi } 1394f364e7cSRobert Mustacchi 140*892ad162SToomas Soome if (this == 0) 1414f364e7cSRobert Mustacchi break; 1424f364e7cSRobert Mustacchi 1434f364e7cSRobert Mustacchi rval = wsp->walk_callback(this, &this, wsp->walk_cbdata); 1444f364e7cSRobert Mustacchi 1454f364e7cSRobert Mustacchi if (rval != WALK_NEXT) 1464f364e7cSRobert Mustacchi return (rval); 1474f364e7cSRobert Mustacchi } 1484f364e7cSRobert Mustacchi 1494f364e7cSRobert Mustacchi return (wsp->walk_layer != NULL ? 
WALK_NEXT : WALK_DONE); 1504f364e7cSRobert Mustacchi } 1514f364e7cSRobert Mustacchi 1527c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 153789d94c2Sjwadams static int 1544f364e7cSRobert Mustacchi umem_init_walkers(uintptr_t addr, const umem_cache_t *c, int *sizes) 1557c478bd9Sstevel@tonic-gate { 1567c478bd9Sstevel@tonic-gate mdb_walker_t w; 1577c478bd9Sstevel@tonic-gate char descr[64]; 1584f364e7cSRobert Mustacchi char name[64]; 1594f364e7cSRobert Mustacchi int i; 1607c478bd9Sstevel@tonic-gate 1617c478bd9Sstevel@tonic-gate (void) mdb_snprintf(descr, sizeof (descr), 1627c478bd9Sstevel@tonic-gate "walk the %s cache", c->cache_name); 1637c478bd9Sstevel@tonic-gate 1647c478bd9Sstevel@tonic-gate w.walk_name = c->cache_name; 1657c478bd9Sstevel@tonic-gate w.walk_descr = descr; 1667c478bd9Sstevel@tonic-gate w.walk_init = umem_walk_init; 1677c478bd9Sstevel@tonic-gate w.walk_step = umem_walk_step; 1687c478bd9Sstevel@tonic-gate w.walk_fini = umem_walk_fini; 1697c478bd9Sstevel@tonic-gate w.walk_init_arg = (void *)addr; 1707c478bd9Sstevel@tonic-gate 1717c478bd9Sstevel@tonic-gate if (mdb_add_walker(&w) == -1) 1727c478bd9Sstevel@tonic-gate mdb_warn("failed to add %s walker", c->cache_name); 1737c478bd9Sstevel@tonic-gate 1744f364e7cSRobert Mustacchi if (!(c->cache_flags & UMF_PTC)) 1754f364e7cSRobert Mustacchi return (WALK_NEXT); 1764f364e7cSRobert Mustacchi 1774f364e7cSRobert Mustacchi /* 1784f364e7cSRobert Mustacchi * For the per-thread cache walker, the address is the offset in the 1794f364e7cSRobert Mustacchi * tm_roots[] array of the ulwp_t. 
1804f364e7cSRobert Mustacchi */ 1814f364e7cSRobert Mustacchi for (i = 0; sizes[i] != 0; i++) { 1824f364e7cSRobert Mustacchi if (sizes[i] == c->cache_bufsize) 1834f364e7cSRobert Mustacchi break; 1844f364e7cSRobert Mustacchi } 1854f364e7cSRobert Mustacchi 1864f364e7cSRobert Mustacchi if (sizes[i] == 0) { 1874f364e7cSRobert Mustacchi mdb_warn("cache %s is cached per-thread, but could not find " 1884f364e7cSRobert Mustacchi "size in umem_alloc_sizes\n", c->cache_name); 1894f364e7cSRobert Mustacchi return (WALK_NEXT); 1904f364e7cSRobert Mustacchi } 1914f364e7cSRobert Mustacchi 1924f364e7cSRobert Mustacchi if (i >= NTMEMBASE) { 1934f364e7cSRobert Mustacchi mdb_warn("index for %s (%d) exceeds root slots (%d)\n", 1944f364e7cSRobert Mustacchi c->cache_name, i, NTMEMBASE); 1954f364e7cSRobert Mustacchi return (WALK_NEXT); 1964f364e7cSRobert Mustacchi } 1974f364e7cSRobert Mustacchi 1984f364e7cSRobert Mustacchi (void) mdb_snprintf(name, sizeof (name), 1994f364e7cSRobert Mustacchi "umem_ptc_%d", c->cache_bufsize); 2004f364e7cSRobert Mustacchi (void) mdb_snprintf(descr, sizeof (descr), 2014f364e7cSRobert Mustacchi "walk the per-thread cache for %s", c->cache_name); 2024f364e7cSRobert Mustacchi 2034f364e7cSRobert Mustacchi w.walk_name = name; 2044f364e7cSRobert Mustacchi w.walk_descr = descr; 2054f364e7cSRobert Mustacchi w.walk_init = umem_ptc_walk_init; 2064f364e7cSRobert Mustacchi w.walk_step = umem_ptc_walk_step; 2074f364e7cSRobert Mustacchi w.walk_fini = NULL; 2084f364e7cSRobert Mustacchi w.walk_init_arg = (void *)offsetof(ulwp_t, ul_tmem.tm_roots[i]); 2094f364e7cSRobert Mustacchi 2104f364e7cSRobert Mustacchi if (mdb_add_walker(&w) == -1) 2114f364e7cSRobert Mustacchi mdb_warn("failed to add %s walker", w.walk_name); 2124f364e7cSRobert Mustacchi 2137c478bd9Sstevel@tonic-gate return (WALK_NEXT); 2147c478bd9Sstevel@tonic-gate } 2157c478bd9Sstevel@tonic-gate 216789d94c2Sjwadams /*ARGSUSED*/ 217789d94c2Sjwadams static void 218789d94c2Sjwadams umem_statechange_cb(void *arg) 
219789d94c2Sjwadams { 220789d94c2Sjwadams static int been_ready = 0; 2214f364e7cSRobert Mustacchi GElf_Sym sym; 2224f364e7cSRobert Mustacchi int *sizes; 223789d94c2Sjwadams 224789d94c2Sjwadams #ifndef _KMDB 225789d94c2Sjwadams leaky_cleanup(1); /* state changes invalidate leaky state */ 226789d94c2Sjwadams #endif 227789d94c2Sjwadams 228789d94c2Sjwadams if (umem_update_variables() == -1) 229789d94c2Sjwadams return; 230789d94c2Sjwadams 231789d94c2Sjwadams if (been_ready) 232789d94c2Sjwadams return; 233789d94c2Sjwadams 234789d94c2Sjwadams if (umem_ready != UMEM_READY) 235789d94c2Sjwadams return; 236789d94c2Sjwadams 237789d94c2Sjwadams been_ready = 1; 2384f364e7cSRobert Mustacchi 2394f364e7cSRobert Mustacchi /* 2404f364e7cSRobert Mustacchi * In order to determine the tm_roots offset of any cache that is 2414f364e7cSRobert Mustacchi * cached per-thread, we need to have the umem_alloc_sizes array. 2424f364e7cSRobert Mustacchi * Read this, assuring that it is zero-terminated. 2434f364e7cSRobert Mustacchi */ 2444f364e7cSRobert Mustacchi if (umem_lookup_by_name("umem_alloc_sizes", &sym) == -1) { 2454f364e7cSRobert Mustacchi mdb_warn("unable to lookup 'umem_alloc_sizes'"); 2464f364e7cSRobert Mustacchi return; 2474f364e7cSRobert Mustacchi } 2484f364e7cSRobert Mustacchi 2494f364e7cSRobert Mustacchi sizes = mdb_zalloc(sym.st_size + sizeof (int), UM_SLEEP | UM_GC); 2504f364e7cSRobert Mustacchi 2514f364e7cSRobert Mustacchi if (mdb_vread(sizes, sym.st_size, (uintptr_t)sym.st_value) == -1) { 2524f364e7cSRobert Mustacchi mdb_warn("couldn't read 'umem_alloc_sizes'"); 2534f364e7cSRobert Mustacchi return; 2544f364e7cSRobert Mustacchi } 2554f364e7cSRobert Mustacchi 2564f364e7cSRobert Mustacchi (void) mdb_walk("umem_cache", (mdb_walk_cb_t)umem_init_walkers, sizes); 257789d94c2Sjwadams } 258789d94c2Sjwadams 2597c478bd9Sstevel@tonic-gate int 2607c478bd9Sstevel@tonic-gate umem_abort_messages(void) 2617c478bd9Sstevel@tonic-gate { 2627c478bd9Sstevel@tonic-gate char *umem_error_buffer; 
2637c478bd9Sstevel@tonic-gate uint_t umem_error_begin; 2647c478bd9Sstevel@tonic-gate GElf_Sym sym; 2657c478bd9Sstevel@tonic-gate size_t bufsize; 2667c478bd9Sstevel@tonic-gate 2677c478bd9Sstevel@tonic-gate if (UMEM_READVAR(umem_error_begin)) 2687c478bd9Sstevel@tonic-gate return (DCMD_ERR); 2697c478bd9Sstevel@tonic-gate 2707c478bd9Sstevel@tonic-gate if (umem_lookup_by_name("umem_error_buffer", &sym) == -1) { 2717c478bd9Sstevel@tonic-gate mdb_warn("unable to look up umem_error_buffer"); 2727c478bd9Sstevel@tonic-gate return (DCMD_ERR); 2737c478bd9Sstevel@tonic-gate } 2747c478bd9Sstevel@tonic-gate 2757c478bd9Sstevel@tonic-gate bufsize = (size_t)sym.st_size; 2767c478bd9Sstevel@tonic-gate 2777c478bd9Sstevel@tonic-gate umem_error_buffer = mdb_alloc(bufsize+1, UM_SLEEP | UM_GC); 2787c478bd9Sstevel@tonic-gate 2797c478bd9Sstevel@tonic-gate if (mdb_vread(umem_error_buffer, bufsize, (uintptr_t)sym.st_value) 2807c478bd9Sstevel@tonic-gate != bufsize) { 2817c478bd9Sstevel@tonic-gate mdb_warn("unable to read umem_error_buffer"); 2827c478bd9Sstevel@tonic-gate return (DCMD_ERR); 2837c478bd9Sstevel@tonic-gate } 2847c478bd9Sstevel@tonic-gate /* put a zero after the end of the buffer to simplify printing */ 2857c478bd9Sstevel@tonic-gate umem_error_buffer[bufsize] = 0; 2867c478bd9Sstevel@tonic-gate 2877c478bd9Sstevel@tonic-gate if ((umem_error_begin % bufsize) == 0) 2887c478bd9Sstevel@tonic-gate mdb_printf("%s\n", umem_error_buffer); 2897c478bd9Sstevel@tonic-gate else { 2907c478bd9Sstevel@tonic-gate umem_error_buffer[(umem_error_begin % bufsize) - 1] = 0; 2917c478bd9Sstevel@tonic-gate mdb_printf("%s%s\n", 2927c478bd9Sstevel@tonic-gate &umem_error_buffer[umem_error_begin % bufsize], 2937c478bd9Sstevel@tonic-gate umem_error_buffer); 2947c478bd9Sstevel@tonic-gate } 2957c478bd9Sstevel@tonic-gate 2967c478bd9Sstevel@tonic-gate return (DCMD_OK); 2977c478bd9Sstevel@tonic-gate } 2987c478bd9Sstevel@tonic-gate 2997c478bd9Sstevel@tonic-gate static void 3007c478bd9Sstevel@tonic-gate 
umem_log_status(const char *name, umem_log_header_t *val) 3017c478bd9Sstevel@tonic-gate { 3027c478bd9Sstevel@tonic-gate umem_log_header_t my_lh; 3037c478bd9Sstevel@tonic-gate uintptr_t pos = (uintptr_t)val; 3047c478bd9Sstevel@tonic-gate size_t size; 3057c478bd9Sstevel@tonic-gate 306*892ad162SToomas Soome if (pos == 0) 3077c478bd9Sstevel@tonic-gate return; 3087c478bd9Sstevel@tonic-gate 3097c478bd9Sstevel@tonic-gate if (mdb_vread(&my_lh, sizeof (umem_log_header_t), pos) == -1) { 3107c478bd9Sstevel@tonic-gate mdb_warn("\nunable to read umem_%s_log pointer %p", 3117c478bd9Sstevel@tonic-gate name, pos); 3127c478bd9Sstevel@tonic-gate return; 3137c478bd9Sstevel@tonic-gate } 3147c478bd9Sstevel@tonic-gate 3157c478bd9Sstevel@tonic-gate size = my_lh.lh_chunksize * my_lh.lh_nchunks; 3167c478bd9Sstevel@tonic-gate 3177c478bd9Sstevel@tonic-gate if (size % (1024 * 1024) == 0) 3187c478bd9Sstevel@tonic-gate mdb_printf("%s=%dm ", name, size / (1024 * 1024)); 3197c478bd9Sstevel@tonic-gate else if (size % 1024 == 0) 3207c478bd9Sstevel@tonic-gate mdb_printf("%s=%dk ", name, size / 1024); 3217c478bd9Sstevel@tonic-gate else 3227c478bd9Sstevel@tonic-gate mdb_printf("%s=%d ", name, size); 3237c478bd9Sstevel@tonic-gate } 3247c478bd9Sstevel@tonic-gate 3257c478bd9Sstevel@tonic-gate typedef struct umem_debug_flags { 3267c478bd9Sstevel@tonic-gate const char *udf_name; 3277c478bd9Sstevel@tonic-gate uint_t udf_flags; 3287c478bd9Sstevel@tonic-gate uint_t udf_clear; /* if 0, uses udf_flags */ 3297c478bd9Sstevel@tonic-gate } umem_debug_flags_t; 3307c478bd9Sstevel@tonic-gate 3317c478bd9Sstevel@tonic-gate umem_debug_flags_t umem_status_flags[] = { 3327c478bd9Sstevel@tonic-gate { "random", UMF_RANDOMIZE, UMF_RANDOM }, 3337c478bd9Sstevel@tonic-gate { "default", UMF_AUDIT | UMF_DEADBEEF | UMF_REDZONE | UMF_CONTENTS }, 3347c478bd9Sstevel@tonic-gate { "audit", UMF_AUDIT }, 3357c478bd9Sstevel@tonic-gate { "guards", UMF_DEADBEEF | UMF_REDZONE }, 3367c478bd9Sstevel@tonic-gate { "nosignal", UMF_CHECKSIGNAL }, 
3377c478bd9Sstevel@tonic-gate { "firewall", UMF_FIREWALL }, 3387c478bd9Sstevel@tonic-gate { "lite", UMF_LITE }, 339831abf2cSDan Kimmel { "checknull", UMF_CHECKNULL }, 3407c478bd9Sstevel@tonic-gate { NULL } 3417c478bd9Sstevel@tonic-gate }; 3427c478bd9Sstevel@tonic-gate 3437c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 3447c478bd9Sstevel@tonic-gate int 3457c478bd9Sstevel@tonic-gate umem_status(uintptr_t addr, uint_t flags, int ac, const mdb_arg_t *argv) 3467c478bd9Sstevel@tonic-gate { 3477c478bd9Sstevel@tonic-gate int umem_logging; 3487c478bd9Sstevel@tonic-gate 3497c478bd9Sstevel@tonic-gate umem_log_header_t *umem_transaction_log; 3507c478bd9Sstevel@tonic-gate umem_log_header_t *umem_content_log; 3517c478bd9Sstevel@tonic-gate umem_log_header_t *umem_failure_log; 3527c478bd9Sstevel@tonic-gate umem_log_header_t *umem_slab_log; 3537c478bd9Sstevel@tonic-gate 3547c478bd9Sstevel@tonic-gate mdb_printf("Status:\t\t%s\n", 3557c478bd9Sstevel@tonic-gate umem_ready == UMEM_READY_INIT_FAILED ? "initialization failed" : 3567c478bd9Sstevel@tonic-gate umem_ready == UMEM_READY_STARTUP ? "uninitialized" : 3577c478bd9Sstevel@tonic-gate umem_ready == UMEM_READY_INITING ? "initialization in process" : 3587c478bd9Sstevel@tonic-gate umem_ready == UMEM_READY ? "ready and active" : 359789d94c2Sjwadams umem_ready == 0 ? 
"not loaded into address space" : 3607c478bd9Sstevel@tonic-gate "unknown (umem_ready invalid)"); 3617c478bd9Sstevel@tonic-gate 362789d94c2Sjwadams if (umem_ready == 0) 363789d94c2Sjwadams return (DCMD_OK); 364789d94c2Sjwadams 3657c478bd9Sstevel@tonic-gate mdb_printf("Concurrency:\t%d\n", umem_max_ncpus); 3667c478bd9Sstevel@tonic-gate 3677c478bd9Sstevel@tonic-gate if (UMEM_READVAR(umem_logging)) 3687c478bd9Sstevel@tonic-gate goto err; 3697c478bd9Sstevel@tonic-gate if (UMEM_READVAR(umem_transaction_log)) 3707c478bd9Sstevel@tonic-gate goto err; 3717c478bd9Sstevel@tonic-gate if (UMEM_READVAR(umem_content_log)) 3727c478bd9Sstevel@tonic-gate goto err; 3737c478bd9Sstevel@tonic-gate if (UMEM_READVAR(umem_failure_log)) 3747c478bd9Sstevel@tonic-gate goto err; 3757c478bd9Sstevel@tonic-gate if (UMEM_READVAR(umem_slab_log)) 3767c478bd9Sstevel@tonic-gate goto err; 3777c478bd9Sstevel@tonic-gate 3787c478bd9Sstevel@tonic-gate mdb_printf("Logs:\t\t"); 3797c478bd9Sstevel@tonic-gate umem_log_status("transaction", umem_transaction_log); 3807c478bd9Sstevel@tonic-gate umem_log_status("content", umem_content_log); 3817c478bd9Sstevel@tonic-gate umem_log_status("fail", umem_failure_log); 3827c478bd9Sstevel@tonic-gate umem_log_status("slab", umem_slab_log); 3837c478bd9Sstevel@tonic-gate if (!umem_logging) 3847c478bd9Sstevel@tonic-gate mdb_printf("(inactive)"); 3857c478bd9Sstevel@tonic-gate mdb_printf("\n"); 3867c478bd9Sstevel@tonic-gate 3877c478bd9Sstevel@tonic-gate mdb_printf("Message buffer:\n"); 3887c478bd9Sstevel@tonic-gate return (umem_abort_messages()); 3897c478bd9Sstevel@tonic-gate 3907c478bd9Sstevel@tonic-gate err: 3917c478bd9Sstevel@tonic-gate mdb_printf("Message buffer:\n"); 3927c478bd9Sstevel@tonic-gate (void) umem_abort_messages(); 3937c478bd9Sstevel@tonic-gate return (DCMD_ERR); 3947c478bd9Sstevel@tonic-gate } 3957c478bd9Sstevel@tonic-gate 3967c478bd9Sstevel@tonic-gate typedef struct { 3977c478bd9Sstevel@tonic-gate uintptr_t ucw_first; 3987c478bd9Sstevel@tonic-gate uintptr_t 
ucw_current; 3997c478bd9Sstevel@tonic-gate } umem_cache_walk_t; 4007c478bd9Sstevel@tonic-gate 4017c478bd9Sstevel@tonic-gate int 4027c478bd9Sstevel@tonic-gate umem_cache_walk_init(mdb_walk_state_t *wsp) 4037c478bd9Sstevel@tonic-gate { 4047c478bd9Sstevel@tonic-gate umem_cache_walk_t *ucw; 4057c478bd9Sstevel@tonic-gate umem_cache_t c; 4067c478bd9Sstevel@tonic-gate uintptr_t cp; 4077c478bd9Sstevel@tonic-gate GElf_Sym sym; 4087c478bd9Sstevel@tonic-gate 4097c478bd9Sstevel@tonic-gate if (umem_lookup_by_name("umem_null_cache", &sym) == -1) { 4107c478bd9Sstevel@tonic-gate mdb_warn("couldn't find umem_null_cache"); 4117c478bd9Sstevel@tonic-gate return (WALK_ERR); 4127c478bd9Sstevel@tonic-gate } 4137c478bd9Sstevel@tonic-gate 4147c478bd9Sstevel@tonic-gate cp = (uintptr_t)sym.st_value; 4157c478bd9Sstevel@tonic-gate 4167c478bd9Sstevel@tonic-gate if (mdb_vread(&c, sizeof (umem_cache_t), cp) == -1) { 4177c478bd9Sstevel@tonic-gate mdb_warn("couldn't read cache at %p", cp); 4187c478bd9Sstevel@tonic-gate return (WALK_ERR); 4197c478bd9Sstevel@tonic-gate } 4207c478bd9Sstevel@tonic-gate 4217c478bd9Sstevel@tonic-gate ucw = mdb_alloc(sizeof (umem_cache_walk_t), UM_SLEEP); 4227c478bd9Sstevel@tonic-gate 4237c478bd9Sstevel@tonic-gate ucw->ucw_first = cp; 4247c478bd9Sstevel@tonic-gate ucw->ucw_current = (uintptr_t)c.cache_next; 4257c478bd9Sstevel@tonic-gate wsp->walk_data = ucw; 4267c478bd9Sstevel@tonic-gate 4277c478bd9Sstevel@tonic-gate return (WALK_NEXT); 4287c478bd9Sstevel@tonic-gate } 4297c478bd9Sstevel@tonic-gate 4307c478bd9Sstevel@tonic-gate int 4317c478bd9Sstevel@tonic-gate umem_cache_walk_step(mdb_walk_state_t *wsp) 4327c478bd9Sstevel@tonic-gate { 4337c478bd9Sstevel@tonic-gate umem_cache_walk_t *ucw = wsp->walk_data; 4347c478bd9Sstevel@tonic-gate umem_cache_t c; 4357c478bd9Sstevel@tonic-gate int status; 4367c478bd9Sstevel@tonic-gate 4377c478bd9Sstevel@tonic-gate if (mdb_vread(&c, sizeof (umem_cache_t), ucw->ucw_current) == -1) { 4387c478bd9Sstevel@tonic-gate mdb_warn("couldn't read 
cache at %p", ucw->ucw_current); 4397c478bd9Sstevel@tonic-gate return (WALK_DONE); 4407c478bd9Sstevel@tonic-gate } 4417c478bd9Sstevel@tonic-gate 4427c478bd9Sstevel@tonic-gate status = wsp->walk_callback(ucw->ucw_current, &c, wsp->walk_cbdata); 4437c478bd9Sstevel@tonic-gate 4447c478bd9Sstevel@tonic-gate if ((ucw->ucw_current = (uintptr_t)c.cache_next) == ucw->ucw_first) 4457c478bd9Sstevel@tonic-gate return (WALK_DONE); 4467c478bd9Sstevel@tonic-gate 4477c478bd9Sstevel@tonic-gate return (status); 4487c478bd9Sstevel@tonic-gate } 4497c478bd9Sstevel@tonic-gate 4507c478bd9Sstevel@tonic-gate void 4517c478bd9Sstevel@tonic-gate umem_cache_walk_fini(mdb_walk_state_t *wsp) 4527c478bd9Sstevel@tonic-gate { 4537c478bd9Sstevel@tonic-gate umem_cache_walk_t *ucw = wsp->walk_data; 4547c478bd9Sstevel@tonic-gate mdb_free(ucw, sizeof (umem_cache_walk_t)); 4557c478bd9Sstevel@tonic-gate } 4567c478bd9Sstevel@tonic-gate 4577c478bd9Sstevel@tonic-gate typedef struct { 4587c478bd9Sstevel@tonic-gate umem_cpu_t *ucw_cpus; 4597c478bd9Sstevel@tonic-gate uint32_t ucw_current; 4607c478bd9Sstevel@tonic-gate uint32_t ucw_max; 4617c478bd9Sstevel@tonic-gate } umem_cpu_walk_state_t; 4627c478bd9Sstevel@tonic-gate 4637c478bd9Sstevel@tonic-gate int 4647c478bd9Sstevel@tonic-gate umem_cpu_walk_init(mdb_walk_state_t *wsp) 4657c478bd9Sstevel@tonic-gate { 4667c478bd9Sstevel@tonic-gate umem_cpu_t *umem_cpus; 4677c478bd9Sstevel@tonic-gate 4687c478bd9Sstevel@tonic-gate umem_cpu_walk_state_t *ucw; 4697c478bd9Sstevel@tonic-gate 4707c478bd9Sstevel@tonic-gate if (umem_readvar(&umem_cpus, "umem_cpus") == -1) { 4717c478bd9Sstevel@tonic-gate mdb_warn("failed to read 'umem_cpus'"); 4727c478bd9Sstevel@tonic-gate return (WALK_ERR); 4737c478bd9Sstevel@tonic-gate } 4747c478bd9Sstevel@tonic-gate 4757c478bd9Sstevel@tonic-gate ucw = mdb_alloc(sizeof (*ucw), UM_SLEEP); 4767c478bd9Sstevel@tonic-gate 4777c478bd9Sstevel@tonic-gate ucw->ucw_cpus = umem_cpus; 4787c478bd9Sstevel@tonic-gate ucw->ucw_current = 0; 
4797c478bd9Sstevel@tonic-gate ucw->ucw_max = umem_max_ncpus; 4807c478bd9Sstevel@tonic-gate 4817c478bd9Sstevel@tonic-gate wsp->walk_data = ucw; 4827c478bd9Sstevel@tonic-gate return (WALK_NEXT); 4837c478bd9Sstevel@tonic-gate } 4847c478bd9Sstevel@tonic-gate 4857c478bd9Sstevel@tonic-gate int 4867c478bd9Sstevel@tonic-gate umem_cpu_walk_step(mdb_walk_state_t *wsp) 4877c478bd9Sstevel@tonic-gate { 4887c478bd9Sstevel@tonic-gate umem_cpu_t cpu; 4897c478bd9Sstevel@tonic-gate umem_cpu_walk_state_t *ucw = wsp->walk_data; 4907c478bd9Sstevel@tonic-gate 4917c478bd9Sstevel@tonic-gate uintptr_t caddr; 4927c478bd9Sstevel@tonic-gate 4937c478bd9Sstevel@tonic-gate if (ucw->ucw_current >= ucw->ucw_max) 4947c478bd9Sstevel@tonic-gate return (WALK_DONE); 4957c478bd9Sstevel@tonic-gate 4967c478bd9Sstevel@tonic-gate caddr = (uintptr_t)&(ucw->ucw_cpus[ucw->ucw_current]); 4977c478bd9Sstevel@tonic-gate 4987c478bd9Sstevel@tonic-gate if (mdb_vread(&cpu, sizeof (umem_cpu_t), caddr) == -1) { 4997c478bd9Sstevel@tonic-gate mdb_warn("failed to read cpu %d", ucw->ucw_current); 5007c478bd9Sstevel@tonic-gate return (WALK_ERR); 5017c478bd9Sstevel@tonic-gate } 5027c478bd9Sstevel@tonic-gate 5037c478bd9Sstevel@tonic-gate ucw->ucw_current++; 5047c478bd9Sstevel@tonic-gate 5057c478bd9Sstevel@tonic-gate return (wsp->walk_callback(caddr, &cpu, wsp->walk_cbdata)); 5067c478bd9Sstevel@tonic-gate } 5077c478bd9Sstevel@tonic-gate 5087c478bd9Sstevel@tonic-gate void 5097c478bd9Sstevel@tonic-gate umem_cpu_walk_fini(mdb_walk_state_t *wsp) 5107c478bd9Sstevel@tonic-gate { 5117c478bd9Sstevel@tonic-gate umem_cpu_walk_state_t *ucw = wsp->walk_data; 5127c478bd9Sstevel@tonic-gate 5137c478bd9Sstevel@tonic-gate mdb_free(ucw, sizeof (*ucw)); 5147c478bd9Sstevel@tonic-gate } 5157c478bd9Sstevel@tonic-gate 5167c478bd9Sstevel@tonic-gate int 5177c478bd9Sstevel@tonic-gate umem_cpu_cache_walk_init(mdb_walk_state_t *wsp) 5187c478bd9Sstevel@tonic-gate { 519*892ad162SToomas Soome if (wsp->walk_addr == 0) { 5207c478bd9Sstevel@tonic-gate 
mdb_warn("umem_cpu_cache doesn't support global walks"); 5217c478bd9Sstevel@tonic-gate return (WALK_ERR); 5227c478bd9Sstevel@tonic-gate } 5237c478bd9Sstevel@tonic-gate 5247c478bd9Sstevel@tonic-gate if (mdb_layered_walk("umem_cpu", wsp) == -1) { 5257c478bd9Sstevel@tonic-gate mdb_warn("couldn't walk 'umem_cpu'"); 5267c478bd9Sstevel@tonic-gate return (WALK_ERR); 5277c478bd9Sstevel@tonic-gate } 5287c478bd9Sstevel@tonic-gate 5297c478bd9Sstevel@tonic-gate wsp->walk_data = (void *)wsp->walk_addr; 5307c478bd9Sstevel@tonic-gate 5317c478bd9Sstevel@tonic-gate return (WALK_NEXT); 5327c478bd9Sstevel@tonic-gate } 5337c478bd9Sstevel@tonic-gate 5347c478bd9Sstevel@tonic-gate int 5357c478bd9Sstevel@tonic-gate umem_cpu_cache_walk_step(mdb_walk_state_t *wsp) 5367c478bd9Sstevel@tonic-gate { 5377c478bd9Sstevel@tonic-gate uintptr_t caddr = (uintptr_t)wsp->walk_data; 5387c478bd9Sstevel@tonic-gate const umem_cpu_t *cpu = wsp->walk_layer; 5397c478bd9Sstevel@tonic-gate umem_cpu_cache_t cc; 5407c478bd9Sstevel@tonic-gate 5417c478bd9Sstevel@tonic-gate caddr += cpu->cpu_cache_offset; 5427c478bd9Sstevel@tonic-gate 5437c478bd9Sstevel@tonic-gate if (mdb_vread(&cc, sizeof (umem_cpu_cache_t), caddr) == -1) { 5447c478bd9Sstevel@tonic-gate mdb_warn("couldn't read umem_cpu_cache at %p", caddr); 5457c478bd9Sstevel@tonic-gate return (WALK_ERR); 5467c478bd9Sstevel@tonic-gate } 5477c478bd9Sstevel@tonic-gate 5487c478bd9Sstevel@tonic-gate return (wsp->walk_callback(caddr, &cc, wsp->walk_cbdata)); 5497c478bd9Sstevel@tonic-gate } 5507c478bd9Sstevel@tonic-gate 5517c478bd9Sstevel@tonic-gate int 5527c478bd9Sstevel@tonic-gate umem_slab_walk_init(mdb_walk_state_t *wsp) 5537c478bd9Sstevel@tonic-gate { 5547c478bd9Sstevel@tonic-gate uintptr_t caddr = wsp->walk_addr; 5557c478bd9Sstevel@tonic-gate umem_cache_t c; 5567c478bd9Sstevel@tonic-gate 557*892ad162SToomas Soome if (caddr == 0) { 5587c478bd9Sstevel@tonic-gate mdb_warn("umem_slab doesn't support global walks\n"); 5597c478bd9Sstevel@tonic-gate return (WALK_ERR); 
5607c478bd9Sstevel@tonic-gate } 5617c478bd9Sstevel@tonic-gate 5627c478bd9Sstevel@tonic-gate if (mdb_vread(&c, sizeof (c), caddr) == -1) { 5637c478bd9Sstevel@tonic-gate mdb_warn("couldn't read umem_cache at %p", caddr); 5647c478bd9Sstevel@tonic-gate return (WALK_ERR); 5657c478bd9Sstevel@tonic-gate } 5667c478bd9Sstevel@tonic-gate 5677c478bd9Sstevel@tonic-gate wsp->walk_data = 5687c478bd9Sstevel@tonic-gate (void *)(caddr + offsetof(umem_cache_t, cache_nullslab)); 5697c478bd9Sstevel@tonic-gate wsp->walk_addr = (uintptr_t)c.cache_nullslab.slab_next; 5707c478bd9Sstevel@tonic-gate 5717c478bd9Sstevel@tonic-gate return (WALK_NEXT); 5727c478bd9Sstevel@tonic-gate } 5737c478bd9Sstevel@tonic-gate 5747c478bd9Sstevel@tonic-gate int 5757c478bd9Sstevel@tonic-gate umem_slab_walk_partial_init(mdb_walk_state_t *wsp) 5767c478bd9Sstevel@tonic-gate { 5777c478bd9Sstevel@tonic-gate uintptr_t caddr = wsp->walk_addr; 5787c478bd9Sstevel@tonic-gate umem_cache_t c; 5797c478bd9Sstevel@tonic-gate 580*892ad162SToomas Soome if (caddr == 0) { 5817c478bd9Sstevel@tonic-gate mdb_warn("umem_slab_partial doesn't support global walks\n"); 5827c478bd9Sstevel@tonic-gate return (WALK_ERR); 5837c478bd9Sstevel@tonic-gate } 5847c478bd9Sstevel@tonic-gate 5857c478bd9Sstevel@tonic-gate if (mdb_vread(&c, sizeof (c), caddr) == -1) { 5867c478bd9Sstevel@tonic-gate mdb_warn("couldn't read umem_cache at %p", caddr); 5877c478bd9Sstevel@tonic-gate return (WALK_ERR); 5887c478bd9Sstevel@tonic-gate } 5897c478bd9Sstevel@tonic-gate 5907c478bd9Sstevel@tonic-gate wsp->walk_data = 5917c478bd9Sstevel@tonic-gate (void *)(caddr + offsetof(umem_cache_t, cache_nullslab)); 5927c478bd9Sstevel@tonic-gate wsp->walk_addr = (uintptr_t)c.cache_freelist; 5937c478bd9Sstevel@tonic-gate 5947c478bd9Sstevel@tonic-gate /* 5957c478bd9Sstevel@tonic-gate * Some consumers (umem_walk_step(), in particular) require at 5967c478bd9Sstevel@tonic-gate * least one callback if there are any buffers in the cache. 
So 5977c478bd9Sstevel@tonic-gate * if there are *no* partial slabs, report the last full slab, if 5987c478bd9Sstevel@tonic-gate * any. 5997c478bd9Sstevel@tonic-gate * 6007c478bd9Sstevel@tonic-gate * Yes, this is ugly, but it's cleaner than the other possibilities. 6017c478bd9Sstevel@tonic-gate */ 6027c478bd9Sstevel@tonic-gate if ((uintptr_t)wsp->walk_data == wsp->walk_addr) 6037c478bd9Sstevel@tonic-gate wsp->walk_addr = (uintptr_t)c.cache_nullslab.slab_prev; 6047c478bd9Sstevel@tonic-gate 6057c478bd9Sstevel@tonic-gate return (WALK_NEXT); 6067c478bd9Sstevel@tonic-gate } 6077c478bd9Sstevel@tonic-gate 6087c478bd9Sstevel@tonic-gate int 6097c478bd9Sstevel@tonic-gate umem_slab_walk_step(mdb_walk_state_t *wsp) 6107c478bd9Sstevel@tonic-gate { 6117c478bd9Sstevel@tonic-gate umem_slab_t s; 6127c478bd9Sstevel@tonic-gate uintptr_t addr = wsp->walk_addr; 6137c478bd9Sstevel@tonic-gate uintptr_t saddr = (uintptr_t)wsp->walk_data; 6147c478bd9Sstevel@tonic-gate uintptr_t caddr = saddr - offsetof(umem_cache_t, cache_nullslab); 6157c478bd9Sstevel@tonic-gate 6167c478bd9Sstevel@tonic-gate if (addr == saddr) 6177c478bd9Sstevel@tonic-gate return (WALK_DONE); 6187c478bd9Sstevel@tonic-gate 6197c478bd9Sstevel@tonic-gate if (mdb_vread(&s, sizeof (s), addr) == -1) { 6207c478bd9Sstevel@tonic-gate mdb_warn("failed to read slab at %p", wsp->walk_addr); 6217c478bd9Sstevel@tonic-gate return (WALK_ERR); 6227c478bd9Sstevel@tonic-gate } 6237c478bd9Sstevel@tonic-gate 6247c478bd9Sstevel@tonic-gate if ((uintptr_t)s.slab_cache != caddr) { 6257c478bd9Sstevel@tonic-gate mdb_warn("slab %p isn't in cache %p (in cache %p)\n", 6267c478bd9Sstevel@tonic-gate addr, caddr, s.slab_cache); 6277c478bd9Sstevel@tonic-gate return (WALK_ERR); 6287c478bd9Sstevel@tonic-gate } 6297c478bd9Sstevel@tonic-gate 6307c478bd9Sstevel@tonic-gate wsp->walk_addr = (uintptr_t)s.slab_next; 6317c478bd9Sstevel@tonic-gate 6327c478bd9Sstevel@tonic-gate return (wsp->walk_callback(addr, &s, wsp->walk_cbdata)); 6337c478bd9Sstevel@tonic-gate } 
6347c478bd9Sstevel@tonic-gate 6357c478bd9Sstevel@tonic-gate int 6367c478bd9Sstevel@tonic-gate umem_cache(uintptr_t addr, uint_t flags, int ac, const mdb_arg_t *argv) 6377c478bd9Sstevel@tonic-gate { 6387c478bd9Sstevel@tonic-gate umem_cache_t c; 6397c478bd9Sstevel@tonic-gate 6407c478bd9Sstevel@tonic-gate if (!(flags & DCMD_ADDRSPEC)) { 6417c478bd9Sstevel@tonic-gate if (mdb_walk_dcmd("umem_cache", "umem_cache", ac, argv) == -1) { 6427c478bd9Sstevel@tonic-gate mdb_warn("can't walk umem_cache"); 6437c478bd9Sstevel@tonic-gate return (DCMD_ERR); 6447c478bd9Sstevel@tonic-gate } 6457c478bd9Sstevel@tonic-gate return (DCMD_OK); 6467c478bd9Sstevel@tonic-gate } 6477c478bd9Sstevel@tonic-gate 6487c478bd9Sstevel@tonic-gate if (DCMD_HDRSPEC(flags)) 6497c478bd9Sstevel@tonic-gate mdb_printf("%-?s %-25s %4s %8s %8s %8s\n", "ADDR", "NAME", 6507c478bd9Sstevel@tonic-gate "FLAG", "CFLAG", "BUFSIZE", "BUFTOTL"); 6517c478bd9Sstevel@tonic-gate 6527c478bd9Sstevel@tonic-gate if (mdb_vread(&c, sizeof (c), addr) == -1) { 6537c478bd9Sstevel@tonic-gate mdb_warn("couldn't read umem_cache at %p", addr); 6547c478bd9Sstevel@tonic-gate return (DCMD_ERR); 6557c478bd9Sstevel@tonic-gate } 6567c478bd9Sstevel@tonic-gate 6577c478bd9Sstevel@tonic-gate mdb_printf("%0?p %-25s %04x %08x %8ld %8lld\n", addr, c.cache_name, 6587c478bd9Sstevel@tonic-gate c.cache_flags, c.cache_cflags, c.cache_bufsize, c.cache_buftotal); 6597c478bd9Sstevel@tonic-gate 6607c478bd9Sstevel@tonic-gate return (DCMD_OK); 6617c478bd9Sstevel@tonic-gate } 6627c478bd9Sstevel@tonic-gate 6637c478bd9Sstevel@tonic-gate static int 6647c478bd9Sstevel@tonic-gate addrcmp(const void *lhs, const void *rhs) 6657c478bd9Sstevel@tonic-gate { 6667c478bd9Sstevel@tonic-gate uintptr_t p1 = *((uintptr_t *)lhs); 6677c478bd9Sstevel@tonic-gate uintptr_t p2 = *((uintptr_t *)rhs); 6687c478bd9Sstevel@tonic-gate 6697c478bd9Sstevel@tonic-gate if (p1 < p2) 6707c478bd9Sstevel@tonic-gate return (-1); 6717c478bd9Sstevel@tonic-gate if (p1 > p2) 6727c478bd9Sstevel@tonic-gate 
return (1); 6737c478bd9Sstevel@tonic-gate return (0); 6747c478bd9Sstevel@tonic-gate } 6757c478bd9Sstevel@tonic-gate 6767c478bd9Sstevel@tonic-gate static int 6777c478bd9Sstevel@tonic-gate bufctlcmp(const umem_bufctl_audit_t **lhs, const umem_bufctl_audit_t **rhs) 6787c478bd9Sstevel@tonic-gate { 6797c478bd9Sstevel@tonic-gate const umem_bufctl_audit_t *bcp1 = *lhs; 6807c478bd9Sstevel@tonic-gate const umem_bufctl_audit_t *bcp2 = *rhs; 6817c478bd9Sstevel@tonic-gate 6827c478bd9Sstevel@tonic-gate if (bcp1->bc_timestamp > bcp2->bc_timestamp) 6837c478bd9Sstevel@tonic-gate return (-1); 6847c478bd9Sstevel@tonic-gate 6857c478bd9Sstevel@tonic-gate if (bcp1->bc_timestamp < bcp2->bc_timestamp) 6867c478bd9Sstevel@tonic-gate return (1); 6877c478bd9Sstevel@tonic-gate 6887c478bd9Sstevel@tonic-gate return (0); 6897c478bd9Sstevel@tonic-gate } 6907c478bd9Sstevel@tonic-gate 6917c478bd9Sstevel@tonic-gate typedef struct umem_hash_walk { 6927c478bd9Sstevel@tonic-gate uintptr_t *umhw_table; 6937c478bd9Sstevel@tonic-gate size_t umhw_nelems; 6947c478bd9Sstevel@tonic-gate size_t umhw_pos; 6957c478bd9Sstevel@tonic-gate umem_bufctl_t umhw_cur; 6967c478bd9Sstevel@tonic-gate } umem_hash_walk_t; 6977c478bd9Sstevel@tonic-gate 6987c478bd9Sstevel@tonic-gate int 6997c478bd9Sstevel@tonic-gate umem_hash_walk_init(mdb_walk_state_t *wsp) 7007c478bd9Sstevel@tonic-gate { 7017c478bd9Sstevel@tonic-gate umem_hash_walk_t *umhw; 7027c478bd9Sstevel@tonic-gate uintptr_t *hash; 7037c478bd9Sstevel@tonic-gate umem_cache_t c; 7047c478bd9Sstevel@tonic-gate uintptr_t haddr, addr = wsp->walk_addr; 7057c478bd9Sstevel@tonic-gate size_t nelems; 7067c478bd9Sstevel@tonic-gate size_t hsize; 7077c478bd9Sstevel@tonic-gate 708*892ad162SToomas Soome if (addr == 0) { 7097c478bd9Sstevel@tonic-gate mdb_warn("umem_hash doesn't support global walks\n"); 7107c478bd9Sstevel@tonic-gate return (WALK_ERR); 7117c478bd9Sstevel@tonic-gate } 7127c478bd9Sstevel@tonic-gate 7137c478bd9Sstevel@tonic-gate if (mdb_vread(&c, sizeof (c), addr) == -1) { 
7147c478bd9Sstevel@tonic-gate mdb_warn("couldn't read cache at addr %p", addr); 7157c478bd9Sstevel@tonic-gate return (WALK_ERR); 7167c478bd9Sstevel@tonic-gate } 7177c478bd9Sstevel@tonic-gate 7187c478bd9Sstevel@tonic-gate if (!(c.cache_flags & UMF_HASH)) { 7197c478bd9Sstevel@tonic-gate mdb_warn("cache %p doesn't have a hash table\n", addr); 7207c478bd9Sstevel@tonic-gate return (WALK_DONE); /* nothing to do */ 7217c478bd9Sstevel@tonic-gate } 7227c478bd9Sstevel@tonic-gate 7237c478bd9Sstevel@tonic-gate umhw = mdb_zalloc(sizeof (umem_hash_walk_t), UM_SLEEP); 7247c478bd9Sstevel@tonic-gate umhw->umhw_cur.bc_next = NULL; 7257c478bd9Sstevel@tonic-gate umhw->umhw_pos = 0; 7267c478bd9Sstevel@tonic-gate 7277c478bd9Sstevel@tonic-gate umhw->umhw_nelems = nelems = c.cache_hash_mask + 1; 7287c478bd9Sstevel@tonic-gate hsize = nelems * sizeof (uintptr_t); 7297c478bd9Sstevel@tonic-gate haddr = (uintptr_t)c.cache_hash_table; 7307c478bd9Sstevel@tonic-gate 7317c478bd9Sstevel@tonic-gate umhw->umhw_table = hash = mdb_alloc(hsize, UM_SLEEP); 7327c478bd9Sstevel@tonic-gate if (mdb_vread(hash, hsize, haddr) == -1) { 7337c478bd9Sstevel@tonic-gate mdb_warn("failed to read hash table at %p", haddr); 7347c478bd9Sstevel@tonic-gate mdb_free(hash, hsize); 7357c478bd9Sstevel@tonic-gate mdb_free(umhw, sizeof (umem_hash_walk_t)); 7367c478bd9Sstevel@tonic-gate return (WALK_ERR); 7377c478bd9Sstevel@tonic-gate } 7387c478bd9Sstevel@tonic-gate 7397c478bd9Sstevel@tonic-gate wsp->walk_data = umhw; 7407c478bd9Sstevel@tonic-gate 7417c478bd9Sstevel@tonic-gate return (WALK_NEXT); 7427c478bd9Sstevel@tonic-gate } 7437c478bd9Sstevel@tonic-gate 7447c478bd9Sstevel@tonic-gate int 7457c478bd9Sstevel@tonic-gate umem_hash_walk_step(mdb_walk_state_t *wsp) 7467c478bd9Sstevel@tonic-gate { 7477c478bd9Sstevel@tonic-gate umem_hash_walk_t *umhw = wsp->walk_data; 748*892ad162SToomas Soome uintptr_t addr = 0; 7497c478bd9Sstevel@tonic-gate 750*892ad162SToomas Soome if ((addr = (uintptr_t)umhw->umhw_cur.bc_next) == 0) { 
7517c478bd9Sstevel@tonic-gate while (umhw->umhw_pos < umhw->umhw_nelems) { 752*892ad162SToomas Soome if ((addr = umhw->umhw_table[umhw->umhw_pos++]) != 0) 7537c478bd9Sstevel@tonic-gate break; 7547c478bd9Sstevel@tonic-gate } 7557c478bd9Sstevel@tonic-gate } 756*892ad162SToomas Soome if (addr == 0) 7577c478bd9Sstevel@tonic-gate return (WALK_DONE); 7587c478bd9Sstevel@tonic-gate 7597c478bd9Sstevel@tonic-gate if (mdb_vread(&umhw->umhw_cur, sizeof (umem_bufctl_t), addr) == -1) { 7607c478bd9Sstevel@tonic-gate mdb_warn("couldn't read umem_bufctl_t at addr %p", addr); 7617c478bd9Sstevel@tonic-gate return (WALK_ERR); 7627c478bd9Sstevel@tonic-gate } 7637c478bd9Sstevel@tonic-gate 7647c478bd9Sstevel@tonic-gate return (wsp->walk_callback(addr, &umhw->umhw_cur, wsp->walk_cbdata)); 7657c478bd9Sstevel@tonic-gate } 7667c478bd9Sstevel@tonic-gate 7677c478bd9Sstevel@tonic-gate void 7687c478bd9Sstevel@tonic-gate umem_hash_walk_fini(mdb_walk_state_t *wsp) 7697c478bd9Sstevel@tonic-gate { 7707c478bd9Sstevel@tonic-gate umem_hash_walk_t *umhw = wsp->walk_data; 7717c478bd9Sstevel@tonic-gate 7727c478bd9Sstevel@tonic-gate if (umhw == NULL) 7737c478bd9Sstevel@tonic-gate return; 7747c478bd9Sstevel@tonic-gate 7757c478bd9Sstevel@tonic-gate mdb_free(umhw->umhw_table, umhw->umhw_nelems * sizeof (uintptr_t)); 7767c478bd9Sstevel@tonic-gate mdb_free(umhw, sizeof (umem_hash_walk_t)); 7777c478bd9Sstevel@tonic-gate } 7787c478bd9Sstevel@tonic-gate 7797c478bd9Sstevel@tonic-gate /* 7807c478bd9Sstevel@tonic-gate * Find the address of the bufctl structure for the address 'buf' in cache 7817c478bd9Sstevel@tonic-gate * 'cp', which is at address caddr, and place it in *out. 
7827c478bd9Sstevel@tonic-gate */ 7837c478bd9Sstevel@tonic-gate static int 7847c478bd9Sstevel@tonic-gate umem_hash_lookup(umem_cache_t *cp, uintptr_t caddr, void *buf, uintptr_t *out) 7857c478bd9Sstevel@tonic-gate { 7867c478bd9Sstevel@tonic-gate uintptr_t bucket = (uintptr_t)UMEM_HASH(cp, buf); 7877c478bd9Sstevel@tonic-gate umem_bufctl_t *bcp; 7887c478bd9Sstevel@tonic-gate umem_bufctl_t bc; 7897c478bd9Sstevel@tonic-gate 7907c478bd9Sstevel@tonic-gate if (mdb_vread(&bcp, sizeof (umem_bufctl_t *), bucket) == -1) { 7917c478bd9Sstevel@tonic-gate mdb_warn("unable to read hash bucket for %p in cache %p", 7927c478bd9Sstevel@tonic-gate buf, caddr); 7937c478bd9Sstevel@tonic-gate return (-1); 7947c478bd9Sstevel@tonic-gate } 7957c478bd9Sstevel@tonic-gate 7967c478bd9Sstevel@tonic-gate while (bcp != NULL) { 7977c478bd9Sstevel@tonic-gate if (mdb_vread(&bc, sizeof (umem_bufctl_t), 7987c478bd9Sstevel@tonic-gate (uintptr_t)bcp) == -1) { 7997c478bd9Sstevel@tonic-gate mdb_warn("unable to read bufctl at %p", bcp); 8007c478bd9Sstevel@tonic-gate return (-1); 8017c478bd9Sstevel@tonic-gate } 8027c478bd9Sstevel@tonic-gate if (bc.bc_addr == buf) { 8037c478bd9Sstevel@tonic-gate *out = (uintptr_t)bcp; 8047c478bd9Sstevel@tonic-gate return (0); 8057c478bd9Sstevel@tonic-gate } 8067c478bd9Sstevel@tonic-gate bcp = bc.bc_next; 8077c478bd9Sstevel@tonic-gate } 8087c478bd9Sstevel@tonic-gate 8097c478bd9Sstevel@tonic-gate mdb_warn("unable to find bufctl for %p in cache %p\n", buf, caddr); 8107c478bd9Sstevel@tonic-gate return (-1); 8117c478bd9Sstevel@tonic-gate } 8127c478bd9Sstevel@tonic-gate 8137c478bd9Sstevel@tonic-gate int 8147c478bd9Sstevel@tonic-gate umem_get_magsize(const umem_cache_t *cp) 8157c478bd9Sstevel@tonic-gate { 8167c478bd9Sstevel@tonic-gate uintptr_t addr = (uintptr_t)cp->cache_magtype; 8177c478bd9Sstevel@tonic-gate GElf_Sym mt_sym; 8187c478bd9Sstevel@tonic-gate umem_magtype_t mt; 8197c478bd9Sstevel@tonic-gate int res; 8207c478bd9Sstevel@tonic-gate 8217c478bd9Sstevel@tonic-gate /* 
8227c478bd9Sstevel@tonic-gate * if cpu 0 has a non-zero magsize, it must be correct. caches 8237c478bd9Sstevel@tonic-gate * with UMF_NOMAGAZINE have disabled their magazine layers, so 8247c478bd9Sstevel@tonic-gate * it is okay to return 0 for them. 8257c478bd9Sstevel@tonic-gate */ 8267c478bd9Sstevel@tonic-gate if ((res = cp->cache_cpu[0].cc_magsize) != 0 || 8277c478bd9Sstevel@tonic-gate (cp->cache_flags & UMF_NOMAGAZINE)) 8287c478bd9Sstevel@tonic-gate return (res); 8297c478bd9Sstevel@tonic-gate 830789d94c2Sjwadams if (umem_lookup_by_name("umem_magtype", &mt_sym) == -1) { 8317c478bd9Sstevel@tonic-gate mdb_warn("unable to read 'umem_magtype'"); 8327c478bd9Sstevel@tonic-gate } else if (addr < mt_sym.st_value || 8337c478bd9Sstevel@tonic-gate addr + sizeof (mt) - 1 > mt_sym.st_value + mt_sym.st_size - 1 || 8347c478bd9Sstevel@tonic-gate ((addr - mt_sym.st_value) % sizeof (mt)) != 0) { 8357c478bd9Sstevel@tonic-gate mdb_warn("cache '%s' has invalid magtype pointer (%p)\n", 8367c478bd9Sstevel@tonic-gate cp->cache_name, addr); 8377c478bd9Sstevel@tonic-gate return (0); 8387c478bd9Sstevel@tonic-gate } 8397c478bd9Sstevel@tonic-gate if (mdb_vread(&mt, sizeof (mt), addr) == -1) { 8407c478bd9Sstevel@tonic-gate mdb_warn("unable to read magtype at %a", addr); 8417c478bd9Sstevel@tonic-gate return (0); 8427c478bd9Sstevel@tonic-gate } 8437c478bd9Sstevel@tonic-gate return (mt.mt_magsize); 8447c478bd9Sstevel@tonic-gate } 8457c478bd9Sstevel@tonic-gate 8467c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 8477c478bd9Sstevel@tonic-gate static int 8487c478bd9Sstevel@tonic-gate umem_estimate_slab(uintptr_t addr, const umem_slab_t *sp, size_t *est) 8497c478bd9Sstevel@tonic-gate { 8507c478bd9Sstevel@tonic-gate *est -= (sp->slab_chunks - sp->slab_refcnt); 8517c478bd9Sstevel@tonic-gate 8527c478bd9Sstevel@tonic-gate return (WALK_NEXT); 8537c478bd9Sstevel@tonic-gate } 8547c478bd9Sstevel@tonic-gate 8557c478bd9Sstevel@tonic-gate /* 8567c478bd9Sstevel@tonic-gate * Returns an upper bound on the number of 
allocated buffers in a given 8577c478bd9Sstevel@tonic-gate * cache. 8587c478bd9Sstevel@tonic-gate */ 8597c478bd9Sstevel@tonic-gate size_t 8607c478bd9Sstevel@tonic-gate umem_estimate_allocated(uintptr_t addr, const umem_cache_t *cp) 8617c478bd9Sstevel@tonic-gate { 8627c478bd9Sstevel@tonic-gate int magsize; 8637c478bd9Sstevel@tonic-gate size_t cache_est; 8647c478bd9Sstevel@tonic-gate 8657c478bd9Sstevel@tonic-gate cache_est = cp->cache_buftotal; 8667c478bd9Sstevel@tonic-gate 8677c478bd9Sstevel@tonic-gate (void) mdb_pwalk("umem_slab_partial", 8687c478bd9Sstevel@tonic-gate (mdb_walk_cb_t)umem_estimate_slab, &cache_est, addr); 8697c478bd9Sstevel@tonic-gate 8707c478bd9Sstevel@tonic-gate if ((magsize = umem_get_magsize(cp)) != 0) { 8717c478bd9Sstevel@tonic-gate size_t mag_est = cp->cache_full.ml_total * magsize; 8727c478bd9Sstevel@tonic-gate 8737c478bd9Sstevel@tonic-gate if (cache_est >= mag_est) { 8747c478bd9Sstevel@tonic-gate cache_est -= mag_est; 8757c478bd9Sstevel@tonic-gate } else { 8767c478bd9Sstevel@tonic-gate mdb_warn("cache %p's magazine layer holds more buffers " 8777c478bd9Sstevel@tonic-gate "than the slab layer.\n", addr); 8787c478bd9Sstevel@tonic-gate } 8797c478bd9Sstevel@tonic-gate } 8807c478bd9Sstevel@tonic-gate return (cache_est); 8817c478bd9Sstevel@tonic-gate } 8827c478bd9Sstevel@tonic-gate 8837c478bd9Sstevel@tonic-gate #define READMAG_ROUNDS(rounds) { \ 8847c478bd9Sstevel@tonic-gate if (mdb_vread(mp, magbsize, (uintptr_t)ump) == -1) { \ 8857c478bd9Sstevel@tonic-gate mdb_warn("couldn't read magazine at %p", ump); \ 8867c478bd9Sstevel@tonic-gate goto fail; \ 8877c478bd9Sstevel@tonic-gate } \ 8887c478bd9Sstevel@tonic-gate for (i = 0; i < rounds; i++) { \ 8897c478bd9Sstevel@tonic-gate maglist[magcnt++] = mp->mag_round[i]; \ 8907c478bd9Sstevel@tonic-gate if (magcnt == magmax) { \ 8917c478bd9Sstevel@tonic-gate mdb_warn("%d magazines exceeds fudge factor\n", \ 8927c478bd9Sstevel@tonic-gate magcnt); \ 8937c478bd9Sstevel@tonic-gate goto fail; \ 
8947c478bd9Sstevel@tonic-gate } \ 8957c478bd9Sstevel@tonic-gate } \ 8967c478bd9Sstevel@tonic-gate } 8977c478bd9Sstevel@tonic-gate 8984f364e7cSRobert Mustacchi static int 899789d94c2Sjwadams umem_read_magazines(umem_cache_t *cp, uintptr_t addr, 9004f364e7cSRobert Mustacchi void ***maglistp, size_t *magcntp, size_t *magmaxp) 9017c478bd9Sstevel@tonic-gate { 9027c478bd9Sstevel@tonic-gate umem_magazine_t *ump, *mp; 9037c478bd9Sstevel@tonic-gate void **maglist = NULL; 9047c478bd9Sstevel@tonic-gate int i, cpu; 9057c478bd9Sstevel@tonic-gate size_t magsize, magmax, magbsize; 9067c478bd9Sstevel@tonic-gate size_t magcnt = 0; 9077c478bd9Sstevel@tonic-gate 9087c478bd9Sstevel@tonic-gate /* 9097c478bd9Sstevel@tonic-gate * Read the magtype out of the cache, after verifying the pointer's 9107c478bd9Sstevel@tonic-gate * correctness. 9117c478bd9Sstevel@tonic-gate */ 9127c478bd9Sstevel@tonic-gate magsize = umem_get_magsize(cp); 913789d94c2Sjwadams if (magsize == 0) { 914789d94c2Sjwadams *maglistp = NULL; 915789d94c2Sjwadams *magcntp = 0; 916789d94c2Sjwadams *magmaxp = 0; 9174f364e7cSRobert Mustacchi return (0); 918789d94c2Sjwadams } 9197c478bd9Sstevel@tonic-gate 9207c478bd9Sstevel@tonic-gate /* 9217c478bd9Sstevel@tonic-gate * There are several places where we need to go buffer hunting: 9227c478bd9Sstevel@tonic-gate * the per-CPU loaded magazine, the per-CPU spare full magazine, 9237c478bd9Sstevel@tonic-gate * and the full magazine list in the depot. 9247c478bd9Sstevel@tonic-gate * 9257c478bd9Sstevel@tonic-gate * For an upper bound on the number of buffers in the magazine 9267c478bd9Sstevel@tonic-gate * layer, we have the number of magazines on the cache_full 9277c478bd9Sstevel@tonic-gate * list plus at most two magazines per CPU (the loaded and the 9287c478bd9Sstevel@tonic-gate * spare). Toss in 100 magazines as a fudge factor in case this 9297c478bd9Sstevel@tonic-gate * is live (the number "100" comes from the same fudge factor in 9307c478bd9Sstevel@tonic-gate * crash(1M)). 
9317c478bd9Sstevel@tonic-gate */ 932789d94c2Sjwadams magmax = (cp->cache_full.ml_total + 2 * umem_max_ncpus + 100) * magsize; 9337c478bd9Sstevel@tonic-gate magbsize = offsetof(umem_magazine_t, mag_round[magsize]); 9347c478bd9Sstevel@tonic-gate 9357c478bd9Sstevel@tonic-gate if (magbsize >= PAGESIZE / 2) { 9367c478bd9Sstevel@tonic-gate mdb_warn("magazine size for cache %p unreasonable (%x)\n", 9377c478bd9Sstevel@tonic-gate addr, magbsize); 9384f364e7cSRobert Mustacchi return (-1); 9397c478bd9Sstevel@tonic-gate } 9407c478bd9Sstevel@tonic-gate 9414f364e7cSRobert Mustacchi maglist = mdb_alloc(magmax * sizeof (void *), UM_SLEEP); 9424f364e7cSRobert Mustacchi mp = mdb_alloc(magbsize, UM_SLEEP); 9437c478bd9Sstevel@tonic-gate if (mp == NULL || maglist == NULL) 9447c478bd9Sstevel@tonic-gate goto fail; 9457c478bd9Sstevel@tonic-gate 9467c478bd9Sstevel@tonic-gate /* 9477c478bd9Sstevel@tonic-gate * First up: the magazines in the depot (i.e. on the cache_full list). 9487c478bd9Sstevel@tonic-gate */ 9497c478bd9Sstevel@tonic-gate for (ump = cp->cache_full.ml_list; ump != NULL; ) { 9507c478bd9Sstevel@tonic-gate READMAG_ROUNDS(magsize); 9517c478bd9Sstevel@tonic-gate ump = mp->mag_next; 9527c478bd9Sstevel@tonic-gate 9537c478bd9Sstevel@tonic-gate if (ump == cp->cache_full.ml_list) 9547c478bd9Sstevel@tonic-gate break; /* cache_full list loop detected */ 9557c478bd9Sstevel@tonic-gate } 9567c478bd9Sstevel@tonic-gate 9577c478bd9Sstevel@tonic-gate dprintf(("cache_full list done\n")); 9587c478bd9Sstevel@tonic-gate 9597c478bd9Sstevel@tonic-gate /* 9607c478bd9Sstevel@tonic-gate * Now whip through the CPUs, snagging the loaded magazines 9617c478bd9Sstevel@tonic-gate * and full spares. 
9627c478bd9Sstevel@tonic-gate */ 963789d94c2Sjwadams for (cpu = 0; cpu < umem_max_ncpus; cpu++) { 9647c478bd9Sstevel@tonic-gate umem_cpu_cache_t *ccp = &cp->cache_cpu[cpu]; 9657c478bd9Sstevel@tonic-gate 9667c478bd9Sstevel@tonic-gate dprintf(("reading cpu cache %p\n", 9677c478bd9Sstevel@tonic-gate (uintptr_t)ccp - (uintptr_t)cp + addr)); 9687c478bd9Sstevel@tonic-gate 9697c478bd9Sstevel@tonic-gate if (ccp->cc_rounds > 0 && 9707c478bd9Sstevel@tonic-gate (ump = ccp->cc_loaded) != NULL) { 9717c478bd9Sstevel@tonic-gate dprintf(("reading %d loaded rounds\n", ccp->cc_rounds)); 9727c478bd9Sstevel@tonic-gate READMAG_ROUNDS(ccp->cc_rounds); 9737c478bd9Sstevel@tonic-gate } 9747c478bd9Sstevel@tonic-gate 9757c478bd9Sstevel@tonic-gate if (ccp->cc_prounds > 0 && 9767c478bd9Sstevel@tonic-gate (ump = ccp->cc_ploaded) != NULL) { 9777c478bd9Sstevel@tonic-gate dprintf(("reading %d previously loaded rounds\n", 9787c478bd9Sstevel@tonic-gate ccp->cc_prounds)); 9797c478bd9Sstevel@tonic-gate READMAG_ROUNDS(ccp->cc_prounds); 9807c478bd9Sstevel@tonic-gate } 9817c478bd9Sstevel@tonic-gate } 9827c478bd9Sstevel@tonic-gate 9837c478bd9Sstevel@tonic-gate dprintf(("magazine layer: %d buffers\n", magcnt)); 9847c478bd9Sstevel@tonic-gate 9854f364e7cSRobert Mustacchi mdb_free(mp, magbsize); 9867c478bd9Sstevel@tonic-gate 9877c478bd9Sstevel@tonic-gate *maglistp = maglist; 9887c478bd9Sstevel@tonic-gate *magcntp = magcnt; 9897c478bd9Sstevel@tonic-gate *magmaxp = magmax; 9907c478bd9Sstevel@tonic-gate 9914f364e7cSRobert Mustacchi return (0); 9927c478bd9Sstevel@tonic-gate 9937c478bd9Sstevel@tonic-gate fail: 9944f364e7cSRobert Mustacchi if (mp) 9954f364e7cSRobert Mustacchi mdb_free(mp, magbsize); 9964f364e7cSRobert Mustacchi if (maglist) 9974f364e7cSRobert Mustacchi mdb_free(maglist, magmax * sizeof (void *)); 9984f364e7cSRobert Mustacchi 9994f364e7cSRobert Mustacchi return (-1); 10004f364e7cSRobert Mustacchi } 10014f364e7cSRobert Mustacchi 10024f364e7cSRobert Mustacchi typedef struct umem_read_ptc_walk { 
10034f364e7cSRobert Mustacchi void **urpw_buf; 10044f364e7cSRobert Mustacchi size_t urpw_cnt; 10054f364e7cSRobert Mustacchi size_t urpw_max; 10064f364e7cSRobert Mustacchi } umem_read_ptc_walk_t; 10074f364e7cSRobert Mustacchi 10084f364e7cSRobert Mustacchi /*ARGSUSED*/ 10094f364e7cSRobert Mustacchi static int 10104f364e7cSRobert Mustacchi umem_read_ptc_walk_buf(uintptr_t addr, 10114f364e7cSRobert Mustacchi const void *ignored, umem_read_ptc_walk_t *urpw) 10124f364e7cSRobert Mustacchi { 10134f364e7cSRobert Mustacchi if (urpw->urpw_cnt == urpw->urpw_max) { 10144f364e7cSRobert Mustacchi size_t nmax = urpw->urpw_max ? (urpw->urpw_max << 1) : 1; 10154f364e7cSRobert Mustacchi void **new = mdb_zalloc(nmax * sizeof (void *), UM_SLEEP); 10164f364e7cSRobert Mustacchi 10174f364e7cSRobert Mustacchi if (nmax > 1) { 10184f364e7cSRobert Mustacchi size_t osize = urpw->urpw_max * sizeof (void *); 10194f364e7cSRobert Mustacchi bcopy(urpw->urpw_buf, new, osize); 10204f364e7cSRobert Mustacchi mdb_free(urpw->urpw_buf, osize); 10214f364e7cSRobert Mustacchi } 10224f364e7cSRobert Mustacchi 10234f364e7cSRobert Mustacchi urpw->urpw_buf = new; 10244f364e7cSRobert Mustacchi urpw->urpw_max = nmax; 10254f364e7cSRobert Mustacchi } 10264f364e7cSRobert Mustacchi 10274f364e7cSRobert Mustacchi urpw->urpw_buf[urpw->urpw_cnt++] = (void *)addr; 10284f364e7cSRobert Mustacchi 10294f364e7cSRobert Mustacchi return (WALK_NEXT); 10304f364e7cSRobert Mustacchi } 10314f364e7cSRobert Mustacchi 10324f364e7cSRobert Mustacchi static int 10334f364e7cSRobert Mustacchi umem_read_ptc(umem_cache_t *cp, 10344f364e7cSRobert Mustacchi void ***buflistp, size_t *bufcntp, size_t *bufmaxp) 10354f364e7cSRobert Mustacchi { 10364f364e7cSRobert Mustacchi umem_read_ptc_walk_t urpw; 10374f364e7cSRobert Mustacchi char walk[60]; 10384f364e7cSRobert Mustacchi int rval; 10394f364e7cSRobert Mustacchi 10404f364e7cSRobert Mustacchi if (!(cp->cache_flags & UMF_PTC)) 10414f364e7cSRobert Mustacchi return (0); 10424f364e7cSRobert Mustacchi 
10439c720e3bSIgor Kozhukhov (void) mdb_snprintf(walk, sizeof (walk), "umem_ptc_%d", 10449c720e3bSIgor Kozhukhov cp->cache_bufsize); 10454f364e7cSRobert Mustacchi 10464f364e7cSRobert Mustacchi urpw.urpw_buf = *buflistp; 10474f364e7cSRobert Mustacchi urpw.urpw_cnt = *bufcntp; 10484f364e7cSRobert Mustacchi urpw.urpw_max = *bufmaxp; 10494f364e7cSRobert Mustacchi 10504f364e7cSRobert Mustacchi if ((rval = mdb_walk(walk, 10514f364e7cSRobert Mustacchi (mdb_walk_cb_t)umem_read_ptc_walk_buf, &urpw)) == -1) { 10524f364e7cSRobert Mustacchi mdb_warn("couldn't walk %s", walk); 10537c478bd9Sstevel@tonic-gate } 10544f364e7cSRobert Mustacchi 10554f364e7cSRobert Mustacchi *buflistp = urpw.urpw_buf; 10564f364e7cSRobert Mustacchi *bufcntp = urpw.urpw_cnt; 10574f364e7cSRobert Mustacchi *bufmaxp = urpw.urpw_max; 10584f364e7cSRobert Mustacchi 10594f364e7cSRobert Mustacchi return (rval); 10607c478bd9Sstevel@tonic-gate } 10617c478bd9Sstevel@tonic-gate 10627c478bd9Sstevel@tonic-gate static int 10637c478bd9Sstevel@tonic-gate umem_walk_callback(mdb_walk_state_t *wsp, uintptr_t buf) 10647c478bd9Sstevel@tonic-gate { 10657c478bd9Sstevel@tonic-gate return (wsp->walk_callback(buf, NULL, wsp->walk_cbdata)); 10667c478bd9Sstevel@tonic-gate } 10677c478bd9Sstevel@tonic-gate 10687c478bd9Sstevel@tonic-gate static int 10697c478bd9Sstevel@tonic-gate bufctl_walk_callback(umem_cache_t *cp, mdb_walk_state_t *wsp, uintptr_t buf) 10707c478bd9Sstevel@tonic-gate { 10717c478bd9Sstevel@tonic-gate umem_bufctl_audit_t *b; 10727c478bd9Sstevel@tonic-gate UMEM_LOCAL_BUFCTL_AUDIT(&b); 10737c478bd9Sstevel@tonic-gate 10747c478bd9Sstevel@tonic-gate /* 10757c478bd9Sstevel@tonic-gate * if UMF_AUDIT is not set, we know that we're looking at a 10767c478bd9Sstevel@tonic-gate * umem_bufctl_t. 
10777c478bd9Sstevel@tonic-gate */ 10787c478bd9Sstevel@tonic-gate if (!(cp->cache_flags & UMF_AUDIT) || 10797c478bd9Sstevel@tonic-gate mdb_vread(b, UMEM_BUFCTL_AUDIT_SIZE, buf) == -1) { 10807c478bd9Sstevel@tonic-gate (void) memset(b, 0, UMEM_BUFCTL_AUDIT_SIZE); 10817c478bd9Sstevel@tonic-gate if (mdb_vread(b, sizeof (umem_bufctl_t), buf) == -1) { 10827c478bd9Sstevel@tonic-gate mdb_warn("unable to read bufctl at %p", buf); 10837c478bd9Sstevel@tonic-gate return (WALK_ERR); 10847c478bd9Sstevel@tonic-gate } 10857c478bd9Sstevel@tonic-gate } 10867c478bd9Sstevel@tonic-gate 10877c478bd9Sstevel@tonic-gate return (wsp->walk_callback(buf, b, wsp->walk_cbdata)); 10887c478bd9Sstevel@tonic-gate } 10897c478bd9Sstevel@tonic-gate 10907c478bd9Sstevel@tonic-gate typedef struct umem_walk { 10917c478bd9Sstevel@tonic-gate int umw_type; 10927c478bd9Sstevel@tonic-gate 1093d7dba7e5SBryan Cantrill uintptr_t umw_addr; /* cache address */ 10947c478bd9Sstevel@tonic-gate umem_cache_t *umw_cp; 10957c478bd9Sstevel@tonic-gate size_t umw_csize; 10967c478bd9Sstevel@tonic-gate 10977c478bd9Sstevel@tonic-gate /* 10987c478bd9Sstevel@tonic-gate * magazine layer 10997c478bd9Sstevel@tonic-gate */ 11007c478bd9Sstevel@tonic-gate void **umw_maglist; 11017c478bd9Sstevel@tonic-gate size_t umw_max; 11027c478bd9Sstevel@tonic-gate size_t umw_count; 11037c478bd9Sstevel@tonic-gate size_t umw_pos; 11047c478bd9Sstevel@tonic-gate 11057c478bd9Sstevel@tonic-gate /* 11067c478bd9Sstevel@tonic-gate * slab layer 11077c478bd9Sstevel@tonic-gate */ 11087c478bd9Sstevel@tonic-gate char *umw_valid; /* to keep track of freed buffers */ 11097c478bd9Sstevel@tonic-gate char *umw_ubase; /* buffer for slab data */ 11107c478bd9Sstevel@tonic-gate } umem_walk_t; 11117c478bd9Sstevel@tonic-gate 11127c478bd9Sstevel@tonic-gate static int 11137c478bd9Sstevel@tonic-gate umem_walk_init_common(mdb_walk_state_t *wsp, int type) 11147c478bd9Sstevel@tonic-gate { 11157c478bd9Sstevel@tonic-gate umem_walk_t *umw; 1116789d94c2Sjwadams int csize; 
11177c478bd9Sstevel@tonic-gate umem_cache_t *cp; 1118789d94c2Sjwadams size_t vm_quantum; 11197c478bd9Sstevel@tonic-gate 11207c478bd9Sstevel@tonic-gate size_t magmax, magcnt; 11217c478bd9Sstevel@tonic-gate void **maglist = NULL; 11227c478bd9Sstevel@tonic-gate uint_t chunksize, slabsize; 11237c478bd9Sstevel@tonic-gate int status = WALK_ERR; 11247c478bd9Sstevel@tonic-gate uintptr_t addr = wsp->walk_addr; 11257c478bd9Sstevel@tonic-gate const char *layered; 11267c478bd9Sstevel@tonic-gate 11277c478bd9Sstevel@tonic-gate type &= ~UM_HASH; 11287c478bd9Sstevel@tonic-gate 1129*892ad162SToomas Soome if (addr == 0) { 11307c478bd9Sstevel@tonic-gate mdb_warn("umem walk doesn't support global walks\n"); 11317c478bd9Sstevel@tonic-gate return (WALK_ERR); 11327c478bd9Sstevel@tonic-gate } 11337c478bd9Sstevel@tonic-gate 11347c478bd9Sstevel@tonic-gate dprintf(("walking %p\n", addr)); 11357c478bd9Sstevel@tonic-gate 11367c478bd9Sstevel@tonic-gate /* 1137789d94c2Sjwadams * The number of "cpus" determines how large the cache is. 11387c478bd9Sstevel@tonic-gate */ 1139789d94c2Sjwadams csize = UMEM_CACHE_SIZE(umem_max_ncpus); 11407c478bd9Sstevel@tonic-gate cp = mdb_alloc(csize, UM_SLEEP); 11417c478bd9Sstevel@tonic-gate 11427c478bd9Sstevel@tonic-gate if (mdb_vread(cp, csize, addr) == -1) { 11437c478bd9Sstevel@tonic-gate mdb_warn("couldn't read cache at addr %p", addr); 11447c478bd9Sstevel@tonic-gate goto out2; 11457c478bd9Sstevel@tonic-gate } 11467c478bd9Sstevel@tonic-gate 1147789d94c2Sjwadams /* 1148789d94c2Sjwadams * It's easy for someone to hand us an invalid cache address. 1149789d94c2Sjwadams * Unfortunately, it is hard for this walker to survive an 1150789d94c2Sjwadams * invalid cache cleanly. So we make sure that: 1151789d94c2Sjwadams * 1152789d94c2Sjwadams * 1. the vmem arena for the cache is readable, 1153789d94c2Sjwadams * 2. the vmem arena's quantum is a power of 2, 1154789d94c2Sjwadams * 3. our slabsize is a multiple of the quantum, and 1155789d94c2Sjwadams * 4. 
our chunksize is >0 and less than our slabsize. 1156789d94c2Sjwadams */ 1157789d94c2Sjwadams if (mdb_vread(&vm_quantum, sizeof (vm_quantum), 1158789d94c2Sjwadams (uintptr_t)&cp->cache_arena->vm_quantum) == -1 || 1159789d94c2Sjwadams vm_quantum == 0 || 1160789d94c2Sjwadams (vm_quantum & (vm_quantum - 1)) != 0 || 1161789d94c2Sjwadams cp->cache_slabsize < vm_quantum || 1162789d94c2Sjwadams P2PHASE(cp->cache_slabsize, vm_quantum) != 0 || 1163789d94c2Sjwadams cp->cache_chunksize == 0 || 1164789d94c2Sjwadams cp->cache_chunksize > cp->cache_slabsize) { 1165789d94c2Sjwadams mdb_warn("%p is not a valid umem_cache_t\n", addr); 1166789d94c2Sjwadams goto out2; 1167789d94c2Sjwadams } 1168789d94c2Sjwadams 11697c478bd9Sstevel@tonic-gate dprintf(("buf total is %d\n", cp->cache_buftotal)); 11707c478bd9Sstevel@tonic-gate 11717c478bd9Sstevel@tonic-gate if (cp->cache_buftotal == 0) { 11727c478bd9Sstevel@tonic-gate mdb_free(cp, csize); 11737c478bd9Sstevel@tonic-gate return (WALK_DONE); 11747c478bd9Sstevel@tonic-gate } 11757c478bd9Sstevel@tonic-gate 11767c478bd9Sstevel@tonic-gate /* 11777c478bd9Sstevel@tonic-gate * If they ask for bufctls, but it's a small-slab cache, 11787c478bd9Sstevel@tonic-gate * there is nothing to report. 
11797c478bd9Sstevel@tonic-gate */ 11807c478bd9Sstevel@tonic-gate if ((type & UM_BUFCTL) && !(cp->cache_flags & UMF_HASH)) { 11817c478bd9Sstevel@tonic-gate dprintf(("bufctl requested, not UMF_HASH (flags: %p)\n", 11827c478bd9Sstevel@tonic-gate cp->cache_flags)); 11837c478bd9Sstevel@tonic-gate mdb_free(cp, csize); 11847c478bd9Sstevel@tonic-gate return (WALK_DONE); 11857c478bd9Sstevel@tonic-gate } 11867c478bd9Sstevel@tonic-gate 11877c478bd9Sstevel@tonic-gate /* 11887c478bd9Sstevel@tonic-gate * Read in the contents of the magazine layer 11897c478bd9Sstevel@tonic-gate */ 11904f364e7cSRobert Mustacchi if (umem_read_magazines(cp, addr, &maglist, &magcnt, &magmax) != 0) 11914f364e7cSRobert Mustacchi goto out2; 11924f364e7cSRobert Mustacchi 11934f364e7cSRobert Mustacchi /* 11944f364e7cSRobert Mustacchi * Read in the contents of the per-thread caches, if any 11954f364e7cSRobert Mustacchi */ 11964f364e7cSRobert Mustacchi if (umem_read_ptc(cp, &maglist, &magcnt, &magmax) != 0) 11977c478bd9Sstevel@tonic-gate goto out2; 11987c478bd9Sstevel@tonic-gate 11997c478bd9Sstevel@tonic-gate /* 12004f364e7cSRobert Mustacchi * We have all of the buffers from the magazines and from the 12014f364e7cSRobert Mustacchi * per-thread cache (if any); if we are walking allocated buffers, 12024f364e7cSRobert Mustacchi * sort them so we can bsearch them later. 
12037c478bd9Sstevel@tonic-gate */ 12047c478bd9Sstevel@tonic-gate if (type & UM_ALLOCATED) 12057c478bd9Sstevel@tonic-gate qsort(maglist, magcnt, sizeof (void *), addrcmp); 12067c478bd9Sstevel@tonic-gate 12077c478bd9Sstevel@tonic-gate wsp->walk_data = umw = mdb_zalloc(sizeof (umem_walk_t), UM_SLEEP); 12087c478bd9Sstevel@tonic-gate 12097c478bd9Sstevel@tonic-gate umw->umw_type = type; 12107c478bd9Sstevel@tonic-gate umw->umw_addr = addr; 12117c478bd9Sstevel@tonic-gate umw->umw_cp = cp; 12127c478bd9Sstevel@tonic-gate umw->umw_csize = csize; 12137c478bd9Sstevel@tonic-gate umw->umw_maglist = maglist; 12147c478bd9Sstevel@tonic-gate umw->umw_max = magmax; 12157c478bd9Sstevel@tonic-gate umw->umw_count = magcnt; 12167c478bd9Sstevel@tonic-gate umw->umw_pos = 0; 12177c478bd9Sstevel@tonic-gate 12187c478bd9Sstevel@tonic-gate /* 12197c478bd9Sstevel@tonic-gate * When walking allocated buffers in a UMF_HASH cache, we walk the 12207c478bd9Sstevel@tonic-gate * hash table instead of the slab layer. 12217c478bd9Sstevel@tonic-gate */ 12227c478bd9Sstevel@tonic-gate if ((cp->cache_flags & UMF_HASH) && (type & UM_ALLOCATED)) { 12237c478bd9Sstevel@tonic-gate layered = "umem_hash"; 12247c478bd9Sstevel@tonic-gate 12257c478bd9Sstevel@tonic-gate umw->umw_type |= UM_HASH; 12267c478bd9Sstevel@tonic-gate } else { 12277c478bd9Sstevel@tonic-gate /* 12287c478bd9Sstevel@tonic-gate * If we are walking freed buffers, we only need the 12297c478bd9Sstevel@tonic-gate * magazine layer plus the partially allocated slabs. 12307c478bd9Sstevel@tonic-gate * To walk allocated buffers, we need all of the slabs. 12317c478bd9Sstevel@tonic-gate */ 12327c478bd9Sstevel@tonic-gate if (type & UM_ALLOCATED) 12337c478bd9Sstevel@tonic-gate layered = "umem_slab"; 12347c478bd9Sstevel@tonic-gate else 12357c478bd9Sstevel@tonic-gate layered = "umem_slab_partial"; 12367c478bd9Sstevel@tonic-gate 12377c478bd9Sstevel@tonic-gate /* 12387c478bd9Sstevel@tonic-gate * for small-slab caches, we read in the entire slab. 
For 12397c478bd9Sstevel@tonic-gate * freed buffers, we can just walk the freelist. For 12407c478bd9Sstevel@tonic-gate * allocated buffers, we use a 'valid' array to track 12417c478bd9Sstevel@tonic-gate * the freed buffers. 12427c478bd9Sstevel@tonic-gate */ 12437c478bd9Sstevel@tonic-gate if (!(cp->cache_flags & UMF_HASH)) { 12447c478bd9Sstevel@tonic-gate chunksize = cp->cache_chunksize; 12457c478bd9Sstevel@tonic-gate slabsize = cp->cache_slabsize; 12467c478bd9Sstevel@tonic-gate 12477c478bd9Sstevel@tonic-gate umw->umw_ubase = mdb_alloc(slabsize + 12487c478bd9Sstevel@tonic-gate sizeof (umem_bufctl_t), UM_SLEEP); 12497c478bd9Sstevel@tonic-gate 12507c478bd9Sstevel@tonic-gate if (type & UM_ALLOCATED) 12517c478bd9Sstevel@tonic-gate umw->umw_valid = 12527c478bd9Sstevel@tonic-gate mdb_alloc(slabsize / chunksize, UM_SLEEP); 12537c478bd9Sstevel@tonic-gate } 12547c478bd9Sstevel@tonic-gate } 12557c478bd9Sstevel@tonic-gate 12567c478bd9Sstevel@tonic-gate status = WALK_NEXT; 12577c478bd9Sstevel@tonic-gate 12587c478bd9Sstevel@tonic-gate if (mdb_layered_walk(layered, wsp) == -1) { 12597c478bd9Sstevel@tonic-gate mdb_warn("unable to start layered '%s' walk", layered); 12607c478bd9Sstevel@tonic-gate status = WALK_ERR; 12617c478bd9Sstevel@tonic-gate } 12627c478bd9Sstevel@tonic-gate 12637c478bd9Sstevel@tonic-gate out1: 12647c478bd9Sstevel@tonic-gate if (status == WALK_ERR) { 12657c478bd9Sstevel@tonic-gate if (umw->umw_valid) 12667c478bd9Sstevel@tonic-gate mdb_free(umw->umw_valid, slabsize / chunksize); 12677c478bd9Sstevel@tonic-gate 12687c478bd9Sstevel@tonic-gate if (umw->umw_ubase) 12697c478bd9Sstevel@tonic-gate mdb_free(umw->umw_ubase, slabsize + 12707c478bd9Sstevel@tonic-gate sizeof (umem_bufctl_t)); 12717c478bd9Sstevel@tonic-gate 1272789d94c2Sjwadams if (umw->umw_maglist) 1273789d94c2Sjwadams mdb_free(umw->umw_maglist, umw->umw_max * 1274789d94c2Sjwadams sizeof (uintptr_t)); 1275789d94c2Sjwadams 12767c478bd9Sstevel@tonic-gate mdb_free(umw, sizeof (umem_walk_t)); 
12777c478bd9Sstevel@tonic-gate wsp->walk_data = NULL; 12787c478bd9Sstevel@tonic-gate } 12797c478bd9Sstevel@tonic-gate 12807c478bd9Sstevel@tonic-gate out2: 12817c478bd9Sstevel@tonic-gate if (status == WALK_ERR) 12827c478bd9Sstevel@tonic-gate mdb_free(cp, csize); 12837c478bd9Sstevel@tonic-gate 12847c478bd9Sstevel@tonic-gate return (status); 12857c478bd9Sstevel@tonic-gate } 12867c478bd9Sstevel@tonic-gate 12877c478bd9Sstevel@tonic-gate int 12887c478bd9Sstevel@tonic-gate umem_walk_step(mdb_walk_state_t *wsp) 12897c478bd9Sstevel@tonic-gate { 12907c478bd9Sstevel@tonic-gate umem_walk_t *umw = wsp->walk_data; 12917c478bd9Sstevel@tonic-gate int type = umw->umw_type; 12927c478bd9Sstevel@tonic-gate umem_cache_t *cp = umw->umw_cp; 12937c478bd9Sstevel@tonic-gate 12947c478bd9Sstevel@tonic-gate void **maglist = umw->umw_maglist; 12957c478bd9Sstevel@tonic-gate int magcnt = umw->umw_count; 12967c478bd9Sstevel@tonic-gate 12977c478bd9Sstevel@tonic-gate uintptr_t chunksize, slabsize; 12987c478bd9Sstevel@tonic-gate uintptr_t addr; 12997c478bd9Sstevel@tonic-gate const umem_slab_t *sp; 13007c478bd9Sstevel@tonic-gate const umem_bufctl_t *bcp; 13017c478bd9Sstevel@tonic-gate umem_bufctl_t bc; 13027c478bd9Sstevel@tonic-gate 13037c478bd9Sstevel@tonic-gate int chunks; 13047c478bd9Sstevel@tonic-gate char *kbase; 13057c478bd9Sstevel@tonic-gate void *buf; 13067c478bd9Sstevel@tonic-gate int i, ret; 13077c478bd9Sstevel@tonic-gate 13087c478bd9Sstevel@tonic-gate char *valid, *ubase; 13097c478bd9Sstevel@tonic-gate 13107c478bd9Sstevel@tonic-gate /* 13117c478bd9Sstevel@tonic-gate * first, handle the 'umem_hash' layered walk case 13127c478bd9Sstevel@tonic-gate */ 13137c478bd9Sstevel@tonic-gate if (type & UM_HASH) { 13147c478bd9Sstevel@tonic-gate /* 13157c478bd9Sstevel@tonic-gate * We have a buffer which has been allocated out of the 13167c478bd9Sstevel@tonic-gate * global layer. 
We need to make sure that it's not 13177c478bd9Sstevel@tonic-gate * actually sitting in a magazine before we report it as 13187c478bd9Sstevel@tonic-gate * an allocated buffer. 13197c478bd9Sstevel@tonic-gate */ 13207c478bd9Sstevel@tonic-gate buf = ((const umem_bufctl_t *)wsp->walk_layer)->bc_addr; 13217c478bd9Sstevel@tonic-gate 13227c478bd9Sstevel@tonic-gate if (magcnt > 0 && 13237c478bd9Sstevel@tonic-gate bsearch(&buf, maglist, magcnt, sizeof (void *), 13247c478bd9Sstevel@tonic-gate addrcmp) != NULL) 13257c478bd9Sstevel@tonic-gate return (WALK_NEXT); 13267c478bd9Sstevel@tonic-gate 13277c478bd9Sstevel@tonic-gate if (type & UM_BUFCTL) 13287c478bd9Sstevel@tonic-gate return (bufctl_walk_callback(cp, wsp, wsp->walk_addr)); 13297c478bd9Sstevel@tonic-gate 13307c478bd9Sstevel@tonic-gate return (umem_walk_callback(wsp, (uintptr_t)buf)); 13317c478bd9Sstevel@tonic-gate } 13327c478bd9Sstevel@tonic-gate 13337c478bd9Sstevel@tonic-gate ret = WALK_NEXT; 13347c478bd9Sstevel@tonic-gate 13357c478bd9Sstevel@tonic-gate addr = umw->umw_addr; 13367c478bd9Sstevel@tonic-gate 13377c478bd9Sstevel@tonic-gate /* 13387c478bd9Sstevel@tonic-gate * If we're walking freed buffers, report everything in the 13397c478bd9Sstevel@tonic-gate * magazine layer before processing the first slab. 
13407c478bd9Sstevel@tonic-gate */ 13417c478bd9Sstevel@tonic-gate if ((type & UM_FREE) && magcnt != 0) { 13427c478bd9Sstevel@tonic-gate umw->umw_count = 0; /* only do this once */ 13437c478bd9Sstevel@tonic-gate for (i = 0; i < magcnt; i++) { 13447c478bd9Sstevel@tonic-gate buf = maglist[i]; 13457c478bd9Sstevel@tonic-gate 13467c478bd9Sstevel@tonic-gate if (type & UM_BUFCTL) { 13477c478bd9Sstevel@tonic-gate uintptr_t out; 13487c478bd9Sstevel@tonic-gate 13497c478bd9Sstevel@tonic-gate if (cp->cache_flags & UMF_BUFTAG) { 13507c478bd9Sstevel@tonic-gate umem_buftag_t *btp; 13517c478bd9Sstevel@tonic-gate umem_buftag_t tag; 13527c478bd9Sstevel@tonic-gate 13537c478bd9Sstevel@tonic-gate /* LINTED - alignment */ 13547c478bd9Sstevel@tonic-gate btp = UMEM_BUFTAG(cp, buf); 13557c478bd9Sstevel@tonic-gate if (mdb_vread(&tag, sizeof (tag), 13567c478bd9Sstevel@tonic-gate (uintptr_t)btp) == -1) { 13577c478bd9Sstevel@tonic-gate mdb_warn("reading buftag for " 13587c478bd9Sstevel@tonic-gate "%p at %p", buf, btp); 13597c478bd9Sstevel@tonic-gate continue; 13607c478bd9Sstevel@tonic-gate } 13617c478bd9Sstevel@tonic-gate out = (uintptr_t)tag.bt_bufctl; 13627c478bd9Sstevel@tonic-gate } else { 13637c478bd9Sstevel@tonic-gate if (umem_hash_lookup(cp, addr, buf, 13647c478bd9Sstevel@tonic-gate &out) == -1) 13657c478bd9Sstevel@tonic-gate continue; 13667c478bd9Sstevel@tonic-gate } 13677c478bd9Sstevel@tonic-gate ret = bufctl_walk_callback(cp, wsp, out); 13687c478bd9Sstevel@tonic-gate } else { 13697c478bd9Sstevel@tonic-gate ret = umem_walk_callback(wsp, (uintptr_t)buf); 13707c478bd9Sstevel@tonic-gate } 13717c478bd9Sstevel@tonic-gate 13727c478bd9Sstevel@tonic-gate if (ret != WALK_NEXT) 13737c478bd9Sstevel@tonic-gate return (ret); 13747c478bd9Sstevel@tonic-gate } 13757c478bd9Sstevel@tonic-gate } 13767c478bd9Sstevel@tonic-gate 13777c478bd9Sstevel@tonic-gate /* 13787c478bd9Sstevel@tonic-gate * Handle the buffers in the current slab 13797c478bd9Sstevel@tonic-gate */ 13807c478bd9Sstevel@tonic-gate chunksize = 
cp->cache_chunksize; 13817c478bd9Sstevel@tonic-gate slabsize = cp->cache_slabsize; 13827c478bd9Sstevel@tonic-gate 13837c478bd9Sstevel@tonic-gate sp = wsp->walk_layer; 13847c478bd9Sstevel@tonic-gate chunks = sp->slab_chunks; 13857c478bd9Sstevel@tonic-gate kbase = sp->slab_base; 13867c478bd9Sstevel@tonic-gate 13877c478bd9Sstevel@tonic-gate dprintf(("kbase is %p\n", kbase)); 13887c478bd9Sstevel@tonic-gate 13897c478bd9Sstevel@tonic-gate if (!(cp->cache_flags & UMF_HASH)) { 13907c478bd9Sstevel@tonic-gate valid = umw->umw_valid; 13917c478bd9Sstevel@tonic-gate ubase = umw->umw_ubase; 13927c478bd9Sstevel@tonic-gate 13937c478bd9Sstevel@tonic-gate if (mdb_vread(ubase, chunks * chunksize, 13947c478bd9Sstevel@tonic-gate (uintptr_t)kbase) == -1) { 13957c478bd9Sstevel@tonic-gate mdb_warn("failed to read slab contents at %p", kbase); 13967c478bd9Sstevel@tonic-gate return (WALK_ERR); 13977c478bd9Sstevel@tonic-gate } 13987c478bd9Sstevel@tonic-gate 13997c478bd9Sstevel@tonic-gate /* 14007c478bd9Sstevel@tonic-gate * Set up the valid map as fully allocated -- we'll punch 14017c478bd9Sstevel@tonic-gate * out the freelist. 
14027c478bd9Sstevel@tonic-gate */ 14037c478bd9Sstevel@tonic-gate if (type & UM_ALLOCATED) 14047c478bd9Sstevel@tonic-gate (void) memset(valid, 1, chunks); 14057c478bd9Sstevel@tonic-gate } else { 14067c478bd9Sstevel@tonic-gate valid = NULL; 14077c478bd9Sstevel@tonic-gate ubase = NULL; 14087c478bd9Sstevel@tonic-gate } 14097c478bd9Sstevel@tonic-gate 14107c478bd9Sstevel@tonic-gate /* 14117c478bd9Sstevel@tonic-gate * walk the slab's freelist 14127c478bd9Sstevel@tonic-gate */ 14137c478bd9Sstevel@tonic-gate bcp = sp->slab_head; 14147c478bd9Sstevel@tonic-gate 14157c478bd9Sstevel@tonic-gate dprintf(("refcnt is %d; chunks is %d\n", sp->slab_refcnt, chunks)); 14167c478bd9Sstevel@tonic-gate 14177c478bd9Sstevel@tonic-gate /* 14187c478bd9Sstevel@tonic-gate * since we could be in the middle of allocating a buffer, 14197c478bd9Sstevel@tonic-gate * our refcnt could be one higher than it aught. So we 14207c478bd9Sstevel@tonic-gate * check one further on the freelist than the count allows. 14217c478bd9Sstevel@tonic-gate */ 14227c478bd9Sstevel@tonic-gate for (i = sp->slab_refcnt; i <= chunks; i++) { 14237c478bd9Sstevel@tonic-gate uint_t ndx; 14247c478bd9Sstevel@tonic-gate 14257c478bd9Sstevel@tonic-gate dprintf(("bcp is %p\n", bcp)); 14267c478bd9Sstevel@tonic-gate 14277c478bd9Sstevel@tonic-gate if (bcp == NULL) { 14287c478bd9Sstevel@tonic-gate if (i == chunks) 14297c478bd9Sstevel@tonic-gate break; 14307c478bd9Sstevel@tonic-gate mdb_warn( 14317c478bd9Sstevel@tonic-gate "slab %p in cache %p freelist too short by %d\n", 14327c478bd9Sstevel@tonic-gate sp, addr, chunks - i); 14337c478bd9Sstevel@tonic-gate break; 14347c478bd9Sstevel@tonic-gate } 14357c478bd9Sstevel@tonic-gate 14367c478bd9Sstevel@tonic-gate if (cp->cache_flags & UMF_HASH) { 14377c478bd9Sstevel@tonic-gate if (mdb_vread(&bc, sizeof (bc), (uintptr_t)bcp) == -1) { 14387c478bd9Sstevel@tonic-gate mdb_warn("failed to read bufctl ptr at %p", 14397c478bd9Sstevel@tonic-gate bcp); 14407c478bd9Sstevel@tonic-gate break; 
14417c478bd9Sstevel@tonic-gate } 14427c478bd9Sstevel@tonic-gate buf = bc.bc_addr; 14437c478bd9Sstevel@tonic-gate } else { 14447c478bd9Sstevel@tonic-gate /* 1445d7dba7e5SBryan Cantrill * Otherwise the buffer is (or should be) in the slab 1446d7dba7e5SBryan Cantrill * that we've read in; determine its offset in the 1447d7dba7e5SBryan Cantrill * slab, validate that it's not corrupt, and add to 1448d7dba7e5SBryan Cantrill * our base address to find the umem_bufctl_t. (Note 1449d7dba7e5SBryan Cantrill * that we don't need to add the size of the bufctl 1450d7dba7e5SBryan Cantrill * to our offset calculation because of the slop that's 1451d7dba7e5SBryan Cantrill * allocated for the buffer at ubase.) 14527c478bd9Sstevel@tonic-gate */ 1453d7dba7e5SBryan Cantrill uintptr_t offs = (uintptr_t)bcp - (uintptr_t)kbase; 1454d7dba7e5SBryan Cantrill 1455d7dba7e5SBryan Cantrill if (offs > chunks * chunksize) { 1456d7dba7e5SBryan Cantrill mdb_warn("found corrupt bufctl ptr %p" 1457d7dba7e5SBryan Cantrill " in slab %p in cache %p\n", bcp, 1458d7dba7e5SBryan Cantrill wsp->walk_addr, addr); 1459d7dba7e5SBryan Cantrill break; 1460d7dba7e5SBryan Cantrill } 14617c478bd9Sstevel@tonic-gate 1462d7dba7e5SBryan Cantrill bc = *((umem_bufctl_t *)((uintptr_t)ubase + offs)); 14637c478bd9Sstevel@tonic-gate buf = UMEM_BUF(cp, bcp); 14647c478bd9Sstevel@tonic-gate } 14657c478bd9Sstevel@tonic-gate 14667c478bd9Sstevel@tonic-gate ndx = ((uintptr_t)buf - (uintptr_t)kbase) / chunksize; 14677c478bd9Sstevel@tonic-gate 14687c478bd9Sstevel@tonic-gate if (ndx > slabsize / cp->cache_bufsize) { 14697c478bd9Sstevel@tonic-gate /* 14707c478bd9Sstevel@tonic-gate * This is very wrong; we have managed to find 14717c478bd9Sstevel@tonic-gate * a buffer in the slab which shouldn't 14727c478bd9Sstevel@tonic-gate * actually be here. Emit a warning, and 14737c478bd9Sstevel@tonic-gate * try to continue. 
14747c478bd9Sstevel@tonic-gate */ 14757c478bd9Sstevel@tonic-gate mdb_warn("buf %p is out of range for " 14767c478bd9Sstevel@tonic-gate "slab %p, cache %p\n", buf, sp, addr); 14777c478bd9Sstevel@tonic-gate } else if (type & UM_ALLOCATED) { 14787c478bd9Sstevel@tonic-gate /* 14797c478bd9Sstevel@tonic-gate * we have found a buffer on the slab's freelist; 14807c478bd9Sstevel@tonic-gate * clear its entry 14817c478bd9Sstevel@tonic-gate */ 14827c478bd9Sstevel@tonic-gate valid[ndx] = 0; 14837c478bd9Sstevel@tonic-gate } else { 14847c478bd9Sstevel@tonic-gate /* 14857c478bd9Sstevel@tonic-gate * Report this freed buffer 14867c478bd9Sstevel@tonic-gate */ 14877c478bd9Sstevel@tonic-gate if (type & UM_BUFCTL) { 14887c478bd9Sstevel@tonic-gate ret = bufctl_walk_callback(cp, wsp, 14897c478bd9Sstevel@tonic-gate (uintptr_t)bcp); 14907c478bd9Sstevel@tonic-gate } else { 14917c478bd9Sstevel@tonic-gate ret = umem_walk_callback(wsp, (uintptr_t)buf); 14927c478bd9Sstevel@tonic-gate } 14937c478bd9Sstevel@tonic-gate if (ret != WALK_NEXT) 14947c478bd9Sstevel@tonic-gate return (ret); 14957c478bd9Sstevel@tonic-gate } 14967c478bd9Sstevel@tonic-gate 14977c478bd9Sstevel@tonic-gate bcp = bc.bc_next; 14987c478bd9Sstevel@tonic-gate } 14997c478bd9Sstevel@tonic-gate 15007c478bd9Sstevel@tonic-gate if (bcp != NULL) { 15017c478bd9Sstevel@tonic-gate dprintf(("slab %p in cache %p freelist too long (%p)\n", 15027c478bd9Sstevel@tonic-gate sp, addr, bcp)); 15037c478bd9Sstevel@tonic-gate } 15047c478bd9Sstevel@tonic-gate 15057c478bd9Sstevel@tonic-gate /* 15067c478bd9Sstevel@tonic-gate * If we are walking freed buffers, the loop above handled reporting 15077c478bd9Sstevel@tonic-gate * them. 
15087c478bd9Sstevel@tonic-gate */ 15097c478bd9Sstevel@tonic-gate if (type & UM_FREE) 15107c478bd9Sstevel@tonic-gate return (WALK_NEXT); 15117c478bd9Sstevel@tonic-gate 15127c478bd9Sstevel@tonic-gate if (type & UM_BUFCTL) { 15137c478bd9Sstevel@tonic-gate mdb_warn("impossible situation: small-slab UM_BUFCTL walk for " 15147c478bd9Sstevel@tonic-gate "cache %p\n", addr); 15157c478bd9Sstevel@tonic-gate return (WALK_ERR); 15167c478bd9Sstevel@tonic-gate } 15177c478bd9Sstevel@tonic-gate 15187c478bd9Sstevel@tonic-gate /* 15197c478bd9Sstevel@tonic-gate * Report allocated buffers, skipping buffers in the magazine layer. 15207c478bd9Sstevel@tonic-gate * We only get this far for small-slab caches. 15217c478bd9Sstevel@tonic-gate */ 15227c478bd9Sstevel@tonic-gate for (i = 0; ret == WALK_NEXT && i < chunks; i++) { 15237c478bd9Sstevel@tonic-gate buf = (char *)kbase + i * chunksize; 15247c478bd9Sstevel@tonic-gate 15257c478bd9Sstevel@tonic-gate if (!valid[i]) 15267c478bd9Sstevel@tonic-gate continue; /* on slab freelist */ 15277c478bd9Sstevel@tonic-gate 15287c478bd9Sstevel@tonic-gate if (magcnt > 0 && 15297c478bd9Sstevel@tonic-gate bsearch(&buf, maglist, magcnt, sizeof (void *), 15307c478bd9Sstevel@tonic-gate addrcmp) != NULL) 15317c478bd9Sstevel@tonic-gate continue; /* in magazine layer */ 15327c478bd9Sstevel@tonic-gate 15337c478bd9Sstevel@tonic-gate ret = umem_walk_callback(wsp, (uintptr_t)buf); 15347c478bd9Sstevel@tonic-gate } 15357c478bd9Sstevel@tonic-gate return (ret); 15367c478bd9Sstevel@tonic-gate } 15377c478bd9Sstevel@tonic-gate 15387c478bd9Sstevel@tonic-gate void 15397c478bd9Sstevel@tonic-gate umem_walk_fini(mdb_walk_state_t *wsp) 15407c478bd9Sstevel@tonic-gate { 15417c478bd9Sstevel@tonic-gate umem_walk_t *umw = wsp->walk_data; 15427c478bd9Sstevel@tonic-gate uintptr_t chunksize; 15437c478bd9Sstevel@tonic-gate uintptr_t slabsize; 15447c478bd9Sstevel@tonic-gate 15457c478bd9Sstevel@tonic-gate if (umw == NULL) 15467c478bd9Sstevel@tonic-gate return; 15477c478bd9Sstevel@tonic-gate 
15487c478bd9Sstevel@tonic-gate if (umw->umw_maglist != NULL) 15497c478bd9Sstevel@tonic-gate mdb_free(umw->umw_maglist, umw->umw_max * sizeof (void *)); 15507c478bd9Sstevel@tonic-gate 15517c478bd9Sstevel@tonic-gate chunksize = umw->umw_cp->cache_chunksize; 15527c478bd9Sstevel@tonic-gate slabsize = umw->umw_cp->cache_slabsize; 15537c478bd9Sstevel@tonic-gate 15547c478bd9Sstevel@tonic-gate if (umw->umw_valid != NULL) 15557c478bd9Sstevel@tonic-gate mdb_free(umw->umw_valid, slabsize / chunksize); 15567c478bd9Sstevel@tonic-gate if (umw->umw_ubase != NULL) 15577c478bd9Sstevel@tonic-gate mdb_free(umw->umw_ubase, slabsize + sizeof (umem_bufctl_t)); 15587c478bd9Sstevel@tonic-gate 15597c478bd9Sstevel@tonic-gate mdb_free(umw->umw_cp, umw->umw_csize); 15607c478bd9Sstevel@tonic-gate mdb_free(umw, sizeof (umem_walk_t)); 15617c478bd9Sstevel@tonic-gate } 15627c478bd9Sstevel@tonic-gate 15637c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 15647c478bd9Sstevel@tonic-gate static int 15657c478bd9Sstevel@tonic-gate umem_walk_all(uintptr_t addr, const umem_cache_t *c, mdb_walk_state_t *wsp) 15667c478bd9Sstevel@tonic-gate { 15677c478bd9Sstevel@tonic-gate /* 15687c478bd9Sstevel@tonic-gate * Buffers allocated from NOTOUCH caches can also show up as freed 15697c478bd9Sstevel@tonic-gate * memory in other caches. This can be a little confusing, so we 15707c478bd9Sstevel@tonic-gate * don't walk NOTOUCH caches when walking all caches (thereby assuring 15717c478bd9Sstevel@tonic-gate * that "::walk umem" and "::walk freemem" yield disjoint output). 
15727c478bd9Sstevel@tonic-gate */ 15737c478bd9Sstevel@tonic-gate if (c->cache_cflags & UMC_NOTOUCH) 15747c478bd9Sstevel@tonic-gate return (WALK_NEXT); 15757c478bd9Sstevel@tonic-gate 15767c478bd9Sstevel@tonic-gate if (mdb_pwalk(wsp->walk_data, wsp->walk_callback, 15777c478bd9Sstevel@tonic-gate wsp->walk_cbdata, addr) == -1) 15787c478bd9Sstevel@tonic-gate return (WALK_DONE); 15797c478bd9Sstevel@tonic-gate 15807c478bd9Sstevel@tonic-gate return (WALK_NEXT); 15817c478bd9Sstevel@tonic-gate } 15827c478bd9Sstevel@tonic-gate 15837c478bd9Sstevel@tonic-gate #define UMEM_WALK_ALL(name, wsp) { \ 15847c478bd9Sstevel@tonic-gate wsp->walk_data = (name); \ 15857c478bd9Sstevel@tonic-gate if (mdb_walk("umem_cache", (mdb_walk_cb_t)umem_walk_all, wsp) == -1) \ 15867c478bd9Sstevel@tonic-gate return (WALK_ERR); \ 15877c478bd9Sstevel@tonic-gate return (WALK_DONE); \ 15887c478bd9Sstevel@tonic-gate } 15897c478bd9Sstevel@tonic-gate 15907c478bd9Sstevel@tonic-gate int 15917c478bd9Sstevel@tonic-gate umem_walk_init(mdb_walk_state_t *wsp) 15927c478bd9Sstevel@tonic-gate { 15937c478bd9Sstevel@tonic-gate if (wsp->walk_arg != NULL) 15947c478bd9Sstevel@tonic-gate wsp->walk_addr = (uintptr_t)wsp->walk_arg; 15957c478bd9Sstevel@tonic-gate 1596*892ad162SToomas Soome if (wsp->walk_addr == 0) 15977c478bd9Sstevel@tonic-gate UMEM_WALK_ALL("umem", wsp); 15987c478bd9Sstevel@tonic-gate return (umem_walk_init_common(wsp, UM_ALLOCATED)); 15997c478bd9Sstevel@tonic-gate } 16007c478bd9Sstevel@tonic-gate 16017c478bd9Sstevel@tonic-gate int 16027c478bd9Sstevel@tonic-gate bufctl_walk_init(mdb_walk_state_t *wsp) 16037c478bd9Sstevel@tonic-gate { 1604*892ad162SToomas Soome if (wsp->walk_addr == 0) 16057c478bd9Sstevel@tonic-gate UMEM_WALK_ALL("bufctl", wsp); 16067c478bd9Sstevel@tonic-gate return (umem_walk_init_common(wsp, UM_ALLOCATED | UM_BUFCTL)); 16077c478bd9Sstevel@tonic-gate } 16087c478bd9Sstevel@tonic-gate 16097c478bd9Sstevel@tonic-gate int 16107c478bd9Sstevel@tonic-gate freemem_walk_init(mdb_walk_state_t *wsp) 
16117c478bd9Sstevel@tonic-gate { 1612*892ad162SToomas Soome if (wsp->walk_addr == 0) 16137c478bd9Sstevel@tonic-gate UMEM_WALK_ALL("freemem", wsp); 16147c478bd9Sstevel@tonic-gate return (umem_walk_init_common(wsp, UM_FREE)); 16157c478bd9Sstevel@tonic-gate } 16167c478bd9Sstevel@tonic-gate 16177c478bd9Sstevel@tonic-gate int 16187c478bd9Sstevel@tonic-gate freectl_walk_init(mdb_walk_state_t *wsp) 16197c478bd9Sstevel@tonic-gate { 1620*892ad162SToomas Soome if (wsp->walk_addr == 0) 16217c478bd9Sstevel@tonic-gate UMEM_WALK_ALL("freectl", wsp); 16227c478bd9Sstevel@tonic-gate return (umem_walk_init_common(wsp, UM_FREE | UM_BUFCTL)); 16237c478bd9Sstevel@tonic-gate } 16247c478bd9Sstevel@tonic-gate 16257c478bd9Sstevel@tonic-gate typedef struct bufctl_history_walk { 16267c478bd9Sstevel@tonic-gate void *bhw_next; 16277c478bd9Sstevel@tonic-gate umem_cache_t *bhw_cache; 16287c478bd9Sstevel@tonic-gate umem_slab_t *bhw_slab; 16297c478bd9Sstevel@tonic-gate hrtime_t bhw_timestamp; 16307c478bd9Sstevel@tonic-gate } bufctl_history_walk_t; 16317c478bd9Sstevel@tonic-gate 16327c478bd9Sstevel@tonic-gate int 16337c478bd9Sstevel@tonic-gate bufctl_history_walk_init(mdb_walk_state_t *wsp) 16347c478bd9Sstevel@tonic-gate { 16357c478bd9Sstevel@tonic-gate bufctl_history_walk_t *bhw; 16367c478bd9Sstevel@tonic-gate umem_bufctl_audit_t bc; 16377c478bd9Sstevel@tonic-gate umem_bufctl_audit_t bcn; 16387c478bd9Sstevel@tonic-gate 1639*892ad162SToomas Soome if (wsp->walk_addr == 0) { 16407c478bd9Sstevel@tonic-gate mdb_warn("bufctl_history walk doesn't support global walks\n"); 16417c478bd9Sstevel@tonic-gate return (WALK_ERR); 16427c478bd9Sstevel@tonic-gate } 16437c478bd9Sstevel@tonic-gate 16447c478bd9Sstevel@tonic-gate if (mdb_vread(&bc, sizeof (bc), wsp->walk_addr) == -1) { 16457c478bd9Sstevel@tonic-gate mdb_warn("unable to read bufctl at %p", wsp->walk_addr); 16467c478bd9Sstevel@tonic-gate return (WALK_ERR); 16477c478bd9Sstevel@tonic-gate } 16487c478bd9Sstevel@tonic-gate 16497c478bd9Sstevel@tonic-gate bhw = 
mdb_zalloc(sizeof (*bhw), UM_SLEEP); 16507c478bd9Sstevel@tonic-gate bhw->bhw_timestamp = 0; 16517c478bd9Sstevel@tonic-gate bhw->bhw_cache = bc.bc_cache; 16527c478bd9Sstevel@tonic-gate bhw->bhw_slab = bc.bc_slab; 16537c478bd9Sstevel@tonic-gate 16547c478bd9Sstevel@tonic-gate /* 16557c478bd9Sstevel@tonic-gate * sometimes the first log entry matches the base bufctl; in that 16567c478bd9Sstevel@tonic-gate * case, skip the base bufctl. 16577c478bd9Sstevel@tonic-gate */ 16587c478bd9Sstevel@tonic-gate if (bc.bc_lastlog != NULL && 16597c478bd9Sstevel@tonic-gate mdb_vread(&bcn, sizeof (bcn), (uintptr_t)bc.bc_lastlog) != -1 && 16607c478bd9Sstevel@tonic-gate bc.bc_addr == bcn.bc_addr && 16617c478bd9Sstevel@tonic-gate bc.bc_cache == bcn.bc_cache && 16627c478bd9Sstevel@tonic-gate bc.bc_slab == bcn.bc_slab && 16637c478bd9Sstevel@tonic-gate bc.bc_timestamp == bcn.bc_timestamp && 16647c478bd9Sstevel@tonic-gate bc.bc_thread == bcn.bc_thread) 16657c478bd9Sstevel@tonic-gate bhw->bhw_next = bc.bc_lastlog; 16667c478bd9Sstevel@tonic-gate else 16677c478bd9Sstevel@tonic-gate bhw->bhw_next = (void *)wsp->walk_addr; 16687c478bd9Sstevel@tonic-gate 16697c478bd9Sstevel@tonic-gate wsp->walk_addr = (uintptr_t)bc.bc_addr; 16707c478bd9Sstevel@tonic-gate wsp->walk_data = bhw; 16717c478bd9Sstevel@tonic-gate 16727c478bd9Sstevel@tonic-gate return (WALK_NEXT); 16737c478bd9Sstevel@tonic-gate } 16747c478bd9Sstevel@tonic-gate 16757c478bd9Sstevel@tonic-gate int 16767c478bd9Sstevel@tonic-gate bufctl_history_walk_step(mdb_walk_state_t *wsp) 16777c478bd9Sstevel@tonic-gate { 16787c478bd9Sstevel@tonic-gate bufctl_history_walk_t *bhw = wsp->walk_data; 16797c478bd9Sstevel@tonic-gate uintptr_t addr = (uintptr_t)bhw->bhw_next; 16807c478bd9Sstevel@tonic-gate uintptr_t baseaddr = wsp->walk_addr; 16817c478bd9Sstevel@tonic-gate umem_bufctl_audit_t *b; 16827c478bd9Sstevel@tonic-gate UMEM_LOCAL_BUFCTL_AUDIT(&b); 16837c478bd9Sstevel@tonic-gate 1684*892ad162SToomas Soome if (addr == 0) 16857c478bd9Sstevel@tonic-gate return 
(WALK_DONE); 16867c478bd9Sstevel@tonic-gate 16877c478bd9Sstevel@tonic-gate if (mdb_vread(b, UMEM_BUFCTL_AUDIT_SIZE, addr) == -1) { 16887c478bd9Sstevel@tonic-gate mdb_warn("unable to read bufctl at %p", bhw->bhw_next); 16897c478bd9Sstevel@tonic-gate return (WALK_ERR); 16907c478bd9Sstevel@tonic-gate } 16917c478bd9Sstevel@tonic-gate 16927c478bd9Sstevel@tonic-gate /* 16937c478bd9Sstevel@tonic-gate * The bufctl is only valid if the address, cache, and slab are 16947c478bd9Sstevel@tonic-gate * correct. We also check that the timestamp is decreasing, to 16957c478bd9Sstevel@tonic-gate * prevent infinite loops. 16967c478bd9Sstevel@tonic-gate */ 16977c478bd9Sstevel@tonic-gate if ((uintptr_t)b->bc_addr != baseaddr || 16987c478bd9Sstevel@tonic-gate b->bc_cache != bhw->bhw_cache || 16997c478bd9Sstevel@tonic-gate b->bc_slab != bhw->bhw_slab || 17007c478bd9Sstevel@tonic-gate (bhw->bhw_timestamp != 0 && b->bc_timestamp >= bhw->bhw_timestamp)) 17017c478bd9Sstevel@tonic-gate return (WALK_DONE); 17027c478bd9Sstevel@tonic-gate 17037c478bd9Sstevel@tonic-gate bhw->bhw_next = b->bc_lastlog; 17047c478bd9Sstevel@tonic-gate bhw->bhw_timestamp = b->bc_timestamp; 17057c478bd9Sstevel@tonic-gate 17067c478bd9Sstevel@tonic-gate return (wsp->walk_callback(addr, b, wsp->walk_cbdata)); 17077c478bd9Sstevel@tonic-gate } 17087c478bd9Sstevel@tonic-gate 17097c478bd9Sstevel@tonic-gate void 17107c478bd9Sstevel@tonic-gate bufctl_history_walk_fini(mdb_walk_state_t *wsp) 17117c478bd9Sstevel@tonic-gate { 17127c478bd9Sstevel@tonic-gate bufctl_history_walk_t *bhw = wsp->walk_data; 17137c478bd9Sstevel@tonic-gate 17147c478bd9Sstevel@tonic-gate mdb_free(bhw, sizeof (*bhw)); 17157c478bd9Sstevel@tonic-gate } 17167c478bd9Sstevel@tonic-gate 17177c478bd9Sstevel@tonic-gate typedef struct umem_log_walk { 17187c478bd9Sstevel@tonic-gate umem_bufctl_audit_t *ulw_base; 17197c478bd9Sstevel@tonic-gate umem_bufctl_audit_t **ulw_sorted; 17207c478bd9Sstevel@tonic-gate umem_log_header_t ulw_lh; 17217c478bd9Sstevel@tonic-gate size_t 
ulw_size; 17227c478bd9Sstevel@tonic-gate size_t ulw_maxndx; 17237c478bd9Sstevel@tonic-gate size_t ulw_ndx; 17247c478bd9Sstevel@tonic-gate } umem_log_walk_t; 17257c478bd9Sstevel@tonic-gate 17267c478bd9Sstevel@tonic-gate int 17277c478bd9Sstevel@tonic-gate umem_log_walk_init(mdb_walk_state_t *wsp) 17287c478bd9Sstevel@tonic-gate { 17297c478bd9Sstevel@tonic-gate uintptr_t lp = wsp->walk_addr; 17307c478bd9Sstevel@tonic-gate umem_log_walk_t *ulw; 17317c478bd9Sstevel@tonic-gate umem_log_header_t *lhp; 17327c478bd9Sstevel@tonic-gate int maxndx, i, j, k; 17337c478bd9Sstevel@tonic-gate 17347c478bd9Sstevel@tonic-gate /* 17357c478bd9Sstevel@tonic-gate * By default (global walk), walk the umem_transaction_log. Otherwise 17367c478bd9Sstevel@tonic-gate * read the log whose umem_log_header_t is stored at walk_addr. 17377c478bd9Sstevel@tonic-gate */ 1738*892ad162SToomas Soome if (lp == 0 && umem_readvar(&lp, "umem_transaction_log") == -1) { 17397c478bd9Sstevel@tonic-gate mdb_warn("failed to read 'umem_transaction_log'"); 17407c478bd9Sstevel@tonic-gate return (WALK_ERR); 17417c478bd9Sstevel@tonic-gate } 17427c478bd9Sstevel@tonic-gate 1743*892ad162SToomas Soome if (lp == 0) { 17447c478bd9Sstevel@tonic-gate mdb_warn("log is disabled\n"); 17457c478bd9Sstevel@tonic-gate return (WALK_ERR); 17467c478bd9Sstevel@tonic-gate } 17477c478bd9Sstevel@tonic-gate 17487c478bd9Sstevel@tonic-gate ulw = mdb_zalloc(sizeof (umem_log_walk_t), UM_SLEEP); 17497c478bd9Sstevel@tonic-gate lhp = &ulw->ulw_lh; 17507c478bd9Sstevel@tonic-gate 17517c478bd9Sstevel@tonic-gate if (mdb_vread(lhp, sizeof (umem_log_header_t), lp) == -1) { 17527c478bd9Sstevel@tonic-gate mdb_warn("failed to read log header at %p", lp); 17537c478bd9Sstevel@tonic-gate mdb_free(ulw, sizeof (umem_log_walk_t)); 17547c478bd9Sstevel@tonic-gate return (WALK_ERR); 17557c478bd9Sstevel@tonic-gate } 17567c478bd9Sstevel@tonic-gate 17577c478bd9Sstevel@tonic-gate ulw->ulw_size = lhp->lh_chunksize * lhp->lh_nchunks; 17587c478bd9Sstevel@tonic-gate 
ulw->ulw_base = mdb_alloc(ulw->ulw_size, UM_SLEEP); 17597c478bd9Sstevel@tonic-gate maxndx = lhp->lh_chunksize / UMEM_BUFCTL_AUDIT_SIZE - 1; 17607c478bd9Sstevel@tonic-gate 17617c478bd9Sstevel@tonic-gate if (mdb_vread(ulw->ulw_base, ulw->ulw_size, 17627c478bd9Sstevel@tonic-gate (uintptr_t)lhp->lh_base) == -1) { 17637c478bd9Sstevel@tonic-gate mdb_warn("failed to read log at base %p", lhp->lh_base); 17647c478bd9Sstevel@tonic-gate mdb_free(ulw->ulw_base, ulw->ulw_size); 17657c478bd9Sstevel@tonic-gate mdb_free(ulw, sizeof (umem_log_walk_t)); 17667c478bd9Sstevel@tonic-gate return (WALK_ERR); 17677c478bd9Sstevel@tonic-gate } 17687c478bd9Sstevel@tonic-gate 17697c478bd9Sstevel@tonic-gate ulw->ulw_sorted = mdb_alloc(maxndx * lhp->lh_nchunks * 17707c478bd9Sstevel@tonic-gate sizeof (umem_bufctl_audit_t *), UM_SLEEP); 17717c478bd9Sstevel@tonic-gate 17727c478bd9Sstevel@tonic-gate for (i = 0, k = 0; i < lhp->lh_nchunks; i++) { 17737c478bd9Sstevel@tonic-gate caddr_t chunk = (caddr_t) 17747c478bd9Sstevel@tonic-gate ((uintptr_t)ulw->ulw_base + i * lhp->lh_chunksize); 17757c478bd9Sstevel@tonic-gate 17767c478bd9Sstevel@tonic-gate for (j = 0; j < maxndx; j++) { 17777c478bd9Sstevel@tonic-gate /* LINTED align */ 17787c478bd9Sstevel@tonic-gate ulw->ulw_sorted[k++] = (umem_bufctl_audit_t *)chunk; 17797c478bd9Sstevel@tonic-gate chunk += UMEM_BUFCTL_AUDIT_SIZE; 17807c478bd9Sstevel@tonic-gate } 17817c478bd9Sstevel@tonic-gate } 17827c478bd9Sstevel@tonic-gate 17837c478bd9Sstevel@tonic-gate qsort(ulw->ulw_sorted, k, sizeof (umem_bufctl_audit_t *), 17847c478bd9Sstevel@tonic-gate (int(*)(const void *, const void *))bufctlcmp); 17857c478bd9Sstevel@tonic-gate 17867c478bd9Sstevel@tonic-gate ulw->ulw_maxndx = k; 17877c478bd9Sstevel@tonic-gate wsp->walk_data = ulw; 17887c478bd9Sstevel@tonic-gate 17897c478bd9Sstevel@tonic-gate return (WALK_NEXT); 17907c478bd9Sstevel@tonic-gate } 17917c478bd9Sstevel@tonic-gate 17927c478bd9Sstevel@tonic-gate int 17937c478bd9Sstevel@tonic-gate 
umem_log_walk_step(mdb_walk_state_t *wsp) 17947c478bd9Sstevel@tonic-gate { 17957c478bd9Sstevel@tonic-gate umem_log_walk_t *ulw = wsp->walk_data; 17967c478bd9Sstevel@tonic-gate umem_bufctl_audit_t *bcp; 17977c478bd9Sstevel@tonic-gate 17987c478bd9Sstevel@tonic-gate if (ulw->ulw_ndx == ulw->ulw_maxndx) 17997c478bd9Sstevel@tonic-gate return (WALK_DONE); 18007c478bd9Sstevel@tonic-gate 18017c478bd9Sstevel@tonic-gate bcp = ulw->ulw_sorted[ulw->ulw_ndx++]; 18027c478bd9Sstevel@tonic-gate 18037c478bd9Sstevel@tonic-gate return (wsp->walk_callback((uintptr_t)bcp - (uintptr_t)ulw->ulw_base + 18047c478bd9Sstevel@tonic-gate (uintptr_t)ulw->ulw_lh.lh_base, bcp, wsp->walk_cbdata)); 18057c478bd9Sstevel@tonic-gate } 18067c478bd9Sstevel@tonic-gate 18077c478bd9Sstevel@tonic-gate void 18087c478bd9Sstevel@tonic-gate umem_log_walk_fini(mdb_walk_state_t *wsp) 18097c478bd9Sstevel@tonic-gate { 18107c478bd9Sstevel@tonic-gate umem_log_walk_t *ulw = wsp->walk_data; 18117c478bd9Sstevel@tonic-gate 18127c478bd9Sstevel@tonic-gate mdb_free(ulw->ulw_base, ulw->ulw_size); 18137c478bd9Sstevel@tonic-gate mdb_free(ulw->ulw_sorted, ulw->ulw_maxndx * 18147c478bd9Sstevel@tonic-gate sizeof (umem_bufctl_audit_t *)); 18157c478bd9Sstevel@tonic-gate mdb_free(ulw, sizeof (umem_log_walk_t)); 18167c478bd9Sstevel@tonic-gate } 18177c478bd9Sstevel@tonic-gate 18187c478bd9Sstevel@tonic-gate typedef struct allocdby_bufctl { 18197c478bd9Sstevel@tonic-gate uintptr_t abb_addr; 18207c478bd9Sstevel@tonic-gate hrtime_t abb_ts; 18217c478bd9Sstevel@tonic-gate } allocdby_bufctl_t; 18227c478bd9Sstevel@tonic-gate 18237c478bd9Sstevel@tonic-gate typedef struct allocdby_walk { 18247c478bd9Sstevel@tonic-gate const char *abw_walk; 18257c478bd9Sstevel@tonic-gate uintptr_t abw_thread; 18267c478bd9Sstevel@tonic-gate size_t abw_nbufs; 18277c478bd9Sstevel@tonic-gate size_t abw_size; 18287c478bd9Sstevel@tonic-gate allocdby_bufctl_t *abw_buf; 18297c478bd9Sstevel@tonic-gate size_t abw_ndx; 18307c478bd9Sstevel@tonic-gate } allocdby_walk_t; 
18317c478bd9Sstevel@tonic-gate 18327c478bd9Sstevel@tonic-gate int 18337c478bd9Sstevel@tonic-gate allocdby_walk_bufctl(uintptr_t addr, const umem_bufctl_audit_t *bcp, 18347c478bd9Sstevel@tonic-gate allocdby_walk_t *abw) 18357c478bd9Sstevel@tonic-gate { 18367c478bd9Sstevel@tonic-gate if ((uintptr_t)bcp->bc_thread != abw->abw_thread) 18377c478bd9Sstevel@tonic-gate return (WALK_NEXT); 18387c478bd9Sstevel@tonic-gate 18397c478bd9Sstevel@tonic-gate if (abw->abw_nbufs == abw->abw_size) { 18407c478bd9Sstevel@tonic-gate allocdby_bufctl_t *buf; 18417c478bd9Sstevel@tonic-gate size_t oldsize = sizeof (allocdby_bufctl_t) * abw->abw_size; 18427c478bd9Sstevel@tonic-gate 18437c478bd9Sstevel@tonic-gate buf = mdb_zalloc(oldsize << 1, UM_SLEEP); 18447c478bd9Sstevel@tonic-gate 18457c478bd9Sstevel@tonic-gate bcopy(abw->abw_buf, buf, oldsize); 18467c478bd9Sstevel@tonic-gate mdb_free(abw->abw_buf, oldsize); 18477c478bd9Sstevel@tonic-gate 18487c478bd9Sstevel@tonic-gate abw->abw_size <<= 1; 18497c478bd9Sstevel@tonic-gate abw->abw_buf = buf; 18507c478bd9Sstevel@tonic-gate } 18517c478bd9Sstevel@tonic-gate 18527c478bd9Sstevel@tonic-gate abw->abw_buf[abw->abw_nbufs].abb_addr = addr; 18537c478bd9Sstevel@tonic-gate abw->abw_buf[abw->abw_nbufs].abb_ts = bcp->bc_timestamp; 18547c478bd9Sstevel@tonic-gate abw->abw_nbufs++; 18557c478bd9Sstevel@tonic-gate 18567c478bd9Sstevel@tonic-gate return (WALK_NEXT); 18577c478bd9Sstevel@tonic-gate } 18587c478bd9Sstevel@tonic-gate 18597c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 18607c478bd9Sstevel@tonic-gate int 18617c478bd9Sstevel@tonic-gate allocdby_walk_cache(uintptr_t addr, const umem_cache_t *c, allocdby_walk_t *abw) 18627c478bd9Sstevel@tonic-gate { 18637c478bd9Sstevel@tonic-gate if (mdb_pwalk(abw->abw_walk, (mdb_walk_cb_t)allocdby_walk_bufctl, 18647c478bd9Sstevel@tonic-gate abw, addr) == -1) { 18657c478bd9Sstevel@tonic-gate mdb_warn("couldn't walk bufctl for cache %p", addr); 18667c478bd9Sstevel@tonic-gate return (WALK_DONE); 18677c478bd9Sstevel@tonic-gate } 
18687c478bd9Sstevel@tonic-gate 18697c478bd9Sstevel@tonic-gate return (WALK_NEXT); 18707c478bd9Sstevel@tonic-gate } 18717c478bd9Sstevel@tonic-gate 18727c478bd9Sstevel@tonic-gate static int 18737c478bd9Sstevel@tonic-gate allocdby_cmp(const allocdby_bufctl_t *lhs, const allocdby_bufctl_t *rhs) 18747c478bd9Sstevel@tonic-gate { 18757c478bd9Sstevel@tonic-gate if (lhs->abb_ts < rhs->abb_ts) 18767c478bd9Sstevel@tonic-gate return (1); 18777c478bd9Sstevel@tonic-gate if (lhs->abb_ts > rhs->abb_ts) 18787c478bd9Sstevel@tonic-gate return (-1); 18797c478bd9Sstevel@tonic-gate return (0); 18807c478bd9Sstevel@tonic-gate } 18817c478bd9Sstevel@tonic-gate 18827c478bd9Sstevel@tonic-gate static int 18837c478bd9Sstevel@tonic-gate allocdby_walk_init_common(mdb_walk_state_t *wsp, const char *walk) 18847c478bd9Sstevel@tonic-gate { 18857c478bd9Sstevel@tonic-gate allocdby_walk_t *abw; 18867c478bd9Sstevel@tonic-gate 1887*892ad162SToomas Soome if (wsp->walk_addr == 0) { 18887c478bd9Sstevel@tonic-gate mdb_warn("allocdby walk doesn't support global walks\n"); 18897c478bd9Sstevel@tonic-gate return (WALK_ERR); 18907c478bd9Sstevel@tonic-gate } 18917c478bd9Sstevel@tonic-gate 18927c478bd9Sstevel@tonic-gate abw = mdb_zalloc(sizeof (allocdby_walk_t), UM_SLEEP); 18937c478bd9Sstevel@tonic-gate 18947c478bd9Sstevel@tonic-gate abw->abw_thread = wsp->walk_addr; 18957c478bd9Sstevel@tonic-gate abw->abw_walk = walk; 18967c478bd9Sstevel@tonic-gate abw->abw_size = 128; /* something reasonable */ 18977c478bd9Sstevel@tonic-gate abw->abw_buf = 18987c478bd9Sstevel@tonic-gate mdb_zalloc(abw->abw_size * sizeof (allocdby_bufctl_t), UM_SLEEP); 18997c478bd9Sstevel@tonic-gate 19007c478bd9Sstevel@tonic-gate wsp->walk_data = abw; 19017c478bd9Sstevel@tonic-gate 19027c478bd9Sstevel@tonic-gate if (mdb_walk("umem_cache", 19037c478bd9Sstevel@tonic-gate (mdb_walk_cb_t)allocdby_walk_cache, abw) == -1) { 19047c478bd9Sstevel@tonic-gate mdb_warn("couldn't walk umem_cache"); 19057c478bd9Sstevel@tonic-gate allocdby_walk_fini(wsp); 
19067c478bd9Sstevel@tonic-gate return (WALK_ERR); 19077c478bd9Sstevel@tonic-gate } 19087c478bd9Sstevel@tonic-gate 19097c478bd9Sstevel@tonic-gate qsort(abw->abw_buf, abw->abw_nbufs, sizeof (allocdby_bufctl_t), 19107c478bd9Sstevel@tonic-gate (int(*)(const void *, const void *))allocdby_cmp); 19117c478bd9Sstevel@tonic-gate 19127c478bd9Sstevel@tonic-gate return (WALK_NEXT); 19137c478bd9Sstevel@tonic-gate } 19147c478bd9Sstevel@tonic-gate 19157c478bd9Sstevel@tonic-gate int 19167c478bd9Sstevel@tonic-gate allocdby_walk_init(mdb_walk_state_t *wsp) 19177c478bd9Sstevel@tonic-gate { 19187c478bd9Sstevel@tonic-gate return (allocdby_walk_init_common(wsp, "bufctl")); 19197c478bd9Sstevel@tonic-gate } 19207c478bd9Sstevel@tonic-gate 19217c478bd9Sstevel@tonic-gate int 19227c478bd9Sstevel@tonic-gate freedby_walk_init(mdb_walk_state_t *wsp) 19237c478bd9Sstevel@tonic-gate { 19247c478bd9Sstevel@tonic-gate return (allocdby_walk_init_common(wsp, "freectl")); 19257c478bd9Sstevel@tonic-gate } 19267c478bd9Sstevel@tonic-gate 19277c478bd9Sstevel@tonic-gate int 19287c478bd9Sstevel@tonic-gate allocdby_walk_step(mdb_walk_state_t *wsp) 19297c478bd9Sstevel@tonic-gate { 19307c478bd9Sstevel@tonic-gate allocdby_walk_t *abw = wsp->walk_data; 19317c478bd9Sstevel@tonic-gate uintptr_t addr; 19327c478bd9Sstevel@tonic-gate umem_bufctl_audit_t *bcp; 19337c478bd9Sstevel@tonic-gate UMEM_LOCAL_BUFCTL_AUDIT(&bcp); 19347c478bd9Sstevel@tonic-gate 19357c478bd9Sstevel@tonic-gate if (abw->abw_ndx == abw->abw_nbufs) 19367c478bd9Sstevel@tonic-gate return (WALK_DONE); 19377c478bd9Sstevel@tonic-gate 19387c478bd9Sstevel@tonic-gate addr = abw->abw_buf[abw->abw_ndx++].abb_addr; 19397c478bd9Sstevel@tonic-gate 19407c478bd9Sstevel@tonic-gate if (mdb_vread(bcp, UMEM_BUFCTL_AUDIT_SIZE, addr) == -1) { 19417c478bd9Sstevel@tonic-gate mdb_warn("couldn't read bufctl at %p", addr); 19427c478bd9Sstevel@tonic-gate return (WALK_DONE); 19437c478bd9Sstevel@tonic-gate } 19447c478bd9Sstevel@tonic-gate 19457c478bd9Sstevel@tonic-gate return 
(wsp->walk_callback(addr, bcp, wsp->walk_cbdata)); 19467c478bd9Sstevel@tonic-gate } 19477c478bd9Sstevel@tonic-gate 19487c478bd9Sstevel@tonic-gate void 19497c478bd9Sstevel@tonic-gate allocdby_walk_fini(mdb_walk_state_t *wsp) 19507c478bd9Sstevel@tonic-gate { 19517c478bd9Sstevel@tonic-gate allocdby_walk_t *abw = wsp->walk_data; 19527c478bd9Sstevel@tonic-gate 19537c478bd9Sstevel@tonic-gate mdb_free(abw->abw_buf, sizeof (allocdby_bufctl_t) * abw->abw_size); 19547c478bd9Sstevel@tonic-gate mdb_free(abw, sizeof (allocdby_walk_t)); 19557c478bd9Sstevel@tonic-gate } 19567c478bd9Sstevel@tonic-gate 19577c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 19587c478bd9Sstevel@tonic-gate int 19597c478bd9Sstevel@tonic-gate allocdby_walk(uintptr_t addr, const umem_bufctl_audit_t *bcp, void *ignored) 19607c478bd9Sstevel@tonic-gate { 19617c478bd9Sstevel@tonic-gate char c[MDB_SYM_NAMLEN]; 19627c478bd9Sstevel@tonic-gate GElf_Sym sym; 19637c478bd9Sstevel@tonic-gate int i; 19647c478bd9Sstevel@tonic-gate 19657c478bd9Sstevel@tonic-gate mdb_printf("%0?p %12llx ", addr, bcp->bc_timestamp); 19667c478bd9Sstevel@tonic-gate for (i = 0; i < bcp->bc_depth; i++) { 19677c478bd9Sstevel@tonic-gate if (mdb_lookup_by_addr(bcp->bc_stack[i], 19687c478bd9Sstevel@tonic-gate MDB_SYM_FUZZY, c, sizeof (c), &sym) == -1) 19697c478bd9Sstevel@tonic-gate continue; 19707c478bd9Sstevel@tonic-gate if (is_umem_sym(c, "umem_")) 19717c478bd9Sstevel@tonic-gate continue; 19727c478bd9Sstevel@tonic-gate mdb_printf("%s+0x%lx", 19737c478bd9Sstevel@tonic-gate c, bcp->bc_stack[i] - (uintptr_t)sym.st_value); 19747c478bd9Sstevel@tonic-gate break; 19757c478bd9Sstevel@tonic-gate } 19767c478bd9Sstevel@tonic-gate mdb_printf("\n"); 19777c478bd9Sstevel@tonic-gate 19787c478bd9Sstevel@tonic-gate return (WALK_NEXT); 19797c478bd9Sstevel@tonic-gate } 19807c478bd9Sstevel@tonic-gate 19817c478bd9Sstevel@tonic-gate static int 19827c478bd9Sstevel@tonic-gate allocdby_common(uintptr_t addr, uint_t flags, const char *w) 19837c478bd9Sstevel@tonic-gate { 
19847c478bd9Sstevel@tonic-gate if (!(flags & DCMD_ADDRSPEC)) 19857c478bd9Sstevel@tonic-gate return (DCMD_USAGE); 19867c478bd9Sstevel@tonic-gate 19877c478bd9Sstevel@tonic-gate mdb_printf("%-?s %12s %s\n", "BUFCTL", "TIMESTAMP", "CALLER"); 19887c478bd9Sstevel@tonic-gate 19897c478bd9Sstevel@tonic-gate if (mdb_pwalk(w, (mdb_walk_cb_t)allocdby_walk, NULL, addr) == -1) { 19907c478bd9Sstevel@tonic-gate mdb_warn("can't walk '%s' for %p", w, addr); 19917c478bd9Sstevel@tonic-gate return (DCMD_ERR); 19927c478bd9Sstevel@tonic-gate } 19937c478bd9Sstevel@tonic-gate 19947c478bd9Sstevel@tonic-gate return (DCMD_OK); 19957c478bd9Sstevel@tonic-gate } 19967c478bd9Sstevel@tonic-gate 19977c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 19987c478bd9Sstevel@tonic-gate int 19997c478bd9Sstevel@tonic-gate allocdby(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 20007c478bd9Sstevel@tonic-gate { 20017c478bd9Sstevel@tonic-gate return (allocdby_common(addr, flags, "allocdby")); 20027c478bd9Sstevel@tonic-gate } 20037c478bd9Sstevel@tonic-gate 20047c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 20057c478bd9Sstevel@tonic-gate int 20067c478bd9Sstevel@tonic-gate freedby(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 20077c478bd9Sstevel@tonic-gate { 20087c478bd9Sstevel@tonic-gate return (allocdby_common(addr, flags, "freedby")); 20097c478bd9Sstevel@tonic-gate } 20107c478bd9Sstevel@tonic-gate 20114a1c2431SJonathan Adams typedef struct whatis_info { 20124a1c2431SJonathan Adams mdb_whatis_t *wi_w; 20134a1c2431SJonathan Adams const umem_cache_t *wi_cache; 20144a1c2431SJonathan Adams const vmem_t *wi_vmem; 20154a1c2431SJonathan Adams vmem_t *wi_msb_arena; 20164a1c2431SJonathan Adams size_t wi_slab_size; 20174a1c2431SJonathan Adams int wi_slab_found; 20184a1c2431SJonathan Adams uint_t wi_freemem; 20194a1c2431SJonathan Adams } whatis_info_t; 20200c3b83b1SJonathan Adams 20210c3b83b1SJonathan Adams /* call one of our dcmd functions with "-v" and the provided address */ 20220c3b83b1SJonathan Adams 
static void 20230c3b83b1SJonathan Adams whatis_call_printer(mdb_dcmd_f *dcmd, uintptr_t addr) 20240c3b83b1SJonathan Adams { 20250c3b83b1SJonathan Adams mdb_arg_t a; 20260c3b83b1SJonathan Adams a.a_type = MDB_TYPE_STRING; 20270c3b83b1SJonathan Adams a.a_un.a_str = "-v"; 20280c3b83b1SJonathan Adams 20294a1c2431SJonathan Adams mdb_printf(":\n"); 20300c3b83b1SJonathan Adams (void) (*dcmd)(addr, DCMD_ADDRSPEC, 1, &a); 20310c3b83b1SJonathan Adams } 20320c3b83b1SJonathan Adams 20337c478bd9Sstevel@tonic-gate static void 20344a1c2431SJonathan Adams whatis_print_umem(whatis_info_t *wi, uintptr_t maddr, uintptr_t addr, 20354a1c2431SJonathan Adams uintptr_t baddr) 20367c478bd9Sstevel@tonic-gate { 20374a1c2431SJonathan Adams mdb_whatis_t *w = wi->wi_w; 20384a1c2431SJonathan Adams const umem_cache_t *cp = wi->wi_cache; 20394a1c2431SJonathan Adams int quiet = (mdb_whatis_flags(w) & WHATIS_QUIET); 20407c478bd9Sstevel@tonic-gate 20414a1c2431SJonathan Adams int call_printer = (!quiet && (cp->cache_flags & UMF_AUDIT)); 20427c478bd9Sstevel@tonic-gate 20434a1c2431SJonathan Adams mdb_whatis_report_object(w, maddr, addr, ""); 20447c478bd9Sstevel@tonic-gate 20454a1c2431SJonathan Adams if (baddr != 0 && !call_printer) 20464a1c2431SJonathan Adams mdb_printf("bufctl %p ", baddr); 20477c478bd9Sstevel@tonic-gate 20484a1c2431SJonathan Adams mdb_printf("%s from %s", 20494a1c2431SJonathan Adams (wi->wi_freemem == FALSE) ? 
"allocated" : "freed", cp->cache_name); 20507c478bd9Sstevel@tonic-gate 20514a1c2431SJonathan Adams if (call_printer && baddr != 0) { 20524a1c2431SJonathan Adams whatis_call_printer(bufctl, baddr); 20534a1c2431SJonathan Adams return; 20547c478bd9Sstevel@tonic-gate } 20554a1c2431SJonathan Adams mdb_printf("\n"); 20564a1c2431SJonathan Adams } 20577c478bd9Sstevel@tonic-gate 20584a1c2431SJonathan Adams /*ARGSUSED*/ 20594a1c2431SJonathan Adams static int 20604a1c2431SJonathan Adams whatis_walk_umem(uintptr_t addr, void *ignored, whatis_info_t *wi) 20614a1c2431SJonathan Adams { 20624a1c2431SJonathan Adams mdb_whatis_t *w = wi->wi_w; 20630c3b83b1SJonathan Adams 20644a1c2431SJonathan Adams uintptr_t cur; 20654a1c2431SJonathan Adams size_t size = wi->wi_cache->cache_bufsize; 20660c3b83b1SJonathan Adams 20674a1c2431SJonathan Adams while (mdb_whatis_match(w, addr, size, &cur)) 2068*892ad162SToomas Soome whatis_print_umem(wi, cur, addr, 0); 20690c3b83b1SJonathan Adams 20704a1c2431SJonathan Adams return (WHATIS_WALKRET(w)); 20717c478bd9Sstevel@tonic-gate } 20727c478bd9Sstevel@tonic-gate 20737c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 20747c478bd9Sstevel@tonic-gate static int 20754a1c2431SJonathan Adams whatis_walk_bufctl(uintptr_t baddr, const umem_bufctl_t *bcp, whatis_info_t *wi) 20767c478bd9Sstevel@tonic-gate { 20774a1c2431SJonathan Adams mdb_whatis_t *w = wi->wi_w; 20787c478bd9Sstevel@tonic-gate 20794a1c2431SJonathan Adams uintptr_t cur; 20804a1c2431SJonathan Adams uintptr_t addr = (uintptr_t)bcp->bc_addr; 20814a1c2431SJonathan Adams size_t size = wi->wi_cache->cache_bufsize; 20824a1c2431SJonathan Adams 20834a1c2431SJonathan Adams while (mdb_whatis_match(w, addr, size, &cur)) 20844a1c2431SJonathan Adams whatis_print_umem(wi, cur, addr, baddr); 20854a1c2431SJonathan Adams 20864a1c2431SJonathan Adams return (WHATIS_WALKRET(w)); 20877c478bd9Sstevel@tonic-gate } 20887c478bd9Sstevel@tonic-gate 20894a1c2431SJonathan Adams 20907c478bd9Sstevel@tonic-gate static int 20914a1c2431SJonathan 
Adams whatis_walk_seg(uintptr_t addr, const vmem_seg_t *vs, whatis_info_t *wi) 20927c478bd9Sstevel@tonic-gate { 20934a1c2431SJonathan Adams mdb_whatis_t *w = wi->wi_w; 20944a1c2431SJonathan Adams 20954a1c2431SJonathan Adams size_t size = vs->vs_end - vs->vs_start; 20964a1c2431SJonathan Adams uintptr_t cur; 20974a1c2431SJonathan Adams 20984a1c2431SJonathan Adams /* We're not interested in anything but alloc and free segments */ 20994a1c2431SJonathan Adams if (vs->vs_type != VMEM_ALLOC && vs->vs_type != VMEM_FREE) 21007c478bd9Sstevel@tonic-gate return (WALK_NEXT); 21017c478bd9Sstevel@tonic-gate 21024a1c2431SJonathan Adams while (mdb_whatis_match(w, vs->vs_start, size, &cur)) { 21034a1c2431SJonathan Adams mdb_whatis_report_object(w, cur, vs->vs_start, ""); 21047c478bd9Sstevel@tonic-gate 21054a1c2431SJonathan Adams /* 21064a1c2431SJonathan Adams * If we're not printing it seperately, provide the vmem_seg 21074a1c2431SJonathan Adams * pointer if it has a stack trace. 21084a1c2431SJonathan Adams */ 21094a1c2431SJonathan Adams if ((mdb_whatis_flags(w) & WHATIS_QUIET) && 21104a1c2431SJonathan Adams ((mdb_whatis_flags(w) & WHATIS_BUFCTL) != 0 || 21114a1c2431SJonathan Adams (vs->vs_type == VMEM_ALLOC && vs->vs_depth != 0))) { 21124a1c2431SJonathan Adams mdb_printf("vmem_seg %p ", addr); 21134a1c2431SJonathan Adams } 21147c478bd9Sstevel@tonic-gate 21154a1c2431SJonathan Adams mdb_printf("%s from %s vmem arena", 21164a1c2431SJonathan Adams (vs->vs_type == VMEM_ALLOC) ? 
"allocated" : "freed", 21174a1c2431SJonathan Adams wi->wi_vmem->vm_name); 21180c3b83b1SJonathan Adams 2119d75f3745SJohn Levon if (!(mdb_whatis_flags(w) & WHATIS_QUIET)) 21204a1c2431SJonathan Adams whatis_call_printer(vmem_seg, addr); 21214a1c2431SJonathan Adams else 21224a1c2431SJonathan Adams mdb_printf("\n"); 21234a1c2431SJonathan Adams } 21247c478bd9Sstevel@tonic-gate 21254a1c2431SJonathan Adams return (WHATIS_WALKRET(w)); 21267c478bd9Sstevel@tonic-gate } 21277c478bd9Sstevel@tonic-gate 21287c478bd9Sstevel@tonic-gate static int 21294a1c2431SJonathan Adams whatis_walk_vmem(uintptr_t addr, const vmem_t *vmem, whatis_info_t *wi) 21307c478bd9Sstevel@tonic-gate { 21314a1c2431SJonathan Adams mdb_whatis_t *w = wi->wi_w; 21327c478bd9Sstevel@tonic-gate const char *nm = vmem->vm_name; 21334a1c2431SJonathan Adams wi->wi_vmem = vmem; 21347c478bd9Sstevel@tonic-gate 21354a1c2431SJonathan Adams if (mdb_whatis_flags(w) & WHATIS_VERBOSE) 21367c478bd9Sstevel@tonic-gate mdb_printf("Searching vmem arena %s...\n", nm); 21377c478bd9Sstevel@tonic-gate 21384a1c2431SJonathan Adams if (mdb_pwalk("vmem_seg", 21394a1c2431SJonathan Adams (mdb_walk_cb_t)whatis_walk_seg, wi, addr) == -1) { 21407c478bd9Sstevel@tonic-gate mdb_warn("can't walk vmem seg for %p", addr); 21417c478bd9Sstevel@tonic-gate return (WALK_NEXT); 21427c478bd9Sstevel@tonic-gate } 21437c478bd9Sstevel@tonic-gate 21444a1c2431SJonathan Adams return (WHATIS_WALKRET(w)); 21457c478bd9Sstevel@tonic-gate } 21467c478bd9Sstevel@tonic-gate 21477c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 21487c478bd9Sstevel@tonic-gate static int 21494a1c2431SJonathan Adams whatis_walk_slab(uintptr_t saddr, const umem_slab_t *sp, whatis_info_t *wi) 21507c478bd9Sstevel@tonic-gate { 21514a1c2431SJonathan Adams mdb_whatis_t *w = wi->wi_w; 21527c478bd9Sstevel@tonic-gate 21534a1c2431SJonathan Adams /* It must overlap with the slab data, or it's not interesting */ 21544a1c2431SJonathan Adams if (mdb_whatis_overlaps(w, 21554a1c2431SJonathan Adams 
(uintptr_t)sp->slab_base, wi->wi_slab_size)) { 21564a1c2431SJonathan Adams wi->wi_slab_found++; 21574a1c2431SJonathan Adams return (WALK_DONE); 21584a1c2431SJonathan Adams } 21594a1c2431SJonathan Adams return (WALK_NEXT); 21607c478bd9Sstevel@tonic-gate } 21617c478bd9Sstevel@tonic-gate 21627c478bd9Sstevel@tonic-gate static int 21634a1c2431SJonathan Adams whatis_walk_cache(uintptr_t addr, const umem_cache_t *c, whatis_info_t *wi) 21647c478bd9Sstevel@tonic-gate { 21654a1c2431SJonathan Adams mdb_whatis_t *w = wi->wi_w; 21667c478bd9Sstevel@tonic-gate char *walk, *freewalk; 21677c478bd9Sstevel@tonic-gate mdb_walk_cb_t func; 21684a1c2431SJonathan Adams int do_bufctl; 21697c478bd9Sstevel@tonic-gate 21704a1c2431SJonathan Adams /* Override the '-b' flag as necessary */ 21714a1c2431SJonathan Adams if (!(c->cache_flags & UMF_HASH)) 21724a1c2431SJonathan Adams do_bufctl = FALSE; /* no bufctls to walk */ 21734a1c2431SJonathan Adams else if (c->cache_flags & UMF_AUDIT) 21744a1c2431SJonathan Adams do_bufctl = TRUE; /* we always want debugging info */ 21754a1c2431SJonathan Adams else 21764a1c2431SJonathan Adams do_bufctl = ((mdb_whatis_flags(w) & WHATIS_BUFCTL) != 0); 21774a1c2431SJonathan Adams 21784a1c2431SJonathan Adams if (do_bufctl) { 21797c478bd9Sstevel@tonic-gate walk = "bufctl"; 21807c478bd9Sstevel@tonic-gate freewalk = "freectl"; 21817c478bd9Sstevel@tonic-gate func = (mdb_walk_cb_t)whatis_walk_bufctl; 21820c3b83b1SJonathan Adams } else { 21830c3b83b1SJonathan Adams walk = "umem"; 21840c3b83b1SJonathan Adams freewalk = "freemem"; 21850c3b83b1SJonathan Adams func = (mdb_walk_cb_t)whatis_walk_umem; 21867c478bd9Sstevel@tonic-gate } 21877c478bd9Sstevel@tonic-gate 21884a1c2431SJonathan Adams wi->wi_cache = c; 21894a1c2431SJonathan Adams 21904a1c2431SJonathan Adams if (mdb_whatis_flags(w) & WHATIS_VERBOSE) 21917c478bd9Sstevel@tonic-gate mdb_printf("Searching %s...\n", c->cache_name); 21927c478bd9Sstevel@tonic-gate 21934a1c2431SJonathan Adams /* 21944a1c2431SJonathan Adams * If 
more then two buffers live on each slab, figure out if we're 21954a1c2431SJonathan Adams * interested in anything in any slab before doing the more expensive 21964a1c2431SJonathan Adams * umem/freemem (bufctl/freectl) walkers. 21974a1c2431SJonathan Adams */ 21984a1c2431SJonathan Adams wi->wi_slab_size = c->cache_slabsize - c->cache_maxcolor; 21994a1c2431SJonathan Adams if (!(c->cache_flags & UMF_HASH)) 22004a1c2431SJonathan Adams wi->wi_slab_size -= sizeof (umem_slab_t); 22014a1c2431SJonathan Adams 22024a1c2431SJonathan Adams if ((wi->wi_slab_size / c->cache_chunksize) > 2) { 22034a1c2431SJonathan Adams wi->wi_slab_found = 0; 22044a1c2431SJonathan Adams if (mdb_pwalk("umem_slab", (mdb_walk_cb_t)whatis_walk_slab, wi, 22054a1c2431SJonathan Adams addr) == -1) { 22064a1c2431SJonathan Adams mdb_warn("can't find umem_slab walker"); 22074a1c2431SJonathan Adams return (WALK_DONE); 22084a1c2431SJonathan Adams } 22094a1c2431SJonathan Adams if (wi->wi_slab_found == 0) 22104a1c2431SJonathan Adams return (WALK_NEXT); 22114a1c2431SJonathan Adams } 22127c478bd9Sstevel@tonic-gate 22134a1c2431SJonathan Adams wi->wi_freemem = FALSE; 22144a1c2431SJonathan Adams if (mdb_pwalk(walk, func, wi, addr) == -1) { 22157c478bd9Sstevel@tonic-gate mdb_warn("can't find %s walker", walk); 22167c478bd9Sstevel@tonic-gate return (WALK_DONE); 22177c478bd9Sstevel@tonic-gate } 22187c478bd9Sstevel@tonic-gate 22194a1c2431SJonathan Adams if (mdb_whatis_done(w)) 22207c478bd9Sstevel@tonic-gate return (WALK_DONE); 22217c478bd9Sstevel@tonic-gate 22227c478bd9Sstevel@tonic-gate /* 22237c478bd9Sstevel@tonic-gate * We have searched for allocated memory; now search for freed memory. 
22247c478bd9Sstevel@tonic-gate */ 22254a1c2431SJonathan Adams if (mdb_whatis_flags(w) & WHATIS_VERBOSE) 22267c478bd9Sstevel@tonic-gate mdb_printf("Searching %s for free memory...\n", c->cache_name); 22277c478bd9Sstevel@tonic-gate 22284a1c2431SJonathan Adams wi->wi_freemem = TRUE; 22297c478bd9Sstevel@tonic-gate 22304a1c2431SJonathan Adams if (mdb_pwalk(freewalk, func, wi, addr) == -1) { 22317c478bd9Sstevel@tonic-gate mdb_warn("can't find %s walker", freewalk); 22327c478bd9Sstevel@tonic-gate return (WALK_DONE); 22337c478bd9Sstevel@tonic-gate } 22347c478bd9Sstevel@tonic-gate 22354a1c2431SJonathan Adams return (WHATIS_WALKRET(w)); 22367c478bd9Sstevel@tonic-gate } 22377c478bd9Sstevel@tonic-gate 22387c478bd9Sstevel@tonic-gate static int 22394a1c2431SJonathan Adams whatis_walk_touch(uintptr_t addr, const umem_cache_t *c, whatis_info_t *wi) 22407c478bd9Sstevel@tonic-gate { 22414a1c2431SJonathan Adams if (c->cache_arena == wi->wi_msb_arena || 22424a1c2431SJonathan Adams (c->cache_cflags & UMC_NOTOUCH)) 22437c478bd9Sstevel@tonic-gate return (WALK_NEXT); 22447c478bd9Sstevel@tonic-gate 22454a1c2431SJonathan Adams return (whatis_walk_cache(addr, c, wi)); 22467c478bd9Sstevel@tonic-gate } 22477c478bd9Sstevel@tonic-gate 22487c478bd9Sstevel@tonic-gate static int 22494a1c2431SJonathan Adams whatis_walk_metadata(uintptr_t addr, const umem_cache_t *c, whatis_info_t *wi) 22507c478bd9Sstevel@tonic-gate { 22514a1c2431SJonathan Adams if (c->cache_arena != wi->wi_msb_arena) 22527c478bd9Sstevel@tonic-gate return (WALK_NEXT); 22537c478bd9Sstevel@tonic-gate 22544a1c2431SJonathan Adams return (whatis_walk_cache(addr, c, wi)); 22557c478bd9Sstevel@tonic-gate } 22567c478bd9Sstevel@tonic-gate 22574a1c2431SJonathan Adams static int 22584a1c2431SJonathan Adams whatis_walk_notouch(uintptr_t addr, const umem_cache_t *c, whatis_info_t *wi) 22597c478bd9Sstevel@tonic-gate { 22604a1c2431SJonathan Adams if (c->cache_arena == wi->wi_msb_arena || 22614a1c2431SJonathan Adams !(c->cache_cflags & UMC_NOTOUCH)) 
22624a1c2431SJonathan Adams return (WALK_NEXT); 22637c478bd9Sstevel@tonic-gate 22644a1c2431SJonathan Adams return (whatis_walk_cache(addr, c, wi)); 22654a1c2431SJonathan Adams } 22667c478bd9Sstevel@tonic-gate 22674a1c2431SJonathan Adams /*ARGSUSED*/ 22684a1c2431SJonathan Adams static int 22694a1c2431SJonathan Adams whatis_run_umem(mdb_whatis_t *w, void *ignored) 22704a1c2431SJonathan Adams { 22714a1c2431SJonathan Adams whatis_info_t wi; 22724a1c2431SJonathan Adams 22734a1c2431SJonathan Adams bzero(&wi, sizeof (wi)); 22744a1c2431SJonathan Adams wi.wi_w = w; 22757c478bd9Sstevel@tonic-gate 22764a1c2431SJonathan Adams /* umem's metadata is allocated from the umem_internal_arena */ 227722ce0148SMatthew Ahrens if (umem_readvar(&wi.wi_msb_arena, "umem_internal_arena") == -1) 22784a1c2431SJonathan Adams mdb_warn("unable to readvar \"umem_internal_arena\""); 22797c478bd9Sstevel@tonic-gate 22807c478bd9Sstevel@tonic-gate /* 22814a1c2431SJonathan Adams * We process umem caches in the following order: 22824a1c2431SJonathan Adams * 22834a1c2431SJonathan Adams * non-UMC_NOTOUCH, non-metadata (typically the most interesting) 22844a1c2431SJonathan Adams * metadata (can be huge with UMF_AUDIT) 22854a1c2431SJonathan Adams * UMC_NOTOUCH, non-metadata (see umem_walk_all()) 22867c478bd9Sstevel@tonic-gate */ 22874a1c2431SJonathan Adams if (mdb_walk("umem_cache", (mdb_walk_cb_t)whatis_walk_touch, 22884a1c2431SJonathan Adams &wi) == -1 || 22894a1c2431SJonathan Adams mdb_walk("umem_cache", (mdb_walk_cb_t)whatis_walk_metadata, 22904a1c2431SJonathan Adams &wi) == -1 || 22914a1c2431SJonathan Adams mdb_walk("umem_cache", (mdb_walk_cb_t)whatis_walk_notouch, 22924a1c2431SJonathan Adams &wi) == -1) { 22937c478bd9Sstevel@tonic-gate mdb_warn("couldn't find umem_cache walker"); 22944a1c2431SJonathan Adams return (1); 22957c478bd9Sstevel@tonic-gate } 22964a1c2431SJonathan Adams return (0); 22974a1c2431SJonathan Adams } 22987c478bd9Sstevel@tonic-gate 22994a1c2431SJonathan Adams /*ARGSUSED*/ 
23004a1c2431SJonathan Adams static int 23014a1c2431SJonathan Adams whatis_run_vmem(mdb_whatis_t *w, void *ignored) 23024a1c2431SJonathan Adams { 23034a1c2431SJonathan Adams whatis_info_t wi; 23047c478bd9Sstevel@tonic-gate 23054a1c2431SJonathan Adams bzero(&wi, sizeof (wi)); 23064a1c2431SJonathan Adams wi.wi_w = w; 23077c478bd9Sstevel@tonic-gate 23087c478bd9Sstevel@tonic-gate if (mdb_walk("vmem_postfix", 23094a1c2431SJonathan Adams (mdb_walk_cb_t)whatis_walk_vmem, &wi) == -1) { 23107c478bd9Sstevel@tonic-gate mdb_warn("couldn't find vmem_postfix walker"); 23114a1c2431SJonathan Adams return (1); 23124a1c2431SJonathan Adams } 23134a1c2431SJonathan Adams return (0); 23144a1c2431SJonathan Adams } 23154a1c2431SJonathan Adams 23164a1c2431SJonathan Adams int 23174a1c2431SJonathan Adams umem_init(void) 23184a1c2431SJonathan Adams { 23194a1c2431SJonathan Adams mdb_walker_t w = { 23204a1c2431SJonathan Adams "umem_cache", "walk list of umem caches", umem_cache_walk_init, 23214a1c2431SJonathan Adams umem_cache_walk_step, umem_cache_walk_fini 23224a1c2431SJonathan Adams }; 23234a1c2431SJonathan Adams 23244a1c2431SJonathan Adams if (mdb_add_walker(&w) == -1) { 23254a1c2431SJonathan Adams mdb_warn("failed to add umem_cache walker"); 23264a1c2431SJonathan Adams return (-1); 23277c478bd9Sstevel@tonic-gate } 23287c478bd9Sstevel@tonic-gate 23294a1c2431SJonathan Adams if (umem_update_variables() == -1) 23304a1c2431SJonathan Adams return (-1); 23317c478bd9Sstevel@tonic-gate 23324a1c2431SJonathan Adams /* install a callback so that our variables are always up-to-date */ 23334a1c2431SJonathan Adams (void) mdb_callback_add(MDB_CALLBACK_STCHG, umem_statechange_cb, NULL); 23344a1c2431SJonathan Adams umem_statechange_cb(NULL); 23354a1c2431SJonathan Adams 23364a1c2431SJonathan Adams /* 23374a1c2431SJonathan Adams * Register our ::whatis callbacks. 
23384a1c2431SJonathan Adams */ 23394a1c2431SJonathan Adams mdb_whatis_register("umem", whatis_run_umem, NULL, 23404a1c2431SJonathan Adams WHATIS_PRIO_ALLOCATOR, WHATIS_REG_NO_ID); 23414a1c2431SJonathan Adams mdb_whatis_register("vmem", whatis_run_vmem, NULL, 23424a1c2431SJonathan Adams WHATIS_PRIO_ALLOCATOR, WHATIS_REG_NO_ID); 23434a1c2431SJonathan Adams 23444a1c2431SJonathan Adams return (0); 23457c478bd9Sstevel@tonic-gate } 23467c478bd9Sstevel@tonic-gate 23477c478bd9Sstevel@tonic-gate typedef struct umem_log_cpu { 23487c478bd9Sstevel@tonic-gate uintptr_t umc_low; 23497c478bd9Sstevel@tonic-gate uintptr_t umc_high; 23507c478bd9Sstevel@tonic-gate } umem_log_cpu_t; 23517c478bd9Sstevel@tonic-gate 23527c478bd9Sstevel@tonic-gate int 23537c478bd9Sstevel@tonic-gate umem_log_walk(uintptr_t addr, const umem_bufctl_audit_t *b, umem_log_cpu_t *umc) 23547c478bd9Sstevel@tonic-gate { 23557c478bd9Sstevel@tonic-gate int i; 23567c478bd9Sstevel@tonic-gate 23577c478bd9Sstevel@tonic-gate for (i = 0; i < umem_max_ncpus; i++) { 23587c478bd9Sstevel@tonic-gate if (addr >= umc[i].umc_low && addr < umc[i].umc_high) 23597c478bd9Sstevel@tonic-gate break; 23607c478bd9Sstevel@tonic-gate } 23617c478bd9Sstevel@tonic-gate 23627c478bd9Sstevel@tonic-gate if (i == umem_max_ncpus) 23637c478bd9Sstevel@tonic-gate mdb_printf(" "); 23647c478bd9Sstevel@tonic-gate else 23657c478bd9Sstevel@tonic-gate mdb_printf("%3d", i); 23667c478bd9Sstevel@tonic-gate 23677c478bd9Sstevel@tonic-gate mdb_printf(" %0?p %0?p %16llx %0?p\n", addr, b->bc_addr, 23687c478bd9Sstevel@tonic-gate b->bc_timestamp, b->bc_thread); 23697c478bd9Sstevel@tonic-gate 23707c478bd9Sstevel@tonic-gate return (WALK_NEXT); 23717c478bd9Sstevel@tonic-gate } 23727c478bd9Sstevel@tonic-gate 23737c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 23747c478bd9Sstevel@tonic-gate int 23757c478bd9Sstevel@tonic-gate umem_log(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 23767c478bd9Sstevel@tonic-gate { 23777c478bd9Sstevel@tonic-gate umem_log_header_t lh; 
23787c478bd9Sstevel@tonic-gate umem_cpu_log_header_t clh; 23797c478bd9Sstevel@tonic-gate uintptr_t lhp, clhp; 23807c478bd9Sstevel@tonic-gate umem_log_cpu_t *umc; 23817c478bd9Sstevel@tonic-gate int i; 23827c478bd9Sstevel@tonic-gate 23837c478bd9Sstevel@tonic-gate if (umem_readvar(&lhp, "umem_transaction_log") == -1) { 23847c478bd9Sstevel@tonic-gate mdb_warn("failed to read 'umem_transaction_log'"); 23857c478bd9Sstevel@tonic-gate return (DCMD_ERR); 23867c478bd9Sstevel@tonic-gate } 23877c478bd9Sstevel@tonic-gate 2388*892ad162SToomas Soome if (lhp == 0) { 23897c478bd9Sstevel@tonic-gate mdb_warn("no umem transaction log\n"); 23907c478bd9Sstevel@tonic-gate return (DCMD_ERR); 23917c478bd9Sstevel@tonic-gate } 23927c478bd9Sstevel@tonic-gate 23937c478bd9Sstevel@tonic-gate if (mdb_vread(&lh, sizeof (umem_log_header_t), lhp) == -1) { 23947c478bd9Sstevel@tonic-gate mdb_warn("failed to read log header at %p", lhp); 23957c478bd9Sstevel@tonic-gate return (DCMD_ERR); 23967c478bd9Sstevel@tonic-gate } 23977c478bd9Sstevel@tonic-gate 23987c478bd9Sstevel@tonic-gate clhp = lhp + ((uintptr_t)&lh.lh_cpu[0] - (uintptr_t)&lh); 23997c478bd9Sstevel@tonic-gate 24007c478bd9Sstevel@tonic-gate umc = mdb_zalloc(sizeof (umem_log_cpu_t) * umem_max_ncpus, 24017c478bd9Sstevel@tonic-gate UM_SLEEP | UM_GC); 24027c478bd9Sstevel@tonic-gate 24037c478bd9Sstevel@tonic-gate for (i = 0; i < umem_max_ncpus; i++) { 24047c478bd9Sstevel@tonic-gate if (mdb_vread(&clh, sizeof (clh), clhp) == -1) { 24057c478bd9Sstevel@tonic-gate mdb_warn("cannot read cpu %d's log header at %p", 24067c478bd9Sstevel@tonic-gate i, clhp); 24077c478bd9Sstevel@tonic-gate return (DCMD_ERR); 24087c478bd9Sstevel@tonic-gate } 24097c478bd9Sstevel@tonic-gate 24107c478bd9Sstevel@tonic-gate umc[i].umc_low = clh.clh_chunk * lh.lh_chunksize + 24117c478bd9Sstevel@tonic-gate (uintptr_t)lh.lh_base; 24127c478bd9Sstevel@tonic-gate umc[i].umc_high = (uintptr_t)clh.clh_current; 24137c478bd9Sstevel@tonic-gate 24147c478bd9Sstevel@tonic-gate clhp += sizeof 
(umem_cpu_log_header_t); 24157c478bd9Sstevel@tonic-gate } 24167c478bd9Sstevel@tonic-gate 24177c478bd9Sstevel@tonic-gate if (DCMD_HDRSPEC(flags)) { 24187c478bd9Sstevel@tonic-gate mdb_printf("%3s %-?s %-?s %16s %-?s\n", "CPU", "ADDR", 24197c478bd9Sstevel@tonic-gate "BUFADDR", "TIMESTAMP", "THREAD"); 24207c478bd9Sstevel@tonic-gate } 24217c478bd9Sstevel@tonic-gate 24227c478bd9Sstevel@tonic-gate /* 24237c478bd9Sstevel@tonic-gate * If we have been passed an address, we'll just print out that 24247c478bd9Sstevel@tonic-gate * log entry. 24257c478bd9Sstevel@tonic-gate */ 24267c478bd9Sstevel@tonic-gate if (flags & DCMD_ADDRSPEC) { 24277c478bd9Sstevel@tonic-gate umem_bufctl_audit_t *bp; 24287c478bd9Sstevel@tonic-gate UMEM_LOCAL_BUFCTL_AUDIT(&bp); 24297c478bd9Sstevel@tonic-gate 24307c478bd9Sstevel@tonic-gate if (mdb_vread(bp, UMEM_BUFCTL_AUDIT_SIZE, addr) == -1) { 24317c478bd9Sstevel@tonic-gate mdb_warn("failed to read bufctl at %p", addr); 24327c478bd9Sstevel@tonic-gate return (DCMD_ERR); 24337c478bd9Sstevel@tonic-gate } 24347c478bd9Sstevel@tonic-gate 24357c478bd9Sstevel@tonic-gate (void) umem_log_walk(addr, bp, umc); 24367c478bd9Sstevel@tonic-gate 24377c478bd9Sstevel@tonic-gate return (DCMD_OK); 24387c478bd9Sstevel@tonic-gate } 24397c478bd9Sstevel@tonic-gate 24407c478bd9Sstevel@tonic-gate if (mdb_walk("umem_log", (mdb_walk_cb_t)umem_log_walk, umc) == -1) { 24417c478bd9Sstevel@tonic-gate mdb_warn("can't find umem log walker"); 24427c478bd9Sstevel@tonic-gate return (DCMD_ERR); 24437c478bd9Sstevel@tonic-gate } 24447c478bd9Sstevel@tonic-gate 24457c478bd9Sstevel@tonic-gate return (DCMD_OK); 24467c478bd9Sstevel@tonic-gate } 24477c478bd9Sstevel@tonic-gate 24487c478bd9Sstevel@tonic-gate typedef struct bufctl_history_cb { 24497c478bd9Sstevel@tonic-gate int bhc_flags; 24507c478bd9Sstevel@tonic-gate int bhc_argc; 24517c478bd9Sstevel@tonic-gate const mdb_arg_t *bhc_argv; 24527c478bd9Sstevel@tonic-gate int bhc_ret; 24537c478bd9Sstevel@tonic-gate } bufctl_history_cb_t; 
24547c478bd9Sstevel@tonic-gate 24557c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 24567c478bd9Sstevel@tonic-gate static int 24577c478bd9Sstevel@tonic-gate bufctl_history_callback(uintptr_t addr, const void *ign, void *arg) 24587c478bd9Sstevel@tonic-gate { 24597c478bd9Sstevel@tonic-gate bufctl_history_cb_t *bhc = arg; 24607c478bd9Sstevel@tonic-gate 24617c478bd9Sstevel@tonic-gate bhc->bhc_ret = 24627c478bd9Sstevel@tonic-gate bufctl(addr, bhc->bhc_flags, bhc->bhc_argc, bhc->bhc_argv); 24637c478bd9Sstevel@tonic-gate 24647c478bd9Sstevel@tonic-gate bhc->bhc_flags &= ~DCMD_LOOPFIRST; 24657c478bd9Sstevel@tonic-gate 24667c478bd9Sstevel@tonic-gate return ((bhc->bhc_ret == DCMD_OK)? WALK_NEXT : WALK_DONE); 24677c478bd9Sstevel@tonic-gate } 24687c478bd9Sstevel@tonic-gate 24697c478bd9Sstevel@tonic-gate void 24707c478bd9Sstevel@tonic-gate bufctl_help(void) 24717c478bd9Sstevel@tonic-gate { 24727c478bd9Sstevel@tonic-gate mdb_printf("%s\n", 24737c478bd9Sstevel@tonic-gate "Display the contents of umem_bufctl_audit_ts, with optional filtering.\n"); 24747c478bd9Sstevel@tonic-gate mdb_dec_indent(2); 24757c478bd9Sstevel@tonic-gate mdb_printf("%<b>OPTIONS%</b>\n"); 24767c478bd9Sstevel@tonic-gate mdb_inc_indent(2); 24777c478bd9Sstevel@tonic-gate mdb_printf("%s", 24787c478bd9Sstevel@tonic-gate " -v Display the full content of the bufctl, including its stack trace\n" 24797c478bd9Sstevel@tonic-gate " -h retrieve the bufctl's transaction history, if available\n" 24807c478bd9Sstevel@tonic-gate " -a addr\n" 24817c478bd9Sstevel@tonic-gate " filter out bufctls not involving the buffer at addr\n" 24827c478bd9Sstevel@tonic-gate " -c caller\n" 24837c478bd9Sstevel@tonic-gate " filter out bufctls without the function/PC in their stack trace\n" 24847c478bd9Sstevel@tonic-gate " -e earliest\n" 24857c478bd9Sstevel@tonic-gate " filter out bufctls timestamped before earliest\n" 24867c478bd9Sstevel@tonic-gate " -l latest\n" 24877c478bd9Sstevel@tonic-gate " filter out bufctls timestamped after latest\n" 
"  -t thread\n"
"        filter out bufctls not involving thread\n");
}

/*
 * ::bufctl dcmd: display one bufctl audit record, with optional filtering.
 * -v prints the full record (cache, lastlog, contents, stack trace);
 * -h replays the dcmd over the bufctl's transaction history;
 * -c/-t/-e/-l/-a filter on caller PC, thread, timestamp range, and buffer
 * address respectively.  In a pipeline (DCMD_PIPE_OUT), only the bufctl
 * address is emitted so the output can feed further dcmds.
 */
int
bufctl(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	uint_t verbose = FALSE;
	uint_t history = FALSE;
	uint_t in_history = FALSE;
	uintptr_t caller = 0, thread = 0;
	uintptr_t laddr, haddr, baddr = 0;
	hrtime_t earliest = 0, latest = 0;
	int i, depth;
	char c[MDB_SYM_NAMLEN];
	GElf_Sym sym;
	umem_bufctl_audit_t *bcp;
	UMEM_LOCAL_BUFCTL_AUDIT(&bcp);

	if (mdb_getopts(argc, argv,
	    'v', MDB_OPT_SETBITS, TRUE, &verbose,
	    'h', MDB_OPT_SETBITS, TRUE, &history,
	    'H', MDB_OPT_SETBITS, TRUE, &in_history,	/* internal */
	    'c', MDB_OPT_UINTPTR, &caller,
	    't', MDB_OPT_UINTPTR, &thread,
	    'e', MDB_OPT_UINT64, &earliest,
	    'l', MDB_OPT_UINT64, &latest,
	    'a', MDB_OPT_UINTPTR, &baddr, NULL) != argc)
		return (DCMD_USAGE);

	if (!(flags & DCMD_ADDRSPEC))
		return (DCMD_USAGE);

	/* -H is only valid as the internal recursion marker set below */
	if (in_history && !history)
		return (DCMD_USAGE);

	if (history && !in_history) {
		mdb_arg_t *nargv = mdb_zalloc(sizeof (*nargv) * (argc + 1),
		    UM_SLEEP | UM_GC);
		bufctl_history_cb_t bhc;

		nargv[0].a_type = MDB_TYPE_STRING;
		nargv[0].a_un.a_str = "-H";	/* prevent recursion */

		for (i = 0; i < argc; i++)
			nargv[i + 1] = argv[i];

		/*
		 * When in history mode, we treat each element as if it
		 * were in a separate loop, so that the headers group
		 * bufctls with similar histories.
		 */
		bhc.bhc_flags = flags | DCMD_LOOP | DCMD_LOOPFIRST;
		bhc.bhc_argc = argc + 1;
		bhc.bhc_argv = nargv;
		bhc.bhc_ret = DCMD_OK;

		if (mdb_pwalk("bufctl_history", bufctl_history_callback, &bhc,
		    addr) == -1) {
			mdb_warn("unable to walk bufctl_history");
			return (DCMD_ERR);
		}

		if (bhc.bhc_ret == DCMD_OK && !(flags & DCMD_PIPE_OUT))
			mdb_printf("\n");

		return (bhc.bhc_ret);
	}

	if (DCMD_HDRSPEC(flags) && !(flags & DCMD_PIPE_OUT)) {
		if (verbose) {
			mdb_printf("%16s %16s %16s %16s\n"
			    "%<u>%16s %16s %16s %16s%</u>\n",
			    "ADDR", "BUFADDR", "TIMESTAMP", "THREAD",
			    "", "CACHE", "LASTLOG", "CONTENTS");
		} else {
			mdb_printf("%<u>%-?s %-?s %-12s %5s %s%</u>\n",
			    "ADDR", "BUFADDR", "TIMESTAMP", "THRD", "CALLER");
		}
	}

	if (mdb_vread(bcp, UMEM_BUFCTL_AUDIT_SIZE, addr) == -1) {
		mdb_warn("couldn't read bufctl at %p", addr);
		return (DCMD_ERR);
	}

	/*
	 * Guard against bogus bc_depth in case the bufctl is corrupt or
	 * the address does not really refer to a bufctl.
	 */
	depth = MIN(bcp->bc_depth, umem_stack_depth);

	if (caller != 0) {
		/*
		 * By default, match only addresses within one pointer's
		 * width of the given -c value.
		 */
		laddr = caller;
		haddr = caller + sizeof (caller);

		if (mdb_lookup_by_addr(caller, MDB_SYM_FUZZY, c, sizeof (c),
		    &sym) != -1 && caller == (uintptr_t)sym.st_value) {
			/*
			 * We were provided an exact symbol value; any
			 * address in the function is valid.
			 */
			laddr = (uintptr_t)sym.st_value;
			haddr = (uintptr_t)sym.st_value + sym.st_size;
		}

		for (i = 0; i < depth; i++)
			if (bcp->bc_stack[i] >= laddr &&
			    bcp->bc_stack[i] < haddr)
				break;

		/* no frame of the stack trace matched: filter this one out */
		if (i == depth)
			return (DCMD_OK);
	}

	if (thread != 0 && (uintptr_t)bcp->bc_thread != thread)
		return (DCMD_OK);

	if (earliest != 0 && bcp->bc_timestamp < earliest)
		return (DCMD_OK);

	if (latest != 0 && bcp->bc_timestamp > latest)
		return (DCMD_OK);

	if (baddr != 0 && (uintptr_t)bcp->bc_addr != baddr)
		return (DCMD_OK);

	if (flags & DCMD_PIPE_OUT) {
		mdb_printf("%#r\n", addr);
		return (DCMD_OK);
	}

	if (verbose) {
		mdb_printf(
		    "%<b>%16p%</b> %16p %16llx %16d\n"
		    "%16s %16p %16p %16p\n",
		    addr, bcp->bc_addr, bcp->bc_timestamp, bcp->bc_thread,
		    "", bcp->bc_cache, bcp->bc_lastlog, bcp->bc_contents);

		mdb_inc_indent(17);
		for (i = 0; i < depth; i++)
			mdb_printf("%a\n", bcp->bc_stack[i]);
		mdb_dec_indent(17);
		mdb_printf("\n");
	} else {
		mdb_printf("%0?p %0?p %12llx %5d", addr, bcp->bc_addr,
		    bcp->bc_timestamp, bcp->bc_thread);

		/*
		 * Print the first interesting caller: skip frames that fail
		 * symbol lookup or that are internal umem_* entry points.
		 */
		for (i = 0; i < depth; i++) {
			if (mdb_lookup_by_addr(bcp->bc_stack[i],
			    MDB_SYM_FUZZY, c, sizeof (c), &sym) == -1)
				continue;
			if (is_umem_sym(c, "umem_"))
				continue;
			mdb_printf(" %a\n", bcp->bc_stack[i]);
			break;
		}

		if (i >= depth)
			mdb_printf("\n");
	}

	return (DCMD_OK);
}

/*
 * ::bufctl_audit dcmd: equivalent to ::bufctl -v on the given address.
 */
/*ARGSUSED*/
int
bufctl_audit(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	mdb_arg_t a;

	if (!(flags & DCMD_ADDRSPEC))
		return (DCMD_USAGE);

	if (argc != 0)
		return (DCMD_USAGE);

	a.a_type = MDB_TYPE_STRING;
	a.a_un.a_str = "-v";

	return
	    (bufctl(addr, flags, 1, &a));
}

/*
 * State shared by the ::umem_verify dcmd and its verify_free()/verify_alloc()
 * walker callbacks.
 */
typedef struct umem_verify {
	uint64_t *umv_buf;		/* buffer to read cache contents into */
	size_t umv_size;		/* number of bytes in umv_buf */
	int umv_corruption;		/* > 0 if corruption found. */
	int umv_besilent;		/* report actual corruption sites */
	struct umem_cache umv_cache;	/* the cache we're operating on */
} umem_verify_t;

/*
 * verify_pattern()
 *	verify that buf is filled with the pattern pat.
 *	Returns the byte offset of the first mismatching 64-bit word,
 *	or -1 if the entire range matches.
 */
static int64_t
verify_pattern(uint64_t *buf_arg, size_t size, uint64_t pat)
{
	/*LINTED*/
	uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
	uint64_t *buf;

	for (buf = buf_arg; buf < bufend; buf++)
		if (*buf != pat)
			return ((uintptr_t)buf - (uintptr_t)buf_arg);
	return (-1);
}

/*
 * verify_buftag()
 *	verify that btp->bt_bxstat == (bcp ^ pat)
 *	Returns 0 when the buftag is consistent, -1 otherwise.
 */
static int
verify_buftag(umem_buftag_t *btp, uintptr_t pat)
{
	return (btp->bt_bxstat == ((intptr_t)btp->bt_bufctl ^ pat) ? 0 : -1);
}

/*
 * verify_free()
 *	verify the integrity of a free block of memory by checking
 *	that it is filled with 0xdeadbeef and that its buftag is sane.
 *	Walker callback: always returns WALK_NEXT, tallying corruption
 *	in umv->umv_corruption.
 */
/*ARGSUSED1*/
static int
verify_free(uintptr_t addr, const void *data, void *private)
{
	umem_verify_t *umv = (umem_verify_t *)private;
	uint64_t *buf = umv->umv_buf;	/* buf to validate */
	int64_t corrupt;		/* corruption offset */
	umem_buftag_t *buftagp;		/* ptr to buftag */
	umem_cache_t *cp = &umv->umv_cache;
	int besilent = umv->umv_besilent;

	/*LINTED*/
	buftagp = UMEM_BUFTAG(cp, buf);

	/*
	 * Read the buffer to check.
	 */
	if (mdb_vread(buf, umv->umv_size, addr) == -1) {
		if (!besilent)
			mdb_warn("couldn't read %p", addr);
		return (WALK_NEXT);
	}

	if ((corrupt = verify_pattern(buf, cp->cache_verify,
	    UMEM_FREE_PATTERN)) >= 0) {
		if (!besilent)
			mdb_printf("buffer %p (free) seems corrupted, at %p\n",
			    addr, (uintptr_t)addr + corrupt);
		goto corrupt;
	}

	if ((cp->cache_flags & UMF_HASH) &&
	    buftagp->bt_redzone != UMEM_REDZONE_PATTERN) {
		if (!besilent)
			mdb_printf("buffer %p (free) seems to "
			    "have a corrupt redzone pattern\n", addr);
		goto corrupt;
	}

	/*
	 * confirm bufctl pointer integrity.
	 */
	if (verify_buftag(buftagp, UMEM_BUFTAG_FREE) == -1) {
		if (!besilent)
			mdb_printf("buffer %p (free) has a corrupt "
			    "buftag\n", addr);
		goto corrupt;
	}

	return (WALK_NEXT);
corrupt:
	umv->umv_corruption++;
	return (WALK_NEXT);
}

/*
 * verify_alloc()
 *	Verify that the buftag of an allocated buffer makes sense with respect
 *	to the buffer.  Walker callback: always returns WALK_NEXT, tallying
 *	corruption in umv->umv_corruption.
 */
/*ARGSUSED1*/
static int
verify_alloc(uintptr_t addr, const void *data, void *private)
{
	umem_verify_t *umv = (umem_verify_t *)private;
	umem_cache_t *cp = &umv->umv_cache;
	uint64_t *buf = umv->umv_buf;	/* buf to validate */
	/*LINTED*/
	umem_buftag_t *buftagp = UMEM_BUFTAG(cp, buf);
	uint32_t *ip = (uint32_t *)buftagp;
	uint8_t *bp = (uint8_t *)buf;
	int looks_ok = 0, size_ok = 1;	/* flags for finding corruption */
	int besilent = umv->umv_besilent;

	/*
	 * Read the buffer to check.
	 */
	if (mdb_vread(buf, umv->umv_size, addr) == -1) {
		if (!besilent)
			mdb_warn("couldn't read %p", addr);
		return (WALK_NEXT);
	}

	/*
	 * There are two cases to handle:
	 * 1. If the buf was alloc'd using umem_cache_alloc, it will have
	 *    0xfeedfacefeedface at the end of it
	 * 2. If the buf was alloc'd using umem_alloc, it will have
	 *    0xbb just past the end of the region in use.  At the buftag,
	 *    it will have 0xfeedface (or, if the whole buffer is in use,
	 *    0xfeedface & bb000000 or 0xfeedfacf & 000000bb depending on
	 *    endianness), followed by 32 bits containing the offset of the
	 *    0xbb byte in the buffer.
	 *
	 * Finally, the two 32-bit words that comprise the second half of the
	 * buftag should xor to UMEM_BUFTAG_ALLOC
	 */

	if (buftagp->bt_redzone == UMEM_REDZONE_PATTERN)
		looks_ok = 1;
	else if (!UMEM_SIZE_VALID(ip[1]))
		size_ok = 0;
	else if (bp[UMEM_SIZE_DECODE(ip[1])] == UMEM_REDZONE_BYTE)
		looks_ok = 1;
	else
		size_ok = 0;

	if (!size_ok) {
		if (!besilent)
			mdb_printf("buffer %p (allocated) has a corrupt "
			    "redzone size encoding\n", addr);
		goto corrupt;
	}

	if (!looks_ok) {
		if (!besilent)
			mdb_printf("buffer %p (allocated) has a corrupt "
			    "redzone signature\n", addr);
		goto corrupt;
	}

	if (verify_buftag(buftagp, UMEM_BUFTAG_ALLOC) == -1) {
		if (!besilent)
			mdb_printf("buffer %p (allocated) has a "
			    "corrupt buftag\n", addr);
		goto corrupt;
	}

	return (WALK_NEXT);
corrupt:
	umv->umv_corruption++;
	return
(WALK_NEXT); 28437c478bd9Sstevel@tonic-gate } 28447c478bd9Sstevel@tonic-gate 28457c478bd9Sstevel@tonic-gate /*ARGSUSED2*/ 28467c478bd9Sstevel@tonic-gate int 28477c478bd9Sstevel@tonic-gate umem_verify(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 28487c478bd9Sstevel@tonic-gate { 28497c478bd9Sstevel@tonic-gate if (flags & DCMD_ADDRSPEC) { 28507c478bd9Sstevel@tonic-gate int check_alloc = 0, check_free = 0; 28517c478bd9Sstevel@tonic-gate umem_verify_t umv; 28527c478bd9Sstevel@tonic-gate 28537c478bd9Sstevel@tonic-gate if (mdb_vread(&umv.umv_cache, sizeof (umv.umv_cache), 28547c478bd9Sstevel@tonic-gate addr) == -1) { 28557c478bd9Sstevel@tonic-gate mdb_warn("couldn't read umem_cache %p", addr); 28567c478bd9Sstevel@tonic-gate return (DCMD_ERR); 28577c478bd9Sstevel@tonic-gate } 28587c478bd9Sstevel@tonic-gate 28597c478bd9Sstevel@tonic-gate umv.umv_size = umv.umv_cache.cache_buftag + 28607c478bd9Sstevel@tonic-gate sizeof (umem_buftag_t); 28617c478bd9Sstevel@tonic-gate umv.umv_buf = mdb_alloc(umv.umv_size, UM_SLEEP | UM_GC); 28627c478bd9Sstevel@tonic-gate umv.umv_corruption = 0; 28637c478bd9Sstevel@tonic-gate 28647c478bd9Sstevel@tonic-gate if ((umv.umv_cache.cache_flags & UMF_REDZONE)) { 28657c478bd9Sstevel@tonic-gate check_alloc = 1; 28667c478bd9Sstevel@tonic-gate if (umv.umv_cache.cache_flags & UMF_DEADBEEF) 28677c478bd9Sstevel@tonic-gate check_free = 1; 28687c478bd9Sstevel@tonic-gate } else { 28697c478bd9Sstevel@tonic-gate if (!(flags & DCMD_LOOP)) { 28707c478bd9Sstevel@tonic-gate mdb_warn("cache %p (%s) does not have " 28717c478bd9Sstevel@tonic-gate "redzone checking enabled\n", addr, 28727c478bd9Sstevel@tonic-gate umv.umv_cache.cache_name); 28737c478bd9Sstevel@tonic-gate } 28747c478bd9Sstevel@tonic-gate return (DCMD_ERR); 28757c478bd9Sstevel@tonic-gate } 28767c478bd9Sstevel@tonic-gate 28777c478bd9Sstevel@tonic-gate if (flags & DCMD_LOOP) { 28787c478bd9Sstevel@tonic-gate /* 28797c478bd9Sstevel@tonic-gate * table mode, don't print out every corrupt buffer 
28807c478bd9Sstevel@tonic-gate */ 28817c478bd9Sstevel@tonic-gate umv.umv_besilent = 1; 28827c478bd9Sstevel@tonic-gate } else { 28837c478bd9Sstevel@tonic-gate mdb_printf("Summary for cache '%s'\n", 28847c478bd9Sstevel@tonic-gate umv.umv_cache.cache_name); 28857c478bd9Sstevel@tonic-gate mdb_inc_indent(2); 28867c478bd9Sstevel@tonic-gate umv.umv_besilent = 0; 28877c478bd9Sstevel@tonic-gate } 28887c478bd9Sstevel@tonic-gate 28897c478bd9Sstevel@tonic-gate if (check_alloc) 28907c478bd9Sstevel@tonic-gate (void) mdb_pwalk("umem", verify_alloc, &umv, addr); 28917c478bd9Sstevel@tonic-gate if (check_free) 28927c478bd9Sstevel@tonic-gate (void) mdb_pwalk("freemem", verify_free, &umv, addr); 28937c478bd9Sstevel@tonic-gate 28947c478bd9Sstevel@tonic-gate if (flags & DCMD_LOOP) { 28957c478bd9Sstevel@tonic-gate if (umv.umv_corruption == 0) { 28967c478bd9Sstevel@tonic-gate mdb_printf("%-*s %?p clean\n", 28977c478bd9Sstevel@tonic-gate UMEM_CACHE_NAMELEN, 28987c478bd9Sstevel@tonic-gate umv.umv_cache.cache_name, addr); 28997c478bd9Sstevel@tonic-gate } else { 29007c478bd9Sstevel@tonic-gate char *s = ""; /* optional s in "buffer[s]" */ 29017c478bd9Sstevel@tonic-gate if (umv.umv_corruption > 1) 29027c478bd9Sstevel@tonic-gate s = "s"; 29037c478bd9Sstevel@tonic-gate 29047c478bd9Sstevel@tonic-gate mdb_printf("%-*s %?p %d corrupt buffer%s\n", 29057c478bd9Sstevel@tonic-gate UMEM_CACHE_NAMELEN, 29067c478bd9Sstevel@tonic-gate umv.umv_cache.cache_name, addr, 29077c478bd9Sstevel@tonic-gate umv.umv_corruption, s); 29087c478bd9Sstevel@tonic-gate } 29097c478bd9Sstevel@tonic-gate } else { 29107c478bd9Sstevel@tonic-gate /* 29117c478bd9Sstevel@tonic-gate * This is the more verbose mode, when the user has 29127c478bd9Sstevel@tonic-gate * type addr::umem_verify. If the cache was clean, 29137c478bd9Sstevel@tonic-gate * nothing will have yet been printed. So say something. 
29147c478bd9Sstevel@tonic-gate */ 29157c478bd9Sstevel@tonic-gate if (umv.umv_corruption == 0) 29167c478bd9Sstevel@tonic-gate mdb_printf("clean\n"); 29177c478bd9Sstevel@tonic-gate 29187c478bd9Sstevel@tonic-gate mdb_dec_indent(2); 29197c478bd9Sstevel@tonic-gate } 29207c478bd9Sstevel@tonic-gate } else { 29217c478bd9Sstevel@tonic-gate /* 29227c478bd9Sstevel@tonic-gate * If the user didn't specify a cache to verify, we'll walk all 29237c478bd9Sstevel@tonic-gate * umem_cache's, specifying ourself as a callback for each... 29247c478bd9Sstevel@tonic-gate * this is the equivalent of '::walk umem_cache .::umem_verify' 29257c478bd9Sstevel@tonic-gate */ 29267c478bd9Sstevel@tonic-gate mdb_printf("%<u>%-*s %-?s %-20s%</b>\n", UMEM_CACHE_NAMELEN, 29277c478bd9Sstevel@tonic-gate "Cache Name", "Addr", "Cache Integrity"); 29287c478bd9Sstevel@tonic-gate (void) (mdb_walk_dcmd("umem_cache", "umem_verify", 0, NULL)); 29297c478bd9Sstevel@tonic-gate } 29307c478bd9Sstevel@tonic-gate 29317c478bd9Sstevel@tonic-gate return (DCMD_OK); 29327c478bd9Sstevel@tonic-gate } 29337c478bd9Sstevel@tonic-gate 29347c478bd9Sstevel@tonic-gate typedef struct vmem_node { 29357c478bd9Sstevel@tonic-gate struct vmem_node *vn_next; 29367c478bd9Sstevel@tonic-gate struct vmem_node *vn_parent; 29377c478bd9Sstevel@tonic-gate struct vmem_node *vn_sibling; 29387c478bd9Sstevel@tonic-gate struct vmem_node *vn_children; 29397c478bd9Sstevel@tonic-gate uintptr_t vn_addr; 29407c478bd9Sstevel@tonic-gate int vn_marked; 29417c478bd9Sstevel@tonic-gate vmem_t vn_vmem; 29427c478bd9Sstevel@tonic-gate } vmem_node_t; 29437c478bd9Sstevel@tonic-gate 29447c478bd9Sstevel@tonic-gate typedef struct vmem_walk { 29457c478bd9Sstevel@tonic-gate vmem_node_t *vw_root; 29467c478bd9Sstevel@tonic-gate vmem_node_t *vw_current; 29477c478bd9Sstevel@tonic-gate } vmem_walk_t; 29487c478bd9Sstevel@tonic-gate 29497c478bd9Sstevel@tonic-gate int 29507c478bd9Sstevel@tonic-gate vmem_walk_init(mdb_walk_state_t *wsp) 29517c478bd9Sstevel@tonic-gate { 
	uintptr_t vaddr, paddr;
	vmem_node_t *head = NULL, *root = NULL, *current = NULL, *parent, *vp;
	vmem_walk_t *vw;

	if (umem_readvar(&vaddr, "vmem_list") == -1) {
		mdb_warn("couldn't read 'vmem_list'");
		return (WALK_ERR);
	}

	/*
	 * First pass: read every arena on the target's vmem_list into a
	 * local list of vmem_node_t's, remembering the node that matches
	 * the walk's starting address (if any).
	 */
	while (vaddr != 0) {
		vp = mdb_zalloc(sizeof (vmem_node_t), UM_SLEEP);
		vp->vn_addr = vaddr;
		vp->vn_next = head;
		head = vp;

		if (vaddr == wsp->walk_addr)
			current = vp;

		if (mdb_vread(&vp->vn_vmem, sizeof (vmem_t), vaddr) == -1) {
			mdb_warn("couldn't read vmem_t at %p", vaddr);
			goto err;
		}

		vaddr = (uintptr_t)vp->vn_vmem.vm_next;
	}

	/*
	 * Second pass: link each arena under its vm_source parent; arenas
	 * with no source become roots.
	 */
	for (vp = head; vp != NULL; vp = vp->vn_next) {

		if ((paddr = (uintptr_t)vp->vn_vmem.vm_source) == 0) {
			vp->vn_sibling = root;
			root = vp;
			continue;
		}

		for (parent = head; parent != NULL; parent = parent->vn_next) {
			if (parent->vn_addr != paddr)
				continue;
			vp->vn_sibling = parent->vn_children;
			parent->vn_children = vp;
			vp->vn_parent = parent;
			break;
		}

		if (parent == NULL) {
			mdb_warn("couldn't find %p's parent (%p)\n",
			    vp->vn_addr, paddr);
			goto err;
		}
	}

	vw = mdb_zalloc(sizeof (vmem_walk_t), UM_SLEEP);
	vw->vw_root = root;

	/* start at the requested arena, or at the root list for global walks */
	if (current != NULL)
		vw->vw_current = current;
	else
		vw->vw_current = root;

	wsp->walk_data = vw;
	return (WALK_NEXT);
err:
	/* tear down the partially-built node list */
	for (vp = head; head != NULL; vp = head) {
		head = vp->vn_next;
		mdb_free(vp, sizeof (vmem_node_t));
	}

	return (WALK_ERR);
}

/*
 * "vmem" walk step: visit the current arena, then advance in pre-order
 * (children first, then siblings, climbing back up when a subtree is done).
 */
int
vmem_walk_step(mdb_walk_state_t *wsp)
{
	vmem_walk_t *vw = wsp->walk_data;
	vmem_node_t *vp;
	int rval;

	if ((vp = vw->vw_current) == NULL)
		return (WALK_DONE);

	rval = wsp->walk_callback(vp->vn_addr,
	    &vp->vn_vmem, wsp->walk_cbdata);

	/* descend into children first */
	if (vp->vn_children != NULL) {
		vw->vw_current = vp->vn_children;
		return (rval);
	}

	/* no children: move to the next sibling, climbing until one exists */
	do {
		vw->vw_current = vp->vn_sibling;
		vp = vp->vn_parent;
	} while (vw->vw_current == NULL && vp != NULL);

	return (rval);
}

/*
 * The "vmem_postfix" walk walks the vmem arenas in post-fix order; all
 * children are visited before their parent.  We perform the postfix walk
 * iteratively (rather than recursively) to allow mdb to regain control
 * after each callback.
 */
int
vmem_postfix_walk_step(mdb_walk_state_t *wsp)
{
	vmem_walk_t *vw = wsp->walk_data;
	vmem_node_t *vp = vw->vw_current;
	int rval;

	/*
	 * If this node is marked, then we know that we have already visited
	 * all of its children.  If the node has any siblings, they need to
	 * be visited next; otherwise, we need to visit the parent.  Note
	 * that vp->vn_marked will only be zero on the first invocation of
	 * the step function.
	 */
	if (vp->vn_marked) {
		if (vp->vn_sibling != NULL)
			vp = vp->vn_sibling;
		else if (vp->vn_parent != NULL)
			vp = vp->vn_parent;
		else {
			/*
			 * We have neither a parent, nor a sibling, and we
			 * have already been visited; we're done.
			 */
			return (WALK_DONE);
		}
	}

	/*
	 * Before we visit this node, visit its children.
	 */
	while (vp->vn_children != NULL && !vp->vn_children->vn_marked)
		vp = vp->vn_children;

	vp->vn_marked = 1;
	vw->vw_current = vp;
	rval = wsp->walk_callback(vp->vn_addr, &vp->vn_vmem, wsp->walk_cbdata);

	return (rval);
}

/*
 * Free the node tree built by vmem_walk_init().  Recurses over children
 * and siblings, freeing the walk state itself once the last root (a node
 * with neither sibling nor parent) has been released.
 */
void
vmem_walk_fini(mdb_walk_state_t *wsp)
{
	vmem_walk_t *vw = wsp->walk_data;
	vmem_node_t *root = vw->vw_root;
	int done;

	if (root == NULL)
		return;

	if ((vw->vw_root = root->vn_children) != NULL)
		vmem_walk_fini(wsp);

	vw->vw_root = root->vn_sibling;
	done = (root->vn_sibling == NULL && root->vn_parent == NULL);
	mdb_free(root, sizeof (vmem_node_t));

	if (done) {
		mdb_free(vw, sizeof (vmem_walk_t));
	} else {
		vmem_walk_fini(wsp);
	}
}

/*
 * State for the vmem_seg family of walks: circular traversal of an arena's
 * vs_anext segment list, optionally filtered by segment type.
 */
typedef struct vmem_seg_walk {
	uint8_t vsw_type;
	uintptr_t vsw_start;
	uintptr_t vsw_current;
} vmem_seg_walk_t;

/*
 * Common init for the vmem_seg/vmem_alloc/vmem_free/vmem_span walks:
 * requires an arena address; starts the walk at the arena's vm_seg0.
 */
/*ARGSUSED*/
int
vmem_seg_walk_common_init(mdb_walk_state_t *wsp, uint8_t type, char *name)
{
	vmem_seg_walk_t *vsw;

	if (wsp->walk_addr == 0) {
		mdb_warn("vmem_%s does not support global walks\n", name);
		return (WALK_ERR);
	}

	wsp->walk_data = vsw = mdb_alloc(sizeof (vmem_seg_walk_t), UM_SLEEP);

	vsw->vsw_type = type;
	vsw->vsw_start = wsp->walk_addr + OFFSETOF(vmem_t, vm_seg0);
	vsw->vsw_current = vsw->vsw_start;

	return (WALK_NEXT);
}

/*
 * vmem segments can't have type 0 (this should be added to vmem_impl.h).
 */
#define	VMEM_NONE	0

/* "vmem_alloc" walk: allocated segments of an arena */
int
vmem_alloc_walk_init(mdb_walk_state_t *wsp)
{
	return (vmem_seg_walk_common_init(wsp, VMEM_ALLOC, "alloc"));
}

/* "vmem_free" walk: free segments of an arena */
int
vmem_free_walk_init(mdb_walk_state_t *wsp)
{
	return (vmem_seg_walk_common_init(wsp, VMEM_FREE, "free"));
}

/* "vmem_span" walk: spans imported into an arena */
int
vmem_span_walk_init(mdb_walk_state_t *wsp)
{
	return (vmem_seg_walk_common_init(wsp, VMEM_SPAN, "span"));
}

/* "vmem_seg" walk: every segment, regardless of type */
int
vmem_seg_walk_init(mdb_walk_state_t *wsp)
{
	return (vmem_seg_walk_common_init(wsp, VMEM_NONE, "seg"));
}

/*
 * Step one segment along the arena's circular vs_anext list, invoking the
 * callback only when the segment matches the walk's type filter (VMEM_NONE
 * matches everything).  Terminates when the list wraps back to vm_seg0.
 */
int
vmem_seg_walk_step(mdb_walk_state_t *wsp)
{
	vmem_seg_t seg;
	vmem_seg_walk_t *vsw = wsp->walk_data;
	uintptr_t addr = vsw->vsw_current;
	static size_t seg_size = 0;	/* target's vmem_seg size, read once */
	int rval;

	if (!seg_size) {
		if (umem_readvar(&seg_size, "vmem_seg_size") == -1) {
			mdb_warn("failed to read 'vmem_seg_size'");
			seg_size = sizeof (vmem_seg_t);
		}
	}

	/*
	 * If the target's vmem_seg is smaller than ours, zero the tail so
	 * the fields we don't read are well-defined.
	 */
	if (seg_size < sizeof (seg))
		bzero((caddr_t)&seg + seg_size, sizeof (seg) - seg_size);

	if (mdb_vread(&seg, seg_size, addr) == -1) {
		mdb_warn("couldn't read vmem_seg at %p", addr);
		return (WALK_ERR);
	}

	vsw->vsw_current = (uintptr_t)seg.vs_anext;
	if (vsw->vsw_type != VMEM_NONE && seg.vs_type != vsw->vsw_type) {
		rval = WALK_NEXT;
	} else {
		rval = wsp->walk_callback(addr, &seg, wsp->walk_cbdata);
	}

	if (vsw->vsw_current == vsw->vsw_start)
		return (WALK_DONE);

	return (rval);
}

void
vmem_seg_walk_fini(mdb_walk_state_t *wsp)
{
	vmem_seg_walk_t *vsw = wsp->walk_data;

	mdb_free(vsw, sizeof (vmem_seg_walk_t));
}

#define	VMEM_NAMEWIDTH	22

int
vmem(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
vmem_t v, parent; 32237c478bd9Sstevel@tonic-gate uintptr_t paddr; 32247c478bd9Sstevel@tonic-gate int ident = 0; 32257c478bd9Sstevel@tonic-gate char c[VMEM_NAMEWIDTH]; 32267c478bd9Sstevel@tonic-gate 32277c478bd9Sstevel@tonic-gate if (!(flags & DCMD_ADDRSPEC)) { 32287c478bd9Sstevel@tonic-gate if (mdb_walk_dcmd("vmem", "vmem", argc, argv) == -1) { 32297c478bd9Sstevel@tonic-gate mdb_warn("can't walk vmem"); 32307c478bd9Sstevel@tonic-gate return (DCMD_ERR); 32317c478bd9Sstevel@tonic-gate } 32327c478bd9Sstevel@tonic-gate return (DCMD_OK); 32337c478bd9Sstevel@tonic-gate } 32347c478bd9Sstevel@tonic-gate 32357c478bd9Sstevel@tonic-gate if (DCMD_HDRSPEC(flags)) 32367c478bd9Sstevel@tonic-gate mdb_printf("%-?s %-*s %10s %12s %9s %5s\n", 32377c478bd9Sstevel@tonic-gate "ADDR", VMEM_NAMEWIDTH, "NAME", "INUSE", 32387c478bd9Sstevel@tonic-gate "TOTAL", "SUCCEED", "FAIL"); 32397c478bd9Sstevel@tonic-gate 32407c478bd9Sstevel@tonic-gate if (mdb_vread(&v, sizeof (v), addr) == -1) { 32417c478bd9Sstevel@tonic-gate mdb_warn("couldn't read vmem at %p", addr); 32427c478bd9Sstevel@tonic-gate return (DCMD_ERR); 32437c478bd9Sstevel@tonic-gate } 32447c478bd9Sstevel@tonic-gate 3245*892ad162SToomas Soome for (paddr = (uintptr_t)v.vm_source; paddr != 0; ident += 2) { 32467c478bd9Sstevel@tonic-gate if (mdb_vread(&parent, sizeof (parent), paddr) == -1) { 32477c478bd9Sstevel@tonic-gate mdb_warn("couldn't trace %p's ancestry", addr); 32487c478bd9Sstevel@tonic-gate ident = 0; 32497c478bd9Sstevel@tonic-gate break; 32507c478bd9Sstevel@tonic-gate } 32517c478bd9Sstevel@tonic-gate paddr = (uintptr_t)parent.vm_source; 32527c478bd9Sstevel@tonic-gate } 32537c478bd9Sstevel@tonic-gate 32547c478bd9Sstevel@tonic-gate (void) mdb_snprintf(c, VMEM_NAMEWIDTH, "%*s%s", ident, "", v.vm_name); 32557c478bd9Sstevel@tonic-gate 32567c478bd9Sstevel@tonic-gate mdb_printf("%0?p %-*s %10llu %12llu %9llu %5llu\n", 32577c478bd9Sstevel@tonic-gate addr, VMEM_NAMEWIDTH, c, 32587c478bd9Sstevel@tonic-gate v.vm_kstat.vk_mem_inuse, 
v.vm_kstat.vk_mem_total, 32597c478bd9Sstevel@tonic-gate v.vm_kstat.vk_alloc, v.vm_kstat.vk_fail); 32607c478bd9Sstevel@tonic-gate 32617c478bd9Sstevel@tonic-gate return (DCMD_OK); 32627c478bd9Sstevel@tonic-gate } 32637c478bd9Sstevel@tonic-gate 32647c478bd9Sstevel@tonic-gate void 32657c478bd9Sstevel@tonic-gate vmem_seg_help(void) 32667c478bd9Sstevel@tonic-gate { 32677c478bd9Sstevel@tonic-gate mdb_printf("%s\n", 32687c478bd9Sstevel@tonic-gate "Display the contents of vmem_seg_ts, with optional filtering.\n" 32697c478bd9Sstevel@tonic-gate "\n" 32707c478bd9Sstevel@tonic-gate "A vmem_seg_t represents a range of addresses (or arbitrary numbers),\n" 32717c478bd9Sstevel@tonic-gate "representing a single chunk of data. Only ALLOC segments have debugging\n" 32727c478bd9Sstevel@tonic-gate "information.\n"); 32737c478bd9Sstevel@tonic-gate mdb_dec_indent(2); 32747c478bd9Sstevel@tonic-gate mdb_printf("%<b>OPTIONS%</b>\n"); 32757c478bd9Sstevel@tonic-gate mdb_inc_indent(2); 32767c478bd9Sstevel@tonic-gate mdb_printf("%s", 32777c478bd9Sstevel@tonic-gate " -v Display the full content of the vmem_seg, including its stack trace\n" 32787c478bd9Sstevel@tonic-gate " -s report the size of the segment, instead of the end address\n" 32797c478bd9Sstevel@tonic-gate " -c caller\n" 32807c478bd9Sstevel@tonic-gate " filter out segments without the function/PC in their stack trace\n" 32817c478bd9Sstevel@tonic-gate " -e earliest\n" 32827c478bd9Sstevel@tonic-gate " filter out segments timestamped before earliest\n" 32837c478bd9Sstevel@tonic-gate " -l latest\n" 32847c478bd9Sstevel@tonic-gate " filter out segments timestamped after latest\n" 32857c478bd9Sstevel@tonic-gate " -m minsize\n" 32867c478bd9Sstevel@tonic-gate " filer out segments smaller than minsize\n" 32877c478bd9Sstevel@tonic-gate " -M maxsize\n" 32887c478bd9Sstevel@tonic-gate " filer out segments larger than maxsize\n" 32897c478bd9Sstevel@tonic-gate " -t thread\n" 32907c478bd9Sstevel@tonic-gate " filter out segments not involving thread\n" 
32917c478bd9Sstevel@tonic-gate " -T type\n" 32927c478bd9Sstevel@tonic-gate " filter out segments not of type 'type'\n" 32937c478bd9Sstevel@tonic-gate " type is one of: ALLOC/FREE/SPAN/ROTOR/WALKER\n"); 32947c478bd9Sstevel@tonic-gate } 32957c478bd9Sstevel@tonic-gate 32967c478bd9Sstevel@tonic-gate 32977c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 32987c478bd9Sstevel@tonic-gate int 32997c478bd9Sstevel@tonic-gate vmem_seg(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 33007c478bd9Sstevel@tonic-gate { 33017c478bd9Sstevel@tonic-gate vmem_seg_t vs; 33027c478bd9Sstevel@tonic-gate uintptr_t *stk = vs.vs_stack; 33037c478bd9Sstevel@tonic-gate uintptr_t sz; 33047c478bd9Sstevel@tonic-gate uint8_t t; 33057c478bd9Sstevel@tonic-gate const char *type = NULL; 33067c478bd9Sstevel@tonic-gate GElf_Sym sym; 33077c478bd9Sstevel@tonic-gate char c[MDB_SYM_NAMLEN]; 33087c478bd9Sstevel@tonic-gate int no_debug; 33097c478bd9Sstevel@tonic-gate int i; 33107c478bd9Sstevel@tonic-gate int depth; 33117c478bd9Sstevel@tonic-gate uintptr_t laddr, haddr; 33127c478bd9Sstevel@tonic-gate 3313*892ad162SToomas Soome uintptr_t caller = 0, thread = 0; 33147c478bd9Sstevel@tonic-gate uintptr_t minsize = 0, maxsize = 0; 33157c478bd9Sstevel@tonic-gate 33167c478bd9Sstevel@tonic-gate hrtime_t earliest = 0, latest = 0; 33177c478bd9Sstevel@tonic-gate 33187c478bd9Sstevel@tonic-gate uint_t size = 0; 33197c478bd9Sstevel@tonic-gate uint_t verbose = 0; 33207c478bd9Sstevel@tonic-gate 33217c478bd9Sstevel@tonic-gate if (!(flags & DCMD_ADDRSPEC)) 33227c478bd9Sstevel@tonic-gate return (DCMD_USAGE); 33237c478bd9Sstevel@tonic-gate 33247c478bd9Sstevel@tonic-gate if (mdb_getopts(argc, argv, 33257c478bd9Sstevel@tonic-gate 'c', MDB_OPT_UINTPTR, &caller, 33267c478bd9Sstevel@tonic-gate 'e', MDB_OPT_UINT64, &earliest, 33277c478bd9Sstevel@tonic-gate 'l', MDB_OPT_UINT64, &latest, 33287c478bd9Sstevel@tonic-gate 's', MDB_OPT_SETBITS, TRUE, &size, 33297c478bd9Sstevel@tonic-gate 'm', MDB_OPT_UINTPTR, &minsize, 
33307c478bd9Sstevel@tonic-gate 'M', MDB_OPT_UINTPTR, &maxsize, 33317c478bd9Sstevel@tonic-gate 't', MDB_OPT_UINTPTR, &thread, 33327c478bd9Sstevel@tonic-gate 'T', MDB_OPT_STR, &type, 33337c478bd9Sstevel@tonic-gate 'v', MDB_OPT_SETBITS, TRUE, &verbose, 33347c478bd9Sstevel@tonic-gate NULL) != argc) 33357c478bd9Sstevel@tonic-gate return (DCMD_USAGE); 33367c478bd9Sstevel@tonic-gate 33377c478bd9Sstevel@tonic-gate if (DCMD_HDRSPEC(flags) && !(flags & DCMD_PIPE_OUT)) { 33387c478bd9Sstevel@tonic-gate if (verbose) { 33397c478bd9Sstevel@tonic-gate mdb_printf("%16s %4s %16s %16s %16s\n" 33407c478bd9Sstevel@tonic-gate "%<u>%16s %4s %16s %16s %16s%</u>\n", 33417c478bd9Sstevel@tonic-gate "ADDR", "TYPE", "START", "END", "SIZE", 33427c478bd9Sstevel@tonic-gate "", "", "THREAD", "TIMESTAMP", ""); 33437c478bd9Sstevel@tonic-gate } else { 33447c478bd9Sstevel@tonic-gate mdb_printf("%?s %4s %?s %?s %s\n", "ADDR", "TYPE", 33457c478bd9Sstevel@tonic-gate "START", size? "SIZE" : "END", "WHO"); 33467c478bd9Sstevel@tonic-gate } 33477c478bd9Sstevel@tonic-gate } 33487c478bd9Sstevel@tonic-gate 33497c478bd9Sstevel@tonic-gate if (mdb_vread(&vs, sizeof (vs), addr) == -1) { 33507c478bd9Sstevel@tonic-gate mdb_warn("couldn't read vmem_seg at %p", addr); 33517c478bd9Sstevel@tonic-gate return (DCMD_ERR); 33527c478bd9Sstevel@tonic-gate } 33537c478bd9Sstevel@tonic-gate 33547c478bd9Sstevel@tonic-gate if (type != NULL) { 33557c478bd9Sstevel@tonic-gate if (strcmp(type, "ALLC") == 0 || strcmp(type, "ALLOC") == 0) 33567c478bd9Sstevel@tonic-gate t = VMEM_ALLOC; 33577c478bd9Sstevel@tonic-gate else if (strcmp(type, "FREE") == 0) 33587c478bd9Sstevel@tonic-gate t = VMEM_FREE; 33597c478bd9Sstevel@tonic-gate else if (strcmp(type, "SPAN") == 0) 33607c478bd9Sstevel@tonic-gate t = VMEM_SPAN; 33617c478bd9Sstevel@tonic-gate else if (strcmp(type, "ROTR") == 0 || 33627c478bd9Sstevel@tonic-gate strcmp(type, "ROTOR") == 0) 33637c478bd9Sstevel@tonic-gate t = VMEM_ROTOR; 33647c478bd9Sstevel@tonic-gate else if (strcmp(type, "WLKR") 
== 0 || 33657c478bd9Sstevel@tonic-gate strcmp(type, "WALKER") == 0) 33667c478bd9Sstevel@tonic-gate t = VMEM_WALKER; 33677c478bd9Sstevel@tonic-gate else { 33687c478bd9Sstevel@tonic-gate mdb_warn("\"%s\" is not a recognized vmem_seg type\n", 33697c478bd9Sstevel@tonic-gate type); 33707c478bd9Sstevel@tonic-gate return (DCMD_ERR); 33717c478bd9Sstevel@tonic-gate } 33727c478bd9Sstevel@tonic-gate 33737c478bd9Sstevel@tonic-gate if (vs.vs_type != t) 33747c478bd9Sstevel@tonic-gate return (DCMD_OK); 33757c478bd9Sstevel@tonic-gate } 33767c478bd9Sstevel@tonic-gate 33777c478bd9Sstevel@tonic-gate sz = vs.vs_end - vs.vs_start; 33787c478bd9Sstevel@tonic-gate 33797c478bd9Sstevel@tonic-gate if (minsize != 0 && sz < minsize) 33807c478bd9Sstevel@tonic-gate return (DCMD_OK); 33817c478bd9Sstevel@tonic-gate 33827c478bd9Sstevel@tonic-gate if (maxsize != 0 && sz > maxsize) 33837c478bd9Sstevel@tonic-gate return (DCMD_OK); 33847c478bd9Sstevel@tonic-gate 33857c478bd9Sstevel@tonic-gate t = vs.vs_type; 33867c478bd9Sstevel@tonic-gate depth = vs.vs_depth; 33877c478bd9Sstevel@tonic-gate 33887c478bd9Sstevel@tonic-gate /* 33897c478bd9Sstevel@tonic-gate * debug info, when present, is only accurate for VMEM_ALLOC segments 33907c478bd9Sstevel@tonic-gate */ 33917c478bd9Sstevel@tonic-gate no_debug = (t != VMEM_ALLOC) || 33927c478bd9Sstevel@tonic-gate (depth == 0 || depth > VMEM_STACK_DEPTH); 33937c478bd9Sstevel@tonic-gate 33947c478bd9Sstevel@tonic-gate if (no_debug) { 3395*892ad162SToomas Soome if (caller != 0 || thread != 0 || earliest != 0 || latest != 0) 33967c478bd9Sstevel@tonic-gate return (DCMD_OK); /* not enough info */ 33977c478bd9Sstevel@tonic-gate } else { 3398*892ad162SToomas Soome if (caller != 0) { 33997c478bd9Sstevel@tonic-gate laddr = caller; 34007c478bd9Sstevel@tonic-gate haddr = caller + sizeof (caller); 34017c478bd9Sstevel@tonic-gate 34027c478bd9Sstevel@tonic-gate if (mdb_lookup_by_addr(caller, MDB_SYM_FUZZY, c, 34037c478bd9Sstevel@tonic-gate sizeof (c), &sym) != -1 && 
34047c478bd9Sstevel@tonic-gate caller == (uintptr_t)sym.st_value) { 34057c478bd9Sstevel@tonic-gate /* 34067c478bd9Sstevel@tonic-gate * We were provided an exact symbol value; any 34077c478bd9Sstevel@tonic-gate * address in the function is valid. 34087c478bd9Sstevel@tonic-gate */ 34097c478bd9Sstevel@tonic-gate laddr = (uintptr_t)sym.st_value; 34107c478bd9Sstevel@tonic-gate haddr = (uintptr_t)sym.st_value + sym.st_size; 34117c478bd9Sstevel@tonic-gate } 34127c478bd9Sstevel@tonic-gate 34137c478bd9Sstevel@tonic-gate for (i = 0; i < depth; i++) 34147c478bd9Sstevel@tonic-gate if (vs.vs_stack[i] >= laddr && 34157c478bd9Sstevel@tonic-gate vs.vs_stack[i] < haddr) 34167c478bd9Sstevel@tonic-gate break; 34177c478bd9Sstevel@tonic-gate 34187c478bd9Sstevel@tonic-gate if (i == depth) 34197c478bd9Sstevel@tonic-gate return (DCMD_OK); 34207c478bd9Sstevel@tonic-gate } 34217c478bd9Sstevel@tonic-gate 3422*892ad162SToomas Soome if (thread != 0 && (uintptr_t)vs.vs_thread != thread) 34237c478bd9Sstevel@tonic-gate return (DCMD_OK); 34247c478bd9Sstevel@tonic-gate 34257c478bd9Sstevel@tonic-gate if (earliest != 0 && vs.vs_timestamp < earliest) 34267c478bd9Sstevel@tonic-gate return (DCMD_OK); 34277c478bd9Sstevel@tonic-gate 34287c478bd9Sstevel@tonic-gate if (latest != 0 && vs.vs_timestamp > latest) 34297c478bd9Sstevel@tonic-gate return (DCMD_OK); 34307c478bd9Sstevel@tonic-gate } 34317c478bd9Sstevel@tonic-gate 34327c478bd9Sstevel@tonic-gate type = (t == VMEM_ALLOC ? "ALLC" : 34337c478bd9Sstevel@tonic-gate t == VMEM_FREE ? "FREE" : 34347c478bd9Sstevel@tonic-gate t == VMEM_SPAN ? "SPAN" : 34357c478bd9Sstevel@tonic-gate t == VMEM_ROTOR ? "ROTR" : 34367c478bd9Sstevel@tonic-gate t == VMEM_WALKER ? 
"WLKR" : 34377c478bd9Sstevel@tonic-gate "????"); 34387c478bd9Sstevel@tonic-gate 34397c478bd9Sstevel@tonic-gate if (flags & DCMD_PIPE_OUT) { 34407c478bd9Sstevel@tonic-gate mdb_printf("%#r\n", addr); 34417c478bd9Sstevel@tonic-gate return (DCMD_OK); 34427c478bd9Sstevel@tonic-gate } 34437c478bd9Sstevel@tonic-gate 34447c478bd9Sstevel@tonic-gate if (verbose) { 34457c478bd9Sstevel@tonic-gate mdb_printf("%<b>%16p%</b> %4s %16p %16p %16d\n", 34467c478bd9Sstevel@tonic-gate addr, type, vs.vs_start, vs.vs_end, sz); 34477c478bd9Sstevel@tonic-gate 34487c478bd9Sstevel@tonic-gate if (no_debug) 34497c478bd9Sstevel@tonic-gate return (DCMD_OK); 34507c478bd9Sstevel@tonic-gate 34517c478bd9Sstevel@tonic-gate mdb_printf("%16s %4s %16d %16llx\n", 34527c478bd9Sstevel@tonic-gate "", "", vs.vs_thread, vs.vs_timestamp); 34537c478bd9Sstevel@tonic-gate 34547c478bd9Sstevel@tonic-gate mdb_inc_indent(17); 34557c478bd9Sstevel@tonic-gate for (i = 0; i < depth; i++) { 34567c478bd9Sstevel@tonic-gate mdb_printf("%a\n", stk[i]); 34577c478bd9Sstevel@tonic-gate } 34587c478bd9Sstevel@tonic-gate mdb_dec_indent(17); 34597c478bd9Sstevel@tonic-gate mdb_printf("\n"); 34607c478bd9Sstevel@tonic-gate } else { 34617c478bd9Sstevel@tonic-gate mdb_printf("%0?p %4s %0?p %0?p", addr, type, 34627c478bd9Sstevel@tonic-gate vs.vs_start, size? 
sz : vs.vs_end); 34637c478bd9Sstevel@tonic-gate 34647c478bd9Sstevel@tonic-gate if (no_debug) { 34657c478bd9Sstevel@tonic-gate mdb_printf("\n"); 34667c478bd9Sstevel@tonic-gate return (DCMD_OK); 34677c478bd9Sstevel@tonic-gate } 34687c478bd9Sstevel@tonic-gate 34697c478bd9Sstevel@tonic-gate for (i = 0; i < depth; i++) { 34707c478bd9Sstevel@tonic-gate if (mdb_lookup_by_addr(stk[i], MDB_SYM_FUZZY, 34717c478bd9Sstevel@tonic-gate c, sizeof (c), &sym) == -1) 34727c478bd9Sstevel@tonic-gate continue; 34737c478bd9Sstevel@tonic-gate if (is_umem_sym(c, "vmem_")) 34747c478bd9Sstevel@tonic-gate continue; 34757c478bd9Sstevel@tonic-gate break; 34767c478bd9Sstevel@tonic-gate } 34777c478bd9Sstevel@tonic-gate mdb_printf(" %a\n", stk[i]); 34787c478bd9Sstevel@tonic-gate } 34797c478bd9Sstevel@tonic-gate return (DCMD_OK); 34807c478bd9Sstevel@tonic-gate } 34817c478bd9Sstevel@tonic-gate 34827c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 34837c478bd9Sstevel@tonic-gate static int 34847c478bd9Sstevel@tonic-gate showbc(uintptr_t addr, const umem_bufctl_audit_t *bcp, hrtime_t *newest) 34857c478bd9Sstevel@tonic-gate { 34867c478bd9Sstevel@tonic-gate char name[UMEM_CACHE_NAMELEN + 1]; 34877c478bd9Sstevel@tonic-gate hrtime_t delta; 34887c478bd9Sstevel@tonic-gate int i, depth; 34897c478bd9Sstevel@tonic-gate 34907c478bd9Sstevel@tonic-gate if (bcp->bc_timestamp == 0) 34917c478bd9Sstevel@tonic-gate return (WALK_DONE); 34927c478bd9Sstevel@tonic-gate 34937c478bd9Sstevel@tonic-gate if (*newest == 0) 34947c478bd9Sstevel@tonic-gate *newest = bcp->bc_timestamp; 34957c478bd9Sstevel@tonic-gate 34967c478bd9Sstevel@tonic-gate delta = *newest - bcp->bc_timestamp; 34977c478bd9Sstevel@tonic-gate depth = MIN(bcp->bc_depth, umem_stack_depth); 34987c478bd9Sstevel@tonic-gate 34997c478bd9Sstevel@tonic-gate if (mdb_readstr(name, sizeof (name), (uintptr_t) 35007c478bd9Sstevel@tonic-gate &bcp->bc_cache->cache_name) <= 0) 35017c478bd9Sstevel@tonic-gate (void) mdb_snprintf(name, sizeof (name), "%a", bcp->bc_cache); 
35027c478bd9Sstevel@tonic-gate 35037c478bd9Sstevel@tonic-gate mdb_printf("\nT-%lld.%09lld addr=%p %s\n", 35047c478bd9Sstevel@tonic-gate delta / NANOSEC, delta % NANOSEC, bcp->bc_addr, name); 35057c478bd9Sstevel@tonic-gate 35067c478bd9Sstevel@tonic-gate for (i = 0; i < depth; i++) 35077c478bd9Sstevel@tonic-gate mdb_printf("\t %a\n", bcp->bc_stack[i]); 35087c478bd9Sstevel@tonic-gate 35097c478bd9Sstevel@tonic-gate return (WALK_NEXT); 35107c478bd9Sstevel@tonic-gate } 35117c478bd9Sstevel@tonic-gate 35127c478bd9Sstevel@tonic-gate int 35137c478bd9Sstevel@tonic-gate umalog(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 35147c478bd9Sstevel@tonic-gate { 35157c478bd9Sstevel@tonic-gate const char *logname = "umem_transaction_log"; 35167c478bd9Sstevel@tonic-gate hrtime_t newest = 0; 35177c478bd9Sstevel@tonic-gate 35187c478bd9Sstevel@tonic-gate if ((flags & DCMD_ADDRSPEC) || argc > 1) 35197c478bd9Sstevel@tonic-gate return (DCMD_USAGE); 35207c478bd9Sstevel@tonic-gate 35217c478bd9Sstevel@tonic-gate if (argc > 0) { 35227c478bd9Sstevel@tonic-gate if (argv->a_type != MDB_TYPE_STRING) 35237c478bd9Sstevel@tonic-gate return (DCMD_USAGE); 35247c478bd9Sstevel@tonic-gate if (strcmp(argv->a_un.a_str, "fail") == 0) 35257c478bd9Sstevel@tonic-gate logname = "umem_failure_log"; 35267c478bd9Sstevel@tonic-gate else if (strcmp(argv->a_un.a_str, "slab") == 0) 35277c478bd9Sstevel@tonic-gate logname = "umem_slab_log"; 35287c478bd9Sstevel@tonic-gate else 35297c478bd9Sstevel@tonic-gate return (DCMD_USAGE); 35307c478bd9Sstevel@tonic-gate } 35317c478bd9Sstevel@tonic-gate 35327c478bd9Sstevel@tonic-gate if (umem_readvar(&addr, logname) == -1) { 35337c478bd9Sstevel@tonic-gate mdb_warn("failed to read %s log header pointer"); 35347c478bd9Sstevel@tonic-gate return (DCMD_ERR); 35357c478bd9Sstevel@tonic-gate } 35367c478bd9Sstevel@tonic-gate 35377c478bd9Sstevel@tonic-gate if (mdb_pwalk("umem_log", (mdb_walk_cb_t)showbc, &newest, addr) == -1) { 35387c478bd9Sstevel@tonic-gate mdb_warn("failed to 
walk umem log"); 35397c478bd9Sstevel@tonic-gate return (DCMD_ERR); 35407c478bd9Sstevel@tonic-gate } 35417c478bd9Sstevel@tonic-gate 35427c478bd9Sstevel@tonic-gate return (DCMD_OK); 35437c478bd9Sstevel@tonic-gate } 35447c478bd9Sstevel@tonic-gate 35457c478bd9Sstevel@tonic-gate /* 35467c478bd9Sstevel@tonic-gate * As the final lure for die-hard crash(1M) users, we provide ::umausers here. 35477c478bd9Sstevel@tonic-gate * The first piece is a structure which we use to accumulate umem_cache_t 35487c478bd9Sstevel@tonic-gate * addresses of interest. The umc_add is used as a callback for the umem_cache 35497c478bd9Sstevel@tonic-gate * walker; we either add all caches, or ones named explicitly as arguments. 35507c478bd9Sstevel@tonic-gate */ 35517c478bd9Sstevel@tonic-gate 35527c478bd9Sstevel@tonic-gate typedef struct umclist { 35537c478bd9Sstevel@tonic-gate const char *umc_name; /* Name to match (or NULL) */ 35547c478bd9Sstevel@tonic-gate uintptr_t *umc_caches; /* List of umem_cache_t addrs */ 35557c478bd9Sstevel@tonic-gate int umc_nelems; /* Num entries in umc_caches */ 35567c478bd9Sstevel@tonic-gate int umc_size; /* Size of umc_caches array */ 35577c478bd9Sstevel@tonic-gate } umclist_t; 35587c478bd9Sstevel@tonic-gate 35597c478bd9Sstevel@tonic-gate static int 35607c478bd9Sstevel@tonic-gate umc_add(uintptr_t addr, const umem_cache_t *cp, umclist_t *umc) 35617c478bd9Sstevel@tonic-gate { 35627c478bd9Sstevel@tonic-gate void *p; 35637c478bd9Sstevel@tonic-gate int s; 35647c478bd9Sstevel@tonic-gate 35657c478bd9Sstevel@tonic-gate if (umc->umc_name == NULL || 35667c478bd9Sstevel@tonic-gate strcmp(cp->cache_name, umc->umc_name) == 0) { 35677c478bd9Sstevel@tonic-gate /* 35687c478bd9Sstevel@tonic-gate * If we have a match, grow our array (if necessary), and then 35697c478bd9Sstevel@tonic-gate * add the virtual address of the matching cache to our list. 
35707c478bd9Sstevel@tonic-gate */ 35717c478bd9Sstevel@tonic-gate if (umc->umc_nelems >= umc->umc_size) { 35727c478bd9Sstevel@tonic-gate s = umc->umc_size ? umc->umc_size * 2 : 256; 35737c478bd9Sstevel@tonic-gate p = mdb_alloc(sizeof (uintptr_t) * s, UM_SLEEP | UM_GC); 35747c478bd9Sstevel@tonic-gate 35757c478bd9Sstevel@tonic-gate bcopy(umc->umc_caches, p, 35767c478bd9Sstevel@tonic-gate sizeof (uintptr_t) * umc->umc_size); 35777c478bd9Sstevel@tonic-gate 35787c478bd9Sstevel@tonic-gate umc->umc_caches = p; 35797c478bd9Sstevel@tonic-gate umc->umc_size = s; 35807c478bd9Sstevel@tonic-gate } 35817c478bd9Sstevel@tonic-gate 35827c478bd9Sstevel@tonic-gate umc->umc_caches[umc->umc_nelems++] = addr; 35837c478bd9Sstevel@tonic-gate return (umc->umc_name ? WALK_DONE : WALK_NEXT); 35847c478bd9Sstevel@tonic-gate } 35857c478bd9Sstevel@tonic-gate 35867c478bd9Sstevel@tonic-gate return (WALK_NEXT); 35877c478bd9Sstevel@tonic-gate } 35887c478bd9Sstevel@tonic-gate 35897c478bd9Sstevel@tonic-gate /* 35907c478bd9Sstevel@tonic-gate * The second piece of ::umausers is a hash table of allocations. Each 35917c478bd9Sstevel@tonic-gate * allocation owner is identified by its stack trace and data_size. We then 35927c478bd9Sstevel@tonic-gate * track the total bytes of all such allocations, and the number of allocations 35937c478bd9Sstevel@tonic-gate * to report at the end. Once we have a list of caches, we walk through the 35947c478bd9Sstevel@tonic-gate * allocated bufctls of each, and update our hash table accordingly. 
35957c478bd9Sstevel@tonic-gate */ 35967c478bd9Sstevel@tonic-gate 35977c478bd9Sstevel@tonic-gate typedef struct umowner { 35987c478bd9Sstevel@tonic-gate struct umowner *umo_head; /* First hash elt in bucket */ 35997c478bd9Sstevel@tonic-gate struct umowner *umo_next; /* Next hash elt in chain */ 36007c478bd9Sstevel@tonic-gate size_t umo_signature; /* Hash table signature */ 36017c478bd9Sstevel@tonic-gate uint_t umo_num; /* Number of allocations */ 36027c478bd9Sstevel@tonic-gate size_t umo_data_size; /* Size of each allocation */ 36037c478bd9Sstevel@tonic-gate size_t umo_total_size; /* Total bytes of allocation */ 36047c478bd9Sstevel@tonic-gate int umo_depth; /* Depth of stack trace */ 36057c478bd9Sstevel@tonic-gate uintptr_t *umo_stack; /* Stack trace */ 36067c478bd9Sstevel@tonic-gate } umowner_t; 36077c478bd9Sstevel@tonic-gate 36087c478bd9Sstevel@tonic-gate typedef struct umusers { 36097c478bd9Sstevel@tonic-gate const umem_cache_t *umu_cache; /* Current umem cache */ 36107c478bd9Sstevel@tonic-gate umowner_t *umu_hash; /* Hash table of owners */ 36117c478bd9Sstevel@tonic-gate uintptr_t *umu_stacks; /* stacks for owners */ 36127c478bd9Sstevel@tonic-gate int umu_nelems; /* Number of entries in use */ 36137c478bd9Sstevel@tonic-gate int umu_size; /* Total number of entries */ 36147c478bd9Sstevel@tonic-gate } umusers_t; 36157c478bd9Sstevel@tonic-gate 36167c478bd9Sstevel@tonic-gate static void 36177c478bd9Sstevel@tonic-gate umu_add(umusers_t *umu, const umem_bufctl_audit_t *bcp, 36187c478bd9Sstevel@tonic-gate size_t size, size_t data_size) 36197c478bd9Sstevel@tonic-gate { 36207c478bd9Sstevel@tonic-gate int i, depth = MIN(bcp->bc_depth, umem_stack_depth); 36217c478bd9Sstevel@tonic-gate size_t bucket, signature = data_size; 36227c478bd9Sstevel@tonic-gate umowner_t *umo, *umoend; 36237c478bd9Sstevel@tonic-gate 36247c478bd9Sstevel@tonic-gate /* 36257c478bd9Sstevel@tonic-gate * If the hash table is full, double its size and rehash everything. 
36267c478bd9Sstevel@tonic-gate */ 36277c478bd9Sstevel@tonic-gate if (umu->umu_nelems >= umu->umu_size) { 36287c478bd9Sstevel@tonic-gate int s = umu->umu_size ? umu->umu_size * 2 : 1024; 36297c478bd9Sstevel@tonic-gate size_t umowner_size = sizeof (umowner_t); 36307c478bd9Sstevel@tonic-gate size_t trace_size = umem_stack_depth * sizeof (uintptr_t); 36317c478bd9Sstevel@tonic-gate uintptr_t *new_stacks; 36327c478bd9Sstevel@tonic-gate 36337c478bd9Sstevel@tonic-gate umo = mdb_alloc(umowner_size * s, UM_SLEEP | UM_GC); 36347c478bd9Sstevel@tonic-gate new_stacks = mdb_alloc(trace_size * s, UM_SLEEP | UM_GC); 36357c478bd9Sstevel@tonic-gate 36367c478bd9Sstevel@tonic-gate bcopy(umu->umu_hash, umo, umowner_size * umu->umu_size); 36377c478bd9Sstevel@tonic-gate bcopy(umu->umu_stacks, new_stacks, trace_size * umu->umu_size); 36387c478bd9Sstevel@tonic-gate umu->umu_hash = umo; 36397c478bd9Sstevel@tonic-gate umu->umu_stacks = new_stacks; 36407c478bd9Sstevel@tonic-gate umu->umu_size = s; 36417c478bd9Sstevel@tonic-gate 36427c478bd9Sstevel@tonic-gate umoend = umu->umu_hash + umu->umu_size; 36437c478bd9Sstevel@tonic-gate for (umo = umu->umu_hash; umo < umoend; umo++) { 36447c478bd9Sstevel@tonic-gate umo->umo_head = NULL; 36457c478bd9Sstevel@tonic-gate umo->umo_stack = &umu->umu_stacks[ 36467c478bd9Sstevel@tonic-gate umem_stack_depth * (umo - umu->umu_hash)]; 36477c478bd9Sstevel@tonic-gate } 36487c478bd9Sstevel@tonic-gate 36497c478bd9Sstevel@tonic-gate umoend = umu->umu_hash + umu->umu_nelems; 36507c478bd9Sstevel@tonic-gate for (umo = umu->umu_hash; umo < umoend; umo++) { 36517c478bd9Sstevel@tonic-gate bucket = umo->umo_signature & (umu->umu_size - 1); 36527c478bd9Sstevel@tonic-gate umo->umo_next = umu->umu_hash[bucket].umo_head; 36537c478bd9Sstevel@tonic-gate umu->umu_hash[bucket].umo_head = umo; 36547c478bd9Sstevel@tonic-gate } 36557c478bd9Sstevel@tonic-gate } 36567c478bd9Sstevel@tonic-gate 36577c478bd9Sstevel@tonic-gate /* 36587c478bd9Sstevel@tonic-gate * Finish computing the hash 
signature from the stack trace, and then 36597c478bd9Sstevel@tonic-gate * see if the owner is in the hash table. If so, update our stats. 36607c478bd9Sstevel@tonic-gate */ 36617c478bd9Sstevel@tonic-gate for (i = 0; i < depth; i++) 36627c478bd9Sstevel@tonic-gate signature += bcp->bc_stack[i]; 36637c478bd9Sstevel@tonic-gate 36647c478bd9Sstevel@tonic-gate bucket = signature & (umu->umu_size - 1); 36657c478bd9Sstevel@tonic-gate 36667c478bd9Sstevel@tonic-gate for (umo = umu->umu_hash[bucket].umo_head; umo; umo = umo->umo_next) { 36677c478bd9Sstevel@tonic-gate if (umo->umo_signature == signature) { 36687c478bd9Sstevel@tonic-gate size_t difference = 0; 36697c478bd9Sstevel@tonic-gate 36707c478bd9Sstevel@tonic-gate difference |= umo->umo_data_size - data_size; 36717c478bd9Sstevel@tonic-gate difference |= umo->umo_depth - depth; 36727c478bd9Sstevel@tonic-gate 36737c478bd9Sstevel@tonic-gate for (i = 0; i < depth; i++) { 36747c478bd9Sstevel@tonic-gate difference |= umo->umo_stack[i] - 36757c478bd9Sstevel@tonic-gate bcp->bc_stack[i]; 36767c478bd9Sstevel@tonic-gate } 36777c478bd9Sstevel@tonic-gate 36787c478bd9Sstevel@tonic-gate if (difference == 0) { 36797c478bd9Sstevel@tonic-gate umo->umo_total_size += size; 36807c478bd9Sstevel@tonic-gate umo->umo_num++; 36817c478bd9Sstevel@tonic-gate return; 36827c478bd9Sstevel@tonic-gate } 36837c478bd9Sstevel@tonic-gate } 36847c478bd9Sstevel@tonic-gate } 36857c478bd9Sstevel@tonic-gate 36867c478bd9Sstevel@tonic-gate /* 36877c478bd9Sstevel@tonic-gate * If the owner is not yet hashed, grab the next element and fill it 36887c478bd9Sstevel@tonic-gate * in based on the allocation information. 
36897c478bd9Sstevel@tonic-gate */ 36907c478bd9Sstevel@tonic-gate umo = &umu->umu_hash[umu->umu_nelems++]; 36917c478bd9Sstevel@tonic-gate umo->umo_next = umu->umu_hash[bucket].umo_head; 36927c478bd9Sstevel@tonic-gate umu->umu_hash[bucket].umo_head = umo; 36937c478bd9Sstevel@tonic-gate 36947c478bd9Sstevel@tonic-gate umo->umo_signature = signature; 36957c478bd9Sstevel@tonic-gate umo->umo_num = 1; 36967c478bd9Sstevel@tonic-gate umo->umo_data_size = data_size; 36977c478bd9Sstevel@tonic-gate umo->umo_total_size = size; 36987c478bd9Sstevel@tonic-gate umo->umo_depth = depth; 36997c478bd9Sstevel@tonic-gate 37007c478bd9Sstevel@tonic-gate for (i = 0; i < depth; i++) 37017c478bd9Sstevel@tonic-gate umo->umo_stack[i] = bcp->bc_stack[i]; 37027c478bd9Sstevel@tonic-gate } 37037c478bd9Sstevel@tonic-gate 37047c478bd9Sstevel@tonic-gate /* 37057c478bd9Sstevel@tonic-gate * When ::umausers is invoked without the -f flag, we simply update our hash 37067c478bd9Sstevel@tonic-gate * table with the information from each allocated bufctl. 37077c478bd9Sstevel@tonic-gate */ 37087c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 37097c478bd9Sstevel@tonic-gate static int 37107c478bd9Sstevel@tonic-gate umause1(uintptr_t addr, const umem_bufctl_audit_t *bcp, umusers_t *umu) 37117c478bd9Sstevel@tonic-gate { 37127c478bd9Sstevel@tonic-gate const umem_cache_t *cp = umu->umu_cache; 37137c478bd9Sstevel@tonic-gate 37147c478bd9Sstevel@tonic-gate umu_add(umu, bcp, cp->cache_bufsize, cp->cache_bufsize); 37157c478bd9Sstevel@tonic-gate return (WALK_NEXT); 37167c478bd9Sstevel@tonic-gate } 37177c478bd9Sstevel@tonic-gate 37187c478bd9Sstevel@tonic-gate /* 37197c478bd9Sstevel@tonic-gate * When ::umausers is invoked with the -f flag, we print out the information 37207c478bd9Sstevel@tonic-gate * for each bufctl as well as updating the hash table. 
37217c478bd9Sstevel@tonic-gate */ 37227c478bd9Sstevel@tonic-gate static int 37237c478bd9Sstevel@tonic-gate umause2(uintptr_t addr, const umem_bufctl_audit_t *bcp, umusers_t *umu) 37247c478bd9Sstevel@tonic-gate { 37257c478bd9Sstevel@tonic-gate int i, depth = MIN(bcp->bc_depth, umem_stack_depth); 37267c478bd9Sstevel@tonic-gate const umem_cache_t *cp = umu->umu_cache; 37277c478bd9Sstevel@tonic-gate 37287c478bd9Sstevel@tonic-gate mdb_printf("size %d, addr %p, thread %p, cache %s\n", 37297c478bd9Sstevel@tonic-gate cp->cache_bufsize, addr, bcp->bc_thread, cp->cache_name); 37307c478bd9Sstevel@tonic-gate 37317c478bd9Sstevel@tonic-gate for (i = 0; i < depth; i++) 37327c478bd9Sstevel@tonic-gate mdb_printf("\t %a\n", bcp->bc_stack[i]); 37337c478bd9Sstevel@tonic-gate 37347c478bd9Sstevel@tonic-gate umu_add(umu, bcp, cp->cache_bufsize, cp->cache_bufsize); 37357c478bd9Sstevel@tonic-gate return (WALK_NEXT); 37367c478bd9Sstevel@tonic-gate } 37377c478bd9Sstevel@tonic-gate 37387c478bd9Sstevel@tonic-gate /* 37397c478bd9Sstevel@tonic-gate * We sort our results by allocation size before printing them. 37407c478bd9Sstevel@tonic-gate */ 37417c478bd9Sstevel@tonic-gate static int 37427c478bd9Sstevel@tonic-gate umownercmp(const void *lp, const void *rp) 37437c478bd9Sstevel@tonic-gate { 37447c478bd9Sstevel@tonic-gate const umowner_t *lhs = lp; 37457c478bd9Sstevel@tonic-gate const umowner_t *rhs = rp; 37467c478bd9Sstevel@tonic-gate 37477c478bd9Sstevel@tonic-gate return (rhs->umo_total_size - lhs->umo_total_size); 37487c478bd9Sstevel@tonic-gate } 37497c478bd9Sstevel@tonic-gate 37507c478bd9Sstevel@tonic-gate /* 37517c478bd9Sstevel@tonic-gate * The main engine of ::umausers is relatively straightforward: First we 37527c478bd9Sstevel@tonic-gate * accumulate our list of umem_cache_t addresses into the umclist_t. Next we 37537c478bd9Sstevel@tonic-gate * iterate over the allocated bufctls of each cache in the list. Finally, 37547c478bd9Sstevel@tonic-gate * we sort and print our results. 
/*ARGSUSED*/
int
umausers(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	int mem_threshold = 8192;	/* Minimum # bytes for printing */
	int cnt_threshold = 100;	/* Minimum # blocks for printing */
	int audited_caches = 0;		/* Number of UMF_AUDIT caches found */
	int do_all_caches = 1;		/* Do all caches (no arguments) */
	int opt_e = FALSE;		/* Include "small" users */
	int opt_f = FALSE;		/* Print stack traces */

	mdb_walk_cb_t callback = (mdb_walk_cb_t)umause1;
	umowner_t *umo, *umoend;
	int i, oelems;

	umclist_t umc;
	umusers_t umu;

	/* ::umausers is a global dcmd; it takes no address */
	if (flags & DCMD_ADDRSPEC)
		return (DCMD_USAGE);

	bzero(&umc, sizeof (umc));
	bzero(&umu, sizeof (umu));

	/*
	 * Consume options and cache-name arguments in any order:
	 * mdb_getopts() returns the index of the first non-option
	 * argument, which we treat as a cache name, then loop until
	 * all of argv has been consumed.
	 */
	while ((i = mdb_getopts(argc, argv,
	    'e', MDB_OPT_SETBITS, TRUE, &opt_e,
	    'f', MDB_OPT_SETBITS, TRUE, &opt_f, NULL)) != argc) {

		argv += i;	/* skip past options we just processed */
		argc -= i;	/* adjust argc */

		if (argv->a_type != MDB_TYPE_STRING || *argv->a_un.a_str == '-')
			return (DCMD_USAGE);

		oelems = umc.umc_nelems;
		umc.umc_name = argv->a_un.a_str;
		(void) mdb_walk("umem_cache", (mdb_walk_cb_t)umc_add, &umc);

		/* if the walk added no caches, the name matched nothing */
		if (umc.umc_nelems == oelems) {
			mdb_warn("unknown umem cache: %s\n", umc.umc_name);
			return (DCMD_ERR);
		}

		do_all_caches = 0;
		argv++;
		argc--;
	}

	if (opt_e)
		mem_threshold = cnt_threshold = 0;

	if (opt_f)
		callback = (mdb_walk_cb_t)umause2;

	if (do_all_caches) {
		umc.umc_name = NULL; /* match all cache names */
		(void) mdb_walk("umem_cache", (mdb_walk_cb_t)umc_add, &umc);
	}

	for (i = 0; i < umc.umc_nelems; i++) {
		uintptr_t cp = umc.umc_caches[i];
		umem_cache_t c;

		if (mdb_vread(&c, sizeof (c), cp) == -1) {
			mdb_warn("failed to read cache at %p", cp);
			continue;
		}

		/* only audited caches record allocation stacks */
		if (!(c.cache_flags & UMF_AUDIT)) {
			if (!do_all_caches) {
				mdb_warn("UMF_AUDIT is not enabled for %s\n",
				    c.cache_name);
			}
			continue;
		}

		umu.umu_cache = &c;
		(void) mdb_pwalk("bufctl", callback, &umu, cp);
		audited_caches++;
	}

	if (audited_caches == 0 && do_all_caches) {
		mdb_warn("UMF_AUDIT is not enabled for any caches\n");
		return (DCMD_ERR);
	}

	/* sort callers by total outstanding bytes, largest first */
	qsort(umu.umu_hash, umu.umu_nelems, sizeof (umowner_t), umownercmp);
	umoend = umu.umu_hash + umu.umu_nelems;

	for (umo = umu.umu_hash; umo < umoend; umo++) {
		if (umo->umo_total_size < mem_threshold &&
		    umo->umo_num < cnt_threshold)
			continue;
		mdb_printf("%lu bytes for %u allocations with data size %lu:\n",
		    umo->umo_total_size, umo->umo_num, umo->umo_data_size);
		for (i = 0; i < umo->umo_depth; i++)
			mdb_printf("\t %a\n", umo->umo_stack[i]);
	}

	return (DCMD_OK);
}

/*
 * Tag prepended to every malloc(3C) buffer; see the layout comment
 * above um_umem_buffer_cb() for details.
 */
struct malloc_data {
	uint32_t malloc_size;
	uint32_t malloc_stat; /* == UMEM_MALLOC_ENCODE(state, malloc_size) */
};

/* largest mallocsz that can fit in a umem cache buffer */
#ifdef _LP64
#define	UMI_MAX_BUCKET	(UMEM_MAXBUF - 2*sizeof (struct malloc_data))
#else
#define	UMI_MAX_BUCKET	(UMEM_MAXBUF - sizeof (struct malloc_data))
#endif
typedef struct umem_malloc_info {
	size_t um_total;	/* total allocated buffers */
	size_t um_malloc;	/* malloc buffers */
	size_t um_malloc_size;	/* sum of malloc buffer sizes */
	size_t um_malloc_overhead; /* sum of in-chunk overheads */

	umem_cache_t *um_cp;	/* cache currently being walked */

	uint_t *um_bucket;	/* per-mallocsz counts, or NULL */
} umem_malloc_info_t;

/*
 * Print a histogram of the counts in um_bucket[minmalloc..maxmalloc],
 * binned linearly or geometrically into at most maxbuckets bins of at
 * least minbucketsize each.
 */
static void
umem_malloc_print_dist(uint_t *um_bucket, size_t minmalloc, size_t maxmalloc,
    size_t maxbuckets, size_t minbucketsize, int geometric)
{
	uint64_t um_malloc;
	int minb = -1;
	int maxb = -1;
	int buckets;
	int nbucks;
	int i;
	int b;
	const int *distarray;

	minb = (int)minmalloc;
	maxb = (int)maxmalloc;

	nbucks = buckets = maxb - minb + 1;

	/* total count, used to scale the distribution bars */
	um_malloc = 0;
	for (b = minb; b <= maxb; b++)
		um_malloc += um_bucket[b];

	if (maxbuckets != 0)
		buckets = MIN(buckets, maxbuckets);

	if (minbucketsize > 1) {
		buckets = MIN(buckets, nbucks/minbucketsize);
		if (buckets == 0) {
			/* range smaller than one bucket: use a single bin */
			buckets = 1;
			minbucketsize = nbucks;
		}
	}

	if (geometric)
		distarray = dist_geometric(buckets, minb, maxb, minbucketsize);
	else
		distarray = dist_linear(buckets, minb, maxb);

	dist_print_header("malloc size", 11, "count");
	for (i = 0; i < buckets; i++) {
		dist_print_bucket(distarray, i, um_bucket, um_malloc, 11);
	}
	mdb_printf("\n");
}

/*
 * A malloc()ed buffer looks like:
 *
 *	<----------- mi.malloc_size --->
 *	<----------- cp.cache_bufsize ------------------>
 *	<----------- cp.cache_chunksize -------------------------------->
 *	+-------+-----------------------+---------------+---------------+
 *	|/tag///| mallocsz		|/round-off/////|/debug info////|
 *	+-------+---------------------------------------+---------------+
 *		<-- usable space ------>
 *
 * mallocsz is the argument to malloc(3C).
 * mi.malloc_size is the actual size passed to umem_alloc(), which
 * is rounded up to the smallest available cache size, which is
 * cache_bufsize.  If there is debugging or alignment overhead in
 * the cache, that is reflected in a larger cache_chunksize.
 *
 * The tag at the beginning of the buffer is either 8-bytes or 16-bytes,
 * depending upon the ISA's alignment requirements.  For 32-bit allocations,
 * it is always a 8-byte tag.  For 64-bit allocations larger than 8 bytes,
 * the tag has 8 bytes of padding before it.
 *
 * 32-byte, 64-byte buffers <= 8 bytes:
 *	+-------+-------+--------- ...
 *	|/size//|/stat//| mallocsz ...
 *	+-------+-------+--------- ...
 *			^
 *			pointer returned from malloc(3C)
 *
 * 64-byte buffers > 8 bytes:
 *	+---------------+-------+-------+--------- ...
 *	|/padding///////|/size//|/stat//| mallocsz ...
 *	+---------------+-------+-------+--------- ...
 *					^
 *					pointer returned from malloc(3C)
 *
 * The "size" field is "malloc_size", which is mallocsz + the padding.
 * The "stat" field is derived from malloc_size, and functions as a
 * validation that this buffer is actually from malloc(3C).
 */
/*ARGSUSED*/
static int
um_umem_buffer_cb(uintptr_t addr, void *buf, umem_malloc_info_t *ump)
{
	struct malloc_data md;
	size_t m_addr = addr;
	size_t overhead = sizeof (md);
	size_t mallocsz;

	ump->um_total++;

#ifdef _LP64
	/* skip the 8 bytes of padding preceding the tag (see above) */
	if (ump->um_cp->cache_bufsize > UMEM_SECOND_ALIGN) {
		m_addr += overhead;
		overhead += sizeof (md);
	}
#endif

	if (mdb_vread(&md, sizeof (md), m_addr) == -1) {
		mdb_warn("unable to read malloc header at %p", m_addr);
		return (WALK_NEXT);
	}

	/* only count buffers whose tag validates as a malloc(3C) tag */
	switch (UMEM_MALLOC_DECODE(md.malloc_stat, md.malloc_size)) {
	case MALLOC_MAGIC:
#ifdef _LP64
	case MALLOC_SECOND_MAGIC:
#endif
		mallocsz = md.malloc_size - overhead;

		ump->um_malloc++;
		ump->um_malloc_size += mallocsz;
		ump->um_malloc_overhead += overhead;

		/* include round-off and debug overhead */
		ump->um_malloc_overhead +=
		    ump->um_cp->cache_chunksize - md.malloc_size;

		if (ump->um_bucket != NULL && mallocsz <= UMI_MAX_BUCKET)
			ump->um_bucket[mallocsz]++;

		break;
	default:
		break;
	}

	return (WALK_NEXT);
}

/*
 * Read the target's umem_alloc_sizes[] array (the list of cache sizes
 * malloc() draws from) into GC-managed storage.  Returns 0 on success,
 * -1 (with *out = NULL) on failure.
 */
int
get_umem_alloc_sizes(int **out, size_t *out_num)
{
	GElf_Sym sym;

	if (umem_lookup_by_name("umem_alloc_sizes", &sym) == -1) {
		mdb_warn("unable to look up umem_alloc_sizes");
		return (-1);
	}

	*out = mdb_alloc(sym.st_size, UM_SLEEP | UM_GC);
	*out_num = sym.st_size / sizeof (int);

	if (mdb_vread(*out, sym.st_size, sym.st_value) == -1) {
		mdb_warn("unable to read umem_alloc_sizes (%p)", sym.st_value);
		*out = NULL;
		return (-1);
	}

	return (0);
}


/*
 * umem_cache walker callback: accumulate malloc statistics for every
 * "umem_alloc_*" cache by walking its allocated buffers.
 */
static int
um_umem_cache_cb(uintptr_t addr, umem_cache_t *cp, umem_malloc_info_t *ump)
{
	if (strncmp(cp->cache_name, "umem_alloc_", strlen("umem_alloc_")) != 0)
		return (WALK_NEXT);

	ump->um_cp = cp;

	if (mdb_pwalk("umem", (mdb_walk_cb_t)um_umem_buffer_cb, ump, addr) ==
	    -1) {
		mdb_warn("can't walk 'umem' for cache %p", addr);
		return (WALK_ERR);
	}

	return (WALK_NEXT);
}
umem_cache_t *cp, umem_malloc_info_t *ump) 4039789d94c2Sjwadams { 4040789d94c2Sjwadams if (strncmp(cp->cache_name, "umem_alloc_", strlen("umem_alloc_")) != 0) 4041789d94c2Sjwadams return (WALK_NEXT); 4042789d94c2Sjwadams 4043789d94c2Sjwadams ump->um_cp = cp; 4044789d94c2Sjwadams 4045789d94c2Sjwadams if (mdb_pwalk("umem", (mdb_walk_cb_t)um_umem_buffer_cb, ump, addr) == 4046789d94c2Sjwadams -1) { 4047789d94c2Sjwadams mdb_warn("can't walk 'umem' for cache %p", addr); 4048789d94c2Sjwadams return (WALK_ERR); 4049789d94c2Sjwadams } 4050789d94c2Sjwadams 4051789d94c2Sjwadams return (WALK_NEXT); 4052789d94c2Sjwadams } 4053789d94c2Sjwadams 4054789d94c2Sjwadams void 4055789d94c2Sjwadams umem_malloc_dist_help(void) 4056789d94c2Sjwadams { 4057789d94c2Sjwadams mdb_printf("%s\n", 4058789d94c2Sjwadams "report distribution of outstanding malloc()s"); 4059789d94c2Sjwadams mdb_dec_indent(2); 4060789d94c2Sjwadams mdb_printf("%<b>OPTIONS%</b>\n"); 4061789d94c2Sjwadams mdb_inc_indent(2); 4062789d94c2Sjwadams mdb_printf("%s", 4063789d94c2Sjwadams " -b maxbins\n" 4064789d94c2Sjwadams " Use at most maxbins bins for the data\n" 4065789d94c2Sjwadams " -B minbinsize\n" 4066789d94c2Sjwadams " Make the bins at least minbinsize bytes apart\n" 4067789d94c2Sjwadams " -d dump the raw data out, without binning\n" 4068789d94c2Sjwadams " -g use geometric binning instead of linear binning\n"); 4069789d94c2Sjwadams } 4070789d94c2Sjwadams 4071789d94c2Sjwadams /*ARGSUSED*/ 4072789d94c2Sjwadams int 4073789d94c2Sjwadams umem_malloc_dist(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 4074789d94c2Sjwadams { 4075789d94c2Sjwadams umem_malloc_info_t mi; 4076789d94c2Sjwadams uint_t geometric = 0; 4077789d94c2Sjwadams uint_t dump = 0; 4078789d94c2Sjwadams size_t maxbuckets = 0; 4079789d94c2Sjwadams size_t minbucketsize = 0; 4080789d94c2Sjwadams 4081789d94c2Sjwadams size_t minalloc = 0; 4082789d94c2Sjwadams size_t maxalloc = UMI_MAX_BUCKET; 4083789d94c2Sjwadams 4084789d94c2Sjwadams if (flags & 
DCMD_ADDRSPEC) 4085789d94c2Sjwadams return (DCMD_USAGE); 4086789d94c2Sjwadams 4087789d94c2Sjwadams if (mdb_getopts(argc, argv, 4088789d94c2Sjwadams 'd', MDB_OPT_SETBITS, TRUE, &dump, 4089789d94c2Sjwadams 'g', MDB_OPT_SETBITS, TRUE, &geometric, 4090789d94c2Sjwadams 'b', MDB_OPT_UINTPTR, &maxbuckets, 4091789d94c2Sjwadams 'B', MDB_OPT_UINTPTR, &minbucketsize, 4092789d94c2Sjwadams 0) != argc) 4093789d94c2Sjwadams return (DCMD_USAGE); 4094789d94c2Sjwadams 4095789d94c2Sjwadams bzero(&mi, sizeof (mi)); 4096789d94c2Sjwadams mi.um_bucket = mdb_zalloc((UMI_MAX_BUCKET + 1) * sizeof (*mi.um_bucket), 4097789d94c2Sjwadams UM_SLEEP | UM_GC); 4098789d94c2Sjwadams 4099789d94c2Sjwadams if (mdb_walk("umem_cache", (mdb_walk_cb_t)um_umem_cache_cb, 4100789d94c2Sjwadams &mi) == -1) { 4101789d94c2Sjwadams mdb_warn("unable to walk 'umem_cache'"); 4102789d94c2Sjwadams return (DCMD_ERR); 4103789d94c2Sjwadams } 4104789d94c2Sjwadams 4105789d94c2Sjwadams if (dump) { 4106789d94c2Sjwadams int i; 4107789d94c2Sjwadams for (i = minalloc; i <= maxalloc; i++) 4108789d94c2Sjwadams mdb_printf("%d\t%d\n", i, mi.um_bucket[i]); 4109789d94c2Sjwadams 4110789d94c2Sjwadams return (DCMD_OK); 4111789d94c2Sjwadams } 4112789d94c2Sjwadams 4113789d94c2Sjwadams umem_malloc_print_dist(mi.um_bucket, minalloc, maxalloc, 4114789d94c2Sjwadams maxbuckets, minbucketsize, geometric); 4115789d94c2Sjwadams 4116789d94c2Sjwadams return (DCMD_OK); 4117789d94c2Sjwadams } 4118789d94c2Sjwadams 4119789d94c2Sjwadams void 4120789d94c2Sjwadams umem_malloc_info_help(void) 4121789d94c2Sjwadams { 4122789d94c2Sjwadams mdb_printf("%s\n", 4123789d94c2Sjwadams "report information about malloc()s by cache. 
"); 4124789d94c2Sjwadams mdb_dec_indent(2); 4125789d94c2Sjwadams mdb_printf("%<b>OPTIONS%</b>\n"); 4126789d94c2Sjwadams mdb_inc_indent(2); 4127789d94c2Sjwadams mdb_printf("%s", 4128789d94c2Sjwadams " -b maxbins\n" 4129789d94c2Sjwadams " Use at most maxbins bins for the data\n" 4130789d94c2Sjwadams " -B minbinsize\n" 4131789d94c2Sjwadams " Make the bins at least minbinsize bytes apart\n" 4132789d94c2Sjwadams " -d dump the raw distribution data without binning\n" 4133789d94c2Sjwadams #ifndef _KMDB 4134789d94c2Sjwadams " -g use geometric binning instead of linear binning\n" 4135789d94c2Sjwadams #endif 4136789d94c2Sjwadams ""); 4137789d94c2Sjwadams } 4138789d94c2Sjwadams int 4139789d94c2Sjwadams umem_malloc_info(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 4140789d94c2Sjwadams { 4141789d94c2Sjwadams umem_cache_t c; 4142789d94c2Sjwadams umem_malloc_info_t mi; 4143789d94c2Sjwadams 4144789d94c2Sjwadams int skip = 0; 4145789d94c2Sjwadams 4146789d94c2Sjwadams size_t maxmalloc; 4147789d94c2Sjwadams size_t overhead; 4148789d94c2Sjwadams size_t allocated; 4149789d94c2Sjwadams size_t avg_malloc; 4150789d94c2Sjwadams size_t overhead_pct; /* 1000 * overhead_percent */ 4151789d94c2Sjwadams 4152789d94c2Sjwadams uint_t verbose = 0; 4153789d94c2Sjwadams uint_t dump = 0; 4154789d94c2Sjwadams uint_t geometric = 0; 4155789d94c2Sjwadams size_t maxbuckets = 0; 4156789d94c2Sjwadams size_t minbucketsize = 0; 4157789d94c2Sjwadams 4158789d94c2Sjwadams int *alloc_sizes; 4159789d94c2Sjwadams int idx; 4160789d94c2Sjwadams size_t num; 4161789d94c2Sjwadams size_t minmalloc; 4162789d94c2Sjwadams 4163789d94c2Sjwadams if (mdb_getopts(argc, argv, 4164789d94c2Sjwadams 'd', MDB_OPT_SETBITS, TRUE, &dump, 4165789d94c2Sjwadams 'g', MDB_OPT_SETBITS, TRUE, &geometric, 4166789d94c2Sjwadams 'b', MDB_OPT_UINTPTR, &maxbuckets, 4167789d94c2Sjwadams 'B', MDB_OPT_UINTPTR, &minbucketsize, 4168789d94c2Sjwadams 0) != argc) 4169789d94c2Sjwadams return (DCMD_USAGE); 4170789d94c2Sjwadams 
4171789d94c2Sjwadams if (dump || geometric || (maxbuckets != 0) || (minbucketsize != 0)) 4172789d94c2Sjwadams verbose = 1; 4173789d94c2Sjwadams 4174789d94c2Sjwadams if (!(flags & DCMD_ADDRSPEC)) { 4175789d94c2Sjwadams if (mdb_walk_dcmd("umem_cache", "umem_malloc_info", 4176789d94c2Sjwadams argc, argv) == -1) { 4177789d94c2Sjwadams mdb_warn("can't walk umem_cache"); 4178789d94c2Sjwadams return (DCMD_ERR); 4179789d94c2Sjwadams } 4180789d94c2Sjwadams return (DCMD_OK); 4181789d94c2Sjwadams } 4182789d94c2Sjwadams 4183789d94c2Sjwadams if (!mdb_vread(&c, sizeof (c), addr)) { 4184789d94c2Sjwadams mdb_warn("unable to read cache at %p", addr); 4185789d94c2Sjwadams return (DCMD_ERR); 4186789d94c2Sjwadams } 4187789d94c2Sjwadams 4188789d94c2Sjwadams if (strncmp(c.cache_name, "umem_alloc_", strlen("umem_alloc_")) != 0) { 4189789d94c2Sjwadams if (!(flags & DCMD_LOOP)) 4190789d94c2Sjwadams mdb_warn("umem_malloc_info: cache \"%s\" is not used " 4191789d94c2Sjwadams "by malloc()\n", c.cache_name); 4192789d94c2Sjwadams skip = 1; 4193789d94c2Sjwadams } 4194789d94c2Sjwadams 4195789d94c2Sjwadams /* 4196789d94c2Sjwadams * normally, print the header only the first time. 
In verbose mode, 4197789d94c2Sjwadams * print the header on every non-skipped buffer 4198789d94c2Sjwadams */ 4199789d94c2Sjwadams if ((!verbose && DCMD_HDRSPEC(flags)) || (verbose && !skip)) 4200789d94c2Sjwadams mdb_printf("%<ul>%-?s %6s %6s %8s %8s %10s %10s %6s%</ul>\n", 4201789d94c2Sjwadams "CACHE", "BUFSZ", "MAXMAL", 4202789d94c2Sjwadams "BUFMALLC", "AVG_MAL", "MALLOCED", "OVERHEAD", "%OVER"); 4203789d94c2Sjwadams 4204789d94c2Sjwadams if (skip) 4205789d94c2Sjwadams return (DCMD_OK); 4206789d94c2Sjwadams 4207789d94c2Sjwadams maxmalloc = c.cache_bufsize - sizeof (struct malloc_data); 4208789d94c2Sjwadams #ifdef _LP64 4209789d94c2Sjwadams if (c.cache_bufsize > UMEM_SECOND_ALIGN) 4210789d94c2Sjwadams maxmalloc -= sizeof (struct malloc_data); 4211789d94c2Sjwadams #endif 4212789d94c2Sjwadams 4213789d94c2Sjwadams bzero(&mi, sizeof (mi)); 4214789d94c2Sjwadams mi.um_cp = &c; 4215789d94c2Sjwadams if (verbose) 4216789d94c2Sjwadams mi.um_bucket = 4217789d94c2Sjwadams mdb_zalloc((UMI_MAX_BUCKET + 1) * sizeof (*mi.um_bucket), 4218789d94c2Sjwadams UM_SLEEP | UM_GC); 4219789d94c2Sjwadams 4220789d94c2Sjwadams if (mdb_pwalk("umem", (mdb_walk_cb_t)um_umem_buffer_cb, &mi, addr) == 4221789d94c2Sjwadams -1) { 4222789d94c2Sjwadams mdb_warn("can't walk 'umem'"); 4223789d94c2Sjwadams return (DCMD_ERR); 4224789d94c2Sjwadams } 4225789d94c2Sjwadams 4226789d94c2Sjwadams overhead = mi.um_malloc_overhead; 4227789d94c2Sjwadams allocated = mi.um_malloc_size; 4228789d94c2Sjwadams 4229789d94c2Sjwadams /* do integer round off for the average */ 4230789d94c2Sjwadams if (mi.um_malloc != 0) 4231789d94c2Sjwadams avg_malloc = (allocated + (mi.um_malloc - 1)/2) / mi.um_malloc; 4232789d94c2Sjwadams else 4233789d94c2Sjwadams avg_malloc = 0; 4234789d94c2Sjwadams 4235789d94c2Sjwadams /* 4236789d94c2Sjwadams * include per-slab overhead 4237789d94c2Sjwadams * 4238789d94c2Sjwadams * Each slab in a given cache is the same size, and has the same 4239789d94c2Sjwadams * number of chunks in it; we read in the 
first slab on the 4240789d94c2Sjwadams * slab list to get the number of chunks for all slabs. To 4241789d94c2Sjwadams * compute the per-slab overhead, we just subtract the chunk usage 4242789d94c2Sjwadams * from the slabsize: 4243789d94c2Sjwadams * 4244789d94c2Sjwadams * +------------+-------+-------+ ... --+-------+-------+-------+ 4245789d94c2Sjwadams * |////////////| | | ... | |///////|///////| 4246789d94c2Sjwadams * |////color///| chunk | chunk | ... | chunk |/color/|/slab//| 4247789d94c2Sjwadams * |////////////| | | ... | |///////|///////| 4248789d94c2Sjwadams * +------------+-------+-------+ ... --+-------+-------+-------+ 4249789d94c2Sjwadams * | \_______chunksize * chunks_____/ | 4250789d94c2Sjwadams * \__________________________slabsize__________________________/ 4251789d94c2Sjwadams * 4252789d94c2Sjwadams * For UMF_HASH caches, there is an additional source of overhead; 4253789d94c2Sjwadams * the external umem_slab_t and per-chunk bufctl structures. We 4254789d94c2Sjwadams * include those in our per-slab overhead. 4255789d94c2Sjwadams * 4256789d94c2Sjwadams * Once we have a number for the per-slab overhead, we estimate 4257789d94c2Sjwadams * the actual overhead by treating the malloc()ed buffers as if 4258789d94c2Sjwadams * they were densely packed: 4259789d94c2Sjwadams * 4260789d94c2Sjwadams * additional overhead = (# mallocs) * (per-slab) / (chunks); 4261789d94c2Sjwadams * 4262789d94c2Sjwadams * carefully ordering the multiply before the divide, to avoid 4263789d94c2Sjwadams * round-off error. 
4264789d94c2Sjwadams */ 4265789d94c2Sjwadams if (mi.um_malloc != 0) { 4266789d94c2Sjwadams umem_slab_t slab; 4267789d94c2Sjwadams uintptr_t saddr = (uintptr_t)c.cache_nullslab.slab_next; 4268789d94c2Sjwadams 4269789d94c2Sjwadams if (mdb_vread(&slab, sizeof (slab), saddr) == -1) { 4270789d94c2Sjwadams mdb_warn("unable to read slab at %p\n", saddr); 4271789d94c2Sjwadams } else { 4272789d94c2Sjwadams long chunks = slab.slab_chunks; 4273789d94c2Sjwadams if (chunks != 0 && c.cache_chunksize != 0 && 4274789d94c2Sjwadams chunks <= c.cache_slabsize / c.cache_chunksize) { 4275789d94c2Sjwadams uintmax_t perslab = 4276789d94c2Sjwadams c.cache_slabsize - 4277789d94c2Sjwadams (c.cache_chunksize * chunks); 4278789d94c2Sjwadams 4279789d94c2Sjwadams if (c.cache_flags & UMF_HASH) { 4280789d94c2Sjwadams perslab += sizeof (umem_slab_t) + 4281789d94c2Sjwadams chunks * 4282789d94c2Sjwadams ((c.cache_flags & UMF_AUDIT) ? 4283789d94c2Sjwadams sizeof (umem_bufctl_audit_t) : 4284789d94c2Sjwadams sizeof (umem_bufctl_t)); 4285789d94c2Sjwadams } 4286789d94c2Sjwadams overhead += 4287789d94c2Sjwadams (perslab * (uintmax_t)mi.um_malloc)/chunks; 4288789d94c2Sjwadams } else { 4289789d94c2Sjwadams mdb_warn("invalid #chunks (%d) in slab %p\n", 4290789d94c2Sjwadams chunks, saddr); 4291789d94c2Sjwadams } 4292789d94c2Sjwadams } 4293789d94c2Sjwadams } 4294789d94c2Sjwadams 4295789d94c2Sjwadams if (allocated != 0) 4296789d94c2Sjwadams overhead_pct = (1000ULL * overhead) / allocated; 4297789d94c2Sjwadams else 4298789d94c2Sjwadams overhead_pct = 0; 4299789d94c2Sjwadams 4300789d94c2Sjwadams mdb_printf("%0?p %6ld %6ld %8ld %8ld %10ld %10ld %3ld.%01ld%%\n", 4301789d94c2Sjwadams addr, c.cache_bufsize, maxmalloc, 4302789d94c2Sjwadams mi.um_malloc, avg_malloc, allocated, overhead, 4303789d94c2Sjwadams overhead_pct / 10, overhead_pct % 10); 4304789d94c2Sjwadams 4305789d94c2Sjwadams if (!verbose) 4306789d94c2Sjwadams return (DCMD_OK); 4307789d94c2Sjwadams 4308789d94c2Sjwadams if (!dump) 4309789d94c2Sjwadams 
mdb_printf("\n"); 4310789d94c2Sjwadams 4311789d94c2Sjwadams if (get_umem_alloc_sizes(&alloc_sizes, &num) == -1) 4312789d94c2Sjwadams return (DCMD_ERR); 4313789d94c2Sjwadams 4314789d94c2Sjwadams for (idx = 0; idx < num; idx++) { 4315789d94c2Sjwadams if (alloc_sizes[idx] == c.cache_bufsize) 4316789d94c2Sjwadams break; 4317789d94c2Sjwadams if (alloc_sizes[idx] == 0) { 4318789d94c2Sjwadams idx = num; /* 0-terminated array */ 4319789d94c2Sjwadams break; 4320789d94c2Sjwadams } 4321789d94c2Sjwadams } 4322789d94c2Sjwadams if (idx == num) { 4323789d94c2Sjwadams mdb_warn( 4324789d94c2Sjwadams "cache %p's size (%d) not in umem_alloc_sizes\n", 4325789d94c2Sjwadams addr, c.cache_bufsize); 4326789d94c2Sjwadams return (DCMD_ERR); 4327789d94c2Sjwadams } 4328789d94c2Sjwadams 4329789d94c2Sjwadams minmalloc = (idx == 0)? 0 : alloc_sizes[idx - 1]; 4330789d94c2Sjwadams if (minmalloc > 0) { 4331789d94c2Sjwadams #ifdef _LP64 4332789d94c2Sjwadams if (minmalloc > UMEM_SECOND_ALIGN) 4333789d94c2Sjwadams minmalloc -= sizeof (struct malloc_data); 4334789d94c2Sjwadams #endif 4335789d94c2Sjwadams minmalloc -= sizeof (struct malloc_data); 4336789d94c2Sjwadams minmalloc += 1; 4337789d94c2Sjwadams } 4338789d94c2Sjwadams 4339789d94c2Sjwadams if (dump) { 4340789d94c2Sjwadams for (idx = minmalloc; idx <= maxmalloc; idx++) 4341789d94c2Sjwadams mdb_printf("%d\t%d\n", idx, mi.um_bucket[idx]); 4342789d94c2Sjwadams mdb_printf("\n"); 4343789d94c2Sjwadams } else { 4344789d94c2Sjwadams umem_malloc_print_dist(mi.um_bucket, minmalloc, maxmalloc, 4345789d94c2Sjwadams maxbuckets, minbucketsize, geometric); 4346789d94c2Sjwadams } 4347789d94c2Sjwadams 4348789d94c2Sjwadams return (DCMD_OK); 4349789d94c2Sjwadams } 4350