xref: /illumos-gate/usr/src/cmd/mdb/common/modules/genunix/memory.c (revision af4c679f647cf088543c762e33d41a3ac52cfa14)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <mdb/mdb_modapi.h>
#include <sys/types.h>
#include <vm/page.h>
#include <sys/thread.h>
#include <sys/swap.h>
#include <sys/memlist.h>
#include <sys/vnode.h>
#if defined(__i386) || defined(__amd64)
#include <sys/balloon_impl.h>
#endif

/*
 * Page walker.
 * By default, this will walk all pages in the system.  If given an
 * address, it will walk all pages belonging to the vnode at that
 * address.
 */

/*
 * page_walk_data
 *
 * pw_hashleft is set to -1 when walking a vnode's pages, and holds the
 * number of hash locations remaining in the page hash table when
 * walking all pages.
 *
 * The astute reader will notice that pw_hashloc is only used when
 * reading all pages (to hold a pointer to our location in the page
 * hash table), and that pw_first is only used when reading the pages
 * belonging to a particular vnode (to hold a pointer to the first
 * page).  While these could be combined into a single pointer, they
 * are left separate for clarity.
 */
typedef struct page_walk_data {
	long		pw_hashleft;
	void		**pw_hashloc;
	uintptr_t	pw_first;
} page_walk_data_t;

int
page_walk_init(mdb_walk_state_t *wsp)
{
	page_walk_data_t	*pwd;
	void	**ptr;
	size_t	hashsz;
	vnode_t	vn;

	if (wsp->walk_addr == 0) {

		/*
		 * Walk all pages
		 */

		if ((mdb_readvar(&ptr, "page_hash") == -1) ||
		    (mdb_readvar(&hashsz, "page_hashsz") == -1) ||
		    (ptr == NULL) || (hashsz == 0)) {
			mdb_warn("page_hash, page_hashsz not found or invalid");
			return (WALK_ERR);
		}

		/*
		 * Since we are walking all pages, initialize hashleft
		 * to be the remaining number of entries in the page
		 * hash.  hashloc is set to the start of the page hash
		 * table.  Setting the walk address to 0 indicates that
		 * we aren't currently following a hash chain, and that
		 * we need to scan the page hash table for a page.
		 */
		pwd = mdb_alloc(sizeof (page_walk_data_t), UM_SLEEP);
		pwd->pw_hashleft = hashsz;
		pwd->pw_hashloc = ptr;
		wsp->walk_addr = 0;
	} else {

		/*
		 * Walk just this vnode
		 */

		if (mdb_vread(&vn, sizeof (vnode_t), wsp->walk_addr) == -1) {
			mdb_warn("unable to read vnode_t at %#lx",
			    wsp->walk_addr);
			return (WALK_ERR);
		}

		/*
		 * We set hashleft to -1 to indicate that we are
		 * walking a vnode, and initialize first to 0 (it is
		 * used to terminate the walk, so it must not be set
		 * until after we have walked the first page).  The
		 * walk address is set to the first page.
		 */
		pwd = mdb_alloc(sizeof (page_walk_data_t), UM_SLEEP);
		pwd->pw_hashleft = -1;
		pwd->pw_first = 0;

		wsp->walk_addr = (uintptr_t)vn.v_pages;
	}

	wsp->walk_data = pwd;

	return (WALK_NEXT);
}

int
page_walk_step(mdb_walk_state_t *wsp)
{
	page_walk_data_t	*pwd = wsp->walk_data;
	page_t		page;
	uintptr_t	pp;

	pp = wsp->walk_addr;

	if (pwd->pw_hashleft < 0) {

		/* We're walking a vnode's pages */

		/*
		 * If we don't have any pages to walk or we have come
		 * back around to the first one, we are done; if we
		 * can't read the page we're looking at, we fail.
		 */
		if (pp == 0 || pp == pwd->pw_first)
			return (WALK_DONE);
		if (mdb_vread(&page, sizeof (page_t), pp) == -1) {
			mdb_warn("unable to read page_t at %#lx", pp);
			return (WALK_ERR);
		}

		/*
		 * Set the walk address to the next page, and if the
		 * first page hasn't been set yet (i.e. we are on the
		 * first page), set it.
		 */
		wsp->walk_addr = (uintptr_t)page.p_vpnext;
		if (pwd->pw_first == 0)
			pwd->pw_first = pp;
	} else if (pwd->pw_hashleft > 0) {

		/* We're walking all pages */

		/*
		 * If pp (the walk address) is 0, we scan through
		 * the page hash table until we find a page.
		 */
		if (pp == 0) {

			/*
			 * Iterate through the page hash table until we
			 * find a page or reach the end.
			 */
			do {
				if (mdb_vread(&pp, sizeof (uintptr_t),
				    (uintptr_t)pwd->pw_hashloc) == -1) {
					mdb_warn("unable to read from %#p",
					    pwd->pw_hashloc);
					return (WALK_ERR);
				}
				pwd->pw_hashleft--;
				pwd->pw_hashloc++;
			} while (pwd->pw_hashleft && (pp == 0));

			/*
			 * We've reached the end; exit.
			 */
			if (pp == 0)
				return (WALK_DONE);
		}

		if (mdb_vread(&page, sizeof (page_t), pp) == -1) {
			mdb_warn("unable to read page_t at %#lx", pp);
			return (WALK_ERR);
		}

		/*
		 * Set the walk address to the next page.
		 */
		wsp->walk_addr = (uintptr_t)page.p_hash;

	} else {
		/* We've finished walking all pages. */
		return (WALK_DONE);
	}

	return (wsp->walk_callback(pp, &page, wsp->walk_cbdata));
}

void
page_walk_fini(mdb_walk_state_t *wsp)
{
	mdb_free(wsp->walk_data, sizeof (page_walk_data_t));
}
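
/*
 * A minimal usage sketch for the walker above (the vnode address is
 * hypothetical):
 *
 *	> ::walk page		(visit every page in the system)
 *	> addr::walk page	(visit only the pages of the vnode at addr)
 */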

/*
 * allpages walks all pages in the system in the order they appear in
 * the memseg structures.
 */

#define	PAGE_BUFFER	128

int
allpages_walk_init(mdb_walk_state_t *wsp)
{
	if (wsp->walk_addr != 0) {
		mdb_warn("allpages only supports global walks.\n");
		return (WALK_ERR);
	}

	if (mdb_layered_walk("memseg", wsp) == -1) {
		mdb_warn("couldn't walk 'memseg'");
		return (WALK_ERR);
	}

	wsp->walk_data = mdb_alloc(sizeof (page_t) * PAGE_BUFFER, UM_SLEEP);
	return (WALK_NEXT);
}

int
allpages_walk_step(mdb_walk_state_t *wsp)
{
	const struct memseg *msp = wsp->walk_layer;
	page_t *buf = wsp->walk_data;
	size_t pg_read, i;
	size_t pg_num = msp->pages_end - msp->pages_base;
	const page_t *pg_addr = msp->pages;

	while (pg_num > 0) {
		pg_read = MIN(pg_num, PAGE_BUFFER);

		if (mdb_vread(buf, pg_read * sizeof (page_t),
		    (uintptr_t)pg_addr) == -1) {
			mdb_warn("can't read page_t's at %#lx", pg_addr);
			return (WALK_ERR);
		}
		for (i = 0; i < pg_read; i++) {
			int ret = wsp->walk_callback((uintptr_t)&pg_addr[i],
			    &buf[i], wsp->walk_cbdata);

			if (ret != WALK_NEXT)
				return (ret);
		}
		pg_num -= pg_read;
		pg_addr += pg_read;
	}

	return (WALK_NEXT);
}

void
allpages_walk_fini(mdb_walk_state_t *wsp)
{
	mdb_free(wsp->walk_data, sizeof (page_t) * PAGE_BUFFER);
}
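
/*
 * Usage sketch: unlike the "page" walker above, "allpages" accepts no
 * starting address and visits pages in memseg order, e.g.
 * "::walk allpages".
 */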

/*
 * Hash table + LRU queue.
 * This table is used to cache recently read vnodes for the memstat
 * command, to reduce the number of mdb_vread calls.  This greatly
 * speeds up the memstat command on live, large CPU count systems.
 */

#define	VN_SMALL	401
#define	VN_LARGE	10007
#define	VN_HTABLE_KEY(p, hp)	((p) % ((hp)->vn_htable_buckets))

struct vn_htable_list {
	uint_t vn_flag;				/* v_flag from vnode	*/
	uintptr_t vn_ptr;			/* pointer to vnode	*/
	struct vn_htable_list *vn_q_next;	/* queue next pointer	*/
	struct vn_htable_list *vn_q_prev;	/* queue prev pointer	*/
	struct vn_htable_list *vn_h_next;	/* hash table pointer	*/
};

/*
 * vn_q_first        -> points to the head of the queue: the vnode that
 *                      was most recently used
 * vn_q_last         -> points to the least recently used vnode; it is
 *                      recycled once a new vnode is read
 * vn_htable         -> hash table
 * vn_htable_buf     -> contains htable objects
 * vn_htable_size    -> total number of items in the hash table
 * vn_htable_buckets -> number of buckets in the hash table
 */
typedef struct vn_htable {
	struct vn_htable_list  *vn_q_first;
	struct vn_htable_list  *vn_q_last;
	struct vn_htable_list **vn_htable;
	struct vn_htable_list  *vn_htable_buf;
	int vn_htable_size;
	int vn_htable_buckets;
} vn_htable_t;


/* allocate memory, initialize the hash table and the LRU queue */
static void
vn_htable_init(vn_htable_t *hp, size_t vn_size)
{
	int i;
	int htable_size = MAX(vn_size, VN_LARGE);

	if ((hp->vn_htable_buf = mdb_zalloc(sizeof (struct vn_htable_list)
	    * htable_size, UM_NOSLEEP|UM_GC)) == NULL) {
		htable_size = VN_SMALL;
		hp->vn_htable_buf = mdb_zalloc(sizeof (struct vn_htable_list)
		    * htable_size, UM_SLEEP|UM_GC);
	}

	hp->vn_htable = mdb_zalloc(sizeof (struct vn_htable_list *)
	    * htable_size, UM_SLEEP|UM_GC);

	hp->vn_q_first  = &hp->vn_htable_buf[0];
	hp->vn_q_last   = &hp->vn_htable_buf[htable_size - 1];
	hp->vn_q_first->vn_q_next = &hp->vn_htable_buf[1];
	hp->vn_q_last->vn_q_prev = &hp->vn_htable_buf[htable_size - 2];

	for (i = 1; i < (htable_size-1); i++) {
		hp->vn_htable_buf[i].vn_q_next = &hp->vn_htable_buf[i + 1];
		hp->vn_htable_buf[i].vn_q_prev = &hp->vn_htable_buf[i - 1];
	}

	hp->vn_htable_size = htable_size;
	hp->vn_htable_buckets = htable_size;
}


/*
 * Find the vnode whose address is ptr, and return its v_flag in vp->v_flag.
 * The function tries to find the needed information in the following order:
 *
 * 1. check if ptr is the first in the queue
 * 2. check if ptr is in the hash table (if so, move it to the head of
 *    the queue)
 * 3. do mdb_vread, remove the last item from the queue and the hash
 *    table, insert the new information into the freed object, and put
 *    this object at the head of the queue.
 */
static int
vn_get(vn_htable_t *hp, struct vnode *vp, uintptr_t ptr)
{
	int hkey;
	struct vn_htable_list *hent, **htmp, *q_next, *q_prev;
	struct vn_htable_list  *q_first = hp->vn_q_first;

	/* 1. vnode ptr is first in the queue, just get v_flag and return */
	if (q_first->vn_ptr == ptr) {
		vp->v_flag = q_first->vn_flag;

		return (0);
	}

	/* 2. search the hash table for this ptr */
	hkey = VN_HTABLE_KEY(ptr, hp);
	hent = hp->vn_htable[hkey];
	while (hent && (hent->vn_ptr != ptr))
		hent = hent->vn_h_next;

	/* 3. if hent is NULL, it is not in the hash table, do mdb_vread */
	if (hent == NULL) {
		struct vnode vn;

		if (mdb_vread(&vn, sizeof (vnode_t), ptr) == -1) {
			mdb_warn("unable to read vnode_t at %#lx", ptr);
			return (-1);
		}

		/* we will insert the read data into the last element in the queue */
		hent = hp->vn_q_last;

		/* remove the hp->vn_q_last object from the hash table */
		if (hent->vn_ptr) {
			htmp = &hp->vn_htable[VN_HTABLE_KEY(hent->vn_ptr, hp)];
			while (*htmp != hent)
				htmp = &(*htmp)->vn_h_next;
			*htmp = hent->vn_h_next;
		}

		/* insert the data into the newly freed object */
		hent->vn_ptr  = ptr;
		hent->vn_flag = vn.v_flag;

		/* insert the new object into the hash table */
		hent->vn_h_next = hp->vn_htable[hkey];
		hp->vn_htable[hkey] = hent;
	}

	/* Remove from the queue.  hent is not first, so vn_q_prev is not NULL */
	q_next = hent->vn_q_next;
	q_prev = hent->vn_q_prev;
	if (q_next == NULL)
		hp->vn_q_last = q_prev;
	else
		q_next->vn_q_prev = q_prev;
	q_prev->vn_q_next = q_next;

	/* Add to the front of the queue */
	hent->vn_q_prev = NULL;
	hent->vn_q_next = q_first;
	q_first->vn_q_prev = hent;
	hp->vn_q_first = hent;

	/* Set v_flag in the vnode pointer from hent */
	vp->v_flag = hent->vn_flag;

	return (0);
}
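
/*
 * Illustrative walk-through of vn_get() above: the first call for a
 * given vnode pointer misses both the queue head (step 1) and the hash
 * table (step 2), so the vnode is read with mdb_vread (step 3), its
 * address and v_flag are stored in the entry recycled from the tail of
 * the LRU queue, and that entry is moved to the head of the queue.  A
 * second call with the same pointer is then satisfied at step 1,
 * without any mdb_vread at all.
 */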

/* Summary statistics of pages */
typedef struct memstat {
	struct vnode    *ms_kvp;	/* Cached address of kernel vnode */
	struct vnode    *ms_unused_vp;	/* Unused pages vnode pointer	  */
	struct vnode    *ms_zvp;	/* Cached address of zio vnode    */
	uint64_t	ms_kmem;	/* Pages of kernel memory	  */
	uint64_t	ms_zfs_data;	/* Pages of zfs data		  */
	uint64_t	ms_anon;	/* Pages of anonymous memory	  */
	uint64_t	ms_vnode;	/* Pages of named (vnode) memory  */
	uint64_t	ms_exec;	/* Pages of exec/library memory	  */
	uint64_t	ms_cachelist;	/* Pages on the cachelist (free)  */
	uint64_t	ms_total;	/* Pages on page hash		  */
	vn_htable_t	*ms_vn_htable;	/* Pointer to hash table	  */
	struct vnode	ms_vn;		/* vnode buffer			  */
} memstat_t;

#define	MS_PP_ISKAS(pp, stats)				\
	((pp)->p_vnode == (stats)->ms_kvp)

#define	MS_PP_ISZFS_DATA(pp, stats)			\
	(((stats)->ms_zvp != NULL) && ((pp)->p_vnode == (stats)->ms_zvp))

/*
 * Summarize pages by type and update stat information
 */

/* ARGSUSED */
static int
memstat_callback(page_t *page, page_t *pp, memstat_t *stats)
{
	struct vnode *vp = &stats->ms_vn;

	if (pp->p_vnode == NULL || pp->p_vnode == stats->ms_unused_vp)
		return (WALK_NEXT);
	else if (MS_PP_ISKAS(pp, stats))
		stats->ms_kmem++;
	else if (MS_PP_ISZFS_DATA(pp, stats))
		stats->ms_zfs_data++;
	else if (PP_ISFREE(pp))
		stats->ms_cachelist++;
	else if (vn_get(stats->ms_vn_htable, vp, (uintptr_t)pp->p_vnode))
		return (WALK_ERR);
	else if (IS_SWAPFSVP(vp))
		stats->ms_anon++;
	else if ((vp->v_flag & VVMEXEC) != 0)
		stats->ms_exec++;
	else
		stats->ms_vnode++;

	stats->ms_total++;

	return (WALK_NEXT);
}

/* ARGSUSED */
int
memstat(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	ulong_t pagesize;
	pgcnt_t total_pages, physmem;
	ulong_t freemem;
	memstat_t stats;
	GElf_Sym sym;
	vn_htable_t ht;
	struct vnode *kvps;
	uintptr_t vn_size = 0;
#if defined(__i386) || defined(__amd64)
	bln_stats_t bln_stats;
	ssize_t bln_size;
#endif

	bzero(&stats, sizeof (memstat_t));

	/*
	 * -s size is an internal option that specifies the size of
	 * vn_htable.  The hash table size is chosen as follows: if the
	 * user specified a size larger than VN_LARGE, try it, falling
	 * back to VN_SMALL if the allocation fails.  Otherwise try
	 * VN_LARGE, again falling back to VN_SMALL if the allocation
	 * fails.  For better hash table efficiency, the size should be
	 * a prime number.
	 */
	if ((flags & DCMD_ADDRSPEC) || mdb_getopts(argc, argv,
	    's', MDB_OPT_UINTPTR, &vn_size, NULL) != argc)
		return (DCMD_USAGE);

	/* Initialize the vnode hash table and LRU queue */
	vn_htable_init(&ht, vn_size);
	stats.ms_vn_htable = &ht;

	/* Grab the base page size */
	if (mdb_readvar(&pagesize, "_pagesize") == -1) {
		mdb_warn("unable to read _pagesize");
		return (DCMD_ERR);
	}

	/* Total physical memory */
	if (mdb_readvar(&total_pages, "total_pages") == -1) {
		mdb_warn("unable to read total_pages");
		return (DCMD_ERR);
	}

	/* Artificially limited memory */
	if (mdb_readvar(&physmem, "physmem") == -1) {
		mdb_warn("unable to read physmem");
		return (DCMD_ERR);
	}

	/* read the kernel vnode array pointer */
	if (mdb_lookup_by_obj(MDB_OBJ_EXEC, "kvps",
	    (GElf_Sym *)&sym) == -1) {
		mdb_warn("unable to read kvps");
		return (DCMD_ERR);
	}
	kvps = (struct vnode *)(uintptr_t)sym.st_value;
	stats.ms_kvp = &kvps[KV_KVP];

	/*
	 * Read the zio vnode pointer.
	 */
	stats.ms_zvp = &kvps[KV_ZVP];

	/*
	 * If physmem != total_pages, then the administrator has limited the
	 * number of pages available in the system.  Excluded pages are
	 * associated with the unused pages vnode.  Read this vnode so the
	 * pages can be excluded in the page accounting.
	 */
	if (mdb_lookup_by_obj(MDB_OBJ_EXEC, "unused_pages_vp",
	    (GElf_Sym *)&sym) == -1) {
		mdb_warn("unable to read unused_pages_vp");
		return (DCMD_ERR);
	}
	stats.ms_unused_vp = (struct vnode *)(uintptr_t)sym.st_value;

	/* walk all pages, collect statistics */
	if (mdb_walk("allpages", (mdb_walk_cb_t)memstat_callback,
	    &stats) == -1) {
		mdb_warn("can't walk 'allpages'");
		return (DCMD_ERR);
	}

#define	MS_PCT_TOTAL(x)	((ulong_t)(((5 * physmem) + ((x) * 1000ull)) / \
		((physmem) * 10)))
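	/*
	 * Worked example (illustrative numbers): with physmem =
	 * total_pages = 1000000, MS_PCT_TOTAL(255000) evaluates to
	 * (255000 * 1000 + 5 * 1000000) / (1000000 * 10) = 26, i.e.
	 * 25.5% rounded to the nearest whole percent.
	 */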

	mdb_printf("Page Summary                Pages                MB"
	    "  %%Tot\n");
	mdb_printf("------------     ----------------  ----------------"
	    "  ----\n");
	mdb_printf("Kernel           %16llu  %16llu  %3lu%%\n",
	    stats.ms_kmem,
	    (uint64_t)stats.ms_kmem * pagesize / (1024 * 1024),
	    MS_PCT_TOTAL(stats.ms_kmem));

	if (stats.ms_zfs_data != 0)
		mdb_printf("ZFS File Data    %16llu  %16llu  %3lu%%\n",
		    stats.ms_zfs_data,
		    (uint64_t)stats.ms_zfs_data * pagesize / (1024 * 1024),
		    MS_PCT_TOTAL(stats.ms_zfs_data));

	mdb_printf("Anon             %16llu  %16llu  %3lu%%\n",
	    stats.ms_anon,
	    (uint64_t)stats.ms_anon * pagesize / (1024 * 1024),
	    MS_PCT_TOTAL(stats.ms_anon));
	mdb_printf("Exec and libs    %16llu  %16llu  %3lu%%\n",
	    stats.ms_exec,
	    (uint64_t)stats.ms_exec * pagesize / (1024 * 1024),
	    MS_PCT_TOTAL(stats.ms_exec));
	mdb_printf("Page cache       %16llu  %16llu  %3lu%%\n",
	    stats.ms_vnode,
	    (uint64_t)stats.ms_vnode * pagesize / (1024 * 1024),
	    MS_PCT_TOTAL(stats.ms_vnode));
	mdb_printf("Free (cachelist) %16llu  %16llu  %3lu%%\n",
	    stats.ms_cachelist,
	    (uint64_t)stats.ms_cachelist * pagesize / (1024 * 1024),
	    MS_PCT_TOTAL(stats.ms_cachelist));

	/*
	 * Occasionally, we double count pages above.  To avoid printing
	 * absurdly large values for freemem, we clamp it at zero.
	 */
	if (physmem > stats.ms_total)
		freemem = physmem - stats.ms_total;
	else
		freemem = 0;

#if defined(__i386) || defined(__amd64)
	/* Are we running under Xen?  If so, get balloon memory usage. */
	if ((bln_size = mdb_readvar(&bln_stats, "bln_stats")) != -1) {
		if (freemem > bln_stats.bln_hv_pages)
			freemem -= bln_stats.bln_hv_pages;
		else
			freemem = 0;
	}
#endif

	mdb_printf("Free (freelist)  %16lu  %16llu  %3lu%%\n", freemem,
	    (uint64_t)freemem * pagesize / (1024 * 1024),
	    MS_PCT_TOTAL(freemem));

#if defined(__i386) || defined(__amd64)
	if (bln_size != -1) {
		mdb_printf("Balloon          %16lu  %16llu  %3lu%%\n",
		    bln_stats.bln_hv_pages,
		    (uint64_t)bln_stats.bln_hv_pages * pagesize / (1024 * 1024),
		    MS_PCT_TOTAL(bln_stats.bln_hv_pages));
	}
#endif

	mdb_printf("\nTotal            %16lu  %16llu\n",
	    physmem,
	    (uint64_t)physmem * pagesize / (1024 * 1024));

	if (physmem != total_pages) {
		mdb_printf("Physical         %16lu  %16llu\n",
		    total_pages,
		    (uint64_t)total_pages * pagesize / (1024 * 1024));
	}

#undef MS_PCT_TOTAL

	return (DCMD_OK);
}
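
/*
 * Example invocation (a sketch; the numbers shown are illustrative
 * only):
 *
 *	> ::memstat
 *	Page Summary                Pages                MB  %Tot
 *	------------     ----------------  ----------------  ----
 *	Kernel                     109863               429   21%
 *	Anon                       158027               617   30%
 *	Exec and libs                2936                11    1%
 *	Page cache                  16185                63    3%
 *	Free (cachelist)            24809                96    5%
 *	Free (freelist)            211070               824   40%
 *
 *	Total                      522890              2042
 */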

int
page(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	page_t	p;

	if (!(flags & DCMD_ADDRSPEC)) {
		if (mdb_walk_dcmd("page", "page", argc, argv) == -1) {
			mdb_warn("can't walk pages");
			return (DCMD_ERR);
		}
		return (DCMD_OK);
	}

	if (DCMD_HDRSPEC(flags)) {
		mdb_printf("%<u>%?s %?s %16s %8s %3s %3s %2s %2s %2s%</u>\n",
		    "PAGE", "VNODE", "OFFSET", "SELOCK",
		    "LCT", "COW", "IO", "FS", "ST");
	}

	if (mdb_vread(&p, sizeof (page_t), addr) == -1) {
		mdb_warn("can't read page_t at %#lx", addr);
		return (DCMD_ERR);
	}

	mdb_printf("%0?lx %?p %16llx %8x %3d %3d %2x %2x %2x\n",
	    addr, p.p_vnode, p.p_offset, p.p_selock, p.p_lckcnt, p.p_cowcnt,
	    p.p_iolock_state, p.p_fsdata, p.p_state);

	return (DCMD_OK);
}
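
/*
 * Usage sketch: "::page" with no address walks every page and formats
 * each one; "addr::page" formats the single page_t at addr.
 */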

int
swap_walk_init(mdb_walk_state_t *wsp)
{
	void	*ptr;

	if ((mdb_readvar(&ptr, "swapinfo") == -1) || ptr == NULL) {
		mdb_warn("swapinfo not found or invalid");
		return (WALK_ERR);
	}

	wsp->walk_addr = (uintptr_t)ptr;

	return (WALK_NEXT);
}

int
swap_walk_step(mdb_walk_state_t *wsp)
{
	uintptr_t	sip;
	struct swapinfo	si;

	sip = wsp->walk_addr;

	if (sip == 0)
		return (WALK_DONE);

	if (mdb_vread(&si, sizeof (struct swapinfo), sip) == -1) {
		mdb_warn("unable to read swapinfo at %#lx", sip);
		return (WALK_ERR);
	}

	wsp->walk_addr = (uintptr_t)si.si_next;

	return (wsp->walk_callback(sip, &si, wsp->walk_cbdata));
}

int
swapinfof(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	struct swapinfo	si;
	char		*name;

	if (!(flags & DCMD_ADDRSPEC)) {
		if (mdb_walk_dcmd("swapinfo", "swapinfo", argc, argv) == -1) {
			mdb_warn("can't walk swapinfo");
			return (DCMD_ERR);
		}
		return (DCMD_OK);
	}

	if (DCMD_HDRSPEC(flags)) {
		mdb_printf("%<u>%?s %?s %9s %9s %s%</u>\n",
		    "ADDR", "VNODE", "PAGES", "FREE", "NAME");
	}

	if (mdb_vread(&si, sizeof (struct swapinfo), addr) == -1) {
		mdb_warn("can't read swapinfo at %#lx", addr);
		return (DCMD_ERR);
	}

	name = mdb_alloc(si.si_pnamelen, UM_SLEEP | UM_GC);
	if (mdb_vread(name, si.si_pnamelen, (uintptr_t)si.si_pname) == -1)
		name = "*error*";

	mdb_printf("%0?lx %?p %9d %9d %s\n",
	    addr, si.si_vp, si.si_npgs, si.si_nfpgs, name);

	return (DCMD_OK);
}
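
/*
 * Usage sketch: "::swapinfo" lists all configured swap resources;
 * "addr::swapinfo" formats the single struct swapinfo at addr.
 */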

int
memlist_walk_step(mdb_walk_state_t *wsp)
{
	uintptr_t	mlp;
	struct memlist	ml;

	mlp = wsp->walk_addr;

	if (mlp == 0)
		return (WALK_DONE);

	if (mdb_vread(&ml, sizeof (struct memlist), mlp) == -1) {
		mdb_warn("unable to read memlist at %#lx", mlp);
		return (WALK_ERR);
	}

	wsp->walk_addr = (uintptr_t)ml.next;

	return (wsp->walk_callback(mlp, &ml, wsp->walk_cbdata));
}

int
memlist(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	struct memlist	ml;

	if (!(flags & DCMD_ADDRSPEC)) {
		uintptr_t ptr;
		uint_t list = 0;
		int i;
		static const char *lists[] = {
			"phys_install",
			"phys_avail",
			"virt_avail"
		};

		if (mdb_getopts(argc, argv,
		    'i', MDB_OPT_SETBITS, (1 << 0), &list,
		    'a', MDB_OPT_SETBITS, (1 << 1), &list,
		    'v', MDB_OPT_SETBITS, (1 << 2), &list, NULL) != argc)
			return (DCMD_USAGE);

		if (!list)
			list = 1;

		for (i = 0; list; i++, list >>= 1) {
			if (!(list & 1))
				continue;
			if ((mdb_readvar(&ptr, lists[i]) == -1) ||
			    (ptr == 0)) {
				mdb_warn("%s not found or invalid", lists[i]);
				return (DCMD_ERR);
			}

			mdb_printf("%s:\n", lists[i]);
			if (mdb_pwalk_dcmd("memlist", "memlist", 0, NULL,
			    ptr) == -1) {
				mdb_warn("can't walk memlist");
				return (DCMD_ERR);
			}
		}
		return (DCMD_OK);
	}

	if (DCMD_HDRSPEC(flags))
		mdb_printf("%<u>%?s %16s %16s%</u>\n", "ADDR", "BASE", "SIZE");

	if (mdb_vread(&ml, sizeof (struct memlist), addr) == -1) {
		mdb_warn("can't read memlist at %#lx", addr);
		return (DCMD_ERR);
	}

	mdb_printf("%0?lx %16llx %16llx\n", addr, ml.address, ml.size);

	return (DCMD_OK);
}
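
/*
 * Usage sketch: with no address, "::memlist" selects lists with -i
 * (phys_install), -a (phys_avail), and -v (virt_avail), defaulting to
 * phys_install, and walks each one; "addr::memlist" formats the single
 * memlist entry at addr.
 */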
831