/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright 2019 Joyent, Inc.
 */

/*
 * This part of the file contains the mdb support for dcmds:
 *	::memseg_list
 * and walkers for:
 *	memseg - a memseg list walker for ::memseg_list
 *
 */

#include <sys/types.h>
#include <sys/machparam.h>
#include <sys/controlregs.h>
#include <sys/mach_mmu.h>
#ifdef __xpv
#include <sys/hypervisor.h>
#endif
#include <vm/as.h>

#include <mdb/mdb_modapi.h>
#include <mdb/mdb_target.h>

#include <vm/page.h>
#include <vm/hat_i86.h>

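/*
 * 64-bit x86 uses 48-bit canonical virtual addresses: bits 63:48 must be
 * copies of bit 47.  VA_SIGN_EXTEND() masks an address to its low 48 bits
 * and re-extends bit 47, so addresses reconstructed from page table indices
 * print as proper canonical VAs.
 */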
#define	VA_SIGN_BIT (1UL << 47)
#define	VA_LOW_BITS	((1UL << 48) - 1)
#define	VA_SIGN_EXTEND(va) ((((va) & VA_LOW_BITS) ^ VA_SIGN_BIT) - VA_SIGN_BIT)

struct pfn2pp {
	pfn_t pfn;
	page_t *pp;
};

static int do_va2pa(uintptr_t, struct as *, int, physaddr_t *, pfn_t *);
static void init_mmu(void);

int
platform_vtop(uintptr_t addr, struct as *asp, physaddr_t *pap)
{
	if (asp == NULL)
		return (DCMD_ERR);

	init_mmu();

	if (mmu.num_level == 0)
		return (DCMD_ERR);

	return (do_va2pa(addr, asp, 0, pap, NULL));
}

/*
 * ::memseg_list dcmd and walker to implement it.
 */
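/*
 * Usage:
 *	> ::memseg_list		- walk and print every memseg
 *	> addr::memseg_list	- print the memseg at addr
 */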
/*ARGSUSED*/
int
memseg_list(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	struct memseg ms;

	if (!(flags & DCMD_ADDRSPEC)) {
		if (mdb_pwalk_dcmd("memseg", "memseg_list",
		    0, NULL, 0) == -1) {
			mdb_warn("can't walk memseg");
			return (DCMD_ERR);
		}
		return (DCMD_OK);
	}

	if (DCMD_HDRSPEC(flags))
		mdb_printf("%<u>%?s %?s %?s %?s %?s%</u>\n", "ADDR",
		    "PAGES", "EPAGES", "BASE", "END");

	if (mdb_vread(&ms, sizeof (struct memseg), addr) == -1) {
		mdb_warn("can't read memseg at %#lx", addr);
		return (DCMD_ERR);
	}

	mdb_printf("%0?lx %0?lx %0?lx %0?lx %0?lx\n", addr,
	    ms.pages, ms.epages, ms.pages_base, ms.pages_end);

	return (DCMD_OK);
}

/*
 * walk the memseg structures
 */
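/*
 * The walker follows the usual mdb init/step/fini protocol: init resolves
 * the global 'memsegs' list head and allocates a scratch buffer, step reads
 * one struct memseg, hands it to the callback and advances to ->next, and
 * fini releases the buffer.
 */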
int
memseg_walk_init(mdb_walk_state_t *wsp)
{
	if (wsp->walk_addr != 0) {
		mdb_warn("memseg only supports global walks\n");
		return (WALK_ERR);
	}

	if (mdb_readvar(&wsp->walk_addr, "memsegs") == -1) {
		mdb_warn("symbol 'memsegs' not found");
		return (WALK_ERR);
	}

	wsp->walk_data = mdb_alloc(sizeof (struct memseg), UM_SLEEP);
	return (WALK_NEXT);

}

int
memseg_walk_step(mdb_walk_state_t *wsp)
{
	int status;

	if (wsp->walk_addr == 0) {
		return (WALK_DONE);
	}

	if (mdb_vread(wsp->walk_data, sizeof (struct memseg),
	    wsp->walk_addr) == -1) {
		mdb_warn("failed to read struct memseg at %p", wsp->walk_addr);
		return (WALK_DONE);
	}

	status = wsp->walk_callback(wsp->walk_addr, wsp->walk_data,
	    wsp->walk_cbdata);

	wsp->walk_addr = (uintptr_t)(((struct memseg *)wsp->walk_data)->next);

	return (status);
}

void
memseg_walk_fini(mdb_walk_state_t *wsp)
{
	mdb_free(wsp->walk_data, sizeof (struct memseg));
}

/*
 * Now HAT related dcmds.
 */

static struct hat *khat;		/* value of kas.a_hat */
struct hat_mmu_info mmu;
uintptr_t kernelbase;

/*
 * stuff for i86xpv images
 */
static int is_xpv;
static uintptr_t mfn_list_addr; /* kernel MFN list address */
uintptr_t xen_virt_start; /* address of mfn_to_pfn[] table */
ulong_t mfn_count;	/* number of pfns in the MFN list */
pfn_t *mfn_list;	/* local MFN list copy */

/*
 * read mmu parameters from kernel
 */
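/*
 * init_mmu() is idempotent: once mmu.num_level is non-zero the cached copy
 * is reused.  It also detects i86xpv (paravirtualized) images by probing
 * for the mfn_list, xen_virt_start and mfn_count symbols.
 */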
static void
init_mmu(void)
{
	struct as kas;

	if (mmu.num_level != 0)
		return;

	if (mdb_readsym(&mmu, sizeof (mmu), "mmu") == -1)
		mdb_warn("Can't use HAT information before mmu_init()\n");
	if (mdb_readsym(&kas, sizeof (kas), "kas") == -1)
		mdb_warn("Couldn't find kas - kernel's struct as\n");
	if (mdb_readsym(&kernelbase, sizeof (kernelbase), "kernelbase") == -1)
		mdb_warn("Couldn't find kernelbase\n");
	khat = kas.a_hat;

	/*
	 * Is this a paravirtualized domain image?
	 */
	if (mdb_readsym(&mfn_list_addr, sizeof (mfn_list_addr),
	    "mfn_list") == -1 ||
	    mdb_readsym(&xen_virt_start, sizeof (xen_virt_start),
	    "xen_virt_start") == -1 ||
	    mdb_readsym(&mfn_count, sizeof (mfn_count), "mfn_count") == -1) {
		mfn_list_addr = 0;
	}

	is_xpv = mfn_list_addr != 0;

#ifndef _KMDB
	/*
	 * recreate the local mfn_list
	 */
	if (is_xpv) {
		size_t sz = mfn_count * sizeof (pfn_t);
		mfn_list = mdb_zalloc(sz, UM_SLEEP);

		if (mdb_vread(mfn_list, sz, (uintptr_t)mfn_list_addr) == -1) {
			mdb_warn("Failed to read MFN list\n");
			mdb_free(mfn_list, sz);
			mfn_list = NULL;
		}
	}
#endif
}

void
free_mmu(void)
{
#ifdef __xpv
	if (mfn_list != NULL)
		mdb_free(mfn_list, mfn_count * sizeof (mfn_t));
#endif
}

#ifdef __xpv

#ifdef _KMDB

/*
 * Convert between MFNs and PFNs.  Since we're in kmdb we can go directly
 * through the machine to phys mapping and the MFN list.
 */
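/*
 * mdb_mfn_to_pfn() reads the candidate PFN from the hypervisor's
 * machine-to-phys table at xen_virt_start, then cross-checks it against the
 * kernel's PFN->MFN list; a mismatch (e.g. a foreign or stale MFN) returns
 * -1.
 */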

pfn_t
mdb_mfn_to_pfn(mfn_t mfn)
{
	pfn_t pfn;
	mfn_t tmp;
	pfn_t *pfn_list;

	if (mfn_list_addr == 0)
		return (-(pfn_t)1);

	pfn_list = (pfn_t *)xen_virt_start;
	if (mdb_vread(&pfn, sizeof (pfn), (uintptr_t)(pfn_list + mfn)) == -1)
		return (-(pfn_t)1);

	if (mdb_vread(&tmp, sizeof (tmp),
	    (uintptr_t)(mfn_list_addr + (pfn * sizeof (mfn_t)))) == -1)
		return (-(pfn_t)1);

	if (pfn >= mfn_count || tmp != mfn)
		return (-(pfn_t)1);

	return (pfn);
}

mfn_t
mdb_pfn_to_mfn(pfn_t pfn)
{
	mfn_t mfn;

	init_mmu();

	if (mfn_list_addr == 0 || pfn >= mfn_count)
		return (-(mfn_t)1);

	if (mdb_vread(&mfn, sizeof (mfn),
	    (uintptr_t)(mfn_list_addr + (pfn * sizeof (mfn_t)))) == -1)
		return (-(mfn_t)1);

	return (mfn);
}

#else /* _KMDB */

/*
 * Convert between MFNs and PFNs.  Since a crash dump doesn't include the
 * MFN->PFN translation table (it's part of the hypervisor, not our image)
 * we do the MFN->PFN translation by searching the PFN->MFN (mfn_list)
 * table, if it's there.
 */
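/*
 * Note that mdb_mfn_to_pfn() below is a linear scan of the local mfn_list
 * copy, so it is O(mfn_count) per lookup.
 */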

pfn_t
mdb_mfn_to_pfn(mfn_t mfn)
{
	pfn_t pfn;

	init_mmu();

	if (mfn_list == NULL)
		return (-(pfn_t)1);

	for (pfn = 0; pfn < mfn_count; ++pfn) {
		if (mfn_list[pfn] != mfn)
			continue;
		return (pfn);
	}

	return (-(pfn_t)1);
}

mfn_t
mdb_pfn_to_mfn(pfn_t pfn)
{
	init_mmu();

	if (mfn_list == NULL || pfn >= mfn_count)
		return (-(mfn_t)1);

	return (mfn_list[pfn]);
}

#endif /* _KMDB */

static paddr_t
mdb_ma_to_pa(uint64_t ma)
{
	pfn_t pfn = mdb_mfn_to_pfn(mmu_btop(ma));
	if (pfn == -(pfn_t)1)
		return (-(paddr_t)1);

	return (mmu_ptob((paddr_t)pfn) | (ma & (MMU_PAGESIZE - 1)));
}

#else /* __xpv */

#define	mdb_ma_to_pa(ma) (ma)
#define	mdb_mfn_to_pfn(mfn) (mfn)
#define	mdb_pfn_to_mfn(pfn) (pfn)

#endif /* __xpv */

/*
 * ::mfntopfn dcmd translates hypervisor machine page number
 * to physical page number
 */
/*ARGSUSED*/
int
mfntopfn_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	pfn_t pfn;

	if ((flags & DCMD_ADDRSPEC) == 0) {
		mdb_warn("MFN missing\n");
		return (DCMD_USAGE);
	}

	if ((pfn = mdb_mfn_to_pfn((pfn_t)addr)) == -(pfn_t)1) {
		mdb_warn("Invalid mfn %lr\n", (pfn_t)addr);
		return (DCMD_ERR);
	}

	mdb_printf("%lr\n", pfn);

	return (DCMD_OK);
}

/*
 * ::pfntomfn dcmd translates physical page number to
 * hypervisor machine page number
 */
/*ARGSUSED*/
int
pfntomfn_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	pfn_t mfn;

	if ((flags & DCMD_ADDRSPEC) == 0) {
		mdb_warn("PFN missing\n");
		return (DCMD_USAGE);
	}

	if ((mfn = mdb_pfn_to_mfn((pfn_t)addr)) == -(pfn_t)1) {
		mdb_warn("Invalid pfn %lr\n", (pfn_t)addr);
		return (DCMD_ABORT);
	}

	mdb_printf("%lr\n", mfn);

	if (flags & DCMD_LOOP)
		mdb_set_dot(addr + 1);
	return (DCMD_OK);
}

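/*
 * Extract the frame number (MFN on xpv, PFN otherwise) from a PTE, using
 * the large-page address mask when PT_PAGESIZE is set at a non-zero level,
 * since bit 12 of a large-page PTE holds the PAT bit rather than address
 * bits.
 */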
static pfn_t
pte2mfn(x86pte_t pte, uint_t level)
{
	pfn_t mfn;
	if (level > 0 && (pte & PT_PAGESIZE))
		mfn = mmu_btop(pte & PT_PADDR_LGPG);
	else
		mfn = mmu_btop(pte & PT_PADDR);
	return (mfn);
}

static int
do_pte_dcmd(int level, uint64_t pte)
{
	static char *attr[] = {
	    "wrback", "wrthru", "uncached", "uncached",
	    "wrback", "wrthru", "wrcombine", "uncached"};
	int pat_index = 0;
	pfn_t mfn;

	mdb_printf("pte=0x%llr: ", pte);

	mfn = pte2mfn(pte, level);
	mdb_printf("%s=0x%lr ", is_xpv ? "mfn" : "pfn", mfn);

	if (PTE_GET(pte, mmu.pt_nx))
		mdb_printf("noexec ");

	if (PTE_GET(pte, PT_NOCONSIST))
		mdb_printf("noconsist ");

	if (PTE_GET(pte, PT_NOSYNC))
		mdb_printf("nosync ");

	if (PTE_GET(pte, mmu.pt_global))
		mdb_printf("global ");

	if (level > 0 && PTE_GET(pte, PT_PAGESIZE))
		mdb_printf("largepage ");

	if (level > 0 && PTE_GET(pte, PT_MOD))
		mdb_printf("mod ");

	if (level > 0 && PTE_GET(pte, PT_REF))
		mdb_printf("ref ");

	if (PTE_GET(pte, PT_USER))
		mdb_printf("user ");

	if (PTE_GET(pte, PT_WRITABLE))
		mdb_printf("write ");

	/*
	 * Report non-standard cacheability
	 */
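	/*
	 * attr[] is indexed by the three PAT selection bits: the PAT bit
	 * itself (PT_PAT_4K, or PT_PAT_LARGE for large pages) contributes 4,
	 * PCD (PT_NOCACHE) contributes 2, and PWT (PT_WRITETHRU) contributes
	 * 1.  Index 0 is the default write-back case and is not printed.
	 */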
	pat_index = 0;
	if (level > 0) {
		if (PTE_GET(pte, PT_PAGESIZE) && PTE_GET(pte, PT_PAT_LARGE))
			pat_index += 4;
	} else {
		if (PTE_GET(pte, PT_PAT_4K))
			pat_index += 4;
	}

	if (PTE_GET(pte, PT_NOCACHE))
		pat_index += 2;

	if (PTE_GET(pte, PT_WRITETHRU))
		pat_index += 1;

	if (pat_index != 0)
		mdb_printf("%s", attr[pat_index]);

	if (PTE_GET(pte, PT_VALID) == 0)
		mdb_printf(" !VALID ");

	mdb_printf("\n");
	return (DCMD_OK);
}

/*
 * Print a PTE in a more human-friendly way.  The PTE is assumed to be in
 * a level 0 page table, unless -l specifies another level.
 */
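/*
 * Usage:
 *	> pte::pte [-l level]
 * where the address is the raw PTE value and -l gives the page table level
 * it came from (default 0).
 */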
/*ARGSUSED*/
int
pte_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	uint64_t level = 0;

	init_mmu();

	if (mmu.num_level == 0)
		return (DCMD_ERR);

	if ((flags & DCMD_ADDRSPEC) == 0)
		return (DCMD_USAGE);

	if (mdb_getopts(argc, argv,
	    'l', MDB_OPT_UINT64, &level, NULL) != argc)
		return (DCMD_USAGE);

	if (level > mmu.max_level) {
		mdb_warn("invalid level %lu\n", level);
		return (DCMD_ERR);
	}

	if (addr == 0)
		return (DCMD_OK);

	return (do_pte_dcmd((int)level, addr));
}

static size_t
va2entry(htable_t *htable, uintptr_t addr)
{
	size_t entry = (addr - htable->ht_vaddr);

	entry >>= mmu.level_shift[htable->ht_level];
	return (entry & HTABLE_NUM_PTES(htable) - 1);
}

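/*
 * Fetch the PTE covering addr from the given htable.  For HTABLE_COPIED
 * tables the entry comes from the hat's embedded hat_copied_ptes array
 * (already read into the local struct hat copy); otherwise it is read from
 * the physical page named by ht_pfn.
 */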
static x86pte_t
get_pte(hat_t *hat, htable_t *htable, uintptr_t addr)
{
	x86pte_t buf;

	if (htable->ht_flags & HTABLE_COPIED) {
		uintptr_t ptr = (uintptr_t)hat->hat_copied_ptes;
		ptr += va2entry(htable, addr) << mmu.pte_size_shift;
		return (*(x86pte_t *)ptr);
	}

	paddr_t paddr = mmu_ptob((paddr_t)htable->ht_pfn);
	paddr += va2entry(htable, addr) << mmu.pte_size_shift;

	if ((mdb_pread(&buf, mmu.pte_size, paddr)) == mmu.pte_size)
		return (buf);

	return (0);
}

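/*
 * Translate a virtual address to a physical address (and optionally the
 * backing MFN via mfnp) by searching the hat's htable hash at each page
 * table level for the table covering the address and inspecting its PTE.
 * With print_level set, every level's htable and PTE are printed along the
 * way; the translation itself comes from the first valid mapping found.
 */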
static int
do_va2pa(uintptr_t addr, struct as *asp, int print_level, physaddr_t *pap,
    pfn_t *mfnp)
{
	struct as as;
	struct hat *hatp;
	struct hat hat;
	htable_t *ht;
	htable_t htable;
	uintptr_t base;
	int h;
	int level;
	int found = 0;
	x86pte_t pte;
	physaddr_t paddr;

	if (asp != NULL) {
		if (mdb_vread(&as, sizeof (as), (uintptr_t)asp) == -1) {
			mdb_warn("Couldn't read struct as\n");
			return (DCMD_ERR);
		}
		hatp = as.a_hat;
	} else {
		hatp = khat;
	}

	/*
	 * read the hat and its hash table
	 */
	if (mdb_vread(&hat, sizeof (hat), (uintptr_t)hatp) == -1) {
		mdb_warn("Couldn't read struct hat\n");
		return (DCMD_ERR);
	}

	/*
	 * read the htable hashtable
	 */
	for (level = 0; level <= mmu.max_level; ++level) {
		if (level == TOP_LEVEL(&hat))
			base = 0;
		else
			base = addr & mmu.level_mask[level + 1];

		for (h = 0; h < hat.hat_num_hash; ++h) {
			if (mdb_vread(&ht, sizeof (htable_t *),
			    (uintptr_t)(hat.hat_ht_hash + h)) == -1) {
				mdb_warn("Couldn't read htable\n");
				return (DCMD_ERR);
			}
			for (; ht != NULL; ht = htable.ht_next) {
				if (mdb_vread(&htable, sizeof (htable_t),
				    (uintptr_t)ht) == -1) {
					mdb_warn("Couldn't read htable\n");
					return (DCMD_ERR);
				}

				if (htable.ht_vaddr != base ||
				    htable.ht_level != level)
					continue;

				pte = get_pte(&hat, &htable, addr);

				if (print_level) {
					mdb_printf("\tlevel=%d htable=0x%p "
					    "pte=0x%llr\n", level, ht, pte);
				}

				if (!PTE_ISVALID(pte)) {
					mdb_printf("Address %p is unmapped.\n",
					    addr);
					return (DCMD_ERR);
				}

				if (found)
					continue;

				if (PTE_IS_LGPG(pte, level))
					paddr = mdb_ma_to_pa(pte &
					    PT_PADDR_LGPG);
				else
					paddr = mdb_ma_to_pa(pte & PT_PADDR);
				paddr += addr & mmu.level_offset[level];
				if (pap != NULL)
					*pap = paddr;
				if (mfnp != NULL)
					*mfnp = pte2mfn(pte, level);
				found = 1;
			}
		}
	}

done:
	if (!found)
		return (DCMD_ERR);
	return (DCMD_OK);
}

int
va2pfn_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	uintptr_t addrspace;
	char *addrspace_str = NULL;
	int piped = flags & DCMD_PIPE_OUT;
	pfn_t pfn;
	pfn_t mfn;
	int rc;

	init_mmu();

	if (mmu.num_level == 0)
		return (DCMD_ERR);

	if (mdb_getopts(argc, argv,
	    'a', MDB_OPT_STR, &addrspace_str, NULL) != argc)
		return (DCMD_USAGE);

	if ((flags & DCMD_ADDRSPEC) == 0)
		return (DCMD_USAGE);

	/*
	 * parse the address space
	 */
	if (addrspace_str != NULL)
		addrspace = mdb_strtoull(addrspace_str);
	else
		addrspace = 0;

	rc = do_va2pa(addr, (struct as *)addrspace, !piped, NULL, &mfn);

	if (rc != DCMD_OK)
		return (rc);

	if ((pfn = mdb_mfn_to_pfn(mfn)) == -(pfn_t)1) {
		mdb_warn("Invalid mfn %lr\n", mfn);
		return (DCMD_ERR);
	}

	if (piped) {
		mdb_printf("0x%lr\n", pfn);
		return (DCMD_OK);
	}

	mdb_printf("Virtual address 0x%p maps pfn 0x%lr", addr, pfn);

	if (is_xpv)
		mdb_printf(" (mfn 0x%lr)", mfn);

	mdb_printf("\n");

	return (DCMD_OK);
}

/*
 * Report all hats that either use PFN as a page table or that map the page.
 */
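/*
 * This walks every hat starting from khat, and for each htable either
 * reports it when ht_pfn matches (the PFN is itself a page table), or scans
 * its PTEs for mappings whose target page is the PFN.
 */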
static int
do_report_maps(pfn_t pfn)
{
	struct hat *hatp;
	struct hat hat;
	htable_t *ht;
	htable_t htable;
	uintptr_t base;
	int h;
	int level;
	int entry;
	x86pte_t pte;
	physaddr_t paddr;
	size_t len;

	/*
	 * The hats are kept in a list with khat at the head.
	 */
	for (hatp = khat; hatp != NULL; hatp = hat.hat_next) {
		/*
		 * read the hat and its hash table
		 */
		if (mdb_vread(&hat, sizeof (hat), (uintptr_t)hatp) == -1) {
			mdb_warn("Couldn't read struct hat\n");
			return (DCMD_ERR);
		}

		/*
		 * read the htable hashtable
		 */
		paddr = 0;
		for (h = 0; h < hat.hat_num_hash; ++h) {
			if (mdb_vread(&ht, sizeof (htable_t *),
			    (uintptr_t)(hat.hat_ht_hash + h)) == -1) {
				mdb_warn("Couldn't read htable\n");
				return (DCMD_ERR);
			}
			for (; ht != NULL; ht = htable.ht_next) {
				if (mdb_vread(&htable, sizeof (htable_t),
				    (uintptr_t)ht) == -1) {
					mdb_warn("Couldn't read htable\n");
					return (DCMD_ERR);
				}

				/*
				 * only report kernel addresses once
				 */
				if (hatp != khat &&
				    htable.ht_vaddr >= kernelbase)
					continue;

				/*
				 * Is the PFN a pagetable itself?
				 */
				if (htable.ht_pfn == pfn) {
					mdb_printf("Pagetable for "
					    "hat=%p htable=%p\n", hatp, ht);
					continue;
				}

				/*
				 * otherwise, examine page mappings
				 */
				level = htable.ht_level;
				if (level > mmu.max_page_level)
					continue;
				paddr = mmu_ptob((physaddr_t)htable.ht_pfn);
				for (entry = 0;
				    entry < HTABLE_NUM_PTES(&htable);
				    ++entry) {

					base = htable.ht_vaddr + entry *
					    mmu.level_size[level];

					/*
					 * only report kernel addresses once
					 */
					if (hatp != khat &&
					    base >= kernelbase)
						continue;

					len = mdb_pread(&pte, mmu.pte_size,
					    paddr + entry * mmu.pte_size);
					if (len != mmu.pte_size)
						return (DCMD_ERR);

					if ((pte & PT_VALID) == 0)
						continue;
					if (level == 0 || !(pte & PT_PAGESIZE))
						pte &= PT_PADDR;
					else
						pte &= PT_PADDR_LGPG;
					if (mmu_btop(mdb_ma_to_pa(pte)) != pfn)
						continue;
					mdb_printf("hat=%p maps addr=%p\n",
					    hatp, (caddr_t)base);
				}
			}
		}
	}

done:
	return (DCMD_OK);
}

/*
 * given a PFN as its address argument, prints out the uses of it
 */
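/*
 * Usage:
 *	> pfn::report_maps [-m]
 * With -m the given address is treated as an MFN and converted first.
 */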
/*ARGSUSED*/
int
report_maps_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	pfn_t pfn;
	uint_t mflag = 0;

	init_mmu();

	if (mmu.num_level == 0)
		return (DCMD_ERR);

	if ((flags & DCMD_ADDRSPEC) == 0)
		return (DCMD_USAGE);

	if (mdb_getopts(argc, argv,
	    'm', MDB_OPT_SETBITS, TRUE, &mflag, NULL) != argc)
		return (DCMD_USAGE);

	pfn = (pfn_t)addr;
	if (mflag)
		pfn = mdb_mfn_to_pfn(pfn);

	return (do_report_maps(pfn));
}

static int
do_ptable_dcmd(pfn_t pfn, uint64_t level)
{
	struct hat *hatp;
	struct hat hat;
	htable_t *ht;
	htable_t htable;
	uintptr_t base;
	int h;
	int entry;
	uintptr_t pagesize;
	x86pte_t pte;
	physaddr_t paddr;
	size_t len;

	/*
	 * The hats are kept in a list with khat at the head.
	 */
	for (hatp = khat; hatp != NULL; hatp = hat.hat_next) {
		/*
		 * read the hat and its hash table
		 */
		if (mdb_vread(&hat, sizeof (hat), (uintptr_t)hatp) == -1) {
			mdb_warn("Couldn't read struct hat\n");
			return (DCMD_ERR);
		}

		/*
		 * read the htable hashtable
		 */
		paddr = 0;
		for (h = 0; h < hat.hat_num_hash; ++h) {
			if (mdb_vread(&ht, sizeof (htable_t *),
			    (uintptr_t)(hat.hat_ht_hash + h)) == -1) {
				mdb_warn("Couldn't read htable\n");
				return (DCMD_ERR);
			}
			for (; ht != NULL; ht = htable.ht_next) {
				if (mdb_vread(&htable, sizeof (htable_t),
				    (uintptr_t)ht) == -1) {
					mdb_warn("Couldn't read htable\n");
					return (DCMD_ERR);
				}

				/*
				 * Is this the PFN for this htable
				 */
				if (htable.ht_pfn == pfn)
					goto found_it;
			}
		}
	}

found_it:
	if (htable.ht_pfn == pfn) {
		mdb_printf("htable=%p\n", ht);
		if (level == (uint64_t)-1) {
			level = htable.ht_level;
		} else if (htable.ht_level != level) {
			mdb_warn("htable has level %d but forcing level %lu\n",
			    htable.ht_level, level);
		}
		base = htable.ht_vaddr;
		pagesize = mmu.level_size[level];
	} else {
		if (level == (uint64_t)-1)
			level = 0;
		mdb_warn("couldn't find matching htable, using level=%lu, "
		    "base address=0x0\n", level);
		base = 0;
		pagesize = mmu.level_size[level];
	}

	paddr = mmu_ptob((physaddr_t)pfn);
	for (entry = 0; entry < mmu.ptes_per_table; ++entry) {
		len = mdb_pread(&pte, mmu.pte_size,
		    paddr + entry * mmu.pte_size);
		if (len != mmu.pte_size)
			return (DCMD_ERR);

		if (pte == 0)
			continue;

		mdb_printf("[%3d] va=0x%p ", entry,
		    VA_SIGN_EXTEND(base + entry * pagesize));
		do_pte_dcmd(level, pte);
	}

done:
	return (DCMD_OK);
}

/*
 * Dump the page table at the given PFN
 */
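/*
 * Usage:
 *	> pfn::ptable [-m] [-l level]
 * Prints every non-zero PTE in the page table page at pfn; -m treats the
 * address as an MFN, -l overrides the level used for decoding.
 */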
/*ARGSUSED*/
int
ptable_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	pfn_t pfn;
	uint_t mflag = 0;
	uint64_t level = (uint64_t)-1;

	init_mmu();

	if (mmu.num_level == 0)
		return (DCMD_ERR);

	if ((flags & DCMD_ADDRSPEC) == 0)
		return (DCMD_USAGE);

	if (mdb_getopts(argc, argv,
	    'm', MDB_OPT_SETBITS, TRUE, &mflag,
	    'l', MDB_OPT_UINT64, &level, NULL) != argc)
		return (DCMD_USAGE);

	if (level != (uint64_t)-1 && level > mmu.max_level) {
		mdb_warn("invalid level %lu\n", level);
		return (DCMD_ERR);
	}

	pfn = (pfn_t)addr;
	if (mflag)
		pfn = mdb_mfn_to_pfn(pfn);

	return (do_ptable_dcmd(pfn, level));
}

static int
do_htables_dcmd(hat_t *hatp)
{
	struct hat hat;
	htable_t *ht;
	htable_t htable;
	int h;

	/*
	 * read the hat and its hash table
	 */
	if (mdb_vread(&hat, sizeof (hat), (uintptr_t)hatp) == -1) {
		mdb_warn("Couldn't read struct hat\n");
		return (DCMD_ERR);
	}

	/*
	 * read the htable hashtable
	 */
	for (h = 0; h < hat.hat_num_hash; ++h) {
		if (mdb_vread(&ht, sizeof (htable_t *),
		    (uintptr_t)(hat.hat_ht_hash + h)) == -1) {
			mdb_warn("Couldn't read htable ptr\n");
			return (DCMD_ERR);
		}
		for (; ht != NULL; ht = htable.ht_next) {
			mdb_printf("%p\n", ht);
			if (mdb_vread(&htable, sizeof (htable_t),
			    (uintptr_t)ht) == -1) {
				mdb_warn("Couldn't read htable\n");
				return (DCMD_ERR);
			}
		}
	}
	return (DCMD_OK);
}

/*
 * Dump the htables for the given hat
 */
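/*
 * Usage:
 *	> hat::htables
 * Prints the address of every htable hanging off the given hat's hash.
 */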
/*ARGSUSED*/
int
htables_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	hat_t *hat;

	init_mmu();

	if (mmu.num_level == 0)
		return (DCMD_ERR);

	if ((flags & DCMD_ADDRSPEC) == 0)
		return (DCMD_USAGE);

	hat = (hat_t *)addr;

	return (do_htables_dcmd(hat));
}

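/*
 * Reconstruct a virtual address from the per-level table indices
 * accumulated during the ::ptmap walk, sign-extending the result into
 * canonical form.
 */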
static uintptr_t
entry2va(size_t *entries)
{
	uintptr_t va = 0;

	for (level_t l = mmu.max_level; l >= 0; l--)
		va += entries[l] << mmu.level_shift[l];

	return (VA_SIGN_EXTEND(va));
}

static void
ptmap_report(size_t *entries, uintptr_t start,
    boolean_t user, boolean_t writable, boolean_t wflag)
{
	uint64_t curva = entry2va(entries);

	mdb_printf("mapped %s,%s range of %lu bytes: %a-%a\n",
	    user ? "user" : "kernel", writable ? "writable" : "read-only",
	    curva - start, start, curva - 1);
	if (wflag && start >= kernelbase)
		(void) mdb_call_dcmd("whatis", start, DCMD_ADDRSPEC, 0, NULL);
}

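/*
 * ::ptmap takes the physical address of a top-level page table and walks
 * the whole tree, coalescing consecutive valid mappings that share the same
 * user/writable attributes into ranges.  With -w, ::whatis is run on the
 * start of each kernel range.
 */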
int
ptmap_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	physaddr_t paddrs[MAX_NUM_LEVEL] = { 0, };
	size_t entry[MAX_NUM_LEVEL] = { 0, };
	uintptr_t start = (uintptr_t)-1;
	boolean_t writable = B_FALSE;
	boolean_t user = B_FALSE;
	boolean_t wflag = B_FALSE;
	level_t curlevel;

	if ((flags & DCMD_ADDRSPEC) == 0)
		return (DCMD_USAGE);

	if (mdb_getopts(argc, argv,
	    'w', MDB_OPT_SETBITS, TRUE, &wflag, NULL) != argc)
		return (DCMD_USAGE);

	init_mmu();

	if (mmu.num_level == 0)
		return (DCMD_ERR);

	curlevel = mmu.max_level;

	paddrs[curlevel] = addr & MMU_PAGEMASK;

	for (;;) {
		physaddr_t pte_addr;
		x86pte_t pte;

		pte_addr = paddrs[curlevel] +
		    (entry[curlevel] << mmu.pte_size_shift);

		if (mdb_pread(&pte, sizeof (pte), pte_addr) != sizeof (pte)) {
			mdb_warn("couldn't read pte at %p", pte_addr);
			return (DCMD_ERR);
		}

		if (PTE_GET(pte, PT_VALID) == 0) {
			if (start != (uintptr_t)-1) {
				ptmap_report(entry, start,
				    user, writable, wflag);
				start = (uintptr_t)-1;
			}
		} else if (curlevel == 0 || PTE_GET(pte, PT_PAGESIZE)) {
			if (start == (uintptr_t)-1) {
				start = entry2va(entry);
				user = PTE_GET(pte, PT_USER);
				writable = PTE_GET(pte, PT_WRITABLE);
			} else if (user != PTE_GET(pte, PT_USER) ||
			    writable != PTE_GET(pte, PT_WRITABLE)) {
				ptmap_report(entry, start,
				    user, writable, wflag);
				start = entry2va(entry);
				user = PTE_GET(pte, PT_USER);
				writable = PTE_GET(pte, PT_WRITABLE);
			}
		} else {
			/* Descend a level. */
			physaddr_t pa = mmu_ptob(pte2mfn(pte, curlevel));
			paddrs[--curlevel] = pa;
			entry[curlevel] = 0;
			continue;
		}

		while (++entry[curlevel] == mmu.ptes_per_table) {
			/* Ascend back up. */
			entry[curlevel] = 0;
			if (curlevel == mmu.max_level) {
				if (start != (uintptr_t)-1) {
					ptmap_report(entry, start,
					    user, writable, wflag);
				}
				goto out;
			}

			curlevel++;
		}
	}

out:
	return (DCMD_OK);
}