/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1990, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2019 Joyent, Inc.
 */

/*
 * Mdb kernel support module.  This module is loaded automatically when the
 * kvm target is initialized.  Any global functions declared here are exported
 * for the resolution of symbols in subsequently loaded modules.
 *
 * WARNING: Do not assume that static variables in mdb_ks will be initialized
 * to zero.
 */

#include <mdb/mdb_target.h>
#include <mdb/mdb_param.h>
#include <mdb/mdb_modapi.h>
#include <mdb/mdb_ks.h>

#include <sys/types.h>
#include <sys/procfs.h>
#include <sys/proc.h>
#include <sys/dnlc.h>
#include <sys/autoconf.h>
#include <sys/machelf.h>
#include <sys/modctl.h>
#include <sys/hwconf.h>
#include <sys/kobj.h>
#include <sys/fs/autofs.h>
#include <sys/ddi_impldefs.h>
#include <sys/refstr_impl.h>
#include <sys/cpuvar.h>
#include <sys/dlpi.h>
#include <sys/clock_impl.h>
#include <sys/swap.h>
#include <errno.h>

#include <vm/seg_vn.h>
#include <vm/page.h>

#define	MDB_PATH_NELEM	256			/* Maximum path components */

typedef struct mdb_path {
	size_t mdp_nelem;			/* Number of components */
	uint_t mdp_complete;			/* Path completely resolved? */
	uintptr_t mdp_vnode[MDB_PATH_NELEM];	/* Array of vnode_t addresses */
	char *mdp_name[MDB_PATH_NELEM];		/* Array of name components */
} mdb_path_t;

static int mdb_autonode2path(uintptr_t, mdb_path_t *);
static int mdb_sprintpath(char *, size_t, mdb_path_t *);

/*
 * Kernel parameters from <sys/param.h> which we keep in-core:
 */
unsigned long _mdb_ks_pagesize;
unsigned int _mdb_ks_pageshift;
unsigned long _mdb_ks_pageoffset;
unsigned long long _mdb_ks_pagemask;
unsigned long _mdb_ks_mmu_pagesize;
unsigned int _mdb_ks_mmu_pageshift;
unsigned long _mdb_ks_mmu_pageoffset;
unsigned long _mdb_ks_mmu_pagemask;
uintptr_t _mdb_ks_kernelbase;
uintptr_t _mdb_ks_userlimit;
uintptr_t _mdb_ks_userlimit32;
uintptr_t _mdb_ks_argsbase;
unsigned long _mdb_ks_msg_bsize;
unsigned long _mdb_ks_defaultstksz;
int _mdb_ks_ncpu;
int _mdb_ks_ncpu_log2;
int _mdb_ks_ncpu_p2;

/*
 * In-core copy of DNLC information:
 */
#define	MDB_DNLC_HSIZE	1024
#define	MDB_DNLC_HASH(vp)	(((uintptr_t)(vp) >> 3) & (MDB_DNLC_HSIZE - 1))
#define	MDB_DNLC_NCACHE_SZ(ncp) (sizeof (ncache_t) + (ncp)->namlen)
#define	MDB_DNLC_MAX_RETRY 4

static ncache_t **dnlc_hash;	/* mdb's hash array of dnlc entries */

/*
 * copy of page_hash-related data
 */
static int page_hash_loaded;
static long mdb_page_hashsz;
static uint_t mdb_page_hashsz_shift;	/* Needed for PAGE_HASH_FUNC */
static uintptr_t mdb_page_hash;		/* base address of page hash */
#define	page_hashsz		mdb_page_hashsz
#define	page_hashsz_shift	mdb_page_hashsz_shift

/*
 * This will be the location of the vnodeops pointer for "autofs_vnodeops".
 * The pointer still needs to be read with mdb_vread() to get the location
 * of the vnodeops structure for autofs.
 */
static struct vnodeops *autofs_vnops_ptr;

/*
 * STREAMS queue registrations:
 */
typedef struct mdb_qinfo {
	const mdb_qops_t *qi_ops;	/* Address of ops vector */
	uintptr_t qi_addr;		/* Address of qinit structure (key) */
	struct mdb_qinfo *qi_next;	/* Next qinfo in list */
} mdb_qinfo_t;

static mdb_qinfo_t *qi_head;		/* Head of qinfo chain */

/*
 * Device naming callback structure:
 */
typedef struct nm_query {
	const char *nm_name;		/* Device driver name [in/out] */
	major_t nm_major;		/* Device major number [in/out] */
	ushort_t nm_found;		/* Did we find a match? [out] */
} nm_query_t;

/*
 * Address-to-modctl callback structure:
 */
typedef struct a2m_query {
	uintptr_t a2m_addr;		/* Virtual address [in] */
	uintptr_t a2m_where;		/* Modctl address [out] */
} a2m_query_t;

/*
 * Segment-to-mdb_map callback structure:
 */
typedef struct {
	struct seg_ops *asm_segvn_ops;	/* Address of segvn ops [in] */
	void (*asm_callback)(const struct mdb_map *, void *); /* Callb [in] */
	void *asm_cbdata;		/* Callback data [in] */
} asmap_arg_t;

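/*
 * Free the debugger's private copy of the DNLC: each cached ncache entry
 * and then the hash array itself.
 */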
static void
dnlc_free(void)
{
	ncache_t *ncp, *next;
	int i;

	if (dnlc_hash == NULL) {
		return;
	}

	/*
	 * Free up current dnlc entries
	 */
	for (i = 0; i < MDB_DNLC_HSIZE; i++) {
		for (ncp = dnlc_hash[i]; ncp; ncp = next) {
			next = ncp->hash_next;
			mdb_free(ncp, MDB_DNLC_NCACHE_SZ(ncp));
		}
	}
	mdb_free(dnlc_hash, MDB_DNLC_HSIZE * sizeof (ncache_t *));
	dnlc_hash = NULL;
}

char bad_dnlc[] = "inconsistent dnlc chain: %d, ncache va: %p"
	" - continuing with the rest\n";

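/*
 * Build a private copy of the kernel's DNLC, re-hashed by vnode address.
 * On a live system the kernel's chains can change underneath us, so
 * inconsistent chains are retried up to MDB_DNLC_MAX_RETRY times before
 * being reported and skipped.
 */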
static int
dnlc_load(void)
{
	int i; /* hash index */
	int retry_cnt = 0;
	int skip_bad_chains = 0;
	int nc_hashsz; /* kernel hash array size */
	uintptr_t nc_hash_addr; /* kernel va of ncache hash array */
	uintptr_t head; /* kernel va of head of hash chain */

	/*
	 * If we've already cached the DNLC and we're looking at a dump,
	 * our cache is good forever, so don't bother re-loading.
	 */
	if (dnlc_hash && mdb_prop_postmortem) {
		return (0);
	}

	/*
	 * For a core dump, retries won't help.
	 * Just print and skip any bad chains.
	 */
	if (mdb_prop_postmortem) {
		skip_bad_chains = 1;
	}
retry:
	if (retry_cnt++ >= MDB_DNLC_MAX_RETRY) {
		/*
		 * Give up retrying the rapidly changing dnlc.
		 * Just print and skip any bad chains
		 */
		skip_bad_chains = 1;
	}

	dnlc_free(); /* Free up the mdb hashed dnlc - if any */

	/*
	 * Although nc_hashsz and the location of nc_hash don't currently
	 * change, they may in the future with a more dynamic dnlc.
	 * So always read these values afresh.
	 */
	if (mdb_readvar(&nc_hashsz, "nc_hashsz") == -1) {
		mdb_warn("failed to read nc_hashsz");
		return (-1);
	}
	if (mdb_readvar(&nc_hash_addr, "nc_hash") == -1) {
		mdb_warn("failed to read nc_hash");
		return (-1);
	}

	/*
	 * Allocate the mdb dnlc hash array
	 */
	dnlc_hash = mdb_zalloc(MDB_DNLC_HSIZE * sizeof (ncache_t *), UM_SLEEP);

	/* for each kernel hash chain */
	for (i = 0, head = nc_hash_addr; i < nc_hashsz;
	    i++, head += sizeof (nc_hash_t)) {
		nc_hash_t nch; /* kernel hash chain header */
		ncache_t *ncp; /* name cache pointer */
		int hash; /* mdb hash value */
		uintptr_t nc_va; /* kernel va of next ncache */
		uintptr_t ncprev_va; /* kernel va of previous ncache */
		int khash; /* kernel dnlc hash value */
		uchar_t namelen; /* name length */
		ncache_t nc; /* name cache entry */
		int nc_size; /* size of a name cache entry */

		/*
		 * We read each element of the nc_hash array individually
		 * just before we process the entries in its chain. This is
		 * because the chain can change so rapidly on a running system.
		 */
		if (mdb_vread(&nch, sizeof (nc_hash_t), head) == -1) {
			mdb_warn("failed to read nc_hash chain header %d", i);
			dnlc_free();
			return (-1);
		}

		ncprev_va = head;
		nc_va = (uintptr_t)(nch.hash_next);
		/* for each entry in the chain */
		while (nc_va != head) {
			/*
			 * The size of the ncache entries varies
			 * because the name is appended to the structure.
			 * So we read in the structure then re-read
			 * for the structure plus name.
			 */
			if (mdb_vread(&nc, sizeof (ncache_t), nc_va) == -1) {
				if (skip_bad_chains) {
					mdb_warn(bad_dnlc, i, nc_va);
					break;
				}
				goto retry;
			}
			nc_size = MDB_DNLC_NCACHE_SZ(&nc);
			ncp = mdb_alloc(nc_size, UM_SLEEP);
			if (mdb_vread(ncp, nc_size - 1, nc_va) == -1) {
				mdb_free(ncp, nc_size);
				if (skip_bad_chains) {
					mdb_warn(bad_dnlc, i, nc_va);
					break;
				}
				goto retry;
			}

			/*
			 * Check for chain consistency
			 */
			if ((uintptr_t)ncp->hash_prev != ncprev_va) {
				mdb_free(ncp, nc_size);
				if (skip_bad_chains) {
					mdb_warn(bad_dnlc, i, nc_va);
					break;
				}
				goto retry;
			}
			/*
			 * Terminate the new name with a null.
			 * Note, we allowed space for this null when
			 * allocating space for the entry.
			 */
			ncp->name[ncp->namlen] = '\0';

			/*
			 * Validate new entry by re-hashing using the
			 * kernel dnlc hash function and comparing the hash
			 */
			DNLCHASH(ncp->name, ncp->dp, khash, namelen);
			if ((namelen != ncp->namlen) ||
			    (khash != ncp->hash)) {
				mdb_free(ncp, nc_size);
				if (skip_bad_chains) {
					mdb_warn(bad_dnlc, i, nc_va);
					break;
				}
				goto retry;
			}

			/*
			 * Finally put the validated entry into the mdb
			 * hash chains. Reuse the kernel next hash field
			 * for the mdb hash chain pointer.
			 */
			hash = MDB_DNLC_HASH(ncp->vp);
			ncprev_va = nc_va;
			nc_va = (uintptr_t)(ncp->hash_next);
			ncp->hash_next = dnlc_hash[hash];
			dnlc_hash[hash] = ncp;
		}
	}
	return (0);
}

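/*
 * ::dnlc dcmd - print the vnode, parent vnode and name of every entry in
 * our cached copy of the DNLC.
 */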
/*ARGSUSED*/
int
dnlcdump(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	ncache_t *ent;
	int i;

	if ((flags & DCMD_ADDRSPEC) || argc != 0)
		return (DCMD_USAGE);

	if (dnlc_load() == -1)
		return (DCMD_ERR);

	mdb_printf("%<u>%-?s %-?s %-32s%</u>\n", "VP", "DVP", "NAME");

	for (i = 0; i < MDB_DNLC_HSIZE; i++) {
		for (ent = dnlc_hash[i]; ent != NULL; ent = ent->hash_next) {
			mdb_printf("%0?p %0?p %s\n",
			    ent->vp, ent->dp, ent->name);
		}
	}

	return (DCMD_OK);
}

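/*
 * Format an mdb_path_t into buf as a '/'-separated pathname, walking the
 * collected components in reverse.  Unresolved paths are prefixed with
 * "??" and paths that overflow the buffer are terminated with "...".
 */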
static int
mdb_sprintpath(char *buf, size_t len, mdb_path_t *path)
{
	char *s = buf;
	int i;

	if (len < sizeof ("/..."))
		return (-1);

	if (!path->mdp_complete) {
		(void) strcpy(s, "??");
		s += 2;

		if (path->mdp_nelem == 0)
			return (-1);
	}

	if (path->mdp_nelem == 0) {
		(void) strcpy(s, "/");
		return (0);
	}

	for (i = path->mdp_nelem - 1; i >= 0; i--) {
		/*
		 * Number of bytes left is the distance from where we
		 * are to the end, minus 2 for '/' and '\0'
		 */
		ssize_t left = (ssize_t)(&buf[len] - s) - 2;

		if (left <= 0)
			break;

		*s++ = '/';
		(void) strncpy(s, path->mdp_name[i], left);
		s[left - 1] = '\0';
		s += strlen(s);

		if (left < strlen(path->mdp_name[i]))
			break;
	}

	if (i >= 0)
		(void) strcpy(&buf[len - 4], "...");

	return (0);
}

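/*
 * Resolve path components for an autofs vnode by walking the fnnode
 * parent chain, appending each fn_name to the path.
 */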
static int
mdb_autonode2path(uintptr_t addr, mdb_path_t *path)
{
	fninfo_t fni;
	fnnode_t fn;

	vnode_t vn;
	vfs_t vfs;
	struct vnodeops *autofs_vnops = NULL;

	/*
	 * "autofs_vnops_ptr" is the address of the pointer to the vnodeops
	 * structure for autofs.  We want to read it each time we access
	 * it since autofs could (in theory) be unloaded and reloaded.
	 */
	if (mdb_vread(&autofs_vnops, sizeof (autofs_vnops),
	    (uintptr_t)autofs_vnops_ptr) == -1)
		return (-1);

	if (mdb_vread(&vn, sizeof (vn), addr) == -1)
		return (-1);

	if (autofs_vnops == NULL || vn.v_op != autofs_vnops)
		return (-1);

	addr = (uintptr_t)vn.v_data;

	if (mdb_vread(&vfs, sizeof (vfs), (uintptr_t)vn.v_vfsp) == -1 ||
	    mdb_vread(&fni, sizeof (fni), (uintptr_t)vfs.vfs_data) == -1 ||
	    mdb_vread(&vn, sizeof (vn), (uintptr_t)fni.fi_rootvp) == -1)
		return (-1);

	for (;;) {
		size_t elem = path->mdp_nelem++;
		char elemstr[MAXNAMELEN];
		char *c, *p;

		if (elem == MDB_PATH_NELEM) {
			path->mdp_nelem--;
			return (-1);
		}

		if (mdb_vread(&fn, sizeof (fn), addr) != sizeof (fn)) {
			path->mdp_nelem--;
			return (-1);
		}

		if (mdb_readstr(elemstr, sizeof (elemstr),
		    (uintptr_t)fn.fn_name) <= 0) {
			(void) strcpy(elemstr, "?");
		}

		c = mdb_alloc(strlen(elemstr) + 1, UM_SLEEP | UM_GC);
		(void) strcpy(c, elemstr);

		path->mdp_vnode[elem] = (uintptr_t)fn.fn_vnode;

		if (addr == (uintptr_t)fn.fn_parent) {
			path->mdp_name[elem] = &c[1];
			path->mdp_complete = TRUE;
			break;
		}

		if ((p = strrchr(c, '/')) != NULL)
			path->mdp_name[elem] = p + 1;
		else
			path->mdp_name[elem] = c;

		addr = (uintptr_t)fn.fn_parent;
	}

	return (0);
}

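/*
 * Resolve a vnode address to a pathname: use the vnode's cached v_path if
 * it is available, otherwise walk backwards to the root through the DNLC
 * (falling back to autofs fnnodes), collecting name components.
 */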
int
mdb_vnode2path(uintptr_t addr, char *buf, size_t buflen)
{
	uintptr_t rootdir;
	ncache_t *ent;
	vnode_t vp;
	mdb_path_t path;

	/*
	 * Check to see if we have a cached value for this vnode
	 */
	if (mdb_vread(&vp, sizeof (vp), addr) != -1 &&
	    vp.v_path != NULL &&
	    mdb_readstr(buf, buflen, (uintptr_t)vp.v_path) != -1)
		return (0);

	if (dnlc_load() == -1)
		return (-1);

	if (mdb_readvar(&rootdir, "rootdir") == -1) {
		mdb_warn("failed to read 'rootdir'");
		return (-1);
	}

	bzero(&path, sizeof (mdb_path_t));
again:
	if ((addr == 0) && (path.mdp_nelem == 0)) {
		/*
		 * 0 elems && complete tells sprintpath to just print "/"
		 */
		path.mdp_complete = TRUE;
		goto out;
	}

	if (addr == rootdir) {
		path.mdp_complete = TRUE;
		goto out;
	}

	for (ent = dnlc_hash[MDB_DNLC_HASH(addr)]; ent; ent = ent->hash_next) {
		if ((uintptr_t)ent->vp == addr) {
			if (strcmp(ent->name, "..") == 0 ||
			    strcmp(ent->name, ".") == 0)
				continue;

			path.mdp_vnode[path.mdp_nelem] = (uintptr_t)ent->vp;
			path.mdp_name[path.mdp_nelem] = ent->name;
			path.mdp_nelem++;

			if (path.mdp_nelem == MDB_PATH_NELEM) {
				path.mdp_nelem--;
				mdb_warn("path exceeded maximum expected "
				    "elements\n");
				return (-1);
			}

			addr = (uintptr_t)ent->dp;
			goto again;
		}
	}

	(void) mdb_autonode2path(addr, &path);

out:
	return (mdb_sprintpath(buf, buflen, &path));
}


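/*
 * Translate a PID into the address of its proc_t by walking the kernel's
 * pidhash table and procdir array; optionally copy the proc_t into *proc.
 * Returns 0 if the PID cannot be found.
 */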
uintptr_t
mdb_pid2proc(pid_t pid, proc_t *proc)
{
	int pid_hashsz, hash;
	uintptr_t paddr, pidhash, procdir;
	struct pid pidp;

	if (mdb_readvar(&pidhash, "pidhash") == -1)
		return (0);

	if (mdb_readvar(&pid_hashsz, "pid_hashsz") == -1)
		return (0);

	if (mdb_readvar(&procdir, "procdir") == -1)
		return (0);

	hash = pid & (pid_hashsz - 1);

	if (mdb_vread(&paddr, sizeof (paddr),
	    pidhash + (hash * sizeof (paddr))) == -1)
		return (0);

	while (paddr != 0) {
		if (mdb_vread(&pidp, sizeof (pidp), paddr) == -1)
			return (0);

		if (pidp.pid_id == pid) {
			uintptr_t procp;

			if (mdb_vread(&procp, sizeof (procp), procdir +
			    (pidp.pid_prslot * sizeof (procp))) == -1)
				return (0);

			if (proc != NULL)
				(void) mdb_vread(proc, sizeof (proc_t), procp);

			return (procp);
		}
		paddr = (uintptr_t)pidp.pid_link;
	}
	return (0);
}

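/*
 * Return the cpu_id of the cpu_t at the given address, or -1 if it cannot
 * be read.
 */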
int
mdb_cpu2cpuid(uintptr_t cpup)
{
	cpu_t cpu;

	if (mdb_vread(&cpu, sizeof (cpu_t), cpup) != sizeof (cpu_t))
		return (-1);

	return (cpu.cpu_id);
}

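/*
 * Return the ID of the lowest-numbered CPU set in the cpuset at the given
 * address, or -1 if the set is empty or cannot be read.
 */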
int
mdb_cpuset_find(uintptr_t cpusetp)
{
	ulong_t	*cpuset;
	size_t nr_words = BT_BITOUL(NCPU);
	size_t sz = nr_words * sizeof (ulong_t);
	size_t	i;
	int cpu = -1;

	cpuset = mdb_alloc(sz, UM_SLEEP);

	if (mdb_vread((void *)cpuset, sz, cpusetp) != sz)
		goto out;

	for (i = 0; i < nr_words; i++) {
		size_t j;
		ulong_t m;

		for (j = 0, m = 1; j < BT_NBIPUL; j++, m <<= 1) {
			if (cpuset[i] & m) {
				cpu = i * BT_NBIPUL + j;
				goto out;
			}
		}
	}

out:
	mdb_free(cpuset, sz);
	return (cpu);
}

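/*
 * Read and cache the kernel's page_hash parameters; returns non-zero on
 * success.
 */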
static int
page_hash_load(void)
{
	if (page_hash_loaded) {
		return (1);
	}

	if (mdb_readvar(&mdb_page_hashsz, "page_hashsz") == -1) {
		mdb_warn("unable to read page_hashsz");
		return (0);
	}
	if (mdb_readvar(&mdb_page_hashsz_shift, "page_hashsz_shift") == -1) {
		mdb_warn("unable to read page_hashsz_shift");
		return (0);
	}
	if (mdb_readvar(&mdb_page_hash, "page_hash") == -1) {
		mdb_warn("unable to read page_hash");
		return (0);
	}

	page_hash_loaded = 1;	/* zeroed on state change */
	return (1);
}

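/*
 * Look up the page_t for a <vnode, offset> pair by searching the bucket
 * chosen by our copy of the kernel's PAGE_HASH_FUNC().
 */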
uintptr_t
mdb_page_lookup(uintptr_t vp, u_offset_t offset)
{
	size_t ndx;
	uintptr_t page_hash_entry, pp;

	if (!page_hash_loaded && !page_hash_load()) {
		return (0);
	}

	ndx = PAGE_HASH_FUNC(vp, offset);
	page_hash_entry = mdb_page_hash + ndx * sizeof (uintptr_t);

	if (mdb_vread(&pp, sizeof (pp), page_hash_entry) < 0) {
		mdb_warn("unable to read page_hash[%ld] (%p)", ndx,
		    page_hash_entry);
		return (0);
	}

	while (pp != 0) {
		page_t page;
		long nndx;

		if (mdb_vread(&page, sizeof (page), pp) < 0) {
			mdb_warn("unable to read page_t at %p", pp);
			return (0);
		}

		if ((uintptr_t)page.p_vnode == vp &&
		    (uint64_t)page.p_offset == offset)
			return (pp);

		/*
		 * Double check that the pages actually hash to the
		 * bucket we're searching.  If not, our version of
		 * PAGE_HASH_FUNC() doesn't match the kernel's, and we're
		 * not going to be able to find the page.  The most
		 * likely reason for this is that mdb_ks doesn't match the
		 * kernel we're running against.
		 */
		nndx = PAGE_HASH_FUNC(page.p_vnode, page.p_offset);
		if (page.p_vnode != NULL && nndx != ndx) {
			mdb_warn("mdb_page_lookup: mdb_ks PAGE_HASH_FUNC() "
			    "mismatch: in bucket %ld, but page %p hashes to "
			    "bucket %ld\n", ndx, pp, nndx);
			return (0);
		}

		pp = (uintptr_t)page.p_hash;
	}

	return (0);
}

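/*
 * Map a vnode type (and mode) to the single-character decoration used by
 * ls -F: '/' for directories, '*' for executables, '@' for symlinks, etc.
 */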
char
mdb_vtype2chr(vtype_t type, mode_t mode)
{
	static const char vttab[] = {
		' ',	/* VNON */
		' ',	/* VREG */
		'/',	/* VDIR */
		' ',	/* VBLK */
		' ',	/* VCHR */
		'@',	/* VLNK */
		'|',	/* VFIFO */
		'>',	/* VDOOR */
		' ',	/* VPROC */
		'=',	/* VSOCK */
		' ',	/* VBAD */
	};

	if (type < 0 || type >= sizeof (vttab) / sizeof (vttab[0]))
		return ('?');

	if (type == VREG && (mode & 0111) != 0)
		return ('*');

	return (vttab[type]);
}

struct pfn2page {
	pfn_t pfn;
	page_t *pp;
};

/*ARGSUSED*/
static int
pfn2page_cb(uintptr_t addr, const struct memseg *msp, void *data)
{
	struct pfn2page *p = data;

	if (p->pfn >= msp->pages_base && p->pfn < msp->pages_end) {
		p->pp = msp->pages + (p->pfn - msp->pages_base);
		return (WALK_DONE);
	}

	return (WALK_NEXT);
}

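/*
 * Translate a PFN to the address of its page_t by walking the memseg list,
 * then verify that the page_t really describes that PFN.
 */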
uintptr_t
mdb_pfn2page(pfn_t pfn)
{
	struct pfn2page	arg;
	struct page	page;

	arg.pfn = pfn;
	arg.pp = NULL;

	if (mdb_walk("memseg", (mdb_walk_cb_t)pfn2page_cb, &arg) == -1) {
		mdb_warn("pfn2page: can't walk memsegs");
		return (0);
	}
	if (arg.pp == NULL) {
		mdb_warn("pfn2page: unable to find page_t for pfn %lx\n",
		    pfn);
		return (0);
	}

	if (mdb_vread(&page, sizeof (page_t), (uintptr_t)arg.pp) == -1) {
		mdb_warn("pfn2page: can't read page 0x%lx at %p", pfn, arg.pp);
		return (0);
	}
	if (page.p_pagenum != pfn) {
		mdb_warn("pfn2page: page_t 0x%p should have PFN 0x%lx, "
		    "but actually has 0x%lx\n", arg.pp, pfn, page.p_pagenum);
		return (0);
	}

	return ((uintptr_t)arg.pp);
}

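/*
 * Translate a page_t address to its PFN, or (pfn_t)-1 on error.
 */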
pfn_t
mdb_page2pfn(uintptr_t addr)
{
	struct page	page;

	if (mdb_vread(&page, sizeof (page_t), addr) == -1) {
		mdb_warn("pp2pfn: can't read page at %p", addr);
		return ((pfn_t)(-1));
	}

	return (page.p_pagenum);
}

static int
a2m_walk_modctl(uintptr_t addr, const struct modctl *m, a2m_query_t *a2m)
{
	struct module mod;

	if (m->mod_mp == NULL)
		return (0);

	if (mdb_vread(&mod, sizeof (mod), (uintptr_t)m->mod_mp) == -1) {
		mdb_warn("couldn't read modctl %p's module", addr);
		return (0);
	}

	if (a2m->a2m_addr >= (uintptr_t)mod.text &&
	    a2m->a2m_addr < (uintptr_t)mod.text + mod.text_size)
		goto found;

	if (a2m->a2m_addr >= (uintptr_t)mod.data &&
	    a2m->a2m_addr < (uintptr_t)mod.data + mod.data_size)
		goto found;

	return (0);

found:
	a2m->a2m_where = addr;
	return (-1);
}

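/*
 * Return the address of the modctl whose module text or data contains the
 * given virtual address, or 0 if no loaded module matches.
 */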
uintptr_t
mdb_addr2modctl(uintptr_t addr)
{
	a2m_query_t a2m;

	a2m.a2m_addr = addr;
	a2m.a2m_where = 0;

	(void) mdb_walk("modctl", (mdb_walk_cb_t)a2m_walk_modctl, &a2m);
	return (a2m.a2m_where);
}

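/*
 * Find the registered qinfo entry for the given qinit address, if any.
 */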
static mdb_qinfo_t *
qi_lookup(uintptr_t qinit_addr)
{
	mdb_qinfo_t *qip;

	for (qip = qi_head; qip != NULL; qip = qip->qi_next) {
		if (qip->qi_addr == qinit_addr)
			return (qip);
	}

	return (NULL);
}

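/*
 * Register (or update) the mdb_qops_t to be used for queues whose q_qinfo
 * points at the given qinit structure.  A client dmod would typically call
 * this from its _mdb_init(), for example (hypothetical "foo" driver):
 *
 *	if (mdb_lookup_by_obj("foo", "foowinit", &sym) == 0)
 *		mdb_qops_install(&foo_qops, (uintptr_t)sym.st_value);
 */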
void
mdb_qops_install(const mdb_qops_t *qops, uintptr_t qinit_addr)
{
	mdb_qinfo_t *qip = qi_lookup(qinit_addr);

	if (qip != NULL) {
		qip->qi_ops = qops;
		return;
	}

	qip = mdb_alloc(sizeof (mdb_qinfo_t), UM_SLEEP);

	qip->qi_ops = qops;
	qip->qi_addr = qinit_addr;
	qip->qi_next = qi_head;

	qi_head = qip;
}

void
mdb_qops_remove(const mdb_qops_t *qops, uintptr_t qinit_addr)
{
	mdb_qinfo_t *qip, *p = NULL;

	for (qip = qi_head; qip != NULL; p = qip, qip = qip->qi_next) {
		if (qip->qi_addr == qinit_addr && qip->qi_ops == qops) {
			if (qi_head == qip)
				qi_head = qip->qi_next;
			else
				p->qi_next = qip->qi_next;
			mdb_free(qip, sizeof (mdb_qinfo_t));
			return;
		}
	}
}

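/*
 * Return the STREAMS module name for a queue by following q_qinfo to its
 * qinit and module_info; "???" is written to buf on failure.
 */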
char *
mdb_qname(const queue_t *q, char *buf, size_t nbytes)
{
	struct module_info mi;
	struct qinit qi;

	if (mdb_vread(&qi, sizeof (qi), (uintptr_t)q->q_qinfo) == -1) {
		mdb_warn("failed to read qinit at %p", q->q_qinfo);
		goto err;
	}

	if (mdb_vread(&mi, sizeof (mi), (uintptr_t)qi.qi_minfo) == -1) {
		mdb_warn("failed to read module_info at %p", qi.qi_minfo);
		goto err;
	}

	if (mdb_readstr(buf, nbytes, (uintptr_t)mi.mi_idname) <= 0) {
		mdb_warn("failed to read mi_idname at %p", mi.mi_idname);
		goto err;
	}

	return (buf);

err:
	(void) mdb_snprintf(buf, nbytes, "???");
	return (buf);
}

void
mdb_qinfo(const queue_t *q, char *buf, size_t nbytes)
{
	mdb_qinfo_t *qip = qi_lookup((uintptr_t)q->q_qinfo);
	buf[0] = '\0';

	if (qip != NULL)
		qip->qi_ops->q_info(q, buf, nbytes);
}

uintptr_t
mdb_qrnext(const queue_t *q)
{
	mdb_qinfo_t *qip = qi_lookup((uintptr_t)q->q_qinfo);

	if (qip != NULL)
		return (qip->qi_ops->q_rnext(q));

	return (0);
}

uintptr_t
mdb_qwnext(const queue_t *q)
{
	mdb_qinfo_t *qip = qi_lookup((uintptr_t)q->q_qinfo);

	if (qip != NULL)
		return (qip->qi_ops->q_wnext(q));

	return (0);
}

uintptr_t
mdb_qrnext_default(const queue_t *q)
{
	return ((uintptr_t)q->q_next);
}

uintptr_t
mdb_qwnext_default(const queue_t *q)
{
	return ((uintptr_t)q->q_next);
}

/*
 * The following three routines are borrowed from modsubr.c
 */
static int
nm_hash(const char *name)
{
	char c;
	int hash = 0;

	for (c = *name++; c; c = *name++)
		hash ^= c;

	return (hash & MOD_BIND_HASHMASK);
}

static uintptr_t
find_mbind(const char *name, uintptr_t *hashtab)
{
	int hashndx;
	uintptr_t mb;
	struct bind mb_local;
	char node_name[MAXPATHLEN + 1];

	hashndx = nm_hash(name);
	mb = hashtab[hashndx];
	while (mb) {
		if (mdb_vread(&mb_local, sizeof (mb_local), mb) == -1) {
			mdb_warn("failed to read struct bind at %p", mb);
			return (0);
		}
		if (mdb_readstr(node_name, sizeof (node_name),
		    (uintptr_t)mb_local.b_name) == -1) {
			mdb_warn("failed to read node name string at %p",
			    mb_local.b_name);
			return (0);
		}

		if (strcmp(name, node_name) == 0)
			break;

		mb = (uintptr_t)mb_local.b_next;
	}
	return (mb);
}

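/*
 * Translate a driver (node) name to its major number using the kernel's
 * mb_hashtab bindings.
 */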
int
mdb_name_to_major(const char *name, major_t *major)
{
	uintptr_t mbind;
	uintptr_t mb_hashtab[MOD_BIND_HASHSIZE];
	struct bind mbind_local;


	if (mdb_readsym(mb_hashtab, sizeof (mb_hashtab), "mb_hashtab") == -1) {
		mdb_warn("failed to read symbol 'mb_hashtab'");
		return (-1);
	}

	if ((mbind = find_mbind(name, mb_hashtab)) != 0) {
		if (mdb_vread(&mbind_local, sizeof (mbind_local), mbind) ==
		    -1) {
			mdb_warn("failed to read mbind struct at %p", mbind);
			return (-1);
		}

		*major = (major_t)mbind_local.b_num;
		return (0);
	}
	return (-1);
}

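/*
 * Translate a major number to a driver name via the kernel's devnamesp
 * array; returns NULL if the major number is invalid or cannot be read.
 */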
const char *
mdb_major_to_name(major_t major)
{
	static char name[MODMAXNAMELEN + 1];

	uintptr_t devnamesp;
	struct devnames dn;
	uint_t devcnt;

	if (mdb_readvar(&devcnt, "devcnt") == -1 || major >= devcnt ||
	    mdb_readvar(&devnamesp, "devnamesp") == -1)
		return (NULL);

	if (mdb_vread(&dn, sizeof (struct devnames), devnamesp +
	    major * sizeof (struct devnames)) != sizeof (struct devnames))
		return (NULL);

	if (mdb_readstr(name, MODMAXNAMELEN + 1, (uintptr_t)dn.dn_name) == -1)
		return (NULL);

	return ((const char *)name);
}

/*
 * Return the name of the driver attached to the dip in drivername.
 */
int
mdb_devinfo2driver(uintptr_t dip_addr, char *drivername, size_t namebufsize)
{
	struct dev_info	devinfo;
	char bind_name[MAXPATHLEN + 1];
	major_t	major;
	const char *namestr;


	if (mdb_vread(&devinfo, sizeof (devinfo), dip_addr) == -1) {
		mdb_warn("failed to read devinfo at %p", dip_addr);
		return (-1);
	}

	if (mdb_readstr(bind_name, sizeof (bind_name),
	    (uintptr_t)devinfo.devi_binding_name) == -1) {
		mdb_warn("failed to read binding name at %p",
		    devinfo.devi_binding_name);
		return (-1);
	}

	/*
	 * Many->one relation: various names to one major number
	 */
	if (mdb_name_to_major(bind_name, &major) == -1) {
		mdb_warn("failed to translate bind name to major number\n");
		return (-1);
	}

	/*
	 * One->one relation: one major number corresponds to one driver
	 */
	if ((namestr = mdb_major_to_name(major)) == NULL) {
		(void) strncpy(drivername, "???", namebufsize);
		return (-1);
	}

	(void) strncpy(drivername, namestr, namebufsize);
	return (0);
}

/*
 * Find the soft state of the driver attached to this dip (if any), given:
 * - the address of a dip (in core)
 * - the NAME of the global pointer to the driver's i_ddi_soft_state struct
 * - a pointer to a pointer to receive the soft state address
 */
int
mdb_devinfo2statep(uintptr_t dip_addr, char *soft_statep_name,
    uintptr_t *statep)
{
	struct dev_info	dev_info;


	if (mdb_vread(&dev_info, sizeof (dev_info), dip_addr) == -1) {
		mdb_warn("failed to read devinfo at %p", dip_addr);
		return (-1);
	}

	return (mdb_get_soft_state_byname(soft_statep_name,
	    dev_info.devi_instance, statep, NULL, 0));
}

/*
 * Returns a pointer to the top of the soft state struct for the instance
 * specified (in state_addr), given the address of the global soft state
 * pointer and size of the struct.  Also fills in the buffer pointed to by
 * state_buf_p (if non-NULL) with the contents of the state struct.
 */
int
mdb_get_soft_state_byaddr(uintptr_t ssaddr, uint_t instance,
    uintptr_t *state_addr, void *state_buf_p, size_t sizeof_state)
{
	struct i_ddi_soft_state ss;
	void *statep;


	if (mdb_vread(&ss, sizeof (ss), ssaddr) == -1)
		return (-1);

	if (instance >= ss.n_items)
		return (-1);

	if (mdb_vread(&statep, sizeof (statep), (uintptr_t)ss.array +
	    (sizeof (statep) * instance)) == -1)
		return (-1);

	if (state_addr != NULL)
		*state_addr = (uintptr_t)statep;

	if (statep == NULL) {
		errno = ENOENT;
		return (-1);
	}

	if (state_buf_p != NULL) {

		/* Read the state struct into the buffer in local space. */
		if (mdb_vread(state_buf_p, sizeof_state,
		    (uintptr_t)statep) == -1)
			return (-1);
	}

	return (0);
}


/*
 * Returns a pointer to the top of the soft state struct for the instance
 * specified (in state_addr), given the name of the global soft state pointer
 * and size of the struct.  Also fills in the buffer pointed to by
 * state_buf_p (if non-NULL) with the contents of the state struct.
 */
int
mdb_get_soft_state_byname(char *softstatep_name, uint_t instance,
    uintptr_t *state_addr, void *state_buf_p, size_t sizeof_state)
{
	uintptr_t ssaddr;

	if (mdb_readvar((void *)&ssaddr, softstatep_name) == -1)
		return (-1);

	return (mdb_get_soft_state_byaddr(ssaddr, instance, state_addr,
	    state_buf_p, sizeof_state));
}
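
/*
 * For example, a dmod for a (hypothetical) "foo" driver whose soft states
 * hang off a global "foo_statep" could fetch instance 2's state with:
 *
 *	foo_state_t fs;
 *	uintptr_t addr;
 *
 *	if (mdb_get_soft_state_byname("foo_statep", 2, &addr, &fs,
 *	    sizeof (fs)) == -1)
 *		mdb_warn("no soft state for instance 2");
 */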

static const mdb_dcmd_t dcmds[] = {
	{ "dnlc", NULL, "print DNLC contents", dnlcdump },
	{ NULL }
};

static const mdb_modinfo_t modinfo = { MDB_API_VERSION, dcmds };

/*ARGSUSED*/
static void
update_vars(void *arg)
{
	GElf_Sym sym;

	if (mdb_lookup_by_name("auto_vnodeops", &sym) == 0)
		autofs_vnops_ptr = (struct vnodeops *)(uintptr_t)sym.st_value;
	else
		autofs_vnops_ptr = NULL;

	(void) mdb_readvar(&_mdb_ks_pagesize, "_pagesize");
	(void) mdb_readvar(&_mdb_ks_pageshift, "_pageshift");
	(void) mdb_readvar(&_mdb_ks_pageoffset, "_pageoffset");
	(void) mdb_readvar(&_mdb_ks_pagemask, "_pagemask");
	(void) mdb_readvar(&_mdb_ks_mmu_pagesize, "_mmu_pagesize");
	(void) mdb_readvar(&_mdb_ks_mmu_pageshift, "_mmu_pageshift");
	(void) mdb_readvar(&_mdb_ks_mmu_pageoffset, "_mmu_pageoffset");
	(void) mdb_readvar(&_mdb_ks_mmu_pagemask, "_mmu_pagemask");
	(void) mdb_readvar(&_mdb_ks_kernelbase, "_kernelbase");

	(void) mdb_readvar(&_mdb_ks_userlimit, "_userlimit");
	(void) mdb_readvar(&_mdb_ks_userlimit32, "_userlimit32");
	(void) mdb_readvar(&_mdb_ks_argsbase, "_argsbase");
	(void) mdb_readvar(&_mdb_ks_msg_bsize, "_msg_bsize");
	(void) mdb_readvar(&_mdb_ks_defaultstksz, "_defaultstksz");
	(void) mdb_readvar(&_mdb_ks_ncpu, "_ncpu");
	(void) mdb_readvar(&_mdb_ks_ncpu_log2, "_ncpu_log2");
	(void) mdb_readvar(&_mdb_ks_ncpu_p2, "_ncpu_p2");

	page_hash_loaded = 0;	/* invalidate cached page_hash state */
}

const mdb_modinfo_t *
_mdb_init(void)
{
	/*
	 * When used with mdb, mdb_ks is a separate dmod.  With kmdb, however,
	 * mdb_ks is compiled into the debugger module.  kmdb cannot
	 * automatically modunload itself when it exits.  If it restarts after
	 * a debugger fault, static variables may not be initialized to zero.
	 * They must be manually reinitialized here.
	 */
	dnlc_hash = NULL;
	qi_head = NULL;

	mdb_callback_add(MDB_CALLBACK_STCHG, update_vars, NULL);

	update_vars(NULL);

	return (&modinfo);
}

void
_mdb_fini(void)
{
	dnlc_free();
	while (qi_head != NULL) {
		mdb_qinfo_t *qip = qi_head;
		qi_head = qip->qi_next;
		mdb_free(qip, sizeof (mdb_qinfo_t));
	}
}

/*
 * Interface between MDB kproc target and mdb_ks.  The kproc target relies
 * on looking up and invoking these functions in mdb_ks so that dependencies
 * on the current kernel implementation are isolated in mdb_ks.
 */

/*
 * Given the address of a proc_t, return the p.p_as pointer; return NULL
 * if we were unable to read a proc structure from the given address.
 */
uintptr_t
mdb_kproc_as(uintptr_t proc_addr)
{
	proc_t p;

	if (mdb_vread(&p, sizeof (p), proc_addr) == sizeof (p))
		return ((uintptr_t)p.p_as);

	return (0);
}

/*
 * Given the address of a proc_t, return the p.p_model value; return
 * PR_MODEL_UNKNOWN if we were unable to read a proc structure or if
 * the model value does not match one of the two known values.
 */
uint_t
mdb_kproc_model(uintptr_t proc_addr)
{
	proc_t p;

	if (mdb_vread(&p, sizeof (p), proc_addr) == sizeof (p)) {
		switch (p.p_model) {
		case DATAMODEL_ILP32:
			return (PR_MODEL_ILP32);
		case DATAMODEL_LP64:
			return (PR_MODEL_LP64);
		}
	}

	return (PR_MODEL_UNKNOWN);
}

/*
 * Callback function for walking a process's segment list.  For each segment,
 * we fill in an mdb_map_t describing its properties, and then invoke
 * the callback function provided by the kproc target.
 */
static int
asmap_step(uintptr_t addr, const struct seg *seg, asmap_arg_t *asmp)
{
	struct segvn_data svd;
	mdb_map_t map;

	if (seg->s_ops == asmp->asm_segvn_ops && mdb_vread(&svd,
	    sizeof (svd), (uintptr_t)seg->s_data) == sizeof (svd)) {

		if (svd.vp != NULL) {
			if (mdb_vnode2path((uintptr_t)svd.vp, map.map_name,
			    MDB_TGT_MAPSZ) != 0) {
				(void) mdb_snprintf(map.map_name,
				    MDB_TGT_MAPSZ, "[ vnode %p ]", svd.vp);
			}
		} else
			(void) strcpy(map.map_name, "[ anon ]");

	} else {
		(void) mdb_snprintf(map.map_name, MDB_TGT_MAPSZ,
		    "[ seg %p ]", addr);
	}

	map.map_base = (uintptr_t)seg->s_base;
	map.map_size = seg->s_size;
	map.map_flags = 0;

	asmp->asm_callback((const struct mdb_map *)&map, asmp->asm_cbdata);
	return (WALK_NEXT);
}

/*
 * Given a process address space, walk its segment list using the seg walker,
 * convert the segment data to an mdb_map_t, and pass this information
 * back to the kproc target via the given callback function.
 */
int
mdb_kproc_asiter(uintptr_t as,
    void (*func)(const struct mdb_map *, void *), void *p)
{
	asmap_arg_t arg;
	GElf_Sym sym;

	arg.asm_segvn_ops = NULL;
	arg.asm_callback = func;
	arg.asm_cbdata = p;

	if (mdb_lookup_by_name("segvn_ops", &sym) == 0)
		arg.asm_segvn_ops = (struct seg_ops *)(uintptr_t)sym.st_value;

	return (mdb_pwalk("seg", (mdb_walk_cb_t)asmap_step, &arg, as));
}

/*
 * Copy the auxv array from the given process's u-area into the provided
 * buffer.  If the buffer is NULL, only return the size of the auxv array
 * so the caller knows how much space will be required.
 */
int
mdb_kproc_auxv(uintptr_t proc, auxv_t *auxv)
{
	if (auxv != NULL) {
		proc_t p;

		if (mdb_vread(&p, sizeof (p), proc) != sizeof (p))
			return (-1);

		bcopy(p.p_user.u_auxv, auxv,
		    sizeof (auxv_t) * __KERN_NAUXV_IMPL);
	}

	return (__KERN_NAUXV_IMPL);
}

/*
 * Given a process address, return the PID.
 */
pid_t
mdb_kproc_pid(uintptr_t proc_addr)
{
	struct pid pid;
	proc_t p;

	if (mdb_vread(&p, sizeof (p), proc_addr) == sizeof (p) &&
	    mdb_vread(&pid, sizeof (pid), (uintptr_t)p.p_pidp) == sizeof (pid))
		return (pid.pid_id);

	return (-1);
}

/*
 * Interface between the MDB kvm target and mdb_ks.  The kvm target relies
 * on looking up and invoking these functions in mdb_ks so that dependencies
 * on the current kernel implementation are isolated in mdb_ks.
 */

/*
 * Print a description of the dump's content.  If curproc content was
 * requested but could not be dumped, note why: for example, the thread that
 * panicked was a kernel thread (panic_thread->t_procp == &p0).
 */
void
mdb_dump_print_content(dumphdr_t *dh, pid_t content)
{
	GElf_Sym sym;
	uintptr_t pt;
	uintptr_t procp;
	int expcont = 0;
	int actcont;

	(void) mdb_readvar(&expcont, "dump_conflags");
	actcont = dh->dump_flags & DF_CONTENT;

	if (actcont == DF_ALL) {
		mdb_printf("dump content: all kernel and user pages\n");
		return;
	} else if (actcont == DF_CURPROC) {
		mdb_printf("dump content: kernel pages and pages from "
		    "PID %d", content);
		return;
	}

	mdb_printf("dump content: kernel pages only\n");
	if (!(expcont & DF_CURPROC))
		return;

	if (mdb_readvar(&pt, "panic_thread") != sizeof (pt) || pt == 0)
		goto kthreadpanic_err;

	if (mdb_vread(&procp, sizeof (procp), pt + OFFSETOF(kthread_t,
	    t_procp)) == -1 || procp == 0)
		goto kthreadpanic_err;

	if (mdb_lookup_by_name("p0", &sym) != 0)
		goto kthreadpanic_err;

	if (procp == (uintptr_t)sym.st_value) {
		mdb_printf("  (curproc requested, but a kernel thread "
		    "panicked)\n");
	} else {
		mdb_printf("  (curproc requested, but the process that "
		    "panicked could not be dumped)\n");
	}

	return;

kthreadpanic_err:
	mdb_printf("  (curproc requested, but the process that panicked could "
	    "not be found)\n");
}

/*
 * Determine the process that was saved in a `curproc' dump.  This process will
 * be recorded as the first element in dump_pids[].
 */
int
mdb_dump_find_curproc(void)
{
	uintptr_t pidp;
	pid_t pid = -1;

	if (mdb_readvar(&pidp, "dump_pids") == sizeof (pidp) &&
	    mdb_vread(&pid, sizeof (pid), pidp) == sizeof (pid) &&
	    pid > 0)
		return (pid);
	else
		return (-1);
}


/*
 * Following three funcs extracted from sunddi.c
 */

/*
 * Return core address of root node of devinfo tree
 */
static uintptr_t
mdb_ddi_root_node(void)
{
	uintptr_t	top_devinfo_addr;

	/* return (top_devinfo);   */
	if (mdb_readvar(&top_devinfo_addr, "top_devinfo") == -1) {
		mdb_warn("failed to read top_devinfo");
		return (0);
	}
	return (top_devinfo_addr);
}

/*
 * Return the name of the devinfo node pointed at by 'dip_addr' in the buffer
 * pointed at by 'name.'
 *
 * - dip_addr is a pointer to a dev_info struct in core.
 */
static char *
mdb_ddi_deviname(uintptr_t dip_addr, char *name, size_t name_size)
{
	uintptr_t addrname;
	ssize_t	length;
	char *local_namep = name;
	size_t local_name_size = name_size;
	struct dev_info	local_dip;


	if (dip_addr == mdb_ddi_root_node()) {
		if (name_size < 1) {
			mdb_warn("failed to get node name: buf too small\n");
			return (NULL);
		}

		*name = '\0';
		return (name);
	}

	if (name_size < 2) {
		mdb_warn("failed to get node name: buf too small\n");
		return (NULL);
	}

	local_namep = name;
	*local_namep++ = '/';
	*local_namep = '\0';
	local_name_size--;

	if (mdb_vread(&local_dip, sizeof (struct dev_info), dip_addr) == -1) {
		mdb_warn("failed to read devinfo struct");
	}

	length = mdb_readstr(local_namep, local_name_size,
	    (uintptr_t)local_dip.devi_node_name);
	if (length == -1) {
		mdb_warn("failed to read node name");
		return (NULL);
	}
	local_namep += length;
	local_name_size -= length;
	addrname = (uintptr_t)local_dip.devi_addr;

	if (addrname != 0) {

		if (local_name_size < 2) {
			mdb_warn("not enough room for node address string");
			return (name);
		}
		*local_namep++ = '@';
		*local_namep = '\0';
		local_name_size--;

		length = mdb_readstr(local_namep, local_name_size, addrname);
		if (length == -1) {
			mdb_warn("failed to read name");
			return (NULL);
		}
	}

	return (name);
}

/*
 * Generate the full path under the /devices dir to the device entry.
 *
 * dip is a pointer to a devinfo struct in core (not in local memory).
 */
char *
mdb_ddi_pathname(uintptr_t dip_addr, char *path, size_t pathlen)
{
	struct dev_info local_dip;
	uintptr_t	parent_dip;
	char		*bp;
	size_t		buf_left;


	if (dip_addr == mdb_ddi_root_node()) {
		*path = '\0';
		return (path);
	}


	if (mdb_vread(&local_dip, sizeof (struct dev_info), dip_addr) == -1) {
		mdb_warn("failed to read devinfo struct");
	}

	parent_dip = (uintptr_t)local_dip.devi_parent;
	(void) mdb_ddi_pathname(parent_dip, path, pathlen);

	bp = path + strlen(path);
	buf_left = pathlen - strlen(path);
	(void) mdb_ddi_deviname(dip_addr, bp, buf_left);
	return (path);
}


/*
 * Read in the string value of a refstr, which is appended to the end of
 * the structure.
 */
ssize_t
mdb_read_refstr(uintptr_t refstr_addr, char *str, size_t nbytes)
{
	struct refstr *r = (struct refstr *)refstr_addr;

	return (mdb_readstr(str, nbytes, (uintptr_t)r->rs_string));
}

/*
 * Chase an mblk list by b_next and return the length.
 */
int
mdb_mblk_count(const mblk_t *mb)
{
	int count;
	mblk_t mblk;

	if (mb == NULL)
		return (0);

	count = 1;
	while (mb->b_next != NULL) {
		count++;
		if (mdb_vread(&mblk, sizeof (mblk), (uintptr_t)mb->b_next) ==
		    -1)
			break;
		mb = &mblk;
	}
	return (count);
}

/*
 * Write the given MAC address as a printable string in the usual colon-
 * separated format.  Assumes that buflen is at least 2.
 */
void
mdb_mac_addr(const uint8_t *addr, size_t alen, char *buf, size_t buflen)
{
	int slen;

	if (alen == 0 || buflen < 4) {
		(void) strcpy(buf, "?");
		return;
	}
	for (;;) {
		/*
		 * If there are more MAC address bytes available, but we won't
		 * have any room to print them, then add "..." to the string
		 * instead.  See below for the 'magic number' explanation.
		 */
		if ((alen == 2 && buflen < 6) || (alen > 2 && buflen < 7)) {
			(void) strcpy(buf, "...");
			break;
		}
		slen = mdb_snprintf(buf, buflen, "%02x", *addr++);
		buf += slen;
		if (--alen == 0)
			break;
		*buf++ = ':';
		buflen -= slen + 1;
		/*
		 * At this point, based on the first 'if' statement above,
		 * either alen == 1 and buflen >= 3, or alen > 1 and
		 * buflen >= 4.  The first case leaves room for the final "xx"
		 * number and trailing NUL byte.  The second leaves room for at
		 * least "...".  Thus the apparently 'magic' numbers chosen for
		 * that statement.
		 */
	}
}

/*
 * Produce a string that represents a DLPI primitive, or NULL if no such string
 * is possible.
 */
const char *
mdb_dlpi_prim(int prim)
{
	switch (prim) {
	case DL_INFO_REQ:	return ("DL_INFO_REQ");
	case DL_INFO_ACK:	return ("DL_INFO_ACK");
	case DL_ATTACH_REQ:	return ("DL_ATTACH_REQ");
	case DL_DETACH_REQ:	return ("DL_DETACH_REQ");
	case DL_BIND_REQ:	return ("DL_BIND_REQ");
	case DL_BIND_ACK:	return ("DL_BIND_ACK");
	case DL_UNBIND_REQ:	return ("DL_UNBIND_REQ");
	case DL_OK_ACK:		return ("DL_OK_ACK");
	case DL_ERROR_ACK:	return ("DL_ERROR_ACK");
	case DL_ENABMULTI_REQ:	return ("DL_ENABMULTI_REQ");
	case DL_DISABMULTI_REQ:	return ("DL_DISABMULTI_REQ");
	case DL_PROMISCON_REQ:	return ("DL_PROMISCON_REQ");
	case DL_PROMISCOFF_REQ:	return ("DL_PROMISCOFF_REQ");
	case DL_UNITDATA_REQ:	return ("DL_UNITDATA_REQ");
	case DL_UNITDATA_IND:	return ("DL_UNITDATA_IND");
	case DL_UDERROR_IND:	return ("DL_UDERROR_IND");
	case DL_PHYS_ADDR_REQ:	return ("DL_PHYS_ADDR_REQ");
	case DL_PHYS_ADDR_ACK:	return ("DL_PHYS_ADDR_ACK");
	case DL_SET_PHYS_ADDR_REQ:	return ("DL_SET_PHYS_ADDR_REQ");
	case DL_NOTIFY_REQ:	return ("DL_NOTIFY_REQ");
	case DL_NOTIFY_ACK:	return ("DL_NOTIFY_ACK");
	case DL_NOTIFY_IND:	return ("DL_NOTIFY_IND");
	case DL_NOTIFY_CONF:	return ("DL_NOTIFY_CONF");
	case DL_CAPABILITY_REQ:	return ("DL_CAPABILITY_REQ");
	case DL_CAPABILITY_ACK:	return ("DL_CAPABILITY_ACK");
	case DL_CONTROL_REQ:	return ("DL_CONTROL_REQ");
	case DL_CONTROL_ACK:	return ("DL_CONTROL_ACK");
	case DL_PASSIVE_REQ:	return ("DL_PASSIVE_REQ");
	default:		return (NULL);
	}
}

/*
 * mdb_gethrtime() returns the hires system time: the timestamp at which we
 * dropped into kmdb(1), if called from there; the core dump's hires time,
 * if inspecting one; or the running system's hires time, if inspecting a
 * live kernel.
 */
hrtime_t
mdb_gethrtime(void)
{
	uintptr_t ptr;
	GElf_Sym sym;
	lbolt_info_t lbi;
	hrtime_t ts;

	/*
	 * We first check whether the lbolt info structure has been allocated
	 * and initialized. If not, lbolt_hybrid will be pointing at
	 * lbolt_bootstrap.
	 */
	if (mdb_lookup_by_name("lbolt_bootstrap", &sym) == -1)
		return (0);

	if (mdb_readvar(&ptr, "lbolt_hybrid") == -1)
		return (0);

	if (ptr == (uintptr_t)sym.st_value)
		return (0);

#ifdef _KMDB
	if (mdb_readvar(&ptr, "lb_info") == -1)
		return (0);

	if (mdb_vread(&lbi, sizeof (lbolt_info_t), ptr) !=
	    sizeof (lbolt_info_t))
		return (0);

	ts = lbi.lbi_debug_ts;
#else
	if (mdb_prop_postmortem) {
		if (mdb_readvar(&ptr, "lb_info") == -1)
			return (0);

		if (mdb_vread(&lbi, sizeof (lbolt_info_t), ptr) !=
		    sizeof (lbolt_info_t))
			return (0);

		ts = lbi.lbi_debug_ts;
	} else {
		ts = gethrtime();
	}
#endif
	return (ts);
}

/*
 * mdb_get_lbolt() returns the number of clock ticks since system boot.
 * Depending on the context in which it's called, the value will be derived
 * from different sources per mdb_gethrtime(). If inspecting a panicked
 * system, the routine returns the 'panic_lbolt64' variable from the core file.
 */
int64_t
mdb_get_lbolt(void)
{
	lbolt_info_t lbi;
	uintptr_t ptr;
	int64_t pl;
	hrtime_t ts;
	int nsec;

	if (mdb_readvar(&pl, "panic_lbolt64") != -1 && pl > 0)
		return (pl);

	/*
	 * mdb_gethrtime() will return zero if the lbolt info structure hasn't
	 * been allocated and initialized yet, or if it fails to read it.
	 */
	if ((ts = mdb_gethrtime()) <= 0)
		return (0);

	/*
	 * Load the time spent in kmdb, if any.
	 */
	if (mdb_readvar(&ptr, "lb_info") == -1)
		return (0);

	if (mdb_vread(&lbi, sizeof (lbolt_info_t), ptr) !=
	    sizeof (lbolt_info_t))
		return (0);

	if (mdb_readvar(&nsec, "nsec_per_tick") == -1 || nsec == 0) {
		mdb_warn("failed to read 'nsec_per_tick'");
		return (-1);
	}

	return ((ts/nsec) - lbi.lbi_debug_time);
}

void
mdb_print_buildversion(void)
{
	GElf_Sym sym;

	if (mdb_lookup_by_name("buildversion", &sym) != 0)
		return;

	char *str = mdb_zalloc(4096, UM_SLEEP | UM_GC);

	if (mdb_readstr(str, 4096, sym.st_value) < 1)
		return;

	mdb_printf("build version: %s\n", str);
}
