xref: /illumos-gate/usr/src/cmd/mdb/common/mdb/mdb_kvm.c (revision f6e214c7418f43af38bd8c3a557e3d0a1d311cfa)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
23  */
24 
25 /*
26  * Libkvm Kernel Target
27  *
28  * The libkvm kernel target provides access to both crash dumps and live
29  * kernels through /dev/ksyms and /dev/kmem, using the facilities provided by
30  * the libkvm.so library.  The target-specific data structures are shared
31  * between this file (common code) and the ISA-dependent parts of the target,
32  * and so they are defined in the mdb_kvm.h header.  The target processes an
33  * "executable" (/dev/ksyms or the unix.X file) which contains a primary
34  * .symtab and .dynsym, and then also iterates over the krtld module chain in
35  * the kernel in order to obtain a list of loaded modules and per-module symbol
36  * tables.  To improve startup performance, the per-module symbol tables are
37  * instantiated on-the-fly whenever an address lookup falls within the text
38  * section of a given module.  The target also relies on services from the
39  * mdb_ks (kernel support) module, which contains pieces of the implementation
40  * that must be compiled against the kernel implementation.
41  */
42 
43 #include <sys/modctl.h>
44 #include <sys/kobj.h>
45 #include <sys/kobj_impl.h>
46 #include <sys/utsname.h>
47 #include <sys/panic.h>
48 #include <sys/dumphdr.h>
49 #include <sys/dumpadm.h>
50 
51 #include <dlfcn.h>
52 #include <libctf.h>
53 #include <string.h>
54 #include <fcntl.h>
55 #include <errno.h>
56 
57 #include <mdb/mdb_target_impl.h>
58 #include <mdb/mdb_err.h>
59 #include <mdb/mdb_debug.h>
60 #include <mdb/mdb_string.h>
61 #include <mdb/mdb_modapi.h>
62 #include <mdb/mdb_io_impl.h>
63 #include <mdb/mdb_ctf.h>
64 #include <mdb/mdb_kvm.h>
65 #include <mdb/mdb_module.h>
66 #include <mdb/mdb_kb.h>
67 #include <mdb/mdb.h>
68 
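/*
 * Descriptive note (added): KT_RELOC_BUF translates an address that falls
 * inside a module's symspace in the kernel into the corresponding address
 * inside our local copy of that symspace.  KT_BAD_BUF evaluates to true if
 * the given pointer does not fall within [base, base + size), and is used
 * below to sanity-check symbol and string table pointers before we trust
 * them.
 */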
69 #define	KT_RELOC_BUF(buf, obase, nbase) \
70 	((uintptr_t)(buf) - (uintptr_t)(obase) + (uintptr_t)(nbase))
71 
72 #define	KT_BAD_BUF(buf, base, size) \
73 	((uintptr_t)(buf) < (uintptr_t)(base) || \
74 	((uintptr_t)(buf) >= (uintptr_t)(base) + (uintptr_t)(size)))
75 
76 typedef struct kt_symarg {
77 	mdb_tgt_sym_f *sym_cb;		/* Caller's callback function */
78 	void *sym_data;			/* Callback function argument */
79 	uint_t sym_type;		/* Symbol type/binding filter */
80 	mdb_syminfo_t sym_info;		/* Symbol id and table id */
81 	const char *sym_obj;		/* Containing object */
82 } kt_symarg_t;
83 
84 typedef struct kt_maparg {
85 	mdb_tgt_t *map_target;		/* Target used for mapping iter */
86 	mdb_tgt_map_f *map_cb;		/* Caller's callback function */
87 	void *map_data;			/* Callback function argument */
88 } kt_maparg_t;
89 
90 static const char KT_MODULE[] = "mdb_ks";
91 static const char KT_CTFPARENT[] = "genunix";
92 
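/*
 * Instantiate the symbol table for a single module: copy the module's
 * entire symspace (symbol and string tables) out of the kernel's address
 * space, relocate the symtab and strtab pointers into our local copy, and
 * build a GELF symbol table from the raw data.
 */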
93 static void
94 kt_load_module(kt_data_t *kt, mdb_tgt_t *t, kt_module_t *km)
95 {
96 	km->km_data = mdb_alloc(km->km_datasz, UM_SLEEP);
97 
98 	(void) mdb_tgt_vread(t, km->km_data, km->km_datasz, km->km_symspace_va);
99 
100 	km->km_symbuf = (void *)
101 	    KT_RELOC_BUF(km->km_symtab_va, km->km_symspace_va, km->km_data);
102 
103 	km->km_strtab = (char *)
104 	    KT_RELOC_BUF(km->km_strtab_va, km->km_symspace_va, km->km_data);
105 
106 	km->km_symtab = mdb_gelf_symtab_create_raw(&kt->k_file->gf_ehdr,
107 	    &km->km_symtab_hdr, km->km_symbuf,
108 	    &km->km_strtab_hdr, km->km_strtab, MDB_TGT_SYMTAB);
109 }
110 
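/*
 * Walk the kernel's list of modctl structures, rooted at the 'modules'
 * symbol, and create a kt_module_t for each loaded module that has a
 * usable symspace.  The per-module symbol tables are normally loaded
 * lazily by kt_load_module(); if MDB_TGT_F_PRELOAD is set we load them
 * immediately and print each module name as we go.
 */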
111 static void
112 kt_load_modules(kt_data_t *kt, mdb_tgt_t *t)
113 {
114 	char name[MAXNAMELEN];
115 	uintptr_t addr, head;
116 
117 	struct module kmod;
118 	struct modctl ctl;
119 	Shdr symhdr, strhdr;
120 	GElf_Sym sym;
121 
122 	kt_module_t *km;
123 
124 	if (mdb_tgt_lookup_by_name(t, MDB_TGT_OBJ_EXEC,
125 	    "modules", &sym, NULL) == -1) {
126 		warn("failed to get 'modules' symbol");
127 		return;
128 	}
129 
130 	if (mdb_tgt_readsym(t, MDB_TGT_AS_VIRT, &ctl, sizeof (ctl),
131 	    MDB_TGT_OBJ_EXEC, "modules") != sizeof (ctl)) {
132 		warn("failed to read 'modules' struct");
133 		return;
134 	}
135 
136 	addr = head = (uintptr_t)sym.st_value;
137 
138 	do {
139 		if (addr == 0)
140 			break; /* Avoid spurious NULL pointers in list */
141 
142 		if (mdb_tgt_vread(t, &ctl, sizeof (ctl), addr) == -1) {
143 			warn("failed to read modctl at %p", (void *)addr);
144 			return;
145 		}
146 
147 		if (ctl.mod_mp == NULL)
148 			continue; /* No associated krtld structure */
149 
150 		if (mdb_tgt_readstr(t, MDB_TGT_AS_VIRT, name, MAXNAMELEN,
151 		    (uintptr_t)ctl.mod_modname) <= 0) {
152 			warn("failed to read module name at %p",
153 			    (void *)ctl.mod_modname);
154 			continue;
155 		}
156 
157 		mdb_dprintf(MDB_DBG_KMOD, "reading mod %s (%p)\n",
158 		    name, (void *)addr);
159 
160 		if (mdb_nv_lookup(&kt->k_modules, name) != NULL) {
161 			warn("skipping duplicate module '%s', id=%d\n",
162 			    name, ctl.mod_id);
163 			continue;
164 		}
165 
166 		if (mdb_tgt_vread(t, &kmod, sizeof (kmod),
167 		    (uintptr_t)ctl.mod_mp) == -1) {
168 			warn("failed to read module at %p\n",
169 			    (void *)ctl.mod_mp);
170 			continue;
171 		}
172 
173 		if (kmod.symspace == NULL || kmod.symhdr == NULL ||
174 		    kmod.strhdr == NULL) {
175 			/*
176 			 * If no buffer for the symbols has been allocated,
177 			 * or the shdrs for .symtab and .strtab are missing,
178 			 * then we're out of luck.
179 			 */
180 			continue;
181 		}
182 
183 		if (mdb_tgt_vread(t, &symhdr, sizeof (Shdr),
184 		    (uintptr_t)kmod.symhdr) == -1) {
185 			warn("failed to read .symtab header for '%s', id=%d",
186 			    name, ctl.mod_id);
187 			continue;
188 		}
189 
190 		if (mdb_tgt_vread(t, &strhdr, sizeof (Shdr),
191 		    (uintptr_t)kmod.strhdr) == -1) {
192 			warn("failed to read .strtab header for '%s', id=%d",
193 			    name, ctl.mod_id);
194 			continue;
195 		}
196 
197 		/*
198 		 * Now get clever: krtld didn't always bother updating
199 		 * its own kmod.symsize value.  We know that prior to this bug
200 		 * being fixed, symspace was a contiguous buffer containing
201 		 * .symtab, .strtab, and the symbol hash table in that order.
202 		 * So if symsize is zero, recompute it as the size of .symtab
203 		 * plus the size of .strtab.  We don't need to load the hash
204 		 * table anyway since we re-hash all the symbols internally.
205 		 */
206 		if (kmod.symsize == 0)
207 			kmod.symsize = symhdr.sh_size + strhdr.sh_size;
208 
209 		/*
210 		 * Similar logic can be used to make educated guesses
211 		 * at the values of kmod.symtbl and kmod.strings.
212 		 */
213 		if (kmod.symtbl == NULL)
214 			kmod.symtbl = kmod.symspace;
215 		if (kmod.strings == NULL)
216 			kmod.strings = kmod.symspace + symhdr.sh_size;
217 
218 		/*
219 		 * Make sure things seem reasonable before we proceed
220 		 * to actually read and decipher the symspace.
221 		 */
222 		if (KT_BAD_BUF(kmod.symtbl, kmod.symspace, kmod.symsize) ||
223 		    KT_BAD_BUF(kmod.strings, kmod.symspace, kmod.symsize)) {
224 			warn("skipping module '%s', id=%d (corrupt symspace)\n",
225 			    name, ctl.mod_id);
226 			continue;
227 		}
228 
229 		km = mdb_zalloc(sizeof (kt_module_t), UM_SLEEP);
230 		km->km_name = strdup(name);
231 
232 		(void) mdb_nv_insert(&kt->k_modules, km->km_name, NULL,
233 		    (uintptr_t)km, MDB_NV_EXTNAME);
234 
235 		km->km_datasz = kmod.symsize;
236 		km->km_symspace_va = (uintptr_t)kmod.symspace;
237 		km->km_symtab_va = (uintptr_t)kmod.symtbl;
238 		km->km_strtab_va = (uintptr_t)kmod.strings;
239 		km->km_symtab_hdr = symhdr;
240 		km->km_strtab_hdr = strhdr;
241 		km->km_text_va = (uintptr_t)kmod.text;
242 		km->km_text_size = kmod.text_size;
243 		km->km_data_va = (uintptr_t)kmod.data;
244 		km->km_data_size = kmod.data_size;
245 		km->km_bss_va = (uintptr_t)kmod.bss;
246 		km->km_bss_size = kmod.bss_size;
247 
248 		if (kt->k_ctfvalid) {
249 			km->km_ctf_va = (uintptr_t)kmod.ctfdata;
250 			km->km_ctf_size = kmod.ctfsize;
251 		}
252 
253 		/*
254 		 * Add the module to the end of the list of modules in load-
255 		 * dependency order.  This is needed to load the corresponding
256 		 * debugger modules in the same order for layering purposes.
257 		 */
258 		mdb_list_append(&kt->k_modlist, km);
259 
260 		if (t->t_flags & MDB_TGT_F_PRELOAD) {
261 			mdb_iob_printf(mdb.m_out, " %s", name);
262 			mdb_iob_flush(mdb.m_out);
263 			kt_load_module(kt, t, km);
264 		}
265 
266 	} while ((addr = (uintptr_t)ctl.mod_next) != head);
267 }
268 
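/*
 * Change the target's open mode.  Toggling MDB_TGT_F_ALLOWIO on a live
 * kernel switches between /dev/kmem and /dev/allkmem, and toggling
 * MDB_TGT_F_RDWR re-opens the backend read-write or read-only; t_flags is
 * updated only if the re-open succeeds.
 */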
269 int
270 kt_setflags(mdb_tgt_t *t, int flags)
271 {
272 	int iochg = ((flags ^ t->t_flags) & MDB_TGT_F_ALLOWIO) &&
273 	    !mdb_prop_postmortem;
274 	int rwchg = (flags ^ t->t_flags) & MDB_TGT_F_RDWR;
275 	kt_data_t *kt = t->t_data;
276 	const char *kvmfile;
277 	void *cookie;
278 	int mode;
279 
280 	if (!iochg && !rwchg)
281 		return (0);
282 
283 	if (kt->k_xpv_domu) {
284 		warn("read-only target");
285 		return (-1);
286 	}
287 
288 	if (iochg) {
289 		kvmfile = (flags & MDB_TGT_F_ALLOWIO) ? "/dev/allkmem" :
290 		    "/dev/kmem";
291 	} else {
292 		kvmfile = kt->k_kvmfile;
293 	}
294 
295 	mode = (flags & MDB_TGT_F_RDWR) ? O_RDWR : O_RDONLY;
296 
297 	if ((cookie = kt->k_kb_ops->kb_open(kt->k_symfile, kvmfile, NULL, mode,
298 	    mdb.m_pname)) == NULL) {
299 		/* We failed to re-open, so don't change t_flags */
300 		warn("failed to re-open target");
301 		return (-1);
302 	}
303 
304 	/*
305 	 * We successfully reopened the target, so update k_kvmfile.  Also set
306 	 * the RDWR and ALLOWIO bits in t_flags to match those in flags.
307 	 */
308 	(void) kt->k_kb_ops->kb_close(kt->k_cookie);
309 	kt->k_cookie = cookie;
310 
311 	if (kvmfile != kt->k_kvmfile) {
312 		strfree(kt->k_kvmfile);
313 		kt->k_kvmfile = strdup(kvmfile);
314 	}
315 
316 	t->t_flags = (t->t_flags & ~(MDB_TGT_F_RDWR | MDB_TGT_F_ALLOWIO)) |
317 	    (flags & (MDB_TGT_F_RDWR | MDB_TGT_F_ALLOWIO));
318 
319 	return (0);
320 }
321 
322 /*
323  * Determine which PIDs (if any) have their pages saved in the dump.  We
324  * do this by looking for content flags in dump_flags in the header.  These
325  * flags, which won't be set in older dumps, tell us whether a single process
326  * has had its pages included in the dump.  If a single process has been
327  * included, we need to get the PID for that process from the dump_pids
328  * array in the dump.
329  */
330 static int
331 kt_find_dump_contents(kt_data_t *kt)
332 {
333 	dumphdr_t *dh = kt->k_dumphdr;
334 	pid_t pid = -1;
335 
336 	if (dh->dump_flags & DF_ALL)
337 		return (KT_DUMPCONTENT_ALL);
338 
339 	if (dh->dump_flags & DF_CURPROC) {
340 		if ((pid = kt->k_dump_find_curproc()) == -1)
341 			return (KT_DUMPCONTENT_INVALID);
342 		else
343 			return (pid);
344 	} else {
345 		return (KT_DUMPCONTENT_KERNEL);
346 	}
347 }
348 
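/*
 * Determine whether the dump can be expected to contain pages for the
 * given proc.  For a curproc-only dump we compare the PID recorded when
 * the dump contents were determined against the PID of the requested
 * proc, obtained through the mdb_kproc_pid() function presumably exported
 * by the kernel support module.  If we cannot tell, we warn and err on
 * the side of permitting the context switch.
 */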
349 static int
350 kt_dump_contains_proc(mdb_tgt_t *t, void *context)
351 {
352 	kt_data_t *kt = t->t_data;
353 	pid_t (*f_pid)(uintptr_t);
354 	pid_t reqpid;
355 
356 	switch (kt->k_dumpcontent) {
357 	case KT_DUMPCONTENT_KERNEL:
358 		return (0);
359 	case KT_DUMPCONTENT_ALL:
360 		return (1);
361 	case KT_DUMPCONTENT_INVALID:
362 		goto procnotfound;
363 	default:
364 		f_pid = (pid_t (*)()) dlsym(RTLD_NEXT, "mdb_kproc_pid");
365 		if (f_pid == NULL)
366 			goto procnotfound;
367 
368 		reqpid = f_pid((uintptr_t)context);
369 		if (reqpid == -1)
370 			goto procnotfound;
371 
372 		return (kt->k_dumpcontent == reqpid);
373 	}
374 
375 procnotfound:
376 	warn("unable to determine whether dump contains proc %p\n", context);
377 	return (1);
378 }
379 
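/*
 * Switch the debugger context to the given proc address, or back to the
 * kernel if context is NULL.  For crash dumps we first check that the
 * dump actually contains pages for that proc, then create and activate a
 * kproc target layered on top of this one.
 */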
380 int
381 kt_setcontext(mdb_tgt_t *t, void *context)
382 {
383 	if (context != NULL) {
384 		const char *argv[2];
385 		int argc = 0;
386 		mdb_tgt_t *ct;
387 		kt_data_t *kt = t->t_data;
388 
389 		argv[argc++] = (const char *)context;
390 		argv[argc] = NULL;
391 
392 		if (kt->k_dumphdr != NULL &&
393 		    !kt_dump_contains_proc(t, context)) {
394 			warn("dump does not contain pages for proc %p\n",
395 			    context);
396 			return (-1);
397 		}
398 
399 		if ((ct = mdb_tgt_create(mdb_kproc_tgt_create,
400 		    t->t_flags, argc, argv)) == NULL)
401 			return (-1);
402 
403 		mdb_printf("debugger context set to proc %p\n", context);
404 		mdb_tgt_activate(ct);
405 	} else
406 		mdb_printf("debugger context set to kernel\n");
407 
408 	return (0);
409 }
410 
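/*
 * The stack, register and (on x86) per-CPU dcmds below simply forward to
 * the ISA-specific implementations, which are presumably installed in the
 * kt_data_t by the kt_*_init() routine invoked at target creation time.
 */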
411 static int
412 kt_stack(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
413 {
414 	kt_data_t *kt = mdb.m_target->t_data;
415 	return (kt->k_dcmd_stack(addr, flags, argc, argv));
416 }
417 
418 static int
419 kt_stackv(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
420 {
421 	kt_data_t *kt = mdb.m_target->t_data;
422 	return (kt->k_dcmd_stackv(addr, flags, argc, argv));
423 }
424 
425 static int
426 kt_stackr(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
427 {
428 	kt_data_t *kt = mdb.m_target->t_data;
429 	return (kt->k_dcmd_stackr(addr, flags, argc, argv));
430 }
431 
432 static int
433 kt_regs(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
434 {
435 	kt_data_t *kt = mdb.m_target->t_data;
436 
437 	if (argc != 0 || (flags & DCMD_ADDRSPEC))
438 		return (DCMD_USAGE);
439 
440 	addr = (uintptr_t)kt->k_regs;
441 
442 	return (kt->k_dcmd_regs(addr, flags, argc, argv));
443 }
444 
445 #ifdef __x86
446 static int
447 kt_cpustack(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
448 {
449 	kt_data_t *kt = mdb.m_target->t_data;
450 	return (kt->k_dcmd_cpustack(addr, flags, argc, argv));
451 }
452 
453 static int
454 kt_cpuregs(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
455 {
456 	kt_data_t *kt = mdb.m_target->t_data;
457 	return (kt->k_dcmd_cpuregs(addr, flags, argc, argv));
458 }
459 #endif /* __x86 */
460 
461 /*ARGSUSED*/
462 static int
463 kt_status_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
464 {
465 	kt_data_t *kt = mdb.m_target->t_data;
466 	struct utsname uts;
467 
468 	bzero(&uts, sizeof (uts));
469 	(void) strcpy(uts.nodename, "unknown machine");
470 	(void) kt_uname(mdb.m_target, &uts);
471 
472 	if (mdb_prop_postmortem) {
473 		mdb_printf("debugging %scrash dump %s (%d-bit) from %s\n",
474 		    kt->k_xpv_domu ? "domain " : "", kt->k_kvmfile,
475 		    (int)(sizeof (void *) * NBBY), uts.nodename);
476 	} else {
477 		mdb_printf("debugging live kernel (%d-bit) on %s\n",
478 		    (int)(sizeof (void *) * NBBY), uts.nodename);
479 	}
480 
481 	mdb_printf("operating system: %s %s (%s)\n",
482 	    uts.release, uts.version, uts.machine);
483 
484 	if (kt->k_dumphdr) {
485 		dumphdr_t *dh = kt->k_dumphdr;
486 
487 		mdb_printf("image uuid: %s\n", dh->dump_uuid[0] != '\0' ?
488 		    dh->dump_uuid : "(not set)");
489 		mdb_printf("panic message: %s\n", dh->dump_panicstring);
490 
491 		kt->k_dump_print_content(dh, kt->k_dumpcontent);
492 	} else {
493 		char uuid[37];
494 
495 		if (mdb_readsym(uuid, 37, "dump_osimage_uuid") == 37 &&
496 		    uuid[36] == '\0') {
497 			mdb_printf("image uuid: %s\n", uuid);
498 		}
499 	}
500 
501 	return (DCMD_OK);
502 }
503 
504 static const mdb_dcmd_t kt_dcmds[] = {
505 	{ "$c", "?[cnt]", "print stack backtrace", kt_stack },
506 	{ "$C", "?[cnt]", "print stack backtrace", kt_stackv },
507 	{ "$r", NULL, "print general-purpose registers", kt_regs },
508 	{ "$?", NULL, "print status and registers", kt_regs },
509 	{ "regs", NULL, "print general-purpose registers", kt_regs },
510 	{ "stack", "?[cnt]", "print stack backtrace", kt_stack },
511 	{ "stackregs", "?", "print stack backtrace and registers", kt_stackr },
512 #ifdef __x86
513 	{ "cpustack", "?[-v] [-c cpuid] [cnt]", "print stack backtrace for a "
514 	    "specific CPU", kt_cpustack },
515 	{ "cpuregs", "?[-c cpuid]", "print general-purpose registers for a "
516 	    "specific CPU", kt_cpuregs },
517 #endif
518 	{ "status", NULL, "print summary of current target", kt_status_dcmd },
519 	{ NULL }
520 };
521 
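/*
 * Discipline "get" routine for the exported register variables: each read
 * of such a variable fetches the current value of the corresponding
 * register for the representative thread (k_tid).
 */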
522 static uintmax_t
523 reg_disc_get(const mdb_var_t *v)
524 {
525 	mdb_tgt_t *t = MDB_NV_COOKIE(v);
526 	kt_data_t *kt = t->t_data;
527 	mdb_tgt_reg_t r = 0;
528 
529 	(void) mdb_tgt_getareg(t, kt->k_tid, mdb_nv_get_name(v), &r);
530 	return (r);
531 }
532 
533 static kt_module_t *
534 kt_module_by_name(kt_data_t *kt, const char *name)
535 {
536 	kt_module_t *km;
537 
538 	for (km = mdb_list_next(&kt->k_modlist); km; km = mdb_list_next(km)) {
539 		if (strcmp(name, km->km_name) == 0)
540 			return (km);
541 	}
542 
543 	return (NULL);
544 }
545 
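/*
 * Activate the kvm target: set the global debugger properties, do the
 * one-time work of loading mdb_ks, discovering the kernel's module list
 * and locating the dump-content helpers, and then register our dcmds and
 * export the machine registers as named variables.
 */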
546 void
547 kt_activate(mdb_tgt_t *t)
548 {
549 	static const mdb_nv_disc_t reg_disc = { NULL, reg_disc_get };
550 	kt_data_t *kt = t->t_data;
551 	void *sym;
552 
553 	int oflag;
554 
555 	mdb_prop_postmortem = kt->k_xpv_domu || (kt->k_dumphdr != NULL);
556 	mdb_prop_kernel = TRUE;
557 	mdb_prop_datamodel = MDB_TGT_MODEL_NATIVE;
558 
559 	if (kt->k_activated == FALSE) {
560 		struct utsname u1, u2;
561 		/*
562 		 * If we're examining a crash dump, root is /, and uname(2)
563 		 * does not match the utsname in the dump, issue a warning.
564 		 * Note that we are assuming that the modules and macros in
565 		 * /usr/lib are compiled against the kernel from uname -rv.
566 		 */
567 		if (mdb_prop_postmortem && strcmp(mdb.m_root, "/") == 0 &&
568 		    uname(&u1) >= 0 && kt_uname(t, &u2) >= 0 &&
569 		    (strcmp(u1.release, u2.release) ||
570 		    strcmp(u1.version, u2.version))) {
571 			mdb_warn("warning: dump is from %s %s %s; dcmds and "
572 			    "macros may not match kernel implementation\n",
573 			    u2.sysname, u2.release, u2.version);
574 		}
575 
576 		if (mdb_module_load(KT_MODULE, MDB_MOD_GLOBAL) < 0) {
577 			warn("failed to load kernel support module -- "
578 			    "some modules may not load\n");
579 		}
580 
581 		if (mdb_prop_postmortem && kt->k_dumphdr != NULL) {
582 			sym = dlsym(RTLD_NEXT, "mdb_dump_print_content");
583 			if (sym != NULL)
584 				kt->k_dump_print_content = (void (*)())sym;
585 
586 			sym = dlsym(RTLD_NEXT, "mdb_dump_find_curproc");
587 			if (sym != NULL)
588 				kt->k_dump_find_curproc = (int (*)())sym;
589 
590 			kt->k_dumpcontent = kt_find_dump_contents(kt);
591 		}
592 
593 		if (t->t_flags & MDB_TGT_F_PRELOAD) {
594 			oflag = mdb_iob_getflags(mdb.m_out) & MDB_IOB_PGENABLE;
595 
596 			mdb_iob_clrflags(mdb.m_out, oflag);
597 			mdb_iob_puts(mdb.m_out, "Preloading module symbols: [");
598 			mdb_iob_flush(mdb.m_out);
599 		}
600 
601 		if (!(t->t_flags & MDB_TGT_F_NOLOAD)) {
602 			kt_load_modules(kt, t);
603 
604 			/*
605 			 * Determine where the CTF data for krtld is. If krtld
606 			 * is rolled into unix, force load the MDB krtld
607 			 * module.
608 			 */
609 			kt->k_rtld_name = "krtld";
610 
611 			if (kt_module_by_name(kt, "krtld") == NULL) {
612 				(void) mdb_module_load("krtld", MDB_MOD_SILENT);
613 				kt->k_rtld_name = "unix";
614 			}
615 		}
616 
617 
618 		if (t->t_flags & MDB_TGT_F_PRELOAD) {
619 			mdb_iob_puts(mdb.m_out, " ]\n");
620 			mdb_iob_setflags(mdb.m_out, oflag);
621 		}
622 
623 		kt->k_activated = TRUE;
624 	}
625 
626 	(void) mdb_tgt_register_dcmds(t, &kt_dcmds[0], MDB_MOD_FORCE);
627 
628 	/* Export some of our registers as named variables */
629 	mdb_tgt_register_regvars(t, kt->k_rds, &reg_disc, MDB_NV_RDONLY);
630 
631 	mdb_tgt_elf_export(kt->k_file);
632 }
633 
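/*
 * Deactivation undoes the per-activation work: the exported register
 * variables and target dcmds are removed and the global properties reset.
 */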
634 void
635 kt_deactivate(mdb_tgt_t *t)
636 {
637 	kt_data_t *kt = t->t_data;
638 
639 	const mdb_tgt_regdesc_t *rdp;
640 	const mdb_dcmd_t *dcp;
641 
642 	for (rdp = kt->k_rds; rdp->rd_name != NULL; rdp++) {
643 		mdb_var_t *v;
644 
645 		if (!(rdp->rd_flags & MDB_TGT_R_EXPORT))
646 			continue; /* Didn't export register as a variable */
647 
648 		if ((v = mdb_nv_lookup(&mdb.m_nv, rdp->rd_name)) != NULL) {
649 			v->v_flags &= ~MDB_NV_PERSIST;
650 			mdb_nv_remove(&mdb.m_nv, v);
651 		}
652 	}
653 
654 	for (dcp = &kt_dcmds[0]; dcp->dc_name != NULL; dcp++) {
655 		if (mdb_module_remove_dcmd(t->t_module, dcp->dc_name) == -1)
656 			warn("failed to remove dcmd %s", dcp->dc_name);
657 	}
658 
659 	mdb_prop_postmortem = FALSE;
660 	mdb_prop_kernel = FALSE;
661 	mdb_prop_datamodel = MDB_TGT_MODEL_UNKNOWN;
662 }
663 
664 /*ARGSUSED*/
665 const char *
666 kt_name(mdb_tgt_t *t)
667 {
668 	return ("kvm");
669 }
670 
671 const char *
672 kt_platform(mdb_tgt_t *t)
673 {
674 	kt_data_t *kt = t->t_data;
675 	return (kt->k_platform);
676 }
677 
678 int
679 kt_uname(mdb_tgt_t *t, struct utsname *utsp)
680 {
681 	return (mdb_tgt_readsym(t, MDB_TGT_AS_VIRT, utsp,
682 	    sizeof (struct utsname), MDB_TGT_OBJ_EXEC, "utsname"));
683 }
684 
685 /*ARGSUSED*/
686 int
687 kt_dmodel(mdb_tgt_t *t)
688 {
689 	return (MDB_TGT_MODEL_NATIVE);
690 }
691 
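/*
 * The read and write entry points below are thin wrappers around the
 * backend's address-space, kernel-virtual and physical I/O routines; a
 * failed transfer is reported to the caller as EMDB_NOMAP.
 */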
692 ssize_t
693 kt_aread(mdb_tgt_t *t, mdb_tgt_as_t as, void *buf,
694     size_t nbytes, mdb_tgt_addr_t addr)
695 {
696 	kt_data_t *kt = t->t_data;
697 	ssize_t rval;
698 
699 	if ((rval = kt->k_kb_ops->kb_aread(kt->k_cookie, addr, buf,
700 	    nbytes, as)) == -1)
701 		return (set_errno(EMDB_NOMAP));
702 
703 	return (rval);
704 }
705 
706 ssize_t
707 kt_awrite(mdb_tgt_t *t, mdb_tgt_as_t as, const void *buf,
708     size_t nbytes, mdb_tgt_addr_t addr)
709 {
710 	kt_data_t *kt = t->t_data;
711 	ssize_t rval;
712 
713 	if ((rval = kt->k_kb_ops->kb_awrite(kt->k_cookie, addr, buf,
714 	    nbytes, as)) == -1)
715 		return (set_errno(EMDB_NOMAP));
716 
717 	return (rval);
718 }
719 
720 ssize_t
721 kt_vread(mdb_tgt_t *t, void *buf, size_t nbytes, uintptr_t addr)
722 {
723 	kt_data_t *kt = t->t_data;
724 	ssize_t rval;
725 
726 	if ((rval = kt->k_kb_ops->kb_kread(kt->k_cookie, addr, buf,
727 	    nbytes)) == -1)
728 		return (set_errno(EMDB_NOMAP));
729 
730 	return (rval);
731 }
732 
733 ssize_t
734 kt_vwrite(mdb_tgt_t *t, const void *buf, size_t nbytes, uintptr_t addr)
735 {
736 	kt_data_t *kt = t->t_data;
737 	ssize_t rval;
738 
739 	if ((rval = kt->k_kb_ops->kb_kwrite(kt->k_cookie, addr, buf,
740 	    nbytes)) == -1)
741 		return (set_errno(EMDB_NOMAP));
742 
743 	return (rval);
744 }
745 
746 ssize_t
747 kt_fread(mdb_tgt_t *t, void *buf, size_t nbytes, uintptr_t addr)
748 {
749 	return (kt_vread(t, buf, nbytes, addr));
750 }
751 
752 ssize_t
753 kt_fwrite(mdb_tgt_t *t, const void *buf, size_t nbytes, uintptr_t addr)
754 {
755 	return (kt_vwrite(t, buf, nbytes, addr));
756 }
757 
758 ssize_t
759 kt_pread(mdb_tgt_t *t, void *buf, size_t nbytes, physaddr_t addr)
760 {
761 	kt_data_t *kt = t->t_data;
762 	ssize_t rval;
763 
764 	if ((rval = kt->k_kb_ops->kb_pread(kt->k_cookie, addr, buf,
765 	    nbytes)) == -1)
766 		return (set_errno(EMDB_NOMAP));
767 
768 	return (rval);
769 }
770 
771 ssize_t
772 kt_pwrite(mdb_tgt_t *t, const void *buf, size_t nbytes, physaddr_t addr)
773 {
774 	kt_data_t *kt = t->t_data;
775 	ssize_t rval;
776 
777 	if ((rval = kt->k_kb_ops->kb_pwrite(kt->k_cookie, addr, buf,
778 	    nbytes)) == -1)
779 		return (set_errno(EMDB_NOMAP));
780 
781 	return (rval);
782 }
783 
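/*
 * Translate a virtual address to a physical address.  We first ask the
 * backend to perform the translation against the appropriate address
 * space (kas for MDB_TGT_AS_VIRT); if that fails, we fall back to a
 * platform_vtop() routine, if the unix debugger module exports one.
 */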
784 int
785 kt_vtop(mdb_tgt_t *t, mdb_tgt_as_t as, uintptr_t va, physaddr_t *pap)
786 {
787 	kt_data_t *kt = t->t_data;
788 
789 	struct as *asp;
790 	physaddr_t pa;
791 	mdb_module_t *mod;
792 	mdb_var_t *v;
793 	int (*fptr)(uintptr_t, struct as *, physaddr_t *);
794 
795 	switch ((uintptr_t)as) {
796 	case (uintptr_t)MDB_TGT_AS_PHYS:
797 	case (uintptr_t)MDB_TGT_AS_FILE:
798 	case (uintptr_t)MDB_TGT_AS_IO:
799 		return (set_errno(EINVAL));
800 	case (uintptr_t)MDB_TGT_AS_VIRT:
801 		asp = kt->k_as;
802 		break;
803 	default:
804 		asp = (struct as *)as;
805 	}
806 
807 	if ((pa = kt->k_kb_ops->kb_vtop(kt->k_cookie, asp, va)) != -1ULL) {
808 		*pap = pa;
809 		return (0);
810 	}
811 
812 	if ((v = mdb_nv_lookup(&mdb.m_modules, "unix")) != NULL &&
813 	    (mod = mdb_nv_get_cookie(v)) != NULL) {
814 
815 		fptr = (int (*)(uintptr_t, struct as *, physaddr_t *))
816 		    dlsym(mod->mod_hdl, "platform_vtop");
817 
818 		if ((fptr != NULL) && ((*fptr)(va, asp, pap) == 0))
819 			return (0);
820 	}
821 
822 	return (set_errno(EMDB_NOMAP));
823 }
824 
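/*
 * Look up a symbol by name.  MDB_TGT_OBJ_EXEC refers to the kernel's own
 * symbol table, MDB_TGT_OBJ_EVERY searches the kernel followed by every
 * loaded module, and any other object name selects a single module whose
 * symbol table is loaded on demand.
 */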
825 int
826 kt_lookup_by_name(mdb_tgt_t *t, const char *obj, const char *name,
827     GElf_Sym *symp, mdb_syminfo_t *sip)
828 {
829 	kt_data_t *kt = t->t_data;
830 	kt_module_t *km, kmod;
831 	mdb_var_t *v;
832 	int n;
833 
834 	/*
835 	 * To simplify the implementation, we create a fake module on the stack
836 	 * which is "prepended" to k_modlist and whose symtab is kt->k_symtab.
837 	 */
838 	kmod.km_symtab = kt->k_symtab;
839 	kmod.km_list.ml_next = mdb_list_next(&kt->k_modlist);
840 
841 	switch ((uintptr_t)obj) {
842 	case (uintptr_t)MDB_TGT_OBJ_EXEC:
843 		km = &kmod;
844 		n = 1;
845 		break;
846 
847 	case (uintptr_t)MDB_TGT_OBJ_EVERY:
848 		km = &kmod;
849 		n = mdb_nv_size(&kt->k_modules) + 1;
850 		break;
851 
852 	case (uintptr_t)MDB_TGT_OBJ_RTLD:
853 		obj = kt->k_rtld_name;
854 		/*FALLTHRU*/
855 
856 	default:
857 		if ((v = mdb_nv_lookup(&kt->k_modules, obj)) == NULL)
858 			return (set_errno(EMDB_NOOBJ));
859 
860 		km = mdb_nv_get_cookie(v);
861 		n = 1;
862 
863 		if (km->km_symtab == NULL)
864 			kt_load_module(kt, t, km);
865 	}
866 
867 	for (; n > 0; n--, km = mdb_list_next(km)) {
868 		if (mdb_gelf_symtab_lookup_by_name(km->km_symtab, name,
869 		    symp, &sip->sym_id) == 0) {
870 			sip->sym_table = MDB_TGT_SYMTAB;
871 			return (0);
872 		}
873 	}
874 
875 	return (set_errno(EMDB_NOSYM));
876 }
877 
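/*
 * Find the symbol that contains, or is closest to, the given address.  We
 * consult the private symbol table, the kernel's .symtab and .dynsym, and
 * then each module in turn, instantiating per-module symbol tables on
 * demand when the address falls inside a module's text.
 */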
878 int
879 kt_lookup_by_addr(mdb_tgt_t *t, uintptr_t addr, uint_t flags,
880     char *buf, size_t nbytes, GElf_Sym *symp, mdb_syminfo_t *sip)
881 {
882 	kt_data_t *kt = t->t_data;
883 	kt_module_t kmods[3], *kmods_begin = &kmods[0], *kmods_end;
884 	const char *name;
885 
886 	kt_module_t *km = &kmods[0];	/* Point km at first fake module */
887 	kt_module_t *sym_km = NULL;	/* Module associated with best sym */
888 	GElf_Sym sym;			/* Best symbol found so far if !exact */
889 	uint_t symid;			/* ID of best symbol found so far */
890 
891 	/*
892 	 * To simplify the implementation, we create fake modules on the stack
893 	 * that are "prepended" to k_modlist and whose symtab is set to
894 	 * each of three special symbol tables, in order of precedence.
895 	 */
896 	km->km_symtab = mdb.m_prsym;
897 
898 	if (kt->k_symtab != NULL) {
899 		km->km_list.ml_next = (mdb_list_t *)(km + 1);
900 		km = mdb_list_next(km);
901 		km->km_symtab = kt->k_symtab;
902 	}
903 
904 	if (kt->k_dynsym != NULL) {
905 		km->km_list.ml_next = (mdb_list_t *)(km + 1);
906 		km = mdb_list_next(km);
907 		km->km_symtab = kt->k_dynsym;
908 	}
909 
910 	km->km_list.ml_next = mdb_list_next(&kt->k_modlist);
911 	kmods_end = km;
912 
913 	/*
914 	 * Now iterate over the list of fake and real modules.  If the module
915 	 * has no symbol table and the address is in the text section,
916 	 * instantiate the module's symbol table.  In exact mode, we can
917 	 * jump to 'found' immediately if we match.  Otherwise we continue
918 	 * looking and improve our choice if we find a closer symbol.
919 	 */
920 	for (km = &kmods[0]; km != NULL; km = mdb_list_next(km)) {
921 		if (km->km_symtab == NULL && addr >= km->km_text_va &&
922 		    addr < km->km_text_va + km->km_text_size)
923 			kt_load_module(kt, t, km);
924 
925 		if (mdb_gelf_symtab_lookup_by_addr(km->km_symtab, addr,
926 		    flags, buf, nbytes, symp, &sip->sym_id) != 0 ||
927 		    symp->st_value == 0)
928 			continue;
929 
930 		if (flags & MDB_TGT_SYM_EXACT) {
931 			sym_km = km;
932 			goto found;
933 		}
934 
935 		if (sym_km == NULL || mdb_gelf_sym_closer(symp, &sym, addr)) {
936 			sym_km = km;
937 			sym = *symp;
938 			symid = sip->sym_id;
939 		}
940 	}
941 
942 	if (sym_km == NULL)
943 		return (set_errno(EMDB_NOSYMADDR));
944 
945 	*symp = sym; /* Copy our best symbol into the caller's symbol */
946 	sip->sym_id = symid;
947 found:
948 	/*
949 	 * Once we've found something, copy the final name into the caller's
950 	 * buffer and prefix it with the load object name if appropriate.
951 	 */
952 	if (sym_km != NULL) {
953 		name = mdb_gelf_sym_name(sym_km->km_symtab, symp);
954 
955 		if (sym_km < kmods_begin || sym_km > kmods_end) {
956 			(void) mdb_snprintf(buf, nbytes, "%s`%s",
957 			    sym_km->km_name, name);
958 		} else if (nbytes > 0) {
959 			(void) strncpy(buf, name, nbytes);
960 			buf[nbytes - 1] = '\0';
961 		}
962 
963 		if (sym_km->km_symtab == mdb.m_prsym)
964 			sip->sym_table = MDB_TGT_PRVSYM;
965 		else
966 			sip->sym_table = MDB_TGT_SYMTAB;
967 	} else {
968 		sip->sym_table = MDB_TGT_SYMTAB;
969 	}
970 
971 	return (0);
972 }
973 
974 static int
975 kt_symtab_func(void *data, const GElf_Sym *sym, const char *name, uint_t id)
976 {
977 	kt_symarg_t *argp = data;
978 
979 	if (mdb_tgt_sym_match(sym, argp->sym_type)) {
980 		argp->sym_info.sym_id = id;
981 
982 		return (argp->sym_cb(argp->sym_data, sym, name,
983 		    &argp->sym_info, argp->sym_obj));
984 	}
985 
986 	return (0);
987 }
988 
989 static void
990 kt_symtab_iter(mdb_gelf_symtab_t *gst, uint_t type, const char *obj,
991     mdb_tgt_sym_f *cb, void *p)
992 {
993 	kt_symarg_t arg;
994 
995 	arg.sym_cb = cb;
996 	arg.sym_data = p;
997 	arg.sym_type = type;
998 	arg.sym_info.sym_table = gst->gst_tabid;
999 	arg.sym_obj = obj;
1000 
1001 	mdb_gelf_symtab_iter(gst, kt_symtab_func, &arg);
1002 }
1003 
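/*
 * Iterate over the symbols of the given object, applying the type filter
 * and invoking the caller's callback for each match.  MDB_TGT_OBJ_EVERY
 * covers the kernel plus every loaded module, loading per-module symbol
 * tables on demand.
 */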
1004 int
1005 kt_symbol_iter(mdb_tgt_t *t, const char *obj, uint_t which, uint_t type,
1006     mdb_tgt_sym_f *cb, void *data)
1007 {
1008 	kt_data_t *kt = t->t_data;
1009 	kt_module_t *km;
1010 
1011 	mdb_gelf_symtab_t *symtab = NULL;
1012 	mdb_var_t *v;
1013 
1014 	switch ((uintptr_t)obj) {
1015 	case (uintptr_t)MDB_TGT_OBJ_EXEC:
1016 		if (which == MDB_TGT_SYMTAB)
1017 			symtab = kt->k_symtab;
1018 		else
1019 			symtab = kt->k_dynsym;
1020 		break;
1021 
1022 	case (uintptr_t)MDB_TGT_OBJ_EVERY:
1023 		if (which == MDB_TGT_DYNSYM) {
1024 			symtab = kt->k_dynsym;
1025 			obj = MDB_TGT_OBJ_EXEC;
1026 			break;
1027 		}
1028 
1029 		mdb_nv_rewind(&kt->k_modules);
1030 		while ((v = mdb_nv_advance(&kt->k_modules)) != NULL) {
1031 			km = mdb_nv_get_cookie(v);
1032 
1033 			if (km->km_symtab == NULL)
1034 				kt_load_module(kt, t, km);
1035 
1036 			if (km->km_symtab != NULL)
1037 				kt_symtab_iter(km->km_symtab, type,
1038 				    km->km_name, cb, data);
1039 		}
1040 		break;
1041 
1042 	case (uintptr_t)MDB_TGT_OBJ_RTLD:
1043 		obj = kt->k_rtld_name;
1044 		/*FALLTHRU*/
1045 
1046 	default:
1047 		v = mdb_nv_lookup(&kt->k_modules, obj);
1048 
1049 		if (v == NULL)
1050 			return (set_errno(EMDB_NOOBJ));
1051 
1052 		km = mdb_nv_get_cookie(v);
1053 
1054 		if (km->km_symtab == NULL)
1055 			kt_load_module(kt, t, km);
1056 
1057 		symtab = km->km_symtab;
1058 	}
1059 
1060 	if (symtab)
1061 		kt_symtab_iter(symtab, type, obj, cb, data);
1062 
1063 	return (0);
1064 }
1065 
1066 static int
1067 kt_mapping_walk(uintptr_t addr, const void *data, kt_maparg_t *marg)
1068 {
1069 	/*
1070 	 * This is a bit sketchy but avoids problematic compilation of this
1071 	 * target against the current VM implementation.  Now that we have
1072 	 * vmem, we can make this less broken and more informative by changing
1073 	 * this code to invoke the vmem walker in the near future.
1074 	 */
1075 	const struct kt_seg {
1076 		caddr_t s_base;
1077 		size_t s_size;
1078 	} *segp = (const struct kt_seg *)data;
1079 
1080 	mdb_map_t map;
1081 	GElf_Sym sym;
1082 	mdb_syminfo_t info;
1083 
1084 	map.map_base = (uintptr_t)segp->s_base;
1085 	map.map_size = segp->s_size;
1086 	map.map_flags = MDB_TGT_MAP_R | MDB_TGT_MAP_W | MDB_TGT_MAP_X;
1087 
1088 	if (kt_lookup_by_addr(marg->map_target, addr, MDB_TGT_SYM_EXACT,
1089 	    map.map_name, MDB_TGT_MAPSZ, &sym, &info) == -1) {
1090 
1091 		(void) mdb_iob_snprintf(map.map_name, MDB_TGT_MAPSZ,
1092 		    "%lr", addr);
1093 	}
1094 
1095 	return (marg->map_cb(marg->map_data, &map, map.map_name));
1096 }
1097 
1098 int
1099 kt_mapping_iter(mdb_tgt_t *t, mdb_tgt_map_f *func, void *private)
1100 {
1101 	kt_data_t *kt = t->t_data;
1102 	kt_maparg_t m;
1103 
1104 	m.map_target = t;
1105 	m.map_cb = func;
1106 	m.map_data = private;
1107 
1108 	return (mdb_pwalk("seg", (mdb_walk_cb_t)kt_mapping_walk, &m,
1109 	    (uintptr_t)kt->k_as));
1110 }
1111 
1112 static const mdb_map_t *
1113 kt_module_to_map(kt_module_t *km, mdb_map_t *map)
1114 {
1115 	(void) strncpy(map->map_name, km->km_name, MDB_TGT_MAPSZ);
1116 	map->map_name[MDB_TGT_MAPSZ - 1] = '\0';
1117 	map->map_base = km->km_text_va;
1118 	map->map_size = km->km_text_size;
1119 	map->map_flags = MDB_TGT_MAP_R | MDB_TGT_MAP_W | MDB_TGT_MAP_X;
1120 
1121 	return (map);
1122 }
1123 
1124 int
1125 kt_object_iter(mdb_tgt_t *t, mdb_tgt_map_f *func, void *private)
1126 {
1127 	kt_data_t *kt = t->t_data;
1128 	kt_module_t *km;
1129 	mdb_map_t m;
1130 
1131 	for (km = mdb_list_next(&kt->k_modlist); km; km = mdb_list_next(km)) {
1132 		if (func(private, kt_module_to_map(km, &m), km->km_name) == -1)
1133 			break;
1134 	}
1135 
1136 	return (0);
1137 }
1138 
1139 const mdb_map_t *
1140 kt_addr_to_map(mdb_tgt_t *t, uintptr_t addr)
1141 {
1142 	kt_data_t *kt = t->t_data;
1143 	kt_module_t *km;
1144 
1145 	for (km = mdb_list_next(&kt->k_modlist); km; km = mdb_list_next(km)) {
1146 		if (addr - km->km_text_va < km->km_text_size ||
1147 		    addr - km->km_data_va < km->km_data_size ||
1148 		    addr - km->km_bss_va < km->km_bss_size)
1149 			return (kt_module_to_map(km, &kt->k_map));
1150 	}
1151 
1152 	(void) set_errno(EMDB_NOMAP);
1153 	return (NULL);
1154 }
1155 
1156 const mdb_map_t *
1157 kt_name_to_map(mdb_tgt_t *t, const char *name)
1158 {
1159 	kt_data_t *kt = t->t_data;
1160 	kt_module_t *km;
1161 	mdb_map_t m;
1162 
1163 	/*
1164 	 * If name is MDB_TGT_OBJ_EXEC, return the first module on the list,
1165 	 * which will be unix since we keep k_modlist in load order.
1166 	 */
1167 	if (name == MDB_TGT_OBJ_EXEC)
1168 		return (kt_module_to_map(mdb_list_next(&kt->k_modlist), &m));
1169 
1170 	if (name == MDB_TGT_OBJ_RTLD)
1171 		name = kt->k_rtld_name;
1172 
1173 	if ((km = kt_module_by_name(kt, name)) != NULL)
1174 		return (kt_module_to_map(km, &m));
1175 
1176 	(void) set_errno(EMDB_NOOBJ);
1177 	return (NULL);
1178 }
1179 
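/*
 * Load and cache the CTF data for a module.  The raw CTF section is read
 * out of the kernel's copy of the module and opened against the module's
 * symbol and string tables; if the container has a parent (typically
 * genunix), the parent's types are imported as well.
 */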
1180 static ctf_file_t *
1181 kt_load_ctfdata(mdb_tgt_t *t, kt_module_t *km)
1182 {
1183 	kt_data_t *kt = t->t_data;
1184 	int err;
1185 
1186 	if (km->km_ctfp != NULL)
1187 		return (km->km_ctfp);
1188 
1189 	if (km->km_ctf_va == 0) {
1190 		(void) set_errno(EMDB_NOCTF);
1191 		return (NULL);
1192 	}
1193 
1194 	if (km->km_symtab == NULL)
1195 		kt_load_module(t->t_data, t, km);
1196 
1197 	if ((km->km_ctf_buf = mdb_alloc(km->km_ctf_size, UM_NOSLEEP)) == NULL) {
1198 		warn("failed to allocate memory to load %s debugging "
1199 		    "information", km->km_name);
1200 		return (NULL);
1201 	}
1202 
1203 	if (mdb_tgt_vread(t, km->km_ctf_buf, km->km_ctf_size,
1204 	    km->km_ctf_va) != km->km_ctf_size) {
1205 		warn("failed to read %lu bytes of debug data for %s at %p",
1206 		    (ulong_t)km->km_ctf_size, km->km_name,
1207 		    (void *)km->km_ctf_va);
1208 		mdb_free(km->km_ctf_buf, km->km_ctf_size);
1209 		km->km_ctf_buf = NULL;
1210 		return (NULL);
1211 	}
1212 
1213 	if ((km->km_ctfp = mdb_ctf_bufopen((const void *)km->km_ctf_buf,
1214 	    km->km_ctf_size, km->km_symbuf, &km->km_symtab_hdr,
1215 	    km->km_strtab, &km->km_strtab_hdr, &err)) == NULL) {
1216 		mdb_free(km->km_ctf_buf, km->km_ctf_size);
1217 		km->km_ctf_buf = NULL;
1218 		(void) set_errno(ctf_to_errno(err));
1219 		return (NULL);
1220 	}
1221 
1222 	mdb_dprintf(MDB_DBG_KMOD, "loaded %lu bytes of CTF data for %s\n",
1223 	    (ulong_t)km->km_ctf_size, km->km_name);
1224 
1225 	if (ctf_parent_name(km->km_ctfp) != NULL) {
1226 		mdb_var_t *v;
1227 
1228 		if ((v = mdb_nv_lookup(&kt->k_modules,
1229 		    ctf_parent_name(km->km_ctfp))) == NULL) {
1230 			warn("failed to load CTF data for %s - parent %s not "
1231 			    "loaded\n", km->km_name,
1232 			    ctf_parent_name(km->km_ctfp));
1233 		}
1234 
1235 		if (v != NULL) {
1236 			kt_module_t *pm = mdb_nv_get_cookie(v);
1237 
1238 			if (pm->km_ctfp == NULL)
1239 				(void) kt_load_ctfdata(t, pm);
1240 
1241 			if (pm->km_ctfp != NULL && ctf_import(km->km_ctfp,
1242 			    pm->km_ctfp) == CTF_ERR) {
1243 				warn("failed to import parent types into "
1244 				    "%s: %s\n", km->km_name,
1245 				    ctf_errmsg(ctf_errno(km->km_ctfp)));
1246 			}
1247 		}
1248 	}
1249 
1250 	return (km->km_ctfp);
1251 }
1252 
1253 ctf_file_t *
1254 kt_addr_to_ctf(mdb_tgt_t *t, uintptr_t addr)
1255 {
1256 	kt_data_t *kt = t->t_data;
1257 	kt_module_t *km;
1258 
1259 	for (km = mdb_list_next(&kt->k_modlist); km; km = mdb_list_next(km)) {
1260 		if (addr - km->km_text_va < km->km_text_size ||
1261 		    addr - km->km_data_va < km->km_data_size ||
1262 		    addr - km->km_bss_va < km->km_bss_size)
1263 			return (kt_load_ctfdata(t, km));
1264 	}
1265 
1266 	(void) set_errno(EMDB_NOMAP);
1267 	return (NULL);
1268 }
1269 
1270 ctf_file_t *
1271 kt_name_to_ctf(mdb_tgt_t *t, const char *name)
1272 {
1273 	kt_data_t *kt = t->t_data;
1274 	kt_module_t *km;
1275 
1276 	if (name == MDB_TGT_OBJ_EXEC)
1277 		name = KT_CTFPARENT;
1278 	else if (name == MDB_TGT_OBJ_RTLD)
1279 		name = kt->k_rtld_name;
1280 
1281 	if ((km = kt_module_by_name(kt, name)) != NULL)
1282 		return (kt_load_ctfdata(t, km));
1283 
1284 	(void) set_errno(EMDB_NOOBJ);
1285 	return (NULL);
1286 }
1287 
1288 /*ARGSUSED*/
1289 int
1290 kt_status(mdb_tgt_t *t, mdb_tgt_status_t *tsp)
1291 {
1292 	kt_data_t *kt = t->t_data;
1293 	bzero(tsp, sizeof (mdb_tgt_status_t));
1294 	tsp->st_state = (kt->k_xpv_domu || (kt->k_dumphdr != NULL)) ?
1295 	    MDB_TGT_DEAD : MDB_TGT_RUNNING;
1296 	return (0);
1297 }
1298 
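/*
 * xdata callback for the "dumphdr" datum: a query with a NULL buffer and
 * zero size reports the size of the dump header; otherwise we copy out as
 * much of the cached dumphdr_t as will fit, failing with ENODATA if there
 * is no dump header.
 */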
1299 static ssize_t
1300 kt_xd_dumphdr(mdb_tgt_t *t, void *buf, size_t nbytes)
1301 {
1302 	kt_data_t *kt = t->t_data;
1303 
1304 	if (buf == NULL && nbytes == 0)
1305 		return (sizeof (dumphdr_t));
1306 
1307 	if (kt->k_dumphdr == NULL)
1308 		return (set_errno(ENODATA));
1309 
1310 	nbytes = MIN(nbytes, sizeof (dumphdr_t));
1311 	bcopy(kt->k_dumphdr, buf, nbytes);
1312 
1313 	return (nbytes);
1314 }
1315 
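/*
 * Tear down the target: unload mdb_ks, free the register and symbol table
 * state, close the backend, and release every per-module structure along
 * with its symbol and CTF data.
 */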
1316 void
1317 kt_destroy(mdb_tgt_t *t)
1318 {
1319 	kt_data_t *kt = t->t_data;
1320 	kt_module_t *km, *nkm;
1321 
1322 	(void) mdb_module_unload(KT_MODULE, 0);
1323 
1324 	if (kt->k_regs != NULL)
1325 		mdb_free(kt->k_regs, kt->k_regsize);
1326 
1327 	if (kt->k_symtab != NULL)
1328 		mdb_gelf_symtab_destroy(kt->k_symtab);
1329 
1330 	if (kt->k_dynsym != NULL)
1331 		mdb_gelf_symtab_destroy(kt->k_dynsym);
1332 
1333 	if (kt->k_dumphdr != NULL)
1334 		mdb_free(kt->k_dumphdr, sizeof (dumphdr_t));
1335 
1336 	mdb_gelf_destroy(kt->k_file);
1337 
1338 	(void) kt->k_kb_ops->kb_close(kt->k_cookie);
1339 
1340 	for (km = mdb_list_next(&kt->k_modlist); km; km = nkm) {
1341 		if (km->km_symtab)
1342 			mdb_gelf_symtab_destroy(km->km_symtab);
1343 
1344 		if (km->km_data)
1345 			mdb_free(km->km_data, km->km_datasz);
1346 
1347 		if (km->km_ctfp)
1348 			ctf_close(km->km_ctfp);
1349 
1350 		if (km->km_ctf_buf != NULL)
1351 			mdb_free(km->km_ctf_buf, km->km_ctf_size);
1352 
1353 		nkm = mdb_list_next(km);
1354 		strfree(km->km_name);
1355 		mdb_free(km, sizeof (kt_module_t));
1356 	}
1357 
1358 	mdb_nv_destroy(&kt->k_modules);
1359 
1360 	strfree(kt->k_kvmfile);
1361 	if (kt->k_symfile != NULL)
1362 		strfree(kt->k_symfile);
1363 
1364 	mdb_free(kt, sizeof (kt_data_t));
1365 }
1366 
1367 static int
1368 kt_data_stub(void)
1369 {
1370 	return (-1);
1371 }
1372 
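/*
 * Construct the kvm target.  With two arguments we open the given symbol
 * and kvm files through libkvm; with one argument (x86 only) the file is
 * treated as a hypervisor domain dump and handed to the mdb_kb backend.
 * We then build the ELF symbol tables, locate kas, platform and utsname,
 * perform ISA-specific initialization, and, for crash dumps, read and
 * export the dump header.
 */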
1373 int
1374 mdb_kvm_tgt_create(mdb_tgt_t *t, int argc, const char *argv[])
1375 {
1376 	kt_data_t *kt = mdb_zalloc(sizeof (kt_data_t), UM_SLEEP);
1377 	mdb_kb_ops_t *kvm_kb_ops = libkvm_kb_ops();
1378 	int oflag = (t->t_flags & MDB_TGT_F_RDWR) ? O_RDWR : O_RDONLY;
1379 	struct utsname uts;
1380 	GElf_Sym sym;
1381 	pgcnt_t pmem;
1382 
1383 
1384 	if (argc == 2) {
1385 		kt->k_symfile = strdup(argv[0]);
1386 		kt->k_kvmfile = strdup(argv[1]);
1387 
1388 		kt->k_cookie = kvm_kb_ops->kb_open(kt->k_symfile,
1389 		    kt->k_kvmfile, NULL, oflag, (char *)mdb.m_pname);
1390 
1391 		if (kt->k_cookie == NULL)
1392 			goto err;
1393 
1394 		kt->k_xpv_domu = 0;
1395 		kt->k_kb_ops = kvm_kb_ops;
1396 	} else {
1397 #ifndef __x86
1398 		return (set_errno(EINVAL));
1399 #else
1400 		mdb_kb_ops_t *(*getops)(void);
1401 
1402 		kt->k_symfile = NULL;
1403 		kt->k_kvmfile = strdup(argv[0]);
1404 
1405 		getops = (mdb_kb_ops_t *(*)())dlsym(RTLD_NEXT, "mdb_kb_ops");
1406 
1407 		/*
1408 		 * Load mdb_kb if it's not already loaded during
1409 		 * identification.
1410 		 */
1411 		if (getops == NULL) {
1412 			(void) mdb_module_load("mdb_kb",
1413 			    MDB_MOD_GLOBAL | MDB_MOD_SILENT);
1414 			getops = (mdb_kb_ops_t *(*)())
1415 			    dlsym(RTLD_NEXT, "mdb_kb_ops");
1416 		}
1417 
1418 		if (getops == NULL || (kt->k_kb_ops = getops()) == NULL) {
1419 			warn("failed to load KVM backend ops\n");
1420 			goto err;
1421 		}
1422 
1423 		kt->k_cookie = kt->k_kb_ops->kb_open(NULL, kt->k_kvmfile, NULL,
1424 		    oflag, (char *)mdb.m_pname);
1425 
1426 		if (kt->k_cookie == NULL)
1427 			goto err;
1428 
1429 		kt->k_xpv_domu = 1;
1430 #endif
1431 	}
1432 
1433 	if ((kt->k_fio = kt->k_kb_ops->kb_sym_io(kt->k_cookie,
1434 	    kt->k_symfile)) == NULL)
1435 		goto err;
1436 
1437 	if ((kt->k_file = mdb_gelf_create(kt->k_fio,
1438 	    ET_EXEC, GF_FILE)) == NULL) {
1439 		mdb_io_destroy(kt->k_fio);
1440 		goto err;
1441 	}
1442 
1443 	kt->k_symtab =
1444 	    mdb_gelf_symtab_create_file(kt->k_file, SHT_SYMTAB, MDB_TGT_SYMTAB);
1445 
1446 	kt->k_dynsym =
1447 	    mdb_gelf_symtab_create_file(kt->k_file, SHT_DYNSYM, MDB_TGT_DYNSYM);
1448 
1449 	if (mdb_gelf_symtab_lookup_by_name(kt->k_symtab, "kas",
1450 	    &sym, NULL) == -1) {
1451 		warn("'kas' symbol is missing from kernel\n");
1452 		goto err;
1453 	}
1454 
1455 	kt->k_as = (struct as *)(uintptr_t)sym.st_value;
1456 
1457 	if (mdb_gelf_symtab_lookup_by_name(kt->k_symtab, "platform",
1458 	    &sym, NULL) == -1) {
1459 		warn("'platform' symbol is missing from kernel\n");
1460 		goto err;
1461 	}
1462 
1463 	if (kt->k_kb_ops->kb_kread(kt->k_cookie, sym.st_value,
1464 	    kt->k_platform, MAXNAMELEN) <= 0) {
1465 		warn("failed to read 'platform' string from kernel");
1466 		goto err;
1467 	}
1468 
1469 	if (mdb_gelf_symtab_lookup_by_name(kt->k_symtab, "utsname",
1470 	    &sym, NULL) == -1) {
1471 		warn("'utsname' symbol is missing from kernel\n");
1472 		goto err;
1473 	}
1474 
1475 	if (kt->k_kb_ops->kb_kread(kt->k_cookie, sym.st_value, &uts,
1476 	    sizeof (uts)) <= 0) {
1477 		warn("failed to read 'utsname' struct from kernel");
1478 		goto err;
1479 	}
1480 
1481 	kt->k_dump_print_content = (void (*)())kt_data_stub;
1482 	kt->k_dump_find_curproc = kt_data_stub;
1483 
1484 	/*
1485 	 * We set k_ctfvalid based on the presence of the CTF vmem arena
1486 	 * symbol.  The CTF members were added to the end of struct module at
1487 	 * the same time, so this allows us to know whether we can use them.
1488 	 */
1489 	if (mdb_gelf_symtab_lookup_by_name(kt->k_symtab, "ctf_arena", &sym,
1490 	    NULL) == 0 && !(mdb.m_flags & MDB_FL_NOCTF))
1491 		kt->k_ctfvalid = 1;
1492 
1493 	(void) mdb_nv_create(&kt->k_modules, UM_SLEEP);
1494 	t->t_pshandle = kt->k_cookie;
1495 	t->t_data = kt;
1496 
1497 #if defined(__sparc)
1498 #if defined(__sparcv9)
1499 	kt_sparcv9_init(t);
1500 #else
1501 	kt_sparcv7_init(t);
1502 #endif
1503 #elif defined(__amd64)
1504 	kt_amd64_init(t);
1505 #elif defined(__i386)
1506 	kt_ia32_init(t);
1507 #else
1508 #error	"unknown ISA"
1509 #endif
1510 
1511 	/*
1512 	 * We read our representative thread ID (address) from the kernel's
1513 	 * global panic_thread.  It will remain 0 if this is a live kernel.
1514 	 */
1515 	(void) mdb_tgt_readsym(t, MDB_TGT_AS_VIRT, &kt->k_tid, sizeof (void *),
1516 	    MDB_TGT_OBJ_EXEC, "panic_thread");
1517 
1518 	if ((mdb.m_flags & MDB_FL_ADB) && mdb_tgt_readsym(t, MDB_TGT_AS_VIRT,
1519 	    &pmem, sizeof (pmem), MDB_TGT_OBJ_EXEC, "physmem") == sizeof (pmem))
1520 		mdb_printf("physmem %lx\n", (ulong_t)pmem);
1521 
1522 	/*
1523 	 * If this is not a live kernel or a hypervisor dump, read the dump
1524 	 * header.  We don't have to sanity-check the header, as the open would
1525 	 * not have succeeded otherwise.
1526 	 */
1527 	if (!kt->k_xpv_domu && strcmp(kt->k_symfile, "/dev/ksyms") != 0) {
1528 		mdb_io_t *vmcore;
1529 
1530 		kt->k_dumphdr = mdb_alloc(sizeof (dumphdr_t), UM_SLEEP);
1531 
1532 		if ((vmcore = mdb_fdio_create_path(NULL, kt->k_kvmfile,
1533 		    O_RDONLY, 0)) == NULL) {
1534 			mdb_warn("failed to open %s", kt->k_kvmfile);
1535 			goto err;
1536 		}
1537 
1538 		if (IOP_READ(vmcore, kt->k_dumphdr, sizeof (dumphdr_t)) !=
1539 		    sizeof (dumphdr_t)) {
1540 			mdb_warn("failed to read dump header");
1541 			mdb_io_destroy(vmcore);
1542 			goto err;
1543 		}
1544 
1545 		mdb_io_destroy(vmcore);
1546 
1547 		(void) mdb_tgt_xdata_insert(t, "dumphdr",
1548 		    "dump header structure", kt_xd_dumphdr);
1549 	}
1550 
1551 	return (0);
1552 
1553 err:
1554 	if (kt->k_dumphdr != NULL)
1555 		mdb_free(kt->k_dumphdr, sizeof (dumphdr_t));
1556 
1557 	if (kt->k_symtab != NULL)
1558 		mdb_gelf_symtab_destroy(kt->k_symtab);
1559 
1560 	if (kt->k_dynsym != NULL)
1561 		mdb_gelf_symtab_destroy(kt->k_dynsym);
1562 
1563 	if (kt->k_file != NULL)
1564 		mdb_gelf_destroy(kt->k_file);
1565 
1566 	if (kt->k_cookie != NULL)
1567 		(void) kt->k_kb_ops->kb_close(kt->k_cookie);
1568 
1569 	mdb_free(kt, sizeof (kt_data_t));
1570 	return (-1);
1571 }
1572 
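/*
 * Return nonzero if the I/O object appears to be a compressed kernel
 * crash dump, i.e. it begins with a valid dumphdr whose DF_COMPRESSED
 * flag is set.
 */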
1573 int
1574 mdb_kvm_is_compressed_dump(mdb_io_t *io)
1575 {
1576 	dumphdr_t h;
1577 
1578 	return (IOP_READ(io, &h, sizeof (dumphdr_t)) == sizeof (dumphdr_t) &&
1579 	    h.dump_magic == DUMP_MAGIC &&
1580 	    (h.dump_flags & DF_COMPRESSED) != 0);
1581 }
1582