xref: /illumos-gate/usr/src/cmd/mdb/intel/kmdb/kaif.c (revision 0c1b95be)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  *
25  * Copyright 2018 Joyent, Inc.
26  */
27 
28 /*
29  * The debugger/"PROM" interface layer
30  *
31  * It makes more sense on SPARC. In reality, these interfaces deal with three
32  * things: setting break/watchpoints, stepping, and interfacing with the KDI to
33  * set up kmdb's IDT handlers.
34  */
35 
36 #include <kmdb/kmdb_dpi_impl.h>
37 #include <kmdb/kmdb_kdi.h>
38 #include <kmdb/kmdb_umemglue.h>
39 #include <kmdb/kaif.h>
40 #include <kmdb/kmdb_io.h>
41 #include <kmdb/kaif_start.h>
42 #include <mdb/mdb_err.h>
43 #include <mdb/mdb_debug.h>
44 #include <mdb/mdb_isautil.h>
45 #include <mdb/mdb_io_impl.h>
46 #include <mdb/mdb_kreg_impl.h>
47 #include <mdb/mdb.h>
48 
49 #include <sys/types.h>
50 #include <sys/bitmap.h>
51 #include <sys/termios.h>
52 #include <sys/kdi_impl.h>
53 #include <sys/sysmacros.h>
54 
55 /*
56  * This is the area containing the saved state when we enter
57  * via kmdb's IDT entries.
58  */
59 kdi_cpusave_t	*kaif_cpusave;
60 int		kaif_ncpusave;
61 kdi_drreg_t	kaif_drreg;
62 
63 uint32_t	kaif_waptmap;
64 
65 int		kaif_trap_switch;
66 
67 void (*kaif_modchg_cb)(struct modctl *, int);
68 
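/*
 * x86 opcode bytes of interest to the debugger: M_INT3 is used as the
 * breakpoint instruction, and kaif_step() examines the others to decide
 * whether an instruction must be refused (iret, int, sysret/sysexit -- the
 * last two appear as a second byte following M_ESC) or emulated rather than
 * executed with our modified flags (cli, sti, pushf, popf).
 */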
69 enum {
70 	M_SYSRET	= 0x07, /* after M_ESC */
71 	M_ESC		= 0x0f,
72 	M_SYSEXIT	= 0x35, /* after M_ESC */
73 	M_REX_LO	= 0x40, /* first REX prefix */
74 	M_REX_HI	= 0x4f, /* last REX prefix */
75 	M_PUSHF		= 0x9c,	/* pushfl and pushfq */
76 	M_POPF		= 0x9d,	/* popfl and popfq */
77 	M_INT3		= 0xcc,
78 	M_INTX		= 0xcd,
79 	M_INTO		= 0xce,
80 	M_IRET		= 0xcf,
81 	M_CLI		= 0xfa,
82 	M_STI		= 0xfb
83 };
84 
85 #define	KAIF_BREAKPOINT_INSTR	M_INT3
86 
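/*
 * A watchpoint's wp_priv field holds the index of the hardware debug
 * register slot reserved for it by kaif_wapt_reserve().
 */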
87 #define	KAIF_WPPRIV2ID(wp)	(int)(uintptr_t)((wp)->wp_priv)
88 
89 #ifdef __amd64
90 #define	FLAGS_REG_NAME		"rflags"
91 #else
92 #define	FLAGS_REG_NAME		"eflags"
93 #endif
94 
95 /*
96  * Called during normal debugger operation and during debugger faults.
97  */
98 static void
99 kaif_enter_mon(void)
100 {
101 	char c;
102 
103 	for (;;) {
104 		mdb_iob_printf(mdb.m_out,
105 		    "%s: Do you really want to reboot? (y/n) ",
106 		    mdb.m_pname);
107 		mdb_iob_flush(mdb.m_out);
108 		mdb_iob_clearlines(mdb.m_out);
109 
110 		c = kmdb_getchar();
111 
112 		if (c == 'n' || c == 'N' || c == CTRL('c'))
113 			return;
114 		else if (c == 'y' || c == 'Y') {
115 			mdb_iob_printf(mdb.m_out, "Rebooting...\n");
116 
117 			kmdb_dpi_reboot();
118 		}
119 	}
120 }
121 
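/*
 * Map a DPI CPU id (or DPI_MASTER_CPUID) to its per-CPU save area.  Fails
 * with EINVAL if the id is out of range or the CPU isn't currently stopped
 * in the debugger as master or slave.
 */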
122 static kaif_cpusave_t *
123 kaif_cpuid2save(int cpuid)
124 {
125 	kaif_cpusave_t *save;
126 
127 	if (cpuid == DPI_MASTER_CPUID)
128 		return (&kaif_cpusave[kaif_master_cpuid]);
129 
130 	if (cpuid < 0 || cpuid >= kaif_ncpusave) {
131 		(void) set_errno(EINVAL);
132 		return (NULL);
133 	}
134 
135 	save = &kaif_cpusave[cpuid];
136 
137 	if (save->krs_cpu_state != KAIF_CPU_STATE_MASTER &&
138 	    save->krs_cpu_state != KAIF_CPU_STATE_SLAVE) {
139 		(void) set_errno(EINVAL);
140 		return (NULL);
141 	}
142 
143 	return (save);
144 }
145 
146 static int
147 kaif_get_cpu_state(int cpuid)
148 {
149 	kaif_cpusave_t *save;
150 
151 	if ((save = kaif_cpuid2save(cpuid)) == NULL)
152 		return (-1); /* errno is set for us */
153 
154 	switch (save->krs_cpu_state) {
155 	case KAIF_CPU_STATE_MASTER:
156 		return (DPI_CPU_STATE_MASTER);
157 	case KAIF_CPU_STATE_SLAVE:
158 		return (DPI_CPU_STATE_SLAVE);
159 	default:
160 		return (set_errno(EINVAL));
161 	}
162 }
163 
164 static int
165 kaif_get_master_cpuid(void)
166 {
167 	return (kaif_master_cpuid);
168 }
169 
170 static mdb_tgt_gregset_t *
171 kaif_kdi_to_gregs(int cpuid)
172 {
173 	kaif_cpusave_t *save;
174 
175 	if ((save = kaif_cpuid2save(cpuid)) == NULL)
176 		return (NULL); /* errno is set for us */
177 
178 	/*
179 	 * The saved registers are actually identical to an mdb_tgt_gregset,
180 	 * so we can directly cast here.
181 	 */
182 	return ((mdb_tgt_gregset_t *)save->krs_gregs);
183 }
184 
185 static const mdb_tgt_gregset_t *
186 kaif_get_gregs(int cpuid)
187 {
188 	return (kaif_kdi_to_gregs(cpuid));
189 }
190 
191 typedef struct kaif_reg_synonyms {
192 	const char *rs_syn;
193 	const char *rs_name;
194 } kaif_reg_synonyms_t;
195 
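/*
 * Translate the generic register names (pc, sp, fp, tt) to their
 * ISA-specific equivalents, then return a pointer to that register's slot
 * in the master CPU's saved register set.
 */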
196 static kreg_t *
197 kaif_find_regp(const char *regname)
198 {
199 	static const kaif_reg_synonyms_t synonyms[] = {
200 #ifdef __amd64
201 	    { "pc", "rip" },
202 	    { "sp", "rsp" },
203 	    { "fp", "rbp" },
204 #else
205 	    { "pc", "eip" },
206 	    { "sp", "esp" },
207 	    { "fp", "ebp" },
208 #endif
209 	    { "tt", "trapno" }
210 	};
211 	mdb_tgt_gregset_t *regs;
212 	int i;
213 
214 	if ((regs = kaif_kdi_to_gregs(DPI_MASTER_CPUID)) == NULL)
215 		return (NULL);
216 
217 	for (i = 0; i < sizeof (synonyms) / sizeof (synonyms[0]); i++) {
218 		if (strcmp(synonyms[i].rs_syn, regname) == 0)
219 			regname = synonyms[i].rs_name;
220 	}
221 
222 	for (i = 0; mdb_isa_kregs[i].rd_name != NULL; i++) {
223 		const mdb_tgt_regdesc_t *rd = &mdb_isa_kregs[i];
224 
225 		if (strcmp(rd->rd_name, regname) == 0)
226 			return (&regs->kregs[rd->rd_num]);
227 	}
228 
229 	(void) set_errno(ENOENT);
230 	return (NULL);
231 }
232 
233 /*ARGSUSED*/
234 static int
235 kaif_get_register(const char *regname, kreg_t *valp)
236 {
237 	kreg_t *regp;
238 
239 	if ((regp = kaif_find_regp(regname)) == NULL)
240 		return (-1);
241 
242 	*valp = *regp;
243 
244 	return (0);
245 }
246 
247 static int
248 kaif_set_register(const char *regname, kreg_t val)
249 {
250 	kreg_t *regp;
251 
252 	if ((regp = kaif_find_regp(regname)) == NULL)
253 		return (-1);
254 
255 	*regp = val;
256 
257 	return (0);
258 }
259 
260 /*
261  * Refuse to single-step or break within any stub that loads a user %cr3 value.
262  * As the KDI traps are not careful to restore such a %cr3, this can all go
263  * wrong, both spectacularly and subtly.
264  */
265 static boolean_t
266 kaif_toxic_text(uintptr_t addr)
267 {
268 	static GElf_Sym toxic_syms[2] = { 0, };
269 	size_t i;
270 
271 	if (toxic_syms[0].st_name == 0) {
272 		if (mdb_tgt_lookup_by_name(mdb.m_target, MDB_TGT_OBJ_EXEC,
273 		    "tr_iret_user", &toxic_syms[0], NULL) != 0)
274 			warn("couldn't find tr_iret_user\n");
275 		if (mdb_tgt_lookup_by_name(mdb.m_target, MDB_TGT_OBJ_EXEC,
276 		    "tr_mmu_flush_user_range", &toxic_syms[1], NULL) != 0)
277 			warn("couldn't find tr_mmu_flush_user_range\n");
278 	}
279 
280 	for (i = 0; i < ARRAY_SIZE(toxic_syms); i++) {
281 		if (addr >= toxic_syms[i].st_value &&
282 		    addr - toxic_syms[i].st_value < toxic_syms[i].st_size)
283 			return (B_TRUE);
284 	}
285 
286 	return (B_FALSE);
287 }
288 
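/*
 * Arm a breakpoint by saving the original instruction byte in *instrp and
 * replacing it with an int3 (0xcc); kaif_brkpt_disarm() writes the saved
 * byte back.
 */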
289 static int
290 kaif_brkpt_arm(uintptr_t addr, mdb_instr_t *instrp)
291 {
292 	mdb_instr_t bkpt = KAIF_BREAKPOINT_INSTR;
293 
294 	if (kaif_toxic_text(addr)) {
295 		warn("%a cannot be a breakpoint target\n", addr);
296 		return (set_errno(EMDB_TGTNOTSUP));
297 	}
298 
299 	if (mdb_tgt_aread(mdb.m_target, MDB_TGT_AS_VIRT_I, instrp,
300 	    sizeof (mdb_instr_t), addr) != sizeof (mdb_instr_t))
301 		return (-1); /* errno is set for us */
302 
303 	if (mdb_tgt_awrite(mdb.m_target, MDB_TGT_AS_VIRT_I, &bkpt,
304 	    sizeof (mdb_instr_t), addr) != sizeof (mdb_instr_t))
305 		return (-1); /* errno is set for us */
306 
307 	return (0);
308 }
309 
310 static int
311 kaif_brkpt_disarm(uintptr_t addr, mdb_instr_t instrp)
312 {
313 	if (mdb_tgt_awrite(mdb.m_target, MDB_TGT_AS_VIRT_I, &instrp,
314 	    sizeof (mdb_instr_t), addr) != sizeof (mdb_instr_t))
315 		return (-1); /* errno is set for us */
316 
317 	return (0);
318 }
319 
320 /*
321  * Intel watchpoints are even more fun than SPARC ones.  The Intel architecture
322  * manuals refer to watchpoints as breakpoints.  For consistency with the
323  * terminology used in other portions of kmdb, we will, however, refer to them
324  * as watchpoints.
325  *
326  * Execute, data write, I/O read/write, and data read/write watchpoints are
327  * supported by the hardware.  Execute watchpoints must be one byte in length,
328  * and must be placed on the first byte of the instruction to be watched.
329  * Lengths of other watchpoints are more varied.
330  *
331  * Given that we already have a breakpoint facility, and given the restrictions
332  * placed on execute watchpoints, we're going to disallow the creation of
333  * execute watchpoints.  The others will be fully supported.  See the Debugging
334  * chapter in both the IA32 and AMD64 System Programming books for more details.
335  */
336 
337 #ifdef __amd64
338 #define	WAPT_DATA_MAX_SIZE	8
339 #define	WAPT_DATA_SIZES_MSG	"1, 2, 4, or 8"
340 #else
341 #define	WAPT_DATA_MAX_SIZE	4
342 #define	WAPT_DATA_SIZES_MSG	"1, 2, or 4"
343 #endif
344 
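/*
 * Enforce the hardware's watchpoint restrictions: I/O watchpoints must be
 * read/write and 1, 2, or 4 bytes; data watchpoints must be write-only or
 * read/write, must have a power-of-two size no larger than
 * WAPT_DATA_MAX_SIZE, and must be naturally aligned.
 */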
345 static int
346 kaif_wapt_validate(kmdb_wapt_t *wp)
347 {
348 	if (wp->wp_type == DPI_WAPT_TYPE_IO) {
349 		if (wp->wp_wflags != (MDB_TGT_WA_R | MDB_TGT_WA_W)) {
350 			warn("I/O port watchpoints must be read/write\n");
351 			return (set_errno(EINVAL));
352 		}
353 
354 		if (wp->wp_size != 1 && wp->wp_size != 2 && wp->wp_size != 4) {
355 			warn("I/O watchpoint size must be 1, 2, or 4 bytes\n");
356 			return (set_errno(EINVAL));
357 		}
358 
359 	} else if (wp->wp_type == DPI_WAPT_TYPE_PHYS) {
360 		warn("physical address watchpoints are not supported on this "
361 		    "platform\n");
362 		return (set_errno(EMDB_TGTHWNOTSUP));
363 
364 	} else {
365 		if (wp->wp_wflags != (MDB_TGT_WA_R | MDB_TGT_WA_W) &&
366 		    wp->wp_wflags != MDB_TGT_WA_W) {
367 			warn("watchpoints must be read/write or write-only\n");
368 			return (set_errno(EINVAL));
369 		}
370 
371 		if ((wp->wp_size & -(wp->wp_size)) != wp->wp_size ||
372 		    wp->wp_size > WAPT_DATA_MAX_SIZE) {
373 			warn("data watchpoint size must be " WAPT_DATA_SIZES_MSG
374 			    " bytes\n");
375 			return (set_errno(EINVAL));
376 		}
377 
378 	}
379 
380 	if (wp->wp_addr & (wp->wp_size - 1)) {
381 		warn("%lu-byte watchpoints must be %lu-byte aligned\n",
382 		    (ulong_t)wp->wp_size, (ulong_t)wp->wp_size);
383 		return (set_errno(EINVAL));
384 	}
385 
386 	return (0);
387 }
388 
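/*
 * Reserve a free hardware debug register slot (0 through KDI_MAXWPIDX) from
 * the kaif_waptmap bitmap and record it in wp_priv.
 */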
389 static int
390 kaif_wapt_reserve(kmdb_wapt_t *wp)
391 {
392 	int id;
393 
394 	for (id = 0; id <= KDI_MAXWPIDX; id++) {
395 		if (!BT_TEST(&kaif_waptmap, id)) {
396 			/* found one */
397 			BT_SET(&kaif_waptmap, id);
398 			wp->wp_priv = (void *)(uintptr_t)id;
399 			return (0);
400 		}
401 	}
402 
403 	return (set_errno(EMDB_WPTOOMANY));
404 }
405 
406 static void
407 kaif_wapt_release(kmdb_wapt_t *wp)
408 {
409 	int id = KAIF_WPPRIV2ID(wp);
410 
411 	ASSERT(BT_TEST(&kaif_waptmap, id));
412 	BT_CLEAR(&kaif_waptmap, id);
413 }
414 
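/*
 * Program the shadow debug register state (kaif_drreg) with this
 * watchpoint's address, size, and access type, then hand it to the KDI so
 * the real registers are loaded when the target resumes.  As a rough sketch
 * of the underlying %dr7 encoding (per the Intel SDM), a 4-byte write-only
 * watchpoint in slot 0 would set R/W0 = 01b (data write) and LEN0 = 11b
 * (4 bytes); KREG_DRCTL_WP_LENRW() hides that packing.
 */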
415 /*ARGSUSED*/
416 static void
417 kaif_wapt_arm(kmdb_wapt_t *wp)
418 {
419 	uint_t rw;
420 	int hwid = KAIF_WPPRIV2ID(wp);
421 
422 	ASSERT(BT_TEST(&kaif_waptmap, hwid));
423 
424 	if (wp->wp_type == DPI_WAPT_TYPE_IO)
425 		rw = KREG_DRCTL_WP_IORW;
426 	else if (wp->wp_wflags & MDB_TGT_WA_R)
427 		rw = KREG_DRCTL_WP_RW;
428 	else if (wp->wp_wflags & MDB_TGT_WA_X)
429 		rw = KREG_DRCTL_WP_EXEC;
430 	else
431 		rw = KREG_DRCTL_WP_WONLY;
432 
433 	kaif_drreg.dr_addr[hwid] = wp->wp_addr;
434 
435 	kaif_drreg.dr_ctl &= ~KREG_DRCTL_WP_MASK(hwid);
436 	kaif_drreg.dr_ctl |= KREG_DRCTL_WP_LENRW(hwid, wp->wp_size - 1, rw);
437 	kaif_drreg.dr_ctl |= KREG_DRCTL_WPEN(hwid);
438 	kmdb_kdi_update_drreg(&kaif_drreg);
439 }
440 
441 /*ARGSUSED*/
442 static void
443 kaif_wapt_disarm(kmdb_wapt_t *wp)
444 {
445 	int hwid = KAIF_WPPRIV2ID(wp);
446 
447 	ASSERT(BT_TEST(&kaif_waptmap, hwid));
448 
449 	kaif_drreg.dr_addr[hwid] = 0;
450 	kaif_drreg.dr_ctl &= ~(KREG_DRCTL_WP_MASK(hwid) |
451 	    KREG_DRCTL_WPEN_MASK(hwid));
452 	kmdb_kdi_update_drreg(&kaif_drreg);
453 }
454 
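/*
 * Return the number of CPUs whose saved debug status register (%dr6)
 * indicates that this watchpoint fired.
 */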
455 /*ARGSUSED*/
456 static int
457 kaif_wapt_match(kmdb_wapt_t *wp)
458 {
459 	int hwid = KAIF_WPPRIV2ID(wp);
460 	uint32_t mask = KREG_DRSTAT_WP_MASK(hwid);
461 	int n = 0;
462 	int i;
463 
464 	ASSERT(BT_TEST(&kaif_waptmap, hwid));
465 
466 	for (i = 0; i < kaif_ncpusave; i++)
467 		n += (kaif_cpusave[i].krs_dr.dr_stat & mask) != 0;
468 
469 	return (n);
470 }
471 
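/*
 * Single-step the master CPU.  In the common case we set TF and clear IF
 * (bits 8 and 9 of the flags register, per the standard x86 layout) so the
 * step can't be diverted by an interrupt, resume, and then restore the
 * pre-step IF value.  Instructions that can't be stepped safely are either
 * rejected (iret, int, sysret, sysexit) or emulated outright (cli, sti),
 * and pushf/popf get extra help so the flags image on the stack never
 * exposes our TF/IF changes.
 */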
472 static int
473 kaif_step(void)
474 {
475 	kreg_t pc, fl, oldfl, newfl, sp;
476 	mdb_tgt_addr_t npc;
477 	mdb_instr_t instr;
478 	int emulated = 0, rchk = 0;
479 	size_t pcoff = 0;
480 
481 	(void) kmdb_dpi_get_register("pc", &pc);
482 
483 	if (kaif_toxic_text(pc)) {
484 		warn("%a cannot be stepped\n", pc);
485 		return (set_errno(EMDB_TGTNOTSUP));
486 	}
487 
488 	if ((npc = mdb_dis_nextins(mdb.m_disasm, mdb.m_target,
489 	    MDB_TGT_AS_VIRT_I, pc)) == pc) {
490 		warn("failed to decode instruction at %a for step\n", pc);
491 		return (set_errno(EINVAL));
492 	}
493 
494 	/*
495 	 * Stepping behavior depends on the type of instruction.  It does not
496 	 * depend on the presence of a REX prefix, as the action we take for a
497 	 * given instruction doesn't currently vary for 32-bit instructions
498 	 * versus their 64-bit counterparts.
499 	 */
500 	do {
501 		if (mdb_tgt_aread(mdb.m_target, MDB_TGT_AS_VIRT_I, &instr,
502 		    sizeof (mdb_instr_t), pc + pcoff) != sizeof (mdb_instr_t)) {
503 			warn("failed to read at %p for step",
504 			    (void *)(pc + pcoff));
505 			return (-1);
506 		}
507 	} while (pcoff++, (instr >= M_REX_LO && instr <= M_REX_HI && !rchk++));
508 
509 	switch (instr) {
510 	case M_IRET:
511 		warn("iret cannot be stepped\n");
512 		return (set_errno(EMDB_TGTNOTSUP));
513 
514 	case M_INT3:
515 	case M_INTX:
516 	case M_INTO:
517 		warn("int cannot be stepped\n");
518 		return (set_errno(EMDB_TGTNOTSUP));
519 
520 	case M_ESC:
521 		if (mdb_tgt_aread(mdb.m_target, MDB_TGT_AS_VIRT_I, &instr,
522 		    sizeof (mdb_instr_t), pc + pcoff) != sizeof (mdb_instr_t)) {
523 			warn("failed to read at %p for step",
524 			    (void *)(pc + pcoff));
525 			return (-1);
526 		}
527 
528 		switch (instr) {
529 		case M_SYSRET:
530 			warn("sysret cannot be stepped\n");
531 			return (set_errno(EMDB_TGTNOTSUP));
532 		case M_SYSEXIT:
533 			warn("sysexit cannot be stepped\n");
534 			return (set_errno(EMDB_TGTNOTSUP));
535 		}
536 		break;
537 
538 	/*
539 	 * Some instructions need to be emulated.  We need to prevent direct
540 	 * manipulations of EFLAGS, so we'll emulate cli, sti.  pushfl and
541 	 * popfl also receive special handling, as they manipulate both EFLAGS
542 	 * and %esp.
543 	 */
544 	case M_CLI:
545 		(void) kmdb_dpi_get_register(FLAGS_REG_NAME, &fl);
546 		fl &= ~KREG_EFLAGS_IF_MASK;
547 		(void) kmdb_dpi_set_register(FLAGS_REG_NAME, fl);
548 
549 		emulated = 1;
550 		break;
551 
552 	case M_STI:
553 		(void) kmdb_dpi_get_register(FLAGS_REG_NAME, &fl);
554 		fl |= (1 << KREG_EFLAGS_IF_SHIFT);
555 		(void) kmdb_dpi_set_register(FLAGS_REG_NAME, fl);
556 
557 		emulated = 1;
558 		break;
559 
560 	case M_POPF:
561 		/*
562 		 * popfl will restore a pushed EFLAGS from the stack, and could
563 		 * in so doing cause IF to be turned on, if only for a brief
564 		 * period.  To avoid this, we'll secretly replace the stack's
565 		 * EFLAGS with our decaffeinated brand.  We'll then manually
566 		 * load our EFLAGS copy with the real version after the step.
567 		 */
568 		(void) kmdb_dpi_get_register("sp", &sp);
569 		(void) kmdb_dpi_get_register(FLAGS_REG_NAME, &fl);
570 
571 		if (mdb_tgt_aread(mdb.m_target, MDB_TGT_AS_VIRT_S, &newfl,
572 		    sizeof (kreg_t), sp) != sizeof (kreg_t)) {
573 			warn("failed to read " FLAGS_REG_NAME
574 			    " at %p for popfl step\n", (void *)sp);
575 			return (set_errno(EMDB_TGTNOTSUP)); /* XXX ? */
576 		}
577 
578 		fl = (fl & ~KREG_EFLAGS_IF_MASK) | KREG_EFLAGS_TF_MASK;
579 
580 		if (mdb_tgt_awrite(mdb.m_target, MDB_TGT_AS_VIRT_S, &fl,
581 		    sizeof (kreg_t), sp) != sizeof (kreg_t)) {
582 			warn("failed to update " FLAGS_REG_NAME
583 			    " at %p for popfl step\n", (void *)sp);
584 			return (set_errno(EMDB_TGTNOTSUP)); /* XXX ? */
585 		}
586 		break;
587 	}
588 
589 	if (emulated) {
590 		(void) kmdb_dpi_set_register("pc", npc);
591 		return (0);
592 	}
593 
594 	/* Do the step with IF off, and TF (step) on */
595 	(void) kmdb_dpi_get_register(FLAGS_REG_NAME, &oldfl);
596 	(void) kmdb_dpi_set_register(FLAGS_REG_NAME,
597 	    ((oldfl | (1 << KREG_EFLAGS_TF_SHIFT)) & ~KREG_EFLAGS_IF_MASK));
598 
599 	kmdb_dpi_resume_master(); /* ... there and back again ... */
600 
601 	/* EFLAGS has now changed, and may require tuning */
602 
603 	switch (instr) {
604 	case M_POPF:
605 		/*
606 		 * Use the EFLAGS we grabbed before the pop - see the pre-step
607 		 * M_POPF comment.
608 		 */
609 		(void) kmdb_dpi_set_register(FLAGS_REG_NAME, newfl);
610 		return (0);
611 
612 	case M_PUSHF:
613 		/*
614 		 * We pushed our modified EFLAGS (with IF and TF turned off)
615 		 * onto the stack.  Replace the pushed version with our
616 		 * unmodified one.
617 		 */
618 		(void) kmdb_dpi_get_register("sp", &sp);
619 
620 		if (mdb_tgt_awrite(mdb.m_target, MDB_TGT_AS_VIRT_S, &oldfl,
621 		    sizeof (kreg_t), sp) != sizeof (kreg_t)) {
622 			warn("failed to update pushed " FLAGS_REG_NAME
623 			    " at %p after pushfl step\n", (void *)sp);
624 			return (set_errno(EMDB_TGTNOTSUP)); /* XXX ? */
625 		}
626 
627 		/* Go back to using the EFLAGS we were using before the step */
628 		(void) kmdb_dpi_set_register(FLAGS_REG_NAME, oldfl);
629 		return (0);
630 
631 	default:
632 		/*
633 		 * The stepped instruction may have altered EFLAGS.  We only
634 		 * really care about the value of IF, and we know the stepped
635 		 * instruction didn't alter it, so we can simply copy the
636 		 * pre-step value.  We'll also need to turn TF back off.
637 		 */
638 		(void) kmdb_dpi_get_register(FLAGS_REG_NAME, &fl);
639 		(void) kmdb_dpi_set_register(FLAGS_REG_NAME,
640 		    ((fl & ~(KREG_EFLAGS_TF_MASK|KREG_EFLAGS_IF_MASK)) |
641 		    (oldfl & KREG_EFLAGS_IF_MASK)));
642 		return (0);
643 	}
644 }
645 
646 /*ARGSUSED*/
647 static uintptr_t
648 kaif_call(uintptr_t funcva, uint_t argc, const uintptr_t argv[])
649 {
650 	return (kaif_invoke(funcva, argc, argv));
651 }
652 
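/*
 * Print a single KDI "crumb" -- a small record the debugger entry code
 * leaves behind describing a CPU's passage through the trap handlers
 * (state, trap number, stack pointer, flag word, and pc).
 */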
653 static void
654 dump_crumb(kdi_crumb_t *krmp)
655 {
656 	kdi_crumb_t krm;
657 
658 	if (mdb_vread(&krm, sizeof (kdi_crumb_t), (uintptr_t)krmp) !=
659 	    sizeof (kdi_crumb_t)) {
660 		warn("failed to read crumb at %p", krmp);
661 		return;
662 	}
663 
664 	mdb_printf("state: ");
665 	switch (krm.krm_cpu_state) {
666 	case KAIF_CPU_STATE_MASTER:
667 		mdb_printf("M");
668 		break;
669 	case KAIF_CPU_STATE_SLAVE:
670 		mdb_printf("S");
671 		break;
672 	default:
673 		mdb_printf("%d", krm.krm_cpu_state);
674 	}
675 
676 	mdb_printf(" trapno %3d sp %08x flag %d pc %p %A\n",
677 	    krm.krm_trapno, krm.krm_sp, krm.krm_flag, krm.krm_pc, krm.krm_pc);
678 }
679 
680 static void
681 dump_crumbs(kaif_cpusave_t *save)
682 {
683 	int i;
684 
685 	for (i = KDI_NCRUMBS; i > 0; i--) {
686 		uint_t idx = (save->krs_curcrumbidx + i) % KDI_NCRUMBS;
687 		dump_crumb(&save->krs_crumbs[idx]);
688 	}
689 }
690 
691 static void
692 kaif_dump_crumbs(uintptr_t addr, int cpuid)
693 {
694 	int i;
695 
696 	if (addr != 0) {
697 		/* dump_crumb will protect us against bogus addresses */
698 		dump_crumb((kdi_crumb_t *)addr);
699 
700 	} else if (cpuid != -1) {
701 		if (cpuid < 0 || cpuid >= kaif_ncpusave)
702 			return;
703 
704 		dump_crumbs(&kaif_cpusave[cpuid]);
705 
706 	} else {
707 		for (i = 0; i < kaif_ncpusave; i++) {
708 			kaif_cpusave_t *save = &kaif_cpusave[i];
709 
710 			if (save->krs_cpu_state == KAIF_CPU_STATE_NONE)
711 				continue;
712 
713 			mdb_printf("%sCPU %d crumbs: (curidx %d)\n",
714 			    (i == 0 ? "" : "\n"), i, save->krs_curcrumbidx);
715 
716 			dump_crumbs(save);
717 		}
718 	}
719 }
720 
721 static void
722 kaif_modchg_register(void (*func)(struct modctl *, int))
723 {
724 	kaif_modchg_cb = func;
725 }
726 
727 static void
728 kaif_modchg_cancel(void)
729 {
730 	ASSERT(kaif_modchg_cb != NULL);
731 
732 	kaif_modchg_cb = NULL;
733 }
734 
735 void
736 kaif_trap_set_debugger(void)
737 {
738 	kmdb_kdi_idt_switch(NULL);
739 }
740 
741 void
742 kaif_trap_set_saved(kaif_cpusave_t *cpusave)
743 {
744 	kmdb_kdi_idt_switch(cpusave);
745 }
746 
747 static void
748 kaif_vmready(void)
749 {
750 }
751 
752 void
753 kaif_memavail(caddr_t base, size_t len)
754 {
755 	int ret;
756 	/*
757 	 * In the unlikely event that someone is stepping through this routine,
758 	 * we need to make sure that the KDI knows about the new range before
759 	 * umem gets it.  That way the entry code can recognize stacks
760 	 * allocated from the new region.
761 	 */
762 	kmdb_kdi_memrange_add(base, len);
763 	ret = mdb_umem_add(base, len);
764 	ASSERT(ret == 0);
765 }
766 
767 void
768 kaif_mod_loaded(struct modctl *modp)
769 {
770 	if (kaif_modchg_cb != NULL)
771 		kaif_modchg_cb(modp, 1);
772 }
773 
774 void
775 kaif_mod_unloading(struct modctl *modp)
776 {
777 	if (kaif_modchg_cb != NULL)
778 		kaif_modchg_cb(modp, 0);
779 }
780 
781 void
782 kaif_handle_fault(greg_t trapno, greg_t pc, greg_t sp, int cpuid)
783 {
784 	kmdb_dpi_handle_fault((kreg_t)trapno, (kreg_t)pc,
785 	    (kreg_t)sp, cpuid);
786 }
787 
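/*
 * Debug vector handed back to the kernel by kaif_activate().  The kernel
 * uses it to notify kmdb when the VM is ready, when new memory ranges
 * become available, when modules are loaded or unloaded, and when a
 * debugger fault must be handled.
 */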
788 static kdi_debugvec_t kaif_dvec = {
789 	NULL,			/* dv_kctl_vmready */
790 	NULL,			/* dv_kctl_memavail */
791 	NULL,			/* dv_kctl_modavail */
792 	NULL,			/* dv_kctl_thravail */
793 	kaif_vmready,
794 	kaif_memavail,
795 	kaif_mod_loaded,
796 	kaif_mod_unloading,
797 	kaif_handle_fault
798 };
799 
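/*
 * Entry point invoked by the KDI when a CPU traps into the debugger; runs
 * the main loop until the CPU is told to resume.
 */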
800 void
801 kaif_kdi_entry(kdi_cpusave_t *cpusave)
802 {
803 	int ret = kaif_main_loop(cpusave);
804 	ASSERT(ret == KAIF_CPU_CMD_RESUME ||
805 	    ret == KAIF_CPU_CMD_RESUME_MASTER);
806 }
807 
808 /*ARGSUSED*/
809 void
810 kaif_activate(kdi_debugvec_t **dvecp, uint_t flags)
811 {
812 	kmdb_kdi_activate(kaif_kdi_entry, kaif_cpusave, kaif_ncpusave);
813 	*dvecp = &kaif_dvec;
814 }
815 
816 static int
817 kaif_init(kmdb_auxv_t *kav)
818 {
819 	/* Allocate the per-CPU save areas */
820 	kaif_cpusave = mdb_zalloc(sizeof (kaif_cpusave_t) * kav->kav_ncpu,
821 	    UM_SLEEP);
822 	kaif_ncpusave = kav->kav_ncpu;
823 
824 	kaif_modchg_cb = NULL;
825 
826 	kaif_waptmap = 0;
827 
828 	kaif_trap_switch = (kav->kav_flags & KMDB_AUXV_FL_NOTRPSWTCH) == 0;
829 
830 	return (0);
831 }
832 
833 dpi_ops_t kmdb_dpi_ops = {
834 	.dpo_init = kaif_init,
835 	.dpo_debugger_activate = kaif_activate,
836 	.dpo_debugger_deactivate = kmdb_kdi_deactivate,
837 	.dpo_enter_mon = kaif_enter_mon,
838 	.dpo_modchg_register = kaif_modchg_register,
839 	.dpo_modchg_cancel = kaif_modchg_cancel,
840 	.dpo_get_cpu_state = kaif_get_cpu_state,
841 	.dpo_get_master_cpuid = kaif_get_master_cpuid,
842 	.dpo_get_gregs = kaif_get_gregs,
843 	.dpo_get_register = kaif_get_register,
844 	.dpo_set_register = kaif_set_register,
845 	.dpo_brkpt_arm = kaif_brkpt_arm,
846 	.dpo_brkpt_disarm = kaif_brkpt_disarm,
847 	.dpo_wapt_validate = kaif_wapt_validate,
848 	.dpo_wapt_reserve = kaif_wapt_reserve,
849 	.dpo_wapt_release = kaif_wapt_release,
850 	.dpo_wapt_arm = kaif_wapt_arm,
851 	.dpo_wapt_disarm = kaif_wapt_disarm,
852 	.dpo_wapt_match = kaif_wapt_match,
853 	.dpo_step = kaif_step,
854 	.dpo_call = kaif_call,
855 	.dpo_dump_crumbs = kaif_dump_crumbs,
856 };
857