xref: /illumos-gate/usr/src/cmd/mdb/intel/kmdb/kaif.c (revision 9acbbeaf2a1ffe5c14b244867d427714fab43c5c)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  * The debugger/"PROM" interface layer
30  *
31  * (it makes more sense on SPARC)
32  */
33 
34 #include <kmdb/kmdb_dpi_impl.h>
35 #include <kmdb/kmdb_kdi.h>
36 #include <kmdb/kmdb_umemglue.h>
37 #include <kmdb/kaif.h>
38 #include <kmdb/kaif_asmutil.h>
39 #include <kmdb/kmdb_io.h>
40 #include <mdb/mdb_err.h>
41 #include <mdb/mdb_debug.h>
42 #include <mdb/mdb_isautil.h>
43 #include <mdb/mdb_io_impl.h>
44 #include <mdb/mdb_kreg.h>
45 #include <mdb/mdb.h>
46 
47 #include <sys/types.h>
48 #include <sys/segments.h>
49 #include <sys/bitmap.h>
50 #include <sys/termios.h>
51 
52 kaif_cpusave_t	*kaif_cpusave;
53 int		kaif_ncpusave;
54 
55 kaif_drreg_t	kaif_drreg;
56 
57 uint32_t	kaif_waptmap;
58 
59 #ifndef __amd64
60 /* Used to track the current set of valid kernel selectors. */
61 uint32_t	kaif_cs;
62 uint32_t	kaif_ds;
63 uint32_t	kaif_fs;
64 uint32_t	kaif_gs;
65 #endif
66 
67 uint_t		kaif_msr_wrexit_msr;
68 uint64_t	*kaif_msr_wrexit_valp;
69 
70 uintptr_t	kaif_kernel_handler;
71 uintptr_t	kaif_sys_sysenter;
72 uintptr_t	kaif_brand_sys_sysenter;
73 
74 int		kaif_trap_switch;
75 
76 void (*kaif_modchg_cb)(struct modctl *, int);
77 
78 #define	KAIF_MEMRANGES_MAX	2
79 
80 kaif_memrange_t	kaif_memranges[KAIF_MEMRANGES_MAX];
81 int		kaif_nmemranges;
82 
83 enum {
84 	M_SYSRET	= 0x07, /* after M_ESC */
85 	M_ESC		= 0x0f,
86 	M_SYSEXIT	= 0x35, /* after M_ESC */
87 	M_REX_LO	= 0x40, /* first REX prefix */
88 	M_REX_HI	= 0x4f, /* last REX prefix */
89 	M_PUSHF		= 0x9c,	/* pushfl and pushfq */
90 	M_POPF		= 0x9d,	/* popfl and popfq */
91 	M_INT3		= 0xcc,
92 	M_INTX		= 0xcd,
93 	M_INTO		= 0xce,
94 	M_IRET		= 0xcf,
95 	M_CLI		= 0xfa,
96 	M_STI		= 0xfb
97 };
98 
99 #define	KAIF_BREAKPOINT_INSTR	M_INT3
100 
101 #define	KAIF_WPPRIV2ID(wp)	(int)(uintptr_t)((wp)->wp_priv)
102 
103 #ifdef __amd64
104 #define	FLAGS_REG_NAME		"rflags"
105 #else
106 #define	FLAGS_REG_NAME		"eflags"
107 #endif
108 
109 /*
110  * Called during normal debugger operation and during debugger faults.
111  */
112 static void
113 kaif_enter_mon(void)
114 {
115 	char c;
116 
117 	for (;;) {
118 		mdb_iob_printf(mdb.m_out,
119 		    "%s: Do you really want to reboot? (y/n) ",
120 		    mdb.m_pname);
121 		mdb_iob_flush(mdb.m_out);
122 		mdb_iob_clearlines(mdb.m_out);
123 
124 		c = kmdb_getchar();
125 
126 		if (c == 'n' || c == 'N' || c == CTRL('c'))
127 			return;
128 		else if (c == 'y' || c == 'Y') {
129 			mdb_iob_printf(mdb.m_out, "Rebooting...\n");
130 
131 			kmdb_dpi_reboot();
132 		}
133 	}
134 }
135 
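/*
 * Map a DPI CPU id to its per-CPU save area.  DPI_MASTER_CPUID names the
 * current master; any other id must refer to a CPU currently under the
 * debugger's control as master or slave.
 */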
136 static kaif_cpusave_t *
137 kaif_cpuid2save(int cpuid)
138 {
139 	kaif_cpusave_t *save;
140 
141 	if (cpuid == DPI_MASTER_CPUID)
142 		return (&kaif_cpusave[kaif_master_cpuid]);
143 
144 	if (cpuid < 0 || cpuid >= kaif_ncpusave) {
145 		(void) set_errno(EINVAL);
146 		return (NULL);
147 	}
148 
149 	save = &kaif_cpusave[cpuid];
150 
151 	if (save->krs_cpu_state != KAIF_CPU_STATE_MASTER &&
152 	    save->krs_cpu_state != KAIF_CPU_STATE_SLAVE) {
153 		(void) set_errno(EINVAL);
154 		return (NULL);
155 	}
156 
157 	return (save);
158 }
159 
160 static int
161 kaif_get_cpu_state(int cpuid)
162 {
163 	kaif_cpusave_t *save;
164 
165 	if ((save = kaif_cpuid2save(cpuid)) == NULL)
166 		return (-1); /* errno is set for us */
167 
168 	switch (save->krs_cpu_state) {
169 	case KAIF_CPU_STATE_MASTER:
170 		return (DPI_CPU_STATE_MASTER);
171 	case KAIF_CPU_STATE_SLAVE:
172 		return (DPI_CPU_STATE_SLAVE);
173 	default:
174 		return (set_errno(EINVAL));
175 	}
176 }
177 
178 static int
179 kaif_get_master_cpuid(void)
180 {
181 	return (kaif_master_cpuid);
182 }
183 
184 static const mdb_tgt_gregset_t *
185 kaif_get_gregs(int cpuid)
186 {
187 	kaif_cpusave_t *save;
188 
189 	if ((save = kaif_cpuid2save(cpuid)) == NULL)
190 		return (NULL); /* errno is set for us */
191 
192 	return (save->krs_gregs);
193 }
194 
195 typedef struct kaif_reg_synonyms {
196 	const char *rs_syn;
197 	const char *rs_name;
198 } kaif_reg_synonyms_t;
199 
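/*
 * Translate a register name, or one of a few synonyms such as "pc", "sp",
 * "fp", and "tt", into a pointer to its slot in the master CPU's saved
 * register set.
 */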
200 static kreg_t *
201 kaif_find_regp(const char *regname)
202 {
203 	static const kaif_reg_synonyms_t synonyms[] = {
204 #ifdef __amd64
205 	    { "pc", "rip" },
206 	    { "sp", "rsp" },
207 	    { "fp", "rbp" },
208 #else
209 	    { "pc", "eip" },
210 	    { "sp", "esp" },
211 	    { "fp", "ebp" },
212 #endif
213 	    { "tt", "trapno" }
214 	};
215 
216 	kaif_cpusave_t *save;
217 	int i;
218 
219 	save = kaif_cpuid2save(DPI_MASTER_CPUID);
220 
221 	for (i = 0; i < sizeof (synonyms) / sizeof (synonyms[0]); i++) {
222 		if (strcmp(synonyms[i].rs_syn, regname) == 0)
223 			regname = synonyms[i].rs_name;
224 	}
225 
226 	for (i = 0; mdb_isa_kregs[i].rd_name != NULL; i++) {
227 		const mdb_tgt_regdesc_t *rd = &mdb_isa_kregs[i];
228 
229 		if (strcmp(rd->rd_name, regname) == 0)
230 			return (&save->krs_gregs->kregs[rd->rd_num]);
231 	}
232 
233 	(void) set_errno(ENOENT);
234 	return (NULL);
235 }
236 
237 /*ARGSUSED*/
238 static int
239 kaif_get_register(const char *regname, kreg_t *valp)
240 {
241 	kreg_t *regp;
242 
243 	if ((regp = kaif_find_regp(regname)) == NULL)
244 		return (-1);
245 
246 	*valp = *regp;
247 
248 	return (0);
249 }
250 
251 static int
252 kaif_set_register(const char *regname, kreg_t val)
253 {
254 	kreg_t *regp;
255 
256 	if ((regp = kaif_find_regp(regname)) == NULL)
257 		return (-1);
258 
259 	*regp = val;
260 
261 	return (0);
262 }
263 
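/*
 * Breakpoints are implemented by saving the instruction byte at the target
 * address and replacing it with an int3 (0xcc).  Disarming writes the saved
 * byte back.
 */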
264 static int
265 kaif_brkpt_arm(uintptr_t addr, mdb_instr_t *instrp)
266 {
267 	mdb_instr_t bkpt = KAIF_BREAKPOINT_INSTR;
268 
269 	if (mdb_tgt_vread(mdb.m_target, instrp, sizeof (mdb_instr_t), addr) !=
270 	    sizeof (mdb_instr_t))
271 		return (-1); /* errno is set for us */
272 
273 	if (mdb_tgt_vwrite(mdb.m_target, &bkpt, sizeof (mdb_instr_t), addr) !=
274 	    sizeof (mdb_instr_t))
275 		return (-1); /* errno is set for us */
276 
277 	return (0);
278 }
279 
280 static int
281 kaif_brkpt_disarm(uintptr_t addr, mdb_instr_t instrp)
282 {
283 	if (mdb_tgt_vwrite(mdb.m_target, &instrp, sizeof (mdb_instr_t), addr) !=
284 	    sizeof (mdb_instr_t))
285 		return (-1); /* errno is set for us */
286 
287 	return (0);
288 }
289 
290 /*
291  * Intel watchpoints are even more fun than SPARC ones.  The Intel architecture
292  * manuals refer to watchpoints as breakpoints.  For consistency with the
293  * terminology used in other portions of kmdb, we will, however, refer to them
294  * as watchpoints.
295  *
296  * Execute, data write, I/O read/write, and data read/write watchpoints are
297  * supported by the hardware.  Execute watchpoints must be one byte in length,
298  * and must be placed on the first byte of the instruction to be watched.
299  * Lengths of other watchpoints are more varied.
300  *
301  * Given that we already have a breakpoint facility, and given the restrictions
302  * placed on execute watchpoints, we're going to disallow the creation of
303  * execute watchpoints.  The others will be fully supported.  See the Debugging
304  * chapter in both the IA32 and AMD64 System Programming books for more details.
305  */
306 
307 #ifdef __amd64
308 #define	WAPT_DATA_MAX_SIZE	8
309 #define	WAPT_DATA_SIZES_MSG	"1, 2, 4, or 8"
310 #else
311 #define	WAPT_DATA_MAX_SIZE	4
312 #define	WAPT_DATA_SIZES_MSG	"1, 2, or 4"
313 #endif
314 
315 static int
316 kaif_wapt_validate(kmdb_wapt_t *wp)
317 {
318 	if (wp->wp_type == DPI_WAPT_TYPE_IO) {
319 		if (wp->wp_wflags != (MDB_TGT_WA_R | MDB_TGT_WA_W)) {
320 			warn("I/O port watchpoints must be read/write\n");
321 			return (set_errno(EINVAL));
322 		}
323 
324 		if (wp->wp_size != 1 && wp->wp_size != 2 && wp->wp_size != 4) {
325 			warn("I/O watchpoint size must be 1, 2, or 4 bytes\n");
326 			return (set_errno(EINVAL));
327 		}
328 
329 	} else if (wp->wp_type == DPI_WAPT_TYPE_PHYS) {
330 		warn("physical address watchpoints are not supported on this "
331 		    "platform\n");
332 		return (set_errno(EMDB_TGTHWNOTSUP));
333 
334 	} else {
335 		if (wp->wp_wflags != (MDB_TGT_WA_R | MDB_TGT_WA_W) &&
336 		    wp->wp_wflags != MDB_TGT_WA_W) {
337 			warn("watchpoints must be read/write or write-only\n");
338 			return (set_errno(EINVAL));
339 		}
340 
341 		if ((wp->wp_size & -(wp->wp_size)) != wp->wp_size ||
342 		    wp->wp_size > WAPT_DATA_MAX_SIZE) {
343 			warn("data watchpoint size must be " WAPT_DATA_SIZES_MSG
344 			    " bytes\n");
345 			return (set_errno(EINVAL));
346 		}
347 
348 	}
349 
350 	if (wp->wp_addr & (wp->wp_size - 1)) {
351 		warn("%lu-byte watchpoints must be %lu-byte aligned\n",
352 		    (ulong_t)wp->wp_size, (ulong_t)wp->wp_size);
353 		return (set_errno(EINVAL));
354 	}
355 
356 	return (0);
357 }
358 
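/*
 * Reserve a hardware watchpoint slot: find a free index in kaif_waptmap and
 * record it in wp_priv for the subsequent arm, disarm, and match calls.
 */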
359 static int
360 kaif_wapt_reserve(kmdb_wapt_t *wp)
361 {
362 	int id;
363 
364 	for (id = 0; id <= KREG_MAXWPIDX; id++) {
365 		if (!BT_TEST(&kaif_waptmap, id)) {
366 			/* found one */
367 			BT_SET(&kaif_waptmap, id);
368 			wp->wp_priv = (void *)(uintptr_t)id;
369 			return (0);
370 		}
371 	}
372 
373 	return (set_errno(EMDB_WPTOOMANY));
374 }
375 
376 static void
377 kaif_wapt_release(kmdb_wapt_t *wp)
378 {
379 	int id = KAIF_WPPRIV2ID(wp);
380 
381 	ASSERT(BT_TEST(&kaif_waptmap, id));
382 	BT_CLEAR(&kaif_waptmap, id);
383 }
384 
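/*
 * Encode the watchpoint's address, length, and access type into the shadow
 * debug register state (kaif_drreg) and enable the corresponding slot.
 */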
385 /*ARGSUSED*/
386 static void
387 kaif_wapt_arm(kmdb_wapt_t *wp)
388 {
389 	uint_t rw;
390 	int hwid = KAIF_WPPRIV2ID(wp);
391 
392 	ASSERT(BT_TEST(&kaif_waptmap, hwid));
393 
394 	if (wp->wp_type == DPI_WAPT_TYPE_IO)
395 		rw = KREG_DRCTL_WP_IORW;
396 	else if (wp->wp_wflags & MDB_TGT_WA_R)
397 		rw = KREG_DRCTL_WP_RW;
398 	else if (wp->wp_wflags & MDB_TGT_WA_X)
399 		rw = KREG_DRCTL_WP_EXEC;
400 	else
401 		rw = KREG_DRCTL_WP_WONLY;
402 
403 	kaif_drreg.dr_addr[hwid] = wp->wp_addr;
404 
405 	kaif_drreg.dr_ctl &= ~KREG_DRCTL_WP_MASK(hwid);
406 	kaif_drreg.dr_ctl |= KREG_DRCTL_WP_LENRW(hwid, wp->wp_size - 1, rw);
407 	kaif_drreg.dr_ctl |= KREG_DRCTL_WPEN(hwid);
408 }
409 
410 /*ARGSUSED*/
411 static void
412 kaif_wapt_disarm(kmdb_wapt_t *wp)
413 {
414 	int hwid = KAIF_WPPRIV2ID(wp);
415 
416 	ASSERT(BT_TEST(&kaif_waptmap, hwid));
417 
418 	kaif_drreg.dr_addr[hwid] = 0;
419 	kaif_drreg.dr_ctl &= ~(KREG_DRCTL_WP_MASK(hwid) |
420 	    KREG_DRCTL_WPEN_MASK(hwid));
421 }
422 
423 /*ARGSUSED*/
424 static int
425 kaif_wapt_match(kmdb_wapt_t *wp)
426 {
427 	int hwid = KAIF_WPPRIV2ID(wp);
428 	uint32_t mask = KREG_DRSTAT_WP_MASK(hwid);
429 	int n = 0;
430 	int i;
431 
432 	ASSERT(BT_TEST(&kaif_waptmap, hwid));
433 
434 	for (i = 0; i < kaif_ncpusave; i++)
435 		n += (kaif_cpusave[i].krs_dr.dr_stat & mask) != 0;
436 
437 	return (n);
438 }
439 
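/*
 * Step the master CPU over a single instruction.  Instructions that would
 * switch context (iret, int, sysret, sysexit) are refused.  cli and sti are
 * emulated by editing the saved EFLAGS rather than being executed, and
 * pushf/popf have their stacked EFLAGS images patched so the IF/TF
 * adjustments made here don't leak.  Everything else is stepped with TF set
 * and IF clear, after which EFLAGS is repaired.
 */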
440 static int
441 kaif_step(void)
442 {
443 	kreg_t pc, fl, oldfl, newfl, sp;
444 	mdb_tgt_addr_t npc;
445 	mdb_instr_t instr;
446 	int emulated = 0, rchk = 0;
447 	size_t pcoff = 0;
448 
449 	(void) kmdb_dpi_get_register("pc", &pc);
450 
451 	if ((npc = mdb_dis_nextins(mdb.m_disasm, mdb.m_target,
452 	    MDB_TGT_AS_VIRT, pc)) == pc) {
453 		warn("failed to decode instruction at %a for step\n", pc);
454 		return (set_errno(EINVAL));
455 	}
456 
457 	/*
458 	 * Stepping behavior depends on the type of instruction.  It does not
459 	 * depend on the presence of a REX prefix, as the action we take for a
460 	 * given instruction doesn't currently vary for 32-bit instructions
461 	 * versus their 64-bit counterparts.
462 	 */
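	/* Fetch the opcode byte, skipping over at most one REX prefix. */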
463 	do {
464 		if (mdb_tgt_vread(mdb.m_target, &instr, sizeof (mdb_instr_t),
465 		    pc + pcoff) != sizeof (mdb_instr_t)) {
466 			warn("failed to read at %p for step",
467 			    (void *)(pc + pcoff));
468 			return (-1);
469 		}
470 	} while (pcoff++, (instr >= M_REX_LO && instr <= M_REX_HI && !rchk++));
471 
472 	switch (instr) {
473 	case M_IRET:
474 		warn("iret cannot be stepped\n");
475 		return (set_errno(EMDB_TGTNOTSUP));
476 
477 	case M_INT3:
478 	case M_INTX:
479 	case M_INTO:
480 		warn("int cannot be stepped\n");
481 		return (set_errno(EMDB_TGTNOTSUP));
482 
483 	case M_ESC:
484 		if (mdb_tgt_vread(mdb.m_target, &instr, sizeof (mdb_instr_t),
485 		    pc + pcoff) != sizeof (mdb_instr_t)) {
486 			warn("failed to read at %p for step",
487 			    (void *)(pc + pcoff));
488 			return (-1);
489 		}
490 
491 		switch (instr) {
492 		case M_SYSRET:
493 			warn("sysret cannot be stepped\n");
494 			return (set_errno(EMDB_TGTNOTSUP));
495 		case M_SYSEXIT:
496 			warn("sysexit cannot be stepped\n");
497 			return (set_errno(EMDB_TGTNOTSUP));
498 		}
499 		break;
500 
501 	/*
502 	 * Some instructions need to be emulated.  We need to prevent direct
503 	 * manipulations of EFLAGS, so we'll emulate cli, sti.  pushfl and
504 	 * popfl also receive special handling, as they manipulate both EFLAGS
505 	 * and %esp.
506 	 */
507 	case M_CLI:
508 		(void) kmdb_dpi_get_register(FLAGS_REG_NAME, &fl);
509 		fl &= ~KREG_EFLAGS_IF_MASK;
510 		(void) kmdb_dpi_set_register(FLAGS_REG_NAME, fl);
511 
512 		emulated = 1;
513 		break;
514 
515 	case M_STI:
516 		(void) kmdb_dpi_get_register(FLAGS_REG_NAME, &fl);
517 		fl |= (1 << KREG_EFLAGS_IF_SHIFT);
518 		(void) kmdb_dpi_set_register(FLAGS_REG_NAME, fl);
519 
520 		emulated = 1;
521 		break;
522 
523 	case M_POPF:
524 		/*
525 		 * popfl will restore a pushed EFLAGS from the stack, and could
526 		 * in so doing cause IF to be turned on, if only for a brief
527 		 * period.  To avoid this, we'll secretly replace the stack's
528 		 * EFLAGS with our decaffeinated brand.  We'll then manually
529 		 * load our EFLAGS copy with the real version after the step.
530 		 */
531 		(void) kmdb_dpi_get_register("sp", &sp);
532 		(void) kmdb_dpi_get_register(FLAGS_REG_NAME, &fl);
533 
534 		if (mdb_tgt_vread(mdb.m_target, &newfl, sizeof (kreg_t),
535 		    sp) != sizeof (kreg_t)) {
536 			warn("failed to read " FLAGS_REG_NAME
537 			    " at %p for popfl step\n", (void *)sp);
538 			return (set_errno(EMDB_TGTNOTSUP)); /* XXX ? */
539 		}
540 
541 		fl = (fl & ~KREG_EFLAGS_IF_MASK) | KREG_EFLAGS_TF_MASK;
542 
543 		if (mdb_tgt_vwrite(mdb.m_target, &fl, sizeof (kreg_t),
544 		    sp) != sizeof (kreg_t)) {
545 			warn("failed to update " FLAGS_REG_NAME
546 			    " at %p for popfl step\n", (void *)sp);
547 			return (set_errno(EMDB_TGTNOTSUP)); /* XXX ? */
548 		}
549 		break;
550 	}
551 
552 	if (emulated) {
553 		(void) kmdb_dpi_set_register("pc", npc);
554 		return (0);
555 	}
556 
557 	/* Do the step with IF off, and TF (step) on */
558 	(void) kmdb_dpi_get_register(FLAGS_REG_NAME, &oldfl);
559 	(void) kmdb_dpi_set_register(FLAGS_REG_NAME,
560 	    ((oldfl | (1 << KREG_EFLAGS_TF_SHIFT)) & ~KREG_EFLAGS_IF_MASK));
561 
562 	kmdb_dpi_resume_master(); /* ... there and back again ... */
563 
564 	/* EFLAGS has now changed, and may require tuning */
565 
566 	switch (instr) {
567 	case M_POPF:
568 		/*
569 		 * Use the EFLAGS we grabbed before the pop - see the pre-step
570 		 * M_POPF comment.
571 		 */
572 		(void) kmdb_dpi_set_register(FLAGS_REG_NAME, newfl);
573 		return (0);
574 
575 	case M_PUSHF:
576 		/*
577 		 * We pushed our modified EFLAGS (with IF cleared and TF set)
578 		 * onto the stack.  Replace the pushed version with our
579 		 * unmodified one.
580 		 */
581 		(void) kmdb_dpi_get_register("sp", &sp);
582 
583 		if (mdb_tgt_vwrite(mdb.m_target, &oldfl, sizeof (kreg_t),
584 		    sp) != sizeof (kreg_t)) {
585 			warn("failed to update pushed " FLAGS_REG_NAME
586 			    " at %p after pushfl step\n", (void *)sp);
587 			return (set_errno(EMDB_TGTNOTSUP)); /* XXX ? */
588 		}
589 
590 		/* Go back to using the EFLAGS we were using before the step */
591 		(void) kmdb_dpi_set_register(FLAGS_REG_NAME, oldfl);
592 		return (0);
593 
594 	default:
595 		/*
596 		 * The stepped instruction may have altered EFLAGS.  We only
597 		 * really care about the value of IF, and we know the stepped
598 		 * instruction didn't alter it, so we can simply copy the
599 		 * pre-step value.  We'll also need to turn TF back off.
600 		 */
601 		(void) kmdb_dpi_get_register(FLAGS_REG_NAME, &fl);
602 		(void) kmdb_dpi_set_register(FLAGS_REG_NAME,
603 		    ((fl & ~(KREG_EFLAGS_TF_MASK|KREG_EFLAGS_IF_MASK)) |
604 		    (oldfl & KREG_EFLAGS_IF_MASK)));
605 		return (0);
606 	}
607 }
608 
609 /*
610  * The target has already configured the chip for branch step, leaving us to
611  * actually make the machine go.  Due to a number of issues involving
612  * the potential alteration of system state via instructions like sti, cli,
613  * pushfl, and popfl, we're going to treat this like a normal system resume.
614  * All CPUs will be released, on the kernel's IDT.  Our primary concern is
615  * the alteration/storage of our TF'd EFLAGS via pushfl and popfl.  There's no
616  * real workaround - we don't have opcode breakpoints - so the best we can do is
617  * to ensure that the world won't end if someone does bad things to EFLAGS.
618  *
619  * Two things can happen:
620  *  1. EFLAGS.TF may be cleared, either maliciously or via a popfl from saved
621  *     state.  The CPU will continue execution beyond the branch, and will not
622  *     reenter the debugger unless brought/sent in by other means.
623  *  2. Someone may pushfl the TF'd EFLAGS, and may stash a copy of it somewhere.
624  *     When the saved version is popfl'd back into place, the debugger will be
625  *     re-entered on a single-step trap.
626  */
627 static void
628 kaif_step_branch(void)
629 {
630 	kreg_t fl;
631 
632 	(void) kmdb_dpi_get_register(FLAGS_REG_NAME, &fl);
633 	(void) kmdb_dpi_set_register(FLAGS_REG_NAME,
634 	    (fl | (1 << KREG_EFLAGS_TF_SHIFT)));
635 
636 	kmdb_dpi_resume_master();
637 
638 	(void) kmdb_dpi_set_register(FLAGS_REG_NAME, fl);
639 }
640 
641 /*ARGSUSED*/
642 static uintptr_t
643 kaif_call(uintptr_t funcva, uint_t argc, const uintptr_t argv[])
644 {
645 	return (kaif_invoke(funcva, argc, argv));
646 }
647 
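/*
 * Read a single crumb from the target and print its CPU state, trap number,
 * stack pointer, flag, and PC.
 */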
648 static void
649 dump_crumb(kaif_crumb_t *krmp)
650 {
651 	kaif_crumb_t krm;
652 
653 	if (mdb_vread(&krm, sizeof (kaif_crumb_t), (uintptr_t)krmp) !=
654 	    sizeof (kaif_crumb_t)) {
655 		warn("failed to read crumb at %p", krmp);
656 		return;
657 	}
658 
659 	mdb_printf("state: ");
660 	switch (krm.krm_cpu_state) {
661 	case KAIF_CPU_STATE_MASTER:
662 		mdb_printf("M");
663 		break;
664 	case KAIF_CPU_STATE_SLAVE:
665 		mdb_printf("S");
666 		break;
667 	default:
668 		mdb_printf("%d", krm.krm_cpu_state);
669 	}
670 
671 	mdb_printf(" trapno %3d sp %08x flag %d pc %p %A\n",
672 	    krm.krm_trapno, krm.krm_sp, krm.krm_flag, krm.krm_pc, krm.krm_pc);
673 }
674 
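/*
 * Print every crumb in a CPU's circular crumb buffer, starting at the
 * current index and walking backwards through the ring.
 */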
675 static void
676 dump_crumbs(kaif_cpusave_t *save)
677 {
678 	int i;
679 
680 	for (i = KAIF_NCRUMBS; i > 0; i--) {
681 		uint_t idx = (save->krs_curcrumbidx + i) % KAIF_NCRUMBS;
682 		dump_crumb(&save->krs_crumbs[idx]);
683 	}
684 }
685 
686 static void
687 kaif_dump_crumbs(uintptr_t addr, int cpuid)
688 {
689 	int i;
690 
691 	if (addr != NULL) {
692 		/* dump_crumb will protect us against bogus addresses */
693 		dump_crumb((kaif_crumb_t *)addr);
694 
695 	} else if (cpuid != -1) {
696 		if (cpuid < 0 || cpuid >= kaif_ncpusave)
697 			return;
698 
699 		dump_crumbs(&kaif_cpusave[cpuid]);
700 
701 	} else {
702 		for (i = 0; i < kaif_ncpusave; i++) {
703 			kaif_cpusave_t *save = &kaif_cpusave[i];
704 
705 			if (save->krs_cpu_state == KAIF_CPU_STATE_NONE)
706 				continue;
707 
708 			mdb_printf("%sCPU %d crumbs: (curidx %d)\n",
709 			    (i == 0 ? "" : "\n"), i, save->krs_curcrumbidx);
710 
711 			dump_crumbs(save);
712 		}
713 	}
714 }
715 
716 static void
717 kaif_modchg_register(void (*func)(struct modctl *, int))
718 {
719 	kaif_modchg_cb = func;
720 }
721 
722 static void
723 kaif_modchg_cancel(void)
724 {
725 	ASSERT(kaif_modchg_cb != NULL);
726 
727 	kaif_modchg_cb = NULL;
728 }
729 
730 void
731 kaif_mod_loaded(struct modctl *modp)
732 {
733 	if (kaif_modchg_cb != NULL)
734 		kaif_modchg_cb(modp, 1);
735 }
736 
737 void
738 kaif_mod_unloading(struct modctl *modp)
739 {
740 	if (kaif_modchg_cb != NULL)
741 		kaif_modchg_cb(modp, 0);
742 }
743 
744 /*
745  * On some processors, we'll need to clear a certain MSR before proceeding into
746  * the debugger.  Complicating matters, this MSR must be cleared before we take
747  * any branches.  We have patch points in every trap handler, which will cover
748  * all entry paths for master CPUs.  We also have a patch point in the slave
749  * entry code.
750  */
751 static void
752 kaif_msr_add_clrentry(uint_t msr)
753 {
754 #ifdef __amd64
755 	uchar_t code[] = {
756 		0x51, 0x50, 0x52,		/* pushq %rcx, %rax, %rdx */
757 		0xb9, 0x00, 0x00, 0x00, 0x00,	/* movl $MSRNUM, %ecx */
758 		0x31, 0xc0,			/* clr %eax */
759 		0x31, 0xd2,			/* clr %edx */
760 		0x0f, 0x30,			/* wrmsr */
761 		0x5a, 0x58, 0x59		/* popq %rdx, %rax, %rcx */
762 	};
763 	uchar_t *patch = &code[4];
764 #else
765 	uchar_t code[] = {
766 		0x60,				/* pushal */
767 		0xb9, 0x00, 0x00, 0x00, 0x00,	/* movl $MSRNUM, %ecx */
768 		0x31, 0xc0,			/* clr %eax */
769 		0x31, 0xd2,			/* clr %edx */
770 		0x0f, 0x30,			/* wrmsr */
771 		0x61				/* popal */
772 	};
773 	uchar_t *patch = &code[2];
774 #endif
775 
776 	bcopy(&msr, patch, sizeof (uint32_t));
777 
778 	kaif_idt_patch((caddr_t)code, sizeof (code));
779 
780 	bcopy(code, &kaif_slave_entry_patch, sizeof (code));
781 }
782 
783 static void
784 kaif_msr_add_wrexit(uint_t msr, uint64_t *valp)
785 {
786 	kaif_msr_wrexit_msr = msr;
787 	kaif_msr_wrexit_valp = valp;
788 }
789 
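/*
 * Accept the caller's MSR list: install clear-on-entry patches and
 * write-on-exit values as requested, then give each CPU its own copy of the
 * list, including the zero terminator.
 */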
790 static void
791 kaif_msr_add(const kmdb_msr_t *msrs)
792 {
793 	kmdb_msr_t *save;
794 	int nmsrs, i;
795 
796 	ASSERT(kaif_cpusave[0].krs_msr == NULL);
797 
798 	for (i = 0; msrs[i].msr_num != 0; i++) {
799 		switch (msrs[i].msr_type) {
800 		case KMDB_MSR_CLEARENTRY:
801 			kaif_msr_add_clrentry(msrs[i].msr_num);
802 			break;
803 
804 		case KMDB_MSR_WRITEDELAY:
805 			kaif_msr_add_wrexit(msrs[i].msr_num, msrs[i].msr_valp);
806 			break;
807 		}
808 	}
809 	nmsrs = i + 1; /* we want to copy the terminating kmdb_msr_t too */
810 
811 	save = mdb_zalloc(sizeof (kmdb_msr_t) * nmsrs * kaif_ncpusave,
812 	    UM_SLEEP);
813 
814 	for (i = 0; i < kaif_ncpusave; i++) {
815 		bcopy(msrs, &save[nmsrs * i], sizeof (kmdb_msr_t) * nmsrs);
816 		kaif_cpusave[i].krs_msr = &save[nmsrs * i];
817 	}
818 }
819 
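/*
 * Return the saved value of MSR 'num' for the given CPU, or 0 if the MSR
 * isn't in the CPU's list or isn't marked readable.
 */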
820 static uint64_t
821 kaif_msr_get(int cpuid, uint_t num)
822 {
823 	kaif_cpusave_t *save;
824 	kmdb_msr_t *msr;
825 	int i;
826 
827 	if ((save = kaif_cpuid2save(cpuid)) == NULL)
828 		return (-1); /* errno is set for us */
829 
830 	msr = save->krs_msr;
831 
832 	for (i = 0; msr[i].msr_num != 0; i++) {
833 		if (msr[i].msr_num == num &&
834 		    (msr[i].msr_type & KMDB_MSR_READ))
835 			return (msr[i].msr_val);
836 	}
837 
838 	return (0);
839 }
840 
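/*
 * Add a memory range to kaif_memranges so the debugger entry code can
 * recognize memory (such as stacks) allocated from it, then hand the range
 * to umem.
 */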
841 int
842 kaif_memrange_add(caddr_t base, size_t len)
843 {
844 	kaif_memrange_t *mr = &kaif_memranges[kaif_nmemranges];
845 
846 	if (kaif_nmemranges == KAIF_MEMRANGES_MAX)
847 		return (set_errno(ENOSPC));
848 
849 	/*
850 	 * In the unlikely event that someone is stepping through this routine,
851 	 * we need to make sure that kaif_memranges knows about the new range
852 	 * before umem gets it.  That way the entry code can recognize stacks
853 	 * allocated from the new region.
854 	 */
855 	mr->mr_base = base;
856 	mr->mr_lim = base + len - 1;
857 	kaif_nmemranges++;
858 
859 	if (mdb_umem_add(base, len) < 0) {
860 		kaif_nmemranges--;
861 		return (-1); /* errno is set for us */
862 	}
863 
864 	return (0);
865 }
866 
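/*
 * Switch between the debugger's own IDT (kaif_idtr) and the IDT image saved
 * in a CPU's save area.
 */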
867 void
868 kaif_trap_set_debugger(void)
869 {
870 	set_idt(&kaif_idtr);
871 }
872 
873 void
874 kaif_trap_set_saved(kaif_cpusave_t *cpusave)
875 {
876 	set_idt(&cpusave->krs_idtr);
877 }
878 
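/*
 * One-time initialization: allocate and seed the per-CPU save areas, build
 * the debugger IDT, set up the initial memory range, selector, and debug
 * register state, and look up the kernel's sysenter entry points.
 */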
879 static int
880 kaif_init(kmdb_auxv_t *kav)
881 {
882 	int i;
883 
884 	/* Allocate the per-CPU save areas */
885 	kaif_cpusave = mdb_zalloc(sizeof (kaif_cpusave_t) * kav->kav_ncpu,
886 	    UM_SLEEP);
887 	kaif_ncpusave = kav->kav_ncpu;
888 
889 	for (i = 0; i < kaif_ncpusave; i++) {
890 		kaif_cpusave_t *save = &kaif_cpusave[i];
891 
892 		save->krs_cpu_id = i;
893 		save->krs_curcrumbidx = KAIF_NCRUMBS - 1;
894 		save->krs_curcrumb = &save->krs_crumbs[save->krs_curcrumbidx];
895 	}
896 
897 	kaif_idt_init();
898 
899 	/* The initial selector set.  Updated by the debugger-entry code */
900 #ifndef __amd64
901 	kaif_cs = BOOTCODE_SEL;
902 	kaif_ds = kaif_fs = kaif_gs = BOOTFLAT_SEL;
903 #endif
904 
905 	kaif_memranges[0].mr_base = kav->kav_dseg;
906 	kaif_memranges[0].mr_lim = kav->kav_dseg + kav->kav_dseg_size - 1;
907 	kaif_nmemranges = 1;
908 
909 	kaif_modchg_cb = NULL;
910 
911 	kaif_waptmap = 0;
912 
913 	kaif_drreg.dr_ctl = KREG_DRCTL_RESERVED;
914 	kaif_drreg.dr_stat = KREG_DRSTAT_RESERVED;
915 
916 	kaif_msr_wrexit_msr = 0;
917 	kaif_msr_wrexit_valp = NULL;
918 
919 	kaif_trap_switch = (kav->kav_flags & KMDB_AUXV_FL_NOTRPSWTCH) == 0;
920 
921 	if ((kaif_sys_sysenter = kmdb_kdi_lookup_by_name("unix",
922 	    "sys_sysenter")) == NULL)
923 		return (set_errno(ENOENT));
924 
925 	if ((kaif_brand_sys_sysenter = kmdb_kdi_lookup_by_name("unix",
926 	    "brand_sys_sysenter")) == NULL)
927 		return (set_errno(ENOENT));
928 
929 	return (0);
930 }
931 
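/*
 * The operations vector exported to the common DPI layer.
 */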
932 dpi_ops_t kmdb_dpi_ops = {
933 	kaif_init,
934 	kaif_activate,
935 	kaif_deactivate,
936 	kaif_enter_mon,
937 	kaif_modchg_register,
938 	kaif_modchg_cancel,
939 	kaif_get_cpu_state,
940 	kaif_get_master_cpuid,
941 	kaif_get_gregs,
942 	kaif_get_register,
943 	kaif_set_register,
944 	kaif_brkpt_arm,
945 	kaif_brkpt_disarm,
946 	kaif_wapt_validate,
947 	kaif_wapt_reserve,
948 	kaif_wapt_release,
949 	kaif_wapt_arm,
950 	kaif_wapt_disarm,
951 	kaif_wapt_match,
952 	kaif_step,
953 	kaif_step_branch,
954 	kaif_call,
955 	kaif_dump_crumbs,
956 	kaif_memrange_add,
957 	kaif_msr_add,
958 	kaif_msr_get,
959 };
960