/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 * Copyright (c) 2018 Joyent, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 *
 * Copyright 2015 Pluribus Networks Inc.
 * Copyright 2018 Joyent, Inc.
 * Copyright 2021 Oxide Computer Company
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/smp.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sysctl.h>

#include <sys/x86_archext.h>
#include <sys/smp_impldefs.h>
#include <sys/smt.h>
#include <sys/hma.h>
#include <sys/trap.h>

#include <machine/psl.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>
#include <machine/reg.h>
#include <machine/segments.h>
#include <machine/smp.h>
#include <machine/specialreg.h>
#include <machine/vmparam.h>
#include <sys/vmm_vm.h>

#include <machine/vmm.h>
#include <machine/vmm_dev.h>
#include <sys/vmm_instruction_emul.h>
#include "vmm_lapic.h"
#include "vmm_host.h"
#include "vmm_ioport.h"
#include "vmm_ktr.h"
#include "vmm_stat.h"
#include "vatpic.h"
#include "vlapic.h"
#include "vlapic_priv.h"

#include "ept.h"
#include "vmcs.h"
#include "vmx.h"
#include "vmx_msr.h"
#include "x86.h"
#include "vmx_controls.h"

#define	PINBASED_CTLS_ONE_SETTING		\
	(PINBASED_EXTINT_EXITING |		\
	PINBASED_NMI_EXITING |			\
	PINBASED_VIRTUAL_NMI)
#define	PINBASED_CTLS_ZERO_SETTING	0

#define	PROCBASED_CTLS_WINDOW_SETTING		\
	(PROCBASED_INT_WINDOW_EXITING |		\
	PROCBASED_NMI_WINDOW_EXITING)

/* We consider TSC offset a necessity for unsynched TSC handling */
#define	PROCBASED_CTLS_ONE_SETTING		\
	(PROCBASED_SECONDARY_CONTROLS |		\
	PROCBASED_TSC_OFFSET |			\
	PROCBASED_MWAIT_EXITING |		\
	PROCBASED_MONITOR_EXITING |		\
	PROCBASED_IO_EXITING |			\
	PROCBASED_MSR_BITMAPS |			\
	PROCBASED_CTLS_WINDOW_SETTING |		\
	PROCBASED_CR8_LOAD_EXITING |		\
	PROCBASED_CR8_STORE_EXITING)

#define	PROCBASED_CTLS_ZERO_SETTING		\
	(PROCBASED_CR3_LOAD_EXITING |		\
	PROCBASED_CR3_STORE_EXITING |		\
	PROCBASED_IO_BITMAPS)

/*
 * EPT and Unrestricted Guest are considered necessities. The latter is not a
 * requirement on FreeBSD, where grub2-bhyve is used to load guests directly
 * without a bootrom starting in real mode.
 */
#define	PROCBASED_CTLS2_ONE_SETTING		\
	(PROCBASED2_ENABLE_EPT |		\
	PROCBASED2_UNRESTRICTED_GUEST)
#define	PROCBASED_CTLS2_ZERO_SETTING	0

#define	VM_EXIT_CTLS_ONE_SETTING		\
	(VM_EXIT_SAVE_DEBUG_CONTROLS |		\
	VM_EXIT_HOST_LMA |			\
	VM_EXIT_LOAD_PAT |			\
	VM_EXIT_SAVE_EFER |			\
	VM_EXIT_LOAD_EFER |			\
	VM_EXIT_ACKNOWLEDGE_INTERRUPT)

#define	VM_EXIT_CTLS_ZERO_SETTING	0

#define	VM_ENTRY_CTLS_ONE_SETTING		\
	(VM_ENTRY_LOAD_DEBUG_CONTROLS |		\
	VM_ENTRY_LOAD_EFER)

#define	VM_ENTRY_CTLS_ZERO_SETTING		\
	(VM_ENTRY_INTO_SMM |			\
	VM_ENTRY_DEACTIVATE_DUAL_MONITOR)

#define	HANDLED		1
#define	UNHANDLED	0

static MALLOC_DEFINE(M_VMX, "vmx", "vmx");
static MALLOC_DEFINE(M_VLAPIC, "vlapic", "vlapic");

SYSCTL_DECL(_hw_vmm);
SYSCTL_NODE(_hw_vmm, OID_AUTO, vmx, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
    NULL);

static uint32_t pinbased_ctls, procbased_ctls, procbased_ctls2;
static uint32_t exit_ctls, entry_ctls;

static uint64_t cr0_ones_mask, cr0_zeros_mask;

static uint64_t cr4_ones_mask, cr4_zeros_mask;

static int vmx_initialized;

/* Do not flush RSB upon vmexit */
static int no_flush_rsb;

/*
 * Optional capabilities
 */

/* HLT triggers a VM-exit */
static int cap_halt_exit;

/* PAUSE triggers a VM-exit */
static int cap_pause_exit;

/* Monitor trap flag */
static int cap_monitor_trap;

/* Guests are allowed to use INVPCID */
static int cap_invpcid;

/* Extra capabilities (VMX_CAP_*) beyond the minimum */
static enum vmx_caps vmx_capabilities;

/* APICv posted interrupt vector */
static int pirvec = -1;

static uint_t vpid_alloc_failed;

int guest_l1d_flush;
int guest_l1d_flush_sw;

/* MSR save region is composed of an array of 'struct msr_entry' */
struct msr_entry {
	uint32_t	index;
	uint32_t	reserved;
	uint64_t	val;
};

static struct msr_entry msr_load_list[1] __aligned(16);

/*
 * The definitions of SDT probes for VMX.
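 *
 * Each probe carries the vmx softc, the vcpu ID, and the vm_exit being
 * filled in (plus exit-specific arguments such as the MSR number or fault
 * address), so exit handling can be observed with DTrace without modifying
 * the module.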
 */

/* BEGIN CSTYLED */
SDT_PROBE_DEFINE3(vmm, vmx, exit, entry,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE4(vmm, vmx, exit, taskswitch,
    "struct vmx *", "int", "struct vm_exit *", "struct vm_task_switch *");

SDT_PROBE_DEFINE4(vmm, vmx, exit, craccess,
    "struct vmx *", "int", "struct vm_exit *", "uint64_t");

SDT_PROBE_DEFINE4(vmm, vmx, exit, rdmsr,
    "struct vmx *", "int", "struct vm_exit *", "uint32_t");

SDT_PROBE_DEFINE5(vmm, vmx, exit, wrmsr,
    "struct vmx *", "int", "struct vm_exit *", "uint32_t", "uint64_t");

SDT_PROBE_DEFINE3(vmm, vmx, exit, halt,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, mtrap,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, pause,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, intrwindow,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE4(vmm, vmx, exit, interrupt,
    "struct vmx *", "int", "struct vm_exit *", "uint32_t");

SDT_PROBE_DEFINE3(vmm, vmx, exit, nmiwindow,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, inout,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, cpuid,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE5(vmm, vmx, exit, exception,
    "struct vmx *", "int", "struct vm_exit *", "uint32_t", "int");

SDT_PROBE_DEFINE5(vmm, vmx, exit, nestedfault,
    "struct vmx *", "int", "struct vm_exit *", "uint64_t", "uint64_t");

SDT_PROBE_DEFINE4(vmm, vmx, exit, mmiofault,
    "struct vmx *", "int", "struct vm_exit *", "uint64_t");

SDT_PROBE_DEFINE3(vmm, vmx, exit, eoi,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, apicaccess,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE4(vmm, vmx, exit, apicwrite,
    "struct vmx *", "int", "struct vm_exit *", "struct vlapic *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, xsetbv,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, monitor,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, mwait,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, vminsn,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE4(vmm, vmx, exit, unknown,
    "struct vmx *", "int", "struct vm_exit *", "uint32_t");

SDT_PROBE_DEFINE4(vmm, vmx, exit, return,
    "struct vmx *", "int", "struct vm_exit *", "int");
/* END CSTYLED */

static int vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc);
static int vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval);
static void vmx_apply_tsc_adjust(struct vmx *, int);
static void vmx_apicv_sync_tmr(struct vlapic *vlapic);
static void vmx_tpr_shadow_enter(struct vlapic *vlapic);
static void vmx_tpr_shadow_exit(struct vlapic *vlapic);

static void
vmx_allow_x2apic_msrs(struct vmx *vmx, int vcpuid)
{
	/*
	 * Allow readonly access to the following x2APIC MSRs from the guest.
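	 * Reads are permitted through the MSR bitmap without causing a
	 * VM-exit, while writes to these registers remain intercepted.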
	 */
	guest_msr_ro(vmx, vcpuid, MSR_APIC_ID);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_VERSION);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_LDR);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_SVR);

	for (uint_t i = 0; i < 8; i++) {
		guest_msr_ro(vmx, vcpuid, MSR_APIC_ISR0 + i);
		guest_msr_ro(vmx, vcpuid, MSR_APIC_TMR0 + i);
		guest_msr_ro(vmx, vcpuid, MSR_APIC_IRR0 + i);
	}

	guest_msr_ro(vmx, vcpuid, MSR_APIC_ESR);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_LVT_TIMER);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_LVT_THERMAL);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_LVT_PCINT);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_LVT_LINT0);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_LVT_LINT1);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_LVT_ERROR);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_ICR_TIMER);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_DCR_TIMER);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_ICR);

	/*
	 * Allow TPR, EOI and SELF_IPI MSRs to be read and written by the
	 * guest.
	 *
	 * These registers get special treatment described in the section
	 * "Virtualizing MSR-Based APIC Accesses".
	 */
	guest_msr_rw(vmx, vcpuid, MSR_APIC_TPR);
	guest_msr_rw(vmx, vcpuid, MSR_APIC_EOI);
	guest_msr_rw(vmx, vcpuid, MSR_APIC_SELF_IPI);
}

static ulong_t
vmx_fix_cr0(ulong_t cr0)
{
	return ((cr0 | cr0_ones_mask) & ~cr0_zeros_mask);
}

static ulong_t
vmx_fix_cr4(ulong_t cr4)
{
	return ((cr4 | cr4_ones_mask) & ~cr4_zeros_mask);
}

static void
vpid_free(int vpid)
{
	if (vpid < 0 || vpid > 0xffff)
		panic("vpid_free: invalid vpid %d", vpid);

	/*
	 * VPIDs [0,VM_MAXCPU] are special and are not allocated from
	 * the unit number allocator.
	 */
	if (vpid > VM_MAXCPU)
		hma_vmx_vpid_free((uint16_t)vpid);
}

static void
vpid_alloc(uint16_t *vpid, int num)
{
	int i, x;

	if (num <= 0 || num > VM_MAXCPU)
		panic("invalid number of vpids requested: %d", num);

	/*
	 * If the "enable vpid" execution control is not enabled then the
	 * VPID is required to be 0 for all vcpus.
	 */
	if ((procbased_ctls2 & PROCBASED2_ENABLE_VPID) == 0) {
		for (i = 0; i < num; i++)
			vpid[i] = 0;
		return;
	}

	/*
	 * Allocate a unique VPID for each vcpu from the unit number allocator.
	 */
	for (i = 0; i < num; i++) {
		uint16_t tmp;

		tmp = hma_vmx_vpid_alloc();
		x = (tmp == 0) ? -1 : tmp;

		if (x == -1)
			break;
		else
			vpid[i] = x;
	}

	if (i < num) {
		atomic_add_int(&vpid_alloc_failed, 1);

		/*
		 * If the unit number allocator does not have enough unique
		 * VPIDs then we need to allocate from the [1,VM_MAXCPU] range.
		 *
		 * These VPIDs are not unique across VMs but this does not
		 * affect correctness because the combined mappings are also
		 * tagged with the EP4TA which is unique for each VM.
		 *
		 * It is still sub-optimal because the invvpid will invalidate
		 * combined mappings for a particular VPID across all EP4TAs.
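		 *
		 * For example, if two VMs have both fallen back to VPID 1,
		 * an invvpid of VPID 1 on behalf of one VM also flushes the
		 * other VM's combined mappings tagged with that VPID.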
		 */
		while (i-- > 0)
			vpid_free(vpid[i]);

		for (i = 0; i < num; i++)
			vpid[i] = i + 1;
	}
}

static int
vmx_cleanup(void)
{
	/* This is taken care of by the hma registration */
	return (0);
}

static void
vmx_restore(void)
{
	/* No-op on illumos */
}

static int
vmx_init(int ipinum)
{
	int error;
	uint64_t fixed0, fixed1;
	uint32_t tmp;
	enum vmx_caps avail_caps = VMX_CAP_NONE;

	/* Check support for primary processor-based VM-execution controls */
	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
	    MSR_VMX_TRUE_PROCBASED_CTLS,
	    PROCBASED_CTLS_ONE_SETTING,
	    PROCBASED_CTLS_ZERO_SETTING, &procbased_ctls);
	if (error) {
		printf("vmx_init: processor does not support desired primary "
		    "processor-based controls\n");
		return (error);
	}

	/* Clear the processor-based ctl bits that are set on demand */
	procbased_ctls &= ~PROCBASED_CTLS_WINDOW_SETTING;

	/* Check support for secondary processor-based VM-execution controls */
	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
	    MSR_VMX_PROCBASED_CTLS2,
	    PROCBASED_CTLS2_ONE_SETTING,
	    PROCBASED_CTLS2_ZERO_SETTING, &procbased_ctls2);
	if (error) {
		printf("vmx_init: processor does not support desired "
		    "secondary processor-based controls\n");
		return (error);
	}

	/* Check support for VPID */
	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
	    MSR_VMX_PROCBASED_CTLS2,
	    PROCBASED2_ENABLE_VPID,
	    0, &tmp);
	if (error == 0)
		procbased_ctls2 |= PROCBASED2_ENABLE_VPID;

	/* Check support for pin-based VM-execution controls */
	error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
	    MSR_VMX_TRUE_PINBASED_CTLS,
	    PINBASED_CTLS_ONE_SETTING,
	    PINBASED_CTLS_ZERO_SETTING, &pinbased_ctls);
	if (error) {
		printf("vmx_init: processor does not support desired "
		    "pin-based controls\n");
		return (error);
	}

	/* Check support for VM-exit controls */
	error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS,
	    VM_EXIT_CTLS_ONE_SETTING,
	    VM_EXIT_CTLS_ZERO_SETTING,
	    &exit_ctls);
	if (error) {
		printf("vmx_init: processor does not support desired "
		    "exit controls\n");
		return (error);
	}

	/* Check support for VM-entry controls */
	error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS, MSR_VMX_TRUE_ENTRY_CTLS,
	    VM_ENTRY_CTLS_ONE_SETTING, VM_ENTRY_CTLS_ZERO_SETTING,
	    &entry_ctls);
	if (error) {
		printf("vmx_init: processor does not support desired "
		    "entry controls\n");
		return (error);
	}

	/*
	 * Check support for optional features by testing them
	 * as individual bits
	 */
	cap_halt_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
	    MSR_VMX_TRUE_PROCBASED_CTLS,
	    PROCBASED_HLT_EXITING, 0,
	    &tmp) == 0);

	cap_monitor_trap = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
	    MSR_VMX_PROCBASED_CTLS,
	    PROCBASED_MTF, 0,
	    &tmp) == 0);

	cap_pause_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
	    MSR_VMX_TRUE_PROCBASED_CTLS,
	    PROCBASED_PAUSE_EXITING, 0,
	    &tmp) == 0);

	cap_invpcid = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
	    MSR_VMX_PROCBASED_CTLS2, PROCBASED2_ENABLE_INVPCID, 0,
	    &tmp) == 0);

	/*
	 * Check for APIC virtualization capabilities:
	 * - TPR shadowing
	 * - Full APICv (with or without x2APIC support)
	 * - Posted interrupt handling
	 */
	if (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, MSR_VMX_TRUE_PROCBASED_CTLS,
	    PROCBASED_USE_TPR_SHADOW, 0, &tmp) == 0) {
		avail_caps |= VMX_CAP_TPR_SHADOW;

		const uint32_t apicv_bits =
		    PROCBASED2_VIRTUALIZE_APIC_ACCESSES |
		    PROCBASED2_APIC_REGISTER_VIRTUALIZATION |
		    PROCBASED2_VIRTUALIZE_X2APIC_MODE |
		    PROCBASED2_VIRTUAL_INTERRUPT_DELIVERY;
		if (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
		    MSR_VMX_PROCBASED_CTLS2, apicv_bits, 0, &tmp) == 0) {
			avail_caps |= VMX_CAP_APICV;

			/*
			 * It may make sense in the future to differentiate
			 * hardware (or software) configurations with APICv but
			 * no support for accelerating x2APIC mode.
			 */
			avail_caps |= VMX_CAP_APICV_X2APIC;

			error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
			    MSR_VMX_TRUE_PINBASED_CTLS,
			    PINBASED_POSTED_INTERRUPT, 0, &tmp);
			if (error == 0) {
				/*
				 * If the PSM-provided interfaces for
				 * requesting and using a PIR IPI vector are
				 * present, use them for posted interrupts.
				 */
				if (psm_get_pir_ipivect != NULL &&
				    psm_send_pir_ipi != NULL) {
					pirvec = psm_get_pir_ipivect();
					avail_caps |= VMX_CAP_APICV_PIR;
				}
			}
		}
	}

	/* Initialize EPT */
	error = ept_init(ipinum);
	if (error) {
		printf("vmx_init: ept initialization failed (%d)\n", error);
		return (error);
	}

#ifdef __FreeBSD__
	guest_l1d_flush = (cpu_ia32_arch_caps &
	    IA32_ARCH_CAP_SKIP_L1DFL_VMENTRY) == 0;
	TUNABLE_INT_FETCH("hw.vmm.l1d_flush", &guest_l1d_flush);

	/*
	 * L1D cache flush is enabled.  Use IA32_FLUSH_CMD MSR when
	 * available.  Otherwise fall back to the software flush
	 * method which loads enough data from the kernel text to
	 * flush existing L1D content, both on VMX entry and on NMI
	 * return.
	 */
	if (guest_l1d_flush) {
		if ((cpu_stdext_feature3 & CPUID_STDEXT3_L1D_FLUSH) == 0) {
			guest_l1d_flush_sw = 1;
			TUNABLE_INT_FETCH("hw.vmm.l1d_flush_sw",
			    &guest_l1d_flush_sw);
		}
		if (guest_l1d_flush_sw) {
			if (nmi_flush_l1d_sw <= 1)
				nmi_flush_l1d_sw = 1;
		} else {
			msr_load_list[0].index = MSR_IA32_FLUSH_CMD;
			msr_load_list[0].val = IA32_FLUSH_CMD_L1D;
		}
	}
#else
	/* L1D flushing is taken care of by smt_acquire() and friends */
	guest_l1d_flush = 0;
#endif /* __FreeBSD__ */

	/*
	 * Stash the cr0 and cr4 bits that must be fixed to 0 or 1
	 */
	fixed0 = rdmsr(MSR_VMX_CR0_FIXED0);
	fixed1 = rdmsr(MSR_VMX_CR0_FIXED1);
	cr0_ones_mask = fixed0 & fixed1;
	cr0_zeros_mask = ~fixed0 & ~fixed1;

	/*
	 * Since Unrestricted Guest was already verified present, CR0_PE and
	 * CR0_PG are allowed to be set to zero in VMX non-root operation
	 */
	cr0_ones_mask &= ~(CR0_PG | CR0_PE);

	/*
	 * Do not allow the guest to set CR0_NW or CR0_CD.
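	 * These bits are forced clear in the real guest CR0 while the CR0
	 * read shadow continues to reflect the value the guest wrote.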
	 */
	cr0_zeros_mask |= (CR0_NW | CR0_CD);

	fixed0 = rdmsr(MSR_VMX_CR4_FIXED0);
	fixed1 = rdmsr(MSR_VMX_CR4_FIXED1);
	cr4_ones_mask = fixed0 & fixed1;
	cr4_zeros_mask = ~fixed0 & ~fixed1;

	vmx_msr_init();

	vmx_capabilities = avail_caps;
	vmx_initialized = 1;

	return (0);
}

static void
vmx_trigger_hostintr(int vector)
{
	VERIFY(vector >= 32 && vector <= 255);
	vmx_call_isr(vector - 32);
}

static void *
vmx_vminit(struct vm *vm, pmap_t pmap)
{
	uint16_t vpid[VM_MAXCPU];
	int i, error, datasel;
	struct vmx *vmx;
	uint32_t exc_bitmap;
	uint16_t maxcpus;
	uint32_t proc_ctls, proc2_ctls, pin_ctls;
	uint64_t apic_access_pa = UINT64_MAX;

	vmx = malloc(sizeof (struct vmx), M_VMX, M_WAITOK | M_ZERO);
	if ((uintptr_t)vmx & PAGE_MASK) {
		panic("malloc of struct vmx not aligned on %d byte boundary",
		    PAGE_SIZE);
	}
	vmx->vm = vm;

	vmx->eptp = eptp(vtophys((vm_offset_t)pmap->pm_pml4));

	/*
	 * Clean up EPTP-tagged guest physical and combined mappings
	 *
	 * VMX transitions are not required to invalidate any guest physical
	 * mappings. So, it may be possible for stale guest physical mappings
	 * to be present in the processor TLBs.
	 *
	 * Combined mappings for this EP4TA are also invalidated for all VPIDs.
	 */
	ept_invalidate_mappings(vmx->eptp);

	vmx_msr_bitmap_initialize(vmx);

	vpid_alloc(vpid, VM_MAXCPU);

	/* Grab the established defaults */
	proc_ctls = procbased_ctls;
	proc2_ctls = procbased_ctls2;
	pin_ctls = pinbased_ctls;
	/* For now, default to the available capabilities */
	vmx->vmx_caps = vmx_capabilities;

	if (vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW)) {
		proc_ctls |= PROCBASED_USE_TPR_SHADOW;
		proc_ctls &= ~PROCBASED_CR8_LOAD_EXITING;
		proc_ctls &= ~PROCBASED_CR8_STORE_EXITING;
	}
	if (vmx_cap_en(vmx, VMX_CAP_APICV)) {
		ASSERT(vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW));

		proc2_ctls |= (PROCBASED2_VIRTUALIZE_APIC_ACCESSES |
		    PROCBASED2_APIC_REGISTER_VIRTUALIZATION |
		    PROCBASED2_VIRTUAL_INTERRUPT_DELIVERY);

		/*
		 * Allocate a page of memory to back the APIC access address
		 * for when APICv features are in use.  Guest MMIO accesses
		 * should never actually reach this page, but rather be
		 * intercepted.
		 */
		vmx->apic_access_page = kmem_zalloc(PAGESIZE, KM_SLEEP);
		VERIFY3U((uintptr_t)vmx->apic_access_page & PAGEOFFSET, ==, 0);
		apic_access_pa = vtophys(vmx->apic_access_page);

		error = vm_map_mmio(vm, DEFAULT_APIC_BASE, PAGE_SIZE,
		    apic_access_pa);
		/* XXX this should really return an error to the caller */
		KASSERT(error == 0, ("vm_map_mmio(apicbase) error %d", error));
	}
	if (vmx_cap_en(vmx, VMX_CAP_APICV_PIR)) {
		ASSERT(vmx_cap_en(vmx, VMX_CAP_APICV));

		pin_ctls |= PINBASED_POSTED_INTERRUPT;
	}

	maxcpus = vm_get_maxcpus(vm);
	datasel = vmm_get_host_datasel();
	for (i = 0; i < maxcpus; i++) {
		/*
		 * Cache physical address lookups for various components which
		 * may be required inside the critical_enter() section implied
		 * by VMPTRLD() below.
		 */
		vm_paddr_t msr_bitmap_pa = vtophys(vmx->msr_bitmap[i]);
		vm_paddr_t apic_page_pa = vtophys(&vmx->apic_page[i]);
		vm_paddr_t pir_desc_pa = vtophys(&vmx->pir_desc[i]);

		vmx->vmcs_pa[i] = (uintptr_t)vtophys(&vmx->vmcs[i]);
		vmcs_initialize(&vmx->vmcs[i], vmx->vmcs_pa[i]);

		vmx_msr_guest_init(vmx, i);

		vmcs_load(vmx->vmcs_pa[i]);

		vmcs_write(VMCS_HOST_IA32_PAT, vmm_get_host_pat());
		vmcs_write(VMCS_HOST_IA32_EFER, vmm_get_host_efer());

		/* Load the control registers */
		vmcs_write(VMCS_HOST_CR0, vmm_get_host_cr0());
		vmcs_write(VMCS_HOST_CR4, vmm_get_host_cr4() | CR4_VMXE);

		/* Load the segment selectors */
		vmcs_write(VMCS_HOST_CS_SELECTOR, vmm_get_host_codesel());

		vmcs_write(VMCS_HOST_ES_SELECTOR, datasel);
		vmcs_write(VMCS_HOST_SS_SELECTOR, datasel);
		vmcs_write(VMCS_HOST_DS_SELECTOR, datasel);

		vmcs_write(VMCS_HOST_FS_SELECTOR, vmm_get_host_fssel());
		vmcs_write(VMCS_HOST_GS_SELECTOR, vmm_get_host_gssel());
		vmcs_write(VMCS_HOST_TR_SELECTOR, vmm_get_host_tsssel());

		/*
		 * Configure host sysenter MSRs to be restored on VM exit.
		 * The thread-specific MSR_INTC_SEP_ESP value is loaded in
		 * vmx_run.
		 */
		vmcs_write(VMCS_HOST_IA32_SYSENTER_CS, KCS_SEL);
		vmcs_write(VMCS_HOST_IA32_SYSENTER_EIP,
		    rdmsr(MSR_SYSENTER_EIP_MSR));

		/* instruction pointer */
		if (no_flush_rsb) {
			vmcs_write(VMCS_HOST_RIP, (uint64_t)vmx_exit_guest);
		} else {
			vmcs_write(VMCS_HOST_RIP,
			    (uint64_t)vmx_exit_guest_flush_rsb);
		}

		/* link pointer */
		vmcs_write(VMCS_LINK_POINTER, ~0);

		vmcs_write(VMCS_EPTP, vmx->eptp);
		vmcs_write(VMCS_PIN_BASED_CTLS, pin_ctls);
		vmcs_write(VMCS_PRI_PROC_BASED_CTLS, proc_ctls);
		vmcs_write(VMCS_SEC_PROC_BASED_CTLS, proc2_ctls);
		vmcs_write(VMCS_EXIT_CTLS, exit_ctls);
		vmcs_write(VMCS_ENTRY_CTLS, entry_ctls);
		vmcs_write(VMCS_MSR_BITMAP, msr_bitmap_pa);
		vmcs_write(VMCS_VPID, vpid[i]);

		if (guest_l1d_flush && !guest_l1d_flush_sw) {
			vmcs_write(VMCS_ENTRY_MSR_LOAD, pmap_kextract(
			    (vm_offset_t)&msr_load_list[0]));
			vmcs_write(VMCS_ENTRY_MSR_LOAD_COUNT,
			    nitems(msr_load_list));
			vmcs_write(VMCS_EXIT_MSR_STORE, 0);
			vmcs_write(VMCS_EXIT_MSR_STORE_COUNT, 0);
		}

		/* exception bitmap */
		if (vcpu_trace_exceptions(vm, i))
			exc_bitmap = 0xffffffff;
		else
			exc_bitmap = 1 << IDT_MC;
		vmcs_write(VMCS_EXCEPTION_BITMAP, exc_bitmap);

		vmx->ctx[i].guest_dr6 = DBREG_DR6_RESERVED1;
		vmcs_write(VMCS_GUEST_DR7, DBREG_DR7_RESERVED1);

		if (vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW)) {
			vmcs_write(VMCS_VIRTUAL_APIC, apic_page_pa);
		}

		if (vmx_cap_en(vmx, VMX_CAP_APICV)) {
			vmcs_write(VMCS_APIC_ACCESS, apic_access_pa);
			vmcs_write(VMCS_EOI_EXIT0, 0);
			vmcs_write(VMCS_EOI_EXIT1, 0);
			vmcs_write(VMCS_EOI_EXIT2, 0);
			vmcs_write(VMCS_EOI_EXIT3, 0);
		}
		if (vmx_cap_en(vmx, VMX_CAP_APICV_PIR)) {
			vmcs_write(VMCS_PIR_VECTOR, pirvec);
			vmcs_write(VMCS_PIR_DESC, pir_desc_pa);
		}

		/*
		 * Set up the CR0/4 masks and configure the read shadow state
		 * to the power-on register value from the Intel Sys Arch.
		 *  CR0 - 0x60000010
		 *  CR4 - 0
		 */
		vmcs_write(VMCS_CR0_MASK, cr0_ones_mask | cr0_zeros_mask);
		vmcs_write(VMCS_CR0_SHADOW, 0x60000010);
		vmcs_write(VMCS_CR4_MASK, cr4_ones_mask | cr4_zeros_mask);
		vmcs_write(VMCS_CR4_SHADOW, 0);

		vmcs_clear(vmx->vmcs_pa[i]);

		vmx->cap[i].set = 0;
		vmx->cap[i].proc_ctls = proc_ctls;
		vmx->cap[i].proc_ctls2 = proc2_ctls;
		vmx->cap[i].exc_bitmap = exc_bitmap;

		vmx->state[i].nextrip = ~0;
		vmx->state[i].lastcpu = NOCPU;
		vmx->state[i].vpid = vpid[i];

		vmx->ctx[i].pmap = pmap;
	}

	return (vmx);
}

static int
vmx_handle_cpuid(struct vm *vm, int vcpu, struct vmxctx *vmxctx)
{
	int handled;

	handled = x86_emulate_cpuid(vm, vcpu, (uint64_t *)&vmxctx->guest_rax,
	    (uint64_t *)&vmxctx->guest_rbx, (uint64_t *)&vmxctx->guest_rcx,
	    (uint64_t *)&vmxctx->guest_rdx);
	return (handled);
}

static __inline void
vmx_run_trace(struct vmx *vmx, int vcpu)
{
#ifdef KTR
	VCPU_CTR1(vmx->vm, vcpu, "Resume execution at %lx", vmcs_guest_rip());
#endif
}

static __inline void
vmx_astpending_trace(struct vmx *vmx, int vcpu, uint64_t rip)
{
#ifdef KTR
	VCPU_CTR1(vmx->vm, vcpu, "astpending vmexit at 0x%0lx", rip);
#endif
}

static VMM_STAT_INTEL(VCPU_INVVPID_SAVED, "Number of vpid invalidations saved");
static VMM_STAT_INTEL(VCPU_INVVPID_DONE, "Number of vpid invalidations done");

#define	INVVPID_TYPE_ADDRESS		0UL
#define	INVVPID_TYPE_SINGLE_CONTEXT	1UL
#define	INVVPID_TYPE_ALL_CONTEXTS	2UL

struct invvpid_desc {
	uint16_t	vpid;
	uint16_t	_res1;
	uint32_t	_res2;
	uint64_t	linear_addr;
};
CTASSERT(sizeof (struct invvpid_desc) == 16);

static __inline void
invvpid(uint64_t type, struct invvpid_desc desc)
{
	int error;

	__asm __volatile("invvpid %[desc], %[type];"
	    VMX_SET_ERROR_CODE_ASM
	    : [error] "=r" (error)
	    : [desc] "m" (desc), [type] "r" (type)
	    : "memory");

	if (error)
		panic("invvpid error %d", error);
}

/*
 * Invalidate guest mappings identified by its vpid from the TLB.
 */
static __inline void
vmx_invvpid(struct vmx *vmx, int vcpu, pmap_t pmap, int running)
{
	struct vmxstate *vmxstate;
	struct invvpid_desc invvpid_desc;

	vmxstate = &vmx->state[vcpu];
	if (vmxstate->vpid == 0)
		return;

	if (!running) {
		/*
		 * Set the 'lastcpu' to an invalid host cpu.
		 *
		 * This will invalidate TLB entries tagged with the vcpu's
		 * vpid the next time it runs via vmx_set_pcpu_defaults().
		 */
		vmxstate->lastcpu = NOCPU;
		return;
	}

	/*
	 * Invalidate all mappings tagged with 'vpid'
	 *
	 * We do this because this vcpu was executing on a different host
	 * cpu when it last ran. We do not track whether it invalidated
	 * mappings associated with its 'vpid' during that run. So we must
	 * assume that the mappings associated with 'vpid' on 'curcpu' are
	 * stale and invalidate them.
	 *
	 * Note that we incur this penalty only when the scheduler chooses to
	 * move the thread associated with this vcpu between host cpus.
	 *
	 * Note also that this will invalidate mappings tagged with 'vpid'
	 * for "all" EP4TAs.
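	 * (The single-context invvpid below is keyed only by VPID, so the
	 * flush cannot be narrowed to the current EPTP.)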
	 */
	if (pmap->pm_eptgen == vmx->eptgen[curcpu]) {
		invvpid_desc._res1 = 0;
		invvpid_desc._res2 = 0;
		invvpid_desc.vpid = vmxstate->vpid;
		invvpid_desc.linear_addr = 0;
		invvpid(INVVPID_TYPE_SINGLE_CONTEXT, invvpid_desc);
		vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_DONE, 1);
	} else {
		/*
		 * The invvpid can be skipped if an invept is going to
		 * be performed before entering the guest. The invept
		 * will invalidate combined mappings tagged with
		 * 'vmx->eptp' for all vpids.
		 */
		vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_SAVED, 1);
	}
}

static void
vmx_set_pcpu_defaults(struct vmx *vmx, int vcpu, pmap_t pmap)
{
	struct vmxstate *vmxstate;

	/*
	 * Regardless of whether the VM appears to have migrated between CPUs,
	 * save the host sysenter stack pointer.  As it points to the kernel
	 * stack of each thread, the correct value must be maintained for every
	 * trip into the critical section.
	 */
	vmcs_write(VMCS_HOST_IA32_SYSENTER_ESP, rdmsr(MSR_SYSENTER_ESP_MSR));

	/*
	 * Perform any needed TSC_OFFSET adjustment based on TSC_MSR writes or
	 * migration between host CPUs with differing TSC values.
	 */
	vmx_apply_tsc_adjust(vmx, vcpu);

	vmxstate = &vmx->state[vcpu];
	if (vmxstate->lastcpu == curcpu)
		return;

	vmxstate->lastcpu = curcpu;

	vmm_stat_incr(vmx->vm, vcpu, VCPU_MIGRATIONS, 1);

	/* Load the per-CPU IDT address */
	vmcs_write(VMCS_HOST_IDTR_BASE, vmm_get_host_idtrbase());
	vmcs_write(VMCS_HOST_TR_BASE, vmm_get_host_trbase());
	vmcs_write(VMCS_HOST_GDTR_BASE, vmm_get_host_gdtrbase());
	vmcs_write(VMCS_HOST_GS_BASE, vmm_get_host_gsbase());
	vmx_invvpid(vmx, vcpu, pmap, 1);
}

/*
 * We depend on 'procbased_ctls' to have the Interrupt Window Exiting bit set.
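 * The CTASSERT below enforces this at compile time.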
 */
CTASSERT((PROCBASED_CTLS_ONE_SETTING & PROCBASED_INT_WINDOW_EXITING) != 0);

static __inline void
vmx_set_int_window_exiting(struct vmx *vmx, int vcpu)
{
	if ((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) == 0) {
		vmx->cap[vcpu].proc_ctls |= PROCBASED_INT_WINDOW_EXITING;
		vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
		VCPU_CTR0(vmx->vm, vcpu, "Enabling interrupt window exiting");
	}
}

static __inline void
vmx_clear_int_window_exiting(struct vmx *vmx, int vcpu)
{
	KASSERT((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0,
	    ("intr_window_exiting not set: %x", vmx->cap[vcpu].proc_ctls));
	vmx->cap[vcpu].proc_ctls &= ~PROCBASED_INT_WINDOW_EXITING;
	vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
	VCPU_CTR0(vmx->vm, vcpu, "Disabling interrupt window exiting");
}

static __inline bool
vmx_nmi_window_exiting(struct vmx *vmx, int vcpu)
{
	return ((vmx->cap[vcpu].proc_ctls & PROCBASED_NMI_WINDOW_EXITING) != 0);
}

static __inline void
vmx_set_nmi_window_exiting(struct vmx *vmx, int vcpu)
{
	if (!vmx_nmi_window_exiting(vmx, vcpu)) {
		vmx->cap[vcpu].proc_ctls |= PROCBASED_NMI_WINDOW_EXITING;
		vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
	}
}

static __inline void
vmx_clear_nmi_window_exiting(struct vmx *vmx, int vcpu)
{
	ASSERT(vmx_nmi_window_exiting(vmx, vcpu));
	vmx->cap[vcpu].proc_ctls &= ~PROCBASED_NMI_WINDOW_EXITING;
	vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
}

/*
 * Set the TSC adjustment, taking into account the offsets measured between
 * host physical CPUs.  This is required even if the guest has not set a TSC
 * offset, since vCPUs inherit the TSC offset of whatever physical CPU they
 * have migrated onto.  Without this mitigation, un-synched host TSCs will
 * convey the appearance of TSC time-travel to the guest as its vCPUs migrate.
 */
static void
vmx_apply_tsc_adjust(struct vmx *vmx, int vcpu)
{
	const uint64_t offset = vcpu_tsc_offset(vmx->vm, vcpu, true);

	ASSERT(vmx->cap[vcpu].proc_ctls & PROCBASED_TSC_OFFSET);

	if (vmx->tsc_offset_active[vcpu] != offset) {
		vmcs_write(VMCS_TSC_OFFSET, offset);
		vmx->tsc_offset_active[vcpu] = offset;
	}
}

#define	NMI_BLOCKING	(VMCS_INTERRUPTIBILITY_NMI_BLOCKING |	\
			VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)
#define	HWINTR_BLOCKING	(VMCS_INTERRUPTIBILITY_STI_BLOCKING |	\
			VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)

static void
vmx_inject_nmi(struct vmx *vmx, int vcpu)
{
	ASSERT0(vmcs_read(VMCS_GUEST_INTERRUPTIBILITY) & NMI_BLOCKING);
	ASSERT0(vmcs_read(VMCS_ENTRY_INTR_INFO) & VMCS_INTR_VALID);

	/*
	 * Inject the virtual NMI.  The vector must be the NMI IDT entry
	 * or the VMCS entry check will fail.
	 */
	vmcs_write(VMCS_ENTRY_INTR_INFO,
	    IDT_NMI | VMCS_INTR_T_NMI | VMCS_INTR_VALID);

	/* Clear the request */
	vm_nmi_clear(vmx->vm, vcpu);
}

/*
 * Inject exceptions, NMIs, and ExtINTs.
 *
 * The logic behind these is complicated and may involve mutex contention, so
 * the injection is performed without the protection of host CPU interrupts
 * being disabled.  This means a racing notification could be "lost",
 * necessitating a later call to vmx_inject_recheck() to close that window
 * of opportunity.
 */
static enum event_inject_state
vmx_inject_events(struct vmx *vmx, int vcpu, uint64_t rip)
{
	uint64_t entryinfo;
	uint32_t gi, info;
	int vector;
	enum event_inject_state state;

	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
	info = vmcs_read(VMCS_ENTRY_INTR_INFO);
	state = EIS_CAN_INJECT;

	/* Clear any interrupt blocking if the guest %rip has changed */
	if (vmx->state[vcpu].nextrip != rip && (gi & HWINTR_BLOCKING) != 0) {
		gi &= ~HWINTR_BLOCKING;
		vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
	}

	/*
	 * It could be that an interrupt is already pending for injection from
	 * the VMCS.  This would be the case if the vCPU exited for conditions
	 * such as an AST before a vm-entry delivered the injection.
	 */
	if ((info & VMCS_INTR_VALID) != 0) {
		return (EIS_EV_EXISTING | EIS_REQ_EXIT);
	}

	if (vm_entry_intinfo(vmx->vm, vcpu, &entryinfo)) {
		ASSERT(entryinfo & VMCS_INTR_VALID);

		info = entryinfo;
		vector = info & 0xff;
		if (vector == IDT_BP || vector == IDT_OF) {
			/*
			 * VT-x requires #BP and #OF to be injected as software
			 * exceptions.
			 */
			info &= ~VMCS_INTR_T_MASK;
			info |= VMCS_INTR_T_SWEXCEPTION;
		}

		if (info & VMCS_INTR_DEL_ERRCODE) {
			vmcs_write(VMCS_ENTRY_EXCEPTION_ERROR, entryinfo >> 32);
		}

		vmcs_write(VMCS_ENTRY_INTR_INFO, info);
		state = EIS_EV_INJECTED;
	}

	if (vm_nmi_pending(vmx->vm, vcpu)) {
		/*
		 * If there are no conditions blocking NMI injection then
		 * inject it directly here; otherwise enable "NMI window
		 * exiting" to inject it as soon as we can.
		 *
		 * According to the Intel manual, some CPUs do not allow NMI
		 * injection when STI_BLOCKING is active.  That check is
		 * enforced here, regardless of CPU capability.  If running on
		 * a CPU without such a restriction it will immediately exit
		 * and the NMI will be injected in the "NMI window exiting"
		 * handler.
		 */
		if ((gi & (HWINTR_BLOCKING | NMI_BLOCKING)) == 0) {
			if (state == EIS_CAN_INJECT) {
				vmx_inject_nmi(vmx, vcpu);
				state = EIS_EV_INJECTED;
			} else {
				return (state | EIS_REQ_EXIT);
			}
		} else {
			vmx_set_nmi_window_exiting(vmx, vcpu);
		}
	}

	if (vm_extint_pending(vmx->vm, vcpu)) {
		if (state != EIS_CAN_INJECT) {
			return (state | EIS_REQ_EXIT);
		}
		if ((gi & HWINTR_BLOCKING) != 0 ||
		    (vmcs_read(VMCS_GUEST_RFLAGS) & PSL_I) == 0) {
			return (EIS_GI_BLOCK);
		}

		/* Ask the legacy pic for a vector to inject */
		vatpic_pending_intr(vmx->vm, &vector);

		/*
		 * From the Intel SDM, Volume 3, Section "Maskable
		 * Hardware Interrupts":
		 * - maskable interrupt vectors [0,255] can be delivered
		 *   through the INTR pin.
		 */
		KASSERT(vector >= 0 && vector <= 255,
		    ("invalid vector %d from INTR", vector));

		/* Inject the interrupt */
		vmcs_write(VMCS_ENTRY_INTR_INFO,
		    VMCS_INTR_T_HWINTR | VMCS_INTR_VALID | vector);

		vm_extint_clear(vmx->vm, vcpu);
		vatpic_intr_accepted(vmx->vm, vector);
		state = EIS_EV_INJECTED;
	}

	return (state);
}

/*
 * Inject any interrupts pending on the vLAPIC.
 *
 * This is done with host CPU interrupts disabled so notification IPIs, either
 * from the standard vCPU notification or APICv posted interrupts, will be
 * queued on the host APIC and recognized when entering VMX context.
 */
static enum event_inject_state
vmx_inject_vlapic(struct vmx *vmx, int vcpu, struct vlapic *vlapic)
{
	int vector;

	if (!vlapic_pending_intr(vlapic, &vector)) {
		return (EIS_CAN_INJECT);
	}

	/*
	 * From the Intel SDM, Volume 3, Section "Maskable
	 * Hardware Interrupts":
	 * - maskable interrupt vectors [16,255] can be delivered
	 *   through the local APIC.
	 */
	KASSERT(vector >= 16 && vector <= 255,
	    ("invalid vector %d from local APIC", vector));

	if (vmx_cap_en(vmx, VMX_CAP_APICV)) {
		uint16_t status_old = vmcs_read(VMCS_GUEST_INTR_STATUS);
		uint16_t status_new = (status_old & 0xff00) | vector;

		/*
		 * The APICv state will have been synced into the vLAPIC
		 * as part of vlapic_pending_intr().  Prepare the VMCS
		 * for the to-be-injected pending interrupt.
		 */
		if (status_new > status_old) {
			vmcs_write(VMCS_GUEST_INTR_STATUS, status_new);
			VCPU_CTR2(vlapic->vm, vlapic->vcpuid,
			    "vmx_inject_interrupts: guest_intr_status "
			    "changed from 0x%04x to 0x%04x",
			    status_old, status_new);
		}

		/*
		 * Ensure VMCS state regarding EOI traps is kept in sync
		 * with the TMRs in the vlapic.
		 */
		vmx_apicv_sync_tmr(vlapic);

		/*
		 * The rest of the injection process for injecting the
		 * interrupt(s) is handled by APICv.  It does not preclude
		 * other event injection from occurring.
		 */
		return (EIS_CAN_INJECT);
	}

	ASSERT0(vmcs_read(VMCS_ENTRY_INTR_INFO) & VMCS_INTR_VALID);

	/* Does guest interruptibility block injection? */
	if ((vmcs_read(VMCS_GUEST_INTERRUPTIBILITY) & HWINTR_BLOCKING) != 0 ||
	    (vmcs_read(VMCS_GUEST_RFLAGS) & PSL_I) == 0) {
		return (EIS_GI_BLOCK);
	}

	/* Inject the interrupt */
	vmcs_write(VMCS_ENTRY_INTR_INFO,
	    VMCS_INTR_T_HWINTR | VMCS_INTR_VALID | vector);

	/* Update the Local APIC ISR */
	vlapic_intr_accepted(vlapic, vector);

	return (EIS_EV_INJECTED);
}

/*
 * Re-check for events to be injected.
 *
 * Once host CPU interrupts are disabled, check for the presence of any events
 * which require injection processing.  If an exit is required upon injection,
 * or once the guest becomes interruptible, that will be configured too.
 */
static bool
vmx_inject_recheck(struct vmx *vmx, int vcpu, enum event_inject_state state)
{
	if (state == EIS_CAN_INJECT) {
		if (vm_nmi_pending(vmx->vm, vcpu) &&
		    !vmx_nmi_window_exiting(vmx, vcpu)) {
			/* queued NMI not blocked by NMI-window-exiting */
			return (true);
		}
		if (vm_extint_pending(vmx->vm, vcpu)) {
			/* queued ExtINT not blocked by existing injection */
			return (true);
		}
	} else {
		if ((state & EIS_REQ_EXIT) != 0) {
			/*
			 * Use a self-IPI to force an immediate exit after
			 * event injection has occurred.
			 */
			poke_cpu(CPU->cpu_id);
		} else {
			/*
			 * If any event is being injected, an exit immediately
			 * upon becoming interruptible again will allow pending
			 * or newly queued events to be injected in a timely
			 * manner.
			 */
			vmx_set_int_window_exiting(vmx, vcpu);
		}
	}
	return (false);
}

/*
 * If the Virtual NMIs execution control is '1' then the logical processor
 * tracks virtual-NMI blocking in the Guest Interruptibility-state field of
 * the VMCS.  An IRET instruction in VMX non-root operation will remove any
 * virtual-NMI blocking.
 *
 * This unblocking occurs even if the IRET causes a fault.  In this case the
 * hypervisor needs to restore virtual-NMI blocking before resuming the guest.
 */
static void
vmx_restore_nmi_blocking(struct vmx *vmx, int vcpuid)
{
	uint32_t gi;

	VCPU_CTR0(vmx->vm, vcpuid, "Restore Virtual-NMI blocking");
	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
	gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
	vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
}

static void
vmx_clear_nmi_blocking(struct vmx *vmx, int vcpuid)
{
	uint32_t gi;

	VCPU_CTR0(vmx->vm, vcpuid, "Clear Virtual-NMI blocking");
	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
	gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
	vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
}

static void
vmx_assert_nmi_blocking(struct vmx *vmx, int vcpuid)
{
	uint32_t gi;

	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
	KASSERT(gi & VMCS_INTERRUPTIBILITY_NMI_BLOCKING,
	    ("NMI blocking is not in effect %x", gi));
}

static int
vmx_emulate_xsetbv(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
{
	struct vmxctx *vmxctx;
	uint64_t xcrval;
	const struct xsave_limits *limits;

	vmxctx = &vmx->ctx[vcpu];
	limits = vmm_get_xsave_limits();

	/*
	 * Note that the processor raises a GP# fault on its own if
	 * xsetbv is executed for CPL != 0, so we do not have to
	 * emulate that fault here.
	 */

	/* Only xcr0 is supported. */
	if (vmxctx->guest_rcx != 0) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/* We only handle xcr0 if both the host and guest have XSAVE enabled. */
	if (!limits->xsave_enabled ||
	    !(vmcs_read(VMCS_GUEST_CR4) & CR4_XSAVE)) {
		vm_inject_ud(vmx->vm, vcpu);
		return (HANDLED);
	}

	xcrval = vmxctx->guest_rdx << 32 | (vmxctx->guest_rax & 0xffffffff);
	if ((xcrval & ~limits->xcr0_allowed) != 0) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	if (!(xcrval & XFEATURE_ENABLED_X87)) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/* AVX (YMM_Hi128) requires SSE. */
	if (xcrval & XFEATURE_ENABLED_AVX &&
	    (xcrval & XFEATURE_AVX) != XFEATURE_AVX) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/*
	 * AVX512 requires base AVX (YMM_Hi128) as well as OpMask,
	 * ZMM_Hi256, and Hi16_ZMM.
	 */
	if (xcrval & XFEATURE_AVX512 &&
	    (xcrval & (XFEATURE_AVX512 | XFEATURE_AVX)) !=
	    (XFEATURE_AVX512 | XFEATURE_AVX)) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/*
	 * Intel MPX requires both bound register state flags to be
	 * set.
	 */
	if (((xcrval & XFEATURE_ENABLED_BNDREGS) != 0) !=
	    ((xcrval & XFEATURE_ENABLED_BNDCSR) != 0)) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/*
	 * This runs "inside" vmrun() with the guest's FPU state, so
	 * modifying xcr0 directly modifies the guest's xcr0, not the
	 * host's.
	 */
	load_xcr(0, xcrval);
	return (HANDLED);
}

static uint64_t
vmx_get_guest_reg(struct vmx *vmx, int vcpu, int ident)
{
	const struct vmxctx *vmxctx;

	vmxctx = &vmx->ctx[vcpu];

	switch (ident) {
	case 0:
		return (vmxctx->guest_rax);
	case 1:
		return (vmxctx->guest_rcx);
	case 2:
		return (vmxctx->guest_rdx);
	case 3:
		return (vmxctx->guest_rbx);
	case 4:
		return (vmcs_read(VMCS_GUEST_RSP));
	case 5:
		return (vmxctx->guest_rbp);
	case 6:
		return (vmxctx->guest_rsi);
	case 7:
		return (vmxctx->guest_rdi);
	case 8:
		return (vmxctx->guest_r8);
	case 9:
		return (vmxctx->guest_r9);
	case 10:
		return (vmxctx->guest_r10);
	case 11:
		return (vmxctx->guest_r11);
	case 12:
		return (vmxctx->guest_r12);
	case 13:
		return (vmxctx->guest_r13);
	case 14:
		return (vmxctx->guest_r14);
	case 15:
		return (vmxctx->guest_r15);
	default:
		panic("invalid vmx register %d", ident);
	}
}

static void
vmx_set_guest_reg(struct vmx *vmx, int vcpu, int ident, uint64_t regval)
{
	struct vmxctx *vmxctx;

	vmxctx = &vmx->ctx[vcpu];

	switch (ident) {
	case 0:
		vmxctx->guest_rax = regval;
		break;
	case 1:
		vmxctx->guest_rcx = regval;
		break;
	case 2:
		vmxctx->guest_rdx = regval;
		break;
	case 3:
		vmxctx->guest_rbx = regval;
		break;
	case 4:
		vmcs_write(VMCS_GUEST_RSP, regval);
		break;
	case 5:
		vmxctx->guest_rbp = regval;
		break;
	case 6:
		vmxctx->guest_rsi = regval;
		break;
	case 7:
		vmxctx->guest_rdi = regval;
		break;
	case 8:
		vmxctx->guest_r8 = regval;
		break;
	case 9:
		vmxctx->guest_r9 = regval;
		break;
	case 10:
		vmxctx->guest_r10 = regval;
		break;
	case 11:
		vmxctx->guest_r11 = regval;
		break;
	case 12:
		vmxctx->guest_r12 = regval;
		break;
	case 13:
		vmxctx->guest_r13 = regval;
		break;
	case 14:
		vmxctx->guest_r14 = regval;
		break;
	case 15:
		vmxctx->guest_r15 = regval;
		break;
	default:
		panic("invalid vmx register %d", ident);
	}
}

static int
vmx_emulate_cr0_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
{
	uint64_t crval, regval;

	/* We only handle mov to %cr0 at this time */
	if ((exitqual & 0xf0) != 0x00)
		return (UNHANDLED);

	regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);

	vmcs_write(VMCS_CR0_SHADOW, regval);

	crval = regval | cr0_ones_mask;
	crval &= ~cr0_zeros_mask;
	vmcs_write(VMCS_GUEST_CR0, crval);

	if (regval & CR0_PG) {
		uint64_t efer, entry_ctls;

		/*
		 * If CR0.PG is 1 and EFER.LME is 1 then EFER.LMA and
		 * the "IA-32e mode guest" bit in VM-entry control must be
		 * equal.
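		 * Otherwise the subsequent VM-entry fails its guest-state
		 * consistency checks.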
		 */
		efer = vmcs_read(VMCS_GUEST_IA32_EFER);
		if (efer & EFER_LME) {
			efer |= EFER_LMA;
			vmcs_write(VMCS_GUEST_IA32_EFER, efer);
			entry_ctls = vmcs_read(VMCS_ENTRY_CTLS);
			entry_ctls |= VM_ENTRY_GUEST_LMA;
			vmcs_write(VMCS_ENTRY_CTLS, entry_ctls);
		}
	}

	return (HANDLED);
}

static int
vmx_emulate_cr4_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
{
	uint64_t crval, regval;

	/* We only handle mov to %cr4 at this time */
	if ((exitqual & 0xf0) != 0x00)
		return (UNHANDLED);

	regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);

	vmcs_write(VMCS_CR4_SHADOW, regval);

	crval = regval | cr4_ones_mask;
	crval &= ~cr4_zeros_mask;
	vmcs_write(VMCS_GUEST_CR4, crval);

	return (HANDLED);
}

static int
vmx_emulate_cr8_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
{
	struct vlapic *vlapic;
	uint64_t cr8;
	int regnum;

	/* We only handle mov %cr8 to/from a register at this time. */
	if ((exitqual & 0xe0) != 0x00) {
		return (UNHANDLED);
	}

	vlapic = vm_lapic(vmx->vm, vcpu);
	regnum = (exitqual >> 8) & 0xf;
	if (exitqual & 0x10) {
		cr8 = vlapic_get_cr8(vlapic);
		vmx_set_guest_reg(vmx, vcpu, regnum, cr8);
	} else {
		cr8 = vmx_get_guest_reg(vmx, vcpu, regnum);
		vlapic_set_cr8(vlapic, cr8);
	}

	return (HANDLED);
}

/*
 * From section "Guest Register State" in the Intel SDM: CPL = SS.DPL
 */
static int
vmx_cpl(void)
{
	uint32_t ssar;

	ssar = vmcs_read(VMCS_GUEST_SS_ACCESS_RIGHTS);
	return ((ssar >> 5) & 0x3);
}

static enum vm_cpu_mode
vmx_cpu_mode(void)
{
	uint32_t csar;

	if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LMA) {
		csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS);
		if (csar & 0x2000)
			return (CPU_MODE_64BIT);	/* CS.L = 1 */
		else
			return (CPU_MODE_COMPATIBILITY);
	} else if (vmcs_read(VMCS_GUEST_CR0) & CR0_PE) {
		return (CPU_MODE_PROTECTED);
	} else {
		return (CPU_MODE_REAL);
	}
}

static enum vm_paging_mode
vmx_paging_mode(void)
{
	if (!(vmcs_read(VMCS_GUEST_CR0) & CR0_PG))
		return (PAGING_MODE_FLAT);
	if (!(vmcs_read(VMCS_GUEST_CR4) & CR4_PAE))
		return (PAGING_MODE_32);
	if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LME)
		return (PAGING_MODE_64);
	else
		return (PAGING_MODE_PAE);
}

static void
vmx_paging_info(struct vm_guest_paging *paging)
{
	paging->cr3 = vmcs_guest_cr3();
	paging->cpl = vmx_cpl();
	paging->cpu_mode = vmx_cpu_mode();
	paging->paging_mode = vmx_paging_mode();
}

static void
vmexit_mmio_emul(struct vm_exit *vmexit, struct vie *vie, uint64_t gpa,
    uint64_t gla)
{
	struct vm_guest_paging paging;
	uint32_t csar;

	vmexit->exitcode = VM_EXITCODE_MMIO_EMUL;
	vmexit->inst_length = 0;
	vmexit->u.mmio_emul.gpa = gpa;
	vmexit->u.mmio_emul.gla = gla;
	vmx_paging_info(&paging);

	switch (paging.cpu_mode) {
	case CPU_MODE_REAL:
		vmexit->u.mmio_emul.cs_base = vmcs_read(VMCS_GUEST_CS_BASE);
		vmexit->u.mmio_emul.cs_d = 0;
		break;
	case CPU_MODE_PROTECTED:
	case CPU_MODE_COMPATIBILITY:
		vmexit->u.mmio_emul.cs_base = vmcs_read(VMCS_GUEST_CS_BASE);
		csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS);
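		/* The CS default-operation-size (D) bit selects 16/32-bit */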
vmexit->u.mmio_emul.cs_d = SEG_DESC_DEF32(csar); 1702 break; 1703 default: 1704 vmexit->u.mmio_emul.cs_base = 0; 1705 vmexit->u.mmio_emul.cs_d = 0; 1706 break; 1707 } 1708 1709 vie_init_mmio(vie, NULL, 0, &paging, gpa); 1710 } 1711 1712 static void 1713 vmexit_inout(struct vm_exit *vmexit, struct vie *vie, uint64_t qual, 1714 uint32_t eax) 1715 { 1716 struct vm_guest_paging paging; 1717 struct vm_inout *inout; 1718 1719 inout = &vmexit->u.inout; 1720 1721 inout->bytes = (qual & 0x7) + 1; 1722 inout->flags = 0; 1723 inout->flags |= (qual & 0x8) ? INOUT_IN : 0; 1724 inout->flags |= (qual & 0x10) ? INOUT_STR : 0; 1725 inout->flags |= (qual & 0x20) ? INOUT_REP : 0; 1726 inout->port = (uint16_t)(qual >> 16); 1727 inout->eax = eax; 1728 if (inout->flags & INOUT_STR) { 1729 uint64_t inst_info; 1730 1731 inst_info = vmcs_read(VMCS_EXIT_INSTRUCTION_INFO); 1732 1733 /* 1734 * According to the SDM, bits 9:7 encode the address size of the 1735 * ins/outs operation, but only values 0/1/2 are expected, 1736 * corresponding to 16/32/64 bit sizes. 1737 */ 1738 inout->addrsize = 2 << BITX(inst_info, 9, 7); 1739 VERIFY(inout->addrsize == 2 || inout->addrsize == 4 || 1740 inout->addrsize == 8); 1741 1742 if (inout->flags & INOUT_IN) { 1743 /* 1744 * The bits describing the segment in INSTRUCTION_INFO 1745 * are not defined for ins, leaving it to system 1746 * software to assume %es (encoded as 0) 1747 */ 1748 inout->segment = 0; 1749 } else { 1750 /* 1751 * Bits 15-17 encode the segment for OUTS. 1752 * This value follows the standard x86 segment order. 1753 */ 1754 inout->segment = (inst_info >> 15) & 0x7; 1755 } 1756 } 1757 1758 vmexit->exitcode = VM_EXITCODE_INOUT; 1759 vmx_paging_info(&paging); 1760 vie_init_inout(vie, inout, vmexit->inst_length, &paging); 1761 1762 /* The in/out emulation will handle advancing %rip */ 1763 vmexit->inst_length = 0; 1764 } 1765 1766 static int 1767 ept_fault_type(uint64_t ept_qual) 1768 { 1769 int fault_type; 1770 1771 if (ept_qual & EPT_VIOLATION_DATA_WRITE) 1772 fault_type = PROT_WRITE; 1773 else if (ept_qual & EPT_VIOLATION_INST_FETCH) 1774 fault_type = PROT_EXEC; 1775 else 1776 fault_type = PROT_READ; 1777 1778 return (fault_type); 1779 } 1780 1781 static bool 1782 ept_emulation_fault(uint64_t ept_qual) 1783 { 1784 int read, write; 1785 1786 /* EPT fault on an instruction fetch doesn't make sense here */ 1787 if (ept_qual & EPT_VIOLATION_INST_FETCH) 1788 return (false); 1789 1790 /* EPT fault must be a read fault or a write fault */ 1791 read = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0; 1792 write = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0; 1793 if ((read | write) == 0) 1794 return (false); 1795 1796 /* 1797 * The EPT violation must have been caused by accessing a 1798 * guest-physical address that is a translation of a guest-linear 1799 * address. 1800 */ 1801 if ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 || 1802 (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) { 1803 return (false); 1804 } 1805 1806 return (true); 1807 } 1808 1809 static __inline int 1810 apic_access_virtualization(struct vmx *vmx, int vcpuid) 1811 { 1812 uint32_t proc_ctls2; 1813 1814 proc_ctls2 = vmx->cap[vcpuid].proc_ctls2; 1815 return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) ? 1 : 0); 1816 } 1817 1818 static __inline int 1819 x2apic_virtualization(struct vmx *vmx, int vcpuid) 1820 { 1821 uint32_t proc_ctls2; 1822 1823 proc_ctls2 = vmx->cap[vcpuid].proc_ctls2; 1824 return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_X2APIC_MODE) ? 
1 : 0); 1825 } 1826 1827 static int 1828 vmx_handle_apic_write(struct vmx *vmx, int vcpuid, struct vlapic *vlapic, 1829 uint64_t qual) 1830 { 1831 int handled, offset; 1832 uint32_t *apic_regs, vector; 1833 1834 handled = HANDLED; 1835 offset = APIC_WRITE_OFFSET(qual); 1836 1837 if (!apic_access_virtualization(vmx, vcpuid)) { 1838 /* 1839 * In general there should not be any APIC write VM-exits 1840 * unless APIC-access virtualization is enabled. 1841 * 1842 * However self-IPI virtualization can legitimately trigger 1843 * an APIC-write VM-exit so treat it specially. 1844 */ 1845 if (x2apic_virtualization(vmx, vcpuid) && 1846 offset == APIC_OFFSET_SELF_IPI) { 1847 apic_regs = (uint32_t *)(vlapic->apic_page); 1848 vector = apic_regs[APIC_OFFSET_SELF_IPI / 4]; 1849 vlapic_self_ipi_handler(vlapic, vector); 1850 return (HANDLED); 1851 } else 1852 return (UNHANDLED); 1853 } 1854 1855 switch (offset) { 1856 case APIC_OFFSET_ID: 1857 vlapic_id_write_handler(vlapic); 1858 break; 1859 case APIC_OFFSET_LDR: 1860 vlapic_ldr_write_handler(vlapic); 1861 break; 1862 case APIC_OFFSET_DFR: 1863 vlapic_dfr_write_handler(vlapic); 1864 break; 1865 case APIC_OFFSET_SVR: 1866 vlapic_svr_write_handler(vlapic); 1867 break; 1868 case APIC_OFFSET_ESR: 1869 vlapic_esr_write_handler(vlapic); 1870 break; 1871 case APIC_OFFSET_ICR_LOW: 1872 if (vlapic_icrlo_write_handler(vlapic) != 0) { 1873 handled = UNHANDLED; 1874 } 1875 break; 1876 case APIC_OFFSET_CMCI_LVT: 1877 case APIC_OFFSET_TIMER_LVT ... APIC_OFFSET_ERROR_LVT: 1878 vlapic_lvt_write_handler(vlapic, offset); 1879 break; 1880 case APIC_OFFSET_TIMER_ICR: 1881 vlapic_icrtmr_write_handler(vlapic); 1882 break; 1883 case APIC_OFFSET_TIMER_DCR: 1884 vlapic_dcr_write_handler(vlapic); 1885 break; 1886 default: 1887 handled = UNHANDLED; 1888 break; 1889 } 1890 return (handled); 1891 } 1892 1893 static bool 1894 apic_access_fault(struct vmx *vmx, int vcpuid, uint64_t gpa) 1895 { 1896 1897 if (apic_access_virtualization(vmx, vcpuid) && 1898 (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE)) 1899 return (true); 1900 else 1901 return (false); 1902 } 1903 1904 static int 1905 vmx_handle_apic_access(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit) 1906 { 1907 uint64_t qual; 1908 int access_type, offset, allowed; 1909 struct vie *vie; 1910 1911 if (!apic_access_virtualization(vmx, vcpuid)) 1912 return (UNHANDLED); 1913 1914 qual = vmexit->u.vmx.exit_qualification; 1915 access_type = APIC_ACCESS_TYPE(qual); 1916 offset = APIC_ACCESS_OFFSET(qual); 1917 1918 allowed = 0; 1919 if (access_type == 0) { 1920 /* 1921 * Read data access to the following registers is expected. 1922 */ 1923 switch (offset) { 1924 case APIC_OFFSET_APR: 1925 case APIC_OFFSET_PPR: 1926 case APIC_OFFSET_RRR: 1927 case APIC_OFFSET_CMCI_LVT: 1928 case APIC_OFFSET_TIMER_CCR: 1929 allowed = 1; 1930 break; 1931 default: 1932 break; 1933 } 1934 } else if (access_type == 1) { 1935 /* 1936 * Write data access to the following registers is expected. 1937 */ 1938 switch (offset) { 1939 case APIC_OFFSET_VER: 1940 case APIC_OFFSET_APR: 1941 case APIC_OFFSET_PPR: 1942 case APIC_OFFSET_RRR: 1943 case APIC_OFFSET_ISR0 ... APIC_OFFSET_ISR7: 1944 case APIC_OFFSET_TMR0 ... APIC_OFFSET_TMR7: 1945 case APIC_OFFSET_IRR0 ... 
APIC_OFFSET_IRR7: 1946 case APIC_OFFSET_CMCI_LVT: 1947 case APIC_OFFSET_TIMER_CCR: 1948 allowed = 1; 1949 break; 1950 default: 1951 break; 1952 } 1953 } 1954 1955 if (allowed) { 1956 vie = vm_vie_ctx(vmx->vm, vcpuid); 1957 vmexit_mmio_emul(vmexit, vie, DEFAULT_APIC_BASE + offset, 1958 VIE_INVALID_GLA); 1959 } 1960 1961 /* 1962 * Regardless of whether the APIC-access is allowed this handler 1963 * always returns UNHANDLED: 1964 * - if the access is allowed then it is handled by emulating the 1965 * instruction that caused the VM-exit (outside the critical section) 1966 * - if the access is not allowed then it will be converted to an 1967 * exitcode of VM_EXITCODE_VMX and will be dealt with in userland. 1968 */ 1969 return (UNHANDLED); 1970 } 1971 1972 static enum task_switch_reason 1973 vmx_task_switch_reason(uint64_t qual) 1974 { 1975 int reason; 1976 1977 reason = (qual >> 30) & 0x3; 1978 switch (reason) { 1979 case 0: 1980 return (TSR_CALL); 1981 case 1: 1982 return (TSR_IRET); 1983 case 2: 1984 return (TSR_JMP); 1985 case 3: 1986 return (TSR_IDT_GATE); 1987 default: 1988 panic("%s: invalid reason %d", __func__, reason); 1989 } 1990 } 1991 1992 static int 1993 emulate_wrmsr(struct vmx *vmx, int vcpuid, uint_t num, uint64_t val) 1994 { 1995 int error; 1996 1997 if (lapic_msr(num)) 1998 error = lapic_wrmsr(vmx->vm, vcpuid, num, val); 1999 else 2000 error = vmx_wrmsr(vmx, vcpuid, num, val); 2001 2002 return (error); 2003 } 2004 2005 static int 2006 emulate_rdmsr(struct vmx *vmx, int vcpuid, uint_t num) 2007 { 2008 uint64_t result; 2009 int error; 2010 2011 if (lapic_msr(num)) 2012 error = lapic_rdmsr(vmx->vm, vcpuid, num, &result); 2013 else 2014 error = vmx_rdmsr(vmx, vcpuid, num, &result); 2015 2016 if (error == 0) { 2017 vmx->ctx[vcpuid].guest_rax = (uint32_t)result; 2018 vmx->ctx[vcpuid].guest_rdx = result >> 32; 2019 } 2020 2021 return (error); 2022 } 2023 2024 static int 2025 vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit) 2026 { 2027 int error, errcode, errcode_valid, handled; 2028 struct vmxctx *vmxctx; 2029 struct vie *vie; 2030 struct vlapic *vlapic; 2031 struct vm_task_switch *ts; 2032 uint32_t eax, ecx, edx, idtvec_info, idtvec_err, intr_info; 2033 uint32_t intr_type, intr_vec, reason; 2034 uint64_t exitintinfo, qual, gpa; 2035 2036 CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_VIRTUAL_NMI) != 0); 2037 CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_NMI_EXITING) != 0); 2038 2039 handled = UNHANDLED; 2040 vmxctx = &vmx->ctx[vcpu]; 2041 2042 qual = vmexit->u.vmx.exit_qualification; 2043 reason = vmexit->u.vmx.exit_reason; 2044 vmexit->exitcode = VM_EXITCODE_BOGUS; 2045 2046 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_COUNT, 1); 2047 SDT_PROBE3(vmm, vmx, exit, entry, vmx, vcpu, vmexit); 2048 2049 /* 2050 * VM-entry failures during or after loading guest state. 2051 * 2052 * These VM-exits are uncommon but must be handled specially 2053 * as most VM-exit fields are not populated as usual. 2054 */ 2055 if (reason == EXIT_REASON_MCE_DURING_ENTRY) { 2056 VCPU_CTR0(vmx->vm, vcpu, "Handling MCE during VM-entry"); 2057 vmm_call_trap(T_MCE); 2058 return (1); 2059 } 2060 2061 /* 2062 * VM exits that can be triggered during event delivery need to 2063 * be handled specially by re-injecting the event if the IDT 2064 * vectoring information field's valid bit is set. 2065 * 2066 * See "Information for VM Exits During Event Delivery" in Intel SDM 2067 * for details. 
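 *
 * For example, a vectored event that was delivering an error code gets
 * repacked for vm_exit_intinfo() roughly as
 *	exitintinfo = idtvec_info | ((uint64_t)idtvec_err << 32);
 * after the undefined bit 12 has been cleared, mirroring the code below.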
2068 */ 2069 idtvec_info = vmcs_idt_vectoring_info(); 2070 if (idtvec_info & VMCS_IDT_VEC_VALID) { 2071 idtvec_info &= ~(1 << 12); /* clear undefined bit */ 2072 exitintinfo = idtvec_info; 2073 if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) { 2074 idtvec_err = vmcs_idt_vectoring_err(); 2075 exitintinfo |= (uint64_t)idtvec_err << 32; 2076 } 2077 error = vm_exit_intinfo(vmx->vm, vcpu, exitintinfo); 2078 KASSERT(error == 0, ("%s: vm_set_intinfo error %d", 2079 __func__, error)); 2080 2081 /* 2082 * If 'virtual NMIs' are being used and the VM-exit 2083 * happened while injecting an NMI during the previous 2084 * VM-entry, then clear "blocking by NMI" in the 2085 * Guest Interruptibility-State so the NMI can be 2086 * reinjected on the subsequent VM-entry. 2087 * 2088 * However, if the NMI was being delivered through a task 2089 * gate, then the new task must start execution with NMIs 2090 * blocked so don't clear NMI blocking in this case. 2091 */ 2092 intr_type = idtvec_info & VMCS_INTR_T_MASK; 2093 if (intr_type == VMCS_INTR_T_NMI) { 2094 if (reason != EXIT_REASON_TASK_SWITCH) 2095 vmx_clear_nmi_blocking(vmx, vcpu); 2096 else 2097 vmx_assert_nmi_blocking(vmx, vcpu); 2098 } 2099 2100 /* 2101 * Update VM-entry instruction length if the event being 2102 * delivered was a software interrupt or software exception. 2103 */ 2104 if (intr_type == VMCS_INTR_T_SWINTR || 2105 intr_type == VMCS_INTR_T_PRIV_SWEXCEPTION || 2106 intr_type == VMCS_INTR_T_SWEXCEPTION) { 2107 vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length); 2108 } 2109 } 2110 2111 switch (reason) { 2112 case EXIT_REASON_TASK_SWITCH: 2113 ts = &vmexit->u.task_switch; 2114 ts->tsssel = qual & 0xffff; 2115 ts->reason = vmx_task_switch_reason(qual); 2116 ts->ext = 0; 2117 ts->errcode_valid = 0; 2118 vmx_paging_info(&ts->paging); 2119 /* 2120 * If the task switch was due to a CALL, JMP, IRET, software 2121 * interrupt (INT n) or software exception (INT3, INTO), 2122 * then the saved %rip references the instruction that caused 2123 * the task switch. The instruction length field in the VMCS 2124 * is valid in this case. 2125 * 2126 * In all other cases (e.g., NMI, hardware exception) the 2127 * saved %rip is one that would have been saved in the old TSS 2128 * had the task switch completed normally so the instruction 2129 * length field is not needed in this case and is explicitly 2130 * set to 0. 2131 */ 2132 if (ts->reason == TSR_IDT_GATE) { 2133 KASSERT(idtvec_info & VMCS_IDT_VEC_VALID, 2134 ("invalid idtvec_info %x for IDT task switch", 2135 idtvec_info)); 2136 intr_type = idtvec_info & VMCS_INTR_T_MASK; 2137 if (intr_type != VMCS_INTR_T_SWINTR && 2138 intr_type != VMCS_INTR_T_SWEXCEPTION && 2139 intr_type != VMCS_INTR_T_PRIV_SWEXCEPTION) { 2140 /* Task switch triggered by external event */ 2141 ts->ext = 1; 2142 vmexit->inst_length = 0; 2143 if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) { 2144 ts->errcode_valid = 1; 2145 ts->errcode = vmcs_idt_vectoring_err(); 2146 } 2147 } 2148 } 2149 vmexit->exitcode = VM_EXITCODE_TASK_SWITCH; 2150 SDT_PROBE4(vmm, vmx, exit, taskswitch, vmx, vcpu, vmexit, ts); 2151 VCPU_CTR4(vmx->vm, vcpu, "task switch reason %d, tss 0x%04x, " 2152 "%s errcode 0x%016lx", ts->reason, ts->tsssel, 2153 ts->ext ? 
"external" : "internal", 2154 ((uint64_t)ts->errcode << 32) | ts->errcode_valid); 2155 break; 2156 case EXIT_REASON_CR_ACCESS: 2157 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CR_ACCESS, 1); 2158 SDT_PROBE4(vmm, vmx, exit, craccess, vmx, vcpu, vmexit, qual); 2159 switch (qual & 0xf) { 2160 case 0: 2161 handled = vmx_emulate_cr0_access(vmx, vcpu, qual); 2162 break; 2163 case 4: 2164 handled = vmx_emulate_cr4_access(vmx, vcpu, qual); 2165 break; 2166 case 8: 2167 handled = vmx_emulate_cr8_access(vmx, vcpu, qual); 2168 break; 2169 } 2170 break; 2171 case EXIT_REASON_RDMSR: 2172 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_RDMSR, 1); 2173 ecx = vmxctx->guest_rcx; 2174 VCPU_CTR1(vmx->vm, vcpu, "rdmsr 0x%08x", ecx); 2175 SDT_PROBE4(vmm, vmx, exit, rdmsr, vmx, vcpu, vmexit, ecx); 2176 error = emulate_rdmsr(vmx, vcpu, ecx); 2177 if (error == 0) { 2178 handled = HANDLED; 2179 } else if (error > 0) { 2180 vmexit->exitcode = VM_EXITCODE_RDMSR; 2181 vmexit->u.msr.code = ecx; 2182 } else { 2183 /* Return to userspace with a valid exitcode */ 2184 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, 2185 ("emulate_rdmsr retu with bogus exitcode")); 2186 } 2187 break; 2188 case EXIT_REASON_WRMSR: 2189 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_WRMSR, 1); 2190 eax = vmxctx->guest_rax; 2191 ecx = vmxctx->guest_rcx; 2192 edx = vmxctx->guest_rdx; 2193 VCPU_CTR2(vmx->vm, vcpu, "wrmsr 0x%08x value 0x%016lx", 2194 ecx, (uint64_t)edx << 32 | eax); 2195 SDT_PROBE5(vmm, vmx, exit, wrmsr, vmx, vmexit, vcpu, ecx, 2196 (uint64_t)edx << 32 | eax); 2197 error = emulate_wrmsr(vmx, vcpu, ecx, 2198 (uint64_t)edx << 32 | eax); 2199 if (error == 0) { 2200 handled = HANDLED; 2201 } else if (error > 0) { 2202 vmexit->exitcode = VM_EXITCODE_WRMSR; 2203 vmexit->u.msr.code = ecx; 2204 vmexit->u.msr.wval = (uint64_t)edx << 32 | eax; 2205 } else { 2206 /* Return to userspace with a valid exitcode */ 2207 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, 2208 ("emulate_wrmsr retu with bogus exitcode")); 2209 } 2210 break; 2211 case EXIT_REASON_HLT: 2212 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_HLT, 1); 2213 SDT_PROBE3(vmm, vmx, exit, halt, vmx, vcpu, vmexit); 2214 vmexit->exitcode = VM_EXITCODE_HLT; 2215 vmexit->u.hlt.rflags = vmcs_read(VMCS_GUEST_RFLAGS); 2216 break; 2217 case EXIT_REASON_MTF: 2218 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MTRAP, 1); 2219 SDT_PROBE3(vmm, vmx, exit, mtrap, vmx, vcpu, vmexit); 2220 vmexit->exitcode = VM_EXITCODE_MTRAP; 2221 vmexit->inst_length = 0; 2222 break; 2223 case EXIT_REASON_PAUSE: 2224 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_PAUSE, 1); 2225 SDT_PROBE3(vmm, vmx, exit, pause, vmx, vcpu, vmexit); 2226 vmexit->exitcode = VM_EXITCODE_PAUSE; 2227 break; 2228 case EXIT_REASON_INTR_WINDOW: 2229 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INTR_WINDOW, 1); 2230 SDT_PROBE3(vmm, vmx, exit, intrwindow, vmx, vcpu, vmexit); 2231 vmx_clear_int_window_exiting(vmx, vcpu); 2232 return (1); 2233 case EXIT_REASON_EXT_INTR: 2234 /* 2235 * External interrupts serve only to cause VM exits and allow 2236 * the host interrupt handler to run. 2237 * 2238 * If this external interrupt triggers a virtual interrupt 2239 * to a VM, then that state will be recorded by the 2240 * host interrupt handler in the VM's softc. We will inject 2241 * this virtual interrupt during the subsequent VM enter. 2242 */ 2243 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO); 2244 SDT_PROBE4(vmm, vmx, exit, interrupt, 2245 vmx, vcpu, vmexit, intr_info); 2246 2247 /* 2248 * XXX: Ignore this exit if VMCS_INTR_VALID is not set. 2249 * This appears to be a bug in VMware Fusion? 
2250 */ 2251 if (!(intr_info & VMCS_INTR_VALID)) 2252 return (1); 2253 KASSERT((intr_info & VMCS_INTR_VALID) != 0 && 2254 (intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_HWINTR, 2255 ("VM exit interruption info invalid: %x", intr_info)); 2256 vmx_trigger_hostintr(intr_info & 0xff); 2257 2258 /* 2259 * This is special. We want to treat this as an 'handled' 2260 * VM-exit but not increment the instruction pointer. 2261 */ 2262 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXTINT, 1); 2263 return (1); 2264 case EXIT_REASON_NMI_WINDOW: 2265 SDT_PROBE3(vmm, vmx, exit, nmiwindow, vmx, vcpu, vmexit); 2266 /* Exit to allow the pending virtual NMI to be injected */ 2267 if (vm_nmi_pending(vmx->vm, vcpu)) 2268 vmx_inject_nmi(vmx, vcpu); 2269 vmx_clear_nmi_window_exiting(vmx, vcpu); 2270 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NMI_WINDOW, 1); 2271 return (1); 2272 case EXIT_REASON_INOUT: 2273 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INOUT, 1); 2274 vie = vm_vie_ctx(vmx->vm, vcpu); 2275 vmexit_inout(vmexit, vie, qual, (uint32_t)vmxctx->guest_rax); 2276 SDT_PROBE3(vmm, vmx, exit, inout, vmx, vcpu, vmexit); 2277 break; 2278 case EXIT_REASON_CPUID: 2279 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CPUID, 1); 2280 SDT_PROBE3(vmm, vmx, exit, cpuid, vmx, vcpu, vmexit); 2281 handled = vmx_handle_cpuid(vmx->vm, vcpu, vmxctx); 2282 break; 2283 case EXIT_REASON_EXCEPTION: 2284 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXCEPTION, 1); 2285 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO); 2286 KASSERT((intr_info & VMCS_INTR_VALID) != 0, 2287 ("VM exit interruption info invalid: %x", intr_info)); 2288 2289 intr_vec = intr_info & 0xff; 2290 intr_type = intr_info & VMCS_INTR_T_MASK; 2291 2292 /* 2293 * If Virtual NMIs control is 1 and the VM-exit is due to a 2294 * fault encountered during the execution of IRET then we must 2295 * restore the state of "virtual-NMI blocking" before resuming 2296 * the guest. 2297 * 2298 * See "Resuming Guest Software after Handling an Exception". 2299 * See "Information for VM Exits Due to Vectored Events". 2300 */ 2301 if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 && 2302 (intr_vec != IDT_DF) && 2303 (intr_info & EXIT_QUAL_NMIUDTI) != 0) 2304 vmx_restore_nmi_blocking(vmx, vcpu); 2305 2306 /* 2307 * The NMI has already been handled in vmx_exit_handle_nmi(). 2308 */ 2309 if (intr_type == VMCS_INTR_T_NMI) 2310 return (1); 2311 2312 /* 2313 * Call the machine check handler by hand. Also don't reflect 2314 * the machine check back into the guest. 2315 */ 2316 if (intr_vec == IDT_MC) { 2317 VCPU_CTR0(vmx->vm, vcpu, "Vectoring to MCE handler"); 2318 vmm_call_trap(T_MCE); 2319 return (1); 2320 } 2321 2322 /* 2323 * If the hypervisor has requested user exits for 2324 * debug exceptions, bounce them out to userland. 2325 */ 2326 if (intr_type == VMCS_INTR_T_SWEXCEPTION && 2327 intr_vec == IDT_BP && 2328 (vmx->cap[vcpu].set & (1 << VM_CAP_BPT_EXIT))) { 2329 vmexit->exitcode = VM_EXITCODE_BPT; 2330 vmexit->u.bpt.inst_length = vmexit->inst_length; 2331 vmexit->inst_length = 0; 2332 break; 2333 } 2334 2335 if (intr_vec == IDT_PF) { 2336 vmxctx->guest_cr2 = qual; 2337 } 2338 2339 /* 2340 * Software exceptions exhibit trap-like behavior. This in 2341 * turn requires populating the VM-entry instruction length 2342 * so that the %rip in the trap frame is past the INT3/INTO 2343 * instruction. 
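 *
 * For instance, a guest INT3 is a single byte, so writing its length
 * (1) into VMCS_ENTRY_INST_LENGTH makes the reflected #BP push a return
 * %rip one byte past the INT3, just as it would on bare metal.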
2344 */ 2345 if (intr_type == VMCS_INTR_T_SWEXCEPTION) 2346 vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length); 2347 2348 /* Reflect all other exceptions back into the guest */ 2349 errcode_valid = errcode = 0; 2350 if (intr_info & VMCS_INTR_DEL_ERRCODE) { 2351 errcode_valid = 1; 2352 errcode = vmcs_read(VMCS_EXIT_INTR_ERRCODE); 2353 } 2354 VCPU_CTR2(vmx->vm, vcpu, "Reflecting exception %d/%x into " 2355 "the guest", intr_vec, errcode); 2356 SDT_PROBE5(vmm, vmx, exit, exception, 2357 vmx, vcpu, vmexit, intr_vec, errcode); 2358 error = vm_inject_exception(vmx->vm, vcpu, intr_vec, 2359 errcode_valid, errcode, 0); 2360 KASSERT(error == 0, ("%s: vm_inject_exception error %d", 2361 __func__, error)); 2362 return (1); 2363 2364 case EXIT_REASON_EPT_FAULT: 2365 /* 2366 * If 'gpa' lies within the address space allocated to 2367 * memory then this must be a nested page fault otherwise 2368 * this must be an instruction that accesses MMIO space. 2369 */ 2370 gpa = vmcs_gpa(); 2371 if (vm_mem_allocated(vmx->vm, vcpu, gpa) || 2372 apic_access_fault(vmx, vcpu, gpa)) { 2373 vmexit->exitcode = VM_EXITCODE_PAGING; 2374 vmexit->inst_length = 0; 2375 vmexit->u.paging.gpa = gpa; 2376 vmexit->u.paging.fault_type = ept_fault_type(qual); 2377 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NESTED_FAULT, 1); 2378 SDT_PROBE5(vmm, vmx, exit, nestedfault, 2379 vmx, vcpu, vmexit, gpa, qual); 2380 } else if (ept_emulation_fault(qual)) { 2381 vie = vm_vie_ctx(vmx->vm, vcpu); 2382 vmexit_mmio_emul(vmexit, vie, gpa, vmcs_gla()); 2383 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MMIO_EMUL, 1); 2384 SDT_PROBE4(vmm, vmx, exit, mmiofault, 2385 vmx, vcpu, vmexit, gpa); 2386 } 2387 /* 2388 * If Virtual NMIs control is 1 and the VM-exit is due to an 2389 * EPT fault during the execution of IRET then we must restore 2390 * the state of "virtual-NMI blocking" before resuming. 2391 * 2392 * See description of "NMI unblocking due to IRET" in 2393 * "Exit Qualification for EPT Violations". 2394 */ 2395 if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 && 2396 (qual & EXIT_QUAL_NMIUDTI) != 0) 2397 vmx_restore_nmi_blocking(vmx, vcpu); 2398 break; 2399 case EXIT_REASON_VIRTUALIZED_EOI: 2400 vmexit->exitcode = VM_EXITCODE_IOAPIC_EOI; 2401 vmexit->u.ioapic_eoi.vector = qual & 0xFF; 2402 SDT_PROBE3(vmm, vmx, exit, eoi, vmx, vcpu, vmexit); 2403 vmexit->inst_length = 0; /* trap-like */ 2404 break; 2405 case EXIT_REASON_APIC_ACCESS: 2406 SDT_PROBE3(vmm, vmx, exit, apicaccess, vmx, vcpu, vmexit); 2407 handled = vmx_handle_apic_access(vmx, vcpu, vmexit); 2408 break; 2409 case EXIT_REASON_APIC_WRITE: 2410 /* 2411 * APIC-write VM exit is trap-like so the %rip is already 2412 * pointing to the next instruction. 
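 * Zeroing inst_length below keeps the common "handled" epilogue in
 * vmx_exit_process() from advancing %rip past yet another instruction.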
2413 */ 2414 vmexit->inst_length = 0; 2415 vlapic = vm_lapic(vmx->vm, vcpu); 2416 SDT_PROBE4(vmm, vmx, exit, apicwrite, 2417 vmx, vcpu, vmexit, vlapic); 2418 handled = vmx_handle_apic_write(vmx, vcpu, vlapic, qual); 2419 break; 2420 case EXIT_REASON_XSETBV: 2421 SDT_PROBE3(vmm, vmx, exit, xsetbv, vmx, vcpu, vmexit); 2422 handled = vmx_emulate_xsetbv(vmx, vcpu, vmexit); 2423 break; 2424 case EXIT_REASON_MONITOR: 2425 SDT_PROBE3(vmm, vmx, exit, monitor, vmx, vcpu, vmexit); 2426 vmexit->exitcode = VM_EXITCODE_MONITOR; 2427 break; 2428 case EXIT_REASON_MWAIT: 2429 SDT_PROBE3(vmm, vmx, exit, mwait, vmx, vcpu, vmexit); 2430 vmexit->exitcode = VM_EXITCODE_MWAIT; 2431 break; 2432 case EXIT_REASON_TPR: 2433 vlapic = vm_lapic(vmx->vm, vcpu); 2434 vlapic_sync_tpr(vlapic); 2435 vmexit->inst_length = 0; 2436 handled = HANDLED; 2437 break; 2438 case EXIT_REASON_VMCALL: 2439 case EXIT_REASON_VMCLEAR: 2440 case EXIT_REASON_VMLAUNCH: 2441 case EXIT_REASON_VMPTRLD: 2442 case EXIT_REASON_VMPTRST: 2443 case EXIT_REASON_VMREAD: 2444 case EXIT_REASON_VMRESUME: 2445 case EXIT_REASON_VMWRITE: 2446 case EXIT_REASON_VMXOFF: 2447 case EXIT_REASON_VMXON: 2448 SDT_PROBE3(vmm, vmx, exit, vminsn, vmx, vcpu, vmexit); 2449 vmexit->exitcode = VM_EXITCODE_VMINSN; 2450 break; 2451 default: 2452 SDT_PROBE4(vmm, vmx, exit, unknown, 2453 vmx, vcpu, vmexit, reason); 2454 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_UNKNOWN, 1); 2455 break; 2456 } 2457 2458 if (handled) { 2459 /* 2460 * It is possible that control is returned to userland 2461 * even though we were able to handle the VM exit in the 2462 * kernel. 2463 * 2464 * In such a case we want to make sure that the userland 2465 * restarts guest execution at the instruction *after* 2466 * the one we just processed. Therefore we update the 2467 * guest rip in the VMCS and in 'vmexit'. 2468 */ 2469 vmexit->rip += vmexit->inst_length; 2470 vmexit->inst_length = 0; 2471 vmcs_write(VMCS_GUEST_RIP, vmexit->rip); 2472 } else { 2473 if (vmexit->exitcode == VM_EXITCODE_BOGUS) { 2474 /* 2475 * If this VM exit was not claimed by anybody then 2476 * treat it as a generic VMX exit. 2477 */ 2478 vmexit->exitcode = VM_EXITCODE_VMX; 2479 vmexit->u.vmx.status = VM_SUCCESS; 2480 vmexit->u.vmx.inst_type = 0; 2481 vmexit->u.vmx.inst_error = 0; 2482 } else { 2483 /* 2484 * The exitcode and collateral have been populated. 2485 * The VM exit will be processed further in userland. 2486 */ 2487 } 2488 } 2489 2490 SDT_PROBE4(vmm, vmx, exit, return, 2491 vmx, vcpu, vmexit, handled); 2492 return (handled); 2493 } 2494 2495 static void 2496 vmx_exit_inst_error(struct vmxctx *vmxctx, int rc, struct vm_exit *vmexit) 2497 { 2498 2499 KASSERT(vmxctx->inst_fail_status != VM_SUCCESS, 2500 ("vmx_exit_inst_error: invalid inst_fail_status %d", 2501 vmxctx->inst_fail_status)); 2502 2503 vmexit->inst_length = 0; 2504 vmexit->exitcode = VM_EXITCODE_VMX; 2505 vmexit->u.vmx.status = vmxctx->inst_fail_status; 2506 vmexit->u.vmx.inst_error = vmcs_instruction_error(); 2507 vmexit->u.vmx.exit_reason = ~0; 2508 vmexit->u.vmx.exit_qualification = ~0; 2509 2510 switch (rc) { 2511 case VMX_VMRESUME_ERROR: 2512 case VMX_VMLAUNCH_ERROR: 2513 case VMX_INVEPT_ERROR: 2514 case VMX_VMWRITE_ERROR: 2515 vmexit->u.vmx.inst_type = rc; 2516 break; 2517 default: 2518 panic("vm_exit_inst_error: vmx_enter_guest returned %d", rc); 2519 } 2520 } 2521 2522 /* 2523 * If the NMI-exiting VM execution control is set to '1' then an NMI in 2524 * non-root operation causes a VM-exit. 
NMI blocking is in effect so it is 2525 * sufficient to simply vector to the NMI handler via a software interrupt. 2526 * However, this must be done before maskable interrupts are enabled 2527 * otherwise the "iret" issued by an interrupt handler will incorrectly 2528 * clear NMI blocking. 2529 */ 2530 static __inline void 2531 vmx_exit_handle_nmi(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit) 2532 { 2533 uint32_t intr_info; 2534 2535 KASSERT((read_rflags() & PSL_I) == 0, ("interrupts enabled")); 2536 2537 if (vmexit->u.vmx.exit_reason != EXIT_REASON_EXCEPTION) 2538 return; 2539 2540 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO); 2541 KASSERT((intr_info & VMCS_INTR_VALID) != 0, 2542 ("VM exit interruption info invalid: %x", intr_info)); 2543 2544 if ((intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_NMI) { 2545 KASSERT((intr_info & 0xff) == IDT_NMI, ("VM exit due " 2546 "to NMI has invalid vector: %x", intr_info)); 2547 VCPU_CTR0(vmx->vm, vcpuid, "Vectoring to NMI handler"); 2548 vmm_call_trap(T_NMIFLT); 2549 } 2550 } 2551 2552 static __inline void 2553 vmx_dr_enter_guest(struct vmxctx *vmxctx) 2554 { 2555 uint64_t rflags; 2556 2557 /* Save host control debug registers. */ 2558 vmxctx->host_dr7 = rdr7(); 2559 vmxctx->host_debugctl = rdmsr(MSR_DEBUGCTLMSR); 2560 2561 /* 2562 * Disable debugging in DR7 and DEBUGCTL to avoid triggering 2563 * exceptions in the host based on the guest DRx values. The 2564 * guest DR7 and DEBUGCTL are saved/restored in the VMCS. 2565 */ 2566 load_dr7(0); 2567 wrmsr(MSR_DEBUGCTLMSR, 0); 2568 2569 /* 2570 * Disable single stepping the kernel to avoid corrupting the 2571 * guest DR6. A debugger might still be able to corrupt the 2572 * guest DR6 by setting a breakpoint after this point and then 2573 * single stepping. 2574 */ 2575 rflags = read_rflags(); 2576 vmxctx->host_tf = rflags & PSL_T; 2577 write_rflags(rflags & ~PSL_T); 2578 2579 /* Save host debug registers. */ 2580 vmxctx->host_dr0 = rdr0(); 2581 vmxctx->host_dr1 = rdr1(); 2582 vmxctx->host_dr2 = rdr2(); 2583 vmxctx->host_dr3 = rdr3(); 2584 vmxctx->host_dr6 = rdr6(); 2585 2586 /* Restore guest debug registers. */ 2587 load_dr0(vmxctx->guest_dr0); 2588 load_dr1(vmxctx->guest_dr1); 2589 load_dr2(vmxctx->guest_dr2); 2590 load_dr3(vmxctx->guest_dr3); 2591 load_dr6(vmxctx->guest_dr6); 2592 } 2593 2594 static __inline void 2595 vmx_dr_leave_guest(struct vmxctx *vmxctx) 2596 { 2597 2598 /* Save guest debug registers. */ 2599 vmxctx->guest_dr0 = rdr0(); 2600 vmxctx->guest_dr1 = rdr1(); 2601 vmxctx->guest_dr2 = rdr2(); 2602 vmxctx->guest_dr3 = rdr3(); 2603 vmxctx->guest_dr6 = rdr6(); 2604 2605 /* 2606 * Restore host debug registers. Restore DR7, DEBUGCTL, and 2607 * PSL_T last. 
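 * This ordering avoids re-arming host breakpoints or single-stepping
 * while %dr0-%dr6 still hold guest values, mirroring the precautions
 * taken on the way in by vmx_dr_enter_guest().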
2608 */ 2609 load_dr0(vmxctx->host_dr0); 2610 load_dr1(vmxctx->host_dr1); 2611 load_dr2(vmxctx->host_dr2); 2612 load_dr3(vmxctx->host_dr3); 2613 load_dr6(vmxctx->host_dr6); 2614 wrmsr(MSR_DEBUGCTLMSR, vmxctx->host_debugctl); 2615 load_dr7(vmxctx->host_dr7); 2616 write_rflags(read_rflags() | vmxctx->host_tf); 2617 } 2618 2619 static int 2620 vmx_run(void *arg, int vcpu, uint64_t rip, pmap_t pmap) 2621 { 2622 int rc, handled, launched; 2623 struct vmx *vmx; 2624 struct vm *vm; 2625 struct vmxctx *vmxctx; 2626 uintptr_t vmcs_pa; 2627 struct vm_exit *vmexit; 2628 struct vlapic *vlapic; 2629 uint32_t exit_reason; 2630 bool tpr_shadow_active; 2631 2632 vmx = arg; 2633 vm = vmx->vm; 2634 vmcs_pa = vmx->vmcs_pa[vcpu]; 2635 vmxctx = &vmx->ctx[vcpu]; 2636 vlapic = vm_lapic(vm, vcpu); 2637 vmexit = vm_exitinfo(vm, vcpu); 2638 launched = 0; 2639 tpr_shadow_active = vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW) && 2640 !vmx_cap_en(vmx, VMX_CAP_APICV) && 2641 (vmx->cap[vcpu].proc_ctls & PROCBASED_USE_TPR_SHADOW) != 0; 2642 2643 KASSERT(vmxctx->pmap == pmap, 2644 ("pmap %p different than ctx pmap %p", pmap, vmxctx->pmap)); 2645 2646 vmx_msr_guest_enter(vmx, vcpu); 2647 2648 vmcs_load(vmcs_pa); 2649 2650 VERIFY(vmx->vmcs_state[vcpu] == VS_NONE && curthread->t_preempt != 0); 2651 vmx->vmcs_state[vcpu] = VS_LOADED; 2652 2653 /* 2654 * XXX 2655 * We do this every time because we may setup the virtual machine 2656 * from a different process than the one that actually runs it. 2657 * 2658 * If the life of a virtual machine was spent entirely in the context 2659 * of a single process we could do this once in vmx_vminit(). 2660 */ 2661 vmcs_write(VMCS_HOST_CR3, rcr3()); 2662 2663 vmcs_write(VMCS_GUEST_RIP, rip); 2664 vmx_set_pcpu_defaults(vmx, vcpu, pmap); 2665 do { 2666 enum event_inject_state inject_state; 2667 2668 KASSERT(vmcs_guest_rip() == rip, ("%s: vmcs guest rip mismatch " 2669 "%lx/%lx", __func__, vmcs_guest_rip(), rip)); 2670 2671 handled = UNHANDLED; 2672 2673 /* 2674 * Perform initial event/exception/interrupt injection before 2675 * host CPU interrupts are disabled. 2676 */ 2677 inject_state = vmx_inject_events(vmx, vcpu, rip); 2678 2679 /* 2680 * Interrupts are disabled from this point on until the 2681 * guest starts executing. This is done for the following 2682 * reasons: 2683 * 2684 * If an AST is asserted on this thread after the check below, 2685 * then the IPI_AST notification will not be lost, because it 2686 * will cause a VM exit due to external interrupt as soon as 2687 * the guest state is loaded. 2688 * 2689 * A posted interrupt after vmx_inject_vlapic() will not be 2690 * "lost" because it will be held pending in the host APIC 2691 * because interrupts are disabled. The pending interrupt will 2692 * be recognized as soon as the guest state is loaded. 2693 * 2694 * The same reasoning applies to the IPI generated by 2695 * pmap_invalidate_ept(). 2696 */ 2697 disable_intr(); 2698 2699 /* 2700 * If not precluded by existing events, inject any interrupt 2701 * pending on the vLAPIC. As a lock-less operation, it is safe 2702 * (and prudent) to perform with host CPU interrupts disabled. 2703 */ 2704 if (inject_state == EIS_CAN_INJECT) { 2705 inject_state = vmx_inject_vlapic(vmx, vcpu, vlapic); 2706 } 2707 2708 /* 2709 * Check for vCPU bail-out conditions. This must be done after 2710 * vmx_inject_events() to detect a triple-fault condition. 
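 * These checks also run with host interrupts still disabled, so a
 * notification arriving after they complete is held pending and will
 * force an immediate VM-exit once the guest is entered.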
2711 */ 2712 if (vcpu_entry_bailout_checks(vmx->vm, vcpu, rip)) { 2713 enable_intr(); 2714 break; 2715 } 2716 2717 if (vcpu_run_state_pending(vm, vcpu)) { 2718 enable_intr(); 2719 vm_exit_run_state(vmx->vm, vcpu, rip); 2720 break; 2721 } 2722 2723 /* 2724 * If subsequent activity queued events which require injection 2725 * handling, take another lap to handle them. 2726 */ 2727 if (vmx_inject_recheck(vmx, vcpu, inject_state)) { 2728 enable_intr(); 2729 handled = HANDLED; 2730 continue; 2731 } 2732 2733 if ((rc = smt_acquire()) != 1) { 2734 enable_intr(); 2735 vmexit->rip = rip; 2736 vmexit->inst_length = 0; 2737 if (rc == -1) { 2738 vmexit->exitcode = VM_EXITCODE_HT; 2739 } else { 2740 vmexit->exitcode = VM_EXITCODE_BOGUS; 2741 handled = HANDLED; 2742 } 2743 break; 2744 } 2745 2746 /* 2747 * If this thread has gone off-cpu due to mutex operations 2748 * during vmx_run, the VMCS will have been unloaded, forcing a 2749 * re-VMLAUNCH as opposed to VMRESUME. 2750 */ 2751 launched = (vmx->vmcs_state[vcpu] & VS_LAUNCHED) != 0; 2752 /* 2753 * Restoration of the GDT limit is taken care of by 2754 * vmx_savectx(). Since the maximum practical index for the 2755 * IDT is 255, restoring its limits from the post-VMX-exit 2756 * default of 0xffff is not a concern. 2757 * 2758 * Only 64-bit hypervisor callers are allowed, which forgoes 2759 * the need to restore any LDT descriptor. Toss an error to 2760 * anyone attempting to break that rule. 2761 */ 2762 if (curproc->p_model != DATAMODEL_LP64) { 2763 smt_release(); 2764 enable_intr(); 2765 bzero(vmexit, sizeof (*vmexit)); 2766 vmexit->rip = rip; 2767 vmexit->exitcode = VM_EXITCODE_VMX; 2768 vmexit->u.vmx.status = VM_FAIL_INVALID; 2769 handled = UNHANDLED; 2770 break; 2771 } 2772 2773 if (tpr_shadow_active) { 2774 vmx_tpr_shadow_enter(vlapic); 2775 } 2776 2777 vmx_run_trace(vmx, vcpu); 2778 vcpu_ustate_change(vm, vcpu, VU_RUN); 2779 vmx_dr_enter_guest(vmxctx); 2780 rc = vmx_enter_guest(vmxctx, vmx, launched); 2781 vmx_dr_leave_guest(vmxctx); 2782 vcpu_ustate_change(vm, vcpu, VU_EMU_KERN); 2783 2784 vmx->vmcs_state[vcpu] |= VS_LAUNCHED; 2785 smt_release(); 2786 2787 if (tpr_shadow_active) { 2788 vmx_tpr_shadow_exit(vlapic); 2789 } 2790 2791 /* Collect some information for VM exit processing */ 2792 vmexit->rip = rip = vmcs_guest_rip(); 2793 vmexit->inst_length = vmexit_instruction_length(); 2794 vmexit->u.vmx.exit_reason = exit_reason = vmcs_exit_reason(); 2795 vmexit->u.vmx.exit_qualification = vmcs_exit_qualification(); 2796 2797 /* Update 'nextrip' */ 2798 vmx->state[vcpu].nextrip = rip; 2799 2800 if (rc == VMX_GUEST_VMEXIT) { 2801 vmx_exit_handle_nmi(vmx, vcpu, vmexit); 2802 enable_intr(); 2803 handled = vmx_exit_process(vmx, vcpu, vmexit); 2804 } else { 2805 enable_intr(); 2806 vmx_exit_inst_error(vmxctx, rc, vmexit); 2807 } 2808 DTRACE_PROBE3(vmm__vexit, int, vcpu, uint64_t, rip, 2809 uint32_t, exit_reason); 2810 rip = vmexit->rip; 2811 } while (handled); 2812 2813 /* If a VM exit has been handled then the exitcode must be BOGUS */ 2814 if (handled && vmexit->exitcode != VM_EXITCODE_BOGUS) { 2815 panic("Non-BOGUS exitcode (%d) unexpected for handled VM exit", 2816 vmexit->exitcode); 2817 } 2818 2819 VCPU_CTR1(vm, vcpu, "returning from vmx_run: exitcode %d", 2820 vmexit->exitcode); 2821 2822 vmcs_clear(vmcs_pa); 2823 vmx_msr_guest_exit(vmx, vcpu); 2824 2825 VERIFY(vmx->vmcs_state != VS_NONE && curthread->t_preempt != 0); 2826 vmx->vmcs_state[vcpu] = VS_NONE; 2827 2828 return (0); 2829 } 2830 2831 static void 2832 vmx_vmcleanup(void *arg) 2833 { 2834 
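	/*
	 * Tear down the resources this VM was using: the APIC-access page
	 * mapping (when APICv was enabled), the MSR bitmaps, the per-vCPU
	 * VPIDs, and finally the softc itself.
	 */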
int i; 2835 struct vmx *vmx = arg; 2836 uint16_t maxcpus; 2837 2838 if (vmx_cap_en(vmx, VMX_CAP_APICV)) { 2839 vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE); 2840 kmem_free(vmx->apic_access_page, PAGESIZE); 2841 } else { 2842 VERIFY3P(vmx->apic_access_page, ==, NULL); 2843 } 2844 2845 vmx_msr_bitmap_destroy(vmx); 2846 2847 maxcpus = vm_get_maxcpus(vmx->vm); 2848 for (i = 0; i < maxcpus; i++) 2849 vpid_free(vmx->state[i].vpid); 2850 2851 free(vmx, M_VMX); 2852 } 2853 2854 static uint64_t * 2855 vmxctx_regptr(struct vmxctx *vmxctx, int reg) 2856 { 2857 switch (reg) { 2858 case VM_REG_GUEST_RAX: 2859 return (&vmxctx->guest_rax); 2860 case VM_REG_GUEST_RBX: 2861 return (&vmxctx->guest_rbx); 2862 case VM_REG_GUEST_RCX: 2863 return (&vmxctx->guest_rcx); 2864 case VM_REG_GUEST_RDX: 2865 return (&vmxctx->guest_rdx); 2866 case VM_REG_GUEST_RSI: 2867 return (&vmxctx->guest_rsi); 2868 case VM_REG_GUEST_RDI: 2869 return (&vmxctx->guest_rdi); 2870 case VM_REG_GUEST_RBP: 2871 return (&vmxctx->guest_rbp); 2872 case VM_REG_GUEST_R8: 2873 return (&vmxctx->guest_r8); 2874 case VM_REG_GUEST_R9: 2875 return (&vmxctx->guest_r9); 2876 case VM_REG_GUEST_R10: 2877 return (&vmxctx->guest_r10); 2878 case VM_REG_GUEST_R11: 2879 return (&vmxctx->guest_r11); 2880 case VM_REG_GUEST_R12: 2881 return (&vmxctx->guest_r12); 2882 case VM_REG_GUEST_R13: 2883 return (&vmxctx->guest_r13); 2884 case VM_REG_GUEST_R14: 2885 return (&vmxctx->guest_r14); 2886 case VM_REG_GUEST_R15: 2887 return (&vmxctx->guest_r15); 2888 case VM_REG_GUEST_CR2: 2889 return (&vmxctx->guest_cr2); 2890 case VM_REG_GUEST_DR0: 2891 return (&vmxctx->guest_dr0); 2892 case VM_REG_GUEST_DR1: 2893 return (&vmxctx->guest_dr1); 2894 case VM_REG_GUEST_DR2: 2895 return (&vmxctx->guest_dr2); 2896 case VM_REG_GUEST_DR3: 2897 return (&vmxctx->guest_dr3); 2898 case VM_REG_GUEST_DR6: 2899 return (&vmxctx->guest_dr6); 2900 default: 2901 break; 2902 } 2903 return (NULL); 2904 } 2905 2906 static int 2907 vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval) 2908 { 2909 int running, hostcpu, err; 2910 struct vmx *vmx = arg; 2911 uint64_t *regp; 2912 2913 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu); 2914 if (running && hostcpu != curcpu) 2915 panic("vmx_getreg: %s%d is running", vm_name(vmx->vm), vcpu); 2916 2917 /* VMCS access not required for ctx reads */ 2918 if ((regp = vmxctx_regptr(&vmx->ctx[vcpu], reg)) != NULL) { 2919 *retval = *regp; 2920 return (0); 2921 } 2922 2923 if (!running) { 2924 vmcs_load(vmx->vmcs_pa[vcpu]); 2925 } 2926 2927 err = EINVAL; 2928 if (reg == VM_REG_GUEST_INTR_SHADOW) { 2929 uint64_t gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 2930 *retval = (gi & HWINTR_BLOCKING) ? 
1 : 0; 2931 err = 0; 2932 } else { 2933 uint32_t encoding; 2934 2935 encoding = vmcs_field_encoding(reg); 2936 if (encoding != VMCS_INVALID_ENCODING) { 2937 *retval = vmcs_read(encoding); 2938 err = 0; 2939 } 2940 } 2941 2942 if (!running) { 2943 vmcs_clear(vmx->vmcs_pa[vcpu]); 2944 } 2945 2946 return (err); 2947 } 2948 2949 static int 2950 vmx_setreg(void *arg, int vcpu, int reg, uint64_t val) 2951 { 2952 int running, hostcpu, error; 2953 struct vmx *vmx = arg; 2954 uint64_t *regp; 2955 2956 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu); 2957 if (running && hostcpu != curcpu) 2958 panic("vmx_setreg: %s%d is running", vm_name(vmx->vm), vcpu); 2959 2960 /* VMCS access not required for ctx writes */ 2961 if ((regp = vmxctx_regptr(&vmx->ctx[vcpu], reg)) != NULL) { 2962 *regp = val; 2963 return (0); 2964 } 2965 2966 if (!running) { 2967 vmcs_load(vmx->vmcs_pa[vcpu]); 2968 } 2969 2970 if (reg == VM_REG_GUEST_INTR_SHADOW) { 2971 if (val != 0) { 2972 /* 2973 * Forcing the vcpu into an interrupt shadow is not 2974 * presently supported. 2975 */ 2976 error = EINVAL; 2977 } else { 2978 uint64_t gi; 2979 2980 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 2981 gi &= ~HWINTR_BLOCKING; 2982 vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi); 2983 error = 0; 2984 } 2985 } else { 2986 uint32_t encoding; 2987 2988 error = 0; 2989 encoding = vmcs_field_encoding(reg); 2990 switch (encoding) { 2991 case VMCS_GUEST_IA32_EFER: 2992 /* 2993 * If the "load EFER" VM-entry control is 1 then the 2994 * value of EFER.LMA must be identical to "IA-32e mode 2995 * guest" bit in the VM-entry control. 2996 */ 2997 if ((entry_ctls & VM_ENTRY_LOAD_EFER) != 0) { 2998 uint64_t ctls; 2999 3000 ctls = vmcs_read(VMCS_ENTRY_CTLS); 3001 if (val & EFER_LMA) { 3002 ctls |= VM_ENTRY_GUEST_LMA; 3003 } else { 3004 ctls &= ~VM_ENTRY_GUEST_LMA; 3005 } 3006 vmcs_write(VMCS_ENTRY_CTLS, ctls); 3007 } 3008 vmcs_write(encoding, val); 3009 break; 3010 case VMCS_GUEST_CR0: 3011 /* 3012 * The guest is not allowed to modify certain bits in 3013 * %cr0 and %cr4. To maintain the illusion of full 3014 * control, they have shadow versions which contain the 3015 * guest-perceived (via reads from the register) values 3016 * as opposed to the guest-effective values. 3017 * 3018 * This is detailed in the SDM: Vol. 3 Ch. 24.6.6. 3019 */ 3020 vmcs_write(VMCS_CR0_SHADOW, val); 3021 vmcs_write(encoding, vmx_fix_cr0(val)); 3022 break; 3023 case VMCS_GUEST_CR4: 3024 /* See above for detail on %cr4 shadowing */ 3025 vmcs_write(VMCS_CR4_SHADOW, val); 3026 vmcs_write(encoding, vmx_fix_cr4(val)); 3027 break; 3028 case VMCS_GUEST_CR3: 3029 vmcs_write(encoding, val); 3030 /* 3031 * Invalidate the guest vcpu's TLB mappings to emulate 3032 * the behavior of updating %cr3. 3033 * 3034 * XXX the processor retains global mappings when %cr3 3035 * is updated but vmx_invvpid() does not. 
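 * Over-invalidating in this way is architecturally harmless; the only
 * cost is that the guest re-walks page tables for global mappings a
 * real MOV to %cr3 would have preserved in the TLB.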
3036 */ 3037 vmx_invvpid(vmx, vcpu, vmx->ctx[vcpu].pmap, running); 3038 break; 3039 case VMCS_INVALID_ENCODING: 3040 error = EINVAL; 3041 break; 3042 default: 3043 vmcs_write(encoding, val); 3044 break; 3045 } 3046 } 3047 3048 if (!running) { 3049 vmcs_clear(vmx->vmcs_pa[vcpu]); 3050 } 3051 3052 return (error); 3053 } 3054 3055 static int 3056 vmx_getdesc(void *arg, int vcpu, int seg, struct seg_desc *desc) 3057 { 3058 int hostcpu, running; 3059 struct vmx *vmx = arg; 3060 uint32_t base, limit, access; 3061 3062 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu); 3063 if (running && hostcpu != curcpu) 3064 panic("vmx_getdesc: %s%d is running", vm_name(vmx->vm), vcpu); 3065 3066 if (!running) { 3067 vmcs_load(vmx->vmcs_pa[vcpu]); 3068 } 3069 3070 vmcs_seg_desc_encoding(seg, &base, &limit, &access); 3071 desc->base = vmcs_read(base); 3072 desc->limit = vmcs_read(limit); 3073 if (access != VMCS_INVALID_ENCODING) { 3074 desc->access = vmcs_read(access); 3075 } else { 3076 desc->access = 0; 3077 } 3078 3079 if (!running) { 3080 vmcs_clear(vmx->vmcs_pa[vcpu]); 3081 } 3082 return (0); 3083 } 3084 3085 static int 3086 vmx_setdesc(void *arg, int vcpu, int seg, const struct seg_desc *desc) 3087 { 3088 int hostcpu, running; 3089 struct vmx *vmx = arg; 3090 uint32_t base, limit, access; 3091 3092 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu); 3093 if (running && hostcpu != curcpu) 3094 panic("vmx_setdesc: %s%d is running", vm_name(vmx->vm), vcpu); 3095 3096 if (!running) { 3097 vmcs_load(vmx->vmcs_pa[vcpu]); 3098 } 3099 3100 vmcs_seg_desc_encoding(seg, &base, &limit, &access); 3101 vmcs_write(base, desc->base); 3102 vmcs_write(limit, desc->limit); 3103 if (access != VMCS_INVALID_ENCODING) { 3104 vmcs_write(access, desc->access); 3105 } 3106 3107 if (!running) { 3108 vmcs_clear(vmx->vmcs_pa[vcpu]); 3109 } 3110 return (0); 3111 } 3112 3113 static int 3114 vmx_getcap(void *arg, int vcpu, int type, int *retval) 3115 { 3116 struct vmx *vmx = arg; 3117 int vcap; 3118 int ret; 3119 3120 ret = ENOENT; 3121 3122 vcap = vmx->cap[vcpu].set; 3123 3124 switch (type) { 3125 case VM_CAP_HALT_EXIT: 3126 if (cap_halt_exit) 3127 ret = 0; 3128 break; 3129 case VM_CAP_PAUSE_EXIT: 3130 if (cap_pause_exit) 3131 ret = 0; 3132 break; 3133 case VM_CAP_MTRAP_EXIT: 3134 if (cap_monitor_trap) 3135 ret = 0; 3136 break; 3137 case VM_CAP_ENABLE_INVPCID: 3138 if (cap_invpcid) 3139 ret = 0; 3140 break; 3141 case VM_CAP_BPT_EXIT: 3142 ret = 0; 3143 break; 3144 default: 3145 break; 3146 } 3147 3148 if (ret == 0) 3149 *retval = (vcap & (1 << type)) ? 
1 : 0; 3150 3151 return (ret); 3152 } 3153 3154 static int 3155 vmx_setcap(void *arg, int vcpu, int type, int val) 3156 { 3157 struct vmx *vmx = arg; 3158 uint32_t baseval, reg, flag; 3159 uint32_t *pptr; 3160 int error; 3161 3162 error = ENOENT; 3163 pptr = NULL; 3164 3165 switch (type) { 3166 case VM_CAP_HALT_EXIT: 3167 if (cap_halt_exit) { 3168 error = 0; 3169 pptr = &vmx->cap[vcpu].proc_ctls; 3170 baseval = *pptr; 3171 flag = PROCBASED_HLT_EXITING; 3172 reg = VMCS_PRI_PROC_BASED_CTLS; 3173 } 3174 break; 3175 case VM_CAP_MTRAP_EXIT: 3176 if (cap_monitor_trap) { 3177 error = 0; 3178 pptr = &vmx->cap[vcpu].proc_ctls; 3179 baseval = *pptr; 3180 flag = PROCBASED_MTF; 3181 reg = VMCS_PRI_PROC_BASED_CTLS; 3182 } 3183 break; 3184 case VM_CAP_PAUSE_EXIT: 3185 if (cap_pause_exit) { 3186 error = 0; 3187 pptr = &vmx->cap[vcpu].proc_ctls; 3188 baseval = *pptr; 3189 flag = PROCBASED_PAUSE_EXITING; 3190 reg = VMCS_PRI_PROC_BASED_CTLS; 3191 } 3192 break; 3193 case VM_CAP_ENABLE_INVPCID: 3194 if (cap_invpcid) { 3195 error = 0; 3196 pptr = &vmx->cap[vcpu].proc_ctls2; 3197 baseval = *pptr; 3198 flag = PROCBASED2_ENABLE_INVPCID; 3199 reg = VMCS_SEC_PROC_BASED_CTLS; 3200 } 3201 break; 3202 case VM_CAP_BPT_EXIT: 3203 error = 0; 3204 3205 /* Don't change the bitmap if we are tracing all exceptions. */ 3206 if (vmx->cap[vcpu].exc_bitmap != 0xffffffff) { 3207 pptr = &vmx->cap[vcpu].exc_bitmap; 3208 baseval = *pptr; 3209 flag = (1 << IDT_BP); 3210 reg = VMCS_EXCEPTION_BITMAP; 3211 } 3212 break; 3213 default: 3214 break; 3215 } 3216 3217 if (error != 0) { 3218 return (error); 3219 } 3220 3221 if (pptr != NULL) { 3222 if (val) { 3223 baseval |= flag; 3224 } else { 3225 baseval &= ~flag; 3226 } 3227 vmcs_load(vmx->vmcs_pa[vcpu]); 3228 vmcs_write(reg, baseval); 3229 vmcs_clear(vmx->vmcs_pa[vcpu]); 3230 3231 /* 3232 * Update optional stored flags, and record 3233 * setting 3234 */ 3235 *pptr = baseval; 3236 } 3237 3238 if (val) { 3239 vmx->cap[vcpu].set |= (1 << type); 3240 } else { 3241 vmx->cap[vcpu].set &= ~(1 << type); 3242 } 3243 3244 return (0); 3245 } 3246 3247 struct vlapic_vtx { 3248 struct vlapic vlapic; 3249 3250 /* Align to the nearest cacheline */ 3251 uint8_t _pad[64 - (sizeof (struct vlapic) % 64)]; 3252 3253 /* TMR handling state for posted interrupts */ 3254 uint32_t tmr_active[8]; 3255 uint32_t pending_level[8]; 3256 uint32_t pending_edge[8]; 3257 3258 struct pir_desc *pir_desc; 3259 struct vmx *vmx; 3260 uint_t pending_prio; 3261 boolean_t tmr_sync; 3262 }; 3263 3264 CTASSERT((offsetof(struct vlapic_vtx, tmr_active) & 63) == 0); 3265 3266 #define VPR_PRIO_BIT(vpr) (1 << ((vpr) >> 4)) 3267 3268 static vcpu_notify_t 3269 vmx_apicv_set_ready(struct vlapic *vlapic, int vector, bool level) 3270 { 3271 struct vlapic_vtx *vlapic_vtx; 3272 struct pir_desc *pir_desc; 3273 uint32_t mask, tmrval; 3274 int idx; 3275 vcpu_notify_t notify = VCPU_NOTIFY_NONE; 3276 3277 vlapic_vtx = (struct vlapic_vtx *)vlapic; 3278 pir_desc = vlapic_vtx->pir_desc; 3279 idx = vector / 32; 3280 mask = 1UL << (vector % 32); 3281 3282 /* 3283 * If the currently asserted TMRs do not match the state requested by 3284 * the incoming interrupt, an exit will be required to reconcile those 3285 * bits in the APIC page. This will keep the vLAPIC behavior in line 3286 * with the architecturally defined expectations. 3287 * 3288 * If actors of mixed types (edge and level) are racing against the same 3289 * vector (toggling its TMR bit back and forth), the results could 3290 * inconsistent. 
Such circumstances are considered a rare edge case and 3291 * are never expected to be found in the wild. 3292 */ 3293 tmrval = atomic_load_acq_int(&vlapic_vtx->tmr_active[idx]); 3294 if (!level) { 3295 if ((tmrval & mask) != 0) { 3296 /* Edge-triggered interrupt needs TMR de-asserted */ 3297 atomic_set_int(&vlapic_vtx->pending_edge[idx], mask); 3298 atomic_store_rel_long(&pir_desc->pending, 1); 3299 return (VCPU_NOTIFY_EXIT); 3300 } 3301 } else { 3302 if ((tmrval & mask) == 0) { 3303 /* Level-triggered interrupt needs TMR asserted */ 3304 atomic_set_int(&vlapic_vtx->pending_level[idx], mask); 3305 atomic_store_rel_long(&pir_desc->pending, 1); 3306 return (VCPU_NOTIFY_EXIT); 3307 } 3308 } 3309 3310 /* 3311 * If the interrupt request does not require manipulation of the TMRs 3312 * for delivery, set it in PIR descriptor. It cannot be inserted into 3313 * the APIC page while the vCPU might be running. 3314 */ 3315 atomic_set_int(&pir_desc->pir[idx], mask); 3316 3317 /* 3318 * A notification is required whenever the 'pending' bit makes a 3319 * transition from 0->1. 3320 * 3321 * Even if the 'pending' bit is already asserted, notification about 3322 * the incoming interrupt may still be necessary. For example, if a 3323 * vCPU is HLTed with a high PPR, a low priority interrupt would cause 3324 * the 0->1 'pending' transition with a notification, but the vCPU 3325 * would ignore the interrupt for the time being. The same vCPU would 3326 * need to then be notified if a high-priority interrupt arrived which 3327 * satisfied the PPR. 3328 * 3329 * The priorities of interrupts injected while 'pending' is asserted 3330 * are tracked in a custom bitfield 'pending_prio'. Should the 3331 * to-be-injected interrupt exceed the priorities already present, the 3332 * notification is sent. The priorities recorded in 'pending_prio' are 3333 * cleared whenever the 'pending' bit makes another 0->1 transition. 3334 */ 3335 if (atomic_cmpset_long(&pir_desc->pending, 0, 1) != 0) { 3336 notify = VCPU_NOTIFY_APIC; 3337 vlapic_vtx->pending_prio = 0; 3338 } else { 3339 const uint_t old_prio = vlapic_vtx->pending_prio; 3340 const uint_t prio_bit = VPR_PRIO_BIT(vector & APIC_TPR_INT); 3341 3342 if ((old_prio & prio_bit) == 0 && prio_bit > old_prio) { 3343 atomic_set_int(&vlapic_vtx->pending_prio, prio_bit); 3344 notify = VCPU_NOTIFY_APIC; 3345 } 3346 } 3347 3348 return (notify); 3349 } 3350 3351 static void 3352 vmx_apicv_accepted(struct vlapic *vlapic, int vector) 3353 { 3354 /* 3355 * When APICv is enabled for an instance, the traditional interrupt 3356 * injection method (populating ENTRY_INTR_INFO in the VMCS) is not 3357 * used and the CPU does the heavy lifting of virtual interrupt 3358 * delivery. For that reason vmx_intr_accepted() should never be called 3359 * when APICv is enabled. 
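 * The hook is still installed by vmx_vlapic_init(), presumably so that
 * an unexpected call is caught loudly by the panic below rather than
 * being silently ignored.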
3360 */ 3361 panic("vmx_intr_accepted: not expected to be called"); 3362 } 3363 3364 static void 3365 vmx_apicv_sync_tmr(struct vlapic *vlapic) 3366 { 3367 struct vlapic_vtx *vlapic_vtx; 3368 const uint32_t *tmrs; 3369 3370 vlapic_vtx = (struct vlapic_vtx *)vlapic; 3371 tmrs = &vlapic_vtx->tmr_active[0]; 3372 3373 if (!vlapic_vtx->tmr_sync) { 3374 return; 3375 } 3376 3377 vmcs_write(VMCS_EOI_EXIT0, ((uint64_t)tmrs[1] << 32) | tmrs[0]); 3378 vmcs_write(VMCS_EOI_EXIT1, ((uint64_t)tmrs[3] << 32) | tmrs[2]); 3379 vmcs_write(VMCS_EOI_EXIT2, ((uint64_t)tmrs[5] << 32) | tmrs[4]); 3380 vmcs_write(VMCS_EOI_EXIT3, ((uint64_t)tmrs[7] << 32) | tmrs[6]); 3381 vlapic_vtx->tmr_sync = B_FALSE; 3382 } 3383 3384 static void 3385 vmx_enable_x2apic_mode_ts(struct vlapic *vlapic) 3386 { 3387 struct vmx *vmx; 3388 uint32_t proc_ctls; 3389 int vcpuid; 3390 3391 vcpuid = vlapic->vcpuid; 3392 vmx = ((struct vlapic_vtx *)vlapic)->vmx; 3393 3394 proc_ctls = vmx->cap[vcpuid].proc_ctls; 3395 proc_ctls &= ~PROCBASED_USE_TPR_SHADOW; 3396 proc_ctls |= PROCBASED_CR8_LOAD_EXITING; 3397 proc_ctls |= PROCBASED_CR8_STORE_EXITING; 3398 vmx->cap[vcpuid].proc_ctls = proc_ctls; 3399 3400 vmcs_load(vmx->vmcs_pa[vcpuid]); 3401 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, proc_ctls); 3402 vmcs_clear(vmx->vmcs_pa[vcpuid]); 3403 } 3404 3405 static void 3406 vmx_enable_x2apic_mode_vid(struct vlapic *vlapic) 3407 { 3408 struct vmx *vmx; 3409 uint32_t proc_ctls2; 3410 int vcpuid; 3411 3412 vcpuid = vlapic->vcpuid; 3413 vmx = ((struct vlapic_vtx *)vlapic)->vmx; 3414 3415 proc_ctls2 = vmx->cap[vcpuid].proc_ctls2; 3416 KASSERT((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) != 0, 3417 ("%s: invalid proc_ctls2 %x", __func__, proc_ctls2)); 3418 3419 proc_ctls2 &= ~PROCBASED2_VIRTUALIZE_APIC_ACCESSES; 3420 proc_ctls2 |= PROCBASED2_VIRTUALIZE_X2APIC_MODE; 3421 vmx->cap[vcpuid].proc_ctls2 = proc_ctls2; 3422 3423 vmcs_load(vmx->vmcs_pa[vcpuid]); 3424 vmcs_write(VMCS_SEC_PROC_BASED_CTLS, proc_ctls2); 3425 vmcs_clear(vmx->vmcs_pa[vcpuid]); 3426 3427 vmx_allow_x2apic_msrs(vmx, vcpuid); 3428 } 3429 3430 static void 3431 vmx_apicv_notify(struct vlapic *vlapic, int hostcpu) 3432 { 3433 psm_send_pir_ipi(hostcpu); 3434 } 3435 3436 static void 3437 vmx_apicv_sync(struct vlapic *vlapic) 3438 { 3439 struct vlapic_vtx *vlapic_vtx; 3440 struct pir_desc *pir_desc; 3441 struct LAPIC *lapic; 3442 uint_t i; 3443 3444 vlapic_vtx = (struct vlapic_vtx *)vlapic; 3445 pir_desc = vlapic_vtx->pir_desc; 3446 lapic = vlapic->apic_page; 3447 3448 if (atomic_cmpset_long(&pir_desc->pending, 1, 0) == 0) { 3449 return; 3450 } 3451 3452 vlapic_vtx->pending_prio = 0; 3453 3454 /* Make sure the invalid (0-15) vectors are not set */ 3455 ASSERT0(vlapic_vtx->pending_level[0] & 0xffff); 3456 ASSERT0(vlapic_vtx->pending_edge[0] & 0xffff); 3457 ASSERT0(pir_desc->pir[0] & 0xffff); 3458 3459 for (i = 0; i <= 7; i++) { 3460 uint32_t *tmrp = &lapic->tmr0 + (i * 4); 3461 uint32_t *irrp = &lapic->irr0 + (i * 4); 3462 3463 const uint32_t pending_level = 3464 atomic_readandclear_int(&vlapic_vtx->pending_level[i]); 3465 const uint32_t pending_edge = 3466 atomic_readandclear_int(&vlapic_vtx->pending_edge[i]); 3467 const uint32_t pending_inject = 3468 atomic_readandclear_int(&pir_desc->pir[i]); 3469 3470 if (pending_level != 0) { 3471 /* 3472 * Level-triggered interrupts assert their corresponding 3473 * bit in the TMR when queued in IRR. 
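 * For example, a level-triggered vector 0x30 maps to index 1
 * (0x30 / 32) with mask (1 << 16), so the stores below set bit 16 in
 * both the TMR1 and IRR1 registers of the APIC page.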
3474 */ 3475 *tmrp |= pending_level; 3476 *irrp |= pending_level; 3477 } 3478 if (pending_edge != 0) { 3479 /* 3480 * When queuing an edge-triggered interrupt in IRR, the 3481 * corresponding bit in the TMR is cleared. 3482 */ 3483 *tmrp &= ~pending_edge; 3484 *irrp |= pending_edge; 3485 } 3486 if (pending_inject != 0) { 3487 /* 3488 * Interrupts which do not require a change to the TMR 3489 * (because it already matches the necessary state) can 3490 * simply be queued in IRR. 3491 */ 3492 *irrp |= pending_inject; 3493 } 3494 3495 if (*tmrp != vlapic_vtx->tmr_active[i]) { 3496 /* Check if VMX EOI triggers require updating. */ 3497 vlapic_vtx->tmr_active[i] = *tmrp; 3498 vlapic_vtx->tmr_sync = B_TRUE; 3499 } 3500 } 3501 } 3502 3503 static void 3504 vmx_tpr_shadow_enter(struct vlapic *vlapic) 3505 { 3506 /* 3507 * When TPR shadowing is enabled, VMX will initiate a guest exit if its 3508 * TPR falls below a threshold priority. That threshold is set to the 3509 * current TPR priority, since guest interrupt status should be 3510 * re-evaluated if its TPR is set lower. 3511 */ 3512 vmcs_write(VMCS_TPR_THRESHOLD, vlapic_get_cr8(vlapic)); 3513 } 3514 3515 static void 3516 vmx_tpr_shadow_exit(struct vlapic *vlapic) 3517 { 3518 /* 3519 * Unlike full APICv, where changes to the TPR are reflected in the PPR, 3520 * with TPR shadowing, that duty is relegated to the VMM. Upon exit, 3521 * the PPR is updated to reflect any change in the TPR here. 3522 */ 3523 vlapic_sync_tpr(vlapic); 3524 } 3525 3526 static struct vlapic * 3527 vmx_vlapic_init(void *arg, int vcpuid) 3528 { 3529 struct vmx *vmx; 3530 struct vlapic *vlapic; 3531 struct vlapic_vtx *vlapic_vtx; 3532 3533 vmx = arg; 3534 3535 vlapic = malloc(sizeof (struct vlapic_vtx), M_VLAPIC, 3536 M_WAITOK | M_ZERO); 3537 vlapic->vm = vmx->vm; 3538 vlapic->vcpuid = vcpuid; 3539 vlapic->apic_page = (struct LAPIC *)&vmx->apic_page[vcpuid]; 3540 3541 vlapic_vtx = (struct vlapic_vtx *)vlapic; 3542 vlapic_vtx->pir_desc = &vmx->pir_desc[vcpuid]; 3543 vlapic_vtx->vmx = vmx; 3544 3545 if (vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW)) { 3546 vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode_ts; 3547 } 3548 if (vmx_cap_en(vmx, VMX_CAP_APICV)) { 3549 vlapic->ops.set_intr_ready = vmx_apicv_set_ready; 3550 vlapic->ops.sync_state = vmx_apicv_sync; 3551 vlapic->ops.intr_accepted = vmx_apicv_accepted; 3552 vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode_vid; 3553 3554 if (vmx_cap_en(vmx, VMX_CAP_APICV_PIR)) { 3555 vlapic->ops.post_intr = vmx_apicv_notify; 3556 } 3557 } 3558 3559 vlapic_init(vlapic); 3560 3561 return (vlapic); 3562 } 3563 3564 static void 3565 vmx_vlapic_cleanup(void *arg, struct vlapic *vlapic) 3566 { 3567 3568 vlapic_cleanup(vlapic); 3569 free(vlapic, M_VLAPIC); 3570 } 3571 3572 static void 3573 vmx_savectx(void *arg, int vcpu) 3574 { 3575 struct vmx *vmx = arg; 3576 3577 if ((vmx->vmcs_state[vcpu] & VS_LOADED) != 0) { 3578 vmcs_clear(vmx->vmcs_pa[vcpu]); 3579 vmx_msr_guest_exit(vmx, vcpu); 3580 /* 3581 * Having VMCLEARed the VMCS, it can no longer be re-entered 3582 * with VMRESUME, but must be VMLAUNCHed again. 
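 * Clearing VS_LAUNCHED below is what makes the next pass through
 * vmx_run() compute launched == 0 and take the VMLAUNCH path in
 * vmx_enter_guest().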
3583 */ 3584 vmx->vmcs_state[vcpu] &= ~VS_LAUNCHED; 3585 } 3586 3587 reset_gdtr_limit(); 3588 } 3589 3590 static void 3591 vmx_restorectx(void *arg, int vcpu) 3592 { 3593 struct vmx *vmx = arg; 3594 3595 ASSERT0(vmx->vmcs_state[vcpu] & VS_LAUNCHED); 3596 3597 if ((vmx->vmcs_state[vcpu] & VS_LOADED) != 0) { 3598 vmx_msr_guest_enter(vmx, vcpu); 3599 vmcs_load(vmx->vmcs_pa[vcpu]); 3600 } 3601 } 3602 3603 struct vmm_ops vmm_ops_intel = { 3604 .init = vmx_init, 3605 .cleanup = vmx_cleanup, 3606 .resume = vmx_restore, 3607 .vminit = vmx_vminit, 3608 .vmrun = vmx_run, 3609 .vmcleanup = vmx_vmcleanup, 3610 .vmgetreg = vmx_getreg, 3611 .vmsetreg = vmx_setreg, 3612 .vmgetdesc = vmx_getdesc, 3613 .vmsetdesc = vmx_setdesc, 3614 .vmgetcap = vmx_getcap, 3615 .vmsetcap = vmx_setcap, 3616 .vmspace_alloc = ept_vmspace_alloc, 3617 .vmspace_free = ept_vmspace_free, 3618 .vlapic_init = vmx_vlapic_init, 3619 .vlapic_cleanup = vmx_vlapic_cleanup, 3620 3621 .vmsavectx = vmx_savectx, 3622 .vmrestorectx = vmx_restorectx, 3623 }; 3624 3625 /* Side-effect free HW validation derived from checks in vmx_init. */ 3626 int 3627 vmx_x86_supported(const char **msg) 3628 { 3629 int error; 3630 uint32_t tmp; 3631 3632 ASSERT(msg != NULL); 3633 3634 /* Check support for primary processor-based VM-execution controls */ 3635 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, 3636 MSR_VMX_TRUE_PROCBASED_CTLS, PROCBASED_CTLS_ONE_SETTING, 3637 PROCBASED_CTLS_ZERO_SETTING, &tmp); 3638 if (error) { 3639 *msg = "processor does not support desired primary " 3640 "processor-based controls"; 3641 return (error); 3642 } 3643 3644 /* Check support for secondary processor-based VM-execution controls */ 3645 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, 3646 MSR_VMX_PROCBASED_CTLS2, PROCBASED_CTLS2_ONE_SETTING, 3647 PROCBASED_CTLS2_ZERO_SETTING, &tmp); 3648 if (error) { 3649 *msg = "processor does not support desired secondary " 3650 "processor-based controls"; 3651 return (error); 3652 } 3653 3654 /* Check support for pin-based VM-execution controls */ 3655 error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS, 3656 MSR_VMX_TRUE_PINBASED_CTLS, PINBASED_CTLS_ONE_SETTING, 3657 PINBASED_CTLS_ZERO_SETTING, &tmp); 3658 if (error) { 3659 *msg = "processor does not support desired pin-based controls"; 3660 return (error); 3661 } 3662 3663 /* Check support for VM-exit controls */ 3664 error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS, 3665 VM_EXIT_CTLS_ONE_SETTING, VM_EXIT_CTLS_ZERO_SETTING, &tmp); 3666 if (error) { 3667 *msg = "processor does not support desired exit controls"; 3668 return (error); 3669 } 3670 3671 /* Check support for VM-entry controls */ 3672 error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS, MSR_VMX_TRUE_ENTRY_CTLS, 3673 VM_ENTRY_CTLS_ONE_SETTING, VM_ENTRY_CTLS_ZERO_SETTING, &tmp); 3674 if (error) { 3675 *msg = "processor does not support desired entry controls"; 3676 return (error); 3677 } 3678 3679 /* Unrestricted guest is nominally optional, but not for us. */ 3680 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2, 3681 PROCBASED2_UNRESTRICTED_GUEST, 0, &tmp); 3682 if (error) { 3683 *msg = "processor does not support desired unrestricted guest " 3684 "controls"; 3685 return (error); 3686 } 3687 3688 return (0); 3689 } 3690
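
/*
 * A minimal, hypothetical sketch of a caller using vmx_x86_supported()
 * during hardware detection; the surrounding names are illustrative and
 * not part of this file:
 *
 *	const char *msg = NULL;
 *	if (vmx_x86_supported(&msg) != 0) {
 *		cmn_err(CE_NOTE, "VMX not usable: %s", msg);
 *		return (ENOTSUP);
 *	}
 */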