1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2011 NetApp, Inc. 5 * All rights reserved. 6 * Copyright (c) 2018 Joyent, Inc. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 * 29 * $FreeBSD$ 30 */ 31 /* 32 * This file and its contents are supplied under the terms of the 33 * Common Development and Distribution License ("CDDL"), version 1.0. 34 * You may only use this file in accordance with the terms of version 35 * 1.0 of the CDDL. 36 * 37 * A full copy of the text of the CDDL should have accompanied this 38 * source. A copy of the CDDL is also available via the Internet at 39 * http://www.illumos.org/license/CDDL. 40 * 41 * Copyright 2015 Pluribus Networks Inc. 42 * Copyright 2018 Joyent, Inc. 
43 * Copyright 2022 Oxide Computer Company 44 */ 45 46 #include <sys/cdefs.h> 47 __FBSDID("$FreeBSD$"); 48 49 #include <sys/param.h> 50 #include <sys/systm.h> 51 #include <sys/kernel.h> 52 #include <sys/kmem.h> 53 #include <sys/pcpu.h> 54 #include <sys/proc.h> 55 #include <sys/sysctl.h> 56 57 #include <sys/x86_archext.h> 58 #include <sys/smp_impldefs.h> 59 #include <sys/smt.h> 60 #include <sys/hma.h> 61 #include <sys/trap.h> 62 #include <sys/archsystm.h> 63 64 #include <machine/psl.h> 65 #include <machine/cpufunc.h> 66 #include <machine/md_var.h> 67 #include <machine/reg.h> 68 #include <machine/segments.h> 69 #include <machine/specialreg.h> 70 #include <machine/vmparam.h> 71 #include <sys/vmm_vm.h> 72 #include <sys/vmm_kernel.h> 73 74 #include <machine/vmm.h> 75 #include <machine/vmm_dev.h> 76 #include <sys/vmm_instruction_emul.h> 77 #include "vmm_lapic.h" 78 #include "vmm_host.h" 79 #include "vmm_ioport.h" 80 #include "vmm_stat.h" 81 #include "vatpic.h" 82 #include "vlapic.h" 83 #include "vlapic_priv.h" 84 85 #include "vmcs.h" 86 #include "vmx.h" 87 #include "vmx_msr.h" 88 #include "vmx_controls.h" 89 90 #define PINBASED_CTLS_ONE_SETTING \ 91 (PINBASED_EXTINT_EXITING | \ 92 PINBASED_NMI_EXITING | \ 93 PINBASED_VIRTUAL_NMI) 94 #define PINBASED_CTLS_ZERO_SETTING 0 95 96 #define PROCBASED_CTLS_WINDOW_SETTING \ 97 (PROCBASED_INT_WINDOW_EXITING | \ 98 PROCBASED_NMI_WINDOW_EXITING) 99 100 /* We consider TSC offset a necessity for unsynched TSC handling */ 101 #define PROCBASED_CTLS_ONE_SETTING \ 102 (PROCBASED_SECONDARY_CONTROLS | \ 103 PROCBASED_TSC_OFFSET | \ 104 PROCBASED_MWAIT_EXITING | \ 105 PROCBASED_MONITOR_EXITING | \ 106 PROCBASED_IO_EXITING | \ 107 PROCBASED_MSR_BITMAPS | \ 108 PROCBASED_CTLS_WINDOW_SETTING | \ 109 PROCBASED_CR8_LOAD_EXITING | \ 110 PROCBASED_CR8_STORE_EXITING) 111 112 #define PROCBASED_CTLS_ZERO_SETTING \ 113 (PROCBASED_CR3_LOAD_EXITING | \ 114 PROCBASED_CR3_STORE_EXITING | \ 115 PROCBASED_IO_BITMAPS) 116 117 /* 118 * EPT and Unrestricted Guest are considered necessities. The latter is not a 119 * requirement on FreeBSD, where grub2-bhyve is used to load guests directly 120 * without a bootrom starting in real mode. 
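 *
 * A note on why Unrestricted Guest matters here (paraphrasing the SDM): the
 * guest is started by a bootrom that begins execution in real mode, and real
 * mode is only permitted in VMX non-root operation when Unrestricted Guest is
 * enabled, since it is what allows CR0.PE and CR0.PG to be clear under EPT.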
121 */ 122 #define PROCBASED_CTLS2_ONE_SETTING \ 123 (PROCBASED2_ENABLE_EPT | \ 124 PROCBASED2_UNRESTRICTED_GUEST) 125 #define PROCBASED_CTLS2_ZERO_SETTING 0 126 127 #define VM_EXIT_CTLS_ONE_SETTING \ 128 (VM_EXIT_SAVE_DEBUG_CONTROLS | \ 129 VM_EXIT_HOST_LMA | \ 130 VM_EXIT_LOAD_PAT | \ 131 VM_EXIT_SAVE_EFER | \ 132 VM_EXIT_LOAD_EFER | \ 133 VM_EXIT_ACKNOWLEDGE_INTERRUPT) 134 135 #define VM_EXIT_CTLS_ZERO_SETTING 0 136 137 #define VM_ENTRY_CTLS_ONE_SETTING \ 138 (VM_ENTRY_LOAD_DEBUG_CONTROLS | \ 139 VM_ENTRY_LOAD_EFER) 140 141 #define VM_ENTRY_CTLS_ZERO_SETTING \ 142 (VM_ENTRY_INTO_SMM | \ 143 VM_ENTRY_DEACTIVATE_DUAL_MONITOR) 144 145 /* 146 * Cover the EPT capabilities used by bhyve at present: 147 * - 4-level page walks 148 * - write-back memory type 149 * - INVEPT operations (all types) 150 * - INVVPID operations (single-context only) 151 */ 152 #define EPT_CAPS_REQUIRED \ 153 (IA32_VMX_EPT_VPID_PWL4 | \ 154 IA32_VMX_EPT_VPID_TYPE_WB | \ 155 IA32_VMX_EPT_VPID_INVEPT | \ 156 IA32_VMX_EPT_VPID_INVEPT_SINGLE | \ 157 IA32_VMX_EPT_VPID_INVEPT_ALL | \ 158 IA32_VMX_EPT_VPID_INVVPID | \ 159 IA32_VMX_EPT_VPID_INVVPID_SINGLE) 160 161 #define HANDLED 1 162 #define UNHANDLED 0 163 164 SYSCTL_DECL(_hw_vmm); 165 SYSCTL_NODE(_hw_vmm, OID_AUTO, vmx, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 166 NULL); 167 168 static uint32_t pinbased_ctls, procbased_ctls, procbased_ctls2; 169 static uint32_t exit_ctls, entry_ctls; 170 171 static uint64_t cr0_ones_mask, cr0_zeros_mask; 172 173 static uint64_t cr4_ones_mask, cr4_zeros_mask; 174 175 static int vmx_initialized; 176 177 /* Do not flush RSB upon vmexit */ 178 static int no_flush_rsb; 179 180 /* 181 * Optional capabilities 182 */ 183 184 /* HLT triggers a VM-exit */ 185 static int cap_halt_exit; 186 187 /* PAUSE triggers a VM-exit */ 188 static int cap_pause_exit; 189 190 /* Monitor trap flag */ 191 static int cap_monitor_trap; 192 193 /* Guests are allowed to use INVPCID */ 194 static int cap_invpcid; 195 196 /* Extra capabilities (VMX_CAP_*) beyond the minimum */ 197 static enum vmx_caps vmx_capabilities; 198 199 /* APICv posted interrupt vector */ 200 static int pirvec = -1; 201 202 static uint_t vpid_alloc_failed; 203 204 int guest_l1d_flush; 205 int guest_l1d_flush_sw; 206 207 /* MSR save region is composed of an array of 'struct msr_entry' */ 208 struct msr_entry { 209 uint32_t index; 210 uint32_t reserved; 211 uint64_t val; 212 }; 213 214 static struct msr_entry msr_load_list[1] __aligned(16); 215 216 /* 217 * The definitions of SDT probes for VMX. 
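 *
 * Each probe fires from the VM-exit handling path and carries at least the
 * vmx softc, the vcpu ID, and the vm_exit structure being filled in;
 * exit-specific probes append details such as the MSR number or fault
 * address involved.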
218 */ 219 220 /* BEGIN CSTYLED */ 221 SDT_PROBE_DEFINE3(vmm, vmx, exit, entry, 222 "struct vmx *", "int", "struct vm_exit *"); 223 224 SDT_PROBE_DEFINE4(vmm, vmx, exit, taskswitch, 225 "struct vmx *", "int", "struct vm_exit *", "struct vm_task_switch *"); 226 227 SDT_PROBE_DEFINE4(vmm, vmx, exit, craccess, 228 "struct vmx *", "int", "struct vm_exit *", "uint64_t"); 229 230 SDT_PROBE_DEFINE4(vmm, vmx, exit, rdmsr, 231 "struct vmx *", "int", "struct vm_exit *", "uint32_t"); 232 233 SDT_PROBE_DEFINE5(vmm, vmx, exit, wrmsr, 234 "struct vmx *", "int", "struct vm_exit *", "uint32_t", "uint64_t"); 235 236 SDT_PROBE_DEFINE3(vmm, vmx, exit, halt, 237 "struct vmx *", "int", "struct vm_exit *"); 238 239 SDT_PROBE_DEFINE3(vmm, vmx, exit, mtrap, 240 "struct vmx *", "int", "struct vm_exit *"); 241 242 SDT_PROBE_DEFINE3(vmm, vmx, exit, pause, 243 "struct vmx *", "int", "struct vm_exit *"); 244 245 SDT_PROBE_DEFINE3(vmm, vmx, exit, intrwindow, 246 "struct vmx *", "int", "struct vm_exit *"); 247 248 SDT_PROBE_DEFINE4(vmm, vmx, exit, interrupt, 249 "struct vmx *", "int", "struct vm_exit *", "uint32_t"); 250 251 SDT_PROBE_DEFINE3(vmm, vmx, exit, nmiwindow, 252 "struct vmx *", "int", "struct vm_exit *"); 253 254 SDT_PROBE_DEFINE3(vmm, vmx, exit, inout, 255 "struct vmx *", "int", "struct vm_exit *"); 256 257 SDT_PROBE_DEFINE3(vmm, vmx, exit, cpuid, 258 "struct vmx *", "int", "struct vm_exit *"); 259 260 SDT_PROBE_DEFINE5(vmm, vmx, exit, exception, 261 "struct vmx *", "int", "struct vm_exit *", "uint32_t", "int"); 262 263 SDT_PROBE_DEFINE5(vmm, vmx, exit, nestedfault, 264 "struct vmx *", "int", "struct vm_exit *", "uint64_t", "uint64_t"); 265 266 SDT_PROBE_DEFINE4(vmm, vmx, exit, mmiofault, 267 "struct vmx *", "int", "struct vm_exit *", "uint64_t"); 268 269 SDT_PROBE_DEFINE3(vmm, vmx, exit, eoi, 270 "struct vmx *", "int", "struct vm_exit *"); 271 272 SDT_PROBE_DEFINE3(vmm, vmx, exit, apicaccess, 273 "struct vmx *", "int", "struct vm_exit *"); 274 275 SDT_PROBE_DEFINE4(vmm, vmx, exit, apicwrite, 276 "struct vmx *", "int", "struct vm_exit *", "struct vlapic *"); 277 278 SDT_PROBE_DEFINE3(vmm, vmx, exit, xsetbv, 279 "struct vmx *", "int", "struct vm_exit *"); 280 281 SDT_PROBE_DEFINE3(vmm, vmx, exit, monitor, 282 "struct vmx *", "int", "struct vm_exit *"); 283 284 SDT_PROBE_DEFINE3(vmm, vmx, exit, mwait, 285 "struct vmx *", "int", "struct vm_exit *"); 286 287 SDT_PROBE_DEFINE3(vmm, vmx, exit, vminsn, 288 "struct vmx *", "int", "struct vm_exit *"); 289 290 SDT_PROBE_DEFINE4(vmm, vmx, exit, unknown, 291 "struct vmx *", "int", "struct vm_exit *", "uint32_t"); 292 293 SDT_PROBE_DEFINE4(vmm, vmx, exit, return, 294 "struct vmx *", "int", "struct vm_exit *", "int"); 295 /* END CSTYLED */ 296 297 static int vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc); 298 static int vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval); 299 static void vmx_apply_tsc_adjust(struct vmx *, int); 300 static void vmx_apicv_sync_tmr(struct vlapic *vlapic); 301 static void vmx_tpr_shadow_enter(struct vlapic *vlapic); 302 static void vmx_tpr_shadow_exit(struct vlapic *vlapic); 303 304 static void 305 vmx_allow_x2apic_msrs(struct vmx *vmx, int vcpuid) 306 { 307 /* 308 * Allow readonly access to the following x2APIC MSRs from the guest. 
309 */ 310 guest_msr_ro(vmx, vcpuid, MSR_APIC_ID); 311 guest_msr_ro(vmx, vcpuid, MSR_APIC_VERSION); 312 guest_msr_ro(vmx, vcpuid, MSR_APIC_LDR); 313 guest_msr_ro(vmx, vcpuid, MSR_APIC_SVR); 314 315 for (uint_t i = 0; i < 8; i++) { 316 guest_msr_ro(vmx, vcpuid, MSR_APIC_ISR0 + i); 317 guest_msr_ro(vmx, vcpuid, MSR_APIC_TMR0 + i); 318 guest_msr_ro(vmx, vcpuid, MSR_APIC_IRR0 + i); 319 } 320 321 guest_msr_ro(vmx, vcpuid, MSR_APIC_ESR); 322 guest_msr_ro(vmx, vcpuid, MSR_APIC_LVT_TIMER); 323 guest_msr_ro(vmx, vcpuid, MSR_APIC_LVT_THERMAL); 324 guest_msr_ro(vmx, vcpuid, MSR_APIC_LVT_PCINT); 325 guest_msr_ro(vmx, vcpuid, MSR_APIC_LVT_LINT0); 326 guest_msr_ro(vmx, vcpuid, MSR_APIC_LVT_LINT1); 327 guest_msr_ro(vmx, vcpuid, MSR_APIC_LVT_ERROR); 328 guest_msr_ro(vmx, vcpuid, MSR_APIC_ICR_TIMER); 329 guest_msr_ro(vmx, vcpuid, MSR_APIC_DCR_TIMER); 330 guest_msr_ro(vmx, vcpuid, MSR_APIC_ICR); 331 332 /* 333 * Allow TPR, EOI and SELF_IPI MSRs to be read and written by the guest. 334 * 335 * These registers get special treatment described in the section 336 * "Virtualizing MSR-Based APIC Accesses". 337 */ 338 guest_msr_rw(vmx, vcpuid, MSR_APIC_TPR); 339 guest_msr_rw(vmx, vcpuid, MSR_APIC_EOI); 340 guest_msr_rw(vmx, vcpuid, MSR_APIC_SELF_IPI); 341 } 342 343 static ulong_t 344 vmx_fix_cr0(ulong_t cr0) 345 { 346 return ((cr0 | cr0_ones_mask) & ~cr0_zeros_mask); 347 } 348 349 /* 350 * Given a live (VMCS-active) cr0 value, and its shadow counterpart, calculate 351 * the value observable from the guest. 352 */ 353 static ulong_t 354 vmx_unshadow_cr0(uint64_t cr0, uint64_t shadow) 355 { 356 return ((cr0 & ~cr0_ones_mask) | 357 (shadow & (cr0_zeros_mask | cr0_ones_mask))); 358 } 359 360 static ulong_t 361 vmx_fix_cr4(ulong_t cr4) 362 { 363 return ((cr4 | cr4_ones_mask) & ~cr4_zeros_mask); 364 } 365 366 /* 367 * Given a live (VMCS-active) cr4 value, and its shadow counterpart, calculate 368 * the value observable from the guest. 369 */ 370 static ulong_t 371 vmx_unshadow_cr4(uint64_t cr4, uint64_t shadow) 372 { 373 return ((cr4 & ~cr4_ones_mask) | 374 (shadow & (cr4_zeros_mask | cr4_ones_mask))); 375 } 376 377 static void 378 vpid_free(int vpid) 379 { 380 if (vpid < 0 || vpid > 0xffff) 381 panic("vpid_free: invalid vpid %d", vpid); 382 383 /* 384 * VPIDs [0,VM_MAXCPU] are special and are not allocated from 385 * the unit number allocator. 386 */ 387 388 if (vpid > VM_MAXCPU) 389 hma_vmx_vpid_free((uint16_t)vpid); 390 } 391 392 static void 393 vpid_alloc(uint16_t *vpid, int num) 394 { 395 int i, x; 396 397 if (num <= 0 || num > VM_MAXCPU) 398 panic("invalid number of vpids requested: %d", num); 399 400 /* 401 * If the "enable vpid" execution control is not enabled then the 402 * VPID is required to be 0 for all vcpus. 403 */ 404 if ((procbased_ctls2 & PROCBASED2_ENABLE_VPID) == 0) { 405 for (i = 0; i < num; i++) 406 vpid[i] = 0; 407 return; 408 } 409 410 /* 411 * Allocate a unique VPID for each vcpu from the unit number allocator. 412 */ 413 for (i = 0; i < num; i++) { 414 uint16_t tmp; 415 416 tmp = hma_vmx_vpid_alloc(); 417 x = (tmp == 0) ? -1 : tmp; 418 419 if (x == -1) 420 break; 421 else 422 vpid[i] = x; 423 } 424 425 if (i < num) { 426 atomic_add_int(&vpid_alloc_failed, 1); 427 428 /* 429 * If the unit number allocator does not have enough unique 430 * VPIDs then we need to allocate from the [1,VM_MAXCPU] range. 431 * 432 * These VPIDs are not be unique across VMs but this does not 433 * affect correctness because the combined mappings are also 434 * tagged with the EP4TA which is unique for each VM. 
435 * 436 * It is still sub-optimal because the invvpid will invalidate 437 * combined mappings for a particular VPID across all EP4TAs. 438 */ 439 while (i-- > 0) 440 vpid_free(vpid[i]); 441 442 for (i = 0; i < num; i++) 443 vpid[i] = i + 1; 444 } 445 } 446 447 static int 448 vmx_cleanup(void) 449 { 450 /* This is taken care of by the hma registration */ 451 return (0); 452 } 453 454 static void 455 vmx_restore(void) 456 { 457 /* No-op on illumos */ 458 } 459 460 static int 461 vmx_init(void) 462 { 463 int error; 464 uint64_t fixed0, fixed1; 465 uint32_t tmp; 466 enum vmx_caps avail_caps = VMX_CAP_NONE; 467 468 /* Check support for primary processor-based VM-execution controls */ 469 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, 470 MSR_VMX_TRUE_PROCBASED_CTLS, 471 PROCBASED_CTLS_ONE_SETTING, 472 PROCBASED_CTLS_ZERO_SETTING, &procbased_ctls); 473 if (error) { 474 printf("vmx_init: processor does not support desired primary " 475 "processor-based controls\n"); 476 return (error); 477 } 478 479 /* Clear the processor-based ctl bits that are set on demand */ 480 procbased_ctls &= ~PROCBASED_CTLS_WINDOW_SETTING; 481 482 /* Check support for secondary processor-based VM-execution controls */ 483 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, 484 MSR_VMX_PROCBASED_CTLS2, 485 PROCBASED_CTLS2_ONE_SETTING, 486 PROCBASED_CTLS2_ZERO_SETTING, &procbased_ctls2); 487 if (error) { 488 printf("vmx_init: processor does not support desired secondary " 489 "processor-based controls\n"); 490 return (error); 491 } 492 493 /* Check support for VPID */ 494 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, 495 MSR_VMX_PROCBASED_CTLS2, 496 PROCBASED2_ENABLE_VPID, 497 0, &tmp); 498 if (error == 0) 499 procbased_ctls2 |= PROCBASED2_ENABLE_VPID; 500 501 /* Check support for pin-based VM-execution controls */ 502 error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS, 503 MSR_VMX_TRUE_PINBASED_CTLS, 504 PINBASED_CTLS_ONE_SETTING, 505 PINBASED_CTLS_ZERO_SETTING, &pinbased_ctls); 506 if (error) { 507 printf("vmx_init: processor does not support desired " 508 "pin-based controls\n"); 509 return (error); 510 } 511 512 /* Check support for VM-exit controls */ 513 error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS, 514 VM_EXIT_CTLS_ONE_SETTING, 515 VM_EXIT_CTLS_ZERO_SETTING, 516 &exit_ctls); 517 if (error) { 518 printf("vmx_init: processor does not support desired " 519 "exit controls\n"); 520 return (error); 521 } 522 523 /* Check support for VM-entry controls */ 524 error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS, MSR_VMX_TRUE_ENTRY_CTLS, 525 VM_ENTRY_CTLS_ONE_SETTING, VM_ENTRY_CTLS_ZERO_SETTING, 526 &entry_ctls); 527 if (error) { 528 printf("vmx_init: processor does not support desired " 529 "entry controls\n"); 530 return (error); 531 } 532 533 /* 534 * Check support for optional features by testing them 535 * as individual bits 536 */ 537 cap_halt_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, 538 MSR_VMX_TRUE_PROCBASED_CTLS, 539 PROCBASED_HLT_EXITING, 0, 540 &tmp) == 0); 541 542 cap_monitor_trap = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, 543 MSR_VMX_PROCBASED_CTLS, 544 PROCBASED_MTF, 0, 545 &tmp) == 0); 546 547 cap_pause_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, 548 MSR_VMX_TRUE_PROCBASED_CTLS, 549 PROCBASED_PAUSE_EXITING, 0, 550 &tmp) == 0); 551 552 cap_invpcid = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, 553 MSR_VMX_PROCBASED_CTLS2, PROCBASED2_ENABLE_INVPCID, 0, 554 &tmp) == 0); 555 556 /* 557 * Check for APIC virtualization capabilities: 558 * - TPR shadowing 559 * - Full APICv (with or without x2APIC support) 560 
* - Posted interrupt handling 561 */ 562 if (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, MSR_VMX_TRUE_PROCBASED_CTLS, 563 PROCBASED_USE_TPR_SHADOW, 0, &tmp) == 0) { 564 avail_caps |= VMX_CAP_TPR_SHADOW; 565 566 const uint32_t apicv_bits = 567 PROCBASED2_VIRTUALIZE_APIC_ACCESSES | 568 PROCBASED2_APIC_REGISTER_VIRTUALIZATION | 569 PROCBASED2_VIRTUALIZE_X2APIC_MODE | 570 PROCBASED2_VIRTUAL_INTERRUPT_DELIVERY; 571 if (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, 572 MSR_VMX_PROCBASED_CTLS2, apicv_bits, 0, &tmp) == 0) { 573 avail_caps |= VMX_CAP_APICV; 574 575 /* 576 * It may make sense in the future to differentiate 577 * hardware (or software) configurations with APICv but 578 * no support for accelerating x2APIC mode. 579 */ 580 avail_caps |= VMX_CAP_APICV_X2APIC; 581 582 error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS, 583 MSR_VMX_TRUE_PINBASED_CTLS, 584 PINBASED_POSTED_INTERRUPT, 0, &tmp); 585 if (error == 0) { 586 /* 587 * If the PSM-provided interfaces for requesting 588 * and using a PIR IPI vector are present, use 589 * them for posted interrupts. 590 */ 591 if (psm_get_pir_ipivect != NULL && 592 psm_send_pir_ipi != NULL) { 593 pirvec = psm_get_pir_ipivect(); 594 avail_caps |= VMX_CAP_APICV_PIR; 595 } 596 } 597 } 598 } 599 600 /* 601 * Check for necessary EPT capabilities 602 * 603 * TODO: Properly handle when IA32_VMX_EPT_VPID_HW_AD is missing and the 604 * hypervisor intends to utilize dirty page tracking. 605 */ 606 uint64_t ept_caps = rdmsr(MSR_IA32_VMX_EPT_VPID_CAP); 607 if ((ept_caps & EPT_CAPS_REQUIRED) != EPT_CAPS_REQUIRED) { 608 cmn_err(CE_WARN, "!Inadequate EPT capabilities: %lx", ept_caps); 609 return (EINVAL); 610 } 611 612 #ifdef __FreeBSD__ 613 guest_l1d_flush = (cpu_ia32_arch_caps & 614 IA32_ARCH_CAP_SKIP_L1DFL_VMENTRY) == 0; 615 TUNABLE_INT_FETCH("hw.vmm.l1d_flush", &guest_l1d_flush); 616 617 /* 618 * L1D cache flush is enabled. Use IA32_FLUSH_CMD MSR when 619 * available. Otherwise fall back to the software flush 620 * method which loads enough data from the kernel text to 621 * flush existing L1D content, both on VMX entry and on NMI 622 * return. 623 */ 624 if (guest_l1d_flush) { 625 if ((cpu_stdext_feature3 & CPUID_STDEXT3_L1D_FLUSH) == 0) { 626 guest_l1d_flush_sw = 1; 627 TUNABLE_INT_FETCH("hw.vmm.l1d_flush_sw", 628 &guest_l1d_flush_sw); 629 } 630 if (guest_l1d_flush_sw) { 631 if (nmi_flush_l1d_sw <= 1) 632 nmi_flush_l1d_sw = 1; 633 } else { 634 msr_load_list[0].index = MSR_IA32_FLUSH_CMD; 635 msr_load_list[0].val = IA32_FLUSH_CMD_L1D; 636 } 637 } 638 #else 639 /* L1D flushing is taken care of by smt_acquire() and friends */ 640 guest_l1d_flush = 0; 641 #endif /* __FreeBSD__ */ 642 643 /* 644 * Stash the cr0 and cr4 bits that must be fixed to 0 or 1 645 */ 646 fixed0 = rdmsr(MSR_VMX_CR0_FIXED0); 647 fixed1 = rdmsr(MSR_VMX_CR0_FIXED1); 648 cr0_ones_mask = fixed0 & fixed1; 649 cr0_zeros_mask = ~fixed0 & ~fixed1; 650 651 /* 652 * Since Unrestricted Guest was already verified present, CR0_PE and 653 * CR0_PG are allowed to be set to zero in VMX non-root operation 654 */ 655 cr0_ones_mask &= ~(CR0_PG | CR0_PE); 656 657 /* 658 * Do not allow the guest to set CR0_NW or CR0_CD. 
659 */ 660 cr0_zeros_mask |= (CR0_NW | CR0_CD); 661 662 fixed0 = rdmsr(MSR_VMX_CR4_FIXED0); 663 fixed1 = rdmsr(MSR_VMX_CR4_FIXED1); 664 cr4_ones_mask = fixed0 & fixed1; 665 cr4_zeros_mask = ~fixed0 & ~fixed1; 666 667 vmx_msr_init(); 668 669 vmx_capabilities = avail_caps; 670 vmx_initialized = 1; 671 672 return (0); 673 } 674 675 static void 676 vmx_trigger_hostintr(int vector) 677 { 678 VERIFY(vector >= 32 && vector <= 255); 679 vmx_call_isr(vector - 32); 680 } 681 682 static void * 683 vmx_vminit(struct vm *vm) 684 { 685 uint16_t vpid[VM_MAXCPU]; 686 int i, error, datasel; 687 struct vmx *vmx; 688 uint32_t exc_bitmap; 689 uint16_t maxcpus; 690 uint32_t proc_ctls, proc2_ctls, pin_ctls; 691 uint64_t apic_access_pa = UINT64_MAX; 692 693 vmx = kmem_zalloc(sizeof (struct vmx), KM_SLEEP); 694 VERIFY3U((uintptr_t)vmx & PAGE_MASK, ==, 0); 695 696 vmx->vm = vm; 697 vmx->eptp = vmspace_table_root(vm_get_vmspace(vm)); 698 699 /* 700 * Clean up EP4TA-tagged guest-physical and combined mappings 701 * 702 * VMX transitions are not required to invalidate any guest physical 703 * mappings. So, it may be possible for stale guest physical mappings 704 * to be present in the processor TLBs. 705 * 706 * Combined mappings for this EP4TA are also invalidated for all VPIDs. 707 */ 708 hma_vmx_invept_allcpus((uintptr_t)vmx->eptp); 709 710 vmx_msr_bitmap_initialize(vmx); 711 712 vpid_alloc(vpid, VM_MAXCPU); 713 714 /* Grab the established defaults */ 715 proc_ctls = procbased_ctls; 716 proc2_ctls = procbased_ctls2; 717 pin_ctls = pinbased_ctls; 718 /* For now, default to the available capabilities */ 719 vmx->vmx_caps = vmx_capabilities; 720 721 if (vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW)) { 722 proc_ctls |= PROCBASED_USE_TPR_SHADOW; 723 proc_ctls &= ~PROCBASED_CR8_LOAD_EXITING; 724 proc_ctls &= ~PROCBASED_CR8_STORE_EXITING; 725 } 726 if (vmx_cap_en(vmx, VMX_CAP_APICV)) { 727 ASSERT(vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW)); 728 729 proc2_ctls |= (PROCBASED2_VIRTUALIZE_APIC_ACCESSES | 730 PROCBASED2_APIC_REGISTER_VIRTUALIZATION | 731 PROCBASED2_VIRTUAL_INTERRUPT_DELIVERY); 732 733 /* 734 * Allocate a page of memory to back the APIC access address for 735 * when APICv features are in use. Guest MMIO accesses should 736 * never actually reach this page, but rather be intercepted. 737 */ 738 vmx->apic_access_page = kmem_zalloc(PAGESIZE, KM_SLEEP); 739 VERIFY3U((uintptr_t)vmx->apic_access_page & PAGEOFFSET, ==, 0); 740 apic_access_pa = vtophys(vmx->apic_access_page); 741 742 error = vm_map_mmio(vm, DEFAULT_APIC_BASE, PAGE_SIZE, 743 apic_access_pa); 744 /* XXX this should really return an error to the caller */ 745 KASSERT(error == 0, ("vm_map_mmio(apicbase) error %d", error)); 746 } 747 if (vmx_cap_en(vmx, VMX_CAP_APICV_PIR)) { 748 ASSERT(vmx_cap_en(vmx, VMX_CAP_APICV)); 749 750 pin_ctls |= PINBASED_POSTED_INTERRUPT; 751 } 752 753 maxcpus = vm_get_maxcpus(vm); 754 datasel = vmm_get_host_datasel(); 755 for (i = 0; i < maxcpus; i++) { 756 /* 757 * Cache physical address lookups for various components which 758 * may be required inside the critical_enter() section implied 759 * by VMPTRLD() below. 
760 */ 761 vm_paddr_t msr_bitmap_pa = vtophys(vmx->msr_bitmap[i]); 762 vm_paddr_t apic_page_pa = vtophys(&vmx->apic_page[i]); 763 vm_paddr_t pir_desc_pa = vtophys(&vmx->pir_desc[i]); 764 765 vmx->vmcs_pa[i] = (uintptr_t)vtophys(&vmx->vmcs[i]); 766 vmcs_initialize(&vmx->vmcs[i], vmx->vmcs_pa[i]); 767 768 vmx_msr_guest_init(vmx, i); 769 770 vmcs_load(vmx->vmcs_pa[i]); 771 772 vmcs_write(VMCS_HOST_IA32_PAT, vmm_get_host_pat()); 773 vmcs_write(VMCS_HOST_IA32_EFER, vmm_get_host_efer()); 774 775 /* Load the control registers */ 776 vmcs_write(VMCS_HOST_CR0, vmm_get_host_cr0()); 777 vmcs_write(VMCS_HOST_CR4, vmm_get_host_cr4() | CR4_VMXE); 778 779 /* Load the segment selectors */ 780 vmcs_write(VMCS_HOST_CS_SELECTOR, vmm_get_host_codesel()); 781 782 vmcs_write(VMCS_HOST_ES_SELECTOR, datasel); 783 vmcs_write(VMCS_HOST_SS_SELECTOR, datasel); 784 vmcs_write(VMCS_HOST_DS_SELECTOR, datasel); 785 786 vmcs_write(VMCS_HOST_FS_SELECTOR, vmm_get_host_fssel()); 787 vmcs_write(VMCS_HOST_GS_SELECTOR, vmm_get_host_gssel()); 788 vmcs_write(VMCS_HOST_TR_SELECTOR, vmm_get_host_tsssel()); 789 790 /* 791 * Configure host sysenter MSRs to be restored on VM exit. 792 * The thread-specific MSR_INTC_SEP_ESP value is loaded in 793 * vmx_run. 794 */ 795 vmcs_write(VMCS_HOST_IA32_SYSENTER_CS, KCS_SEL); 796 vmcs_write(VMCS_HOST_IA32_SYSENTER_EIP, 797 rdmsr(MSR_SYSENTER_EIP_MSR)); 798 799 /* instruction pointer */ 800 if (no_flush_rsb) { 801 vmcs_write(VMCS_HOST_RIP, (uint64_t)vmx_exit_guest); 802 } else { 803 vmcs_write(VMCS_HOST_RIP, 804 (uint64_t)vmx_exit_guest_flush_rsb); 805 } 806 807 /* link pointer */ 808 vmcs_write(VMCS_LINK_POINTER, ~0); 809 810 vmcs_write(VMCS_EPTP, vmx->eptp); 811 vmcs_write(VMCS_PIN_BASED_CTLS, pin_ctls); 812 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, proc_ctls); 813 vmcs_write(VMCS_SEC_PROC_BASED_CTLS, proc2_ctls); 814 vmcs_write(VMCS_EXIT_CTLS, exit_ctls); 815 vmcs_write(VMCS_ENTRY_CTLS, entry_ctls); 816 vmcs_write(VMCS_MSR_BITMAP, msr_bitmap_pa); 817 vmcs_write(VMCS_VPID, vpid[i]); 818 819 if (guest_l1d_flush && !guest_l1d_flush_sw) { 820 vmcs_write(VMCS_ENTRY_MSR_LOAD, 821 vtophys(&msr_load_list[0])); 822 vmcs_write(VMCS_ENTRY_MSR_LOAD_COUNT, 823 nitems(msr_load_list)); 824 vmcs_write(VMCS_EXIT_MSR_STORE, 0); 825 vmcs_write(VMCS_EXIT_MSR_STORE_COUNT, 0); 826 } 827 828 /* exception bitmap */ 829 if (vcpu_trace_exceptions(vm, i)) 830 exc_bitmap = 0xffffffff; 831 else 832 exc_bitmap = 1 << IDT_MC; 833 vmcs_write(VMCS_EXCEPTION_BITMAP, exc_bitmap); 834 835 vmx->ctx[i].guest_dr6 = DBREG_DR6_RESERVED1; 836 vmcs_write(VMCS_GUEST_DR7, DBREG_DR7_RESERVED1); 837 838 if (vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW)) { 839 vmcs_write(VMCS_VIRTUAL_APIC, apic_page_pa); 840 } 841 842 if (vmx_cap_en(vmx, VMX_CAP_APICV)) { 843 vmcs_write(VMCS_APIC_ACCESS, apic_access_pa); 844 vmcs_write(VMCS_EOI_EXIT0, 0); 845 vmcs_write(VMCS_EOI_EXIT1, 0); 846 vmcs_write(VMCS_EOI_EXIT2, 0); 847 vmcs_write(VMCS_EOI_EXIT3, 0); 848 } 849 if (vmx_cap_en(vmx, VMX_CAP_APICV_PIR)) { 850 vmcs_write(VMCS_PIR_VECTOR, pirvec); 851 vmcs_write(VMCS_PIR_DESC, pir_desc_pa); 852 } 853 854 /* 855 * Set up the CR0/4 masks and configure the read shadow state 856 * to the power-on register value from the Intel Sys Arch. 
857 * CR0 - 0x60000010 858 * CR4 - 0 859 */ 860 vmcs_write(VMCS_CR0_MASK, cr0_ones_mask | cr0_zeros_mask); 861 vmcs_write(VMCS_CR0_SHADOW, 0x60000010); 862 vmcs_write(VMCS_CR4_MASK, cr4_ones_mask | cr4_zeros_mask); 863 vmcs_write(VMCS_CR4_SHADOW, 0); 864 865 vmcs_clear(vmx->vmcs_pa[i]); 866 867 vmx->cap[i].set = 0; 868 vmx->cap[i].proc_ctls = proc_ctls; 869 vmx->cap[i].proc_ctls2 = proc2_ctls; 870 vmx->cap[i].exc_bitmap = exc_bitmap; 871 872 vmx->state[i].nextrip = ~0; 873 vmx->state[i].lastcpu = NOCPU; 874 vmx->state[i].vpid = vpid[i]; 875 } 876 877 return (vmx); 878 } 879 880 static int 881 vmx_handle_cpuid(struct vm *vm, int vcpu, struct vmxctx *vmxctx) 882 { 883 int handled; 884 885 handled = x86_emulate_cpuid(vm, vcpu, (uint64_t *)&vmxctx->guest_rax, 886 (uint64_t *)&vmxctx->guest_rbx, (uint64_t *)&vmxctx->guest_rcx, 887 (uint64_t *)&vmxctx->guest_rdx); 888 return (handled); 889 } 890 891 static VMM_STAT_INTEL(VCPU_INVVPID_SAVED, "Number of vpid invalidations saved"); 892 static VMM_STAT_INTEL(VCPU_INVVPID_DONE, "Number of vpid invalidations done"); 893 894 #define INVVPID_TYPE_ADDRESS 0UL 895 #define INVVPID_TYPE_SINGLE_CONTEXT 1UL 896 #define INVVPID_TYPE_ALL_CONTEXTS 2UL 897 898 struct invvpid_desc { 899 uint16_t vpid; 900 uint16_t _res1; 901 uint32_t _res2; 902 uint64_t linear_addr; 903 }; 904 CTASSERT(sizeof (struct invvpid_desc) == 16); 905 906 static __inline void 907 invvpid(uint64_t type, struct invvpid_desc desc) 908 { 909 int error; 910 911 DTRACE_PROBE3(vmx__invvpid, uint64_t, type, uint16_t, desc.vpid, 912 uint64_t, desc.linear_addr); 913 914 __asm __volatile("invvpid %[desc], %[type];" 915 VMX_SET_ERROR_CODE_ASM 916 : [error] "=r" (error) 917 : [desc] "m" (desc), [type] "r" (type) 918 : "memory"); 919 920 if (error) { 921 panic("invvpid error %d", error); 922 } 923 } 924 925 /* 926 * Invalidate guest mappings identified by its VPID from the TLB. 927 * 928 * This is effectively a flush of the guest TLB, removing only "combined 929 * mappings" (to use the VMX parlance). Actions which modify the EPT structures 930 * for the instance (such as unmapping GPAs) would require an 'invept' flush. 931 */ 932 static void 933 vmx_invvpid(struct vmx *vmx, int vcpu, int running) 934 { 935 struct vmxstate *vmxstate; 936 struct vmspace *vms; 937 938 vmxstate = &vmx->state[vcpu]; 939 if (vmxstate->vpid == 0) { 940 return; 941 } 942 943 if (!running) { 944 /* 945 * Set the 'lastcpu' to an invalid host cpu. 946 * 947 * This will invalidate TLB entries tagged with the vcpu's 948 * vpid the next time it runs via vmx_set_pcpu_defaults(). 949 */ 950 vmxstate->lastcpu = NOCPU; 951 return; 952 } 953 954 /* 955 * Invalidate all mappings tagged with 'vpid' 956 * 957 * This is done when a vCPU moves between host CPUs, where there may be 958 * stale TLB entries for this VPID on the target, or if emulated actions 959 * in the guest CPU have incurred an explicit TLB flush. 960 */ 961 vms = vm_get_vmspace(vmx->vm); 962 if (vmspace_table_gen(vms) == vmx->eptgen[curcpu]) { 963 struct invvpid_desc invvpid_desc = { 964 .vpid = vmxstate->vpid, 965 .linear_addr = 0, 966 ._res1 = 0, 967 ._res2 = 0, 968 }; 969 970 invvpid(INVVPID_TYPE_SINGLE_CONTEXT, invvpid_desc); 971 vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_DONE, 1); 972 } else { 973 /* 974 * The INVVPID can be skipped if an INVEPT is going to be 975 * performed before entering the guest. The INVEPT will 976 * invalidate combined mappings for the EP4TA associated with 977 * this guest, in all VPIDs. 
		 */
		vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_SAVED, 1);
	}
}

static __inline void
invept(uint64_t type, uint64_t eptp)
{
	int error;
	struct invept_desc {
		uint64_t eptp;
		uint64_t _resv;
	} desc = { eptp, 0 };

	DTRACE_PROBE2(vmx__invept, uint64_t, type, uint64_t, eptp);

	__asm __volatile("invept %[desc], %[type];"
	    VMX_SET_ERROR_CODE_ASM
	    : [error] "=r" (error)
	    : [desc] "m" (desc), [type] "r" (type)
	    : "memory");

	if (error != 0) {
		panic("invept error %d", error);
	}
}

static void
vmx_set_pcpu_defaults(struct vmx *vmx, int vcpu)
{
	struct vmxstate *vmxstate;

	/*
	 * Regardless of whether the VM appears to have migrated between CPUs,
	 * save the host sysenter stack pointer. As it points to the kernel
	 * stack of each thread, the correct value must be maintained for every
	 * trip into the critical section.
	 */
	vmcs_write(VMCS_HOST_IA32_SYSENTER_ESP, rdmsr(MSR_SYSENTER_ESP_MSR));

	/*
	 * Perform any needed TSC_OFFSET adjustment based on TSC_MSR writes or
	 * migration between host CPUs with differing TSC values.
	 */
	vmx_apply_tsc_adjust(vmx, vcpu);

	vmxstate = &vmx->state[vcpu];
	if (vmxstate->lastcpu == curcpu)
		return;

	vmxstate->lastcpu = curcpu;

	vmm_stat_incr(vmx->vm, vcpu, VCPU_MIGRATIONS, 1);

	/* Load the per-CPU IDT, GDT, TR and GS base addresses */
	vmcs_write(VMCS_HOST_IDTR_BASE, vmm_get_host_idtrbase());
	vmcs_write(VMCS_HOST_TR_BASE, vmm_get_host_trbase());
	vmcs_write(VMCS_HOST_GDTR_BASE, vmm_get_host_gdtrbase());
	vmcs_write(VMCS_HOST_GS_BASE, vmm_get_host_gsbase());
	vmx_invvpid(vmx, vcpu, 1);
}

/*
 * We depend on 'procbased_ctls' to have the Interrupt Window Exiting bit set.
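 * Interrupt-window exiting forces a VM-exit as soon as the guest becomes able
 * to accept an external interrupt (RFLAGS.IF set and no STI/MOV-SS blocking),
 * which is how pending events get delivered promptly when the guest is not
 * currently interruptible.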
 */
CTASSERT((PROCBASED_CTLS_ONE_SETTING & PROCBASED_INT_WINDOW_EXITING) != 0);

static __inline void
vmx_set_int_window_exiting(struct vmx *vmx, int vcpu)
{
	if ((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) == 0) {
		/* Enable interrupt window exiting */
		vmx->cap[vcpu].proc_ctls |= PROCBASED_INT_WINDOW_EXITING;
		vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
	}
}

static __inline void
vmx_clear_int_window_exiting(struct vmx *vmx, int vcpu)
{
	KASSERT((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0,
	    ("intr_window_exiting not set: %x", vmx->cap[vcpu].proc_ctls));

	/* Disable interrupt window exiting */
	vmx->cap[vcpu].proc_ctls &= ~PROCBASED_INT_WINDOW_EXITING;
	vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
}

static __inline bool
vmx_nmi_window_exiting(struct vmx *vmx, int vcpu)
{
	return ((vmx->cap[vcpu].proc_ctls & PROCBASED_NMI_WINDOW_EXITING) != 0);
}

static __inline void
vmx_set_nmi_window_exiting(struct vmx *vmx, int vcpu)
{
	if (!vmx_nmi_window_exiting(vmx, vcpu)) {
		vmx->cap[vcpu].proc_ctls |= PROCBASED_NMI_WINDOW_EXITING;
		vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
	}
}

static __inline void
vmx_clear_nmi_window_exiting(struct vmx *vmx, int vcpu)
{
	ASSERT(vmx_nmi_window_exiting(vmx, vcpu));
	vmx->cap[vcpu].proc_ctls &= ~PROCBASED_NMI_WINDOW_EXITING;
	vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
}

/*
 * Set the TSC adjustment, taking into account the offsets measured between
 * host physical CPUs. This is required even if the guest has not set a TSC
 * offset, since each vCPU inherits the TSC offset of whatever physical CPU it
 * has migrated onto. Without this mitigation, un-synched host TSCs will convey
 * the appearance of TSC time-travel to the guest as its vCPUs migrate.
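 *
 * Roughly: with TSC offsetting enabled and RDTSC not intercepted, the guest
 * observes host_tsc + VMCS_TSC_OFFSET, so the value written below folds the
 * guest-requested offset together with the per-CPU adjustment reported by
 * vcpu_tsc_offset().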
1095 */ 1096 static void 1097 vmx_apply_tsc_adjust(struct vmx *vmx, int vcpu) 1098 { 1099 const uint64_t offset = vcpu_tsc_offset(vmx->vm, vcpu, true); 1100 1101 ASSERT(vmx->cap[vcpu].proc_ctls & PROCBASED_TSC_OFFSET); 1102 1103 if (vmx->tsc_offset_active[vcpu] != offset) { 1104 vmcs_write(VMCS_TSC_OFFSET, offset); 1105 vmx->tsc_offset_active[vcpu] = offset; 1106 } 1107 } 1108 1109 CTASSERT(VMCS_INTR_T_HWINTR == VM_INTINFO_HWINTR); 1110 CTASSERT(VMCS_INTR_T_NMI == VM_INTINFO_NMI); 1111 CTASSERT(VMCS_INTR_T_HWEXCEPTION == VM_INTINFO_HWEXCP); 1112 CTASSERT(VMCS_INTR_T_SWINTR == VM_INTINFO_SWINTR); 1113 CTASSERT(VMCS_INTR_T_PRIV_SWEXCEPTION == VM_INTINFO_RESV5); 1114 CTASSERT(VMCS_INTR_T_SWEXCEPTION == VM_INTINFO_RESV6); 1115 CTASSERT(VMCS_IDT_VEC_ERRCODE_VALID == VM_INTINFO_DEL_ERRCODE); 1116 CTASSERT(VMCS_INTR_T_MASK == VM_INTINFO_MASK_TYPE); 1117 1118 static uint64_t 1119 vmx_idtvec_to_intinfo(uint32_t info) 1120 { 1121 ASSERT(info & VMCS_IDT_VEC_VALID); 1122 1123 const uint32_t type = info & VMCS_INTR_T_MASK; 1124 const uint8_t vec = info & 0xff; 1125 1126 switch (type) { 1127 case VMCS_INTR_T_HWINTR: 1128 case VMCS_INTR_T_NMI: 1129 case VMCS_INTR_T_HWEXCEPTION: 1130 case VMCS_INTR_T_SWINTR: 1131 case VMCS_INTR_T_PRIV_SWEXCEPTION: 1132 case VMCS_INTR_T_SWEXCEPTION: 1133 break; 1134 default: 1135 panic("unexpected event type 0x%03x", type); 1136 } 1137 1138 uint64_t intinfo = VM_INTINFO_VALID | type | vec; 1139 if (info & VMCS_IDT_VEC_ERRCODE_VALID) { 1140 const uint32_t errcode = vmcs_read(VMCS_IDT_VECTORING_ERROR); 1141 intinfo |= (uint64_t)errcode << 32; 1142 } 1143 1144 return (intinfo); 1145 } 1146 1147 static void 1148 vmx_inject_intinfo(uint64_t info) 1149 { 1150 ASSERT(VM_INTINFO_PENDING(info)); 1151 ASSERT0(info & VM_INTINFO_MASK_RSVD); 1152 1153 /* 1154 * The bhyve format matches that of the VMCS, which is ensured by the 1155 * CTASSERTs above. 1156 */ 1157 uint32_t inject = info; 1158 switch (VM_INTINFO_VECTOR(info)) { 1159 case IDT_BP: 1160 case IDT_OF: 1161 /* 1162 * VT-x requires #BP and #OF to be injected as software 1163 * exceptions. 1164 */ 1165 inject &= ~VMCS_INTR_T_MASK; 1166 inject |= VMCS_INTR_T_SWEXCEPTION; 1167 break; 1168 default: 1169 break; 1170 } 1171 1172 if (VM_INTINFO_HAS_ERRCODE(info)) { 1173 vmcs_write(VMCS_ENTRY_EXCEPTION_ERROR, 1174 VM_INTINFO_ERRCODE(info)); 1175 } 1176 vmcs_write(VMCS_ENTRY_INTR_INFO, inject); 1177 } 1178 1179 #define NMI_BLOCKING (VMCS_INTERRUPTIBILITY_NMI_BLOCKING | \ 1180 VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING) 1181 #define HWINTR_BLOCKING (VMCS_INTERRUPTIBILITY_STI_BLOCKING | \ 1182 VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING) 1183 1184 static void 1185 vmx_inject_nmi(struct vmx *vmx, int vcpu) 1186 { 1187 ASSERT0(vmcs_read(VMCS_GUEST_INTERRUPTIBILITY) & NMI_BLOCKING); 1188 ASSERT0(vmcs_read(VMCS_ENTRY_INTR_INFO) & VMCS_INTR_VALID); 1189 1190 /* 1191 * Inject the virtual NMI. The vector must be the NMI IDT entry 1192 * or the VMCS entry check will fail. 1193 */ 1194 vmcs_write(VMCS_ENTRY_INTR_INFO, 1195 IDT_NMI | VMCS_INTR_T_NMI | VMCS_INTR_VALID); 1196 1197 /* Clear the request */ 1198 vm_nmi_clear(vmx->vm, vcpu); 1199 } 1200 1201 /* 1202 * Inject exceptions, NMIs, and ExtINTs. 1203 * 1204 * The logic behind these are complicated and may involve mutex contention, so 1205 * the injection is performed without the protection of host CPU interrupts 1206 * being disabled. This means a racing notification could be "lost", 1207 * necessitating a later call to vmx_inject_recheck() to close that window 1208 * of opportunity. 
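 *
 * The return value summarizes the outcome: EIS_CAN_INJECT means the entry
 * interruption field is still free, EIS_EV_EXISTING/EIS_EV_INJECTED mean an
 * event is already staged, EIS_GI_BLOCK means guest interruptibility blocks
 * delivery, and EIS_REQ_EXIT asks the caller to force an exit immediately
 * after entering the guest.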
1209 */ 1210 static enum event_inject_state 1211 vmx_inject_events(struct vmx *vmx, int vcpu, uint64_t rip) 1212 { 1213 uint64_t entryinfo; 1214 uint32_t gi, info; 1215 int vector; 1216 enum event_inject_state state; 1217 1218 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 1219 info = vmcs_read(VMCS_ENTRY_INTR_INFO); 1220 state = EIS_CAN_INJECT; 1221 1222 /* Clear any interrupt blocking if the guest %rip has changed */ 1223 if (vmx->state[vcpu].nextrip != rip && (gi & HWINTR_BLOCKING) != 0) { 1224 gi &= ~HWINTR_BLOCKING; 1225 vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi); 1226 } 1227 1228 /* 1229 * It could be that an interrupt is already pending for injection from 1230 * the VMCS. This would be the case if the vCPU exited for conditions 1231 * such as an AST before a vm-entry delivered the injection. 1232 */ 1233 if ((info & VMCS_INTR_VALID) != 0) { 1234 return (EIS_EV_EXISTING | EIS_REQ_EXIT); 1235 } 1236 1237 if (vm_entry_intinfo(vmx->vm, vcpu, &entryinfo)) { 1238 vmx_inject_intinfo(entryinfo); 1239 state = EIS_EV_INJECTED; 1240 } 1241 1242 if (vm_nmi_pending(vmx->vm, vcpu)) { 1243 /* 1244 * If there are no conditions blocking NMI injection then inject 1245 * it directly here otherwise enable "NMI window exiting" to 1246 * inject it as soon as we can. 1247 * 1248 * According to the Intel manual, some CPUs do not allow NMI 1249 * injection when STI_BLOCKING is active. That check is 1250 * enforced here, regardless of CPU capability. If running on a 1251 * CPU without such a restriction it will immediately exit and 1252 * the NMI will be injected in the "NMI window exiting" handler. 1253 */ 1254 if ((gi & (HWINTR_BLOCKING | NMI_BLOCKING)) == 0) { 1255 if (state == EIS_CAN_INJECT) { 1256 vmx_inject_nmi(vmx, vcpu); 1257 state = EIS_EV_INJECTED; 1258 } else { 1259 return (state | EIS_REQ_EXIT); 1260 } 1261 } else { 1262 vmx_set_nmi_window_exiting(vmx, vcpu); 1263 } 1264 } 1265 1266 if (vm_extint_pending(vmx->vm, vcpu)) { 1267 if (state != EIS_CAN_INJECT) { 1268 return (state | EIS_REQ_EXIT); 1269 } 1270 if ((gi & HWINTR_BLOCKING) != 0 || 1271 (vmcs_read(VMCS_GUEST_RFLAGS) & PSL_I) == 0) { 1272 return (EIS_GI_BLOCK); 1273 } 1274 1275 /* Ask the legacy pic for a vector to inject */ 1276 vatpic_pending_intr(vmx->vm, &vector); 1277 1278 /* 1279 * From the Intel SDM, Volume 3, Section "Maskable 1280 * Hardware Interrupts": 1281 * - maskable interrupt vectors [0,255] can be delivered 1282 * through the INTR pin. 1283 */ 1284 KASSERT(vector >= 0 && vector <= 255, 1285 ("invalid vector %d from INTR", vector)); 1286 1287 /* Inject the interrupt */ 1288 vmcs_write(VMCS_ENTRY_INTR_INFO, 1289 VMCS_INTR_T_HWINTR | VMCS_INTR_VALID | vector); 1290 1291 vm_extint_clear(vmx->vm, vcpu); 1292 vatpic_intr_accepted(vmx->vm, vector); 1293 state = EIS_EV_INJECTED; 1294 } 1295 1296 return (state); 1297 } 1298 1299 /* 1300 * Inject any interrupts pending on the vLAPIC. 1301 * 1302 * This is done with host CPU interrupts disabled so notification IPIs, either 1303 * from the standard vCPU notification or APICv posted interrupts, will be 1304 * queued on the host APIC and recognized when entering VMX context. 
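 *
 * In the APICv case only the guest interrupt status (RVI) needs to be raised
 * to the pending vector; the processor then performs the delivery and the
 * ISR/PPR updates itself once the guest is resumed.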
1305 */ 1306 static enum event_inject_state 1307 vmx_inject_vlapic(struct vmx *vmx, int vcpu, struct vlapic *vlapic) 1308 { 1309 int vector; 1310 1311 if (!vlapic_pending_intr(vlapic, &vector)) { 1312 return (EIS_CAN_INJECT); 1313 } 1314 1315 /* 1316 * From the Intel SDM, Volume 3, Section "Maskable 1317 * Hardware Interrupts": 1318 * - maskable interrupt vectors [16,255] can be delivered 1319 * through the local APIC. 1320 */ 1321 KASSERT(vector >= 16 && vector <= 255, 1322 ("invalid vector %d from local APIC", vector)); 1323 1324 if (vmx_cap_en(vmx, VMX_CAP_APICV)) { 1325 uint16_t status_old = vmcs_read(VMCS_GUEST_INTR_STATUS); 1326 uint16_t status_new = (status_old & 0xff00) | vector; 1327 1328 /* 1329 * The APICv state will have been synced into the vLAPIC 1330 * as part of vlapic_pending_intr(). Prepare the VMCS 1331 * for the to-be-injected pending interrupt. 1332 */ 1333 if (status_new > status_old) { 1334 vmcs_write(VMCS_GUEST_INTR_STATUS, status_new); 1335 } 1336 1337 /* 1338 * Ensure VMCS state regarding EOI traps is kept in sync 1339 * with the TMRs in the vlapic. 1340 */ 1341 vmx_apicv_sync_tmr(vlapic); 1342 1343 /* 1344 * The rest of the injection process for injecting the 1345 * interrupt(s) is handled by APICv. It does not preclude other 1346 * event injection from occurring. 1347 */ 1348 return (EIS_CAN_INJECT); 1349 } 1350 1351 ASSERT0(vmcs_read(VMCS_ENTRY_INTR_INFO) & VMCS_INTR_VALID); 1352 1353 /* Does guest interruptability block injection? */ 1354 if ((vmcs_read(VMCS_GUEST_INTERRUPTIBILITY) & HWINTR_BLOCKING) != 0 || 1355 (vmcs_read(VMCS_GUEST_RFLAGS) & PSL_I) == 0) { 1356 return (EIS_GI_BLOCK); 1357 } 1358 1359 /* Inject the interrupt */ 1360 vmcs_write(VMCS_ENTRY_INTR_INFO, 1361 VMCS_INTR_T_HWINTR | VMCS_INTR_VALID | vector); 1362 1363 /* Update the Local APIC ISR */ 1364 vlapic_intr_accepted(vlapic, vector); 1365 1366 return (EIS_EV_INJECTED); 1367 } 1368 1369 /* 1370 * Re-check for events to be injected. 1371 * 1372 * Once host CPU interrupts are disabled, check for the presence of any events 1373 * which require injection processing. If an exit is required upon injection, 1374 * or once the guest becomes interruptable, that will be configured too. 1375 */ 1376 static bool 1377 vmx_inject_recheck(struct vmx *vmx, int vcpu, enum event_inject_state state) 1378 { 1379 if (state == EIS_CAN_INJECT) { 1380 if (vm_nmi_pending(vmx->vm, vcpu) && 1381 !vmx_nmi_window_exiting(vmx, vcpu)) { 1382 /* queued NMI not blocked by NMI-window-exiting */ 1383 return (true); 1384 } 1385 if (vm_extint_pending(vmx->vm, vcpu)) { 1386 /* queued ExtINT not blocked by existing injection */ 1387 return (true); 1388 } 1389 } else { 1390 if ((state & EIS_REQ_EXIT) != 0) { 1391 /* 1392 * Use a self-IPI to force an immediate exit after 1393 * event injection has occurred. 1394 */ 1395 poke_cpu(CPU->cpu_id); 1396 } else { 1397 /* 1398 * If any event is being injected, an exit immediately 1399 * upon becoming interruptable again will allow pending 1400 * or newly queued events to be injected in a timely 1401 * manner. 1402 */ 1403 vmx_set_int_window_exiting(vmx, vcpu); 1404 } 1405 } 1406 return (false); 1407 } 1408 1409 /* 1410 * If the Virtual NMIs execution control is '1' then the logical processor 1411 * tracks virtual-NMI blocking in the Guest Interruptibility-state field of 1412 * the VMCS. An IRET instruction in VMX non-root operation will remove any 1413 * virtual-NMI blocking. 1414 * 1415 * This unblocking occurs even if the IRET causes a fault. 
In this case the 1416 * hypervisor needs to restore virtual-NMI blocking before resuming the guest. 1417 */ 1418 static void 1419 vmx_restore_nmi_blocking(struct vmx *vmx, int vcpuid) 1420 { 1421 uint32_t gi; 1422 1423 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 1424 gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING; 1425 vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi); 1426 } 1427 1428 static void 1429 vmx_clear_nmi_blocking(struct vmx *vmx, int vcpuid) 1430 { 1431 uint32_t gi; 1432 1433 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 1434 gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING; 1435 vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi); 1436 } 1437 1438 static void 1439 vmx_assert_nmi_blocking(struct vmx *vmx, int vcpuid) 1440 { 1441 uint32_t gi; 1442 1443 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 1444 KASSERT(gi & VMCS_INTERRUPTIBILITY_NMI_BLOCKING, 1445 ("NMI blocking is not in effect %x", gi)); 1446 } 1447 1448 static int 1449 vmx_emulate_xsetbv(struct vmx *vmx, int vcpu, struct vm_exit *vmexit) 1450 { 1451 struct vmxctx *vmxctx; 1452 uint64_t xcrval; 1453 const struct xsave_limits *limits; 1454 1455 vmxctx = &vmx->ctx[vcpu]; 1456 limits = vmm_get_xsave_limits(); 1457 1458 /* 1459 * Note that the processor raises a GP# fault on its own if 1460 * xsetbv is executed for CPL != 0, so we do not have to 1461 * emulate that fault here. 1462 */ 1463 1464 /* Only xcr0 is supported. */ 1465 if (vmxctx->guest_rcx != 0) { 1466 vm_inject_gp(vmx->vm, vcpu); 1467 return (HANDLED); 1468 } 1469 1470 /* We only handle xcr0 if both the host and guest have XSAVE enabled. */ 1471 if (!limits->xsave_enabled || 1472 !(vmcs_read(VMCS_GUEST_CR4) & CR4_XSAVE)) { 1473 vm_inject_ud(vmx->vm, vcpu); 1474 return (HANDLED); 1475 } 1476 1477 xcrval = vmxctx->guest_rdx << 32 | (vmxctx->guest_rax & 0xffffffff); 1478 if ((xcrval & ~limits->xcr0_allowed) != 0) { 1479 vm_inject_gp(vmx->vm, vcpu); 1480 return (HANDLED); 1481 } 1482 1483 if (!(xcrval & XFEATURE_ENABLED_X87)) { 1484 vm_inject_gp(vmx->vm, vcpu); 1485 return (HANDLED); 1486 } 1487 1488 /* AVX (YMM_Hi128) requires SSE. */ 1489 if (xcrval & XFEATURE_ENABLED_AVX && 1490 (xcrval & XFEATURE_AVX) != XFEATURE_AVX) { 1491 vm_inject_gp(vmx->vm, vcpu); 1492 return (HANDLED); 1493 } 1494 1495 /* 1496 * AVX512 requires base AVX (YMM_Hi128) as well as OpMask, 1497 * ZMM_Hi256, and Hi16_ZMM. 1498 */ 1499 if (xcrval & XFEATURE_AVX512 && 1500 (xcrval & (XFEATURE_AVX512 | XFEATURE_AVX)) != 1501 (XFEATURE_AVX512 | XFEATURE_AVX)) { 1502 vm_inject_gp(vmx->vm, vcpu); 1503 return (HANDLED); 1504 } 1505 1506 /* 1507 * Intel MPX requires both bound register state flags to be 1508 * set. 1509 */ 1510 if (((xcrval & XFEATURE_ENABLED_BNDREGS) != 0) != 1511 ((xcrval & XFEATURE_ENABLED_BNDCSR) != 0)) { 1512 vm_inject_gp(vmx->vm, vcpu); 1513 return (HANDLED); 1514 } 1515 1516 /* 1517 * This runs "inside" vmrun() with the guest's FPU state, so 1518 * modifying xcr0 directly modifies the guest's xcr0, not the 1519 * host's. 
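	 * (The host's view of xcr0 is part of the host FPU state restored
	 * when the guest FPU context is later unloaded.)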
1520 */ 1521 load_xcr(0, xcrval); 1522 return (HANDLED); 1523 } 1524 1525 static uint64_t 1526 vmx_get_guest_reg(struct vmx *vmx, int vcpu, int ident) 1527 { 1528 const struct vmxctx *vmxctx; 1529 1530 vmxctx = &vmx->ctx[vcpu]; 1531 1532 switch (ident) { 1533 case 0: 1534 return (vmxctx->guest_rax); 1535 case 1: 1536 return (vmxctx->guest_rcx); 1537 case 2: 1538 return (vmxctx->guest_rdx); 1539 case 3: 1540 return (vmxctx->guest_rbx); 1541 case 4: 1542 return (vmcs_read(VMCS_GUEST_RSP)); 1543 case 5: 1544 return (vmxctx->guest_rbp); 1545 case 6: 1546 return (vmxctx->guest_rsi); 1547 case 7: 1548 return (vmxctx->guest_rdi); 1549 case 8: 1550 return (vmxctx->guest_r8); 1551 case 9: 1552 return (vmxctx->guest_r9); 1553 case 10: 1554 return (vmxctx->guest_r10); 1555 case 11: 1556 return (vmxctx->guest_r11); 1557 case 12: 1558 return (vmxctx->guest_r12); 1559 case 13: 1560 return (vmxctx->guest_r13); 1561 case 14: 1562 return (vmxctx->guest_r14); 1563 case 15: 1564 return (vmxctx->guest_r15); 1565 default: 1566 panic("invalid vmx register %d", ident); 1567 } 1568 } 1569 1570 static void 1571 vmx_set_guest_reg(struct vmx *vmx, int vcpu, int ident, uint64_t regval) 1572 { 1573 struct vmxctx *vmxctx; 1574 1575 vmxctx = &vmx->ctx[vcpu]; 1576 1577 switch (ident) { 1578 case 0: 1579 vmxctx->guest_rax = regval; 1580 break; 1581 case 1: 1582 vmxctx->guest_rcx = regval; 1583 break; 1584 case 2: 1585 vmxctx->guest_rdx = regval; 1586 break; 1587 case 3: 1588 vmxctx->guest_rbx = regval; 1589 break; 1590 case 4: 1591 vmcs_write(VMCS_GUEST_RSP, regval); 1592 break; 1593 case 5: 1594 vmxctx->guest_rbp = regval; 1595 break; 1596 case 6: 1597 vmxctx->guest_rsi = regval; 1598 break; 1599 case 7: 1600 vmxctx->guest_rdi = regval; 1601 break; 1602 case 8: 1603 vmxctx->guest_r8 = regval; 1604 break; 1605 case 9: 1606 vmxctx->guest_r9 = regval; 1607 break; 1608 case 10: 1609 vmxctx->guest_r10 = regval; 1610 break; 1611 case 11: 1612 vmxctx->guest_r11 = regval; 1613 break; 1614 case 12: 1615 vmxctx->guest_r12 = regval; 1616 break; 1617 case 13: 1618 vmxctx->guest_r13 = regval; 1619 break; 1620 case 14: 1621 vmxctx->guest_r14 = regval; 1622 break; 1623 case 15: 1624 vmxctx->guest_r15 = regval; 1625 break; 1626 default: 1627 panic("invalid vmx register %d", ident); 1628 } 1629 } 1630 1631 static void 1632 vmx_sync_efer_state(struct vmx *vmx, int vcpu, uint64_t efer) 1633 { 1634 uint64_t ctrl; 1635 1636 /* 1637 * If the "load EFER" VM-entry control is 1 (which we require) then the 1638 * value of EFER.LMA must be identical to "IA-32e mode guest" bit in the 1639 * VM-entry control. 
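	 * If the two disagree, VM entry fails its guest-state checks, so this
	 * must be refreshed whenever the guest's EFER value changes.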
1640 */ 1641 ctrl = vmcs_read(VMCS_ENTRY_CTLS); 1642 if ((efer & EFER_LMA) != 0) { 1643 ctrl |= VM_ENTRY_GUEST_LMA; 1644 } else { 1645 ctrl &= ~VM_ENTRY_GUEST_LMA; 1646 } 1647 vmcs_write(VMCS_ENTRY_CTLS, ctrl); 1648 } 1649 1650 static int 1651 vmx_emulate_cr0_access(struct vmx *vmx, int vcpu, uint64_t exitqual) 1652 { 1653 uint64_t crval, regval; 1654 1655 /* We only handle mov to %cr0 at this time */ 1656 if ((exitqual & 0xf0) != 0x00) 1657 return (UNHANDLED); 1658 1659 regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf); 1660 1661 vmcs_write(VMCS_CR0_SHADOW, regval); 1662 1663 crval = regval | cr0_ones_mask; 1664 crval &= ~cr0_zeros_mask; 1665 1666 const uint64_t old = vmcs_read(VMCS_GUEST_CR0); 1667 const uint64_t diff = crval ^ old; 1668 /* Flush the TLB if the paging or write-protect bits are changing */ 1669 if ((diff & CR0_PG) != 0 || (diff & CR0_WP) != 0) { 1670 vmx_invvpid(vmx, vcpu, 1); 1671 } 1672 1673 vmcs_write(VMCS_GUEST_CR0, crval); 1674 1675 if (regval & CR0_PG) { 1676 uint64_t efer; 1677 1678 /* Keep EFER.LMA properly updated if paging is enabled */ 1679 efer = vmcs_read(VMCS_GUEST_IA32_EFER); 1680 if (efer & EFER_LME) { 1681 efer |= EFER_LMA; 1682 vmcs_write(VMCS_GUEST_IA32_EFER, efer); 1683 vmx_sync_efer_state(vmx, vcpu, efer); 1684 } 1685 } 1686 1687 return (HANDLED); 1688 } 1689 1690 static int 1691 vmx_emulate_cr4_access(struct vmx *vmx, int vcpu, uint64_t exitqual) 1692 { 1693 uint64_t crval, regval; 1694 1695 /* We only handle mov to %cr4 at this time */ 1696 if ((exitqual & 0xf0) != 0x00) 1697 return (UNHANDLED); 1698 1699 regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf); 1700 1701 vmcs_write(VMCS_CR4_SHADOW, regval); 1702 1703 crval = regval | cr4_ones_mask; 1704 crval &= ~cr4_zeros_mask; 1705 vmcs_write(VMCS_GUEST_CR4, crval); 1706 1707 return (HANDLED); 1708 } 1709 1710 static int 1711 vmx_emulate_cr8_access(struct vmx *vmx, int vcpu, uint64_t exitqual) 1712 { 1713 struct vlapic *vlapic; 1714 uint64_t cr8; 1715 int regnum; 1716 1717 /* We only handle mov %cr8 to/from a register at this time. 
*/ 1718 if ((exitqual & 0xe0) != 0x00) { 1719 return (UNHANDLED); 1720 } 1721 1722 vlapic = vm_lapic(vmx->vm, vcpu); 1723 regnum = (exitqual >> 8) & 0xf; 1724 if (exitqual & 0x10) { 1725 cr8 = vlapic_get_cr8(vlapic); 1726 vmx_set_guest_reg(vmx, vcpu, regnum, cr8); 1727 } else { 1728 cr8 = vmx_get_guest_reg(vmx, vcpu, regnum); 1729 vlapic_set_cr8(vlapic, cr8); 1730 } 1731 1732 return (HANDLED); 1733 } 1734 1735 /* 1736 * From section "Guest Register State" in the Intel SDM: CPL = SS.DPL 1737 */ 1738 static int 1739 vmx_cpl(void) 1740 { 1741 uint32_t ssar; 1742 1743 ssar = vmcs_read(VMCS_GUEST_SS_ACCESS_RIGHTS); 1744 return ((ssar >> 5) & 0x3); 1745 } 1746 1747 static enum vm_cpu_mode 1748 vmx_cpu_mode(void) 1749 { 1750 uint32_t csar; 1751 1752 if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LMA) { 1753 csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS); 1754 if (csar & 0x2000) 1755 return (CPU_MODE_64BIT); /* CS.L = 1 */ 1756 else 1757 return (CPU_MODE_COMPATIBILITY); 1758 } else if (vmcs_read(VMCS_GUEST_CR0) & CR0_PE) { 1759 return (CPU_MODE_PROTECTED); 1760 } else { 1761 return (CPU_MODE_REAL); 1762 } 1763 } 1764 1765 static enum vm_paging_mode 1766 vmx_paging_mode(void) 1767 { 1768 1769 if (!(vmcs_read(VMCS_GUEST_CR0) & CR0_PG)) 1770 return (PAGING_MODE_FLAT); 1771 if (!(vmcs_read(VMCS_GUEST_CR4) & CR4_PAE)) 1772 return (PAGING_MODE_32); 1773 if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LME) 1774 return (PAGING_MODE_64); 1775 else 1776 return (PAGING_MODE_PAE); 1777 } 1778 1779 static void 1780 vmx_paging_info(struct vm_guest_paging *paging) 1781 { 1782 paging->cr3 = vmcs_read(VMCS_GUEST_CR3); 1783 paging->cpl = vmx_cpl(); 1784 paging->cpu_mode = vmx_cpu_mode(); 1785 paging->paging_mode = vmx_paging_mode(); 1786 } 1787 1788 static void 1789 vmexit_mmio_emul(struct vm_exit *vmexit, struct vie *vie, uint64_t gpa, 1790 uint64_t gla) 1791 { 1792 struct vm_guest_paging paging; 1793 uint32_t csar; 1794 1795 vmexit->exitcode = VM_EXITCODE_MMIO_EMUL; 1796 vmexit->inst_length = 0; 1797 vmexit->u.mmio_emul.gpa = gpa; 1798 vmexit->u.mmio_emul.gla = gla; 1799 vmx_paging_info(&paging); 1800 1801 switch (paging.cpu_mode) { 1802 case CPU_MODE_REAL: 1803 vmexit->u.mmio_emul.cs_base = vmcs_read(VMCS_GUEST_CS_BASE); 1804 vmexit->u.mmio_emul.cs_d = 0; 1805 break; 1806 case CPU_MODE_PROTECTED: 1807 case CPU_MODE_COMPATIBILITY: 1808 vmexit->u.mmio_emul.cs_base = vmcs_read(VMCS_GUEST_CS_BASE); 1809 csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS); 1810 vmexit->u.mmio_emul.cs_d = SEG_DESC_DEF32(csar); 1811 break; 1812 default: 1813 vmexit->u.mmio_emul.cs_base = 0; 1814 vmexit->u.mmio_emul.cs_d = 0; 1815 break; 1816 } 1817 1818 vie_init_mmio(vie, NULL, 0, &paging, gpa); 1819 } 1820 1821 static void 1822 vmexit_inout(struct vm_exit *vmexit, struct vie *vie, uint64_t qual, 1823 uint32_t eax) 1824 { 1825 struct vm_guest_paging paging; 1826 struct vm_inout *inout; 1827 1828 inout = &vmexit->u.inout; 1829 1830 inout->bytes = (qual & 0x7) + 1; 1831 inout->flags = 0; 1832 inout->flags |= (qual & 0x8) ? INOUT_IN : 0; 1833 inout->flags |= (qual & 0x10) ? INOUT_STR : 0; 1834 inout->flags |= (qual & 0x20) ? INOUT_REP : 0; 1835 inout->port = (uint16_t)(qual >> 16); 1836 inout->eax = eax; 1837 if (inout->flags & INOUT_STR) { 1838 uint64_t inst_info; 1839 1840 inst_info = vmcs_read(VMCS_EXIT_INSTRUCTION_INFO); 1841 1842 /* 1843 * According to the SDM, bits 9:7 encode the address size of the 1844 * ins/outs operation, but only values 0/1/2 are expected, 1845 * corresponding to 16/32/64 bit sizes. 
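		 *
		 * The shift below turns that encoding directly into a byte
		 * count: 2 << 0 = 2, 2 << 1 = 4, 2 << 2 = 8.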
1846 */ 1847 inout->addrsize = 2 << BITX(inst_info, 9, 7); 1848 VERIFY(inout->addrsize == 2 || inout->addrsize == 4 || 1849 inout->addrsize == 8); 1850 1851 if (inout->flags & INOUT_IN) { 1852 /* 1853 * The bits describing the segment in INSTRUCTION_INFO 1854 * are not defined for ins, leaving it to system 1855 * software to assume %es (encoded as 0) 1856 */ 1857 inout->segment = 0; 1858 } else { 1859 /* 1860 * Bits 15-17 encode the segment for OUTS. 1861 * This value follows the standard x86 segment order. 1862 */ 1863 inout->segment = (inst_info >> 15) & 0x7; 1864 } 1865 } 1866 1867 vmexit->exitcode = VM_EXITCODE_INOUT; 1868 vmx_paging_info(&paging); 1869 vie_init_inout(vie, inout, vmexit->inst_length, &paging); 1870 1871 /* The in/out emulation will handle advancing %rip */ 1872 vmexit->inst_length = 0; 1873 } 1874 1875 static int 1876 ept_fault_type(uint64_t ept_qual) 1877 { 1878 int fault_type; 1879 1880 if (ept_qual & EPT_VIOLATION_DATA_WRITE) 1881 fault_type = PROT_WRITE; 1882 else if (ept_qual & EPT_VIOLATION_INST_FETCH) 1883 fault_type = PROT_EXEC; 1884 else 1885 fault_type = PROT_READ; 1886 1887 return (fault_type); 1888 } 1889 1890 static bool 1891 ept_emulation_fault(uint64_t ept_qual) 1892 { 1893 int read, write; 1894 1895 /* EPT fault on an instruction fetch doesn't make sense here */ 1896 if (ept_qual & EPT_VIOLATION_INST_FETCH) 1897 return (false); 1898 1899 /* EPT fault must be a read fault or a write fault */ 1900 read = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0; 1901 write = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0; 1902 if ((read | write) == 0) 1903 return (false); 1904 1905 /* 1906 * The EPT violation must have been caused by accessing a 1907 * guest-physical address that is a translation of a guest-linear 1908 * address. 1909 */ 1910 if ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 || 1911 (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) { 1912 return (false); 1913 } 1914 1915 return (true); 1916 } 1917 1918 static __inline int 1919 apic_access_virtualization(struct vmx *vmx, int vcpuid) 1920 { 1921 uint32_t proc_ctls2; 1922 1923 proc_ctls2 = vmx->cap[vcpuid].proc_ctls2; 1924 return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) ? 1 : 0); 1925 } 1926 1927 static __inline int 1928 x2apic_virtualization(struct vmx *vmx, int vcpuid) 1929 { 1930 uint32_t proc_ctls2; 1931 1932 proc_ctls2 = vmx->cap[vcpuid].proc_ctls2; 1933 return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_X2APIC_MODE) ? 1 : 0); 1934 } 1935 1936 static int 1937 vmx_handle_apic_write(struct vmx *vmx, int vcpuid, struct vlapic *vlapic, 1938 uint64_t qual) 1939 { 1940 const uint_t offset = APIC_WRITE_OFFSET(qual); 1941 1942 if (!apic_access_virtualization(vmx, vcpuid)) { 1943 /* 1944 * In general there should not be any APIC write VM-exits 1945 * unless APIC-access virtualization is enabled. 1946 * 1947 * However self-IPI virtualization can legitimately trigger 1948 * an APIC-write VM-exit so treat it specially. 
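		 * (When that happens the write has already been virtualized,
		 * so the vector is read back from the virtual-APIC page
		 * rather than decoded from the instruction.)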
1949 */ 1950 if (x2apic_virtualization(vmx, vcpuid) && 1951 offset == APIC_OFFSET_SELF_IPI) { 1952 const uint32_t *apic_regs = 1953 (uint32_t *)(vlapic->apic_page); 1954 const uint32_t vector = 1955 apic_regs[APIC_OFFSET_SELF_IPI / 4]; 1956 1957 vlapic_self_ipi_handler(vlapic, vector); 1958 return (HANDLED); 1959 } else 1960 return (UNHANDLED); 1961 } 1962 1963 switch (offset) { 1964 case APIC_OFFSET_ID: 1965 vlapic_id_write_handler(vlapic); 1966 break; 1967 case APIC_OFFSET_LDR: 1968 vlapic_ldr_write_handler(vlapic); 1969 break; 1970 case APIC_OFFSET_DFR: 1971 vlapic_dfr_write_handler(vlapic); 1972 break; 1973 case APIC_OFFSET_SVR: 1974 vlapic_svr_write_handler(vlapic); 1975 break; 1976 case APIC_OFFSET_ESR: 1977 vlapic_esr_write_handler(vlapic); 1978 break; 1979 case APIC_OFFSET_ICR_LOW: 1980 vlapic_icrlo_write_handler(vlapic); 1981 break; 1982 case APIC_OFFSET_CMCI_LVT: 1983 case APIC_OFFSET_TIMER_LVT ... APIC_OFFSET_ERROR_LVT: 1984 vlapic_lvt_write_handler(vlapic, offset); 1985 break; 1986 case APIC_OFFSET_TIMER_ICR: 1987 vlapic_icrtmr_write_handler(vlapic); 1988 break; 1989 case APIC_OFFSET_TIMER_DCR: 1990 vlapic_dcr_write_handler(vlapic); 1991 break; 1992 default: 1993 return (UNHANDLED); 1994 } 1995 return (HANDLED); 1996 } 1997 1998 static bool 1999 apic_access_fault(struct vmx *vmx, int vcpuid, uint64_t gpa) 2000 { 2001 2002 if (apic_access_virtualization(vmx, vcpuid) && 2003 (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE)) 2004 return (true); 2005 else 2006 return (false); 2007 } 2008 2009 static int 2010 vmx_handle_apic_access(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit) 2011 { 2012 uint64_t qual; 2013 int access_type, offset, allowed; 2014 struct vie *vie; 2015 2016 if (!apic_access_virtualization(vmx, vcpuid)) 2017 return (UNHANDLED); 2018 2019 qual = vmexit->u.vmx.exit_qualification; 2020 access_type = APIC_ACCESS_TYPE(qual); 2021 offset = APIC_ACCESS_OFFSET(qual); 2022 2023 allowed = 0; 2024 if (access_type == 0) { 2025 /* 2026 * Read data access to the following registers is expected. 2027 */ 2028 switch (offset) { 2029 case APIC_OFFSET_APR: 2030 case APIC_OFFSET_PPR: 2031 case APIC_OFFSET_RRR: 2032 case APIC_OFFSET_CMCI_LVT: 2033 case APIC_OFFSET_TIMER_CCR: 2034 allowed = 1; 2035 break; 2036 default: 2037 break; 2038 } 2039 } else if (access_type == 1) { 2040 /* 2041 * Write data access to the following registers is expected. 2042 */ 2043 switch (offset) { 2044 case APIC_OFFSET_VER: 2045 case APIC_OFFSET_APR: 2046 case APIC_OFFSET_PPR: 2047 case APIC_OFFSET_RRR: 2048 case APIC_OFFSET_ISR0 ... APIC_OFFSET_ISR7: 2049 case APIC_OFFSET_TMR0 ... APIC_OFFSET_TMR7: 2050 case APIC_OFFSET_IRR0 ... APIC_OFFSET_IRR7: 2051 case APIC_OFFSET_CMCI_LVT: 2052 case APIC_OFFSET_TIMER_CCR: 2053 allowed = 1; 2054 break; 2055 default: 2056 break; 2057 } 2058 } 2059 2060 if (allowed) { 2061 vie = vm_vie_ctx(vmx->vm, vcpuid); 2062 vmexit_mmio_emul(vmexit, vie, DEFAULT_APIC_BASE + offset, 2063 VIE_INVALID_GLA); 2064 } 2065 2066 /* 2067 * Regardless of whether the APIC-access is allowed this handler 2068 * always returns UNHANDLED: 2069 * - if the access is allowed then it is handled by emulating the 2070 * instruction that caused the VM-exit (outside the critical section) 2071 * - if the access is not allowed then it will be converted to an 2072 * exitcode of VM_EXITCODE_VMX and will be dealt with in userland. 
2073 */ 2074 return (UNHANDLED); 2075 } 2076 2077 static enum task_switch_reason 2078 vmx_task_switch_reason(uint64_t qual) 2079 { 2080 int reason; 2081 2082 reason = (qual >> 30) & 0x3; 2083 switch (reason) { 2084 case 0: 2085 return (TSR_CALL); 2086 case 1: 2087 return (TSR_IRET); 2088 case 2: 2089 return (TSR_JMP); 2090 case 3: 2091 return (TSR_IDT_GATE); 2092 default: 2093 panic("%s: invalid reason %d", __func__, reason); 2094 } 2095 } 2096 2097 static int 2098 vmx_handle_msr(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit, 2099 bool is_wrmsr) 2100 { 2101 struct vmxctx *vmxctx = &vmx->ctx[vcpuid]; 2102 const uint32_t ecx = vmxctx->guest_rcx; 2103 vm_msr_result_t res; 2104 uint64_t val = 0; 2105 2106 if (is_wrmsr) { 2107 vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_WRMSR, 1); 2108 val = vmxctx->guest_rdx << 32 | (uint32_t)vmxctx->guest_rax; 2109 2110 if (vlapic_owned_msr(ecx)) { 2111 struct vlapic *vlapic = vm_lapic(vmx->vm, vcpuid); 2112 2113 res = vlapic_wrmsr(vlapic, ecx, val); 2114 } else { 2115 res = vmx_wrmsr(vmx, vcpuid, ecx, val); 2116 } 2117 } else { 2118 vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_RDMSR, 1); 2119 2120 if (vlapic_owned_msr(ecx)) { 2121 struct vlapic *vlapic = vm_lapic(vmx->vm, vcpuid); 2122 2123 res = vlapic_rdmsr(vlapic, ecx, &val); 2124 } else { 2125 res = vmx_rdmsr(vmx, vcpuid, ecx, &val); 2126 } 2127 } 2128 2129 switch (res) { 2130 case VMR_OK: 2131 /* Store rdmsr result in the appropriate registers */ 2132 if (!is_wrmsr) { 2133 vmxctx->guest_rax = (uint32_t)val; 2134 vmxctx->guest_rdx = val >> 32; 2135 } 2136 return (HANDLED); 2137 case VMR_GP: 2138 vm_inject_gp(vmx->vm, vcpuid); 2139 return (HANDLED); 2140 case VMR_UNHANLDED: 2141 vmexit->exitcode = is_wrmsr ? 2142 VM_EXITCODE_WRMSR : VM_EXITCODE_RDMSR; 2143 vmexit->u.msr.code = ecx; 2144 vmexit->u.msr.wval = val; 2145 return (UNHANDLED); 2146 default: 2147 panic("unexpected msr result %u\n", res); 2148 } 2149 } 2150 2151 static int 2152 vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit) 2153 { 2154 int error, errcode, errcode_valid, handled; 2155 struct vmxctx *vmxctx; 2156 struct vie *vie; 2157 struct vlapic *vlapic; 2158 struct vm_task_switch *ts; 2159 uint32_t idtvec_info, intr_info; 2160 uint32_t intr_type, intr_vec, reason; 2161 uint64_t qual, gpa; 2162 2163 CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_VIRTUAL_NMI) != 0); 2164 CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_NMI_EXITING) != 0); 2165 2166 handled = UNHANDLED; 2167 vmxctx = &vmx->ctx[vcpu]; 2168 2169 qual = vmexit->u.vmx.exit_qualification; 2170 reason = vmexit->u.vmx.exit_reason; 2171 vmexit->exitcode = VM_EXITCODE_BOGUS; 2172 2173 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_COUNT, 1); 2174 SDT_PROBE3(vmm, vmx, exit, entry, vmx, vcpu, vmexit); 2175 2176 /* 2177 * VM-entry failures during or after loading guest state. 2178 * 2179 * These VM-exits are uncommon but must be handled specially 2180 * as most VM-exit fields are not populated as usual. 2181 */ 2182 if (reason == EXIT_REASON_MCE_DURING_ENTRY) { 2183 vmm_call_trap(T_MCE); 2184 return (1); 2185 } 2186 2187 /* 2188 * VM exits that can be triggered during event delivery need to 2189 * be handled specially by re-injecting the event if the IDT 2190 * vectoring information field's valid bit is set. 2191 * 2192 * See "Information for VM Exits During Event Delivery" in Intel SDM 2193 * for details. 
2194 */ 2195 idtvec_info = vmcs_read(VMCS_IDT_VECTORING_INFO); 2196 if (idtvec_info & VMCS_IDT_VEC_VALID) { 2197 /* Record exit intinfo */ 2198 VERIFY0(vm_exit_intinfo(vmx->vm, vcpu, 2199 vmx_idtvec_to_intinfo(idtvec_info))); 2200 2201 /* 2202 * If 'virtual NMIs' are being used and the VM-exit 2203 * happened while injecting an NMI during the previous 2204 * VM-entry, then clear "blocking by NMI" in the 2205 * Guest Interruptibility-State so the NMI can be 2206 * reinjected on the subsequent VM-entry. 2207 * 2208 * However, if the NMI was being delivered through a task 2209 * gate, then the new task must start execution with NMIs 2210 * blocked so don't clear NMI blocking in this case. 2211 */ 2212 intr_type = idtvec_info & VMCS_INTR_T_MASK; 2213 if (intr_type == VMCS_INTR_T_NMI) { 2214 if (reason != EXIT_REASON_TASK_SWITCH) 2215 vmx_clear_nmi_blocking(vmx, vcpu); 2216 else 2217 vmx_assert_nmi_blocking(vmx, vcpu); 2218 } 2219 2220 /* 2221 * Update VM-entry instruction length if the event being 2222 * delivered was a software interrupt or software exception. 2223 */ 2224 if (intr_type == VMCS_INTR_T_SWINTR || 2225 intr_type == VMCS_INTR_T_PRIV_SWEXCEPTION || 2226 intr_type == VMCS_INTR_T_SWEXCEPTION) { 2227 vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length); 2228 } 2229 } 2230 2231 switch (reason) { 2232 case EXIT_REASON_TRIPLE_FAULT: 2233 (void) vm_suspend(vmx->vm, VM_SUSPEND_TRIPLEFAULT); 2234 handled = HANDLED; 2235 break; 2236 case EXIT_REASON_TASK_SWITCH: 2237 ts = &vmexit->u.task_switch; 2238 ts->tsssel = qual & 0xffff; 2239 ts->reason = vmx_task_switch_reason(qual); 2240 ts->ext = 0; 2241 ts->errcode_valid = 0; 2242 vmx_paging_info(&ts->paging); 2243 /* 2244 * If the task switch was due to a CALL, JMP, IRET, software 2245 * interrupt (INT n) or software exception (INT3, INTO), 2246 * then the saved %rip references the instruction that caused 2247 * the task switch. The instruction length field in the VMCS 2248 * is valid in this case. 2249 * 2250 * In all other cases (e.g., NMI, hardware exception) the 2251 * saved %rip is one that would have been saved in the old TSS 2252 * had the task switch completed normally so the instruction 2253 * length field is not needed in this case and is explicitly 2254 * set to 0. 
2255 */ 2256 if (ts->reason == TSR_IDT_GATE) { 2257 KASSERT(idtvec_info & VMCS_IDT_VEC_VALID, 2258 ("invalid idtvec_info %x for IDT task switch", 2259 idtvec_info)); 2260 intr_type = idtvec_info & VMCS_INTR_T_MASK; 2261 if (intr_type != VMCS_INTR_T_SWINTR && 2262 intr_type != VMCS_INTR_T_SWEXCEPTION && 2263 intr_type != VMCS_INTR_T_PRIV_SWEXCEPTION) { 2264 /* Task switch triggered by external event */ 2265 ts->ext = 1; 2266 vmexit->inst_length = 0; 2267 if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) { 2268 ts->errcode_valid = 1; 2269 ts->errcode = 2270 vmcs_read(VMCS_IDT_VECTORING_ERROR); 2271 } 2272 } 2273 } 2274 vmexit->exitcode = VM_EXITCODE_TASK_SWITCH; 2275 SDT_PROBE4(vmm, vmx, exit, taskswitch, vmx, vcpu, vmexit, ts); 2276 break; 2277 case EXIT_REASON_CR_ACCESS: 2278 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CR_ACCESS, 1); 2279 SDT_PROBE4(vmm, vmx, exit, craccess, vmx, vcpu, vmexit, qual); 2280 switch (qual & 0xf) { 2281 case 0: 2282 handled = vmx_emulate_cr0_access(vmx, vcpu, qual); 2283 break; 2284 case 4: 2285 handled = vmx_emulate_cr4_access(vmx, vcpu, qual); 2286 break; 2287 case 8: 2288 handled = vmx_emulate_cr8_access(vmx, vcpu, qual); 2289 break; 2290 } 2291 break; 2292 case EXIT_REASON_RDMSR: 2293 case EXIT_REASON_WRMSR: 2294 handled = vmx_handle_msr(vmx, vcpu, vmexit, 2295 reason == EXIT_REASON_WRMSR); 2296 break; 2297 case EXIT_REASON_HLT: 2298 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_HLT, 1); 2299 SDT_PROBE3(vmm, vmx, exit, halt, vmx, vcpu, vmexit); 2300 vmexit->exitcode = VM_EXITCODE_HLT; 2301 vmexit->u.hlt.rflags = vmcs_read(VMCS_GUEST_RFLAGS); 2302 break; 2303 case EXIT_REASON_MTF: 2304 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MTRAP, 1); 2305 SDT_PROBE3(vmm, vmx, exit, mtrap, vmx, vcpu, vmexit); 2306 vmexit->exitcode = VM_EXITCODE_MTRAP; 2307 vmexit->inst_length = 0; 2308 break; 2309 case EXIT_REASON_PAUSE: 2310 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_PAUSE, 1); 2311 SDT_PROBE3(vmm, vmx, exit, pause, vmx, vcpu, vmexit); 2312 vmexit->exitcode = VM_EXITCODE_PAUSE; 2313 break; 2314 case EXIT_REASON_INTR_WINDOW: 2315 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INTR_WINDOW, 1); 2316 SDT_PROBE3(vmm, vmx, exit, intrwindow, vmx, vcpu, vmexit); 2317 vmx_clear_int_window_exiting(vmx, vcpu); 2318 return (1); 2319 case EXIT_REASON_EXT_INTR: 2320 /* 2321 * External interrupts serve only to cause VM exits and allow 2322 * the host interrupt handler to run. 2323 * 2324 * If this external interrupt triggers a virtual interrupt 2325 * to a VM, then that state will be recorded by the 2326 * host interrupt handler in the VM's softc. We will inject 2327 * this virtual interrupt during the subsequent VM enter. 2328 */ 2329 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO); 2330 SDT_PROBE4(vmm, vmx, exit, interrupt, 2331 vmx, vcpu, vmexit, intr_info); 2332 2333 /* 2334 * XXX: Ignore this exit if VMCS_INTR_VALID is not set. 2335 * This appears to be a bug in VMware Fusion? 2336 */ 2337 if (!(intr_info & VMCS_INTR_VALID)) 2338 return (1); 2339 KASSERT((intr_info & VMCS_INTR_VALID) != 0 && 2340 (intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_HWINTR, 2341 ("VM exit interruption info invalid: %x", intr_info)); 2342 vmx_trigger_hostintr(intr_info & 0xff); 2343 2344 /* 2345 * This is special. We want to treat this as an 'handled' 2346 * VM-exit but not increment the instruction pointer. 
2347 */ 2348 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXTINT, 1); 2349 return (1); 2350 case EXIT_REASON_NMI_WINDOW: 2351 SDT_PROBE3(vmm, vmx, exit, nmiwindow, vmx, vcpu, vmexit); 2352 /* Exit to allow the pending virtual NMI to be injected */ 2353 if (vm_nmi_pending(vmx->vm, vcpu)) 2354 vmx_inject_nmi(vmx, vcpu); 2355 vmx_clear_nmi_window_exiting(vmx, vcpu); 2356 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NMI_WINDOW, 1); 2357 return (1); 2358 case EXIT_REASON_INOUT: 2359 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INOUT, 1); 2360 vie = vm_vie_ctx(vmx->vm, vcpu); 2361 vmexit_inout(vmexit, vie, qual, (uint32_t)vmxctx->guest_rax); 2362 SDT_PROBE3(vmm, vmx, exit, inout, vmx, vcpu, vmexit); 2363 break; 2364 case EXIT_REASON_CPUID: 2365 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CPUID, 1); 2366 SDT_PROBE3(vmm, vmx, exit, cpuid, vmx, vcpu, vmexit); 2367 handled = vmx_handle_cpuid(vmx->vm, vcpu, vmxctx); 2368 break; 2369 case EXIT_REASON_EXCEPTION: 2370 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXCEPTION, 1); 2371 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO); 2372 KASSERT((intr_info & VMCS_INTR_VALID) != 0, 2373 ("VM exit interruption info invalid: %x", intr_info)); 2374 2375 intr_vec = intr_info & 0xff; 2376 intr_type = intr_info & VMCS_INTR_T_MASK; 2377 2378 /* 2379 * If Virtual NMIs control is 1 and the VM-exit is due to a 2380 * fault encountered during the execution of IRET then we must 2381 * restore the state of "virtual-NMI blocking" before resuming 2382 * the guest. 2383 * 2384 * See "Resuming Guest Software after Handling an Exception". 2385 * See "Information for VM Exits Due to Vectored Events". 2386 */ 2387 if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 && 2388 (intr_vec != IDT_DF) && 2389 (intr_info & EXIT_QUAL_NMIUDTI) != 0) 2390 vmx_restore_nmi_blocking(vmx, vcpu); 2391 2392 /* 2393 * The NMI has already been handled in vmx_exit_handle_nmi(). 2394 */ 2395 if (intr_type == VMCS_INTR_T_NMI) 2396 return (1); 2397 2398 /* 2399 * Call the machine check handler by hand. Also don't reflect 2400 * the machine check back into the guest. 2401 */ 2402 if (intr_vec == IDT_MC) { 2403 vmm_call_trap(T_MCE); 2404 return (1); 2405 } 2406 2407 /* 2408 * If the hypervisor has requested user exits for 2409 * debug exceptions, bounce them out to userland. 2410 */ 2411 if (intr_type == VMCS_INTR_T_SWEXCEPTION && 2412 intr_vec == IDT_BP && 2413 (vmx->cap[vcpu].set & (1 << VM_CAP_BPT_EXIT))) { 2414 vmexit->exitcode = VM_EXITCODE_BPT; 2415 vmexit->u.bpt.inst_length = vmexit->inst_length; 2416 vmexit->inst_length = 0; 2417 break; 2418 } 2419 2420 if (intr_vec == IDT_PF) { 2421 vmxctx->guest_cr2 = qual; 2422 } 2423 2424 /* 2425 * Software exceptions exhibit trap-like behavior. This in 2426 * turn requires populating the VM-entry instruction length 2427 * so that the %rip in the trap frame is past the INT3/INTO 2428 * instruction. 
2429 */ 2430 if (intr_type == VMCS_INTR_T_SWEXCEPTION) 2431 vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length); 2432 2433 /* Reflect all other exceptions back into the guest */ 2434 errcode_valid = errcode = 0; 2435 if (intr_info & VMCS_INTR_DEL_ERRCODE) { 2436 errcode_valid = 1; 2437 errcode = vmcs_read(VMCS_EXIT_INTR_ERRCODE); 2438 } 2439 SDT_PROBE5(vmm, vmx, exit, exception, 2440 vmx, vcpu, vmexit, intr_vec, errcode); 2441 error = vm_inject_exception(vmx->vm, vcpu, intr_vec, 2442 errcode_valid, errcode, 0); 2443 KASSERT(error == 0, ("%s: vm_inject_exception error %d", 2444 __func__, error)); 2445 return (1); 2446 2447 case EXIT_REASON_EPT_FAULT: 2448 /* 2449 * If 'gpa' lies within the address space allocated to 2450 * memory then this must be a nested page fault otherwise 2451 * this must be an instruction that accesses MMIO space. 2452 */ 2453 gpa = vmcs_read(VMCS_GUEST_PHYSICAL_ADDRESS); 2454 if (vm_mem_allocated(vmx->vm, vcpu, gpa) || 2455 apic_access_fault(vmx, vcpu, gpa)) { 2456 vmexit->exitcode = VM_EXITCODE_PAGING; 2457 vmexit->inst_length = 0; 2458 vmexit->u.paging.gpa = gpa; 2459 vmexit->u.paging.fault_type = ept_fault_type(qual); 2460 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NESTED_FAULT, 1); 2461 SDT_PROBE5(vmm, vmx, exit, nestedfault, 2462 vmx, vcpu, vmexit, gpa, qual); 2463 } else if (ept_emulation_fault(qual)) { 2464 vie = vm_vie_ctx(vmx->vm, vcpu); 2465 vmexit_mmio_emul(vmexit, vie, gpa, 2466 vmcs_read(VMCS_GUEST_LINEAR_ADDRESS)); 2467 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MMIO_EMUL, 1); 2468 SDT_PROBE4(vmm, vmx, exit, mmiofault, 2469 vmx, vcpu, vmexit, gpa); 2470 } 2471 /* 2472 * If Virtual NMIs control is 1 and the VM-exit is due to an 2473 * EPT fault during the execution of IRET then we must restore 2474 * the state of "virtual-NMI blocking" before resuming. 2475 * 2476 * See description of "NMI unblocking due to IRET" in 2477 * "Exit Qualification for EPT Violations". 2478 */ 2479 if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 && 2480 (qual & EXIT_QUAL_NMIUDTI) != 0) 2481 vmx_restore_nmi_blocking(vmx, vcpu); 2482 break; 2483 case EXIT_REASON_VIRTUALIZED_EOI: 2484 vmexit->exitcode = VM_EXITCODE_IOAPIC_EOI; 2485 vmexit->u.ioapic_eoi.vector = qual & 0xFF; 2486 SDT_PROBE3(vmm, vmx, exit, eoi, vmx, vcpu, vmexit); 2487 vmexit->inst_length = 0; /* trap-like */ 2488 break; 2489 case EXIT_REASON_APIC_ACCESS: 2490 SDT_PROBE3(vmm, vmx, exit, apicaccess, vmx, vcpu, vmexit); 2491 handled = vmx_handle_apic_access(vmx, vcpu, vmexit); 2492 break; 2493 case EXIT_REASON_APIC_WRITE: 2494 /* 2495 * APIC-write VM exit is trap-like so the %rip is already 2496 * pointing to the next instruction. 
2497 */ 2498 vmexit->inst_length = 0; 2499 vlapic = vm_lapic(vmx->vm, vcpu); 2500 SDT_PROBE4(vmm, vmx, exit, apicwrite, 2501 vmx, vcpu, vmexit, vlapic); 2502 handled = vmx_handle_apic_write(vmx, vcpu, vlapic, qual); 2503 break; 2504 case EXIT_REASON_XSETBV: 2505 SDT_PROBE3(vmm, vmx, exit, xsetbv, vmx, vcpu, vmexit); 2506 handled = vmx_emulate_xsetbv(vmx, vcpu, vmexit); 2507 break; 2508 case EXIT_REASON_MONITOR: 2509 SDT_PROBE3(vmm, vmx, exit, monitor, vmx, vcpu, vmexit); 2510 vmexit->exitcode = VM_EXITCODE_MONITOR; 2511 break; 2512 case EXIT_REASON_MWAIT: 2513 SDT_PROBE3(vmm, vmx, exit, mwait, vmx, vcpu, vmexit); 2514 vmexit->exitcode = VM_EXITCODE_MWAIT; 2515 break; 2516 case EXIT_REASON_TPR: 2517 vlapic = vm_lapic(vmx->vm, vcpu); 2518 vlapic_sync_tpr(vlapic); 2519 vmexit->inst_length = 0; 2520 handled = HANDLED; 2521 break; 2522 case EXIT_REASON_VMCALL: 2523 case EXIT_REASON_VMCLEAR: 2524 case EXIT_REASON_VMLAUNCH: 2525 case EXIT_REASON_VMPTRLD: 2526 case EXIT_REASON_VMPTRST: 2527 case EXIT_REASON_VMREAD: 2528 case EXIT_REASON_VMRESUME: 2529 case EXIT_REASON_VMWRITE: 2530 case EXIT_REASON_VMXOFF: 2531 case EXIT_REASON_VMXON: 2532 SDT_PROBE3(vmm, vmx, exit, vminsn, vmx, vcpu, vmexit); 2533 vmexit->exitcode = VM_EXITCODE_VMINSN; 2534 break; 2535 default: 2536 SDT_PROBE4(vmm, vmx, exit, unknown, 2537 vmx, vcpu, vmexit, reason); 2538 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_UNKNOWN, 1); 2539 break; 2540 } 2541 2542 if (handled) { 2543 /* 2544 * It is possible that control is returned to userland 2545 * even though we were able to handle the VM exit in the 2546 * kernel. 2547 * 2548 * In such a case we want to make sure that the userland 2549 * restarts guest execution at the instruction *after* 2550 * the one we just processed. Therefore we update the 2551 * guest rip in the VMCS and in 'vmexit'. 2552 */ 2553 vmexit->rip += vmexit->inst_length; 2554 vmexit->inst_length = 0; 2555 vmcs_write(VMCS_GUEST_RIP, vmexit->rip); 2556 } else { 2557 if (vmexit->exitcode == VM_EXITCODE_BOGUS) { 2558 /* 2559 * If this VM exit was not claimed by anybody then 2560 * treat it as a generic VMX exit. 2561 */ 2562 vmexit->exitcode = VM_EXITCODE_VMX; 2563 vmexit->u.vmx.status = VM_SUCCESS; 2564 vmexit->u.vmx.inst_type = 0; 2565 vmexit->u.vmx.inst_error = 0; 2566 } else { 2567 /* 2568 * The exitcode and collateral have been populated. 2569 * The VM exit will be processed further in userland. 2570 */ 2571 } 2572 } 2573 2574 SDT_PROBE4(vmm, vmx, exit, return, 2575 vmx, vcpu, vmexit, handled); 2576 return (handled); 2577 } 2578 2579 static void 2580 vmx_exit_inst_error(struct vmxctx *vmxctx, int rc, struct vm_exit *vmexit) 2581 { 2582 2583 KASSERT(vmxctx->inst_fail_status != VM_SUCCESS, 2584 ("vmx_exit_inst_error: invalid inst_fail_status %d", 2585 vmxctx->inst_fail_status)); 2586 2587 vmexit->inst_length = 0; 2588 vmexit->exitcode = VM_EXITCODE_VMX; 2589 vmexit->u.vmx.status = vmxctx->inst_fail_status; 2590 vmexit->u.vmx.inst_error = vmcs_read(VMCS_INSTRUCTION_ERROR); 2591 vmexit->u.vmx.exit_reason = ~0; 2592 vmexit->u.vmx.exit_qualification = ~0; 2593 2594 switch (rc) { 2595 case VMX_VMRESUME_ERROR: 2596 case VMX_VMLAUNCH_ERROR: 2597 case VMX_INVEPT_ERROR: 2598 case VMX_VMWRITE_ERROR: 2599 vmexit->u.vmx.inst_type = rc; 2600 break; 2601 default: 2602 panic("vm_exit_inst_error: vmx_enter_guest returned %d", rc); 2603 } 2604 } 2605 2606 /* 2607 * If the NMI-exiting VM execution control is set to '1' then an NMI in 2608 * non-root operation causes a VM-exit. 
NMI blocking is in effect so it is 2609 * sufficient to simply vector to the NMI handler via a software interrupt. 2610 * However, this must be done before maskable interrupts are enabled 2611 * otherwise the "iret" issued by an interrupt handler will incorrectly 2612 * clear NMI blocking. 2613 */ 2614 static __inline void 2615 vmx_exit_handle_possible_nmi(struct vm_exit *vmexit) 2616 { 2617 ASSERT(!interrupts_enabled()); 2618 2619 if (vmexit->u.vmx.exit_reason == EXIT_REASON_EXCEPTION) { 2620 uint32_t intr_info = vmcs_read(VMCS_EXIT_INTR_INFO); 2621 ASSERT(intr_info & VMCS_INTR_VALID); 2622 2623 if ((intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_NMI) { 2624 ASSERT3U(intr_info & 0xff, ==, IDT_NMI); 2625 vmm_call_trap(T_NMIFLT); 2626 } 2627 } 2628 } 2629 2630 static __inline void 2631 vmx_dr_enter_guest(struct vmxctx *vmxctx) 2632 { 2633 uint64_t rflags; 2634 2635 /* Save host control debug registers. */ 2636 vmxctx->host_dr7 = rdr7(); 2637 vmxctx->host_debugctl = rdmsr(MSR_DEBUGCTLMSR); 2638 2639 /* 2640 * Disable debugging in DR7 and DEBUGCTL to avoid triggering 2641 * exceptions in the host based on the guest DRx values. The 2642 * guest DR7 and DEBUGCTL are saved/restored in the VMCS. 2643 */ 2644 load_dr7(0); 2645 wrmsr(MSR_DEBUGCTLMSR, 0); 2646 2647 /* 2648 * Disable single stepping the kernel to avoid corrupting the 2649 * guest DR6. A debugger might still be able to corrupt the 2650 * guest DR6 by setting a breakpoint after this point and then 2651 * single stepping. 2652 */ 2653 rflags = read_rflags(); 2654 vmxctx->host_tf = rflags & PSL_T; 2655 write_rflags(rflags & ~PSL_T); 2656 2657 /* Save host debug registers. */ 2658 vmxctx->host_dr0 = rdr0(); 2659 vmxctx->host_dr1 = rdr1(); 2660 vmxctx->host_dr2 = rdr2(); 2661 vmxctx->host_dr3 = rdr3(); 2662 vmxctx->host_dr6 = rdr6(); 2663 2664 /* Restore guest debug registers. */ 2665 load_dr0(vmxctx->guest_dr0); 2666 load_dr1(vmxctx->guest_dr1); 2667 load_dr2(vmxctx->guest_dr2); 2668 load_dr3(vmxctx->guest_dr3); 2669 load_dr6(vmxctx->guest_dr6); 2670 } 2671 2672 static __inline void 2673 vmx_dr_leave_guest(struct vmxctx *vmxctx) 2674 { 2675 2676 /* Save guest debug registers. */ 2677 vmxctx->guest_dr0 = rdr0(); 2678 vmxctx->guest_dr1 = rdr1(); 2679 vmxctx->guest_dr2 = rdr2(); 2680 vmxctx->guest_dr3 = rdr3(); 2681 vmxctx->guest_dr6 = rdr6(); 2682 2683 /* 2684 * Restore host debug registers. Restore DR7, DEBUGCTL, and 2685 * PSL_T last. 
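 * Deferring those three until the host %dr0-%dr6 values are back in place avoids re-arming host breakpoints or single-stepping against partially-restored state.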
2686 */ 2687 load_dr0(vmxctx->host_dr0); 2688 load_dr1(vmxctx->host_dr1); 2689 load_dr2(vmxctx->host_dr2); 2690 load_dr3(vmxctx->host_dr3); 2691 load_dr6(vmxctx->host_dr6); 2692 wrmsr(MSR_DEBUGCTLMSR, vmxctx->host_debugctl); 2693 load_dr7(vmxctx->host_dr7); 2694 write_rflags(read_rflags() | vmxctx->host_tf); 2695 } 2696 2697 static int 2698 vmx_run(void *arg, int vcpu, uint64_t rip) 2699 { 2700 int rc, handled, launched; 2701 struct vmx *vmx; 2702 struct vm *vm; 2703 struct vmxctx *vmxctx; 2704 uintptr_t vmcs_pa; 2705 struct vm_exit *vmexit; 2706 struct vlapic *vlapic; 2707 uint32_t exit_reason; 2708 bool tpr_shadow_active; 2709 vm_client_t *vmc; 2710 2711 vmx = arg; 2712 vm = vmx->vm; 2713 vmcs_pa = vmx->vmcs_pa[vcpu]; 2714 vmxctx = &vmx->ctx[vcpu]; 2715 vlapic = vm_lapic(vm, vcpu); 2716 vmexit = vm_exitinfo(vm, vcpu); 2717 vmc = vm_get_vmclient(vm, vcpu); 2718 launched = 0; 2719 tpr_shadow_active = vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW) && 2720 !vmx_cap_en(vmx, VMX_CAP_APICV) && 2721 (vmx->cap[vcpu].proc_ctls & PROCBASED_USE_TPR_SHADOW) != 0; 2722 2723 vmx_msr_guest_enter(vmx, vcpu); 2724 2725 vmcs_load(vmcs_pa); 2726 2727 VERIFY(vmx->vmcs_state[vcpu] == VS_NONE && curthread->t_preempt != 0); 2728 vmx->vmcs_state[vcpu] = VS_LOADED; 2729 2730 /* 2731 * XXX 2732 * We do this every time because we may setup the virtual machine 2733 * from a different process than the one that actually runs it. 2734 * 2735 * If the life of a virtual machine was spent entirely in the context 2736 * of a single process we could do this once in vmx_vminit(). 2737 */ 2738 vmcs_write(VMCS_HOST_CR3, rcr3()); 2739 2740 vmcs_write(VMCS_GUEST_RIP, rip); 2741 vmx_set_pcpu_defaults(vmx, vcpu); 2742 do { 2743 enum event_inject_state inject_state; 2744 uint64_t eptgen; 2745 2746 ASSERT3U(vmcs_read(VMCS_GUEST_RIP), ==, rip); 2747 2748 handled = UNHANDLED; 2749 2750 /* 2751 * Perform initial event/exception/interrupt injection before 2752 * host CPU interrupts are disabled. 2753 */ 2754 inject_state = vmx_inject_events(vmx, vcpu, rip); 2755 2756 /* 2757 * Interrupts are disabled from this point on until the 2758 * guest starts executing. This is done for the following 2759 * reasons: 2760 * 2761 * If an AST is asserted on this thread after the check below, 2762 * then the IPI_AST notification will not be lost, because it 2763 * will cause a VM exit due to external interrupt as soon as 2764 * the guest state is loaded. 2765 * 2766 * A posted interrupt after vmx_inject_vlapic() will not be 2767 * "lost" because it will be held pending in the host APIC 2768 * because interrupts are disabled. The pending interrupt will 2769 * be recognized as soon as the guest state is loaded. 2770 * 2771 * The same reasoning applies to the IPI generated by vmspace 2772 * invalidation. 2773 */ 2774 disable_intr(); 2775 2776 /* 2777 * If not precluded by existing events, inject any interrupt 2778 * pending on the vLAPIC. As a lock-less operation, it is safe 2779 * (and prudent) to perform with host CPU interrupts disabled. 2780 */ 2781 if (inject_state == EIS_CAN_INJECT) { 2782 inject_state = vmx_inject_vlapic(vmx, vcpu, vlapic); 2783 } 2784 2785 /* 2786 * Check for vCPU bail-out conditions. This must be done after 2787 * vmx_inject_events() to detect a triple-fault condition. 
2788 */ 2789 if (vcpu_entry_bailout_checks(vmx->vm, vcpu, rip)) { 2790 enable_intr(); 2791 break; 2792 } 2793 2794 if (vcpu_run_state_pending(vm, vcpu)) { 2795 enable_intr(); 2796 vm_exit_run_state(vmx->vm, vcpu, rip); 2797 break; 2798 } 2799 2800 /* 2801 * If subsequent activity queued events which require injection 2802 * handling, take another lap to handle them. 2803 */ 2804 if (vmx_inject_recheck(vmx, vcpu, inject_state)) { 2805 enable_intr(); 2806 handled = HANDLED; 2807 continue; 2808 } 2809 2810 if ((rc = smt_acquire()) != 1) { 2811 enable_intr(); 2812 vmexit->rip = rip; 2813 vmexit->inst_length = 0; 2814 if (rc == -1) { 2815 vmexit->exitcode = VM_EXITCODE_HT; 2816 } else { 2817 vmexit->exitcode = VM_EXITCODE_BOGUS; 2818 handled = HANDLED; 2819 } 2820 break; 2821 } 2822 2823 /* 2824 * If this thread has gone off-cpu due to mutex operations 2825 * during vmx_run, the VMCS will have been unloaded, forcing a 2826 * re-VMLAUNCH as opposed to VMRESUME. 2827 */ 2828 launched = (vmx->vmcs_state[vcpu] & VS_LAUNCHED) != 0; 2829 /* 2830 * Restoration of the GDT limit is taken care of by 2831 * vmx_savectx(). Since the maximum practical index for the 2832 * IDT is 255, restoring its limits from the post-VMX-exit 2833 * default of 0xffff is not a concern. 2834 * 2835 * Only 64-bit hypervisor callers are allowed, which forgoes 2836 * the need to restore any LDT descriptor. Toss an error to 2837 * anyone attempting to break that rule. 2838 */ 2839 if (curproc->p_model != DATAMODEL_LP64) { 2840 smt_release(); 2841 enable_intr(); 2842 bzero(vmexit, sizeof (*vmexit)); 2843 vmexit->rip = rip; 2844 vmexit->exitcode = VM_EXITCODE_VMX; 2845 vmexit->u.vmx.status = VM_FAIL_INVALID; 2846 handled = UNHANDLED; 2847 break; 2848 } 2849 2850 if (tpr_shadow_active) { 2851 vmx_tpr_shadow_enter(vlapic); 2852 } 2853 2854 /* 2855 * Indicate activation of vmspace (EPT) table just prior to VMX 2856 * entry, checking for the necessity of an invept invalidation. 2857 */ 2858 eptgen = vmc_table_enter(vmc); 2859 if (vmx->eptgen[curcpu] != eptgen) { 2860 /* 2861 * VMspace generation does not match what was previously 2862 * used on this host CPU, so all mappings associated 2863 * with this EP4TA must be invalidated. 
2864 */ 2865 invept(1, vmx->eptp); 2866 vmx->eptgen[curcpu] = eptgen; 2867 } 2868 2869 vcpu_ustate_change(vm, vcpu, VU_RUN); 2870 vmx_dr_enter_guest(vmxctx); 2871 2872 /* Perform VMX entry */ 2873 rc = vmx_enter_guest(vmxctx, vmx, launched); 2874 2875 vmx_dr_leave_guest(vmxctx); 2876 vcpu_ustate_change(vm, vcpu, VU_EMU_KERN); 2877 2878 vmx->vmcs_state[vcpu] |= VS_LAUNCHED; 2879 smt_release(); 2880 2881 if (tpr_shadow_active) { 2882 vmx_tpr_shadow_exit(vlapic); 2883 } 2884 2885 /* Collect some information for VM exit processing */ 2886 vmexit->rip = rip = vmcs_read(VMCS_GUEST_RIP); 2887 vmexit->inst_length = vmcs_read(VMCS_EXIT_INSTRUCTION_LENGTH); 2888 vmexit->u.vmx.exit_reason = exit_reason = 2889 (vmcs_read(VMCS_EXIT_REASON) & BASIC_EXIT_REASON_MASK); 2890 vmexit->u.vmx.exit_qualification = 2891 vmcs_read(VMCS_EXIT_QUALIFICATION); 2892 /* Update 'nextrip' */ 2893 vmx->state[vcpu].nextrip = rip; 2894 2895 if (rc == VMX_GUEST_VMEXIT) { 2896 vmx_exit_handle_possible_nmi(vmexit); 2897 } 2898 enable_intr(); 2899 vmc_table_exit(vmc); 2900 2901 if (rc == VMX_GUEST_VMEXIT) { 2902 handled = vmx_exit_process(vmx, vcpu, vmexit); 2903 } else { 2904 vmx_exit_inst_error(vmxctx, rc, vmexit); 2905 } 2906 DTRACE_PROBE3(vmm__vexit, int, vcpu, uint64_t, rip, 2907 uint32_t, exit_reason); 2908 rip = vmexit->rip; 2909 } while (handled); 2910 2911 /* If a VM exit has been handled then the exitcode must be BOGUS */ 2912 if (handled && vmexit->exitcode != VM_EXITCODE_BOGUS) { 2913 panic("Non-BOGUS exitcode (%d) unexpected for handled VM exit", 2914 vmexit->exitcode); 2915 } 2916 2917 vmcs_clear(vmcs_pa); 2918 vmx_msr_guest_exit(vmx, vcpu); 2919 2920 VERIFY(vmx->vmcs_state != VS_NONE && curthread->t_preempt != 0); 2921 vmx->vmcs_state[vcpu] = VS_NONE; 2922 2923 return (0); 2924 } 2925 2926 static void 2927 vmx_vmcleanup(void *arg) 2928 { 2929 int i; 2930 struct vmx *vmx = arg; 2931 uint16_t maxcpus; 2932 2933 if (vmx_cap_en(vmx, VMX_CAP_APICV)) { 2934 (void) vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE); 2935 kmem_free(vmx->apic_access_page, PAGESIZE); 2936 } else { 2937 VERIFY3P(vmx->apic_access_page, ==, NULL); 2938 } 2939 2940 vmx_msr_bitmap_destroy(vmx); 2941 2942 maxcpus = vm_get_maxcpus(vmx->vm); 2943 for (i = 0; i < maxcpus; i++) 2944 vpid_free(vmx->state[i].vpid); 2945 2946 kmem_free(vmx, sizeof (*vmx)); 2947 } 2948 2949 /* 2950 * Ensure that the VMCS for this vcpu is loaded. 2951 * Returns true if a VMCS load was required. 
2952 */ 2953 static bool 2954 vmx_vmcs_access_ensure(struct vmx *vmx, int vcpu) 2955 { 2956 int hostcpu; 2957 2958 if (vcpu_is_running(vmx->vm, vcpu, &hostcpu)) { 2959 if (hostcpu != curcpu) { 2960 panic("unexpected vcpu migration %d != %d", 2961 hostcpu, curcpu); 2962 } 2963 /* Earlier logic already took care of the load */ 2964 return (false); 2965 } else { 2966 vmcs_load(vmx->vmcs_pa[vcpu]); 2967 return (true); 2968 } 2969 } 2970 2971 static void 2972 vmx_vmcs_access_done(struct vmx *vmx, int vcpu) 2973 { 2974 int hostcpu; 2975 2976 if (vcpu_is_running(vmx->vm, vcpu, &hostcpu)) { 2977 if (hostcpu != curcpu) { 2978 panic("unexpected vcpu migration %d != %d", 2979 hostcpu, curcpu); 2980 } 2981 /* Later logic will take care of the unload */ 2982 } else { 2983 vmcs_clear(vmx->vmcs_pa[vcpu]); 2984 } 2985 } 2986 2987 static uint64_t * 2988 vmxctx_regptr(struct vmxctx *vmxctx, int reg) 2989 { 2990 switch (reg) { 2991 case VM_REG_GUEST_RAX: 2992 return (&vmxctx->guest_rax); 2993 case VM_REG_GUEST_RBX: 2994 return (&vmxctx->guest_rbx); 2995 case VM_REG_GUEST_RCX: 2996 return (&vmxctx->guest_rcx); 2997 case VM_REG_GUEST_RDX: 2998 return (&vmxctx->guest_rdx); 2999 case VM_REG_GUEST_RSI: 3000 return (&vmxctx->guest_rsi); 3001 case VM_REG_GUEST_RDI: 3002 return (&vmxctx->guest_rdi); 3003 case VM_REG_GUEST_RBP: 3004 return (&vmxctx->guest_rbp); 3005 case VM_REG_GUEST_R8: 3006 return (&vmxctx->guest_r8); 3007 case VM_REG_GUEST_R9: 3008 return (&vmxctx->guest_r9); 3009 case VM_REG_GUEST_R10: 3010 return (&vmxctx->guest_r10); 3011 case VM_REG_GUEST_R11: 3012 return (&vmxctx->guest_r11); 3013 case VM_REG_GUEST_R12: 3014 return (&vmxctx->guest_r12); 3015 case VM_REG_GUEST_R13: 3016 return (&vmxctx->guest_r13); 3017 case VM_REG_GUEST_R14: 3018 return (&vmxctx->guest_r14); 3019 case VM_REG_GUEST_R15: 3020 return (&vmxctx->guest_r15); 3021 case VM_REG_GUEST_CR2: 3022 return (&vmxctx->guest_cr2); 3023 case VM_REG_GUEST_DR0: 3024 return (&vmxctx->guest_dr0); 3025 case VM_REG_GUEST_DR1: 3026 return (&vmxctx->guest_dr1); 3027 case VM_REG_GUEST_DR2: 3028 return (&vmxctx->guest_dr2); 3029 case VM_REG_GUEST_DR3: 3030 return (&vmxctx->guest_dr3); 3031 case VM_REG_GUEST_DR6: 3032 return (&vmxctx->guest_dr6); 3033 default: 3034 break; 3035 } 3036 return (NULL); 3037 } 3038 3039 static int 3040 vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval) 3041 { 3042 struct vmx *vmx = arg; 3043 uint64_t *regp; 3044 3045 /* VMCS access not required for ctx reads */ 3046 if ((regp = vmxctx_regptr(&vmx->ctx[vcpu], reg)) != NULL) { 3047 *retval = *regp; 3048 return (0); 3049 } 3050 3051 bool vmcs_loaded = vmx_vmcs_access_ensure(vmx, vcpu); 3052 int err = 0; 3053 3054 if (reg == VM_REG_GUEST_INTR_SHADOW) { 3055 uint64_t gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 3056 *retval = (gi & HWINTR_BLOCKING) ? 
1 : 0; 3057 } else { 3058 uint32_t encoding; 3059 3060 encoding = vmcs_field_encoding(reg); 3061 switch (encoding) { 3062 case VMCS_GUEST_CR0: 3063 /* Take the shadow bits into account */ 3064 *retval = vmx_unshadow_cr0(vmcs_read(encoding), 3065 vmcs_read(VMCS_CR0_SHADOW)); 3066 break; 3067 case VMCS_GUEST_CR4: 3068 /* Take the shadow bits into account */ 3069 *retval = vmx_unshadow_cr4(vmcs_read(encoding), 3070 vmcs_read(VMCS_CR4_SHADOW)); 3071 break; 3072 case VMCS_INVALID_ENCODING: 3073 err = EINVAL; 3074 break; 3075 default: 3076 *retval = vmcs_read(encoding); 3077 break; 3078 } 3079 } 3080 3081 if (vmcs_loaded) { 3082 vmx_vmcs_access_done(vmx, vcpu); 3083 } 3084 return (err); 3085 } 3086 3087 static int 3088 vmx_setreg(void *arg, int vcpu, int reg, uint64_t val) 3089 { 3090 struct vmx *vmx = arg; 3091 uint64_t *regp; 3092 3093 /* VMCS access not required for ctx writes */ 3094 if ((regp = vmxctx_regptr(&vmx->ctx[vcpu], reg)) != NULL) { 3095 *regp = val; 3096 return (0); 3097 } 3098 3099 bool vmcs_loaded = vmx_vmcs_access_ensure(vmx, vcpu); 3100 int err = 0; 3101 3102 if (reg == VM_REG_GUEST_INTR_SHADOW) { 3103 if (val != 0) { 3104 /* 3105 * Forcing the vcpu into an interrupt shadow is not 3106 * presently supported. 3107 */ 3108 err = EINVAL; 3109 } else { 3110 uint64_t gi; 3111 3112 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 3113 gi &= ~HWINTR_BLOCKING; 3114 vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi); 3115 err = 0; 3116 } 3117 } else { 3118 uint32_t encoding; 3119 3120 err = 0; 3121 encoding = vmcs_field_encoding(reg); 3122 switch (encoding) { 3123 case VMCS_GUEST_IA32_EFER: 3124 vmcs_write(encoding, val); 3125 vmx_sync_efer_state(vmx, vcpu, val); 3126 break; 3127 case VMCS_GUEST_CR0: 3128 /* 3129 * The guest is not allowed to modify certain bits in 3130 * %cr0 and %cr4. To maintain the illusion of full 3131 * control, they have shadow versions which contain the 3132 * guest-perceived (via reads from the register) values 3133 * as opposed to the guest-effective values. 3134 * 3135 * This is detailed in the SDM: Vol. 3 Ch. 24.6.6. 3136 */ 3137 vmcs_write(VMCS_CR0_SHADOW, val); 3138 vmcs_write(encoding, vmx_fix_cr0(val)); 3139 break; 3140 case VMCS_GUEST_CR4: 3141 /* See above for detail on %cr4 shadowing */ 3142 vmcs_write(VMCS_CR4_SHADOW, val); 3143 vmcs_write(encoding, vmx_fix_cr4(val)); 3144 break; 3145 case VMCS_GUEST_CR3: 3146 vmcs_write(encoding, val); 3147 /* 3148 * Invalidate the guest vcpu's TLB mappings to emulate 3149 * the behavior of updating %cr3. 3150 * 3151 * XXX the processor retains global mappings when %cr3 3152 * is updated but vmx_invvpid() does not. 
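 * (i.e. guest mappings with the global bit set may be flushed here even though a bare %cr3 write on real hardware would have preserved them)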
3153 */ 3154 vmx_invvpid(vmx, vcpu, 3155 vcpu_is_running(vmx->vm, vcpu, NULL)); 3156 break; 3157 case VMCS_INVALID_ENCODING: 3158 err = EINVAL; 3159 break; 3160 default: 3161 vmcs_write(encoding, val); 3162 break; 3163 } 3164 } 3165 3166 if (vmcs_loaded) { 3167 vmx_vmcs_access_done(vmx, vcpu); 3168 } 3169 return (err); 3170 } 3171 3172 static int 3173 vmx_getdesc(void *arg, int vcpu, int seg, struct seg_desc *desc) 3174 { 3175 struct vmx *vmx = arg; 3176 uint32_t base, limit, access; 3177 3178 bool vmcs_loaded = vmx_vmcs_access_ensure(vmx, vcpu); 3179 3180 vmcs_seg_desc_encoding(seg, &base, &limit, &access); 3181 desc->base = vmcs_read(base); 3182 desc->limit = vmcs_read(limit); 3183 if (access != VMCS_INVALID_ENCODING) { 3184 desc->access = vmcs_read(access); 3185 } else { 3186 desc->access = 0; 3187 } 3188 3189 if (vmcs_loaded) { 3190 vmx_vmcs_access_done(vmx, vcpu); 3191 } 3192 return (0); 3193 } 3194 3195 static int 3196 vmx_setdesc(void *arg, int vcpu, int seg, const struct seg_desc *desc) 3197 { 3198 struct vmx *vmx = arg; 3199 uint32_t base, limit, access; 3200 3201 bool vmcs_loaded = vmx_vmcs_access_ensure(vmx, vcpu); 3202 3203 vmcs_seg_desc_encoding(seg, &base, &limit, &access); 3204 vmcs_write(base, desc->base); 3205 vmcs_write(limit, desc->limit); 3206 if (access != VMCS_INVALID_ENCODING) { 3207 vmcs_write(access, desc->access); 3208 } 3209 3210 if (vmcs_loaded) { 3211 vmx_vmcs_access_done(vmx, vcpu); 3212 } 3213 return (0); 3214 } 3215 3216 static uint64_t * 3217 vmx_msr_ptr(struct vmx *vmx, int vcpu, uint32_t msr) 3218 { 3219 uint64_t *guest_msrs = vmx->guest_msrs[vcpu]; 3220 3221 switch (msr) { 3222 case MSR_LSTAR: 3223 return (&guest_msrs[IDX_MSR_LSTAR]); 3224 case MSR_CSTAR: 3225 return (&guest_msrs[IDX_MSR_CSTAR]); 3226 case MSR_STAR: 3227 return (&guest_msrs[IDX_MSR_STAR]); 3228 case MSR_SF_MASK: 3229 return (&guest_msrs[IDX_MSR_SF_MASK]); 3230 case MSR_KGSBASE: 3231 return (&guest_msrs[IDX_MSR_KGSBASE]); 3232 case MSR_PAT: 3233 return (&guest_msrs[IDX_MSR_PAT]); 3234 default: 3235 return (NULL); 3236 } 3237 } 3238 3239 static int 3240 vmx_msr_get(void *arg, int vcpu, uint32_t msr, uint64_t *valp) 3241 { 3242 struct vmx *vmx = arg; 3243 3244 ASSERT(valp != NULL); 3245 3246 const uint64_t *msrp = vmx_msr_ptr(vmx, vcpu, msr); 3247 if (msrp != NULL) { 3248 *valp = *msrp; 3249 return (0); 3250 } 3251 3252 const uint32_t vmcs_enc = vmcs_msr_encoding(msr); 3253 if (vmcs_enc != VMCS_INVALID_ENCODING) { 3254 bool vmcs_loaded = vmx_vmcs_access_ensure(vmx, vcpu); 3255 3256 *valp = vmcs_read(vmcs_enc); 3257 3258 if (vmcs_loaded) { 3259 vmx_vmcs_access_done(vmx, vcpu); 3260 } 3261 return (0); 3262 } 3263 3264 return (EINVAL); 3265 } 3266 3267 static int 3268 vmx_msr_set(void *arg, int vcpu, uint32_t msr, uint64_t val) 3269 { 3270 struct vmx *vmx = arg; 3271 3272 /* TODO: mask value */ 3273 3274 uint64_t *msrp = vmx_msr_ptr(vmx, vcpu, msr); 3275 if (msrp != NULL) { 3276 *msrp = val; 3277 return (0); 3278 } 3279 3280 const uint32_t vmcs_enc = vmcs_msr_encoding(msr); 3281 if (vmcs_enc != VMCS_INVALID_ENCODING) { 3282 bool vmcs_loaded = vmx_vmcs_access_ensure(vmx, vcpu); 3283 3284 vmcs_write(vmcs_enc, val); 3285 3286 if (msr == MSR_EFER) { 3287 vmx_sync_efer_state(vmx, vcpu, val); 3288 } 3289 3290 if (vmcs_loaded) { 3291 vmx_vmcs_access_done(vmx, vcpu); 3292 } 3293 return (0); 3294 } 3295 return (EINVAL); 3296 } 3297 3298 static int 3299 vmx_getcap(void *arg, int vcpu, int type, int *retval) 3300 { 3301 struct vmx *vmx = arg; 3302 int vcap; 3303 int ret; 3304 3305 ret = ENOENT; 3306 3307 
vcap = vmx->cap[vcpu].set; 3308 3309 switch (type) { 3310 case VM_CAP_HALT_EXIT: 3311 if (cap_halt_exit) 3312 ret = 0; 3313 break; 3314 case VM_CAP_PAUSE_EXIT: 3315 if (cap_pause_exit) 3316 ret = 0; 3317 break; 3318 case VM_CAP_MTRAP_EXIT: 3319 if (cap_monitor_trap) 3320 ret = 0; 3321 break; 3322 case VM_CAP_ENABLE_INVPCID: 3323 if (cap_invpcid) 3324 ret = 0; 3325 break; 3326 case VM_CAP_BPT_EXIT: 3327 ret = 0; 3328 break; 3329 default: 3330 break; 3331 } 3332 3333 if (ret == 0) 3334 *retval = (vcap & (1 << type)) ? 1 : 0; 3335 3336 return (ret); 3337 } 3338 3339 static int 3340 vmx_setcap(void *arg, int vcpu, int type, int val) 3341 { 3342 struct vmx *vmx = arg; 3343 uint32_t baseval, reg, flag; 3344 uint32_t *pptr; 3345 int error; 3346 3347 error = ENOENT; 3348 pptr = NULL; 3349 3350 switch (type) { 3351 case VM_CAP_HALT_EXIT: 3352 if (cap_halt_exit) { 3353 error = 0; 3354 pptr = &vmx->cap[vcpu].proc_ctls; 3355 baseval = *pptr; 3356 flag = PROCBASED_HLT_EXITING; 3357 reg = VMCS_PRI_PROC_BASED_CTLS; 3358 } 3359 break; 3360 case VM_CAP_MTRAP_EXIT: 3361 if (cap_monitor_trap) { 3362 error = 0; 3363 pptr = &vmx->cap[vcpu].proc_ctls; 3364 baseval = *pptr; 3365 flag = PROCBASED_MTF; 3366 reg = VMCS_PRI_PROC_BASED_CTLS; 3367 } 3368 break; 3369 case VM_CAP_PAUSE_EXIT: 3370 if (cap_pause_exit) { 3371 error = 0; 3372 pptr = &vmx->cap[vcpu].proc_ctls; 3373 baseval = *pptr; 3374 flag = PROCBASED_PAUSE_EXITING; 3375 reg = VMCS_PRI_PROC_BASED_CTLS; 3376 } 3377 break; 3378 case VM_CAP_ENABLE_INVPCID: 3379 if (cap_invpcid) { 3380 error = 0; 3381 pptr = &vmx->cap[vcpu].proc_ctls2; 3382 baseval = *pptr; 3383 flag = PROCBASED2_ENABLE_INVPCID; 3384 reg = VMCS_SEC_PROC_BASED_CTLS; 3385 } 3386 break; 3387 case VM_CAP_BPT_EXIT: 3388 error = 0; 3389 3390 /* Don't change the bitmap if we are tracing all exceptions. 
*/ 3391 if (vmx->cap[vcpu].exc_bitmap != 0xffffffff) { 3392 pptr = &vmx->cap[vcpu].exc_bitmap; 3393 baseval = *pptr; 3394 flag = (1 << IDT_BP); 3395 reg = VMCS_EXCEPTION_BITMAP; 3396 } 3397 break; 3398 default: 3399 break; 3400 } 3401 3402 if (error != 0) { 3403 return (error); 3404 } 3405 3406 if (pptr != NULL) { 3407 if (val) { 3408 baseval |= flag; 3409 } else { 3410 baseval &= ~flag; 3411 } 3412 vmcs_load(vmx->vmcs_pa[vcpu]); 3413 vmcs_write(reg, baseval); 3414 vmcs_clear(vmx->vmcs_pa[vcpu]); 3415 3416 /* 3417 * Update optional stored flags, and record 3418 * setting 3419 */ 3420 *pptr = baseval; 3421 } 3422 3423 if (val) { 3424 vmx->cap[vcpu].set |= (1 << type); 3425 } else { 3426 vmx->cap[vcpu].set &= ~(1 << type); 3427 } 3428 3429 return (0); 3430 } 3431 3432 struct vlapic_vtx { 3433 struct vlapic vlapic; 3434 3435 /* Align to the nearest cacheline */ 3436 uint8_t _pad[64 - (sizeof (struct vlapic) % 64)]; 3437 3438 /* TMR handling state for posted interrupts */ 3439 uint32_t tmr_active[8]; 3440 uint32_t pending_level[8]; 3441 uint32_t pending_edge[8]; 3442 3443 struct pir_desc *pir_desc; 3444 struct vmx *vmx; 3445 uint_t pending_prio; 3446 boolean_t tmr_sync; 3447 }; 3448 3449 CTASSERT((offsetof(struct vlapic_vtx, tmr_active) & 63) == 0); 3450 3451 #define VPR_PRIO_BIT(vpr) (1 << ((vpr) >> 4)) 3452 3453 static vcpu_notify_t 3454 vmx_apicv_set_ready(struct vlapic *vlapic, int vector, bool level) 3455 { 3456 struct vlapic_vtx *vlapic_vtx; 3457 struct pir_desc *pir_desc; 3458 uint32_t mask, tmrval; 3459 int idx; 3460 vcpu_notify_t notify = VCPU_NOTIFY_NONE; 3461 3462 vlapic_vtx = (struct vlapic_vtx *)vlapic; 3463 pir_desc = vlapic_vtx->pir_desc; 3464 idx = vector / 32; 3465 mask = 1UL << (vector % 32); 3466 3467 /* 3468 * If the currently asserted TMRs do not match the state requested by 3469 * the incoming interrupt, an exit will be required to reconcile those 3470 * bits in the APIC page. This will keep the vLAPIC behavior in line 3471 * with the architecturally defined expectations. 3472 * 3473 * If actors of mixed types (edge and level) are racing against the same 3474 * vector (toggling its TMR bit back and forth), the results could 3475 * inconsistent. Such circumstances are considered a rare edge case and 3476 * are never expected to be found in the wild. 3477 */ 3478 tmrval = atomic_load_acq_int(&vlapic_vtx->tmr_active[idx]); 3479 if (!level) { 3480 if ((tmrval & mask) != 0) { 3481 /* Edge-triggered interrupt needs TMR de-asserted */ 3482 atomic_set_int(&vlapic_vtx->pending_edge[idx], mask); 3483 atomic_store_rel_long(&pir_desc->pending, 1); 3484 return (VCPU_NOTIFY_EXIT); 3485 } 3486 } else { 3487 if ((tmrval & mask) == 0) { 3488 /* Level-triggered interrupt needs TMR asserted */ 3489 atomic_set_int(&vlapic_vtx->pending_level[idx], mask); 3490 atomic_store_rel_long(&pir_desc->pending, 1); 3491 return (VCPU_NOTIFY_EXIT); 3492 } 3493 } 3494 3495 /* 3496 * If the interrupt request does not require manipulation of the TMRs 3497 * for delivery, set it in PIR descriptor. It cannot be inserted into 3498 * the APIC page while the vCPU might be running. 3499 */ 3500 atomic_set_int(&pir_desc->pir[idx], mask); 3501 3502 /* 3503 * A notification is required whenever the 'pending' bit makes a 3504 * transition from 0->1. 3505 * 3506 * Even if the 'pending' bit is already asserted, notification about 3507 * the incoming interrupt may still be necessary. 
For example, if a 3508 * vCPU is HLTed with a high PPR, a low priority interrupt would cause 3509 * the 0->1 'pending' transition with a notification, but the vCPU 3510 * would ignore the interrupt for the time being. The same vCPU would 3511 * need to then be notified if a high-priority interrupt arrived which 3512 * satisfied the PPR. 3513 * 3514 * The priorities of interrupts injected while 'pending' is asserted 3515 * are tracked in a custom bitfield 'pending_prio'. Should the 3516 * to-be-injected interrupt exceed the priorities already present, the 3517 * notification is sent. The priorities recorded in 'pending_prio' are 3518 * cleared whenever the 'pending' bit makes another 0->1 transition. 3519 */ 3520 if (atomic_cmpset_long(&pir_desc->pending, 0, 1) != 0) { 3521 notify = VCPU_NOTIFY_APIC; 3522 vlapic_vtx->pending_prio = 0; 3523 } else { 3524 const uint_t old_prio = vlapic_vtx->pending_prio; 3525 const uint_t prio_bit = VPR_PRIO_BIT(vector & APIC_TPR_INT); 3526 3527 if ((old_prio & prio_bit) == 0 && prio_bit > old_prio) { 3528 atomic_set_int(&vlapic_vtx->pending_prio, prio_bit); 3529 notify = VCPU_NOTIFY_APIC; 3530 } 3531 } 3532 3533 return (notify); 3534 } 3535 3536 static void 3537 vmx_apicv_accepted(struct vlapic *vlapic, int vector) 3538 { 3539 /* 3540 * When APICv is enabled for an instance, the traditional interrupt 3541 * injection method (populating ENTRY_INTR_INFO in the VMCS) is not 3542 * used and the CPU does the heavy lifting of virtual interrupt 3543 * delivery. For that reason vmx_intr_accepted() should never be called 3544 * when APICv is enabled. 3545 */ 3546 panic("vmx_intr_accepted: not expected to be called"); 3547 } 3548 3549 static void 3550 vmx_apicv_sync_tmr(struct vlapic *vlapic) 3551 { 3552 struct vlapic_vtx *vlapic_vtx; 3553 const uint32_t *tmrs; 3554 3555 vlapic_vtx = (struct vlapic_vtx *)vlapic; 3556 tmrs = &vlapic_vtx->tmr_active[0]; 3557 3558 if (!vlapic_vtx->tmr_sync) { 3559 return; 3560 } 3561 3562 vmcs_write(VMCS_EOI_EXIT0, ((uint64_t)tmrs[1] << 32) | tmrs[0]); 3563 vmcs_write(VMCS_EOI_EXIT1, ((uint64_t)tmrs[3] << 32) | tmrs[2]); 3564 vmcs_write(VMCS_EOI_EXIT2, ((uint64_t)tmrs[5] << 32) | tmrs[4]); 3565 vmcs_write(VMCS_EOI_EXIT3, ((uint64_t)tmrs[7] << 32) | tmrs[6]); 3566 vlapic_vtx->tmr_sync = B_FALSE; 3567 } 3568 3569 static void 3570 vmx_enable_x2apic_mode_ts(struct vlapic *vlapic) 3571 { 3572 struct vmx *vmx; 3573 uint32_t proc_ctls; 3574 int vcpuid; 3575 3576 vcpuid = vlapic->vcpuid; 3577 vmx = ((struct vlapic_vtx *)vlapic)->vmx; 3578 3579 proc_ctls = vmx->cap[vcpuid].proc_ctls; 3580 proc_ctls &= ~PROCBASED_USE_TPR_SHADOW; 3581 proc_ctls |= PROCBASED_CR8_LOAD_EXITING; 3582 proc_ctls |= PROCBASED_CR8_STORE_EXITING; 3583 vmx->cap[vcpuid].proc_ctls = proc_ctls; 3584 3585 vmcs_load(vmx->vmcs_pa[vcpuid]); 3586 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, proc_ctls); 3587 vmcs_clear(vmx->vmcs_pa[vcpuid]); 3588 } 3589 3590 static void 3591 vmx_enable_x2apic_mode_vid(struct vlapic *vlapic) 3592 { 3593 struct vmx *vmx; 3594 uint32_t proc_ctls2; 3595 int vcpuid; 3596 3597 vcpuid = vlapic->vcpuid; 3598 vmx = ((struct vlapic_vtx *)vlapic)->vmx; 3599 3600 proc_ctls2 = vmx->cap[vcpuid].proc_ctls2; 3601 KASSERT((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) != 0, 3602 ("%s: invalid proc_ctls2 %x", __func__, proc_ctls2)); 3603 3604 proc_ctls2 &= ~PROCBASED2_VIRTUALIZE_APIC_ACCESSES; 3605 proc_ctls2 |= PROCBASED2_VIRTUALIZE_X2APIC_MODE; 3606 vmx->cap[vcpuid].proc_ctls2 = proc_ctls2; 3607 3608 vmcs_load(vmx->vmcs_pa[vcpuid]); 3609 
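/* Propagate the updated secondary controls into the now-loaded VMCS. */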
vmcs_write(VMCS_SEC_PROC_BASED_CTLS, proc_ctls2); 3610 vmcs_clear(vmx->vmcs_pa[vcpuid]); 3611 3612 vmx_allow_x2apic_msrs(vmx, vcpuid); 3613 } 3614 3615 static void 3616 vmx_apicv_notify(struct vlapic *vlapic, int hostcpu) 3617 { 3618 psm_send_pir_ipi(hostcpu); 3619 } 3620 3621 static void 3622 vmx_apicv_sync(struct vlapic *vlapic) 3623 { 3624 struct vlapic_vtx *vlapic_vtx; 3625 struct pir_desc *pir_desc; 3626 struct LAPIC *lapic; 3627 uint_t i; 3628 3629 vlapic_vtx = (struct vlapic_vtx *)vlapic; 3630 pir_desc = vlapic_vtx->pir_desc; 3631 lapic = vlapic->apic_page; 3632 3633 if (atomic_cmpset_long(&pir_desc->pending, 1, 0) == 0) { 3634 return; 3635 } 3636 3637 vlapic_vtx->pending_prio = 0; 3638 3639 /* Make sure the invalid (0-15) vectors are not set */ 3640 ASSERT0(vlapic_vtx->pending_level[0] & 0xffff); 3641 ASSERT0(vlapic_vtx->pending_edge[0] & 0xffff); 3642 ASSERT0(pir_desc->pir[0] & 0xffff); 3643 3644 for (i = 0; i <= 7; i++) { 3645 uint32_t *tmrp = &lapic->tmr0 + (i * 4); 3646 uint32_t *irrp = &lapic->irr0 + (i * 4); 3647 3648 const uint32_t pending_level = 3649 atomic_readandclear_int(&vlapic_vtx->pending_level[i]); 3650 const uint32_t pending_edge = 3651 atomic_readandclear_int(&vlapic_vtx->pending_edge[i]); 3652 const uint32_t pending_inject = 3653 atomic_readandclear_int(&pir_desc->pir[i]); 3654 3655 if (pending_level != 0) { 3656 /* 3657 * Level-triggered interrupts assert their corresponding 3658 * bit in the TMR when queued in IRR. 3659 */ 3660 *tmrp |= pending_level; 3661 *irrp |= pending_level; 3662 } 3663 if (pending_edge != 0) { 3664 /* 3665 * When queuing an edge-triggered interrupt in IRR, the 3666 * corresponding bit in the TMR is cleared. 3667 */ 3668 *tmrp &= ~pending_edge; 3669 *irrp |= pending_edge; 3670 } 3671 if (pending_inject != 0) { 3672 /* 3673 * Interrupts which do not require a change to the TMR 3674 * (because it already matches the necessary state) can 3675 * simply be queued in IRR. 3676 */ 3677 *irrp |= pending_inject; 3678 } 3679 3680 if (*tmrp != vlapic_vtx->tmr_active[i]) { 3681 /* Check if VMX EOI triggers require updating. */ 3682 vlapic_vtx->tmr_active[i] = *tmrp; 3683 vlapic_vtx->tmr_sync = B_TRUE; 3684 } 3685 } 3686 } 3687 3688 static void 3689 vmx_tpr_shadow_enter(struct vlapic *vlapic) 3690 { 3691 /* 3692 * When TPR shadowing is enabled, VMX will initiate a guest exit if its 3693 * TPR falls below a threshold priority. That threshold is set to the 3694 * current TPR priority, since guest interrupt status should be 3695 * re-evaluated if its TPR is set lower. 3696 */ 3697 vmcs_write(VMCS_TPR_THRESHOLD, vlapic_get_cr8(vlapic)); 3698 } 3699 3700 static void 3701 vmx_tpr_shadow_exit(struct vlapic *vlapic) 3702 { 3703 /* 3704 * Unlike full APICv, where changes to the TPR are reflected in the PPR, 3705 * with TPR shadowing, that duty is relegated to the VMM. Upon exit, 3706 * the PPR is updated to reflect any change in the TPR here. 
3707 */ 3708 vlapic_sync_tpr(vlapic); 3709 } 3710 3711 static struct vlapic * 3712 vmx_vlapic_init(void *arg, int vcpuid) 3713 { 3714 struct vmx *vmx = arg; 3715 struct vlapic_vtx *vlapic_vtx; 3716 struct vlapic *vlapic; 3717 3718 vlapic_vtx = kmem_zalloc(sizeof (struct vlapic_vtx), KM_SLEEP); 3719 vlapic_vtx->pir_desc = &vmx->pir_desc[vcpuid]; 3720 vlapic_vtx->vmx = vmx; 3721 3722 vlapic = &vlapic_vtx->vlapic; 3723 vlapic->vm = vmx->vm; 3724 vlapic->vcpuid = vcpuid; 3725 vlapic->apic_page = (struct LAPIC *)&vmx->apic_page[vcpuid]; 3726 3727 if (vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW)) { 3728 vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode_ts; 3729 } 3730 if (vmx_cap_en(vmx, VMX_CAP_APICV)) { 3731 vlapic->ops.set_intr_ready = vmx_apicv_set_ready; 3732 vlapic->ops.sync_state = vmx_apicv_sync; 3733 vlapic->ops.intr_accepted = vmx_apicv_accepted; 3734 vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode_vid; 3735 3736 if (vmx_cap_en(vmx, VMX_CAP_APICV_PIR)) { 3737 vlapic->ops.post_intr = vmx_apicv_notify; 3738 } 3739 } 3740 3741 vlapic_init(vlapic); 3742 3743 return (vlapic); 3744 } 3745 3746 static void 3747 vmx_vlapic_cleanup(void *arg, struct vlapic *vlapic) 3748 { 3749 vlapic_cleanup(vlapic); 3750 kmem_free(vlapic, sizeof (struct vlapic_vtx)); 3751 } 3752 3753 static void 3754 vmx_savectx(void *arg, int vcpu) 3755 { 3756 struct vmx *vmx = arg; 3757 3758 if ((vmx->vmcs_state[vcpu] & VS_LOADED) != 0) { 3759 vmcs_clear(vmx->vmcs_pa[vcpu]); 3760 vmx_msr_guest_exit(vmx, vcpu); 3761 /* 3762 * Having VMCLEARed the VMCS, it can no longer be re-entered 3763 * with VMRESUME, but must be VMLAUNCHed again. 3764 */ 3765 vmx->vmcs_state[vcpu] &= ~VS_LAUNCHED; 3766 } 3767 3768 reset_gdtr_limit(); 3769 } 3770 3771 static void 3772 vmx_restorectx(void *arg, int vcpu) 3773 { 3774 struct vmx *vmx = arg; 3775 3776 ASSERT0(vmx->vmcs_state[vcpu] & VS_LAUNCHED); 3777 3778 if ((vmx->vmcs_state[vcpu] & VS_LOADED) != 0) { 3779 vmx_msr_guest_enter(vmx, vcpu); 3780 vmcs_load(vmx->vmcs_pa[vcpu]); 3781 } 3782 } 3783 3784 struct vmm_ops vmm_ops_intel = { 3785 .init = vmx_init, 3786 .cleanup = vmx_cleanup, 3787 .resume = vmx_restore, 3788 3789 .vminit = vmx_vminit, 3790 .vmrun = vmx_run, 3791 .vmcleanup = vmx_vmcleanup, 3792 .vmgetreg = vmx_getreg, 3793 .vmsetreg = vmx_setreg, 3794 .vmgetdesc = vmx_getdesc, 3795 .vmsetdesc = vmx_setdesc, 3796 .vmgetcap = vmx_getcap, 3797 .vmsetcap = vmx_setcap, 3798 .vlapic_init = vmx_vlapic_init, 3799 .vlapic_cleanup = vmx_vlapic_cleanup, 3800 3801 .vmsavectx = vmx_savectx, 3802 .vmrestorectx = vmx_restorectx, 3803 3804 .vmgetmsr = vmx_msr_get, 3805 .vmsetmsr = vmx_msr_set, 3806 }; 3807 3808 /* Side-effect free HW validation derived from checks in vmx_init. 
*/ 3809 int 3810 vmx_x86_supported(const char **msg) 3811 { 3812 int error; 3813 uint32_t tmp; 3814 3815 ASSERT(msg != NULL); 3816 3817 /* Check support for primary processor-based VM-execution controls */ 3818 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, 3819 MSR_VMX_TRUE_PROCBASED_CTLS, PROCBASED_CTLS_ONE_SETTING, 3820 PROCBASED_CTLS_ZERO_SETTING, &tmp); 3821 if (error) { 3822 *msg = "processor does not support desired primary " 3823 "processor-based controls"; 3824 return (error); 3825 } 3826 3827 /* Check support for secondary processor-based VM-execution controls */ 3828 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, 3829 MSR_VMX_PROCBASED_CTLS2, PROCBASED_CTLS2_ONE_SETTING, 3830 PROCBASED_CTLS2_ZERO_SETTING, &tmp); 3831 if (error) { 3832 *msg = "processor does not support desired secondary " 3833 "processor-based controls"; 3834 return (error); 3835 } 3836 3837 /* Check support for pin-based VM-execution controls */ 3838 error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS, 3839 MSR_VMX_TRUE_PINBASED_CTLS, PINBASED_CTLS_ONE_SETTING, 3840 PINBASED_CTLS_ZERO_SETTING, &tmp); 3841 if (error) { 3842 *msg = "processor does not support desired pin-based controls"; 3843 return (error); 3844 } 3845 3846 /* Check support for VM-exit controls */ 3847 error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS, 3848 VM_EXIT_CTLS_ONE_SETTING, VM_EXIT_CTLS_ZERO_SETTING, &tmp); 3849 if (error) { 3850 *msg = "processor does not support desired exit controls"; 3851 return (error); 3852 } 3853 3854 /* Check support for VM-entry controls */ 3855 error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS, MSR_VMX_TRUE_ENTRY_CTLS, 3856 VM_ENTRY_CTLS_ONE_SETTING, VM_ENTRY_CTLS_ZERO_SETTING, &tmp); 3857 if (error) { 3858 *msg = "processor does not support desired entry controls"; 3859 return (error); 3860 } 3861 3862 /* Unrestricted guest is nominally optional, but not for us. */ 3863 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2, 3864 PROCBASED2_UNRESTRICTED_GUEST, 0, &tmp); 3865 if (error) { 3866 *msg = "processor does not support desired unrestricted guest " 3867 "controls"; 3868 return (error); 3869 } 3870 3871 return (0); 3872 } 3873
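/*
 * Illustrative sketch only (hypothetical caller, not part of this file's
 * interfaces): a platform-detection path could use vmx_x86_supported() to
 * report why the VMX backend is unusable before attempting further setup.
 *
 *	const char *msg = NULL;
 *
 *	if (vmx_x86_supported(&msg) != 0) {
 *		cmn_err(CE_NOTE, "!VMX not usable: %s", msg);
 *		return (ENOTSUP);
 *	}
 */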