/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 * Copyright (c) 2018 Joyent, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 *
 * Copyright 2015 Pluribus Networks Inc.
 * Copyright 2018 Joyent, Inc.
 * Copyright 2022 Oxide Computer Company
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sysctl.h>

#include <sys/x86_archext.h>
#include <sys/smp_impldefs.h>
#include <sys/smt.h>
#include <sys/hma.h>
#include <sys/trap.h>
#include <sys/archsystm.h>

#include <machine/psl.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>
#include <machine/reg.h>
#include <machine/segments.h>
#include <machine/specialreg.h>
#include <machine/vmparam.h>
#include <sys/vmm_vm.h>
#include <sys/vmm_kernel.h>

#include <machine/vmm.h>
#include <machine/vmm_dev.h>
#include <sys/vmm_instruction_emul.h>
#include "vmm_lapic.h"
#include "vmm_host.h"
#include "vmm_ioport.h"
#include "vmm_ktr.h"
#include "vmm_stat.h"
#include "vatpic.h"
#include "vlapic.h"
#include "vlapic_priv.h"

#include "vmcs.h"
#include "vmx.h"
#include "vmx_msr.h"
#include "x86.h"
#include "vmx_controls.h"

#define	PINBASED_CTLS_ONE_SETTING		\
	(PINBASED_EXTINT_EXITING |		\
	PINBASED_NMI_EXITING |			\
	PINBASED_VIRTUAL_NMI)
#define	PINBASED_CTLS_ZERO_SETTING	0

#define	PROCBASED_CTLS_WINDOW_SETTING		\
	(PROCBASED_INT_WINDOW_EXITING |		\
	PROCBASED_NMI_WINDOW_EXITING)

/* We consider TSC offset a necessity for unsynched TSC handling */
#define	PROCBASED_CTLS_ONE_SETTING		\
	(PROCBASED_SECONDARY_CONTROLS |		\
	PROCBASED_TSC_OFFSET |			\
	PROCBASED_MWAIT_EXITING |		\
	PROCBASED_MONITOR_EXITING |		\
	PROCBASED_IO_EXITING |			\
	PROCBASED_MSR_BITMAPS |			\
	PROCBASED_CTLS_WINDOW_SETTING |		\
	PROCBASED_CR8_LOAD_EXITING |		\
	PROCBASED_CR8_STORE_EXITING)

#define	PROCBASED_CTLS_ZERO_SETTING		\
	(PROCBASED_CR3_LOAD_EXITING |		\
	PROCBASED_CR3_STORE_EXITING |		\
	PROCBASED_IO_BITMAPS)

/*
 * EPT and Unrestricted Guest are considered necessities.  The latter is not a
 * requirement on FreeBSD, where grub2-bhyve is used to load guests directly
 * without a bootrom starting in real mode.
 */
#define	PROCBASED_CTLS2_ONE_SETTING		\
	(PROCBASED2_ENABLE_EPT |		\
	PROCBASED2_UNRESTRICTED_GUEST)
#define	PROCBASED_CTLS2_ZERO_SETTING	0

#define	VM_EXIT_CTLS_ONE_SETTING		\
	(VM_EXIT_SAVE_DEBUG_CONTROLS |		\
	VM_EXIT_HOST_LMA |			\
	VM_EXIT_LOAD_PAT |			\
	VM_EXIT_SAVE_EFER |			\
	VM_EXIT_LOAD_EFER |			\
	VM_EXIT_ACKNOWLEDGE_INTERRUPT)

#define	VM_EXIT_CTLS_ZERO_SETTING	0

#define	VM_ENTRY_CTLS_ONE_SETTING		\
	(VM_ENTRY_LOAD_DEBUG_CONTROLS |		\
	VM_ENTRY_LOAD_EFER)

#define	VM_ENTRY_CTLS_ZERO_SETTING		\
	(VM_ENTRY_INTO_SMM |			\
	VM_ENTRY_DEACTIVATE_DUAL_MONITOR)

/*
 * Cover the EPT capabilities used by bhyve at present:
 * - 4-level page walks
 * - write-back memory type
 * - INVEPT operations (all types)
 * - INVVPID operations (single-context only)
 */
#define	EPT_CAPS_REQUIRED			\
	(IA32_VMX_EPT_VPID_PWL4 |		\
	IA32_VMX_EPT_VPID_TYPE_WB |		\
	IA32_VMX_EPT_VPID_INVEPT |		\
	IA32_VMX_EPT_VPID_INVEPT_SINGLE |	\
	IA32_VMX_EPT_VPID_INVEPT_ALL |		\
	IA32_VMX_EPT_VPID_INVVPID |		\
	IA32_VMX_EPT_VPID_INVVPID_SINGLE)

#define	HANDLED		1
#define	UNHANDLED	0

static MALLOC_DEFINE(M_VMX, "vmx", "vmx");
static MALLOC_DEFINE(M_VLAPIC, "vlapic", "vlapic");

SYSCTL_DECL(_hw_vmm);
SYSCTL_NODE(_hw_vmm, OID_AUTO, vmx, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
    NULL);

static uint32_t pinbased_ctls, procbased_ctls, procbased_ctls2;
static uint32_t exit_ctls, entry_ctls;

static uint64_t cr0_ones_mask, cr0_zeros_mask;

static uint64_t cr4_ones_mask, cr4_zeros_mask;

static int vmx_initialized;

/* Do not flush RSB upon vmexit */
static int no_flush_rsb;

/*
 * Optional capabilities
 */

/* HLT triggers a VM-exit */
static int cap_halt_exit;

/* PAUSE triggers a VM-exit */
static int cap_pause_exit;

/* Monitor trap flag */
static int cap_monitor_trap;

/* Guests are allowed to use INVPCID */
static int cap_invpcid;

/* Extra capabilities (VMX_CAP_*) beyond the minimum */
static enum vmx_caps vmx_capabilities;

/* APICv posted interrupt vector */
static int pirvec = -1;

static uint_t vpid_alloc_failed;

int guest_l1d_flush;
int guest_l1d_flush_sw;

/* MSR save region is composed of an array of 'struct msr_entry' */
struct msr_entry {
	uint32_t	index;
	uint32_t	reserved;
	uint64_t	val;
};

static struct msr_entry msr_load_list[1] __aligned(16);

/*
 * The definitions of SDT probes for VMX.
 */

/* BEGIN CSTYLED */
SDT_PROBE_DEFINE3(vmm, vmx, exit, entry,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE4(vmm, vmx, exit, taskswitch,
    "struct vmx *", "int", "struct vm_exit *", "struct vm_task_switch *");

SDT_PROBE_DEFINE4(vmm, vmx, exit, craccess,
    "struct vmx *", "int", "struct vm_exit *", "uint64_t");

SDT_PROBE_DEFINE4(vmm, vmx, exit, rdmsr,
    "struct vmx *", "int", "struct vm_exit *", "uint32_t");

SDT_PROBE_DEFINE5(vmm, vmx, exit, wrmsr,
    "struct vmx *", "int", "struct vm_exit *", "uint32_t", "uint64_t");

SDT_PROBE_DEFINE3(vmm, vmx, exit, halt,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, mtrap,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, pause,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, intrwindow,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE4(vmm, vmx, exit, interrupt,
    "struct vmx *", "int", "struct vm_exit *", "uint32_t");

SDT_PROBE_DEFINE3(vmm, vmx, exit, nmiwindow,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, inout,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, cpuid,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE5(vmm, vmx, exit, exception,
    "struct vmx *", "int", "struct vm_exit *", "uint32_t", "int");

SDT_PROBE_DEFINE5(vmm, vmx, exit, nestedfault,
    "struct vmx *", "int", "struct vm_exit *", "uint64_t", "uint64_t");

SDT_PROBE_DEFINE4(vmm, vmx, exit, mmiofault,
    "struct vmx *", "int", "struct vm_exit *", "uint64_t");

SDT_PROBE_DEFINE3(vmm, vmx, exit, eoi,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, apicaccess,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE4(vmm, vmx, exit, apicwrite,
    "struct vmx *", "int", "struct vm_exit *", "struct vlapic *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, xsetbv,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, monitor,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, mwait,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, vminsn,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE4(vmm, vmx, exit, unknown,
    "struct vmx *", "int", "struct vm_exit *", "uint32_t");

SDT_PROBE_DEFINE4(vmm, vmx, exit, return,
    "struct vmx *", "int", "struct vm_exit *", "int");
/* END CSTYLED */

static int vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc);
static int vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval);
static void vmx_apply_tsc_adjust(struct vmx *, int);
static void vmx_apicv_sync_tmr(struct vlapic *vlapic);
static void vmx_tpr_shadow_enter(struct vlapic *vlapic);
static void vmx_tpr_shadow_exit(struct vlapic *vlapic);

static void
vmx_allow_x2apic_msrs(struct vmx *vmx, int vcpuid)
{
	/*
	 * Allow readonly access to the following x2APIC MSRs from the guest.
	 */
	guest_msr_ro(vmx, vcpuid, MSR_APIC_ID);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_VERSION);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_LDR);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_SVR);

	for (uint_t i = 0; i < 8; i++) {
		guest_msr_ro(vmx, vcpuid, MSR_APIC_ISR0 + i);
		guest_msr_ro(vmx, vcpuid, MSR_APIC_TMR0 + i);
		guest_msr_ro(vmx, vcpuid, MSR_APIC_IRR0 + i);
	}

	guest_msr_ro(vmx, vcpuid, MSR_APIC_ESR);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_LVT_TIMER);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_LVT_THERMAL);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_LVT_PCINT);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_LVT_LINT0);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_LVT_LINT1);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_LVT_ERROR);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_ICR_TIMER);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_DCR_TIMER);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_ICR);

	/*
	 * Allow TPR, EOI and SELF_IPI MSRs to be read and written by the
	 * guest.
	 *
	 * These registers get special treatment described in the section
	 * "Virtualizing MSR-Based APIC Accesses".
	 */
	guest_msr_rw(vmx, vcpuid, MSR_APIC_TPR);
	guest_msr_rw(vmx, vcpuid, MSR_APIC_EOI);
	guest_msr_rw(vmx, vcpuid, MSR_APIC_SELF_IPI);
}

static ulong_t
vmx_fix_cr0(ulong_t cr0)
{
	return ((cr0 | cr0_ones_mask) & ~cr0_zeros_mask);
}

/*
 * Given a live (VMCS-active) cr0 value, and its shadow counterpart, calculate
 * the value observable from the guest.
 */
static ulong_t
vmx_unshadow_cr0(uint64_t cr0, uint64_t shadow)
{
	return ((cr0 & ~cr0_ones_mask) |
	    (shadow & (cr0_zeros_mask | cr0_ones_mask)));
}

static ulong_t
vmx_fix_cr4(ulong_t cr4)
{
	return ((cr4 | cr4_ones_mask) & ~cr4_zeros_mask);
}

/*
 * Given a live (VMCS-active) cr4 value, and its shadow counterpart, calculate
 * the value observable from the guest.
 */
static ulong_t
vmx_unshadow_cr4(uint64_t cr4, uint64_t shadow)
{
	return ((cr4 & ~cr4_ones_mask) |
	    (shadow & (cr4_zeros_mask | cr4_ones_mask)));
}

static void
vpid_free(int vpid)
{
	if (vpid < 0 || vpid > 0xffff)
		panic("vpid_free: invalid vpid %d", vpid);

	/*
	 * VPIDs [0,VM_MAXCPU] are special and are not allocated from
	 * the unit number allocator.
	 */

	if (vpid > VM_MAXCPU)
		hma_vmx_vpid_free((uint16_t)vpid);
}

static void
vpid_alloc(uint16_t *vpid, int num)
{
	int i, x;

	if (num <= 0 || num > VM_MAXCPU)
		panic("invalid number of vpids requested: %d", num);

	/*
	 * If the "enable vpid" execution control is not enabled then the
	 * VPID is required to be 0 for all vcpus.
	 */
	if ((procbased_ctls2 & PROCBASED2_ENABLE_VPID) == 0) {
		for (i = 0; i < num; i++)
			vpid[i] = 0;
		return;
	}

	/*
	 * Allocate a unique VPID for each vcpu from the unit number allocator.
	 */
	for (i = 0; i < num; i++) {
		uint16_t tmp;

		tmp = hma_vmx_vpid_alloc();
		x = (tmp == 0) ? -1 : tmp;

		if (x == -1)
			break;
		else
			vpid[i] = x;
	}

	if (i < num) {
		atomic_add_int(&vpid_alloc_failed, 1);

		/*
		 * If the unit number allocator does not have enough unique
		 * VPIDs then we need to allocate from the [1,VM_MAXCPU] range.
		 *
		 * These VPIDs are not unique across VMs but this does not
		 * affect correctness because the combined mappings are also
		 * tagged with the EP4TA which is unique for each VM.
		 *
		 * It is still sub-optimal because the invvpid will invalidate
		 * combined mappings for a particular VPID across all EP4TAs.
		 */
		while (i-- > 0)
			vpid_free(vpid[i]);

		for (i = 0; i < num; i++)
			vpid[i] = i + 1;
	}
}

static int
vmx_cleanup(void)
{
	/* This is taken care of by the hma registration */
	return (0);
}

static void
vmx_restore(void)
{
	/* No-op on illumos */
}

static int
vmx_init(void)
{
	int error;
	uint64_t fixed0, fixed1;
	uint32_t tmp;
	enum vmx_caps avail_caps = VMX_CAP_NONE;

	/* Check support for primary processor-based VM-execution controls */
	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
	    MSR_VMX_TRUE_PROCBASED_CTLS,
	    PROCBASED_CTLS_ONE_SETTING,
	    PROCBASED_CTLS_ZERO_SETTING, &procbased_ctls);
	if (error) {
		printf("vmx_init: processor does not support desired primary "
		    "processor-based controls\n");
		return (error);
	}

	/* Clear the processor-based ctl bits that are set on demand */
	procbased_ctls &= ~PROCBASED_CTLS_WINDOW_SETTING;

	/* Check support for secondary processor-based VM-execution controls */
	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
	    MSR_VMX_PROCBASED_CTLS2,
	    PROCBASED_CTLS2_ONE_SETTING,
	    PROCBASED_CTLS2_ZERO_SETTING, &procbased_ctls2);
	if (error) {
		printf("vmx_init: processor does not support desired secondary "
		    "processor-based controls\n");
		return (error);
	}

	/* Check support for VPID */
	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
	    MSR_VMX_PROCBASED_CTLS2,
	    PROCBASED2_ENABLE_VPID,
	    0, &tmp);
	if (error == 0)
		procbased_ctls2 |= PROCBASED2_ENABLE_VPID;

	/* Check support for pin-based VM-execution controls */
	error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
	    MSR_VMX_TRUE_PINBASED_CTLS,
	    PINBASED_CTLS_ONE_SETTING,
	    PINBASED_CTLS_ZERO_SETTING, &pinbased_ctls);
	if (error) {
		printf("vmx_init: processor does not support desired "
		    "pin-based controls\n");
		return (error);
	}

	/* Check support for VM-exit controls */
	error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS,
	    VM_EXIT_CTLS_ONE_SETTING,
	    VM_EXIT_CTLS_ZERO_SETTING,
	    &exit_ctls);
	if (error) {
		printf("vmx_init: processor does not support desired "
		    "exit controls\n");
		return (error);
	}

	/* Check support for VM-entry controls */
	error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS, MSR_VMX_TRUE_ENTRY_CTLS,
	    VM_ENTRY_CTLS_ONE_SETTING, VM_ENTRY_CTLS_ZERO_SETTING,
	    &entry_ctls);
	if (error) {
		printf("vmx_init: processor does not support desired "
		    "entry controls\n");
		return (error);
	}

	/*
	 * Check support for optional features by testing them
	 * as individual bits
	 */
	cap_halt_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
	    MSR_VMX_TRUE_PROCBASED_CTLS,
	    PROCBASED_HLT_EXITING, 0,
	    &tmp) == 0);

	cap_monitor_trap = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
	    MSR_VMX_PROCBASED_CTLS,
	    PROCBASED_MTF, 0,
	    &tmp) == 0);

	cap_pause_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
	    MSR_VMX_TRUE_PROCBASED_CTLS,
	    PROCBASED_PAUSE_EXITING, 0,
	    &tmp) == 0);

	cap_invpcid = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
	    MSR_VMX_PROCBASED_CTLS2, PROCBASED2_ENABLE_INVPCID, 0,
	    &tmp) == 0);

	/*
	 * Check for APIC virtualization capabilities:
	 * - TPR shadowing
	 * - Full APICv (with or without x2APIC support)
	 * - Posted interrupt handling
	 */
	if (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, MSR_VMX_TRUE_PROCBASED_CTLS,
	    PROCBASED_USE_TPR_SHADOW, 0, &tmp) == 0) {
		avail_caps |= VMX_CAP_TPR_SHADOW;

		const uint32_t apicv_bits =
		    PROCBASED2_VIRTUALIZE_APIC_ACCESSES |
		    PROCBASED2_APIC_REGISTER_VIRTUALIZATION |
		    PROCBASED2_VIRTUALIZE_X2APIC_MODE |
		    PROCBASED2_VIRTUAL_INTERRUPT_DELIVERY;
		if (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
		    MSR_VMX_PROCBASED_CTLS2, apicv_bits, 0, &tmp) == 0) {
			avail_caps |= VMX_CAP_APICV;

			/*
			 * It may make sense in the future to differentiate
			 * hardware (or software) configurations with APICv but
			 * no support for accelerating x2APIC mode.
			 */
			avail_caps |= VMX_CAP_APICV_X2APIC;

			error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
			    MSR_VMX_TRUE_PINBASED_CTLS,
			    PINBASED_POSTED_INTERRUPT, 0, &tmp);
			if (error == 0) {
				/*
				 * If the PSM-provided interfaces for requesting
				 * and using a PIR IPI vector are present, use
				 * them for posted interrupts.
				 */
				if (psm_get_pir_ipivect != NULL &&
				    psm_send_pir_ipi != NULL) {
					pirvec = psm_get_pir_ipivect();
					avail_caps |= VMX_CAP_APICV_PIR;
				}
			}
		}
	}

	/*
	 * Check for necessary EPT capabilities
	 *
	 * TODO: Properly handle when IA32_VMX_EPT_VPID_HW_AD is missing and the
	 * hypervisor intends to utilize dirty page tracking.
	 */
	uint64_t ept_caps = rdmsr(MSR_IA32_VMX_EPT_VPID_CAP);
	if ((ept_caps & EPT_CAPS_REQUIRED) != EPT_CAPS_REQUIRED) {
		cmn_err(CE_WARN, "!Inadequate EPT capabilities: %lx", ept_caps);
		return (EINVAL);
	}

#ifdef __FreeBSD__
	guest_l1d_flush = (cpu_ia32_arch_caps &
	    IA32_ARCH_CAP_SKIP_L1DFL_VMENTRY) == 0;
	TUNABLE_INT_FETCH("hw.vmm.l1d_flush", &guest_l1d_flush);

	/*
	 * L1D cache flush is enabled.  Use IA32_FLUSH_CMD MSR when
	 * available.  Otherwise fall back to the software flush
	 * method which loads enough data from the kernel text to
	 * flush existing L1D content, both on VMX entry and on NMI
	 * return.
	 */
	if (guest_l1d_flush) {
		if ((cpu_stdext_feature3 & CPUID_STDEXT3_L1D_FLUSH) == 0) {
			guest_l1d_flush_sw = 1;
			TUNABLE_INT_FETCH("hw.vmm.l1d_flush_sw",
			    &guest_l1d_flush_sw);
		}
		if (guest_l1d_flush_sw) {
			if (nmi_flush_l1d_sw <= 1)
				nmi_flush_l1d_sw = 1;
		} else {
			msr_load_list[0].index = MSR_IA32_FLUSH_CMD;
			msr_load_list[0].val = IA32_FLUSH_CMD_L1D;
		}
	}
#else
	/* L1D flushing is taken care of by smt_acquire() and friends */
	guest_l1d_flush = 0;
#endif /* __FreeBSD__ */

	/*
	 * Stash the cr0 and cr4 bits that must be fixed to 0 or 1
	 */
	fixed0 = rdmsr(MSR_VMX_CR0_FIXED0);
	fixed1 = rdmsr(MSR_VMX_CR0_FIXED1);
	cr0_ones_mask = fixed0 & fixed1;
	cr0_zeros_mask = ~fixed0 & ~fixed1;

	/*
	 * Since Unrestricted Guest was already verified present, CR0_PE and
	 * CR0_PG are allowed to be set to zero in VMX non-root operation
	 */
	cr0_ones_mask &= ~(CR0_PG | CR0_PE);

	/*
	 * Do not allow the guest to set CR0_NW or CR0_CD.
	 */
	cr0_zeros_mask |= (CR0_NW | CR0_CD);

	fixed0 = rdmsr(MSR_VMX_CR4_FIXED0);
	fixed1 = rdmsr(MSR_VMX_CR4_FIXED1);
	cr4_ones_mask = fixed0 & fixed1;
	cr4_zeros_mask = ~fixed0 & ~fixed1;

	vmx_msr_init();

	vmx_capabilities = avail_caps;
	vmx_initialized = 1;

	return (0);
}

static void
vmx_trigger_hostintr(int vector)
{
	VERIFY(vector >= 32 && vector <= 255);
	vmx_call_isr(vector - 32);
}

static void *
vmx_vminit(struct vm *vm)
{
	uint16_t vpid[VM_MAXCPU];
	int i, error, datasel;
	struct vmx *vmx;
	uint32_t exc_bitmap;
	uint16_t maxcpus;
	uint32_t proc_ctls, proc2_ctls, pin_ctls;
	uint64_t apic_access_pa = UINT64_MAX;

	vmx = malloc(sizeof (struct vmx), M_VMX, M_WAITOK | M_ZERO);
	if ((uintptr_t)vmx & PAGE_MASK) {
		panic("malloc of struct vmx not aligned on %d byte boundary",
		    PAGE_SIZE);
	}
	vmx->vm = vm;

	vmx->eptp = vmspace_table_root(vm_get_vmspace(vm));

	/*
	 * Clean up EP4TA-tagged guest-physical and combined mappings
	 *
	 * VMX transitions are not required to invalidate any guest physical
	 * mappings. So, it may be possible for stale guest physical mappings
	 * to be present in the processor TLBs.
	 *
	 * Combined mappings for this EP4TA are also invalidated for all VPIDs.
	 */
	hma_vmx_invept_allcpus((uintptr_t)vmx->eptp);

	vmx_msr_bitmap_initialize(vmx);

	vpid_alloc(vpid, VM_MAXCPU);

	/* Grab the established defaults */
	proc_ctls = procbased_ctls;
	proc2_ctls = procbased_ctls2;
	pin_ctls = pinbased_ctls;
	/* For now, default to the available capabilities */
	vmx->vmx_caps = vmx_capabilities;

	if (vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW)) {
		proc_ctls |= PROCBASED_USE_TPR_SHADOW;
		proc_ctls &= ~PROCBASED_CR8_LOAD_EXITING;
		proc_ctls &= ~PROCBASED_CR8_STORE_EXITING;
	}
	if (vmx_cap_en(vmx, VMX_CAP_APICV)) {
		ASSERT(vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW));

		proc2_ctls |= (PROCBASED2_VIRTUALIZE_APIC_ACCESSES |
		    PROCBASED2_APIC_REGISTER_VIRTUALIZATION |
		    PROCBASED2_VIRTUAL_INTERRUPT_DELIVERY);

		/*
		 * Allocate a page of memory to back the APIC access address
		 * for when APICv features are in use.  Guest MMIO accesses
		 * should never actually reach this page, but rather be
		 * intercepted.
		 */
		vmx->apic_access_page = kmem_zalloc(PAGESIZE, KM_SLEEP);
		VERIFY3U((uintptr_t)vmx->apic_access_page & PAGEOFFSET, ==, 0);
		apic_access_pa = vtophys(vmx->apic_access_page);

		error = vm_map_mmio(vm, DEFAULT_APIC_BASE, PAGE_SIZE,
		    apic_access_pa);
		/* XXX this should really return an error to the caller */
		KASSERT(error == 0, ("vm_map_mmio(apicbase) error %d", error));
	}
	if (vmx_cap_en(vmx, VMX_CAP_APICV_PIR)) {
		ASSERT(vmx_cap_en(vmx, VMX_CAP_APICV));

		pin_ctls |= PINBASED_POSTED_INTERRUPT;
	}

	maxcpus = vm_get_maxcpus(vm);
	datasel = vmm_get_host_datasel();
	for (i = 0; i < maxcpus; i++) {
		/*
		 * Cache physical address lookups for various components which
		 * may be required inside the critical_enter() section implied
		 * by VMPTRLD() below.
		 */
		vm_paddr_t msr_bitmap_pa = vtophys(vmx->msr_bitmap[i]);
		vm_paddr_t apic_page_pa = vtophys(&vmx->apic_page[i]);
		vm_paddr_t pir_desc_pa = vtophys(&vmx->pir_desc[i]);

		vmx->vmcs_pa[i] = (uintptr_t)vtophys(&vmx->vmcs[i]);
		vmcs_initialize(&vmx->vmcs[i], vmx->vmcs_pa[i]);

		vmx_msr_guest_init(vmx, i);

		vmcs_load(vmx->vmcs_pa[i]);

		vmcs_write(VMCS_HOST_IA32_PAT, vmm_get_host_pat());
		vmcs_write(VMCS_HOST_IA32_EFER, vmm_get_host_efer());

		/* Load the control registers */
		vmcs_write(VMCS_HOST_CR0, vmm_get_host_cr0());
		vmcs_write(VMCS_HOST_CR4, vmm_get_host_cr4() | CR4_VMXE);

		/* Load the segment selectors */
		vmcs_write(VMCS_HOST_CS_SELECTOR, vmm_get_host_codesel());

		vmcs_write(VMCS_HOST_ES_SELECTOR, datasel);
		vmcs_write(VMCS_HOST_SS_SELECTOR, datasel);
		vmcs_write(VMCS_HOST_DS_SELECTOR, datasel);

		vmcs_write(VMCS_HOST_FS_SELECTOR, vmm_get_host_fssel());
		vmcs_write(VMCS_HOST_GS_SELECTOR, vmm_get_host_gssel());
		vmcs_write(VMCS_HOST_TR_SELECTOR, vmm_get_host_tsssel());

		/*
		 * Configure host sysenter MSRs to be restored on VM exit.
		 * The thread-specific MSR_INTC_SEP_ESP value is loaded in
		 * vmx_run.
		 */
		vmcs_write(VMCS_HOST_IA32_SYSENTER_CS, KCS_SEL);
		vmcs_write(VMCS_HOST_IA32_SYSENTER_EIP,
		    rdmsr(MSR_SYSENTER_EIP_MSR));

		/* instruction pointer */
		if (no_flush_rsb) {
			vmcs_write(VMCS_HOST_RIP, (uint64_t)vmx_exit_guest);
		} else {
			vmcs_write(VMCS_HOST_RIP,
			    (uint64_t)vmx_exit_guest_flush_rsb);
		}

		/* link pointer */
		vmcs_write(VMCS_LINK_POINTER, ~0);

		vmcs_write(VMCS_EPTP, vmx->eptp);
		vmcs_write(VMCS_PIN_BASED_CTLS, pin_ctls);
		vmcs_write(VMCS_PRI_PROC_BASED_CTLS, proc_ctls);
		vmcs_write(VMCS_SEC_PROC_BASED_CTLS, proc2_ctls);
		vmcs_write(VMCS_EXIT_CTLS, exit_ctls);
		vmcs_write(VMCS_ENTRY_CTLS, entry_ctls);
		vmcs_write(VMCS_MSR_BITMAP, msr_bitmap_pa);
		vmcs_write(VMCS_VPID, vpid[i]);

		if (guest_l1d_flush && !guest_l1d_flush_sw) {
			vmcs_write(VMCS_ENTRY_MSR_LOAD,
			    vtophys(&msr_load_list[0]));
			vmcs_write(VMCS_ENTRY_MSR_LOAD_COUNT,
			    nitems(msr_load_list));
			vmcs_write(VMCS_EXIT_MSR_STORE, 0);
			vmcs_write(VMCS_EXIT_MSR_STORE_COUNT, 0);
		}

		/* exception bitmap */
		if (vcpu_trace_exceptions(vm, i))
			exc_bitmap = 0xffffffff;
		else
			exc_bitmap = 1 << IDT_MC;
		vmcs_write(VMCS_EXCEPTION_BITMAP, exc_bitmap);

		vmx->ctx[i].guest_dr6 = DBREG_DR6_RESERVED1;
		vmcs_write(VMCS_GUEST_DR7, DBREG_DR7_RESERVED1);

		if (vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW)) {
			vmcs_write(VMCS_VIRTUAL_APIC, apic_page_pa);
		}

		if (vmx_cap_en(vmx, VMX_CAP_APICV)) {
			vmcs_write(VMCS_APIC_ACCESS, apic_access_pa);
			vmcs_write(VMCS_EOI_EXIT0, 0);
			vmcs_write(VMCS_EOI_EXIT1, 0);
			vmcs_write(VMCS_EOI_EXIT2, 0);
			vmcs_write(VMCS_EOI_EXIT3, 0);
		}
		if (vmx_cap_en(vmx, VMX_CAP_APICV_PIR)) {
			vmcs_write(VMCS_PIR_VECTOR, pirvec);
			vmcs_write(VMCS_PIR_DESC, pir_desc_pa);
		}

		/*
		 * Set up the CR0/4 masks and configure the read shadow state
		 * to the power-on register value from the Intel Sys Arch.
		 *  CR0 - 0x60000010
		 *  CR4 - 0
		 */
		vmcs_write(VMCS_CR0_MASK, cr0_ones_mask | cr0_zeros_mask);
		vmcs_write(VMCS_CR0_SHADOW, 0x60000010);
		vmcs_write(VMCS_CR4_MASK, cr4_ones_mask | cr4_zeros_mask);
		vmcs_write(VMCS_CR4_SHADOW, 0);

		vmcs_clear(vmx->vmcs_pa[i]);

		vmx->cap[i].set = 0;
		vmx->cap[i].proc_ctls = proc_ctls;
		vmx->cap[i].proc_ctls2 = proc2_ctls;
		vmx->cap[i].exc_bitmap = exc_bitmap;

		vmx->state[i].nextrip = ~0;
		vmx->state[i].lastcpu = NOCPU;
		vmx->state[i].vpid = vpid[i];
	}

	return (vmx);
}

static int
vmx_handle_cpuid(struct vm *vm, int vcpu, struct vmxctx *vmxctx)
{
	int handled;

	handled = x86_emulate_cpuid(vm, vcpu, (uint64_t *)&vmxctx->guest_rax,
	    (uint64_t *)&vmxctx->guest_rbx, (uint64_t *)&vmxctx->guest_rcx,
	    (uint64_t *)&vmxctx->guest_rdx);
	return (handled);
}

static __inline void
vmx_run_trace(struct vmx *vmx, int vcpu)
{
#ifdef KTR
	VCPU_CTR1(vmx->vm, vcpu, "Resume execution at %lx", vmcs_guest_rip());
#endif
}

static __inline void
vmx_astpending_trace(struct vmx *vmx, int vcpu, uint64_t rip)
{
#ifdef KTR
	VCPU_CTR1(vmx->vm, vcpu, "astpending vmexit at 0x%0lx", rip);
#endif
}

static VMM_STAT_INTEL(VCPU_INVVPID_SAVED, "Number of vpid invalidations saved");
static VMM_STAT_INTEL(VCPU_INVVPID_DONE, "Number of vpid invalidations done");

#define	INVVPID_TYPE_ADDRESS		0UL
#define	INVVPID_TYPE_SINGLE_CONTEXT	1UL
#define	INVVPID_TYPE_ALL_CONTEXTS	2UL

struct invvpid_desc {
	uint16_t	vpid;
	uint16_t	_res1;
	uint32_t	_res2;
	uint64_t	linear_addr;
};
CTASSERT(sizeof (struct invvpid_desc) == 16);

static __inline void
invvpid(uint64_t type, struct invvpid_desc desc)
{
	int error;

	DTRACE_PROBE3(vmx__invvpid, uint64_t, type, uint16_t, desc.vpid,
	    uint64_t, desc.linear_addr);

	__asm __volatile("invvpid %[desc], %[type];"
	    VMX_SET_ERROR_CODE_ASM
	    : [error] "=r" (error)
	    : [desc] "m" (desc), [type] "r" (type)
	    : "memory");

	if (error) {
		panic("invvpid error %d", error);
	}
}

/*
 * Invalidate guest mappings identified by its VPID from the TLB.
 *
 * This is effectively a flush of the guest TLB, removing only "combined
 * mappings" (to use the VMX parlance).  Actions which modify the EPT structures
 * for the instance (such as unmapping GPAs) would require an 'invept' flush.
 */
static void
vmx_invvpid(struct vmx *vmx, int vcpu, int running)
{
	struct vmxstate *vmxstate;
	struct vmspace *vms;

	vmxstate = &vmx->state[vcpu];
	if (vmxstate->vpid == 0) {
		return;
	}

	if (!running) {
		/*
		 * Set the 'lastcpu' to an invalid host cpu.
		 *
		 * This will invalidate TLB entries tagged with the vcpu's
		 * vpid the next time it runs via vmx_set_pcpu_defaults().
		 */
		vmxstate->lastcpu = NOCPU;
		return;
	}

	/*
	 * Invalidate all mappings tagged with 'vpid'
	 *
	 * This is done when a vCPU moves between host CPUs, where there may be
	 * stale TLB entries for this VPID on the target, or if emulated actions
	 * in the guest CPU have incurred an explicit TLB flush.
	 */
	vms = vm_get_vmspace(vmx->vm);
	if (vmspace_table_gen(vms) == vmx->eptgen[curcpu]) {
		struct invvpid_desc invvpid_desc = {
			.vpid = vmxstate->vpid,
			.linear_addr = 0,
			._res1 = 0,
			._res2 = 0,
		};

		invvpid(INVVPID_TYPE_SINGLE_CONTEXT, invvpid_desc);
		vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_DONE, 1);
	} else {
		/*
		 * The INVVPID can be skipped if an INVEPT is going to be
		 * performed before entering the guest.  The INVEPT will
		 * invalidate combined mappings for the EP4TA associated with
		 * this guest, in all VPIDs.
		 */
		vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_SAVED, 1);
	}
}

static __inline void
invept(uint64_t type, uint64_t eptp)
{
	int error;
	struct invept_desc {
		uint64_t eptp;
		uint64_t _resv;
	} desc = { eptp, 0 };

	DTRACE_PROBE2(vmx__invept, uint64_t, type, uint64_t, eptp);

	__asm __volatile("invept %[desc], %[type];"
	    VMX_SET_ERROR_CODE_ASM
	    : [error] "=r" (error)
	    : [desc] "m" (desc), [type] "r" (type)
	    : "memory");

	if (error != 0) {
		panic("invept error %d", error);
	}
}

static void
vmx_set_pcpu_defaults(struct vmx *vmx, int vcpu)
{
	struct vmxstate *vmxstate;

	/*
	 * Regardless of whether the VM appears to have migrated between CPUs,
	 * save the host sysenter stack pointer.  As it points to the kernel
	 * stack of each thread, the correct value must be maintained for every
	 * trip into the critical section.
	 */
	vmcs_write(VMCS_HOST_IA32_SYSENTER_ESP, rdmsr(MSR_SYSENTER_ESP_MSR));

	/*
	 * Perform any needed TSC_OFFSET adjustment based on TSC_MSR writes or
	 * migration between host CPUs with differing TSC values.
	 */
	vmx_apply_tsc_adjust(vmx, vcpu);

	vmxstate = &vmx->state[vcpu];
	if (vmxstate->lastcpu == curcpu)
		return;

	vmxstate->lastcpu = curcpu;

	vmm_stat_incr(vmx->vm, vcpu, VCPU_MIGRATIONS, 1);

	/* Load the per-CPU IDT address */
	vmcs_write(VMCS_HOST_IDTR_BASE, vmm_get_host_idtrbase());
	vmcs_write(VMCS_HOST_TR_BASE, vmm_get_host_trbase());
	vmcs_write(VMCS_HOST_GDTR_BASE, vmm_get_host_gdtrbase());
	vmcs_write(VMCS_HOST_GS_BASE, vmm_get_host_gsbase());
	vmx_invvpid(vmx, vcpu, 1);
}

/*
 * We depend on 'procbased_ctls' to have the Interrupt Window Exiting bit set.
 */
CTASSERT((PROCBASED_CTLS_ONE_SETTING & PROCBASED_INT_WINDOW_EXITING) != 0);

static __inline void
vmx_set_int_window_exiting(struct vmx *vmx, int vcpu)
{
	if ((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) == 0) {
		vmx->cap[vcpu].proc_ctls |= PROCBASED_INT_WINDOW_EXITING;
		vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
		VCPU_CTR0(vmx->vm, vcpu, "Enabling interrupt window exiting");
	}
}

static __inline void
vmx_clear_int_window_exiting(struct vmx *vmx, int vcpu)
{
	KASSERT((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0,
	    ("intr_window_exiting not set: %x", vmx->cap[vcpu].proc_ctls));
	vmx->cap[vcpu].proc_ctls &= ~PROCBASED_INT_WINDOW_EXITING;
	vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
	VCPU_CTR0(vmx->vm, vcpu, "Disabling interrupt window exiting");
}

static __inline bool
vmx_nmi_window_exiting(struct vmx *vmx, int vcpu)
{
	return ((vmx->cap[vcpu].proc_ctls & PROCBASED_NMI_WINDOW_EXITING) != 0);
}

static __inline void
vmx_set_nmi_window_exiting(struct vmx *vmx, int vcpu)
{
	if (!vmx_nmi_window_exiting(vmx, vcpu)) {
		vmx->cap[vcpu].proc_ctls |= PROCBASED_NMI_WINDOW_EXITING;
		vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
	}
}

static __inline void
vmx_clear_nmi_window_exiting(struct vmx *vmx, int vcpu)
{
	ASSERT(vmx_nmi_window_exiting(vmx, vcpu));
	vmx->cap[vcpu].proc_ctls &= ~PROCBASED_NMI_WINDOW_EXITING;
	vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
}

/*
 * Set the TSC adjustment, taking into account the offsets measured between
 * host physical CPUs.  This is required even if the guest has not set a TSC
 * offset since vCPUs inherit the TSC offset of whatever physical CPU it has
 * migrated onto.  Without this mitigation, un-synched host TSCs will convey
 * the appearance of TSC time-travel to the guest as its vCPUs migrate.
 */
static void
vmx_apply_tsc_adjust(struct vmx *vmx, int vcpu)
{
	const uint64_t offset = vcpu_tsc_offset(vmx->vm, vcpu, true);

	ASSERT(vmx->cap[vcpu].proc_ctls & PROCBASED_TSC_OFFSET);

	if (vmx->tsc_offset_active[vcpu] != offset) {
		vmcs_write(VMCS_TSC_OFFSET, offset);
		vmx->tsc_offset_active[vcpu] = offset;
	}
}

#define	NMI_BLOCKING	(VMCS_INTERRUPTIBILITY_NMI_BLOCKING |	\
			VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)
#define	HWINTR_BLOCKING	(VMCS_INTERRUPTIBILITY_STI_BLOCKING |	\
			VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)

static void
vmx_inject_nmi(struct vmx *vmx, int vcpu)
{
	ASSERT0(vmcs_read(VMCS_GUEST_INTERRUPTIBILITY) & NMI_BLOCKING);
	ASSERT0(vmcs_read(VMCS_ENTRY_INTR_INFO) & VMCS_INTR_VALID);

	/*
	 * Inject the virtual NMI.  The vector must be the NMI IDT entry
	 * or the VMCS entry check will fail.
	 */
	vmcs_write(VMCS_ENTRY_INTR_INFO,
	    IDT_NMI | VMCS_INTR_T_NMI | VMCS_INTR_VALID);

	/* Clear the request */
	vm_nmi_clear(vmx->vm, vcpu);
}

/*
 * Inject exceptions, NMIs, and ExtINTs.
 *
 * The logic behind these is complicated and may involve mutex contention, so
 * the injection is performed without the protection of host CPU interrupts
 * being disabled.  This means a racing notification could be "lost",
 * necessitating a later call to vmx_inject_recheck() to close that window
 * of opportunity.
 */
static enum event_inject_state
vmx_inject_events(struct vmx *vmx, int vcpu, uint64_t rip)
{
	uint64_t entryinfo;
	uint32_t gi, info;
	int vector;
	enum event_inject_state state;

	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
	info = vmcs_read(VMCS_ENTRY_INTR_INFO);
	state = EIS_CAN_INJECT;

	/* Clear any interrupt blocking if the guest %rip has changed */
	if (vmx->state[vcpu].nextrip != rip && (gi & HWINTR_BLOCKING) != 0) {
		gi &= ~HWINTR_BLOCKING;
		vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
	}

	/*
	 * It could be that an interrupt is already pending for injection from
	 * the VMCS.  This would be the case if the vCPU exited for conditions
	 * such as an AST before a vm-entry delivered the injection.
	 */
	if ((info & VMCS_INTR_VALID) != 0) {
		return (EIS_EV_EXISTING | EIS_REQ_EXIT);
	}

	if (vm_entry_intinfo(vmx->vm, vcpu, &entryinfo)) {
		ASSERT(entryinfo & VMCS_INTR_VALID);

		info = entryinfo;
		vector = info & 0xff;
		if (vector == IDT_BP || vector == IDT_OF) {
			/*
			 * VT-x requires #BP and #OF to be injected as software
			 * exceptions.
			 */
			info &= ~VMCS_INTR_T_MASK;
			info |= VMCS_INTR_T_SWEXCEPTION;
		}

		if (info & VMCS_INTR_DEL_ERRCODE) {
			vmcs_write(VMCS_ENTRY_EXCEPTION_ERROR, entryinfo >> 32);
		}

		vmcs_write(VMCS_ENTRY_INTR_INFO, info);
		state = EIS_EV_INJECTED;
	}

	if (vm_nmi_pending(vmx->vm, vcpu)) {
		/*
		 * If there are no conditions blocking NMI injection then inject
		 * it directly here otherwise enable "NMI window exiting" to
		 * inject it as soon as we can.
		 *
		 * According to the Intel manual, some CPUs do not allow NMI
		 * injection when STI_BLOCKING is active.  That check is
		 * enforced here, regardless of CPU capability.  If running on a
		 * CPU without such a restriction it will immediately exit and
		 * the NMI will be injected in the "NMI window exiting" handler.
		 */
		if ((gi & (HWINTR_BLOCKING | NMI_BLOCKING)) == 0) {
			if (state == EIS_CAN_INJECT) {
				vmx_inject_nmi(vmx, vcpu);
				state = EIS_EV_INJECTED;
			} else {
				return (state | EIS_REQ_EXIT);
			}
		} else {
			vmx_set_nmi_window_exiting(vmx, vcpu);
		}
	}

	if (vm_extint_pending(vmx->vm, vcpu)) {
		if (state != EIS_CAN_INJECT) {
			return (state | EIS_REQ_EXIT);
		}
		if ((gi & HWINTR_BLOCKING) != 0 ||
		    (vmcs_read(VMCS_GUEST_RFLAGS) & PSL_I) == 0) {
			return (EIS_GI_BLOCK);
		}

		/* Ask the legacy pic for a vector to inject */
		vatpic_pending_intr(vmx->vm, &vector);

		/*
		 * From the Intel SDM, Volume 3, Section "Maskable
		 * Hardware Interrupts":
		 * - maskable interrupt vectors [0,255] can be delivered
		 *   through the INTR pin.
		 */
		KASSERT(vector >= 0 && vector <= 255,
		    ("invalid vector %d from INTR", vector));

		/* Inject the interrupt */
		vmcs_write(VMCS_ENTRY_INTR_INFO,
		    VMCS_INTR_T_HWINTR | VMCS_INTR_VALID | vector);

		vm_extint_clear(vmx->vm, vcpu);
		vatpic_intr_accepted(vmx->vm, vector);
		state = EIS_EV_INJECTED;
	}

	return (state);
}

/*
 * Inject any interrupts pending on the vLAPIC.
 *
 * This is done with host CPU interrupts disabled so notification IPIs, either
 * from the standard vCPU notification or APICv posted interrupts, will be
 * queued on the host APIC and recognized when entering VMX context.
 */
static enum event_inject_state
vmx_inject_vlapic(struct vmx *vmx, int vcpu, struct vlapic *vlapic)
{
	int vector;

	if (!vlapic_pending_intr(vlapic, &vector)) {
		return (EIS_CAN_INJECT);
	}

	/*
	 * From the Intel SDM, Volume 3, Section "Maskable
	 * Hardware Interrupts":
	 * - maskable interrupt vectors [16,255] can be delivered
	 *   through the local APIC.
	 */
	KASSERT(vector >= 16 && vector <= 255,
	    ("invalid vector %d from local APIC", vector));

	if (vmx_cap_en(vmx, VMX_CAP_APICV)) {
		uint16_t status_old = vmcs_read(VMCS_GUEST_INTR_STATUS);
		uint16_t status_new = (status_old & 0xff00) | vector;

		/*
		 * The APICv state will have been synced into the vLAPIC
		 * as part of vlapic_pending_intr().  Prepare the VMCS
		 * for the to-be-injected pending interrupt.
		 */
		if (status_new > status_old) {
			vmcs_write(VMCS_GUEST_INTR_STATUS, status_new);
			VCPU_CTR2(vlapic->vm, vlapic->vcpuid,
			    "vmx_inject_interrupts: guest_intr_status "
			    "changed from 0x%04x to 0x%04x",
			    status_old, status_new);
		}

		/*
		 * Ensure VMCS state regarding EOI traps is kept in sync
		 * with the TMRs in the vlapic.
		 */
		vmx_apicv_sync_tmr(vlapic);

		/*
		 * The rest of the injection process for injecting the
		 * interrupt(s) is handled by APICv. It does not preclude other
		 * event injection from occurring.
		 */
		return (EIS_CAN_INJECT);
	}

	ASSERT0(vmcs_read(VMCS_ENTRY_INTR_INFO) & VMCS_INTR_VALID);

	/* Does guest interruptability block injection? */
	if ((vmcs_read(VMCS_GUEST_INTERRUPTIBILITY) & HWINTR_BLOCKING) != 0 ||
	    (vmcs_read(VMCS_GUEST_RFLAGS) & PSL_I) == 0) {
		return (EIS_GI_BLOCK);
	}

	/* Inject the interrupt */
	vmcs_write(VMCS_ENTRY_INTR_INFO,
	    VMCS_INTR_T_HWINTR | VMCS_INTR_VALID | vector);

	/* Update the Local APIC ISR */
	vlapic_intr_accepted(vlapic, vector);

	return (EIS_EV_INJECTED);
}

/*
 * Re-check for events to be injected.
 *
 * Once host CPU interrupts are disabled, check for the presence of any events
 * which require injection processing.  If an exit is required upon injection,
 * or once the guest becomes interruptable, that will be configured too.
 */
static bool
vmx_inject_recheck(struct vmx *vmx, int vcpu, enum event_inject_state state)
{
	if (state == EIS_CAN_INJECT) {
		if (vm_nmi_pending(vmx->vm, vcpu) &&
		    !vmx_nmi_window_exiting(vmx, vcpu)) {
			/* queued NMI not blocked by NMI-window-exiting */
			return (true);
		}
		if (vm_extint_pending(vmx->vm, vcpu)) {
			/* queued ExtINT not blocked by existing injection */
			return (true);
		}
	} else {
		if ((state & EIS_REQ_EXIT) != 0) {
			/*
			 * Use a self-IPI to force an immediate exit after
			 * event injection has occurred.
			 */
			poke_cpu(CPU->cpu_id);
		} else {
			/*
			 * If any event is being injected, an exit immediately
			 * upon becoming interruptable again will allow pending
			 * or newly queued events to be injected in a timely
			 * manner.
			 */
			vmx_set_int_window_exiting(vmx, vcpu);
		}
	}
	return (false);
}

/*
 * If the Virtual NMIs execution control is '1' then the logical processor
 * tracks virtual-NMI blocking in the Guest Interruptibility-state field of
 * the VMCS.  An IRET instruction in VMX non-root operation will remove any
 * virtual-NMI blocking.
 *
 * This unblocking occurs even if the IRET causes a fault.  In this case the
 * hypervisor needs to restore virtual-NMI blocking before resuming the guest.
 */
static void
vmx_restore_nmi_blocking(struct vmx *vmx, int vcpuid)
{
	uint32_t gi;

	VCPU_CTR0(vmx->vm, vcpuid, "Restore Virtual-NMI blocking");
	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
	gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
	vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
}

static void
vmx_clear_nmi_blocking(struct vmx *vmx, int vcpuid)
{
	uint32_t gi;

	VCPU_CTR0(vmx->vm, vcpuid, "Clear Virtual-NMI blocking");
	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
	gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
	vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
}

static void
vmx_assert_nmi_blocking(struct vmx *vmx, int vcpuid)
{
	uint32_t gi;

	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
	KASSERT(gi & VMCS_INTERRUPTIBILITY_NMI_BLOCKING,
	    ("NMI blocking is not in effect %x", gi));
}

static int
vmx_emulate_xsetbv(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
{
	struct vmxctx *vmxctx;
	uint64_t xcrval;
	const struct xsave_limits *limits;

	vmxctx = &vmx->ctx[vcpu];
	limits = vmm_get_xsave_limits();

	/*
	 * Note that the processor raises a GP# fault on its own if
	 * xsetbv is executed for CPL != 0, so we do not have to
	 * emulate that fault here.
	 */

	/* Only xcr0 is supported. */
	if (vmxctx->guest_rcx != 0) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/* We only handle xcr0 if both the host and guest have XSAVE enabled. */
	if (!limits->xsave_enabled ||
	    !(vmcs_read(VMCS_GUEST_CR4) & CR4_XSAVE)) {
		vm_inject_ud(vmx->vm, vcpu);
		return (HANDLED);
	}

	xcrval = vmxctx->guest_rdx << 32 | (vmxctx->guest_rax & 0xffffffff);
	if ((xcrval & ~limits->xcr0_allowed) != 0) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	if (!(xcrval & XFEATURE_ENABLED_X87)) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/* AVX (YMM_Hi128) requires SSE. */
	if (xcrval & XFEATURE_ENABLED_AVX &&
	    (xcrval & XFEATURE_AVX) != XFEATURE_AVX) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/*
	 * AVX512 requires base AVX (YMM_Hi128) as well as OpMask,
	 * ZMM_Hi256, and Hi16_ZMM.
	 */
	if (xcrval & XFEATURE_AVX512 &&
	    (xcrval & (XFEATURE_AVX512 | XFEATURE_AVX)) !=
	    (XFEATURE_AVX512 | XFEATURE_AVX)) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/*
	 * Intel MPX requires both bound register state flags to be
	 * set.
	 */
	if (((xcrval & XFEATURE_ENABLED_BNDREGS) != 0) !=
	    ((xcrval & XFEATURE_ENABLED_BNDCSR) != 0)) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/*
	 * This runs "inside" vmrun() with the guest's FPU state, so
	 * modifying xcr0 directly modifies the guest's xcr0, not the
	 * host's.
	 */
	load_xcr(0, xcrval);
	return (HANDLED);
}

static uint64_t
vmx_get_guest_reg(struct vmx *vmx, int vcpu, int ident)
{
	const struct vmxctx *vmxctx;

	vmxctx = &vmx->ctx[vcpu];

	switch (ident) {
	case 0:
		return (vmxctx->guest_rax);
	case 1:
		return (vmxctx->guest_rcx);
	case 2:
		return (vmxctx->guest_rdx);
	case 3:
		return (vmxctx->guest_rbx);
	case 4:
		return (vmcs_read(VMCS_GUEST_RSP));
	case 5:
		return (vmxctx->guest_rbp);
	case 6:
		return (vmxctx->guest_rsi);
	case 7:
		return (vmxctx->guest_rdi);
	case 8:
		return (vmxctx->guest_r8);
	case 9:
		return (vmxctx->guest_r9);
	case 10:
		return (vmxctx->guest_r10);
	case 11:
		return (vmxctx->guest_r11);
	case 12:
		return (vmxctx->guest_r12);
	case 13:
		return (vmxctx->guest_r13);
	case 14:
		return (vmxctx->guest_r14);
	case 15:
		return (vmxctx->guest_r15);
	default:
		panic("invalid vmx register %d", ident);
	}
}

static void
vmx_set_guest_reg(struct vmx *vmx, int vcpu, int ident, uint64_t regval)
{
	struct vmxctx *vmxctx;

	vmxctx = &vmx->ctx[vcpu];

	switch (ident) {
	case 0:
		vmxctx->guest_rax = regval;
		break;
	case 1:
		vmxctx->guest_rcx = regval;
		break;
	case 2:
		vmxctx->guest_rdx = regval;
		break;
	case 3:
		vmxctx->guest_rbx = regval;
		break;
	case 4:
		vmcs_write(VMCS_GUEST_RSP, regval);
		break;
	case 5:
		vmxctx->guest_rbp = regval;
		break;
	case 6:
		vmxctx->guest_rsi = regval;
		break;
	case 7:
		vmxctx->guest_rdi = regval;
		break;
	case 8:
		vmxctx->guest_r8 = regval;
		break;
	case 9:
		vmxctx->guest_r9 = regval;
		break;
	case 10:
		vmxctx->guest_r10 = regval;
		break;
	case 11:
		vmxctx->guest_r11 = regval;
		break;
	case 12:
		vmxctx->guest_r12 = regval;
		break;
	case 13:
		vmxctx->guest_r13 = regval;
		break;
	case 14:
		vmxctx->guest_r14 = regval;
		break;
	case 15:
		vmxctx->guest_r15 = regval;
		break;
	default:
		panic("invalid vmx register %d", ident);
	}
}

static int
vmx_emulate_cr0_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
{
	uint64_t crval, regval;

	/* We only handle mov to %cr0 at this time */
	if ((exitqual & 0xf0) != 0x00)
		return (UNHANDLED);

	regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);

	vmcs_write(VMCS_CR0_SHADOW, regval);

	crval = regval | cr0_ones_mask;
	crval &= ~cr0_zeros_mask;

	const uint64_t old = vmcs_read(VMCS_GUEST_CR0);
	const uint64_t diff = crval ^ old;
	/* Flush the TLB if the paging or write-protect bits are changing */
	if ((diff & CR0_PG) != 0 || (diff & CR0_WP) != 0) {
		vmx_invvpid(vmx, vcpu, 1);
	}

	vmcs_write(VMCS_GUEST_CR0, crval);

	if (regval & CR0_PG) {
		uint64_t efer, entry_ctls;

		/*
		 * If CR0.PG is 1 and EFER.LME is 1 then EFER.LMA and
		 * the "IA-32e mode guest" bit in VM-entry control must be
		 * equal.
		 */
		efer = vmcs_read(VMCS_GUEST_IA32_EFER);
		if (efer & EFER_LME) {
			efer |= EFER_LMA;
			vmcs_write(VMCS_GUEST_IA32_EFER, efer);
			entry_ctls = vmcs_read(VMCS_ENTRY_CTLS);
			entry_ctls |= VM_ENTRY_GUEST_LMA;
			vmcs_write(VMCS_ENTRY_CTLS, entry_ctls);
		}
	}

	return (HANDLED);
}

static int
vmx_emulate_cr4_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
{
	uint64_t crval, regval;

	/* We only handle mov to %cr4 at this time */
	if ((exitqual & 0xf0) != 0x00)
		return (UNHANDLED);

	regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);

	vmcs_write(VMCS_CR4_SHADOW, regval);

	crval = regval | cr4_ones_mask;
	crval &= ~cr4_zeros_mask;
	vmcs_write(VMCS_GUEST_CR4, crval);

	return (HANDLED);
}

static int
vmx_emulate_cr8_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
{
	struct vlapic *vlapic;
	uint64_t cr8;
	int regnum;

	/* We only handle mov %cr8 to/from a register at this time. */
	if ((exitqual & 0xe0) != 0x00) {
		return (UNHANDLED);
	}

	vlapic = vm_lapic(vmx->vm, vcpu);
	regnum = (exitqual >> 8) & 0xf;
	if (exitqual & 0x10) {
		cr8 = vlapic_get_cr8(vlapic);
		vmx_set_guest_reg(vmx, vcpu, regnum, cr8);
	} else {
		cr8 = vmx_get_guest_reg(vmx, vcpu, regnum);
		vlapic_set_cr8(vlapic, cr8);
	}

	return (HANDLED);
}

/*
 * From section "Guest Register State" in the Intel SDM: CPL = SS.DPL
 */
static int
vmx_cpl(void)
{
	uint32_t ssar;

	ssar = vmcs_read(VMCS_GUEST_SS_ACCESS_RIGHTS);
	return ((ssar >> 5) & 0x3);
}

static enum vm_cpu_mode
vmx_cpu_mode(void)
{
	uint32_t csar;

	if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LMA) {
		csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS);
		if (csar & 0x2000)
			return (CPU_MODE_64BIT);	/* CS.L = 1 */
		else
			return (CPU_MODE_COMPATIBILITY);
	} else if (vmcs_read(VMCS_GUEST_CR0) & CR0_PE) {
		return (CPU_MODE_PROTECTED);
	} else {
		return (CPU_MODE_REAL);
	}
}

static enum vm_paging_mode
vmx_paging_mode(void)
{
	if (!(vmcs_read(VMCS_GUEST_CR0) & CR0_PG))
		return (PAGING_MODE_FLAT);
	if (!(vmcs_read(VMCS_GUEST_CR4) & CR4_PAE))
		return (PAGING_MODE_32);
	if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LME)
		return (PAGING_MODE_64);
	else
		return (PAGING_MODE_PAE);
}

static void
vmx_paging_info(struct vm_guest_paging *paging)
{
	paging->cr3 = vmcs_guest_cr3();
	paging->cpl = vmx_cpl();
	paging->cpu_mode = vmx_cpu_mode();
	paging->paging_mode = vmx_paging_mode();
}

static void
vmexit_mmio_emul(struct vm_exit *vmexit, struct vie *vie, uint64_t gpa,
    uint64_t gla)
{
	struct vm_guest_paging paging;
	uint32_t csar;

	vmexit->exitcode = VM_EXITCODE_MMIO_EMUL;
	vmexit->inst_length = 0;
	vmexit->u.mmio_emul.gpa = gpa;
	vmexit->u.mmio_emul.gla = gla;
	vmx_paging_info(&paging);

	switch (paging.cpu_mode) {
	case CPU_MODE_REAL:
		vmexit->u.mmio_emul.cs_base = vmcs_read(VMCS_GUEST_CS_BASE);
		vmexit->u.mmio_emul.cs_d = 0;
		break;
	case CPU_MODE_PROTECTED:
	case CPU_MODE_COMPATIBILITY:
		vmexit->u.mmio_emul.cs_base = vmcs_read(VMCS_GUEST_CS_BASE);
		csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS);
		vmexit->u.mmio_emul.cs_d = SEG_DESC_DEF32(csar);
		break;
	default:
		vmexit->u.mmio_emul.cs_base = 0;
		vmexit->u.mmio_emul.cs_d = 0;
		break;
	}

	vie_init_mmio(vie, NULL, 0, &paging, gpa);
}

static void
vmexit_inout(struct vm_exit *vmexit, struct vie *vie, uint64_t qual,
    uint32_t eax)
{
	struct vm_guest_paging paging;
	struct vm_inout *inout;

	inout = &vmexit->u.inout;

	inout->bytes = (qual & 0x7) + 1;
	inout->flags = 0;
	inout->flags |= (qual & 0x8) ? INOUT_IN : 0;
	inout->flags |= (qual & 0x10) ? INOUT_STR : 0;
	inout->flags |= (qual & 0x20) ? INOUT_REP : 0;
	inout->port = (uint16_t)(qual >> 16);
	inout->eax = eax;
	if (inout->flags & INOUT_STR) {
		uint64_t inst_info;

		inst_info = vmcs_read(VMCS_EXIT_INSTRUCTION_INFO);

		/*
		 * According to the SDM, bits 9:7 encode the address size of the
		 * ins/outs operation, but only values 0/1/2 are expected,
		 * corresponding to 16/32/64 bit sizes.
		 */
		inout->addrsize = 2 << BITX(inst_info, 9, 7);
		VERIFY(inout->addrsize == 2 || inout->addrsize == 4 ||
		    inout->addrsize == 8);

		if (inout->flags & INOUT_IN) {
			/*
			 * The bits describing the segment in INSTRUCTION_INFO
			 * are not defined for ins, leaving it to system
			 * software to assume %es (encoded as 0)
			 */
			inout->segment = 0;
		} else {
			/*
			 * Bits 15-17 encode the segment for OUTS.
			 * This value follows the standard x86 segment order.
			 */
			inout->segment = (inst_info >> 15) & 0x7;
		}
	}

	vmexit->exitcode = VM_EXITCODE_INOUT;
	vmx_paging_info(&paging);
	vie_init_inout(vie, inout, vmexit->inst_length, &paging);

	/* The in/out emulation will handle advancing %rip */
	vmexit->inst_length = 0;
}

static int
ept_fault_type(uint64_t ept_qual)
{
	int fault_type;

	if (ept_qual & EPT_VIOLATION_DATA_WRITE)
		fault_type = PROT_WRITE;
	else if (ept_qual & EPT_VIOLATION_INST_FETCH)
		fault_type = PROT_EXEC;
	else
		fault_type = PROT_READ;

	return (fault_type);
}

static bool
ept_emulation_fault(uint64_t ept_qual)
{
	int read, write;

	/* EPT fault on an instruction fetch doesn't make sense here */
	if (ept_qual & EPT_VIOLATION_INST_FETCH)
		return (false);

	/* EPT fault must be a read fault or a write fault */
	read = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0;
	write = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0;
	if ((read | write) == 0)
		return (false);

	/*
	 * The EPT violation must have been caused by accessing a
	 * guest-physical address that is a translation of a guest-linear
	 * address.
	 */
	if ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 ||
	    (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) {
		return (false);
	}

	return (true);
}

static __inline int
apic_access_virtualization(struct vmx *vmx, int vcpuid)
{
	uint32_t proc_ctls2;

	proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
	return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) ? 1 : 0);
}

static __inline int
x2apic_virtualization(struct vmx *vmx, int vcpuid)
{
	uint32_t proc_ctls2;

	proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
	return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_X2APIC_MODE) ? 1 : 0);
}

static int
vmx_handle_apic_write(struct vmx *vmx, int vcpuid, struct vlapic *vlapic,
    uint64_t qual)
{
	const uint_t offset = APIC_WRITE_OFFSET(qual);

	if (!apic_access_virtualization(vmx, vcpuid)) {
		/*
		 * In general there should not be any APIC write VM-exits
		 * unless APIC-access virtualization is enabled.
		 *
		 * However self-IPI virtualization can legitimately trigger
		 * an APIC-write VM-exit so treat it specially.
		 */
		if (x2apic_virtualization(vmx, vcpuid) &&
		    offset == APIC_OFFSET_SELF_IPI) {
			const uint32_t *apic_regs =
			    (uint32_t *)(vlapic->apic_page);
			const uint32_t vector =
			    apic_regs[APIC_OFFSET_SELF_IPI / 4];

			vlapic_self_ipi_handler(vlapic, vector);
			return (HANDLED);
		} else
			return (UNHANDLED);
	}

	switch (offset) {
	case APIC_OFFSET_ID:
		vlapic_id_write_handler(vlapic);
		break;
	case APIC_OFFSET_LDR:
		vlapic_ldr_write_handler(vlapic);
		break;
	case APIC_OFFSET_DFR:
		vlapic_dfr_write_handler(vlapic);
		break;
	case APIC_OFFSET_SVR:
		vlapic_svr_write_handler(vlapic);
		break;
	case APIC_OFFSET_ESR:
		vlapic_esr_write_handler(vlapic);
		break;
	case APIC_OFFSET_ICR_LOW:
		vlapic_icrlo_write_handler(vlapic);
		break;
	case APIC_OFFSET_CMCI_LVT:
	case APIC_OFFSET_TIMER_LVT ... APIC_OFFSET_ERROR_LVT:
		vlapic_lvt_write_handler(vlapic, offset);
		break;
	case APIC_OFFSET_TIMER_ICR:
		vlapic_icrtmr_write_handler(vlapic);
		break;
	case APIC_OFFSET_TIMER_DCR:
		vlapic_dcr_write_handler(vlapic);
		break;
	default:
		return (UNHANDLED);
	}
	return (HANDLED);
}

static bool
apic_access_fault(struct vmx *vmx, int vcpuid, uint64_t gpa)
{
	if (apic_access_virtualization(vmx, vcpuid) &&
	    (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE))
		return (true);
	else
		return (false);
}

static int
vmx_handle_apic_access(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit)
{
	uint64_t qual;
	int access_type, offset, allowed;
	struct vie *vie;

	if (!apic_access_virtualization(vmx, vcpuid))
		return (UNHANDLED);

	qual = vmexit->u.vmx.exit_qualification;
	access_type = APIC_ACCESS_TYPE(qual);
	offset = APIC_ACCESS_OFFSET(qual);

	allowed = 0;
	if (access_type == 0) {
		/*
		 * Read data access to the following registers is expected.
		 */
		switch (offset) {
		case APIC_OFFSET_APR:
		case APIC_OFFSET_PPR:
		case APIC_OFFSET_RRR:
		case APIC_OFFSET_CMCI_LVT:
		case APIC_OFFSET_TIMER_CCR:
			allowed = 1;
			break;
		default:
			break;
		}
	} else if (access_type == 1) {
		/*
		 * Write data access to the following registers is expected.
		 */
		switch (offset) {
		case APIC_OFFSET_VER:
		case APIC_OFFSET_APR:
		case APIC_OFFSET_PPR:
		case APIC_OFFSET_RRR:
		case APIC_OFFSET_ISR0 ... APIC_OFFSET_ISR7:
		case APIC_OFFSET_TMR0 ... APIC_OFFSET_TMR7:
		case APIC_OFFSET_IRR0 ... APIC_OFFSET_IRR7:
APIC_OFFSET_IRR7: 2016 case APIC_OFFSET_CMCI_LVT: 2017 case APIC_OFFSET_TIMER_CCR: 2018 allowed = 1; 2019 break; 2020 default: 2021 break; 2022 } 2023 } 2024 2025 if (allowed) { 2026 vie = vm_vie_ctx(vmx->vm, vcpuid); 2027 vmexit_mmio_emul(vmexit, vie, DEFAULT_APIC_BASE + offset, 2028 VIE_INVALID_GLA); 2029 } 2030 2031 /* 2032 * Regardless of whether the APIC-access is allowed this handler 2033 * always returns UNHANDLED: 2034 * - if the access is allowed then it is handled by emulating the 2035 * instruction that caused the VM-exit (outside the critical section) 2036 * - if the access is not allowed then it will be converted to an 2037 * exitcode of VM_EXITCODE_VMX and will be dealt with in userland. 2038 */ 2039 return (UNHANDLED); 2040 } 2041 2042 static enum task_switch_reason 2043 vmx_task_switch_reason(uint64_t qual) 2044 { 2045 int reason; 2046 2047 reason = (qual >> 30) & 0x3; 2048 switch (reason) { 2049 case 0: 2050 return (TSR_CALL); 2051 case 1: 2052 return (TSR_IRET); 2053 case 2: 2054 return (TSR_JMP); 2055 case 3: 2056 return (TSR_IDT_GATE); 2057 default: 2058 panic("%s: invalid reason %d", __func__, reason); 2059 } 2060 } 2061 2062 static int 2063 vmx_handle_msr(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit, 2064 bool is_wrmsr) 2065 { 2066 struct vmxctx *vmxctx = &vmx->ctx[vcpuid]; 2067 const uint32_t ecx = vmxctx->guest_rcx; 2068 vm_msr_result_t res; 2069 uint64_t val = 0; 2070 2071 if (is_wrmsr) { 2072 vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_WRMSR, 1); 2073 val = vmxctx->guest_rdx << 32 | (uint32_t)vmxctx->guest_rax; 2074 2075 if (vlapic_owned_msr(ecx)) { 2076 struct vlapic *vlapic = vm_lapic(vmx->vm, vcpuid); 2077 2078 res = vlapic_wrmsr(vlapic, ecx, val); 2079 } else { 2080 res = vmx_wrmsr(vmx, vcpuid, ecx, val); 2081 } 2082 } else { 2083 vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_RDMSR, 1); 2084 2085 if (vlapic_owned_msr(ecx)) { 2086 struct vlapic *vlapic = vm_lapic(vmx->vm, vcpuid); 2087 2088 res = vlapic_rdmsr(vlapic, ecx, &val); 2089 } else { 2090 res = vmx_rdmsr(vmx, vcpuid, ecx, &val); 2091 } 2092 } 2093 2094 switch (res) { 2095 case VMR_OK: 2096 /* Store rdmsr result in the appropriate registers */ 2097 if (!is_wrmsr) { 2098 vmxctx->guest_rax = (uint32_t)val; 2099 vmxctx->guest_rdx = val >> 32; 2100 } 2101 return (HANDLED); 2102 case VMR_GP: 2103 vm_inject_gp(vmx->vm, vcpuid); 2104 return (HANDLED); 2105 case VMR_UNHANLDED: 2106 vmexit->exitcode = is_wrmsr ? 
2107 VM_EXITCODE_WRMSR : VM_EXITCODE_RDMSR; 2108 vmexit->u.msr.code = ecx; 2109 vmexit->u.msr.wval = val; 2110 return (UNHANDLED); 2111 default: 2112 panic("unexpected msr result %u\n", res); 2113 } 2114 } 2115 2116 static int 2117 vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit) 2118 { 2119 int error, errcode, errcode_valid, handled; 2120 struct vmxctx *vmxctx; 2121 struct vie *vie; 2122 struct vlapic *vlapic; 2123 struct vm_task_switch *ts; 2124 uint32_t idtvec_info, idtvec_err, intr_info; 2125 uint32_t intr_type, intr_vec, reason; 2126 uint64_t exitintinfo, qual, gpa; 2127 2128 CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_VIRTUAL_NMI) != 0); 2129 CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_NMI_EXITING) != 0); 2130 2131 handled = UNHANDLED; 2132 vmxctx = &vmx->ctx[vcpu]; 2133 2134 qual = vmexit->u.vmx.exit_qualification; 2135 reason = vmexit->u.vmx.exit_reason; 2136 vmexit->exitcode = VM_EXITCODE_BOGUS; 2137 2138 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_COUNT, 1); 2139 SDT_PROBE3(vmm, vmx, exit, entry, vmx, vcpu, vmexit); 2140 2141 /* 2142 * VM-entry failures during or after loading guest state. 2143 * 2144 * These VM-exits are uncommon but must be handled specially 2145 * as most VM-exit fields are not populated as usual. 2146 */ 2147 if (reason == EXIT_REASON_MCE_DURING_ENTRY) { 2148 VCPU_CTR0(vmx->vm, vcpu, "Handling MCE during VM-entry"); 2149 vmm_call_trap(T_MCE); 2150 return (1); 2151 } 2152 2153 /* 2154 * VM exits that can be triggered during event delivery need to 2155 * be handled specially by re-injecting the event if the IDT 2156 * vectoring information field's valid bit is set. 2157 * 2158 * See "Information for VM Exits During Event Delivery" in Intel SDM 2159 * for details. 2160 */ 2161 idtvec_info = vmcs_idt_vectoring_info(); 2162 if (idtvec_info & VMCS_IDT_VEC_VALID) { 2163 idtvec_info &= ~(1 << 12); /* clear undefined bit */ 2164 exitintinfo = idtvec_info; 2165 if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) { 2166 idtvec_err = vmcs_idt_vectoring_err(); 2167 exitintinfo |= (uint64_t)idtvec_err << 32; 2168 } 2169 error = vm_exit_intinfo(vmx->vm, vcpu, exitintinfo); 2170 KASSERT(error == 0, ("%s: vm_set_intinfo error %d", 2171 __func__, error)); 2172 2173 /* 2174 * If 'virtual NMIs' are being used and the VM-exit 2175 * happened while injecting an NMI during the previous 2176 * VM-entry, then clear "blocking by NMI" in the 2177 * Guest Interruptibility-State so the NMI can be 2178 * reinjected on the subsequent VM-entry. 2179 * 2180 * However, if the NMI was being delivered through a task 2181 * gate, then the new task must start execution with NMIs 2182 * blocked so don't clear NMI blocking in this case. 2183 */ 2184 intr_type = idtvec_info & VMCS_INTR_T_MASK; 2185 if (intr_type == VMCS_INTR_T_NMI) { 2186 if (reason != EXIT_REASON_TASK_SWITCH) 2187 vmx_clear_nmi_blocking(vmx, vcpu); 2188 else 2189 vmx_assert_nmi_blocking(vmx, vcpu); 2190 } 2191 2192 /* 2193 * Update VM-entry instruction length if the event being 2194 * delivered was a software interrupt or software exception. 
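 * Hardware interrupts, NMIs, and hardware exceptions do not consume
 * the VM-entry instruction length field, so no update is needed for
 * those event types.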
2195 */ 2196 if (intr_type == VMCS_INTR_T_SWINTR || 2197 intr_type == VMCS_INTR_T_PRIV_SWEXCEPTION || 2198 intr_type == VMCS_INTR_T_SWEXCEPTION) { 2199 vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length); 2200 } 2201 } 2202 2203 switch (reason) { 2204 case EXIT_REASON_TRIPLE_FAULT: 2205 (void) vm_suspend(vmx->vm, VM_SUSPEND_TRIPLEFAULT); 2206 handled = HANDLED; 2207 break; 2208 case EXIT_REASON_TASK_SWITCH: 2209 ts = &vmexit->u.task_switch; 2210 ts->tsssel = qual & 0xffff; 2211 ts->reason = vmx_task_switch_reason(qual); 2212 ts->ext = 0; 2213 ts->errcode_valid = 0; 2214 vmx_paging_info(&ts->paging); 2215 /* 2216 * If the task switch was due to a CALL, JMP, IRET, software 2217 * interrupt (INT n) or software exception (INT3, INTO), 2218 * then the saved %rip references the instruction that caused 2219 * the task switch. The instruction length field in the VMCS 2220 * is valid in this case. 2221 * 2222 * In all other cases (e.g., NMI, hardware exception) the 2223 * saved %rip is one that would have been saved in the old TSS 2224 * had the task switch completed normally so the instruction 2225 * length field is not needed in this case and is explicitly 2226 * set to 0. 2227 */ 2228 if (ts->reason == TSR_IDT_GATE) { 2229 KASSERT(idtvec_info & VMCS_IDT_VEC_VALID, 2230 ("invalid idtvec_info %x for IDT task switch", 2231 idtvec_info)); 2232 intr_type = idtvec_info & VMCS_INTR_T_MASK; 2233 if (intr_type != VMCS_INTR_T_SWINTR && 2234 intr_type != VMCS_INTR_T_SWEXCEPTION && 2235 intr_type != VMCS_INTR_T_PRIV_SWEXCEPTION) { 2236 /* Task switch triggered by external event */ 2237 ts->ext = 1; 2238 vmexit->inst_length = 0; 2239 if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) { 2240 ts->errcode_valid = 1; 2241 ts->errcode = vmcs_idt_vectoring_err(); 2242 } 2243 } 2244 } 2245 vmexit->exitcode = VM_EXITCODE_TASK_SWITCH; 2246 SDT_PROBE4(vmm, vmx, exit, taskswitch, vmx, vcpu, vmexit, ts); 2247 VCPU_CTR4(vmx->vm, vcpu, "task switch reason %d, tss 0x%04x, " 2248 "%s errcode 0x%016lx", ts->reason, ts->tsssel, 2249 ts->ext ? 
"external" : "internal", 2250 ((uint64_t)ts->errcode << 32) | ts->errcode_valid); 2251 break; 2252 case EXIT_REASON_CR_ACCESS: 2253 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CR_ACCESS, 1); 2254 SDT_PROBE4(vmm, vmx, exit, craccess, vmx, vcpu, vmexit, qual); 2255 switch (qual & 0xf) { 2256 case 0: 2257 handled = vmx_emulate_cr0_access(vmx, vcpu, qual); 2258 break; 2259 case 4: 2260 handled = vmx_emulate_cr4_access(vmx, vcpu, qual); 2261 break; 2262 case 8: 2263 handled = vmx_emulate_cr8_access(vmx, vcpu, qual); 2264 break; 2265 } 2266 break; 2267 case EXIT_REASON_RDMSR: 2268 case EXIT_REASON_WRMSR: 2269 handled = vmx_handle_msr(vmx, vcpu, vmexit, 2270 reason == EXIT_REASON_WRMSR); 2271 break; 2272 case EXIT_REASON_HLT: 2273 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_HLT, 1); 2274 SDT_PROBE3(vmm, vmx, exit, halt, vmx, vcpu, vmexit); 2275 vmexit->exitcode = VM_EXITCODE_HLT; 2276 vmexit->u.hlt.rflags = vmcs_read(VMCS_GUEST_RFLAGS); 2277 break; 2278 case EXIT_REASON_MTF: 2279 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MTRAP, 1); 2280 SDT_PROBE3(vmm, vmx, exit, mtrap, vmx, vcpu, vmexit); 2281 vmexit->exitcode = VM_EXITCODE_MTRAP; 2282 vmexit->inst_length = 0; 2283 break; 2284 case EXIT_REASON_PAUSE: 2285 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_PAUSE, 1); 2286 SDT_PROBE3(vmm, vmx, exit, pause, vmx, vcpu, vmexit); 2287 vmexit->exitcode = VM_EXITCODE_PAUSE; 2288 break; 2289 case EXIT_REASON_INTR_WINDOW: 2290 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INTR_WINDOW, 1); 2291 SDT_PROBE3(vmm, vmx, exit, intrwindow, vmx, vcpu, vmexit); 2292 vmx_clear_int_window_exiting(vmx, vcpu); 2293 return (1); 2294 case EXIT_REASON_EXT_INTR: 2295 /* 2296 * External interrupts serve only to cause VM exits and allow 2297 * the host interrupt handler to run. 2298 * 2299 * If this external interrupt triggers a virtual interrupt 2300 * to a VM, then that state will be recorded by the 2301 * host interrupt handler in the VM's softc. We will inject 2302 * this virtual interrupt during the subsequent VM enter. 2303 */ 2304 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO); 2305 SDT_PROBE4(vmm, vmx, exit, interrupt, 2306 vmx, vcpu, vmexit, intr_info); 2307 2308 /* 2309 * XXX: Ignore this exit if VMCS_INTR_VALID is not set. 2310 * This appears to be a bug in VMware Fusion? 2311 */ 2312 if (!(intr_info & VMCS_INTR_VALID)) 2313 return (1); 2314 KASSERT((intr_info & VMCS_INTR_VALID) != 0 && 2315 (intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_HWINTR, 2316 ("VM exit interruption info invalid: %x", intr_info)); 2317 vmx_trigger_hostintr(intr_info & 0xff); 2318 2319 /* 2320 * This is special. We want to treat this as an 'handled' 2321 * VM-exit but not increment the instruction pointer. 
2322 */ 2323 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXTINT, 1); 2324 return (1); 2325 case EXIT_REASON_NMI_WINDOW: 2326 SDT_PROBE3(vmm, vmx, exit, nmiwindow, vmx, vcpu, vmexit); 2327 /* Exit to allow the pending virtual NMI to be injected */ 2328 if (vm_nmi_pending(vmx->vm, vcpu)) 2329 vmx_inject_nmi(vmx, vcpu); 2330 vmx_clear_nmi_window_exiting(vmx, vcpu); 2331 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NMI_WINDOW, 1); 2332 return (1); 2333 case EXIT_REASON_INOUT: 2334 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INOUT, 1); 2335 vie = vm_vie_ctx(vmx->vm, vcpu); 2336 vmexit_inout(vmexit, vie, qual, (uint32_t)vmxctx->guest_rax); 2337 SDT_PROBE3(vmm, vmx, exit, inout, vmx, vcpu, vmexit); 2338 break; 2339 case EXIT_REASON_CPUID: 2340 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CPUID, 1); 2341 SDT_PROBE3(vmm, vmx, exit, cpuid, vmx, vcpu, vmexit); 2342 handled = vmx_handle_cpuid(vmx->vm, vcpu, vmxctx); 2343 break; 2344 case EXIT_REASON_EXCEPTION: 2345 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXCEPTION, 1); 2346 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO); 2347 KASSERT((intr_info & VMCS_INTR_VALID) != 0, 2348 ("VM exit interruption info invalid: %x", intr_info)); 2349 2350 intr_vec = intr_info & 0xff; 2351 intr_type = intr_info & VMCS_INTR_T_MASK; 2352 2353 /* 2354 * If Virtual NMIs control is 1 and the VM-exit is due to a 2355 * fault encountered during the execution of IRET then we must 2356 * restore the state of "virtual-NMI blocking" before resuming 2357 * the guest. 2358 * 2359 * See "Resuming Guest Software after Handling an Exception". 2360 * See "Information for VM Exits Due to Vectored Events". 2361 */ 2362 if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 && 2363 (intr_vec != IDT_DF) && 2364 (intr_info & EXIT_QUAL_NMIUDTI) != 0) 2365 vmx_restore_nmi_blocking(vmx, vcpu); 2366 2367 /* 2368 * The NMI has already been handled in vmx_exit_handle_nmi(). 2369 */ 2370 if (intr_type == VMCS_INTR_T_NMI) 2371 return (1); 2372 2373 /* 2374 * Call the machine check handler by hand. Also don't reflect 2375 * the machine check back into the guest. 2376 */ 2377 if (intr_vec == IDT_MC) { 2378 VCPU_CTR0(vmx->vm, vcpu, "Vectoring to MCE handler"); 2379 vmm_call_trap(T_MCE); 2380 return (1); 2381 } 2382 2383 /* 2384 * If the hypervisor has requested user exits for 2385 * debug exceptions, bounce them out to userland. 2386 */ 2387 if (intr_type == VMCS_INTR_T_SWEXCEPTION && 2388 intr_vec == IDT_BP && 2389 (vmx->cap[vcpu].set & (1 << VM_CAP_BPT_EXIT))) { 2390 vmexit->exitcode = VM_EXITCODE_BPT; 2391 vmexit->u.bpt.inst_length = vmexit->inst_length; 2392 vmexit->inst_length = 0; 2393 break; 2394 } 2395 2396 if (intr_vec == IDT_PF) { 2397 vmxctx->guest_cr2 = qual; 2398 } 2399 2400 /* 2401 * Software exceptions exhibit trap-like behavior. This in 2402 * turn requires populating the VM-entry instruction length 2403 * so that the %rip in the trap frame is past the INT3/INTO 2404 * instruction. 
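 * For example, a #BP reflected back into the guest should report a
 * %rip beyond the INT3, just as the exception would on bare metal.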
2405 */ 2406 if (intr_type == VMCS_INTR_T_SWEXCEPTION) 2407 vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length); 2408 2409 /* Reflect all other exceptions back into the guest */ 2410 errcode_valid = errcode = 0; 2411 if (intr_info & VMCS_INTR_DEL_ERRCODE) { 2412 errcode_valid = 1; 2413 errcode = vmcs_read(VMCS_EXIT_INTR_ERRCODE); 2414 } 2415 VCPU_CTR2(vmx->vm, vcpu, "Reflecting exception %d/%x into " 2416 "the guest", intr_vec, errcode); 2417 SDT_PROBE5(vmm, vmx, exit, exception, 2418 vmx, vcpu, vmexit, intr_vec, errcode); 2419 error = vm_inject_exception(vmx->vm, vcpu, intr_vec, 2420 errcode_valid, errcode, 0); 2421 KASSERT(error == 0, ("%s: vm_inject_exception error %d", 2422 __func__, error)); 2423 return (1); 2424 2425 case EXIT_REASON_EPT_FAULT: 2426 /* 2427 * If 'gpa' lies within the address space allocated to 2428 * memory then this must be a nested page fault otherwise 2429 * this must be an instruction that accesses MMIO space. 2430 */ 2431 gpa = vmcs_gpa(); 2432 if (vm_mem_allocated(vmx->vm, vcpu, gpa) || 2433 apic_access_fault(vmx, vcpu, gpa)) { 2434 vmexit->exitcode = VM_EXITCODE_PAGING; 2435 vmexit->inst_length = 0; 2436 vmexit->u.paging.gpa = gpa; 2437 vmexit->u.paging.fault_type = ept_fault_type(qual); 2438 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NESTED_FAULT, 1); 2439 SDT_PROBE5(vmm, vmx, exit, nestedfault, 2440 vmx, vcpu, vmexit, gpa, qual); 2441 } else if (ept_emulation_fault(qual)) { 2442 vie = vm_vie_ctx(vmx->vm, vcpu); 2443 vmexit_mmio_emul(vmexit, vie, gpa, vmcs_gla()); 2444 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MMIO_EMUL, 1); 2445 SDT_PROBE4(vmm, vmx, exit, mmiofault, 2446 vmx, vcpu, vmexit, gpa); 2447 } 2448 /* 2449 * If Virtual NMIs control is 1 and the VM-exit is due to an 2450 * EPT fault during the execution of IRET then we must restore 2451 * the state of "virtual-NMI blocking" before resuming. 2452 * 2453 * See description of "NMI unblocking due to IRET" in 2454 * "Exit Qualification for EPT Violations". 2455 */ 2456 if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 && 2457 (qual & EXIT_QUAL_NMIUDTI) != 0) 2458 vmx_restore_nmi_blocking(vmx, vcpu); 2459 break; 2460 case EXIT_REASON_VIRTUALIZED_EOI: 2461 vmexit->exitcode = VM_EXITCODE_IOAPIC_EOI; 2462 vmexit->u.ioapic_eoi.vector = qual & 0xFF; 2463 SDT_PROBE3(vmm, vmx, exit, eoi, vmx, vcpu, vmexit); 2464 vmexit->inst_length = 0; /* trap-like */ 2465 break; 2466 case EXIT_REASON_APIC_ACCESS: 2467 SDT_PROBE3(vmm, vmx, exit, apicaccess, vmx, vcpu, vmexit); 2468 handled = vmx_handle_apic_access(vmx, vcpu, vmexit); 2469 break; 2470 case EXIT_REASON_APIC_WRITE: 2471 /* 2472 * APIC-write VM exit is trap-like so the %rip is already 2473 * pointing to the next instruction. 
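 * Clearing inst_length below keeps the common handled-exit path from
 * advancing %rip a second time.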
2474 */ 2475 vmexit->inst_length = 0; 2476 vlapic = vm_lapic(vmx->vm, vcpu); 2477 SDT_PROBE4(vmm, vmx, exit, apicwrite, 2478 vmx, vcpu, vmexit, vlapic); 2479 handled = vmx_handle_apic_write(vmx, vcpu, vlapic, qual); 2480 break; 2481 case EXIT_REASON_XSETBV: 2482 SDT_PROBE3(vmm, vmx, exit, xsetbv, vmx, vcpu, vmexit); 2483 handled = vmx_emulate_xsetbv(vmx, vcpu, vmexit); 2484 break; 2485 case EXIT_REASON_MONITOR: 2486 SDT_PROBE3(vmm, vmx, exit, monitor, vmx, vcpu, vmexit); 2487 vmexit->exitcode = VM_EXITCODE_MONITOR; 2488 break; 2489 case EXIT_REASON_MWAIT: 2490 SDT_PROBE3(vmm, vmx, exit, mwait, vmx, vcpu, vmexit); 2491 vmexit->exitcode = VM_EXITCODE_MWAIT; 2492 break; 2493 case EXIT_REASON_TPR: 2494 vlapic = vm_lapic(vmx->vm, vcpu); 2495 vlapic_sync_tpr(vlapic); 2496 vmexit->inst_length = 0; 2497 handled = HANDLED; 2498 break; 2499 case EXIT_REASON_VMCALL: 2500 case EXIT_REASON_VMCLEAR: 2501 case EXIT_REASON_VMLAUNCH: 2502 case EXIT_REASON_VMPTRLD: 2503 case EXIT_REASON_VMPTRST: 2504 case EXIT_REASON_VMREAD: 2505 case EXIT_REASON_VMRESUME: 2506 case EXIT_REASON_VMWRITE: 2507 case EXIT_REASON_VMXOFF: 2508 case EXIT_REASON_VMXON: 2509 SDT_PROBE3(vmm, vmx, exit, vminsn, vmx, vcpu, vmexit); 2510 vmexit->exitcode = VM_EXITCODE_VMINSN; 2511 break; 2512 default: 2513 SDT_PROBE4(vmm, vmx, exit, unknown, 2514 vmx, vcpu, vmexit, reason); 2515 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_UNKNOWN, 1); 2516 break; 2517 } 2518 2519 if (handled) { 2520 /* 2521 * It is possible that control is returned to userland 2522 * even though we were able to handle the VM exit in the 2523 * kernel. 2524 * 2525 * In such a case we want to make sure that the userland 2526 * restarts guest execution at the instruction *after* 2527 * the one we just processed. Therefore we update the 2528 * guest rip in the VMCS and in 'vmexit'. 2529 */ 2530 vmexit->rip += vmexit->inst_length; 2531 vmexit->inst_length = 0; 2532 vmcs_write(VMCS_GUEST_RIP, vmexit->rip); 2533 } else { 2534 if (vmexit->exitcode == VM_EXITCODE_BOGUS) { 2535 /* 2536 * If this VM exit was not claimed by anybody then 2537 * treat it as a generic VMX exit. 2538 */ 2539 vmexit->exitcode = VM_EXITCODE_VMX; 2540 vmexit->u.vmx.status = VM_SUCCESS; 2541 vmexit->u.vmx.inst_type = 0; 2542 vmexit->u.vmx.inst_error = 0; 2543 } else { 2544 /* 2545 * The exitcode and collateral have been populated. 2546 * The VM exit will be processed further in userland. 2547 */ 2548 } 2549 } 2550 2551 SDT_PROBE4(vmm, vmx, exit, return, 2552 vmx, vcpu, vmexit, handled); 2553 return (handled); 2554 } 2555 2556 static void 2557 vmx_exit_inst_error(struct vmxctx *vmxctx, int rc, struct vm_exit *vmexit) 2558 { 2559 2560 KASSERT(vmxctx->inst_fail_status != VM_SUCCESS, 2561 ("vmx_exit_inst_error: invalid inst_fail_status %d", 2562 vmxctx->inst_fail_status)); 2563 2564 vmexit->inst_length = 0; 2565 vmexit->exitcode = VM_EXITCODE_VMX; 2566 vmexit->u.vmx.status = vmxctx->inst_fail_status; 2567 vmexit->u.vmx.inst_error = vmcs_instruction_error(); 2568 vmexit->u.vmx.exit_reason = ~0; 2569 vmexit->u.vmx.exit_qualification = ~0; 2570 2571 switch (rc) { 2572 case VMX_VMRESUME_ERROR: 2573 case VMX_VMLAUNCH_ERROR: 2574 case VMX_INVEPT_ERROR: 2575 case VMX_VMWRITE_ERROR: 2576 vmexit->u.vmx.inst_type = rc; 2577 break; 2578 default: 2579 panic("vm_exit_inst_error: vmx_enter_guest returned %d", rc); 2580 } 2581 } 2582 2583 /* 2584 * If the NMI-exiting VM execution control is set to '1' then an NMI in 2585 * non-root operation causes a VM-exit. 
NMI blocking is in effect so it is 2586 * sufficient to simply vector to the NMI handler via a software interrupt. 2587 * However, this must be done before maskable interrupts are enabled 2588 * otherwise the "iret" issued by an interrupt handler will incorrectly 2589 * clear NMI blocking. 2590 */ 2591 static __inline void 2592 vmx_exit_handle_possible_nmi(struct vm_exit *vmexit) 2593 { 2594 ASSERT(!interrupts_enabled()); 2595 2596 if (vmexit->u.vmx.exit_reason == EXIT_REASON_EXCEPTION) { 2597 uint32_t intr_info = vmcs_read(VMCS_EXIT_INTR_INFO); 2598 ASSERT(intr_info & VMCS_INTR_VALID); 2599 2600 if ((intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_NMI) { 2601 ASSERT3U(intr_info & 0xff, ==, IDT_NMI); 2602 vmm_call_trap(T_NMIFLT); 2603 } 2604 } 2605 } 2606 2607 static __inline void 2608 vmx_dr_enter_guest(struct vmxctx *vmxctx) 2609 { 2610 uint64_t rflags; 2611 2612 /* Save host control debug registers. */ 2613 vmxctx->host_dr7 = rdr7(); 2614 vmxctx->host_debugctl = rdmsr(MSR_DEBUGCTLMSR); 2615 2616 /* 2617 * Disable debugging in DR7 and DEBUGCTL to avoid triggering 2618 * exceptions in the host based on the guest DRx values. The 2619 * guest DR7 and DEBUGCTL are saved/restored in the VMCS. 2620 */ 2621 load_dr7(0); 2622 wrmsr(MSR_DEBUGCTLMSR, 0); 2623 2624 /* 2625 * Disable single stepping the kernel to avoid corrupting the 2626 * guest DR6. A debugger might still be able to corrupt the 2627 * guest DR6 by setting a breakpoint after this point and then 2628 * single stepping. 2629 */ 2630 rflags = read_rflags(); 2631 vmxctx->host_tf = rflags & PSL_T; 2632 write_rflags(rflags & ~PSL_T); 2633 2634 /* Save host debug registers. */ 2635 vmxctx->host_dr0 = rdr0(); 2636 vmxctx->host_dr1 = rdr1(); 2637 vmxctx->host_dr2 = rdr2(); 2638 vmxctx->host_dr3 = rdr3(); 2639 vmxctx->host_dr6 = rdr6(); 2640 2641 /* Restore guest debug registers. */ 2642 load_dr0(vmxctx->guest_dr0); 2643 load_dr1(vmxctx->guest_dr1); 2644 load_dr2(vmxctx->guest_dr2); 2645 load_dr3(vmxctx->guest_dr3); 2646 load_dr6(vmxctx->guest_dr6); 2647 } 2648 2649 static __inline void 2650 vmx_dr_leave_guest(struct vmxctx *vmxctx) 2651 { 2652 2653 /* Save guest debug registers. */ 2654 vmxctx->guest_dr0 = rdr0(); 2655 vmxctx->guest_dr1 = rdr1(); 2656 vmxctx->guest_dr2 = rdr2(); 2657 vmxctx->guest_dr3 = rdr3(); 2658 vmxctx->guest_dr6 = rdr6(); 2659 2660 /* 2661 * Restore host debug registers. Restore DR7, DEBUGCTL, and 2662 * PSL_T last. 
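 * The ordering matters: %dr0-%dr3 and %dr6 are reloaded while DR7 and
 * DEBUGCTL still hold the benign values set in vmx_dr_enter_guest(), so
 * no host debug facility can trigger on the intermediate state.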
2663 */ 2664 load_dr0(vmxctx->host_dr0); 2665 load_dr1(vmxctx->host_dr1); 2666 load_dr2(vmxctx->host_dr2); 2667 load_dr3(vmxctx->host_dr3); 2668 load_dr6(vmxctx->host_dr6); 2669 wrmsr(MSR_DEBUGCTLMSR, vmxctx->host_debugctl); 2670 load_dr7(vmxctx->host_dr7); 2671 write_rflags(read_rflags() | vmxctx->host_tf); 2672 } 2673 2674 static int 2675 vmx_run(void *arg, int vcpu, uint64_t rip) 2676 { 2677 int rc, handled, launched; 2678 struct vmx *vmx; 2679 struct vm *vm; 2680 struct vmxctx *vmxctx; 2681 uintptr_t vmcs_pa; 2682 struct vm_exit *vmexit; 2683 struct vlapic *vlapic; 2684 uint32_t exit_reason; 2685 bool tpr_shadow_active; 2686 vm_client_t *vmc; 2687 2688 vmx = arg; 2689 vm = vmx->vm; 2690 vmcs_pa = vmx->vmcs_pa[vcpu]; 2691 vmxctx = &vmx->ctx[vcpu]; 2692 vlapic = vm_lapic(vm, vcpu); 2693 vmexit = vm_exitinfo(vm, vcpu); 2694 vmc = vm_get_vmclient(vm, vcpu); 2695 launched = 0; 2696 tpr_shadow_active = vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW) && 2697 !vmx_cap_en(vmx, VMX_CAP_APICV) && 2698 (vmx->cap[vcpu].proc_ctls & PROCBASED_USE_TPR_SHADOW) != 0; 2699 2700 vmx_msr_guest_enter(vmx, vcpu); 2701 2702 vmcs_load(vmcs_pa); 2703 2704 VERIFY(vmx->vmcs_state[vcpu] == VS_NONE && curthread->t_preempt != 0); 2705 vmx->vmcs_state[vcpu] = VS_LOADED; 2706 2707 /* 2708 * XXX 2709 * We do this every time because we may setup the virtual machine 2710 * from a different process than the one that actually runs it. 2711 * 2712 * If the life of a virtual machine was spent entirely in the context 2713 * of a single process we could do this once in vmx_vminit(). 2714 */ 2715 vmcs_write(VMCS_HOST_CR3, rcr3()); 2716 2717 vmcs_write(VMCS_GUEST_RIP, rip); 2718 vmx_set_pcpu_defaults(vmx, vcpu); 2719 do { 2720 enum event_inject_state inject_state; 2721 uint64_t eptgen; 2722 2723 KASSERT(vmcs_guest_rip() == rip, ("%s: vmcs guest rip mismatch " 2724 "%lx/%lx", __func__, vmcs_guest_rip(), rip)); 2725 2726 handled = UNHANDLED; 2727 2728 /* 2729 * Perform initial event/exception/interrupt injection before 2730 * host CPU interrupts are disabled. 2731 */ 2732 inject_state = vmx_inject_events(vmx, vcpu, rip); 2733 2734 /* 2735 * Interrupts are disabled from this point on until the 2736 * guest starts executing. This is done for the following 2737 * reasons: 2738 * 2739 * If an AST is asserted on this thread after the check below, 2740 * then the IPI_AST notification will not be lost, because it 2741 * will cause a VM exit due to external interrupt as soon as 2742 * the guest state is loaded. 2743 * 2744 * A posted interrupt after vmx_inject_vlapic() will not be 2745 * "lost" because it will be held pending in the host APIC 2746 * because interrupts are disabled. The pending interrupt will 2747 * be recognized as soon as the guest state is loaded. 2748 * 2749 * The same reasoning applies to the IPI generated by vmspace 2750 * invalidation. 2751 */ 2752 disable_intr(); 2753 2754 /* 2755 * If not precluded by existing events, inject any interrupt 2756 * pending on the vLAPIC. As a lock-less operation, it is safe 2757 * (and prudent) to perform with host CPU interrupts disabled. 2758 */ 2759 if (inject_state == EIS_CAN_INJECT) { 2760 inject_state = vmx_inject_vlapic(vmx, vcpu, vlapic); 2761 } 2762 2763 /* 2764 * Check for vCPU bail-out conditions. This must be done after 2765 * vmx_inject_events() to detect a triple-fault condition. 
2766 */ 2767 if (vcpu_entry_bailout_checks(vmx->vm, vcpu, rip)) { 2768 enable_intr(); 2769 break; 2770 } 2771 2772 if (vcpu_run_state_pending(vm, vcpu)) { 2773 enable_intr(); 2774 vm_exit_run_state(vmx->vm, vcpu, rip); 2775 break; 2776 } 2777 2778 /* 2779 * If subsequent activity queued events which require injection 2780 * handling, take another lap to handle them. 2781 */ 2782 if (vmx_inject_recheck(vmx, vcpu, inject_state)) { 2783 enable_intr(); 2784 handled = HANDLED; 2785 continue; 2786 } 2787 2788 if ((rc = smt_acquire()) != 1) { 2789 enable_intr(); 2790 vmexit->rip = rip; 2791 vmexit->inst_length = 0; 2792 if (rc == -1) { 2793 vmexit->exitcode = VM_EXITCODE_HT; 2794 } else { 2795 vmexit->exitcode = VM_EXITCODE_BOGUS; 2796 handled = HANDLED; 2797 } 2798 break; 2799 } 2800 2801 /* 2802 * If this thread has gone off-cpu due to mutex operations 2803 * during vmx_run, the VMCS will have been unloaded, forcing a 2804 * re-VMLAUNCH as opposed to VMRESUME. 2805 */ 2806 launched = (vmx->vmcs_state[vcpu] & VS_LAUNCHED) != 0; 2807 /* 2808 * Restoration of the GDT limit is taken care of by 2809 * vmx_savectx(). Since the maximum practical index for the 2810 * IDT is 255, restoring its limits from the post-VMX-exit 2811 * default of 0xffff is not a concern. 2812 * 2813 * Only 64-bit hypervisor callers are allowed, which forgoes 2814 * the need to restore any LDT descriptor. Toss an error to 2815 * anyone attempting to break that rule. 2816 */ 2817 if (curproc->p_model != DATAMODEL_LP64) { 2818 smt_release(); 2819 enable_intr(); 2820 bzero(vmexit, sizeof (*vmexit)); 2821 vmexit->rip = rip; 2822 vmexit->exitcode = VM_EXITCODE_VMX; 2823 vmexit->u.vmx.status = VM_FAIL_INVALID; 2824 handled = UNHANDLED; 2825 break; 2826 } 2827 2828 if (tpr_shadow_active) { 2829 vmx_tpr_shadow_enter(vlapic); 2830 } 2831 2832 /* 2833 * Indicate activation of vmspace (EPT) table just prior to VMX 2834 * entry, checking for the necessity of an invept invalidation. 2835 */ 2836 eptgen = vmc_table_enter(vmc); 2837 if (vmx->eptgen[curcpu] != eptgen) { 2838 /* 2839 * VMspace generation does not match what was previously 2840 * used on this host CPU, so all mappings associated 2841 * with this EP4TA must be invalidated. 
2842 */ 2843 invept(1, vmx->eptp); 2844 vmx->eptgen[curcpu] = eptgen; 2845 } 2846
2847 vmx_run_trace(vmx, vcpu); 2848 vcpu_ustate_change(vm, vcpu, VU_RUN); 2849 vmx_dr_enter_guest(vmxctx); 2850
2851 /* Perform VMX entry */ 2852 rc = vmx_enter_guest(vmxctx, vmx, launched); 2853
2854 vmx_dr_leave_guest(vmxctx); 2855 vcpu_ustate_change(vm, vcpu, VU_EMU_KERN); 2856
2857 vmx->vmcs_state[vcpu] |= VS_LAUNCHED; 2858 smt_release(); 2859
2860 if (tpr_shadow_active) { 2861 vmx_tpr_shadow_exit(vlapic); 2862 } 2863
2864 /* Collect some information for VM exit processing */ 2865 vmexit->rip = rip = vmcs_guest_rip(); 2866 vmexit->inst_length = vmexit_instruction_length(); 2867 vmexit->u.vmx.exit_reason = exit_reason = vmcs_exit_reason(); 2868 vmexit->u.vmx.exit_qualification = vmcs_exit_qualification(); 2869 /* Update 'nextrip' */ 2870 vmx->state[vcpu].nextrip = rip; 2871
2872 if (rc == VMX_GUEST_VMEXIT) { 2873 vmx_exit_handle_possible_nmi(vmexit); 2874 } 2875 enable_intr(); 2876 vmc_table_exit(vmc); 2877
2878 if (rc == VMX_GUEST_VMEXIT) { 2879 handled = vmx_exit_process(vmx, vcpu, vmexit); 2880 } else { 2881 vmx_exit_inst_error(vmxctx, rc, vmexit); 2882 } 2883 DTRACE_PROBE3(vmm__vexit, int, vcpu, uint64_t, rip, 2884 uint32_t, exit_reason); 2885 rip = vmexit->rip; 2886 } while (handled); 2887
2888 /* If a VM exit has been handled then the exitcode must be BOGUS */ 2889 if (handled && vmexit->exitcode != VM_EXITCODE_BOGUS) { 2890 panic("Non-BOGUS exitcode (%d) unexpected for handled VM exit", 2891 vmexit->exitcode); 2892 } 2893
2894 VCPU_CTR1(vm, vcpu, "returning from vmx_run: exitcode %d", 2895 vmexit->exitcode); 2896
2897 vmcs_clear(vmcs_pa); 2898 vmx_msr_guest_exit(vmx, vcpu); 2899
2900 VERIFY(vmx->vmcs_state[vcpu] != VS_NONE && curthread->t_preempt != 0); 2901 vmx->vmcs_state[vcpu] = VS_NONE; 2902
2903 return (0); 2904 } 2905
2906 static void 2907 vmx_vmcleanup(void *arg) 2908 { 2909 int i; 2910 struct vmx *vmx = arg; 2911 uint16_t maxcpus; 2912
2913 if (vmx_cap_en(vmx, VMX_CAP_APICV)) { 2914 (void) vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE); 2915 kmem_free(vmx->apic_access_page, PAGESIZE); 2916 } else { 2917 VERIFY3P(vmx->apic_access_page, ==, NULL); 2918 } 2919
2920 vmx_msr_bitmap_destroy(vmx); 2921
2922 maxcpus = vm_get_maxcpus(vmx->vm); 2923 for (i = 0; i < maxcpus; i++) 2924 vpid_free(vmx->state[i].vpid); 2925
2926 free(vmx, M_VMX); 2927 } 2928
2929 static uint64_t * 2930 vmxctx_regptr(struct vmxctx *vmxctx, int reg) 2931 { 2932 switch (reg) { 2933 case VM_REG_GUEST_RAX: 2934 return (&vmxctx->guest_rax); 2935 case VM_REG_GUEST_RBX: 2936 return (&vmxctx->guest_rbx); 2937 case VM_REG_GUEST_RCX: 2938 return (&vmxctx->guest_rcx); 2939 case VM_REG_GUEST_RDX: 2940 return (&vmxctx->guest_rdx); 2941 case VM_REG_GUEST_RSI: 2942 return (&vmxctx->guest_rsi); 2943 case VM_REG_GUEST_RDI: 2944 return (&vmxctx->guest_rdi); 2945 case VM_REG_GUEST_RBP: 2946 return (&vmxctx->guest_rbp); 2947 case VM_REG_GUEST_R8: 2948 return (&vmxctx->guest_r8); 2949 case VM_REG_GUEST_R9: 2950 return (&vmxctx->guest_r9); 2951 case VM_REG_GUEST_R10: 2952 return (&vmxctx->guest_r10); 2953 case VM_REG_GUEST_R11: 2954 return (&vmxctx->guest_r11); 2955 case VM_REG_GUEST_R12: 2956 return (&vmxctx->guest_r12); 2957 case VM_REG_GUEST_R13: 2958 return (&vmxctx->guest_r13); 2959 case VM_REG_GUEST_R14: 2960 return (&vmxctx->guest_r14); 2961 case VM_REG_GUEST_R15: 2962 return (&vmxctx->guest_r15); 2963 case VM_REG_GUEST_CR2: 2964 return (&vmxctx->guest_cr2); 2965 case VM_REG_GUEST_DR0: 2966 return (&vmxctx->guest_dr0); 2967 case
VM_REG_GUEST_DR1: 2968 return (&vmxctx->guest_dr1); 2969 case VM_REG_GUEST_DR2: 2970 return (&vmxctx->guest_dr2); 2971 case VM_REG_GUEST_DR3: 2972 return (&vmxctx->guest_dr3); 2973 case VM_REG_GUEST_DR6: 2974 return (&vmxctx->guest_dr6); 2975 default: 2976 break; 2977 } 2978 return (NULL); 2979 } 2980 2981 static int 2982 vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval) 2983 { 2984 int running, hostcpu, err; 2985 struct vmx *vmx = arg; 2986 uint64_t *regp; 2987 2988 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu); 2989 if (running && hostcpu != curcpu) 2990 panic("vmx_getreg: %s%d is running", vm_name(vmx->vm), vcpu); 2991 2992 /* VMCS access not required for ctx reads */ 2993 if ((regp = vmxctx_regptr(&vmx->ctx[vcpu], reg)) != NULL) { 2994 *retval = *regp; 2995 return (0); 2996 } 2997 2998 if (!running) { 2999 vmcs_load(vmx->vmcs_pa[vcpu]); 3000 } 3001 3002 err = 0; 3003 if (reg == VM_REG_GUEST_INTR_SHADOW) { 3004 uint64_t gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 3005 *retval = (gi & HWINTR_BLOCKING) ? 1 : 0; 3006 } else { 3007 uint32_t encoding; 3008 3009 encoding = vmcs_field_encoding(reg); 3010 switch (encoding) { 3011 case VMCS_GUEST_CR0: 3012 /* Take the shadow bits into account */ 3013 *retval = vmx_unshadow_cr0(vmcs_read(encoding), 3014 vmcs_read(VMCS_CR0_SHADOW)); 3015 break; 3016 case VMCS_GUEST_CR4: 3017 /* Take the shadow bits into account */ 3018 *retval = vmx_unshadow_cr4(vmcs_read(encoding), 3019 vmcs_read(VMCS_CR4_SHADOW)); 3020 break; 3021 case VMCS_INVALID_ENCODING: 3022 err = EINVAL; 3023 break; 3024 default: 3025 *retval = vmcs_read(encoding); 3026 break; 3027 } 3028 } 3029 3030 if (!running) { 3031 vmcs_clear(vmx->vmcs_pa[vcpu]); 3032 } 3033 3034 return (err); 3035 } 3036 3037 static int 3038 vmx_setreg(void *arg, int vcpu, int reg, uint64_t val) 3039 { 3040 int running, hostcpu, error; 3041 struct vmx *vmx = arg; 3042 uint64_t *regp; 3043 3044 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu); 3045 if (running && hostcpu != curcpu) 3046 panic("vmx_setreg: %s%d is running", vm_name(vmx->vm), vcpu); 3047 3048 /* VMCS access not required for ctx writes */ 3049 if ((regp = vmxctx_regptr(&vmx->ctx[vcpu], reg)) != NULL) { 3050 *regp = val; 3051 return (0); 3052 } 3053 3054 if (!running) { 3055 vmcs_load(vmx->vmcs_pa[vcpu]); 3056 } 3057 3058 if (reg == VM_REG_GUEST_INTR_SHADOW) { 3059 if (val != 0) { 3060 /* 3061 * Forcing the vcpu into an interrupt shadow is not 3062 * presently supported. 3063 */ 3064 error = EINVAL; 3065 } else { 3066 uint64_t gi; 3067 3068 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 3069 gi &= ~HWINTR_BLOCKING; 3070 vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi); 3071 error = 0; 3072 } 3073 } else { 3074 uint32_t encoding; 3075 3076 error = 0; 3077 encoding = vmcs_field_encoding(reg); 3078 switch (encoding) { 3079 case VMCS_GUEST_IA32_EFER: 3080 /* 3081 * If the "load EFER" VM-entry control is 1 then the 3082 * value of EFER.LMA must be identical to "IA-32e mode 3083 * guest" bit in the VM-entry control. 3084 */ 3085 if ((entry_ctls & VM_ENTRY_LOAD_EFER) != 0) { 3086 uint64_t ctls; 3087 3088 ctls = vmcs_read(VMCS_ENTRY_CTLS); 3089 if (val & EFER_LMA) { 3090 ctls |= VM_ENTRY_GUEST_LMA; 3091 } else { 3092 ctls &= ~VM_ENTRY_GUEST_LMA; 3093 } 3094 vmcs_write(VMCS_ENTRY_CTLS, ctls); 3095 } 3096 vmcs_write(encoding, val); 3097 break; 3098 case VMCS_GUEST_CR0: 3099 /* 3100 * The guest is not allowed to modify certain bits in 3101 * %cr0 and %cr4. 
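 * (CR0.NE, for example, must remain set while the CPU is in VMX
 * operation.)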
To maintain the illusion of full 3102 * control, they have shadow versions which contain the 3103 * guest-perceived (via reads from the register) values 3104 * as opposed to the guest-effective values. 3105 * 3106 * This is detailed in the SDM: Vol. 3 Ch. 24.6.6. 3107 */ 3108 vmcs_write(VMCS_CR0_SHADOW, val); 3109 vmcs_write(encoding, vmx_fix_cr0(val)); 3110 break; 3111 case VMCS_GUEST_CR4: 3112 /* See above for detail on %cr4 shadowing */ 3113 vmcs_write(VMCS_CR4_SHADOW, val); 3114 vmcs_write(encoding, vmx_fix_cr4(val)); 3115 break; 3116 case VMCS_GUEST_CR3: 3117 vmcs_write(encoding, val); 3118 /* 3119 * Invalidate the guest vcpu's TLB mappings to emulate 3120 * the behavior of updating %cr3. 3121 * 3122 * XXX the processor retains global mappings when %cr3 3123 * is updated but vmx_invvpid() does not. 3124 */ 3125 vmx_invvpid(vmx, vcpu, running); 3126 break; 3127 case VMCS_INVALID_ENCODING: 3128 error = EINVAL; 3129 break; 3130 default: 3131 vmcs_write(encoding, val); 3132 break; 3133 } 3134 } 3135 3136 if (!running) { 3137 vmcs_clear(vmx->vmcs_pa[vcpu]); 3138 } 3139 3140 return (error); 3141 } 3142 3143 static int 3144 vmx_getdesc(void *arg, int vcpu, int seg, struct seg_desc *desc) 3145 { 3146 int hostcpu, running; 3147 struct vmx *vmx = arg; 3148 uint32_t base, limit, access; 3149 3150 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu); 3151 if (running && hostcpu != curcpu) 3152 panic("vmx_getdesc: %s%d is running", vm_name(vmx->vm), vcpu); 3153 3154 if (!running) { 3155 vmcs_load(vmx->vmcs_pa[vcpu]); 3156 } 3157 3158 vmcs_seg_desc_encoding(seg, &base, &limit, &access); 3159 desc->base = vmcs_read(base); 3160 desc->limit = vmcs_read(limit); 3161 if (access != VMCS_INVALID_ENCODING) { 3162 desc->access = vmcs_read(access); 3163 } else { 3164 desc->access = 0; 3165 } 3166 3167 if (!running) { 3168 vmcs_clear(vmx->vmcs_pa[vcpu]); 3169 } 3170 return (0); 3171 } 3172 3173 static int 3174 vmx_setdesc(void *arg, int vcpu, int seg, const struct seg_desc *desc) 3175 { 3176 int hostcpu, running; 3177 struct vmx *vmx = arg; 3178 uint32_t base, limit, access; 3179 3180 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu); 3181 if (running && hostcpu != curcpu) 3182 panic("vmx_setdesc: %s%d is running", vm_name(vmx->vm), vcpu); 3183 3184 if (!running) { 3185 vmcs_load(vmx->vmcs_pa[vcpu]); 3186 } 3187 3188 vmcs_seg_desc_encoding(seg, &base, &limit, &access); 3189 vmcs_write(base, desc->base); 3190 vmcs_write(limit, desc->limit); 3191 if (access != VMCS_INVALID_ENCODING) { 3192 vmcs_write(access, desc->access); 3193 } 3194 3195 if (!running) { 3196 vmcs_clear(vmx->vmcs_pa[vcpu]); 3197 } 3198 return (0); 3199 } 3200 3201 static int 3202 vmx_getcap(void *arg, int vcpu, int type, int *retval) 3203 { 3204 struct vmx *vmx = arg; 3205 int vcap; 3206 int ret; 3207 3208 ret = ENOENT; 3209 3210 vcap = vmx->cap[vcpu].set; 3211 3212 switch (type) { 3213 case VM_CAP_HALT_EXIT: 3214 if (cap_halt_exit) 3215 ret = 0; 3216 break; 3217 case VM_CAP_PAUSE_EXIT: 3218 if (cap_pause_exit) 3219 ret = 0; 3220 break; 3221 case VM_CAP_MTRAP_EXIT: 3222 if (cap_monitor_trap) 3223 ret = 0; 3224 break; 3225 case VM_CAP_ENABLE_INVPCID: 3226 if (cap_invpcid) 3227 ret = 0; 3228 break; 3229 case VM_CAP_BPT_EXIT: 3230 ret = 0; 3231 break; 3232 default: 3233 break; 3234 } 3235 3236 if (ret == 0) 3237 *retval = (vcap & (1 << type)) ? 
1 : 0; 3238 3239 return (ret); 3240 } 3241 3242 static int 3243 vmx_setcap(void *arg, int vcpu, int type, int val) 3244 { 3245 struct vmx *vmx = arg; 3246 uint32_t baseval, reg, flag; 3247 uint32_t *pptr; 3248 int error; 3249 3250 error = ENOENT; 3251 pptr = NULL; 3252 3253 switch (type) { 3254 case VM_CAP_HALT_EXIT: 3255 if (cap_halt_exit) { 3256 error = 0; 3257 pptr = &vmx->cap[vcpu].proc_ctls; 3258 baseval = *pptr; 3259 flag = PROCBASED_HLT_EXITING; 3260 reg = VMCS_PRI_PROC_BASED_CTLS; 3261 } 3262 break; 3263 case VM_CAP_MTRAP_EXIT: 3264 if (cap_monitor_trap) { 3265 error = 0; 3266 pptr = &vmx->cap[vcpu].proc_ctls; 3267 baseval = *pptr; 3268 flag = PROCBASED_MTF; 3269 reg = VMCS_PRI_PROC_BASED_CTLS; 3270 } 3271 break; 3272 case VM_CAP_PAUSE_EXIT: 3273 if (cap_pause_exit) { 3274 error = 0; 3275 pptr = &vmx->cap[vcpu].proc_ctls; 3276 baseval = *pptr; 3277 flag = PROCBASED_PAUSE_EXITING; 3278 reg = VMCS_PRI_PROC_BASED_CTLS; 3279 } 3280 break; 3281 case VM_CAP_ENABLE_INVPCID: 3282 if (cap_invpcid) { 3283 error = 0; 3284 pptr = &vmx->cap[vcpu].proc_ctls2; 3285 baseval = *pptr; 3286 flag = PROCBASED2_ENABLE_INVPCID; 3287 reg = VMCS_SEC_PROC_BASED_CTLS; 3288 } 3289 break; 3290 case VM_CAP_BPT_EXIT: 3291 error = 0; 3292 3293 /* Don't change the bitmap if we are tracing all exceptions. */ 3294 if (vmx->cap[vcpu].exc_bitmap != 0xffffffff) { 3295 pptr = &vmx->cap[vcpu].exc_bitmap; 3296 baseval = *pptr; 3297 flag = (1 << IDT_BP); 3298 reg = VMCS_EXCEPTION_BITMAP; 3299 } 3300 break; 3301 default: 3302 break; 3303 } 3304 3305 if (error != 0) { 3306 return (error); 3307 } 3308 3309 if (pptr != NULL) { 3310 if (val) { 3311 baseval |= flag; 3312 } else { 3313 baseval &= ~flag; 3314 } 3315 vmcs_load(vmx->vmcs_pa[vcpu]); 3316 vmcs_write(reg, baseval); 3317 vmcs_clear(vmx->vmcs_pa[vcpu]); 3318 3319 /* 3320 * Update optional stored flags, and record 3321 * setting 3322 */ 3323 *pptr = baseval; 3324 } 3325 3326 if (val) { 3327 vmx->cap[vcpu].set |= (1 << type); 3328 } else { 3329 vmx->cap[vcpu].set &= ~(1 << type); 3330 } 3331 3332 return (0); 3333 } 3334 3335 struct vlapic_vtx { 3336 struct vlapic vlapic; 3337 3338 /* Align to the nearest cacheline */ 3339 uint8_t _pad[64 - (sizeof (struct vlapic) % 64)]; 3340 3341 /* TMR handling state for posted interrupts */ 3342 uint32_t tmr_active[8]; 3343 uint32_t pending_level[8]; 3344 uint32_t pending_edge[8]; 3345 3346 struct pir_desc *pir_desc; 3347 struct vmx *vmx; 3348 uint_t pending_prio; 3349 boolean_t tmr_sync; 3350 }; 3351 3352 CTASSERT((offsetof(struct vlapic_vtx, tmr_active) & 63) == 0); 3353 3354 #define VPR_PRIO_BIT(vpr) (1 << ((vpr) >> 4)) 3355 3356 static vcpu_notify_t 3357 vmx_apicv_set_ready(struct vlapic *vlapic, int vector, bool level) 3358 { 3359 struct vlapic_vtx *vlapic_vtx; 3360 struct pir_desc *pir_desc; 3361 uint32_t mask, tmrval; 3362 int idx; 3363 vcpu_notify_t notify = VCPU_NOTIFY_NONE; 3364 3365 vlapic_vtx = (struct vlapic_vtx *)vlapic; 3366 pir_desc = vlapic_vtx->pir_desc; 3367 idx = vector / 32; 3368 mask = 1UL << (vector % 32); 3369 3370 /* 3371 * If the currently asserted TMRs do not match the state requested by 3372 * the incoming interrupt, an exit will be required to reconcile those 3373 * bits in the APIC page. This will keep the vLAPIC behavior in line 3374 * with the architecturally defined expectations. 3375 * 3376 * If actors of mixed types (edge and level) are racing against the same 3377 * vector (toggling its TMR bit back and forth), the results could 3378 * inconsistent. 
Such circumstances are considered a rare edge case and 3379 * are never expected to be found in the wild. 3380 */ 3381 tmrval = atomic_load_acq_int(&vlapic_vtx->tmr_active[idx]); 3382 if (!level) { 3383 if ((tmrval & mask) != 0) { 3384 /* Edge-triggered interrupt needs TMR de-asserted */ 3385 atomic_set_int(&vlapic_vtx->pending_edge[idx], mask); 3386 atomic_store_rel_long(&pir_desc->pending, 1); 3387 return (VCPU_NOTIFY_EXIT); 3388 } 3389 } else { 3390 if ((tmrval & mask) == 0) { 3391 /* Level-triggered interrupt needs TMR asserted */ 3392 atomic_set_int(&vlapic_vtx->pending_level[idx], mask); 3393 atomic_store_rel_long(&pir_desc->pending, 1); 3394 return (VCPU_NOTIFY_EXIT); 3395 } 3396 } 3397 3398 /* 3399 * If the interrupt request does not require manipulation of the TMRs 3400 * for delivery, set it in PIR descriptor. It cannot be inserted into 3401 * the APIC page while the vCPU might be running. 3402 */ 3403 atomic_set_int(&pir_desc->pir[idx], mask); 3404 3405 /* 3406 * A notification is required whenever the 'pending' bit makes a 3407 * transition from 0->1. 3408 * 3409 * Even if the 'pending' bit is already asserted, notification about 3410 * the incoming interrupt may still be necessary. For example, if a 3411 * vCPU is HLTed with a high PPR, a low priority interrupt would cause 3412 * the 0->1 'pending' transition with a notification, but the vCPU 3413 * would ignore the interrupt for the time being. The same vCPU would 3414 * need to then be notified if a high-priority interrupt arrived which 3415 * satisfied the PPR. 3416 * 3417 * The priorities of interrupts injected while 'pending' is asserted 3418 * are tracked in a custom bitfield 'pending_prio'. Should the 3419 * to-be-injected interrupt exceed the priorities already present, the 3420 * notification is sent. The priorities recorded in 'pending_prio' are 3421 * cleared whenever the 'pending' bit makes another 0->1 transition. 3422 */ 3423 if (atomic_cmpset_long(&pir_desc->pending, 0, 1) != 0) { 3424 notify = VCPU_NOTIFY_APIC; 3425 vlapic_vtx->pending_prio = 0; 3426 } else { 3427 const uint_t old_prio = vlapic_vtx->pending_prio; 3428 const uint_t prio_bit = VPR_PRIO_BIT(vector & APIC_TPR_INT); 3429 3430 if ((old_prio & prio_bit) == 0 && prio_bit > old_prio) { 3431 atomic_set_int(&vlapic_vtx->pending_prio, prio_bit); 3432 notify = VCPU_NOTIFY_APIC; 3433 } 3434 } 3435 3436 return (notify); 3437 } 3438 3439 static void 3440 vmx_apicv_accepted(struct vlapic *vlapic, int vector) 3441 { 3442 /* 3443 * When APICv is enabled for an instance, the traditional interrupt 3444 * injection method (populating ENTRY_INTR_INFO in the VMCS) is not 3445 * used and the CPU does the heavy lifting of virtual interrupt 3446 * delivery. For that reason vmx_intr_accepted() should never be called 3447 * when APICv is enabled. 
3448 */ 3449 panic("vmx_intr_accepted: not expected to be called"); 3450 } 3451 3452 static void 3453 vmx_apicv_sync_tmr(struct vlapic *vlapic) 3454 { 3455 struct vlapic_vtx *vlapic_vtx; 3456 const uint32_t *tmrs; 3457 3458 vlapic_vtx = (struct vlapic_vtx *)vlapic; 3459 tmrs = &vlapic_vtx->tmr_active[0]; 3460 3461 if (!vlapic_vtx->tmr_sync) { 3462 return; 3463 } 3464 3465 vmcs_write(VMCS_EOI_EXIT0, ((uint64_t)tmrs[1] << 32) | tmrs[0]); 3466 vmcs_write(VMCS_EOI_EXIT1, ((uint64_t)tmrs[3] << 32) | tmrs[2]); 3467 vmcs_write(VMCS_EOI_EXIT2, ((uint64_t)tmrs[5] << 32) | tmrs[4]); 3468 vmcs_write(VMCS_EOI_EXIT3, ((uint64_t)tmrs[7] << 32) | tmrs[6]); 3469 vlapic_vtx->tmr_sync = B_FALSE; 3470 } 3471 3472 static void 3473 vmx_enable_x2apic_mode_ts(struct vlapic *vlapic) 3474 { 3475 struct vmx *vmx; 3476 uint32_t proc_ctls; 3477 int vcpuid; 3478 3479 vcpuid = vlapic->vcpuid; 3480 vmx = ((struct vlapic_vtx *)vlapic)->vmx; 3481 3482 proc_ctls = vmx->cap[vcpuid].proc_ctls; 3483 proc_ctls &= ~PROCBASED_USE_TPR_SHADOW; 3484 proc_ctls |= PROCBASED_CR8_LOAD_EXITING; 3485 proc_ctls |= PROCBASED_CR8_STORE_EXITING; 3486 vmx->cap[vcpuid].proc_ctls = proc_ctls; 3487 3488 vmcs_load(vmx->vmcs_pa[vcpuid]); 3489 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, proc_ctls); 3490 vmcs_clear(vmx->vmcs_pa[vcpuid]); 3491 } 3492 3493 static void 3494 vmx_enable_x2apic_mode_vid(struct vlapic *vlapic) 3495 { 3496 struct vmx *vmx; 3497 uint32_t proc_ctls2; 3498 int vcpuid; 3499 3500 vcpuid = vlapic->vcpuid; 3501 vmx = ((struct vlapic_vtx *)vlapic)->vmx; 3502 3503 proc_ctls2 = vmx->cap[vcpuid].proc_ctls2; 3504 KASSERT((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) != 0, 3505 ("%s: invalid proc_ctls2 %x", __func__, proc_ctls2)); 3506 3507 proc_ctls2 &= ~PROCBASED2_VIRTUALIZE_APIC_ACCESSES; 3508 proc_ctls2 |= PROCBASED2_VIRTUALIZE_X2APIC_MODE; 3509 vmx->cap[vcpuid].proc_ctls2 = proc_ctls2; 3510 3511 vmcs_load(vmx->vmcs_pa[vcpuid]); 3512 vmcs_write(VMCS_SEC_PROC_BASED_CTLS, proc_ctls2); 3513 vmcs_clear(vmx->vmcs_pa[vcpuid]); 3514 3515 vmx_allow_x2apic_msrs(vmx, vcpuid); 3516 } 3517 3518 static void 3519 vmx_apicv_notify(struct vlapic *vlapic, int hostcpu) 3520 { 3521 psm_send_pir_ipi(hostcpu); 3522 } 3523 3524 static void 3525 vmx_apicv_sync(struct vlapic *vlapic) 3526 { 3527 struct vlapic_vtx *vlapic_vtx; 3528 struct pir_desc *pir_desc; 3529 struct LAPIC *lapic; 3530 uint_t i; 3531 3532 vlapic_vtx = (struct vlapic_vtx *)vlapic; 3533 pir_desc = vlapic_vtx->pir_desc; 3534 lapic = vlapic->apic_page; 3535 3536 if (atomic_cmpset_long(&pir_desc->pending, 1, 0) == 0) { 3537 return; 3538 } 3539 3540 vlapic_vtx->pending_prio = 0; 3541 3542 /* Make sure the invalid (0-15) vectors are not set */ 3543 ASSERT0(vlapic_vtx->pending_level[0] & 0xffff); 3544 ASSERT0(vlapic_vtx->pending_edge[0] & 0xffff); 3545 ASSERT0(pir_desc->pir[0] & 0xffff); 3546 3547 for (i = 0; i <= 7; i++) { 3548 uint32_t *tmrp = &lapic->tmr0 + (i * 4); 3549 uint32_t *irrp = &lapic->irr0 + (i * 4); 3550 3551 const uint32_t pending_level = 3552 atomic_readandclear_int(&vlapic_vtx->pending_level[i]); 3553 const uint32_t pending_edge = 3554 atomic_readandclear_int(&vlapic_vtx->pending_edge[i]); 3555 const uint32_t pending_inject = 3556 atomic_readandclear_int(&pir_desc->pir[i]); 3557 3558 if (pending_level != 0) { 3559 /* 3560 * Level-triggered interrupts assert their corresponding 3561 * bit in the TMR when queued in IRR. 
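 * The TMR bits tracked here also drive the EOI-exit bitmap written by
 * vmx_apicv_sync_tmr(), so EOIs for level-triggered vectors will exit
 * to the VMM for ioapic-level handling.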
3562 */ 3563 *tmrp |= pending_level; 3564 *irrp |= pending_level; 3565 } 3566 if (pending_edge != 0) { 3567 /* 3568 * When queuing an edge-triggered interrupt in IRR, the 3569 * corresponding bit in the TMR is cleared. 3570 */ 3571 *tmrp &= ~pending_edge; 3572 *irrp |= pending_edge; 3573 } 3574 if (pending_inject != 0) { 3575 /* 3576 * Interrupts which do not require a change to the TMR 3577 * (because it already matches the necessary state) can 3578 * simply be queued in IRR. 3579 */ 3580 *irrp |= pending_inject; 3581 } 3582 3583 if (*tmrp != vlapic_vtx->tmr_active[i]) { 3584 /* Check if VMX EOI triggers require updating. */ 3585 vlapic_vtx->tmr_active[i] = *tmrp; 3586 vlapic_vtx->tmr_sync = B_TRUE; 3587 } 3588 } 3589 } 3590 3591 static void 3592 vmx_tpr_shadow_enter(struct vlapic *vlapic) 3593 { 3594 /* 3595 * When TPR shadowing is enabled, VMX will initiate a guest exit if its 3596 * TPR falls below a threshold priority. That threshold is set to the 3597 * current TPR priority, since guest interrupt status should be 3598 * re-evaluated if its TPR is set lower. 3599 */ 3600 vmcs_write(VMCS_TPR_THRESHOLD, vlapic_get_cr8(vlapic)); 3601 } 3602 3603 static void 3604 vmx_tpr_shadow_exit(struct vlapic *vlapic) 3605 { 3606 /* 3607 * Unlike full APICv, where changes to the TPR are reflected in the PPR, 3608 * with TPR shadowing, that duty is relegated to the VMM. Upon exit, 3609 * the PPR is updated to reflect any change in the TPR here. 3610 */ 3611 vlapic_sync_tpr(vlapic); 3612 } 3613 3614 static struct vlapic * 3615 vmx_vlapic_init(void *arg, int vcpuid) 3616 { 3617 struct vmx *vmx; 3618 struct vlapic *vlapic; 3619 struct vlapic_vtx *vlapic_vtx; 3620 3621 vmx = arg; 3622 3623 vlapic = malloc(sizeof (struct vlapic_vtx), M_VLAPIC, 3624 M_WAITOK | M_ZERO); 3625 vlapic->vm = vmx->vm; 3626 vlapic->vcpuid = vcpuid; 3627 vlapic->apic_page = (struct LAPIC *)&vmx->apic_page[vcpuid]; 3628 3629 vlapic_vtx = (struct vlapic_vtx *)vlapic; 3630 vlapic_vtx->pir_desc = &vmx->pir_desc[vcpuid]; 3631 vlapic_vtx->vmx = vmx; 3632 3633 if (vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW)) { 3634 vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode_ts; 3635 } 3636 if (vmx_cap_en(vmx, VMX_CAP_APICV)) { 3637 vlapic->ops.set_intr_ready = vmx_apicv_set_ready; 3638 vlapic->ops.sync_state = vmx_apicv_sync; 3639 vlapic->ops.intr_accepted = vmx_apicv_accepted; 3640 vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode_vid; 3641 3642 if (vmx_cap_en(vmx, VMX_CAP_APICV_PIR)) { 3643 vlapic->ops.post_intr = vmx_apicv_notify; 3644 } 3645 } 3646 3647 vlapic_init(vlapic); 3648 3649 return (vlapic); 3650 } 3651 3652 static void 3653 vmx_vlapic_cleanup(void *arg, struct vlapic *vlapic) 3654 { 3655 3656 vlapic_cleanup(vlapic); 3657 free(vlapic, M_VLAPIC); 3658 } 3659 3660 static void 3661 vmx_savectx(void *arg, int vcpu) 3662 { 3663 struct vmx *vmx = arg; 3664 3665 if ((vmx->vmcs_state[vcpu] & VS_LOADED) != 0) { 3666 vmcs_clear(vmx->vmcs_pa[vcpu]); 3667 vmx_msr_guest_exit(vmx, vcpu); 3668 /* 3669 * Having VMCLEARed the VMCS, it can no longer be re-entered 3670 * with VMRESUME, but must be VMLAUNCHed again. 
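 * Dropping VS_LAUNCHED here is what causes vmx_run() to pass a clear
 * 'launched' flag to vmx_enter_guest() on the next entry.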
3671 */ 3672 vmx->vmcs_state[vcpu] &= ~VS_LAUNCHED; 3673 } 3674 3675 reset_gdtr_limit(); 3676 } 3677 3678 static void 3679 vmx_restorectx(void *arg, int vcpu) 3680 { 3681 struct vmx *vmx = arg; 3682 3683 ASSERT0(vmx->vmcs_state[vcpu] & VS_LAUNCHED); 3684 3685 if ((vmx->vmcs_state[vcpu] & VS_LOADED) != 0) { 3686 vmx_msr_guest_enter(vmx, vcpu); 3687 vmcs_load(vmx->vmcs_pa[vcpu]); 3688 } 3689 } 3690 3691 struct vmm_ops vmm_ops_intel = { 3692 .init = vmx_init, 3693 .cleanup = vmx_cleanup, 3694 .resume = vmx_restore, 3695 3696 .vminit = vmx_vminit, 3697 .vmrun = vmx_run, 3698 .vmcleanup = vmx_vmcleanup, 3699 .vmgetreg = vmx_getreg, 3700 .vmsetreg = vmx_setreg, 3701 .vmgetdesc = vmx_getdesc, 3702 .vmsetdesc = vmx_setdesc, 3703 .vmgetcap = vmx_getcap, 3704 .vmsetcap = vmx_setcap, 3705 .vlapic_init = vmx_vlapic_init, 3706 .vlapic_cleanup = vmx_vlapic_cleanup, 3707 3708 .vmsavectx = vmx_savectx, 3709 .vmrestorectx = vmx_restorectx, 3710 }; 3711 3712 /* Side-effect free HW validation derived from checks in vmx_init. */ 3713 int 3714 vmx_x86_supported(const char **msg) 3715 { 3716 int error; 3717 uint32_t tmp; 3718 3719 ASSERT(msg != NULL); 3720 3721 /* Check support for primary processor-based VM-execution controls */ 3722 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, 3723 MSR_VMX_TRUE_PROCBASED_CTLS, PROCBASED_CTLS_ONE_SETTING, 3724 PROCBASED_CTLS_ZERO_SETTING, &tmp); 3725 if (error) { 3726 *msg = "processor does not support desired primary " 3727 "processor-based controls"; 3728 return (error); 3729 } 3730 3731 /* Check support for secondary processor-based VM-execution controls */ 3732 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, 3733 MSR_VMX_PROCBASED_CTLS2, PROCBASED_CTLS2_ONE_SETTING, 3734 PROCBASED_CTLS2_ZERO_SETTING, &tmp); 3735 if (error) { 3736 *msg = "processor does not support desired secondary " 3737 "processor-based controls"; 3738 return (error); 3739 } 3740 3741 /* Check support for pin-based VM-execution controls */ 3742 error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS, 3743 MSR_VMX_TRUE_PINBASED_CTLS, PINBASED_CTLS_ONE_SETTING, 3744 PINBASED_CTLS_ZERO_SETTING, &tmp); 3745 if (error) { 3746 *msg = "processor does not support desired pin-based controls"; 3747 return (error); 3748 } 3749 3750 /* Check support for VM-exit controls */ 3751 error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS, 3752 VM_EXIT_CTLS_ONE_SETTING, VM_EXIT_CTLS_ZERO_SETTING, &tmp); 3753 if (error) { 3754 *msg = "processor does not support desired exit controls"; 3755 return (error); 3756 } 3757 3758 /* Check support for VM-entry controls */ 3759 error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS, MSR_VMX_TRUE_ENTRY_CTLS, 3760 VM_ENTRY_CTLS_ONE_SETTING, VM_ENTRY_CTLS_ZERO_SETTING, &tmp); 3761 if (error) { 3762 *msg = "processor does not support desired entry controls"; 3763 return (error); 3764 } 3765 3766 /* Unrestricted guest is nominally optional, but not for us. */ 3767 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2, 3768 PROCBASED2_UNRESTRICTED_GUEST, 0, &tmp); 3769 if (error) { 3770 *msg = "processor does not support desired unrestricted guest " 3771 "controls"; 3772 return (error); 3773 } 3774 3775 return (0); 3776 } 3777