/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 * Copyright (c) 2018 Joyent, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 *
 * Copyright 2015 Pluribus Networks Inc.
 * Copyright 2018 Joyent, Inc.
 * Copyright 2021 Oxide Computer Company
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sysctl.h>

#include <sys/x86_archext.h>
#include <sys/smp_impldefs.h>
#include <sys/smt.h>
#include <sys/hma.h>
#include <sys/trap.h>
#include <sys/archsystm.h>

#include <machine/psl.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>
#include <machine/reg.h>
#include <machine/segments.h>
#include <machine/specialreg.h>
#include <machine/vmparam.h>
#include <sys/vmm_vm.h>

#include <machine/vmm.h>
#include <machine/vmm_dev.h>
#include <sys/vmm_instruction_emul.h>
#include "vmm_lapic.h"
#include "vmm_host.h"
#include "vmm_ioport.h"
#include "vmm_ktr.h"
#include "vmm_stat.h"
#include "vatpic.h"
#include "vlapic.h"
#include "vlapic_priv.h"

#include "vmcs.h"
#include "vmx.h"
#include "vmx_msr.h"
#include "x86.h"
#include "vmx_controls.h"

#define	PINBASED_CTLS_ONE_SETTING	\
	(PINBASED_EXTINT_EXITING |	\
	PINBASED_NMI_EXITING |		\
	PINBASED_VIRTUAL_NMI)
#define	PINBASED_CTLS_ZERO_SETTING	0

#define	PROCBASED_CTLS_WINDOW_SETTING	\
	(PROCBASED_INT_WINDOW_EXITING |	\
	PROCBASED_NMI_WINDOW_EXITING)

/* We consider TSC offset a necessity for unsynched TSC handling */
#define	PROCBASED_CTLS_ONE_SETTING	\
	(PROCBASED_SECONDARY_CONTROLS |	\
	PROCBASED_TSC_OFFSET |		\
	PROCBASED_MWAIT_EXITING |	\
	PROCBASED_MONITOR_EXITING |	\
	PROCBASED_IO_EXITING |		\
	PROCBASED_MSR_BITMAPS |		\
	PROCBASED_CTLS_WINDOW_SETTING |	\
	PROCBASED_CR8_LOAD_EXITING |	\
	PROCBASED_CR8_STORE_EXITING)

#define	PROCBASED_CTLS_ZERO_SETTING	\
	(PROCBASED_CR3_LOAD_EXITING |	\
	PROCBASED_CR3_STORE_EXITING |	\
	PROCBASED_IO_BITMAPS)
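/*
 * Each *_ONE_SETTING / *_ZERO_SETTING pair above is validated in vmx_init()
 * against the corresponding VMX capability MSR: one-setting bits must be
 * settable to 1 and zero-setting bits settable to 0, or initialization fails.
 * As an illustrative sketch of the pattern (vmx_set_ctlreg() itself is
 * implemented elsewhere in the VMM code):
 *
 *	uint32_t ctls;
 *	if (vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS, MSR_VMX_TRUE_PINBASED_CTLS,
 *	    PINBASED_CTLS_ONE_SETTING, PINBASED_CTLS_ZERO_SETTING, &ctls) != 0)
 *		... bail out: a required control is unsupported ...
 */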
/*
 * EPT and Unrestricted Guest are considered necessities. The latter is not a
 * requirement on FreeBSD, where grub2-bhyve is used to load guests directly
 * without a bootrom starting in real mode.
 */
#define	PROCBASED_CTLS2_ONE_SETTING	\
	(PROCBASED2_ENABLE_EPT |	\
	PROCBASED2_UNRESTRICTED_GUEST)
#define	PROCBASED_CTLS2_ZERO_SETTING	0

#define	VM_EXIT_CTLS_ONE_SETTING	\
	(VM_EXIT_SAVE_DEBUG_CONTROLS |	\
	VM_EXIT_HOST_LMA |		\
	VM_EXIT_LOAD_PAT |		\
	VM_EXIT_SAVE_EFER |		\
	VM_EXIT_LOAD_EFER |		\
	VM_EXIT_ACKNOWLEDGE_INTERRUPT)

#define	VM_EXIT_CTLS_ZERO_SETTING	0

#define	VM_ENTRY_CTLS_ONE_SETTING	\
	(VM_ENTRY_LOAD_DEBUG_CONTROLS |	\
	VM_ENTRY_LOAD_EFER)

#define	VM_ENTRY_CTLS_ZERO_SETTING	\
	(VM_ENTRY_INTO_SMM |		\
	VM_ENTRY_DEACTIVATE_DUAL_MONITOR)

/*
 * Cover the EPT capabilities used by bhyve at present:
 * - 4-level page walks
 * - write-back memory type
 * - INVEPT operations (all types)
 * - INVVPID operations (single-context only)
 */
#define	EPT_CAPS_REQUIRED		\
	(IA32_VMX_EPT_VPID_PWL4 |	\
	IA32_VMX_EPT_VPID_TYPE_WB |	\
	IA32_VMX_EPT_VPID_INVEPT |	\
	IA32_VMX_EPT_VPID_INVEPT_SINGLE | \
	IA32_VMX_EPT_VPID_INVEPT_ALL |	\
	IA32_VMX_EPT_VPID_INVVPID |	\
	IA32_VMX_EPT_VPID_INVVPID_SINGLE)

#define	HANDLED		1
#define	UNHANDLED	0

static MALLOC_DEFINE(M_VMX, "vmx", "vmx");
static MALLOC_DEFINE(M_VLAPIC, "vlapic", "vlapic");

SYSCTL_DECL(_hw_vmm);
SYSCTL_NODE(_hw_vmm, OID_AUTO, vmx, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
    NULL);

static uint32_t pinbased_ctls, procbased_ctls, procbased_ctls2;
static uint32_t exit_ctls, entry_ctls;

static uint64_t cr0_ones_mask, cr0_zeros_mask;

static uint64_t cr4_ones_mask, cr4_zeros_mask;

static int vmx_initialized;

/* Do not flush RSB upon vmexit */
static int no_flush_rsb;

/*
 * Optional capabilities
 */

/* HLT triggers a VM-exit */
static int cap_halt_exit;

/* PAUSE triggers a VM-exit */
static int cap_pause_exit;

/* Monitor trap flag */
static int cap_monitor_trap;

/* Guests are allowed to use INVPCID */
static int cap_invpcid;

/* Extra capabilities (VMX_CAP_*) beyond the minimum */
static enum vmx_caps vmx_capabilities;

/* APICv posted interrupt vector */
static int pirvec = -1;

static uint_t vpid_alloc_failed;

int guest_l1d_flush;
int guest_l1d_flush_sw;

/* MSR save region is composed of an array of 'struct msr_entry' */
struct msr_entry {
	uint32_t	index;
	uint32_t	reserved;
	uint64_t	val;
};

static struct msr_entry msr_load_list[1] __aligned(16);
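/*
 * Note that 'struct msr_entry' above mirrors the 16-byte VM-entry MSR-load
 * area format from the Intel SDM (MSR index, reserved word, value).
 * msr_load_list[] is installed via VMCS_ENTRY_MSR_LOAD during vCPU setup so
 * that, on FreeBSD builds with the hardware L1D-flush mitigation active, the
 * processor loads IA32_FLUSH_CMD on every VM entry.
 */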
/*
 * The definitions of SDT probes for VMX.
 */
/* BEGIN CSTYLED */
SDT_PROBE_DEFINE3(vmm, vmx, exit, entry,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE4(vmm, vmx, exit, taskswitch,
    "struct vmx *", "int", "struct vm_exit *", "struct vm_task_switch *");

SDT_PROBE_DEFINE4(vmm, vmx, exit, craccess,
    "struct vmx *", "int", "struct vm_exit *", "uint64_t");

SDT_PROBE_DEFINE4(vmm, vmx, exit, rdmsr,
    "struct vmx *", "int", "struct vm_exit *", "uint32_t");

SDT_PROBE_DEFINE5(vmm, vmx, exit, wrmsr,
    "struct vmx *", "int", "struct vm_exit *", "uint32_t", "uint64_t");

SDT_PROBE_DEFINE3(vmm, vmx, exit, halt,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, mtrap,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, pause,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, intrwindow,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE4(vmm, vmx, exit, interrupt,
    "struct vmx *", "int", "struct vm_exit *", "uint32_t");

SDT_PROBE_DEFINE3(vmm, vmx, exit, nmiwindow,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, inout,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, cpuid,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE5(vmm, vmx, exit, exception,
    "struct vmx *", "int", "struct vm_exit *", "uint32_t", "int");

SDT_PROBE_DEFINE5(vmm, vmx, exit, nestedfault,
    "struct vmx *", "int", "struct vm_exit *", "uint64_t", "uint64_t");

SDT_PROBE_DEFINE4(vmm, vmx, exit, mmiofault,
    "struct vmx *", "int", "struct vm_exit *", "uint64_t");

SDT_PROBE_DEFINE3(vmm, vmx, exit, eoi,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, apicaccess,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE4(vmm, vmx, exit, apicwrite,
    "struct vmx *", "int", "struct vm_exit *", "struct vlapic *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, xsetbv,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, monitor,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, mwait,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, vminsn,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE4(vmm, vmx, exit, unknown,
    "struct vmx *", "int", "struct vm_exit *", "uint32_t");

SDT_PROBE_DEFINE4(vmm, vmx, exit, return,
    "struct vmx *", "int", "struct vm_exit *", "int");
/* END CSTYLED */

static int vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc);
static int vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval);
static void vmx_apply_tsc_adjust(struct vmx *, int);
static void vmx_apicv_sync_tmr(struct vlapic *vlapic);
static void vmx_tpr_shadow_enter(struct vlapic *vlapic);
static void vmx_tpr_shadow_exit(struct vlapic *vlapic);
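/*
 * Since PROCBASED_MSR_BITMAPS is part of PROCBASED_CTLS_ONE_SETTING, guest
 * MSR accesses always consult a per-vCPU bitmap. guest_msr_ro() and
 * guest_msr_rw(), implemented with the rest of the MSR-bitmap support
 * (outside this excerpt), clear the read and/or write intercept bits for a
 * single MSR so the accesses permitted below proceed without a VM-exit.
 */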
static void
vmx_allow_x2apic_msrs(struct vmx *vmx, int vcpuid)
{
	/*
	 * Allow readonly access to the following x2APIC MSRs from the guest.
	 */
	guest_msr_ro(vmx, vcpuid, MSR_APIC_ID);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_VERSION);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_LDR);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_SVR);

	for (uint_t i = 0; i < 8; i++) {
		guest_msr_ro(vmx, vcpuid, MSR_APIC_ISR0 + i);
		guest_msr_ro(vmx, vcpuid, MSR_APIC_TMR0 + i);
		guest_msr_ro(vmx, vcpuid, MSR_APIC_IRR0 + i);
	}

	guest_msr_ro(vmx, vcpuid, MSR_APIC_ESR);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_LVT_TIMER);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_LVT_THERMAL);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_LVT_PCINT);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_LVT_LINT0);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_LVT_LINT1);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_LVT_ERROR);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_ICR_TIMER);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_DCR_TIMER);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_ICR);

	/*
	 * Allow TPR, EOI and SELF_IPI MSRs to be read and written by the
	 * guest.
	 *
	 * These registers get special treatment described in the section
	 * "Virtualizing MSR-Based APIC Accesses".
	 */
	guest_msr_rw(vmx, vcpuid, MSR_APIC_TPR);
	guest_msr_rw(vmx, vcpuid, MSR_APIC_EOI);
	guest_msr_rw(vmx, vcpuid, MSR_APIC_SELF_IPI);
}

static ulong_t
vmx_fix_cr0(ulong_t cr0)
{
	return ((cr0 | cr0_ones_mask) & ~cr0_zeros_mask);
}

/*
 * Given a live (VMCS-active) cr0 value, and its shadow counterpart, calculate
 * the value observable from the guest.
 */
static ulong_t
vmx_unshadow_cr0(uint64_t cr0, uint64_t shadow)
{
	return ((cr0 & ~cr0_ones_mask) |
	    (shadow & (cr0_zeros_mask | cr0_ones_mask)));
}

static ulong_t
vmx_fix_cr4(ulong_t cr4)
{
	return ((cr4 | cr4_ones_mask) & ~cr4_zeros_mask);
}

/*
 * Given a live (VMCS-active) cr4 value, and its shadow counterpart, calculate
 * the value observable from the guest.
 */
static ulong_t
vmx_unshadow_cr4(uint64_t cr4, uint64_t shadow)
{
	return ((cr4 & ~cr4_ones_mask) |
	    (shadow & (cr4_zeros_mask | cr4_ones_mask)));
}
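/*
 * A worked example of the fix/unshadow pairs above: CR0_NE is typically part
 * of cr0_ones_mask (it must be 1 in VMX operation). If the guest writes a
 * %cr0 value with NE clear, vmx_fix_cr0() keeps NE set in the live register
 * while the written value lands in the CR0 read shadow; a subsequent guest
 * read is answered via vmx_unshadow_cr0(), which substitutes the shadowed NE
 * bit back, so the guest observes exactly the value it wrote.
 */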
static void
vpid_free(int vpid)
{
	if (vpid < 0 || vpid > 0xffff)
		panic("vpid_free: invalid vpid %d", vpid);

	/*
	 * VPIDs [0,VM_MAXCPU] are special and are not allocated from
	 * the unit number allocator.
	 */
	if (vpid > VM_MAXCPU)
		hma_vmx_vpid_free((uint16_t)vpid);
}

static void
vpid_alloc(uint16_t *vpid, int num)
{
	int i, x;

	if (num <= 0 || num > VM_MAXCPU)
		panic("invalid number of vpids requested: %d", num);

	/*
	 * If the "enable vpid" execution control is not enabled then the
	 * VPID is required to be 0 for all vcpus.
	 */
	if ((procbased_ctls2 & PROCBASED2_ENABLE_VPID) == 0) {
		for (i = 0; i < num; i++)
			vpid[i] = 0;
		return;
	}

	/*
	 * Allocate a unique VPID for each vcpu from the unit number allocator.
	 */
	for (i = 0; i < num; i++) {
		uint16_t tmp;

		tmp = hma_vmx_vpid_alloc();
		x = (tmp == 0) ? -1 : tmp;

		if (x == -1)
			break;
		else
			vpid[i] = x;
	}

	if (i < num) {
		atomic_add_int(&vpid_alloc_failed, 1);

		/*
		 * If the unit number allocator does not have enough unique
		 * VPIDs then we need to allocate from the [1,VM_MAXCPU] range.
		 *
		 * These VPIDs are not unique across VMs but this does not
		 * affect correctness because the combined mappings are also
		 * tagged with the EP4TA which is unique for each VM.
		 *
		 * It is still sub-optimal because the invvpid will invalidate
		 * combined mappings for a particular VPID across all EP4TAs.
		 */
		while (i-- > 0)
			vpid_free(vpid[i]);

		for (i = 0; i < num; i++)
			vpid[i] = i + 1;
	}
}

static int
vmx_cleanup(void)
{
	/* This is taken care of by the hma registration */
	return (0);
}

static void
vmx_restore(void)
{
	/* No-op on illumos */
}

static int
vmx_init(void)
{
	int error;
	uint64_t fixed0, fixed1;
	uint32_t tmp;
	enum vmx_caps avail_caps = VMX_CAP_NONE;

	/* Check support for primary processor-based VM-execution controls */
	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
	    MSR_VMX_TRUE_PROCBASED_CTLS,
	    PROCBASED_CTLS_ONE_SETTING,
	    PROCBASED_CTLS_ZERO_SETTING, &procbased_ctls);
	if (error) {
		printf("vmx_init: processor does not support desired primary "
		    "processor-based controls\n");
		return (error);
	}

	/* Clear the processor-based ctl bits that are set on demand */
	procbased_ctls &= ~PROCBASED_CTLS_WINDOW_SETTING;

	/* Check support for secondary processor-based VM-execution controls */
	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
	    MSR_VMX_PROCBASED_CTLS2,
	    PROCBASED_CTLS2_ONE_SETTING,
	    PROCBASED_CTLS2_ZERO_SETTING, &procbased_ctls2);
	if (error) {
		printf("vmx_init: processor does not support desired "
		    "secondary processor-based controls\n");
		return (error);
	}

	/* Check support for VPID */
	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
	    MSR_VMX_PROCBASED_CTLS2,
	    PROCBASED2_ENABLE_VPID,
	    0, &tmp);
	if (error == 0)
		procbased_ctls2 |= PROCBASED2_ENABLE_VPID;

	/* Check support for pin-based VM-execution controls */
	error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
	    MSR_VMX_TRUE_PINBASED_CTLS,
	    PINBASED_CTLS_ONE_SETTING,
	    PINBASED_CTLS_ZERO_SETTING, &pinbased_ctls);
	if (error) {
		printf("vmx_init: processor does not support desired "
		    "pin-based controls\n");
		return (error);
	}

	/* Check support for VM-exit controls */
	error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS,
	    VM_EXIT_CTLS_ONE_SETTING,
	    VM_EXIT_CTLS_ZERO_SETTING,
	    &exit_ctls);
	if (error) {
		printf("vmx_init: processor does not support desired "
		    "exit controls\n");
		return (error);
	}

	/* Check support for VM-entry controls */
	error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS, MSR_VMX_TRUE_ENTRY_CTLS,
	    VM_ENTRY_CTLS_ONE_SETTING, VM_ENTRY_CTLS_ZERO_SETTING,
	    &entry_ctls);
	if (error) {
		printf("vmx_init: processor does not support desired "
		    "entry controls\n");
		return (error);
	}

	/*
	 * Check support for optional features by testing them
	 * as individual bits
	 */
	cap_halt_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
	    MSR_VMX_TRUE_PROCBASED_CTLS,
	    PROCBASED_HLT_EXITING, 0,
	    &tmp) == 0);

	cap_monitor_trap = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
	    MSR_VMX_PROCBASED_CTLS,
	    PROCBASED_MTF, 0,
	    &tmp) == 0);

	cap_pause_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
	    MSR_VMX_TRUE_PROCBASED_CTLS,
	    PROCBASED_PAUSE_EXITING, 0,
	    &tmp) == 0);

	cap_invpcid = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
	    MSR_VMX_PROCBASED_CTLS2, PROCBASED2_ENABLE_INVPCID, 0,
	    &tmp) == 0);
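	/*
	 * For the optional capabilities probed above only the success or
	 * failure of vmx_set_ctlreg() matters; the resolved settings returned
	 * in 'tmp' are discarded, since these controls are applied per-vCPU
	 * on demand (via the capability get/set paths, not shown here).
	 */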
	/*
	 * Check for APIC virtualization capabilities:
	 * - TPR shadowing
	 * - Full APICv (with or without x2APIC support)
	 * - Posted interrupt handling
	 */
	if (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, MSR_VMX_TRUE_PROCBASED_CTLS,
	    PROCBASED_USE_TPR_SHADOW, 0, &tmp) == 0) {
		avail_caps |= VMX_CAP_TPR_SHADOW;

		const uint32_t apicv_bits =
		    PROCBASED2_VIRTUALIZE_APIC_ACCESSES |
		    PROCBASED2_APIC_REGISTER_VIRTUALIZATION |
		    PROCBASED2_VIRTUALIZE_X2APIC_MODE |
		    PROCBASED2_VIRTUAL_INTERRUPT_DELIVERY;
		if (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
		    MSR_VMX_PROCBASED_CTLS2, apicv_bits, 0, &tmp) == 0) {
			avail_caps |= VMX_CAP_APICV;

			/*
			 * It may make sense in the future to differentiate
			 * hardware (or software) configurations with APICv but
			 * no support for accelerating x2APIC mode.
			 */
			avail_caps |= VMX_CAP_APICV_X2APIC;

			error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
			    MSR_VMX_TRUE_PINBASED_CTLS,
			    PINBASED_POSTED_INTERRUPT, 0, &tmp);
			if (error == 0) {
				/*
				 * If the PSM-provided interfaces for requesting
				 * and using a PIR IPI vector are present, use
				 * them for posted interrupts.
				 */
				if (psm_get_pir_ipivect != NULL &&
				    psm_send_pir_ipi != NULL) {
					pirvec = psm_get_pir_ipivect();
					avail_caps |= VMX_CAP_APICV_PIR;
				}
			}
		}
	}

	/*
	 * Check for necessary EPT capabilities
	 *
	 * TODO: Properly handle when IA32_VMX_EPT_VPID_HW_AD is missing and
	 * the hypervisor intends to utilize dirty page tracking.
	 */
	uint64_t ept_caps = rdmsr(MSR_IA32_VMX_EPT_VPID_CAP);
	if ((ept_caps & EPT_CAPS_REQUIRED) != EPT_CAPS_REQUIRED) {
		cmn_err(CE_WARN, "!Inadequate EPT capabilities: %lx",
		    ept_caps);
		return (EINVAL);
	}

#ifdef __FreeBSD__
	guest_l1d_flush = (cpu_ia32_arch_caps &
	    IA32_ARCH_CAP_SKIP_L1DFL_VMENTRY) == 0;
	TUNABLE_INT_FETCH("hw.vmm.l1d_flush", &guest_l1d_flush);

	/*
	 * L1D cache flush is enabled. Use IA32_FLUSH_CMD MSR when
	 * available. Otherwise fall back to the software flush
	 * method which loads enough data from the kernel text to
	 * flush existing L1D content, both on VMX entry and on NMI
	 * return.
	 */
	if (guest_l1d_flush) {
		if ((cpu_stdext_feature3 & CPUID_STDEXT3_L1D_FLUSH) == 0) {
			guest_l1d_flush_sw = 1;
			TUNABLE_INT_FETCH("hw.vmm.l1d_flush_sw",
			    &guest_l1d_flush_sw);
		}
		if (guest_l1d_flush_sw) {
			if (nmi_flush_l1d_sw <= 1)
				nmi_flush_l1d_sw = 1;
		} else {
			msr_load_list[0].index = MSR_IA32_FLUSH_CMD;
			msr_load_list[0].val = IA32_FLUSH_CMD_L1D;
		}
	}
#else
	/* L1D flushing is taken care of by smt_acquire() and friends */
	guest_l1d_flush = 0;
#endif /* __FreeBSD__ */

	/*
	 * Stash the cr0 and cr4 bits that must be fixed to 0 or 1
	 */
	fixed0 = rdmsr(MSR_VMX_CR0_FIXED0);
	fixed1 = rdmsr(MSR_VMX_CR0_FIXED1);
	cr0_ones_mask = fixed0 & fixed1;
	cr0_zeros_mask = ~fixed0 & ~fixed1;

	/*
	 * Since Unrestricted Guest was already verified present, CR0_PE and
	 * CR0_PG are allowed to be set to zero in VMX non-root operation
	 */
	cr0_ones_mask &= ~(CR0_PG | CR0_PE);
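	/*
	 * Derivation of the masks above: MSR_VMX_CR0_FIXED0 carries a 1 for
	 * every %cr0 bit that must be 1, and MSR_VMX_CR0_FIXED1 carries a 0
	 * for every bit that must be 0. A bit set in both (fixed0 & fixed1)
	 * is fixed to 1; a bit clear in both (~fixed0 & ~fixed1) is fixed to
	 * 0; bits where the MSRs disagree are freely settable by the guest.
	 */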
	/*
	 * Do not allow the guest to set CR0_NW or CR0_CD.
	 */
	cr0_zeros_mask |= (CR0_NW | CR0_CD);

	fixed0 = rdmsr(MSR_VMX_CR4_FIXED0);
	fixed1 = rdmsr(MSR_VMX_CR4_FIXED1);
	cr4_ones_mask = fixed0 & fixed1;
	cr4_zeros_mask = ~fixed0 & ~fixed1;

	vmx_msr_init();

	vmx_capabilities = avail_caps;
	vmx_initialized = 1;

	return (0);
}

static void
vmx_trigger_hostintr(int vector)
{
	VERIFY(vector >= 32 && vector <= 255);
	vmx_call_isr(vector - 32);
}

static void *
vmx_vminit(struct vm *vm)
{
	uint16_t vpid[VM_MAXCPU];
	int i, error, datasel;
	struct vmx *vmx;
	uint32_t exc_bitmap;
	uint16_t maxcpus;
	uint32_t proc_ctls, proc2_ctls, pin_ctls;
	uint64_t apic_access_pa = UINT64_MAX;

	vmx = malloc(sizeof (struct vmx), M_VMX, M_WAITOK | M_ZERO);
	if ((uintptr_t)vmx & PAGE_MASK) {
		panic("malloc of struct vmx not aligned on %d byte boundary",
		    PAGE_SIZE);
	}
	vmx->vm = vm;

	vmx->eptp = vmspace_table_root(vm_get_vmspace(vm));

	/*
	 * Clean up EP4TA-tagged guest-physical and combined mappings
	 *
	 * VMX transitions are not required to invalidate any guest physical
	 * mappings. So, it may be possible for stale guest physical mappings
	 * to be present in the processor TLBs.
	 *
	 * Combined mappings for this EP4TA are also invalidated for all VPIDs.
	 */
	hma_vmx_invept_allcpus((uintptr_t)vmx->eptp);

	vmx_msr_bitmap_initialize(vmx);

	vpid_alloc(vpid, VM_MAXCPU);

	/* Grab the established defaults */
	proc_ctls = procbased_ctls;
	proc2_ctls = procbased_ctls2;
	pin_ctls = pinbased_ctls;
	/* For now, default to the available capabilities */
	vmx->vmx_caps = vmx_capabilities;

	if (vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW)) {
		proc_ctls |= PROCBASED_USE_TPR_SHADOW;
		proc_ctls &= ~PROCBASED_CR8_LOAD_EXITING;
		proc_ctls &= ~PROCBASED_CR8_STORE_EXITING;
	}
	if (vmx_cap_en(vmx, VMX_CAP_APICV)) {
		ASSERT(vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW));

		proc2_ctls |= (PROCBASED2_VIRTUALIZE_APIC_ACCESSES |
		    PROCBASED2_APIC_REGISTER_VIRTUALIZATION |
		    PROCBASED2_VIRTUAL_INTERRUPT_DELIVERY);

		/*
		 * Allocate a page of memory to back the APIC access address
		 * for when APICv features are in use. Guest MMIO accesses
		 * should never actually reach this page, but rather be
		 * intercepted.
		 */
		vmx->apic_access_page = kmem_zalloc(PAGESIZE, KM_SLEEP);
		VERIFY3U((uintptr_t)vmx->apic_access_page & PAGEOFFSET, ==, 0);
		apic_access_pa = vtophys(vmx->apic_access_page);

		error = vm_map_mmio(vm, DEFAULT_APIC_BASE, PAGE_SIZE,
		    apic_access_pa);
		/* XXX this should really return an error to the caller */
		KASSERT(error == 0, ("vm_map_mmio(apicbase) error %d", error));
	}
	if (vmx_cap_en(vmx, VMX_CAP_APICV_PIR)) {
		ASSERT(vmx_cap_en(vmx, VMX_CAP_APICV));

		pin_ctls |= PINBASED_POSTED_INTERRUPT;
	}

	maxcpus = vm_get_maxcpus(vm);
	datasel = vmm_get_host_datasel();
	for (i = 0; i < maxcpus; i++) {
		/*
		 * Cache physical address lookups for various components which
		 * may be required inside the critical_enter() section implied
		 * by VMPTRLD() below.
		 */
		vm_paddr_t msr_bitmap_pa = vtophys(vmx->msr_bitmap[i]);
		vm_paddr_t apic_page_pa = vtophys(&vmx->apic_page[i]);
		vm_paddr_t pir_desc_pa = vtophys(&vmx->pir_desc[i]);

		vmx->vmcs_pa[i] = (uintptr_t)vtophys(&vmx->vmcs[i]);
		vmcs_initialize(&vmx->vmcs[i], vmx->vmcs_pa[i]);

		vmx_msr_guest_init(vmx, i);

		vmcs_load(vmx->vmcs_pa[i]);

		vmcs_write(VMCS_HOST_IA32_PAT, vmm_get_host_pat());
		vmcs_write(VMCS_HOST_IA32_EFER, vmm_get_host_efer());

		/* Load the control registers */
		vmcs_write(VMCS_HOST_CR0, vmm_get_host_cr0());
		vmcs_write(VMCS_HOST_CR4, vmm_get_host_cr4() | CR4_VMXE);

		/* Load the segment selectors */
		vmcs_write(VMCS_HOST_CS_SELECTOR, vmm_get_host_codesel());

		vmcs_write(VMCS_HOST_ES_SELECTOR, datasel);
		vmcs_write(VMCS_HOST_SS_SELECTOR, datasel);
		vmcs_write(VMCS_HOST_DS_SELECTOR, datasel);

		vmcs_write(VMCS_HOST_FS_SELECTOR, vmm_get_host_fssel());
		vmcs_write(VMCS_HOST_GS_SELECTOR, vmm_get_host_gssel());
		vmcs_write(VMCS_HOST_TR_SELECTOR, vmm_get_host_tsssel());

		/*
		 * Configure host sysenter MSRs to be restored on VM exit.
		 * The thread-specific MSR_INTC_SEP_ESP value is loaded in
		 * vmx_run.
		 */
		vmcs_write(VMCS_HOST_IA32_SYSENTER_CS, KCS_SEL);
		vmcs_write(VMCS_HOST_IA32_SYSENTER_EIP,
		    rdmsr(MSR_SYSENTER_EIP_MSR));

		/* instruction pointer */
		if (no_flush_rsb) {
			vmcs_write(VMCS_HOST_RIP, (uint64_t)vmx_exit_guest);
		} else {
			vmcs_write(VMCS_HOST_RIP,
			    (uint64_t)vmx_exit_guest_flush_rsb);
		}

		/* link pointer */
		vmcs_write(VMCS_LINK_POINTER, ~0);

		vmcs_write(VMCS_EPTP, vmx->eptp);
		vmcs_write(VMCS_PIN_BASED_CTLS, pin_ctls);
		vmcs_write(VMCS_PRI_PROC_BASED_CTLS, proc_ctls);
		vmcs_write(VMCS_SEC_PROC_BASED_CTLS, proc2_ctls);
		vmcs_write(VMCS_EXIT_CTLS, exit_ctls);
		vmcs_write(VMCS_ENTRY_CTLS, entry_ctls);
		vmcs_write(VMCS_MSR_BITMAP, msr_bitmap_pa);
		vmcs_write(VMCS_VPID, vpid[i]);

		if (guest_l1d_flush && !guest_l1d_flush_sw) {
			vmcs_write(VMCS_ENTRY_MSR_LOAD,
			    vtophys(&msr_load_list[0]));
			vmcs_write(VMCS_ENTRY_MSR_LOAD_COUNT,
			    nitems(msr_load_list));
			vmcs_write(VMCS_EXIT_MSR_STORE, 0);
			vmcs_write(VMCS_EXIT_MSR_STORE_COUNT, 0);
		}

		/* exception bitmap */
		if (vcpu_trace_exceptions(vm, i))
			exc_bitmap = 0xffffffff;
		else
			exc_bitmap = 1 << IDT_MC;
		vmcs_write(VMCS_EXCEPTION_BITMAP, exc_bitmap);

		vmx->ctx[i].guest_dr6 = DBREG_DR6_RESERVED1;
		vmcs_write(VMCS_GUEST_DR7, DBREG_DR7_RESERVED1);

		if (vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW)) {
			vmcs_write(VMCS_VIRTUAL_APIC, apic_page_pa);
		}

		if (vmx_cap_en(vmx, VMX_CAP_APICV)) {
			vmcs_write(VMCS_APIC_ACCESS, apic_access_pa);
			vmcs_write(VMCS_EOI_EXIT0, 0);
			vmcs_write(VMCS_EOI_EXIT1, 0);
			vmcs_write(VMCS_EOI_EXIT2, 0);
			vmcs_write(VMCS_EOI_EXIT3, 0);
		}
		if (vmx_cap_en(vmx, VMX_CAP_APICV_PIR)) {
			vmcs_write(VMCS_PIR_VECTOR, pirvec);
			vmcs_write(VMCS_PIR_DESC, pir_desc_pa);
		}

		/*
		 * Set up the CR0/4 masks and configure the read shadow state
		 * to the power-on register value from the Intel Sys Arch.
		 *  CR0 - 0x60000010
		 *  CR4 - 0
		 */
		vmcs_write(VMCS_CR0_MASK, cr0_ones_mask | cr0_zeros_mask);
		vmcs_write(VMCS_CR0_SHADOW, 0x60000010);
		vmcs_write(VMCS_CR4_MASK, cr4_ones_mask | cr4_zeros_mask);
		vmcs_write(VMCS_CR4_SHADOW, 0);
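		/*
		 * For reference, the shadow value 0x60000010 written above
		 * decomposes as CR0_CD | CR0_NW | CR0_ET: the architected
		 * post-RESET state of %cr0 described in the Intel SDM.
		 */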
		vmcs_clear(vmx->vmcs_pa[i]);

		vmx->cap[i].set = 0;
		vmx->cap[i].proc_ctls = proc_ctls;
		vmx->cap[i].proc_ctls2 = proc2_ctls;
		vmx->cap[i].exc_bitmap = exc_bitmap;

		vmx->state[i].nextrip = ~0;
		vmx->state[i].lastcpu = NOCPU;
		vmx->state[i].vpid = vpid[i];
	}

	return (vmx);
}

static int
vmx_handle_cpuid(struct vm *vm, int vcpu, struct vmxctx *vmxctx)
{
	int handled;

	handled = x86_emulate_cpuid(vm, vcpu, (uint64_t *)&vmxctx->guest_rax,
	    (uint64_t *)&vmxctx->guest_rbx, (uint64_t *)&vmxctx->guest_rcx,
	    (uint64_t *)&vmxctx->guest_rdx);
	return (handled);
}

static __inline void
vmx_run_trace(struct vmx *vmx, int vcpu)
{
#ifdef KTR
	VCPU_CTR1(vmx->vm, vcpu, "Resume execution at %lx", vmcs_guest_rip());
#endif
}

static __inline void
vmx_astpending_trace(struct vmx *vmx, int vcpu, uint64_t rip)
{
#ifdef KTR
	VCPU_CTR1(vmx->vm, vcpu, "astpending vmexit at 0x%0lx", rip);
#endif
}

static VMM_STAT_INTEL(VCPU_INVVPID_SAVED, "Number of vpid invalidations saved");
static VMM_STAT_INTEL(VCPU_INVVPID_DONE, "Number of vpid invalidations done");

#define	INVVPID_TYPE_ADDRESS		0UL
#define	INVVPID_TYPE_SINGLE_CONTEXT	1UL
#define	INVVPID_TYPE_ALL_CONTEXTS	2UL

struct invvpid_desc {
	uint16_t	vpid;
	uint16_t	_res1;
	uint32_t	_res2;
	uint64_t	linear_addr;
};
CTASSERT(sizeof (struct invvpid_desc) == 16);
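/*
 * Of the INVVPID types defined above, only the single-context variant is
 * used by vmx_invvpid() below; support for it is part of EPT_CAPS_REQUIRED
 * and was therefore verified during vmx_init(). Broader invalidation is
 * performed per-EP4TA with invept() rather than the all-contexts INVVPID.
 */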
static __inline void
invvpid(uint64_t type, struct invvpid_desc desc)
{
	int error;

	DTRACE_PROBE3(vmx__invvpid, uint64_t, type, uint16_t, desc.vpid,
	    uint64_t, desc.linear_addr);

	__asm __volatile("invvpid %[desc], %[type];"
	    VMX_SET_ERROR_CODE_ASM
	    : [error] "=r" (error)
	    : [desc] "m" (desc), [type] "r" (type)
	    : "memory");

	if (error) {
		panic("invvpid error %d", error);
	}
}

/*
 * Invalidate guest mappings identified by its VPID from the TLB.
 *
 * This is effectively a flush of the guest TLB, removing only "combined
 * mappings" (to use the VMX parlance). Actions which modify the EPT structures
 * for the instance (such as unmapping GPAs) would require an 'invept' flush.
 */
static void
vmx_invvpid(struct vmx *vmx, int vcpu, int running)
{
	struct vmxstate *vmxstate;
	struct vmspace *vms;

	vmxstate = &vmx->state[vcpu];
	if (vmxstate->vpid == 0) {
		return;
	}

	if (!running) {
		/*
		 * Set the 'lastcpu' to an invalid host cpu.
		 *
		 * This will invalidate TLB entries tagged with the vcpu's
		 * vpid the next time it runs via vmx_set_pcpu_defaults().
		 */
		vmxstate->lastcpu = NOCPU;
		return;
	}

	/*
	 * Invalidate all mappings tagged with 'vpid'
	 *
	 * This is done when a vCPU moves between host CPUs, where there may
	 * be stale TLB entries for this VPID on the target, or if emulated
	 * actions in the guest CPU have incurred an explicit TLB flush.
	 */
	vms = vm_get_vmspace(vmx->vm);
	if (vmspace_table_gen(vms) == vmx->eptgen[curcpu]) {
		struct invvpid_desc invvpid_desc = {
			.vpid = vmxstate->vpid,
			.linear_addr = 0,
			._res1 = 0,
			._res2 = 0,
		};

		invvpid(INVVPID_TYPE_SINGLE_CONTEXT, invvpid_desc);
		vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_DONE, 1);
	} else {
		/*
		 * The INVVPID can be skipped if an INVEPT is going to be
		 * performed before entering the guest. The INVEPT will
		 * invalidate combined mappings for the EP4TA associated with
		 * this guest, in all VPIDs.
		 */
		vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_SAVED, 1);
	}
}

static __inline void
invept(uint64_t type, uint64_t eptp)
{
	int error;
	struct invept_desc {
		uint64_t eptp;
		uint64_t _resv;
	} desc = { eptp, 0 };

	DTRACE_PROBE2(vmx__invept, uint64_t, type, uint64_t, eptp);

	__asm __volatile("invept %[desc], %[type];"
	    VMX_SET_ERROR_CODE_ASM
	    : [error] "=r" (error)
	    : [desc] "m" (desc), [type] "r" (type)
	    : "memory");

	if (error != 0) {
		panic("invept error %d", error);
	}
}

static void
vmx_set_pcpu_defaults(struct vmx *vmx, int vcpu)
{
	struct vmxstate *vmxstate;

	/*
	 * Regardless of whether the VM appears to have migrated between CPUs,
	 * save the host sysenter stack pointer. As it points to the kernel
	 * stack of each thread, the correct value must be maintained for every
	 * trip into the critical section.
	 */
	vmcs_write(VMCS_HOST_IA32_SYSENTER_ESP, rdmsr(MSR_SYSENTER_ESP_MSR));

	/*
	 * Perform any needed TSC_OFFSET adjustment based on TSC_MSR writes or
	 * migration between host CPUs with differing TSC values.
	 */
	vmx_apply_tsc_adjust(vmx, vcpu);

	vmxstate = &vmx->state[vcpu];
	if (vmxstate->lastcpu == curcpu)
		return;

	vmxstate->lastcpu = curcpu;

	vmm_stat_incr(vmx->vm, vcpu, VCPU_MIGRATIONS, 1);

	/* Load the per-CPU IDT address */
	vmcs_write(VMCS_HOST_IDTR_BASE, vmm_get_host_idtrbase());
	vmcs_write(VMCS_HOST_TR_BASE, vmm_get_host_trbase());
	vmcs_write(VMCS_HOST_GDTR_BASE, vmm_get_host_gdtrbase());
	vmcs_write(VMCS_HOST_GS_BASE, vmm_get_host_gsbase());
	vmx_invvpid(vmx, vcpu, 1);
}
/*
 * We depend on 'procbased_ctls' to have the Interrupt Window Exiting bit set.
 */
CTASSERT((PROCBASED_CTLS_ONE_SETTING & PROCBASED_INT_WINDOW_EXITING) != 0);

static __inline void
vmx_set_int_window_exiting(struct vmx *vmx, int vcpu)
{
	if ((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) == 0) {
		vmx->cap[vcpu].proc_ctls |= PROCBASED_INT_WINDOW_EXITING;
		vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
		VCPU_CTR0(vmx->vm, vcpu, "Enabling interrupt window exiting");
	}
}

static __inline void
vmx_clear_int_window_exiting(struct vmx *vmx, int vcpu)
{
	KASSERT((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0,
	    ("intr_window_exiting not set: %x", vmx->cap[vcpu].proc_ctls));
	vmx->cap[vcpu].proc_ctls &= ~PROCBASED_INT_WINDOW_EXITING;
	vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
	VCPU_CTR0(vmx->vm, vcpu, "Disabling interrupt window exiting");
}

static __inline bool
vmx_nmi_window_exiting(struct vmx *vmx, int vcpu)
{
	return ((vmx->cap[vcpu].proc_ctls & PROCBASED_NMI_WINDOW_EXITING) != 0);
}

static __inline void
vmx_set_nmi_window_exiting(struct vmx *vmx, int vcpu)
{
	if (!vmx_nmi_window_exiting(vmx, vcpu)) {
		vmx->cap[vcpu].proc_ctls |= PROCBASED_NMI_WINDOW_EXITING;
		vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
	}
}

static __inline void
vmx_clear_nmi_window_exiting(struct vmx *vmx, int vcpu)
{
	ASSERT(vmx_nmi_window_exiting(vmx, vcpu));
	vmx->cap[vcpu].proc_ctls &= ~PROCBASED_NMI_WINDOW_EXITING;
	vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
}

/*
 * Set the TSC adjustment, taking into account the offsets measured between
 * host physical CPUs. This is required even if the guest has not set a TSC
 * offset since vCPUs inherit the TSC offset of whatever physical CPU it has
 * migrated onto. Without this mitigation, un-synched host TSCs will convey
 * the appearance of TSC time-travel to the guest as its vCPUs migrate.
 */
static void
vmx_apply_tsc_adjust(struct vmx *vmx, int vcpu)
{
	const uint64_t offset = vcpu_tsc_offset(vmx->vm, vcpu, true);

	ASSERT(vmx->cap[vcpu].proc_ctls & PROCBASED_TSC_OFFSET);

	if (vmx->tsc_offset_active[vcpu] != offset) {
		vmcs_write(VMCS_TSC_OFFSET, offset);
		vmx->tsc_offset_active[vcpu] = offset;
	}
}

#define	NMI_BLOCKING	(VMCS_INTERRUPTIBILITY_NMI_BLOCKING |	\
			VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)
#define	HWINTR_BLOCKING	(VMCS_INTERRUPTIBILITY_STI_BLOCKING |	\
			VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)

static void
vmx_inject_nmi(struct vmx *vmx, int vcpu)
{
	ASSERT0(vmcs_read(VMCS_GUEST_INTERRUPTIBILITY) & NMI_BLOCKING);
	ASSERT0(vmcs_read(VMCS_ENTRY_INTR_INFO) & VMCS_INTR_VALID);

	/*
	 * Inject the virtual NMI. The vector must be the NMI IDT entry
	 * or the VMCS entry check will fail.
	 */
	vmcs_write(VMCS_ENTRY_INTR_INFO,
	    IDT_NMI | VMCS_INTR_T_NMI | VMCS_INTR_VALID);

	/* Clear the request */
	vm_nmi_clear(vmx->vm, vcpu);
}

/*
 * Inject exceptions, NMIs, and ExtINTs.
 *
 * The logic behind these is complicated and may involve mutex contention, so
 * the injection is performed without the protection of host CPU interrupts
 * being disabled. This means a racing notification could be "lost",
 * necessitating a later call to vmx_inject_recheck() to close that window
 * of opportunity.
 */
static enum event_inject_state
vmx_inject_events(struct vmx *vmx, int vcpu, uint64_t rip)
{
	uint64_t entryinfo;
	uint32_t gi, info;
	int vector;
	enum event_inject_state state;

	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
	info = vmcs_read(VMCS_ENTRY_INTR_INFO);
	state = EIS_CAN_INJECT;

	/* Clear any interrupt blocking if the guest %rip has changed */
	if (vmx->state[vcpu].nextrip != rip && (gi & HWINTR_BLOCKING) != 0) {
		gi &= ~HWINTR_BLOCKING;
		vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
	}

	/*
	 * It could be that an interrupt is already pending for injection from
	 * the VMCS. This would be the case if the vCPU exited for conditions
	 * such as an AST before a vm-entry delivered the injection.
	 */
	if ((info & VMCS_INTR_VALID) != 0) {
		return (EIS_EV_EXISTING | EIS_REQ_EXIT);
	}

	if (vm_entry_intinfo(vmx->vm, vcpu, &entryinfo)) {
		ASSERT(entryinfo & VMCS_INTR_VALID);

		info = entryinfo;
		vector = info & 0xff;
		if (vector == IDT_BP || vector == IDT_OF) {
			/*
			 * VT-x requires #BP and #OF to be injected as software
			 * exceptions.
			 */
			info &= ~VMCS_INTR_T_MASK;
			info |= VMCS_INTR_T_SWEXCEPTION;
		}

		if (info & VMCS_INTR_DEL_ERRCODE) {
			vmcs_write(VMCS_ENTRY_EXCEPTION_ERROR, entryinfo >> 32);
		}

		vmcs_write(VMCS_ENTRY_INTR_INFO, info);
		state = EIS_EV_INJECTED;
	}

	if (vm_nmi_pending(vmx->vm, vcpu)) {
		/*
		 * If there are no conditions blocking NMI injection then
		 * inject it directly here; otherwise enable "NMI window
		 * exiting" to inject it as soon as we can.
		 *
		 * According to the Intel manual, some CPUs do not allow NMI
		 * injection when STI_BLOCKING is active. That check is
		 * enforced here, regardless of CPU capability. If running on a
		 * CPU without such a restriction it will immediately exit and
		 * the NMI will be injected in the "NMI window exiting" handler.
		 */
		if ((gi & (HWINTR_BLOCKING | NMI_BLOCKING)) == 0) {
			if (state == EIS_CAN_INJECT) {
				vmx_inject_nmi(vmx, vcpu);
				state = EIS_EV_INJECTED;
			} else {
				return (state | EIS_REQ_EXIT);
			}
		} else {
			vmx_set_nmi_window_exiting(vmx, vcpu);
		}
	}

	if (vm_extint_pending(vmx->vm, vcpu)) {
		if (state != EIS_CAN_INJECT) {
			return (state | EIS_REQ_EXIT);
		}
		if ((gi & HWINTR_BLOCKING) != 0 ||
		    (vmcs_read(VMCS_GUEST_RFLAGS) & PSL_I) == 0) {
			return (EIS_GI_BLOCK);
		}

		/* Ask the legacy pic for a vector to inject */
		vatpic_pending_intr(vmx->vm, &vector);

		/*
		 * From the Intel SDM, Volume 3, Section "Maskable
		 * Hardware Interrupts":
		 * - maskable interrupt vectors [0,255] can be delivered
		 *   through the INTR pin.
		 */
		KASSERT(vector >= 0 && vector <= 255,
		    ("invalid vector %d from INTR", vector));

		/* Inject the interrupt */
		vmcs_write(VMCS_ENTRY_INTR_INFO,
		    VMCS_INTR_T_HWINTR | VMCS_INTR_VALID | vector);

		vm_extint_clear(vmx->vm, vcpu);
		vatpic_intr_accepted(vmx->vm, vector);
		state = EIS_EV_INJECTED;
	}

	return (state);
}
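/*
 * The event_inject_state values returned above compose a base EIS_* state
 * with the EIS_REQ_EXIT flag where an immediate VM-exit must follow the
 * injection; vmx_inject_recheck() below tests that flag, and callers in the
 * VM-entry path are presumably expected to honor it as well.
 */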
/*
 * Inject any interrupts pending on the vLAPIC.
 *
 * This is done with host CPU interrupts disabled so notification IPIs, either
 * from the standard vCPU notification or APICv posted interrupts, will be
 * queued on the host APIC and recognized when entering VMX context.
 */
static enum event_inject_state
vmx_inject_vlapic(struct vmx *vmx, int vcpu, struct vlapic *vlapic)
{
	int vector;

	if (!vlapic_pending_intr(vlapic, &vector)) {
		return (EIS_CAN_INJECT);
	}

	/*
	 * From the Intel SDM, Volume 3, Section "Maskable
	 * Hardware Interrupts":
	 * - maskable interrupt vectors [16,255] can be delivered
	 *   through the local APIC.
	 */
	KASSERT(vector >= 16 && vector <= 255,
	    ("invalid vector %d from local APIC", vector));

	if (vmx_cap_en(vmx, VMX_CAP_APICV)) {
		uint16_t status_old = vmcs_read(VMCS_GUEST_INTR_STATUS);
		uint16_t status_new = (status_old & 0xff00) | vector;

		/*
		 * The APICv state will have been synced into the vLAPIC
		 * as part of vlapic_pending_intr(). Prepare the VMCS
		 * for the to-be-injected pending interrupt.
		 */
		if (status_new > status_old) {
			vmcs_write(VMCS_GUEST_INTR_STATUS, status_new);
			VCPU_CTR2(vlapic->vm, vlapic->vcpuid,
			    "vmx_inject_interrupts: guest_intr_status "
			    "changed from 0x%04x to 0x%04x",
			    status_old, status_new);
		}

		/*
		 * Ensure VMCS state regarding EOI traps is kept in sync
		 * with the TMRs in the vlapic.
		 */
		vmx_apicv_sync_tmr(vlapic);

		/*
		 * The rest of the injection process for injecting the
		 * interrupt(s) is handled by APICv. It does not preclude other
		 * event injection from occurring.
		 */
		return (EIS_CAN_INJECT);
	}

	ASSERT0(vmcs_read(VMCS_ENTRY_INTR_INFO) & VMCS_INTR_VALID);

	/* Does guest interruptibility block injection? */
	if ((vmcs_read(VMCS_GUEST_INTERRUPTIBILITY) & HWINTR_BLOCKING) != 0 ||
	    (vmcs_read(VMCS_GUEST_RFLAGS) & PSL_I) == 0) {
		return (EIS_GI_BLOCK);
	}

	/* Inject the interrupt */
	vmcs_write(VMCS_ENTRY_INTR_INFO,
	    VMCS_INTR_T_HWINTR | VMCS_INTR_VALID | vector);

	/* Update the Local APIC ISR */
	vlapic_intr_accepted(vlapic, vector);

	return (EIS_EV_INJECTED);
}
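/*
 * A note on the APICv path above: the 16-bit guest interrupt status field
 * holds RVI (requesting virtual interrupt) in its low byte and SVI
 * (servicing virtual interrupt) in its high byte, which is why the update
 * preserves (status_old & 0xff00) and substitutes only the pending vector.
 */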
/*
 * Re-check for events to be injected.
 *
 * Once host CPU interrupts are disabled, check for the presence of any events
 * which require injection processing. If an exit is required upon injection,
 * or once the guest becomes interruptible, that will be configured too.
 */
static bool
vmx_inject_recheck(struct vmx *vmx, int vcpu, enum event_inject_state state)
{
	if (state == EIS_CAN_INJECT) {
		if (vm_nmi_pending(vmx->vm, vcpu) &&
		    !vmx_nmi_window_exiting(vmx, vcpu)) {
			/* queued NMI not blocked by NMI-window-exiting */
			return (true);
		}
		if (vm_extint_pending(vmx->vm, vcpu)) {
			/* queued ExtINT not blocked by existing injection */
			return (true);
		}
	} else {
		if ((state & EIS_REQ_EXIT) != 0) {
			/*
			 * Use a self-IPI to force an immediate exit after
			 * event injection has occurred.
			 */
			poke_cpu(CPU->cpu_id);
		} else {
			/*
			 * If any event is being injected, an exit immediately
			 * upon becoming interruptible again will allow pending
			 * or newly queued events to be injected in a timely
			 * manner.
			 */
			vmx_set_int_window_exiting(vmx, vcpu);
		}
	}
	return (false);
}

/*
 * If the Virtual NMIs execution control is '1' then the logical processor
 * tracks virtual-NMI blocking in the Guest Interruptibility-state field of
 * the VMCS. An IRET instruction in VMX non-root operation will remove any
 * virtual-NMI blocking.
 *
 * This unblocking occurs even if the IRET causes a fault. In this case the
 * hypervisor needs to restore virtual-NMI blocking before resuming the guest.
 */
static void
vmx_restore_nmi_blocking(struct vmx *vmx, int vcpuid)
{
	uint32_t gi;

	VCPU_CTR0(vmx->vm, vcpuid, "Restore Virtual-NMI blocking");
	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
	gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
	vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
}

static void
vmx_clear_nmi_blocking(struct vmx *vmx, int vcpuid)
{
	uint32_t gi;

	VCPU_CTR0(vmx->vm, vcpuid, "Clear Virtual-NMI blocking");
	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
	gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
	vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
}

static void
vmx_assert_nmi_blocking(struct vmx *vmx, int vcpuid)
{
	uint32_t gi;

	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
	KASSERT(gi & VMCS_INTERRUPTIBILITY_NMI_BLOCKING,
	    ("NMI blocking is not in effect %x", gi));
}

static int
vmx_emulate_xsetbv(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
{
	struct vmxctx *vmxctx;
	uint64_t xcrval;
	const struct xsave_limits *limits;

	vmxctx = &vmx->ctx[vcpu];
	limits = vmm_get_xsave_limits();

	/*
	 * Note that the processor raises a #GP fault on its own if
	 * xsetbv is executed for CPL != 0, so we do not have to
	 * emulate that fault here.
	 */

	/* Only xcr0 is supported. */
	if (vmxctx->guest_rcx != 0) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/* We only handle xcr0 if both the host and guest have XSAVE enabled. */
	if (!limits->xsave_enabled ||
	    !(vmcs_read(VMCS_GUEST_CR4) & CR4_XSAVE)) {
		vm_inject_ud(vmx->vm, vcpu);
		return (HANDLED);
	}

	xcrval = vmxctx->guest_rdx << 32 | (vmxctx->guest_rax & 0xffffffff);
	if ((xcrval & ~limits->xcr0_allowed) != 0) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	if (!(xcrval & XFEATURE_ENABLED_X87)) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/* AVX (YMM_Hi128) requires SSE. */
	if (xcrval & XFEATURE_ENABLED_AVX &&
	    (xcrval & XFEATURE_AVX) != XFEATURE_AVX) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/*
	 * AVX512 requires base AVX (YMM_Hi128) as well as OpMask,
	 * ZMM_Hi256, and Hi16_ZMM.
	 */
	if (xcrval & XFEATURE_AVX512 &&
	    (xcrval & (XFEATURE_AVX512 | XFEATURE_AVX)) !=
	    (XFEATURE_AVX512 | XFEATURE_AVX)) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/*
	 * Intel MPX requires both bound register state flags to be
	 * set.
	 */
	if (((xcrval & XFEATURE_ENABLED_BNDREGS) != 0) !=
	    ((xcrval & XFEATURE_ENABLED_BNDCSR) != 0)) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/*
	 * This runs "inside" vmrun() with the guest's FPU state, so
	 * modifying xcr0 directly modifies the guest's xcr0, not the
	 * host's.
	 */
	load_xcr(0, xcrval);
	return (HANDLED);
}
static uint64_t
vmx_get_guest_reg(struct vmx *vmx, int vcpu, int ident)
{
	const struct vmxctx *vmxctx;

	vmxctx = &vmx->ctx[vcpu];

	switch (ident) {
	case 0:
		return (vmxctx->guest_rax);
	case 1:
		return (vmxctx->guest_rcx);
	case 2:
		return (vmxctx->guest_rdx);
	case 3:
		return (vmxctx->guest_rbx);
	case 4:
		return (vmcs_read(VMCS_GUEST_RSP));
	case 5:
		return (vmxctx->guest_rbp);
	case 6:
		return (vmxctx->guest_rsi);
	case 7:
		return (vmxctx->guest_rdi);
	case 8:
		return (vmxctx->guest_r8);
	case 9:
		return (vmxctx->guest_r9);
	case 10:
		return (vmxctx->guest_r10);
	case 11:
		return (vmxctx->guest_r11);
	case 12:
		return (vmxctx->guest_r12);
	case 13:
		return (vmxctx->guest_r13);
	case 14:
		return (vmxctx->guest_r14);
	case 15:
		return (vmxctx->guest_r15);
	default:
		panic("invalid vmx register %d", ident);
	}
}

static void
vmx_set_guest_reg(struct vmx *vmx, int vcpu, int ident, uint64_t regval)
{
	struct vmxctx *vmxctx;

	vmxctx = &vmx->ctx[vcpu];

	switch (ident) {
	case 0:
		vmxctx->guest_rax = regval;
		break;
	case 1:
		vmxctx->guest_rcx = regval;
		break;
	case 2:
		vmxctx->guest_rdx = regval;
		break;
	case 3:
		vmxctx->guest_rbx = regval;
		break;
	case 4:
		vmcs_write(VMCS_GUEST_RSP, regval);
		break;
	case 5:
		vmxctx->guest_rbp = regval;
		break;
	case 6:
		vmxctx->guest_rsi = regval;
		break;
	case 7:
		vmxctx->guest_rdi = regval;
		break;
	case 8:
		vmxctx->guest_r8 = regval;
		break;
	case 9:
		vmxctx->guest_r9 = regval;
		break;
	case 10:
		vmxctx->guest_r10 = regval;
		break;
	case 11:
		vmxctx->guest_r11 = regval;
		break;
	case 12:
		vmxctx->guest_r12 = regval;
		break;
	case 13:
		vmxctx->guest_r13 = regval;
		break;
	case 14:
		vmxctx->guest_r14 = regval;
		break;
	case 15:
		vmxctx->guest_r15 = regval;
		break;
	default:
		panic("invalid vmx register %d", ident);
	}
}
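/*
 * For the CR-access handlers below, the VM-exit qualification encodes the
 * control register number in bits 3:0, the access type in bits 5:4 (0 for
 * MOV to CR, 1 for MOV from CR), and the GPR operand in bits 11:8; hence
 * the (exitqual & 0xf0) == 0 tests for "mov to CR" and the
 * (exitqual >> 8) & 0xf register extraction used by the handlers.
 */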
static int
vmx_emulate_cr0_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
{
	uint64_t crval, regval;

	/* We only handle mov to %cr0 at this time */
	if ((exitqual & 0xf0) != 0x00)
		return (UNHANDLED);

	regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);

	vmcs_write(VMCS_CR0_SHADOW, regval);

	crval = regval | cr0_ones_mask;
	crval &= ~cr0_zeros_mask;

	const uint64_t old = vmcs_read(VMCS_GUEST_CR0);
	const uint64_t diff = crval ^ old;
	/* Flush the TLB if the paging or write-protect bits are changing */
	if ((diff & CR0_PG) != 0 || (diff & CR0_WP) != 0) {
		vmx_invvpid(vmx, vcpu, 1);
	}

	vmcs_write(VMCS_GUEST_CR0, crval);

	if (regval & CR0_PG) {
		uint64_t efer, entry_ctls;

		/*
		 * If CR0.PG is 1 and EFER.LME is 1 then EFER.LMA and
		 * the "IA-32e mode guest" bit in VM-entry control must be
		 * equal.
		 */
		efer = vmcs_read(VMCS_GUEST_IA32_EFER);
		if (efer & EFER_LME) {
			efer |= EFER_LMA;
			vmcs_write(VMCS_GUEST_IA32_EFER, efer);
			entry_ctls = vmcs_read(VMCS_ENTRY_CTLS);
			entry_ctls |= VM_ENTRY_GUEST_LMA;
			vmcs_write(VMCS_ENTRY_CTLS, entry_ctls);
		}
	}

	return (HANDLED);
}

static int
vmx_emulate_cr4_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
{
	uint64_t crval, regval;

	/* We only handle mov to %cr4 at this time */
	if ((exitqual & 0xf0) != 0x00)
		return (UNHANDLED);

	regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);

	vmcs_write(VMCS_CR4_SHADOW, regval);

	crval = regval | cr4_ones_mask;
	crval &= ~cr4_zeros_mask;
	vmcs_write(VMCS_GUEST_CR4, crval);

	return (HANDLED);
}

static int
vmx_emulate_cr8_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
{
	struct vlapic *vlapic;
	uint64_t cr8;
	int regnum;

	/* We only handle mov %cr8 to/from a register at this time. */
	if ((exitqual & 0xe0) != 0x00) {
		return (UNHANDLED);
	}

	vlapic = vm_lapic(vmx->vm, vcpu);
	regnum = (exitqual >> 8) & 0xf;
	if (exitqual & 0x10) {
		cr8 = vlapic_get_cr8(vlapic);
		vmx_set_guest_reg(vmx, vcpu, regnum, cr8);
	} else {
		cr8 = vmx_get_guest_reg(vmx, vcpu, regnum);
		vlapic_set_cr8(vlapic, cr8);
	}

	return (HANDLED);
}

/*
 * From section "Guest Register State" in the Intel SDM: CPL = SS.DPL
 */
static int
vmx_cpl(void)
{
	uint32_t ssar;

	ssar = vmcs_read(VMCS_GUEST_SS_ACCESS_RIGHTS);
	return ((ssar >> 5) & 0x3);
}

static enum vm_cpu_mode
vmx_cpu_mode(void)
{
	uint32_t csar;

	if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LMA) {
		csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS);
		if (csar & 0x2000)
			return (CPU_MODE_64BIT);	/* CS.L = 1 */
		else
			return (CPU_MODE_COMPATIBILITY);
	} else if (vmcs_read(VMCS_GUEST_CR0) & CR0_PE) {
		return (CPU_MODE_PROTECTED);
	} else {
		return (CPU_MODE_REAL);
	}
}

static enum vm_paging_mode
vmx_paging_mode(void)
{
	if (!(vmcs_read(VMCS_GUEST_CR0) & CR0_PG))
		return (PAGING_MODE_FLAT);
	if (!(vmcs_read(VMCS_GUEST_CR4) & CR4_PAE))
		return (PAGING_MODE_32);
	if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LME)
		return (PAGING_MODE_64);
	else
		return (PAGING_MODE_PAE);
}

static void
vmx_paging_info(struct vm_guest_paging *paging)
{
	paging->cr3 = vmcs_guest_cr3();
	paging->cpl = vmx_cpl();
	paging->cpu_mode = vmx_cpu_mode();
	paging->paging_mode = vmx_paging_mode();
}

static void
vmexit_mmio_emul(struct vm_exit *vmexit, struct vie *vie, uint64_t gpa,
    uint64_t gla)
{
	struct vm_guest_paging paging;
	uint32_t csar;

	vmexit->exitcode = VM_EXITCODE_MMIO_EMUL;
	vmexit->inst_length = 0;
	vmexit->u.mmio_emul.gpa = gpa;
	vmexit->u.mmio_emul.gla = gla;
	vmx_paging_info(&paging);

	switch (paging.cpu_mode) {
	case CPU_MODE_REAL:
		vmexit->u.mmio_emul.cs_base = vmcs_read(VMCS_GUEST_CS_BASE);
		vmexit->u.mmio_emul.cs_d = 0;
		break;
	case CPU_MODE_PROTECTED:
	case CPU_MODE_COMPATIBILITY:
		vmexit->u.mmio_emul.cs_base = vmcs_read(VMCS_GUEST_CS_BASE);
		csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS);
		vmexit->u.mmio_emul.cs_d = SEG_DESC_DEF32(csar);
		break;
	default:
		vmexit->u.mmio_emul.cs_base = 0;
		vmexit->u.mmio_emul.cs_d = 0;
		break;
	}

	vie_init_mmio(vie, NULL, 0, &paging, gpa);
}

static void
vmexit_inout(struct vm_exit *vmexit, struct vie *vie, uint64_t qual,
    uint32_t eax)
{
	struct vm_guest_paging paging;
	struct vm_inout *inout;

	inout = &vmexit->u.inout;

	inout->bytes = (qual & 0x7) + 1;
	inout->flags = 0;
	inout->flags |= (qual & 0x8) ? INOUT_IN : 0;
	inout->flags |= (qual & 0x10) ? INOUT_STR : 0;
	inout->flags |= (qual & 0x20) ? INOUT_REP : 0;
	inout->port = (uint16_t)(qual >> 16);
	inout->eax = eax;
	if (inout->flags & INOUT_STR) {
		uint64_t inst_info;

		inst_info = vmcs_read(VMCS_EXIT_INSTRUCTION_INFO);

		/*
		 * According to the SDM, bits 9:7 encode the address size of
		 * the ins/outs operation, but only values 0/1/2 are expected,
		 * corresponding to 16/32/64 bit sizes.
		 */
		inout->addrsize = 2 << BITX(inst_info, 9, 7);
		VERIFY(inout->addrsize == 2 || inout->addrsize == 4 ||
		    inout->addrsize == 8);

		if (inout->flags & INOUT_IN) {
			/*
			 * The bits describing the segment in INSTRUCTION_INFO
			 * are not defined for ins, leaving it to system
			 * software to assume %es (encoded as 0)
			 */
			inout->segment = 0;
		} else {
			/*
			 * Bits 15-17 encode the segment for OUTS.
			 * This value follows the standard x86 segment order.
			 */
			inout->segment = (inst_info >> 15) & 0x7;
		}
	}

	vmexit->exitcode = VM_EXITCODE_INOUT;
	vmx_paging_info(&paging);
	vie_init_inout(vie, inout, vmexit->inst_length, &paging);

	/* The in/out emulation will handle advancing %rip */
	vmexit->inst_length = 0;
}

static int
ept_fault_type(uint64_t ept_qual)
{
	int fault_type;

	if (ept_qual & EPT_VIOLATION_DATA_WRITE)
		fault_type = PROT_WRITE;
	else if (ept_qual & EPT_VIOLATION_INST_FETCH)
		fault_type = PROT_EXEC;
	else
		fault_type = PROT_READ;

	return (fault_type);
}

static bool
ept_emulation_fault(uint64_t ept_qual)
{
	int read, write;

	/* EPT fault on an instruction fetch doesn't make sense here */
	if (ept_qual & EPT_VIOLATION_INST_FETCH)
		return (false);

	/* EPT fault must be a read fault or a write fault */
	read = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0;
	write = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0;
	if ((read | write) == 0)
		return (false);

	/*
	 * The EPT violation must have been caused by accessing a
	 * guest-physical address that is a translation of a guest-linear
	 * address.
	 */
	if ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 ||
	    (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) {
		return (false);
	}

	return (true);
}

static __inline int
apic_access_virtualization(struct vmx *vmx, int vcpuid)
{
	uint32_t proc_ctls2;

	proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
	return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) ? 1 : 0);
}
static __inline int
x2apic_virtualization(struct vmx *vmx, int vcpuid)
{
	uint32_t proc_ctls2;

	proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
	return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_X2APIC_MODE) ? 1 : 0);
}

static int
vmx_handle_apic_write(struct vmx *vmx, int vcpuid, struct vlapic *vlapic,
    uint64_t qual)
{
	int handled, offset;
	uint32_t *apic_regs, vector;

	handled = HANDLED;
	offset = APIC_WRITE_OFFSET(qual);

	if (!apic_access_virtualization(vmx, vcpuid)) {
		/*
		 * In general there should not be any APIC write VM-exits
		 * unless APIC-access virtualization is enabled.
		 *
		 * However self-IPI virtualization can legitimately trigger
		 * an APIC-write VM-exit so treat it specially.
		 */
		if (x2apic_virtualization(vmx, vcpuid) &&
		    offset == APIC_OFFSET_SELF_IPI) {
			apic_regs = (uint32_t *)(vlapic->apic_page);
			vector = apic_regs[APIC_OFFSET_SELF_IPI / 4];
			vlapic_self_ipi_handler(vlapic, vector);
			return (HANDLED);
		} else {
			return (UNHANDLED);
		}
	}

	switch (offset) {
	case APIC_OFFSET_ID:
		vlapic_id_write_handler(vlapic);
		break;
	case APIC_OFFSET_LDR:
		vlapic_ldr_write_handler(vlapic);
		break;
	case APIC_OFFSET_DFR:
		vlapic_dfr_write_handler(vlapic);
		break;
	case APIC_OFFSET_SVR:
		vlapic_svr_write_handler(vlapic);
		break;
	case APIC_OFFSET_ESR:
		vlapic_esr_write_handler(vlapic);
		break;
	case APIC_OFFSET_ICR_LOW:
		if (vlapic_icrlo_write_handler(vlapic) != 0) {
			handled = UNHANDLED;
		}
		break;
	case APIC_OFFSET_CMCI_LVT:
	case APIC_OFFSET_TIMER_LVT ... APIC_OFFSET_ERROR_LVT:
		vlapic_lvt_write_handler(vlapic, offset);
		break;
	case APIC_OFFSET_TIMER_ICR:
		vlapic_icrtmr_write_handler(vlapic);
		break;
	case APIC_OFFSET_TIMER_DCR:
		vlapic_dcr_write_handler(vlapic);
		break;
	default:
		handled = UNHANDLED;
		break;
	}
	return (handled);
}

static bool
apic_access_fault(struct vmx *vmx, int vcpuid, uint64_t gpa)
{
	if (apic_access_virtualization(vmx, vcpuid) &&
	    (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE))
		return (true);
	else
		return (false);
}
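/*
 * For vmx_handle_apic_access() below: the APIC-access exit qualification
 * encodes the page offset of the access and its type, where type 0 is a
 * linear-address read and type 1 a linear-address write (per the SDM).
 * Other access types (instruction fetch, guest-physical accesses during
 * event delivery, etc.) fall through with 'allowed' left at 0.
 */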
static int
vmx_handle_apic_access(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit)
{
	uint64_t qual;
	int access_type, offset, allowed;
	struct vie *vie;

	if (!apic_access_virtualization(vmx, vcpuid))
		return (UNHANDLED);

	qual = vmexit->u.vmx.exit_qualification;
	access_type = APIC_ACCESS_TYPE(qual);
	offset = APIC_ACCESS_OFFSET(qual);

	allowed = 0;
	if (access_type == 0) {
		/*
		 * Read data access to the following registers is expected.
		 */
		switch (offset) {
		case APIC_OFFSET_APR:
		case APIC_OFFSET_PPR:
		case APIC_OFFSET_RRR:
		case APIC_OFFSET_CMCI_LVT:
		case APIC_OFFSET_TIMER_CCR:
			allowed = 1;
			break;
		default:
			break;
		}
	} else if (access_type == 1) {
		/*
		 * Write data access to the following registers is expected.
		 */
		switch (offset) {
		case APIC_OFFSET_VER:
		case APIC_OFFSET_APR:
		case APIC_OFFSET_PPR:
		case APIC_OFFSET_RRR:
		case APIC_OFFSET_ISR0 ... APIC_OFFSET_ISR7:
		case APIC_OFFSET_TMR0 ... APIC_OFFSET_TMR7:
		case APIC_OFFSET_IRR0 ... APIC_OFFSET_IRR7:
		case APIC_OFFSET_CMCI_LVT:
		case APIC_OFFSET_TIMER_CCR:
			allowed = 1;
			break;
		default:
			break;
		}
	}

	if (allowed) {
		vie = vm_vie_ctx(vmx->vm, vcpuid);
		vmexit_mmio_emul(vmexit, vie, DEFAULT_APIC_BASE + offset,
		    VIE_INVALID_GLA);
	}

	/*
	 * Regardless of whether the APIC-access is allowed this handler
	 * always returns UNHANDLED:
	 * - if the access is allowed then it is handled by emulating the
	 *   instruction that caused the VM-exit (outside the critical section)
	 * - if the access is not allowed then it will be converted to an
	 *   exitcode of VM_EXITCODE_VMX and will be dealt with in userland.
	 */
	return (UNHANDLED);
}
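/*
 * Illustrative sketch (not part of the driver): the exit qualification for
 * APIC-access VM-exits (SDM, "Exit Qualification for APIC-Access VM Exits")
 * carries the page offset in bits 11:0 and the access type in bits 15:12,
 * which is what the APIC_ACCESS_OFFSET()/APIC_ACCESS_TYPE() macros above
 * extract; type 0 is a linear-address data read, type 1 a data write.
 * Hypothetical decoder:
 */
static void
apic_access_decode(uint64_t qual, uint_t *offset, uint_t *type)
{
	*offset = qual & 0xfff;		/* bits 11:0 */
	*type = (qual >> 12) & 0xf;	/* bits 15:12 */
}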
static enum task_switch_reason
vmx_task_switch_reason(uint64_t qual)
{
	int reason;

	reason = (qual >> 30) & 0x3;
	switch (reason) {
	case 0:
		return (TSR_CALL);
	case 1:
		return (TSR_IRET);
	case 2:
		return (TSR_JMP);
	case 3:
		return (TSR_IDT_GATE);
	default:
		panic("%s: invalid reason %d", __func__, reason);
	}
}

static int
emulate_wrmsr(struct vmx *vmx, int vcpuid, uint_t num, uint64_t val)
{
	int error;

	if (lapic_msr(num))
		error = lapic_wrmsr(vmx->vm, vcpuid, num, val);
	else
		error = vmx_wrmsr(vmx, vcpuid, num, val);

	return (error);
}
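/*
 * Illustrative sketch (not part of the driver): vmx_task_switch_reason()
 * above keys off bits 31:30 of the exit qualification (SDM, "Exit
 * Qualification for Task Switch"): 0 = CALL, 1 = IRET, 2 = JMP, 3 = task
 * gate in the IDT; bits 15:0 hold the new TSS selector.  The raw decode,
 * shown without the panic guard and with hypothetical names:
 */
static void
task_switch_decode(uint64_t qual, uint16_t *tss_sel, uint_t *source)
{
	*tss_sel = qual & 0xffff;	/* selector of the target TSS */
	*source = (qual >> 30) & 0x3;	/* CALL/IRET/JMP/IDT gate */
}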
"external" : "internal", 2227 ((uint64_t)ts->errcode << 32) | ts->errcode_valid); 2228 break; 2229 case EXIT_REASON_CR_ACCESS: 2230 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CR_ACCESS, 1); 2231 SDT_PROBE4(vmm, vmx, exit, craccess, vmx, vcpu, vmexit, qual); 2232 switch (qual & 0xf) { 2233 case 0: 2234 handled = vmx_emulate_cr0_access(vmx, vcpu, qual); 2235 break; 2236 case 4: 2237 handled = vmx_emulate_cr4_access(vmx, vcpu, qual); 2238 break; 2239 case 8: 2240 handled = vmx_emulate_cr8_access(vmx, vcpu, qual); 2241 break; 2242 } 2243 break; 2244 case EXIT_REASON_RDMSR: 2245 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_RDMSR, 1); 2246 ecx = vmxctx->guest_rcx; 2247 VCPU_CTR1(vmx->vm, vcpu, "rdmsr 0x%08x", ecx); 2248 SDT_PROBE4(vmm, vmx, exit, rdmsr, vmx, vcpu, vmexit, ecx); 2249 error = emulate_rdmsr(vmx, vcpu, ecx); 2250 if (error == 0) { 2251 handled = HANDLED; 2252 } else if (error > 0) { 2253 vmexit->exitcode = VM_EXITCODE_RDMSR; 2254 vmexit->u.msr.code = ecx; 2255 } else { 2256 /* Return to userspace with a valid exitcode */ 2257 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, 2258 ("emulate_rdmsr retu with bogus exitcode")); 2259 } 2260 break; 2261 case EXIT_REASON_WRMSR: 2262 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_WRMSR, 1); 2263 eax = vmxctx->guest_rax; 2264 ecx = vmxctx->guest_rcx; 2265 edx = vmxctx->guest_rdx; 2266 VCPU_CTR2(vmx->vm, vcpu, "wrmsr 0x%08x value 0x%016lx", 2267 ecx, (uint64_t)edx << 32 | eax); 2268 SDT_PROBE5(vmm, vmx, exit, wrmsr, vmx, vmexit, vcpu, ecx, 2269 (uint64_t)edx << 32 | eax); 2270 error = emulate_wrmsr(vmx, vcpu, ecx, 2271 (uint64_t)edx << 32 | eax); 2272 if (error == 0) { 2273 handled = HANDLED; 2274 } else if (error > 0) { 2275 vmexit->exitcode = VM_EXITCODE_WRMSR; 2276 vmexit->u.msr.code = ecx; 2277 vmexit->u.msr.wval = (uint64_t)edx << 32 | eax; 2278 } else { 2279 /* Return to userspace with a valid exitcode */ 2280 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, 2281 ("emulate_wrmsr retu with bogus exitcode")); 2282 } 2283 break; 2284 case EXIT_REASON_HLT: 2285 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_HLT, 1); 2286 SDT_PROBE3(vmm, vmx, exit, halt, vmx, vcpu, vmexit); 2287 vmexit->exitcode = VM_EXITCODE_HLT; 2288 vmexit->u.hlt.rflags = vmcs_read(VMCS_GUEST_RFLAGS); 2289 break; 2290 case EXIT_REASON_MTF: 2291 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MTRAP, 1); 2292 SDT_PROBE3(vmm, vmx, exit, mtrap, vmx, vcpu, vmexit); 2293 vmexit->exitcode = VM_EXITCODE_MTRAP; 2294 vmexit->inst_length = 0; 2295 break; 2296 case EXIT_REASON_PAUSE: 2297 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_PAUSE, 1); 2298 SDT_PROBE3(vmm, vmx, exit, pause, vmx, vcpu, vmexit); 2299 vmexit->exitcode = VM_EXITCODE_PAUSE; 2300 break; 2301 case EXIT_REASON_INTR_WINDOW: 2302 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INTR_WINDOW, 1); 2303 SDT_PROBE3(vmm, vmx, exit, intrwindow, vmx, vcpu, vmexit); 2304 vmx_clear_int_window_exiting(vmx, vcpu); 2305 return (1); 2306 case EXIT_REASON_EXT_INTR: 2307 /* 2308 * External interrupts serve only to cause VM exits and allow 2309 * the host interrupt handler to run. 2310 * 2311 * If this external interrupt triggers a virtual interrupt 2312 * to a VM, then that state will be recorded by the 2313 * host interrupt handler in the VM's softc. We will inject 2314 * this virtual interrupt during the subsequent VM enter. 2315 */ 2316 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO); 2317 SDT_PROBE4(vmm, vmx, exit, interrupt, 2318 vmx, vcpu, vmexit, intr_info); 2319 2320 /* 2321 * XXX: Ignore this exit if VMCS_INTR_VALID is not set. 2322 * This appears to be a bug in VMware Fusion? 
static int
vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
{
	int error, errcode, errcode_valid, handled;
	struct vmxctx *vmxctx;
	struct vie *vie;
	struct vlapic *vlapic;
	struct vm_task_switch *ts;
	uint32_t eax, ecx, edx, idtvec_info, idtvec_err, intr_info;
	uint32_t intr_type, intr_vec, reason;
	uint64_t exitintinfo, qual, gpa;

	CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_VIRTUAL_NMI) != 0);
	CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_NMI_EXITING) != 0);

	handled = UNHANDLED;
	vmxctx = &vmx->ctx[vcpu];

	qual = vmexit->u.vmx.exit_qualification;
	reason = vmexit->u.vmx.exit_reason;
	vmexit->exitcode = VM_EXITCODE_BOGUS;

	vmm_stat_incr(vmx->vm, vcpu, VMEXIT_COUNT, 1);
	SDT_PROBE3(vmm, vmx, exit, entry, vmx, vcpu, vmexit);

	/*
	 * VM-entry failures during or after loading guest state.
	 *
	 * These VM-exits are uncommon but must be handled specially
	 * as most VM-exit fields are not populated as usual.
	 */
	if (reason == EXIT_REASON_MCE_DURING_ENTRY) {
		VCPU_CTR0(vmx->vm, vcpu, "Handling MCE during VM-entry");
		vmm_call_trap(T_MCE);
		return (1);
	}

	/*
	 * VM exits that can be triggered during event delivery need to
	 * be handled specially by re-injecting the event if the IDT
	 * vectoring information field's valid bit is set.
	 *
	 * See "Information for VM Exits During Event Delivery" in Intel SDM
	 * for details.
	 */
	idtvec_info = vmcs_idt_vectoring_info();
	if (idtvec_info & VMCS_IDT_VEC_VALID) {
		idtvec_info &= ~(1 << 12); /* clear undefined bit */
		exitintinfo = idtvec_info;
		if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) {
			idtvec_err = vmcs_idt_vectoring_err();
			exitintinfo |= (uint64_t)idtvec_err << 32;
		}
		error = vm_exit_intinfo(vmx->vm, vcpu, exitintinfo);
		KASSERT(error == 0, ("%s: vm_set_intinfo error %d",
		    __func__, error));

		/*
		 * If 'virtual NMIs' are being used and the VM-exit
		 * happened while injecting an NMI during the previous
		 * VM-entry, then clear "blocking by NMI" in the
		 * Guest Interruptibility-State so the NMI can be
		 * reinjected on the subsequent VM-entry.
		 *
		 * However, if the NMI was being delivered through a task
		 * gate, then the new task must start execution with NMIs
		 * blocked so don't clear NMI blocking in this case.
		 */
		intr_type = idtvec_info & VMCS_INTR_T_MASK;
		if (intr_type == VMCS_INTR_T_NMI) {
			if (reason != EXIT_REASON_TASK_SWITCH)
				vmx_clear_nmi_blocking(vmx, vcpu);
			else
				vmx_assert_nmi_blocking(vmx, vcpu);
		}

		/*
		 * Update VM-entry instruction length if the event being
		 * delivered was a software interrupt or software exception.
		 */
		if (intr_type == VMCS_INTR_T_SWINTR ||
		    intr_type == VMCS_INTR_T_PRIV_SWEXCEPTION ||
		    intr_type == VMCS_INTR_T_SWEXCEPTION) {
			vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length);
		}
	}

	switch (reason) {
	case EXIT_REASON_TASK_SWITCH:
		ts = &vmexit->u.task_switch;
		ts->tsssel = qual & 0xffff;
		ts->reason = vmx_task_switch_reason(qual);
		ts->ext = 0;
		ts->errcode_valid = 0;
		vmx_paging_info(&ts->paging);
		/*
		 * If the task switch was due to a CALL, JMP, IRET, software
		 * interrupt (INT n) or software exception (INT3, INTO),
		 * then the saved %rip references the instruction that caused
		 * the task switch. The instruction length field in the VMCS
		 * is valid in this case.
		 *
		 * In all other cases (e.g., NMI, hardware exception) the
		 * saved %rip is one that would have been saved in the old TSS
		 * had the task switch completed normally so the instruction
		 * length field is not needed in this case and is explicitly
		 * set to 0.
		 */
		if (ts->reason == TSR_IDT_GATE) {
			KASSERT(idtvec_info & VMCS_IDT_VEC_VALID,
			    ("invalid idtvec_info %x for IDT task switch",
			    idtvec_info));
			intr_type = idtvec_info & VMCS_INTR_T_MASK;
			if (intr_type != VMCS_INTR_T_SWINTR &&
			    intr_type != VMCS_INTR_T_SWEXCEPTION &&
			    intr_type != VMCS_INTR_T_PRIV_SWEXCEPTION) {
				/* Task switch triggered by external event */
				ts->ext = 1;
				vmexit->inst_length = 0;
				if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) {
					ts->errcode_valid = 1;
					ts->errcode = vmcs_idt_vectoring_err();
				}
			}
		}
		vmexit->exitcode = VM_EXITCODE_TASK_SWITCH;
		SDT_PROBE4(vmm, vmx, exit, taskswitch, vmx, vcpu, vmexit, ts);
		VCPU_CTR4(vmx->vm, vcpu, "task switch reason %d, tss 0x%04x, "
		    "%s errcode 0x%016lx", ts->reason, ts->tsssel,
		    ts->ext ? "external" : "internal",
		    ((uint64_t)ts->errcode << 32) | ts->errcode_valid);
		break;
	case EXIT_REASON_CR_ACCESS:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CR_ACCESS, 1);
		SDT_PROBE4(vmm, vmx, exit, craccess, vmx, vcpu, vmexit, qual);
		switch (qual & 0xf) {
		case 0:
			handled = vmx_emulate_cr0_access(vmx, vcpu, qual);
			break;
		case 4:
			handled = vmx_emulate_cr4_access(vmx, vcpu, qual);
			break;
		case 8:
			handled = vmx_emulate_cr8_access(vmx, vcpu, qual);
			break;
		}
		break;
	case EXIT_REASON_RDMSR:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_RDMSR, 1);
		ecx = vmxctx->guest_rcx;
		VCPU_CTR1(vmx->vm, vcpu, "rdmsr 0x%08x", ecx);
		SDT_PROBE4(vmm, vmx, exit, rdmsr, vmx, vcpu, vmexit, ecx);
		error = emulate_rdmsr(vmx, vcpu, ecx);
		if (error == 0) {
			handled = HANDLED;
		} else if (error > 0) {
			vmexit->exitcode = VM_EXITCODE_RDMSR;
			vmexit->u.msr.code = ecx;
		} else {
			/* Return to userspace with a valid exitcode */
			KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
			    ("emulate_rdmsr retu with bogus exitcode"));
		}
		break;
	case EXIT_REASON_WRMSR:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_WRMSR, 1);
		eax = vmxctx->guest_rax;
		ecx = vmxctx->guest_rcx;
		edx = vmxctx->guest_rdx;
		VCPU_CTR2(vmx->vm, vcpu, "wrmsr 0x%08x value 0x%016lx",
		    ecx, (uint64_t)edx << 32 | eax);
		SDT_PROBE5(vmm, vmx, exit, wrmsr, vmx, vmexit, vcpu, ecx,
		    (uint64_t)edx << 32 | eax);
		error = emulate_wrmsr(vmx, vcpu, ecx,
		    (uint64_t)edx << 32 | eax);
		if (error == 0) {
			handled = HANDLED;
		} else if (error > 0) {
			vmexit->exitcode = VM_EXITCODE_WRMSR;
			vmexit->u.msr.code = ecx;
			vmexit->u.msr.wval = (uint64_t)edx << 32 | eax;
		} else {
			/* Return to userspace with a valid exitcode */
			KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
			    ("emulate_wrmsr retu with bogus exitcode"));
		}
		break;
	case EXIT_REASON_HLT:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_HLT, 1);
		SDT_PROBE3(vmm, vmx, exit, halt, vmx, vcpu, vmexit);
		vmexit->exitcode = VM_EXITCODE_HLT;
		vmexit->u.hlt.rflags = vmcs_read(VMCS_GUEST_RFLAGS);
		break;
	case EXIT_REASON_MTF:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MTRAP, 1);
		SDT_PROBE3(vmm, vmx, exit, mtrap, vmx, vcpu, vmexit);
		vmexit->exitcode = VM_EXITCODE_MTRAP;
		vmexit->inst_length = 0;
		break;
	case EXIT_REASON_PAUSE:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_PAUSE, 1);
		SDT_PROBE3(vmm, vmx, exit, pause, vmx, vcpu, vmexit);
		vmexit->exitcode = VM_EXITCODE_PAUSE;
		break;
	case EXIT_REASON_INTR_WINDOW:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INTR_WINDOW, 1);
		SDT_PROBE3(vmm, vmx, exit, intrwindow, vmx, vcpu, vmexit);
		vmx_clear_int_window_exiting(vmx, vcpu);
		return (1);
	case EXIT_REASON_EXT_INTR:
		/*
		 * External interrupts serve only to cause VM exits and allow
		 * the host interrupt handler to run.
		 *
		 * If this external interrupt triggers a virtual interrupt
		 * to a VM, then that state will be recorded by the
		 * host interrupt handler in the VM's softc. We will inject
		 * this virtual interrupt during the subsequent VM enter.
		 */
		intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
		SDT_PROBE4(vmm, vmx, exit, interrupt,
		    vmx, vcpu, vmexit, intr_info);

		/*
		 * XXX: Ignore this exit if VMCS_INTR_VALID is not set.
		 * This appears to be a bug in VMware Fusion?
		 */
		if (!(intr_info & VMCS_INTR_VALID))
			return (1);
		KASSERT((intr_info & VMCS_INTR_VALID) != 0 &&
		    (intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_HWINTR,
		    ("VM exit interruption info invalid: %x", intr_info));
		vmx_trigger_hostintr(intr_info & 0xff);

		/*
		 * This is special. We want to treat this as a 'handled'
		 * VM-exit but not increment the instruction pointer.
		 */
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXTINT, 1);
		return (1);
	case EXIT_REASON_NMI_WINDOW:
		SDT_PROBE3(vmm, vmx, exit, nmiwindow, vmx, vcpu, vmexit);
		/* Exit to allow the pending virtual NMI to be injected */
		if (vm_nmi_pending(vmx->vm, vcpu))
			vmx_inject_nmi(vmx, vcpu);
		vmx_clear_nmi_window_exiting(vmx, vcpu);
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NMI_WINDOW, 1);
		return (1);
	case EXIT_REASON_INOUT:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INOUT, 1);
		vie = vm_vie_ctx(vmx->vm, vcpu);
		vmexit_inout(vmexit, vie, qual, (uint32_t)vmxctx->guest_rax);
		SDT_PROBE3(vmm, vmx, exit, inout, vmx, vcpu, vmexit);
		break;
	case EXIT_REASON_CPUID:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CPUID, 1);
		SDT_PROBE3(vmm, vmx, exit, cpuid, vmx, vcpu, vmexit);
		handled = vmx_handle_cpuid(vmx->vm, vcpu, vmxctx);
		break;
	case EXIT_REASON_EXCEPTION:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXCEPTION, 1);
		intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
		KASSERT((intr_info & VMCS_INTR_VALID) != 0,
		    ("VM exit interruption info invalid: %x", intr_info));

		intr_vec = intr_info & 0xff;
		intr_type = intr_info & VMCS_INTR_T_MASK;

		/*
		 * If the 'virtual NMIs' control is 1 and the VM-exit is due to
		 * a fault encountered during the execution of IRET then we
		 * must restore the state of "virtual-NMI blocking" before
		 * resuming the guest.
		 *
		 * See "Resuming Guest Software after Handling an Exception".
		 * See "Information for VM Exits Due to Vectored Events".
		 */
		if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 &&
		    (intr_vec != IDT_DF) &&
		    (intr_info & EXIT_QUAL_NMIUDTI) != 0)
			vmx_restore_nmi_blocking(vmx, vcpu);

		/*
		 * The NMI has already been handled in
		 * vmx_exit_handle_possible_nmi().
		 */
		if (intr_type == VMCS_INTR_T_NMI)
			return (1);

		/*
		 * Call the machine check handler by hand. Also don't reflect
		 * the machine check back into the guest.
		 */
		if (intr_vec == IDT_MC) {
			VCPU_CTR0(vmx->vm, vcpu, "Vectoring to MCE handler");
			vmm_call_trap(T_MCE);
			return (1);
		}

		/*
		 * If the hypervisor has requested user exits for
		 * debug exceptions, bounce them out to userland.
		 */
		if (intr_type == VMCS_INTR_T_SWEXCEPTION &&
		    intr_vec == IDT_BP &&
		    (vmx->cap[vcpu].set & (1 << VM_CAP_BPT_EXIT))) {
			vmexit->exitcode = VM_EXITCODE_BPT;
			vmexit->u.bpt.inst_length = vmexit->inst_length;
			vmexit->inst_length = 0;
			break;
		}

		if (intr_vec == IDT_PF) {
			vmxctx->guest_cr2 = qual;
		}

		/*
		 * Software exceptions exhibit trap-like behavior. This in
		 * turn requires populating the VM-entry instruction length
		 * so that the %rip in the trap frame is past the INT3/INTO
		 * instruction.
		 */
		if (intr_type == VMCS_INTR_T_SWEXCEPTION)
			vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length);

		/* Reflect all other exceptions back into the guest */
		errcode_valid = errcode = 0;
		if (intr_info & VMCS_INTR_DEL_ERRCODE) {
			errcode_valid = 1;
			errcode = vmcs_read(VMCS_EXIT_INTR_ERRCODE);
		}
		VCPU_CTR2(vmx->vm, vcpu, "Reflecting exception %d/%x into "
		    "the guest", intr_vec, errcode);
		SDT_PROBE5(vmm, vmx, exit, exception,
		    vmx, vcpu, vmexit, intr_vec, errcode);
		error = vm_inject_exception(vmx->vm, vcpu, intr_vec,
		    errcode_valid, errcode, 0);
		KASSERT(error == 0, ("%s: vm_inject_exception error %d",
		    __func__, error));
		return (1);

	case EXIT_REASON_EPT_FAULT:
		/*
		 * If 'gpa' lies within the address space allocated to
		 * memory then this must be a nested page fault; otherwise
		 * this must be an instruction that accesses MMIO space.
		 */
		gpa = vmcs_gpa();
		if (vm_mem_allocated(vmx->vm, vcpu, gpa) ||
		    apic_access_fault(vmx, vcpu, gpa)) {
			vmexit->exitcode = VM_EXITCODE_PAGING;
			vmexit->inst_length = 0;
			vmexit->u.paging.gpa = gpa;
			vmexit->u.paging.fault_type = ept_fault_type(qual);
			vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NESTED_FAULT, 1);
			SDT_PROBE5(vmm, vmx, exit, nestedfault,
			    vmx, vcpu, vmexit, gpa, qual);
		} else if (ept_emulation_fault(qual)) {
			vie = vm_vie_ctx(vmx->vm, vcpu);
			vmexit_mmio_emul(vmexit, vie, gpa, vmcs_gla());
			vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MMIO_EMUL, 1);
			SDT_PROBE4(vmm, vmx, exit, mmiofault,
			    vmx, vcpu, vmexit, gpa);
		}
		/*
		 * If the 'virtual NMIs' control is 1 and the VM-exit is due
		 * to an EPT fault during the execution of IRET then we must
		 * restore the state of "virtual-NMI blocking" before resuming.
		 *
		 * See description of "NMI unblocking due to IRET" in
		 * "Exit Qualification for EPT Violations".
		 */
		if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 &&
		    (qual & EXIT_QUAL_NMIUDTI) != 0)
			vmx_restore_nmi_blocking(vmx, vcpu);
		break;
	case EXIT_REASON_VIRTUALIZED_EOI:
		vmexit->exitcode = VM_EXITCODE_IOAPIC_EOI;
		vmexit->u.ioapic_eoi.vector = qual & 0xFF;
		SDT_PROBE3(vmm, vmx, exit, eoi, vmx, vcpu, vmexit);
		vmexit->inst_length = 0;	/* trap-like */
		break;
	case EXIT_REASON_APIC_ACCESS:
		SDT_PROBE3(vmm, vmx, exit, apicaccess, vmx, vcpu, vmexit);
		handled = vmx_handle_apic_access(vmx, vcpu, vmexit);
		break;
	case EXIT_REASON_APIC_WRITE:
		/*
		 * APIC-write VM exit is trap-like so the %rip is already
		 * pointing to the next instruction.
		 */
		vmexit->inst_length = 0;
		vlapic = vm_lapic(vmx->vm, vcpu);
		SDT_PROBE4(vmm, vmx, exit, apicwrite,
		    vmx, vcpu, vmexit, vlapic);
		handled = vmx_handle_apic_write(vmx, vcpu, vlapic, qual);
		break;
	case EXIT_REASON_XSETBV:
		SDT_PROBE3(vmm, vmx, exit, xsetbv, vmx, vcpu, vmexit);
		handled = vmx_emulate_xsetbv(vmx, vcpu, vmexit);
		break;
	case EXIT_REASON_MONITOR:
		SDT_PROBE3(vmm, vmx, exit, monitor, vmx, vcpu, vmexit);
		vmexit->exitcode = VM_EXITCODE_MONITOR;
		break;
	case EXIT_REASON_MWAIT:
		SDT_PROBE3(vmm, vmx, exit, mwait, vmx, vcpu, vmexit);
		vmexit->exitcode = VM_EXITCODE_MWAIT;
		break;
	case EXIT_REASON_TPR:
		vlapic = vm_lapic(vmx->vm, vcpu);
		vlapic_sync_tpr(vlapic);
		vmexit->inst_length = 0;
		handled = HANDLED;
		break;
	case EXIT_REASON_VMCALL:
	case EXIT_REASON_VMCLEAR:
	case EXIT_REASON_VMLAUNCH:
	case EXIT_REASON_VMPTRLD:
	case EXIT_REASON_VMPTRST:
	case EXIT_REASON_VMREAD:
	case EXIT_REASON_VMRESUME:
	case EXIT_REASON_VMWRITE:
	case EXIT_REASON_VMXOFF:
	case EXIT_REASON_VMXON:
		SDT_PROBE3(vmm, vmx, exit, vminsn, vmx, vcpu, vmexit);
		vmexit->exitcode = VM_EXITCODE_VMINSN;
		break;
	default:
		SDT_PROBE4(vmm, vmx, exit, unknown,
		    vmx, vcpu, vmexit, reason);
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_UNKNOWN, 1);
		break;
	}

	if (handled) {
		/*
		 * It is possible that control is returned to userland
		 * even though we were able to handle the VM exit in the
		 * kernel.
		 *
		 * In such a case we want to make sure that the userland
		 * restarts guest execution at the instruction *after*
		 * the one we just processed. Therefore we update the
		 * guest rip in the VMCS and in 'vmexit'.
		 */
		vmexit->rip += vmexit->inst_length;
		vmexit->inst_length = 0;
		vmcs_write(VMCS_GUEST_RIP, vmexit->rip);
	} else {
		if (vmexit->exitcode == VM_EXITCODE_BOGUS) {
			/*
			 * If this VM exit was not claimed by anybody then
			 * treat it as a generic VMX exit.
			 */
			vmexit->exitcode = VM_EXITCODE_VMX;
			vmexit->u.vmx.status = VM_SUCCESS;
			vmexit->u.vmx.inst_type = 0;
			vmexit->u.vmx.inst_error = 0;
		} else {
			/*
			 * The exitcode and collateral have been populated.
			 * The VM exit will be processed further in userland.
			 */
		}
	}

	SDT_PROBE4(vmm, vmx, exit, return,
	    vmx, vcpu, vmexit, handled);
	return (handled);
}

static void
vmx_exit_inst_error(struct vmxctx *vmxctx, int rc, struct vm_exit *vmexit)
{

	KASSERT(vmxctx->inst_fail_status != VM_SUCCESS,
	    ("vmx_exit_inst_error: invalid inst_fail_status %d",
	    vmxctx->inst_fail_status));

	vmexit->inst_length = 0;
	vmexit->exitcode = VM_EXITCODE_VMX;
	vmexit->u.vmx.status = vmxctx->inst_fail_status;
	vmexit->u.vmx.inst_error = vmcs_instruction_error();
	vmexit->u.vmx.exit_reason = ~0;
	vmexit->u.vmx.exit_qualification = ~0;

	switch (rc) {
	case VMX_VMRESUME_ERROR:
	case VMX_VMLAUNCH_ERROR:
	case VMX_INVEPT_ERROR:
	case VMX_VMWRITE_ERROR:
		vmexit->u.vmx.inst_type = rc;
		break;
	default:
		panic("vm_exit_inst_error: vmx_enter_guest returned %d", rc);
	}
}
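/*
 * Illustrative sketch (not part of the driver): when a VM-exit is fully
 * handled in the kernel, vmx_exit_process() above advances the guest %rip
 * past the exiting instruction so that any return to userland restarts the
 * guest at the *next* instruction.  The pattern in isolation, assuming the
 * vCPU's VMCS is currently loaded; the function name is hypothetical:
 */
static void
advance_guest_rip(struct vm_exit *vmexit)
{
	vmexit->rip += vmexit->inst_length;
	vmexit->inst_length = 0;
	vmcs_write(VMCS_GUEST_RIP, vmexit->rip);
}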
/*
 * If the NMI-exiting VM execution control is set to '1' then an NMI in
 * non-root operation causes a VM-exit. NMI blocking is in effect, so it is
 * sufficient to simply vector to the NMI handler via a software interrupt.
 * However, this must be done before maskable interrupts are enabled;
 * otherwise the "iret" issued by an interrupt handler will incorrectly
 * clear NMI blocking.
 */
static __inline void
vmx_exit_handle_possible_nmi(struct vm_exit *vmexit)
{
	ASSERT(!interrupts_enabled());

	if (vmexit->u.vmx.exit_reason == EXIT_REASON_EXCEPTION) {
		uint32_t intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
		ASSERT(intr_info & VMCS_INTR_VALID);

		if ((intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_NMI) {
			ASSERT3U(intr_info & 0xff, ==, IDT_NMI);
			vmm_call_trap(T_NMIFLT);
		}
	}
}

static __inline void
vmx_dr_enter_guest(struct vmxctx *vmxctx)
{
	uint64_t rflags;

	/* Save host control debug registers. */
	vmxctx->host_dr7 = rdr7();
	vmxctx->host_debugctl = rdmsr(MSR_DEBUGCTLMSR);

	/*
	 * Disable debugging in DR7 and DEBUGCTL to avoid triggering
	 * exceptions in the host based on the guest DRx values. The
	 * guest DR7 and DEBUGCTL are saved/restored in the VMCS.
	 */
	load_dr7(0);
	wrmsr(MSR_DEBUGCTLMSR, 0);

	/*
	 * Disable single stepping the kernel to avoid corrupting the
	 * guest DR6. A debugger might still be able to corrupt the
	 * guest DR6 by setting a breakpoint after this point and then
	 * single stepping.
	 */
	rflags = read_rflags();
	vmxctx->host_tf = rflags & PSL_T;
	write_rflags(rflags & ~PSL_T);

	/* Save host debug registers. */
	vmxctx->host_dr0 = rdr0();
	vmxctx->host_dr1 = rdr1();
	vmxctx->host_dr2 = rdr2();
	vmxctx->host_dr3 = rdr3();
	vmxctx->host_dr6 = rdr6();

	/* Restore guest debug registers. */
	load_dr0(vmxctx->guest_dr0);
	load_dr1(vmxctx->guest_dr1);
	load_dr2(vmxctx->guest_dr2);
	load_dr3(vmxctx->guest_dr3);
	load_dr6(vmxctx->guest_dr6);
}
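/*
 * Illustrative sketch (not part of the driver): vmx_dr_enter_guest() above
 * clears PSL_T (the x86 trap flag) so a single-stepped host kernel cannot
 * corrupt the guest's %dr6, and vmx_dr_leave_guest() below ORs the saved
 * bit back in.  The save/mask half of that pairing in isolation, with a
 * hypothetical name:
 */
static uint64_t
tf_save_and_clear(void)
{
	uint64_t rflags = read_rflags();

	write_rflags(rflags & ~PSL_T);
	return (rflags & PSL_T);	/* caller ORs this back on the way out */
}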
static __inline void
vmx_dr_leave_guest(struct vmxctx *vmxctx)
{

	/* Save guest debug registers. */
	vmxctx->guest_dr0 = rdr0();
	vmxctx->guest_dr1 = rdr1();
	vmxctx->guest_dr2 = rdr2();
	vmxctx->guest_dr3 = rdr3();
	vmxctx->guest_dr6 = rdr6();

	/*
	 * Restore host debug registers. Restore DR7, DEBUGCTL, and
	 * PSL_T last.
	 */
	load_dr0(vmxctx->host_dr0);
	load_dr1(vmxctx->host_dr1);
	load_dr2(vmxctx->host_dr2);
	load_dr3(vmxctx->host_dr3);
	load_dr6(vmxctx->host_dr6);
	wrmsr(MSR_DEBUGCTLMSR, vmxctx->host_debugctl);
	load_dr7(vmxctx->host_dr7);
	write_rflags(read_rflags() | vmxctx->host_tf);
}

static int
vmx_run(void *arg, int vcpu, uint64_t rip)
{
	int rc, handled, launched;
	struct vmx *vmx;
	struct vm *vm;
	struct vmxctx *vmxctx;
	uintptr_t vmcs_pa;
	struct vm_exit *vmexit;
	struct vlapic *vlapic;
	uint32_t exit_reason;
	bool tpr_shadow_active;
	vm_client_t *vmc;

	vmx = arg;
	vm = vmx->vm;
	vmcs_pa = vmx->vmcs_pa[vcpu];
	vmxctx = &vmx->ctx[vcpu];
	vlapic = vm_lapic(vm, vcpu);
	vmexit = vm_exitinfo(vm, vcpu);
	vmc = vm_get_vmclient(vm, vcpu);
	launched = 0;
	tpr_shadow_active = vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW) &&
	    !vmx_cap_en(vmx, VMX_CAP_APICV) &&
	    (vmx->cap[vcpu].proc_ctls & PROCBASED_USE_TPR_SHADOW) != 0;

	vmx_msr_guest_enter(vmx, vcpu);

	vmcs_load(vmcs_pa);

	VERIFY(vmx->vmcs_state[vcpu] == VS_NONE && curthread->t_preempt != 0);
	vmx->vmcs_state[vcpu] = VS_LOADED;

	/*
	 * XXX
	 * We do this every time because we may set up the virtual machine
	 * from a different process than the one that actually runs it.
	 *
	 * If the life of a virtual machine was spent entirely in the context
	 * of a single process we could do this once in vmx_vminit().
	 */
	vmcs_write(VMCS_HOST_CR3, rcr3());

	vmcs_write(VMCS_GUEST_RIP, rip);
	vmx_set_pcpu_defaults(vmx, vcpu);
	do {
		enum event_inject_state inject_state;
		uint64_t eptgen;

		KASSERT(vmcs_guest_rip() == rip, ("%s: vmcs guest rip mismatch "
		    "%lx/%lx", __func__, vmcs_guest_rip(), rip));

		handled = UNHANDLED;

		/*
		 * Perform initial event/exception/interrupt injection before
		 * host CPU interrupts are disabled.
		 */
		inject_state = vmx_inject_events(vmx, vcpu, rip);

		/*
		 * Interrupts are disabled from this point on until the
		 * guest starts executing. This is done for the following
		 * reasons:
		 *
		 * If an AST is asserted on this thread after the check below,
		 * then the IPI_AST notification will not be lost, because it
		 * will cause a VM exit due to external interrupt as soon as
		 * the guest state is loaded.
		 *
		 * A posted interrupt after vmx_inject_vlapic() will not be
		 * "lost" because it will be held pending in the host APIC
		 * because interrupts are disabled. The pending interrupt will
		 * be recognized as soon as the guest state is loaded.
		 *
		 * The same reasoning applies to the IPI generated by vmspace
		 * invalidation.
		 */
		disable_intr();

		/*
		 * If not precluded by existing events, inject any interrupt
		 * pending on the vLAPIC. As a lock-less operation, it is safe
		 * (and prudent) to perform with host CPU interrupts disabled.
		 */
		if (inject_state == EIS_CAN_INJECT) {
			inject_state = vmx_inject_vlapic(vmx, vcpu, vlapic);
		}

		/*
		 * Check for vCPU bail-out conditions. This must be done after
		 * vmx_inject_events() to detect a triple-fault condition.
		 */
		if (vcpu_entry_bailout_checks(vmx->vm, vcpu, rip)) {
			enable_intr();
			break;
		}

		if (vcpu_run_state_pending(vm, vcpu)) {
			enable_intr();
			vm_exit_run_state(vmx->vm, vcpu, rip);
			break;
		}

		/*
		 * If subsequent activity queued events which require injection
		 * handling, take another lap to handle them.
		 */
		if (vmx_inject_recheck(vmx, vcpu, inject_state)) {
			enable_intr();
			handled = HANDLED;
			continue;
		}

		if ((rc = smt_acquire()) != 1) {
			enable_intr();
			vmexit->rip = rip;
			vmexit->inst_length = 0;
			if (rc == -1) {
				vmexit->exitcode = VM_EXITCODE_HT;
			} else {
				vmexit->exitcode = VM_EXITCODE_BOGUS;
				handled = HANDLED;
			}
			break;
		}

		/*
		 * If this thread has gone off-cpu due to mutex operations
		 * during vmx_run, the VMCS will have been unloaded, forcing a
		 * re-VMLAUNCH as opposed to VMRESUME.
		 */
		launched = (vmx->vmcs_state[vcpu] & VS_LAUNCHED) != 0;
		/*
		 * Restoration of the GDT limit is taken care of by
		 * vmx_savectx(). Since the maximum practical index for the
		 * IDT is 255, restoring its limits from the post-VMX-exit
		 * default of 0xffff is not a concern.
		 *
		 * Only 64-bit hypervisor callers are allowed, which forgoes
		 * the need to restore any LDT descriptor. Toss an error to
		 * anyone attempting to break that rule.
		 */
		if (curproc->p_model != DATAMODEL_LP64) {
			smt_release();
			enable_intr();
			bzero(vmexit, sizeof (*vmexit));
			vmexit->rip = rip;
			vmexit->exitcode = VM_EXITCODE_VMX;
			vmexit->u.vmx.status = VM_FAIL_INVALID;
			handled = UNHANDLED;
			break;
		}

		if (tpr_shadow_active) {
			vmx_tpr_shadow_enter(vlapic);
		}

		/*
		 * Indicate activation of vmspace (EPT) table just prior to VMX
		 * entry, checking for the necessity of an invept invalidation.
		 */
		eptgen = vmc_table_enter(vmc);
		if (vmx->eptgen[curcpu] != eptgen) {
			/*
			 * VMspace generation does not match what was previously
			 * used on this host CPU, so all mappings associated
			 * with this EP4TA must be invalidated.
			 */
			invept(1, vmx->eptp);
			vmx->eptgen[curcpu] = eptgen;
		}

		vmx_run_trace(vmx, vcpu);
		vcpu_ustate_change(vm, vcpu, VU_RUN);
		vmx_dr_enter_guest(vmxctx);

		/* Perform VMX entry */
		rc = vmx_enter_guest(vmxctx, vmx, launched);

		vmx_dr_leave_guest(vmxctx);
		vcpu_ustate_change(vm, vcpu, VU_EMU_KERN);

		vmx->vmcs_state[vcpu] |= VS_LAUNCHED;
		smt_release();

		if (tpr_shadow_active) {
			vmx_tpr_shadow_exit(vlapic);
		}

		/* Collect some information for VM exit processing */
		vmexit->rip = rip = vmcs_guest_rip();
		vmexit->inst_length = vmexit_instruction_length();
		vmexit->u.vmx.exit_reason = exit_reason = vmcs_exit_reason();
		vmexit->u.vmx.exit_qualification = vmcs_exit_qualification();
		/* Update 'nextrip' */
		vmx->state[vcpu].nextrip = rip;

		if (rc == VMX_GUEST_VMEXIT) {
			vmx_exit_handle_possible_nmi(vmexit);
		}
		enable_intr();
		vmc_table_exit(vmc);

		if (rc == VMX_GUEST_VMEXIT) {
			handled = vmx_exit_process(vmx, vcpu, vmexit);
		} else {
			vmx_exit_inst_error(vmxctx, rc, vmexit);
		}
		DTRACE_PROBE3(vmm__vexit, int, vcpu, uint64_t, rip,
		    uint32_t, exit_reason);
		rip = vmexit->rip;
	} while (handled);

	/* If a VM exit has been handled then the exitcode must be BOGUS */
	if (handled && vmexit->exitcode != VM_EXITCODE_BOGUS) {
		panic("Non-BOGUS exitcode (%d) unexpected for handled VM exit",
		    vmexit->exitcode);
	}

	VCPU_CTR1(vm, vcpu, "returning from vmx_run: exitcode %d",
	    vmexit->exitcode);

	vmcs_clear(vmcs_pa);
	vmx_msr_guest_exit(vmx, vcpu);

	VERIFY(vmx->vmcs_state[vcpu] != VS_NONE && curthread->t_preempt != 0);
	vmx->vmcs_state[vcpu] = VS_NONE;

	return (0);
}

static void
vmx_vmcleanup(void *arg)
{
	int i;
	struct vmx *vmx = arg;
	uint16_t maxcpus;

	if (vmx_cap_en(vmx, VMX_CAP_APICV)) {
		vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE);
		kmem_free(vmx->apic_access_page, PAGESIZE);
	} else {
		VERIFY3P(vmx->apic_access_page, ==, NULL);
	}

	vmx_msr_bitmap_destroy(vmx);

	maxcpus = vm_get_maxcpus(vmx->vm);
	for (i = 0; i < maxcpus; i++)
		vpid_free(vmx->state[i].vpid);

	free(vmx, M_VMX);
}
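/*
 * Illustrative sketch (not part of the driver): the eptgen comparison in
 * vmx_run() above is a generation-counter scheme.  The vmspace bumps a
 * per-address-space generation whenever mappings change, and each host CPU
 * remembers the last generation it entered the guest with; a mismatch means
 * cached EPT translations for this EP4TA may be stale and must be flushed
 * before entry.  The pattern with hypothetical names:
 */
static void
ept_sync_if_stale(uint64_t *cpu_gen, uint64_t cur_gen, uint64_t eptp)
{
	if (*cpu_gen != cur_gen) {
		invept(1, eptp);	/* single-context invalidation */
		*cpu_gen = cur_gen;
	}
}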
static uint64_t *
vmxctx_regptr(struct vmxctx *vmxctx, int reg)
{
	switch (reg) {
	case VM_REG_GUEST_RAX:
		return (&vmxctx->guest_rax);
	case VM_REG_GUEST_RBX:
		return (&vmxctx->guest_rbx);
	case VM_REG_GUEST_RCX:
		return (&vmxctx->guest_rcx);
	case VM_REG_GUEST_RDX:
		return (&vmxctx->guest_rdx);
	case VM_REG_GUEST_RSI:
		return (&vmxctx->guest_rsi);
	case VM_REG_GUEST_RDI:
		return (&vmxctx->guest_rdi);
	case VM_REG_GUEST_RBP:
		return (&vmxctx->guest_rbp);
	case VM_REG_GUEST_R8:
		return (&vmxctx->guest_r8);
	case VM_REG_GUEST_R9:
		return (&vmxctx->guest_r9);
	case VM_REG_GUEST_R10:
		return (&vmxctx->guest_r10);
	case VM_REG_GUEST_R11:
		return (&vmxctx->guest_r11);
	case VM_REG_GUEST_R12:
		return (&vmxctx->guest_r12);
	case VM_REG_GUEST_R13:
		return (&vmxctx->guest_r13);
	case VM_REG_GUEST_R14:
		return (&vmxctx->guest_r14);
	case VM_REG_GUEST_R15:
		return (&vmxctx->guest_r15);
	case VM_REG_GUEST_CR2:
		return (&vmxctx->guest_cr2);
	case VM_REG_GUEST_DR0:
		return (&vmxctx->guest_dr0);
	case VM_REG_GUEST_DR1:
		return (&vmxctx->guest_dr1);
	case VM_REG_GUEST_DR2:
		return (&vmxctx->guest_dr2);
	case VM_REG_GUEST_DR3:
		return (&vmxctx->guest_dr3);
	case VM_REG_GUEST_DR6:
		return (&vmxctx->guest_dr6);
	default:
		break;
	}
	return (NULL);
}

static int
vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval)
{
	int running, hostcpu, err;
	struct vmx *vmx = arg;
	uint64_t *regp;

	running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
	if (running && hostcpu != curcpu)
		panic("vmx_getreg: %s%d is running", vm_name(vmx->vm), vcpu);

	/* VMCS access not required for ctx reads */
	if ((regp = vmxctx_regptr(&vmx->ctx[vcpu], reg)) != NULL) {
		*retval = *regp;
		return (0);
	}

	if (!running) {
		vmcs_load(vmx->vmcs_pa[vcpu]);
	}

	err = 0;
	if (reg == VM_REG_GUEST_INTR_SHADOW) {
		uint64_t gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
		*retval = (gi & HWINTR_BLOCKING) ? 1 : 0;
	} else {
		uint32_t encoding;

		encoding = vmcs_field_encoding(reg);
		switch (encoding) {
		case VMCS_GUEST_CR0:
			/* Take the shadow bits into account */
			*retval = vmx_unshadow_cr0(vmcs_read(encoding),
			    vmcs_read(VMCS_CR0_SHADOW));
			break;
		case VMCS_GUEST_CR4:
			/* Take the shadow bits into account */
			*retval = vmx_unshadow_cr4(vmcs_read(encoding),
			    vmcs_read(VMCS_CR4_SHADOW));
			break;
		case VMCS_INVALID_ENCODING:
			err = EINVAL;
			break;
		default:
			*retval = vmcs_read(encoding);
			break;
		}
	}

	if (!running) {
		vmcs_clear(vmx->vmcs_pa[vcpu]);
	}

	return (err);
}
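/*
 * Illustrative sketch (not part of the driver): vmx_getreg() above undoes
 * %cr0/%cr4 shadowing when reporting register state.  Architecturally (SDM,
 * "Guest/Host Masks and Read Shadows for CR0 and CR4"), guest reads return
 * the read-shadow value for every bit set in the guest/host mask and the
 * real register value for the rest.  A hypothetical composition, assuming
 * the mask is at hand (the actual vmx_unshadow_cr0()/cr4() helpers live
 * elsewhere in the driver):
 */
static uint64_t
cr_guest_view(uint64_t real, uint64_t shadow, uint64_t guest_host_mask)
{
	return ((shadow & guest_host_mask) | (real & ~guest_host_mask));
}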
static int
vmx_setreg(void *arg, int vcpu, int reg, uint64_t val)
{
	int running, hostcpu, error;
	struct vmx *vmx = arg;
	uint64_t *regp;

	running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
	if (running && hostcpu != curcpu)
		panic("vmx_setreg: %s%d is running", vm_name(vmx->vm), vcpu);

	/* VMCS access not required for ctx writes */
	if ((regp = vmxctx_regptr(&vmx->ctx[vcpu], reg)) != NULL) {
		*regp = val;
		return (0);
	}

	if (!running) {
		vmcs_load(vmx->vmcs_pa[vcpu]);
	}

	if (reg == VM_REG_GUEST_INTR_SHADOW) {
		if (val != 0) {
			/*
			 * Forcing the vcpu into an interrupt shadow is not
			 * presently supported.
			 */
			error = EINVAL;
		} else {
			uint64_t gi;

			gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
			gi &= ~HWINTR_BLOCKING;
			vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
			error = 0;
		}
	} else {
		uint32_t encoding;

		error = 0;
		encoding = vmcs_field_encoding(reg);
		switch (encoding) {
		case VMCS_GUEST_IA32_EFER:
			/*
			 * If the "load EFER" VM-entry control is 1 then the
			 * value of EFER.LMA must be identical to "IA-32e mode
			 * guest" bit in the VM-entry control.
			 */
			if ((entry_ctls & VM_ENTRY_LOAD_EFER) != 0) {
				uint64_t ctls;

				ctls = vmcs_read(VMCS_ENTRY_CTLS);
				if (val & EFER_LMA) {
					ctls |= VM_ENTRY_GUEST_LMA;
				} else {
					ctls &= ~VM_ENTRY_GUEST_LMA;
				}
				vmcs_write(VMCS_ENTRY_CTLS, ctls);
			}
			vmcs_write(encoding, val);
			break;
		case VMCS_GUEST_CR0:
			/*
			 * The guest is not allowed to modify certain bits in
			 * %cr0 and %cr4. To maintain the illusion of full
			 * control, they have shadow versions which contain the
			 * guest-perceived (via reads from the register) values
			 * as opposed to the guest-effective values.
			 *
			 * This is detailed in the SDM: Vol. 3 Ch. 24.6.6.
			 */
			vmcs_write(VMCS_CR0_SHADOW, val);
			vmcs_write(encoding, vmx_fix_cr0(val));
			break;
		case VMCS_GUEST_CR4:
			/* See above for detail on %cr4 shadowing */
			vmcs_write(VMCS_CR4_SHADOW, val);
			vmcs_write(encoding, vmx_fix_cr4(val));
			break;
		case VMCS_GUEST_CR3:
			vmcs_write(encoding, val);
			/*
			 * Invalidate the guest vcpu's TLB mappings to emulate
			 * the behavior of updating %cr3.
			 *
			 * XXX the processor retains global mappings when %cr3
			 * is updated but vmx_invvpid() does not.
			 */
			vmx_invvpid(vmx, vcpu, running);
			break;
		case VMCS_INVALID_ENCODING:
			error = EINVAL;
			break;
		default:
			vmcs_write(encoding, val);
			break;
		}
	}

	if (!running) {
		vmcs_clear(vmx->vmcs_pa[vcpu]);
	}

	return (error);
}

static int
vmx_getdesc(void *arg, int vcpu, int seg, struct seg_desc *desc)
{
	int hostcpu, running;
	struct vmx *vmx = arg;
	uint32_t base, limit, access;

	running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
	if (running && hostcpu != curcpu)
		panic("vmx_getdesc: %s%d is running", vm_name(vmx->vm), vcpu);

	if (!running) {
		vmcs_load(vmx->vmcs_pa[vcpu]);
	}

	vmcs_seg_desc_encoding(seg, &base, &limit, &access);
	desc->base = vmcs_read(base);
	desc->limit = vmcs_read(limit);
	if (access != VMCS_INVALID_ENCODING) {
		desc->access = vmcs_read(access);
	} else {
		desc->access = 0;
	}

	if (!running) {
		vmcs_clear(vmx->vmcs_pa[vcpu]);
	}
	return (0);
}

static int
vmx_setdesc(void *arg, int vcpu, int seg, const struct seg_desc *desc)
{
	int hostcpu, running;
	struct vmx *vmx = arg;
	uint32_t base, limit, access;

	running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
	if (running && hostcpu != curcpu)
		panic("vmx_setdesc: %s%d is running", vm_name(vmx->vm), vcpu);

	if (!running) {
		vmcs_load(vmx->vmcs_pa[vcpu]);
	}

	vmcs_seg_desc_encoding(seg, &base, &limit, &access);
	vmcs_write(base, desc->base);
	vmcs_write(limit, desc->limit);
	if (access != VMCS_INVALID_ENCODING) {
		vmcs_write(access, desc->access);
	}

	if (!running) {
		vmcs_clear(vmx->vmcs_pa[vcpu]);
	}
	return (0);
}

static int
vmx_getcap(void *arg, int vcpu, int type, int *retval)
{
	struct vmx *vmx = arg;
	int vcap;
	int ret;

	ret = ENOENT;

	vcap = vmx->cap[vcpu].set;

	switch (type) {
	case VM_CAP_HALT_EXIT:
		if (cap_halt_exit)
			ret = 0;
		break;
	case VM_CAP_PAUSE_EXIT:
		if (cap_pause_exit)
			ret = 0;
		break;
	case VM_CAP_MTRAP_EXIT:
		if (cap_monitor_trap)
			ret = 0;
		break;
	case VM_CAP_ENABLE_INVPCID:
		if (cap_invpcid)
			ret = 0;
		break;
	case VM_CAP_BPT_EXIT:
		ret = 0;
		break;
	default:
		break;
	}

	if (ret == 0)
		*retval = (vcap & (1 << type)) ? 1 : 0;

	return (ret);
}
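/*
 * Illustrative sketch (not part of the driver): the per-vCPU capability
 * state consulted by vmx_getcap() above and updated by vmx_setcap() below
 * is a plain bitmask indexed by capability type.  Hypothetical helpers
 * showing the encoding:
 */
static bool
cap_is_set(uint32_t capset, int type)
{
	return ((capset & (1u << type)) != 0);
}

static uint32_t
cap_update(uint32_t capset, int type, bool val)
{
	return (val ? (capset | (1u << type)) : (capset & ~(1u << type)));
}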
static int
vmx_setcap(void *arg, int vcpu, int type, int val)
{
	struct vmx *vmx = arg;
	uint32_t baseval, reg, flag;
	uint32_t *pptr;
	int error;

	error = ENOENT;
	pptr = NULL;

	switch (type) {
	case VM_CAP_HALT_EXIT:
		if (cap_halt_exit) {
			error = 0;
			pptr = &vmx->cap[vcpu].proc_ctls;
			baseval = *pptr;
			flag = PROCBASED_HLT_EXITING;
			reg = VMCS_PRI_PROC_BASED_CTLS;
		}
		break;
	case VM_CAP_MTRAP_EXIT:
		if (cap_monitor_trap) {
			error = 0;
			pptr = &vmx->cap[vcpu].proc_ctls;
			baseval = *pptr;
			flag = PROCBASED_MTF;
			reg = VMCS_PRI_PROC_BASED_CTLS;
		}
		break;
	case VM_CAP_PAUSE_EXIT:
		if (cap_pause_exit) {
			error = 0;
			pptr = &vmx->cap[vcpu].proc_ctls;
			baseval = *pptr;
			flag = PROCBASED_PAUSE_EXITING;
			reg = VMCS_PRI_PROC_BASED_CTLS;
		}
		break;
	case VM_CAP_ENABLE_INVPCID:
		if (cap_invpcid) {
			error = 0;
			pptr = &vmx->cap[vcpu].proc_ctls2;
			baseval = *pptr;
			flag = PROCBASED2_ENABLE_INVPCID;
			reg = VMCS_SEC_PROC_BASED_CTLS;
		}
		break;
	case VM_CAP_BPT_EXIT:
		error = 0;

		/* Don't change the bitmap if we are tracing all exceptions. */
		if (vmx->cap[vcpu].exc_bitmap != 0xffffffff) {
			pptr = &vmx->cap[vcpu].exc_bitmap;
			baseval = *pptr;
			flag = (1 << IDT_BP);
			reg = VMCS_EXCEPTION_BITMAP;
		}
		break;
	default:
		break;
	}

	if (error != 0) {
		return (error);
	}

	if (pptr != NULL) {
		if (val) {
			baseval |= flag;
		} else {
			baseval &= ~flag;
		}
		vmcs_load(vmx->vmcs_pa[vcpu]);
		vmcs_write(reg, baseval);
		vmcs_clear(vmx->vmcs_pa[vcpu]);

		/* Update optional stored flags, and record the setting. */
		*pptr = baseval;
	}

	if (val) {
		vmx->cap[vcpu].set |= (1 << type);
	} else {
		vmx->cap[vcpu].set &= ~(1 << type);
	}

	return (0);
}

struct vlapic_vtx {
	struct vlapic vlapic;

	/* Align to the nearest cacheline */
	uint8_t _pad[64 - (sizeof (struct vlapic) % 64)];

	/* TMR handling state for posted interrupts */
	uint32_t tmr_active[8];
	uint32_t pending_level[8];
	uint32_t pending_edge[8];

	struct pir_desc *pir_desc;
	struct vmx *vmx;
	uint_t pending_prio;
	boolean_t tmr_sync;
};

CTASSERT((offsetof(struct vlapic_vtx, tmr_active) & 63) == 0);

#define	VPR_PRIO_BIT(vpr)	(1 << ((vpr) >> 4))
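/*
 * Illustrative sketch (not part of the driver): the _pad member of struct
 * vlapic_vtx above rounds the embedded struct vlapic up to a 64-byte
 * boundary so that tmr_active (and the posted-interrupt state after it)
 * starts on its own cache line, which the CTASSERT then enforces.  The same
 * idiom in a generic form, with hypothetical names:
 */
struct cacheline_split {
	struct vlapic	hdr;
	uint8_t		_pad[64 - (sizeof (struct vlapic) % 64)];
	uint32_t	hot_state[8];	/* begins cache-line aligned */
};

CTASSERT((offsetof(struct cacheline_split, hot_state) & 63) == 0);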
Such circumstances are considered a rare edge case and 3391 * are never expected to be found in the wild. 3392 */ 3393 tmrval = atomic_load_acq_int(&vlapic_vtx->tmr_active[idx]); 3394 if (!level) { 3395 if ((tmrval & mask) != 0) { 3396 /* Edge-triggered interrupt needs TMR de-asserted */ 3397 atomic_set_int(&vlapic_vtx->pending_edge[idx], mask); 3398 atomic_store_rel_long(&pir_desc->pending, 1); 3399 return (VCPU_NOTIFY_EXIT); 3400 } 3401 } else { 3402 if ((tmrval & mask) == 0) { 3403 /* Level-triggered interrupt needs TMR asserted */ 3404 atomic_set_int(&vlapic_vtx->pending_level[idx], mask); 3405 atomic_store_rel_long(&pir_desc->pending, 1); 3406 return (VCPU_NOTIFY_EXIT); 3407 } 3408 } 3409 3410 /* 3411 * If the interrupt request does not require manipulation of the TMRs 3412 * for delivery, set it in PIR descriptor. It cannot be inserted into 3413 * the APIC page while the vCPU might be running. 3414 */ 3415 atomic_set_int(&pir_desc->pir[idx], mask); 3416 3417 /* 3418 * A notification is required whenever the 'pending' bit makes a 3419 * transition from 0->1. 3420 * 3421 * Even if the 'pending' bit is already asserted, notification about 3422 * the incoming interrupt may still be necessary. For example, if a 3423 * vCPU is HLTed with a high PPR, a low priority interrupt would cause 3424 * the 0->1 'pending' transition with a notification, but the vCPU 3425 * would ignore the interrupt for the time being. The same vCPU would 3426 * need to then be notified if a high-priority interrupt arrived which 3427 * satisfied the PPR. 3428 * 3429 * The priorities of interrupts injected while 'pending' is asserted 3430 * are tracked in a custom bitfield 'pending_prio'. Should the 3431 * to-be-injected interrupt exceed the priorities already present, the 3432 * notification is sent. The priorities recorded in 'pending_prio' are 3433 * cleared whenever the 'pending' bit makes another 0->1 transition. 3434 */ 3435 if (atomic_cmpset_long(&pir_desc->pending, 0, 1) != 0) { 3436 notify = VCPU_NOTIFY_APIC; 3437 vlapic_vtx->pending_prio = 0; 3438 } else { 3439 const uint_t old_prio = vlapic_vtx->pending_prio; 3440 const uint_t prio_bit = VPR_PRIO_BIT(vector & APIC_TPR_INT); 3441 3442 if ((old_prio & prio_bit) == 0 && prio_bit > old_prio) { 3443 atomic_set_int(&vlapic_vtx->pending_prio, prio_bit); 3444 notify = VCPU_NOTIFY_APIC; 3445 } 3446 } 3447 3448 return (notify); 3449 } 3450 3451 static void 3452 vmx_apicv_accepted(struct vlapic *vlapic, int vector) 3453 { 3454 /* 3455 * When APICv is enabled for an instance, the traditional interrupt 3456 * injection method (populating ENTRY_INTR_INFO in the VMCS) is not 3457 * used and the CPU does the heavy lifting of virtual interrupt 3458 * delivery. For that reason vmx_intr_accepted() should never be called 3459 * when APICv is enabled. 
static void
vmx_apicv_accepted(struct vlapic *vlapic, int vector)
{
	/*
	 * When APICv is enabled for an instance, the traditional interrupt
	 * injection method (populating ENTRY_INTR_INFO in the VMCS) is not
	 * used and the CPU does the heavy lifting of virtual interrupt
	 * delivery. For that reason vmx_intr_accepted() should never be called
	 * when APICv is enabled.
	 */
	panic("vmx_intr_accepted: not expected to be called");
}

static void
vmx_apicv_sync_tmr(struct vlapic *vlapic)
{
	struct vlapic_vtx *vlapic_vtx;
	const uint32_t *tmrs;

	vlapic_vtx = (struct vlapic_vtx *)vlapic;
	tmrs = &vlapic_vtx->tmr_active[0];

	if (!vlapic_vtx->tmr_sync) {
		return;
	}

	vmcs_write(VMCS_EOI_EXIT0, ((uint64_t)tmrs[1] << 32) | tmrs[0]);
	vmcs_write(VMCS_EOI_EXIT1, ((uint64_t)tmrs[3] << 32) | tmrs[2]);
	vmcs_write(VMCS_EOI_EXIT2, ((uint64_t)tmrs[5] << 32) | tmrs[4]);
	vmcs_write(VMCS_EOI_EXIT3, ((uint64_t)tmrs[7] << 32) | tmrs[6]);
	vlapic_vtx->tmr_sync = B_FALSE;
}

static void
vmx_enable_x2apic_mode_ts(struct vlapic *vlapic)
{
	struct vmx *vmx;
	uint32_t proc_ctls;
	int vcpuid;

	vcpuid = vlapic->vcpuid;
	vmx = ((struct vlapic_vtx *)vlapic)->vmx;

	proc_ctls = vmx->cap[vcpuid].proc_ctls;
	proc_ctls &= ~PROCBASED_USE_TPR_SHADOW;
	proc_ctls |= PROCBASED_CR8_LOAD_EXITING;
	proc_ctls |= PROCBASED_CR8_STORE_EXITING;
	vmx->cap[vcpuid].proc_ctls = proc_ctls;

	vmcs_load(vmx->vmcs_pa[vcpuid]);
	vmcs_write(VMCS_PRI_PROC_BASED_CTLS, proc_ctls);
	vmcs_clear(vmx->vmcs_pa[vcpuid]);
}

static void
vmx_enable_x2apic_mode_vid(struct vlapic *vlapic)
{
	struct vmx *vmx;
	uint32_t proc_ctls2;
	int vcpuid;

	vcpuid = vlapic->vcpuid;
	vmx = ((struct vlapic_vtx *)vlapic)->vmx;

	proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
	KASSERT((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) != 0,
	    ("%s: invalid proc_ctls2 %x", __func__, proc_ctls2));

	proc_ctls2 &= ~PROCBASED2_VIRTUALIZE_APIC_ACCESSES;
	proc_ctls2 |= PROCBASED2_VIRTUALIZE_X2APIC_MODE;
	vmx->cap[vcpuid].proc_ctls2 = proc_ctls2;

	vmcs_load(vmx->vmcs_pa[vcpuid]);
	vmcs_write(VMCS_SEC_PROC_BASED_CTLS, proc_ctls2);
	vmcs_clear(vmx->vmcs_pa[vcpuid]);

	vmx_allow_x2apic_msrs(vmx, vcpuid);
}

static void
vmx_apicv_notify(struct vlapic *vlapic, int hostcpu)
{
	psm_send_pir_ipi(hostcpu);
}
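/*
 * Illustrative sketch (not part of the driver): vmx_apicv_sync_tmr() above
 * folds the eight 32-bit TMR words into the four 64-bit EOI-exit bitmap
 * fields of the VMCS, low word in the low half.  The packing for a single
 * field, with a hypothetical name:
 */
static uint64_t
eoi_exit_word(uint32_t tmr_lo, uint32_t tmr_hi)
{
	return (((uint64_t)tmr_hi << 32) | tmr_lo);
}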
static void
vmx_apicv_sync(struct vlapic *vlapic)
{
	struct vlapic_vtx *vlapic_vtx;
	struct pir_desc *pir_desc;
	struct LAPIC *lapic;
	uint_t i;

	vlapic_vtx = (struct vlapic_vtx *)vlapic;
	pir_desc = vlapic_vtx->pir_desc;
	lapic = vlapic->apic_page;

	if (atomic_cmpset_long(&pir_desc->pending, 1, 0) == 0) {
		return;
	}

	vlapic_vtx->pending_prio = 0;

	/* Make sure the invalid (0-15) vectors are not set */
	ASSERT0(vlapic_vtx->pending_level[0] & 0xffff);
	ASSERT0(vlapic_vtx->pending_edge[0] & 0xffff);
	ASSERT0(pir_desc->pir[0] & 0xffff);

	for (i = 0; i <= 7; i++) {
		uint32_t *tmrp = &lapic->tmr0 + (i * 4);
		uint32_t *irrp = &lapic->irr0 + (i * 4);

		const uint32_t pending_level =
		    atomic_readandclear_int(&vlapic_vtx->pending_level[i]);
		const uint32_t pending_edge =
		    atomic_readandclear_int(&vlapic_vtx->pending_edge[i]);
		const uint32_t pending_inject =
		    atomic_readandclear_int(&pir_desc->pir[i]);

		if (pending_level != 0) {
			/*
			 * Level-triggered interrupts assert their corresponding
			 * bit in the TMR when queued in IRR.
			 */
			*tmrp |= pending_level;
			*irrp |= pending_level;
		}
		if (pending_edge != 0) {
			/*
			 * When queuing an edge-triggered interrupt in IRR, the
			 * corresponding bit in the TMR is cleared.
			 */
			*tmrp &= ~pending_edge;
			*irrp |= pending_edge;
		}
		if (pending_inject != 0) {
			/*
			 * Interrupts which do not require a change to the TMR
			 * (because it already matches the necessary state) can
			 * simply be queued in IRR.
			 */
			*irrp |= pending_inject;
		}

		if (*tmrp != vlapic_vtx->tmr_active[i]) {
			/* Check if VMX EOI triggers require updating. */
			vlapic_vtx->tmr_active[i] = *tmrp;
			vlapic_vtx->tmr_sync = B_TRUE;
		}
	}
}

static void
vmx_tpr_shadow_enter(struct vlapic *vlapic)
{
	/*
	 * When TPR shadowing is enabled, VMX will initiate a guest exit if its
	 * TPR falls below a threshold priority. That threshold is set to the
	 * current TPR priority, since guest interrupt status should be
	 * re-evaluated if its TPR is set lower.
	 */
	vmcs_write(VMCS_TPR_THRESHOLD, vlapic_get_cr8(vlapic));
}

static void
vmx_tpr_shadow_exit(struct vlapic *vlapic)
{
	/*
	 * Unlike full APICv, where changes to the TPR are reflected in the PPR,
	 * with TPR shadowing, that duty is relegated to the VMM. Upon exit,
	 * the PPR is updated to reflect any change in the TPR here.
	 */
	vlapic_sync_tpr(vlapic);
}

static struct vlapic *
vmx_vlapic_init(void *arg, int vcpuid)
{
	struct vmx *vmx;
	struct vlapic *vlapic;
	struct vlapic_vtx *vlapic_vtx;

	vmx = arg;

	vlapic = malloc(sizeof (struct vlapic_vtx), M_VLAPIC,
	    M_WAITOK | M_ZERO);
	vlapic->vm = vmx->vm;
	vlapic->vcpuid = vcpuid;
	vlapic->apic_page = (struct LAPIC *)&vmx->apic_page[vcpuid];

	vlapic_vtx = (struct vlapic_vtx *)vlapic;
	vlapic_vtx->pir_desc = &vmx->pir_desc[vcpuid];
	vlapic_vtx->vmx = vmx;

	if (vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW)) {
		vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode_ts;
	}
	if (vmx_cap_en(vmx, VMX_CAP_APICV)) {
		vlapic->ops.set_intr_ready = vmx_apicv_set_ready;
		vlapic->ops.sync_state = vmx_apicv_sync;
		vlapic->ops.intr_accepted = vmx_apicv_accepted;
		vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode_vid;

		if (vmx_cap_en(vmx, VMX_CAP_APICV_PIR)) {
			vlapic->ops.post_intr = vmx_apicv_notify;
		}
	}

	vlapic_init(vlapic);

	return (vlapic);
}

static void
vmx_vlapic_cleanup(void *arg, struct vlapic *vlapic)
{

	vlapic_cleanup(vlapic);
	free(vlapic, M_VLAPIC);
}
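/*
 * Illustrative sketch (not part of the driver): vmx_vlapic_init() above
 * layers the optional vlapic ops by hardware capability, most specific
 * last, so the APICv handler overrides the TPR-shadow one when both are
 * available.  The selection pattern in miniature, with a hypothetical name:
 */
static void
vlapic_ops_select(struct vlapic *vlapic, bool tpr_shadow, bool apicv)
{
	if (tpr_shadow)
		vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode_ts;
	if (apicv)
		vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode_vid;
}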
static void
vmx_savectx(void *arg, int vcpu)
{
	struct vmx *vmx = arg;

	if ((vmx->vmcs_state[vcpu] & VS_LOADED) != 0) {
		vmcs_clear(vmx->vmcs_pa[vcpu]);
		vmx_msr_guest_exit(vmx, vcpu);
		/*
		 * Having VMCLEARed the VMCS, it can no longer be re-entered
		 * with VMRESUME, but must be VMLAUNCHed again.
		 */
		vmx->vmcs_state[vcpu] &= ~VS_LAUNCHED;
	}

	reset_gdtr_limit();
}

static void
vmx_restorectx(void *arg, int vcpu)
{
	struct vmx *vmx = arg;

	ASSERT0(vmx->vmcs_state[vcpu] & VS_LAUNCHED);

	if ((vmx->vmcs_state[vcpu] & VS_LOADED) != 0) {
		vmx_msr_guest_enter(vmx, vcpu);
		vmcs_load(vmx->vmcs_pa[vcpu]);
	}
}

struct vmm_ops vmm_ops_intel = {
	.init = vmx_init,
	.cleanup = vmx_cleanup,
	.resume = vmx_restore,

	.vminit = vmx_vminit,
	.vmrun = vmx_run,
	.vmcleanup = vmx_vmcleanup,
	.vmgetreg = vmx_getreg,
	.vmsetreg = vmx_setreg,
	.vmgetdesc = vmx_getdesc,
	.vmsetdesc = vmx_setdesc,
	.vmgetcap = vmx_getcap,
	.vmsetcap = vmx_setcap,
	.vlapic_init = vmx_vlapic_init,
	.vlapic_cleanup = vmx_vlapic_cleanup,

	.vmsavectx = vmx_savectx,
	.vmrestorectx = vmx_restorectx,
};

/* Side-effect free HW validation derived from checks in vmx_init. */
int
vmx_x86_supported(const char **msg)
{
	int error;
	uint32_t tmp;

	ASSERT(msg != NULL);

	/* Check support for primary processor-based VM-execution controls */
	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
	    MSR_VMX_TRUE_PROCBASED_CTLS, PROCBASED_CTLS_ONE_SETTING,
	    PROCBASED_CTLS_ZERO_SETTING, &tmp);
	if (error) {
		*msg = "processor does not support desired primary "
		    "processor-based controls";
		return (error);
	}

	/* Check support for secondary processor-based VM-execution controls */
	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
	    MSR_VMX_PROCBASED_CTLS2, PROCBASED_CTLS2_ONE_SETTING,
	    PROCBASED_CTLS2_ZERO_SETTING, &tmp);
	if (error) {
		*msg = "processor does not support desired secondary "
		    "processor-based controls";
		return (error);
	}

	/* Check support for pin-based VM-execution controls */
	error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
	    MSR_VMX_TRUE_PINBASED_CTLS, PINBASED_CTLS_ONE_SETTING,
	    PINBASED_CTLS_ZERO_SETTING, &tmp);
	if (error) {
		*msg = "processor does not support desired pin-based controls";
		return (error);
	}

	/* Check support for VM-exit controls */
	error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS,
	    VM_EXIT_CTLS_ONE_SETTING, VM_EXIT_CTLS_ZERO_SETTING, &tmp);
	if (error) {
		*msg = "processor does not support desired exit controls";
		return (error);
	}

	/* Check support for VM-entry controls */
	error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS, MSR_VMX_TRUE_ENTRY_CTLS,
	    VM_ENTRY_CTLS_ONE_SETTING, VM_ENTRY_CTLS_ZERO_SETTING, &tmp);
	if (error) {
		*msg = "processor does not support desired entry controls";
		return (error);
	}

	/* Unrestricted guest is nominally optional, but not for us. */
	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2,
	    PROCBASED2_UNRESTRICTED_GUEST, 0, &tmp);
	if (error) {
		*msg = "processor does not support desired unrestricted guest "
		    "controls";
		return (error);
	}

	return (0);
}
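/*
 * Illustrative sketch (not part of the driver): a caller can use
 * vmx_x86_supported() above as a side-effect-free probe before committing
 * to the full vmx_init() path.  The function and message handling here are
 * hypothetical usage, not existing driver code:
 */
static int
vmx_probe_example(void)
{
	const char *msg = NULL;

	if (vmx_x86_supported(&msg) != 0) {
		/* 'msg' points at a static string describing the failure */
		cmn_err(CE_NOTE, "VMX unsupported: %s", msg);
		return (ENOTSUP);
	}
	return (0);
}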