/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 * Copyright (c) 2018 Joyent, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 *
 * Copyright 2015 Pluribus Networks Inc.
 * Copyright 2018 Joyent, Inc.
 * Copyright 2020 Oxide Computer Company
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/smp.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sysctl.h>

#ifndef __FreeBSD__
#include <sys/x86_archext.h>
#include <sys/smp_impldefs.h>
#include <sys/smt.h>
#include <sys/hma.h>
#include <sys/trap.h>
#endif

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/psl.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>
#include <machine/reg.h>
#include <machine/segments.h>
#include <machine/smp.h>
#include <machine/specialreg.h>
#include <machine/vmparam.h>

#include <machine/vmm.h>
#include <machine/vmm_dev.h>
#include <sys/vmm_instruction_emul.h>
#include "vmm_lapic.h"
#include "vmm_host.h"
#include "vmm_ioport.h"
#include "vmm_ktr.h"
#include "vmm_stat.h"
#include "vatpic.h"
#include "vlapic.h"
#include "vlapic_priv.h"

#include "ept.h"
#include "vmcs.h"
#include "vmx.h"
#include "vmx_msr.h"
#include "x86.h"
#include "vmx_controls.h"

#define	PINBASED_CTLS_ONE_SETTING		\
	(PINBASED_EXTINT_EXITING	|	\
	PINBASED_NMI_EXITING		|	\
	PINBASED_VIRTUAL_NMI)
#define	PINBASED_CTLS_ZERO_SETTING	0

#define	PROCBASED_CTLS_WINDOW_SETTING		\
	(PROCBASED_INT_WINDOW_EXITING	|	\
	PROCBASED_NMI_WINDOW_EXITING)

#ifdef __FreeBSD__
#define	PROCBASED_CTLS_ONE_SETTING		\
	(PROCBASED_SECONDARY_CONTROLS	|	\
	PROCBASED_MWAIT_EXITING		|	\
	PROCBASED_MONITOR_EXITING	|	\
	PROCBASED_IO_EXITING		|	\
	PROCBASED_MSR_BITMAPS		|	\
	PROCBASED_CTLS_WINDOW_SETTING	|	\
	PROCBASED_CR8_LOAD_EXITING	|	\
	PROCBASED_CR8_STORE_EXITING)
#else
/* We consider TSC offset a necessity for unsynched TSC handling */
#define	PROCBASED_CTLS_ONE_SETTING		\
	(PROCBASED_SECONDARY_CONTROLS	|	\
	PROCBASED_TSC_OFFSET		|	\
	PROCBASED_MWAIT_EXITING		|	\
	PROCBASED_MONITOR_EXITING	|	\
	PROCBASED_IO_EXITING		|	\
	PROCBASED_MSR_BITMAPS		|	\
	PROCBASED_CTLS_WINDOW_SETTING	|	\
	PROCBASED_CR8_LOAD_EXITING	|	\
	PROCBASED_CR8_STORE_EXITING)
#endif /* __FreeBSD__ */

#define	PROCBASED_CTLS_ZERO_SETTING	\
	(PROCBASED_CR3_LOAD_EXITING |	\
	PROCBASED_CR3_STORE_EXITING |	\
	PROCBASED_IO_BITMAPS)

/*
 * EPT and Unrestricted Guest are considered necessities.  The latter is not a
 * requirement on FreeBSD, where grub2-bhyve is used to load guests directly
 * without a bootrom starting in real mode.
 */
#define	PROCBASED_CTLS2_ONE_SETTING		\
	(PROCBASED2_ENABLE_EPT		|	\
	PROCBASED2_UNRESTRICTED_GUEST)
#define	PROCBASED_CTLS2_ZERO_SETTING	0

#define	VM_EXIT_CTLS_ONE_SETTING				\
	(VM_EXIT_SAVE_DEBUG_CONTROLS		|		\
	VM_EXIT_HOST_LMA			|		\
	VM_EXIT_LOAD_PAT			|		\
	VM_EXIT_SAVE_EFER			|		\
	VM_EXIT_LOAD_EFER			|		\
	VM_EXIT_ACKNOWLEDGE_INTERRUPT)

#define	VM_EXIT_CTLS_ZERO_SETTING	0

#define	VM_ENTRY_CTLS_ONE_SETTING				\
	(VM_ENTRY_LOAD_DEBUG_CONTROLS		|		\
	VM_ENTRY_LOAD_EFER)

#define	VM_ENTRY_CTLS_ZERO_SETTING				\
	(VM_ENTRY_INTO_SMM			|		\
	VM_ENTRY_DEACTIVATE_DUAL_MONITOR)

#define	HANDLED		1
#define	UNHANDLED	0

static MALLOC_DEFINE(M_VMX, "vmx", "vmx");
static MALLOC_DEFINE(M_VLAPIC, "vlapic", "vlapic");

SYSCTL_DECL(_hw_vmm);
SYSCTL_NODE(_hw_vmm, OID_AUTO, vmx, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
    NULL);

static uint32_t pinbased_ctls, procbased_ctls, procbased_ctls2;
static uint32_t exit_ctls, entry_ctls;

static uint64_t cr0_ones_mask, cr0_zeros_mask;

static uint64_t cr4_ones_mask, cr4_zeros_mask;

static int vmx_initialized;

/* Do not flush RSB upon vmexit */
static int no_flush_rsb;

/*
 * Optional capabilities
 */
#ifdef __FreeBSD__
SYSCTL_DECL(_hw_vmm_vmx);
static SYSCTL_NODE(_hw_vmm_vmx, OID_AUTO, cap,
    CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
    NULL);
#endif

/* HLT triggers a VM-exit */
static int cap_halt_exit;

/* PAUSE triggers a VM-exit */
static int cap_pause_exit;

/* Monitor trap flag */
static int cap_monitor_trap;

/* Guests are allowed to use INVPCID */
static int cap_invpcid;

/* Extra capabilities (VMX_CAP_*) beyond the minimum */
static enum vmx_caps vmx_capabilities;

/* APICv posted interrupt vector */
static int pirvec = -1;

#ifdef __FreeBSD__
static struct unrhdr *vpid_unr;
#endif /* __FreeBSD__ */
static uint_t vpid_alloc_failed;

int guest_l1d_flush;
int guest_l1d_flush_sw;

/* MSR save region is composed of an array of 'struct msr_entry' */
struct msr_entry {
	uint32_t	index;
	uint32_t	reserved;
	uint64_t	val;
};

static struct msr_entry msr_load_list[1] __aligned(16);

/*
 * The definitions of SDT probes for VMX.
 */

/* BEGIN CSTYLED */
SDT_PROBE_DEFINE3(vmm, vmx, exit, entry,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE4(vmm, vmx, exit, taskswitch,
    "struct vmx *", "int", "struct vm_exit *", "struct vm_task_switch *");

SDT_PROBE_DEFINE4(vmm, vmx, exit, craccess,
    "struct vmx *", "int", "struct vm_exit *", "uint64_t");

SDT_PROBE_DEFINE4(vmm, vmx, exit, rdmsr,
    "struct vmx *", "int", "struct vm_exit *", "uint32_t");

SDT_PROBE_DEFINE5(vmm, vmx, exit, wrmsr,
    "struct vmx *", "int", "struct vm_exit *", "uint32_t", "uint64_t");

SDT_PROBE_DEFINE3(vmm, vmx, exit, halt,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, mtrap,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, pause,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, intrwindow,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE4(vmm, vmx, exit, interrupt,
    "struct vmx *", "int", "struct vm_exit *", "uint32_t");

SDT_PROBE_DEFINE3(vmm, vmx, exit, nmiwindow,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, inout,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, cpuid,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE5(vmm, vmx, exit, exception,
    "struct vmx *", "int", "struct vm_exit *", "uint32_t", "int");

SDT_PROBE_DEFINE5(vmm, vmx, exit, nestedfault,
    "struct vmx *", "int", "struct vm_exit *", "uint64_t", "uint64_t");

SDT_PROBE_DEFINE4(vmm, vmx, exit, mmiofault,
    "struct vmx *", "int", "struct vm_exit *", "uint64_t");

SDT_PROBE_DEFINE3(vmm, vmx, exit, eoi,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, apicaccess,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE4(vmm, vmx, exit, apicwrite,
    "struct vmx *", "int", "struct vm_exit *", "struct vlapic *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, xsetbv,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, monitor,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, mwait,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, vminsn,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE4(vmm, vmx, exit, unknown,
    "struct vmx *", "int", "struct vm_exit *", "uint32_t");

SDT_PROBE_DEFINE4(vmm, vmx, exit, return,
    "struct vmx *", "int", "struct vm_exit *", "int");
/* END CSTYLED */

/*
 * Use the last page below 4GB as the APIC access address. This address is
 * occupied by the boot firmware so it is guaranteed that it will not conflict
 * with a page in system memory.
 */
#define	APIC_ACCESS_ADDRESS	0xFFFFF000

static int vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc);
static int vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval);
static void vmx_apply_tsc_adjust(struct vmx *, int);
static void vmx_apicv_sync_tmr(struct vlapic *vlapic);
static void vmx_tpr_shadow_enter(struct vlapic *vlapic);
static void vmx_tpr_shadow_exit(struct vlapic *vlapic);

static int
vmx_allow_x2apic_msrs(struct vmx *vmx)
{
	int i, error;

	error = 0;

	/*
	 * Allow readonly access to the following x2APIC MSRs from the guest.
	 */
	error += guest_msr_ro(vmx, MSR_APIC_ID);
	error += guest_msr_ro(vmx, MSR_APIC_VERSION);
	error += guest_msr_ro(vmx, MSR_APIC_LDR);
	error += guest_msr_ro(vmx, MSR_APIC_SVR);

	for (i = 0; i < 8; i++)
		error += guest_msr_ro(vmx, MSR_APIC_ISR0 + i);

	for (i = 0; i < 8; i++)
		error += guest_msr_ro(vmx, MSR_APIC_TMR0 + i);

	for (i = 0; i < 8; i++)
		error += guest_msr_ro(vmx, MSR_APIC_IRR0 + i);

	error += guest_msr_ro(vmx, MSR_APIC_ESR);
	error += guest_msr_ro(vmx, MSR_APIC_LVT_TIMER);
	error += guest_msr_ro(vmx, MSR_APIC_LVT_THERMAL);
	error += guest_msr_ro(vmx, MSR_APIC_LVT_PCINT);
	error += guest_msr_ro(vmx, MSR_APIC_LVT_LINT0);
	error += guest_msr_ro(vmx, MSR_APIC_LVT_LINT1);
	error += guest_msr_ro(vmx, MSR_APIC_LVT_ERROR);
	error += guest_msr_ro(vmx, MSR_APIC_ICR_TIMER);
	error += guest_msr_ro(vmx, MSR_APIC_DCR_TIMER);
	error += guest_msr_ro(vmx, MSR_APIC_ICR);

	/*
	 * Allow TPR, EOI and SELF_IPI MSRs to be read and written by the
	 * guest.
	 *
	 * These registers get special treatment described in the section
	 * "Virtualizing MSR-Based APIC Accesses".
	 */
	error += guest_msr_rw(vmx, MSR_APIC_TPR);
	error += guest_msr_rw(vmx, MSR_APIC_EOI);
	error += guest_msr_rw(vmx, MSR_APIC_SELF_IPI);

	return (error);
}

static ulong_t
vmx_fix_cr0(ulong_t cr0)
{
	return ((cr0 | cr0_ones_mask) & ~cr0_zeros_mask);
}

static ulong_t
vmx_fix_cr4(ulong_t cr4)
{
	return ((cr4 | cr4_ones_mask) & ~cr4_zeros_mask);
}

static void
vpid_free(int vpid)
{
	if (vpid < 0 || vpid > 0xffff)
		panic("vpid_free: invalid vpid %d", vpid);

	/*
	 * VPIDs [0,VM_MAXCPU] are special and are not allocated from
	 * the unit number allocator.
	 */

	if (vpid > VM_MAXCPU)
#ifdef __FreeBSD__
		free_unr(vpid_unr, vpid);
#else
		hma_vmx_vpid_free((uint16_t)vpid);
#endif
}

static void
vpid_alloc(uint16_t *vpid, int num)
{
	int i, x;

	if (num <= 0 || num > VM_MAXCPU)
		panic("invalid number of vpids requested: %d", num);

	/*
	 * If the "enable vpid" execution control is not enabled then the
	 * VPID is required to be 0 for all vcpus.
	 */
	if ((procbased_ctls2 & PROCBASED2_ENABLE_VPID) == 0) {
		for (i = 0; i < num; i++)
			vpid[i] = 0;
		return;
	}

	/*
	 * Allocate a unique VPID for each vcpu from the unit number allocator.
	 */
	for (i = 0; i < num; i++) {
#ifdef __FreeBSD__
		x = alloc_unr(vpid_unr);
#else
		uint16_t tmp;

		tmp = hma_vmx_vpid_alloc();
		x = (tmp == 0) ? -1 : tmp;
#endif
		if (x == -1)
			break;
		else
			vpid[i] = x;
	}

	if (i < num) {
		atomic_add_int(&vpid_alloc_failed, 1);

		/*
		 * If the unit number allocator does not have enough unique
		 * VPIDs then we need to allocate from the [1,VM_MAXCPU] range.
		 *
		 * These VPIDs are not unique across VMs but this does not
		 * affect correctness because the combined mappings are also
		 * tagged with the EP4TA which is unique for each VM.
		 *
		 * It is still sub-optimal because the invvpid will invalidate
		 * combined mappings for a particular VPID across all EP4TAs.
		 */
		while (i-- > 0)
			vpid_free(vpid[i]);

		for (i = 0; i < num; i++)
			vpid[i] = i + 1;
	}
}

static int
vmx_cleanup(void)
{
	/* This is taken care of by the hma registration */
	return (0);
}

static void
vmx_restore(void)
{
	/* No-op on illumos */
}

static int
vmx_init(int ipinum)
{
	int error;
	uint64_t fixed0, fixed1;
	uint32_t tmp;
	enum vmx_caps avail_caps = VMX_CAP_NONE;

	/* Check support for primary processor-based VM-execution controls */
	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
	    MSR_VMX_TRUE_PROCBASED_CTLS,
	    PROCBASED_CTLS_ONE_SETTING,
	    PROCBASED_CTLS_ZERO_SETTING, &procbased_ctls);
	if (error) {
		printf("vmx_init: processor does not support desired primary "
		    "processor-based controls\n");
		return (error);
	}

	/* Clear the processor-based ctl bits that are set on demand */
	procbased_ctls &= ~PROCBASED_CTLS_WINDOW_SETTING;

	/* Check support for secondary processor-based VM-execution controls */
	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
	    MSR_VMX_PROCBASED_CTLS2,
	    PROCBASED_CTLS2_ONE_SETTING,
	    PROCBASED_CTLS2_ZERO_SETTING, &procbased_ctls2);
	if (error) {
		printf("vmx_init: processor does not support desired "
		    "secondary processor-based controls\n");
		return (error);
	}

	/* Check support for VPID */
	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
	    MSR_VMX_PROCBASED_CTLS2,
	    PROCBASED2_ENABLE_VPID,
	    0, &tmp);
	if (error == 0)
		procbased_ctls2 |= PROCBASED2_ENABLE_VPID;

	/* Check support for pin-based VM-execution controls */
	error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
	    MSR_VMX_TRUE_PINBASED_CTLS,
	    PINBASED_CTLS_ONE_SETTING,
	    PINBASED_CTLS_ZERO_SETTING, &pinbased_ctls);
	if (error) {
		printf("vmx_init: processor does not support desired "
		    "pin-based controls\n");
		return (error);
	}

	/* Check support for VM-exit controls */
	error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS,
	    VM_EXIT_CTLS_ONE_SETTING,
	    VM_EXIT_CTLS_ZERO_SETTING,
	    &exit_ctls);
	if (error) {
		printf("vmx_init: processor does not support desired "
		    "exit controls\n");
		return (error);
	}

	/* Check support for VM-entry controls */
	error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS, MSR_VMX_TRUE_ENTRY_CTLS,
	    VM_ENTRY_CTLS_ONE_SETTING, VM_ENTRY_CTLS_ZERO_SETTING,
	    &entry_ctls);
	if (error) {
		printf("vmx_init: processor does not support desired "
		    "entry controls\n");
		return (error);
	}

	/*
	 * Check support for optional features by testing them
	 * as individual bits
	 */
	cap_halt_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
	    MSR_VMX_TRUE_PROCBASED_CTLS,
	    PROCBASED_HLT_EXITING, 0,
	    &tmp) == 0);

	cap_monitor_trap = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
	    MSR_VMX_PROCBASED_CTLS,
	    PROCBASED_MTF, 0,
	    &tmp) == 0);

	cap_pause_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
	    MSR_VMX_TRUE_PROCBASED_CTLS,
	    PROCBASED_PAUSE_EXITING, 0,
	    &tmp) == 0);

	cap_invpcid = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
	    MSR_VMX_PROCBASED_CTLS2, PROCBASED2_ENABLE_INVPCID, 0,
	    &tmp) == 0);

	/*
	 * Check for APIC virtualization capabilities:
	 * - TPR shadowing
	 * - Full APICv (with or without x2APIC support)
	 * - Posted interrupt handling
	 */
	if (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, MSR_VMX_TRUE_PROCBASED_CTLS,
	    PROCBASED_USE_TPR_SHADOW, 0, &tmp) == 0) {
		avail_caps |= VMX_CAP_TPR_SHADOW;

		const uint32_t apicv_bits =
		    PROCBASED2_VIRTUALIZE_APIC_ACCESSES |
		    PROCBASED2_APIC_REGISTER_VIRTUALIZATION |
		    PROCBASED2_VIRTUALIZE_X2APIC_MODE |
		    PROCBASED2_VIRTUAL_INTERRUPT_DELIVERY;
		if (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
		    MSR_VMX_PROCBASED_CTLS2, apicv_bits, 0, &tmp) == 0) {
			avail_caps |= VMX_CAP_APICV;

			/*
			 * It may make sense in the future to differentiate
			 * hardware (or software) configurations with APICv but
			 * no support for accelerating x2APIC mode.
			 */
			avail_caps |= VMX_CAP_APICV_X2APIC;

			error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
			    MSR_VMX_TRUE_PINBASED_CTLS,
			    PINBASED_POSTED_INTERRUPT, 0, &tmp);
			if (error == 0) {
				/*
				 * If the PSM-provided interfaces for
				 * requesting and using a PIR IPI vector are
				 * present, use them for posted interrupts.
				 */
				if (psm_get_pir_ipivect != NULL &&
				    psm_send_pir_ipi != NULL) {
					pirvec = psm_get_pir_ipivect();
					avail_caps |= VMX_CAP_APICV_PIR;
				}
			}
		}
	}

	/* Initialize EPT */
	error = ept_init(ipinum);
	if (error) {
		printf("vmx_init: ept initialization failed (%d)\n", error);
		return (error);
	}

#ifdef __FreeBSD__
	guest_l1d_flush = (cpu_ia32_arch_caps &
	    IA32_ARCH_CAP_SKIP_L1DFL_VMENTRY) == 0;
	TUNABLE_INT_FETCH("hw.vmm.l1d_flush", &guest_l1d_flush);

	/*
	 * L1D cache flush is enabled.  Use IA32_FLUSH_CMD MSR when
	 * available.  Otherwise fall back to the software flush
	 * method which loads enough data from the kernel text to
	 * flush existing L1D content, both on VMX entry and on NMI
	 * return.
	 */
	if (guest_l1d_flush) {
		if ((cpu_stdext_feature3 & CPUID_STDEXT3_L1D_FLUSH) == 0) {
			guest_l1d_flush_sw = 1;
			TUNABLE_INT_FETCH("hw.vmm.l1d_flush_sw",
			    &guest_l1d_flush_sw);
		}
		if (guest_l1d_flush_sw) {
			if (nmi_flush_l1d_sw <= 1)
				nmi_flush_l1d_sw = 1;
		} else {
			msr_load_list[0].index = MSR_IA32_FLUSH_CMD;
			msr_load_list[0].val = IA32_FLUSH_CMD_L1D;
		}
	}
#else
	/* L1D flushing is taken care of by smt_acquire() and friends */
	guest_l1d_flush = 0;
#endif /* __FreeBSD__ */

	/*
	 * Stash the cr0 and cr4 bits that must be fixed to 0 or 1
	 */
	fixed0 = rdmsr(MSR_VMX_CR0_FIXED0);
	fixed1 = rdmsr(MSR_VMX_CR0_FIXED1);
	cr0_ones_mask = fixed0 & fixed1;
	cr0_zeros_mask = ~fixed0 & ~fixed1;

	/*
	 * Since Unrestricted Guest was already verified present, CR0_PE and
	 * CR0_PG are allowed to be set to zero in VMX non-root operation
	 */
	cr0_ones_mask &= ~(CR0_PG | CR0_PE);

	/*
	 * Do not allow the guest to set CR0_NW or CR0_CD.
	 */
	cr0_zeros_mask |= (CR0_NW | CR0_CD);

	fixed0 = rdmsr(MSR_VMX_CR4_FIXED0);
	fixed1 = rdmsr(MSR_VMX_CR4_FIXED1);
	cr4_ones_mask = fixed0 & fixed1;
	cr4_zeros_mask = ~fixed0 & ~fixed1;

	vmx_msr_init();

	vmx_capabilities = avail_caps;
	vmx_initialized = 1;

	return (0);
}

static void
vmx_trigger_hostintr(int vector)
{
#ifdef __FreeBSD__
	uintptr_t func;
	struct gate_descriptor *gd;

	gd = &idt[vector];

	KASSERT(vector >= 32 && vector <= 255, ("vmx_trigger_hostintr: "
	    "invalid vector %d", vector));
	KASSERT(gd->gd_p == 1, ("gate descriptor for vector %d not present",
	    vector));
	KASSERT(gd->gd_type == SDT_SYSIGT, ("gate descriptor for vector %d "
	    "has invalid type %d", vector, gd->gd_type));
	KASSERT(gd->gd_dpl == SEL_KPL, ("gate descriptor for vector %d "
	    "has invalid dpl %d", vector, gd->gd_dpl));
	KASSERT(gd->gd_selector == GSEL(GCODE_SEL, SEL_KPL), ("gate descriptor "
	    "for vector %d has invalid selector %d", vector, gd->gd_selector));
	KASSERT(gd->gd_ist == 0, ("gate descriptor for vector %d has invalid "
	    "IST %d", vector, gd->gd_ist));

	func = ((long)gd->gd_hioffset << 16 | gd->gd_looffset);
	vmx_call_isr(func);
#else
	VERIFY(vector >= 32 && vector <= 255);
	vmx_call_isr(vector - 32);
#endif /* __FreeBSD__ */
}

static void *
vmx_vminit(struct vm *vm, pmap_t pmap)
{
	uint16_t vpid[VM_MAXCPU];
	int i, error, datasel;
	struct vmx *vmx;
	uint32_t exc_bitmap;
	uint16_t maxcpus;
	uint32_t proc_ctls, proc2_ctls, pin_ctls;

	vmx = malloc(sizeof (struct vmx), M_VMX, M_WAITOK | M_ZERO);
	if ((uintptr_t)vmx & PAGE_MASK) {
		panic("malloc of struct vmx not aligned on %d byte boundary",
		    PAGE_SIZE);
	}
	vmx->vm = vm;

	vmx->eptp = eptp(vtophys((vm_offset_t)pmap->pm_pml4));

	/*
	 * Clean up EPTP-tagged guest physical and combined mappings
	 *
	 * VMX transitions are not required to invalidate any guest physical
	 * mappings. So, it may be possible for stale guest physical mappings
	 * to be present in the processor TLBs.
	 *
	 * Combined mappings for this EP4TA are also invalidated for all VPIDs.
	 */
	ept_invalidate_mappings(vmx->eptp);

	msr_bitmap_initialize(vmx->msr_bitmap);

	/*
	 * It is safe to allow direct access to MSR_GSBASE and MSR_FSBASE.
	 * The guest FSBASE and GSBASE are saved and restored during
	 * vm-exit and vm-entry respectively. The host FSBASE and GSBASE are
	 * always restored from the vmcs host state area on vm-exit.
	 *
	 * The SYSENTER_CS/ESP/EIP MSRs are identical to FS/GSBASE in
	 * how they are saved/restored so can be directly accessed by the
	 * guest.
	 *
	 * MSR_EFER is saved and restored in the guest VMCS area on a
	 * VM exit and entry respectively. It is also restored from the
	 * host VMCS area on a VM exit.
	 *
	 * The TSC MSR is exposed read-only. Writes are disallowed as
	 * that will impact the host TSC.  If the guest does a write, the
	 * "use TSC offsetting" execution control is enabled and the
	 * difference between the host TSC and the guest TSC is written
	 * into the TSC offset in the VMCS.
	 */
	if (guest_msr_rw(vmx, MSR_GSBASE) ||
	    guest_msr_rw(vmx, MSR_FSBASE) ||
	    guest_msr_rw(vmx, MSR_SYSENTER_CS_MSR) ||
	    guest_msr_rw(vmx, MSR_SYSENTER_ESP_MSR) ||
	    guest_msr_rw(vmx, MSR_SYSENTER_EIP_MSR) ||
	    guest_msr_rw(vmx, MSR_EFER) ||
	    guest_msr_ro(vmx, MSR_TSC))
		panic("vmx_vminit: error setting guest msr access");

	vpid_alloc(vpid, VM_MAXCPU);

	/* Grab the established defaults */
	proc_ctls = procbased_ctls;
	proc2_ctls = procbased_ctls2;
	pin_ctls = pinbased_ctls;
	/* For now, default to the available capabilities */
	vmx->vmx_caps = vmx_capabilities;

	if (vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW)) {
		proc_ctls |= PROCBASED_USE_TPR_SHADOW;
		proc_ctls &= ~PROCBASED_CR8_LOAD_EXITING;
		proc_ctls &= ~PROCBASED_CR8_STORE_EXITING;
	}
	if (vmx_cap_en(vmx, VMX_CAP_APICV)) {
		ASSERT(vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW));

		proc2_ctls |= (PROCBASED2_VIRTUALIZE_APIC_ACCESSES |
		    PROCBASED2_APIC_REGISTER_VIRTUALIZATION |
		    PROCBASED2_VIRTUAL_INTERRUPT_DELIVERY);

		error = vm_map_mmio(vm, DEFAULT_APIC_BASE, PAGE_SIZE,
		    APIC_ACCESS_ADDRESS);
		/* XXX this should really return an error to the caller */
		KASSERT(error == 0, ("vm_map_mmio(apicbase) error %d", error));
	}
	if (vmx_cap_en(vmx, VMX_CAP_APICV_PIR)) {
		ASSERT(vmx_cap_en(vmx, VMX_CAP_APICV));

		pin_ctls |= PINBASED_POSTED_INTERRUPT;
	}

	maxcpus = vm_get_maxcpus(vm);
	datasel = vmm_get_host_datasel();
	for (i = 0; i < maxcpus; i++) {
		/*
		 * Cache physical address lookups for various components which
		 * may be required inside the critical_enter() section implied
		 * by VMPTRLD() below.
		 */
		vm_paddr_t msr_bitmap_pa = vtophys(vmx->msr_bitmap);
		vm_paddr_t apic_page_pa = vtophys(&vmx->apic_page[i]);
		vm_paddr_t pir_desc_pa = vtophys(&vmx->pir_desc[i]);

		vmx->vmcs_pa[i] = (uintptr_t)vtophys(&vmx->vmcs[i]);
		vmcs_initialize(&vmx->vmcs[i], vmx->vmcs_pa[i]);

		vmx_msr_guest_init(vmx, i);

		vmcs_load(vmx->vmcs_pa[i]);

		vmcs_write(VMCS_HOST_IA32_PAT, vmm_get_host_pat());
		vmcs_write(VMCS_HOST_IA32_EFER, vmm_get_host_efer());

		/* Load the control registers */
		vmcs_write(VMCS_HOST_CR0, vmm_get_host_cr0());
		vmcs_write(VMCS_HOST_CR4, vmm_get_host_cr4() | CR4_VMXE);

		/* Load the segment selectors */
		vmcs_write(VMCS_HOST_CS_SELECTOR, vmm_get_host_codesel());

		vmcs_write(VMCS_HOST_ES_SELECTOR, datasel);
		vmcs_write(VMCS_HOST_SS_SELECTOR, datasel);
		vmcs_write(VMCS_HOST_DS_SELECTOR, datasel);

		vmcs_write(VMCS_HOST_FS_SELECTOR, vmm_get_host_fssel());
		vmcs_write(VMCS_HOST_GS_SELECTOR, vmm_get_host_gssel());
		vmcs_write(VMCS_HOST_TR_SELECTOR, vmm_get_host_tsssel());

		/*
		 * Configure host sysenter MSRs to be restored on VM exit.
		 * The thread-specific MSR_INTC_SEP_ESP value is loaded in
		 * vmx_run.
		 */
		vmcs_write(VMCS_HOST_IA32_SYSENTER_CS, KCS_SEL);
		vmcs_write(VMCS_HOST_IA32_SYSENTER_EIP,
		    rdmsr(MSR_SYSENTER_EIP_MSR));

		/* instruction pointer */
		if (no_flush_rsb) {
			vmcs_write(VMCS_HOST_RIP, (uint64_t)vmx_exit_guest);
		} else {
			vmcs_write(VMCS_HOST_RIP,
			    (uint64_t)vmx_exit_guest_flush_rsb);
		}

		/* link pointer */
		vmcs_write(VMCS_LINK_POINTER, ~0);

		vmcs_write(VMCS_EPTP, vmx->eptp);
		vmcs_write(VMCS_PIN_BASED_CTLS, pin_ctls);
		vmcs_write(VMCS_PRI_PROC_BASED_CTLS, proc_ctls);
		vmcs_write(VMCS_SEC_PROC_BASED_CTLS, proc2_ctls);
		vmcs_write(VMCS_EXIT_CTLS, exit_ctls);
		vmcs_write(VMCS_ENTRY_CTLS, entry_ctls);
		vmcs_write(VMCS_MSR_BITMAP, msr_bitmap_pa);
		vmcs_write(VMCS_VPID, vpid[i]);

		if (guest_l1d_flush && !guest_l1d_flush_sw) {
			vmcs_write(VMCS_ENTRY_MSR_LOAD, pmap_kextract(
			    (vm_offset_t)&msr_load_list[0]));
			vmcs_write(VMCS_ENTRY_MSR_LOAD_COUNT,
			    nitems(msr_load_list));
			vmcs_write(VMCS_EXIT_MSR_STORE, 0);
			vmcs_write(VMCS_EXIT_MSR_STORE_COUNT, 0);
		}

		/* exception bitmap */
		if (vcpu_trace_exceptions(vm, i))
			exc_bitmap = 0xffffffff;
		else
			exc_bitmap = 1 << IDT_MC;
		vmcs_write(VMCS_EXCEPTION_BITMAP, exc_bitmap);

		vmx->ctx[i].guest_dr6 = DBREG_DR6_RESERVED1;
		vmcs_write(VMCS_GUEST_DR7, DBREG_DR7_RESERVED1);

		if (vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW)) {
			vmcs_write(VMCS_VIRTUAL_APIC, apic_page_pa);
		}

		if (vmx_cap_en(vmx, VMX_CAP_APICV)) {
			vmcs_write(VMCS_APIC_ACCESS, APIC_ACCESS_ADDRESS);
			vmcs_write(VMCS_EOI_EXIT0, 0);
			vmcs_write(VMCS_EOI_EXIT1, 0);
			vmcs_write(VMCS_EOI_EXIT2, 0);
			vmcs_write(VMCS_EOI_EXIT3, 0);
		}
		if (vmx_cap_en(vmx, VMX_CAP_APICV_PIR)) {
			vmcs_write(VMCS_PIR_VECTOR, pirvec);
			vmcs_write(VMCS_PIR_DESC, pir_desc_pa);
		}

		/*
		 * Set up the CR0/4 masks and configure the read shadow state
		 * to the power-on register value from the Intel Sys Arch.
		 *  CR0 - 0x60000010
		 *  CR4 - 0
		 */
		vmcs_write(VMCS_CR0_MASK, cr0_ones_mask | cr0_zeros_mask);
		vmcs_write(VMCS_CR0_SHADOW, 0x60000010);
		vmcs_write(VMCS_CR4_MASK, cr4_ones_mask | cr4_zeros_mask);
		vmcs_write(VMCS_CR4_SHADOW, 0);

		vmcs_clear(vmx->vmcs_pa[i]);

		vmx->cap[i].set = 0;
		vmx->cap[i].proc_ctls = proc_ctls;
		vmx->cap[i].proc_ctls2 = proc2_ctls;
		vmx->cap[i].exc_bitmap = exc_bitmap;

		vmx->state[i].nextrip = ~0;
		vmx->state[i].lastcpu = NOCPU;
		vmx->state[i].vpid = vpid[i];

		vmx->ctx[i].pmap = pmap;
	}

	return (vmx);
}

static int
vmx_handle_cpuid(struct vm *vm, int vcpu, struct vmxctx *vmxctx)
{
#ifdef __FreeBSD__
	int handled, func;

	func = vmxctx->guest_rax;
#else
	int handled;
#endif

	handled = x86_emulate_cpuid(vm, vcpu, (uint64_t *)&vmxctx->guest_rax,
	    (uint64_t *)&vmxctx->guest_rbx, (uint64_t *)&vmxctx->guest_rcx,
	    (uint64_t *)&vmxctx->guest_rdx);
	return (handled);
}

static __inline void
vmx_run_trace(struct vmx *vmx, int vcpu)
{
#ifdef KTR
	VCPU_CTR1(vmx->vm, vcpu, "Resume execution at %lx", vmcs_guest_rip());
#endif
}

static __inline void
vmx_astpending_trace(struct vmx *vmx, int vcpu, uint64_t rip)
{
#ifdef KTR
	VCPU_CTR1(vmx->vm, vcpu, "astpending vmexit at 0x%0lx", rip);
#endif
}

static VMM_STAT_INTEL(VCPU_INVVPID_SAVED, "Number of vpid invalidations saved");
static VMM_STAT_INTEL(VCPU_INVVPID_DONE, "Number of vpid invalidations done");

#define	INVVPID_TYPE_ADDRESS		0UL
#define	INVVPID_TYPE_SINGLE_CONTEXT	1UL
#define	INVVPID_TYPE_ALL_CONTEXTS	2UL

struct invvpid_desc {
	uint16_t	vpid;
	uint16_t	_res1;
	uint32_t	_res2;
	uint64_t	linear_addr;
};
CTASSERT(sizeof (struct invvpid_desc) == 16);

static __inline void
invvpid(uint64_t type, struct invvpid_desc desc)
{
	int error;

	__asm __volatile("invvpid %[desc], %[type];"
	    VMX_SET_ERROR_CODE_ASM
	    : [error] "=r" (error)
	    : [desc] "m" (desc), [type] "r" (type)
	    : "memory");

	if (error)
		panic("invvpid error %d", error);
}

/*
 * Invalidate guest mappings identified by its vpid from the TLB.
 */
static __inline void
vmx_invvpid(struct vmx *vmx, int vcpu, pmap_t pmap, int running)
{
	struct vmxstate *vmxstate;
	struct invvpid_desc invvpid_desc;

	vmxstate = &vmx->state[vcpu];
	if (vmxstate->vpid == 0)
		return;

	if (!running) {
		/*
		 * Set the 'lastcpu' to an invalid host cpu.
		 *
		 * This will invalidate TLB entries tagged with the vcpu's
		 * vpid the next time it runs via vmx_set_pcpu_defaults().
		 */
		vmxstate->lastcpu = NOCPU;
		return;
	}

#ifdef __FreeBSD__
	KASSERT(curthread->td_critnest > 0, ("%s: vcpu %d running outside "
	    "critical section", __func__, vcpu));
#endif

	/*
	 * Invalidate all mappings tagged with 'vpid'
	 *
	 * We do this because this vcpu was executing on a different host
	 * cpu when it last ran. We do not track whether it invalidated
	 * mappings associated with its 'vpid' during that run. So we must
	 * assume that the mappings associated with 'vpid' on 'curcpu' are
	 * stale and invalidate them.
	 *
	 * Note that we incur this penalty only when the scheduler chooses to
	 * move the thread associated with this vcpu between host cpus.
	 *
	 * Note also that this will invalidate mappings tagged with 'vpid'
	 * for "all" EP4TAs.
	 */
	if (pmap->pm_eptgen == vmx->eptgen[curcpu]) {
		invvpid_desc._res1 = 0;
		invvpid_desc._res2 = 0;
		invvpid_desc.vpid = vmxstate->vpid;
		invvpid_desc.linear_addr = 0;
		invvpid(INVVPID_TYPE_SINGLE_CONTEXT, invvpid_desc);
		vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_DONE, 1);
	} else {
		/*
		 * The invvpid can be skipped if an invept is going to
		 * be performed before entering the guest. The invept
		 * will invalidate combined mappings tagged with
		 * 'vmx->eptp' for all vpids.
		 */
		vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_SAVED, 1);
	}
}

static void
vmx_set_pcpu_defaults(struct vmx *vmx, int vcpu, pmap_t pmap)
{
	struct vmxstate *vmxstate;

	/*
	 * Regardless of whether the VM appears to have migrated between CPUs,
	 * save the host sysenter stack pointer. As it points to the kernel
	 * stack of each thread, the correct value must be maintained for every
	 * trip into the critical section.
	 */
	vmcs_write(VMCS_HOST_IA32_SYSENTER_ESP, rdmsr(MSR_SYSENTER_ESP_MSR));

	/*
	 * Perform any needed TSC_OFFSET adjustment based on TSC_MSR writes or
	 * migration between host CPUs with differing TSC values.
	 */
	vmx_apply_tsc_adjust(vmx, vcpu);

	vmxstate = &vmx->state[vcpu];
	if (vmxstate->lastcpu == curcpu)
		return;

	vmxstate->lastcpu = curcpu;

	vmm_stat_incr(vmx->vm, vcpu, VCPU_MIGRATIONS, 1);

	/* Load the per-CPU IDT address */
	vmcs_write(VMCS_HOST_IDTR_BASE, vmm_get_host_idtrbase());
	vmcs_write(VMCS_HOST_TR_BASE, vmm_get_host_trbase());
	vmcs_write(VMCS_HOST_GDTR_BASE, vmm_get_host_gdtrbase());
	vmcs_write(VMCS_HOST_GS_BASE, vmm_get_host_gsbase());
	vmx_invvpid(vmx, vcpu, pmap, 1);
}

/*
 * We depend on 'procbased_ctls' to have the Interrupt Window Exiting bit set.
 */
CTASSERT((PROCBASED_CTLS_ONE_SETTING & PROCBASED_INT_WINDOW_EXITING) != 0);

static __inline void
vmx_set_int_window_exiting(struct vmx *vmx, int vcpu)
{
	if ((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) == 0) {
		vmx->cap[vcpu].proc_ctls |= PROCBASED_INT_WINDOW_EXITING;
		vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
		VCPU_CTR0(vmx->vm, vcpu, "Enabling interrupt window exiting");
	}
}

static __inline void
vmx_clear_int_window_exiting(struct vmx *vmx, int vcpu)
{
	KASSERT((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0,
	    ("intr_window_exiting not set: %x", vmx->cap[vcpu].proc_ctls));
	vmx->cap[vcpu].proc_ctls &= ~PROCBASED_INT_WINDOW_EXITING;
	vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
	VCPU_CTR0(vmx->vm, vcpu, "Disabling interrupt window exiting");
}

static __inline bool
vmx_nmi_window_exiting(struct vmx *vmx, int vcpu)
{
	return ((vmx->cap[vcpu].proc_ctls & PROCBASED_NMI_WINDOW_EXITING) != 0);
}

static __inline void
vmx_set_nmi_window_exiting(struct vmx *vmx, int vcpu)
{
	if (!vmx_nmi_window_exiting(vmx, vcpu)) {
		vmx->cap[vcpu].proc_ctls |= PROCBASED_NMI_WINDOW_EXITING;
		vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
	}
}

static __inline void
vmx_clear_nmi_window_exiting(struct vmx *vmx, int vcpu)
{
	ASSERT(vmx_nmi_window_exiting(vmx, vcpu));
	vmx->cap[vcpu].proc_ctls &= ~PROCBASED_NMI_WINDOW_EXITING;
	vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
}

/*
 * Set the TSC adjustment, taking into account the offsets measured between
 * host physical CPUs. This is required even if the guest has not set a TSC
 * offset since vCPUs inherit the TSC offset of whatever physical CPU they
 * have migrated onto. Without this mitigation, un-synched host TSCs will
 * convey the appearance of TSC time-travel to the guest as its vCPUs migrate.
 */
static void
vmx_apply_tsc_adjust(struct vmx *vmx, int vcpu)
{
	extern hrtime_t tsc_gethrtime_tick_delta(void);
	const uint64_t target_offset = (vcpu_tsc_offset(vmx->vm, vcpu) +
	    (uint64_t)tsc_gethrtime_tick_delta());

	ASSERT(vmx->cap[vcpu].proc_ctls & PROCBASED_TSC_OFFSET);

	if (vmx->tsc_offset_active[vcpu] != target_offset) {
		vmcs_write(VMCS_TSC_OFFSET, target_offset);
		vmx->tsc_offset_active[vcpu] = target_offset;
	}
}

#define	NMI_BLOCKING	(VMCS_INTERRUPTIBILITY_NMI_BLOCKING |		\
			VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)
#define	HWINTR_BLOCKING	(VMCS_INTERRUPTIBILITY_STI_BLOCKING |		\
			VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)

static void
vmx_inject_nmi(struct vmx *vmx, int vcpu)
{
	ASSERT0(vmcs_read(VMCS_GUEST_INTERRUPTIBILITY) & NMI_BLOCKING);
	ASSERT0(vmcs_read(VMCS_ENTRY_INTR_INFO) & VMCS_INTR_VALID);

	/*
	 * Inject the virtual NMI. The vector must be the NMI IDT entry
	 * or the VMCS entry check will fail.
	 */
	vmcs_write(VMCS_ENTRY_INTR_INFO,
	    IDT_NMI | VMCS_INTR_T_NMI | VMCS_INTR_VALID);

	/* Clear the request */
	vm_nmi_clear(vmx->vm, vcpu);
}

/*
 * Inject exceptions, NMIs, and ExtINTs.
 *
 * The logic behind these is complicated and may involve mutex contention, so
 * the injection is performed without the protection of host CPU interrupts
 * being disabled.  This means a racing notification could be "lost",
 * necessitating a later call to vmx_inject_recheck() to close that window
 * of opportunity.
 */
static enum event_inject_state
vmx_inject_events(struct vmx *vmx, int vcpu, uint64_t rip)
{
	uint64_t entryinfo;
	uint32_t gi, info;
	int vector;
	enum event_inject_state state;

	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
	info = vmcs_read(VMCS_ENTRY_INTR_INFO);
	state = EIS_CAN_INJECT;

	/* Clear any interrupt blocking if the guest %rip has changed */
	if (vmx->state[vcpu].nextrip != rip && (gi & HWINTR_BLOCKING) != 0) {
		gi &= ~HWINTR_BLOCKING;
		vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
	}

	/*
	 * It could be that an interrupt is already pending for injection from
	 * the VMCS. This would be the case if the vCPU exited for conditions
	 * such as an AST before a vm-entry delivered the injection.
	 */
	if ((info & VMCS_INTR_VALID) != 0) {
		return (EIS_EV_EXISTING | EIS_REQ_EXIT);
	}

	if (vm_entry_intinfo(vmx->vm, vcpu, &entryinfo)) {
		ASSERT(entryinfo & VMCS_INTR_VALID);

		info = entryinfo;
		vector = info & 0xff;
		if (vector == IDT_BP || vector == IDT_OF) {
			/*
			 * VT-x requires #BP and #OF to be injected as software
			 * exceptions.
			 */
			info &= ~VMCS_INTR_T_MASK;
			info |= VMCS_INTR_T_SWEXCEPTION;
		}

		if (info & VMCS_INTR_DEL_ERRCODE) {
			vmcs_write(VMCS_ENTRY_EXCEPTION_ERROR, entryinfo >> 32);
		}

		vmcs_write(VMCS_ENTRY_INTR_INFO, info);
		state = EIS_EV_INJECTED;
	}

	if (vm_nmi_pending(vmx->vm, vcpu)) {
		/*
		 * If there are no conditions blocking NMI injection then
		 * inject it directly here; otherwise enable "NMI window
		 * exiting" to inject it as soon as we can.
		 *
		 * According to the Intel manual, some CPUs do not allow NMI
		 * injection when STI_BLOCKING is active.  That check is
		 * enforced here, regardless of CPU capability.  If running on
		 * a CPU without such a restriction it will immediately exit
		 * and the NMI will be injected in the "NMI window exiting"
		 * handler.
		 */
		if ((gi & (HWINTR_BLOCKING | NMI_BLOCKING)) == 0) {
			if (state == EIS_CAN_INJECT) {
				vmx_inject_nmi(vmx, vcpu);
				state = EIS_EV_INJECTED;
			} else {
				return (state | EIS_REQ_EXIT);
			}
		} else {
			vmx_set_nmi_window_exiting(vmx, vcpu);
		}
	}

	if (vm_extint_pending(vmx->vm, vcpu)) {
		if (state != EIS_CAN_INJECT) {
			return (state | EIS_REQ_EXIT);
		}
		if ((gi & HWINTR_BLOCKING) != 0 ||
		    (vmcs_read(VMCS_GUEST_RFLAGS) & PSL_I) == 0) {
			return (EIS_GI_BLOCK);
		}

		/* Ask the legacy pic for a vector to inject */
		vatpic_pending_intr(vmx->vm, &vector);

		/*
		 * From the Intel SDM, Volume 3, Section "Maskable
		 * Hardware Interrupts":
		 * - maskable interrupt vectors [0,255] can be delivered
		 *   through the INTR pin.
		 */
		KASSERT(vector >= 0 && vector <= 255,
		    ("invalid vector %d from INTR", vector));

		/* Inject the interrupt */
		vmcs_write(VMCS_ENTRY_INTR_INFO,
		    VMCS_INTR_T_HWINTR | VMCS_INTR_VALID | vector);

		vm_extint_clear(vmx->vm, vcpu);
		vatpic_intr_accepted(vmx->vm, vector);
		state = EIS_EV_INJECTED;
	}

	return (state);
}

/*
 * Inject any interrupts pending on the vLAPIC.
 *
 * This is done with host CPU interrupts disabled so notification IPIs, either
 * from the standard vCPU notification or APICv posted interrupts, will be
 * queued on the host APIC and recognized when entering VMX context.
 */
static enum event_inject_state
vmx_inject_vlapic(struct vmx *vmx, int vcpu, struct vlapic *vlapic)
{
	int vector;

	if (!vlapic_pending_intr(vlapic, &vector)) {
		return (EIS_CAN_INJECT);
	}

	/*
	 * From the Intel SDM, Volume 3, Section "Maskable
	 * Hardware Interrupts":
	 * - maskable interrupt vectors [16,255] can be delivered
	 *   through the local APIC.
	 */
	KASSERT(vector >= 16 && vector <= 255,
	    ("invalid vector %d from local APIC", vector));

	if (vmx_cap_en(vmx, VMX_CAP_APICV)) {
		uint16_t status_old = vmcs_read(VMCS_GUEST_INTR_STATUS);
		uint16_t status_new = (status_old & 0xff00) | vector;

		/*
		 * The APICv state will have been synced into the vLAPIC
		 * as part of vlapic_pending_intr().  Prepare the VMCS
		 * for the to-be-injected pending interrupt.
		 */
		if (status_new > status_old) {
			vmcs_write(VMCS_GUEST_INTR_STATUS, status_new);
			VCPU_CTR2(vlapic->vm, vlapic->vcpuid,
			    "vmx_inject_interrupts: guest_intr_status "
			    "changed from 0x%04x to 0x%04x",
			    status_old, status_new);
		}

		/*
		 * Ensure VMCS state regarding EOI traps is kept in sync
		 * with the TMRs in the vlapic.
		 */
		vmx_apicv_sync_tmr(vlapic);

		/*
		 * The rest of the injection process for injecting the
		 * interrupt(s) is handled by APICv. It does not preclude other
		 * event injection from occurring.
		 */
		return (EIS_CAN_INJECT);
	}

	ASSERT0(vmcs_read(VMCS_ENTRY_INTR_INFO) & VMCS_INTR_VALID);

	/* Does guest interruptability block injection? */
	if ((vmcs_read(VMCS_GUEST_INTERRUPTIBILITY) & HWINTR_BLOCKING) != 0 ||
	    (vmcs_read(VMCS_GUEST_RFLAGS) & PSL_I) == 0) {
		return (EIS_GI_BLOCK);
	}

	/* Inject the interrupt */
	vmcs_write(VMCS_ENTRY_INTR_INFO,
	    VMCS_INTR_T_HWINTR | VMCS_INTR_VALID | vector);

	/* Update the Local APIC ISR */
	vlapic_intr_accepted(vlapic, vector);

	return (EIS_EV_INJECTED);
}

/*
 * Re-check for events to be injected.
 *
 * Once host CPU interrupts are disabled, check for the presence of any events
 * which require injection processing. If an exit is required upon injection,
 * or once the guest becomes interruptable, that will be configured too.
 */
static bool
vmx_inject_recheck(struct vmx *vmx, int vcpu, enum event_inject_state state)
{
	if (state == EIS_CAN_INJECT) {
		if (vm_nmi_pending(vmx->vm, vcpu) &&
		    !vmx_nmi_window_exiting(vmx, vcpu)) {
			/* queued NMI not blocked by NMI-window-exiting */
			return (true);
		}
		if (vm_extint_pending(vmx->vm, vcpu)) {
			/* queued ExtINT not blocked by existing injection */
			return (true);
		}
	} else {
		if ((state & EIS_REQ_EXIT) != 0) {
			/*
			 * Use a self-IPI to force an immediate exit after
			 * event injection has occurred.
			 */
			poke_cpu(CPU->cpu_id);
		} else {
			/*
			 * If any event is being injected, an exit immediately
			 * upon becoming interruptable again will allow pending
			 * or newly queued events to be injected in a timely
			 * manner.
			 */
			vmx_set_int_window_exiting(vmx, vcpu);
		}
	}
	return (false);
}

/*
 * If the Virtual NMIs execution control is '1' then the logical processor
 * tracks virtual-NMI blocking in the Guest Interruptibility-state field of
 * the VMCS.  An IRET instruction in VMX non-root operation will remove any
 * virtual-NMI blocking.
 *
 * This unblocking occurs even if the IRET causes a fault. In this case the
 * hypervisor needs to restore virtual-NMI blocking before resuming the guest.
 */
static void
vmx_restore_nmi_blocking(struct vmx *vmx, int vcpuid)
{
	uint32_t gi;

	VCPU_CTR0(vmx->vm, vcpuid, "Restore Virtual-NMI blocking");
	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
	gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
	vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
}

static void
vmx_clear_nmi_blocking(struct vmx *vmx, int vcpuid)
{
	uint32_t gi;

	VCPU_CTR0(vmx->vm, vcpuid, "Clear Virtual-NMI blocking");
	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
	gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
	vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
}

static void
vmx_assert_nmi_blocking(struct vmx *vmx, int vcpuid)
{
	uint32_t gi;

	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
	KASSERT(gi & VMCS_INTERRUPTIBILITY_NMI_BLOCKING,
	    ("NMI blocking is not in effect %x", gi));
}

static int
vmx_emulate_xsetbv(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
{
	struct vmxctx *vmxctx;
	uint64_t xcrval;
	const struct xsave_limits *limits;

	vmxctx = &vmx->ctx[vcpu];
	limits = vmm_get_xsave_limits();

	/*
	 * Note that the processor raises a GP# fault on its own if
	 * xsetbv is executed for CPL != 0, so we do not have to
	 * emulate that fault here.
	 */

	/* Only xcr0 is supported. */
	if (vmxctx->guest_rcx != 0) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/* We only handle xcr0 if both the host and guest have XSAVE enabled. */
	if (!limits->xsave_enabled ||
	    !(vmcs_read(VMCS_GUEST_CR4) & CR4_XSAVE)) {
		vm_inject_ud(vmx->vm, vcpu);
		return (HANDLED);
	}

	xcrval = vmxctx->guest_rdx << 32 | (vmxctx->guest_rax & 0xffffffff);
	if ((xcrval & ~limits->xcr0_allowed) != 0) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	if (!(xcrval & XFEATURE_ENABLED_X87)) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/* AVX (YMM_Hi128) requires SSE. */
	if (xcrval & XFEATURE_ENABLED_AVX &&
	    (xcrval & XFEATURE_AVX) != XFEATURE_AVX) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/*
	 * AVX512 requires base AVX (YMM_Hi128) as well as OpMask,
	 * ZMM_Hi256, and Hi16_ZMM.
	 */
	if (xcrval & XFEATURE_AVX512 &&
	    (xcrval & (XFEATURE_AVX512 | XFEATURE_AVX)) !=
	    (XFEATURE_AVX512 | XFEATURE_AVX)) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/*
	 * Intel MPX requires both bound register state flags to be
	 * set.
	 */
	if (((xcrval & XFEATURE_ENABLED_BNDREGS) != 0) !=
	    ((xcrval & XFEATURE_ENABLED_BNDCSR) != 0)) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/*
	 * This runs "inside" vmrun() with the guest's FPU state, so
	 * modifying xcr0 directly modifies the guest's xcr0, not the
	 * host's.
	 */
	load_xcr(0, xcrval);
	return (HANDLED);
}
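
/*
 * Fetch a guest general-purpose register by the 4-bit identifier used in
 * VM-exit qualifications (0 = %rax through 15 = %r15, following the x86
 * instruction-encoding order).  Note that %rsp (identifier 4) is kept in the
 * VMCS guest-state area rather than in the vmxctx register file, so it is
 * read through the VMCS.
 */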
static uint64_t
vmx_get_guest_reg(struct vmx *vmx, int vcpu, int ident)
{
	const struct vmxctx *vmxctx;

	vmxctx = &vmx->ctx[vcpu];

	switch (ident) {
	case 0:
		return (vmxctx->guest_rax);
	case 1:
		return (vmxctx->guest_rcx);
	case 2:
		return (vmxctx->guest_rdx);
	case 3:
		return (vmxctx->guest_rbx);
	case 4:
		return (vmcs_read(VMCS_GUEST_RSP));
	case 5:
		return (vmxctx->guest_rbp);
	case 6:
		return (vmxctx->guest_rsi);
	case 7:
		return (vmxctx->guest_rdi);
	case 8:
		return (vmxctx->guest_r8);
	case 9:
		return (vmxctx->guest_r9);
	case 10:
		return (vmxctx->guest_r10);
	case 11:
		return (vmxctx->guest_r11);
	case 12:
		return (vmxctx->guest_r12);
	case 13:
		return (vmxctx->guest_r13);
	case 14:
		return (vmxctx->guest_r14);
	case 15:
		return (vmxctx->guest_r15);
	default:
		panic("invalid vmx register %d", ident);
	}
}

static void
vmx_set_guest_reg(struct vmx *vmx, int vcpu, int ident, uint64_t regval)
{
	struct vmxctx *vmxctx;

	vmxctx = &vmx->ctx[vcpu];

	switch (ident) {
	case 0:
		vmxctx->guest_rax = regval;
		break;
	case 1:
		vmxctx->guest_rcx = regval;
		break;
	case 2:
		vmxctx->guest_rdx = regval;
		break;
	case 3:
		vmxctx->guest_rbx = regval;
		break;
	case 4:
		vmcs_write(VMCS_GUEST_RSP, regval);
		break;
	case 5:
		vmxctx->guest_rbp = regval;
		break;
	case 6:
		vmxctx->guest_rsi = regval;
		break;
	case 7:
		vmxctx->guest_rdi = regval;
		break;
	case 8:
		vmxctx->guest_r8 = regval;
		break;
	case 9:
		vmxctx->guest_r9 = regval;
		break;
	case 10:
		vmxctx->guest_r10 = regval;
		break;
	case 11:
		vmxctx->guest_r11 = regval;
		break;
	case 12:
		vmxctx->guest_r12 = regval;
		break;
	case 13:
		vmxctx->guest_r13 = regval;
		break;
	case 14:
		vmxctx->guest_r14 = regval;
		break;
	case 15:
		vmxctx->guest_r15 = regval;
		break;
	default:
		panic("invalid vmx register %d", ident);
	}
}

static int
vmx_emulate_cr0_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
{
	uint64_t crval, regval;

	/* We only handle mov to %cr0 at this time */
	if ((exitqual & 0xf0) != 0x00)
		return (UNHANDLED);

	regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);

	vmcs_write(VMCS_CR0_SHADOW, regval);

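	/*
	 * Apply the host-mandated CR0 fixed bits before loading the real
	 * guest CR0: the cr0_ones_mask bits must be set and the
	 * cr0_zeros_mask bits must be clear while in VMX operation.  The
	 * unmodified value written by the guest remains visible through the
	 * CR0 read shadow written above.
	 */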
	crval = regval | cr0_ones_mask;
	crval &= ~cr0_zeros_mask;
	vmcs_write(VMCS_GUEST_CR0, crval);

	if (regval & CR0_PG) {
		uint64_t efer, entry_ctls;

		/*
		 * If CR0.PG is 1 and EFER.LME is 1 then EFER.LMA and
		 * the "IA-32e mode guest" bit in VM-entry control must be
		 * equal.
		 */
		efer = vmcs_read(VMCS_GUEST_IA32_EFER);
		if (efer & EFER_LME) {
			efer |= EFER_LMA;
			vmcs_write(VMCS_GUEST_IA32_EFER, efer);
			entry_ctls = vmcs_read(VMCS_ENTRY_CTLS);
			entry_ctls |= VM_ENTRY_GUEST_LMA;
			vmcs_write(VMCS_ENTRY_CTLS, entry_ctls);
		}
	}

	return (HANDLED);
}

static int
vmx_emulate_cr4_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
{
	uint64_t crval, regval;

	/* We only handle mov to %cr4 at this time */
	if ((exitqual & 0xf0) != 0x00)
		return (UNHANDLED);

	regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);

	vmcs_write(VMCS_CR4_SHADOW, regval);

	crval = regval | cr4_ones_mask;
	crval &= ~cr4_zeros_mask;
	vmcs_write(VMCS_GUEST_CR4, crval);

	return (HANDLED);
}

static int
vmx_emulate_cr8_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
{
	struct vlapic *vlapic;
	uint64_t cr8;
	int regnum;

	/* We only handle mov %cr8 to/from a register at this time. */
	if ((exitqual & 0xe0) != 0x00) {
		return (UNHANDLED);
	}

	vlapic = vm_lapic(vmx->vm, vcpu);
	regnum = (exitqual >> 8) & 0xf;
	if (exitqual & 0x10) {
		cr8 = vlapic_get_cr8(vlapic);
		vmx_set_guest_reg(vmx, vcpu, regnum, cr8);
	} else {
		cr8 = vmx_get_guest_reg(vmx, vcpu, regnum);
		vlapic_set_cr8(vlapic, cr8);
	}

	return (HANDLED);
}

/*
 * From section "Guest Register State" in the Intel SDM: CPL = SS.DPL
 */
static int
vmx_cpl(void)
{
	uint32_t ssar;

	ssar = vmcs_read(VMCS_GUEST_SS_ACCESS_RIGHTS);
	return ((ssar >> 5) & 0x3);
}

static enum vm_cpu_mode
vmx_cpu_mode(void)
{
	uint32_t csar;

	if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LMA) {
		csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS);
		if (csar & 0x2000)
			return (CPU_MODE_64BIT);	/* CS.L = 1 */
		else
			return (CPU_MODE_COMPATIBILITY);
	} else if (vmcs_read(VMCS_GUEST_CR0) & CR0_PE) {
		return (CPU_MODE_PROTECTED);
	} else {
		return (CPU_MODE_REAL);
	}
}

static enum vm_paging_mode
vmx_paging_mode(void)
{
	if (!(vmcs_read(VMCS_GUEST_CR0) & CR0_PG))
		return (PAGING_MODE_FLAT);
	if (!(vmcs_read(VMCS_GUEST_CR4) & CR4_PAE))
		return (PAGING_MODE_32);
	if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LME)
		return (PAGING_MODE_64);
	else
		return (PAGING_MODE_PAE);
}

static void
vmx_paging_info(struct vm_guest_paging *paging)
{
	paging->cr3 = vmcs_guest_cr3();
	paging->cpl = vmx_cpl();
	paging->cpu_mode = vmx_cpu_mode();
	paging->paging_mode = vmx_paging_mode();
}

static void
vmexit_mmio_emul(struct vm_exit *vmexit, struct vie *vie, uint64_t gpa,
    uint64_t gla)
{
	struct vm_guest_paging paging;
	uint32_t csar;

	vmexit->exitcode = VM_EXITCODE_MMIO_EMUL;
	vmexit->inst_length = 0;
	vmexit->u.mmio_emul.gpa = gpa;
	vmexit->u.mmio_emul.gla = gla;
	vmx_paging_info(&paging);

	switch (paging.cpu_mode) {
	case CPU_MODE_REAL:
		vmexit->u.mmio_emul.cs_base = vmcs_read(VMCS_GUEST_CS_BASE);
		vmexit->u.mmio_emul.cs_d = 0;
		break;
	case CPU_MODE_PROTECTED:
	case CPU_MODE_COMPATIBILITY:
		vmexit->u.mmio_emul.cs_base = vmcs_read(VMCS_GUEST_CS_BASE);
		csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS);
		vmexit->u.mmio_emul.cs_d = SEG_DESC_DEF32(csar);
		break;
	default:
		vmexit->u.mmio_emul.cs_base = 0;
		vmexit->u.mmio_emul.cs_d = 0;
		break;
	}

	vie_init_mmio(vie, NULL, 0, &paging, gpa);
}

static void
vmexit_inout(struct vm_exit *vmexit, struct vie *vie, uint64_t qual,
    uint32_t eax)
{
	struct vm_guest_paging paging;
	struct vm_inout *inout;

	inout = &vmexit->u.inout;

	inout->bytes = (qual & 0x7) + 1;
	inout->flags = 0;
	inout->flags |= (qual & 0x8) ? INOUT_IN : 0;
	inout->flags |= (qual & 0x10) ? INOUT_STR : 0;
	inout->flags |= (qual & 0x20) ? INOUT_REP : 0;
	inout->port = (uint16_t)(qual >> 16);
	inout->eax = eax;
	if (inout->flags & INOUT_STR) {
		uint64_t inst_info;

		inst_info = vmcs_read(VMCS_EXIT_INSTRUCTION_INFO);

		/*
		 * According to the SDM, bits 9:7 encode the address size of
		 * the ins/outs operation, but only values 0/1/2 are expected,
		 * corresponding to 16/32/64 bit sizes.
		 */
		inout->addrsize = 2 << BITX(inst_info, 9, 7);
		VERIFY(inout->addrsize == 2 || inout->addrsize == 4 ||
		    inout->addrsize == 8);

		if (inout->flags & INOUT_IN) {
			/*
			 * The bits describing the segment in INSTRUCTION_INFO
			 * are not defined for ins, leaving it to system
			 * software to assume %es (encoded as 0)
			 */
			inout->segment = 0;
		} else {
			/*
			 * Bits 15-17 encode the segment for OUTS.
			 * This value follows the standard x86 segment order.
			 */
			inout->segment = (inst_info >> 15) & 0x7;
		}
	}

	vmexit->exitcode = VM_EXITCODE_INOUT;
	vmx_paging_info(&paging);
	vie_init_inout(vie, inout, vmexit->inst_length, &paging);

	/* The in/out emulation will handle advancing %rip */
	vmexit->inst_length = 0;
}

static int
ept_fault_type(uint64_t ept_qual)
{
	int fault_type;

	if (ept_qual & EPT_VIOLATION_DATA_WRITE)
		fault_type = VM_PROT_WRITE;
	else if (ept_qual & EPT_VIOLATION_INST_FETCH)
		fault_type = VM_PROT_EXECUTE;
	else
		fault_type = VM_PROT_READ;

	return (fault_type);
}

static bool
ept_emulation_fault(uint64_t ept_qual)
{
	int read, write;

	/* EPT fault on an instruction fetch doesn't make sense here */
	if (ept_qual & EPT_VIOLATION_INST_FETCH)
		return (false);

	/* EPT fault must be a read fault or a write fault */
	read = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0;
	write = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0;
	if ((read | write) == 0)
		return (false);

	/*
	 * The EPT violation must have been caused by accessing a
	 * guest-physical address that is a translation of a guest-linear
	 * address.
	 */
	if ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 ||
	    (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) {
		return (false);
	}

	return (true);
}

static __inline int
apic_access_virtualization(struct vmx *vmx, int vcpuid)
{
	uint32_t proc_ctls2;

	proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
	return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) ?
1 : 0); 1919 } 1920 1921 static __inline int 1922 x2apic_virtualization(struct vmx *vmx, int vcpuid) 1923 { 1924 uint32_t proc_ctls2; 1925 1926 proc_ctls2 = vmx->cap[vcpuid].proc_ctls2; 1927 return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_X2APIC_MODE) ? 1 : 0); 1928 } 1929 1930 static int 1931 vmx_handle_apic_write(struct vmx *vmx, int vcpuid, struct vlapic *vlapic, 1932 uint64_t qual) 1933 { 1934 int handled, offset; 1935 uint32_t *apic_regs, vector; 1936 1937 handled = HANDLED; 1938 offset = APIC_WRITE_OFFSET(qual); 1939 1940 if (!apic_access_virtualization(vmx, vcpuid)) { 1941 /* 1942 * In general there should not be any APIC write VM-exits 1943 * unless APIC-access virtualization is enabled. 1944 * 1945 * However self-IPI virtualization can legitimately trigger 1946 * an APIC-write VM-exit so treat it specially. 1947 */ 1948 if (x2apic_virtualization(vmx, vcpuid) && 1949 offset == APIC_OFFSET_SELF_IPI) { 1950 apic_regs = (uint32_t *)(vlapic->apic_page); 1951 vector = apic_regs[APIC_OFFSET_SELF_IPI / 4]; 1952 vlapic_self_ipi_handler(vlapic, vector); 1953 return (HANDLED); 1954 } else 1955 return (UNHANDLED); 1956 } 1957 1958 switch (offset) { 1959 case APIC_OFFSET_ID: 1960 vlapic_id_write_handler(vlapic); 1961 break; 1962 case APIC_OFFSET_LDR: 1963 vlapic_ldr_write_handler(vlapic); 1964 break; 1965 case APIC_OFFSET_DFR: 1966 vlapic_dfr_write_handler(vlapic); 1967 break; 1968 case APIC_OFFSET_SVR: 1969 vlapic_svr_write_handler(vlapic); 1970 break; 1971 case APIC_OFFSET_ESR: 1972 vlapic_esr_write_handler(vlapic); 1973 break; 1974 case APIC_OFFSET_ICR_LOW: 1975 if (vlapic_icrlo_write_handler(vlapic) != 0) { 1976 handled = UNHANDLED; 1977 } 1978 break; 1979 case APIC_OFFSET_CMCI_LVT: 1980 case APIC_OFFSET_TIMER_LVT ... APIC_OFFSET_ERROR_LVT: 1981 vlapic_lvt_write_handler(vlapic, offset); 1982 break; 1983 case APIC_OFFSET_TIMER_ICR: 1984 vlapic_icrtmr_write_handler(vlapic); 1985 break; 1986 case APIC_OFFSET_TIMER_DCR: 1987 vlapic_dcr_write_handler(vlapic); 1988 break; 1989 default: 1990 handled = UNHANDLED; 1991 break; 1992 } 1993 return (handled); 1994 } 1995 1996 static bool 1997 apic_access_fault(struct vmx *vmx, int vcpuid, uint64_t gpa) 1998 { 1999 2000 if (apic_access_virtualization(vmx, vcpuid) && 2001 (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE)) 2002 return (true); 2003 else 2004 return (false); 2005 } 2006 2007 static int 2008 vmx_handle_apic_access(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit) 2009 { 2010 uint64_t qual; 2011 int access_type, offset, allowed; 2012 struct vie *vie; 2013 2014 if (!apic_access_virtualization(vmx, vcpuid)) 2015 return (UNHANDLED); 2016 2017 qual = vmexit->u.vmx.exit_qualification; 2018 access_type = APIC_ACCESS_TYPE(qual); 2019 offset = APIC_ACCESS_OFFSET(qual); 2020 2021 allowed = 0; 2022 if (access_type == 0) { 2023 /* 2024 * Read data access to the following registers is expected. 2025 */ 2026 switch (offset) { 2027 case APIC_OFFSET_APR: 2028 case APIC_OFFSET_PPR: 2029 case APIC_OFFSET_RRR: 2030 case APIC_OFFSET_CMCI_LVT: 2031 case APIC_OFFSET_TIMER_CCR: 2032 allowed = 1; 2033 break; 2034 default: 2035 break; 2036 } 2037 } else if (access_type == 1) { 2038 /* 2039 * Write data access to the following registers is expected. 2040 */ 2041 switch (offset) { 2042 case APIC_OFFSET_VER: 2043 case APIC_OFFSET_APR: 2044 case APIC_OFFSET_PPR: 2045 case APIC_OFFSET_RRR: 2046 case APIC_OFFSET_ISR0 ... APIC_OFFSET_ISR7: 2047 case APIC_OFFSET_TMR0 ... APIC_OFFSET_TMR7: 2048 case APIC_OFFSET_IRR0 ... 
APIC_OFFSET_IRR7: 2049 case APIC_OFFSET_CMCI_LVT: 2050 case APIC_OFFSET_TIMER_CCR: 2051 allowed = 1; 2052 break; 2053 default: 2054 break; 2055 } 2056 } 2057 2058 if (allowed) { 2059 vie = vm_vie_ctx(vmx->vm, vcpuid); 2060 vmexit_mmio_emul(vmexit, vie, DEFAULT_APIC_BASE + offset, 2061 VIE_INVALID_GLA); 2062 } 2063 2064 /* 2065 * Regardless of whether the APIC-access is allowed this handler 2066 * always returns UNHANDLED: 2067 * - if the access is allowed then it is handled by emulating the 2068 * instruction that caused the VM-exit (outside the critical section) 2069 * - if the access is not allowed then it will be converted to an 2070 * exitcode of VM_EXITCODE_VMX and will be dealt with in userland. 2071 */ 2072 return (UNHANDLED); 2073 } 2074 2075 static enum task_switch_reason 2076 vmx_task_switch_reason(uint64_t qual) 2077 { 2078 int reason; 2079 2080 reason = (qual >> 30) & 0x3; 2081 switch (reason) { 2082 case 0: 2083 return (TSR_CALL); 2084 case 1: 2085 return (TSR_IRET); 2086 case 2: 2087 return (TSR_JMP); 2088 case 3: 2089 return (TSR_IDT_GATE); 2090 default: 2091 panic("%s: invalid reason %d", __func__, reason); 2092 } 2093 } 2094 2095 static int 2096 emulate_wrmsr(struct vmx *vmx, int vcpuid, uint_t num, uint64_t val) 2097 { 2098 int error; 2099 2100 if (lapic_msr(num)) 2101 error = lapic_wrmsr(vmx->vm, vcpuid, num, val); 2102 else 2103 error = vmx_wrmsr(vmx, vcpuid, num, val); 2104 2105 return (error); 2106 } 2107 2108 static int 2109 emulate_rdmsr(struct vmx *vmx, int vcpuid, uint_t num) 2110 { 2111 uint64_t result; 2112 int error; 2113 2114 if (lapic_msr(num)) 2115 error = lapic_rdmsr(vmx->vm, vcpuid, num, &result); 2116 else 2117 error = vmx_rdmsr(vmx, vcpuid, num, &result); 2118 2119 if (error == 0) { 2120 vmx->ctx[vcpuid].guest_rax = (uint32_t)result; 2121 vmx->ctx[vcpuid].guest_rdx = result >> 32; 2122 } 2123 2124 return (error); 2125 } 2126 2127 #ifndef __FreeBSD__ 2128 #define __predict_false(x) (x) 2129 #endif 2130 2131 static int 2132 vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit) 2133 { 2134 int error, errcode, errcode_valid, handled; 2135 struct vmxctx *vmxctx; 2136 struct vie *vie; 2137 struct vlapic *vlapic; 2138 struct vm_task_switch *ts; 2139 uint32_t eax, ecx, edx, idtvec_info, idtvec_err, intr_info; 2140 uint32_t intr_type, intr_vec, reason; 2141 uint64_t exitintinfo, qual, gpa; 2142 2143 CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_VIRTUAL_NMI) != 0); 2144 CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_NMI_EXITING) != 0); 2145 2146 handled = UNHANDLED; 2147 vmxctx = &vmx->ctx[vcpu]; 2148 2149 qual = vmexit->u.vmx.exit_qualification; 2150 reason = vmexit->u.vmx.exit_reason; 2151 vmexit->exitcode = VM_EXITCODE_BOGUS; 2152 2153 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_COUNT, 1); 2154 SDT_PROBE3(vmm, vmx, exit, entry, vmx, vcpu, vmexit); 2155 2156 /* 2157 * VM-entry failures during or after loading guest state. 2158 * 2159 * These VM-exits are uncommon but must be handled specially 2160 * as most VM-exit fields are not populated as usual. 2161 */ 2162 if (__predict_false(reason == EXIT_REASON_MCE_DURING_ENTRY)) { 2163 VCPU_CTR0(vmx->vm, vcpu, "Handling MCE during VM-entry"); 2164 #ifdef __FreeBSD__ 2165 __asm __volatile("int $18"); 2166 #else 2167 vmm_call_trap(T_MCE); 2168 #endif 2169 return (1); 2170 } 2171 2172 /* 2173 * VM exits that can be triggered during event delivery need to 2174 * be handled specially by re-injecting the event if the IDT 2175 * vectoring information field's valid bit is set. 
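	 * For example, if delivery of a hardware interrupt through the IDT
	 * faults on a not-present guest page, the resulting VM-exit reports
	 * the original interrupt in the IDT-vectoring fields; it is packed
	 * into exitintinfo below so that it can be re-injected on the next
	 * VM-entry.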
2176 * 2177 * See "Information for VM Exits During Event Delivery" in Intel SDM 2178 * for details. 2179 */ 2180 idtvec_info = vmcs_idt_vectoring_info(); 2181 if (idtvec_info & VMCS_IDT_VEC_VALID) { 2182 idtvec_info &= ~(1 << 12); /* clear undefined bit */ 2183 exitintinfo = idtvec_info; 2184 if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) { 2185 idtvec_err = vmcs_idt_vectoring_err(); 2186 exitintinfo |= (uint64_t)idtvec_err << 32; 2187 } 2188 error = vm_exit_intinfo(vmx->vm, vcpu, exitintinfo); 2189 KASSERT(error == 0, ("%s: vm_set_intinfo error %d", 2190 __func__, error)); 2191 2192 /* 2193 * If 'virtual NMIs' are being used and the VM-exit 2194 * happened while injecting an NMI during the previous 2195 * VM-entry, then clear "blocking by NMI" in the 2196 * Guest Interruptibility-State so the NMI can be 2197 * reinjected on the subsequent VM-entry. 2198 * 2199 * However, if the NMI was being delivered through a task 2200 * gate, then the new task must start execution with NMIs 2201 * blocked so don't clear NMI blocking in this case. 2202 */ 2203 intr_type = idtvec_info & VMCS_INTR_T_MASK; 2204 if (intr_type == VMCS_INTR_T_NMI) { 2205 if (reason != EXIT_REASON_TASK_SWITCH) 2206 vmx_clear_nmi_blocking(vmx, vcpu); 2207 else 2208 vmx_assert_nmi_blocking(vmx, vcpu); 2209 } 2210 2211 /* 2212 * Update VM-entry instruction length if the event being 2213 * delivered was a software interrupt or software exception. 2214 */ 2215 if (intr_type == VMCS_INTR_T_SWINTR || 2216 intr_type == VMCS_INTR_T_PRIV_SWEXCEPTION || 2217 intr_type == VMCS_INTR_T_SWEXCEPTION) { 2218 vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length); 2219 } 2220 } 2221 2222 switch (reason) { 2223 case EXIT_REASON_TASK_SWITCH: 2224 ts = &vmexit->u.task_switch; 2225 ts->tsssel = qual & 0xffff; 2226 ts->reason = vmx_task_switch_reason(qual); 2227 ts->ext = 0; 2228 ts->errcode_valid = 0; 2229 vmx_paging_info(&ts->paging); 2230 /* 2231 * If the task switch was due to a CALL, JMP, IRET, software 2232 * interrupt (INT n) or software exception (INT3, INTO), 2233 * then the saved %rip references the instruction that caused 2234 * the task switch. The instruction length field in the VMCS 2235 * is valid in this case. 2236 * 2237 * In all other cases (e.g., NMI, hardware exception) the 2238 * saved %rip is one that would have been saved in the old TSS 2239 * had the task switch completed normally so the instruction 2240 * length field is not needed in this case and is explicitly 2241 * set to 0. 2242 */ 2243 if (ts->reason == TSR_IDT_GATE) { 2244 KASSERT(idtvec_info & VMCS_IDT_VEC_VALID, 2245 ("invalid idtvec_info %x for IDT task switch", 2246 idtvec_info)); 2247 intr_type = idtvec_info & VMCS_INTR_T_MASK; 2248 if (intr_type != VMCS_INTR_T_SWINTR && 2249 intr_type != VMCS_INTR_T_SWEXCEPTION && 2250 intr_type != VMCS_INTR_T_PRIV_SWEXCEPTION) { 2251 /* Task switch triggered by external event */ 2252 ts->ext = 1; 2253 vmexit->inst_length = 0; 2254 if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) { 2255 ts->errcode_valid = 1; 2256 ts->errcode = vmcs_idt_vectoring_err(); 2257 } 2258 } 2259 } 2260 vmexit->exitcode = VM_EXITCODE_TASK_SWITCH; 2261 SDT_PROBE4(vmm, vmx, exit, taskswitch, vmx, vcpu, vmexit, ts); 2262 VCPU_CTR4(vmx->vm, vcpu, "task switch reason %d, tss 0x%04x, " 2263 "%s errcode 0x%016lx", ts->reason, ts->tsssel, 2264 ts->ext ? 
"external" : "internal", 2265 ((uint64_t)ts->errcode << 32) | ts->errcode_valid); 2266 break; 2267 case EXIT_REASON_CR_ACCESS: 2268 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CR_ACCESS, 1); 2269 SDT_PROBE4(vmm, vmx, exit, craccess, vmx, vcpu, vmexit, qual); 2270 switch (qual & 0xf) { 2271 case 0: 2272 handled = vmx_emulate_cr0_access(vmx, vcpu, qual); 2273 break; 2274 case 4: 2275 handled = vmx_emulate_cr4_access(vmx, vcpu, qual); 2276 break; 2277 case 8: 2278 handled = vmx_emulate_cr8_access(vmx, vcpu, qual); 2279 break; 2280 } 2281 break; 2282 case EXIT_REASON_RDMSR: 2283 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_RDMSR, 1); 2284 ecx = vmxctx->guest_rcx; 2285 VCPU_CTR1(vmx->vm, vcpu, "rdmsr 0x%08x", ecx); 2286 SDT_PROBE4(vmm, vmx, exit, rdmsr, vmx, vcpu, vmexit, ecx); 2287 error = emulate_rdmsr(vmx, vcpu, ecx); 2288 if (error == 0) { 2289 handled = HANDLED; 2290 } else if (error > 0) { 2291 vmexit->exitcode = VM_EXITCODE_RDMSR; 2292 vmexit->u.msr.code = ecx; 2293 } else { 2294 /* Return to userspace with a valid exitcode */ 2295 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, 2296 ("emulate_rdmsr retu with bogus exitcode")); 2297 } 2298 break; 2299 case EXIT_REASON_WRMSR: 2300 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_WRMSR, 1); 2301 eax = vmxctx->guest_rax; 2302 ecx = vmxctx->guest_rcx; 2303 edx = vmxctx->guest_rdx; 2304 VCPU_CTR2(vmx->vm, vcpu, "wrmsr 0x%08x value 0x%016lx", 2305 ecx, (uint64_t)edx << 32 | eax); 2306 SDT_PROBE5(vmm, vmx, exit, wrmsr, vmx, vmexit, vcpu, ecx, 2307 (uint64_t)edx << 32 | eax); 2308 error = emulate_wrmsr(vmx, vcpu, ecx, 2309 (uint64_t)edx << 32 | eax); 2310 if (error == 0) { 2311 handled = HANDLED; 2312 } else if (error > 0) { 2313 vmexit->exitcode = VM_EXITCODE_WRMSR; 2314 vmexit->u.msr.code = ecx; 2315 vmexit->u.msr.wval = (uint64_t)edx << 32 | eax; 2316 } else { 2317 /* Return to userspace with a valid exitcode */ 2318 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, 2319 ("emulate_wrmsr retu with bogus exitcode")); 2320 } 2321 break; 2322 case EXIT_REASON_HLT: 2323 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_HLT, 1); 2324 SDT_PROBE3(vmm, vmx, exit, halt, vmx, vcpu, vmexit); 2325 vmexit->exitcode = VM_EXITCODE_HLT; 2326 vmexit->u.hlt.rflags = vmcs_read(VMCS_GUEST_RFLAGS); 2327 break; 2328 case EXIT_REASON_MTF: 2329 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MTRAP, 1); 2330 SDT_PROBE3(vmm, vmx, exit, mtrap, vmx, vcpu, vmexit); 2331 vmexit->exitcode = VM_EXITCODE_MTRAP; 2332 vmexit->inst_length = 0; 2333 break; 2334 case EXIT_REASON_PAUSE: 2335 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_PAUSE, 1); 2336 SDT_PROBE3(vmm, vmx, exit, pause, vmx, vcpu, vmexit); 2337 vmexit->exitcode = VM_EXITCODE_PAUSE; 2338 break; 2339 case EXIT_REASON_INTR_WINDOW: 2340 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INTR_WINDOW, 1); 2341 SDT_PROBE3(vmm, vmx, exit, intrwindow, vmx, vcpu, vmexit); 2342 vmx_clear_int_window_exiting(vmx, vcpu); 2343 return (1); 2344 case EXIT_REASON_EXT_INTR: 2345 /* 2346 * External interrupts serve only to cause VM exits and allow 2347 * the host interrupt handler to run. 2348 * 2349 * If this external interrupt triggers a virtual interrupt 2350 * to a VM, then that state will be recorded by the 2351 * host interrupt handler in the VM's softc. We will inject 2352 * this virtual interrupt during the subsequent VM enter. 2353 */ 2354 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO); 2355 SDT_PROBE4(vmm, vmx, exit, interrupt, 2356 vmx, vcpu, vmexit, intr_info); 2357 2358 /* 2359 * XXX: Ignore this exit if VMCS_INTR_VALID is not set. 2360 * This appears to be a bug in VMware Fusion? 
2361 */ 2362 if (!(intr_info & VMCS_INTR_VALID)) 2363 return (1); 2364 KASSERT((intr_info & VMCS_INTR_VALID) != 0 && 2365 (intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_HWINTR, 2366 ("VM exit interruption info invalid: %x", intr_info)); 2367 vmx_trigger_hostintr(intr_info & 0xff); 2368 2369 /* 2370 * This is special. We want to treat this as an 'handled' 2371 * VM-exit but not increment the instruction pointer. 2372 */ 2373 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXTINT, 1); 2374 return (1); 2375 case EXIT_REASON_NMI_WINDOW: 2376 SDT_PROBE3(vmm, vmx, exit, nmiwindow, vmx, vcpu, vmexit); 2377 /* Exit to allow the pending virtual NMI to be injected */ 2378 if (vm_nmi_pending(vmx->vm, vcpu)) 2379 vmx_inject_nmi(vmx, vcpu); 2380 vmx_clear_nmi_window_exiting(vmx, vcpu); 2381 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NMI_WINDOW, 1); 2382 return (1); 2383 case EXIT_REASON_INOUT: 2384 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INOUT, 1); 2385 vie = vm_vie_ctx(vmx->vm, vcpu); 2386 vmexit_inout(vmexit, vie, qual, (uint32_t)vmxctx->guest_rax); 2387 SDT_PROBE3(vmm, vmx, exit, inout, vmx, vcpu, vmexit); 2388 break; 2389 case EXIT_REASON_CPUID: 2390 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CPUID, 1); 2391 SDT_PROBE3(vmm, vmx, exit, cpuid, vmx, vcpu, vmexit); 2392 handled = vmx_handle_cpuid(vmx->vm, vcpu, vmxctx); 2393 break; 2394 case EXIT_REASON_EXCEPTION: 2395 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXCEPTION, 1); 2396 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO); 2397 KASSERT((intr_info & VMCS_INTR_VALID) != 0, 2398 ("VM exit interruption info invalid: %x", intr_info)); 2399 2400 intr_vec = intr_info & 0xff; 2401 intr_type = intr_info & VMCS_INTR_T_MASK; 2402 2403 /* 2404 * If Virtual NMIs control is 1 and the VM-exit is due to a 2405 * fault encountered during the execution of IRET then we must 2406 * restore the state of "virtual-NMI blocking" before resuming 2407 * the guest. 2408 * 2409 * See "Resuming Guest Software after Handling an Exception". 2410 * See "Information for VM Exits Due to Vectored Events". 2411 */ 2412 if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 && 2413 (intr_vec != IDT_DF) && 2414 (intr_info & EXIT_QUAL_NMIUDTI) != 0) 2415 vmx_restore_nmi_blocking(vmx, vcpu); 2416 2417 /* 2418 * The NMI has already been handled in vmx_exit_handle_nmi(). 2419 */ 2420 if (intr_type == VMCS_INTR_T_NMI) 2421 return (1); 2422 2423 /* 2424 * Call the machine check handler by hand. Also don't reflect 2425 * the machine check back into the guest. 2426 */ 2427 if (intr_vec == IDT_MC) { 2428 VCPU_CTR0(vmx->vm, vcpu, "Vectoring to MCE handler"); 2429 #ifdef __FreeBSD__ 2430 __asm __volatile("int $18"); 2431 #else 2432 vmm_call_trap(T_MCE); 2433 #endif 2434 return (1); 2435 } 2436 2437 /* 2438 * If the hypervisor has requested user exits for 2439 * debug exceptions, bounce them out to userland. 2440 */ 2441 if (intr_type == VMCS_INTR_T_SWEXCEPTION && 2442 intr_vec == IDT_BP && 2443 (vmx->cap[vcpu].set & (1 << VM_CAP_BPT_EXIT))) { 2444 vmexit->exitcode = VM_EXITCODE_BPT; 2445 vmexit->u.bpt.inst_length = vmexit->inst_length; 2446 vmexit->inst_length = 0; 2447 break; 2448 } 2449 2450 if (intr_vec == IDT_PF) { 2451 vmxctx->guest_cr2 = qual; 2452 } 2453 2454 /* 2455 * Software exceptions exhibit trap-like behavior. This in 2456 * turn requires populating the VM-entry instruction length 2457 * so that the %rip in the trap frame is past the INT3/INTO 2458 * instruction. 
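		 * INT3, for instance, is a single-byte opcode, so an
		 * instruction length of 1 is what places the %rip seen by
		 * the guest's handler just past the breakpoint.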
2459 */ 2460 if (intr_type == VMCS_INTR_T_SWEXCEPTION) 2461 vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length); 2462 2463 /* Reflect all other exceptions back into the guest */ 2464 errcode_valid = errcode = 0; 2465 if (intr_info & VMCS_INTR_DEL_ERRCODE) { 2466 errcode_valid = 1; 2467 errcode = vmcs_read(VMCS_EXIT_INTR_ERRCODE); 2468 } 2469 VCPU_CTR2(vmx->vm, vcpu, "Reflecting exception %d/%x into " 2470 "the guest", intr_vec, errcode); 2471 SDT_PROBE5(vmm, vmx, exit, exception, 2472 vmx, vcpu, vmexit, intr_vec, errcode); 2473 error = vm_inject_exception(vmx->vm, vcpu, intr_vec, 2474 errcode_valid, errcode, 0); 2475 KASSERT(error == 0, ("%s: vm_inject_exception error %d", 2476 __func__, error)); 2477 return (1); 2478 2479 case EXIT_REASON_EPT_FAULT: 2480 /* 2481 * If 'gpa' lies within the address space allocated to 2482 * memory then this must be a nested page fault otherwise 2483 * this must be an instruction that accesses MMIO space. 2484 */ 2485 gpa = vmcs_gpa(); 2486 if (vm_mem_allocated(vmx->vm, vcpu, gpa) || 2487 apic_access_fault(vmx, vcpu, gpa)) { 2488 vmexit->exitcode = VM_EXITCODE_PAGING; 2489 vmexit->inst_length = 0; 2490 vmexit->u.paging.gpa = gpa; 2491 vmexit->u.paging.fault_type = ept_fault_type(qual); 2492 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NESTED_FAULT, 1); 2493 SDT_PROBE5(vmm, vmx, exit, nestedfault, 2494 vmx, vcpu, vmexit, gpa, qual); 2495 } else if (ept_emulation_fault(qual)) { 2496 vie = vm_vie_ctx(vmx->vm, vcpu); 2497 vmexit_mmio_emul(vmexit, vie, gpa, vmcs_gla()); 2498 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MMIO_EMUL, 1); 2499 SDT_PROBE4(vmm, vmx, exit, mmiofault, 2500 vmx, vcpu, vmexit, gpa); 2501 } 2502 /* 2503 * If Virtual NMIs control is 1 and the VM-exit is due to an 2504 * EPT fault during the execution of IRET then we must restore 2505 * the state of "virtual-NMI blocking" before resuming. 2506 * 2507 * See description of "NMI unblocking due to IRET" in 2508 * "Exit Qualification for EPT Violations". 2509 */ 2510 if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 && 2511 (qual & EXIT_QUAL_NMIUDTI) != 0) 2512 vmx_restore_nmi_blocking(vmx, vcpu); 2513 break; 2514 case EXIT_REASON_VIRTUALIZED_EOI: 2515 vmexit->exitcode = VM_EXITCODE_IOAPIC_EOI; 2516 vmexit->u.ioapic_eoi.vector = qual & 0xFF; 2517 SDT_PROBE3(vmm, vmx, exit, eoi, vmx, vcpu, vmexit); 2518 vmexit->inst_length = 0; /* trap-like */ 2519 break; 2520 case EXIT_REASON_APIC_ACCESS: 2521 SDT_PROBE3(vmm, vmx, exit, apicaccess, vmx, vcpu, vmexit); 2522 handled = vmx_handle_apic_access(vmx, vcpu, vmexit); 2523 break; 2524 case EXIT_REASON_APIC_WRITE: 2525 /* 2526 * APIC-write VM exit is trap-like so the %rip is already 2527 * pointing to the next instruction. 
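		 * Clearing inst_length below keeps the common handled-exit
		 * path from advancing %rip a second time.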
2528 */ 2529 vmexit->inst_length = 0; 2530 vlapic = vm_lapic(vmx->vm, vcpu); 2531 SDT_PROBE4(vmm, vmx, exit, apicwrite, 2532 vmx, vcpu, vmexit, vlapic); 2533 handled = vmx_handle_apic_write(vmx, vcpu, vlapic, qual); 2534 break; 2535 case EXIT_REASON_XSETBV: 2536 SDT_PROBE3(vmm, vmx, exit, xsetbv, vmx, vcpu, vmexit); 2537 handled = vmx_emulate_xsetbv(vmx, vcpu, vmexit); 2538 break; 2539 case EXIT_REASON_MONITOR: 2540 SDT_PROBE3(vmm, vmx, exit, monitor, vmx, vcpu, vmexit); 2541 vmexit->exitcode = VM_EXITCODE_MONITOR; 2542 break; 2543 case EXIT_REASON_MWAIT: 2544 SDT_PROBE3(vmm, vmx, exit, mwait, vmx, vcpu, vmexit); 2545 vmexit->exitcode = VM_EXITCODE_MWAIT; 2546 break; 2547 case EXIT_REASON_TPR: 2548 vlapic = vm_lapic(vmx->vm, vcpu); 2549 vlapic_sync_tpr(vlapic); 2550 vmexit->inst_length = 0; 2551 handled = HANDLED; 2552 break; 2553 case EXIT_REASON_VMCALL: 2554 case EXIT_REASON_VMCLEAR: 2555 case EXIT_REASON_VMLAUNCH: 2556 case EXIT_REASON_VMPTRLD: 2557 case EXIT_REASON_VMPTRST: 2558 case EXIT_REASON_VMREAD: 2559 case EXIT_REASON_VMRESUME: 2560 case EXIT_REASON_VMWRITE: 2561 case EXIT_REASON_VMXOFF: 2562 case EXIT_REASON_VMXON: 2563 SDT_PROBE3(vmm, vmx, exit, vminsn, vmx, vcpu, vmexit); 2564 vmexit->exitcode = VM_EXITCODE_VMINSN; 2565 break; 2566 default: 2567 SDT_PROBE4(vmm, vmx, exit, unknown, 2568 vmx, vcpu, vmexit, reason); 2569 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_UNKNOWN, 1); 2570 break; 2571 } 2572 2573 if (handled) { 2574 /* 2575 * It is possible that control is returned to userland 2576 * even though we were able to handle the VM exit in the 2577 * kernel. 2578 * 2579 * In such a case we want to make sure that the userland 2580 * restarts guest execution at the instruction *after* 2581 * the one we just processed. Therefore we update the 2582 * guest rip in the VMCS and in 'vmexit'. 2583 */ 2584 vmexit->rip += vmexit->inst_length; 2585 vmexit->inst_length = 0; 2586 vmcs_write(VMCS_GUEST_RIP, vmexit->rip); 2587 } else { 2588 if (vmexit->exitcode == VM_EXITCODE_BOGUS) { 2589 /* 2590 * If this VM exit was not claimed by anybody then 2591 * treat it as a generic VMX exit. 2592 */ 2593 vmexit->exitcode = VM_EXITCODE_VMX; 2594 vmexit->u.vmx.status = VM_SUCCESS; 2595 vmexit->u.vmx.inst_type = 0; 2596 vmexit->u.vmx.inst_error = 0; 2597 } else { 2598 /* 2599 * The exitcode and collateral have been populated. 2600 * The VM exit will be processed further in userland. 2601 */ 2602 } 2603 } 2604 2605 SDT_PROBE4(vmm, vmx, exit, return, 2606 vmx, vcpu, vmexit, handled); 2607 return (handled); 2608 } 2609 2610 static void 2611 vmx_exit_inst_error(struct vmxctx *vmxctx, int rc, struct vm_exit *vmexit) 2612 { 2613 2614 KASSERT(vmxctx->inst_fail_status != VM_SUCCESS, 2615 ("vmx_exit_inst_error: invalid inst_fail_status %d", 2616 vmxctx->inst_fail_status)); 2617 2618 vmexit->inst_length = 0; 2619 vmexit->exitcode = VM_EXITCODE_VMX; 2620 vmexit->u.vmx.status = vmxctx->inst_fail_status; 2621 vmexit->u.vmx.inst_error = vmcs_instruction_error(); 2622 vmexit->u.vmx.exit_reason = ~0; 2623 vmexit->u.vmx.exit_qualification = ~0; 2624 2625 switch (rc) { 2626 case VMX_VMRESUME_ERROR: 2627 case VMX_VMLAUNCH_ERROR: 2628 case VMX_INVEPT_ERROR: 2629 #ifndef __FreeBSD__ 2630 case VMX_VMWRITE_ERROR: 2631 #endif 2632 vmexit->u.vmx.inst_type = rc; 2633 break; 2634 default: 2635 panic("vm_exit_inst_error: vmx_enter_guest returned %d", rc); 2636 } 2637 } 2638 2639 /* 2640 * If the NMI-exiting VM execution control is set to '1' then an NMI in 2641 * non-root operation causes a VM-exit. 
NMI blocking is in effect so it is 2642 * sufficient to simply vector to the NMI handler via a software interrupt. 2643 * However, this must be done before maskable interrupts are enabled 2644 * otherwise the "iret" issued by an interrupt handler will incorrectly 2645 * clear NMI blocking. 2646 */ 2647 static __inline void 2648 vmx_exit_handle_nmi(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit) 2649 { 2650 uint32_t intr_info; 2651 2652 KASSERT((read_rflags() & PSL_I) == 0, ("interrupts enabled")); 2653 2654 if (vmexit->u.vmx.exit_reason != EXIT_REASON_EXCEPTION) 2655 return; 2656 2657 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO); 2658 KASSERT((intr_info & VMCS_INTR_VALID) != 0, 2659 ("VM exit interruption info invalid: %x", intr_info)); 2660 2661 if ((intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_NMI) { 2662 KASSERT((intr_info & 0xff) == IDT_NMI, ("VM exit due " 2663 "to NMI has invalid vector: %x", intr_info)); 2664 VCPU_CTR0(vmx->vm, vcpuid, "Vectoring to NMI handler"); 2665 #ifdef __FreeBSD__ 2666 __asm __volatile("int $2"); 2667 #else 2668 vmm_call_trap(T_NMIFLT); 2669 #endif 2670 } 2671 } 2672 2673 static __inline void 2674 vmx_dr_enter_guest(struct vmxctx *vmxctx) 2675 { 2676 uint64_t rflags; 2677 2678 /* Save host control debug registers. */ 2679 vmxctx->host_dr7 = rdr7(); 2680 vmxctx->host_debugctl = rdmsr(MSR_DEBUGCTLMSR); 2681 2682 /* 2683 * Disable debugging in DR7 and DEBUGCTL to avoid triggering 2684 * exceptions in the host based on the guest DRx values. The 2685 * guest DR7 and DEBUGCTL are saved/restored in the VMCS. 2686 */ 2687 load_dr7(0); 2688 wrmsr(MSR_DEBUGCTLMSR, 0); 2689 2690 /* 2691 * Disable single stepping the kernel to avoid corrupting the 2692 * guest DR6. A debugger might still be able to corrupt the 2693 * guest DR6 by setting a breakpoint after this point and then 2694 * single stepping. 2695 */ 2696 rflags = read_rflags(); 2697 vmxctx->host_tf = rflags & PSL_T; 2698 write_rflags(rflags & ~PSL_T); 2699 2700 /* Save host debug registers. */ 2701 vmxctx->host_dr0 = rdr0(); 2702 vmxctx->host_dr1 = rdr1(); 2703 vmxctx->host_dr2 = rdr2(); 2704 vmxctx->host_dr3 = rdr3(); 2705 vmxctx->host_dr6 = rdr6(); 2706 2707 /* Restore guest debug registers. */ 2708 load_dr0(vmxctx->guest_dr0); 2709 load_dr1(vmxctx->guest_dr1); 2710 load_dr2(vmxctx->guest_dr2); 2711 load_dr3(vmxctx->guest_dr3); 2712 load_dr6(vmxctx->guest_dr6); 2713 } 2714 2715 static __inline void 2716 vmx_dr_leave_guest(struct vmxctx *vmxctx) 2717 { 2718 2719 /* Save guest debug registers. */ 2720 vmxctx->guest_dr0 = rdr0(); 2721 vmxctx->guest_dr1 = rdr1(); 2722 vmxctx->guest_dr2 = rdr2(); 2723 vmxctx->guest_dr3 = rdr3(); 2724 vmxctx->guest_dr6 = rdr6(); 2725 2726 /* 2727 * Restore host debug registers. Restore DR7, DEBUGCTL, and 2728 * PSL_T last. 
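	 * Doing those last means breakpoint enables, branch tracing and
	 * single-stepping are only re-armed once the host DR0-DR6 values
	 * are back in place.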
2729 */ 2730 load_dr0(vmxctx->host_dr0); 2731 load_dr1(vmxctx->host_dr1); 2732 load_dr2(vmxctx->host_dr2); 2733 load_dr3(vmxctx->host_dr3); 2734 load_dr6(vmxctx->host_dr6); 2735 wrmsr(MSR_DEBUGCTLMSR, vmxctx->host_debugctl); 2736 load_dr7(vmxctx->host_dr7); 2737 write_rflags(read_rflags() | vmxctx->host_tf); 2738 } 2739 2740 static int 2741 vmx_run(void *arg, int vcpu, uint64_t rip, pmap_t pmap, 2742 struct vm_eventinfo *evinfo) 2743 { 2744 int rc, handled, launched; 2745 struct vmx *vmx; 2746 struct vm *vm; 2747 struct vmxctx *vmxctx; 2748 uintptr_t vmcs_pa; 2749 struct vm_exit *vmexit; 2750 struct vlapic *vlapic; 2751 uint32_t exit_reason; 2752 #ifdef __FreeBSD__ 2753 struct region_descriptor gdtr, idtr; 2754 uint16_t ldt_sel; 2755 #endif 2756 bool tpr_shadow_active; 2757 2758 vmx = arg; 2759 vm = vmx->vm; 2760 vmcs_pa = vmx->vmcs_pa[vcpu]; 2761 vmxctx = &vmx->ctx[vcpu]; 2762 vlapic = vm_lapic(vm, vcpu); 2763 vmexit = vm_exitinfo(vm, vcpu); 2764 launched = 0; 2765 tpr_shadow_active = vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW) && 2766 !vmx_cap_en(vmx, VMX_CAP_APICV) && 2767 (vmx->cap[vcpu].proc_ctls & PROCBASED_USE_TPR_SHADOW) != 0; 2768 2769 KASSERT(vmxctx->pmap == pmap, 2770 ("pmap %p different than ctx pmap %p", pmap, vmxctx->pmap)); 2771 2772 vmx_msr_guest_enter(vmx, vcpu); 2773 2774 vmcs_load(vmcs_pa); 2775 2776 #ifndef __FreeBSD__ 2777 VERIFY(vmx->vmcs_state[vcpu] == VS_NONE && curthread->t_preempt != 0); 2778 vmx->vmcs_state[vcpu] = VS_LOADED; 2779 #endif 2780 2781 /* 2782 * XXX 2783 * We do this every time because we may setup the virtual machine 2784 * from a different process than the one that actually runs it. 2785 * 2786 * If the life of a virtual machine was spent entirely in the context 2787 * of a single process we could do this once in vmx_vminit(). 2788 */ 2789 vmcs_write(VMCS_HOST_CR3, rcr3()); 2790 2791 vmcs_write(VMCS_GUEST_RIP, rip); 2792 vmx_set_pcpu_defaults(vmx, vcpu, pmap); 2793 do { 2794 enum event_inject_state inject_state; 2795 2796 KASSERT(vmcs_guest_rip() == rip, ("%s: vmcs guest rip mismatch " 2797 "%lx/%lx", __func__, vmcs_guest_rip(), rip)); 2798 2799 handled = UNHANDLED; 2800 2801 /* 2802 * Perform initial event/exception/interrupt injection before 2803 * host CPU interrupts are disabled. 2804 */ 2805 inject_state = vmx_inject_events(vmx, vcpu, rip); 2806 2807 /* 2808 * Interrupts are disabled from this point on until the 2809 * guest starts executing. This is done for the following 2810 * reasons: 2811 * 2812 * If an AST is asserted on this thread after the check below, 2813 * then the IPI_AST notification will not be lost, because it 2814 * will cause a VM exit due to external interrupt as soon as 2815 * the guest state is loaded. 2816 * 2817 * A posted interrupt after vmx_inject_vlapic() will not be 2818 * "lost" because it will be held pending in the host APIC 2819 * because interrupts are disabled. The pending interrupt will 2820 * be recognized as soon as the guest state is loaded. 2821 * 2822 * The same reasoning applies to the IPI generated by 2823 * pmap_invalidate_ept(). 2824 */ 2825 disable_intr(); 2826 2827 /* 2828 * If not precluded by existing events, inject any interrupt 2829 * pending on the vLAPIC. As a lock-less operation, it is safe 2830 * (and prudent) to perform with host CPU interrupts disabled. 
2831 */ 2832 if (inject_state == EIS_CAN_INJECT) { 2833 inject_state = vmx_inject_vlapic(vmx, vcpu, vlapic); 2834 } 2835 2836 /* 2837 * Check for vcpu suspension after injecting events because 2838 * vmx_inject_events() can suspend the vcpu due to a 2839 * triple fault. 2840 */ 2841 if (vcpu_suspended(evinfo)) { 2842 enable_intr(); 2843 vm_exit_suspended(vmx->vm, vcpu, rip); 2844 break; 2845 } 2846 2847 if (vcpu_runblocked(evinfo)) { 2848 enable_intr(); 2849 vm_exit_runblock(vmx->vm, vcpu, rip); 2850 break; 2851 } 2852 2853 if (vcpu_reqidle(evinfo)) { 2854 enable_intr(); 2855 vm_exit_reqidle(vmx->vm, vcpu, rip); 2856 break; 2857 } 2858 2859 if (vcpu_should_yield(vm, vcpu)) { 2860 enable_intr(); 2861 vm_exit_astpending(vmx->vm, vcpu, rip); 2862 vmx_astpending_trace(vmx, vcpu, rip); 2863 handled = HANDLED; 2864 break; 2865 } 2866 2867 if (vcpu_debugged(vm, vcpu)) { 2868 enable_intr(); 2869 vm_exit_debug(vmx->vm, vcpu, rip); 2870 break; 2871 } 2872 2873 /* 2874 * If subsequent activity queued events which require injection 2875 * handling, take another lap to handle them. 2876 */ 2877 if (vmx_inject_recheck(vmx, vcpu, inject_state)) { 2878 enable_intr(); 2879 handled = HANDLED; 2880 continue; 2881 } 2882 2883 #ifndef __FreeBSD__ 2884 if ((rc = smt_acquire()) != 1) { 2885 enable_intr(); 2886 vmexit->rip = rip; 2887 vmexit->inst_length = 0; 2888 if (rc == -1) { 2889 vmexit->exitcode = VM_EXITCODE_HT; 2890 } else { 2891 vmexit->exitcode = VM_EXITCODE_BOGUS; 2892 handled = HANDLED; 2893 } 2894 break; 2895 } 2896 2897 /* 2898 * If this thread has gone off-cpu due to mutex operations 2899 * during vmx_run, the VMCS will have been unloaded, forcing a 2900 * re-VMLAUNCH as opposed to VMRESUME. 2901 */ 2902 launched = (vmx->vmcs_state[vcpu] & VS_LAUNCHED) != 0; 2903 /* 2904 * Restoration of the GDT limit is taken care of by 2905 * vmx_savectx(). Since the maximum practical index for the 2906 * IDT is 255, restoring its limits from the post-VMX-exit 2907 * default of 0xffff is not a concern. 2908 * 2909 * Only 64-bit hypervisor callers are allowed, which forgoes 2910 * the need to restore any LDT descriptor. Toss an error to 2911 * anyone attempting to break that rule. 2912 */ 2913 if (curproc->p_model != DATAMODEL_LP64) { 2914 smt_release(); 2915 enable_intr(); 2916 bzero(vmexit, sizeof (*vmexit)); 2917 vmexit->rip = rip; 2918 vmexit->exitcode = VM_EXITCODE_VMX; 2919 vmexit->u.vmx.status = VM_FAIL_INVALID; 2920 handled = UNHANDLED; 2921 break; 2922 } 2923 #else 2924 /* 2925 * VM exits restore the base address but not the 2926 * limits of GDTR and IDTR. The VMCS only stores the 2927 * base address, so VM exits set the limits to 0xffff. 2928 * Save and restore the full GDTR and IDTR to restore 2929 * the limits. 2930 * 2931 * The VMCS does not save the LDTR at all, and VM 2932 * exits clear LDTR as if a NULL selector were loaded. 2933 * The userspace hypervisor probably doesn't use a 2934 * LDT, but save and restore it to be safe. 
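	 * The values captured here are restored with bare_lgdt(), lidt()
	 * and lldt() once vmx_enter_guest() returns.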
2935 */ 2936 sgdt(&gdtr); 2937 sidt(&idtr); 2938 ldt_sel = sldt(); 2939 #endif 2940 2941 if (tpr_shadow_active) { 2942 vmx_tpr_shadow_enter(vlapic); 2943 } 2944 2945 vmx_run_trace(vmx, vcpu); 2946 vmx_dr_enter_guest(vmxctx); 2947 rc = vmx_enter_guest(vmxctx, vmx, launched); 2948 vmx_dr_leave_guest(vmxctx); 2949 2950 #ifndef __FreeBSD__ 2951 vmx->vmcs_state[vcpu] |= VS_LAUNCHED; 2952 smt_release(); 2953 #else 2954 bare_lgdt(&gdtr); 2955 lidt(&idtr); 2956 lldt(ldt_sel); 2957 #endif 2958 2959 if (tpr_shadow_active) { 2960 vmx_tpr_shadow_exit(vlapic); 2961 } 2962 2963 /* Collect some information for VM exit processing */ 2964 vmexit->rip = rip = vmcs_guest_rip(); 2965 vmexit->inst_length = vmexit_instruction_length(); 2966 vmexit->u.vmx.exit_reason = exit_reason = vmcs_exit_reason(); 2967 vmexit->u.vmx.exit_qualification = vmcs_exit_qualification(); 2968 2969 /* Update 'nextrip' */ 2970 vmx->state[vcpu].nextrip = rip; 2971 2972 if (rc == VMX_GUEST_VMEXIT) { 2973 vmx_exit_handle_nmi(vmx, vcpu, vmexit); 2974 enable_intr(); 2975 handled = vmx_exit_process(vmx, vcpu, vmexit); 2976 } else { 2977 enable_intr(); 2978 vmx_exit_inst_error(vmxctx, rc, vmexit); 2979 } 2980 #ifdef __FreeBSD__ 2981 launched = 1; 2982 #endif 2983 DTRACE_PROBE3(vmm__vexit, int, vcpu, uint64_t, rip, 2984 uint32_t, exit_reason); 2985 rip = vmexit->rip; 2986 } while (handled); 2987 2988 /* 2989 * If a VM exit has been handled then the exitcode must be BOGUS 2990 * If a VM exit is not handled then the exitcode must not be BOGUS 2991 */ 2992 if ((handled && vmexit->exitcode != VM_EXITCODE_BOGUS) || 2993 (!handled && vmexit->exitcode == VM_EXITCODE_BOGUS)) { 2994 panic("Mismatch between handled (%d) and exitcode (%d)", 2995 handled, vmexit->exitcode); 2996 } 2997 2998 if (!handled) 2999 vmm_stat_incr(vm, vcpu, VMEXIT_USERSPACE, 1); 3000 3001 VCPU_CTR1(vm, vcpu, "returning from vmx_run: exitcode %d", 3002 vmexit->exitcode); 3003 3004 vmcs_clear(vmcs_pa); 3005 vmx_msr_guest_exit(vmx, vcpu); 3006 3007 #ifndef __FreeBSD__ 3008 VERIFY(vmx->vmcs_state != VS_NONE && curthread->t_preempt != 0); 3009 vmx->vmcs_state[vcpu] = VS_NONE; 3010 #endif 3011 3012 return (0); 3013 } 3014 3015 static void 3016 vmx_vmcleanup(void *arg) 3017 { 3018 int i; 3019 struct vmx *vmx = arg; 3020 uint16_t maxcpus; 3021 3022 if (apic_access_virtualization(vmx, 0)) 3023 vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE); 3024 3025 maxcpus = vm_get_maxcpus(vmx->vm); 3026 for (i = 0; i < maxcpus; i++) 3027 vpid_free(vmx->state[i].vpid); 3028 3029 free(vmx, M_VMX); 3030 } 3031 3032 static uint64_t * 3033 vmxctx_regptr(struct vmxctx *vmxctx, int reg) 3034 { 3035 switch (reg) { 3036 case VM_REG_GUEST_RAX: 3037 return (&vmxctx->guest_rax); 3038 case VM_REG_GUEST_RBX: 3039 return (&vmxctx->guest_rbx); 3040 case VM_REG_GUEST_RCX: 3041 return (&vmxctx->guest_rcx); 3042 case VM_REG_GUEST_RDX: 3043 return (&vmxctx->guest_rdx); 3044 case VM_REG_GUEST_RSI: 3045 return (&vmxctx->guest_rsi); 3046 case VM_REG_GUEST_RDI: 3047 return (&vmxctx->guest_rdi); 3048 case VM_REG_GUEST_RBP: 3049 return (&vmxctx->guest_rbp); 3050 case VM_REG_GUEST_R8: 3051 return (&vmxctx->guest_r8); 3052 case VM_REG_GUEST_R9: 3053 return (&vmxctx->guest_r9); 3054 case VM_REG_GUEST_R10: 3055 return (&vmxctx->guest_r10); 3056 case VM_REG_GUEST_R11: 3057 return (&vmxctx->guest_r11); 3058 case VM_REG_GUEST_R12: 3059 return (&vmxctx->guest_r12); 3060 case VM_REG_GUEST_R13: 3061 return (&vmxctx->guest_r13); 3062 case VM_REG_GUEST_R14: 3063 return (&vmxctx->guest_r14); 3064 case VM_REG_GUEST_R15: 3065 return 
(&vmxctx->guest_r15); 3066 case VM_REG_GUEST_CR2: 3067 return (&vmxctx->guest_cr2); 3068 case VM_REG_GUEST_DR0: 3069 return (&vmxctx->guest_dr0); 3070 case VM_REG_GUEST_DR1: 3071 return (&vmxctx->guest_dr1); 3072 case VM_REG_GUEST_DR2: 3073 return (&vmxctx->guest_dr2); 3074 case VM_REG_GUEST_DR3: 3075 return (&vmxctx->guest_dr3); 3076 case VM_REG_GUEST_DR6: 3077 return (&vmxctx->guest_dr6); 3078 default: 3079 break; 3080 } 3081 return (NULL); 3082 } 3083 3084 static int 3085 vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval) 3086 { 3087 int running, hostcpu, err; 3088 struct vmx *vmx = arg; 3089 uint64_t *regp; 3090 3091 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu); 3092 if (running && hostcpu != curcpu) 3093 panic("vmx_getreg: %s%d is running", vm_name(vmx->vm), vcpu); 3094 3095 /* VMCS access not required for ctx reads */ 3096 if ((regp = vmxctx_regptr(&vmx->ctx[vcpu], reg)) != NULL) { 3097 *retval = *regp; 3098 return (0); 3099 } 3100 3101 if (!running) { 3102 vmcs_load(vmx->vmcs_pa[vcpu]); 3103 } 3104 3105 err = EINVAL; 3106 if (reg == VM_REG_GUEST_INTR_SHADOW) { 3107 uint64_t gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 3108 *retval = (gi & HWINTR_BLOCKING) ? 1 : 0; 3109 err = 0; 3110 } else { 3111 uint32_t encoding; 3112 3113 encoding = vmcs_field_encoding(reg); 3114 if (encoding != VMCS_INVALID_ENCODING) { 3115 *retval = vmcs_read(encoding); 3116 err = 0; 3117 } 3118 } 3119 3120 if (!running) { 3121 vmcs_clear(vmx->vmcs_pa[vcpu]); 3122 } 3123 3124 return (err); 3125 } 3126 3127 static int 3128 vmx_setreg(void *arg, int vcpu, int reg, uint64_t val) 3129 { 3130 int running, hostcpu, error; 3131 struct vmx *vmx = arg; 3132 uint64_t *regp; 3133 3134 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu); 3135 if (running && hostcpu != curcpu) 3136 panic("vmx_setreg: %s%d is running", vm_name(vmx->vm), vcpu); 3137 3138 /* VMCS access not required for ctx writes */ 3139 if ((regp = vmxctx_regptr(&vmx->ctx[vcpu], reg)) != NULL) { 3140 *regp = val; 3141 return (0); 3142 } 3143 3144 if (!running) { 3145 vmcs_load(vmx->vmcs_pa[vcpu]); 3146 } 3147 3148 if (reg == VM_REG_GUEST_INTR_SHADOW) { 3149 if (val != 0) { 3150 /* 3151 * Forcing the vcpu into an interrupt shadow is not 3152 * presently supported. 3153 */ 3154 error = EINVAL; 3155 } else { 3156 uint64_t gi; 3157 3158 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 3159 gi &= ~HWINTR_BLOCKING; 3160 vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi); 3161 error = 0; 3162 } 3163 } else { 3164 uint32_t encoding; 3165 3166 error = 0; 3167 encoding = vmcs_field_encoding(reg); 3168 switch (encoding) { 3169 case VMCS_GUEST_IA32_EFER: 3170 /* 3171 * If the "load EFER" VM-entry control is 1 then the 3172 * value of EFER.LMA must be identical to "IA-32e mode 3173 * guest" bit in the VM-entry control. 3174 */ 3175 if ((entry_ctls & VM_ENTRY_LOAD_EFER) != 0) { 3176 uint64_t ctls; 3177 3178 ctls = vmcs_read(VMCS_ENTRY_CTLS); 3179 if (val & EFER_LMA) { 3180 ctls |= VM_ENTRY_GUEST_LMA; 3181 } else { 3182 ctls &= ~VM_ENTRY_GUEST_LMA; 3183 } 3184 vmcs_write(VMCS_ENTRY_CTLS, ctls); 3185 } 3186 vmcs_write(encoding, val); 3187 break; 3188 case VMCS_GUEST_CR0: 3189 /* 3190 * The guest is not allowed to modify certain bits in 3191 * %cr0 and %cr4. To maintain the illusion of full 3192 * control, they have shadow versions which contain the 3193 * guest-perceived (via reads from the register) values 3194 * as opposed to the guest-effective values. 3195 * 3196 * This is detailed in the SDM: Vol. 3 Ch. 24.6.6. 
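			 * As an illustration: CR0_NE is typically forced on
			 * by cr0_ones_mask, so a guest value written without
			 * NE lands verbatim in the read shadow (what the
			 * guest observes), while the effective %cr0 written
			 * via vmx_fix_cr0() keeps such fixed bits set.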
3197 */ 3198 vmcs_write(VMCS_CR0_SHADOW, val); 3199 vmcs_write(encoding, vmx_fix_cr0(val)); 3200 break; 3201 case VMCS_GUEST_CR4: 3202 /* See above for detail on %cr4 shadowing */ 3203 vmcs_write(VMCS_CR4_SHADOW, val); 3204 vmcs_write(encoding, vmx_fix_cr4(val)); 3205 break; 3206 case VMCS_GUEST_CR3: 3207 vmcs_write(encoding, val); 3208 /* 3209 * Invalidate the guest vcpu's TLB mappings to emulate 3210 * the behavior of updating %cr3. 3211 * 3212 * XXX the processor retains global mappings when %cr3 3213 * is updated but vmx_invvpid() does not. 3214 */ 3215 vmx_invvpid(vmx, vcpu, vmx->ctx[vcpu].pmap, running); 3216 break; 3217 case VMCS_INVALID_ENCODING: 3218 error = EINVAL; 3219 break; 3220 default: 3221 vmcs_write(encoding, val); 3222 break; 3223 } 3224 } 3225 3226 if (!running) { 3227 vmcs_clear(vmx->vmcs_pa[vcpu]); 3228 } 3229 3230 return (error); 3231 } 3232 3233 static int 3234 vmx_getdesc(void *arg, int vcpu, int seg, struct seg_desc *desc) 3235 { 3236 int hostcpu, running; 3237 struct vmx *vmx = arg; 3238 uint32_t base, limit, access; 3239 3240 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu); 3241 if (running && hostcpu != curcpu) 3242 panic("vmx_getdesc: %s%d is running", vm_name(vmx->vm), vcpu); 3243 3244 if (!running) { 3245 vmcs_load(vmx->vmcs_pa[vcpu]); 3246 } 3247 3248 vmcs_seg_desc_encoding(seg, &base, &limit, &access); 3249 desc->base = vmcs_read(base); 3250 desc->limit = vmcs_read(limit); 3251 if (access != VMCS_INVALID_ENCODING) { 3252 desc->access = vmcs_read(access); 3253 } else { 3254 desc->access = 0; 3255 } 3256 3257 if (!running) { 3258 vmcs_clear(vmx->vmcs_pa[vcpu]); 3259 } 3260 return (0); 3261 } 3262 3263 static int 3264 vmx_setdesc(void *arg, int vcpu, int seg, struct seg_desc *desc) 3265 { 3266 int hostcpu, running; 3267 struct vmx *vmx = arg; 3268 uint32_t base, limit, access; 3269 3270 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu); 3271 if (running && hostcpu != curcpu) 3272 panic("vmx_setdesc: %s%d is running", vm_name(vmx->vm), vcpu); 3273 3274 if (!running) { 3275 vmcs_load(vmx->vmcs_pa[vcpu]); 3276 } 3277 3278 vmcs_seg_desc_encoding(seg, &base, &limit, &access); 3279 vmcs_write(base, desc->base); 3280 vmcs_write(limit, desc->limit); 3281 if (access != VMCS_INVALID_ENCODING) { 3282 vmcs_write(access, desc->access); 3283 } 3284 3285 if (!running) { 3286 vmcs_clear(vmx->vmcs_pa[vcpu]); 3287 } 3288 return (0); 3289 } 3290 3291 static int 3292 vmx_getcap(void *arg, int vcpu, int type, int *retval) 3293 { 3294 struct vmx *vmx = arg; 3295 int vcap; 3296 int ret; 3297 3298 ret = ENOENT; 3299 3300 vcap = vmx->cap[vcpu].set; 3301 3302 switch (type) { 3303 case VM_CAP_HALT_EXIT: 3304 if (cap_halt_exit) 3305 ret = 0; 3306 break; 3307 case VM_CAP_PAUSE_EXIT: 3308 if (cap_pause_exit) 3309 ret = 0; 3310 break; 3311 case VM_CAP_MTRAP_EXIT: 3312 if (cap_monitor_trap) 3313 ret = 0; 3314 break; 3315 case VM_CAP_ENABLE_INVPCID: 3316 if (cap_invpcid) 3317 ret = 0; 3318 break; 3319 case VM_CAP_BPT_EXIT: 3320 ret = 0; 3321 break; 3322 default: 3323 break; 3324 } 3325 3326 if (ret == 0) 3327 *retval = (vcap & (1 << type)) ? 
1 : 0; 3328 3329 return (ret); 3330 } 3331 3332 static int 3333 vmx_setcap(void *arg, int vcpu, int type, int val) 3334 { 3335 struct vmx *vmx = arg; 3336 uint32_t baseval, reg, flag; 3337 uint32_t *pptr; 3338 int error; 3339 3340 error = ENOENT; 3341 pptr = NULL; 3342 3343 switch (type) { 3344 case VM_CAP_HALT_EXIT: 3345 if (cap_halt_exit) { 3346 error = 0; 3347 pptr = &vmx->cap[vcpu].proc_ctls; 3348 baseval = *pptr; 3349 flag = PROCBASED_HLT_EXITING; 3350 reg = VMCS_PRI_PROC_BASED_CTLS; 3351 } 3352 break; 3353 case VM_CAP_MTRAP_EXIT: 3354 if (cap_monitor_trap) { 3355 error = 0; 3356 pptr = &vmx->cap[vcpu].proc_ctls; 3357 baseval = *pptr; 3358 flag = PROCBASED_MTF; 3359 reg = VMCS_PRI_PROC_BASED_CTLS; 3360 } 3361 break; 3362 case VM_CAP_PAUSE_EXIT: 3363 if (cap_pause_exit) { 3364 error = 0; 3365 pptr = &vmx->cap[vcpu].proc_ctls; 3366 baseval = *pptr; 3367 flag = PROCBASED_PAUSE_EXITING; 3368 reg = VMCS_PRI_PROC_BASED_CTLS; 3369 } 3370 break; 3371 case VM_CAP_ENABLE_INVPCID: 3372 if (cap_invpcid) { 3373 error = 0; 3374 pptr = &vmx->cap[vcpu].proc_ctls2; 3375 baseval = *pptr; 3376 flag = PROCBASED2_ENABLE_INVPCID; 3377 reg = VMCS_SEC_PROC_BASED_CTLS; 3378 } 3379 break; 3380 case VM_CAP_BPT_EXIT: 3381 error = 0; 3382 3383 /* Don't change the bitmap if we are tracing all exceptions. */ 3384 if (vmx->cap[vcpu].exc_bitmap != 0xffffffff) { 3385 pptr = &vmx->cap[vcpu].exc_bitmap; 3386 baseval = *pptr; 3387 flag = (1 << IDT_BP); 3388 reg = VMCS_EXCEPTION_BITMAP; 3389 } 3390 break; 3391 default: 3392 break; 3393 } 3394 3395 if (error != 0) { 3396 return (error); 3397 } 3398 3399 if (pptr != NULL) { 3400 if (val) { 3401 baseval |= flag; 3402 } else { 3403 baseval &= ~flag; 3404 } 3405 vmcs_load(vmx->vmcs_pa[vcpu]); 3406 vmcs_write(reg, baseval); 3407 vmcs_clear(vmx->vmcs_pa[vcpu]); 3408 3409 /* 3410 * Update optional stored flags, and record 3411 * setting 3412 */ 3413 *pptr = baseval; 3414 } 3415 3416 if (val) { 3417 vmx->cap[vcpu].set |= (1 << type); 3418 } else { 3419 vmx->cap[vcpu].set &= ~(1 << type); 3420 } 3421 3422 return (0); 3423 } 3424 3425 struct vlapic_vtx { 3426 struct vlapic vlapic; 3427 3428 /* Align to the nearest cacheline */ 3429 uint8_t _pad[64 - (sizeof (struct vlapic) % 64)]; 3430 3431 /* TMR handling state for posted interrupts */ 3432 uint32_t tmr_active[8]; 3433 uint32_t pending_level[8]; 3434 uint32_t pending_edge[8]; 3435 3436 struct pir_desc *pir_desc; 3437 struct vmx *vmx; 3438 uint_t pending_prio; 3439 boolean_t tmr_sync; 3440 }; 3441 3442 CTASSERT((offsetof(struct vlapic_vtx, tmr_active) & 63) == 0); 3443 3444 #define VPR_PRIO_BIT(vpr) (1 << ((vpr) >> 4)) 3445 3446 static vcpu_notify_t 3447 vmx_apicv_set_ready(struct vlapic *vlapic, int vector, bool level) 3448 { 3449 struct vlapic_vtx *vlapic_vtx; 3450 struct pir_desc *pir_desc; 3451 uint32_t mask, tmrval; 3452 int idx; 3453 vcpu_notify_t notify = VCPU_NOTIFY_NONE; 3454 3455 vlapic_vtx = (struct vlapic_vtx *)vlapic; 3456 pir_desc = vlapic_vtx->pir_desc; 3457 idx = vector / 32; 3458 mask = 1UL << (vector % 32); 3459 3460 /* 3461 * If the currently asserted TMRs do not match the state requested by 3462 * the incoming interrupt, an exit will be required to reconcile those 3463 * bits in the APIC page. This will keep the vLAPIC behavior in line 3464 * with the architecturally defined expectations. 3465 * 3466 * If actors of mixed types (edge and level) are racing against the same 3467 * vector (toggling its TMR bit back and forth), the results could 3468 * inconsistent. 
Such circumstances are considered a rare edge case and 3469 * are never expected to be found in the wild. 3470 */ 3471 tmrval = atomic_load_acq_int(&vlapic_vtx->tmr_active[idx]); 3472 if (!level) { 3473 if ((tmrval & mask) != 0) { 3474 /* Edge-triggered interrupt needs TMR de-asserted */ 3475 atomic_set_int(&vlapic_vtx->pending_edge[idx], mask); 3476 atomic_store_rel_long(&pir_desc->pending, 1); 3477 return (VCPU_NOTIFY_EXIT); 3478 } 3479 } else { 3480 if ((tmrval & mask) == 0) { 3481 /* Level-triggered interrupt needs TMR asserted */ 3482 atomic_set_int(&vlapic_vtx->pending_level[idx], mask); 3483 atomic_store_rel_long(&pir_desc->pending, 1); 3484 return (VCPU_NOTIFY_EXIT); 3485 } 3486 } 3487 3488 /* 3489 * If the interrupt request does not require manipulation of the TMRs 3490 * for delivery, set it in PIR descriptor. It cannot be inserted into 3491 * the APIC page while the vCPU might be running. 3492 */ 3493 atomic_set_int(&pir_desc->pir[idx], mask); 3494 3495 /* 3496 * A notification is required whenever the 'pending' bit makes a 3497 * transition from 0->1. 3498 * 3499 * Even if the 'pending' bit is already asserted, notification about 3500 * the incoming interrupt may still be necessary. For example, if a 3501 * vCPU is HLTed with a high PPR, a low priority interrupt would cause 3502 * the 0->1 'pending' transition with a notification, but the vCPU 3503 * would ignore the interrupt for the time being. The same vCPU would 3504 * need to then be notified if a high-priority interrupt arrived which 3505 * satisfied the PPR. 3506 * 3507 * The priorities of interrupts injected while 'pending' is asserted 3508 * are tracked in a custom bitfield 'pending_prio'. Should the 3509 * to-be-injected interrupt exceed the priorities already present, the 3510 * notification is sent. The priorities recorded in 'pending_prio' are 3511 * cleared whenever the 'pending' bit makes another 0->1 transition. 3512 */ 3513 if (atomic_cmpset_long(&pir_desc->pending, 0, 1) != 0) { 3514 notify = VCPU_NOTIFY_APIC; 3515 vlapic_vtx->pending_prio = 0; 3516 } else { 3517 const uint_t old_prio = vlapic_vtx->pending_prio; 3518 const uint_t prio_bit = VPR_PRIO_BIT(vector & APIC_TPR_INT); 3519 3520 if ((old_prio & prio_bit) == 0 && prio_bit > old_prio) { 3521 atomic_set_int(&vlapic_vtx->pending_prio, prio_bit); 3522 notify = VCPU_NOTIFY_APIC; 3523 } 3524 } 3525 3526 return (notify); 3527 } 3528 3529 static void 3530 vmx_apicv_accepted(struct vlapic *vlapic, int vector) 3531 { 3532 /* 3533 * When APICv is enabled for an instance, the traditional interrupt 3534 * injection method (populating ENTRY_INTR_INFO in the VMCS) is not 3535 * used and the CPU does the heavy lifting of virtual interrupt 3536 * delivery. For that reason vmx_intr_accepted() should never be called 3537 * when APICv is enabled. 
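	 * The panic below therefore guards against a programming error in
	 * the vlapic ops wiring rather than any guest-reachable condition.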
3538 */ 3539 panic("vmx_intr_accepted: not expected to be called"); 3540 } 3541 3542 static void 3543 vmx_apicv_sync_tmr(struct vlapic *vlapic) 3544 { 3545 struct vlapic_vtx *vlapic_vtx; 3546 const uint32_t *tmrs; 3547 3548 vlapic_vtx = (struct vlapic_vtx *)vlapic; 3549 tmrs = &vlapic_vtx->tmr_active[0]; 3550 3551 if (!vlapic_vtx->tmr_sync) { 3552 return; 3553 } 3554 3555 vmcs_write(VMCS_EOI_EXIT0, ((uint64_t)tmrs[1] << 32) | tmrs[0]); 3556 vmcs_write(VMCS_EOI_EXIT1, ((uint64_t)tmrs[3] << 32) | tmrs[2]); 3557 vmcs_write(VMCS_EOI_EXIT2, ((uint64_t)tmrs[5] << 32) | tmrs[4]); 3558 vmcs_write(VMCS_EOI_EXIT3, ((uint64_t)tmrs[7] << 32) | tmrs[6]); 3559 vlapic_vtx->tmr_sync = B_FALSE; 3560 } 3561 3562 static void 3563 vmx_enable_x2apic_mode_ts(struct vlapic *vlapic) 3564 { 3565 struct vmx *vmx; 3566 uint32_t proc_ctls; 3567 int vcpuid; 3568 3569 vcpuid = vlapic->vcpuid; 3570 vmx = ((struct vlapic_vtx *)vlapic)->vmx; 3571 3572 proc_ctls = vmx->cap[vcpuid].proc_ctls; 3573 proc_ctls &= ~PROCBASED_USE_TPR_SHADOW; 3574 proc_ctls |= PROCBASED_CR8_LOAD_EXITING; 3575 proc_ctls |= PROCBASED_CR8_STORE_EXITING; 3576 vmx->cap[vcpuid].proc_ctls = proc_ctls; 3577 3578 vmcs_load(vmx->vmcs_pa[vcpuid]); 3579 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, proc_ctls); 3580 vmcs_clear(vmx->vmcs_pa[vcpuid]); 3581 } 3582 3583 static void 3584 vmx_enable_x2apic_mode_vid(struct vlapic *vlapic) 3585 { 3586 struct vmx *vmx; 3587 uint32_t proc_ctls2; 3588 int vcpuid, error; 3589 3590 vcpuid = vlapic->vcpuid; 3591 vmx = ((struct vlapic_vtx *)vlapic)->vmx; 3592 3593 proc_ctls2 = vmx->cap[vcpuid].proc_ctls2; 3594 KASSERT((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) != 0, 3595 ("%s: invalid proc_ctls2 %x", __func__, proc_ctls2)); 3596 3597 proc_ctls2 &= ~PROCBASED2_VIRTUALIZE_APIC_ACCESSES; 3598 proc_ctls2 |= PROCBASED2_VIRTUALIZE_X2APIC_MODE; 3599 vmx->cap[vcpuid].proc_ctls2 = proc_ctls2; 3600 3601 vmcs_load(vmx->vmcs_pa[vcpuid]); 3602 vmcs_write(VMCS_SEC_PROC_BASED_CTLS, proc_ctls2); 3603 vmcs_clear(vmx->vmcs_pa[vcpuid]); 3604 3605 if (vlapic->vcpuid == 0) { 3606 /* 3607 * The nested page table mappings are shared by all vcpus 3608 * so unmap the APIC access page just once. 3609 */ 3610 error = vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE); 3611 KASSERT(error == 0, ("%s: vm_unmap_mmio error %d", 3612 __func__, error)); 3613 3614 /* 3615 * The MSR bitmap is shared by all vcpus so modify it only 3616 * once in the context of vcpu 0. 
3617 */ 3618 error = vmx_allow_x2apic_msrs(vmx); 3619 KASSERT(error == 0, ("%s: vmx_allow_x2apic_msrs error %d", 3620 __func__, error)); 3621 } 3622 } 3623 3624 static void 3625 vmx_apicv_notify(struct vlapic *vlapic, int hostcpu) 3626 { 3627 psm_send_pir_ipi(hostcpu); 3628 } 3629 3630 static void 3631 vmx_apicv_sync(struct vlapic *vlapic) 3632 { 3633 struct vlapic_vtx *vlapic_vtx; 3634 struct pir_desc *pir_desc; 3635 struct LAPIC *lapic; 3636 uint_t i; 3637 3638 vlapic_vtx = (struct vlapic_vtx *)vlapic; 3639 pir_desc = vlapic_vtx->pir_desc; 3640 lapic = vlapic->apic_page; 3641 3642 if (atomic_cmpset_long(&pir_desc->pending, 1, 0) == 0) { 3643 return; 3644 } 3645 3646 vlapic_vtx->pending_prio = 0; 3647 3648 /* Make sure the invalid (0-15) vectors are not set */ 3649 ASSERT0(vlapic_vtx->pending_level[0] & 0xffff); 3650 ASSERT0(vlapic_vtx->pending_edge[0] & 0xffff); 3651 ASSERT0(pir_desc->pir[0] & 0xffff); 3652 3653 for (i = 0; i <= 7; i++) { 3654 uint32_t *tmrp = &lapic->tmr0 + (i * 4); 3655 uint32_t *irrp = &lapic->irr0 + (i * 4); 3656 3657 const uint32_t pending_level = 3658 atomic_readandclear_int(&vlapic_vtx->pending_level[i]); 3659 const uint32_t pending_edge = 3660 atomic_readandclear_int(&vlapic_vtx->pending_edge[i]); 3661 const uint32_t pending_inject = 3662 atomic_readandclear_int(&pir_desc->pir[i]); 3663 3664 if (pending_level != 0) { 3665 /* 3666 * Level-triggered interrupts assert their corresponding 3667 * bit in the TMR when queued in IRR. 3668 */ 3669 *tmrp |= pending_level; 3670 *irrp |= pending_level; 3671 } 3672 if (pending_edge != 0) { 3673 /* 3674 * When queuing an edge-triggered interrupt in IRR, the 3675 * corresponding bit in the TMR is cleared. 3676 */ 3677 *tmrp &= ~pending_edge; 3678 *irrp |= pending_edge; 3679 } 3680 if (pending_inject != 0) { 3681 /* 3682 * Interrupts which do not require a change to the TMR 3683 * (because it already matches the necessary state) can 3684 * simply be queued in IRR. 3685 */ 3686 *irrp |= pending_inject; 3687 } 3688 3689 if (*tmrp != vlapic_vtx->tmr_active[i]) { 3690 /* Check if VMX EOI triggers require updating. */ 3691 vlapic_vtx->tmr_active[i] = *tmrp; 3692 vlapic_vtx->tmr_sync = B_TRUE; 3693 } 3694 } 3695 } 3696 3697 static void 3698 vmx_tpr_shadow_enter(struct vlapic *vlapic) 3699 { 3700 /* 3701 * When TPR shadowing is enabled, VMX will initiate a guest exit if its 3702 * TPR falls below a threshold priority. That threshold is set to the 3703 * current TPR priority, since guest interrupt status should be 3704 * re-evaluated if its TPR is set lower. 3705 */ 3706 vmcs_write(VMCS_TPR_THRESHOLD, vlapic_get_cr8(vlapic)); 3707 } 3708 3709 static void 3710 vmx_tpr_shadow_exit(struct vlapic *vlapic) 3711 { 3712 /* 3713 * Unlike full APICv, where changes to the TPR are reflected in the PPR, 3714 * with TPR shadowing, that duty is relegated to the VMM. Upon exit, 3715 * the PPR is updated to reflect any change in the TPR here. 
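	 * (vlapic_sync_tpr() is assumed here to re-derive the PPR from the
	 * TPR value the guest last wrote through the virtual-APIC page.)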
3716 */ 3717 vlapic_sync_tpr(vlapic); 3718 } 3719 3720 static struct vlapic * 3721 vmx_vlapic_init(void *arg, int vcpuid) 3722 { 3723 struct vmx *vmx; 3724 struct vlapic *vlapic; 3725 struct vlapic_vtx *vlapic_vtx; 3726 3727 vmx = arg; 3728 3729 vlapic = malloc(sizeof (struct vlapic_vtx), M_VLAPIC, 3730 M_WAITOK | M_ZERO); 3731 vlapic->vm = vmx->vm; 3732 vlapic->vcpuid = vcpuid; 3733 vlapic->apic_page = (struct LAPIC *)&vmx->apic_page[vcpuid]; 3734 3735 vlapic_vtx = (struct vlapic_vtx *)vlapic; 3736 vlapic_vtx->pir_desc = &vmx->pir_desc[vcpuid]; 3737 vlapic_vtx->vmx = vmx; 3738 3739 if (vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW)) { 3740 vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode_ts; 3741 } 3742 if (vmx_cap_en(vmx, VMX_CAP_APICV)) { 3743 vlapic->ops.set_intr_ready = vmx_apicv_set_ready; 3744 vlapic->ops.sync_state = vmx_apicv_sync; 3745 vlapic->ops.intr_accepted = vmx_apicv_accepted; 3746 vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode_vid; 3747 3748 if (vmx_cap_en(vmx, VMX_CAP_APICV_PIR)) { 3749 vlapic->ops.post_intr = vmx_apicv_notify; 3750 } 3751 } 3752 3753 vlapic_init(vlapic); 3754 3755 return (vlapic); 3756 } 3757 3758 static void 3759 vmx_vlapic_cleanup(void *arg, struct vlapic *vlapic) 3760 { 3761 3762 vlapic_cleanup(vlapic); 3763 free(vlapic, M_VLAPIC); 3764 } 3765 3766 #ifndef __FreeBSD__ 3767 static void 3768 vmx_savectx(void *arg, int vcpu) 3769 { 3770 struct vmx *vmx = arg; 3771 3772 if ((vmx->vmcs_state[vcpu] & VS_LOADED) != 0) { 3773 vmcs_clear(vmx->vmcs_pa[vcpu]); 3774 vmx_msr_guest_exit(vmx, vcpu); 3775 /* 3776 * Having VMCLEARed the VMCS, it can no longer be re-entered 3777 * with VMRESUME, but must be VMLAUNCHed again. 3778 */ 3779 vmx->vmcs_state[vcpu] &= ~VS_LAUNCHED; 3780 } 3781 3782 reset_gdtr_limit(); 3783 } 3784 3785 static void 3786 vmx_restorectx(void *arg, int vcpu) 3787 { 3788 struct vmx *vmx = arg; 3789 3790 ASSERT0(vmx->vmcs_state[vcpu] & VS_LAUNCHED); 3791 3792 if ((vmx->vmcs_state[vcpu] & VS_LOADED) != 0) { 3793 vmx_msr_guest_enter(vmx, vcpu); 3794 vmcs_load(vmx->vmcs_pa[vcpu]); 3795 } 3796 } 3797 #endif /* __FreeBSD__ */ 3798 3799 struct vmm_ops vmm_ops_intel = { 3800 .init = vmx_init, 3801 .cleanup = vmx_cleanup, 3802 .resume = vmx_restore, 3803 .vminit = vmx_vminit, 3804 .vmrun = vmx_run, 3805 .vmcleanup = vmx_vmcleanup, 3806 .vmgetreg = vmx_getreg, 3807 .vmsetreg = vmx_setreg, 3808 .vmgetdesc = vmx_getdesc, 3809 .vmsetdesc = vmx_setdesc, 3810 .vmgetcap = vmx_getcap, 3811 .vmsetcap = vmx_setcap, 3812 .vmspace_alloc = ept_vmspace_alloc, 3813 .vmspace_free = ept_vmspace_free, 3814 .vlapic_init = vmx_vlapic_init, 3815 .vlapic_cleanup = vmx_vlapic_cleanup, 3816 3817 #ifndef __FreeBSD__ 3818 .vmsavectx = vmx_savectx, 3819 .vmrestorectx = vmx_restorectx, 3820 #endif 3821 }; 3822 3823 #ifndef __FreeBSD__ 3824 /* Side-effect free HW validation derived from checks in vmx_init. 
*/ 3825 int 3826 vmx_x86_supported(const char **msg) 3827 { 3828 int error; 3829 uint32_t tmp; 3830 3831 ASSERT(msg != NULL); 3832 3833 /* Check support for primary processor-based VM-execution controls */ 3834 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, 3835 MSR_VMX_TRUE_PROCBASED_CTLS, PROCBASED_CTLS_ONE_SETTING, 3836 PROCBASED_CTLS_ZERO_SETTING, &tmp); 3837 if (error) { 3838 *msg = "processor does not support desired primary " 3839 "processor-based controls"; 3840 return (error); 3841 } 3842 3843 /* Check support for secondary processor-based VM-execution controls */ 3844 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, 3845 MSR_VMX_PROCBASED_CTLS2, PROCBASED_CTLS2_ONE_SETTING, 3846 PROCBASED_CTLS2_ZERO_SETTING, &tmp); 3847 if (error) { 3848 *msg = "processor does not support desired secondary " 3849 "processor-based controls"; 3850 return (error); 3851 } 3852 3853 /* Check support for pin-based VM-execution controls */ 3854 error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS, 3855 MSR_VMX_TRUE_PINBASED_CTLS, PINBASED_CTLS_ONE_SETTING, 3856 PINBASED_CTLS_ZERO_SETTING, &tmp); 3857 if (error) { 3858 *msg = "processor does not support desired pin-based controls"; 3859 return (error); 3860 } 3861 3862 /* Check support for VM-exit controls */ 3863 error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS, 3864 VM_EXIT_CTLS_ONE_SETTING, VM_EXIT_CTLS_ZERO_SETTING, &tmp); 3865 if (error) { 3866 *msg = "processor does not support desired exit controls"; 3867 return (error); 3868 } 3869 3870 /* Check support for VM-entry controls */ 3871 error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS, MSR_VMX_TRUE_ENTRY_CTLS, 3872 VM_ENTRY_CTLS_ONE_SETTING, VM_ENTRY_CTLS_ZERO_SETTING, &tmp); 3873 if (error) { 3874 *msg = "processor does not support desired entry controls"; 3875 return (error); 3876 } 3877 3878 /* Unrestricted guest is nominally optional, but not for us. */ 3879 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2, 3880 PROCBASED2_UNRESTRICTED_GUEST, 0, &tmp); 3881 if (error) { 3882 *msg = "processor does not support desired unrestricted guest " 3883 "controls"; 3884 return (error); 3885 } 3886 3887 return (0); 3888 } 3889 #endif 3890