/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 * Copyright (c) 2018 Joyent, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 *
 * Copyright 2015 Pluribus Networks Inc.
 * Copyright 2018 Joyent, Inc.
 * Copyright 2020 Oxide Computer Company
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/smp.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sysctl.h>

#ifndef __FreeBSD__
#include <sys/x86_archext.h>
#include <sys/smp_impldefs.h>
#include <sys/smt.h>
#include <sys/hma.h>
#include <sys/trap.h>
#endif

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/psl.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>
#include <machine/reg.h>
#include <machine/segments.h>
#include <machine/smp.h>
#include <machine/specialreg.h>
#include <machine/vmparam.h>

#include <machine/vmm.h>
#include <machine/vmm_dev.h>
#include <sys/vmm_instruction_emul.h>
#include "vmm_lapic.h"
#include "vmm_host.h"
#include "vmm_ioport.h"
#include "vmm_ktr.h"
#include "vmm_stat.h"
#include "vatpic.h"
#include "vlapic.h"
#include "vlapic_priv.h"

#include "ept.h"
#include "vmx_cpufunc.h"
#include "vmcs.h"
#include "vmx.h"
#include "vmx_msr.h"
#include "x86.h"
#include "vmx_controls.h"

#define	PINBASED_CTLS_ONE_SETTING \
	(PINBASED_EXTINT_EXITING | \
	PINBASED_NMI_EXITING | \
	PINBASED_VIRTUAL_NMI)
#define	PINBASED_CTLS_ZERO_SETTING	0

#define	PROCBASED_CTLS_WINDOW_SETTING \
	(PROCBASED_INT_WINDOW_EXITING | \
	PROCBASED_NMI_WINDOW_EXITING)

#ifdef __FreeBSD__
#define	PROCBASED_CTLS_ONE_SETTING \
	(PROCBASED_SECONDARY_CONTROLS | \
	PROCBASED_MWAIT_EXITING | \
	PROCBASED_MONITOR_EXITING | \
	PROCBASED_IO_EXITING | \
	PROCBASED_MSR_BITMAPS | \
	PROCBASED_CTLS_WINDOW_SETTING | \
	PROCBASED_CR8_LOAD_EXITING | \
	PROCBASED_CR8_STORE_EXITING)
#else
/* We consider TSC offset a necessity for unsynched TSC handling */
#define	PROCBASED_CTLS_ONE_SETTING \
	(PROCBASED_SECONDARY_CONTROLS | \
	PROCBASED_TSC_OFFSET | \
	PROCBASED_MWAIT_EXITING | \
	PROCBASED_MONITOR_EXITING | \
	PROCBASED_IO_EXITING | \
	PROCBASED_MSR_BITMAPS | \
	PROCBASED_CTLS_WINDOW_SETTING | \
	PROCBASED_CR8_LOAD_EXITING | \
	PROCBASED_CR8_STORE_EXITING)
#endif /* __FreeBSD__ */

#define	PROCBASED_CTLS_ZERO_SETTING \
	(PROCBASED_CR3_LOAD_EXITING | \
	PROCBASED_CR3_STORE_EXITING | \
	PROCBASED_IO_BITMAPS)

/*
 * EPT and Unrestricted Guest are considered necessities. The latter is not a
 * requirement on FreeBSD, where grub2-bhyve is used to load guests directly
 * without a bootrom starting in real mode.
141 */ 142 #define PROCBASED_CTLS2_ONE_SETTING \ 143 (PROCBASED2_ENABLE_EPT | \ 144 PROCBASED2_UNRESTRICTED_GUEST) 145 #define PROCBASED_CTLS2_ZERO_SETTING 0 146 147 #define VM_EXIT_CTLS_ONE_SETTING \ 148 (VM_EXIT_SAVE_DEBUG_CONTROLS | \ 149 VM_EXIT_HOST_LMA | \ 150 VM_EXIT_LOAD_PAT | \ 151 VM_EXIT_SAVE_EFER | \ 152 VM_EXIT_LOAD_EFER | \ 153 VM_EXIT_ACKNOWLEDGE_INTERRUPT) 154 155 #define VM_EXIT_CTLS_ZERO_SETTING 0 156 157 #define VM_ENTRY_CTLS_ONE_SETTING \ 158 (VM_ENTRY_LOAD_DEBUG_CONTROLS | \ 159 VM_ENTRY_LOAD_EFER) 160 161 #define VM_ENTRY_CTLS_ZERO_SETTING \ 162 (VM_ENTRY_INTO_SMM | \ 163 VM_ENTRY_DEACTIVATE_DUAL_MONITOR) 164 165 #define HANDLED 1 166 #define UNHANDLED 0 167 168 static MALLOC_DEFINE(M_VMX, "vmx", "vmx"); 169 static MALLOC_DEFINE(M_VLAPIC, "vlapic", "vlapic"); 170 171 SYSCTL_DECL(_hw_vmm); 172 SYSCTL_NODE(_hw_vmm, OID_AUTO, vmx, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 173 NULL); 174 175 #ifdef __FreeBSD__ 176 int vmxon_enabled[MAXCPU]; 177 static char vmxon_region[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE); 178 #endif /*__FreeBSD__ */ 179 180 static uint32_t pinbased_ctls, procbased_ctls, procbased_ctls2; 181 static uint32_t exit_ctls, entry_ctls; 182 183 static uint64_t cr0_ones_mask, cr0_zeros_mask; 184 SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_ones_mask, CTLFLAG_RD, 185 &cr0_ones_mask, 0, NULL); 186 SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_zeros_mask, CTLFLAG_RD, 187 &cr0_zeros_mask, 0, NULL); 188 189 static uint64_t cr4_ones_mask, cr4_zeros_mask; 190 SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_ones_mask, CTLFLAG_RD, 191 &cr4_ones_mask, 0, NULL); 192 SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_zeros_mask, CTLFLAG_RD, 193 &cr4_zeros_mask, 0, NULL); 194 195 static int vmx_initialized; 196 SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, initialized, CTLFLAG_RD, 197 &vmx_initialized, 0, "Intel VMX initialized"); 198 199 /* 200 * Optional capabilities 201 */ 202 #ifdef __FreeBSD__ 203 static SYSCTL_NODE(_hw_vmm_vmx, OID_AUTO, cap, 204 CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 205 NULL); 206 #endif 207 208 static int cap_halt_exit; 209 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, halt_exit, CTLFLAG_RD, &cap_halt_exit, 0, 210 "HLT triggers a VM-exit"); 211 212 static int cap_pause_exit; 213 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, pause_exit, CTLFLAG_RD, &cap_pause_exit, 214 0, "PAUSE triggers a VM-exit"); 215 216 static int cap_monitor_trap; 217 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, monitor_trap, CTLFLAG_RD, 218 &cap_monitor_trap, 0, "Monitor trap flag"); 219 220 static int cap_invpcid; 221 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, invpcid, CTLFLAG_RD, &cap_invpcid, 222 0, "Guests are allowed to use INVPCID"); 223 224 /* Extra capabilities (VMX_CAP_*) beyond the minimum */ 225 static enum vmx_caps vmx_capabilities; 226 227 static int pirvec = -1; 228 SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, posted_interrupt_vector, CTLFLAG_RD, 229 &pirvec, 0, "APICv posted interrupt vector"); 230 231 #ifdef __FreeBSD__ 232 static struct unrhdr *vpid_unr; 233 #endif /* __FreeBSD__ */ 234 static u_int vpid_alloc_failed; 235 SYSCTL_UINT(_hw_vmm_vmx, OID_AUTO, vpid_alloc_failed, CTLFLAG_RD, 236 &vpid_alloc_failed, 0, NULL); 237 238 int guest_l1d_flush; 239 SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, l1d_flush, CTLFLAG_RD, 240 &guest_l1d_flush, 0, NULL); 241 int guest_l1d_flush_sw; 242 SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, l1d_flush_sw, CTLFLAG_RD, 243 &guest_l1d_flush_sw, 0, NULL); 244 245 static struct msr_entry msr_load_list[1] __aligned(16); 246 247 /* 248 * The definitions of SDT probes for VMX. 
249 */ 250 251 SDT_PROBE_DEFINE3(vmm, vmx, exit, entry, 252 "struct vmx *", "int", "struct vm_exit *"); 253 254 SDT_PROBE_DEFINE4(vmm, vmx, exit, taskswitch, 255 "struct vmx *", "int", "struct vm_exit *", "struct vm_task_switch *"); 256 257 SDT_PROBE_DEFINE4(vmm, vmx, exit, craccess, 258 "struct vmx *", "int", "struct vm_exit *", "uint64_t"); 259 260 SDT_PROBE_DEFINE4(vmm, vmx, exit, rdmsr, 261 "struct vmx *", "int", "struct vm_exit *", "uint32_t"); 262 263 SDT_PROBE_DEFINE5(vmm, vmx, exit, wrmsr, 264 "struct vmx *", "int", "struct vm_exit *", "uint32_t", "uint64_t"); 265 266 SDT_PROBE_DEFINE3(vmm, vmx, exit, halt, 267 "struct vmx *", "int", "struct vm_exit *"); 268 269 SDT_PROBE_DEFINE3(vmm, vmx, exit, mtrap, 270 "struct vmx *", "int", "struct vm_exit *"); 271 272 SDT_PROBE_DEFINE3(vmm, vmx, exit, pause, 273 "struct vmx *", "int", "struct vm_exit *"); 274 275 SDT_PROBE_DEFINE3(vmm, vmx, exit, intrwindow, 276 "struct vmx *", "int", "struct vm_exit *"); 277 278 SDT_PROBE_DEFINE4(vmm, vmx, exit, interrupt, 279 "struct vmx *", "int", "struct vm_exit *", "uint32_t"); 280 281 SDT_PROBE_DEFINE3(vmm, vmx, exit, nmiwindow, 282 "struct vmx *", "int", "struct vm_exit *"); 283 284 SDT_PROBE_DEFINE3(vmm, vmx, exit, inout, 285 "struct vmx *", "int", "struct vm_exit *"); 286 287 SDT_PROBE_DEFINE3(vmm, vmx, exit, cpuid, 288 "struct vmx *", "int", "struct vm_exit *"); 289 290 SDT_PROBE_DEFINE5(vmm, vmx, exit, exception, 291 "struct vmx *", "int", "struct vm_exit *", "uint32_t", "int"); 292 293 SDT_PROBE_DEFINE5(vmm, vmx, exit, nestedfault, 294 "struct vmx *", "int", "struct vm_exit *", "uint64_t", "uint64_t"); 295 296 SDT_PROBE_DEFINE4(vmm, vmx, exit, mmiofault, 297 "struct vmx *", "int", "struct vm_exit *", "uint64_t"); 298 299 SDT_PROBE_DEFINE3(vmm, vmx, exit, eoi, 300 "struct vmx *", "int", "struct vm_exit *"); 301 302 SDT_PROBE_DEFINE3(vmm, vmx, exit, apicaccess, 303 "struct vmx *", "int", "struct vm_exit *"); 304 305 SDT_PROBE_DEFINE4(vmm, vmx, exit, apicwrite, 306 "struct vmx *", "int", "struct vm_exit *", "struct vlapic *"); 307 308 SDT_PROBE_DEFINE3(vmm, vmx, exit, xsetbv, 309 "struct vmx *", "int", "struct vm_exit *"); 310 311 SDT_PROBE_DEFINE3(vmm, vmx, exit, monitor, 312 "struct vmx *", "int", "struct vm_exit *"); 313 314 SDT_PROBE_DEFINE3(vmm, vmx, exit, mwait, 315 "struct vmx *", "int", "struct vm_exit *"); 316 317 SDT_PROBE_DEFINE3(vmm, vmx, exit, vminsn, 318 "struct vmx *", "int", "struct vm_exit *"); 319 320 SDT_PROBE_DEFINE4(vmm, vmx, exit, unknown, 321 "struct vmx *", "int", "struct vm_exit *", "uint32_t"); 322 323 SDT_PROBE_DEFINE4(vmm, vmx, exit, return, 324 "struct vmx *", "int", "struct vm_exit *", "int"); 325 326 /* 327 * Use the last page below 4GB as the APIC access address. This address is 328 * occupied by the boot firmware so it is guaranteed that it will not conflict 329 * with a page in system memory. 
330 */ 331 #define APIC_ACCESS_ADDRESS 0xFFFFF000 332 333 static int vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc); 334 static int vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval); 335 static int vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val); 336 static void vmx_inject_pir(struct vlapic *vlapic); 337 #ifndef __FreeBSD__ 338 static int vmx_apply_tsc_adjust(struct vmx *, int); 339 #endif /* __FreeBSD__ */ 340 341 #ifdef KTR 342 static const char * 343 exit_reason_to_str(int reason) 344 { 345 static char reasonbuf[32]; 346 347 switch (reason) { 348 case EXIT_REASON_EXCEPTION: 349 return "exception"; 350 case EXIT_REASON_EXT_INTR: 351 return "extint"; 352 case EXIT_REASON_TRIPLE_FAULT: 353 return "triplefault"; 354 case EXIT_REASON_INIT: 355 return "init"; 356 case EXIT_REASON_SIPI: 357 return "sipi"; 358 case EXIT_REASON_IO_SMI: 359 return "iosmi"; 360 case EXIT_REASON_SMI: 361 return "smi"; 362 case EXIT_REASON_INTR_WINDOW: 363 return "intrwindow"; 364 case EXIT_REASON_NMI_WINDOW: 365 return "nmiwindow"; 366 case EXIT_REASON_TASK_SWITCH: 367 return "taskswitch"; 368 case EXIT_REASON_CPUID: 369 return "cpuid"; 370 case EXIT_REASON_GETSEC: 371 return "getsec"; 372 case EXIT_REASON_HLT: 373 return "hlt"; 374 case EXIT_REASON_INVD: 375 return "invd"; 376 case EXIT_REASON_INVLPG: 377 return "invlpg"; 378 case EXIT_REASON_RDPMC: 379 return "rdpmc"; 380 case EXIT_REASON_RDTSC: 381 return "rdtsc"; 382 case EXIT_REASON_RSM: 383 return "rsm"; 384 case EXIT_REASON_VMCALL: 385 return "vmcall"; 386 case EXIT_REASON_VMCLEAR: 387 return "vmclear"; 388 case EXIT_REASON_VMLAUNCH: 389 return "vmlaunch"; 390 case EXIT_REASON_VMPTRLD: 391 return "vmptrld"; 392 case EXIT_REASON_VMPTRST: 393 return "vmptrst"; 394 case EXIT_REASON_VMREAD: 395 return "vmread"; 396 case EXIT_REASON_VMRESUME: 397 return "vmresume"; 398 case EXIT_REASON_VMWRITE: 399 return "vmwrite"; 400 case EXIT_REASON_VMXOFF: 401 return "vmxoff"; 402 case EXIT_REASON_VMXON: 403 return "vmxon"; 404 case EXIT_REASON_CR_ACCESS: 405 return "craccess"; 406 case EXIT_REASON_DR_ACCESS: 407 return "draccess"; 408 case EXIT_REASON_INOUT: 409 return "inout"; 410 case EXIT_REASON_RDMSR: 411 return "rdmsr"; 412 case EXIT_REASON_WRMSR: 413 return "wrmsr"; 414 case EXIT_REASON_INVAL_VMCS: 415 return "invalvmcs"; 416 case EXIT_REASON_INVAL_MSR: 417 return "invalmsr"; 418 case EXIT_REASON_MWAIT: 419 return "mwait"; 420 case EXIT_REASON_MTF: 421 return "mtf"; 422 case EXIT_REASON_MONITOR: 423 return "monitor"; 424 case EXIT_REASON_PAUSE: 425 return "pause"; 426 case EXIT_REASON_MCE_DURING_ENTRY: 427 return "mce-during-entry"; 428 case EXIT_REASON_TPR: 429 return "tpr"; 430 case EXIT_REASON_APIC_ACCESS: 431 return "apic-access"; 432 case EXIT_REASON_GDTR_IDTR: 433 return "gdtridtr"; 434 case EXIT_REASON_LDTR_TR: 435 return "ldtrtr"; 436 case EXIT_REASON_EPT_FAULT: 437 return "eptfault"; 438 case EXIT_REASON_EPT_MISCONFIG: 439 return "eptmisconfig"; 440 case EXIT_REASON_INVEPT: 441 return "invept"; 442 case EXIT_REASON_RDTSCP: 443 return "rdtscp"; 444 case EXIT_REASON_VMX_PREEMPT: 445 return "vmxpreempt"; 446 case EXIT_REASON_INVVPID: 447 return "invvpid"; 448 case EXIT_REASON_WBINVD: 449 return "wbinvd"; 450 case EXIT_REASON_XSETBV: 451 return "xsetbv"; 452 case EXIT_REASON_APIC_WRITE: 453 return "apic-write"; 454 default: 455 snprintf(reasonbuf, sizeof(reasonbuf), "%d", reason); 456 return (reasonbuf); 457 } 458 } 459 #endif /* KTR */ 460 461 static int 462 vmx_allow_x2apic_msrs(struct vmx *vmx) 463 { 464 int i, error; 465 
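	/*
	 * Note: guest_msr_ro()/guest_msr_rw() below adjust this VM's MSR
	 * bitmap (enabled via PROCBASED_MSR_BITMAPS), so the listed x2APIC
	 * MSRs (the 0x800 MSR range) can be read, or read and written, by
	 * the guest without triggering RDMSR/WRMSR VM-exits.
	 */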
	error = 0;

	/*
	 * Allow readonly access to the following x2APIC MSRs from the guest.
	 */
	error += guest_msr_ro(vmx, MSR_APIC_ID);
	error += guest_msr_ro(vmx, MSR_APIC_VERSION);
	error += guest_msr_ro(vmx, MSR_APIC_LDR);
	error += guest_msr_ro(vmx, MSR_APIC_SVR);

	for (i = 0; i < 8; i++)
		error += guest_msr_ro(vmx, MSR_APIC_ISR0 + i);

	for (i = 0; i < 8; i++)
		error += guest_msr_ro(vmx, MSR_APIC_TMR0 + i);

	for (i = 0; i < 8; i++)
		error += guest_msr_ro(vmx, MSR_APIC_IRR0 + i);

	error += guest_msr_ro(vmx, MSR_APIC_ESR);
	error += guest_msr_ro(vmx, MSR_APIC_LVT_TIMER);
	error += guest_msr_ro(vmx, MSR_APIC_LVT_THERMAL);
	error += guest_msr_ro(vmx, MSR_APIC_LVT_PCINT);
	error += guest_msr_ro(vmx, MSR_APIC_LVT_LINT0);
	error += guest_msr_ro(vmx, MSR_APIC_LVT_LINT1);
	error += guest_msr_ro(vmx, MSR_APIC_LVT_ERROR);
	error += guest_msr_ro(vmx, MSR_APIC_ICR_TIMER);
	error += guest_msr_ro(vmx, MSR_APIC_DCR_TIMER);
	error += guest_msr_ro(vmx, MSR_APIC_ICR);

	/*
	 * Allow TPR, EOI and SELF_IPI MSRs to be read and written by the
	 * guest.
	 *
	 * These registers get special treatment described in the section
	 * "Virtualizing MSR-Based APIC Accesses".
	 */
	error += guest_msr_rw(vmx, MSR_APIC_TPR);
	error += guest_msr_rw(vmx, MSR_APIC_EOI);
	error += guest_msr_rw(vmx, MSR_APIC_SELF_IPI);

	return (error);
}

u_long
vmx_fix_cr0(u_long cr0)
{
	return ((cr0 | cr0_ones_mask) & ~cr0_zeros_mask);
}

u_long
vmx_fix_cr4(u_long cr4)
{
	return ((cr4 | cr4_ones_mask) & ~cr4_zeros_mask);
}

static void
vpid_free(int vpid)
{
	if (vpid < 0 || vpid > 0xffff)
		panic("vpid_free: invalid vpid %d", vpid);

	/*
	 * VPIDs [0,VM_MAXCPU] are special and are not allocated from
	 * the unit number allocator.
	 */

	if (vpid > VM_MAXCPU)
#ifdef __FreeBSD__
		free_unr(vpid_unr, vpid);
#else
		hma_vmx_vpid_free((uint16_t)vpid);
#endif
}

static void
vpid_alloc(uint16_t *vpid, int num)
{
	int i, x;

	if (num <= 0 || num > VM_MAXCPU)
		panic("invalid number of vpids requested: %d", num);

	/*
	 * If the "enable vpid" execution control is not enabled then the
	 * VPID is required to be 0 for all vcpus.
	 */
	if ((procbased_ctls2 & PROCBASED2_ENABLE_VPID) == 0) {
		for (i = 0; i < num; i++)
			vpid[i] = 0;
		return;
	}

	/*
	 * Allocate a unique VPID for each vcpu from the unit number allocator.
	 */
	for (i = 0; i < num; i++) {
#ifdef __FreeBSD__
		x = alloc_unr(vpid_unr);
#else
		uint16_t tmp;

		tmp = hma_vmx_vpid_alloc();
		x = (tmp == 0) ? -1 : tmp;
#endif
		if (x == -1)
			break;
		else
			vpid[i] = x;
	}

	if (i < num) {
		atomic_add_int(&vpid_alloc_failed, 1);

		/*
		 * If the unit number allocator does not have enough unique
		 * VPIDs then we need to allocate from the [1,VM_MAXCPU] range.
		 *
		 * These VPIDs are not unique across VMs, but this does not
		 * affect correctness because the combined mappings are also
		 * tagged with the EP4TA which is unique for each VM.
		 *
		 * It is still sub-optimal because the invvpid will invalidate
		 * combined mappings for a particular VPID across all EP4TAs.
591 */ 592 while (i-- > 0) 593 vpid_free(vpid[i]); 594 595 for (i = 0; i < num; i++) 596 vpid[i] = i + 1; 597 } 598 } 599 600 static int 601 vmx_cleanup(void) 602 { 603 /* This is taken care of by the hma registration */ 604 return (0); 605 } 606 607 static void 608 vmx_restore(void) 609 { 610 /* No-op on illumos */ 611 } 612 613 static int 614 vmx_init(int ipinum) 615 { 616 int error; 617 uint64_t fixed0, fixed1; 618 uint32_t tmp; 619 enum vmx_caps avail_caps = VMX_CAP_NONE; 620 621 /* Check support for primary processor-based VM-execution controls */ 622 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, 623 MSR_VMX_TRUE_PROCBASED_CTLS, 624 PROCBASED_CTLS_ONE_SETTING, 625 PROCBASED_CTLS_ZERO_SETTING, &procbased_ctls); 626 if (error) { 627 printf("vmx_init: processor does not support desired primary " 628 "processor-based controls\n"); 629 return (error); 630 } 631 632 /* Clear the processor-based ctl bits that are set on demand */ 633 procbased_ctls &= ~PROCBASED_CTLS_WINDOW_SETTING; 634 635 /* Check support for secondary processor-based VM-execution controls */ 636 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, 637 MSR_VMX_PROCBASED_CTLS2, 638 PROCBASED_CTLS2_ONE_SETTING, 639 PROCBASED_CTLS2_ZERO_SETTING, &procbased_ctls2); 640 if (error) { 641 printf("vmx_init: processor does not support desired secondary " 642 "processor-based controls\n"); 643 return (error); 644 } 645 646 /* Check support for VPID */ 647 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2, 648 PROCBASED2_ENABLE_VPID, 0, &tmp); 649 if (error == 0) 650 procbased_ctls2 |= PROCBASED2_ENABLE_VPID; 651 652 /* Check support for pin-based VM-execution controls */ 653 error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS, 654 MSR_VMX_TRUE_PINBASED_CTLS, 655 PINBASED_CTLS_ONE_SETTING, 656 PINBASED_CTLS_ZERO_SETTING, &pinbased_ctls); 657 if (error) { 658 printf("vmx_init: processor does not support desired " 659 "pin-based controls\n"); 660 return (error); 661 } 662 663 /* Check support for VM-exit controls */ 664 error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS, 665 VM_EXIT_CTLS_ONE_SETTING, 666 VM_EXIT_CTLS_ZERO_SETTING, 667 &exit_ctls); 668 if (error) { 669 printf("vmx_init: processor does not support desired " 670 "exit controls\n"); 671 return (error); 672 } 673 674 /* Check support for VM-entry controls */ 675 error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS, MSR_VMX_TRUE_ENTRY_CTLS, 676 VM_ENTRY_CTLS_ONE_SETTING, VM_ENTRY_CTLS_ZERO_SETTING, 677 &entry_ctls); 678 if (error) { 679 printf("vmx_init: processor does not support desired " 680 "entry controls\n"); 681 return (error); 682 } 683 684 /* 685 * Check support for optional features by testing them 686 * as individual bits 687 */ 688 cap_halt_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, 689 MSR_VMX_TRUE_PROCBASED_CTLS, 690 PROCBASED_HLT_EXITING, 0, 691 &tmp) == 0); 692 693 cap_monitor_trap = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, 694 MSR_VMX_PROCBASED_CTLS, 695 PROCBASED_MTF, 0, 696 &tmp) == 0); 697 698 cap_pause_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, 699 MSR_VMX_TRUE_PROCBASED_CTLS, 700 PROCBASED_PAUSE_EXITING, 0, 701 &tmp) == 0); 702 703 cap_invpcid = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, 704 MSR_VMX_PROCBASED_CTLS2, PROCBASED2_ENABLE_INVPCID, 0, 705 &tmp) == 0); 706 707 /* Check for APIC virtualization capabilities: 708 * - TPR shadowing 709 * - Full APICv (with or without x2APIC support) 710 * - Posted interrupt handling 711 */ 712 if (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, MSR_VMX_TRUE_PROCBASED_CTLS, 713 PROCBASED_USE_TPR_SHADOW, 
0, &tmp) == 0) { 714 avail_caps |= VMX_CAP_TPR_SHADOW; 715 716 const uint32_t apicv_bits = 717 PROCBASED2_VIRTUALIZE_APIC_ACCESSES | 718 PROCBASED2_APIC_REGISTER_VIRTUALIZATION | 719 PROCBASED2_VIRTUALIZE_X2APIC_MODE | 720 PROCBASED2_VIRTUAL_INTERRUPT_DELIVERY; 721 if (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, 722 MSR_VMX_PROCBASED_CTLS2, apicv_bits, 0, &tmp) == 0) { 723 avail_caps |= VMX_CAP_APICV; 724 725 /* 726 * It may make sense in the future to differentiate 727 * hardware (or software) configurations with APICv but 728 * no support for accelerating x2APIC mode. 729 */ 730 avail_caps |= VMX_CAP_APICV_X2APIC; 731 732 error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS, 733 MSR_VMX_TRUE_PINBASED_CTLS, 734 PINBASED_POSTED_INTERRUPT, 0, &tmp); 735 if (error == 0) { 736 /* 737 * If the PSM-provided interfaces for requesting 738 * and using a PIR IPI vector are present, use 739 * them for posted interrupts. 740 */ 741 if (psm_get_pir_ipivect != NULL && 742 psm_send_pir_ipi != NULL) { 743 pirvec = psm_get_pir_ipivect(); 744 avail_caps |= VMX_CAP_APICV_PIR; 745 } 746 } 747 } 748 } 749 750 /* Initialize EPT */ 751 error = ept_init(ipinum); 752 if (error) { 753 printf("vmx_init: ept initialization failed (%d)\n", error); 754 return (error); 755 } 756 757 #ifdef __FreeBSD__ 758 guest_l1d_flush = (cpu_ia32_arch_caps & 759 IA32_ARCH_CAP_SKIP_L1DFL_VMENTRY) == 0; 760 TUNABLE_INT_FETCH("hw.vmm.l1d_flush", &guest_l1d_flush); 761 762 /* 763 * L1D cache flush is enabled. Use IA32_FLUSH_CMD MSR when 764 * available. Otherwise fall back to the software flush 765 * method which loads enough data from the kernel text to 766 * flush existing L1D content, both on VMX entry and on NMI 767 * return. 768 */ 769 if (guest_l1d_flush) { 770 if ((cpu_stdext_feature3 & CPUID_STDEXT3_L1D_FLUSH) == 0) { 771 guest_l1d_flush_sw = 1; 772 TUNABLE_INT_FETCH("hw.vmm.l1d_flush_sw", 773 &guest_l1d_flush_sw); 774 } 775 if (guest_l1d_flush_sw) { 776 if (nmi_flush_l1d_sw <= 1) 777 nmi_flush_l1d_sw = 1; 778 } else { 779 msr_load_list[0].index = MSR_IA32_FLUSH_CMD; 780 msr_load_list[0].val = IA32_FLUSH_CMD_L1D; 781 } 782 } 783 #else 784 /* L1D flushing is taken care of by smt_acquire() and friends */ 785 guest_l1d_flush = 0; 786 #endif /* __FreeBSD__ */ 787 788 /* 789 * Stash the cr0 and cr4 bits that must be fixed to 0 or 1 790 */ 791 fixed0 = rdmsr(MSR_VMX_CR0_FIXED0); 792 fixed1 = rdmsr(MSR_VMX_CR0_FIXED1); 793 cr0_ones_mask = fixed0 & fixed1; 794 cr0_zeros_mask = ~fixed0 & ~fixed1; 795 796 /* 797 * Since Unrestricted Guest was already verified present, CR0_PE and 798 * CR0_PG are allowed to be set to zero in VMX non-root operation 799 */ 800 cr0_ones_mask &= ~(CR0_PG | CR0_PE); 801 802 /* 803 * Do not allow the guest to set CR0_NW or CR0_CD. 
804 */ 805 cr0_zeros_mask |= (CR0_NW | CR0_CD); 806 807 fixed0 = rdmsr(MSR_VMX_CR4_FIXED0); 808 fixed1 = rdmsr(MSR_VMX_CR4_FIXED1); 809 cr4_ones_mask = fixed0 & fixed1; 810 cr4_zeros_mask = ~fixed0 & ~fixed1; 811 812 vmx_msr_init(); 813 814 vmx_capabilities = avail_caps; 815 vmx_initialized = 1; 816 817 return (0); 818 } 819 820 static void 821 vmx_trigger_hostintr(int vector) 822 { 823 #ifdef __FreeBSD__ 824 uintptr_t func; 825 struct gate_descriptor *gd; 826 827 gd = &idt[vector]; 828 829 KASSERT(vector >= 32 && vector <= 255, ("vmx_trigger_hostintr: " 830 "invalid vector %d", vector)); 831 KASSERT(gd->gd_p == 1, ("gate descriptor for vector %d not present", 832 vector)); 833 KASSERT(gd->gd_type == SDT_SYSIGT, ("gate descriptor for vector %d " 834 "has invalid type %d", vector, gd->gd_type)); 835 KASSERT(gd->gd_dpl == SEL_KPL, ("gate descriptor for vector %d " 836 "has invalid dpl %d", vector, gd->gd_dpl)); 837 KASSERT(gd->gd_selector == GSEL(GCODE_SEL, SEL_KPL), ("gate descriptor " 838 "for vector %d has invalid selector %d", vector, gd->gd_selector)); 839 KASSERT(gd->gd_ist == 0, ("gate descriptor for vector %d has invalid " 840 "IST %d", vector, gd->gd_ist)); 841 842 func = ((long)gd->gd_hioffset << 16 | gd->gd_looffset); 843 vmx_call_isr(func); 844 #else 845 VERIFY(vector >= 32 && vector <= 255); 846 vmx_call_isr(vector - 32); 847 #endif /* __FreeBSD__ */ 848 } 849 850 static int 851 vmx_setup_cr_shadow(int which, struct vmcs *vmcs, uint32_t initial) 852 { 853 int error, mask_ident, shadow_ident; 854 uint64_t mask_value; 855 856 if (which != 0 && which != 4) 857 panic("vmx_setup_cr_shadow: unknown cr%d", which); 858 859 if (which == 0) { 860 mask_ident = VMCS_CR0_MASK; 861 mask_value = cr0_ones_mask | cr0_zeros_mask; 862 shadow_ident = VMCS_CR0_SHADOW; 863 } else { 864 mask_ident = VMCS_CR4_MASK; 865 mask_value = cr4_ones_mask | cr4_zeros_mask; 866 shadow_ident = VMCS_CR4_SHADOW; 867 } 868 869 error = vmcs_setreg(vmcs, 0, VMCS_IDENT(mask_ident), mask_value); 870 if (error) 871 return (error); 872 873 error = vmcs_setreg(vmcs, 0, VMCS_IDENT(shadow_ident), initial); 874 if (error) 875 return (error); 876 877 return (0); 878 } 879 #define vmx_setup_cr0_shadow(vmcs,init) vmx_setup_cr_shadow(0, (vmcs), (init)) 880 #define vmx_setup_cr4_shadow(vmcs,init) vmx_setup_cr_shadow(4, (vmcs), (init)) 881 882 static void * 883 vmx_vminit(struct vm *vm, pmap_t pmap) 884 { 885 uint16_t vpid[VM_MAXCPU]; 886 int i, error; 887 struct vmx *vmx; 888 struct vmcs *vmcs; 889 uint32_t exc_bitmap; 890 uint16_t maxcpus; 891 uint32_t proc_ctls, proc2_ctls, pin_ctls; 892 893 vmx = malloc(sizeof(struct vmx), M_VMX, M_WAITOK | M_ZERO); 894 if ((uintptr_t)vmx & PAGE_MASK) { 895 panic("malloc of struct vmx not aligned on %d byte boundary", 896 PAGE_SIZE); 897 } 898 vmx->vm = vm; 899 900 vmx->eptp = eptp(vtophys((vm_offset_t)pmap->pm_pml4)); 901 902 /* 903 * Clean up EPTP-tagged guest physical and combined mappings 904 * 905 * VMX transitions are not required to invalidate any guest physical 906 * mappings. So, it may be possible for stale guest physical mappings 907 * to be present in the processor TLBs. 908 * 909 * Combined mappings for this EP4TA are also invalidated for all VPIDs. 910 */ 911 ept_invalidate_mappings(vmx->eptp); 912 913 msr_bitmap_initialize(vmx->msr_bitmap); 914 915 /* 916 * It is safe to allow direct access to MSR_GSBASE and MSR_FSBASE. 917 * The guest FSBASE and GSBASE are saved and restored during 918 * vm-exit and vm-entry respectively. 
The host FSBASE and GSBASE are 919 * always restored from the vmcs host state area on vm-exit. 920 * 921 * The SYSENTER_CS/ESP/EIP MSRs are identical to FS/GSBASE in 922 * how they are saved/restored so can be directly accessed by the 923 * guest. 924 * 925 * MSR_EFER is saved and restored in the guest VMCS area on a 926 * VM exit and entry respectively. It is also restored from the 927 * host VMCS area on a VM exit. 928 * 929 * The TSC MSR is exposed read-only. Writes are disallowed as 930 * that will impact the host TSC. If the guest does a write 931 * the "use TSC offsetting" execution control is enabled and the 932 * difference between the host TSC and the guest TSC is written 933 * into the TSC offset in the VMCS. 934 */ 935 if (guest_msr_rw(vmx, MSR_GSBASE) || 936 guest_msr_rw(vmx, MSR_FSBASE) || 937 guest_msr_rw(vmx, MSR_SYSENTER_CS_MSR) || 938 guest_msr_rw(vmx, MSR_SYSENTER_ESP_MSR) || 939 guest_msr_rw(vmx, MSR_SYSENTER_EIP_MSR) || 940 guest_msr_rw(vmx, MSR_EFER) || 941 guest_msr_ro(vmx, MSR_TSC)) 942 panic("vmx_vminit: error setting guest msr access"); 943 944 vpid_alloc(vpid, VM_MAXCPU); 945 946 /* Grab the established defaults */ 947 proc_ctls = procbased_ctls; 948 proc2_ctls = procbased_ctls2; 949 pin_ctls = pinbased_ctls; 950 /* For now, default to the available capabilities */ 951 vmx->vmx_caps = vmx_capabilities; 952 953 if (vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW)) { 954 proc_ctls |= PROCBASED_USE_TPR_SHADOW; 955 proc_ctls &= ~PROCBASED_CR8_LOAD_EXITING; 956 proc_ctls &= ~PROCBASED_CR8_STORE_EXITING; 957 } 958 if (vmx_cap_en(vmx, VMX_CAP_APICV)) { 959 ASSERT(vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW)); 960 961 proc2_ctls |= (PROCBASED2_VIRTUALIZE_APIC_ACCESSES | 962 PROCBASED2_APIC_REGISTER_VIRTUALIZATION | 963 PROCBASED2_VIRTUAL_INTERRUPT_DELIVERY); 964 965 error = vm_map_mmio(vm, DEFAULT_APIC_BASE, PAGE_SIZE, 966 APIC_ACCESS_ADDRESS); 967 /* XXX this should really return an error to the caller */ 968 KASSERT(error == 0, ("vm_map_mmio(apicbase) error %d", error)); 969 } 970 if (vmx_cap_en(vmx, VMX_CAP_APICV_PIR)) { 971 ASSERT(vmx_cap_en(vmx, VMX_CAP_APICV)); 972 973 pin_ctls |= PINBASED_POSTED_INTERRUPT; 974 } 975 976 maxcpus = vm_get_maxcpus(vm); 977 for (i = 0; i < maxcpus; i++) { 978 /* 979 * Cache physical address lookups for various components which 980 * may be required inside the critical_enter() section implied 981 * by VMPTRLD() below. 
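		 * (Resolving the vtophys() translations up front keeps that
		 * work, which may involve locks or page-table walks, out of
		 * the preemption-disabled region.)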
982 */ 983 vm_paddr_t msr_bitmap_pa = vtophys(vmx->msr_bitmap); 984 vm_paddr_t apic_page_pa = vtophys(&vmx->apic_page[i]); 985 vm_paddr_t pir_desc_pa = vtophys(&vmx->pir_desc[i]); 986 987 vmcs = &vmx->vmcs[i]; 988 vmcs->identifier = vmx_revision(); 989 vmcs->vmcs_pa = (uint64_t)vtophys(vmcs); 990 error = vmclear(vmcs); 991 if (error != 0) { 992 panic("vmx_vminit: vmclear error %d on vcpu %d\n", 993 error, i); 994 } 995 996 vmx_msr_guest_init(vmx, i); 997 998 error = vmcs_init(vmcs); 999 KASSERT(error == 0, ("vmcs_init error %d", error)); 1000 1001 VMPTRLD(vmcs); 1002 error = 0; 1003 1004 error += vmwrite(VMCS_EPTP, vmx->eptp); 1005 error += vmwrite(VMCS_PIN_BASED_CTLS, pin_ctls); 1006 error += vmwrite(VMCS_PRI_PROC_BASED_CTLS, proc_ctls); 1007 error += vmwrite(VMCS_SEC_PROC_BASED_CTLS, proc2_ctls); 1008 error += vmwrite(VMCS_EXIT_CTLS, exit_ctls); 1009 error += vmwrite(VMCS_ENTRY_CTLS, entry_ctls); 1010 error += vmwrite(VMCS_MSR_BITMAP, msr_bitmap_pa); 1011 error += vmwrite(VMCS_VPID, vpid[i]); 1012 1013 if (guest_l1d_flush && !guest_l1d_flush_sw) { 1014 vmcs_write(VMCS_ENTRY_MSR_LOAD, pmap_kextract( 1015 (vm_offset_t)&msr_load_list[0])); 1016 vmcs_write(VMCS_ENTRY_MSR_LOAD_COUNT, 1017 nitems(msr_load_list)); 1018 vmcs_write(VMCS_EXIT_MSR_STORE, 0); 1019 vmcs_write(VMCS_EXIT_MSR_STORE_COUNT, 0); 1020 } 1021 1022 /* exception bitmap */ 1023 if (vcpu_trace_exceptions(vm, i)) 1024 exc_bitmap = 0xffffffff; 1025 else 1026 exc_bitmap = 1 << IDT_MC; 1027 error += vmwrite(VMCS_EXCEPTION_BITMAP, exc_bitmap); 1028 1029 vmx->ctx[i].guest_dr6 = DBREG_DR6_RESERVED1; 1030 error += vmwrite(VMCS_GUEST_DR7, DBREG_DR7_RESERVED1); 1031 1032 if (vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW)) { 1033 error += vmwrite(VMCS_VIRTUAL_APIC, apic_page_pa); 1034 } 1035 1036 if (vmx_cap_en(vmx, VMX_CAP_APICV)) { 1037 error += vmwrite(VMCS_APIC_ACCESS, APIC_ACCESS_ADDRESS); 1038 error += vmwrite(VMCS_EOI_EXIT0, 0); 1039 error += vmwrite(VMCS_EOI_EXIT1, 0); 1040 error += vmwrite(VMCS_EOI_EXIT2, 0); 1041 error += vmwrite(VMCS_EOI_EXIT3, 0); 1042 } 1043 if (vmx_cap_en(vmx, VMX_CAP_APICV_PIR)) { 1044 error += vmwrite(VMCS_PIR_VECTOR, pirvec); 1045 error += vmwrite(VMCS_PIR_DESC, pir_desc_pa); 1046 } 1047 VMCLEAR(vmcs); 1048 KASSERT(error == 0, ("vmx_vminit: error customizing the vmcs")); 1049 1050 vmx->cap[i].set = 0; 1051 vmx->cap[i].proc_ctls = proc_ctls; 1052 vmx->cap[i].proc_ctls2 = proc2_ctls; 1053 vmx->cap[i].exc_bitmap = exc_bitmap; 1054 1055 vmx->state[i].nextrip = ~0; 1056 vmx->state[i].lastcpu = NOCPU; 1057 vmx->state[i].vpid = vpid[i]; 1058 1059 /* 1060 * Set up the CR0/4 shadows, and init the read shadow 1061 * to the power-on register value from the Intel Sys Arch. 
1062 * CR0 - 0x60000010 1063 * CR4 - 0 1064 */ 1065 error = vmx_setup_cr0_shadow(vmcs, 0x60000010); 1066 if (error != 0) 1067 panic("vmx_setup_cr0_shadow %d", error); 1068 1069 error = vmx_setup_cr4_shadow(vmcs, 0); 1070 if (error != 0) 1071 panic("vmx_setup_cr4_shadow %d", error); 1072 1073 vmx->ctx[i].pmap = pmap; 1074 } 1075 1076 return (vmx); 1077 } 1078 1079 static int 1080 vmx_handle_cpuid(struct vm *vm, int vcpu, struct vmxctx *vmxctx) 1081 { 1082 #ifdef __FreeBSD__ 1083 int handled, func; 1084 1085 func = vmxctx->guest_rax; 1086 #else 1087 int handled; 1088 #endif 1089 1090 handled = x86_emulate_cpuid(vm, vcpu, (uint64_t *)&vmxctx->guest_rax, 1091 (uint64_t *)&vmxctx->guest_rbx, (uint64_t *)&vmxctx->guest_rcx, 1092 (uint64_t *)&vmxctx->guest_rdx); 1093 return (handled); 1094 } 1095 1096 static __inline void 1097 vmx_run_trace(struct vmx *vmx, int vcpu) 1098 { 1099 #ifdef KTR 1100 VCPU_CTR1(vmx->vm, vcpu, "Resume execution at %#lx", vmcs_guest_rip()); 1101 #endif 1102 } 1103 1104 static __inline void 1105 vmx_exit_trace(struct vmx *vmx, int vcpu, uint64_t rip, uint32_t exit_reason, 1106 int handled) 1107 { 1108 #ifdef KTR 1109 VCPU_CTR3(vmx->vm, vcpu, "%s %s vmexit at 0x%0lx", 1110 handled ? "handled" : "unhandled", 1111 exit_reason_to_str(exit_reason), rip); 1112 #endif 1113 DTRACE_PROBE3(vmm__vexit, int, vcpu, uint64_t, rip, 1114 uint32_t, exit_reason); 1115 } 1116 1117 static __inline void 1118 vmx_astpending_trace(struct vmx *vmx, int vcpu, uint64_t rip) 1119 { 1120 #ifdef KTR 1121 VCPU_CTR1(vmx->vm, vcpu, "astpending vmexit at 0x%0lx", rip); 1122 #endif 1123 } 1124 1125 static VMM_STAT_INTEL(VCPU_INVVPID_SAVED, "Number of vpid invalidations saved"); 1126 static VMM_STAT_INTEL(VCPU_INVVPID_DONE, "Number of vpid invalidations done"); 1127 1128 /* 1129 * Invalidate guest mappings identified by its vpid from the TLB. 1130 */ 1131 static __inline void 1132 vmx_invvpid(struct vmx *vmx, int vcpu, pmap_t pmap, int running) 1133 { 1134 struct vmxstate *vmxstate; 1135 struct invvpid_desc invvpid_desc; 1136 1137 vmxstate = &vmx->state[vcpu]; 1138 if (vmxstate->vpid == 0) 1139 return; 1140 1141 if (!running) { 1142 /* 1143 * Set the 'lastcpu' to an invalid host cpu. 1144 * 1145 * This will invalidate TLB entries tagged with the vcpu's 1146 * vpid the next time it runs via vmx_set_pcpu_defaults(). 1147 */ 1148 vmxstate->lastcpu = NOCPU; 1149 return; 1150 } 1151 1152 #ifdef __FreeBSD__ 1153 KASSERT(curthread->td_critnest > 0, ("%s: vcpu %d running outside " 1154 "critical section", __func__, vcpu)); 1155 #endif 1156 1157 /* 1158 * Invalidate all mappings tagged with 'vpid' 1159 * 1160 * We do this because this vcpu was executing on a different host 1161 * cpu when it last ran. We do not track whether it invalidated 1162 * mappings associated with its 'vpid' during that run. So we must 1163 * assume that the mappings associated with 'vpid' on 'curcpu' are 1164 * stale and invalidate them. 1165 * 1166 * Note that we incur this penalty only when the scheduler chooses to 1167 * move the thread associated with this vcpu between host cpus. 1168 * 1169 * Note also that this will invalidate mappings tagged with 'vpid' 1170 * for "all" EP4TAs. 
1171 */ 1172 if (pmap->pm_eptgen == vmx->eptgen[curcpu]) { 1173 invvpid_desc._res1 = 0; 1174 invvpid_desc._res2 = 0; 1175 invvpid_desc.vpid = vmxstate->vpid; 1176 invvpid_desc.linear_addr = 0; 1177 invvpid(INVVPID_TYPE_SINGLE_CONTEXT, invvpid_desc); 1178 vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_DONE, 1); 1179 } else { 1180 /* 1181 * The invvpid can be skipped if an invept is going to 1182 * be performed before entering the guest. The invept 1183 * will invalidate combined mappings tagged with 1184 * 'vmx->eptp' for all vpids. 1185 */ 1186 vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_SAVED, 1); 1187 } 1188 } 1189 1190 static void 1191 vmx_set_pcpu_defaults(struct vmx *vmx, int vcpu, pmap_t pmap) 1192 { 1193 struct vmxstate *vmxstate; 1194 1195 #ifndef __FreeBSD__ 1196 /* 1197 * Regardless of whether the VM appears to have migrated between CPUs, 1198 * save the host sysenter stack pointer. As it points to the kernel 1199 * stack of each thread, the correct value must be maintained for every 1200 * trip into the critical section. 1201 */ 1202 vmcs_write(VMCS_HOST_IA32_SYSENTER_ESP, rdmsr(MSR_SYSENTER_ESP_MSR)); 1203 1204 /* 1205 * Perform any needed TSC_OFFSET adjustment based on TSC_MSR writes or 1206 * migration between host CPUs with differing TSC values. 1207 */ 1208 VERIFY0(vmx_apply_tsc_adjust(vmx, vcpu)); 1209 #endif 1210 1211 vmxstate = &vmx->state[vcpu]; 1212 if (vmxstate->lastcpu == curcpu) 1213 return; 1214 1215 vmxstate->lastcpu = curcpu; 1216 1217 vmm_stat_incr(vmx->vm, vcpu, VCPU_MIGRATIONS, 1); 1218 1219 #ifndef __FreeBSD__ 1220 /* Load the per-CPU IDT address */ 1221 vmcs_write(VMCS_HOST_IDTR_BASE, vmm_get_host_idtrbase()); 1222 #endif 1223 vmcs_write(VMCS_HOST_TR_BASE, vmm_get_host_trbase()); 1224 vmcs_write(VMCS_HOST_GDTR_BASE, vmm_get_host_gdtrbase()); 1225 vmcs_write(VMCS_HOST_GS_BASE, vmm_get_host_gsbase()); 1226 vmx_invvpid(vmx, vcpu, pmap, 1); 1227 } 1228 1229 /* 1230 * We depend on 'procbased_ctls' to have the Interrupt Window Exiting bit set. 
 */
CTASSERT((PROCBASED_CTLS_ONE_SETTING & PROCBASED_INT_WINDOW_EXITING) != 0);

static __inline void
vmx_set_int_window_exiting(struct vmx *vmx, int vcpu)
{
	if ((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) == 0) {
		vmx->cap[vcpu].proc_ctls |= PROCBASED_INT_WINDOW_EXITING;
		vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
		VCPU_CTR0(vmx->vm, vcpu, "Enabling interrupt window exiting");
	}
}

static __inline void
vmx_clear_int_window_exiting(struct vmx *vmx, int vcpu)
{
	KASSERT((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0,
	    ("intr_window_exiting not set: %#x", vmx->cap[vcpu].proc_ctls));
	vmx->cap[vcpu].proc_ctls &= ~PROCBASED_INT_WINDOW_EXITING;
	vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
	VCPU_CTR0(vmx->vm, vcpu, "Disabling interrupt window exiting");
}

static __inline void
vmx_set_nmi_window_exiting(struct vmx *vmx, int vcpu)
{
	if ((vmx->cap[vcpu].proc_ctls & PROCBASED_NMI_WINDOW_EXITING) == 0) {
		vmx->cap[vcpu].proc_ctls |= PROCBASED_NMI_WINDOW_EXITING;
		vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
		VCPU_CTR0(vmx->vm, vcpu, "Enabling NMI window exiting");
	}
}

static __inline void
vmx_clear_nmi_window_exiting(struct vmx *vmx, int vcpu)
{
	KASSERT((vmx->cap[vcpu].proc_ctls & PROCBASED_NMI_WINDOW_EXITING) != 0,
	    ("nmi_window_exiting not set %#x", vmx->cap[vcpu].proc_ctls));
	vmx->cap[vcpu].proc_ctls &= ~PROCBASED_NMI_WINDOW_EXITING;
	vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
	VCPU_CTR0(vmx->vm, vcpu, "Disabling NMI window exiting");
}

#ifdef __FreeBSD__
int
vmx_set_tsc_offset(struct vmx *vmx, int vcpu, uint64_t offset)
{
	int error;

	if ((vmx->cap[vcpu].proc_ctls & PROCBASED_TSC_OFFSET) == 0) {
		vmx->cap[vcpu].proc_ctls |= PROCBASED_TSC_OFFSET;
		vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
		VCPU_CTR0(vmx->vm, vcpu, "Enabling TSC offsetting");
	}

	error = vmwrite(VMCS_TSC_OFFSET, offset);

	return (error);
}
#else /* __FreeBSD__ */
/*
 * Set the TSC adjustment, taking into account the offsets measured between
 * host physical CPUs.  This is required even if the guest has not set a TSC
 * offset, since vCPUs inherit the TSC offset of whatever physical CPU they
 * have migrated onto.  Without this mitigation, un-synched host TSCs will
 * convey the appearance of TSC time-travel to the guest as its vCPUs migrate.
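 *
 * As a purely illustrative example: if vcpu_tsc_offset() returns -1000 for
 * this vCPU and tsc_gethrtime_tick_delta() reports 250 on the current host
 * CPU, the offset programmed into the VMCS below is -1000 + 250 = -750, so
 * the guest reads a consistent TSC no matter which host CPU it lands on.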
1301 */ 1302 static int 1303 vmx_apply_tsc_adjust(struct vmx *vmx, int vcpu) 1304 { 1305 extern hrtime_t tsc_gethrtime_tick_delta(void); 1306 const uint64_t target_offset = (vcpu_tsc_offset(vmx->vm, vcpu) + 1307 (uint64_t)tsc_gethrtime_tick_delta()); 1308 int error = 0; 1309 1310 ASSERT(vmx->cap[vcpu].proc_ctls & PROCBASED_TSC_OFFSET); 1311 1312 if (vmx->tsc_offset_active[vcpu] != target_offset) { 1313 error = vmwrite(VMCS_TSC_OFFSET, target_offset); 1314 vmx->tsc_offset_active[vcpu] = target_offset; 1315 } 1316 1317 return (error); 1318 } 1319 #endif /* __FreeBSD__ */ 1320 1321 #define NMI_BLOCKING (VMCS_INTERRUPTIBILITY_NMI_BLOCKING | \ 1322 VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING) 1323 #define HWINTR_BLOCKING (VMCS_INTERRUPTIBILITY_STI_BLOCKING | \ 1324 VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING) 1325 1326 #ifndef __FreeBSD__ 1327 static uint32_t 1328 vmx_inject_nmi(struct vmx *vmx, int vcpu) 1329 #else 1330 static void 1331 vmx_inject_nmi(struct vmx *vmx, int vcpu) 1332 #endif 1333 { 1334 uint32_t gi, info; 1335 1336 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 1337 KASSERT((gi & NMI_BLOCKING) == 0, ("vmx_inject_nmi: invalid guest " 1338 "interruptibility-state %#x", gi)); 1339 1340 info = vmcs_read(VMCS_ENTRY_INTR_INFO); 1341 KASSERT((info & VMCS_INTR_VALID) == 0, ("vmx_inject_nmi: invalid " 1342 "VM-entry interruption information %#x", info)); 1343 1344 /* 1345 * Inject the virtual NMI. The vector must be the NMI IDT entry 1346 * or the VMCS entry check will fail. 1347 */ 1348 info = IDT_NMI | VMCS_INTR_T_NMI | VMCS_INTR_VALID; 1349 vmcs_write(VMCS_ENTRY_INTR_INFO, info); 1350 1351 VCPU_CTR0(vmx->vm, vcpu, "Injecting vNMI"); 1352 1353 /* Clear the request */ 1354 vm_nmi_clear(vmx->vm, vcpu); 1355 1356 #ifndef __FreeBSD__ 1357 return (info); 1358 #endif 1359 } 1360 1361 static void 1362 vmx_inject_interrupts(struct vmx *vmx, int vcpu, struct vlapic *vlapic, 1363 uint64_t guestrip) 1364 { 1365 uint64_t entryinfo, rflags; 1366 uint32_t gi, info; 1367 int vector; 1368 boolean_t extint_pending = B_FALSE; 1369 1370 vlapic_tmr_update(vlapic); 1371 1372 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 1373 info = vmcs_read(VMCS_ENTRY_INTR_INFO); 1374 1375 if (vmx->state[vcpu].nextrip != guestrip && 1376 (gi & HWINTR_BLOCKING) != 0) { 1377 VCPU_CTR2(vmx->vm, vcpu, "Guest interrupt blocking " 1378 "cleared due to rip change: %#lx/%#lx", 1379 vmx->state[vcpu].nextrip, guestrip); 1380 gi &= ~HWINTR_BLOCKING; 1381 vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi); 1382 } 1383 1384 /* 1385 * It could be that an interrupt is already pending for injection from 1386 * the VMCS. This would be the case if the vCPU exited for conditions 1387 * such as an AST before a vm-entry delivered the injection. 1388 */ 1389 if ((info & VMCS_INTR_VALID) != 0) { 1390 goto cantinject; 1391 } 1392 1393 if (vm_entry_intinfo(vmx->vm, vcpu, &entryinfo)) { 1394 KASSERT((entryinfo & VMCS_INTR_VALID) != 0, ("%s: entry " 1395 "intinfo is not valid: %#lx", __func__, entryinfo)); 1396 1397 KASSERT((info & VMCS_INTR_VALID) == 0, ("%s: cannot inject " 1398 "pending exception: %#lx/%#x", __func__, entryinfo, info)); 1399 1400 info = entryinfo; 1401 vector = info & 0xff; 1402 if (vector == IDT_BP || vector == IDT_OF) { 1403 /* 1404 * VT-x requires #BP and #OF to be injected as software 1405 * exceptions. 
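			 * (These vectors are produced by the INT3 and INTO
			 * instructions, so the VM-entry consistency checks
			 * expect the "software exception" interruption type
			 * for them.)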
			 */
			info &= ~VMCS_INTR_T_MASK;
			info |= VMCS_INTR_T_SWEXCEPTION;
		}

		if (info & VMCS_INTR_DEL_ERRCODE)
			vmcs_write(VMCS_ENTRY_EXCEPTION_ERROR, entryinfo >> 32);

		vmcs_write(VMCS_ENTRY_INTR_INFO, info);
	}

	if (vm_nmi_pending(vmx->vm, vcpu)) {
		int need_nmi_exiting = 1;

		/*
		 * If there are no conditions blocking NMI injection then
		 * inject it directly here otherwise enable "NMI window
		 * exiting" to inject it as soon as we can.
		 *
		 * We also check for STI_BLOCKING because some implementations
		 * don't allow NMI injection in this case. If we are running
		 * on a processor that doesn't have this restriction it will
		 * immediately exit and the NMI will be injected in the
		 * "NMI window exiting" handler.
		 */
		if ((gi & (HWINTR_BLOCKING | NMI_BLOCKING)) == 0) {
			if ((info & VMCS_INTR_VALID) == 0) {
				info = vmx_inject_nmi(vmx, vcpu);
				need_nmi_exiting = 0;
			} else {
				VCPU_CTR1(vmx->vm, vcpu, "Cannot inject NMI "
				    "due to VM-entry intr info %#x", info);
			}
		} else {
			VCPU_CTR1(vmx->vm, vcpu, "Cannot inject NMI due to "
			    "Guest Interruptibility-state %#x", gi);
		}

		if (need_nmi_exiting) {
			vmx_set_nmi_window_exiting(vmx, vcpu);
			return;
		}
	}

	/* Check the AT-PIC and APIC for interrupts. */
	if (vm_extint_pending(vmx->vm, vcpu)) {
		/* Ask the legacy pic for a vector to inject */
		vatpic_pending_intr(vmx->vm, &vector);
		extint_pending = B_TRUE;

		/*
		 * From the Intel SDM, Volume 3, Section "Maskable
		 * Hardware Interrupts":
		 * - maskable interrupt vectors [0,255] can be delivered
		 *   through the INTR pin.
		 */
		KASSERT(vector >= 0 && vector <= 255,
		    ("invalid vector %d from INTR", vector));
	} else if (!vmx_cap_en(vmx, VMX_CAP_APICV)) {
		/* Ask the local apic for a vector to inject */
		if (!vlapic_pending_intr(vlapic, &vector))
			return;

		/*
		 * From the Intel SDM, Volume 3, Section "Maskable
		 * Hardware Interrupts":
		 * - maskable interrupt vectors [16,255] can be delivered
		 *   through the local APIC.
		 */
		KASSERT(vector >= 16 && vector <= 255,
		    ("invalid vector %d from local APIC", vector));
	} else {
		/* No further injection needed */
		return;
	}

	/*
	 * Verify that the guest is interruptible and the above logic has not
	 * already queued an event for injection.
	 */
	if ((gi & HWINTR_BLOCKING) != 0) {
		VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to "
		    "Guest Interruptibility-state %#x", vector, gi);
		goto cantinject;
	}
	if ((info & VMCS_INTR_VALID) != 0) {
		VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to "
		    "VM-entry intr info %#x", vector, info);
		goto cantinject;
	}
	rflags = vmcs_read(VMCS_GUEST_RFLAGS);
	if ((rflags & PSL_I) == 0) {
		VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to "
		    "rflags %#lx", vector, rflags);
		goto cantinject;
	}

	/* Inject the interrupt */
	info = VMCS_INTR_T_HWINTR | VMCS_INTR_VALID;
	info |= vector;
	vmcs_write(VMCS_ENTRY_INTR_INFO, info);

	if (extint_pending) {
		vm_extint_clear(vmx->vm, vcpu);
		vatpic_intr_accepted(vmx->vm, vector);

		/*
		 * After we accepted the current ExtINT the PIC may
		 * have posted another one.
If that is the case, set 1515 * the Interrupt Window Exiting execution control so 1516 * we can inject that one too. 1517 * 1518 * Also, interrupt window exiting allows us to inject any 1519 * pending APIC vector that was preempted by the ExtINT 1520 * as soon as possible. This applies both for the software 1521 * emulated vlapic and the hardware assisted virtual APIC. 1522 */ 1523 vmx_set_int_window_exiting(vmx, vcpu); 1524 } else { 1525 /* Update the Local APIC ISR */ 1526 vlapic_intr_accepted(vlapic, vector); 1527 } 1528 1529 VCPU_CTR1(vmx->vm, vcpu, "Injecting hwintr at vector %d", vector); 1530 return; 1531 1532 cantinject: 1533 /* 1534 * Set the Interrupt Window Exiting execution control so we can inject 1535 * the interrupt as soon as blocking condition goes away. 1536 */ 1537 vmx_set_int_window_exiting(vmx, vcpu); 1538 } 1539 1540 /* 1541 * If the Virtual NMIs execution control is '1' then the logical processor 1542 * tracks virtual-NMI blocking in the Guest Interruptibility-state field of 1543 * the VMCS. An IRET instruction in VMX non-root operation will remove any 1544 * virtual-NMI blocking. 1545 * 1546 * This unblocking occurs even if the IRET causes a fault. In this case the 1547 * hypervisor needs to restore virtual-NMI blocking before resuming the guest. 1548 */ 1549 static void 1550 vmx_restore_nmi_blocking(struct vmx *vmx, int vcpuid) 1551 { 1552 uint32_t gi; 1553 1554 VCPU_CTR0(vmx->vm, vcpuid, "Restore Virtual-NMI blocking"); 1555 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 1556 gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING; 1557 vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi); 1558 } 1559 1560 static void 1561 vmx_clear_nmi_blocking(struct vmx *vmx, int vcpuid) 1562 { 1563 uint32_t gi; 1564 1565 VCPU_CTR0(vmx->vm, vcpuid, "Clear Virtual-NMI blocking"); 1566 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 1567 gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING; 1568 vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi); 1569 } 1570 1571 static void 1572 vmx_assert_nmi_blocking(struct vmx *vmx, int vcpuid) 1573 { 1574 uint32_t gi; 1575 1576 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 1577 KASSERT(gi & VMCS_INTERRUPTIBILITY_NMI_BLOCKING, 1578 ("NMI blocking is not in effect %#x", gi)); 1579 } 1580 1581 static int 1582 vmx_emulate_xsetbv(struct vmx *vmx, int vcpu, struct vm_exit *vmexit) 1583 { 1584 struct vmxctx *vmxctx; 1585 uint64_t xcrval; 1586 const struct xsave_limits *limits; 1587 1588 vmxctx = &vmx->ctx[vcpu]; 1589 limits = vmm_get_xsave_limits(); 1590 1591 /* 1592 * Note that the processor raises a GP# fault on its own if 1593 * xsetbv is executed for CPL != 0, so we do not have to 1594 * emulate that fault here. 1595 */ 1596 1597 /* Only xcr0 is supported. */ 1598 if (vmxctx->guest_rcx != 0) { 1599 vm_inject_gp(vmx->vm, vcpu); 1600 return (HANDLED); 1601 } 1602 1603 /* We only handle xcr0 if both the host and guest have XSAVE enabled. */ 1604 if (!limits->xsave_enabled || !(vmcs_read(VMCS_GUEST_CR4) & CR4_XSAVE)) { 1605 vm_inject_ud(vmx->vm, vcpu); 1606 return (HANDLED); 1607 } 1608 1609 xcrval = vmxctx->guest_rdx << 32 | (vmxctx->guest_rax & 0xffffffff); 1610 if ((xcrval & ~limits->xcr0_allowed) != 0) { 1611 vm_inject_gp(vmx->vm, vcpu); 1612 return (HANDLED); 1613 } 1614 1615 if (!(xcrval & XFEATURE_ENABLED_X87)) { 1616 vm_inject_gp(vmx->vm, vcpu); 1617 return (HANDLED); 1618 } 1619 1620 /* AVX (YMM_Hi128) requires SSE. 
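	 * (This relies on XFEATURE_AVX being a composite mask that includes
	 * the SSE state bit, so the test below rejects enabling YMM state
	 * without its SSE prerequisite.)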
*/ 1621 if (xcrval & XFEATURE_ENABLED_AVX && 1622 (xcrval & XFEATURE_AVX) != XFEATURE_AVX) { 1623 vm_inject_gp(vmx->vm, vcpu); 1624 return (HANDLED); 1625 } 1626 1627 /* 1628 * AVX512 requires base AVX (YMM_Hi128) as well as OpMask, 1629 * ZMM_Hi256, and Hi16_ZMM. 1630 */ 1631 if (xcrval & XFEATURE_AVX512 && 1632 (xcrval & (XFEATURE_AVX512 | XFEATURE_AVX)) != 1633 (XFEATURE_AVX512 | XFEATURE_AVX)) { 1634 vm_inject_gp(vmx->vm, vcpu); 1635 return (HANDLED); 1636 } 1637 1638 /* 1639 * Intel MPX requires both bound register state flags to be 1640 * set. 1641 */ 1642 if (((xcrval & XFEATURE_ENABLED_BNDREGS) != 0) != 1643 ((xcrval & XFEATURE_ENABLED_BNDCSR) != 0)) { 1644 vm_inject_gp(vmx->vm, vcpu); 1645 return (HANDLED); 1646 } 1647 1648 /* 1649 * This runs "inside" vmrun() with the guest's FPU state, so 1650 * modifying xcr0 directly modifies the guest's xcr0, not the 1651 * host's. 1652 */ 1653 load_xcr(0, xcrval); 1654 return (HANDLED); 1655 } 1656 1657 static uint64_t 1658 vmx_get_guest_reg(struct vmx *vmx, int vcpu, int ident) 1659 { 1660 const struct vmxctx *vmxctx; 1661 1662 vmxctx = &vmx->ctx[vcpu]; 1663 1664 switch (ident) { 1665 case 0: 1666 return (vmxctx->guest_rax); 1667 case 1: 1668 return (vmxctx->guest_rcx); 1669 case 2: 1670 return (vmxctx->guest_rdx); 1671 case 3: 1672 return (vmxctx->guest_rbx); 1673 case 4: 1674 return (vmcs_read(VMCS_GUEST_RSP)); 1675 case 5: 1676 return (vmxctx->guest_rbp); 1677 case 6: 1678 return (vmxctx->guest_rsi); 1679 case 7: 1680 return (vmxctx->guest_rdi); 1681 case 8: 1682 return (vmxctx->guest_r8); 1683 case 9: 1684 return (vmxctx->guest_r9); 1685 case 10: 1686 return (vmxctx->guest_r10); 1687 case 11: 1688 return (vmxctx->guest_r11); 1689 case 12: 1690 return (vmxctx->guest_r12); 1691 case 13: 1692 return (vmxctx->guest_r13); 1693 case 14: 1694 return (vmxctx->guest_r14); 1695 case 15: 1696 return (vmxctx->guest_r15); 1697 default: 1698 panic("invalid vmx register %d", ident); 1699 } 1700 } 1701 1702 static void 1703 vmx_set_guest_reg(struct vmx *vmx, int vcpu, int ident, uint64_t regval) 1704 { 1705 struct vmxctx *vmxctx; 1706 1707 vmxctx = &vmx->ctx[vcpu]; 1708 1709 switch (ident) { 1710 case 0: 1711 vmxctx->guest_rax = regval; 1712 break; 1713 case 1: 1714 vmxctx->guest_rcx = regval; 1715 break; 1716 case 2: 1717 vmxctx->guest_rdx = regval; 1718 break; 1719 case 3: 1720 vmxctx->guest_rbx = regval; 1721 break; 1722 case 4: 1723 vmcs_write(VMCS_GUEST_RSP, regval); 1724 break; 1725 case 5: 1726 vmxctx->guest_rbp = regval; 1727 break; 1728 case 6: 1729 vmxctx->guest_rsi = regval; 1730 break; 1731 case 7: 1732 vmxctx->guest_rdi = regval; 1733 break; 1734 case 8: 1735 vmxctx->guest_r8 = regval; 1736 break; 1737 case 9: 1738 vmxctx->guest_r9 = regval; 1739 break; 1740 case 10: 1741 vmxctx->guest_r10 = regval; 1742 break; 1743 case 11: 1744 vmxctx->guest_r11 = regval; 1745 break; 1746 case 12: 1747 vmxctx->guest_r12 = regval; 1748 break; 1749 case 13: 1750 vmxctx->guest_r13 = regval; 1751 break; 1752 case 14: 1753 vmxctx->guest_r14 = regval; 1754 break; 1755 case 15: 1756 vmxctx->guest_r15 = regval; 1757 break; 1758 default: 1759 panic("invalid vmx register %d", ident); 1760 } 1761 } 1762 1763 static int 1764 vmx_emulate_cr0_access(struct vmx *vmx, int vcpu, uint64_t exitqual) 1765 { 1766 uint64_t crval, regval; 1767 1768 /* We only handle mov to %cr0 at this time */ 1769 if ((exitqual & 0xf0) != 0x00) 1770 return (UNHANDLED); 1771 1772 regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf); 1773 1774 vmcs_write(VMCS_CR0_SHADOW, regval); 1775 
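	/*
	 * Fold in the host-mandated fixed bits before loading the real CR0.
	 * As a hypothetical example: with cr0_ones_mask = CR0_NE and
	 * cr0_zeros_mask = (CR0_NW | CR0_CD), a guest value of 0x60000011
	 * (CD | NW | ET | PE) becomes 0x31 (NE | ET | PE).  The guest still
	 * observes its own value through the CR0 read shadow written above.
	 */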
1776 crval = regval | cr0_ones_mask; 1777 crval &= ~cr0_zeros_mask; 1778 vmcs_write(VMCS_GUEST_CR0, crval); 1779 1780 if (regval & CR0_PG) { 1781 uint64_t efer, entry_ctls; 1782 1783 /* 1784 * If CR0.PG is 1 and EFER.LME is 1 then EFER.LMA and 1785 * the "IA-32e mode guest" bit in VM-entry control must be 1786 * equal. 1787 */ 1788 efer = vmcs_read(VMCS_GUEST_IA32_EFER); 1789 if (efer & EFER_LME) { 1790 efer |= EFER_LMA; 1791 vmcs_write(VMCS_GUEST_IA32_EFER, efer); 1792 entry_ctls = vmcs_read(VMCS_ENTRY_CTLS); 1793 entry_ctls |= VM_ENTRY_GUEST_LMA; 1794 vmcs_write(VMCS_ENTRY_CTLS, entry_ctls); 1795 } 1796 } 1797 1798 return (HANDLED); 1799 } 1800 1801 static int 1802 vmx_emulate_cr4_access(struct vmx *vmx, int vcpu, uint64_t exitqual) 1803 { 1804 uint64_t crval, regval; 1805 1806 /* We only handle mov to %cr4 at this time */ 1807 if ((exitqual & 0xf0) != 0x00) 1808 return (UNHANDLED); 1809 1810 regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf); 1811 1812 vmcs_write(VMCS_CR4_SHADOW, regval); 1813 1814 crval = regval | cr4_ones_mask; 1815 crval &= ~cr4_zeros_mask; 1816 vmcs_write(VMCS_GUEST_CR4, crval); 1817 1818 return (HANDLED); 1819 } 1820 1821 static int 1822 vmx_emulate_cr8_access(struct vmx *vmx, int vcpu, uint64_t exitqual) 1823 { 1824 struct vlapic *vlapic; 1825 uint64_t cr8; 1826 int regnum; 1827 1828 /* We only handle mov %cr8 to/from a register at this time. */ 1829 if ((exitqual & 0xe0) != 0x00) { 1830 return (UNHANDLED); 1831 } 1832 1833 vlapic = vm_lapic(vmx->vm, vcpu); 1834 regnum = (exitqual >> 8) & 0xf; 1835 if (exitqual & 0x10) { 1836 cr8 = vlapic_get_cr8(vlapic); 1837 vmx_set_guest_reg(vmx, vcpu, regnum, cr8); 1838 } else { 1839 cr8 = vmx_get_guest_reg(vmx, vcpu, regnum); 1840 vlapic_set_cr8(vlapic, cr8); 1841 } 1842 1843 return (HANDLED); 1844 } 1845 1846 /* 1847 * From section "Guest Register State" in the Intel SDM: CPL = SS.DPL 1848 */ 1849 static int 1850 vmx_cpl(void) 1851 { 1852 uint32_t ssar; 1853 1854 ssar = vmcs_read(VMCS_GUEST_SS_ACCESS_RIGHTS); 1855 return ((ssar >> 5) & 0x3); 1856 } 1857 1858 static enum vm_cpu_mode 1859 vmx_cpu_mode(void) 1860 { 1861 uint32_t csar; 1862 1863 if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LMA) { 1864 csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS); 1865 if (csar & 0x2000) 1866 return (CPU_MODE_64BIT); /* CS.L = 1 */ 1867 else 1868 return (CPU_MODE_COMPATIBILITY); 1869 } else if (vmcs_read(VMCS_GUEST_CR0) & CR0_PE) { 1870 return (CPU_MODE_PROTECTED); 1871 } else { 1872 return (CPU_MODE_REAL); 1873 } 1874 } 1875 1876 static enum vm_paging_mode 1877 vmx_paging_mode(void) 1878 { 1879 1880 if (!(vmcs_read(VMCS_GUEST_CR0) & CR0_PG)) 1881 return (PAGING_MODE_FLAT); 1882 if (!(vmcs_read(VMCS_GUEST_CR4) & CR4_PAE)) 1883 return (PAGING_MODE_32); 1884 if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LME) 1885 return (PAGING_MODE_64); 1886 else 1887 return (PAGING_MODE_PAE); 1888 } 1889 1890 static void 1891 vmx_paging_info(struct vm_guest_paging *paging) 1892 { 1893 paging->cr3 = vmcs_guest_cr3(); 1894 paging->cpl = vmx_cpl(); 1895 paging->cpu_mode = vmx_cpu_mode(); 1896 paging->paging_mode = vmx_paging_mode(); 1897 } 1898 1899 static void 1900 vmexit_mmio_emul(struct vm_exit *vmexit, struct vie *vie, uint64_t gpa, 1901 uint64_t gla) 1902 { 1903 struct vm_guest_paging paging; 1904 uint32_t csar; 1905 1906 vmexit->exitcode = VM_EXITCODE_MMIO_EMUL; 1907 vmexit->inst_length = 0; 1908 vmexit->u.mmio_emul.gpa = gpa; 1909 vmexit->u.mmio_emul.gla = gla; 1910 vmx_paging_info(&paging); 1911 1912 switch (paging.cpu_mode) { 1913 case CPU_MODE_REAL: 
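		/*
		 * In real mode the CS base maintained by the CPU is
		 * (selector << 4), so pass it through for the instruction
		 * decoder; cs_d = 0 reflects the 16-bit default operand size.
		 */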
1914 vmexit->u.mmio_emul.cs_base = vmcs_read(VMCS_GUEST_CS_BASE); 1915 vmexit->u.mmio_emul.cs_d = 0; 1916 break; 1917 case CPU_MODE_PROTECTED: 1918 case CPU_MODE_COMPATIBILITY: 1919 vmexit->u.mmio_emul.cs_base = vmcs_read(VMCS_GUEST_CS_BASE); 1920 csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS); 1921 vmexit->u.mmio_emul.cs_d = SEG_DESC_DEF32(csar); 1922 break; 1923 default: 1924 vmexit->u.mmio_emul.cs_base = 0; 1925 vmexit->u.mmio_emul.cs_d = 0; 1926 break; 1927 } 1928 1929 vie_init_mmio(vie, NULL, 0, &paging, gpa); 1930 } 1931 1932 static void 1933 vmexit_inout(struct vm_exit *vmexit, struct vie *vie, uint64_t qual, 1934 uint32_t eax) 1935 { 1936 struct vm_guest_paging paging; 1937 struct vm_inout *inout; 1938 1939 inout = &vmexit->u.inout; 1940 1941 inout->bytes = (qual & 0x7) + 1; 1942 inout->flags = 0; 1943 inout->flags |= (qual & 0x8) ? INOUT_IN : 0; 1944 inout->flags |= (qual & 0x10) ? INOUT_STR : 0; 1945 inout->flags |= (qual & 0x20) ? INOUT_REP : 0; 1946 inout->port = (uint16_t)(qual >> 16); 1947 inout->eax = eax; 1948 if (inout->flags & INOUT_STR) { 1949 uint64_t inst_info; 1950 1951 inst_info = vmcs_read(VMCS_EXIT_INSTRUCTION_INFO); 1952 1953 /* 1954 * Bits 7-9 encode the address size of ins/outs operations where 1955 * the 0/1/2 values correspond to 16/32/64 bit sizes. 1956 */ 1957 inout->addrsize = 2 << (1 + ((inst_info >> 7) & 0x3)); 1958 VERIFY(inout->addrsize == 2 || inout->addrsize == 4 || 1959 inout->addrsize == 8); 1960 1961 if (inout->flags & INOUT_IN) { 1962 /* 1963 * The bits describing the segment in INSTRUCTION_INFO 1964 * are not defined for ins, leaving it to system 1965 * software to assume %es (encoded as 0) 1966 */ 1967 inout->segment = 0; 1968 } else { 1969 /* 1970 * Bits 15-17 encode the segment for OUTS. 1971 * This value follows the standard x86 segment order. 1972 */ 1973 inout->segment = (inst_info >> 15) & 0x7; 1974 } 1975 } 1976 1977 vmexit->exitcode = VM_EXITCODE_INOUT; 1978 vmx_paging_info(&paging); 1979 vie_init_inout(vie, inout, vmexit->inst_length, &paging); 1980 1981 /* The in/out emulation will handle advancing %rip */ 1982 vmexit->inst_length = 0; 1983 } 1984 1985 static int 1986 ept_fault_type(uint64_t ept_qual) 1987 { 1988 int fault_type; 1989 1990 if (ept_qual & EPT_VIOLATION_DATA_WRITE) 1991 fault_type = VM_PROT_WRITE; 1992 else if (ept_qual & EPT_VIOLATION_INST_FETCH) 1993 fault_type = VM_PROT_EXECUTE; 1994 else 1995 fault_type= VM_PROT_READ; 1996 1997 return (fault_type); 1998 } 1999 2000 static bool 2001 ept_emulation_fault(uint64_t ept_qual) 2002 { 2003 int read, write; 2004 2005 /* EPT fault on an instruction fetch doesn't make sense here */ 2006 if (ept_qual & EPT_VIOLATION_INST_FETCH) 2007 return (false); 2008 2009 /* EPT fault must be a read fault or a write fault */ 2010 read = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0; 2011 write = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0; 2012 if ((read | write) == 0) 2013 return (false); 2014 2015 /* 2016 * The EPT violation must have been caused by accessing a 2017 * guest-physical address that is a translation of a guest-linear 2018 * address. 2019 */ 2020 if ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 || 2021 (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) { 2022 return (false); 2023 } 2024 2025 return (true); 2026 } 2027 2028 static __inline int 2029 apic_access_virtualization(struct vmx *vmx, int vcpuid) 2030 { 2031 uint32_t proc_ctls2; 2032 2033 proc_ctls2 = vmx->cap[vcpuid].proc_ctls2; 2034 return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) ? 
1 : 0); 2035 } 2036 2037 static __inline int 2038 x2apic_virtualization(struct vmx *vmx, int vcpuid) 2039 { 2040 uint32_t proc_ctls2; 2041 2042 proc_ctls2 = vmx->cap[vcpuid].proc_ctls2; 2043 return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_X2APIC_MODE) ? 1 : 0); 2044 } 2045 2046 static int 2047 vmx_handle_apic_write(struct vmx *vmx, int vcpuid, struct vlapic *vlapic, 2048 uint64_t qual) 2049 { 2050 int error, handled, offset; 2051 uint32_t *apic_regs, vector; 2052 bool retu; 2053 2054 handled = HANDLED; 2055 offset = APIC_WRITE_OFFSET(qual); 2056 2057 if (!apic_access_virtualization(vmx, vcpuid)) { 2058 /* 2059 * In general there should not be any APIC write VM-exits 2060 * unless APIC-access virtualization is enabled. 2061 * 2062 * However self-IPI virtualization can legitimately trigger 2063 * an APIC-write VM-exit so treat it specially. 2064 */ 2065 if (x2apic_virtualization(vmx, vcpuid) && 2066 offset == APIC_OFFSET_SELF_IPI) { 2067 apic_regs = (uint32_t *)(vlapic->apic_page); 2068 vector = apic_regs[APIC_OFFSET_SELF_IPI / 4]; 2069 vlapic_self_ipi_handler(vlapic, vector); 2070 return (HANDLED); 2071 } else 2072 return (UNHANDLED); 2073 } 2074 2075 switch (offset) { 2076 case APIC_OFFSET_ID: 2077 vlapic_id_write_handler(vlapic); 2078 break; 2079 case APIC_OFFSET_LDR: 2080 vlapic_ldr_write_handler(vlapic); 2081 break; 2082 case APIC_OFFSET_DFR: 2083 vlapic_dfr_write_handler(vlapic); 2084 break; 2085 case APIC_OFFSET_SVR: 2086 vlapic_svr_write_handler(vlapic); 2087 break; 2088 case APIC_OFFSET_ESR: 2089 vlapic_esr_write_handler(vlapic); 2090 break; 2091 case APIC_OFFSET_ICR_LOW: 2092 retu = false; 2093 error = vlapic_icrlo_write_handler(vlapic, &retu); 2094 if (error != 0 || retu) 2095 handled = UNHANDLED; 2096 break; 2097 case APIC_OFFSET_CMCI_LVT: 2098 case APIC_OFFSET_TIMER_LVT ... APIC_OFFSET_ERROR_LVT: 2099 vlapic_lvt_write_handler(vlapic, offset); 2100 break; 2101 case APIC_OFFSET_TIMER_ICR: 2102 vlapic_icrtmr_write_handler(vlapic); 2103 break; 2104 case APIC_OFFSET_TIMER_DCR: 2105 vlapic_dcr_write_handler(vlapic); 2106 break; 2107 default: 2108 handled = UNHANDLED; 2109 break; 2110 } 2111 return (handled); 2112 } 2113 2114 static bool 2115 apic_access_fault(struct vmx *vmx, int vcpuid, uint64_t gpa) 2116 { 2117 2118 if (apic_access_virtualization(vmx, vcpuid) && 2119 (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE)) 2120 return (true); 2121 else 2122 return (false); 2123 } 2124 2125 static int 2126 vmx_handle_apic_access(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit) 2127 { 2128 uint64_t qual; 2129 int access_type, offset, allowed; 2130 struct vie *vie; 2131 2132 if (!apic_access_virtualization(vmx, vcpuid)) 2133 return (UNHANDLED); 2134 2135 qual = vmexit->u.vmx.exit_qualification; 2136 access_type = APIC_ACCESS_TYPE(qual); 2137 offset = APIC_ACCESS_OFFSET(qual); 2138 2139 allowed = 0; 2140 if (access_type == 0) { 2141 /* 2142 * Read data access to the following registers is expected. 2143 */ 2144 switch (offset) { 2145 case APIC_OFFSET_APR: 2146 case APIC_OFFSET_PPR: 2147 case APIC_OFFSET_RRR: 2148 case APIC_OFFSET_CMCI_LVT: 2149 case APIC_OFFSET_TIMER_CCR: 2150 allowed = 1; 2151 break; 2152 default: 2153 break; 2154 } 2155 } else if (access_type == 1) { 2156 /* 2157 * Write data access to the following registers is expected. 2158 */ 2159 switch (offset) { 2160 case APIC_OFFSET_VER: 2161 case APIC_OFFSET_APR: 2162 case APIC_OFFSET_PPR: 2163 case APIC_OFFSET_RRR: 2164 case APIC_OFFSET_ISR0 ... APIC_OFFSET_ISR7: 2165 case APIC_OFFSET_TMR0 ... 
APIC_OFFSET_TMR7: 2166 case APIC_OFFSET_IRR0 ... APIC_OFFSET_IRR7: 2167 case APIC_OFFSET_CMCI_LVT: 2168 case APIC_OFFSET_TIMER_CCR: 2169 allowed = 1; 2170 break; 2171 default: 2172 break; 2173 } 2174 } 2175 2176 if (allowed) { 2177 vie = vm_vie_ctx(vmx->vm, vcpuid); 2178 vmexit_mmio_emul(vmexit, vie, DEFAULT_APIC_BASE + offset, 2179 VIE_INVALID_GLA); 2180 } 2181 2182 /* 2183 * Regardless of whether the APIC-access is allowed this handler 2184 * always returns UNHANDLED: 2185 * - if the access is allowed then it is handled by emulating the 2186 * instruction that caused the VM-exit (outside the critical section) 2187 * - if the access is not allowed then it will be converted to an 2188 * exitcode of VM_EXITCODE_VMX and will be dealt with in userland. 2189 */ 2190 return (UNHANDLED); 2191 } 2192 2193 static enum task_switch_reason 2194 vmx_task_switch_reason(uint64_t qual) 2195 { 2196 int reason; 2197 2198 reason = (qual >> 30) & 0x3; 2199 switch (reason) { 2200 case 0: 2201 return (TSR_CALL); 2202 case 1: 2203 return (TSR_IRET); 2204 case 2: 2205 return (TSR_JMP); 2206 case 3: 2207 return (TSR_IDT_GATE); 2208 default: 2209 panic("%s: invalid reason %d", __func__, reason); 2210 } 2211 } 2212 2213 static int 2214 emulate_wrmsr(struct vmx *vmx, int vcpuid, u_int num, uint64_t val, bool *retu) 2215 { 2216 int error; 2217 2218 if (lapic_msr(num)) 2219 error = lapic_wrmsr(vmx->vm, vcpuid, num, val, retu); 2220 else 2221 error = vmx_wrmsr(vmx, vcpuid, num, val, retu); 2222 2223 return (error); 2224 } 2225 2226 static int 2227 emulate_rdmsr(struct vmx *vmx, int vcpuid, u_int num, bool *retu) 2228 { 2229 struct vmxctx *vmxctx; 2230 uint64_t result; 2231 uint32_t eax, edx; 2232 int error; 2233 2234 if (lapic_msr(num)) 2235 error = lapic_rdmsr(vmx->vm, vcpuid, num, &result, retu); 2236 else 2237 error = vmx_rdmsr(vmx, vcpuid, num, &result, retu); 2238 2239 if (error == 0) { 2240 eax = result; 2241 vmxctx = &vmx->ctx[vcpuid]; 2242 error = vmxctx_setreg(vmxctx, VM_REG_GUEST_RAX, eax); 2243 KASSERT(error == 0, ("vmxctx_setreg(rax) error %d", error)); 2244 2245 edx = result >> 32; 2246 error = vmxctx_setreg(vmxctx, VM_REG_GUEST_RDX, edx); 2247 KASSERT(error == 0, ("vmxctx_setreg(rdx) error %d", error)); 2248 } 2249 2250 return (error); 2251 } 2252 2253 #ifndef __FreeBSD__ 2254 #define __predict_false(x) (x) 2255 #endif 2256 2257 static int 2258 vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit) 2259 { 2260 int error, errcode, errcode_valid, handled; 2261 struct vmxctx *vmxctx; 2262 struct vie *vie; 2263 struct vlapic *vlapic; 2264 struct vm_task_switch *ts; 2265 uint32_t eax, ecx, edx, idtvec_info, idtvec_err, intr_info, inst_info; 2266 uint32_t intr_type, intr_vec, reason; 2267 uint64_t exitintinfo, qual, gpa; 2268 bool retu; 2269 2270 CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_VIRTUAL_NMI) != 0); 2271 CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_NMI_EXITING) != 0); 2272 2273 handled = UNHANDLED; 2274 vmxctx = &vmx->ctx[vcpu]; 2275 2276 qual = vmexit->u.vmx.exit_qualification; 2277 reason = vmexit->u.vmx.exit_reason; 2278 vmexit->exitcode = VM_EXITCODE_BOGUS; 2279 2280 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_COUNT, 1); 2281 SDT_PROBE3(vmm, vmx, exit, entry, vmx, vcpu, vmexit); 2282 2283 /* 2284 * VM-entry failures during or after loading guest state. 2285 * 2286 * These VM-exits are uncommon but must be handled specially 2287 * as most VM-exit fields are not populated as usual. 
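 *
 * Only the machine-check-during-entry case is special-cased here: it
 * vectors straight to the host machine-check handler and the exit is
 * then treated as handled. Any other entry-failure reason falls
 * through to the normal exit-reason dispatch below.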
2288 */ 2289 if (__predict_false(reason == EXIT_REASON_MCE_DURING_ENTRY)) { 2290 VCPU_CTR0(vmx->vm, vcpu, "Handling MCE during VM-entry"); 2291 #ifdef __FreeBSD__ 2292 __asm __volatile("int $18"); 2293 #else 2294 vmm_call_trap(T_MCE); 2295 #endif 2296 return (1); 2297 } 2298 2299 /* 2300 * VM exits that can be triggered during event delivery need to 2301 * be handled specially by re-injecting the event if the IDT 2302 * vectoring information field's valid bit is set. 2303 * 2304 * See "Information for VM Exits During Event Delivery" in Intel SDM 2305 * for details. 2306 */ 2307 idtvec_info = vmcs_idt_vectoring_info(); 2308 if (idtvec_info & VMCS_IDT_VEC_VALID) { 2309 idtvec_info &= ~(1 << 12); /* clear undefined bit */ 2310 exitintinfo = idtvec_info; 2311 if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) { 2312 idtvec_err = vmcs_idt_vectoring_err(); 2313 exitintinfo |= (uint64_t)idtvec_err << 32; 2314 } 2315 error = vm_exit_intinfo(vmx->vm, vcpu, exitintinfo); 2316 KASSERT(error == 0, ("%s: vm_set_intinfo error %d", 2317 __func__, error)); 2318 2319 /* 2320 * If 'virtual NMIs' are being used and the VM-exit 2321 * happened while injecting an NMI during the previous 2322 * VM-entry, then clear "blocking by NMI" in the 2323 * Guest Interruptibility-State so the NMI can be 2324 * reinjected on the subsequent VM-entry. 2325 * 2326 * However, if the NMI was being delivered through a task 2327 * gate, then the new task must start execution with NMIs 2328 * blocked so don't clear NMI blocking in this case. 2329 */ 2330 intr_type = idtvec_info & VMCS_INTR_T_MASK; 2331 if (intr_type == VMCS_INTR_T_NMI) { 2332 if (reason != EXIT_REASON_TASK_SWITCH) 2333 vmx_clear_nmi_blocking(vmx, vcpu); 2334 else 2335 vmx_assert_nmi_blocking(vmx, vcpu); 2336 } 2337 2338 /* 2339 * Update VM-entry instruction length if the event being 2340 * delivered was a software interrupt or software exception. 2341 */ 2342 if (intr_type == VMCS_INTR_T_SWINTR || 2343 intr_type == VMCS_INTR_T_PRIV_SWEXCEPTION || 2344 intr_type == VMCS_INTR_T_SWEXCEPTION) { 2345 vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length); 2346 } 2347 } 2348 2349 switch (reason) { 2350 case EXIT_REASON_TASK_SWITCH: 2351 ts = &vmexit->u.task_switch; 2352 ts->tsssel = qual & 0xffff; 2353 ts->reason = vmx_task_switch_reason(qual); 2354 ts->ext = 0; 2355 ts->errcode_valid = 0; 2356 vmx_paging_info(&ts->paging); 2357 /* 2358 * If the task switch was due to a CALL, JMP, IRET, software 2359 * interrupt (INT n) or software exception (INT3, INTO), 2360 * then the saved %rip references the instruction that caused 2361 * the task switch. The instruction length field in the VMCS 2362 * is valid in this case. 2363 * 2364 * In all other cases (e.g., NMI, hardware exception) the 2365 * saved %rip is one that would have been saved in the old TSS 2366 * had the task switch completed normally so the instruction 2367 * length field is not needed in this case and is explicitly 2368 * set to 0. 
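 *
 * The TSR_IDT_GATE case below inspects the IDT-vectoring information
 * to tell software-initiated gates (INT n, INT3, INTO) apart from
 * external events, and records any pending error code for the latter.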
2369 */ 2370 if (ts->reason == TSR_IDT_GATE) { 2371 KASSERT(idtvec_info & VMCS_IDT_VEC_VALID, 2372 ("invalid idtvec_info %#x for IDT task switch", 2373 idtvec_info)); 2374 intr_type = idtvec_info & VMCS_INTR_T_MASK; 2375 if (intr_type != VMCS_INTR_T_SWINTR && 2376 intr_type != VMCS_INTR_T_SWEXCEPTION && 2377 intr_type != VMCS_INTR_T_PRIV_SWEXCEPTION) { 2378 /* Task switch triggered by external event */ 2379 ts->ext = 1; 2380 vmexit->inst_length = 0; 2381 if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) { 2382 ts->errcode_valid = 1; 2383 ts->errcode = vmcs_idt_vectoring_err(); 2384 } 2385 } 2386 } 2387 vmexit->exitcode = VM_EXITCODE_TASK_SWITCH; 2388 SDT_PROBE4(vmm, vmx, exit, taskswitch, vmx, vcpu, vmexit, ts); 2389 VCPU_CTR4(vmx->vm, vcpu, "task switch reason %d, tss 0x%04x, " 2390 "%s errcode 0x%016lx", ts->reason, ts->tsssel, 2391 ts->ext ? "external" : "internal", 2392 ((uint64_t)ts->errcode << 32) | ts->errcode_valid); 2393 break; 2394 case EXIT_REASON_CR_ACCESS: 2395 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CR_ACCESS, 1); 2396 SDT_PROBE4(vmm, vmx, exit, craccess, vmx, vcpu, vmexit, qual); 2397 switch (qual & 0xf) { 2398 case 0: 2399 handled = vmx_emulate_cr0_access(vmx, vcpu, qual); 2400 break; 2401 case 4: 2402 handled = vmx_emulate_cr4_access(vmx, vcpu, qual); 2403 break; 2404 case 8: 2405 handled = vmx_emulate_cr8_access(vmx, vcpu, qual); 2406 break; 2407 } 2408 break; 2409 case EXIT_REASON_RDMSR: 2410 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_RDMSR, 1); 2411 retu = false; 2412 ecx = vmxctx->guest_rcx; 2413 VCPU_CTR1(vmx->vm, vcpu, "rdmsr 0x%08x", ecx); 2414 SDT_PROBE4(vmm, vmx, exit, rdmsr, vmx, vcpu, vmexit, ecx); 2415 error = emulate_rdmsr(vmx, vcpu, ecx, &retu); 2416 if (error) { 2417 vmexit->exitcode = VM_EXITCODE_RDMSR; 2418 vmexit->u.msr.code = ecx; 2419 } else if (!retu) { 2420 handled = HANDLED; 2421 } else { 2422 /* Return to userspace with a valid exitcode */ 2423 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, 2424 ("emulate_rdmsr retu with bogus exitcode")); 2425 } 2426 break; 2427 case EXIT_REASON_WRMSR: 2428 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_WRMSR, 1); 2429 retu = false; 2430 eax = vmxctx->guest_rax; 2431 ecx = vmxctx->guest_rcx; 2432 edx = vmxctx->guest_rdx; 2433 VCPU_CTR2(vmx->vm, vcpu, "wrmsr 0x%08x value 0x%016lx", 2434 ecx, (uint64_t)edx << 32 | eax); 2435 SDT_PROBE5(vmm, vmx, exit, wrmsr, vmx, vmexit, vcpu, ecx, 2436 (uint64_t)edx << 32 | eax); 2437 error = emulate_wrmsr(vmx, vcpu, ecx, 2438 (uint64_t)edx << 32 | eax, &retu); 2439 if (error) { 2440 vmexit->exitcode = VM_EXITCODE_WRMSR; 2441 vmexit->u.msr.code = ecx; 2442 vmexit->u.msr.wval = (uint64_t)edx << 32 | eax; 2443 } else if (!retu) { 2444 handled = HANDLED; 2445 } else { 2446 /* Return to userspace with a valid exitcode */ 2447 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, 2448 ("emulate_wrmsr retu with bogus exitcode")); 2449 } 2450 break; 2451 case EXIT_REASON_HLT: 2452 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_HLT, 1); 2453 SDT_PROBE3(vmm, vmx, exit, halt, vmx, vcpu, vmexit); 2454 vmexit->exitcode = VM_EXITCODE_HLT; 2455 vmexit->u.hlt.rflags = vmcs_read(VMCS_GUEST_RFLAGS); 2456 if (vmx_cap_en(vmx, VMX_CAP_APICV)) { 2457 vmexit->u.hlt.intr_status = 2458 vmcs_read(VMCS_GUEST_INTR_STATUS); 2459 } else { 2460 vmexit->u.hlt.intr_status = 0; 2461 } 2462 break; 2463 case EXIT_REASON_MTF: 2464 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MTRAP, 1); 2465 SDT_PROBE3(vmm, vmx, exit, mtrap, vmx, vcpu, vmexit); 2466 vmexit->exitcode = VM_EXITCODE_MTRAP; 2467 vmexit->inst_length = 0; 2468 break; 2469 case EXIT_REASON_PAUSE: 2470 
vmm_stat_incr(vmx->vm, vcpu, VMEXIT_PAUSE, 1); 2471 SDT_PROBE3(vmm, vmx, exit, pause, vmx, vcpu, vmexit); 2472 vmexit->exitcode = VM_EXITCODE_PAUSE; 2473 break; 2474 case EXIT_REASON_INTR_WINDOW: 2475 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INTR_WINDOW, 1); 2476 SDT_PROBE3(vmm, vmx, exit, intrwindow, vmx, vcpu, vmexit); 2477 vmx_clear_int_window_exiting(vmx, vcpu); 2478 return (1); 2479 case EXIT_REASON_EXT_INTR: 2480 /* 2481 * External interrupts serve only to cause VM exits and allow 2482 * the host interrupt handler to run. 2483 * 2484 * If this external interrupt triggers a virtual interrupt 2485 * to a VM, then that state will be recorded by the 2486 * host interrupt handler in the VM's softc. We will inject 2487 * this virtual interrupt during the subsequent VM enter. 2488 */ 2489 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO); 2490 SDT_PROBE4(vmm, vmx, exit, interrupt, 2491 vmx, vcpu, vmexit, intr_info); 2492 2493 /* 2494 * XXX: Ignore this exit if VMCS_INTR_VALID is not set. 2495 * This appears to be a bug in VMware Fusion? 2496 */ 2497 if (!(intr_info & VMCS_INTR_VALID)) 2498 return (1); 2499 KASSERT((intr_info & VMCS_INTR_VALID) != 0 && 2500 (intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_HWINTR, 2501 ("VM exit interruption info invalid: %#x", intr_info)); 2502 vmx_trigger_hostintr(intr_info & 0xff); 2503 2504 /* 2505 * This is special. We want to treat this as an 'handled' 2506 * VM-exit but not increment the instruction pointer. 2507 */ 2508 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXTINT, 1); 2509 return (1); 2510 case EXIT_REASON_NMI_WINDOW: 2511 SDT_PROBE3(vmm, vmx, exit, nmiwindow, vmx, vcpu, vmexit); 2512 /* Exit to allow the pending virtual NMI to be injected */ 2513 if (vm_nmi_pending(vmx->vm, vcpu)) 2514 vmx_inject_nmi(vmx, vcpu); 2515 vmx_clear_nmi_window_exiting(vmx, vcpu); 2516 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NMI_WINDOW, 1); 2517 return (1); 2518 case EXIT_REASON_INOUT: 2519 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INOUT, 1); 2520 vie = vm_vie_ctx(vmx->vm, vcpu); 2521 vmexit_inout(vmexit, vie, qual, (uint32_t)vmxctx->guest_rax); 2522 SDT_PROBE3(vmm, vmx, exit, inout, vmx, vcpu, vmexit); 2523 break; 2524 case EXIT_REASON_CPUID: 2525 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CPUID, 1); 2526 SDT_PROBE3(vmm, vmx, exit, cpuid, vmx, vcpu, vmexit); 2527 handled = vmx_handle_cpuid(vmx->vm, vcpu, vmxctx); 2528 break; 2529 case EXIT_REASON_EXCEPTION: 2530 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXCEPTION, 1); 2531 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO); 2532 KASSERT((intr_info & VMCS_INTR_VALID) != 0, 2533 ("VM exit interruption info invalid: %#x", intr_info)); 2534 2535 intr_vec = intr_info & 0xff; 2536 intr_type = intr_info & VMCS_INTR_T_MASK; 2537 2538 /* 2539 * If Virtual NMIs control is 1 and the VM-exit is due to a 2540 * fault encountered during the execution of IRET then we must 2541 * restore the state of "virtual-NMI blocking" before resuming 2542 * the guest. 2543 * 2544 * See "Resuming Guest Software after Handling an Exception". 2545 * See "Information for VM Exits Due to Vectored Events". 2546 */ 2547 if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 && 2548 (intr_vec != IDT_DF) && 2549 (intr_info & EXIT_QUAL_NMIUDTI) != 0) 2550 vmx_restore_nmi_blocking(vmx, vcpu); 2551 2552 /* 2553 * The NMI has already been handled in vmx_exit_handle_nmi(). 2554 */ 2555 if (intr_type == VMCS_INTR_T_NMI) 2556 return (1); 2557 2558 /* 2559 * Call the machine check handler by hand. Also don't reflect 2560 * the machine check back into the guest. 
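 * The #MC is raised on the host directly (int $18 on FreeBSD, T_MCE
 * via vmm_call_trap() here) so the host's normal machine-check
 * handling takes over.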
2561 */ 2562 if (intr_vec == IDT_MC) { 2563 VCPU_CTR0(vmx->vm, vcpu, "Vectoring to MCE handler"); 2564 #ifdef __FreeBSD__ 2565 __asm __volatile("int $18"); 2566 #else 2567 vmm_call_trap(T_MCE); 2568 #endif 2569 return (1); 2570 } 2571 2572 /* 2573 * If the hypervisor has requested user exits for 2574 * debug exceptions, bounce them out to userland. 2575 */ 2576 if (intr_type == VMCS_INTR_T_SWEXCEPTION && intr_vec == IDT_BP && 2577 (vmx->cap[vcpu].set & (1 << VM_CAP_BPT_EXIT))) { 2578 vmexit->exitcode = VM_EXITCODE_BPT; 2579 vmexit->u.bpt.inst_length = vmexit->inst_length; 2580 vmexit->inst_length = 0; 2581 break; 2582 } 2583 2584 if (intr_vec == IDT_PF) { 2585 error = vmxctx_setreg(vmxctx, VM_REG_GUEST_CR2, qual); 2586 KASSERT(error == 0, ("%s: vmxctx_setreg(cr2) error %d", 2587 __func__, error)); 2588 } 2589 2590 /* 2591 * Software exceptions exhibit trap-like behavior. This in 2592 * turn requires populating the VM-entry instruction length 2593 * so that the %rip in the trap frame is past the INT3/INTO 2594 * instruction. 2595 */ 2596 if (intr_type == VMCS_INTR_T_SWEXCEPTION) 2597 vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length); 2598 2599 /* Reflect all other exceptions back into the guest */ 2600 errcode_valid = errcode = 0; 2601 if (intr_info & VMCS_INTR_DEL_ERRCODE) { 2602 errcode_valid = 1; 2603 errcode = vmcs_read(VMCS_EXIT_INTR_ERRCODE); 2604 } 2605 VCPU_CTR2(vmx->vm, vcpu, "Reflecting exception %d/%#x into " 2606 "the guest", intr_vec, errcode); 2607 SDT_PROBE5(vmm, vmx, exit, exception, 2608 vmx, vcpu, vmexit, intr_vec, errcode); 2609 error = vm_inject_exception(vmx->vm, vcpu, intr_vec, 2610 errcode_valid, errcode, 0); 2611 KASSERT(error == 0, ("%s: vm_inject_exception error %d", 2612 __func__, error)); 2613 return (1); 2614 2615 case EXIT_REASON_EPT_FAULT: 2616 /* 2617 * If 'gpa' lies within the address space allocated to 2618 * memory then this must be a nested page fault otherwise 2619 * this must be an instruction that accesses MMIO space. 2620 */ 2621 gpa = vmcs_gpa(); 2622 if (vm_mem_allocated(vmx->vm, vcpu, gpa) || 2623 apic_access_fault(vmx, vcpu, gpa)) { 2624 vmexit->exitcode = VM_EXITCODE_PAGING; 2625 vmexit->inst_length = 0; 2626 vmexit->u.paging.gpa = gpa; 2627 vmexit->u.paging.fault_type = ept_fault_type(qual); 2628 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NESTED_FAULT, 1); 2629 SDT_PROBE5(vmm, vmx, exit, nestedfault, 2630 vmx, vcpu, vmexit, gpa, qual); 2631 } else if (ept_emulation_fault(qual)) { 2632 vie = vm_vie_ctx(vmx->vm, vcpu); 2633 vmexit_mmio_emul(vmexit, vie, gpa, vmcs_gla()); 2634 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MMIO_EMUL, 1); 2635 SDT_PROBE4(vmm, vmx, exit, mmiofault, 2636 vmx, vcpu, vmexit, gpa); 2637 } 2638 /* 2639 * If Virtual NMIs control is 1 and the VM-exit is due to an 2640 * EPT fault during the execution of IRET then we must restore 2641 * the state of "virtual-NMI blocking" before resuming. 2642 * 2643 * See description of "NMI unblocking due to IRET" in 2644 * "Exit Qualification for EPT Violations". 
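 *
 * EXIT_QUAL_NMIUDTI reports "NMI unblocking due to IRET"; when it is
 * set and the exit did not occur during event delivery, virtual-NMI
 * blocking is re-established below before the guest is resumed.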
2645 */ 2646 if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 && 2647 (qual & EXIT_QUAL_NMIUDTI) != 0) 2648 vmx_restore_nmi_blocking(vmx, vcpu); 2649 break; 2650 case EXIT_REASON_VIRTUALIZED_EOI: 2651 vmexit->exitcode = VM_EXITCODE_IOAPIC_EOI; 2652 vmexit->u.ioapic_eoi.vector = qual & 0xFF; 2653 SDT_PROBE3(vmm, vmx, exit, eoi, vmx, vcpu, vmexit); 2654 vmexit->inst_length = 0; /* trap-like */ 2655 break; 2656 case EXIT_REASON_APIC_ACCESS: 2657 SDT_PROBE3(vmm, vmx, exit, apicaccess, vmx, vcpu, vmexit); 2658 handled = vmx_handle_apic_access(vmx, vcpu, vmexit); 2659 break; 2660 case EXIT_REASON_APIC_WRITE: 2661 /* 2662 * APIC-write VM exit is trap-like so the %rip is already 2663 * pointing to the next instruction. 2664 */ 2665 vmexit->inst_length = 0; 2666 vlapic = vm_lapic(vmx->vm, vcpu); 2667 SDT_PROBE4(vmm, vmx, exit, apicwrite, 2668 vmx, vcpu, vmexit, vlapic); 2669 handled = vmx_handle_apic_write(vmx, vcpu, vlapic, qual); 2670 break; 2671 case EXIT_REASON_XSETBV: 2672 SDT_PROBE3(vmm, vmx, exit, xsetbv, vmx, vcpu, vmexit); 2673 handled = vmx_emulate_xsetbv(vmx, vcpu, vmexit); 2674 break; 2675 case EXIT_REASON_MONITOR: 2676 SDT_PROBE3(vmm, vmx, exit, monitor, vmx, vcpu, vmexit); 2677 vmexit->exitcode = VM_EXITCODE_MONITOR; 2678 break; 2679 case EXIT_REASON_MWAIT: 2680 SDT_PROBE3(vmm, vmx, exit, mwait, vmx, vcpu, vmexit); 2681 vmexit->exitcode = VM_EXITCODE_MWAIT; 2682 break; 2683 case EXIT_REASON_TPR: 2684 vlapic = vm_lapic(vmx->vm, vcpu); 2685 vlapic_sync_tpr(vlapic); 2686 vmexit->inst_length = 0; 2687 handled = HANDLED; 2688 break; 2689 case EXIT_REASON_VMCALL: 2690 case EXIT_REASON_VMCLEAR: 2691 case EXIT_REASON_VMLAUNCH: 2692 case EXIT_REASON_VMPTRLD: 2693 case EXIT_REASON_VMPTRST: 2694 case EXIT_REASON_VMREAD: 2695 case EXIT_REASON_VMRESUME: 2696 case EXIT_REASON_VMWRITE: 2697 case EXIT_REASON_VMXOFF: 2698 case EXIT_REASON_VMXON: 2699 SDT_PROBE3(vmm, vmx, exit, vminsn, vmx, vcpu, vmexit); 2700 vmexit->exitcode = VM_EXITCODE_VMINSN; 2701 break; 2702 default: 2703 SDT_PROBE4(vmm, vmx, exit, unknown, 2704 vmx, vcpu, vmexit, reason); 2705 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_UNKNOWN, 1); 2706 break; 2707 } 2708 2709 if (handled) { 2710 /* 2711 * It is possible that control is returned to userland 2712 * even though we were able to handle the VM exit in the 2713 * kernel. 2714 * 2715 * In such a case we want to make sure that the userland 2716 * restarts guest execution at the instruction *after* 2717 * the one we just processed. Therefore we update the 2718 * guest rip in the VMCS and in 'vmexit'. 2719 */ 2720 vmexit->rip += vmexit->inst_length; 2721 vmexit->inst_length = 0; 2722 vmcs_write(VMCS_GUEST_RIP, vmexit->rip); 2723 } else { 2724 if (vmexit->exitcode == VM_EXITCODE_BOGUS) { 2725 /* 2726 * If this VM exit was not claimed by anybody then 2727 * treat it as a generic VMX exit. 2728 */ 2729 vmexit->exitcode = VM_EXITCODE_VMX; 2730 vmexit->u.vmx.status = VM_SUCCESS; 2731 vmexit->u.vmx.inst_type = 0; 2732 vmexit->u.vmx.inst_error = 0; 2733 } else { 2734 /* 2735 * The exitcode and collateral have been populated. 2736 * The VM exit will be processed further in userland. 
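 * (For example VM_EXITCODE_INOUT from vmexit_inout() or
 * VM_EXITCODE_MMIO_EMUL from vmexit_mmio_emul(), both reached from
 * the dispatch above.)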
2737 */ 2738 } 2739 } 2740 2741 SDT_PROBE4(vmm, vmx, exit, return, 2742 vmx, vcpu, vmexit, handled); 2743 return (handled); 2744 } 2745 2746 static void 2747 vmx_exit_inst_error(struct vmxctx *vmxctx, int rc, struct vm_exit *vmexit) 2748 { 2749 2750 KASSERT(vmxctx->inst_fail_status != VM_SUCCESS, 2751 ("vmx_exit_inst_error: invalid inst_fail_status %d", 2752 vmxctx->inst_fail_status)); 2753 2754 vmexit->inst_length = 0; 2755 vmexit->exitcode = VM_EXITCODE_VMX; 2756 vmexit->u.vmx.status = vmxctx->inst_fail_status; 2757 vmexit->u.vmx.inst_error = vmcs_instruction_error(); 2758 vmexit->u.vmx.exit_reason = ~0; 2759 vmexit->u.vmx.exit_qualification = ~0; 2760 2761 switch (rc) { 2762 case VMX_VMRESUME_ERROR: 2763 case VMX_VMLAUNCH_ERROR: 2764 case VMX_INVEPT_ERROR: 2765 #ifndef __FreeBSD__ 2766 case VMX_VMWRITE_ERROR: 2767 #endif 2768 vmexit->u.vmx.inst_type = rc; 2769 break; 2770 default: 2771 panic("vm_exit_inst_error: vmx_enter_guest returned %d", rc); 2772 } 2773 } 2774 2775 /* 2776 * If the NMI-exiting VM execution control is set to '1' then an NMI in 2777 * non-root operation causes a VM-exit. NMI blocking is in effect so it is 2778 * sufficient to simply vector to the NMI handler via a software interrupt. 2779 * However, this must be done before maskable interrupts are enabled 2780 * otherwise the "iret" issued by an interrupt handler will incorrectly 2781 * clear NMI blocking. 2782 */ 2783 static __inline void 2784 vmx_exit_handle_nmi(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit) 2785 { 2786 uint32_t intr_info; 2787 2788 KASSERT((read_rflags() & PSL_I) == 0, ("interrupts enabled")); 2789 2790 if (vmexit->u.vmx.exit_reason != EXIT_REASON_EXCEPTION) 2791 return; 2792 2793 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO); 2794 KASSERT((intr_info & VMCS_INTR_VALID) != 0, 2795 ("VM exit interruption info invalid: %#x", intr_info)); 2796 2797 if ((intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_NMI) { 2798 KASSERT((intr_info & 0xff) == IDT_NMI, ("VM exit due " 2799 "to NMI has invalid vector: %#x", intr_info)); 2800 VCPU_CTR0(vmx->vm, vcpuid, "Vectoring to NMI handler"); 2801 #ifdef __FreeBSD__ 2802 __asm __volatile("int $2"); 2803 #else 2804 vmm_call_trap(T_NMIFLT); 2805 #endif 2806 } 2807 } 2808 2809 static __inline void 2810 vmx_dr_enter_guest(struct vmxctx *vmxctx) 2811 { 2812 register_t rflags; 2813 2814 /* Save host control debug registers. */ 2815 vmxctx->host_dr7 = rdr7(); 2816 vmxctx->host_debugctl = rdmsr(MSR_DEBUGCTLMSR); 2817 2818 /* 2819 * Disable debugging in DR7 and DEBUGCTL to avoid triggering 2820 * exceptions in the host based on the guest DRx values. The 2821 * guest DR7 and DEBUGCTL are saved/restored in the VMCS. 2822 */ 2823 load_dr7(0); 2824 wrmsr(MSR_DEBUGCTLMSR, 0); 2825 2826 /* 2827 * Disable single stepping the kernel to avoid corrupting the 2828 * guest DR6. A debugger might still be able to corrupt the 2829 * guest DR6 by setting a breakpoint after this point and then 2830 * single stepping. 2831 */ 2832 rflags = read_rflags(); 2833 vmxctx->host_tf = rflags & PSL_T; 2834 write_rflags(rflags & ~PSL_T); 2835 2836 /* Save host debug registers. */ 2837 vmxctx->host_dr0 = rdr0(); 2838 vmxctx->host_dr1 = rdr1(); 2839 vmxctx->host_dr2 = rdr2(); 2840 vmxctx->host_dr3 = rdr3(); 2841 vmxctx->host_dr6 = rdr6(); 2842 2843 /* Restore guest debug registers. 
*/ 2844 load_dr0(vmxctx->guest_dr0); 2845 load_dr1(vmxctx->guest_dr1); 2846 load_dr2(vmxctx->guest_dr2); 2847 load_dr3(vmxctx->guest_dr3); 2848 load_dr6(vmxctx->guest_dr6); 2849 } 2850 2851 static __inline void 2852 vmx_dr_leave_guest(struct vmxctx *vmxctx) 2853 { 2854 2855 /* Save guest debug registers. */ 2856 vmxctx->guest_dr0 = rdr0(); 2857 vmxctx->guest_dr1 = rdr1(); 2858 vmxctx->guest_dr2 = rdr2(); 2859 vmxctx->guest_dr3 = rdr3(); 2860 vmxctx->guest_dr6 = rdr6(); 2861 2862 /* 2863 * Restore host debug registers. Restore DR7, DEBUGCTL, and 2864 * PSL_T last. 2865 */ 2866 load_dr0(vmxctx->host_dr0); 2867 load_dr1(vmxctx->host_dr1); 2868 load_dr2(vmxctx->host_dr2); 2869 load_dr3(vmxctx->host_dr3); 2870 load_dr6(vmxctx->host_dr6); 2871 wrmsr(MSR_DEBUGCTLMSR, vmxctx->host_debugctl); 2872 load_dr7(vmxctx->host_dr7); 2873 write_rflags(read_rflags() | vmxctx->host_tf); 2874 } 2875 2876 static int 2877 vmx_run(void *arg, int vcpu, register_t rip, pmap_t pmap, 2878 struct vm_eventinfo *evinfo) 2879 { 2880 int rc, handled, launched; 2881 struct vmx *vmx; 2882 struct vm *vm; 2883 struct vmxctx *vmxctx; 2884 struct vmcs *vmcs; 2885 struct vm_exit *vmexit; 2886 struct vlapic *vlapic; 2887 uint32_t exit_reason; 2888 #ifdef __FreeBSD__ 2889 struct region_descriptor gdtr, idtr; 2890 uint16_t ldt_sel; 2891 #endif 2892 2893 vmx = arg; 2894 vm = vmx->vm; 2895 vmcs = &vmx->vmcs[vcpu]; 2896 vmxctx = &vmx->ctx[vcpu]; 2897 vlapic = vm_lapic(vm, vcpu); 2898 vmexit = vm_exitinfo(vm, vcpu); 2899 launched = 0; 2900 2901 KASSERT(vmxctx->pmap == pmap, 2902 ("pmap %p different than ctx pmap %p", pmap, vmxctx->pmap)); 2903 2904 vmx_msr_guest_enter(vmx, vcpu); 2905 2906 VMPTRLD(vmcs); 2907 2908 #ifndef __FreeBSD__ 2909 VERIFY(vmx->vmcs_state[vcpu] == VS_NONE && curthread->t_preempt != 0); 2910 vmx->vmcs_state[vcpu] = VS_LOADED; 2911 #endif 2912 2913 /* 2914 * XXX 2915 * We do this every time because we may setup the virtual machine 2916 * from a different process than the one that actually runs it. 2917 * 2918 * If the life of a virtual machine was spent entirely in the context 2919 * of a single process we could do this once in vmx_vminit(). 2920 */ 2921 vmcs_write(VMCS_HOST_CR3, rcr3()); 2922 2923 vmcs_write(VMCS_GUEST_RIP, rip); 2924 vmx_set_pcpu_defaults(vmx, vcpu, pmap); 2925 do { 2926 KASSERT(vmcs_guest_rip() == rip, ("%s: vmcs guest rip mismatch " 2927 "%#lx/%#lx", __func__, vmcs_guest_rip(), rip)); 2928 2929 handled = UNHANDLED; 2930 /* 2931 * Interrupts are disabled from this point on until the 2932 * guest starts executing. This is done for the following 2933 * reasons: 2934 * 2935 * If an AST is asserted on this thread after the check below, 2936 * then the IPI_AST notification will not be lost, because it 2937 * will cause a VM exit due to external interrupt as soon as 2938 * the guest state is loaded. 2939 * 2940 * A posted interrupt after 'vmx_inject_interrupts()' will 2941 * not be "lost" because it will be held pending in the host 2942 * APIC because interrupts are disabled. The pending interrupt 2943 * will be recognized as soon as the guest state is loaded. 2944 * 2945 * The same reasoning applies to the IPI generated by 2946 * pmap_invalidate_ept(). 2947 * 2948 * The bulk of guest interrupt injection is done without 2949 * interrupts disabled on the host CPU. This is necessary 2950 * since contended mutexes might force the thread to sleep. 
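 *
 * Every early-exit path below (suspend, run-block, reqidle, AST,
 * debug) calls enable_intr() before breaking out of the loop, so
 * interrupts are never left disabled on any path that leaves it.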
2951 */ 2952 vmx_inject_interrupts(vmx, vcpu, vlapic, rip); 2953 disable_intr(); 2954 if (vmx_cap_en(vmx, VMX_CAP_APICV)) { 2955 vmx_inject_pir(vlapic); 2956 } 2957 2958 /* 2959 * Check for vcpu suspension after injecting events because 2960 * vmx_inject_interrupts() can suspend the vcpu due to a 2961 * triple fault. 2962 */ 2963 if (vcpu_suspended(evinfo)) { 2964 enable_intr(); 2965 vm_exit_suspended(vmx->vm, vcpu, rip); 2966 break; 2967 } 2968 2969 if (vcpu_runblocked(evinfo)) { 2970 enable_intr(); 2971 vm_exit_runblock(vmx->vm, vcpu, rip); 2972 break; 2973 } 2974 2975 if (vcpu_reqidle(evinfo)) { 2976 enable_intr(); 2977 vm_exit_reqidle(vmx->vm, vcpu, rip); 2978 break; 2979 } 2980 2981 if (vcpu_should_yield(vm, vcpu)) { 2982 enable_intr(); 2983 vm_exit_astpending(vmx->vm, vcpu, rip); 2984 vmx_astpending_trace(vmx, vcpu, rip); 2985 handled = HANDLED; 2986 break; 2987 } 2988 2989 if (vcpu_debugged(vm, vcpu)) { 2990 enable_intr(); 2991 vm_exit_debug(vmx->vm, vcpu, rip); 2992 break; 2993 } 2994 2995 #ifndef __FreeBSD__ 2996 if ((rc = smt_acquire()) != 1) { 2997 enable_intr(); 2998 vmexit->rip = rip; 2999 vmexit->inst_length = 0; 3000 if (rc == -1) { 3001 vmexit->exitcode = VM_EXITCODE_HT; 3002 } else { 3003 vmexit->exitcode = VM_EXITCODE_BOGUS; 3004 handled = HANDLED; 3005 } 3006 break; 3007 } 3008 3009 /* 3010 * If this thread has gone off-cpu due to mutex operations 3011 * during vmx_run, the VMCS will have been unloaded, forcing a 3012 * re-VMLAUNCH as opposed to VMRESUME. 3013 */ 3014 launched = (vmx->vmcs_state[vcpu] & VS_LAUNCHED) != 0; 3015 /* 3016 * Restoration of the GDT limit is taken care of by 3017 * vmx_savectx(). Since the maximum practical index for the 3018 * IDT is 255, restoring its limits from the post-VMX-exit 3019 * default of 0xffff is not a concern. 3020 * 3021 * Only 64-bit hypervisor callers are allowed, which forgoes 3022 * the need to restore any LDT descriptor. Toss an error to 3023 * anyone attempting to break that rule. 3024 */ 3025 if (curproc->p_model != DATAMODEL_LP64) { 3026 smt_release(); 3027 enable_intr(); 3028 bzero(vmexit, sizeof (*vmexit)); 3029 vmexit->rip = rip; 3030 vmexit->exitcode = VM_EXITCODE_VMX; 3031 vmexit->u.vmx.status = VM_FAIL_INVALID; 3032 handled = UNHANDLED; 3033 break; 3034 } 3035 #else 3036 /* 3037 * VM exits restore the base address but not the 3038 * limits of GDTR and IDTR. The VMCS only stores the 3039 * base address, so VM exits set the limits to 0xffff. 3040 * Save and restore the full GDTR and IDTR to restore 3041 * the limits. 3042 * 3043 * The VMCS does not save the LDTR at all, and VM 3044 * exits clear LDTR as if a NULL selector were loaded. 3045 * The userspace hypervisor probably doesn't use a 3046 * LDT, but save and restore it to be safe. 3047 */ 3048 sgdt(&gdtr); 3049 sidt(&idtr); 3050 ldt_sel = sldt(); 3051 #endif 3052 3053 /* 3054 * If TPR Shadowing is enabled, the TPR Threshold must be 3055 * updated right before entering the guest. 
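 *
 * The threshold is loaded from the vlapic's current CR8 so that a
 * guest access which drops the virtual TPR below it triggers a
 * TPR-below-threshold exit (EXIT_REASON_TPR above), at which point
 * vlapic_sync_tpr() brings the vlapic state back in sync.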
3056 */ 3057 if (vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW) && 3058 !vmx_cap_en(vmx, VMX_CAP_APICV)) { 3059 if ((vmx->cap[vcpu].proc_ctls & 3060 PROCBASED_USE_TPR_SHADOW) != 0) { 3061 vmcs_write(VMCS_TPR_THRESHOLD, 3062 vlapic_get_cr8(vlapic)); 3063 } 3064 } 3065 3066 vmx_run_trace(vmx, vcpu); 3067 vmx_dr_enter_guest(vmxctx); 3068 rc = vmx_enter_guest(vmxctx, vmx, launched); 3069 vmx_dr_leave_guest(vmxctx); 3070 3071 #ifndef __FreeBSD__ 3072 vmx->vmcs_state[vcpu] |= VS_LAUNCHED; 3073 smt_release(); 3074 #else 3075 bare_lgdt(&gdtr); 3076 lidt(&idtr); 3077 lldt(ldt_sel); 3078 #endif 3079 3080 /* Collect some information for VM exit processing */ 3081 vmexit->rip = rip = vmcs_guest_rip(); 3082 vmexit->inst_length = vmexit_instruction_length(); 3083 vmexit->u.vmx.exit_reason = exit_reason = vmcs_exit_reason(); 3084 vmexit->u.vmx.exit_qualification = vmcs_exit_qualification(); 3085 3086 /* Update 'nextrip' */ 3087 vmx->state[vcpu].nextrip = rip; 3088 3089 if (rc == VMX_GUEST_VMEXIT) { 3090 vmx_exit_handle_nmi(vmx, vcpu, vmexit); 3091 enable_intr(); 3092 handled = vmx_exit_process(vmx, vcpu, vmexit); 3093 } else { 3094 enable_intr(); 3095 vmx_exit_inst_error(vmxctx, rc, vmexit); 3096 } 3097 #ifdef __FreeBSD__ 3098 launched = 1; 3099 #endif 3100 vmx_exit_trace(vmx, vcpu, rip, exit_reason, handled); 3101 rip = vmexit->rip; 3102 } while (handled); 3103 3104 /* 3105 * If a VM exit has been handled then the exitcode must be BOGUS 3106 * If a VM exit is not handled then the exitcode must not be BOGUS 3107 */ 3108 if ((handled && vmexit->exitcode != VM_EXITCODE_BOGUS) || 3109 (!handled && vmexit->exitcode == VM_EXITCODE_BOGUS)) { 3110 panic("Mismatch between handled (%d) and exitcode (%d)", 3111 handled, vmexit->exitcode); 3112 } 3113 3114 if (!handled) 3115 vmm_stat_incr(vm, vcpu, VMEXIT_USERSPACE, 1); 3116 3117 VCPU_CTR1(vm, vcpu, "returning from vmx_run: exitcode %d", 3118 vmexit->exitcode); 3119 3120 VMCLEAR(vmcs); 3121 vmx_msr_guest_exit(vmx, vcpu); 3122 3123 #ifndef __FreeBSD__ 3124 VERIFY(vmx->vmcs_state[vcpu] != VS_NONE && curthread->t_preempt != 0); 3125 vmx->vmcs_state[vcpu] = VS_NONE; 3126 #endif 3127 3128 return (0); 3129 } 3130 3131 static void 3132 vmx_vmcleanup(void *arg) 3133 { 3134 int i; 3135 struct vmx *vmx = arg; 3136 uint16_t maxcpus; 3137 3138 if (apic_access_virtualization(vmx, 0)) 3139 vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE); 3140 3141 maxcpus = vm_get_maxcpus(vmx->vm); 3142 for (i = 0; i < maxcpus; i++) 3143 vpid_free(vmx->state[i].vpid); 3144 3145 free(vmx, M_VMX); 3146 3147 return; 3148 } 3149 3150 static register_t * 3151 vmxctx_regptr(struct vmxctx *vmxctx, int reg) 3152 { 3153 3154 switch (reg) { 3155 case VM_REG_GUEST_RAX: 3156 return (&vmxctx->guest_rax); 3157 case VM_REG_GUEST_RBX: 3158 return (&vmxctx->guest_rbx); 3159 case VM_REG_GUEST_RCX: 3160 return (&vmxctx->guest_rcx); 3161 case VM_REG_GUEST_RDX: 3162 return (&vmxctx->guest_rdx); 3163 case VM_REG_GUEST_RSI: 3164 return (&vmxctx->guest_rsi); 3165 case VM_REG_GUEST_RDI: 3166 return (&vmxctx->guest_rdi); 3167 case VM_REG_GUEST_RBP: 3168 return (&vmxctx->guest_rbp); 3169 case VM_REG_GUEST_R8: 3170 return (&vmxctx->guest_r8); 3171 case VM_REG_GUEST_R9: 3172 return (&vmxctx->guest_r9); 3173 case VM_REG_GUEST_R10: 3174 return (&vmxctx->guest_r10); 3175 case VM_REG_GUEST_R11: 3176 return (&vmxctx->guest_r11); 3177 case VM_REG_GUEST_R12: 3178 return (&vmxctx->guest_r12); 3179 case VM_REG_GUEST_R13: 3180 return (&vmxctx->guest_r13); 3181 case VM_REG_GUEST_R14: 3182 return (&vmxctx->guest_r14); 3183 case VM_REG_GUEST_R15:
3184 return (&vmxctx->guest_r15); 3185 case VM_REG_GUEST_CR2: 3186 return (&vmxctx->guest_cr2); 3187 case VM_REG_GUEST_DR0: 3188 return (&vmxctx->guest_dr0); 3189 case VM_REG_GUEST_DR1: 3190 return (&vmxctx->guest_dr1); 3191 case VM_REG_GUEST_DR2: 3192 return (&vmxctx->guest_dr2); 3193 case VM_REG_GUEST_DR3: 3194 return (&vmxctx->guest_dr3); 3195 case VM_REG_GUEST_DR6: 3196 return (&vmxctx->guest_dr6); 3197 default: 3198 break; 3199 } 3200 return (NULL); 3201 } 3202 3203 static int 3204 vmxctx_getreg(struct vmxctx *vmxctx, int reg, uint64_t *retval) 3205 { 3206 register_t *regp; 3207 3208 if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) { 3209 *retval = *regp; 3210 return (0); 3211 } else 3212 return (EINVAL); 3213 } 3214 3215 static int 3216 vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val) 3217 { 3218 register_t *regp; 3219 3220 if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) { 3221 *regp = val; 3222 return (0); 3223 } else 3224 return (EINVAL); 3225 } 3226 3227 static int 3228 vmx_get_intr_shadow(struct vmx *vmx, int vcpu, int running, uint64_t *retval) 3229 { 3230 uint64_t gi; 3231 int error; 3232 3233 error = vmcs_getreg(&vmx->vmcs[vcpu], running, 3234 VMCS_IDENT(VMCS_GUEST_INTERRUPTIBILITY), &gi); 3235 *retval = (gi & HWINTR_BLOCKING) ? 1 : 0; 3236 return (error); 3237 } 3238 3239 static int 3240 vmx_modify_intr_shadow(struct vmx *vmx, int vcpu, int running, uint64_t val) 3241 { 3242 struct vmcs *vmcs; 3243 uint64_t gi; 3244 int error, ident; 3245 3246 /* 3247 * Forcing the vcpu into an interrupt shadow is not supported. 3248 */ 3249 if (val) { 3250 error = EINVAL; 3251 goto done; 3252 } 3253 3254 vmcs = &vmx->vmcs[vcpu]; 3255 ident = VMCS_IDENT(VMCS_GUEST_INTERRUPTIBILITY); 3256 error = vmcs_getreg(vmcs, running, ident, &gi); 3257 if (error == 0) { 3258 gi &= ~HWINTR_BLOCKING; 3259 error = vmcs_setreg(vmcs, running, ident, gi); 3260 } 3261 done: 3262 VCPU_CTR2(vmx->vm, vcpu, "Setting intr_shadow to %#lx %s", val, 3263 error ? 
"failed" : "succeeded"); 3264 return (error); 3265 } 3266 3267 static int 3268 vmx_shadow_reg(int reg) 3269 { 3270 int shreg; 3271 3272 shreg = -1; 3273 3274 switch (reg) { 3275 case VM_REG_GUEST_CR0: 3276 shreg = VMCS_CR0_SHADOW; 3277 break; 3278 case VM_REG_GUEST_CR4: 3279 shreg = VMCS_CR4_SHADOW; 3280 break; 3281 default: 3282 break; 3283 } 3284 3285 return (shreg); 3286 } 3287 3288 static int 3289 vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval) 3290 { 3291 int running, hostcpu; 3292 struct vmx *vmx = arg; 3293 3294 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu); 3295 if (running && hostcpu != curcpu) 3296 panic("vmx_getreg: %s%d is running", vm_name(vmx->vm), vcpu); 3297 3298 if (reg == VM_REG_GUEST_INTR_SHADOW) 3299 return (vmx_get_intr_shadow(vmx, vcpu, running, retval)); 3300 3301 if (vmxctx_getreg(&vmx->ctx[vcpu], reg, retval) == 0) 3302 return (0); 3303 3304 return (vmcs_getreg(&vmx->vmcs[vcpu], running, reg, retval)); 3305 } 3306 3307 static int 3308 vmx_setreg(void *arg, int vcpu, int reg, uint64_t val) 3309 { 3310 int error, hostcpu, running, shadow; 3311 uint64_t ctls; 3312 pmap_t pmap; 3313 struct vmx *vmx = arg; 3314 3315 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu); 3316 if (running && hostcpu != curcpu) 3317 panic("vmx_setreg: %s%d is running", vm_name(vmx->vm), vcpu); 3318 3319 if (reg == VM_REG_GUEST_INTR_SHADOW) 3320 return (vmx_modify_intr_shadow(vmx, vcpu, running, val)); 3321 3322 if (vmxctx_setreg(&vmx->ctx[vcpu], reg, val) == 0) 3323 return (0); 3324 3325 error = vmcs_setreg(&vmx->vmcs[vcpu], running, reg, val); 3326 3327 if (error == 0) { 3328 /* 3329 * If the "load EFER" VM-entry control is 1 then the 3330 * value of EFER.LMA must be identical to "IA-32e mode guest" 3331 * bit in the VM-entry control. 3332 */ 3333 if ((entry_ctls & VM_ENTRY_LOAD_EFER) != 0 && 3334 (reg == VM_REG_GUEST_EFER)) { 3335 vmcs_getreg(&vmx->vmcs[vcpu], running, 3336 VMCS_IDENT(VMCS_ENTRY_CTLS), &ctls); 3337 if (val & EFER_LMA) 3338 ctls |= VM_ENTRY_GUEST_LMA; 3339 else 3340 ctls &= ~VM_ENTRY_GUEST_LMA; 3341 vmcs_setreg(&vmx->vmcs[vcpu], running, 3342 VMCS_IDENT(VMCS_ENTRY_CTLS), ctls); 3343 } 3344 3345 shadow = vmx_shadow_reg(reg); 3346 if (shadow > 0) { 3347 /* 3348 * Store the unmodified value in the shadow 3349 */ 3350 error = vmcs_setreg(&vmx->vmcs[vcpu], running, 3351 VMCS_IDENT(shadow), val); 3352 } 3353 3354 if (reg == VM_REG_GUEST_CR3) { 3355 /* 3356 * Invalidate the guest vcpu's TLB mappings to emulate 3357 * the behavior of updating %cr3. 3358 * 3359 * XXX the processor retains global mappings when %cr3 3360 * is updated but vmx_invvpid() does not. 
3361 */ 3362 pmap = vmx->ctx[vcpu].pmap; 3363 vmx_invvpid(vmx, vcpu, pmap, running); 3364 } 3365 } 3366 3367 return (error); 3368 } 3369 3370 static int 3371 vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc) 3372 { 3373 int hostcpu, running; 3374 struct vmx *vmx = arg; 3375 3376 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu); 3377 if (running && hostcpu != curcpu) 3378 panic("vmx_getdesc: %s%d is running", vm_name(vmx->vm), vcpu); 3379 3380 return (vmcs_getdesc(&vmx->vmcs[vcpu], running, reg, desc)); 3381 } 3382 3383 static int 3384 vmx_setdesc(void *arg, int vcpu, int reg, struct seg_desc *desc) 3385 { 3386 int hostcpu, running; 3387 struct vmx *vmx = arg; 3388 3389 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu); 3390 if (running && hostcpu != curcpu) 3391 panic("vmx_setdesc: %s%d is running", vm_name(vmx->vm), vcpu); 3392 3393 return (vmcs_setdesc(&vmx->vmcs[vcpu], running, reg, desc)); 3394 } 3395 3396 static int 3397 vmx_getcap(void *arg, int vcpu, int type, int *retval) 3398 { 3399 struct vmx *vmx = arg; 3400 int vcap; 3401 int ret; 3402 3403 ret = ENOENT; 3404 3405 vcap = vmx->cap[vcpu].set; 3406 3407 switch (type) { 3408 case VM_CAP_HALT_EXIT: 3409 if (cap_halt_exit) 3410 ret = 0; 3411 break; 3412 case VM_CAP_PAUSE_EXIT: 3413 if (cap_pause_exit) 3414 ret = 0; 3415 break; 3416 case VM_CAP_MTRAP_EXIT: 3417 if (cap_monitor_trap) 3418 ret = 0; 3419 break; 3420 case VM_CAP_ENABLE_INVPCID: 3421 if (cap_invpcid) 3422 ret = 0; 3423 break; 3424 case VM_CAP_BPT_EXIT: 3425 ret = 0; 3426 break; 3427 default: 3428 break; 3429 } 3430 3431 if (ret == 0) 3432 *retval = (vcap & (1 << type)) ? 1 : 0; 3433 3434 return (ret); 3435 } 3436 3437 static int 3438 vmx_setcap(void *arg, int vcpu, int type, int val) 3439 { 3440 struct vmx *vmx = arg; 3441 struct vmcs *vmcs = &vmx->vmcs[vcpu]; 3442 uint32_t baseval; 3443 uint32_t *pptr; 3444 int error; 3445 int flag; 3446 int reg; 3447 int retval; 3448 3449 retval = ENOENT; 3450 pptr = NULL; 3451 3452 switch (type) { 3453 case VM_CAP_HALT_EXIT: 3454 if (cap_halt_exit) { 3455 retval = 0; 3456 pptr = &vmx->cap[vcpu].proc_ctls; 3457 baseval = *pptr; 3458 flag = PROCBASED_HLT_EXITING; 3459 reg = VMCS_PRI_PROC_BASED_CTLS; 3460 } 3461 break; 3462 case VM_CAP_MTRAP_EXIT: 3463 if (cap_monitor_trap) { 3464 retval = 0; 3465 pptr = &vmx->cap[vcpu].proc_ctls; 3466 baseval = *pptr; 3467 flag = PROCBASED_MTF; 3468 reg = VMCS_PRI_PROC_BASED_CTLS; 3469 } 3470 break; 3471 case VM_CAP_PAUSE_EXIT: 3472 if (cap_pause_exit) { 3473 retval = 0; 3474 pptr = &vmx->cap[vcpu].proc_ctls; 3475 baseval = *pptr; 3476 flag = PROCBASED_PAUSE_EXITING; 3477 reg = VMCS_PRI_PROC_BASED_CTLS; 3478 } 3479 break; 3480 case VM_CAP_ENABLE_INVPCID: 3481 if (cap_invpcid) { 3482 retval = 0; 3483 pptr = &vmx->cap[vcpu].proc_ctls2; 3484 baseval = *pptr; 3485 flag = PROCBASED2_ENABLE_INVPCID; 3486 reg = VMCS_SEC_PROC_BASED_CTLS; 3487 } 3488 break; 3489 case VM_CAP_BPT_EXIT: 3490 retval = 0; 3491 3492 /* Don't change the bitmap if we are tracing all exceptions. 
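 * An exc_bitmap of all ones already makes every exception, including
 * #BP, cause a VM exit, so there is no individual bit to toggle in
 * the VMCS; only the software-visible capability state is updated.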
*/ 3493 if (vmx->cap[vcpu].exc_bitmap != 0xffffffff) { 3494 pptr = &vmx->cap[vcpu].exc_bitmap; 3495 baseval = *pptr; 3496 flag = (1 << IDT_BP); 3497 reg = VMCS_EXCEPTION_BITMAP; 3498 } 3499 break; 3500 default: 3501 break; 3502 } 3503 3504 if (retval) 3505 return (retval); 3506 3507 if (pptr != NULL) { 3508 if (val) { 3509 baseval |= flag; 3510 } else { 3511 baseval &= ~flag; 3512 } 3513 VMPTRLD(vmcs); 3514 error = vmwrite(reg, baseval); 3515 VMCLEAR(vmcs); 3516 3517 if (error) 3518 return (error); 3519 3520 /* 3521 * Update optional stored flags, and record 3522 * setting 3523 */ 3524 *pptr = baseval; 3525 } 3526 3527 if (val) { 3528 vmx->cap[vcpu].set |= (1 << type); 3529 } else { 3530 vmx->cap[vcpu].set &= ~(1 << type); 3531 } 3532 3533 return (0); 3534 } 3535 3536 struct vlapic_vtx { 3537 struct vlapic vlapic; 3538 struct pir_desc *pir_desc; 3539 struct vmx *vmx; 3540 u_int pending_prio; 3541 }; 3542 3543 #define VPR_PRIO_BIT(vpr) (1 << ((vpr) >> 4)) 3544 3545 #define VMX_CTR_PIR(vm, vcpuid, pir_desc, notify, vector, level, msg) \ 3546 do { \ 3547 VCPU_CTR2(vm, vcpuid, msg " assert %s-triggered vector %d", \ 3548 level ? "level" : "edge", vector); \ 3549 VCPU_CTR1(vm, vcpuid, msg " pir0 0x%016lx", pir_desc->pir[0]); \ 3550 VCPU_CTR1(vm, vcpuid, msg " pir1 0x%016lx", pir_desc->pir[1]); \ 3551 VCPU_CTR1(vm, vcpuid, msg " pir2 0x%016lx", pir_desc->pir[2]); \ 3552 VCPU_CTR1(vm, vcpuid, msg " pir3 0x%016lx", pir_desc->pir[3]); \ 3553 VCPU_CTR1(vm, vcpuid, msg " notify: %s", notify ? "yes" : "no");\ 3554 } while (0) 3555 3556 /* 3557 * vlapic->ops handlers that utilize the APICv hardware assist described in 3558 * Chapter 29 of the Intel SDM. 3559 */ 3560 static int 3561 vmx_set_intr_ready(struct vlapic *vlapic, int vector, bool level) 3562 { 3563 struct vlapic_vtx *vlapic_vtx; 3564 struct pir_desc *pir_desc; 3565 uint64_t mask; 3566 int idx, notify = 0; 3567 3568 vlapic_vtx = (struct vlapic_vtx *)vlapic; 3569 pir_desc = vlapic_vtx->pir_desc; 3570 3571 /* 3572 * Keep track of interrupt requests in the PIR descriptor. This is 3573 * because the virtual APIC page pointed to by the VMCS cannot be 3574 * modified if the vcpu is running. 3575 */ 3576 idx = vector / 64; 3577 mask = 1UL << (vector % 64); 3578 atomic_set_long(&pir_desc->pir[idx], mask); 3579 3580 /* 3581 * A notification is required whenever the 'pending' bit makes a 3582 * transition from 0->1. 3583 * 3584 * Even if the 'pending' bit is already asserted, notification about 3585 * the incoming interrupt may still be necessary. For example, if a 3586 * vCPU is HLTed with a high PPR, a low priority interrupt would cause 3587 * the 0->1 'pending' transition with a notification, but the vCPU 3588 * would ignore the interrupt for the time being. The same vCPU would 3589 * need to then be notified if a high-priority interrupt arrived which 3590 * satisfied the PPR. 3591 * 3592 * The priorities of interrupts injected while 'pending' is asserted 3593 * are tracked in a custom bitfield 'pending_prio'. Should the 3594 * to-be-injected interrupt exceed the priorities already present, the 3595 * notification is sent. The priorities recorded in 'pending_prio' are 3596 * cleared whenever the 'pending' bit makes another 0->1 transition. 
3597 */ 3598 if (atomic_cmpset_long(&pir_desc->pending, 0, 1) != 0) { 3599 notify = 1; 3600 vlapic_vtx->pending_prio = 0; 3601 } else { 3602 const u_int old_prio = vlapic_vtx->pending_prio; 3603 const u_int prio_bit = VPR_PRIO_BIT(vector & APIC_TPR_INT); 3604 3605 if ((old_prio & prio_bit) == 0 && prio_bit > old_prio) { 3606 atomic_set_int(&vlapic_vtx->pending_prio, prio_bit); 3607 notify = 1; 3608 } 3609 } 3610 3611 VMX_CTR_PIR(vlapic->vm, vlapic->vcpuid, pir_desc, notify, vector, 3612 level, "vmx_set_intr_ready"); 3613 return (notify); 3614 } 3615 3616 static int 3617 vmx_pending_intr(struct vlapic *vlapic, int *vecptr) 3618 { 3619 struct vlapic_vtx *vlapic_vtx; 3620 struct pir_desc *pir_desc; 3621 struct LAPIC *lapic; 3622 uint64_t pending, pirval; 3623 uint32_t ppr, vpr; 3624 int i; 3625 3626 /* 3627 * This function is only expected to be called from the 'HLT' exit 3628 * handler which does not care about the vector that is pending. 3629 */ 3630 KASSERT(vecptr == NULL, ("vmx_pending_intr: vecptr must be NULL")); 3631 3632 vlapic_vtx = (struct vlapic_vtx *)vlapic; 3633 pir_desc = vlapic_vtx->pir_desc; 3634 3635 pending = atomic_load_acq_long(&pir_desc->pending); 3636 if (!pending) { 3637 /* 3638 * While a virtual interrupt may have already been 3639 * processed the actual delivery maybe pending the 3640 * interruptibility of the guest. Recognize a pending 3641 * interrupt by reevaluating virtual interrupts 3642 * following Section 29.2.1 in the Intel SDM Volume 3. 3643 */ 3644 struct vm_exit *vmexit; 3645 uint8_t rvi, ppr; 3646 3647 vmexit = vm_exitinfo(vlapic->vm, vlapic->vcpuid); 3648 rvi = vmexit->u.hlt.intr_status & APIC_TPR_INT; 3649 lapic = vlapic->apic_page; 3650 ppr = lapic->ppr & APIC_TPR_INT; 3651 if (rvi > ppr) { 3652 return (1); 3653 } 3654 3655 return (0); 3656 } 3657 3658 /* 3659 * If there is an interrupt pending then it will be recognized only 3660 * if its priority is greater than the processor priority. 3661 * 3662 * Special case: if the processor priority is zero then any pending 3663 * interrupt will be recognized. 3664 */ 3665 lapic = vlapic->apic_page; 3666 ppr = lapic->ppr & APIC_TPR_INT; 3667 if (ppr == 0) 3668 return (1); 3669 3670 VCPU_CTR1(vlapic->vm, vlapic->vcpuid, "HLT with non-zero PPR %d", 3671 lapic->ppr); 3672 3673 vpr = 0; 3674 for (i = 3; i >= 0; i--) { 3675 pirval = pir_desc->pir[i]; 3676 if (pirval != 0) { 3677 vpr = (i * 64 + flsl(pirval) - 1) & APIC_TPR_INT; 3678 break; 3679 } 3680 } 3681 3682 /* 3683 * If the highest-priority pending interrupt falls short of the 3684 * processor priority of this vCPU, ensure that 'pending_prio' does not 3685 * have any stale bits which would preclude a higher-priority interrupt 3686 * from incurring a notification later. 
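 *
 * Collapsing pending_prio down to just the priority of the highest
 * pending vector keeps a later, higher-priority arrival eligible to
 * generate a notification in vmx_set_intr_ready().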
3687 */ 3688 if (vpr <= ppr) { 3689 const u_int prio_bit = VPR_PRIO_BIT(vpr); 3690 const u_int old = vlapic_vtx->pending_prio; 3691 3692 if (old > prio_bit && (old & prio_bit) == 0) { 3693 vlapic_vtx->pending_prio = prio_bit; 3694 } 3695 return (0); 3696 } 3697 return (1); 3698 } 3699 3700 static void 3701 vmx_intr_accepted(struct vlapic *vlapic, int vector) 3702 { 3703 3704 panic("vmx_intr_accepted: not expected to be called"); 3705 } 3706 3707 static void 3708 vmx_set_tmr(struct vlapic *vlapic, const uint32_t *masks) 3709 { 3710 vmcs_write(VMCS_EOI_EXIT0, ((uint64_t)masks[1] << 32) | masks[0]); 3711 vmcs_write(VMCS_EOI_EXIT1, ((uint64_t)masks[3] << 32) | masks[2]); 3712 vmcs_write(VMCS_EOI_EXIT2, ((uint64_t)masks[5] << 32) | masks[4]); 3713 vmcs_write(VMCS_EOI_EXIT3, ((uint64_t)masks[7] << 32) | masks[6]); 3714 } 3715 3716 static void 3717 vmx_enable_x2apic_mode_ts(struct vlapic *vlapic) 3718 { 3719 struct vmx *vmx; 3720 struct vmcs *vmcs; 3721 uint32_t proc_ctls; 3722 int vcpuid; 3723 3724 vcpuid = vlapic->vcpuid; 3725 vmx = ((struct vlapic_vtx *)vlapic)->vmx; 3726 vmcs = &vmx->vmcs[vcpuid]; 3727 3728 proc_ctls = vmx->cap[vcpuid].proc_ctls; 3729 proc_ctls &= ~PROCBASED_USE_TPR_SHADOW; 3730 proc_ctls |= PROCBASED_CR8_LOAD_EXITING; 3731 proc_ctls |= PROCBASED_CR8_STORE_EXITING; 3732 vmx->cap[vcpuid].proc_ctls = proc_ctls; 3733 3734 VMPTRLD(vmcs); 3735 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, proc_ctls); 3736 VMCLEAR(vmcs); 3737 } 3738 3739 static void 3740 vmx_enable_x2apic_mode_vid(struct vlapic *vlapic) 3741 { 3742 struct vmx *vmx; 3743 struct vmcs *vmcs; 3744 uint32_t proc_ctls2; 3745 int vcpuid, error; 3746 3747 vcpuid = vlapic->vcpuid; 3748 vmx = ((struct vlapic_vtx *)vlapic)->vmx; 3749 vmcs = &vmx->vmcs[vcpuid]; 3750 3751 proc_ctls2 = vmx->cap[vcpuid].proc_ctls2; 3752 KASSERT((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) != 0, 3753 ("%s: invalid proc_ctls2 %#x", __func__, proc_ctls2)); 3754 3755 proc_ctls2 &= ~PROCBASED2_VIRTUALIZE_APIC_ACCESSES; 3756 proc_ctls2 |= PROCBASED2_VIRTUALIZE_X2APIC_MODE; 3757 vmx->cap[vcpuid].proc_ctls2 = proc_ctls2; 3758 3759 VMPTRLD(vmcs); 3760 vmcs_write(VMCS_SEC_PROC_BASED_CTLS, proc_ctls2); 3761 VMCLEAR(vmcs); 3762 3763 if (vlapic->vcpuid == 0) { 3764 /* 3765 * The nested page table mappings are shared by all vcpus 3766 * so unmap the APIC access page just once. 3767 */ 3768 error = vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE); 3769 KASSERT(error == 0, ("%s: vm_unmap_mmio error %d", 3770 __func__, error)); 3771 3772 /* 3773 * The MSR bitmap is shared by all vcpus so modify it only 3774 * once in the context of vcpu 0. 3775 */ 3776 error = vmx_allow_x2apic_msrs(vmx); 3777 KASSERT(error == 0, ("%s: vmx_allow_x2apic_msrs error %d", 3778 __func__, error)); 3779 } 3780 } 3781 3782 static void 3783 vmx_post_intr(struct vlapic *vlapic, int hostcpu) 3784 { 3785 #ifdef __FreeBSD__ 3786 ipi_cpu(hostcpu, pirvec); 3787 #else 3788 psm_send_pir_ipi(hostcpu); 3789 #endif 3790 } 3791 3792 /* 3793 * Transfer the pending interrupts in the PIR descriptor to the IRR 3794 * in the virtual APIC page. 
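 *
 * This is done in software on the way into the guest (see vmx_run())
 * so that interrupts posted while the vCPU was not running are folded
 * into the IRR and reflected in RVI before VM entry.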
3795 */ 3796 static void 3797 vmx_inject_pir(struct vlapic *vlapic) 3798 { 3799 struct vlapic_vtx *vlapic_vtx; 3800 struct pir_desc *pir_desc; 3801 struct LAPIC *lapic; 3802 uint64_t val, pirval; 3803 int rvi, pirbase = -1; 3804 uint16_t intr_status_old, intr_status_new; 3805 3806 vlapic_vtx = (struct vlapic_vtx *)vlapic; 3807 pir_desc = vlapic_vtx->pir_desc; 3808 if (atomic_cmpset_long(&pir_desc->pending, 1, 0) == 0) { 3809 VCPU_CTR0(vlapic->vm, vlapic->vcpuid, "vmx_inject_pir: " 3810 "no posted interrupt pending"); 3811 return; 3812 } 3813 3814 pirval = 0; 3815 pirbase = -1; 3816 lapic = vlapic->apic_page; 3817 3818 val = atomic_readandclear_long(&pir_desc->pir[0]); 3819 if (val != 0) { 3820 lapic->irr0 |= val; 3821 lapic->irr1 |= val >> 32; 3822 pirbase = 0; 3823 pirval = val; 3824 } 3825 3826 val = atomic_readandclear_long(&pir_desc->pir[1]); 3827 if (val != 0) { 3828 lapic->irr2 |= val; 3829 lapic->irr3 |= val >> 32; 3830 pirbase = 64; 3831 pirval = val; 3832 } 3833 3834 val = atomic_readandclear_long(&pir_desc->pir[2]); 3835 if (val != 0) { 3836 lapic->irr4 |= val; 3837 lapic->irr5 |= val >> 32; 3838 pirbase = 128; 3839 pirval = val; 3840 } 3841 3842 val = atomic_readandclear_long(&pir_desc->pir[3]); 3843 if (val != 0) { 3844 lapic->irr6 |= val; 3845 lapic->irr7 |= val >> 32; 3846 pirbase = 192; 3847 pirval = val; 3848 } 3849 3850 VLAPIC_CTR_IRR(vlapic, "vmx_inject_pir"); 3851 3852 /* 3853 * Update RVI so the processor can evaluate pending virtual 3854 * interrupts on VM-entry. 3855 * 3856 * It is possible for pirval to be 0 here, even though the 3857 * pending bit has been set. The scenario is: 3858 * CPU-Y is sending a posted interrupt to CPU-X, which 3859 * is running a guest and processing posted interrupts in h/w. 3860 * CPU-X will eventually exit and the state seen in s/w is 3861 * the pending bit set, but no PIR bits set. 
         *
         *      CPU-X                       CPU-Y
         *    (vm running)               (host running)
         * rx posted interrupt
         * CLEAR pending bit
         *                               SET PIR bit
         * READ/CLEAR PIR bits
         *                               SET pending bit
         * (vm exit)
         * pending bit set, PIR 0
         */
        if (pirval != 0) {
                rvi = pirbase + flsl(pirval) - 1;
                intr_status_old = vmcs_read(VMCS_GUEST_INTR_STATUS);
                intr_status_new = (intr_status_old & 0xFF00) | rvi;
                if (intr_status_new > intr_status_old) {
                        vmcs_write(VMCS_GUEST_INTR_STATUS, intr_status_new);
                        VCPU_CTR2(vlapic->vm, vlapic->vcpuid, "vmx_inject_pir: "
                            "guest_intr_status changed from 0x%04x to 0x%04x",
                            intr_status_old, intr_status_new);
                }
        }
}

static struct vlapic *
vmx_vlapic_init(void *arg, int vcpuid)
{
        struct vmx *vmx;
        struct vlapic *vlapic;
        struct vlapic_vtx *vlapic_vtx;

        vmx = arg;

        vlapic = malloc(sizeof(struct vlapic_vtx), M_VLAPIC, M_WAITOK | M_ZERO);
        vlapic->vm = vmx->vm;
        vlapic->vcpuid = vcpuid;
        vlapic->apic_page = (struct LAPIC *)&vmx->apic_page[vcpuid];

        vlapic_vtx = (struct vlapic_vtx *)vlapic;
        vlapic_vtx->pir_desc = &vmx->pir_desc[vcpuid];
        vlapic_vtx->vmx = vmx;

        if (vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW)) {
                vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode_ts;
        }
        if (vmx_cap_en(vmx, VMX_CAP_APICV)) {
                vlapic->ops.set_intr_ready = vmx_set_intr_ready;
                vlapic->ops.pending_intr = vmx_pending_intr;
                vlapic->ops.intr_accepted = vmx_intr_accepted;
                vlapic->ops.set_tmr = vmx_set_tmr;
                vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode_vid;

                if (vmx_cap_en(vmx, VMX_CAP_APICV_PIR)) {
                        vlapic->ops.post_intr = vmx_post_intr;
                }
        }

        vlapic_init(vlapic);

        return (vlapic);
}

static void
vmx_vlapic_cleanup(void *arg, struct vlapic *vlapic)
{

        vlapic_cleanup(vlapic);
        free(vlapic, M_VLAPIC);
}

#ifndef __FreeBSD__
static void
vmx_savectx(void *arg, int vcpu)
{
        struct vmx *vmx = arg;
        struct vmcs *vmcs = &vmx->vmcs[vcpu];

        if ((vmx->vmcs_state[vcpu] & VS_LOADED) != 0) {
                VERIFY3U(vmclear(vmcs), ==, 0);
                vmx_msr_guest_exit(vmx, vcpu);
                /*
                 * Having VMCLEARed the VMCS, it can no longer be re-entered
                 * with VMRESUME, but must be VMLAUNCHed again.
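                 *
                 * VMCLEAR also flushes the cached VMCS state back to memory,
                 * so the vCPU can safely be resumed on a different host CPU
                 * after this context save.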
                 */
                vmx->vmcs_state[vcpu] &= ~VS_LAUNCHED;
        }

        reset_gdtr_limit();
}

static void
vmx_restorectx(void *arg, int vcpu)
{
        struct vmx *vmx = arg;
        struct vmcs *vmcs = &vmx->vmcs[vcpu];

        ASSERT0(vmx->vmcs_state[vcpu] & VS_LAUNCHED);

        if ((vmx->vmcs_state[vcpu] & VS_LOADED) != 0) {
                vmx_msr_guest_enter(vmx, vcpu);
                VERIFY3U(vmptrld(vmcs), ==, 0);
        }
}
#endif /* __FreeBSD__ */

struct vmm_ops vmm_ops_intel = {
        .init           = vmx_init,
        .cleanup        = vmx_cleanup,
        .resume         = vmx_restore,
        .vminit         = vmx_vminit,
        .vmrun          = vmx_run,
        .vmcleanup      = vmx_vmcleanup,
        .vmgetreg       = vmx_getreg,
        .vmsetreg       = vmx_setreg,
        .vmgetdesc      = vmx_getdesc,
        .vmsetdesc      = vmx_setdesc,
        .vmgetcap       = vmx_getcap,
        .vmsetcap       = vmx_setcap,
        .vmspace_alloc  = ept_vmspace_alloc,
        .vmspace_free   = ept_vmspace_free,
        .vlapic_init    = vmx_vlapic_init,
        .vlapic_cleanup = vmx_vlapic_cleanup,

#ifndef __FreeBSD__
        .vmsavectx      = vmx_savectx,
        .vmrestorectx   = vmx_restorectx,
#endif
};

#ifndef __FreeBSD__
/* Side-effect free HW validation derived from checks in vmx_init. */
int
vmx_x86_supported(const char **msg)
{
        int error;
        uint32_t tmp;

        ASSERT(msg != NULL);

        /* Check support for primary processor-based VM-execution controls */
        error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
            MSR_VMX_TRUE_PROCBASED_CTLS, PROCBASED_CTLS_ONE_SETTING,
            PROCBASED_CTLS_ZERO_SETTING, &tmp);
        if (error) {
                *msg = "processor does not support desired primary "
                    "processor-based controls";
                return (error);
        }

        /* Check support for secondary processor-based VM-execution controls */
        error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
            MSR_VMX_PROCBASED_CTLS2, PROCBASED_CTLS2_ONE_SETTING,
            PROCBASED_CTLS2_ZERO_SETTING, &tmp);
        if (error) {
                *msg = "processor does not support desired secondary "
                    "processor-based controls";
                return (error);
        }

        /* Check support for pin-based VM-execution controls */
        error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
            MSR_VMX_TRUE_PINBASED_CTLS, PINBASED_CTLS_ONE_SETTING,
            PINBASED_CTLS_ZERO_SETTING, &tmp);
        if (error) {
                *msg = "processor does not support desired pin-based controls";
                return (error);
        }

        /* Check support for VM-exit controls */
        error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS,
            VM_EXIT_CTLS_ONE_SETTING, VM_EXIT_CTLS_ZERO_SETTING, &tmp);
        if (error) {
                *msg = "processor does not support desired exit controls";
                return (error);
        }

        /* Check support for VM-entry controls */
        error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS, MSR_VMX_TRUE_ENTRY_CTLS,
            VM_ENTRY_CTLS_ONE_SETTING, VM_ENTRY_CTLS_ZERO_SETTING, &tmp);
        if (error) {
                *msg = "processor does not support desired entry controls";
                return (error);
        }

        /* Unrestricted guest is nominally optional, but not for us. */
        error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2,
            PROCBASED2_UNRESTRICTED_GUEST, 0, &tmp);
        if (error) {
                *msg = "processor does not support desired unrestricted guest "
                    "controls";
                return (error);
        }

        return (0);
}
#endif