1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2011 NetApp, Inc. 5 * All rights reserved. 6 * Copyright (c) 2018 Joyent, Inc. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 * 29 * $FreeBSD$ 30 */ 31 /* 32 * This file and its contents are supplied under the terms of the 33 * Common Development and Distribution License ("CDDL"), version 1.0. 34 * You may only use this file in accordance with the terms of version 35 * 1.0 of the CDDL. 36 * 37 * A full copy of the text of the CDDL should have accompanied this 38 * source. A copy of the CDDL is also available via the Internet at 39 * http://www.illumos.org/license/CDDL. 40 * 41 * Copyright 2015 Pluribus Networks Inc. 42 * Copyright 2018 Joyent, Inc. 
43 * Copyright 2020 Oxide Computer Company 44 */ 45 46 #include <sys/cdefs.h> 47 __FBSDID("$FreeBSD$"); 48 49 #include <sys/param.h> 50 #include <sys/systm.h> 51 #include <sys/smp.h> 52 #include <sys/kernel.h> 53 #include <sys/malloc.h> 54 #include <sys/pcpu.h> 55 #include <sys/proc.h> 56 #include <sys/sysctl.h> 57 58 #ifndef __FreeBSD__ 59 #include <sys/x86_archext.h> 60 #include <sys/smp_impldefs.h> 61 #include <sys/smt.h> 62 #include <sys/hma.h> 63 #include <sys/trap.h> 64 #endif 65 66 #include <vm/vm.h> 67 #include <vm/pmap.h> 68 69 #include <machine/psl.h> 70 #include <machine/cpufunc.h> 71 #include <machine/md_var.h> 72 #include <machine/reg.h> 73 #include <machine/segments.h> 74 #include <machine/smp.h> 75 #include <machine/specialreg.h> 76 #include <machine/vmparam.h> 77 78 #include <machine/vmm.h> 79 #include <machine/vmm_dev.h> 80 #include <sys/vmm_instruction_emul.h> 81 #include "vmm_lapic.h" 82 #include "vmm_host.h" 83 #include "vmm_ioport.h" 84 #include "vmm_ktr.h" 85 #include "vmm_stat.h" 86 #include "vatpic.h" 87 #include "vlapic.h" 88 #include "vlapic_priv.h" 89 90 #include "ept.h" 91 #include "vmcs.h" 92 #include "vmx.h" 93 #include "vmx_msr.h" 94 #include "x86.h" 95 #include "vmx_controls.h" 96 97 #define PINBASED_CTLS_ONE_SETTING \ 98 (PINBASED_EXTINT_EXITING | \ 99 PINBASED_NMI_EXITING | \ 100 PINBASED_VIRTUAL_NMI) 101 #define PINBASED_CTLS_ZERO_SETTING 0 102 103 #define PROCBASED_CTLS_WINDOW_SETTING \ 104 (PROCBASED_INT_WINDOW_EXITING | \ 105 PROCBASED_NMI_WINDOW_EXITING) 106 107 #ifdef __FreeBSD__ 108 #define PROCBASED_CTLS_ONE_SETTING \ 109 (PROCBASED_SECONDARY_CONTROLS | \ 110 PROCBASED_MWAIT_EXITING | \ 111 PROCBASED_MONITOR_EXITING | \ 112 PROCBASED_IO_EXITING | \ 113 PROCBASED_MSR_BITMAPS | \ 114 PROCBASED_CTLS_WINDOW_SETTING | \ 115 PROCBASED_CR8_LOAD_EXITING | \ 116 PROCBASED_CR8_STORE_EXITING) 117 #else 118 /* We consider TSC offset a necessity for unsynched TSC handling */ 119 #define PROCBASED_CTLS_ONE_SETTING \ 120 (PROCBASED_SECONDARY_CONTROLS | \ 121 PROCBASED_TSC_OFFSET | \ 122 PROCBASED_MWAIT_EXITING | \ 123 PROCBASED_MONITOR_EXITING | \ 124 PROCBASED_IO_EXITING | \ 125 PROCBASED_MSR_BITMAPS | \ 126 PROCBASED_CTLS_WINDOW_SETTING | \ 127 PROCBASED_CR8_LOAD_EXITING | \ 128 PROCBASED_CR8_STORE_EXITING) 129 #endif /* __FreeBSD__ */ 130 131 #define PROCBASED_CTLS_ZERO_SETTING \ 132 (PROCBASED_CR3_LOAD_EXITING | \ 133 PROCBASED_CR3_STORE_EXITING | \ 134 PROCBASED_IO_BITMAPS) 135 136 /* 137 * EPT and Unrestricted Guest are considered necessities. The latter is not a 138 * requirement on FreeBSD, where grub2-bhyve is used to load guests directly 139 * without a bootrom starting in real mode. 
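 * Here, by contrast, guests are started from a bootrom that begins
 * execution in real mode, so Unrestricted Guest support is required.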
140 */ 141 #define PROCBASED_CTLS2_ONE_SETTING \ 142 (PROCBASED2_ENABLE_EPT | \ 143 PROCBASED2_UNRESTRICTED_GUEST) 144 #define PROCBASED_CTLS2_ZERO_SETTING 0 145 146 #define VM_EXIT_CTLS_ONE_SETTING \ 147 (VM_EXIT_SAVE_DEBUG_CONTROLS | \ 148 VM_EXIT_HOST_LMA | \ 149 VM_EXIT_LOAD_PAT | \ 150 VM_EXIT_SAVE_EFER | \ 151 VM_EXIT_LOAD_EFER | \ 152 VM_EXIT_ACKNOWLEDGE_INTERRUPT) 153 154 #define VM_EXIT_CTLS_ZERO_SETTING 0 155 156 #define VM_ENTRY_CTLS_ONE_SETTING \ 157 (VM_ENTRY_LOAD_DEBUG_CONTROLS | \ 158 VM_ENTRY_LOAD_EFER) 159 160 #define VM_ENTRY_CTLS_ZERO_SETTING \ 161 (VM_ENTRY_INTO_SMM | \ 162 VM_ENTRY_DEACTIVATE_DUAL_MONITOR) 163 164 #define HANDLED 1 165 #define UNHANDLED 0 166 167 static MALLOC_DEFINE(M_VMX, "vmx", "vmx"); 168 static MALLOC_DEFINE(M_VLAPIC, "vlapic", "vlapic"); 169 170 SYSCTL_DECL(_hw_vmm); 171 SYSCTL_NODE(_hw_vmm, OID_AUTO, vmx, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 172 NULL); 173 174 static uint32_t pinbased_ctls, procbased_ctls, procbased_ctls2; 175 static uint32_t exit_ctls, entry_ctls; 176 177 static uint64_t cr0_ones_mask, cr0_zeros_mask; 178 SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_ones_mask, CTLFLAG_RD, 179 &cr0_ones_mask, 0, NULL); 180 SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_zeros_mask, CTLFLAG_RD, 181 &cr0_zeros_mask, 0, NULL); 182 183 static uint64_t cr4_ones_mask, cr4_zeros_mask; 184 SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_ones_mask, CTLFLAG_RD, 185 &cr4_ones_mask, 0, NULL); 186 SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_zeros_mask, CTLFLAG_RD, 187 &cr4_zeros_mask, 0, NULL); 188 189 static int vmx_initialized; 190 SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, initialized, CTLFLAG_RD, 191 &vmx_initialized, 0, "Intel VMX initialized"); 192 193 static int no_flush_rsb; 194 SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, no_flush_rsb, CTLFLAG_RW, 195 &no_flush_rsb, 0, "Do not flush RSB upon vmexit"); 196 197 /* 198 * Optional capabilities 199 */ 200 #ifdef __FreeBSD__ 201 SYSCTL_DECL(_hw_vmm_vmx); 202 static SYSCTL_NODE(_hw_vmm_vmx, OID_AUTO, cap, 203 CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 204 NULL); 205 #endif 206 207 static int cap_halt_exit; 208 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, halt_exit, CTLFLAG_RD, &cap_halt_exit, 0, 209 "HLT triggers a VM-exit"); 210 211 static int cap_pause_exit; 212 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, pause_exit, CTLFLAG_RD, &cap_pause_exit, 213 0, "PAUSE triggers a VM-exit"); 214 215 static int cap_monitor_trap; 216 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, monitor_trap, CTLFLAG_RD, 217 &cap_monitor_trap, 0, "Monitor trap flag"); 218 219 static int cap_invpcid; 220 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, invpcid, CTLFLAG_RD, &cap_invpcid, 221 0, "Guests are allowed to use INVPCID"); 222 223 /* Extra capabilities (VMX_CAP_*) beyond the minimum */ 224 static enum vmx_caps vmx_capabilities; 225 226 static int pirvec = -1; 227 SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, posted_interrupt_vector, CTLFLAG_RD, 228 &pirvec, 0, "APICv posted interrupt vector"); 229 230 #ifdef __FreeBSD__ 231 static struct unrhdr *vpid_unr; 232 #endif /* __FreeBSD__ */ 233 static u_int vpid_alloc_failed; 234 SYSCTL_UINT(_hw_vmm_vmx, OID_AUTO, vpid_alloc_failed, CTLFLAG_RD, 235 &vpid_alloc_failed, 0, NULL); 236 237 int guest_l1d_flush; 238 SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, l1d_flush, CTLFLAG_RD, 239 &guest_l1d_flush, 0, NULL); 240 int guest_l1d_flush_sw; 241 SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, l1d_flush_sw, CTLFLAG_RD, 242 &guest_l1d_flush_sw, 0, NULL); 243 244 /* MSR save region is composed of an array of 'struct msr_entry' */ 245 struct msr_entry { 246 uint32_t index; 247 uint32_t reserved; 248 uint64_t val; 
249 }; 250 251 static struct msr_entry msr_load_list[1] __aligned(16); 252 253 /* 254 * The definitions of SDT probes for VMX. 255 */ 256 257 SDT_PROBE_DEFINE3(vmm, vmx, exit, entry, 258 "struct vmx *", "int", "struct vm_exit *"); 259 260 SDT_PROBE_DEFINE4(vmm, vmx, exit, taskswitch, 261 "struct vmx *", "int", "struct vm_exit *", "struct vm_task_switch *"); 262 263 SDT_PROBE_DEFINE4(vmm, vmx, exit, craccess, 264 "struct vmx *", "int", "struct vm_exit *", "uint64_t"); 265 266 SDT_PROBE_DEFINE4(vmm, vmx, exit, rdmsr, 267 "struct vmx *", "int", "struct vm_exit *", "uint32_t"); 268 269 SDT_PROBE_DEFINE5(vmm, vmx, exit, wrmsr, 270 "struct vmx *", "int", "struct vm_exit *", "uint32_t", "uint64_t"); 271 272 SDT_PROBE_DEFINE3(vmm, vmx, exit, halt, 273 "struct vmx *", "int", "struct vm_exit *"); 274 275 SDT_PROBE_DEFINE3(vmm, vmx, exit, mtrap, 276 "struct vmx *", "int", "struct vm_exit *"); 277 278 SDT_PROBE_DEFINE3(vmm, vmx, exit, pause, 279 "struct vmx *", "int", "struct vm_exit *"); 280 281 SDT_PROBE_DEFINE3(vmm, vmx, exit, intrwindow, 282 "struct vmx *", "int", "struct vm_exit *"); 283 284 SDT_PROBE_DEFINE4(vmm, vmx, exit, interrupt, 285 "struct vmx *", "int", "struct vm_exit *", "uint32_t"); 286 287 SDT_PROBE_DEFINE3(vmm, vmx, exit, nmiwindow, 288 "struct vmx *", "int", "struct vm_exit *"); 289 290 SDT_PROBE_DEFINE3(vmm, vmx, exit, inout, 291 "struct vmx *", "int", "struct vm_exit *"); 292 293 SDT_PROBE_DEFINE3(vmm, vmx, exit, cpuid, 294 "struct vmx *", "int", "struct vm_exit *"); 295 296 SDT_PROBE_DEFINE5(vmm, vmx, exit, exception, 297 "struct vmx *", "int", "struct vm_exit *", "uint32_t", "int"); 298 299 SDT_PROBE_DEFINE5(vmm, vmx, exit, nestedfault, 300 "struct vmx *", "int", "struct vm_exit *", "uint64_t", "uint64_t"); 301 302 SDT_PROBE_DEFINE4(vmm, vmx, exit, mmiofault, 303 "struct vmx *", "int", "struct vm_exit *", "uint64_t"); 304 305 SDT_PROBE_DEFINE3(vmm, vmx, exit, eoi, 306 "struct vmx *", "int", "struct vm_exit *"); 307 308 SDT_PROBE_DEFINE3(vmm, vmx, exit, apicaccess, 309 "struct vmx *", "int", "struct vm_exit *"); 310 311 SDT_PROBE_DEFINE4(vmm, vmx, exit, apicwrite, 312 "struct vmx *", "int", "struct vm_exit *", "struct vlapic *"); 313 314 SDT_PROBE_DEFINE3(vmm, vmx, exit, xsetbv, 315 "struct vmx *", "int", "struct vm_exit *"); 316 317 SDT_PROBE_DEFINE3(vmm, vmx, exit, monitor, 318 "struct vmx *", "int", "struct vm_exit *"); 319 320 SDT_PROBE_DEFINE3(vmm, vmx, exit, mwait, 321 "struct vmx *", "int", "struct vm_exit *"); 322 323 SDT_PROBE_DEFINE3(vmm, vmx, exit, vminsn, 324 "struct vmx *", "int", "struct vm_exit *"); 325 326 SDT_PROBE_DEFINE4(vmm, vmx, exit, unknown, 327 "struct vmx *", "int", "struct vm_exit *", "uint32_t"); 328 329 SDT_PROBE_DEFINE4(vmm, vmx, exit, return, 330 "struct vmx *", "int", "struct vm_exit *", "int"); 331 332 /* 333 * Use the last page below 4GB as the APIC access address. This address is 334 * occupied by the boot firmware so it is guaranteed that it will not conflict 335 * with a page in system memory. 
336 */ 337 #define APIC_ACCESS_ADDRESS 0xFFFFF000 338 339 static int vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc); 340 static int vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval); 341 static void vmx_inject_pir(struct vlapic *vlapic); 342 static void vmx_apply_tsc_adjust(struct vmx *, int); 343 344 #ifdef KTR 345 static const char * 346 exit_reason_to_str(int reason) 347 { 348 static char reasonbuf[32]; 349 350 switch (reason) { 351 case EXIT_REASON_EXCEPTION: 352 return "exception"; 353 case EXIT_REASON_EXT_INTR: 354 return "extint"; 355 case EXIT_REASON_TRIPLE_FAULT: 356 return "triplefault"; 357 case EXIT_REASON_INIT: 358 return "init"; 359 case EXIT_REASON_SIPI: 360 return "sipi"; 361 case EXIT_REASON_IO_SMI: 362 return "iosmi"; 363 case EXIT_REASON_SMI: 364 return "smi"; 365 case EXIT_REASON_INTR_WINDOW: 366 return "intrwindow"; 367 case EXIT_REASON_NMI_WINDOW: 368 return "nmiwindow"; 369 case EXIT_REASON_TASK_SWITCH: 370 return "taskswitch"; 371 case EXIT_REASON_CPUID: 372 return "cpuid"; 373 case EXIT_REASON_GETSEC: 374 return "getsec"; 375 case EXIT_REASON_HLT: 376 return "hlt"; 377 case EXIT_REASON_INVD: 378 return "invd"; 379 case EXIT_REASON_INVLPG: 380 return "invlpg"; 381 case EXIT_REASON_RDPMC: 382 return "rdpmc"; 383 case EXIT_REASON_RDTSC: 384 return "rdtsc"; 385 case EXIT_REASON_RSM: 386 return "rsm"; 387 case EXIT_REASON_VMCALL: 388 return "vmcall"; 389 case EXIT_REASON_VMCLEAR: 390 return "vmclear"; 391 case EXIT_REASON_VMLAUNCH: 392 return "vmlaunch"; 393 case EXIT_REASON_VMPTRLD: 394 return "vmptrld"; 395 case EXIT_REASON_VMPTRST: 396 return "vmptrst"; 397 case EXIT_REASON_VMREAD: 398 return "vmread"; 399 case EXIT_REASON_VMRESUME: 400 return "vmresume"; 401 case EXIT_REASON_VMWRITE: 402 return "vmwrite"; 403 case EXIT_REASON_VMXOFF: 404 return "vmxoff"; 405 case EXIT_REASON_VMXON: 406 return "vmxon"; 407 case EXIT_REASON_CR_ACCESS: 408 return "craccess"; 409 case EXIT_REASON_DR_ACCESS: 410 return "draccess"; 411 case EXIT_REASON_INOUT: 412 return "inout"; 413 case EXIT_REASON_RDMSR: 414 return "rdmsr"; 415 case EXIT_REASON_WRMSR: 416 return "wrmsr"; 417 case EXIT_REASON_INVAL_VMCS: 418 return "invalvmcs"; 419 case EXIT_REASON_INVAL_MSR: 420 return "invalmsr"; 421 case EXIT_REASON_MWAIT: 422 return "mwait"; 423 case EXIT_REASON_MTF: 424 return "mtf"; 425 case EXIT_REASON_MONITOR: 426 return "monitor"; 427 case EXIT_REASON_PAUSE: 428 return "pause"; 429 case EXIT_REASON_MCE_DURING_ENTRY: 430 return "mce-during-entry"; 431 case EXIT_REASON_TPR: 432 return "tpr"; 433 case EXIT_REASON_APIC_ACCESS: 434 return "apic-access"; 435 case EXIT_REASON_GDTR_IDTR: 436 return "gdtridtr"; 437 case EXIT_REASON_LDTR_TR: 438 return "ldtrtr"; 439 case EXIT_REASON_EPT_FAULT: 440 return "eptfault"; 441 case EXIT_REASON_EPT_MISCONFIG: 442 return "eptmisconfig"; 443 case EXIT_REASON_INVEPT: 444 return "invept"; 445 case EXIT_REASON_RDTSCP: 446 return "rdtscp"; 447 case EXIT_REASON_VMX_PREEMPT: 448 return "vmxpreempt"; 449 case EXIT_REASON_INVVPID: 450 return "invvpid"; 451 case EXIT_REASON_WBINVD: 452 return "wbinvd"; 453 case EXIT_REASON_XSETBV: 454 return "xsetbv"; 455 case EXIT_REASON_APIC_WRITE: 456 return "apic-write"; 457 default: 458 snprintf(reasonbuf, sizeof(reasonbuf), "%d", reason); 459 return (reasonbuf); 460 } 461 } 462 #endif /* KTR */ 463 464 static int 465 vmx_allow_x2apic_msrs(struct vmx *vmx) 466 { 467 int i, error; 468 469 error = 0; 470 471 /* 472 * Allow readonly access to the following x2APIC MSRs from the guest. 
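 * Read-only in this context means that guest reads of these MSRs do not
 * cause a VM-exit, while guest writes still trap to the hypervisor.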
473 */ 474 error += guest_msr_ro(vmx, MSR_APIC_ID); 475 error += guest_msr_ro(vmx, MSR_APIC_VERSION); 476 error += guest_msr_ro(vmx, MSR_APIC_LDR); 477 error += guest_msr_ro(vmx, MSR_APIC_SVR); 478 479 for (i = 0; i < 8; i++) 480 error += guest_msr_ro(vmx, MSR_APIC_ISR0 + i); 481 482 for (i = 0; i < 8; i++) 483 error += guest_msr_ro(vmx, MSR_APIC_TMR0 + i); 484 485 for (i = 0; i < 8; i++) 486 error += guest_msr_ro(vmx, MSR_APIC_IRR0 + i); 487 488 error += guest_msr_ro(vmx, MSR_APIC_ESR); 489 error += guest_msr_ro(vmx, MSR_APIC_LVT_TIMER); 490 error += guest_msr_ro(vmx, MSR_APIC_LVT_THERMAL); 491 error += guest_msr_ro(vmx, MSR_APIC_LVT_PCINT); 492 error += guest_msr_ro(vmx, MSR_APIC_LVT_LINT0); 493 error += guest_msr_ro(vmx, MSR_APIC_LVT_LINT1); 494 error += guest_msr_ro(vmx, MSR_APIC_LVT_ERROR); 495 error += guest_msr_ro(vmx, MSR_APIC_ICR_TIMER); 496 error += guest_msr_ro(vmx, MSR_APIC_DCR_TIMER); 497 error += guest_msr_ro(vmx, MSR_APIC_ICR); 498 499 /* 500 * Allow TPR, EOI and SELF_IPI MSRs to be read and written by the guest. 501 * 502 * These registers get special treatment described in the section 503 * "Virtualizing MSR-Based APIC Accesses". 504 */ 505 error += guest_msr_rw(vmx, MSR_APIC_TPR); 506 error += guest_msr_rw(vmx, MSR_APIC_EOI); 507 error += guest_msr_rw(vmx, MSR_APIC_SELF_IPI); 508 509 return (error); 510 } 511 512 static u_long 513 vmx_fix_cr0(u_long cr0) 514 { 515 return ((cr0 | cr0_ones_mask) & ~cr0_zeros_mask); 516 } 517 518 static u_long 519 vmx_fix_cr4(u_long cr4) 520 { 521 return ((cr4 | cr4_ones_mask) & ~cr4_zeros_mask); 522 } 523 524 static void 525 vpid_free(int vpid) 526 { 527 if (vpid < 0 || vpid > 0xffff) 528 panic("vpid_free: invalid vpid %d", vpid); 529 530 /* 531 * VPIDs [0,VM_MAXCPU] are special and are not allocated from 532 * the unit number allocator. 533 */ 534 535 if (vpid > VM_MAXCPU) 536 #ifdef __FreeBSD__ 537 free_unr(vpid_unr, vpid); 538 #else 539 hma_vmx_vpid_free((uint16_t)vpid); 540 #endif 541 } 542 543 static void 544 vpid_alloc(uint16_t *vpid, int num) 545 { 546 int i, x; 547 548 if (num <= 0 || num > VM_MAXCPU) 549 panic("invalid number of vpids requested: %d", num); 550 551 /* 552 * If the "enable vpid" execution control is not enabled then the 553 * VPID is required to be 0 for all vcpus. 554 */ 555 if ((procbased_ctls2 & PROCBASED2_ENABLE_VPID) == 0) { 556 for (i = 0; i < num; i++) 557 vpid[i] = 0; 558 return; 559 } 560 561 /* 562 * Allocate a unique VPID for each vcpu from the unit number allocator. 563 */ 564 for (i = 0; i < num; i++) { 565 #ifdef __FreeBSD__ 566 x = alloc_unr(vpid_unr); 567 #else 568 uint16_t tmp; 569 570 tmp = hma_vmx_vpid_alloc(); 571 x = (tmp == 0) ? -1 : tmp; 572 #endif 573 if (x == -1) 574 break; 575 else 576 vpid[i] = x; 577 } 578 579 if (i < num) { 580 atomic_add_int(&vpid_alloc_failed, 1); 581 582 /* 583 * If the unit number allocator does not have enough unique 584 * VPIDs then we need to allocate from the [1,VM_MAXCPU] range. 585 * 586 * These VPIDs are not be unique across VMs but this does not 587 * affect correctness because the combined mappings are also 588 * tagged with the EP4TA which is unique for each VM. 589 * 590 * It is still sub-optimal because the invvpid will invalidate 591 * combined mappings for a particular VPID across all EP4TAs. 
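 * For example, if only some of the requested VPIDs can be allocated,
 * the partial allocations are returned below and each vcpu 'i' simply
 * falls back to the shared VPID 'i + 1'.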
592 */ 593 while (i-- > 0) 594 vpid_free(vpid[i]); 595 596 for (i = 0; i < num; i++) 597 vpid[i] = i + 1; 598 } 599 } 600 601 static int 602 vmx_cleanup(void) 603 { 604 /* This is taken care of by the hma registration */ 605 return (0); 606 } 607 608 static void 609 vmx_restore(void) 610 { 611 /* No-op on illumos */ 612 } 613 614 static int 615 vmx_init(int ipinum) 616 { 617 int error; 618 uint64_t fixed0, fixed1; 619 uint32_t tmp; 620 enum vmx_caps avail_caps = VMX_CAP_NONE; 621 622 /* Check support for primary processor-based VM-execution controls */ 623 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, 624 MSR_VMX_TRUE_PROCBASED_CTLS, 625 PROCBASED_CTLS_ONE_SETTING, 626 PROCBASED_CTLS_ZERO_SETTING, &procbased_ctls); 627 if (error) { 628 printf("vmx_init: processor does not support desired primary " 629 "processor-based controls\n"); 630 return (error); 631 } 632 633 /* Clear the processor-based ctl bits that are set on demand */ 634 procbased_ctls &= ~PROCBASED_CTLS_WINDOW_SETTING; 635 636 /* Check support for secondary processor-based VM-execution controls */ 637 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, 638 MSR_VMX_PROCBASED_CTLS2, 639 PROCBASED_CTLS2_ONE_SETTING, 640 PROCBASED_CTLS2_ZERO_SETTING, &procbased_ctls2); 641 if (error) { 642 printf("vmx_init: processor does not support desired secondary " 643 "processor-based controls\n"); 644 return (error); 645 } 646 647 /* Check support for VPID */ 648 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2, 649 PROCBASED2_ENABLE_VPID, 0, &tmp); 650 if (error == 0) 651 procbased_ctls2 |= PROCBASED2_ENABLE_VPID; 652 653 /* Check support for pin-based VM-execution controls */ 654 error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS, 655 MSR_VMX_TRUE_PINBASED_CTLS, 656 PINBASED_CTLS_ONE_SETTING, 657 PINBASED_CTLS_ZERO_SETTING, &pinbased_ctls); 658 if (error) { 659 printf("vmx_init: processor does not support desired " 660 "pin-based controls\n"); 661 return (error); 662 } 663 664 /* Check support for VM-exit controls */ 665 error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS, 666 VM_EXIT_CTLS_ONE_SETTING, 667 VM_EXIT_CTLS_ZERO_SETTING, 668 &exit_ctls); 669 if (error) { 670 printf("vmx_init: processor does not support desired " 671 "exit controls\n"); 672 return (error); 673 } 674 675 /* Check support for VM-entry controls */ 676 error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS, MSR_VMX_TRUE_ENTRY_CTLS, 677 VM_ENTRY_CTLS_ONE_SETTING, VM_ENTRY_CTLS_ZERO_SETTING, 678 &entry_ctls); 679 if (error) { 680 printf("vmx_init: processor does not support desired " 681 "entry controls\n"); 682 return (error); 683 } 684 685 /* 686 * Check support for optional features by testing them 687 * as individual bits 688 */ 689 cap_halt_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, 690 MSR_VMX_TRUE_PROCBASED_CTLS, 691 PROCBASED_HLT_EXITING, 0, 692 &tmp) == 0); 693 694 cap_monitor_trap = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, 695 MSR_VMX_PROCBASED_CTLS, 696 PROCBASED_MTF, 0, 697 &tmp) == 0); 698 699 cap_pause_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, 700 MSR_VMX_TRUE_PROCBASED_CTLS, 701 PROCBASED_PAUSE_EXITING, 0, 702 &tmp) == 0); 703 704 cap_invpcid = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, 705 MSR_VMX_PROCBASED_CTLS2, PROCBASED2_ENABLE_INVPCID, 0, 706 &tmp) == 0); 707 708 /* Check for APIC virtualization capabilities: 709 * - TPR shadowing 710 * - Full APICv (with or without x2APIC support) 711 * - Posted interrupt handling 712 */ 713 if (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, MSR_VMX_TRUE_PROCBASED_CTLS, 714 PROCBASED_USE_TPR_SHADOW, 
0, &tmp) == 0) { 715 avail_caps |= VMX_CAP_TPR_SHADOW; 716 717 const uint32_t apicv_bits = 718 PROCBASED2_VIRTUALIZE_APIC_ACCESSES | 719 PROCBASED2_APIC_REGISTER_VIRTUALIZATION | 720 PROCBASED2_VIRTUALIZE_X2APIC_MODE | 721 PROCBASED2_VIRTUAL_INTERRUPT_DELIVERY; 722 if (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, 723 MSR_VMX_PROCBASED_CTLS2, apicv_bits, 0, &tmp) == 0) { 724 avail_caps |= VMX_CAP_APICV; 725 726 /* 727 * It may make sense in the future to differentiate 728 * hardware (or software) configurations with APICv but 729 * no support for accelerating x2APIC mode. 730 */ 731 avail_caps |= VMX_CAP_APICV_X2APIC; 732 733 error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS, 734 MSR_VMX_TRUE_PINBASED_CTLS, 735 PINBASED_POSTED_INTERRUPT, 0, &tmp); 736 if (error == 0) { 737 /* 738 * If the PSM-provided interfaces for requesting 739 * and using a PIR IPI vector are present, use 740 * them for posted interrupts. 741 */ 742 if (psm_get_pir_ipivect != NULL && 743 psm_send_pir_ipi != NULL) { 744 pirvec = psm_get_pir_ipivect(); 745 avail_caps |= VMX_CAP_APICV_PIR; 746 } 747 } 748 } 749 } 750 751 /* Initialize EPT */ 752 error = ept_init(ipinum); 753 if (error) { 754 printf("vmx_init: ept initialization failed (%d)\n", error); 755 return (error); 756 } 757 758 #ifdef __FreeBSD__ 759 guest_l1d_flush = (cpu_ia32_arch_caps & 760 IA32_ARCH_CAP_SKIP_L1DFL_VMENTRY) == 0; 761 TUNABLE_INT_FETCH("hw.vmm.l1d_flush", &guest_l1d_flush); 762 763 /* 764 * L1D cache flush is enabled. Use IA32_FLUSH_CMD MSR when 765 * available. Otherwise fall back to the software flush 766 * method which loads enough data from the kernel text to 767 * flush existing L1D content, both on VMX entry and on NMI 768 * return. 769 */ 770 if (guest_l1d_flush) { 771 if ((cpu_stdext_feature3 & CPUID_STDEXT3_L1D_FLUSH) == 0) { 772 guest_l1d_flush_sw = 1; 773 TUNABLE_INT_FETCH("hw.vmm.l1d_flush_sw", 774 &guest_l1d_flush_sw); 775 } 776 if (guest_l1d_flush_sw) { 777 if (nmi_flush_l1d_sw <= 1) 778 nmi_flush_l1d_sw = 1; 779 } else { 780 msr_load_list[0].index = MSR_IA32_FLUSH_CMD; 781 msr_load_list[0].val = IA32_FLUSH_CMD_L1D; 782 } 783 } 784 #else 785 /* L1D flushing is taken care of by smt_acquire() and friends */ 786 guest_l1d_flush = 0; 787 #endif /* __FreeBSD__ */ 788 789 /* 790 * Stash the cr0 and cr4 bits that must be fixed to 0 or 1 791 */ 792 fixed0 = rdmsr(MSR_VMX_CR0_FIXED0); 793 fixed1 = rdmsr(MSR_VMX_CR0_FIXED1); 794 cr0_ones_mask = fixed0 & fixed1; 795 cr0_zeros_mask = ~fixed0 & ~fixed1; 796 797 /* 798 * Since Unrestricted Guest was already verified present, CR0_PE and 799 * CR0_PG are allowed to be set to zero in VMX non-root operation 800 */ 801 cr0_ones_mask &= ~(CR0_PG | CR0_PE); 802 803 /* 804 * Do not allow the guest to set CR0_NW or CR0_CD. 
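 * Guest memory cacheability is ultimately governed by the host and by
 * EPT, so honoring a guest attempt to disable caching is neither
 * practical nor necessary.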
805 */ 806 cr0_zeros_mask |= (CR0_NW | CR0_CD); 807 808 fixed0 = rdmsr(MSR_VMX_CR4_FIXED0); 809 fixed1 = rdmsr(MSR_VMX_CR4_FIXED1); 810 cr4_ones_mask = fixed0 & fixed1; 811 cr4_zeros_mask = ~fixed0 & ~fixed1; 812 813 vmx_msr_init(); 814 815 vmx_capabilities = avail_caps; 816 vmx_initialized = 1; 817 818 return (0); 819 } 820 821 static void 822 vmx_trigger_hostintr(int vector) 823 { 824 #ifdef __FreeBSD__ 825 uintptr_t func; 826 struct gate_descriptor *gd; 827 828 gd = &idt[vector]; 829 830 KASSERT(vector >= 32 && vector <= 255, ("vmx_trigger_hostintr: " 831 "invalid vector %d", vector)); 832 KASSERT(gd->gd_p == 1, ("gate descriptor for vector %d not present", 833 vector)); 834 KASSERT(gd->gd_type == SDT_SYSIGT, ("gate descriptor for vector %d " 835 "has invalid type %d", vector, gd->gd_type)); 836 KASSERT(gd->gd_dpl == SEL_KPL, ("gate descriptor for vector %d " 837 "has invalid dpl %d", vector, gd->gd_dpl)); 838 KASSERT(gd->gd_selector == GSEL(GCODE_SEL, SEL_KPL), ("gate descriptor " 839 "for vector %d has invalid selector %d", vector, gd->gd_selector)); 840 KASSERT(gd->gd_ist == 0, ("gate descriptor for vector %d has invalid " 841 "IST %d", vector, gd->gd_ist)); 842 843 func = ((long)gd->gd_hioffset << 16 | gd->gd_looffset); 844 vmx_call_isr(func); 845 #else 846 VERIFY(vector >= 32 && vector <= 255); 847 vmx_call_isr(vector - 32); 848 #endif /* __FreeBSD__ */ 849 } 850 851 static void * 852 vmx_vminit(struct vm *vm, pmap_t pmap) 853 { 854 uint16_t vpid[VM_MAXCPU]; 855 int i, error, datasel; 856 struct vmx *vmx; 857 uint32_t exc_bitmap; 858 uint16_t maxcpus; 859 uint32_t proc_ctls, proc2_ctls, pin_ctls; 860 861 vmx = malloc(sizeof(struct vmx), M_VMX, M_WAITOK | M_ZERO); 862 if ((uintptr_t)vmx & PAGE_MASK) { 863 panic("malloc of struct vmx not aligned on %d byte boundary", 864 PAGE_SIZE); 865 } 866 vmx->vm = vm; 867 868 vmx->eptp = eptp(vtophys((vm_offset_t)pmap->pm_pml4)); 869 870 /* 871 * Clean up EPTP-tagged guest physical and combined mappings 872 * 873 * VMX transitions are not required to invalidate any guest physical 874 * mappings. So, it may be possible for stale guest physical mappings 875 * to be present in the processor TLBs. 876 * 877 * Combined mappings for this EP4TA are also invalidated for all VPIDs. 878 */ 879 ept_invalidate_mappings(vmx->eptp); 880 881 msr_bitmap_initialize(vmx->msr_bitmap); 882 883 /* 884 * It is safe to allow direct access to MSR_GSBASE and MSR_FSBASE. 885 * The guest FSBASE and GSBASE are saved and restored during 886 * vm-exit and vm-entry respectively. The host FSBASE and GSBASE are 887 * always restored from the vmcs host state area on vm-exit. 888 * 889 * The SYSENTER_CS/ESP/EIP MSRs are identical to FS/GSBASE in 890 * how they are saved/restored so can be directly accessed by the 891 * guest. 892 * 893 * MSR_EFER is saved and restored in the guest VMCS area on a 894 * VM exit and entry respectively. It is also restored from the 895 * host VMCS area on a VM exit. 896 * 897 * The TSC MSR is exposed read-only. Writes are disallowed as 898 * that will impact the host TSC. If the guest does a write 899 * the "use TSC offsetting" execution control is enabled and the 900 * difference between the host TSC and the guest TSC is written 901 * into the TSC offset in the VMCS. 
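 * In other words, the guest observes guest_tsc = host_tsc + TSC_OFFSET,
 * so a guest write of value 'v' is emulated by setting the offset to
 * (v - host_tsc) rather than modifying the hardware TSC itself.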
902 */ 903 if (guest_msr_rw(vmx, MSR_GSBASE) || 904 guest_msr_rw(vmx, MSR_FSBASE) || 905 guest_msr_rw(vmx, MSR_SYSENTER_CS_MSR) || 906 guest_msr_rw(vmx, MSR_SYSENTER_ESP_MSR) || 907 guest_msr_rw(vmx, MSR_SYSENTER_EIP_MSR) || 908 guest_msr_rw(vmx, MSR_EFER) || 909 guest_msr_ro(vmx, MSR_TSC)) 910 panic("vmx_vminit: error setting guest msr access"); 911 912 vpid_alloc(vpid, VM_MAXCPU); 913 914 /* Grab the established defaults */ 915 proc_ctls = procbased_ctls; 916 proc2_ctls = procbased_ctls2; 917 pin_ctls = pinbased_ctls; 918 /* For now, default to the available capabilities */ 919 vmx->vmx_caps = vmx_capabilities; 920 921 if (vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW)) { 922 proc_ctls |= PROCBASED_USE_TPR_SHADOW; 923 proc_ctls &= ~PROCBASED_CR8_LOAD_EXITING; 924 proc_ctls &= ~PROCBASED_CR8_STORE_EXITING; 925 } 926 if (vmx_cap_en(vmx, VMX_CAP_APICV)) { 927 ASSERT(vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW)); 928 929 proc2_ctls |= (PROCBASED2_VIRTUALIZE_APIC_ACCESSES | 930 PROCBASED2_APIC_REGISTER_VIRTUALIZATION | 931 PROCBASED2_VIRTUAL_INTERRUPT_DELIVERY); 932 933 error = vm_map_mmio(vm, DEFAULT_APIC_BASE, PAGE_SIZE, 934 APIC_ACCESS_ADDRESS); 935 /* XXX this should really return an error to the caller */ 936 KASSERT(error == 0, ("vm_map_mmio(apicbase) error %d", error)); 937 } 938 if (vmx_cap_en(vmx, VMX_CAP_APICV_PIR)) { 939 ASSERT(vmx_cap_en(vmx, VMX_CAP_APICV)); 940 941 pin_ctls |= PINBASED_POSTED_INTERRUPT; 942 } 943 944 maxcpus = vm_get_maxcpus(vm); 945 datasel = vmm_get_host_datasel(); 946 for (i = 0; i < maxcpus; i++) { 947 /* 948 * Cache physical address lookups for various components which 949 * may be required inside the critical_enter() section implied 950 * by VMPTRLD() below. 951 */ 952 vm_paddr_t msr_bitmap_pa = vtophys(vmx->msr_bitmap); 953 vm_paddr_t apic_page_pa = vtophys(&vmx->apic_page[i]); 954 vm_paddr_t pir_desc_pa = vtophys(&vmx->pir_desc[i]); 955 956 vmx->vmcs_pa[i] = (uintptr_t)vtophys(&vmx->vmcs[i]); 957 vmcs_initialize(&vmx->vmcs[i], vmx->vmcs_pa[i]); 958 959 vmx_msr_guest_init(vmx, i); 960 961 vmcs_load(vmx->vmcs_pa[i]); 962 963 vmcs_write(VMCS_HOST_IA32_PAT, vmm_get_host_pat()); 964 vmcs_write(VMCS_HOST_IA32_EFER, vmm_get_host_efer()); 965 966 /* Load the control registers */ 967 vmcs_write(VMCS_HOST_CR0, vmm_get_host_cr0()); 968 vmcs_write(VMCS_HOST_CR4, vmm_get_host_cr4() | CR4_VMXE); 969 970 /* Load the segment selectors */ 971 vmcs_write(VMCS_HOST_CS_SELECTOR, vmm_get_host_codesel()); 972 973 vmcs_write(VMCS_HOST_ES_SELECTOR, datasel); 974 vmcs_write(VMCS_HOST_SS_SELECTOR, datasel); 975 vmcs_write(VMCS_HOST_DS_SELECTOR, datasel); 976 977 vmcs_write(VMCS_HOST_FS_SELECTOR, vmm_get_host_fssel()); 978 vmcs_write(VMCS_HOST_GS_SELECTOR, vmm_get_host_gssel()); 979 vmcs_write(VMCS_HOST_TR_SELECTOR, vmm_get_host_tsssel()); 980 981 /* 982 * Configure host sysenter MSRs to be restored on VM exit. 983 * The thread-specific MSR_INTC_SEP_ESP value is loaded in vmx_run. 
984 */ 985 vmcs_write(VMCS_HOST_IA32_SYSENTER_CS, KCS_SEL); 986 vmcs_write(VMCS_HOST_IA32_SYSENTER_EIP, 987 rdmsr(MSR_SYSENTER_EIP_MSR)); 988 989 /* instruction pointer */ 990 if (no_flush_rsb) { 991 vmcs_write(VMCS_HOST_RIP, (uint64_t)vmx_exit_guest); 992 } else { 993 vmcs_write(VMCS_HOST_RIP, 994 (uint64_t)vmx_exit_guest_flush_rsb); 995 } 996 997 /* link pointer */ 998 vmcs_write(VMCS_LINK_POINTER, ~0); 999 1000 vmcs_write(VMCS_EPTP, vmx->eptp); 1001 vmcs_write(VMCS_PIN_BASED_CTLS, pin_ctls); 1002 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, proc_ctls); 1003 vmcs_write(VMCS_SEC_PROC_BASED_CTLS, proc2_ctls); 1004 vmcs_write(VMCS_EXIT_CTLS, exit_ctls); 1005 vmcs_write(VMCS_ENTRY_CTLS, entry_ctls); 1006 vmcs_write(VMCS_MSR_BITMAP, msr_bitmap_pa); 1007 vmcs_write(VMCS_VPID, vpid[i]); 1008 1009 if (guest_l1d_flush && !guest_l1d_flush_sw) { 1010 vmcs_write(VMCS_ENTRY_MSR_LOAD, pmap_kextract( 1011 (vm_offset_t)&msr_load_list[0])); 1012 vmcs_write(VMCS_ENTRY_MSR_LOAD_COUNT, 1013 nitems(msr_load_list)); 1014 vmcs_write(VMCS_EXIT_MSR_STORE, 0); 1015 vmcs_write(VMCS_EXIT_MSR_STORE_COUNT, 0); 1016 } 1017 1018 /* exception bitmap */ 1019 if (vcpu_trace_exceptions(vm, i)) 1020 exc_bitmap = 0xffffffff; 1021 else 1022 exc_bitmap = 1 << IDT_MC; 1023 vmcs_write(VMCS_EXCEPTION_BITMAP, exc_bitmap); 1024 1025 vmx->ctx[i].guest_dr6 = DBREG_DR6_RESERVED1; 1026 vmcs_write(VMCS_GUEST_DR7, DBREG_DR7_RESERVED1); 1027 1028 if (vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW)) { 1029 vmcs_write(VMCS_VIRTUAL_APIC, apic_page_pa); 1030 } 1031 1032 if (vmx_cap_en(vmx, VMX_CAP_APICV)) { 1033 vmcs_write(VMCS_APIC_ACCESS, APIC_ACCESS_ADDRESS); 1034 vmcs_write(VMCS_EOI_EXIT0, 0); 1035 vmcs_write(VMCS_EOI_EXIT1, 0); 1036 vmcs_write(VMCS_EOI_EXIT2, 0); 1037 vmcs_write(VMCS_EOI_EXIT3, 0); 1038 } 1039 if (vmx_cap_en(vmx, VMX_CAP_APICV_PIR)) { 1040 vmcs_write(VMCS_PIR_VECTOR, pirvec); 1041 vmcs_write(VMCS_PIR_DESC, pir_desc_pa); 1042 } 1043 1044 /* 1045 * Set up the CR0/4 masks and configure the read shadow state 1046 * to the power-on register value from the Intel Sys Arch. 1047 * CR0 - 0x60000010 1048 * CR4 - 0 1049 */ 1050 vmcs_write(VMCS_CR0_MASK, cr0_ones_mask | cr0_zeros_mask); 1051 vmcs_write(VMCS_CR0_SHADOW, 0x60000010); 1052 vmcs_write(VMCS_CR4_MASK, cr4_ones_mask | cr4_zeros_mask); 1053 vmcs_write(VMCS_CR4_SHADOW, 0); 1054 1055 vmcs_clear(vmx->vmcs_pa[i]); 1056 1057 vmx->cap[i].set = 0; 1058 vmx->cap[i].proc_ctls = proc_ctls; 1059 vmx->cap[i].proc_ctls2 = proc2_ctls; 1060 vmx->cap[i].exc_bitmap = exc_bitmap; 1061 1062 vmx->state[i].nextrip = ~0; 1063 vmx->state[i].lastcpu = NOCPU; 1064 vmx->state[i].vpid = vpid[i]; 1065 1066 1067 vmx->ctx[i].pmap = pmap; 1068 } 1069 1070 return (vmx); 1071 } 1072 1073 static int 1074 vmx_handle_cpuid(struct vm *vm, int vcpu, struct vmxctx *vmxctx) 1075 { 1076 #ifdef __FreeBSD__ 1077 int handled, func; 1078 1079 func = vmxctx->guest_rax; 1080 #else 1081 int handled; 1082 #endif 1083 1084 handled = x86_emulate_cpuid(vm, vcpu, (uint64_t *)&vmxctx->guest_rax, 1085 (uint64_t *)&vmxctx->guest_rbx, (uint64_t *)&vmxctx->guest_rcx, 1086 (uint64_t *)&vmxctx->guest_rdx); 1087 return (handled); 1088 } 1089 1090 static __inline void 1091 vmx_run_trace(struct vmx *vmx, int vcpu) 1092 { 1093 #ifdef KTR 1094 VCPU_CTR1(vmx->vm, vcpu, "Resume execution at %lx", vmcs_guest_rip()); 1095 #endif 1096 } 1097 1098 static __inline void 1099 vmx_exit_trace(struct vmx *vmx, int vcpu, uint64_t rip, uint32_t exit_reason, 1100 int handled) 1101 { 1102 #ifdef KTR 1103 VCPU_CTR3(vmx->vm, vcpu, "%s %s vmexit at 0x%0lx", 1104 handled ? 
"handled" : "unhandled", 1105 exit_reason_to_str(exit_reason), rip); 1106 #endif 1107 DTRACE_PROBE3(vmm__vexit, int, vcpu, uint64_t, rip, 1108 uint32_t, exit_reason); 1109 } 1110 1111 static __inline void 1112 vmx_astpending_trace(struct vmx *vmx, int vcpu, uint64_t rip) 1113 { 1114 #ifdef KTR 1115 VCPU_CTR1(vmx->vm, vcpu, "astpending vmexit at 0x%0lx", rip); 1116 #endif 1117 } 1118 1119 static VMM_STAT_INTEL(VCPU_INVVPID_SAVED, "Number of vpid invalidations saved"); 1120 static VMM_STAT_INTEL(VCPU_INVVPID_DONE, "Number of vpid invalidations done"); 1121 1122 #define INVVPID_TYPE_ADDRESS 0UL 1123 #define INVVPID_TYPE_SINGLE_CONTEXT 1UL 1124 #define INVVPID_TYPE_ALL_CONTEXTS 2UL 1125 1126 struct invvpid_desc { 1127 uint16_t vpid; 1128 uint16_t _res1; 1129 uint32_t _res2; 1130 uint64_t linear_addr; 1131 }; 1132 CTASSERT(sizeof(struct invvpid_desc) == 16); 1133 1134 static __inline void 1135 invvpid(uint64_t type, struct invvpid_desc desc) 1136 { 1137 int error; 1138 1139 __asm __volatile("invvpid %[desc], %[type];" 1140 VMX_SET_ERROR_CODE_ASM 1141 : [error] "=r" (error) 1142 : [desc] "m" (desc), [type] "r" (type) 1143 : "memory"); 1144 1145 if (error) 1146 panic("invvpid error %d", error); 1147 } 1148 1149 /* 1150 * Invalidate guest mappings identified by its vpid from the TLB. 1151 */ 1152 static __inline void 1153 vmx_invvpid(struct vmx *vmx, int vcpu, pmap_t pmap, int running) 1154 { 1155 struct vmxstate *vmxstate; 1156 struct invvpid_desc invvpid_desc; 1157 1158 vmxstate = &vmx->state[vcpu]; 1159 if (vmxstate->vpid == 0) 1160 return; 1161 1162 if (!running) { 1163 /* 1164 * Set the 'lastcpu' to an invalid host cpu. 1165 * 1166 * This will invalidate TLB entries tagged with the vcpu's 1167 * vpid the next time it runs via vmx_set_pcpu_defaults(). 1168 */ 1169 vmxstate->lastcpu = NOCPU; 1170 return; 1171 } 1172 1173 #ifdef __FreeBSD__ 1174 KASSERT(curthread->td_critnest > 0, ("%s: vcpu %d running outside " 1175 "critical section", __func__, vcpu)); 1176 #endif 1177 1178 /* 1179 * Invalidate all mappings tagged with 'vpid' 1180 * 1181 * We do this because this vcpu was executing on a different host 1182 * cpu when it last ran. We do not track whether it invalidated 1183 * mappings associated with its 'vpid' during that run. So we must 1184 * assume that the mappings associated with 'vpid' on 'curcpu' are 1185 * stale and invalidate them. 1186 * 1187 * Note that we incur this penalty only when the scheduler chooses to 1188 * move the thread associated with this vcpu between host cpus. 1189 * 1190 * Note also that this will invalidate mappings tagged with 'vpid' 1191 * for "all" EP4TAs. 1192 */ 1193 if (pmap->pm_eptgen == vmx->eptgen[curcpu]) { 1194 invvpid_desc._res1 = 0; 1195 invvpid_desc._res2 = 0; 1196 invvpid_desc.vpid = vmxstate->vpid; 1197 invvpid_desc.linear_addr = 0; 1198 invvpid(INVVPID_TYPE_SINGLE_CONTEXT, invvpid_desc); 1199 vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_DONE, 1); 1200 } else { 1201 /* 1202 * The invvpid can be skipped if an invept is going to 1203 * be performed before entering the guest. The invept 1204 * will invalidate combined mappings tagged with 1205 * 'vmx->eptp' for all vpids. 1206 */ 1207 vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_SAVED, 1); 1208 } 1209 } 1210 1211 static void 1212 vmx_set_pcpu_defaults(struct vmx *vmx, int vcpu, pmap_t pmap) 1213 { 1214 struct vmxstate *vmxstate; 1215 1216 /* 1217 * Regardless of whether the VM appears to have migrated between CPUs, 1218 * save the host sysenter stack pointer. 
As it points to the kernel 1219 * stack of each thread, the correct value must be maintained for every 1220 * trip into the critical section. 1221 */ 1222 vmcs_write(VMCS_HOST_IA32_SYSENTER_ESP, rdmsr(MSR_SYSENTER_ESP_MSR)); 1223 1224 /* 1225 * Perform any needed TSC_OFFSET adjustment based on TSC_MSR writes or 1226 * migration between host CPUs with differing TSC values. 1227 */ 1228 vmx_apply_tsc_adjust(vmx, vcpu); 1229 1230 vmxstate = &vmx->state[vcpu]; 1231 if (vmxstate->lastcpu == curcpu) 1232 return; 1233 1234 vmxstate->lastcpu = curcpu; 1235 1236 vmm_stat_incr(vmx->vm, vcpu, VCPU_MIGRATIONS, 1); 1237 1238 /* Load the per-CPU IDT address */ 1239 vmcs_write(VMCS_HOST_IDTR_BASE, vmm_get_host_idtrbase()); 1240 vmcs_write(VMCS_HOST_TR_BASE, vmm_get_host_trbase()); 1241 vmcs_write(VMCS_HOST_GDTR_BASE, vmm_get_host_gdtrbase()); 1242 vmcs_write(VMCS_HOST_GS_BASE, vmm_get_host_gsbase()); 1243 vmx_invvpid(vmx, vcpu, pmap, 1); 1244 } 1245 1246 /* 1247 * We depend on 'procbased_ctls' to have the Interrupt Window Exiting bit set. 1248 */ 1249 CTASSERT((PROCBASED_CTLS_ONE_SETTING & PROCBASED_INT_WINDOW_EXITING) != 0); 1250 1251 static __inline void 1252 vmx_set_int_window_exiting(struct vmx *vmx, int vcpu) 1253 { 1254 1255 if ((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) == 0) { 1256 vmx->cap[vcpu].proc_ctls |= PROCBASED_INT_WINDOW_EXITING; 1257 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls); 1258 VCPU_CTR0(vmx->vm, vcpu, "Enabling interrupt window exiting"); 1259 } 1260 } 1261 1262 static __inline void 1263 vmx_clear_int_window_exiting(struct vmx *vmx, int vcpu) 1264 { 1265 1266 KASSERT((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0, 1267 ("intr_window_exiting not set: %x", vmx->cap[vcpu].proc_ctls)); 1268 vmx->cap[vcpu].proc_ctls &= ~PROCBASED_INT_WINDOW_EXITING; 1269 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls); 1270 VCPU_CTR0(vmx->vm, vcpu, "Disabling interrupt window exiting"); 1271 } 1272 1273 static __inline void 1274 vmx_set_nmi_window_exiting(struct vmx *vmx, int vcpu) 1275 { 1276 1277 if ((vmx->cap[vcpu].proc_ctls & PROCBASED_NMI_WINDOW_EXITING) == 0) { 1278 vmx->cap[vcpu].proc_ctls |= PROCBASED_NMI_WINDOW_EXITING; 1279 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls); 1280 VCPU_CTR0(vmx->vm, vcpu, "Enabling NMI window exiting"); 1281 } 1282 } 1283 1284 static __inline void 1285 vmx_clear_nmi_window_exiting(struct vmx *vmx, int vcpu) 1286 { 1287 1288 KASSERT((vmx->cap[vcpu].proc_ctls & PROCBASED_NMI_WINDOW_EXITING) != 0, 1289 ("nmi_window_exiting not set %x", vmx->cap[vcpu].proc_ctls)); 1290 vmx->cap[vcpu].proc_ctls &= ~PROCBASED_NMI_WINDOW_EXITING; 1291 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls); 1292 VCPU_CTR0(vmx->vm, vcpu, "Disabling NMI window exiting"); 1293 } 1294 1295 /* 1296 * Set the TSC adjustment, taking into account the offsets measured between 1297 * host physical CPUs. This is required even if the guest has not set a TSC 1298 * offset since vCPUs inherit the TSC offset of whatever physical CPU it has 1299 * migrated onto. Without this mitigation, un-synched host TSCs will convey 1300 * the appearance of TSC time-travel to the guest as its vCPUs migrate. 
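 * The offset programmed below is therefore the sum of the guest-visible
 * offset (vcpu_tsc_offset()) and the per-CPU delta reported by
 * tsc_gethrtime_tick_delta(), and the VMCS field is rewritten only when
 * that sum changes.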
1301 */ 1302 static void 1303 vmx_apply_tsc_adjust(struct vmx *vmx, int vcpu) 1304 { 1305 extern hrtime_t tsc_gethrtime_tick_delta(void); 1306 const uint64_t target_offset = (vcpu_tsc_offset(vmx->vm, vcpu) + 1307 (uint64_t)tsc_gethrtime_tick_delta()); 1308 1309 ASSERT(vmx->cap[vcpu].proc_ctls & PROCBASED_TSC_OFFSET); 1310 1311 if (vmx->tsc_offset_active[vcpu] != target_offset) { 1312 vmcs_write(VMCS_TSC_OFFSET, target_offset); 1313 vmx->tsc_offset_active[vcpu] = target_offset; 1314 } 1315 } 1316 1317 #define NMI_BLOCKING (VMCS_INTERRUPTIBILITY_NMI_BLOCKING | \ 1318 VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING) 1319 #define HWINTR_BLOCKING (VMCS_INTERRUPTIBILITY_STI_BLOCKING | \ 1320 VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING) 1321 1322 #ifndef __FreeBSD__ 1323 static uint32_t 1324 vmx_inject_nmi(struct vmx *vmx, int vcpu) 1325 #else 1326 static void 1327 vmx_inject_nmi(struct vmx *vmx, int vcpu) 1328 #endif 1329 { 1330 uint32_t gi, info; 1331 1332 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 1333 KASSERT((gi & NMI_BLOCKING) == 0, ("vmx_inject_nmi: invalid guest " 1334 "interruptibility-state %x", gi)); 1335 1336 info = vmcs_read(VMCS_ENTRY_INTR_INFO); 1337 KASSERT((info & VMCS_INTR_VALID) == 0, ("vmx_inject_nmi: invalid " 1338 "VM-entry interruption information %x", info)); 1339 1340 /* 1341 * Inject the virtual NMI. The vector must be the NMI IDT entry 1342 * or the VMCS entry check will fail. 1343 */ 1344 info = IDT_NMI | VMCS_INTR_T_NMI | VMCS_INTR_VALID; 1345 vmcs_write(VMCS_ENTRY_INTR_INFO, info); 1346 1347 VCPU_CTR0(vmx->vm, vcpu, "Injecting vNMI"); 1348 1349 /* Clear the request */ 1350 vm_nmi_clear(vmx->vm, vcpu); 1351 1352 #ifndef __FreeBSD__ 1353 return (info); 1354 #endif 1355 } 1356 1357 static void 1358 vmx_inject_interrupts(struct vmx *vmx, int vcpu, struct vlapic *vlapic, 1359 uint64_t guestrip) 1360 { 1361 uint64_t entryinfo, rflags; 1362 uint32_t gi, info; 1363 int vector; 1364 boolean_t extint_pending = B_FALSE; 1365 1366 vlapic_tmr_update(vlapic); 1367 1368 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 1369 info = vmcs_read(VMCS_ENTRY_INTR_INFO); 1370 1371 if (vmx->state[vcpu].nextrip != guestrip && 1372 (gi & HWINTR_BLOCKING) != 0) { 1373 VCPU_CTR2(vmx->vm, vcpu, "Guest interrupt blocking " 1374 "cleared due to rip change: %lx/%lx", 1375 vmx->state[vcpu].nextrip, guestrip); 1376 gi &= ~HWINTR_BLOCKING; 1377 vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi); 1378 } 1379 1380 /* 1381 * It could be that an interrupt is already pending for injection from 1382 * the VMCS. This would be the case if the vCPU exited for conditions 1383 * such as an AST before a vm-entry delivered the injection. 1384 */ 1385 if ((info & VMCS_INTR_VALID) != 0) { 1386 goto cantinject; 1387 } 1388 1389 if (vm_entry_intinfo(vmx->vm, vcpu, &entryinfo)) { 1390 KASSERT((entryinfo & VMCS_INTR_VALID) != 0, ("%s: entry " 1391 "intinfo is not valid: %lx", __func__, entryinfo)); 1392 1393 KASSERT((info & VMCS_INTR_VALID) == 0, ("%s: cannot inject " 1394 "pending exception: %lx/%x", __func__, entryinfo, info)); 1395 1396 info = entryinfo; 1397 vector = info & 0xff; 1398 if (vector == IDT_BP || vector == IDT_OF) { 1399 /* 1400 * VT-x requires #BP and #OF to be injected as software 1401 * exceptions. 
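 * (#BP and #OF are generated by the INT3 and INTO instructions, so the
 * interruption type must indicate a software exception for the VM-entry
 * event checks to accept them.)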
1402 */ 1403 info &= ~VMCS_INTR_T_MASK; 1404 info |= VMCS_INTR_T_SWEXCEPTION; 1405 } 1406 1407 if (info & VMCS_INTR_DEL_ERRCODE) 1408 vmcs_write(VMCS_ENTRY_EXCEPTION_ERROR, entryinfo >> 32); 1409 1410 vmcs_write(VMCS_ENTRY_INTR_INFO, info); 1411 } 1412 1413 if (vm_nmi_pending(vmx->vm, vcpu)) { 1414 int need_nmi_exiting = 1; 1415 1416 /* 1417 * If there are no conditions blocking NMI injection then 1418 * inject it directly here otherwise enable "NMI window 1419 * exiting" to inject it as soon as we can. 1420 * 1421 * We also check for STI_BLOCKING because some implementations 1422 * don't allow NMI injection in this case. If we are running 1423 * on a processor that doesn't have this restriction it will 1424 * immediately exit and the NMI will be injected in the 1425 * "NMI window exiting" handler. 1426 */ 1427 if ((gi & (HWINTR_BLOCKING | NMI_BLOCKING)) == 0) { 1428 if ((info & VMCS_INTR_VALID) == 0) { 1429 info = vmx_inject_nmi(vmx, vcpu); 1430 need_nmi_exiting = 0; 1431 } else { 1432 VCPU_CTR1(vmx->vm, vcpu, "Cannot inject NMI " 1433 "due to VM-entry intr info %x", info); 1434 } 1435 } else { 1436 VCPU_CTR1(vmx->vm, vcpu, "Cannot inject NMI due to " 1437 "Guest Interruptibility-state %x", gi); 1438 } 1439 1440 if (need_nmi_exiting) { 1441 vmx_set_nmi_window_exiting(vmx, vcpu); 1442 return; 1443 } 1444 } 1445 1446 /* Check the AT-PIC and APIC for interrupts. */ 1447 if (vm_extint_pending(vmx->vm, vcpu)) { 1448 /* Ask the legacy pic for a vector to inject */ 1449 vatpic_pending_intr(vmx->vm, &vector); 1450 extint_pending = B_TRUE; 1451 1452 /* 1453 * From the Intel SDM, Volume 3, Section "Maskable 1454 * Hardware Interrupts": 1455 * - maskable interrupt vectors [0,255] can be delivered 1456 * through the INTR pin. 1457 */ 1458 KASSERT(vector >= 0 && vector <= 255, 1459 ("invalid vector %d from INTR", vector)); 1460 } else if (!vmx_cap_en(vmx, VMX_CAP_APICV)) { 1461 /* Ask the local apic for a vector to inject */ 1462 if (!vlapic_pending_intr(vlapic, &vector)) 1463 return; 1464 1465 /* 1466 * From the Intel SDM, Volume 3, Section "Maskable 1467 * Hardware Interrupts": 1468 * - maskable interrupt vectors [16,255] can be delivered 1469 * through the local APIC. 1470 */ 1471 KASSERT(vector >= 16 && vector <= 255, 1472 ("invalid vector %d from local APIC", vector)); 1473 } else { 1474 /* No futher injection needed */ 1475 return; 1476 } 1477 1478 /* 1479 * Verify that the guest is interruptable and the above logic has not 1480 * already queued an event for injection. 1481 */ 1482 if ((gi & HWINTR_BLOCKING) != 0) { 1483 VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to " 1484 "Guest Interruptibility-state %x", vector, gi); 1485 goto cantinject; 1486 } 1487 if ((info & VMCS_INTR_VALID) != 0) { 1488 VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to " 1489 "VM-entry intr info %x", vector, info); 1490 goto cantinject; 1491 } 1492 rflags = vmcs_read(VMCS_GUEST_RFLAGS); 1493 if ((rflags & PSL_I) == 0) { 1494 VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to " 1495 "rflags %lx", vector, rflags); 1496 goto cantinject; 1497 } 1498 1499 /* Inject the interrupt */ 1500 info = VMCS_INTR_T_HWINTR | VMCS_INTR_VALID; 1501 info |= vector; 1502 vmcs_write(VMCS_ENTRY_INTR_INFO, info); 1503 1504 if (extint_pending) { 1505 vm_extint_clear(vmx->vm, vcpu); 1506 vatpic_intr_accepted(vmx->vm, vector); 1507 1508 /* 1509 * After we accepted the current ExtINT the PIC may 1510 * have posted another one. 
If that is the case, set 1511 * the Interrupt Window Exiting execution control so 1512 * we can inject that one too. 1513 * 1514 * Also, interrupt window exiting allows us to inject any 1515 * pending APIC vector that was preempted by the ExtINT 1516 * as soon as possible. This applies both for the software 1517 * emulated vlapic and the hardware assisted virtual APIC. 1518 */ 1519 vmx_set_int_window_exiting(vmx, vcpu); 1520 } else { 1521 /* Update the Local APIC ISR */ 1522 vlapic_intr_accepted(vlapic, vector); 1523 } 1524 1525 VCPU_CTR1(vmx->vm, vcpu, "Injecting hwintr at vector %d", vector); 1526 return; 1527 1528 cantinject: 1529 /* 1530 * Set the Interrupt Window Exiting execution control so we can inject 1531 * the interrupt as soon as blocking condition goes away. 1532 */ 1533 vmx_set_int_window_exiting(vmx, vcpu); 1534 } 1535 1536 /* 1537 * If the Virtual NMIs execution control is '1' then the logical processor 1538 * tracks virtual-NMI blocking in the Guest Interruptibility-state field of 1539 * the VMCS. An IRET instruction in VMX non-root operation will remove any 1540 * virtual-NMI blocking. 1541 * 1542 * This unblocking occurs even if the IRET causes a fault. In this case the 1543 * hypervisor needs to restore virtual-NMI blocking before resuming the guest. 1544 */ 1545 static void 1546 vmx_restore_nmi_blocking(struct vmx *vmx, int vcpuid) 1547 { 1548 uint32_t gi; 1549 1550 VCPU_CTR0(vmx->vm, vcpuid, "Restore Virtual-NMI blocking"); 1551 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 1552 gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING; 1553 vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi); 1554 } 1555 1556 static void 1557 vmx_clear_nmi_blocking(struct vmx *vmx, int vcpuid) 1558 { 1559 uint32_t gi; 1560 1561 VCPU_CTR0(vmx->vm, vcpuid, "Clear Virtual-NMI blocking"); 1562 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 1563 gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING; 1564 vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi); 1565 } 1566 1567 static void 1568 vmx_assert_nmi_blocking(struct vmx *vmx, int vcpuid) 1569 { 1570 uint32_t gi; 1571 1572 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 1573 KASSERT(gi & VMCS_INTERRUPTIBILITY_NMI_BLOCKING, 1574 ("NMI blocking is not in effect %x", gi)); 1575 } 1576 1577 static int 1578 vmx_emulate_xsetbv(struct vmx *vmx, int vcpu, struct vm_exit *vmexit) 1579 { 1580 struct vmxctx *vmxctx; 1581 uint64_t xcrval; 1582 const struct xsave_limits *limits; 1583 1584 vmxctx = &vmx->ctx[vcpu]; 1585 limits = vmm_get_xsave_limits(); 1586 1587 /* 1588 * Note that the processor raises a GP# fault on its own if 1589 * xsetbv is executed for CPL != 0, so we do not have to 1590 * emulate that fault here. 1591 */ 1592 1593 /* Only xcr0 is supported. */ 1594 if (vmxctx->guest_rcx != 0) { 1595 vm_inject_gp(vmx->vm, vcpu); 1596 return (HANDLED); 1597 } 1598 1599 /* We only handle xcr0 if both the host and guest have XSAVE enabled. */ 1600 if (!limits->xsave_enabled || !(vmcs_read(VMCS_GUEST_CR4) & CR4_XSAVE)) { 1601 vm_inject_ud(vmx->vm, vcpu); 1602 return (HANDLED); 1603 } 1604 1605 xcrval = vmxctx->guest_rdx << 32 | (vmxctx->guest_rax & 0xffffffff); 1606 if ((xcrval & ~limits->xcr0_allowed) != 0) { 1607 vm_inject_gp(vmx->vm, vcpu); 1608 return (HANDLED); 1609 } 1610 1611 if (!(xcrval & XFEATURE_ENABLED_X87)) { 1612 vm_inject_gp(vmx->vm, vcpu); 1613 return (HANDLED); 1614 } 1615 1616 /* AVX (YMM_Hi128) requires SSE. 
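 * That is, the SSE state bit must be set in XCR0 whenever the AVX state
 * bit is.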
*/ 1617 if (xcrval & XFEATURE_ENABLED_AVX && 1618 (xcrval & XFEATURE_AVX) != XFEATURE_AVX) { 1619 vm_inject_gp(vmx->vm, vcpu); 1620 return (HANDLED); 1621 } 1622 1623 /* 1624 * AVX512 requires base AVX (YMM_Hi128) as well as OpMask, 1625 * ZMM_Hi256, and Hi16_ZMM. 1626 */ 1627 if (xcrval & XFEATURE_AVX512 && 1628 (xcrval & (XFEATURE_AVX512 | XFEATURE_AVX)) != 1629 (XFEATURE_AVX512 | XFEATURE_AVX)) { 1630 vm_inject_gp(vmx->vm, vcpu); 1631 return (HANDLED); 1632 } 1633 1634 /* 1635 * Intel MPX requires both bound register state flags to be 1636 * set. 1637 */ 1638 if (((xcrval & XFEATURE_ENABLED_BNDREGS) != 0) != 1639 ((xcrval & XFEATURE_ENABLED_BNDCSR) != 0)) { 1640 vm_inject_gp(vmx->vm, vcpu); 1641 return (HANDLED); 1642 } 1643 1644 /* 1645 * This runs "inside" vmrun() with the guest's FPU state, so 1646 * modifying xcr0 directly modifies the guest's xcr0, not the 1647 * host's. 1648 */ 1649 load_xcr(0, xcrval); 1650 return (HANDLED); 1651 } 1652 1653 static uint64_t 1654 vmx_get_guest_reg(struct vmx *vmx, int vcpu, int ident) 1655 { 1656 const struct vmxctx *vmxctx; 1657 1658 vmxctx = &vmx->ctx[vcpu]; 1659 1660 switch (ident) { 1661 case 0: 1662 return (vmxctx->guest_rax); 1663 case 1: 1664 return (vmxctx->guest_rcx); 1665 case 2: 1666 return (vmxctx->guest_rdx); 1667 case 3: 1668 return (vmxctx->guest_rbx); 1669 case 4: 1670 return (vmcs_read(VMCS_GUEST_RSP)); 1671 case 5: 1672 return (vmxctx->guest_rbp); 1673 case 6: 1674 return (vmxctx->guest_rsi); 1675 case 7: 1676 return (vmxctx->guest_rdi); 1677 case 8: 1678 return (vmxctx->guest_r8); 1679 case 9: 1680 return (vmxctx->guest_r9); 1681 case 10: 1682 return (vmxctx->guest_r10); 1683 case 11: 1684 return (vmxctx->guest_r11); 1685 case 12: 1686 return (vmxctx->guest_r12); 1687 case 13: 1688 return (vmxctx->guest_r13); 1689 case 14: 1690 return (vmxctx->guest_r14); 1691 case 15: 1692 return (vmxctx->guest_r15); 1693 default: 1694 panic("invalid vmx register %d", ident); 1695 } 1696 } 1697 1698 static void 1699 vmx_set_guest_reg(struct vmx *vmx, int vcpu, int ident, uint64_t regval) 1700 { 1701 struct vmxctx *vmxctx; 1702 1703 vmxctx = &vmx->ctx[vcpu]; 1704 1705 switch (ident) { 1706 case 0: 1707 vmxctx->guest_rax = regval; 1708 break; 1709 case 1: 1710 vmxctx->guest_rcx = regval; 1711 break; 1712 case 2: 1713 vmxctx->guest_rdx = regval; 1714 break; 1715 case 3: 1716 vmxctx->guest_rbx = regval; 1717 break; 1718 case 4: 1719 vmcs_write(VMCS_GUEST_RSP, regval); 1720 break; 1721 case 5: 1722 vmxctx->guest_rbp = regval; 1723 break; 1724 case 6: 1725 vmxctx->guest_rsi = regval; 1726 break; 1727 case 7: 1728 vmxctx->guest_rdi = regval; 1729 break; 1730 case 8: 1731 vmxctx->guest_r8 = regval; 1732 break; 1733 case 9: 1734 vmxctx->guest_r9 = regval; 1735 break; 1736 case 10: 1737 vmxctx->guest_r10 = regval; 1738 break; 1739 case 11: 1740 vmxctx->guest_r11 = regval; 1741 break; 1742 case 12: 1743 vmxctx->guest_r12 = regval; 1744 break; 1745 case 13: 1746 vmxctx->guest_r13 = regval; 1747 break; 1748 case 14: 1749 vmxctx->guest_r14 = regval; 1750 break; 1751 case 15: 1752 vmxctx->guest_r15 = regval; 1753 break; 1754 default: 1755 panic("invalid vmx register %d", ident); 1756 } 1757 } 1758 1759 static int 1760 vmx_emulate_cr0_access(struct vmx *vmx, int vcpu, uint64_t exitqual) 1761 { 1762 uint64_t crval, regval; 1763 1764 /* We only handle mov to %cr0 at this time */ 1765 if ((exitqual & 0xf0) != 0x00) 1766 return (UNHANDLED); 1767 1768 regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf); 1769 1770 vmcs_write(VMCS_CR0_SHADOW, regval); 1771 
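	/*
	 * The CR0 read shadow holds the value the guest believes it wrote;
	 * the guest CR0 written below is that same value with the
	 * VMX-mandated bits applied, i.e.
	 * (regval | cr0_ones_mask) & ~cr0_zeros_mask, the same
	 * transformation performed by vmx_fix_cr0().
	 */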
1772 crval = regval | cr0_ones_mask; 1773 crval &= ~cr0_zeros_mask; 1774 vmcs_write(VMCS_GUEST_CR0, crval); 1775 1776 if (regval & CR0_PG) { 1777 uint64_t efer, entry_ctls; 1778 1779 /* 1780 * If CR0.PG is 1 and EFER.LME is 1 then EFER.LMA and 1781 * the "IA-32e mode guest" bit in VM-entry control must be 1782 * equal. 1783 */ 1784 efer = vmcs_read(VMCS_GUEST_IA32_EFER); 1785 if (efer & EFER_LME) { 1786 efer |= EFER_LMA; 1787 vmcs_write(VMCS_GUEST_IA32_EFER, efer); 1788 entry_ctls = vmcs_read(VMCS_ENTRY_CTLS); 1789 entry_ctls |= VM_ENTRY_GUEST_LMA; 1790 vmcs_write(VMCS_ENTRY_CTLS, entry_ctls); 1791 } 1792 } 1793 1794 return (HANDLED); 1795 } 1796 1797 static int 1798 vmx_emulate_cr4_access(struct vmx *vmx, int vcpu, uint64_t exitqual) 1799 { 1800 uint64_t crval, regval; 1801 1802 /* We only handle mov to %cr4 at this time */ 1803 if ((exitqual & 0xf0) != 0x00) 1804 return (UNHANDLED); 1805 1806 regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf); 1807 1808 vmcs_write(VMCS_CR4_SHADOW, regval); 1809 1810 crval = regval | cr4_ones_mask; 1811 crval &= ~cr4_zeros_mask; 1812 vmcs_write(VMCS_GUEST_CR4, crval); 1813 1814 return (HANDLED); 1815 } 1816 1817 static int 1818 vmx_emulate_cr8_access(struct vmx *vmx, int vcpu, uint64_t exitqual) 1819 { 1820 struct vlapic *vlapic; 1821 uint64_t cr8; 1822 int regnum; 1823 1824 /* We only handle mov %cr8 to/from a register at this time. */ 1825 if ((exitqual & 0xe0) != 0x00) { 1826 return (UNHANDLED); 1827 } 1828 1829 vlapic = vm_lapic(vmx->vm, vcpu); 1830 regnum = (exitqual >> 8) & 0xf; 1831 if (exitqual & 0x10) { 1832 cr8 = vlapic_get_cr8(vlapic); 1833 vmx_set_guest_reg(vmx, vcpu, regnum, cr8); 1834 } else { 1835 cr8 = vmx_get_guest_reg(vmx, vcpu, regnum); 1836 vlapic_set_cr8(vlapic, cr8); 1837 } 1838 1839 return (HANDLED); 1840 } 1841 1842 /* 1843 * From section "Guest Register State" in the Intel SDM: CPL = SS.DPL 1844 */ 1845 static int 1846 vmx_cpl(void) 1847 { 1848 uint32_t ssar; 1849 1850 ssar = vmcs_read(VMCS_GUEST_SS_ACCESS_RIGHTS); 1851 return ((ssar >> 5) & 0x3); 1852 } 1853 1854 static enum vm_cpu_mode 1855 vmx_cpu_mode(void) 1856 { 1857 uint32_t csar; 1858 1859 if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LMA) { 1860 csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS); 1861 if (csar & 0x2000) 1862 return (CPU_MODE_64BIT); /* CS.L = 1 */ 1863 else 1864 return (CPU_MODE_COMPATIBILITY); 1865 } else if (vmcs_read(VMCS_GUEST_CR0) & CR0_PE) { 1866 return (CPU_MODE_PROTECTED); 1867 } else { 1868 return (CPU_MODE_REAL); 1869 } 1870 } 1871 1872 static enum vm_paging_mode 1873 vmx_paging_mode(void) 1874 { 1875 1876 if (!(vmcs_read(VMCS_GUEST_CR0) & CR0_PG)) 1877 return (PAGING_MODE_FLAT); 1878 if (!(vmcs_read(VMCS_GUEST_CR4) & CR4_PAE)) 1879 return (PAGING_MODE_32); 1880 if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LME) 1881 return (PAGING_MODE_64); 1882 else 1883 return (PAGING_MODE_PAE); 1884 } 1885 1886 static void 1887 vmx_paging_info(struct vm_guest_paging *paging) 1888 { 1889 paging->cr3 = vmcs_guest_cr3(); 1890 paging->cpl = vmx_cpl(); 1891 paging->cpu_mode = vmx_cpu_mode(); 1892 paging->paging_mode = vmx_paging_mode(); 1893 } 1894 1895 static void 1896 vmexit_mmio_emul(struct vm_exit *vmexit, struct vie *vie, uint64_t gpa, 1897 uint64_t gla) 1898 { 1899 struct vm_guest_paging paging; 1900 uint32_t csar; 1901 1902 vmexit->exitcode = VM_EXITCODE_MMIO_EMUL; 1903 vmexit->inst_length = 0; 1904 vmexit->u.mmio_emul.gpa = gpa; 1905 vmexit->u.mmio_emul.gla = gla; 1906 vmx_paging_info(&paging); 1907 1908 switch (paging.cpu_mode) { 1909 case CPU_MODE_REAL: 
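		/*
		 * In real mode the CS base is still taken from the VMCS,
		 * but there is no 32-bit default operand size, so cs_d is
		 * left clear.
		 */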
1910 vmexit->u.mmio_emul.cs_base = vmcs_read(VMCS_GUEST_CS_BASE); 1911 vmexit->u.mmio_emul.cs_d = 0; 1912 break; 1913 case CPU_MODE_PROTECTED: 1914 case CPU_MODE_COMPATIBILITY: 1915 vmexit->u.mmio_emul.cs_base = vmcs_read(VMCS_GUEST_CS_BASE); 1916 csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS); 1917 vmexit->u.mmio_emul.cs_d = SEG_DESC_DEF32(csar); 1918 break; 1919 default: 1920 vmexit->u.mmio_emul.cs_base = 0; 1921 vmexit->u.mmio_emul.cs_d = 0; 1922 break; 1923 } 1924 1925 vie_init_mmio(vie, NULL, 0, &paging, gpa); 1926 } 1927 1928 static void 1929 vmexit_inout(struct vm_exit *vmexit, struct vie *vie, uint64_t qual, 1930 uint32_t eax) 1931 { 1932 struct vm_guest_paging paging; 1933 struct vm_inout *inout; 1934 1935 inout = &vmexit->u.inout; 1936 1937 inout->bytes = (qual & 0x7) + 1; 1938 inout->flags = 0; 1939 inout->flags |= (qual & 0x8) ? INOUT_IN : 0; 1940 inout->flags |= (qual & 0x10) ? INOUT_STR : 0; 1941 inout->flags |= (qual & 0x20) ? INOUT_REP : 0; 1942 inout->port = (uint16_t)(qual >> 16); 1943 inout->eax = eax; 1944 if (inout->flags & INOUT_STR) { 1945 uint64_t inst_info; 1946 1947 inst_info = vmcs_read(VMCS_EXIT_INSTRUCTION_INFO); 1948 1949 /* 1950 * Bits 7-9 encode the address size of ins/outs operations where 1951 * the 0/1/2 values correspond to 16/32/64 bit sizes. 1952 */ 1953 inout->addrsize = 2 << ((inst_info >> 7) & 0x3); 1954 VERIFY(inout->addrsize == 2 || inout->addrsize == 4 || 1955 inout->addrsize == 8); 1956 1957 if (inout->flags & INOUT_IN) { 1958 /* 1959 * The bits describing the segment in INSTRUCTION_INFO 1960 * are not defined for ins, leaving it to system 1961 * software to assume %es (encoded as 0) 1962 */ 1963 inout->segment = 0; 1964 } else { 1965 /* 1966 * Bits 15-17 encode the segment for OUTS. 1967 * This value follows the standard x86 segment order. 1968 */ 1969 inout->segment = (inst_info >> 15) & 0x7; 1970 } 1971 } 1972 1973 vmexit->exitcode = VM_EXITCODE_INOUT; 1974 vmx_paging_info(&paging); 1975 vie_init_inout(vie, inout, vmexit->inst_length, &paging); 1976 1977 /* The in/out emulation will handle advancing %rip */ 1978 vmexit->inst_length = 0; 1979 } 1980 1981 static int 1982 ept_fault_type(uint64_t ept_qual) 1983 { 1984 int fault_type; 1985 1986 if (ept_qual & EPT_VIOLATION_DATA_WRITE) 1987 fault_type = VM_PROT_WRITE; 1988 else if (ept_qual & EPT_VIOLATION_INST_FETCH) 1989 fault_type = VM_PROT_EXECUTE; 1990 else 1991 fault_type = VM_PROT_READ; 1992 1993 return (fault_type); 1994 } 1995 1996 static bool 1997 ept_emulation_fault(uint64_t ept_qual) 1998 { 1999 int read, write; 2000 2001 /* EPT fault on an instruction fetch doesn't make sense here */ 2002 if (ept_qual & EPT_VIOLATION_INST_FETCH) 2003 return (false); 2004 2005 /* EPT fault must be a read fault or a write fault */ 2006 read = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0; 2007 write = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0; 2008 if ((read | write) == 0) 2009 return (false); 2010 2011 /* 2012 * The EPT violation must have been caused by accessing a 2013 * guest-physical address that is a translation of a guest-linear 2014 * address. 2015 */ 2016 if ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 || 2017 (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) { 2018 return (false); 2019 } 2020 2021 return (true); 2022 } 2023 2024 static __inline int 2025 apic_access_virtualization(struct vmx *vmx, int vcpuid) 2026 { 2027 uint32_t proc_ctls2; 2028 2029 proc_ctls2 = vmx->cap[vcpuid].proc_ctls2; 2030 return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) ?
1 : 0); 2031 } 2032 2033 static __inline int 2034 x2apic_virtualization(struct vmx *vmx, int vcpuid) 2035 { 2036 uint32_t proc_ctls2; 2037 2038 proc_ctls2 = vmx->cap[vcpuid].proc_ctls2; 2039 return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_X2APIC_MODE) ? 1 : 0); 2040 } 2041 2042 static int 2043 vmx_handle_apic_write(struct vmx *vmx, int vcpuid, struct vlapic *vlapic, 2044 uint64_t qual) 2045 { 2046 int error, handled, offset; 2047 uint32_t *apic_regs, vector; 2048 bool retu; 2049 2050 handled = HANDLED; 2051 offset = APIC_WRITE_OFFSET(qual); 2052 2053 if (!apic_access_virtualization(vmx, vcpuid)) { 2054 /* 2055 * In general there should not be any APIC write VM-exits 2056 * unless APIC-access virtualization is enabled. 2057 * 2058 * However self-IPI virtualization can legitimately trigger 2059 * an APIC-write VM-exit so treat it specially. 2060 */ 2061 if (x2apic_virtualization(vmx, vcpuid) && 2062 offset == APIC_OFFSET_SELF_IPI) { 2063 apic_regs = (uint32_t *)(vlapic->apic_page); 2064 vector = apic_regs[APIC_OFFSET_SELF_IPI / 4]; 2065 vlapic_self_ipi_handler(vlapic, vector); 2066 return (HANDLED); 2067 } else 2068 return (UNHANDLED); 2069 } 2070 2071 switch (offset) { 2072 case APIC_OFFSET_ID: 2073 vlapic_id_write_handler(vlapic); 2074 break; 2075 case APIC_OFFSET_LDR: 2076 vlapic_ldr_write_handler(vlapic); 2077 break; 2078 case APIC_OFFSET_DFR: 2079 vlapic_dfr_write_handler(vlapic); 2080 break; 2081 case APIC_OFFSET_SVR: 2082 vlapic_svr_write_handler(vlapic); 2083 break; 2084 case APIC_OFFSET_ESR: 2085 vlapic_esr_write_handler(vlapic); 2086 break; 2087 case APIC_OFFSET_ICR_LOW: 2088 retu = false; 2089 error = vlapic_icrlo_write_handler(vlapic, &retu); 2090 if (error != 0 || retu) 2091 handled = UNHANDLED; 2092 break; 2093 case APIC_OFFSET_CMCI_LVT: 2094 case APIC_OFFSET_TIMER_LVT ... APIC_OFFSET_ERROR_LVT: 2095 vlapic_lvt_write_handler(vlapic, offset); 2096 break; 2097 case APIC_OFFSET_TIMER_ICR: 2098 vlapic_icrtmr_write_handler(vlapic); 2099 break; 2100 case APIC_OFFSET_TIMER_DCR: 2101 vlapic_dcr_write_handler(vlapic); 2102 break; 2103 default: 2104 handled = UNHANDLED; 2105 break; 2106 } 2107 return (handled); 2108 } 2109 2110 static bool 2111 apic_access_fault(struct vmx *vmx, int vcpuid, uint64_t gpa) 2112 { 2113 2114 if (apic_access_virtualization(vmx, vcpuid) && 2115 (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE)) 2116 return (true); 2117 else 2118 return (false); 2119 } 2120 2121 static int 2122 vmx_handle_apic_access(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit) 2123 { 2124 uint64_t qual; 2125 int access_type, offset, allowed; 2126 struct vie *vie; 2127 2128 if (!apic_access_virtualization(vmx, vcpuid)) 2129 return (UNHANDLED); 2130 2131 qual = vmexit->u.vmx.exit_qualification; 2132 access_type = APIC_ACCESS_TYPE(qual); 2133 offset = APIC_ACCESS_OFFSET(qual); 2134 2135 allowed = 0; 2136 if (access_type == 0) { 2137 /* 2138 * Read data access to the following registers is expected. 2139 */ 2140 switch (offset) { 2141 case APIC_OFFSET_APR: 2142 case APIC_OFFSET_PPR: 2143 case APIC_OFFSET_RRR: 2144 case APIC_OFFSET_CMCI_LVT: 2145 case APIC_OFFSET_TIMER_CCR: 2146 allowed = 1; 2147 break; 2148 default: 2149 break; 2150 } 2151 } else if (access_type == 1) { 2152 /* 2153 * Write data access to the following registers is expected. 2154 */ 2155 switch (offset) { 2156 case APIC_OFFSET_VER: 2157 case APIC_OFFSET_APR: 2158 case APIC_OFFSET_PPR: 2159 case APIC_OFFSET_RRR: 2160 case APIC_OFFSET_ISR0 ... APIC_OFFSET_ISR7: 2161 case APIC_OFFSET_TMR0 ... 
APIC_OFFSET_TMR7: 2162 case APIC_OFFSET_IRR0 ... APIC_OFFSET_IRR7: 2163 case APIC_OFFSET_CMCI_LVT: 2164 case APIC_OFFSET_TIMER_CCR: 2165 allowed = 1; 2166 break; 2167 default: 2168 break; 2169 } 2170 } 2171 2172 if (allowed) { 2173 vie = vm_vie_ctx(vmx->vm, vcpuid); 2174 vmexit_mmio_emul(vmexit, vie, DEFAULT_APIC_BASE + offset, 2175 VIE_INVALID_GLA); 2176 } 2177 2178 /* 2179 * Regardless of whether the APIC-access is allowed this handler 2180 * always returns UNHANDLED: 2181 * - if the access is allowed then it is handled by emulating the 2182 * instruction that caused the VM-exit (outside the critical section) 2183 * - if the access is not allowed then it will be converted to an 2184 * exitcode of VM_EXITCODE_VMX and will be dealt with in userland. 2185 */ 2186 return (UNHANDLED); 2187 } 2188 2189 static enum task_switch_reason 2190 vmx_task_switch_reason(uint64_t qual) 2191 { 2192 int reason; 2193 2194 reason = (qual >> 30) & 0x3; 2195 switch (reason) { 2196 case 0: 2197 return (TSR_CALL); 2198 case 1: 2199 return (TSR_IRET); 2200 case 2: 2201 return (TSR_JMP); 2202 case 3: 2203 return (TSR_IDT_GATE); 2204 default: 2205 panic("%s: invalid reason %d", __func__, reason); 2206 } 2207 } 2208 2209 static int 2210 emulate_wrmsr(struct vmx *vmx, int vcpuid, u_int num, uint64_t val, bool *retu) 2211 { 2212 int error; 2213 2214 if (lapic_msr(num)) 2215 error = lapic_wrmsr(vmx->vm, vcpuid, num, val, retu); 2216 else 2217 error = vmx_wrmsr(vmx, vcpuid, num, val, retu); 2218 2219 return (error); 2220 } 2221 2222 static int 2223 emulate_rdmsr(struct vmx *vmx, int vcpuid, u_int num, bool *retu) 2224 { 2225 uint64_t result; 2226 int error; 2227 2228 if (lapic_msr(num)) 2229 error = lapic_rdmsr(vmx->vm, vcpuid, num, &result, retu); 2230 else 2231 error = vmx_rdmsr(vmx, vcpuid, num, &result, retu); 2232 2233 if (error == 0) { 2234 vmx->ctx[vcpuid].guest_rax = (uint32_t)result; 2235 vmx->ctx[vcpuid].guest_rdx = result >> 32; 2236 } 2237 2238 return (error); 2239 } 2240 2241 #ifndef __FreeBSD__ 2242 #define __predict_false(x) (x) 2243 #endif 2244 2245 static int 2246 vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit) 2247 { 2248 int error, errcode, errcode_valid, handled; 2249 struct vmxctx *vmxctx; 2250 struct vie *vie; 2251 struct vlapic *vlapic; 2252 struct vm_task_switch *ts; 2253 uint32_t eax, ecx, edx, idtvec_info, idtvec_err, intr_info, inst_info; 2254 uint32_t intr_type, intr_vec, reason; 2255 uint64_t exitintinfo, qual, gpa; 2256 bool retu; 2257 2258 CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_VIRTUAL_NMI) != 0); 2259 CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_NMI_EXITING) != 0); 2260 2261 handled = UNHANDLED; 2262 vmxctx = &vmx->ctx[vcpu]; 2263 2264 qual = vmexit->u.vmx.exit_qualification; 2265 reason = vmexit->u.vmx.exit_reason; 2266 vmexit->exitcode = VM_EXITCODE_BOGUS; 2267 2268 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_COUNT, 1); 2269 SDT_PROBE3(vmm, vmx, exit, entry, vmx, vcpu, vmexit); 2270 2271 /* 2272 * VM-entry failures during or after loading guest state. 2273 * 2274 * These VM-exits are uncommon but must be handled specially 2275 * as most VM-exit fields are not populated as usual. 
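 * (EXIT_REASON_MCE_DURING_ENTRY below is one such case: the handler
 * simply vectors to the host machine-check handler and bails out.)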
2276 */ 2277 if (__predict_false(reason == EXIT_REASON_MCE_DURING_ENTRY)) { 2278 VCPU_CTR0(vmx->vm, vcpu, "Handling MCE during VM-entry"); 2279 #ifdef __FreeBSD__ 2280 __asm __volatile("int $18"); 2281 #else 2282 vmm_call_trap(T_MCE); 2283 #endif 2284 return (1); 2285 } 2286 2287 /* 2288 * VM exits that can be triggered during event delivery need to 2289 * be handled specially by re-injecting the event if the IDT 2290 * vectoring information field's valid bit is set. 2291 * 2292 * See "Information for VM Exits During Event Delivery" in Intel SDM 2293 * for details. 2294 */ 2295 idtvec_info = vmcs_idt_vectoring_info(); 2296 if (idtvec_info & VMCS_IDT_VEC_VALID) { 2297 idtvec_info &= ~(1 << 12); /* clear undefined bit */ 2298 exitintinfo = idtvec_info; 2299 if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) { 2300 idtvec_err = vmcs_idt_vectoring_err(); 2301 exitintinfo |= (uint64_t)idtvec_err << 32; 2302 } 2303 error = vm_exit_intinfo(vmx->vm, vcpu, exitintinfo); 2304 KASSERT(error == 0, ("%s: vm_set_intinfo error %d", 2305 __func__, error)); 2306 2307 /* 2308 * If 'virtual NMIs' are being used and the VM-exit 2309 * happened while injecting an NMI during the previous 2310 * VM-entry, then clear "blocking by NMI" in the 2311 * Guest Interruptibility-State so the NMI can be 2312 * reinjected on the subsequent VM-entry. 2313 * 2314 * However, if the NMI was being delivered through a task 2315 * gate, then the new task must start execution with NMIs 2316 * blocked so don't clear NMI blocking in this case. 2317 */ 2318 intr_type = idtvec_info & VMCS_INTR_T_MASK; 2319 if (intr_type == VMCS_INTR_T_NMI) { 2320 if (reason != EXIT_REASON_TASK_SWITCH) 2321 vmx_clear_nmi_blocking(vmx, vcpu); 2322 else 2323 vmx_assert_nmi_blocking(vmx, vcpu); 2324 } 2325 2326 /* 2327 * Update VM-entry instruction length if the event being 2328 * delivered was a software interrupt or software exception. 2329 */ 2330 if (intr_type == VMCS_INTR_T_SWINTR || 2331 intr_type == VMCS_INTR_T_PRIV_SWEXCEPTION || 2332 intr_type == VMCS_INTR_T_SWEXCEPTION) { 2333 vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length); 2334 } 2335 } 2336 2337 switch (reason) { 2338 case EXIT_REASON_TASK_SWITCH: 2339 ts = &vmexit->u.task_switch; 2340 ts->tsssel = qual & 0xffff; 2341 ts->reason = vmx_task_switch_reason(qual); 2342 ts->ext = 0; 2343 ts->errcode_valid = 0; 2344 vmx_paging_info(&ts->paging); 2345 /* 2346 * If the task switch was due to a CALL, JMP, IRET, software 2347 * interrupt (INT n) or software exception (INT3, INTO), 2348 * then the saved %rip references the instruction that caused 2349 * the task switch. The instruction length field in the VMCS 2350 * is valid in this case. 2351 * 2352 * In all other cases (e.g., NMI, hardware exception) the 2353 * saved %rip is one that would have been saved in the old TSS 2354 * had the task switch completed normally so the instruction 2355 * length field is not needed in this case and is explicitly 2356 * set to 0. 
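 * The TSR_IDT_GATE case below uses the IDT-vectoring type to
 * distinguish these two situations.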
2357 */ 2358 if (ts->reason == TSR_IDT_GATE) { 2359 KASSERT(idtvec_info & VMCS_IDT_VEC_VALID, 2360 ("invalid idtvec_info %x for IDT task switch", 2361 idtvec_info)); 2362 intr_type = idtvec_info & VMCS_INTR_T_MASK; 2363 if (intr_type != VMCS_INTR_T_SWINTR && 2364 intr_type != VMCS_INTR_T_SWEXCEPTION && 2365 intr_type != VMCS_INTR_T_PRIV_SWEXCEPTION) { 2366 /* Task switch triggered by external event */ 2367 ts->ext = 1; 2368 vmexit->inst_length = 0; 2369 if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) { 2370 ts->errcode_valid = 1; 2371 ts->errcode = vmcs_idt_vectoring_err(); 2372 } 2373 } 2374 } 2375 vmexit->exitcode = VM_EXITCODE_TASK_SWITCH; 2376 SDT_PROBE4(vmm, vmx, exit, taskswitch, vmx, vcpu, vmexit, ts); 2377 VCPU_CTR4(vmx->vm, vcpu, "task switch reason %d, tss 0x%04x, " 2378 "%s errcode 0x%016lx", ts->reason, ts->tsssel, 2379 ts->ext ? "external" : "internal", 2380 ((uint64_t)ts->errcode << 32) | ts->errcode_valid); 2381 break; 2382 case EXIT_REASON_CR_ACCESS: 2383 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CR_ACCESS, 1); 2384 SDT_PROBE4(vmm, vmx, exit, craccess, vmx, vcpu, vmexit, qual); 2385 switch (qual & 0xf) { 2386 case 0: 2387 handled = vmx_emulate_cr0_access(vmx, vcpu, qual); 2388 break; 2389 case 4: 2390 handled = vmx_emulate_cr4_access(vmx, vcpu, qual); 2391 break; 2392 case 8: 2393 handled = vmx_emulate_cr8_access(vmx, vcpu, qual); 2394 break; 2395 } 2396 break; 2397 case EXIT_REASON_RDMSR: 2398 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_RDMSR, 1); 2399 retu = false; 2400 ecx = vmxctx->guest_rcx; 2401 VCPU_CTR1(vmx->vm, vcpu, "rdmsr 0x%08x", ecx); 2402 SDT_PROBE4(vmm, vmx, exit, rdmsr, vmx, vcpu, vmexit, ecx); 2403 error = emulate_rdmsr(vmx, vcpu, ecx, &retu); 2404 if (error) { 2405 vmexit->exitcode = VM_EXITCODE_RDMSR; 2406 vmexit->u.msr.code = ecx; 2407 } else if (!retu) { 2408 handled = HANDLED; 2409 } else { 2410 /* Return to userspace with a valid exitcode */ 2411 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, 2412 ("emulate_rdmsr retu with bogus exitcode")); 2413 } 2414 break; 2415 case EXIT_REASON_WRMSR: 2416 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_WRMSR, 1); 2417 retu = false; 2418 eax = vmxctx->guest_rax; 2419 ecx = vmxctx->guest_rcx; 2420 edx = vmxctx->guest_rdx; 2421 VCPU_CTR2(vmx->vm, vcpu, "wrmsr 0x%08x value 0x%016lx", 2422 ecx, (uint64_t)edx << 32 | eax); 2423 SDT_PROBE5(vmm, vmx, exit, wrmsr, vmx, vmexit, vcpu, ecx, 2424 (uint64_t)edx << 32 | eax); 2425 error = emulate_wrmsr(vmx, vcpu, ecx, 2426 (uint64_t)edx << 32 | eax, &retu); 2427 if (error) { 2428 vmexit->exitcode = VM_EXITCODE_WRMSR; 2429 vmexit->u.msr.code = ecx; 2430 vmexit->u.msr.wval = (uint64_t)edx << 32 | eax; 2431 } else if (!retu) { 2432 handled = HANDLED; 2433 } else { 2434 /* Return to userspace with a valid exitcode */ 2435 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, 2436 ("emulate_wrmsr retu with bogus exitcode")); 2437 } 2438 break; 2439 case EXIT_REASON_HLT: 2440 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_HLT, 1); 2441 SDT_PROBE3(vmm, vmx, exit, halt, vmx, vcpu, vmexit); 2442 vmexit->exitcode = VM_EXITCODE_HLT; 2443 vmexit->u.hlt.rflags = vmcs_read(VMCS_GUEST_RFLAGS); 2444 if (vmx_cap_en(vmx, VMX_CAP_APICV)) { 2445 vmexit->u.hlt.intr_status = 2446 vmcs_read(VMCS_GUEST_INTR_STATUS); 2447 } else { 2448 vmexit->u.hlt.intr_status = 0; 2449 } 2450 break; 2451 case EXIT_REASON_MTF: 2452 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MTRAP, 1); 2453 SDT_PROBE3(vmm, vmx, exit, mtrap, vmx, vcpu, vmexit); 2454 vmexit->exitcode = VM_EXITCODE_MTRAP; 2455 vmexit->inst_length = 0; 2456 break; 2457 case EXIT_REASON_PAUSE: 2458 
vmm_stat_incr(vmx->vm, vcpu, VMEXIT_PAUSE, 1); 2459 SDT_PROBE3(vmm, vmx, exit, pause, vmx, vcpu, vmexit); 2460 vmexit->exitcode = VM_EXITCODE_PAUSE; 2461 break; 2462 case EXIT_REASON_INTR_WINDOW: 2463 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INTR_WINDOW, 1); 2464 SDT_PROBE3(vmm, vmx, exit, intrwindow, vmx, vcpu, vmexit); 2465 vmx_clear_int_window_exiting(vmx, vcpu); 2466 return (1); 2467 case EXIT_REASON_EXT_INTR: 2468 /* 2469 * External interrupts serve only to cause VM exits and allow 2470 * the host interrupt handler to run. 2471 * 2472 * If this external interrupt triggers a virtual interrupt 2473 * to a VM, then that state will be recorded by the 2474 * host interrupt handler in the VM's softc. We will inject 2475 * this virtual interrupt during the subsequent VM enter. 2476 */ 2477 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO); 2478 SDT_PROBE4(vmm, vmx, exit, interrupt, 2479 vmx, vcpu, vmexit, intr_info); 2480 2481 /* 2482 * XXX: Ignore this exit if VMCS_INTR_VALID is not set. 2483 * This appears to be a bug in VMware Fusion? 2484 */ 2485 if (!(intr_info & VMCS_INTR_VALID)) 2486 return (1); 2487 KASSERT((intr_info & VMCS_INTR_VALID) != 0 && 2488 (intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_HWINTR, 2489 ("VM exit interruption info invalid: %x", intr_info)); 2490 vmx_trigger_hostintr(intr_info & 0xff); 2491 2492 /* 2493 * This is special. We want to treat this as an 'handled' 2494 * VM-exit but not increment the instruction pointer. 2495 */ 2496 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXTINT, 1); 2497 return (1); 2498 case EXIT_REASON_NMI_WINDOW: 2499 SDT_PROBE3(vmm, vmx, exit, nmiwindow, vmx, vcpu, vmexit); 2500 /* Exit to allow the pending virtual NMI to be injected */ 2501 if (vm_nmi_pending(vmx->vm, vcpu)) 2502 vmx_inject_nmi(vmx, vcpu); 2503 vmx_clear_nmi_window_exiting(vmx, vcpu); 2504 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NMI_WINDOW, 1); 2505 return (1); 2506 case EXIT_REASON_INOUT: 2507 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INOUT, 1); 2508 vie = vm_vie_ctx(vmx->vm, vcpu); 2509 vmexit_inout(vmexit, vie, qual, (uint32_t)vmxctx->guest_rax); 2510 SDT_PROBE3(vmm, vmx, exit, inout, vmx, vcpu, vmexit); 2511 break; 2512 case EXIT_REASON_CPUID: 2513 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CPUID, 1); 2514 SDT_PROBE3(vmm, vmx, exit, cpuid, vmx, vcpu, vmexit); 2515 handled = vmx_handle_cpuid(vmx->vm, vcpu, vmxctx); 2516 break; 2517 case EXIT_REASON_EXCEPTION: 2518 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXCEPTION, 1); 2519 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO); 2520 KASSERT((intr_info & VMCS_INTR_VALID) != 0, 2521 ("VM exit interruption info invalid: %x", intr_info)); 2522 2523 intr_vec = intr_info & 0xff; 2524 intr_type = intr_info & VMCS_INTR_T_MASK; 2525 2526 /* 2527 * If Virtual NMIs control is 1 and the VM-exit is due to a 2528 * fault encountered during the execution of IRET then we must 2529 * restore the state of "virtual-NMI blocking" before resuming 2530 * the guest. 2531 * 2532 * See "Resuming Guest Software after Handling an Exception". 2533 * See "Information for VM Exits Due to Vectored Events". 2534 */ 2535 if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 && 2536 (intr_vec != IDT_DF) && 2537 (intr_info & EXIT_QUAL_NMIUDTI) != 0) 2538 vmx_restore_nmi_blocking(vmx, vcpu); 2539 2540 /* 2541 * The NMI has already been handled in vmx_exit_handle_nmi(). 2542 */ 2543 if (intr_type == VMCS_INTR_T_NMI) 2544 return (1); 2545 2546 /* 2547 * Call the machine check handler by hand. Also don't reflect 2548 * the machine check back into the guest. 
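 * (#MC is vector 18; the illumos build raises it via
 * vmm_call_trap(T_MCE) rather than an inline 'int $18'.)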
2549 */ 2550 if (intr_vec == IDT_MC) { 2551 VCPU_CTR0(vmx->vm, vcpu, "Vectoring to MCE handler"); 2552 #ifdef __FreeBSD__ 2553 __asm __volatile("int $18"); 2554 #else 2555 vmm_call_trap(T_MCE); 2556 #endif 2557 return (1); 2558 } 2559 2560 /* 2561 * If the hypervisor has requested user exits for 2562 * debug exceptions, bounce them out to userland. 2563 */ 2564 if (intr_type == VMCS_INTR_T_SWEXCEPTION && intr_vec == IDT_BP && 2565 (vmx->cap[vcpu].set & (1 << VM_CAP_BPT_EXIT))) { 2566 vmexit->exitcode = VM_EXITCODE_BPT; 2567 vmexit->u.bpt.inst_length = vmexit->inst_length; 2568 vmexit->inst_length = 0; 2569 break; 2570 } 2571 2572 if (intr_vec == IDT_PF) { 2573 vmxctx->guest_cr2 = qual; 2574 } 2575 2576 /* 2577 * Software exceptions exhibit trap-like behavior. This in 2578 * turn requires populating the VM-entry instruction length 2579 * so that the %rip in the trap frame is past the INT3/INTO 2580 * instruction. 2581 */ 2582 if (intr_type == VMCS_INTR_T_SWEXCEPTION) 2583 vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length); 2584 2585 /* Reflect all other exceptions back into the guest */ 2586 errcode_valid = errcode = 0; 2587 if (intr_info & VMCS_INTR_DEL_ERRCODE) { 2588 errcode_valid = 1; 2589 errcode = vmcs_read(VMCS_EXIT_INTR_ERRCODE); 2590 } 2591 VCPU_CTR2(vmx->vm, vcpu, "Reflecting exception %d/%x into " 2592 "the guest", intr_vec, errcode); 2593 SDT_PROBE5(vmm, vmx, exit, exception, 2594 vmx, vcpu, vmexit, intr_vec, errcode); 2595 error = vm_inject_exception(vmx->vm, vcpu, intr_vec, 2596 errcode_valid, errcode, 0); 2597 KASSERT(error == 0, ("%s: vm_inject_exception error %d", 2598 __func__, error)); 2599 return (1); 2600 2601 case EXIT_REASON_EPT_FAULT: 2602 /* 2603 * If 'gpa' lies within the address space allocated to 2604 * memory then this must be a nested page fault otherwise 2605 * this must be an instruction that accesses MMIO space. 2606 */ 2607 gpa = vmcs_gpa(); 2608 if (vm_mem_allocated(vmx->vm, vcpu, gpa) || 2609 apic_access_fault(vmx, vcpu, gpa)) { 2610 vmexit->exitcode = VM_EXITCODE_PAGING; 2611 vmexit->inst_length = 0; 2612 vmexit->u.paging.gpa = gpa; 2613 vmexit->u.paging.fault_type = ept_fault_type(qual); 2614 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NESTED_FAULT, 1); 2615 SDT_PROBE5(vmm, vmx, exit, nestedfault, 2616 vmx, vcpu, vmexit, gpa, qual); 2617 } else if (ept_emulation_fault(qual)) { 2618 vie = vm_vie_ctx(vmx->vm, vcpu); 2619 vmexit_mmio_emul(vmexit, vie, gpa, vmcs_gla()); 2620 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MMIO_EMUL, 1); 2621 SDT_PROBE4(vmm, vmx, exit, mmiofault, 2622 vmx, vcpu, vmexit, gpa); 2623 } 2624 /* 2625 * If Virtual NMIs control is 1 and the VM-exit is due to an 2626 * EPT fault during the execution of IRET then we must restore 2627 * the state of "virtual-NMI blocking" before resuming. 2628 * 2629 * See description of "NMI unblocking due to IRET" in 2630 * "Exit Qualification for EPT Violations". 
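 * The same NMI-unblocking treatment is applied to the
 * exception-induced exits handled above.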
2631 */ 2632 if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 && 2633 (qual & EXIT_QUAL_NMIUDTI) != 0) 2634 vmx_restore_nmi_blocking(vmx, vcpu); 2635 break; 2636 case EXIT_REASON_VIRTUALIZED_EOI: 2637 vmexit->exitcode = VM_EXITCODE_IOAPIC_EOI; 2638 vmexit->u.ioapic_eoi.vector = qual & 0xFF; 2639 SDT_PROBE3(vmm, vmx, exit, eoi, vmx, vcpu, vmexit); 2640 vmexit->inst_length = 0; /* trap-like */ 2641 break; 2642 case EXIT_REASON_APIC_ACCESS: 2643 SDT_PROBE3(vmm, vmx, exit, apicaccess, vmx, vcpu, vmexit); 2644 handled = vmx_handle_apic_access(vmx, vcpu, vmexit); 2645 break; 2646 case EXIT_REASON_APIC_WRITE: 2647 /* 2648 * APIC-write VM exit is trap-like so the %rip is already 2649 * pointing to the next instruction. 2650 */ 2651 vmexit->inst_length = 0; 2652 vlapic = vm_lapic(vmx->vm, vcpu); 2653 SDT_PROBE4(vmm, vmx, exit, apicwrite, 2654 vmx, vcpu, vmexit, vlapic); 2655 handled = vmx_handle_apic_write(vmx, vcpu, vlapic, qual); 2656 break; 2657 case EXIT_REASON_XSETBV: 2658 SDT_PROBE3(vmm, vmx, exit, xsetbv, vmx, vcpu, vmexit); 2659 handled = vmx_emulate_xsetbv(vmx, vcpu, vmexit); 2660 break; 2661 case EXIT_REASON_MONITOR: 2662 SDT_PROBE3(vmm, vmx, exit, monitor, vmx, vcpu, vmexit); 2663 vmexit->exitcode = VM_EXITCODE_MONITOR; 2664 break; 2665 case EXIT_REASON_MWAIT: 2666 SDT_PROBE3(vmm, vmx, exit, mwait, vmx, vcpu, vmexit); 2667 vmexit->exitcode = VM_EXITCODE_MWAIT; 2668 break; 2669 case EXIT_REASON_TPR: 2670 vlapic = vm_lapic(vmx->vm, vcpu); 2671 vlapic_sync_tpr(vlapic); 2672 vmexit->inst_length = 0; 2673 handled = HANDLED; 2674 break; 2675 case EXIT_REASON_VMCALL: 2676 case EXIT_REASON_VMCLEAR: 2677 case EXIT_REASON_VMLAUNCH: 2678 case EXIT_REASON_VMPTRLD: 2679 case EXIT_REASON_VMPTRST: 2680 case EXIT_REASON_VMREAD: 2681 case EXIT_REASON_VMRESUME: 2682 case EXIT_REASON_VMWRITE: 2683 case EXIT_REASON_VMXOFF: 2684 case EXIT_REASON_VMXON: 2685 SDT_PROBE3(vmm, vmx, exit, vminsn, vmx, vcpu, vmexit); 2686 vmexit->exitcode = VM_EXITCODE_VMINSN; 2687 break; 2688 default: 2689 SDT_PROBE4(vmm, vmx, exit, unknown, 2690 vmx, vcpu, vmexit, reason); 2691 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_UNKNOWN, 1); 2692 break; 2693 } 2694 2695 if (handled) { 2696 /* 2697 * It is possible that control is returned to userland 2698 * even though we were able to handle the VM exit in the 2699 * kernel. 2700 * 2701 * In such a case we want to make sure that the userland 2702 * restarts guest execution at the instruction *after* 2703 * the one we just processed. Therefore we update the 2704 * guest rip in the VMCS and in 'vmexit'. 2705 */ 2706 vmexit->rip += vmexit->inst_length; 2707 vmexit->inst_length = 0; 2708 vmcs_write(VMCS_GUEST_RIP, vmexit->rip); 2709 } else { 2710 if (vmexit->exitcode == VM_EXITCODE_BOGUS) { 2711 /* 2712 * If this VM exit was not claimed by anybody then 2713 * treat it as a generic VMX exit. 2714 */ 2715 vmexit->exitcode = VM_EXITCODE_VMX; 2716 vmexit->u.vmx.status = VM_SUCCESS; 2717 vmexit->u.vmx.inst_type = 0; 2718 vmexit->u.vmx.inst_error = 0; 2719 } else { 2720 /* 2721 * The exitcode and collateral have been populated. 2722 * The VM exit will be processed further in userland. 
2723 */ 2724 } 2725 } 2726 2727 SDT_PROBE4(vmm, vmx, exit, return, 2728 vmx, vcpu, vmexit, handled); 2729 return (handled); 2730 } 2731 2732 static void 2733 vmx_exit_inst_error(struct vmxctx *vmxctx, int rc, struct vm_exit *vmexit) 2734 { 2735 2736 KASSERT(vmxctx->inst_fail_status != VM_SUCCESS, 2737 ("vmx_exit_inst_error: invalid inst_fail_status %d", 2738 vmxctx->inst_fail_status)); 2739 2740 vmexit->inst_length = 0; 2741 vmexit->exitcode = VM_EXITCODE_VMX; 2742 vmexit->u.vmx.status = vmxctx->inst_fail_status; 2743 vmexit->u.vmx.inst_error = vmcs_instruction_error(); 2744 vmexit->u.vmx.exit_reason = ~0; 2745 vmexit->u.vmx.exit_qualification = ~0; 2746 2747 switch (rc) { 2748 case VMX_VMRESUME_ERROR: 2749 case VMX_VMLAUNCH_ERROR: 2750 case VMX_INVEPT_ERROR: 2751 #ifndef __FreeBSD__ 2752 case VMX_VMWRITE_ERROR: 2753 #endif 2754 vmexit->u.vmx.inst_type = rc; 2755 break; 2756 default: 2757 panic("vm_exit_inst_error: vmx_enter_guest returned %d", rc); 2758 } 2759 } 2760 2761 /* 2762 * If the NMI-exiting VM execution control is set to '1' then an NMI in 2763 * non-root operation causes a VM-exit. NMI blocking is in effect so it is 2764 * sufficient to simply vector to the NMI handler via a software interrupt. 2765 * However, this must be done before maskable interrupts are enabled 2766 * otherwise the "iret" issued by an interrupt handler will incorrectly 2767 * clear NMI blocking. 2768 */ 2769 static __inline void 2770 vmx_exit_handle_nmi(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit) 2771 { 2772 uint32_t intr_info; 2773 2774 KASSERT((read_rflags() & PSL_I) == 0, ("interrupts enabled")); 2775 2776 if (vmexit->u.vmx.exit_reason != EXIT_REASON_EXCEPTION) 2777 return; 2778 2779 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO); 2780 KASSERT((intr_info & VMCS_INTR_VALID) != 0, 2781 ("VM exit interruption info invalid: %x", intr_info)); 2782 2783 if ((intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_NMI) { 2784 KASSERT((intr_info & 0xff) == IDT_NMI, ("VM exit due " 2785 "to NMI has invalid vector: %x", intr_info)); 2786 VCPU_CTR0(vmx->vm, vcpuid, "Vectoring to NMI handler"); 2787 #ifdef __FreeBSD__ 2788 __asm __volatile("int $2"); 2789 #else 2790 vmm_call_trap(T_NMIFLT); 2791 #endif 2792 } 2793 } 2794 2795 static __inline void 2796 vmx_dr_enter_guest(struct vmxctx *vmxctx) 2797 { 2798 register_t rflags; 2799 2800 /* Save host control debug registers. */ 2801 vmxctx->host_dr7 = rdr7(); 2802 vmxctx->host_debugctl = rdmsr(MSR_DEBUGCTLMSR); 2803 2804 /* 2805 * Disable debugging in DR7 and DEBUGCTL to avoid triggering 2806 * exceptions in the host based on the guest DRx values. The 2807 * guest DR7 and DEBUGCTL are saved/restored in the VMCS. 2808 */ 2809 load_dr7(0); 2810 wrmsr(MSR_DEBUGCTLMSR, 0); 2811 2812 /* 2813 * Disable single stepping the kernel to avoid corrupting the 2814 * guest DR6. A debugger might still be able to corrupt the 2815 * guest DR6 by setting a breakpoint after this point and then 2816 * single stepping. 2817 */ 2818 rflags = read_rflags(); 2819 vmxctx->host_tf = rflags & PSL_T; 2820 write_rflags(rflags & ~PSL_T); 2821 2822 /* Save host debug registers. */ 2823 vmxctx->host_dr0 = rdr0(); 2824 vmxctx->host_dr1 = rdr1(); 2825 vmxctx->host_dr2 = rdr2(); 2826 vmxctx->host_dr3 = rdr3(); 2827 vmxctx->host_dr6 = rdr6(); 2828 2829 /* Restore guest debug registers. 
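 * DR7 and DEBUGCTL are deliberately skipped; as noted above, those
 * are saved and restored through the VMCS guest state.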
*/ 2830 load_dr0(vmxctx->guest_dr0); 2831 load_dr1(vmxctx->guest_dr1); 2832 load_dr2(vmxctx->guest_dr2); 2833 load_dr3(vmxctx->guest_dr3); 2834 load_dr6(vmxctx->guest_dr6); 2835 } 2836 2837 static __inline void 2838 vmx_dr_leave_guest(struct vmxctx *vmxctx) 2839 { 2840 2841 /* Save guest debug registers. */ 2842 vmxctx->guest_dr0 = rdr0(); 2843 vmxctx->guest_dr1 = rdr1(); 2844 vmxctx->guest_dr2 = rdr2(); 2845 vmxctx->guest_dr3 = rdr3(); 2846 vmxctx->guest_dr6 = rdr6(); 2847 2848 /* 2849 * Restore host debug registers. Restore DR7, DEBUGCTL, and 2850 * PSL_T last. 2851 */ 2852 load_dr0(vmxctx->host_dr0); 2853 load_dr1(vmxctx->host_dr1); 2854 load_dr2(vmxctx->host_dr2); 2855 load_dr3(vmxctx->host_dr3); 2856 load_dr6(vmxctx->host_dr6); 2857 wrmsr(MSR_DEBUGCTLMSR, vmxctx->host_debugctl); 2858 load_dr7(vmxctx->host_dr7); 2859 write_rflags(read_rflags() | vmxctx->host_tf); 2860 } 2861 2862 static int 2863 vmx_run(void *arg, int vcpu, register_t rip, pmap_t pmap, 2864 struct vm_eventinfo *evinfo) 2865 { 2866 int rc, handled, launched; 2867 struct vmx *vmx; 2868 struct vm *vm; 2869 struct vmxctx *vmxctx; 2870 uintptr_t vmcs_pa; 2871 struct vm_exit *vmexit; 2872 struct vlapic *vlapic; 2873 uint32_t exit_reason; 2874 #ifdef __FreeBSD__ 2875 struct region_descriptor gdtr, idtr; 2876 uint16_t ldt_sel; 2877 #endif 2878 2879 vmx = arg; 2880 vm = vmx->vm; 2881 vmcs_pa = vmx->vmcs_pa[vcpu]; 2882 vmxctx = &vmx->ctx[vcpu]; 2883 vlapic = vm_lapic(vm, vcpu); 2884 vmexit = vm_exitinfo(vm, vcpu); 2885 launched = 0; 2886 2887 KASSERT(vmxctx->pmap == pmap, 2888 ("pmap %p different than ctx pmap %p", pmap, vmxctx->pmap)); 2889 2890 vmx_msr_guest_enter(vmx, vcpu); 2891 2892 vmcs_load(vmcs_pa); 2893 2894 #ifndef __FreeBSD__ 2895 VERIFY(vmx->vmcs_state[vcpu] == VS_NONE && curthread->t_preempt != 0); 2896 vmx->vmcs_state[vcpu] = VS_LOADED; 2897 #endif 2898 2899 /* 2900 * XXX 2901 * We do this every time because we may setup the virtual machine 2902 * from a different process than the one that actually runs it. 2903 * 2904 * If the life of a virtual machine was spent entirely in the context 2905 * of a single process we could do this once in vmx_vminit(). 2906 */ 2907 vmcs_write(VMCS_HOST_CR3, rcr3()); 2908 2909 vmcs_write(VMCS_GUEST_RIP, rip); 2910 vmx_set_pcpu_defaults(vmx, vcpu, pmap); 2911 do { 2912 KASSERT(vmcs_guest_rip() == rip, ("%s: vmcs guest rip mismatch " 2913 "%lx/%lx", __func__, vmcs_guest_rip(), rip)); 2914 2915 handled = UNHANDLED; 2916 /* 2917 * Interrupts are disabled from this point on until the 2918 * guest starts executing. This is done for the following 2919 * reasons: 2920 * 2921 * If an AST is asserted on this thread after the check below, 2922 * then the IPI_AST notification will not be lost, because it 2923 * will cause a VM exit due to external interrupt as soon as 2924 * the guest state is loaded. 2925 * 2926 * A posted interrupt after 'vmx_inject_interrupts()' will 2927 * not be "lost" because it will be held pending in the host 2928 * APIC because interrupts are disabled. The pending interrupt 2929 * will be recognized as soon as the guest state is loaded. 2930 * 2931 * The same reasoning applies to the IPI generated by 2932 * pmap_invalidate_ept(). 2933 * 2934 * The bulk of guest interrupt injection is done without 2935 * interrupts disabled on the host CPU. This is necessary 2936 * since contended mutexes might force the thread to sleep. 
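 * In contrast, the window from disable_intr() below until the guest
 * actually starts executing is kept as short as possible.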
2937 */ 2938 vmx_inject_interrupts(vmx, vcpu, vlapic, rip); 2939 disable_intr(); 2940 if (vmx_cap_en(vmx, VMX_CAP_APICV)) { 2941 vmx_inject_pir(vlapic); 2942 } 2943 2944 /* 2945 * Check for vcpu suspension after injecting events because 2946 * vmx_inject_interrupts() can suspend the vcpu due to a 2947 * triple fault. 2948 */ 2949 if (vcpu_suspended(evinfo)) { 2950 enable_intr(); 2951 vm_exit_suspended(vmx->vm, vcpu, rip); 2952 break; 2953 } 2954 2955 if (vcpu_runblocked(evinfo)) { 2956 enable_intr(); 2957 vm_exit_runblock(vmx->vm, vcpu, rip); 2958 break; 2959 } 2960 2961 if (vcpu_reqidle(evinfo)) { 2962 enable_intr(); 2963 vm_exit_reqidle(vmx->vm, vcpu, rip); 2964 break; 2965 } 2966 2967 if (vcpu_should_yield(vm, vcpu)) { 2968 enable_intr(); 2969 vm_exit_astpending(vmx->vm, vcpu, rip); 2970 vmx_astpending_trace(vmx, vcpu, rip); 2971 handled = HANDLED; 2972 break; 2973 } 2974 2975 if (vcpu_debugged(vm, vcpu)) { 2976 enable_intr(); 2977 vm_exit_debug(vmx->vm, vcpu, rip); 2978 break; 2979 } 2980 2981 #ifndef __FreeBSD__ 2982 if ((rc = smt_acquire()) != 1) { 2983 enable_intr(); 2984 vmexit->rip = rip; 2985 vmexit->inst_length = 0; 2986 if (rc == -1) { 2987 vmexit->exitcode = VM_EXITCODE_HT; 2988 } else { 2989 vmexit->exitcode = VM_EXITCODE_BOGUS; 2990 handled = HANDLED; 2991 } 2992 break; 2993 } 2994 2995 /* 2996 * If this thread has gone off-cpu due to mutex operations 2997 * during vmx_run, the VMCS will have been unloaded, forcing a 2998 * re-VMLAUNCH as opposed to VMRESUME. 2999 */ 3000 launched = (vmx->vmcs_state[vcpu] & VS_LAUNCHED) != 0; 3001 /* 3002 * Restoration of the GDT limit is taken care of by 3003 * vmx_savectx(). Since the maximum practical index for the 3004 * IDT is 255, restoring its limits from the post-VMX-exit 3005 * default of 0xffff is not a concern. 3006 * 3007 * Only 64-bit hypervisor callers are allowed, which forgoes 3008 * the need to restore any LDT descriptor. Toss an error to 3009 * anyone attempting to break that rule. 3010 */ 3011 if (curproc->p_model != DATAMODEL_LP64) { 3012 smt_release(); 3013 enable_intr(); 3014 bzero(vmexit, sizeof (*vmexit)); 3015 vmexit->rip = rip; 3016 vmexit->exitcode = VM_EXITCODE_VMX; 3017 vmexit->u.vmx.status = VM_FAIL_INVALID; 3018 handled = UNHANDLED; 3019 break; 3020 } 3021 #else 3022 /* 3023 * VM exits restore the base address but not the 3024 * limits of GDTR and IDTR. The VMCS only stores the 3025 * base address, so VM exits set the limits to 0xffff. 3026 * Save and restore the full GDTR and IDTR to restore 3027 * the limits. 3028 * 3029 * The VMCS does not save the LDTR at all, and VM 3030 * exits clear LDTR as if a NULL selector were loaded. 3031 * The userspace hypervisor probably doesn't use a 3032 * LDT, but save and restore it to be safe. 3033 */ 3034 sgdt(&gdtr); 3035 sidt(&idtr); 3036 ldt_sel = sldt(); 3037 #endif 3038 3039 /* 3040 * If TPR Shadowing is enabled, the TPR Threshold must be 3041 * updated right before entering the guest. 
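 * The threshold is taken from the current vlapic CR8, so a guest TPR
 * write that drops below it causes a TPR-below-threshold exit (see the
 * EXIT_REASON_TPR handler above), which re-syncs the vlapic.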
3042 */ 3043 if (vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW) && 3044 !vmx_cap_en(vmx, VMX_CAP_APICV)) { 3045 if ((vmx->cap[vcpu].proc_ctls & 3046 PROCBASED_USE_TPR_SHADOW) != 0) { 3047 vmcs_write(VMCS_TPR_THRESHOLD, 3048 vlapic_get_cr8(vlapic)); 3049 } 3050 } 3051 3052 vmx_run_trace(vmx, vcpu); 3053 vmx_dr_enter_guest(vmxctx); 3054 rc = vmx_enter_guest(vmxctx, vmx, launched); 3055 vmx_dr_leave_guest(vmxctx); 3056 3057 #ifndef __FreeBSD__ 3058 vmx->vmcs_state[vcpu] |= VS_LAUNCHED; 3059 smt_release(); 3060 #else 3061 bare_lgdt(&gdtr); 3062 lidt(&idtr); 3063 lldt(ldt_sel); 3064 #endif 3065 3066 /* Collect some information for VM exit processing */ 3067 vmexit->rip = rip = vmcs_guest_rip(); 3068 vmexit->inst_length = vmexit_instruction_length(); 3069 vmexit->u.vmx.exit_reason = exit_reason = vmcs_exit_reason(); 3070 vmexit->u.vmx.exit_qualification = vmcs_exit_qualification(); 3071 3072 /* Update 'nextrip' */ 3073 vmx->state[vcpu].nextrip = rip; 3074 3075 if (rc == VMX_GUEST_VMEXIT) { 3076 vmx_exit_handle_nmi(vmx, vcpu, vmexit); 3077 enable_intr(); 3078 handled = vmx_exit_process(vmx, vcpu, vmexit); 3079 } else { 3080 enable_intr(); 3081 vmx_exit_inst_error(vmxctx, rc, vmexit); 3082 } 3083 #ifdef __FreeBSD__ 3084 launched = 1; 3085 #endif 3086 vmx_exit_trace(vmx, vcpu, rip, exit_reason, handled); 3087 rip = vmexit->rip; 3088 } while (handled); 3089 3090 /* 3091 * If a VM exit has been handled then the exitcode must be BOGUS; 3092 * if a VM exit is not handled then the exitcode must not be BOGUS. 3093 */ 3094 if ((handled && vmexit->exitcode != VM_EXITCODE_BOGUS) || 3095 (!handled && vmexit->exitcode == VM_EXITCODE_BOGUS)) { 3096 panic("Mismatch between handled (%d) and exitcode (%d)", 3097 handled, vmexit->exitcode); 3098 } 3099 3100 if (!handled) 3101 vmm_stat_incr(vm, vcpu, VMEXIT_USERSPACE, 1); 3102 3103 VCPU_CTR1(vm, vcpu, "returning from vmx_run: exitcode %d", 3104 vmexit->exitcode); 3105 3106 vmcs_clear(vmcs_pa); 3107 vmx_msr_guest_exit(vmx, vcpu); 3108 3109 #ifndef __FreeBSD__ 3110 VERIFY(vmx->vmcs_state[vcpu] != VS_NONE && curthread->t_preempt != 0); 3111 vmx->vmcs_state[vcpu] = VS_NONE; 3112 #endif 3113 3114 return (0); 3115 } 3116 3117 static void 3118 vmx_vmcleanup(void *arg) 3119 { 3120 int i; 3121 struct vmx *vmx = arg; 3122 uint16_t maxcpus; 3123 3124 if (apic_access_virtualization(vmx, 0)) 3125 vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE); 3126 3127 maxcpus = vm_get_maxcpus(vmx->vm); 3128 for (i = 0; i < maxcpus; i++) 3129 vpid_free(vmx->state[i].vpid); 3130 3131 free(vmx, M_VMX); 3132 3133 return; 3134 } 3135 3136 static register_t * 3137 vmxctx_regptr(struct vmxctx *vmxctx, int reg) 3138 { 3139 switch (reg) { 3140 case VM_REG_GUEST_RAX: 3141 return (&vmxctx->guest_rax); 3142 case VM_REG_GUEST_RBX: 3143 return (&vmxctx->guest_rbx); 3144 case VM_REG_GUEST_RCX: 3145 return (&vmxctx->guest_rcx); 3146 case VM_REG_GUEST_RDX: 3147 return (&vmxctx->guest_rdx); 3148 case VM_REG_GUEST_RSI: 3149 return (&vmxctx->guest_rsi); 3150 case VM_REG_GUEST_RDI: 3151 return (&vmxctx->guest_rdi); 3152 case VM_REG_GUEST_RBP: 3153 return (&vmxctx->guest_rbp); 3154 case VM_REG_GUEST_R8: 3155 return (&vmxctx->guest_r8); 3156 case VM_REG_GUEST_R9: 3157 return (&vmxctx->guest_r9); 3158 case VM_REG_GUEST_R10: 3159 return (&vmxctx->guest_r10); 3160 case VM_REG_GUEST_R11: 3161 return (&vmxctx->guest_r11); 3162 case VM_REG_GUEST_R12: 3163 return (&vmxctx->guest_r12); 3164 case VM_REG_GUEST_R13: 3165 return (&vmxctx->guest_r13); 3166 case VM_REG_GUEST_R14: 3167 return (&vmxctx->guest_r14); 3168 case VM_REG_GUEST_R15:
3169 return (&vmxctx->guest_r15); 3170 case VM_REG_GUEST_CR2: 3171 return (&vmxctx->guest_cr2); 3172 case VM_REG_GUEST_DR0: 3173 return (&vmxctx->guest_dr0); 3174 case VM_REG_GUEST_DR1: 3175 return (&vmxctx->guest_dr1); 3176 case VM_REG_GUEST_DR2: 3177 return (&vmxctx->guest_dr2); 3178 case VM_REG_GUEST_DR3: 3179 return (&vmxctx->guest_dr3); 3180 case VM_REG_GUEST_DR6: 3181 return (&vmxctx->guest_dr6); 3182 default: 3183 break; 3184 } 3185 return (NULL); 3186 } 3187 3188 static int 3189 vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval) 3190 { 3191 int running, hostcpu, err; 3192 struct vmx *vmx = arg; 3193 register_t *regp; 3194 3195 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu); 3196 if (running && hostcpu != curcpu) 3197 panic("vmx_getreg: %s%d is running", vm_name(vmx->vm), vcpu); 3198 3199 /* VMCS access not required for ctx reads */ 3200 if ((regp = vmxctx_regptr(&vmx->ctx[vcpu], reg)) != NULL) { 3201 *retval = *regp; 3202 return (0); 3203 } 3204 3205 if (!running) { 3206 vmcs_load(vmx->vmcs_pa[vcpu]); 3207 } 3208 3209 err = EINVAL; 3210 if (reg == VM_REG_GUEST_INTR_SHADOW) { 3211 uint64_t gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 3212 *retval = (gi & HWINTR_BLOCKING) ? 1 : 0; 3213 err = 0; 3214 } else { 3215 uint32_t encoding; 3216 3217 encoding = vmcs_field_encoding(reg); 3218 if (encoding != VMCS_INVALID_ENCODING) { 3219 *retval = vmcs_read(encoding); 3220 err = 0; 3221 } 3222 } 3223 3224 if (!running) { 3225 vmcs_clear(vmx->vmcs_pa[vcpu]); 3226 } 3227 3228 return (err); 3229 } 3230 3231 static int 3232 vmx_setreg(void *arg, int vcpu, int reg, uint64_t val) 3233 { 3234 int running, hostcpu, error; 3235 struct vmx *vmx = arg; 3236 register_t *regp; 3237 3238 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu); 3239 if (running && hostcpu != curcpu) 3240 panic("vmx_setreg: %s%d is running", vm_name(vmx->vm), vcpu); 3241 3242 /* VMCS access not required for ctx writes */ 3243 if ((regp = vmxctx_regptr(&vmx->ctx[vcpu], reg)) != NULL) { 3244 *regp = val; 3245 return (0); 3246 } 3247 3248 if (!running) { 3249 vmcs_load(vmx->vmcs_pa[vcpu]); 3250 } 3251 3252 if (reg == VM_REG_GUEST_INTR_SHADOW) { 3253 if (val != 0) { 3254 /* 3255 * Forcing the vcpu into an interrupt shadow is not 3256 * presently supported. 3257 */ 3258 error = EINVAL; 3259 } else { 3260 uint64_t gi; 3261 3262 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 3263 gi &= ~HWINTR_BLOCKING; 3264 vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi); 3265 error = 0; 3266 } 3267 } else { 3268 uint32_t encoding; 3269 3270 error = 0; 3271 encoding = vmcs_field_encoding(reg); 3272 switch (encoding) { 3273 case VMCS_GUEST_IA32_EFER: 3274 /* 3275 * If the "load EFER" VM-entry control is 1 then the 3276 * value of EFER.LMA must be identical to "IA-32e mode 3277 * guest" bit in the VM-entry control. 3278 */ 3279 if ((entry_ctls & VM_ENTRY_LOAD_EFER) != 0) { 3280 uint64_t ctls; 3281 3282 ctls = vmcs_read(VMCS_ENTRY_CTLS); 3283 if (val & EFER_LMA) { 3284 ctls |= VM_ENTRY_GUEST_LMA; 3285 } else { 3286 ctls &= ~VM_ENTRY_GUEST_LMA; 3287 } 3288 vmcs_write(VMCS_ENTRY_CTLS, ctls); 3289 } 3290 vmcs_write(encoding, val); 3291 break; 3292 case VMCS_GUEST_CR0: 3293 /* 3294 * The guest is not allowed to modify certain bits in 3295 * %cr0 and %cr4. To maintain the illusion of full 3296 * control, they have shadow versions which contain the 3297 * guest-perceived (via reads from the register) values 3298 * as opposed to the guest-effective values. 3299 * 3300 * This is detailed in the SDM: Vol. 3 Ch. 24.6.6. 
3301 */ 3302 vmcs_write(VMCS_CR0_SHADOW, val); 3303 vmcs_write(encoding, vmx_fix_cr0(val)); 3304 break; 3305 case VMCS_GUEST_CR4: 3306 /* See above for detail on %cr4 shadowing */ 3307 vmcs_write(VMCS_CR4_SHADOW, val); 3308 vmcs_write(encoding, vmx_fix_cr4(val)); 3309 break; 3310 case VMCS_GUEST_CR3: 3311 vmcs_write(encoding, val); 3312 /* 3313 * Invalidate the guest vcpu's TLB mappings to emulate 3314 * the behavior of updating %cr3. 3315 * 3316 * XXX the processor retains global mappings when %cr3 3317 * is updated but vmx_invvpid() does not. 3318 */ 3319 vmx_invvpid(vmx, vcpu, vmx->ctx[vcpu].pmap, running); 3320 break; 3321 case VMCS_INVALID_ENCODING: 3322 error = EINVAL; 3323 break; 3324 default: 3325 vmcs_write(encoding, val); 3326 break; 3327 } 3328 } 3329 3330 if (!running) { 3331 vmcs_clear(vmx->vmcs_pa[vcpu]); 3332 } 3333 3334 return (error); 3335 } 3336 3337 static int 3338 vmx_getdesc(void *arg, int vcpu, int seg, struct seg_desc *desc) 3339 { 3340 int hostcpu, running; 3341 struct vmx *vmx = arg; 3342 uint32_t base, limit, access; 3343 3344 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu); 3345 if (running && hostcpu != curcpu) 3346 panic("vmx_getdesc: %s%d is running", vm_name(vmx->vm), vcpu); 3347 3348 if (!running) { 3349 vmcs_load(vmx->vmcs_pa[vcpu]); 3350 } 3351 3352 vmcs_seg_desc_encoding(seg, &base, &limit, &access); 3353 desc->base = vmcs_read(base); 3354 desc->limit = vmcs_read(limit); 3355 if (access != VMCS_INVALID_ENCODING) { 3356 desc->access = vmcs_read(access); 3357 } else { 3358 desc->access = 0; 3359 } 3360 3361 if (!running) { 3362 vmcs_clear(vmx->vmcs_pa[vcpu]); 3363 } 3364 return (0); 3365 } 3366 3367 static int 3368 vmx_setdesc(void *arg, int vcpu, int seg, struct seg_desc *desc) 3369 { 3370 int hostcpu, running; 3371 struct vmx *vmx = arg; 3372 uint32_t base, limit, access; 3373 3374 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu); 3375 if (running && hostcpu != curcpu) 3376 panic("vmx_setdesc: %s%d is running", vm_name(vmx->vm), vcpu); 3377 3378 if (!running) { 3379 vmcs_load(vmx->vmcs_pa[vcpu]); 3380 } 3381 3382 vmcs_seg_desc_encoding(seg, &base, &limit, &access); 3383 vmcs_write(base, desc->base); 3384 vmcs_write(limit, desc->limit); 3385 if (access != VMCS_INVALID_ENCODING) { 3386 vmcs_write(access, desc->access); 3387 } 3388 3389 if (!running) { 3390 vmcs_clear(vmx->vmcs_pa[vcpu]); 3391 } 3392 return (0); 3393 } 3394 3395 static int 3396 vmx_getcap(void *arg, int vcpu, int type, int *retval) 3397 { 3398 struct vmx *vmx = arg; 3399 int vcap; 3400 int ret; 3401 3402 ret = ENOENT; 3403 3404 vcap = vmx->cap[vcpu].set; 3405 3406 switch (type) { 3407 case VM_CAP_HALT_EXIT: 3408 if (cap_halt_exit) 3409 ret = 0; 3410 break; 3411 case VM_CAP_PAUSE_EXIT: 3412 if (cap_pause_exit) 3413 ret = 0; 3414 break; 3415 case VM_CAP_MTRAP_EXIT: 3416 if (cap_monitor_trap) 3417 ret = 0; 3418 break; 3419 case VM_CAP_ENABLE_INVPCID: 3420 if (cap_invpcid) 3421 ret = 0; 3422 break; 3423 case VM_CAP_BPT_EXIT: 3424 ret = 0; 3425 break; 3426 default: 3427 break; 3428 } 3429 3430 if (ret == 0) 3431 *retval = (vcap & (1 << type)) ? 
1 : 0; 3432 3433 return (ret); 3434 } 3435 3436 static int 3437 vmx_setcap(void *arg, int vcpu, int type, int val) 3438 { 3439 struct vmx *vmx = arg; 3440 uint32_t baseval, reg, flag; 3441 uint32_t *pptr; 3442 int error; 3443 3444 error = ENOENT; 3445 pptr = NULL; 3446 3447 switch (type) { 3448 case VM_CAP_HALT_EXIT: 3449 if (cap_halt_exit) { 3450 error = 0; 3451 pptr = &vmx->cap[vcpu].proc_ctls; 3452 baseval = *pptr; 3453 flag = PROCBASED_HLT_EXITING; 3454 reg = VMCS_PRI_PROC_BASED_CTLS; 3455 } 3456 break; 3457 case VM_CAP_MTRAP_EXIT: 3458 if (cap_monitor_trap) { 3459 error = 0; 3460 pptr = &vmx->cap[vcpu].proc_ctls; 3461 baseval = *pptr; 3462 flag = PROCBASED_MTF; 3463 reg = VMCS_PRI_PROC_BASED_CTLS; 3464 } 3465 break; 3466 case VM_CAP_PAUSE_EXIT: 3467 if (cap_pause_exit) { 3468 error = 0; 3469 pptr = &vmx->cap[vcpu].proc_ctls; 3470 baseval = *pptr; 3471 flag = PROCBASED_PAUSE_EXITING; 3472 reg = VMCS_PRI_PROC_BASED_CTLS; 3473 } 3474 break; 3475 case VM_CAP_ENABLE_INVPCID: 3476 if (cap_invpcid) { 3477 error = 0; 3478 pptr = &vmx->cap[vcpu].proc_ctls2; 3479 baseval = *pptr; 3480 flag = PROCBASED2_ENABLE_INVPCID; 3481 reg = VMCS_SEC_PROC_BASED_CTLS; 3482 } 3483 break; 3484 case VM_CAP_BPT_EXIT: 3485 error = 0; 3486 3487 /* Don't change the bitmap if we are tracing all exceptions. */ 3488 if (vmx->cap[vcpu].exc_bitmap != 0xffffffff) { 3489 pptr = &vmx->cap[vcpu].exc_bitmap; 3490 baseval = *pptr; 3491 flag = (1 << IDT_BP); 3492 reg = VMCS_EXCEPTION_BITMAP; 3493 } 3494 break; 3495 default: 3496 break; 3497 } 3498 3499 if (error != 0) { 3500 return (error); 3501 } 3502 3503 if (pptr != NULL) { 3504 if (val) { 3505 baseval |= flag; 3506 } else { 3507 baseval &= ~flag; 3508 } 3509 vmcs_load(vmx->vmcs_pa[vcpu]); 3510 vmcs_write(reg, baseval); 3511 vmcs_clear(vmx->vmcs_pa[vcpu]); 3512 3513 /* 3514 * Update optional stored flags, and record 3515 * setting 3516 */ 3517 *pptr = baseval; 3518 } 3519 3520 if (val) { 3521 vmx->cap[vcpu].set |= (1 << type); 3522 } else { 3523 vmx->cap[vcpu].set &= ~(1 << type); 3524 } 3525 3526 return (0); 3527 } 3528 3529 struct vlapic_vtx { 3530 struct vlapic vlapic; 3531 struct pir_desc *pir_desc; 3532 struct vmx *vmx; 3533 u_int pending_prio; 3534 }; 3535 3536 #define VPR_PRIO_BIT(vpr) (1 << ((vpr) >> 4)) 3537 3538 #define VMX_CTR_PIR(vm, vcpuid, pir_desc, notify, vector, level, msg) \ 3539 do { \ 3540 VCPU_CTR2(vm, vcpuid, msg " assert %s-triggered vector %d", \ 3541 level ? "level" : "edge", vector); \ 3542 VCPU_CTR1(vm, vcpuid, msg " pir0 0x%016lx", pir_desc->pir[0]); \ 3543 VCPU_CTR1(vm, vcpuid, msg " pir1 0x%016lx", pir_desc->pir[1]); \ 3544 VCPU_CTR1(vm, vcpuid, msg " pir2 0x%016lx", pir_desc->pir[2]); \ 3545 VCPU_CTR1(vm, vcpuid, msg " pir3 0x%016lx", pir_desc->pir[3]); \ 3546 VCPU_CTR1(vm, vcpuid, msg " notify: %s", notify ? "yes" : "no");\ 3547 } while (0) 3548 3549 /* 3550 * vlapic->ops handlers that utilize the APICv hardware assist described in 3551 * Chapter 29 of the Intel SDM. 3552 */ 3553 static int 3554 vmx_set_intr_ready(struct vlapic *vlapic, int vector, bool level) 3555 { 3556 struct vlapic_vtx *vlapic_vtx; 3557 struct pir_desc *pir_desc; 3558 uint64_t mask; 3559 int idx, notify = 0; 3560 3561 vlapic_vtx = (struct vlapic_vtx *)vlapic; 3562 pir_desc = vlapic_vtx->pir_desc; 3563 3564 /* 3565 * Keep track of interrupt requests in the PIR descriptor. This is 3566 * because the virtual APIC page pointed to by the VMCS cannot be 3567 * modified if the vcpu is running. 
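 * The accumulated bits are later moved into the vAPIC IRR, either by
 * the CPU itself (hardware posted-interrupt processing) or by
 * vmx_inject_pir() below.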
3568 */ 3569 idx = vector / 64; 3570 mask = 1UL << (vector % 64); 3571 atomic_set_long(&pir_desc->pir[idx], mask); 3572 3573 /* 3574 * A notification is required whenever the 'pending' bit makes a 3575 * transition from 0->1. 3576 * 3577 * Even if the 'pending' bit is already asserted, notification about 3578 * the incoming interrupt may still be necessary. For example, if a 3579 * vCPU is HLTed with a high PPR, a low priority interrupt would cause 3580 * the 0->1 'pending' transition with a notification, but the vCPU 3581 * would ignore the interrupt for the time being. The same vCPU would 3582 * need to then be notified if a high-priority interrupt arrived which 3583 * satisfied the PPR. 3584 * 3585 * The priorities of interrupts injected while 'pending' is asserted 3586 * are tracked in a custom bitfield 'pending_prio'. Should the 3587 * to-be-injected interrupt exceed the priorities already present, the 3588 * notification is sent. The priorities recorded in 'pending_prio' are 3589 * cleared whenever the 'pending' bit makes another 0->1 transition. 3590 */ 3591 if (atomic_cmpset_long(&pir_desc->pending, 0, 1) != 0) { 3592 notify = 1; 3593 vlapic_vtx->pending_prio = 0; 3594 } else { 3595 const u_int old_prio = vlapic_vtx->pending_prio; 3596 const u_int prio_bit = VPR_PRIO_BIT(vector & APIC_TPR_INT); 3597 3598 if ((old_prio & prio_bit) == 0 && prio_bit > old_prio) { 3599 atomic_set_int(&vlapic_vtx->pending_prio, prio_bit); 3600 notify = 1; 3601 } 3602 } 3603 3604 VMX_CTR_PIR(vlapic->vm, vlapic->vcpuid, pir_desc, notify, vector, 3605 level, "vmx_set_intr_ready"); 3606 return (notify); 3607 } 3608 3609 static int 3610 vmx_pending_intr(struct vlapic *vlapic, int *vecptr) 3611 { 3612 struct vlapic_vtx *vlapic_vtx; 3613 struct pir_desc *pir_desc; 3614 struct LAPIC *lapic; 3615 uint64_t pending, pirval; 3616 uint32_t ppr, vpr; 3617 int i; 3618 3619 /* 3620 * This function is only expected to be called from the 'HLT' exit 3621 * handler which does not care about the vector that is pending. 3622 */ 3623 KASSERT(vecptr == NULL, ("vmx_pending_intr: vecptr must be NULL")); 3624 3625 vlapic_vtx = (struct vlapic_vtx *)vlapic; 3626 pir_desc = vlapic_vtx->pir_desc; 3627 3628 pending = atomic_load_acq_long(&pir_desc->pending); 3629 if (!pending) { 3630 /* 3631 * While a virtual interrupt may have already been 3632 * processed the actual delivery maybe pending the 3633 * interruptibility of the guest. Recognize a pending 3634 * interrupt by reevaluating virtual interrupts 3635 * following Section 29.2.1 in the Intel SDM Volume 3. 3636 */ 3637 struct vm_exit *vmexit; 3638 uint8_t rvi, ppr; 3639 3640 vmexit = vm_exitinfo(vlapic->vm, vlapic->vcpuid); 3641 rvi = vmexit->u.hlt.intr_status & APIC_TPR_INT; 3642 lapic = vlapic->apic_page; 3643 ppr = lapic->ppr & APIC_TPR_INT; 3644 if (rvi > ppr) { 3645 return (1); 3646 } 3647 3648 return (0); 3649 } 3650 3651 /* 3652 * If there is an interrupt pending then it will be recognized only 3653 * if its priority is greater than the processor priority. 3654 * 3655 * Special case: if the processor priority is zero then any pending 3656 * interrupt will be recognized. 
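 * The priority class compared here is the upper nibble of the vector
 * (bits 7:4), which is what masking with APIC_TPR_INT extracts.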
3657 */ 3658 lapic = vlapic->apic_page; 3659 ppr = lapic->ppr & APIC_TPR_INT; 3660 if (ppr == 0) 3661 return (1); 3662 3663 VCPU_CTR1(vlapic->vm, vlapic->vcpuid, "HLT with non-zero PPR %d", 3664 lapic->ppr); 3665 3666 vpr = 0; 3667 for (i = 3; i >= 0; i--) { 3668 pirval = pir_desc->pir[i]; 3669 if (pirval != 0) { 3670 vpr = (i * 64 + flsl(pirval) - 1) & APIC_TPR_INT; 3671 break; 3672 } 3673 } 3674 3675 /* 3676 * If the highest-priority pending interrupt falls short of the 3677 * processor priority of this vCPU, ensure that 'pending_prio' does not 3678 * have any stale bits which would preclude a higher-priority interrupt 3679 * from incurring a notification later. 3680 */ 3681 if (vpr <= ppr) { 3682 const u_int prio_bit = VPR_PRIO_BIT(vpr); 3683 const u_int old = vlapic_vtx->pending_prio; 3684 3685 if (old > prio_bit && (old & prio_bit) == 0) { 3686 vlapic_vtx->pending_prio = prio_bit; 3687 } 3688 return (0); 3689 } 3690 return (1); 3691 } 3692 3693 static void 3694 vmx_intr_accepted(struct vlapic *vlapic, int vector) 3695 { 3696 3697 panic("vmx_intr_accepted: not expected to be called"); 3698 } 3699 3700 static void 3701 vmx_set_tmr(struct vlapic *vlapic, const uint32_t *masks) 3702 { 3703 vmcs_write(VMCS_EOI_EXIT0, ((uint64_t)masks[1] << 32) | masks[0]); 3704 vmcs_write(VMCS_EOI_EXIT1, ((uint64_t)masks[3] << 32) | masks[2]); 3705 vmcs_write(VMCS_EOI_EXIT2, ((uint64_t)masks[5] << 32) | masks[4]); 3706 vmcs_write(VMCS_EOI_EXIT3, ((uint64_t)masks[7] << 32) | masks[6]); 3707 } 3708 3709 static void 3710 vmx_enable_x2apic_mode_ts(struct vlapic *vlapic) 3711 { 3712 struct vmx *vmx; 3713 uint32_t proc_ctls; 3714 int vcpuid; 3715 3716 vcpuid = vlapic->vcpuid; 3717 vmx = ((struct vlapic_vtx *)vlapic)->vmx; 3718 3719 proc_ctls = vmx->cap[vcpuid].proc_ctls; 3720 proc_ctls &= ~PROCBASED_USE_TPR_SHADOW; 3721 proc_ctls |= PROCBASED_CR8_LOAD_EXITING; 3722 proc_ctls |= PROCBASED_CR8_STORE_EXITING; 3723 vmx->cap[vcpuid].proc_ctls = proc_ctls; 3724 3725 vmcs_load(vmx->vmcs_pa[vcpuid]); 3726 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, proc_ctls); 3727 vmcs_clear(vmx->vmcs_pa[vcpuid]); 3728 } 3729 3730 static void 3731 vmx_enable_x2apic_mode_vid(struct vlapic *vlapic) 3732 { 3733 struct vmx *vmx; 3734 uint32_t proc_ctls2; 3735 int vcpuid, error; 3736 3737 vcpuid = vlapic->vcpuid; 3738 vmx = ((struct vlapic_vtx *)vlapic)->vmx; 3739 3740 proc_ctls2 = vmx->cap[vcpuid].proc_ctls2; 3741 KASSERT((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) != 0, 3742 ("%s: invalid proc_ctls2 %x", __func__, proc_ctls2)); 3743 3744 proc_ctls2 &= ~PROCBASED2_VIRTUALIZE_APIC_ACCESSES; 3745 proc_ctls2 |= PROCBASED2_VIRTUALIZE_X2APIC_MODE; 3746 vmx->cap[vcpuid].proc_ctls2 = proc_ctls2; 3747 3748 vmcs_load(vmx->vmcs_pa[vcpuid]); 3749 vmcs_write(VMCS_SEC_PROC_BASED_CTLS, proc_ctls2); 3750 vmcs_clear(vmx->vmcs_pa[vcpuid]); 3751 3752 if (vlapic->vcpuid == 0) { 3753 /* 3754 * The nested page table mappings are shared by all vcpus 3755 * so unmap the APIC access page just once. 3756 */ 3757 error = vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE); 3758 KASSERT(error == 0, ("%s: vm_unmap_mmio error %d", 3759 __func__, error)); 3760 3761 /* 3762 * The MSR bitmap is shared by all vcpus so modify it only 3763 * once in the context of vcpu 0. 
3764 */ 3765 error = vmx_allow_x2apic_msrs(vmx); 3766 KASSERT(error == 0, ("%s: vmx_allow_x2apic_msrs error %d", 3767 __func__, error)); 3768 } 3769 } 3770 3771 static void 3772 vmx_post_intr(struct vlapic *vlapic, int hostcpu) 3773 { 3774 #ifdef __FreeBSD__ 3775 ipi_cpu(hostcpu, pirvec); 3776 #else 3777 psm_send_pir_ipi(hostcpu); 3778 #endif 3779 } 3780 3781 /* 3782 * Transfer the pending interrupts in the PIR descriptor to the IRR 3783 * in the virtual APIC page. 3784 */ 3785 static void 3786 vmx_inject_pir(struct vlapic *vlapic) 3787 { 3788 struct vlapic_vtx *vlapic_vtx; 3789 struct pir_desc *pir_desc; 3790 struct LAPIC *lapic; 3791 uint64_t val, pirval; 3792 int rvi, pirbase = -1; 3793 uint16_t intr_status_old, intr_status_new; 3794 3795 vlapic_vtx = (struct vlapic_vtx *)vlapic; 3796 pir_desc = vlapic_vtx->pir_desc; 3797 if (atomic_cmpset_long(&pir_desc->pending, 1, 0) == 0) { 3798 VCPU_CTR0(vlapic->vm, vlapic->vcpuid, "vmx_inject_pir: " 3799 "no posted interrupt pending"); 3800 return; 3801 } 3802 3803 pirval = 0; 3804 pirbase = -1; 3805 lapic = vlapic->apic_page; 3806 3807 val = atomic_readandclear_long(&pir_desc->pir[0]); 3808 if (val != 0) { 3809 lapic->irr0 |= val; 3810 lapic->irr1 |= val >> 32; 3811 pirbase = 0; 3812 pirval = val; 3813 } 3814 3815 val = atomic_readandclear_long(&pir_desc->pir[1]); 3816 if (val != 0) { 3817 lapic->irr2 |= val; 3818 lapic->irr3 |= val >> 32; 3819 pirbase = 64; 3820 pirval = val; 3821 } 3822 3823 val = atomic_readandclear_long(&pir_desc->pir[2]); 3824 if (val != 0) { 3825 lapic->irr4 |= val; 3826 lapic->irr5 |= val >> 32; 3827 pirbase = 128; 3828 pirval = val; 3829 } 3830 3831 val = atomic_readandclear_long(&pir_desc->pir[3]); 3832 if (val != 0) { 3833 lapic->irr6 |= val; 3834 lapic->irr7 |= val >> 32; 3835 pirbase = 192; 3836 pirval = val; 3837 } 3838 3839 VLAPIC_CTR_IRR(vlapic, "vmx_inject_pir"); 3840 3841 /* 3842 * Update RVI so the processor can evaluate pending virtual 3843 * interrupts on VM-entry. 3844 * 3845 * It is possible for pirval to be 0 here, even though the 3846 * pending bit has been set. The scenario is: 3847 * CPU-Y is sending a posted interrupt to CPU-X, which 3848 * is running a guest and processing posted interrupts in h/w. 3849 * CPU-X will eventually exit and the state seen in s/w is 3850 * the pending bit set, but no PIR bits set. 

static struct vlapic *
vmx_vlapic_init(void *arg, int vcpuid)
{
	struct vmx *vmx;
	struct vlapic *vlapic;
	struct vlapic_vtx *vlapic_vtx;

	vmx = arg;

	vlapic = malloc(sizeof(struct vlapic_vtx), M_VLAPIC, M_WAITOK | M_ZERO);
	vlapic->vm = vmx->vm;
	vlapic->vcpuid = vcpuid;
	vlapic->apic_page = (struct LAPIC *)&vmx->apic_page[vcpuid];

	vlapic_vtx = (struct vlapic_vtx *)vlapic;
	vlapic_vtx->pir_desc = &vmx->pir_desc[vcpuid];
	vlapic_vtx->vmx = vmx;

	if (vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW)) {
		vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode_ts;
	}
	if (vmx_cap_en(vmx, VMX_CAP_APICV)) {
		vlapic->ops.set_intr_ready = vmx_set_intr_ready;
		vlapic->ops.pending_intr = vmx_pending_intr;
		vlapic->ops.intr_accepted = vmx_intr_accepted;
		vlapic->ops.set_tmr = vmx_set_tmr;
		vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode_vid;

		if (vmx_cap_en(vmx, VMX_CAP_APICV_PIR)) {
			vlapic->ops.post_intr = vmx_post_intr;
		}
	}

	vlapic_init(vlapic);

	return (vlapic);
}

static void
vmx_vlapic_cleanup(void *arg, struct vlapic *vlapic)
{

	vlapic_cleanup(vlapic);
	free(vlapic, M_VLAPIC);
}

#ifndef __FreeBSD__
static void
vmx_savectx(void *arg, int vcpu)
{
	struct vmx *vmx = arg;

	if ((vmx->vmcs_state[vcpu] & VS_LOADED) != 0) {
		vmcs_clear(vmx->vmcs_pa[vcpu]);
		vmx_msr_guest_exit(vmx, vcpu);
		/*
		 * Having VMCLEARed the VMCS, it can no longer be re-entered
		 * with VMRESUME, but must be VMLAUNCHed again.
		 */
		vmx->vmcs_state[vcpu] &= ~VS_LAUNCHED;
	}

	reset_gdtr_limit();
}

static void
vmx_restorectx(void *arg, int vcpu)
{
	struct vmx *vmx = arg;

	ASSERT0(vmx->vmcs_state[vcpu] & VS_LAUNCHED);

	if ((vmx->vmcs_state[vcpu] & VS_LOADED) != 0) {
		vmx_msr_guest_enter(vmx, vcpu);
		vmcs_load(vmx->vmcs_pa[vcpu]);
	}
}
#endif /* __FreeBSD__ */

struct vmm_ops vmm_ops_intel = {
	.init = vmx_init,
	.cleanup = vmx_cleanup,
	.resume = vmx_restore,
	.vminit = vmx_vminit,
	.vmrun = vmx_run,
	.vmcleanup = vmx_vmcleanup,
	.vmgetreg = vmx_getreg,
	.vmsetreg = vmx_setreg,
	.vmgetdesc = vmx_getdesc,
	.vmsetdesc = vmx_setdesc,
	.vmgetcap = vmx_getcap,
	.vmsetcap = vmx_setcap,
	.vmspace_alloc = ept_vmspace_alloc,
	.vmspace_free = ept_vmspace_free,
	.vlapic_init = vmx_vlapic_init,
	.vlapic_cleanup = vmx_vlapic_cleanup,

#ifndef __FreeBSD__
	.vmsavectx = vmx_savectx,
	.vmrestorectx = vmx_restorectx,
#endif
};
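
/*
 * Illustrative note (not part of the upstream code): vmm_ops_intel is the
 * VT-x backend's entry-point table.  The machine-independent vmm code picks
 * a backend at initialization based on the CPU vendor and thereafter calls
 * through it, conceptually along the lines of:
 *
 *	struct vmm_ops *ops;
 *
 *	ops = cpu_is_intel ? &vmm_ops_intel : &vmm_ops_amd;
 *	error = (*ops->init)(ipinum);
 *
 * The actual selection logic, the 'cpu_is_intel' predicate, and the AMD
 * counterpart live outside this file; the snippet above only sketches how
 * the table is consumed.
 */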

#ifndef __FreeBSD__
/* Side-effect free HW validation derived from checks in vmx_init. */
int
vmx_x86_supported(const char **msg)
{
	int error;
	uint32_t tmp;

	ASSERT(msg != NULL);

	/* Check support for primary processor-based VM-execution controls */
	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
	    MSR_VMX_TRUE_PROCBASED_CTLS, PROCBASED_CTLS_ONE_SETTING,
	    PROCBASED_CTLS_ZERO_SETTING, &tmp);
	if (error) {
		*msg = "processor does not support desired primary "
		    "processor-based controls";
		return (error);
	}

	/* Check support for secondary processor-based VM-execution controls */
	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
	    MSR_VMX_PROCBASED_CTLS2, PROCBASED_CTLS2_ONE_SETTING,
	    PROCBASED_CTLS2_ZERO_SETTING, &tmp);
	if (error) {
		*msg = "processor does not support desired secondary "
		    "processor-based controls";
		return (error);
	}

	/* Check support for pin-based VM-execution controls */
	error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
	    MSR_VMX_TRUE_PINBASED_CTLS, PINBASED_CTLS_ONE_SETTING,
	    PINBASED_CTLS_ZERO_SETTING, &tmp);
	if (error) {
		*msg = "processor does not support desired pin-based controls";
		return (error);
	}

	/* Check support for VM-exit controls */
	error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS,
	    VM_EXIT_CTLS_ONE_SETTING, VM_EXIT_CTLS_ZERO_SETTING, &tmp);
	if (error) {
		*msg = "processor does not support desired exit controls";
		return (error);
	}

	/* Check support for VM-entry controls */
	error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS, MSR_VMX_TRUE_ENTRY_CTLS,
	    VM_ENTRY_CTLS_ONE_SETTING, VM_ENTRY_CTLS_ZERO_SETTING, &tmp);
	if (error) {
		*msg = "processor does not support desired entry controls";
		return (error);
	}

	/* Unrestricted guest is nominally optional, but not for us. */
	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2,
	    PROCBASED2_UNRESTRICTED_GUEST, 0, &tmp);
	if (error) {
		*msg = "processor does not support desired unrestricted guest "
		    "controls";
		return (error);
	}

	return (0);
}
#endif
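
/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * a platform layer probing for VT-x support could use vmx_x86_supported()
 * to surface a human-readable reason when a required control is missing.
 * The calling context and error handling below are assumptions made only
 * for illustration:
 *
 *	const char *msg = NULL;
 *
 *	if (vmx_x86_supported(&msg) != 0) {
 *		cmn_err(CE_WARN, "VT-x unusable: %s", msg);
 *		return (ENOTSUP);
 *	}
 */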