vmx.c revision 268976
1/*- 2 * Copyright (c) 2011 NetApp, Inc. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 * 26 * $FreeBSD: stable/10/sys/amd64/vmm/intel/vmx.c 268976 2014-07-22 04:39:16Z jhb $ 27 */ 28 29#include <sys/cdefs.h> 30__FBSDID("$FreeBSD: stable/10/sys/amd64/vmm/intel/vmx.c 268976 2014-07-22 04:39:16Z jhb $"); 31 32#include <sys/param.h> 33#include <sys/systm.h> 34#include <sys/smp.h> 35#include <sys/kernel.h> 36#include <sys/malloc.h> 37#include <sys/pcpu.h> 38#include <sys/proc.h> 39#include <sys/sysctl.h> 40 41#include <vm/vm.h> 42#include <vm/pmap.h> 43 44#include <machine/psl.h> 45#include <machine/cpufunc.h> 46#include <machine/md_var.h> 47#include <machine/segments.h> 48#include <machine/smp.h> 49#include <machine/specialreg.h> 50#include <machine/vmparam.h> 51 52#include <machine/vmm.h> 53#include <machine/vmm_dev.h> 54#include <machine/vmm_instruction_emul.h> 55#include "vmm_host.h" 56#include "vmm_ioport.h" 57#include "vmm_ipi.h" 58#include "vmm_msr.h" 59#include "vmm_ktr.h" 60#include "vmm_stat.h" 61#include "vatpic.h" 62#include "vlapic.h" 63#include "vlapic_priv.h" 64 65#include "vmx_msr.h" 66#include "ept.h" 67#include "vmx_cpufunc.h" 68#include "vmx.h" 69#include "x86.h" 70#include "vmx_controls.h" 71 72#define PINBASED_CTLS_ONE_SETTING \ 73 (PINBASED_EXTINT_EXITING | \ 74 PINBASED_NMI_EXITING | \ 75 PINBASED_VIRTUAL_NMI) 76#define PINBASED_CTLS_ZERO_SETTING 0 77 78#define PROCBASED_CTLS_WINDOW_SETTING \ 79 (PROCBASED_INT_WINDOW_EXITING | \ 80 PROCBASED_NMI_WINDOW_EXITING) 81 82#define PROCBASED_CTLS_ONE_SETTING \ 83 (PROCBASED_SECONDARY_CONTROLS | \ 84 PROCBASED_IO_EXITING | \ 85 PROCBASED_MSR_BITMAPS | \ 86 PROCBASED_CTLS_WINDOW_SETTING) 87#define PROCBASED_CTLS_ZERO_SETTING \ 88 (PROCBASED_CR3_LOAD_EXITING | \ 89 PROCBASED_CR3_STORE_EXITING | \ 90 PROCBASED_IO_BITMAPS) 91 92#define PROCBASED_CTLS2_ONE_SETTING PROCBASED2_ENABLE_EPT 93#define PROCBASED_CTLS2_ZERO_SETTING 0 94 95#define VM_EXIT_CTLS_ONE_SETTING_NO_PAT \ 96 (VM_EXIT_HOST_LMA | \ 97 VM_EXIT_SAVE_EFER | \ 98 VM_EXIT_LOAD_EFER) 99 100#define VM_EXIT_CTLS_ONE_SETTING \ 101 (VM_EXIT_CTLS_ONE_SETTING_NO_PAT | \ 102 VM_EXIT_ACKNOWLEDGE_INTERRUPT | \ 103 VM_EXIT_SAVE_PAT | \ 104 VM_EXIT_LOAD_PAT) 105#define VM_EXIT_CTLS_ZERO_SETTING VM_EXIT_SAVE_DEBUG_CONTROLS 106 
107#define VM_ENTRY_CTLS_ONE_SETTING_NO_PAT VM_ENTRY_LOAD_EFER 108 109#define VM_ENTRY_CTLS_ONE_SETTING \ 110 (VM_ENTRY_CTLS_ONE_SETTING_NO_PAT | \ 111 VM_ENTRY_LOAD_PAT) 112#define VM_ENTRY_CTLS_ZERO_SETTING \ 113 (VM_ENTRY_LOAD_DEBUG_CONTROLS | \ 114 VM_ENTRY_INTO_SMM | \ 115 VM_ENTRY_DEACTIVATE_DUAL_MONITOR) 116 117#define guest_msr_rw(vmx, msr) \ 118 msr_bitmap_change_access((vmx)->msr_bitmap, (msr), MSR_BITMAP_ACCESS_RW) 119 120#define guest_msr_ro(vmx, msr) \ 121 msr_bitmap_change_access((vmx)->msr_bitmap, (msr), MSR_BITMAP_ACCESS_READ) 122 123#define HANDLED 1 124#define UNHANDLED 0 125 126static MALLOC_DEFINE(M_VMX, "vmx", "vmx"); 127static MALLOC_DEFINE(M_VLAPIC, "vlapic", "vlapic"); 128 129SYSCTL_DECL(_hw_vmm); 130SYSCTL_NODE(_hw_vmm, OID_AUTO, vmx, CTLFLAG_RW, NULL, NULL); 131 132int vmxon_enabled[MAXCPU]; 133static char vmxon_region[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE); 134 135static uint32_t pinbased_ctls, procbased_ctls, procbased_ctls2; 136static uint32_t exit_ctls, entry_ctls; 137 138static uint64_t cr0_ones_mask, cr0_zeros_mask; 139SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_ones_mask, CTLFLAG_RD, 140 &cr0_ones_mask, 0, NULL); 141SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_zeros_mask, CTLFLAG_RD, 142 &cr0_zeros_mask, 0, NULL); 143 144static uint64_t cr4_ones_mask, cr4_zeros_mask; 145SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_ones_mask, CTLFLAG_RD, 146 &cr4_ones_mask, 0, NULL); 147SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_zeros_mask, CTLFLAG_RD, 148 &cr4_zeros_mask, 0, NULL); 149 150static int vmx_no_patmsr; 151 152static int vmx_initialized; 153SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, initialized, CTLFLAG_RD, 154 &vmx_initialized, 0, "Intel VMX initialized"); 155 156/* 157 * Optional capabilities 158 */ 159static int cap_halt_exit; 160static int cap_pause_exit; 161static int cap_unrestricted_guest; 162static int cap_monitor_trap; 163static int cap_invpcid; 164 165static int virtual_interrupt_delivery; 166SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, virtual_interrupt_delivery, CTLFLAG_RD, 167 &virtual_interrupt_delivery, 0, "APICv virtual interrupt delivery support"); 168 169static int posted_interrupts; 170SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, posted_interrupts, CTLFLAG_RD, 171 &posted_interrupts, 0, "APICv posted interrupt support"); 172 173static int pirvec; 174SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, posted_interrupt_vector, CTLFLAG_RD, 175 &pirvec, 0, "APICv posted interrupt vector"); 176 177static struct unrhdr *vpid_unr; 178static u_int vpid_alloc_failed; 179SYSCTL_UINT(_hw_vmm_vmx, OID_AUTO, vpid_alloc_failed, CTLFLAG_RD, 180 &vpid_alloc_failed, 0, NULL); 181 182/* 183 * Use the last page below 4GB as the APIC access address. This address is 184 * occupied by the boot firmware so it is guaranteed that it will not conflict 185 * with a page in system memory. 
186 */ 187#define APIC_ACCESS_ADDRESS 0xFFFFF000 188 189static int vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc); 190static int vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval); 191static void vmx_inject_pir(struct vlapic *vlapic); 192 193#ifdef KTR 194static const char * 195exit_reason_to_str(int reason) 196{ 197 static char reasonbuf[32]; 198 199 switch (reason) { 200 case EXIT_REASON_EXCEPTION: 201 return "exception"; 202 case EXIT_REASON_EXT_INTR: 203 return "extint"; 204 case EXIT_REASON_TRIPLE_FAULT: 205 return "triplefault"; 206 case EXIT_REASON_INIT: 207 return "init"; 208 case EXIT_REASON_SIPI: 209 return "sipi"; 210 case EXIT_REASON_IO_SMI: 211 return "iosmi"; 212 case EXIT_REASON_SMI: 213 return "smi"; 214 case EXIT_REASON_INTR_WINDOW: 215 return "intrwindow"; 216 case EXIT_REASON_NMI_WINDOW: 217 return "nmiwindow"; 218 case EXIT_REASON_TASK_SWITCH: 219 return "taskswitch"; 220 case EXIT_REASON_CPUID: 221 return "cpuid"; 222 case EXIT_REASON_GETSEC: 223 return "getsec"; 224 case EXIT_REASON_HLT: 225 return "hlt"; 226 case EXIT_REASON_INVD: 227 return "invd"; 228 case EXIT_REASON_INVLPG: 229 return "invlpg"; 230 case EXIT_REASON_RDPMC: 231 return "rdpmc"; 232 case EXIT_REASON_RDTSC: 233 return "rdtsc"; 234 case EXIT_REASON_RSM: 235 return "rsm"; 236 case EXIT_REASON_VMCALL: 237 return "vmcall"; 238 case EXIT_REASON_VMCLEAR: 239 return "vmclear"; 240 case EXIT_REASON_VMLAUNCH: 241 return "vmlaunch"; 242 case EXIT_REASON_VMPTRLD: 243 return "vmptrld"; 244 case EXIT_REASON_VMPTRST: 245 return "vmptrst"; 246 case EXIT_REASON_VMREAD: 247 return "vmread"; 248 case EXIT_REASON_VMRESUME: 249 return "vmresume"; 250 case EXIT_REASON_VMWRITE: 251 return "vmwrite"; 252 case EXIT_REASON_VMXOFF: 253 return "vmxoff"; 254 case EXIT_REASON_VMXON: 255 return "vmxon"; 256 case EXIT_REASON_CR_ACCESS: 257 return "craccess"; 258 case EXIT_REASON_DR_ACCESS: 259 return "draccess"; 260 case EXIT_REASON_INOUT: 261 return "inout"; 262 case EXIT_REASON_RDMSR: 263 return "rdmsr"; 264 case EXIT_REASON_WRMSR: 265 return "wrmsr"; 266 case EXIT_REASON_INVAL_VMCS: 267 return "invalvmcs"; 268 case EXIT_REASON_INVAL_MSR: 269 return "invalmsr"; 270 case EXIT_REASON_MWAIT: 271 return "mwait"; 272 case EXIT_REASON_MTF: 273 return "mtf"; 274 case EXIT_REASON_MONITOR: 275 return "monitor"; 276 case EXIT_REASON_PAUSE: 277 return "pause"; 278 case EXIT_REASON_MCE: 279 return "mce"; 280 case EXIT_REASON_TPR: 281 return "tpr"; 282 case EXIT_REASON_APIC_ACCESS: 283 return "apic-access"; 284 case EXIT_REASON_GDTR_IDTR: 285 return "gdtridtr"; 286 case EXIT_REASON_LDTR_TR: 287 return "ldtrtr"; 288 case EXIT_REASON_EPT_FAULT: 289 return "eptfault"; 290 case EXIT_REASON_EPT_MISCONFIG: 291 return "eptmisconfig"; 292 case EXIT_REASON_INVEPT: 293 return "invept"; 294 case EXIT_REASON_RDTSCP: 295 return "rdtscp"; 296 case EXIT_REASON_VMX_PREEMPT: 297 return "vmxpreempt"; 298 case EXIT_REASON_INVVPID: 299 return "invvpid"; 300 case EXIT_REASON_WBINVD: 301 return "wbinvd"; 302 case EXIT_REASON_XSETBV: 303 return "xsetbv"; 304 case EXIT_REASON_APIC_WRITE: 305 return "apic-write"; 306 default: 307 snprintf(reasonbuf, sizeof(reasonbuf), "%d", reason); 308 return (reasonbuf); 309 } 310} 311#endif /* KTR */ 312 313static int 314vmx_allow_x2apic_msrs(struct vmx *vmx) 315{ 316 int i, error; 317 318 error = 0; 319 320 /* 321 * Allow readonly access to the following x2APIC MSRs from the guest. 
322 */ 323 error += guest_msr_ro(vmx, MSR_APIC_ID); 324 error += guest_msr_ro(vmx, MSR_APIC_VERSION); 325 error += guest_msr_ro(vmx, MSR_APIC_LDR); 326 error += guest_msr_ro(vmx, MSR_APIC_SVR); 327 328 for (i = 0; i < 8; i++) 329 error += guest_msr_ro(vmx, MSR_APIC_ISR0 + i); 330 331 for (i = 0; i < 8; i++) 332 error += guest_msr_ro(vmx, MSR_APIC_TMR0 + i); 333 334 for (i = 0; i < 8; i++) 335 error += guest_msr_ro(vmx, MSR_APIC_IRR0 + i); 336 337 error += guest_msr_ro(vmx, MSR_APIC_ESR); 338 error += guest_msr_ro(vmx, MSR_APIC_LVT_TIMER); 339 error += guest_msr_ro(vmx, MSR_APIC_LVT_THERMAL); 340 error += guest_msr_ro(vmx, MSR_APIC_LVT_PCINT); 341 error += guest_msr_ro(vmx, MSR_APIC_LVT_LINT0); 342 error += guest_msr_ro(vmx, MSR_APIC_LVT_LINT1); 343 error += guest_msr_ro(vmx, MSR_APIC_LVT_ERROR); 344 error += guest_msr_ro(vmx, MSR_APIC_ICR_TIMER); 345 error += guest_msr_ro(vmx, MSR_APIC_DCR_TIMER); 346 error += guest_msr_ro(vmx, MSR_APIC_ICR); 347 348 /* 349 * Allow TPR, EOI and SELF_IPI MSRs to be read and written by the guest. 350 * 351 * These registers get special treatment described in the section 352 * "Virtualizing MSR-Based APIC Accesses". 353 */ 354 error += guest_msr_rw(vmx, MSR_APIC_TPR); 355 error += guest_msr_rw(vmx, MSR_APIC_EOI); 356 error += guest_msr_rw(vmx, MSR_APIC_SELF_IPI); 357 358 return (error); 359} 360 361u_long 362vmx_fix_cr0(u_long cr0) 363{ 364 365 return ((cr0 | cr0_ones_mask) & ~cr0_zeros_mask); 366} 367 368u_long 369vmx_fix_cr4(u_long cr4) 370{ 371 372 return ((cr4 | cr4_ones_mask) & ~cr4_zeros_mask); 373} 374 375static void 376vpid_free(int vpid) 377{ 378 if (vpid < 0 || vpid > 0xffff) 379 panic("vpid_free: invalid vpid %d", vpid); 380 381 /* 382 * VPIDs [0,VM_MAXCPU] are special and are not allocated from 383 * the unit number allocator. 384 */ 385 386 if (vpid > VM_MAXCPU) 387 free_unr(vpid_unr, vpid); 388} 389 390static void 391vpid_alloc(uint16_t *vpid, int num) 392{ 393 int i, x; 394 395 if (num <= 0 || num > VM_MAXCPU) 396 panic("invalid number of vpids requested: %d", num); 397 398 /* 399 * If the "enable vpid" execution control is not enabled then the 400 * VPID is required to be 0 for all vcpus. 401 */ 402 if ((procbased_ctls2 & PROCBASED2_ENABLE_VPID) == 0) { 403 for (i = 0; i < num; i++) 404 vpid[i] = 0; 405 return; 406 } 407 408 /* 409 * Allocate a unique VPID for each vcpu from the unit number allocator. 410 */ 411 for (i = 0; i < num; i++) { 412 x = alloc_unr(vpid_unr); 413 if (x == -1) 414 break; 415 else 416 vpid[i] = x; 417 } 418 419 if (i < num) { 420 atomic_add_int(&vpid_alloc_failed, 1); 421 422 /* 423 * If the unit number allocator does not have enough unique 424 * VPIDs then we need to allocate from the [1,VM_MAXCPU] range. 425 * 426 * These VPIDs are not be unique across VMs but this does not 427 * affect correctness because the combined mappings are also 428 * tagged with the EP4TA which is unique for each VM. 429 * 430 * It is still sub-optimal because the invvpid will invalidate 431 * combined mappings for a particular VPID across all EP4TAs. 432 */ 433 while (i-- > 0) 434 vpid_free(vpid[i]); 435 436 for (i = 0; i < num; i++) 437 vpid[i] = i + 1; 438 } 439} 440 441static void 442vpid_init(void) 443{ 444 /* 445 * VPID 0 is required when the "enable VPID" execution control is 446 * disabled. 447 * 448 * VPIDs [1,VM_MAXCPU] are used as the "overflow namespace" when the 449 * unit number allocator does not have sufficient unique VPIDs to 450 * satisfy the allocation. 
451 * 452 * The remaining VPIDs are managed by the unit number allocator. 453 */ 454 vpid_unr = new_unrhdr(VM_MAXCPU + 1, 0xffff, NULL); 455} 456 457static void 458msr_save_area_init(struct msr_entry *g_area, int *g_count) 459{ 460 int cnt; 461 462 static struct msr_entry guest_msrs[] = { 463 { MSR_KGSBASE, 0, 0 }, 464 }; 465 466 cnt = sizeof(guest_msrs) / sizeof(guest_msrs[0]); 467 if (cnt > GUEST_MSR_MAX_ENTRIES) 468 panic("guest msr save area overrun"); 469 bcopy(guest_msrs, g_area, sizeof(guest_msrs)); 470 *g_count = cnt; 471} 472 473static void 474vmx_disable(void *arg __unused) 475{ 476 struct invvpid_desc invvpid_desc = { 0 }; 477 struct invept_desc invept_desc = { 0 }; 478 479 if (vmxon_enabled[curcpu]) { 480 /* 481 * See sections 25.3.3.3 and 25.3.3.4 in Intel Vol 3b. 482 * 483 * VMXON or VMXOFF are not required to invalidate any TLB 484 * caching structures. This prevents potential retention of 485 * cached information in the TLB between distinct VMX episodes. 486 */ 487 invvpid(INVVPID_TYPE_ALL_CONTEXTS, invvpid_desc); 488 invept(INVEPT_TYPE_ALL_CONTEXTS, invept_desc); 489 vmxoff(); 490 } 491 load_cr4(rcr4() & ~CR4_VMXE); 492} 493 494static int 495vmx_cleanup(void) 496{ 497 498 if (pirvec != 0) 499 vmm_ipi_free(pirvec); 500 501 if (vpid_unr != NULL) { 502 delete_unrhdr(vpid_unr); 503 vpid_unr = NULL; 504 } 505 506 smp_rendezvous(NULL, vmx_disable, NULL, NULL); 507 508 return (0); 509} 510 511static void 512vmx_enable(void *arg __unused) 513{ 514 int error; 515 uint64_t feature_control; 516 517 feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL); 518 if ((feature_control & IA32_FEATURE_CONTROL_LOCK) == 0 || 519 (feature_control & IA32_FEATURE_CONTROL_VMX_EN) == 0) { 520 wrmsr(MSR_IA32_FEATURE_CONTROL, 521 feature_control | IA32_FEATURE_CONTROL_VMX_EN | 522 IA32_FEATURE_CONTROL_LOCK); 523 } 524 525 load_cr4(rcr4() | CR4_VMXE); 526 527 *(uint32_t *)vmxon_region[curcpu] = vmx_revision(); 528 error = vmxon(vmxon_region[curcpu]); 529 if (error == 0) 530 vmxon_enabled[curcpu] = 1; 531} 532 533static void 534vmx_restore(void) 535{ 536 537 if (vmxon_enabled[curcpu]) 538 vmxon(vmxon_region[curcpu]); 539} 540 541static int 542vmx_init(int ipinum) 543{ 544 int error, use_tpr_shadow; 545 uint64_t basic, fixed0, fixed1, feature_control; 546 uint32_t tmp, procbased2_vid_bits; 547 548 /* CPUID.1:ECX[bit 5] must be 1 for processor to support VMX */ 549 if (!(cpu_feature2 & CPUID2_VMX)) { 550 printf("vmx_init: processor does not support VMX operation\n"); 551 return (ENXIO); 552 } 553 554 /* 555 * Verify that MSR_IA32_FEATURE_CONTROL lock and VMXON enable bits 556 * are set (bits 0 and 2 respectively). 
557 */ 558 feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL); 559 if ((feature_control & IA32_FEATURE_CONTROL_LOCK) == 1 && 560 (feature_control & IA32_FEATURE_CONTROL_VMX_EN) == 0) { 561 printf("vmx_init: VMX operation disabled by BIOS\n"); 562 return (ENXIO); 563 } 564 565 /* 566 * Verify capabilities MSR_VMX_BASIC: 567 * - bit 54 indicates support for INS/OUTS decoding 568 */ 569 basic = rdmsr(MSR_VMX_BASIC); 570 if ((basic & (1UL << 54)) == 0) { 571 printf("vmx_init: processor does not support desired basic " 572 "capabilities\n"); 573 return (EINVAL); 574 } 575 576 /* Check support for primary processor-based VM-execution controls */ 577 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, 578 MSR_VMX_TRUE_PROCBASED_CTLS, 579 PROCBASED_CTLS_ONE_SETTING, 580 PROCBASED_CTLS_ZERO_SETTING, &procbased_ctls); 581 if (error) { 582 printf("vmx_init: processor does not support desired primary " 583 "processor-based controls\n"); 584 return (error); 585 } 586 587 /* Clear the processor-based ctl bits that are set on demand */ 588 procbased_ctls &= ~PROCBASED_CTLS_WINDOW_SETTING; 589 590 /* Check support for secondary processor-based VM-execution controls */ 591 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, 592 MSR_VMX_PROCBASED_CTLS2, 593 PROCBASED_CTLS2_ONE_SETTING, 594 PROCBASED_CTLS2_ZERO_SETTING, &procbased_ctls2); 595 if (error) { 596 printf("vmx_init: processor does not support desired secondary " 597 "processor-based controls\n"); 598 return (error); 599 } 600 601 /* Check support for VPID */ 602 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2, 603 PROCBASED2_ENABLE_VPID, 0, &tmp); 604 if (error == 0) 605 procbased_ctls2 |= PROCBASED2_ENABLE_VPID; 606 607 /* Check support for pin-based VM-execution controls */ 608 error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS, 609 MSR_VMX_TRUE_PINBASED_CTLS, 610 PINBASED_CTLS_ONE_SETTING, 611 PINBASED_CTLS_ZERO_SETTING, &pinbased_ctls); 612 if (error) { 613 printf("vmx_init: processor does not support desired " 614 "pin-based controls\n"); 615 return (error); 616 } 617 618 /* Check support for VM-exit controls */ 619 error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS, 620 VM_EXIT_CTLS_ONE_SETTING, 621 VM_EXIT_CTLS_ZERO_SETTING, 622 &exit_ctls); 623 if (error) { 624 /* Try again without the PAT MSR bits */ 625 error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, 626 MSR_VMX_TRUE_EXIT_CTLS, 627 VM_EXIT_CTLS_ONE_SETTING_NO_PAT, 628 VM_EXIT_CTLS_ZERO_SETTING, 629 &exit_ctls); 630 if (error) { 631 printf("vmx_init: processor does not support desired " 632 "exit controls\n"); 633 return (error); 634 } else { 635 if (bootverbose) 636 printf("vmm: PAT MSR access not supported\n"); 637 guest_msr_valid(MSR_PAT); 638 vmx_no_patmsr = 1; 639 } 640 } 641 642 /* Check support for VM-entry controls */ 643 if (!vmx_no_patmsr) { 644 error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS, 645 MSR_VMX_TRUE_ENTRY_CTLS, 646 VM_ENTRY_CTLS_ONE_SETTING, 647 VM_ENTRY_CTLS_ZERO_SETTING, 648 &entry_ctls); 649 } else { 650 error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS, 651 MSR_VMX_TRUE_ENTRY_CTLS, 652 VM_ENTRY_CTLS_ONE_SETTING_NO_PAT, 653 VM_ENTRY_CTLS_ZERO_SETTING, 654 &entry_ctls); 655 } 656 657 if (error) { 658 printf("vmx_init: processor does not support desired " 659 "entry controls\n"); 660 return (error); 661 } 662 663 /* 664 * Check support for optional features by testing them 665 * as individual bits 666 */ 667 cap_halt_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, 668 MSR_VMX_TRUE_PROCBASED_CTLS, 669 PROCBASED_HLT_EXITING, 0, 670 &tmp) == 0); 671 672 
cap_monitor_trap = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, 673 MSR_VMX_PROCBASED_CTLS, 674 PROCBASED_MTF, 0, 675 &tmp) == 0); 676 677 cap_pause_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, 678 MSR_VMX_TRUE_PROCBASED_CTLS, 679 PROCBASED_PAUSE_EXITING, 0, 680 &tmp) == 0); 681 682 cap_unrestricted_guest = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, 683 MSR_VMX_PROCBASED_CTLS2, 684 PROCBASED2_UNRESTRICTED_GUEST, 0, 685 &tmp) == 0); 686 687 cap_invpcid = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, 688 MSR_VMX_PROCBASED_CTLS2, PROCBASED2_ENABLE_INVPCID, 0, 689 &tmp) == 0); 690 691 /* 692 * Check support for virtual interrupt delivery. 693 */ 694 procbased2_vid_bits = (PROCBASED2_VIRTUALIZE_APIC_ACCESSES | 695 PROCBASED2_VIRTUALIZE_X2APIC_MODE | 696 PROCBASED2_APIC_REGISTER_VIRTUALIZATION | 697 PROCBASED2_VIRTUAL_INTERRUPT_DELIVERY); 698 699 use_tpr_shadow = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, 700 MSR_VMX_TRUE_PROCBASED_CTLS, PROCBASED_USE_TPR_SHADOW, 0, 701 &tmp) == 0); 702 703 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2, 704 procbased2_vid_bits, 0, &tmp); 705 if (error == 0 && use_tpr_shadow) { 706 virtual_interrupt_delivery = 1; 707 TUNABLE_INT_FETCH("hw.vmm.vmx.use_apic_vid", 708 &virtual_interrupt_delivery); 709 } 710 711 if (virtual_interrupt_delivery) { 712 procbased_ctls |= PROCBASED_USE_TPR_SHADOW; 713 procbased_ctls2 |= procbased2_vid_bits; 714 procbased_ctls2 &= ~PROCBASED2_VIRTUALIZE_X2APIC_MODE; 715 716 /* 717 * Check for Posted Interrupts only if Virtual Interrupt 718 * Delivery is enabled. 719 */ 720 error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS, 721 MSR_VMX_TRUE_PINBASED_CTLS, PINBASED_POSTED_INTERRUPT, 0, 722 &tmp); 723 if (error == 0) { 724 pirvec = vmm_ipi_alloc(); 725 if (pirvec == 0) { 726 if (bootverbose) { 727 printf("vmx_init: unable to allocate " 728 "posted interrupt vector\n"); 729 } 730 } else { 731 posted_interrupts = 1; 732 TUNABLE_INT_FETCH("hw.vmm.vmx.use_apic_pir", 733 &posted_interrupts); 734 } 735 } 736 } 737 738 if (posted_interrupts) 739 pinbased_ctls |= PINBASED_POSTED_INTERRUPT; 740 741 /* Initialize EPT */ 742 error = ept_init(ipinum); 743 if (error) { 744 printf("vmx_init: ept initialization failed (%d)\n", error); 745 return (error); 746 } 747 748 /* 749 * Stash the cr0 and cr4 bits that must be fixed to 0 or 1 750 */ 751 fixed0 = rdmsr(MSR_VMX_CR0_FIXED0); 752 fixed1 = rdmsr(MSR_VMX_CR0_FIXED1); 753 cr0_ones_mask = fixed0 & fixed1; 754 cr0_zeros_mask = ~fixed0 & ~fixed1; 755 756 /* 757 * CR0_PE and CR0_PG can be set to zero in VMX non-root operation 758 * if unrestricted guest execution is allowed. 759 */ 760 if (cap_unrestricted_guest) 761 cr0_ones_mask &= ~(CR0_PG | CR0_PE); 762 763 /* 764 * Do not allow the guest to set CR0_NW or CR0_CD. 
765 */ 766 cr0_zeros_mask |= (CR0_NW | CR0_CD); 767 768 fixed0 = rdmsr(MSR_VMX_CR4_FIXED0); 769 fixed1 = rdmsr(MSR_VMX_CR4_FIXED1); 770 cr4_ones_mask = fixed0 & fixed1; 771 cr4_zeros_mask = ~fixed0 & ~fixed1; 772 773 vpid_init(); 774 775 /* enable VMX operation */ 776 smp_rendezvous(NULL, vmx_enable, NULL, NULL); 777 778 vmx_initialized = 1; 779 780 return (0); 781} 782 783static void 784vmx_trigger_hostintr(int vector) 785{ 786 uintptr_t func; 787 struct gate_descriptor *gd; 788 789 gd = &idt[vector]; 790 791 KASSERT(vector >= 32 && vector <= 255, ("vmx_trigger_hostintr: " 792 "invalid vector %d", vector)); 793 KASSERT(gd->gd_p == 1, ("gate descriptor for vector %d not present", 794 vector)); 795 KASSERT(gd->gd_type == SDT_SYSIGT, ("gate descriptor for vector %d " 796 "has invalid type %d", vector, gd->gd_type)); 797 KASSERT(gd->gd_dpl == SEL_KPL, ("gate descriptor for vector %d " 798 "has invalid dpl %d", vector, gd->gd_dpl)); 799 KASSERT(gd->gd_selector == GSEL(GCODE_SEL, SEL_KPL), ("gate descriptor " 800 "for vector %d has invalid selector %d", vector, gd->gd_selector)); 801 KASSERT(gd->gd_ist == 0, ("gate descriptor for vector %d has invalid " 802 "IST %d", vector, gd->gd_ist)); 803 804 func = ((long)gd->gd_hioffset << 16 | gd->gd_looffset); 805 vmx_call_isr(func); 806} 807 808static int 809vmx_setup_cr_shadow(int which, struct vmcs *vmcs, uint32_t initial) 810{ 811 int error, mask_ident, shadow_ident; 812 uint64_t mask_value; 813 814 if (which != 0 && which != 4) 815 panic("vmx_setup_cr_shadow: unknown cr%d", which); 816 817 if (which == 0) { 818 mask_ident = VMCS_CR0_MASK; 819 mask_value = cr0_ones_mask | cr0_zeros_mask; 820 shadow_ident = VMCS_CR0_SHADOW; 821 } else { 822 mask_ident = VMCS_CR4_MASK; 823 mask_value = cr4_ones_mask | cr4_zeros_mask; 824 shadow_ident = VMCS_CR4_SHADOW; 825 } 826 827 error = vmcs_setreg(vmcs, 0, VMCS_IDENT(mask_ident), mask_value); 828 if (error) 829 return (error); 830 831 error = vmcs_setreg(vmcs, 0, VMCS_IDENT(shadow_ident), initial); 832 if (error) 833 return (error); 834 835 return (0); 836} 837#define vmx_setup_cr0_shadow(vmcs,init) vmx_setup_cr_shadow(0, (vmcs), (init)) 838#define vmx_setup_cr4_shadow(vmcs,init) vmx_setup_cr_shadow(4, (vmcs), (init)) 839 840static void * 841vmx_vminit(struct vm *vm, pmap_t pmap) 842{ 843 uint16_t vpid[VM_MAXCPU]; 844 int i, error, guest_msr_count; 845 struct vmx *vmx; 846 struct vmcs *vmcs; 847 848 vmx = malloc(sizeof(struct vmx), M_VMX, M_WAITOK | M_ZERO); 849 if ((uintptr_t)vmx & PAGE_MASK) { 850 panic("malloc of struct vmx not aligned on %d byte boundary", 851 PAGE_SIZE); 852 } 853 vmx->vm = vm; 854 855 vmx->eptp = eptp(vtophys((vm_offset_t)pmap->pm_pml4)); 856 857 /* 858 * Clean up EPTP-tagged guest physical and combined mappings 859 * 860 * VMX transitions are not required to invalidate any guest physical 861 * mappings. So, it may be possible for stale guest physical mappings 862 * to be present in the processor TLBs. 863 * 864 * Combined mappings for this EP4TA are also invalidated for all VPIDs. 865 */ 866 ept_invalidate_mappings(vmx->eptp); 867 868 msr_bitmap_initialize(vmx->msr_bitmap); 869 870 /* 871 * It is safe to allow direct access to MSR_GSBASE and MSR_FSBASE. 872 * The guest FSBASE and GSBASE are saved and restored during 873 * vm-exit and vm-entry respectively. The host FSBASE and GSBASE are 874 * always restored from the vmcs host state area on vm-exit. 
875 * 876 * The SYSENTER_CS/ESP/EIP MSRs are identical to FS/GSBASE in 877 * how they are saved/restored so can be directly accessed by the 878 * guest. 879 * 880 * Guest KGSBASE is saved and restored in the guest MSR save area. 881 * Host KGSBASE is restored before returning to userland from the pcb. 882 * There will be a window of time when we are executing in the host 883 * kernel context with a value of KGSBASE from the guest. This is ok 884 * because the value of KGSBASE is inconsequential in kernel context. 885 * 886 * MSR_EFER is saved and restored in the guest VMCS area on a 887 * VM exit and entry respectively. It is also restored from the 888 * host VMCS area on a VM exit. 889 * 890 * The TSC MSR is exposed read-only. Writes are disallowed as that 891 * will impact the host TSC. 892 * XXX Writes would be implemented with a wrmsr trap, and 893 * then modifying the TSC offset in the VMCS. 894 */ 895 if (guest_msr_rw(vmx, MSR_GSBASE) || 896 guest_msr_rw(vmx, MSR_FSBASE) || 897 guest_msr_rw(vmx, MSR_SYSENTER_CS_MSR) || 898 guest_msr_rw(vmx, MSR_SYSENTER_ESP_MSR) || 899 guest_msr_rw(vmx, MSR_SYSENTER_EIP_MSR) || 900 guest_msr_rw(vmx, MSR_KGSBASE) || 901 guest_msr_rw(vmx, MSR_EFER) || 902 guest_msr_ro(vmx, MSR_TSC)) 903 panic("vmx_vminit: error setting guest msr access"); 904 905 /* 906 * MSR_PAT is saved and restored in the guest VMCS are on a VM exit 907 * and entry respectively. It is also restored from the host VMCS 908 * area on a VM exit. However, if running on a system with no 909 * MSR_PAT save/restore support, leave access disabled so accesses 910 * will be trapped. 911 */ 912 if (!vmx_no_patmsr && guest_msr_rw(vmx, MSR_PAT)) 913 panic("vmx_vminit: error setting guest pat msr access"); 914 915 vpid_alloc(vpid, VM_MAXCPU); 916 917 if (virtual_interrupt_delivery) { 918 error = vm_map_mmio(vm, DEFAULT_APIC_BASE, PAGE_SIZE, 919 APIC_ACCESS_ADDRESS); 920 /* XXX this should really return an error to the caller */ 921 KASSERT(error == 0, ("vm_map_mmio(apicbase) error %d", error)); 922 } 923 924 for (i = 0; i < VM_MAXCPU; i++) { 925 vmcs = &vmx->vmcs[i]; 926 vmcs->identifier = vmx_revision(); 927 error = vmclear(vmcs); 928 if (error != 0) { 929 panic("vmx_vminit: vmclear error %d on vcpu %d\n", 930 error, i); 931 } 932 933 error = vmcs_init(vmcs); 934 KASSERT(error == 0, ("vmcs_init error %d", error)); 935 936 VMPTRLD(vmcs); 937 error = 0; 938 error += vmwrite(VMCS_HOST_RSP, (u_long)&vmx->ctx[i]); 939 error += vmwrite(VMCS_EPTP, vmx->eptp); 940 error += vmwrite(VMCS_PIN_BASED_CTLS, pinbased_ctls); 941 error += vmwrite(VMCS_PRI_PROC_BASED_CTLS, procbased_ctls); 942 error += vmwrite(VMCS_SEC_PROC_BASED_CTLS, procbased_ctls2); 943 error += vmwrite(VMCS_EXIT_CTLS, exit_ctls); 944 error += vmwrite(VMCS_ENTRY_CTLS, entry_ctls); 945 error += vmwrite(VMCS_MSR_BITMAP, vtophys(vmx->msr_bitmap)); 946 error += vmwrite(VMCS_VPID, vpid[i]); 947 if (virtual_interrupt_delivery) { 948 error += vmwrite(VMCS_APIC_ACCESS, APIC_ACCESS_ADDRESS); 949 error += vmwrite(VMCS_VIRTUAL_APIC, 950 vtophys(&vmx->apic_page[i])); 951 error += vmwrite(VMCS_EOI_EXIT0, 0); 952 error += vmwrite(VMCS_EOI_EXIT1, 0); 953 error += vmwrite(VMCS_EOI_EXIT2, 0); 954 error += vmwrite(VMCS_EOI_EXIT3, 0); 955 } 956 if (posted_interrupts) { 957 error += vmwrite(VMCS_PIR_VECTOR, pirvec); 958 error += vmwrite(VMCS_PIR_DESC, 959 vtophys(&vmx->pir_desc[i])); 960 } 961 VMCLEAR(vmcs); 962 KASSERT(error == 0, ("vmx_vminit: error customizing the vmcs")); 963 964 vmx->cap[i].set = 0; 965 vmx->cap[i].proc_ctls = procbased_ctls; 966 
vmx->cap[i].proc_ctls2 = procbased_ctls2; 967 968 vmx->state[i].lastcpu = -1; 969 vmx->state[i].vpid = vpid[i]; 970 971 msr_save_area_init(vmx->guest_msrs[i], &guest_msr_count); 972 973 error = vmcs_set_msr_save(vmcs, vtophys(vmx->guest_msrs[i]), 974 guest_msr_count); 975 if (error != 0) 976 panic("vmcs_set_msr_save error %d", error); 977 978 /* 979 * Set up the CR0/4 shadows, and init the read shadow 980 * to the power-on register value from the Intel Sys Arch. 981 * CR0 - 0x60000010 982 * CR4 - 0 983 */ 984 error = vmx_setup_cr0_shadow(vmcs, 0x60000010); 985 if (error != 0) 986 panic("vmx_setup_cr0_shadow %d", error); 987 988 error = vmx_setup_cr4_shadow(vmcs, 0); 989 if (error != 0) 990 panic("vmx_setup_cr4_shadow %d", error); 991 992 vmx->ctx[i].pmap = pmap; 993 } 994 995 return (vmx); 996} 997 998static int 999vmx_handle_cpuid(struct vm *vm, int vcpu, struct vmxctx *vmxctx) 1000{ 1001 int handled, func; 1002 1003 func = vmxctx->guest_rax; 1004 1005 handled = x86_emulate_cpuid(vm, vcpu, 1006 (uint32_t*)(&vmxctx->guest_rax), 1007 (uint32_t*)(&vmxctx->guest_rbx), 1008 (uint32_t*)(&vmxctx->guest_rcx), 1009 (uint32_t*)(&vmxctx->guest_rdx)); 1010 return (handled); 1011} 1012 1013static __inline void 1014vmx_run_trace(struct vmx *vmx, int vcpu) 1015{ 1016#ifdef KTR 1017 VCPU_CTR1(vmx->vm, vcpu, "Resume execution at %#lx", vmcs_guest_rip()); 1018#endif 1019} 1020 1021static __inline void 1022vmx_exit_trace(struct vmx *vmx, int vcpu, uint64_t rip, uint32_t exit_reason, 1023 int handled) 1024{ 1025#ifdef KTR 1026 VCPU_CTR3(vmx->vm, vcpu, "%s %s vmexit at 0x%0lx", 1027 handled ? "handled" : "unhandled", 1028 exit_reason_to_str(exit_reason), rip); 1029#endif 1030} 1031 1032static __inline void 1033vmx_astpending_trace(struct vmx *vmx, int vcpu, uint64_t rip) 1034{ 1035#ifdef KTR 1036 VCPU_CTR1(vmx->vm, vcpu, "astpending vmexit at 0x%0lx", rip); 1037#endif 1038} 1039 1040static VMM_STAT_INTEL(VCPU_INVVPID_SAVED, "Number of vpid invalidations saved"); 1041 1042static void 1043vmx_set_pcpu_defaults(struct vmx *vmx, int vcpu, pmap_t pmap) 1044{ 1045 struct vmxstate *vmxstate; 1046 struct invvpid_desc invvpid_desc; 1047 1048 vmxstate = &vmx->state[vcpu]; 1049 if (vmxstate->lastcpu == curcpu) 1050 return; 1051 1052 vmxstate->lastcpu = curcpu; 1053 1054 vmm_stat_incr(vmx->vm, vcpu, VCPU_MIGRATIONS, 1); 1055 1056 vmcs_write(VMCS_HOST_TR_BASE, vmm_get_host_trbase()); 1057 vmcs_write(VMCS_HOST_GDTR_BASE, vmm_get_host_gdtrbase()); 1058 vmcs_write(VMCS_HOST_GS_BASE, vmm_get_host_gsbase()); 1059 1060 /* 1061 * If we are using VPIDs then invalidate all mappings tagged with 'vpid' 1062 * 1063 * We do this because this vcpu was executing on a different host 1064 * cpu when it last ran. We do not track whether it invalidated 1065 * mappings associated with its 'vpid' during that run. So we must 1066 * assume that the mappings associated with 'vpid' on 'curcpu' are 1067 * stale and invalidate them. 1068 * 1069 * Note that we incur this penalty only when the scheduler chooses to 1070 * move the thread associated with this vcpu between host cpus. 1071 * 1072 * Note also that this will invalidate mappings tagged with 'vpid' 1073 * for "all" EP4TAs. 
1074 */ 1075 if (vmxstate->vpid != 0) { 1076 if (pmap->pm_eptgen == vmx->eptgen[curcpu]) { 1077 invvpid_desc._res1 = 0; 1078 invvpid_desc._res2 = 0; 1079 invvpid_desc.vpid = vmxstate->vpid; 1080 invvpid_desc.linear_addr = 0; 1081 invvpid(INVVPID_TYPE_SINGLE_CONTEXT, invvpid_desc); 1082 } else { 1083 /* 1084 * The invvpid can be skipped if an invept is going to 1085 * be performed before entering the guest. The invept 1086 * will invalidate combined mappings tagged with 1087 * 'vmx->eptp' for all vpids. 1088 */ 1089 vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_SAVED, 1); 1090 } 1091 } 1092} 1093 1094/* 1095 * We depend on 'procbased_ctls' to have the Interrupt Window Exiting bit set. 1096 */ 1097CTASSERT((PROCBASED_CTLS_ONE_SETTING & PROCBASED_INT_WINDOW_EXITING) != 0); 1098 1099static void __inline 1100vmx_set_int_window_exiting(struct vmx *vmx, int vcpu) 1101{ 1102 1103 if ((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) == 0) { 1104 vmx->cap[vcpu].proc_ctls |= PROCBASED_INT_WINDOW_EXITING; 1105 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls); 1106 VCPU_CTR0(vmx->vm, vcpu, "Enabling interrupt window exiting"); 1107 } 1108} 1109 1110static void __inline 1111vmx_clear_int_window_exiting(struct vmx *vmx, int vcpu) 1112{ 1113 1114 KASSERT((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0, 1115 ("intr_window_exiting not set: %#x", vmx->cap[vcpu].proc_ctls)); 1116 vmx->cap[vcpu].proc_ctls &= ~PROCBASED_INT_WINDOW_EXITING; 1117 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls); 1118 VCPU_CTR0(vmx->vm, vcpu, "Disabling interrupt window exiting"); 1119} 1120 1121static void __inline 1122vmx_set_nmi_window_exiting(struct vmx *vmx, int vcpu) 1123{ 1124 1125 if ((vmx->cap[vcpu].proc_ctls & PROCBASED_NMI_WINDOW_EXITING) == 0) { 1126 vmx->cap[vcpu].proc_ctls |= PROCBASED_NMI_WINDOW_EXITING; 1127 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls); 1128 VCPU_CTR0(vmx->vm, vcpu, "Enabling NMI window exiting"); 1129 } 1130} 1131 1132static void __inline 1133vmx_clear_nmi_window_exiting(struct vmx *vmx, int vcpu) 1134{ 1135 1136 KASSERT((vmx->cap[vcpu].proc_ctls & PROCBASED_NMI_WINDOW_EXITING) != 0, 1137 ("nmi_window_exiting not set %#x", vmx->cap[vcpu].proc_ctls)); 1138 vmx->cap[vcpu].proc_ctls &= ~PROCBASED_NMI_WINDOW_EXITING; 1139 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls); 1140 VCPU_CTR0(vmx->vm, vcpu, "Disabling NMI window exiting"); 1141} 1142 1143#define NMI_BLOCKING (VMCS_INTERRUPTIBILITY_NMI_BLOCKING | \ 1144 VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING) 1145#define HWINTR_BLOCKING (VMCS_INTERRUPTIBILITY_STI_BLOCKING | \ 1146 VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING) 1147 1148static void 1149vmx_inject_nmi(struct vmx *vmx, int vcpu) 1150{ 1151 uint32_t gi, info; 1152 1153 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 1154 KASSERT((gi & NMI_BLOCKING) == 0, ("vmx_inject_nmi: invalid guest " 1155 "interruptibility-state %#x", gi)); 1156 1157 info = vmcs_read(VMCS_ENTRY_INTR_INFO); 1158 KASSERT((info & VMCS_INTR_VALID) == 0, ("vmx_inject_nmi: invalid " 1159 "VM-entry interruption information %#x", info)); 1160 1161 /* 1162 * Inject the virtual NMI. The vector must be the NMI IDT entry 1163 * or the VMCS entry check will fail. 
1164 */ 1165 info = IDT_NMI | VMCS_INTR_T_NMI | VMCS_INTR_VALID; 1166 vmcs_write(VMCS_ENTRY_INTR_INFO, info); 1167 1168 VCPU_CTR0(vmx->vm, vcpu, "Injecting vNMI"); 1169 1170 /* Clear the request */ 1171 vm_nmi_clear(vmx->vm, vcpu); 1172} 1173 1174static void 1175vmx_inject_interrupts(struct vmx *vmx, int vcpu, struct vlapic *vlapic) 1176{ 1177 struct vm_exception exc; 1178 int vector, need_nmi_exiting, extint_pending; 1179 uint64_t rflags; 1180 uint32_t gi, info; 1181 1182 if (vm_exception_pending(vmx->vm, vcpu, &exc)) { 1183 KASSERT(exc.vector >= 0 && exc.vector < 32, 1184 ("%s: invalid exception vector %d", __func__, exc.vector)); 1185 1186 info = vmcs_read(VMCS_ENTRY_INTR_INFO); 1187 KASSERT((info & VMCS_INTR_VALID) == 0, ("%s: cannot inject " 1188 "pending exception %d: %#x", __func__, exc.vector, info)); 1189 1190 info = exc.vector | VMCS_INTR_T_HWEXCEPTION | VMCS_INTR_VALID; 1191 if (exc.error_code_valid) { 1192 info |= VMCS_INTR_DEL_ERRCODE; 1193 vmcs_write(VMCS_ENTRY_EXCEPTION_ERROR, exc.error_code); 1194 } 1195 vmcs_write(VMCS_ENTRY_INTR_INFO, info); 1196 } 1197 1198 if (vm_nmi_pending(vmx->vm, vcpu)) { 1199 /* 1200 * If there are no conditions blocking NMI injection then 1201 * inject it directly here otherwise enable "NMI window 1202 * exiting" to inject it as soon as we can. 1203 * 1204 * We also check for STI_BLOCKING because some implementations 1205 * don't allow NMI injection in this case. If we are running 1206 * on a processor that doesn't have this restriction it will 1207 * immediately exit and the NMI will be injected in the 1208 * "NMI window exiting" handler. 1209 */ 1210 need_nmi_exiting = 1; 1211 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 1212 if ((gi & (HWINTR_BLOCKING | NMI_BLOCKING)) == 0) { 1213 info = vmcs_read(VMCS_ENTRY_INTR_INFO); 1214 if ((info & VMCS_INTR_VALID) == 0) { 1215 vmx_inject_nmi(vmx, vcpu); 1216 need_nmi_exiting = 0; 1217 } else { 1218 VCPU_CTR1(vmx->vm, vcpu, "Cannot inject NMI " 1219 "due to VM-entry intr info %#x", info); 1220 } 1221 } else { 1222 VCPU_CTR1(vmx->vm, vcpu, "Cannot inject NMI due to " 1223 "Guest Interruptibility-state %#x", gi); 1224 } 1225 1226 if (need_nmi_exiting) 1227 vmx_set_nmi_window_exiting(vmx, vcpu); 1228 } 1229 1230 extint_pending = vm_extint_pending(vmx->vm, vcpu); 1231 1232 if (!extint_pending && virtual_interrupt_delivery) { 1233 vmx_inject_pir(vlapic); 1234 return; 1235 } 1236 1237 /* 1238 * If interrupt-window exiting is already in effect then don't bother 1239 * checking for pending interrupts. This is just an optimization and 1240 * not needed for correctness. 1241 */ 1242 if ((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0) { 1243 VCPU_CTR0(vmx->vm, vcpu, "Skip interrupt injection due to " 1244 "pending int_window_exiting"); 1245 return; 1246 } 1247 1248 if (!extint_pending) { 1249 /* Ask the local apic for a vector to inject */ 1250 if (!vlapic_pending_intr(vlapic, &vector)) 1251 return; 1252 1253 /* 1254 * From the Intel SDM, Volume 3, Section "Maskable 1255 * Hardware Interrupts": 1256 * - maskable interrupt vectors [16,255] can be delivered 1257 * through the local APIC. 1258 */ 1259 KASSERT(vector >= 16 && vector <= 255, 1260 ("invalid vector %d from local APIC", vector)); 1261 } else { 1262 /* Ask the legacy pic for a vector to inject */ 1263 vatpic_pending_intr(vmx->vm, &vector); 1264 1265 /* 1266 * From the Intel SDM, Volume 3, Section "Maskable 1267 * Hardware Interrupts": 1268 * - maskable interrupt vectors [0,255] can be delivered 1269 * through the INTR pin. 
1270 */ 1271 KASSERT(vector >= 0 && vector <= 255, 1272 ("invalid vector %d from INTR", vector)); 1273 } 1274 1275 /* Check RFLAGS.IF and the interruptibility state of the guest */ 1276 rflags = vmcs_read(VMCS_GUEST_RFLAGS); 1277 if ((rflags & PSL_I) == 0) { 1278 VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to " 1279 "rflags %#lx", vector, rflags); 1280 goto cantinject; 1281 } 1282 1283 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 1284 if (gi & HWINTR_BLOCKING) { 1285 VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to " 1286 "Guest Interruptibility-state %#x", vector, gi); 1287 goto cantinject; 1288 } 1289 1290 info = vmcs_read(VMCS_ENTRY_INTR_INFO); 1291 if (info & VMCS_INTR_VALID) { 1292 /* 1293 * This is expected and could happen for multiple reasons: 1294 * - A vectoring VM-entry was aborted due to astpending 1295 * - A VM-exit happened during event injection. 1296 * - An exception was injected above. 1297 * - An NMI was injected above or after "NMI window exiting" 1298 */ 1299 VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to " 1300 "VM-entry intr info %#x", vector, info); 1301 goto cantinject; 1302 } 1303 1304 /* Inject the interrupt */ 1305 info = VMCS_INTR_T_HWINTR | VMCS_INTR_VALID; 1306 info |= vector; 1307 vmcs_write(VMCS_ENTRY_INTR_INFO, info); 1308 1309 if (!extint_pending) { 1310 /* Update the Local APIC ISR */ 1311 vlapic_intr_accepted(vlapic, vector); 1312 } else { 1313 vm_extint_clear(vmx->vm, vcpu); 1314 vatpic_intr_accepted(vmx->vm, vector); 1315 1316 /* 1317 * After we accepted the current ExtINT the PIC may 1318 * have posted another one. If that is the case, set 1319 * the Interrupt Window Exiting execution control so 1320 * we can inject that one too. 1321 */ 1322 if (vm_extint_pending(vmx->vm, vcpu)) 1323 vmx_set_int_window_exiting(vmx, vcpu); 1324 } 1325 1326 VCPU_CTR1(vmx->vm, vcpu, "Injecting hwintr at vector %d", vector); 1327 1328 return; 1329 1330cantinject: 1331 /* 1332 * Set the Interrupt Window Exiting execution control so we can inject 1333 * the interrupt as soon as blocking condition goes away. 1334 */ 1335 vmx_set_int_window_exiting(vmx, vcpu); 1336} 1337 1338/* 1339 * If the Virtual NMIs execution control is '1' then the logical processor 1340 * tracks virtual-NMI blocking in the Guest Interruptibility-state field of 1341 * the VMCS. An IRET instruction in VMX non-root operation will remove any 1342 * virtual-NMI blocking. 1343 * 1344 * This unblocking occurs even if the IRET causes a fault. In this case the 1345 * hypervisor needs to restore virtual-NMI blocking before resuming the guest. 
1346 */ 1347static void 1348vmx_restore_nmi_blocking(struct vmx *vmx, int vcpuid) 1349{ 1350 uint32_t gi; 1351 1352 VCPU_CTR0(vmx->vm, vcpuid, "Restore Virtual-NMI blocking"); 1353 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 1354 gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING; 1355 vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi); 1356} 1357 1358static void 1359vmx_clear_nmi_blocking(struct vmx *vmx, int vcpuid) 1360{ 1361 uint32_t gi; 1362 1363 VCPU_CTR0(vmx->vm, vcpuid, "Clear Virtual-NMI blocking"); 1364 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 1365 gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING; 1366 vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi); 1367} 1368 1369static int 1370vmx_emulate_xsetbv(struct vmx *vmx, int vcpu, struct vm_exit *vmexit) 1371{ 1372 struct vmxctx *vmxctx; 1373 uint64_t xcrval; 1374 const struct xsave_limits *limits; 1375 1376 vmxctx = &vmx->ctx[vcpu]; 1377 limits = vmm_get_xsave_limits(); 1378 1379 /* 1380 * Note that the processor raises a GP# fault on its own if 1381 * xsetbv is executed for CPL != 0, so we do not have to 1382 * emulate that fault here. 1383 */ 1384 1385 /* Only xcr0 is supported. */ 1386 if (vmxctx->guest_rcx != 0) { 1387 vm_inject_gp(vmx->vm, vcpu); 1388 return (HANDLED); 1389 } 1390 1391 /* We only handle xcr0 if both the host and guest have XSAVE enabled. */ 1392 if (!limits->xsave_enabled || !(vmcs_read(VMCS_GUEST_CR4) & CR4_XSAVE)) { 1393 vm_inject_ud(vmx->vm, vcpu); 1394 return (HANDLED); 1395 } 1396 1397 xcrval = vmxctx->guest_rdx << 32 | (vmxctx->guest_rax & 0xffffffff); 1398 if ((xcrval & ~limits->xcr0_allowed) != 0) { 1399 vm_inject_gp(vmx->vm, vcpu); 1400 return (HANDLED); 1401 } 1402 1403 if (!(xcrval & XFEATURE_ENABLED_X87)) { 1404 vm_inject_gp(vmx->vm, vcpu); 1405 return (HANDLED); 1406 } 1407 1408 /* AVX (YMM_Hi128) requires SSE. */ 1409 if (xcrval & XFEATURE_ENABLED_AVX && 1410 (xcrval & XFEATURE_AVX) != XFEATURE_AVX) { 1411 vm_inject_gp(vmx->vm, vcpu); 1412 return (HANDLED); 1413 } 1414 1415 /* 1416 * AVX512 requires base AVX (YMM_Hi128) as well as OpMask, 1417 * ZMM_Hi256, and Hi16_ZMM. 1418 */ 1419 if (xcrval & XFEATURE_AVX512 && 1420 (xcrval & (XFEATURE_AVX512 | XFEATURE_AVX)) != 1421 (XFEATURE_AVX512 | XFEATURE_AVX)) { 1422 vm_inject_gp(vmx->vm, vcpu); 1423 return (HANDLED); 1424 } 1425 1426 /* 1427 * Intel MPX requires both bound register state flags to be 1428 * set. 1429 */ 1430 if (((xcrval & XFEATURE_ENABLED_BNDREGS) != 0) != 1431 ((xcrval & XFEATURE_ENABLED_BNDCSR) != 0)) { 1432 vm_inject_gp(vmx->vm, vcpu); 1433 return (HANDLED); 1434 } 1435 1436 /* 1437 * This runs "inside" vmrun() with the guest's FPU state, so 1438 * modifying xcr0 directly modifies the guest's xcr0, not the 1439 * host's. 1440 */ 1441 load_xcr(0, xcrval); 1442 return (HANDLED); 1443} 1444 1445static int 1446vmx_emulate_cr_access(struct vmx *vmx, int vcpu, uint64_t exitqual) 1447{ 1448 int cr, vmcs_guest_cr, vmcs_shadow_cr; 1449 uint64_t crval, regval, ones_mask, zeros_mask; 1450 const struct vmxctx *vmxctx; 1451 1452 /* We only handle mov to %cr0 or %cr4 at this time */ 1453 if ((exitqual & 0xf0) != 0x00) 1454 return (UNHANDLED); 1455 1456 cr = exitqual & 0xf; 1457 if (cr != 0 && cr != 4) 1458 return (UNHANDLED); 1459 1460 regval = 0; /* silence gcc */ 1461 vmxctx = &vmx->ctx[vcpu]; 1462 1463 /* 1464 * We must use vmcs_write() directly here because vmcs_setreg() will 1465 * call vmclear(vmcs) as a side-effect which we certainly don't want. 
1466 */ 1467 switch ((exitqual >> 8) & 0xf) { 1468 case 0: 1469 regval = vmxctx->guest_rax; 1470 break; 1471 case 1: 1472 regval = vmxctx->guest_rcx; 1473 break; 1474 case 2: 1475 regval = vmxctx->guest_rdx; 1476 break; 1477 case 3: 1478 regval = vmxctx->guest_rbx; 1479 break; 1480 case 4: 1481 regval = vmcs_read(VMCS_GUEST_RSP); 1482 break; 1483 case 5: 1484 regval = vmxctx->guest_rbp; 1485 break; 1486 case 6: 1487 regval = vmxctx->guest_rsi; 1488 break; 1489 case 7: 1490 regval = vmxctx->guest_rdi; 1491 break; 1492 case 8: 1493 regval = vmxctx->guest_r8; 1494 break; 1495 case 9: 1496 regval = vmxctx->guest_r9; 1497 break; 1498 case 10: 1499 regval = vmxctx->guest_r10; 1500 break; 1501 case 11: 1502 regval = vmxctx->guest_r11; 1503 break; 1504 case 12: 1505 regval = vmxctx->guest_r12; 1506 break; 1507 case 13: 1508 regval = vmxctx->guest_r13; 1509 break; 1510 case 14: 1511 regval = vmxctx->guest_r14; 1512 break; 1513 case 15: 1514 regval = vmxctx->guest_r15; 1515 break; 1516 } 1517 1518 if (cr == 0) { 1519 ones_mask = cr0_ones_mask; 1520 zeros_mask = cr0_zeros_mask; 1521 vmcs_guest_cr = VMCS_GUEST_CR0; 1522 vmcs_shadow_cr = VMCS_CR0_SHADOW; 1523 } else { 1524 ones_mask = cr4_ones_mask; 1525 zeros_mask = cr4_zeros_mask; 1526 vmcs_guest_cr = VMCS_GUEST_CR4; 1527 vmcs_shadow_cr = VMCS_CR4_SHADOW; 1528 } 1529 vmcs_write(vmcs_shadow_cr, regval); 1530 1531 crval = regval | ones_mask; 1532 crval &= ~zeros_mask; 1533 vmcs_write(vmcs_guest_cr, crval); 1534 1535 if (cr == 0 && regval & CR0_PG) { 1536 uint64_t efer, entry_ctls; 1537 1538 /* 1539 * If CR0.PG is 1 and EFER.LME is 1 then EFER.LMA and 1540 * the "IA-32e mode guest" bit in VM-entry control must be 1541 * equal. 1542 */ 1543 efer = vmcs_read(VMCS_GUEST_IA32_EFER); 1544 if (efer & EFER_LME) { 1545 efer |= EFER_LMA; 1546 vmcs_write(VMCS_GUEST_IA32_EFER, efer); 1547 entry_ctls = vmcs_read(VMCS_ENTRY_CTLS); 1548 entry_ctls |= VM_ENTRY_GUEST_LMA; 1549 vmcs_write(VMCS_ENTRY_CTLS, entry_ctls); 1550 } 1551 } 1552 1553 return (HANDLED); 1554} 1555 1556/* 1557 * From section "Guest Register State" in the Intel SDM: CPL = SS.DPL 1558 */ 1559static int 1560vmx_cpl(void) 1561{ 1562 uint32_t ssar; 1563 1564 ssar = vmcs_read(VMCS_GUEST_SS_ACCESS_RIGHTS); 1565 return ((ssar >> 5) & 0x3); 1566} 1567 1568static enum vm_cpu_mode 1569vmx_cpu_mode(void) 1570{ 1571 1572 if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LMA) 1573 return (CPU_MODE_64BIT); 1574 else 1575 return (CPU_MODE_COMPATIBILITY); 1576} 1577 1578static enum vm_paging_mode 1579vmx_paging_mode(void) 1580{ 1581 1582 if (!(vmcs_read(VMCS_GUEST_CR0) & CR0_PG)) 1583 return (PAGING_MODE_FLAT); 1584 if (!(vmcs_read(VMCS_GUEST_CR4) & CR4_PAE)) 1585 return (PAGING_MODE_32); 1586 if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LME) 1587 return (PAGING_MODE_64); 1588 else 1589 return (PAGING_MODE_PAE); 1590} 1591 1592static uint64_t 1593inout_str_index(struct vmx *vmx, int vcpuid, int in) 1594{ 1595 uint64_t val; 1596 int error; 1597 enum vm_reg_name reg; 1598 1599 reg = in ? 
VM_REG_GUEST_RDI : VM_REG_GUEST_RSI; 1600 error = vmx_getreg(vmx, vcpuid, reg, &val); 1601 KASSERT(error == 0, ("%s: vmx_getreg error %d", __func__, error)); 1602 return (val); 1603} 1604 1605static uint64_t 1606inout_str_count(struct vmx *vmx, int vcpuid, int rep) 1607{ 1608 uint64_t val; 1609 int error; 1610 1611 if (rep) { 1612 error = vmx_getreg(vmx, vcpuid, VM_REG_GUEST_RCX, &val); 1613 KASSERT(!error, ("%s: vmx_getreg error %d", __func__, error)); 1614 } else { 1615 val = 1; 1616 } 1617 return (val); 1618} 1619 1620static int 1621inout_str_addrsize(uint32_t inst_info) 1622{ 1623 uint32_t size; 1624 1625 size = (inst_info >> 7) & 0x7; 1626 switch (size) { 1627 case 0: 1628 return (2); /* 16 bit */ 1629 case 1: 1630 return (4); /* 32 bit */ 1631 case 2: 1632 return (8); /* 64 bit */ 1633 default: 1634 panic("%s: invalid size encoding %d", __func__, size); 1635 } 1636} 1637 1638static void 1639inout_str_seginfo(struct vmx *vmx, int vcpuid, uint32_t inst_info, int in, 1640 struct vm_inout_str *vis) 1641{ 1642 int error, s; 1643 1644 if (in) { 1645 vis->seg_name = VM_REG_GUEST_ES; 1646 } else { 1647 s = (inst_info >> 15) & 0x7; 1648 vis->seg_name = vm_segment_name(s); 1649 } 1650 1651 error = vmx_getdesc(vmx, vcpuid, vis->seg_name, &vis->seg_desc); 1652 KASSERT(error == 0, ("%s: vmx_getdesc error %d", __func__, error)); 1653 1654 /* XXX modify svm.c to update bit 16 of seg_desc.access (unusable) */ 1655} 1656 1657static void 1658vmx_paging_info(struct vm_guest_paging *paging) 1659{ 1660 paging->cr3 = vmcs_guest_cr3(); 1661 paging->cpl = vmx_cpl(); 1662 paging->cpu_mode = vmx_cpu_mode(); 1663 paging->paging_mode = vmx_paging_mode(); 1664} 1665 1666static void 1667vmexit_inst_emul(struct vm_exit *vmexit, uint64_t gpa, uint64_t gla) 1668{ 1669 vmexit->exitcode = VM_EXITCODE_INST_EMUL; 1670 vmexit->u.inst_emul.gpa = gpa; 1671 vmexit->u.inst_emul.gla = gla; 1672 vmx_paging_info(&vmexit->u.inst_emul.paging); 1673} 1674 1675static int 1676ept_fault_type(uint64_t ept_qual) 1677{ 1678 int fault_type; 1679 1680 if (ept_qual & EPT_VIOLATION_DATA_WRITE) 1681 fault_type = VM_PROT_WRITE; 1682 else if (ept_qual & EPT_VIOLATION_INST_FETCH) 1683 fault_type = VM_PROT_EXECUTE; 1684 else 1685 fault_type= VM_PROT_READ; 1686 1687 return (fault_type); 1688} 1689 1690static boolean_t 1691ept_emulation_fault(uint64_t ept_qual) 1692{ 1693 int read, write; 1694 1695 /* EPT fault on an instruction fetch doesn't make sense here */ 1696 if (ept_qual & EPT_VIOLATION_INST_FETCH) 1697 return (FALSE); 1698 1699 /* EPT fault must be a read fault or a write fault */ 1700 read = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0; 1701 write = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0; 1702 if ((read | write) == 0) 1703 return (FALSE); 1704 1705 /* 1706 * The EPT violation must have been caused by accessing a 1707 * guest-physical address that is a translation of a guest-linear 1708 * address. 1709 */ 1710 if ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 || 1711 (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) { 1712 return (FALSE); 1713 } 1714 1715 return (TRUE); 1716} 1717 1718static __inline int 1719apic_access_virtualization(struct vmx *vmx, int vcpuid) 1720{ 1721 uint32_t proc_ctls2; 1722 1723 proc_ctls2 = vmx->cap[vcpuid].proc_ctls2; 1724 return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) ? 
1 : 0); 1725} 1726 1727static __inline int 1728x2apic_virtualization(struct vmx *vmx, int vcpuid) 1729{ 1730 uint32_t proc_ctls2; 1731 1732 proc_ctls2 = vmx->cap[vcpuid].proc_ctls2; 1733 return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_X2APIC_MODE) ? 1 : 0); 1734} 1735 1736static int 1737vmx_handle_apic_write(struct vmx *vmx, int vcpuid, struct vlapic *vlapic, 1738 uint64_t qual) 1739{ 1740 int error, handled, offset; 1741 uint32_t *apic_regs, vector; 1742 bool retu; 1743 1744 handled = HANDLED; 1745 offset = APIC_WRITE_OFFSET(qual); 1746 1747 if (!apic_access_virtualization(vmx, vcpuid)) { 1748 /* 1749 * In general there should not be any APIC write VM-exits 1750 * unless APIC-access virtualization is enabled. 1751 * 1752 * However self-IPI virtualization can legitimately trigger 1753 * an APIC-write VM-exit so treat it specially. 1754 */ 1755 if (x2apic_virtualization(vmx, vcpuid) && 1756 offset == APIC_OFFSET_SELF_IPI) { 1757 apic_regs = (uint32_t *)(vlapic->apic_page); 1758 vector = apic_regs[APIC_OFFSET_SELF_IPI / 4]; 1759 vlapic_self_ipi_handler(vlapic, vector); 1760 return (HANDLED); 1761 } else 1762 return (UNHANDLED); 1763 } 1764 1765 switch (offset) { 1766 case APIC_OFFSET_ID: 1767 vlapic_id_write_handler(vlapic); 1768 break; 1769 case APIC_OFFSET_LDR: 1770 vlapic_ldr_write_handler(vlapic); 1771 break; 1772 case APIC_OFFSET_DFR: 1773 vlapic_dfr_write_handler(vlapic); 1774 break; 1775 case APIC_OFFSET_SVR: 1776 vlapic_svr_write_handler(vlapic); 1777 break; 1778 case APIC_OFFSET_ESR: 1779 vlapic_esr_write_handler(vlapic); 1780 break; 1781 case APIC_OFFSET_ICR_LOW: 1782 retu = false; 1783 error = vlapic_icrlo_write_handler(vlapic, &retu); 1784 if (error != 0 || retu) 1785 handled = UNHANDLED; 1786 break; 1787 case APIC_OFFSET_CMCI_LVT: 1788 case APIC_OFFSET_TIMER_LVT ... APIC_OFFSET_ERROR_LVT: 1789 vlapic_lvt_write_handler(vlapic, offset); 1790 break; 1791 case APIC_OFFSET_TIMER_ICR: 1792 vlapic_icrtmr_write_handler(vlapic); 1793 break; 1794 case APIC_OFFSET_TIMER_DCR: 1795 vlapic_dcr_write_handler(vlapic); 1796 break; 1797 default: 1798 handled = UNHANDLED; 1799 break; 1800 } 1801 return (handled); 1802} 1803 1804static bool 1805apic_access_fault(struct vmx *vmx, int vcpuid, uint64_t gpa) 1806{ 1807 1808 if (apic_access_virtualization(vmx, vcpuid) && 1809 (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE)) 1810 return (true); 1811 else 1812 return (false); 1813} 1814 1815static int 1816vmx_handle_apic_access(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit) 1817{ 1818 uint64_t qual; 1819 int access_type, offset, allowed; 1820 1821 if (!apic_access_virtualization(vmx, vcpuid)) 1822 return (UNHANDLED); 1823 1824 qual = vmexit->u.vmx.exit_qualification; 1825 access_type = APIC_ACCESS_TYPE(qual); 1826 offset = APIC_ACCESS_OFFSET(qual); 1827 1828 allowed = 0; 1829 if (access_type == 0) { 1830 /* 1831 * Read data access to the following registers is expected. 1832 */ 1833 switch (offset) { 1834 case APIC_OFFSET_APR: 1835 case APIC_OFFSET_PPR: 1836 case APIC_OFFSET_RRR: 1837 case APIC_OFFSET_CMCI_LVT: 1838 case APIC_OFFSET_TIMER_CCR: 1839 allowed = 1; 1840 break; 1841 default: 1842 break; 1843 } 1844 } else if (access_type == 1) { 1845 /* 1846 * Write data access to the following registers is expected. 1847 */ 1848 switch (offset) { 1849 case APIC_OFFSET_VER: 1850 case APIC_OFFSET_APR: 1851 case APIC_OFFSET_PPR: 1852 case APIC_OFFSET_RRR: 1853 case APIC_OFFSET_ISR0 ... APIC_OFFSET_ISR7: 1854 case APIC_OFFSET_TMR0 ... APIC_OFFSET_TMR7: 1855 case APIC_OFFSET_IRR0 ... 
APIC_OFFSET_IRR7: 1856 case APIC_OFFSET_CMCI_LVT: 1857 case APIC_OFFSET_TIMER_CCR: 1858 allowed = 1; 1859 break; 1860 default: 1861 break; 1862 } 1863 } 1864 1865 if (allowed) { 1866 vmexit_inst_emul(vmexit, DEFAULT_APIC_BASE + offset, 1867 VIE_INVALID_GLA); 1868 } 1869 1870 /* 1871 * Regardless of whether the APIC-access is allowed this handler 1872 * always returns UNHANDLED: 1873 * - if the access is allowed then it is handled by emulating the 1874 * instruction that caused the VM-exit (outside the critical section) 1875 * - if the access is not allowed then it will be converted to an 1876 * exitcode of VM_EXITCODE_VMX and will be dealt with in userland. 1877 */ 1878 return (UNHANDLED); 1879} 1880 1881static int 1882vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit) 1883{ 1884 int error, handled, in; 1885 struct vmxctx *vmxctx; 1886 struct vlapic *vlapic; 1887 struct vm_inout_str *vis; 1888 uint32_t eax, ecx, edx, idtvec_info, idtvec_err, intr_info, inst_info; 1889 uint32_t reason; 1890 uint64_t qual, gpa; 1891 bool retu; 1892 1893 CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_VIRTUAL_NMI) != 0); 1894 CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_NMI_EXITING) != 0); 1895 1896 handled = UNHANDLED; 1897 vmxctx = &vmx->ctx[vcpu]; 1898 1899 qual = vmexit->u.vmx.exit_qualification; 1900 reason = vmexit->u.vmx.exit_reason; 1901 vmexit->exitcode = VM_EXITCODE_BOGUS; 1902 1903 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_COUNT, 1); 1904 1905 /* 1906 * VM exits that could be triggered during event injection on the 1907 * previous VM entry need to be handled specially by re-injecting 1908 * the event. 1909 * 1910 * See "Information for VM Exits During Event Delivery" in Intel SDM 1911 * for details. 1912 */ 1913 switch (reason) { 1914 case EXIT_REASON_EPT_FAULT: 1915 case EXIT_REASON_EPT_MISCONFIG: 1916 case EXIT_REASON_APIC_ACCESS: 1917 case EXIT_REASON_TASK_SWITCH: 1918 case EXIT_REASON_EXCEPTION: 1919 idtvec_info = vmcs_idt_vectoring_info(); 1920 if (idtvec_info & VMCS_IDT_VEC_VALID) { 1921 idtvec_info &= ~(1 << 12); /* clear undefined bit */ 1922 vmcs_write(VMCS_ENTRY_INTR_INFO, idtvec_info); 1923 if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) { 1924 idtvec_err = vmcs_idt_vectoring_err(); 1925 vmcs_write(VMCS_ENTRY_EXCEPTION_ERROR, 1926 idtvec_err); 1927 } 1928 /* 1929 * If 'virtual NMIs' are being used and the VM-exit 1930 * happened while injecting an NMI during the previous 1931 * VM-entry, then clear "blocking by NMI" in the Guest 1932 * Interruptibility-state. 
1933 */ 1934 if ((idtvec_info & VMCS_INTR_T_MASK) == 1935 VMCS_INTR_T_NMI) { 1936 vmx_clear_nmi_blocking(vmx, vcpu); 1937 } 1938 vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length); 1939 } break; 1940 default: 1941 idtvec_info = 0; 1942 break; 1943 } 1944 1945 switch (reason) { 1946 case EXIT_REASON_CR_ACCESS: 1947 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CR_ACCESS, 1); 1948 handled = vmx_emulate_cr_access(vmx, vcpu, qual); 1949 break; 1950 case EXIT_REASON_RDMSR: 1951 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_RDMSR, 1); 1952 retu = false; 1953 ecx = vmxctx->guest_rcx; 1954 VCPU_CTR1(vmx->vm, vcpu, "rdmsr 0x%08x", ecx); 1955 error = emulate_rdmsr(vmx->vm, vcpu, ecx, &retu); 1956 if (error) { 1957 vmexit->exitcode = VM_EXITCODE_RDMSR; 1958 vmexit->u.msr.code = ecx; 1959 } else if (!retu) { 1960 handled = HANDLED; 1961 } else { 1962 /* Return to userspace with a valid exitcode */ 1963 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, 1964 ("emulate_rdmsr retu with bogus exitcode")); 1965 } 1966 break; 1967 case EXIT_REASON_WRMSR: 1968 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_WRMSR, 1); 1969 retu = false; 1970 eax = vmxctx->guest_rax; 1971 ecx = vmxctx->guest_rcx; 1972 edx = vmxctx->guest_rdx; 1973 VCPU_CTR2(vmx->vm, vcpu, "wrmsr 0x%08x value 0x%016lx", 1974 ecx, (uint64_t)edx << 32 | eax); 1975 error = emulate_wrmsr(vmx->vm, vcpu, ecx, 1976 (uint64_t)edx << 32 | eax, &retu); 1977 if (error) { 1978 vmexit->exitcode = VM_EXITCODE_WRMSR; 1979 vmexit->u.msr.code = ecx; 1980 vmexit->u.msr.wval = (uint64_t)edx << 32 | eax; 1981 } else if (!retu) { 1982 handled = HANDLED; 1983 } else { 1984 /* Return to userspace with a valid exitcode */ 1985 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, 1986 ("emulate_wrmsr retu with bogus exitcode")); 1987 } 1988 break; 1989 case EXIT_REASON_HLT: 1990 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_HLT, 1); 1991 vmexit->exitcode = VM_EXITCODE_HLT; 1992 vmexit->u.hlt.rflags = vmcs_read(VMCS_GUEST_RFLAGS); 1993 break; 1994 case EXIT_REASON_MTF: 1995 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MTRAP, 1); 1996 vmexit->exitcode = VM_EXITCODE_MTRAP; 1997 break; 1998 case EXIT_REASON_PAUSE: 1999 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_PAUSE, 1); 2000 vmexit->exitcode = VM_EXITCODE_PAUSE; 2001 break; 2002 case EXIT_REASON_INTR_WINDOW: 2003 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INTR_WINDOW, 1); 2004 vmx_clear_int_window_exiting(vmx, vcpu); 2005 return (1); 2006 case EXIT_REASON_EXT_INTR: 2007 /* 2008 * External interrupts serve only to cause VM exits and allow 2009 * the host interrupt handler to run. 2010 * 2011 * If this external interrupt triggers a virtual interrupt 2012 * to a VM, then that state will be recorded by the 2013 * host interrupt handler in the VM's softc. We will inject 2014 * this virtual interrupt during the subsequent VM enter. 2015 */ 2016 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO); 2017 2018 /* 2019 * XXX: Ignore this exit if VMCS_INTR_VALID is not set. 2020 * This appears to be a bug in VMware Fusion? 2021 */ 2022 if (!(intr_info & VMCS_INTR_VALID)) 2023 return (1); 2024 KASSERT((intr_info & VMCS_INTR_VALID) != 0 && 2025 (intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_HWINTR, 2026 ("VM exit interruption info invalid: %#x", intr_info)); 2027 vmx_trigger_hostintr(intr_info & 0xff); 2028 2029 /* 2030 * This is special. We want to treat this as a 'handled' 2031 * VM-exit but not increment the instruction pointer.
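		 * The interrupt was not generated by a guest instruction so
		 * there is nothing to skip; returning here bypasses the common
		 * code below that advances the guest %rip by 'inst_length'.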
2032 */ 2033 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXTINT, 1); 2034 return (1); 2035 case EXIT_REASON_NMI_WINDOW: 2036 /* Exit to allow the pending virtual NMI to be injected */ 2037 if (vm_nmi_pending(vmx->vm, vcpu)) 2038 vmx_inject_nmi(vmx, vcpu); 2039 vmx_clear_nmi_window_exiting(vmx, vcpu); 2040 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NMI_WINDOW, 1); 2041 return (1); 2042 case EXIT_REASON_INOUT: 2043 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INOUT, 1); 2044 vmexit->exitcode = VM_EXITCODE_INOUT; 2045 vmexit->u.inout.bytes = (qual & 0x7) + 1; 2046 vmexit->u.inout.in = in = (qual & 0x8) ? 1 : 0; 2047 vmexit->u.inout.string = (qual & 0x10) ? 1 : 0; 2048 vmexit->u.inout.rep = (qual & 0x20) ? 1 : 0; 2049 vmexit->u.inout.port = (uint16_t)(qual >> 16); 2050 vmexit->u.inout.eax = (uint32_t)(vmxctx->guest_rax); 2051 if (vmexit->u.inout.string) { 2052 inst_info = vmcs_read(VMCS_EXIT_INSTRUCTION_INFO); 2053 vmexit->exitcode = VM_EXITCODE_INOUT_STR; 2054 vis = &vmexit->u.inout_str; 2055 vmx_paging_info(&vis->paging); 2056 vis->rflags = vmcs_read(VMCS_GUEST_RFLAGS); 2057 vis->cr0 = vmcs_read(VMCS_GUEST_CR0); 2058 vis->index = inout_str_index(vmx, vcpu, in); 2059 vis->count = inout_str_count(vmx, vcpu, vis->inout.rep); 2060 vis->addrsize = inout_str_addrsize(inst_info); 2061 inout_str_seginfo(vmx, vcpu, inst_info, in, vis); 2062 } 2063 break; 2064 case EXIT_REASON_CPUID: 2065 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CPUID, 1); 2066 handled = vmx_handle_cpuid(vmx->vm, vcpu, vmxctx); 2067 break; 2068 case EXIT_REASON_EXCEPTION: 2069 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXCEPTION, 1); 2070 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO); 2071 KASSERT((intr_info & VMCS_INTR_VALID) != 0, 2072 ("VM exit interruption info invalid: %#x", intr_info)); 2073 2074 /* 2075 * If Virtual NMIs control is 1 and the VM-exit is due to a 2076 * fault encountered during the execution of IRET then we must 2077 * restore the state of "virtual-NMI blocking" before resuming 2078 * the guest. 2079 * 2080 * See "Resuming Guest Software after Handling an Exception". 2081 */ 2082 if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 && 2083 (intr_info & 0xff) != IDT_DF && 2084 (intr_info & EXIT_QUAL_NMIUDTI) != 0) 2085 vmx_restore_nmi_blocking(vmx, vcpu); 2086 2087 /* 2088 * The NMI has already been handled in vmx_exit_handle_nmi(). 2089 */ 2090 if ((intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_NMI) 2091 return (1); 2092 break; 2093 case EXIT_REASON_EPT_FAULT: 2094 /* 2095 * If 'gpa' lies within the address space allocated to 2096 * memory then this must be a nested page fault otherwise 2097 * this must be an instruction that accesses MMIO space. 2098 */ 2099 gpa = vmcs_gpa(); 2100 if (vm_mem_allocated(vmx->vm, gpa) || 2101 apic_access_fault(vmx, vcpu, gpa)) { 2102 vmexit->exitcode = VM_EXITCODE_PAGING; 2103 vmexit->u.paging.gpa = gpa; 2104 vmexit->u.paging.fault_type = ept_fault_type(qual); 2105 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NESTED_FAULT, 1); 2106 } else if (ept_emulation_fault(qual)) { 2107 vmexit_inst_emul(vmexit, gpa, vmcs_gla()); 2108 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INST_EMUL, 1); 2109 } 2110 /* 2111 * If Virtual NMIs control is 1 and the VM-exit is due to an 2112 * EPT fault during the execution of IRET then we must restore 2113 * the state of "virtual-NMI blocking" before resuming. 2114 * 2115 * See description of "NMI unblocking due to IRET" in 2116 * "Exit Qualification for EPT Violations". 
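		 * Bit 12 of the exit qualification is set when the EPT fault
		 * was taken while an "iret" was unblocking NMIs.  The "iret"
		 * is restarted in the guest, so NMI blocking is re-established
		 * here to avoid delivering another NMI while the guest is
		 * still inside its NMI handler.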
2117 */ 2118 if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 && 2119 (qual & EXIT_QUAL_NMIUDTI) != 0) 2120 vmx_restore_nmi_blocking(vmx, vcpu); 2121 break; 2122 case EXIT_REASON_VIRTUALIZED_EOI: 2123 vmexit->exitcode = VM_EXITCODE_IOAPIC_EOI; 2124 vmexit->u.ioapic_eoi.vector = qual & 0xFF; 2125 vmexit->inst_length = 0; /* trap-like */ 2126 break; 2127 case EXIT_REASON_APIC_ACCESS: 2128 handled = vmx_handle_apic_access(vmx, vcpu, vmexit); 2129 break; 2130 case EXIT_REASON_APIC_WRITE: 2131 /* 2132 * APIC-write VM exit is trap-like so the %rip is already 2133 * pointing to the next instruction. 2134 */ 2135 vmexit->inst_length = 0; 2136 vlapic = vm_lapic(vmx->vm, vcpu); 2137 handled = vmx_handle_apic_write(vmx, vcpu, vlapic, qual); 2138 break; 2139 case EXIT_REASON_XSETBV: 2140 handled = vmx_emulate_xsetbv(vmx, vcpu, vmexit); 2141 break; 2142 default: 2143 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_UNKNOWN, 1); 2144 break; 2145 } 2146 2147 if (handled) { 2148 /* 2149 * It is possible that control is returned to userland 2150 * even though we were able to handle the VM exit in the 2151 * kernel. 2152 * 2153 * In such a case we want to make sure that the userland 2154 * restarts guest execution at the instruction *after* 2155 * the one we just processed. Therefore we update the 2156 * guest rip in the VMCS and in 'vmexit'. 2157 */ 2158 vmexit->rip += vmexit->inst_length; 2159 vmexit->inst_length = 0; 2160 vmcs_write(VMCS_GUEST_RIP, vmexit->rip); 2161 } else { 2162 if (vmexit->exitcode == VM_EXITCODE_BOGUS) { 2163 /* 2164 * If this VM exit was not claimed by anybody then 2165 * treat it as a generic VMX exit. 2166 */ 2167 vmexit->exitcode = VM_EXITCODE_VMX; 2168 vmexit->u.vmx.status = VM_SUCCESS; 2169 vmexit->u.vmx.inst_type = 0; 2170 vmexit->u.vmx.inst_error = 0; 2171 } else { 2172 /* 2173 * The exitcode and collateral have been populated. 2174 * The VM exit will be processed further in userland. 
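			 * (For example VM_EXITCODE_INOUT, VM_EXITCODE_RDMSR or
			 * VM_EXITCODE_WRMSR set by the handlers above.)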
2175 */ 2176 } 2177 } 2178 return (handled); 2179} 2180 2181static __inline int 2182vmx_exit_astpending(struct vmx *vmx, int vcpu, struct vm_exit *vmexit) 2183{ 2184 2185 vmexit->rip = vmcs_guest_rip(); 2186 vmexit->inst_length = 0; 2187 vmexit->exitcode = VM_EXITCODE_BOGUS; 2188 vmx_astpending_trace(vmx, vcpu, vmexit->rip); 2189 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_ASTPENDING, 1); 2190 2191 return (HANDLED); 2192} 2193 2194static __inline int 2195vmx_exit_rendezvous(struct vmx *vmx, int vcpu, struct vm_exit *vmexit) 2196{ 2197 2198 vmexit->rip = vmcs_guest_rip(); 2199 vmexit->inst_length = 0; 2200 vmexit->exitcode = VM_EXITCODE_RENDEZVOUS; 2201 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_RENDEZVOUS, 1); 2202 2203 return (UNHANDLED); 2204} 2205 2206static __inline int 2207vmx_exit_inst_error(struct vmxctx *vmxctx, int rc, struct vm_exit *vmexit) 2208{ 2209 2210 KASSERT(vmxctx->inst_fail_status != VM_SUCCESS, 2211 ("vmx_exit_inst_error: invalid inst_fail_status %d", 2212 vmxctx->inst_fail_status)); 2213 2214 vmexit->inst_length = 0; 2215 vmexit->exitcode = VM_EXITCODE_VMX; 2216 vmexit->u.vmx.status = vmxctx->inst_fail_status; 2217 vmexit->u.vmx.inst_error = vmcs_instruction_error(); 2218 vmexit->u.vmx.exit_reason = ~0; 2219 vmexit->u.vmx.exit_qualification = ~0; 2220 2221 switch (rc) { 2222 case VMX_VMRESUME_ERROR: 2223 case VMX_VMLAUNCH_ERROR: 2224 case VMX_INVEPT_ERROR: 2225 vmexit->u.vmx.inst_type = rc; 2226 break; 2227 default: 2228 panic("vm_exit_inst_error: vmx_enter_guest returned %d", rc); 2229 } 2230 2231 return (UNHANDLED); 2232} 2233 2234/* 2235 * If the NMI-exiting VM execution control is set to '1' then an NMI in 2236 * non-root operation causes a VM-exit. NMI blocking is in effect so it is 2237 * sufficient to simply vector to the NMI handler via a software interrupt. 2238 * However, this must be done before maskable interrupts are enabled 2239 * otherwise the "iret" issued by an interrupt handler will incorrectly 2240 * clear NMI blocking. 
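 * For this reason vmx_run() calls this function before it re-enables
 * interrupts after a guest VM-exit.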
2241 */ 2242static __inline void 2243vmx_exit_handle_nmi(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit) 2244{ 2245 uint32_t intr_info; 2246 2247 KASSERT((read_rflags() & PSL_I) == 0, ("interrupts enabled")); 2248 2249 if (vmexit->u.vmx.exit_reason != EXIT_REASON_EXCEPTION) 2250 return; 2251 2252 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO); 2253 KASSERT((intr_info & VMCS_INTR_VALID) != 0, 2254 ("VM exit interruption info invalid: %#x", intr_info)); 2255 2256 if ((intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_NMI) { 2257 KASSERT((intr_info & 0xff) == IDT_NMI, ("VM exit due " 2258 "to NMI has invalid vector: %#x", intr_info)); 2259 VCPU_CTR0(vmx->vm, vcpuid, "Vectoring to NMI handler"); 2260 __asm __volatile("int $2"); 2261 } 2262} 2263 2264static int 2265vmx_run(void *arg, int vcpu, register_t startrip, pmap_t pmap, 2266 void *rendezvous_cookie, void *suspend_cookie) 2267{ 2268 int rc, handled, launched; 2269 struct vmx *vmx; 2270 struct vm *vm; 2271 struct vmxctx *vmxctx; 2272 struct vmcs *vmcs; 2273 struct vm_exit *vmexit; 2274 struct vlapic *vlapic; 2275 uint64_t rip; 2276 uint32_t exit_reason; 2277 2278 vmx = arg; 2279 vm = vmx->vm; 2280 vmcs = &vmx->vmcs[vcpu]; 2281 vmxctx = &vmx->ctx[vcpu]; 2282 vlapic = vm_lapic(vm, vcpu); 2283 vmexit = vm_exitinfo(vm, vcpu); 2284 launched = 0; 2285 2286 KASSERT(vmxctx->pmap == pmap, 2287 ("pmap %p different than ctx pmap %p", pmap, vmxctx->pmap)); 2288 2289 VMPTRLD(vmcs); 2290 2291 /* 2292 * XXX 2293 * We do this every time because we may setup the virtual machine 2294 * from a different process than the one that actually runs it. 2295 * 2296 * If the life of a virtual machine was spent entirely in the context 2297 * of a single process we could do this once in vmx_vminit(). 2298 */ 2299 vmcs_write(VMCS_HOST_CR3, rcr3()); 2300 2301 vmcs_write(VMCS_GUEST_RIP, startrip); 2302 vmx_set_pcpu_defaults(vmx, vcpu, pmap); 2303 do { 2304 /* 2305 * Interrupts are disabled from this point on until the 2306 * guest starts executing. This is done for the following 2307 * reasons: 2308 * 2309 * If an AST is asserted on this thread after the check below, 2310 * then the IPI_AST notification will not be lost, because it 2311 * will cause a VM exit due to external interrupt as soon as 2312 * the guest state is loaded. 2313 * 2314 * A posted interrupt after 'vmx_inject_interrupts()' will 2315 * not be "lost" because it will be held pending in the host 2316 * APIC because interrupts are disabled. The pending interrupt 2317 * will be recognized as soon as the guest state is loaded. 2318 * 2319 * The same reasoning applies to the IPI generated by 2320 * pmap_invalidate_ept(). 
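		 * In all of these cases the pending IPI is recognized as soon
		 * as the guest is resumed, so the window between the checks
		 * below and the actual VM-entry is covered.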
2321 */ 2322 disable_intr(); 2323 if (vcpu_suspended(suspend_cookie)) { 2324 enable_intr(); 2325 vm_exit_suspended(vmx->vm, vcpu, vmcs_guest_rip()); 2326 handled = UNHANDLED; 2327 break; 2328 } 2329 2330 if (vcpu_rendezvous_pending(rendezvous_cookie)) { 2331 enable_intr(); 2332 handled = vmx_exit_rendezvous(vmx, vcpu, vmexit); 2333 break; 2334 } 2335 2336 if (curthread->td_flags & (TDF_ASTPENDING | TDF_NEEDRESCHED)) { 2337 enable_intr(); 2338 handled = vmx_exit_astpending(vmx, vcpu, vmexit); 2339 break; 2340 } 2341 2342 vmx_inject_interrupts(vmx, vcpu, vlapic); 2343 vmx_run_trace(vmx, vcpu); 2344 rc = vmx_enter_guest(vmxctx, vmx, launched); 2345 2346 /* Collect some information for VM exit processing */ 2347 vmexit->rip = rip = vmcs_guest_rip(); 2348 vmexit->inst_length = vmexit_instruction_length(); 2349 vmexit->u.vmx.exit_reason = exit_reason = vmcs_exit_reason(); 2350 vmexit->u.vmx.exit_qualification = vmcs_exit_qualification(); 2351 2352 if (rc == VMX_GUEST_VMEXIT) { 2353 vmx_exit_handle_nmi(vmx, vcpu, vmexit); 2354 enable_intr(); 2355 handled = vmx_exit_process(vmx, vcpu, vmexit); 2356 } else { 2357 enable_intr(); 2358 handled = vmx_exit_inst_error(vmxctx, rc, vmexit); 2359 } 2360 launched = 1; 2361 vmx_exit_trace(vmx, vcpu, rip, exit_reason, handled); 2362 } while (handled); 2363 2364 /* 2365 * If a VM exit has been handled then the exitcode must be BOGUS 2366 * If a VM exit is not handled then the exitcode must not be BOGUS 2367 */ 2368 if ((handled && vmexit->exitcode != VM_EXITCODE_BOGUS) || 2369 (!handled && vmexit->exitcode == VM_EXITCODE_BOGUS)) { 2370 panic("Mismatch between handled (%d) and exitcode (%d)", 2371 handled, vmexit->exitcode); 2372 } 2373 2374 if (!handled) 2375 vmm_stat_incr(vm, vcpu, VMEXIT_USERSPACE, 1); 2376 2377 VCPU_CTR1(vm, vcpu, "returning from vmx_run: exitcode %d", 2378 vmexit->exitcode); 2379 2380 VMCLEAR(vmcs); 2381 return (0); 2382} 2383 2384static void 2385vmx_vmcleanup(void *arg) 2386{ 2387 int i; 2388 struct vmx *vmx = arg; 2389 2390 if (apic_access_virtualization(vmx, 0)) 2391 vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE); 2392 2393 for (i = 0; i < VM_MAXCPU; i++) 2394 vpid_free(vmx->state[i].vpid); 2395 2396 free(vmx, M_VMX); 2397 2398 return; 2399} 2400 2401static register_t * 2402vmxctx_regptr(struct vmxctx *vmxctx, int reg) 2403{ 2404 2405 switch (reg) { 2406 case VM_REG_GUEST_RAX: 2407 return (&vmxctx->guest_rax); 2408 case VM_REG_GUEST_RBX: 2409 return (&vmxctx->guest_rbx); 2410 case VM_REG_GUEST_RCX: 2411 return (&vmxctx->guest_rcx); 2412 case VM_REG_GUEST_RDX: 2413 return (&vmxctx->guest_rdx); 2414 case VM_REG_GUEST_RSI: 2415 return (&vmxctx->guest_rsi); 2416 case VM_REG_GUEST_RDI: 2417 return (&vmxctx->guest_rdi); 2418 case VM_REG_GUEST_RBP: 2419 return (&vmxctx->guest_rbp); 2420 case VM_REG_GUEST_R8: 2421 return (&vmxctx->guest_r8); 2422 case VM_REG_GUEST_R9: 2423 return (&vmxctx->guest_r9); 2424 case VM_REG_GUEST_R10: 2425 return (&vmxctx->guest_r10); 2426 case VM_REG_GUEST_R11: 2427 return (&vmxctx->guest_r11); 2428 case VM_REG_GUEST_R12: 2429 return (&vmxctx->guest_r12); 2430 case VM_REG_GUEST_R13: 2431 return (&vmxctx->guest_r13); 2432 case VM_REG_GUEST_R14: 2433 return (&vmxctx->guest_r14); 2434 case VM_REG_GUEST_R15: 2435 return (&vmxctx->guest_r15); 2436 case VM_REG_GUEST_CR2: 2437 return (&vmxctx->guest_cr2); 2438 default: 2439 break; 2440 } 2441 return (NULL); 2442} 2443 2444static int 2445vmxctx_getreg(struct vmxctx *vmxctx, int reg, uint64_t *retval) 2446{ 2447 register_t *regp; 2448 2449 if ((regp = 
vmxctx_regptr(vmxctx, reg)) != NULL) { 2450 *retval = *regp; 2451 return (0); 2452 } else 2453 return (EINVAL); 2454} 2455 2456static int 2457vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val) 2458{ 2459 register_t *regp; 2460 2461 if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) { 2462 *regp = val; 2463 return (0); 2464 } else 2465 return (EINVAL); 2466} 2467 2468static int 2469vmx_shadow_reg(int reg) 2470{ 2471 int shreg; 2472 2473 shreg = -1; 2474 2475 switch (reg) { 2476 case VM_REG_GUEST_CR0: 2477 shreg = VMCS_CR0_SHADOW; 2478 break; 2479 case VM_REG_GUEST_CR4: 2480 shreg = VMCS_CR4_SHADOW; 2481 break; 2482 default: 2483 break; 2484 } 2485 2486 return (shreg); 2487} 2488 2489static int 2490vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval) 2491{ 2492 int running, hostcpu; 2493 struct vmx *vmx = arg; 2494 2495 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu); 2496 if (running && hostcpu != curcpu) 2497 panic("vmx_getreg: %s%d is running", vm_name(vmx->vm), vcpu); 2498 2499 if (vmxctx_getreg(&vmx->ctx[vcpu], reg, retval) == 0) 2500 return (0); 2501 2502 return (vmcs_getreg(&vmx->vmcs[vcpu], running, reg, retval)); 2503} 2504 2505static int 2506vmx_setreg(void *arg, int vcpu, int reg, uint64_t val) 2507{ 2508 int error, hostcpu, running, shadow; 2509 uint64_t ctls; 2510 struct vmx *vmx = arg; 2511 2512 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu); 2513 if (running && hostcpu != curcpu) 2514 panic("vmx_setreg: %s%d is running", vm_name(vmx->vm), vcpu); 2515 2516 if (vmxctx_setreg(&vmx->ctx[vcpu], reg, val) == 0) 2517 return (0); 2518 2519 error = vmcs_setreg(&vmx->vmcs[vcpu], running, reg, val); 2520 2521 if (error == 0) { 2522 /* 2523 * If the "load EFER" VM-entry control is 1 then the 2524 * value of EFER.LMA must be identical to "IA-32e mode guest" 2525 * bit in the VM-entry control. 
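		 * This is one of the VM-entry consistency checks on the guest
		 * state; VM-entry fails if the two bits disagree.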
2526 */ 2527 if ((entry_ctls & VM_ENTRY_LOAD_EFER) != 0 && 2528 (reg == VM_REG_GUEST_EFER)) { 2529 vmcs_getreg(&vmx->vmcs[vcpu], running, 2530 VMCS_IDENT(VMCS_ENTRY_CTLS), &ctls); 2531 if (val & EFER_LMA) 2532 ctls |= VM_ENTRY_GUEST_LMA; 2533 else 2534 ctls &= ~VM_ENTRY_GUEST_LMA; 2535 vmcs_setreg(&vmx->vmcs[vcpu], running, 2536 VMCS_IDENT(VMCS_ENTRY_CTLS), ctls); 2537 } 2538 2539 shadow = vmx_shadow_reg(reg); 2540 if (shadow > 0) { 2541 /* 2542 * Store the unmodified value in the shadow 2543 */ 2544 error = vmcs_setreg(&vmx->vmcs[vcpu], running, 2545 VMCS_IDENT(shadow), val); 2546 } 2547 } 2548 2549 return (error); 2550} 2551 2552static int 2553vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc) 2554{ 2555 int hostcpu, running; 2556 struct vmx *vmx = arg; 2557 2558 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu); 2559 if (running && hostcpu != curcpu) 2560 panic("vmx_getdesc: %s%d is running", vm_name(vmx->vm), vcpu); 2561 2562 return (vmcs_getdesc(&vmx->vmcs[vcpu], running, reg, desc)); 2563} 2564 2565static int 2566vmx_setdesc(void *arg, int vcpu, int reg, struct seg_desc *desc) 2567{ 2568 int hostcpu, running; 2569 struct vmx *vmx = arg; 2570 2571 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu); 2572 if (running && hostcpu != curcpu) 2573 panic("vmx_setdesc: %s%d is running", vm_name(vmx->vm), vcpu); 2574 2575 return (vmcs_setdesc(&vmx->vmcs[vcpu], running, reg, desc)); 2576} 2577 2578static int 2579vmx_getcap(void *arg, int vcpu, int type, int *retval) 2580{ 2581 struct vmx *vmx = arg; 2582 int vcap; 2583 int ret; 2584 2585 ret = ENOENT; 2586 2587 vcap = vmx->cap[vcpu].set; 2588 2589 switch (type) { 2590 case VM_CAP_HALT_EXIT: 2591 if (cap_halt_exit) 2592 ret = 0; 2593 break; 2594 case VM_CAP_PAUSE_EXIT: 2595 if (cap_pause_exit) 2596 ret = 0; 2597 break; 2598 case VM_CAP_MTRAP_EXIT: 2599 if (cap_monitor_trap) 2600 ret = 0; 2601 break; 2602 case VM_CAP_UNRESTRICTED_GUEST: 2603 if (cap_unrestricted_guest) 2604 ret = 0; 2605 break; 2606 case VM_CAP_ENABLE_INVPCID: 2607 if (cap_invpcid) 2608 ret = 0; 2609 break; 2610 default: 2611 break; 2612 } 2613 2614 if (ret == 0) 2615 *retval = (vcap & (1 << type)) ? 
1 : 0; 2616 2617 return (ret); 2618} 2619 2620static int 2621vmx_setcap(void *arg, int vcpu, int type, int val) 2622{ 2623 struct vmx *vmx = arg; 2624 struct vmcs *vmcs = &vmx->vmcs[vcpu]; 2625 uint32_t baseval; 2626 uint32_t *pptr; 2627 int error; 2628 int flag; 2629 int reg; 2630 int retval; 2631 2632 retval = ENOENT; 2633 pptr = NULL; 2634 2635 switch (type) { 2636 case VM_CAP_HALT_EXIT: 2637 if (cap_halt_exit) { 2638 retval = 0; 2639 pptr = &vmx->cap[vcpu].proc_ctls; 2640 baseval = *pptr; 2641 flag = PROCBASED_HLT_EXITING; 2642 reg = VMCS_PRI_PROC_BASED_CTLS; 2643 } 2644 break; 2645 case VM_CAP_MTRAP_EXIT: 2646 if (cap_monitor_trap) { 2647 retval = 0; 2648 pptr = &vmx->cap[vcpu].proc_ctls; 2649 baseval = *pptr; 2650 flag = PROCBASED_MTF; 2651 reg = VMCS_PRI_PROC_BASED_CTLS; 2652 } 2653 break; 2654 case VM_CAP_PAUSE_EXIT: 2655 if (cap_pause_exit) { 2656 retval = 0; 2657 pptr = &vmx->cap[vcpu].proc_ctls; 2658 baseval = *pptr; 2659 flag = PROCBASED_PAUSE_EXITING; 2660 reg = VMCS_PRI_PROC_BASED_CTLS; 2661 } 2662 break; 2663 case VM_CAP_UNRESTRICTED_GUEST: 2664 if (cap_unrestricted_guest) { 2665 retval = 0; 2666 pptr = &vmx->cap[vcpu].proc_ctls2; 2667 baseval = *pptr; 2668 flag = PROCBASED2_UNRESTRICTED_GUEST; 2669 reg = VMCS_SEC_PROC_BASED_CTLS; 2670 } 2671 break; 2672 case VM_CAP_ENABLE_INVPCID: 2673 if (cap_invpcid) { 2674 retval = 0; 2675 pptr = &vmx->cap[vcpu].proc_ctls2; 2676 baseval = *pptr; 2677 flag = PROCBASED2_ENABLE_INVPCID; 2678 reg = VMCS_SEC_PROC_BASED_CTLS; 2679 } 2680 break; 2681 default: 2682 break; 2683 } 2684 2685 if (retval == 0) { 2686 if (val) { 2687 baseval |= flag; 2688 } else { 2689 baseval &= ~flag; 2690 } 2691 VMPTRLD(vmcs); 2692 error = vmwrite(reg, baseval); 2693 VMCLEAR(vmcs); 2694 2695 if (error) { 2696 retval = error; 2697 } else { 2698 /* 2699 * Update optional stored flags, and record 2700 * setting 2701 */ 2702 if (pptr != NULL) { 2703 *pptr = baseval; 2704 } 2705 2706 if (val) { 2707 vmx->cap[vcpu].set |= (1 << type); 2708 } else { 2709 vmx->cap[vcpu].set &= ~(1 << type); 2710 } 2711 } 2712 } 2713 2714 return (retval); 2715} 2716 2717struct vlapic_vtx { 2718 struct vlapic vlapic; 2719 struct pir_desc *pir_desc; 2720 struct vmx *vmx; 2721}; 2722 2723#define VMX_CTR_PIR(vm, vcpuid, pir_desc, notify, vector, level, msg) \ 2724do { \ 2725 VCPU_CTR2(vm, vcpuid, msg " assert %s-triggered vector %d", \ 2726 level ? "level" : "edge", vector); \ 2727 VCPU_CTR1(vm, vcpuid, msg " pir0 0x%016lx", pir_desc->pir[0]); \ 2728 VCPU_CTR1(vm, vcpuid, msg " pir1 0x%016lx", pir_desc->pir[1]); \ 2729 VCPU_CTR1(vm, vcpuid, msg " pir2 0x%016lx", pir_desc->pir[2]); \ 2730 VCPU_CTR1(vm, vcpuid, msg " pir3 0x%016lx", pir_desc->pir[3]); \ 2731 VCPU_CTR1(vm, vcpuid, msg " notify: %s", notify ? "yes" : "no");\ 2732} while (0) 2733 2734/* 2735 * vlapic->ops handlers that utilize the APICv hardware assist described in 2736 * Chapter 29 of the Intel SDM. 2737 */ 2738static int 2739vmx_set_intr_ready(struct vlapic *vlapic, int vector, bool level) 2740{ 2741 struct vlapic_vtx *vlapic_vtx; 2742 struct pir_desc *pir_desc; 2743 uint64_t mask; 2744 int idx, notify; 2745 2746 vlapic_vtx = (struct vlapic_vtx *)vlapic; 2747 pir_desc = vlapic_vtx->pir_desc; 2748 2749 /* 2750 * Keep track of interrupt requests in the PIR descriptor. This is 2751 * because the virtual APIC page pointed to by the VMCS cannot be 2752 * modified if the vcpu is running. 
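	 * Bits set in the PIR are moved into the virtual IRR either by the
	 * processor's posted-interrupt processing while the vcpu is running
	 * or by vmx_inject_pir() on the next VM-entry.  The 'pending' bit
	 * updated below tells the caller whether a notification IPI is needed.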
2753 */ 2754 idx = vector / 64; 2755 mask = 1UL << (vector % 64); 2756 atomic_set_long(&pir_desc->pir[idx], mask); 2757 notify = atomic_cmpset_long(&pir_desc->pending, 0, 1); 2758 2759 VMX_CTR_PIR(vlapic->vm, vlapic->vcpuid, pir_desc, notify, vector, 2760 level, "vmx_set_intr_ready"); 2761 return (notify); 2762} 2763 2764static int 2765vmx_pending_intr(struct vlapic *vlapic, int *vecptr) 2766{ 2767 struct vlapic_vtx *vlapic_vtx; 2768 struct pir_desc *pir_desc; 2769 struct LAPIC *lapic; 2770 uint64_t pending, pirval; 2771 uint32_t ppr, vpr; 2772 int i; 2773 2774 /* 2775 * This function is only expected to be called from the 'HLT' exit 2776 * handler which does not care about the vector that is pending. 2777 */ 2778 KASSERT(vecptr == NULL, ("vmx_pending_intr: vecptr must be NULL")); 2779 2780 vlapic_vtx = (struct vlapic_vtx *)vlapic; 2781 pir_desc = vlapic_vtx->pir_desc; 2782 2783 pending = atomic_load_acq_long(&pir_desc->pending); 2784 if (!pending) 2785 return (0); /* common case */ 2786 2787 /* 2788 * If there is an interrupt pending then it will be recognized only 2789 * if its priority is greater than the processor priority. 2790 * 2791 * Special case: if the processor priority is zero then any pending 2792 * interrupt will be recognized. 2793 */ 2794 lapic = vlapic->apic_page; 2795 ppr = lapic->ppr & 0xf0; 2796 if (ppr == 0) 2797 return (1); 2798 2799 VCPU_CTR1(vlapic->vm, vlapic->vcpuid, "HLT with non-zero PPR %d", 2800 lapic->ppr); 2801 2802 for (i = 3; i >= 0; i--) { 2803 pirval = pir_desc->pir[i]; 2804 if (pirval != 0) { 2805 vpr = (i * 64 + flsl(pirval) - 1) & 0xf0; 2806 return (vpr > ppr); 2807 } 2808 } 2809 return (0); 2810} 2811 2812static void 2813vmx_intr_accepted(struct vlapic *vlapic, int vector) 2814{ 2815 2816 panic("vmx_intr_accepted: not expected to be called"); 2817} 2818 2819static void 2820vmx_set_tmr(struct vlapic *vlapic, int vector, bool level) 2821{ 2822 struct vlapic_vtx *vlapic_vtx; 2823 struct vmx *vmx; 2824 struct vmcs *vmcs; 2825 uint64_t mask, val; 2826 2827 KASSERT(vector >= 0 && vector <= 255, ("invalid vector %d", vector)); 2828 KASSERT(!vcpu_is_running(vlapic->vm, vlapic->vcpuid, NULL), 2829 ("vmx_set_tmr: vcpu cannot be running")); 2830 2831 vlapic_vtx = (struct vlapic_vtx *)vlapic; 2832 vmx = vlapic_vtx->vmx; 2833 vmcs = &vmx->vmcs[vlapic->vcpuid]; 2834 mask = 1UL << (vector % 64); 2835 2836 VMPTRLD(vmcs); 2837 val = vmcs_read(VMCS_EOI_EXIT(vector)); 2838 if (level) 2839 val |= mask; 2840 else 2841 val &= ~mask; 2842 vmcs_write(VMCS_EOI_EXIT(vector), val); 2843 VMCLEAR(vmcs); 2844} 2845 2846static void 2847vmx_enable_x2apic_mode(struct vlapic *vlapic) 2848{ 2849 struct vmx *vmx; 2850 struct vmcs *vmcs; 2851 uint32_t proc_ctls2; 2852 int vcpuid, error; 2853 2854 vcpuid = vlapic->vcpuid; 2855 vmx = ((struct vlapic_vtx *)vlapic)->vmx; 2856 vmcs = &vmx->vmcs[vcpuid]; 2857 2858 proc_ctls2 = vmx->cap[vcpuid].proc_ctls2; 2859 KASSERT((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) != 0, 2860 ("%s: invalid proc_ctls2 %#x", __func__, proc_ctls2)); 2861 2862 proc_ctls2 &= ~PROCBASED2_VIRTUALIZE_APIC_ACCESSES; 2863 proc_ctls2 |= PROCBASED2_VIRTUALIZE_X2APIC_MODE; 2864 vmx->cap[vcpuid].proc_ctls2 = proc_ctls2; 2865 2866 VMPTRLD(vmcs); 2867 vmcs_write(VMCS_SEC_PROC_BASED_CTLS, proc_ctls2); 2868 VMCLEAR(vmcs); 2869 2870 if (vlapic->vcpuid == 0) { 2871 /* 2872 * The nested page table mappings are shared by all vcpus 2873 * so unmap the APIC access page just once. 
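		 * Once the guest is in x2APIC mode it accesses the local APIC
		 * with RDMSR/WRMSR rather than through the memory-mapped page,
		 * and the MSR bitmap change below allows those accesses
		 * without further VM-exits.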
2874 */ 2875 error = vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE); 2876 KASSERT(error == 0, ("%s: vm_unmap_mmio error %d", 2877 __func__, error)); 2878 2879 /* 2880 * The MSR bitmap is shared by all vcpus so modify it only 2881 * once in the context of vcpu 0. 2882 */ 2883 error = vmx_allow_x2apic_msrs(vmx); 2884 KASSERT(error == 0, ("%s: vmx_allow_x2apic_msrs error %d", 2885 __func__, error)); 2886 } 2887} 2888 2889static void 2890vmx_post_intr(struct vlapic *vlapic, int hostcpu) 2891{ 2892 2893 ipi_cpu(hostcpu, pirvec); 2894} 2895 2896/* 2897 * Transfer the pending interrupts in the PIR descriptor to the IRR 2898 * in the virtual APIC page. 2899 */ 2900static void 2901vmx_inject_pir(struct vlapic *vlapic) 2902{ 2903 struct vlapic_vtx *vlapic_vtx; 2904 struct pir_desc *pir_desc; 2905 struct LAPIC *lapic; 2906 uint64_t val, pirval; 2907 int rvi, pirbase = -1; 2908 uint16_t intr_status_old, intr_status_new; 2909 2910 vlapic_vtx = (struct vlapic_vtx *)vlapic; 2911 pir_desc = vlapic_vtx->pir_desc; 2912 if (atomic_cmpset_long(&pir_desc->pending, 1, 0) == 0) { 2913 VCPU_CTR0(vlapic->vm, vlapic->vcpuid, "vmx_inject_pir: " 2914 "no posted interrupt pending"); 2915 return; 2916 } 2917 2918 pirval = 0; 2919 pirbase = -1; 2920 lapic = vlapic->apic_page; 2921 2922 val = atomic_readandclear_long(&pir_desc->pir[0]); 2923 if (val != 0) { 2924 lapic->irr0 |= val; 2925 lapic->irr1 |= val >> 32; 2926 pirbase = 0; 2927 pirval = val; 2928 } 2929 2930 val = atomic_readandclear_long(&pir_desc->pir[1]); 2931 if (val != 0) { 2932 lapic->irr2 |= val; 2933 lapic->irr3 |= val >> 32; 2934 pirbase = 64; 2935 pirval = val; 2936 } 2937 2938 val = atomic_readandclear_long(&pir_desc->pir[2]); 2939 if (val != 0) { 2940 lapic->irr4 |= val; 2941 lapic->irr5 |= val >> 32; 2942 pirbase = 128; 2943 pirval = val; 2944 } 2945 2946 val = atomic_readandclear_long(&pir_desc->pir[3]); 2947 if (val != 0) { 2948 lapic->irr6 |= val; 2949 lapic->irr7 |= val >> 32; 2950 pirbase = 192; 2951 pirval = val; 2952 } 2953 2954 VLAPIC_CTR_IRR(vlapic, "vmx_inject_pir"); 2955 2956 /* 2957 * Update RVI so the processor can evaluate pending virtual 2958 * interrupts on VM-entry. 2959 * 2960 * It is possible for pirval to be 0 here, even though the 2961 * pending bit has been set. The scenario is: 2962 * CPU-Y is sending a posted interrupt to CPU-X, which 2963 * is running a guest and processing posted interrupts in h/w. 2964 * CPU-X will eventually exit and the state seen in s/w is 2965 * the pending bit set, but no PIR bits set. 
2966 * 2967 * CPU-X CPU-Y 2968 * (vm running) (host running) 2969 * rx posted interrupt 2970 * CLEAR pending bit 2971 * SET PIR bit 2972 * READ/CLEAR PIR bits 2973 * SET pending bit 2974 * (vm exit) 2975 * pending bit set, PIR 0 2976 */ 2977 if (pirval != 0) { 2978 rvi = pirbase + flsl(pirval) - 1; 2979 intr_status_old = vmcs_read(VMCS_GUEST_INTR_STATUS); 2980 intr_status_new = (intr_status_old & 0xFF00) | rvi; 2981 if (intr_status_new > intr_status_old) { 2982 vmcs_write(VMCS_GUEST_INTR_STATUS, intr_status_new); 2983 VCPU_CTR2(vlapic->vm, vlapic->vcpuid, "vmx_inject_pir: " 2984 "guest_intr_status changed from 0x%04x to 0x%04x", 2985 intr_status_old, intr_status_new); 2986 } 2987 } 2988} 2989 2990static struct vlapic * 2991vmx_vlapic_init(void *arg, int vcpuid) 2992{ 2993 struct vmx *vmx; 2994 struct vlapic *vlapic; 2995 struct vlapic_vtx *vlapic_vtx; 2996 2997 vmx = arg; 2998 2999 vlapic = malloc(sizeof(struct vlapic_vtx), M_VLAPIC, M_WAITOK | M_ZERO); 3000 vlapic->vm = vmx->vm; 3001 vlapic->vcpuid = vcpuid; 3002 vlapic->apic_page = (struct LAPIC *)&vmx->apic_page[vcpuid]; 3003 3004 vlapic_vtx = (struct vlapic_vtx *)vlapic; 3005 vlapic_vtx->pir_desc = &vmx->pir_desc[vcpuid]; 3006 vlapic_vtx->vmx = vmx; 3007 3008 if (virtual_interrupt_delivery) { 3009 vlapic->ops.set_intr_ready = vmx_set_intr_ready; 3010 vlapic->ops.pending_intr = vmx_pending_intr; 3011 vlapic->ops.intr_accepted = vmx_intr_accepted; 3012 vlapic->ops.set_tmr = vmx_set_tmr; 3013 vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode; 3014 } 3015 3016 if (posted_interrupts) 3017 vlapic->ops.post_intr = vmx_post_intr; 3018 3019 vlapic_init(vlapic); 3020 3021 return (vlapic); 3022} 3023 3024static void 3025vmx_vlapic_cleanup(void *arg, struct vlapic *vlapic) 3026{ 3027 3028 vlapic_cleanup(vlapic); 3029 free(vlapic, M_VLAPIC); 3030} 3031 3032struct vmm_ops vmm_ops_intel = { 3033 vmx_init, 3034 vmx_cleanup, 3035 vmx_restore, 3036 vmx_vminit, 3037 vmx_run, 3038 vmx_vmcleanup, 3039 vmx_getreg, 3040 vmx_setreg, 3041 vmx_getdesc, 3042 vmx_setdesc, 3043 vmx_getcap, 3044 vmx_setcap, 3045 ept_vmspace_alloc, 3046 ept_vmspace_free, 3047 vmx_vlapic_init, 3048 vmx_vlapic_cleanup, 3049}; 3050