vmm.c revision 276349
/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/10/sys/amd64/vmm/vmm.c 276349 2014-12-28 21:27:13Z neel $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/amd64/vmm/vmm.c 276349 2014-12-28 21:27:13Z neel $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>

#include <machine/cpu.h>
#include <machine/vm.h>
#include <machine/pcb.h>
#include <machine/smp.h>
#include <x86/psl.h>
#include <x86/apicreg.h>
#include <machine/vmparam.h>

#include <machine/vmm.h>
#include <machine/vmm_dev.h>
#include <machine/vmm_instruction_emul.h>

#include "vmm_ioport.h"
#include "vmm_ktr.h"
#include "vmm_host.h"
#include "vmm_mem.h"
#include "vmm_util.h"
#include "vatpic.h"
#include "vatpit.h"
#include "vhpet.h"
#include "vioapic.h"
#include "vlapic.h"
#include "vmm_ipi.h"
#include "vmm_stat.h"
#include "vmm_lapic.h"

#include "io/ppt.h"
#include "io/iommu.h"

struct vlapic;

/*
 * Initialization:
 * (a) allocated when vcpu is created
 * (i) initialized when vcpu is created and when it is reinitialized
 * (o) initialized the first time the vcpu is created
 * (x) initialized before use
 */
struct vcpu {
	struct mtx	mtx;			/* (o) protects 'state' and 'hostcpu' */
	enum vcpu_state	state;			/* (o) vcpu state */
	int		hostcpu;		/* (o) vcpu's host cpu */
	struct vlapic	*vlapic;		/* (i) APIC device model */
	enum x2apic_state x2apic_state;		/* (i) APIC mode */
	uint64_t	exitintinfo;		/* (i) events pending at VM exit */
	int		nmi_pending;		/* (i) NMI pending */
	int		extint_pending;		/* (i) INTR pending */
	struct vm_exception exception;		/* (x) exception collateral */
	int		exception_pending;	/* (i) exception pending */
	struct savefpu	*guestfpu;		/* (a,i) guest fpu state */
	uint64_t	guest_xcr0;		/* (i) guest %xcr0 register */
	void		*stats;			/* (a,i) statistics */
	struct vm_exit	exitinfo;		/* (x) exit reason and collateral */
};

#define	vcpu_lock_initialized(v) mtx_initialized(&((v)->mtx))
#define	vcpu_lock_init(v)	mtx_init(&((v)->mtx), "vcpu lock", 0, MTX_SPIN)
#define	vcpu_lock(v)		mtx_lock_spin(&((v)->mtx))
#define	vcpu_unlock(v)		mtx_unlock_spin(&((v)->mtx))
#define	vcpu_assert_locked(v)	mtx_assert(&((v)->mtx), MA_OWNED)
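
/*
 * Example: the spin mutex declared above serializes access to 'state'
 * and 'hostcpu'; vcpu_get_state() below is a typical reader:
 *
 *	vcpu_lock(vcpu);
 *	state = vcpu->state;
 *	vcpu_unlock(vcpu);
 */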

struct mem_seg {
	vm_paddr_t	gpa;
	size_t		len;
	boolean_t	wired;
	vm_object_t	object;
};
#define	VM_MAX_MEMORY_SEGMENTS	2

/*
 * Initialization:
 * (o) initialized the first time the VM is created
 * (i) initialized when VM is created and when it is reinitialized
 * (x) initialized before use
 */
struct vm {
	void		*cookie;		/* (i) cpu-specific data */
	void		*iommu;			/* (x) iommu-specific data */
	struct vhpet	*vhpet;			/* (i) virtual HPET */
	struct vioapic	*vioapic;		/* (i) virtual ioapic */
	struct vatpic	*vatpic;		/* (i) virtual atpic */
	struct vatpit	*vatpit;		/* (i) virtual atpit */
	volatile cpuset_t active_cpus;		/* (i) active vcpus */
	int		suspend;		/* (i) stop VM execution */
	volatile cpuset_t suspended_cpus;	/* (i) suspended vcpus */
	volatile cpuset_t halted_cpus;		/* (x) cpus in a hard halt */
	cpuset_t	rendezvous_req_cpus;	/* (x) rendezvous requested */
	cpuset_t	rendezvous_done_cpus;	/* (x) rendezvous finished */
	void		*rendezvous_arg;	/* (x) rendezvous func/arg */
	vm_rendezvous_func_t rendezvous_func;
	struct mtx	rendezvous_mtx;		/* (o) rendezvous lock */
	int		num_mem_segs;		/* (o) guest memory segments */
	struct mem_seg	mem_segs[VM_MAX_MEMORY_SEGMENTS];
	struct vmspace	*vmspace;		/* (o) guest's address space */
	char		name[VM_MAX_NAMELEN];	/* (o) virtual machine name */
	struct vcpu	vcpu[VM_MAXCPU];	/* (i) guest vcpus */
};

static int vmm_initialized;

static struct vmm_ops *ops;
#define	VMM_INIT(num)	(ops != NULL ? (*ops->init)(num) : 0)
#define	VMM_CLEANUP()	(ops != NULL ? (*ops->cleanup)() : 0)
#define	VMM_RESUME()	(ops != NULL ? (*ops->resume)() : 0)

#define	VMINIT(vm, pmap) (ops != NULL ? (*ops->vminit)(vm, pmap): NULL)
#define	VMRUN(vmi, vcpu, rip, pmap, rptr, sptr) \
	(ops != NULL ? (*ops->vmrun)(vmi, vcpu, rip, pmap, rptr, sptr) : ENXIO)
#define	VMCLEANUP(vmi)	(ops != NULL ? (*ops->vmcleanup)(vmi) : NULL)
#define	VMSPACE_ALLOC(min, max) \
	(ops != NULL ? (*ops->vmspace_alloc)(min, max) : NULL)
#define	VMSPACE_FREE(vmspace) \
	(ops != NULL ? (*ops->vmspace_free)(vmspace) : ENXIO)
#define	VMGETREG(vmi, vcpu, num, retval)		\
	(ops != NULL ? (*ops->vmgetreg)(vmi, vcpu, num, retval) : ENXIO)
#define	VMSETREG(vmi, vcpu, num, val)		\
	(ops != NULL ? (*ops->vmsetreg)(vmi, vcpu, num, val) : ENXIO)
#define	VMGETDESC(vmi, vcpu, num, desc)		\
	(ops != NULL ? (*ops->vmgetdesc)(vmi, vcpu, num, desc) : ENXIO)
#define	VMSETDESC(vmi, vcpu, num, desc)		\
	(ops != NULL ? (*ops->vmsetdesc)(vmi, vcpu, num, desc) : ENXIO)
#define	VMGETCAP(vmi, vcpu, num, retval)	\
	(ops != NULL ? (*ops->vmgetcap)(vmi, vcpu, num, retval) : ENXIO)
#define	VMSETCAP(vmi, vcpu, num, val)		\
	(ops != NULL ? (*ops->vmsetcap)(vmi, vcpu, num, val) : ENXIO)
#define	VLAPIC_INIT(vmi, vcpu)			\
	(ops != NULL ? (*ops->vlapic_init)(vmi, vcpu) : NULL)
#define	VLAPIC_CLEANUP(vmi, vlapic)		\
	(ops != NULL ? (*ops->vlapic_cleanup)(vmi, vlapic) : NULL)

#define	fpu_start_emulating()	load_cr0(rcr0() | CR0_TS)
#define	fpu_stop_emulating()	clts()
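
/*
 * Note: setting CR0.TS makes the next FPU/SSE access raise #NM, so any
 * stray host use of the FPU traps while the guest's state is loaded;
 * clts() clears CR0.TS around the fpurestore()/fpusave() calls below.
 */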

static MALLOC_DEFINE(M_VM, "vm", "vm");

/* statistics */
static VMM_STAT(VCPU_TOTAL_RUNTIME, "vcpu total runtime");

SYSCTL_NODE(_hw, OID_AUTO, vmm, CTLFLAG_RW, NULL, NULL);

/*
 * Halt the guest if all vcpus are executing a HLT instruction with
 * interrupts disabled.
 */
static int halt_detection_enabled = 1;
TUNABLE_INT("hw.vmm.halt_detection", &halt_detection_enabled);
SYSCTL_INT(_hw_vmm, OID_AUTO, halt_detection, CTLFLAG_RDTUN,
    &halt_detection_enabled, 0,
    "Halt VM if all vcpus execute HLT with interrupts disabled");

static int vmm_ipinum;
SYSCTL_INT(_hw_vmm, OID_AUTO, ipinum, CTLFLAG_RD, &vmm_ipinum, 0,
    "IPI vector used for vcpu notifications");

static void
vcpu_cleanup(struct vm *vm, int i, bool destroy)
{
	struct vcpu *vcpu = &vm->vcpu[i];

	VLAPIC_CLEANUP(vm->cookie, vcpu->vlapic);
	if (destroy) {
		vmm_stat_free(vcpu->stats);
		fpu_save_area_free(vcpu->guestfpu);
	}
}

static void
vcpu_init(struct vm *vm, int vcpu_id, bool create)
{
	struct vcpu *vcpu;

	KASSERT(vcpu_id >= 0 && vcpu_id < VM_MAXCPU,
	    ("vcpu_init: invalid vcpu %d", vcpu_id));

	vcpu = &vm->vcpu[vcpu_id];

	if (create) {
		KASSERT(!vcpu_lock_initialized(vcpu), ("vcpu %d already "
		    "initialized", vcpu_id));
		vcpu_lock_init(vcpu);
		vcpu->state = VCPU_IDLE;
		vcpu->hostcpu = NOCPU;
		vcpu->guestfpu = fpu_save_area_alloc();
		vcpu->stats = vmm_stat_alloc();
	}

	vcpu->vlapic = VLAPIC_INIT(vm->cookie, vcpu_id);
	vm_set_x2apic_state(vm, vcpu_id, X2APIC_DISABLED);
	vcpu->exitintinfo = 0;
	vcpu->nmi_pending = 0;
	vcpu->extint_pending = 0;
	vcpu->exception_pending = 0;
	vcpu->guest_xcr0 = XFEATURE_ENABLED_X87;
	fpu_save_area_reset(vcpu->guestfpu);
	vmm_stat_init(vcpu->stats);
}

struct vm_exit *
vm_exitinfo(struct vm *vm, int cpuid)
{
	struct vcpu *vcpu;

	if (cpuid < 0 || cpuid >= VM_MAXCPU)
		panic("vm_exitinfo: invalid cpuid %d", cpuid);

	vcpu = &vm->vcpu[cpuid];

	return (&vcpu->exitinfo);
}

static void
vmm_resume(void)
{
	VMM_RESUME();
}

static int
vmm_init(void)
{
	int error;

	vmm_host_state_init();

	vmm_ipinum = vmm_ipi_alloc();
	if (vmm_ipinum == 0)
		vmm_ipinum = IPI_AST;

	error = vmm_mem_init();
	if (error)
		return (error);

	if (vmm_is_intel())
		ops = &vmm_ops_intel;
	else if (vmm_is_amd())
		ops = &vmm_ops_amd;
	else
		return (ENXIO);

	vmm_resume_p = vmm_resume;

	return (VMM_INIT(vmm_ipinum));
}

static int
vmm_handler(module_t mod, int what, void *arg)
{
	int error;

	switch (what) {
	case MOD_LOAD:
		vmmdev_init();
		if (ppt_avail_devices() > 0)
			iommu_init();
		error = vmm_init();
		if (error == 0)
			vmm_initialized = 1;
		break;
	case MOD_UNLOAD:
		error = vmmdev_cleanup();
		if (error == 0) {
			vmm_resume_p = NULL;
			iommu_cleanup();
			if (vmm_ipinum != IPI_AST)
				vmm_ipi_free(vmm_ipinum);
			error = VMM_CLEANUP();
			/*
			 * Something bad happened - prevent new
			 * VMs from being created
			 */
			if (error)
				vmm_initialized = 0;
		}
		break;
	default:
		error = 0;
		break;
	}
	return (error);
}

static moduledata_t vmm_kmod = {
	"vmm",
	vmm_handler,
	NULL
};

/*
 * vmm initialization has the following dependencies:
 *
 * - iommu initialization must happen after the pci passthru driver has had
 *   a chance to attach to any passthru devices (after SI_SUB_CONFIGURE).
 *
 * - VT-x initialization requires smp_rendezvous() and therefore must happen
 *   after SMP is fully functional (after SI_SUB_SMP).
 */
DECLARE_MODULE(vmm, vmm_kmod, SI_SUB_SMP + 1, SI_ORDER_ANY);
MODULE_VERSION(vmm, 1);

static void
vm_init(struct vm *vm, bool create)
{
	int i;

	vm->cookie = VMINIT(vm, vmspace_pmap(vm->vmspace));
	vm->iommu = NULL;
	vm->vioapic = vioapic_init(vm);
	vm->vhpet = vhpet_init(vm);
	vm->vatpic = vatpic_init(vm);
	vm->vatpit = vatpit_init(vm);

	CPU_ZERO(&vm->active_cpus);

	vm->suspend = 0;
	CPU_ZERO(&vm->suspended_cpus);

	for (i = 0; i < VM_MAXCPU; i++)
		vcpu_init(vm, i, create);
}

int
vm_create(const char *name, struct vm **retvm)
{
	struct vm *vm;
	struct vmspace *vmspace;

	/*
	 * If vmm.ko could not be successfully initialized then don't attempt
	 * to create the virtual machine.
	 */
	if (!vmm_initialized)
		return (ENXIO);

	if (name == NULL || strlen(name) >= VM_MAX_NAMELEN)
		return (EINVAL);

	vmspace = VMSPACE_ALLOC(VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
	if (vmspace == NULL)
		return (ENOMEM);

	vm = malloc(sizeof(struct vm), M_VM, M_WAITOK | M_ZERO);
	strcpy(vm->name, name);
	vm->num_mem_segs = 0;
	vm->vmspace = vmspace;
	mtx_init(&vm->rendezvous_mtx, "vm rendezvous lock", 0, MTX_DEF);

	vm_init(vm, true);

	*retvm = vm;
	return (0);
}
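
/*
 * Example (illustrative sketch): the lifecycle as driven through the
 * vmmdev ioctls boils down to:
 *
 *	struct vm *vm;
 *	if (vm_create("guest0", &vm) == 0) {
 *		(void)vm_activate_cpu(vm, 0);
 *		... vm_run() in a loop, one thread per active vcpu ...
 *		vm_destroy(vm);
 *	}
 */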

static void
vm_free_mem_seg(struct vm *vm, struct mem_seg *seg)
{

	if (seg->object != NULL)
		vmm_mem_free(vm->vmspace, seg->gpa, seg->len);

	bzero(seg, sizeof(*seg));
}

static void
vm_cleanup(struct vm *vm, bool destroy)
{
	int i;

	ppt_unassign_all(vm);

	if (vm->iommu != NULL)
		iommu_destroy_domain(vm->iommu);

	vatpit_cleanup(vm->vatpit);
	vhpet_cleanup(vm->vhpet);
	vatpic_cleanup(vm->vatpic);
	vioapic_cleanup(vm->vioapic);

	for (i = 0; i < VM_MAXCPU; i++)
		vcpu_cleanup(vm, i, destroy);

	VMCLEANUP(vm->cookie);

	if (destroy) {
		for (i = 0; i < vm->num_mem_segs; i++)
			vm_free_mem_seg(vm, &vm->mem_segs[i]);

		vm->num_mem_segs = 0;

		VMSPACE_FREE(vm->vmspace);
		vm->vmspace = NULL;
	}
}

void
vm_destroy(struct vm *vm)
{
	vm_cleanup(vm, true);
	free(vm, M_VM);
}

int
vm_reinit(struct vm *vm)
{
	int error;

	/*
	 * A virtual machine can be reset only if all vcpus are suspended.
	 */
	if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) {
		vm_cleanup(vm, false);
		vm_init(vm, false);
		error = 0;
	} else {
		error = EBUSY;
	}

	return (error);
}

const char *
vm_name(struct vm *vm)
{
	return (vm->name);
}

int
vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
{
	vm_object_t obj;

	if ((obj = vmm_mmio_alloc(vm->vmspace, gpa, len, hpa)) == NULL)
		return (ENOMEM);
	else
		return (0);
}

int
vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len)
{

	vmm_mmio_free(vm->vmspace, gpa, len);
	return (0);
}

boolean_t
vm_mem_allocated(struct vm *vm, vm_paddr_t gpa)
{
	int i;
	vm_paddr_t gpabase, gpalimit;

	for (i = 0; i < vm->num_mem_segs; i++) {
		gpabase = vm->mem_segs[i].gpa;
		gpalimit = gpabase + vm->mem_segs[i].len;
		if (gpa >= gpabase && gpa < gpalimit)
			return (TRUE);		/* 'gpa' is regular memory */
	}

	if (ppt_is_mmio(vm, gpa))
		return (TRUE);			/* 'gpa' is pci passthru mmio */

	return (FALSE);
}

int
vm_malloc(struct vm *vm, vm_paddr_t gpa, size_t len)
{
	int available, allocated;
	struct mem_seg *seg;
	vm_object_t object;
	vm_paddr_t g;

	if ((gpa & PAGE_MASK) || (len & PAGE_MASK) || len == 0)
		return (EINVAL);

	available = allocated = 0;
	g = gpa;
	while (g < gpa + len) {
		if (vm_mem_allocated(vm, g))
			allocated++;
		else
			available++;

		g += PAGE_SIZE;
	}

	/*
	 * If there are some allocated and some available pages in the address
	 * range then it is an error.
	 */
	if (allocated && available)
		return (EINVAL);

	/*
	 * If the entire address range being requested has already been
	 * allocated then there isn't anything more to do.
	 */
	if (allocated && available == 0)
		return (0);

	if (vm->num_mem_segs >= VM_MAX_MEMORY_SEGMENTS)
		return (E2BIG);

	seg = &vm->mem_segs[vm->num_mem_segs];

	if ((object = vmm_mem_alloc(vm->vmspace, gpa, len)) == NULL)
		return (ENOMEM);

	seg->gpa = gpa;
	seg->len = len;
	seg->object = object;
	seg->wired = FALSE;

	vm->num_mem_segs++;

	return (0);
}
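
/*
 * Example: backing the guest's low 1 GB with a single segment. Both
 * 'gpa' and 'len' must be page-aligned, and the range must not mix
 * already-allocated and free pages:
 *
 *	error = vm_malloc(vm, 0, 1024 * 1024 * 1024);
 */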

static vm_paddr_t
vm_maxmem(struct vm *vm)
{
	int i;
	vm_paddr_t gpa, maxmem;

	maxmem = 0;
	for (i = 0; i < vm->num_mem_segs; i++) {
		gpa = vm->mem_segs[i].gpa + vm->mem_segs[i].len;
		if (gpa > maxmem)
			maxmem = gpa;
	}
	return (maxmem);
}

static void
vm_gpa_unwire(struct vm *vm)
{
	int i, rv;
	struct mem_seg *seg;

	for (i = 0; i < vm->num_mem_segs; i++) {
		seg = &vm->mem_segs[i];
		if (!seg->wired)
			continue;

		rv = vm_map_unwire(&vm->vmspace->vm_map,
		    seg->gpa, seg->gpa + seg->len,
		    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
		KASSERT(rv == KERN_SUCCESS, ("vm(%s) memory segment "
		    "%#lx/%ld could not be unwired: %d",
		    vm_name(vm), seg->gpa, seg->len, rv));

		seg->wired = FALSE;
	}
}

static int
vm_gpa_wire(struct vm *vm)
{
	int i, rv;
	struct mem_seg *seg;

	for (i = 0; i < vm->num_mem_segs; i++) {
		seg = &vm->mem_segs[i];
		if (seg->wired)
			continue;

		/* XXX rlimits? */
		rv = vm_map_wire(&vm->vmspace->vm_map,
		    seg->gpa, seg->gpa + seg->len,
		    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
		if (rv != KERN_SUCCESS)
			break;

		seg->wired = TRUE;
	}

	if (i < vm->num_mem_segs) {
		/*
		 * Undo the wiring before returning an error.
		 */
		vm_gpa_unwire(vm);
		return (EAGAIN);
	}

	return (0);
}

static void
vm_iommu_modify(struct vm *vm, boolean_t map)
{
	int i, sz;
	vm_paddr_t gpa, hpa;
	struct mem_seg *seg;
	void *vp, *cookie, *host_domain;

	sz = PAGE_SIZE;
	host_domain = iommu_host_domain();

	for (i = 0; i < vm->num_mem_segs; i++) {
		seg = &vm->mem_segs[i];
		KASSERT(seg->wired, ("vm(%s) memory segment %#lx/%ld not wired",
		    vm_name(vm), seg->gpa, seg->len));

		gpa = seg->gpa;
		while (gpa < seg->gpa + seg->len) {
			vp = vm_gpa_hold(vm, gpa, PAGE_SIZE, VM_PROT_WRITE,
			    &cookie);
			KASSERT(vp != NULL, ("vm(%s) could not map gpa %#lx",
			    vm_name(vm), gpa));

			vm_gpa_release(cookie);

			hpa = DMAP_TO_PHYS((uintptr_t)vp);
			if (map) {
				iommu_create_mapping(vm->iommu, gpa, hpa, sz);
				iommu_remove_mapping(host_domain, hpa, sz);
			} else {
				iommu_remove_mapping(vm->iommu, gpa, sz);
				iommu_create_mapping(host_domain, hpa, hpa, sz);
			}

			gpa += PAGE_SIZE;
		}
	}

	/*
	 * Invalidate the cached translations associated with the domain
	 * from which pages were removed.
	 */
	if (map)
		iommu_invalidate_tlb(host_domain);
	else
		iommu_invalidate_tlb(vm->iommu);
}

#define	vm_iommu_unmap(vm)	vm_iommu_modify((vm), FALSE)
#define	vm_iommu_map(vm)	vm_iommu_modify((vm), TRUE)

int
vm_unassign_pptdev(struct vm *vm, int bus, int slot, int func)
{
	int error;

	error = ppt_unassign_device(vm, bus, slot, func);
	if (error)
		return (error);

	if (ppt_assigned_devices(vm) == 0) {
		vm_iommu_unmap(vm);
		vm_gpa_unwire(vm);
	}
	return (0);
}

int
vm_assign_pptdev(struct vm *vm, int bus, int slot, int func)
{
	int error;
	vm_paddr_t maxaddr;

	/*
	 * Virtual machines with pci passthru devices get special treatment:
	 * - the guest physical memory is wired
	 * - the iommu is programmed to do the 'gpa' to 'hpa' translation
	 *
	 * We need to do this before the first pci passthru device is attached.
	 */
	if (ppt_assigned_devices(vm) == 0) {
		KASSERT(vm->iommu == NULL,
		    ("vm_assign_pptdev: iommu must be NULL"));
		maxaddr = vm_maxmem(vm);
		vm->iommu = iommu_create_domain(maxaddr);

		error = vm_gpa_wire(vm);
		if (error)
			return (error);

		vm_iommu_map(vm);
	}

	error = ppt_assign_device(vm, bus, slot, func);
	return (error);
}

void *
vm_gpa_hold(struct vm *vm, vm_paddr_t gpa, size_t len, int reqprot,
    void **cookie)
{
	int count, pageoff;
	vm_page_t m;

	pageoff = gpa & PAGE_MASK;
	if (len > PAGE_SIZE - pageoff)
		panic("vm_gpa_hold: invalid gpa/len: 0x%016lx/%lu", gpa, len);

	count = vm_fault_quick_hold_pages(&vm->vmspace->vm_map,
	    trunc_page(gpa), PAGE_SIZE, reqprot, &m, 1);

	if (count == 1) {
		*cookie = m;
		return ((void *)(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)) + pageoff));
	} else {
		*cookie = NULL;
		return (NULL);
	}
}

void
vm_gpa_release(void *cookie)
{
	vm_page_t m = cookie;

	vm_page_lock(m);
	vm_page_unhold(m);
	vm_page_unlock(m);
}
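
/*
 * Example (illustrative): a short-lived mapping of guest memory via the
 * direct map. The gpa/len pair must not cross a page boundary:
 *
 *	void *cookie;
 *	uint32_t *p = vm_gpa_hold(vm, gpa, sizeof(*p), VM_PROT_READ,
 *	    &cookie);
 *	if (p != NULL) {
 *		uint32_t val = *p;
 *		vm_gpa_release(cookie);
 *	}
 */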

int
vm_gpabase2memseg(struct vm *vm, vm_paddr_t gpabase,
    struct vm_memory_segment *seg)
{
	int i;

	for (i = 0; i < vm->num_mem_segs; i++) {
		if (gpabase == vm->mem_segs[i].gpa) {
			seg->gpa = vm->mem_segs[i].gpa;
			seg->len = vm->mem_segs[i].len;
			seg->wired = vm->mem_segs[i].wired;
			return (0);
		}
	}
	return (-1);
}

int
vm_get_memobj(struct vm *vm, vm_paddr_t gpa, size_t len,
    vm_offset_t *offset, struct vm_object **object)
{
	int i;
	size_t seg_len;
	vm_paddr_t seg_gpa;
	vm_object_t seg_obj;

	for (i = 0; i < vm->num_mem_segs; i++) {
		if ((seg_obj = vm->mem_segs[i].object) == NULL)
			continue;

		seg_gpa = vm->mem_segs[i].gpa;
		seg_len = vm->mem_segs[i].len;

		if (gpa >= seg_gpa && gpa < seg_gpa + seg_len) {
			*offset = gpa - seg_gpa;
			*object = seg_obj;
			vm_object_reference(seg_obj);
			return (0);
		}
	}

	return (EINVAL);
}

int
vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval)
{

	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (reg >= VM_REG_LAST)
		return (EINVAL);

	return (VMGETREG(vm->cookie, vcpu, reg, retval));
}

int
vm_set_register(struct vm *vm, int vcpu, int reg, uint64_t val)
{

	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (reg >= VM_REG_LAST)
		return (EINVAL);

	return (VMSETREG(vm->cookie, vcpu, reg, val));
}

static boolean_t
is_descriptor_table(int reg)
{

	switch (reg) {
	case VM_REG_GUEST_IDTR:
	case VM_REG_GUEST_GDTR:
		return (TRUE);
	default:
		return (FALSE);
	}
}

static boolean_t
is_segment_register(int reg)
{

	switch (reg) {
	case VM_REG_GUEST_ES:
	case VM_REG_GUEST_CS:
	case VM_REG_GUEST_SS:
	case VM_REG_GUEST_DS:
	case VM_REG_GUEST_FS:
	case VM_REG_GUEST_GS:
	case VM_REG_GUEST_TR:
	case VM_REG_GUEST_LDTR:
		return (TRUE);
	default:
		return (FALSE);
	}
}

int
vm_get_seg_desc(struct vm *vm, int vcpu, int reg,
    struct seg_desc *desc)
{

	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (!is_segment_register(reg) && !is_descriptor_table(reg))
		return (EINVAL);

	return (VMGETDESC(vm->cookie, vcpu, reg, desc));
}

int
vm_set_seg_desc(struct vm *vm, int vcpu, int reg,
    struct seg_desc *desc)
{
	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (!is_segment_register(reg) && !is_descriptor_table(reg))
		return (EINVAL);

	return (VMSETDESC(vm->cookie, vcpu, reg, desc));
}

static void
restore_guest_fpustate(struct vcpu *vcpu)
{

	/* flush host state to the pcb */
	fpuexit(curthread);

	/* restore guest FPU state */
	fpu_stop_emulating();
	fpurestore(vcpu->guestfpu);

	/* restore guest XCR0 if XSAVE is enabled in the host */
	if (rcr4() & CR4_XSAVE)
		load_xcr(0, vcpu->guest_xcr0);

	/*
	 * The FPU is now "dirty" with the guest's state so turn on emulation
	 * to trap any access to the FPU by the host.
	 */
	fpu_start_emulating();
}

static void
save_guest_fpustate(struct vcpu *vcpu)
{

	if ((rcr0() & CR0_TS) == 0)
		panic("fpu emulation not enabled in host!");

	/* save guest XCR0 and restore host XCR0 */
	if (rcr4() & CR4_XSAVE) {
		vcpu->guest_xcr0 = rxcr(0);
		load_xcr(0, vmm_get_host_xcr0());
	}

	/* save guest FPU state */
	fpu_stop_emulating();
	fpusave(vcpu->guestfpu);
	fpu_start_emulating();
}

static VMM_STAT(VCPU_IDLE_TICKS, "number of ticks vcpu was idle");

static int
vcpu_set_state_locked(struct vcpu *vcpu, enum vcpu_state newstate,
    bool from_idle)
{
	int error;

	vcpu_assert_locked(vcpu);

	/*
	 * State transitions from the vmmdev_ioctl() must always begin from
	 * the VCPU_IDLE state. This guarantees that there is only a single
	 * ioctl() operating on a vcpu at any point.
	 */
	if (from_idle) {
		while (vcpu->state != VCPU_IDLE)
			msleep_spin(&vcpu->state, &vcpu->mtx, "vmstat", hz);
	} else {
		KASSERT(vcpu->state != VCPU_IDLE, ("invalid transition from "
		    "vcpu idle state"));
	}

	if (vcpu->state == VCPU_RUNNING) {
		KASSERT(vcpu->hostcpu == curcpu, ("curcpu %d and hostcpu %d "
		    "mismatch for running vcpu", curcpu, vcpu->hostcpu));
	} else {
		KASSERT(vcpu->hostcpu == NOCPU, ("Invalid hostcpu %d for a "
		    "vcpu that is not running", vcpu->hostcpu));
	}

	/*
	 * The following state transitions are allowed:
	 * IDLE -> FROZEN -> IDLE
	 * FROZEN -> RUNNING -> FROZEN
	 * FROZEN -> SLEEPING -> FROZEN
	 */
	switch (vcpu->state) {
	case VCPU_IDLE:
	case VCPU_RUNNING:
	case VCPU_SLEEPING:
		error = (newstate != VCPU_FROZEN);
		break;
	case VCPU_FROZEN:
		error = (newstate == VCPU_FROZEN);
		break;
	default:
		error = 1;
		break;
	}

	if (error)
		return (EBUSY);

	vcpu->state = newstate;
	if (newstate == VCPU_RUNNING)
		vcpu->hostcpu = curcpu;
	else
		vcpu->hostcpu = NOCPU;

	if (newstate == VCPU_IDLE)
		wakeup(&vcpu->state);

	return (0);
}

static void
vcpu_require_state(struct vm *vm, int vcpuid, enum vcpu_state newstate)
{
	int error;

	if ((error = vcpu_set_state(vm, vcpuid, newstate, false)) != 0)
		panic("Error %d setting state to %d\n", error, newstate);
}

static void
vcpu_require_state_locked(struct vcpu *vcpu, enum vcpu_state newstate)
{
	int error;

	if ((error = vcpu_set_state_locked(vcpu, newstate, false)) != 0)
		panic("Error %d setting state to %d", error, newstate);
}

static void
vm_set_rendezvous_func(struct vm *vm, vm_rendezvous_func_t func)
{

	KASSERT(mtx_owned(&vm->rendezvous_mtx), ("rendezvous_mtx not locked"));

	/*
	 * Update 'rendezvous_func' and execute a write memory barrier to
	 * ensure that it is visible across all host cpus. This is not needed
	 * for correctness but it does ensure that all the vcpus will notice
	 * that the rendezvous is requested immediately.
	 */
	vm->rendezvous_func = func;
	wmb();
}

#define	RENDEZVOUS_CTR0(vm, vcpuid, fmt)				\
	do {								\
		if (vcpuid >= 0)					\
			VCPU_CTR0(vm, vcpuid, fmt);			\
		else							\
			VM_CTR0(vm, fmt);				\
	} while (0)

static void
vm_handle_rendezvous(struct vm *vm, int vcpuid)
{

	KASSERT(vcpuid == -1 || (vcpuid >= 0 && vcpuid < VM_MAXCPU),
	    ("vm_handle_rendezvous: invalid vcpuid %d", vcpuid));

	mtx_lock(&vm->rendezvous_mtx);
	while (vm->rendezvous_func != NULL) {
		/* 'rendezvous_req_cpus' must be a subset of 'active_cpus' */
		CPU_AND(&vm->rendezvous_req_cpus, &vm->active_cpus);

		if (vcpuid != -1 &&
		    CPU_ISSET(vcpuid, &vm->rendezvous_req_cpus) &&
		    !CPU_ISSET(vcpuid, &vm->rendezvous_done_cpus)) {
			VCPU_CTR0(vm, vcpuid, "Calling rendezvous func");
			(*vm->rendezvous_func)(vm, vcpuid, vm->rendezvous_arg);
			CPU_SET(vcpuid, &vm->rendezvous_done_cpus);
		}
		if (CPU_CMP(&vm->rendezvous_req_cpus,
		    &vm->rendezvous_done_cpus) == 0) {
			VCPU_CTR0(vm, vcpuid, "Rendezvous completed");
			vm_set_rendezvous_func(vm, NULL);
			wakeup(&vm->rendezvous_func);
			break;
		}
		RENDEZVOUS_CTR0(vm, vcpuid, "Wait for rendezvous completion");
		mtx_sleep(&vm->rendezvous_func, &vm->rendezvous_mtx, 0,
		    "vmrndv", 0);
	}
	mtx_unlock(&vm->rendezvous_mtx);
}

/*
 * Emulate a guest 'hlt' by sleeping until the vcpu is ready to run.
 */
static int
vm_handle_hlt(struct vm *vm, int vcpuid, bool intr_disabled, bool *retu)
{
	struct vcpu *vcpu;
	const char *wmesg;
	int error, t, vcpu_halted, vm_halted;

	KASSERT(!CPU_ISSET(vcpuid, &vm->halted_cpus), ("vcpu already halted"));

	vcpu = &vm->vcpu[vcpuid];
	vcpu_halted = 0;
	vm_halted = 0;

	/*
	 * The typical way to halt a cpu is to execute: "sti; hlt"
	 *
	 * STI sets RFLAGS.IF to enable interrupts. However, the processor
	 * remains in an "interrupt shadow" for an additional instruction
	 * following the STI. This guarantees that the "sti; hlt" sequence
	 * is atomic and a pending interrupt will be recognized after the HLT.
	 *
	 * After the HLT emulation is done the vcpu is no longer in an
	 * interrupt shadow and a pending interrupt can be injected on
	 * the next entry into the guest.
	 */
	error = vm_set_register(vm, vcpuid, VM_REG_GUEST_INTR_SHADOW, 0);
	KASSERT(error == 0, ("%s: error %d clearing interrupt shadow",
	    __func__, error));

	vcpu_lock(vcpu);
	while (1) {
		/*
		 * Do a final check for pending NMI or interrupts before
		 * really putting this thread to sleep. Also check for
		 * software events that would cause this vcpu to wakeup.
		 *
		 * These interrupts/events could have happened after the
		 * vcpu returned from VMRUN() and before it acquired the
		 * vcpu lock above.
		 */
		if (vm->rendezvous_func != NULL || vm->suspend)
			break;
		if (vm_nmi_pending(vm, vcpuid))
			break;
		if (!intr_disabled) {
			if (vm_extint_pending(vm, vcpuid) ||
			    vlapic_pending_intr(vcpu->vlapic, NULL)) {
				break;
			}
		}

		/* Don't go to sleep if the vcpu thread needs to yield */
		if (vcpu_should_yield(vm, vcpuid))
			break;

		/*
		 * Some Linux guests implement "halt" by having all vcpus
		 * execute HLT with interrupts disabled. 'halted_cpus' keeps
		 * track of the vcpus that have entered this state. When all
		 * vcpus enter the halted state the virtual machine is halted.
		 */
		if (intr_disabled) {
			wmesg = "vmhalt";
			VCPU_CTR0(vm, vcpuid, "Halted");
			if (!vcpu_halted && halt_detection_enabled) {
				vcpu_halted = 1;
				CPU_SET_ATOMIC(vcpuid, &vm->halted_cpus);
			}
			if (CPU_CMP(&vm->halted_cpus, &vm->active_cpus) == 0) {
				vm_halted = 1;
				break;
			}
		} else {
			wmesg = "vmidle";
		}

		t = ticks;
		vcpu_require_state_locked(vcpu, VCPU_SLEEPING);
		/*
		 * XXX msleep_spin() cannot be interrupted by signals so
		 * wake up periodically to check pending signals.
		 */
		msleep_spin(vcpu, &vcpu->mtx, wmesg, hz);
		vcpu_require_state_locked(vcpu, VCPU_FROZEN);
		vmm_stat_incr(vm, vcpuid, VCPU_IDLE_TICKS, ticks - t);
	}

	if (vcpu_halted)
		CPU_CLR_ATOMIC(vcpuid, &vm->halted_cpus);

	vcpu_unlock(vcpu);

	if (vm_halted)
		vm_suspend(vm, VM_SUSPEND_HALT);

	return (0);
}

static int
vm_handle_paging(struct vm *vm, int vcpuid, bool *retu)
{
	int rv, ftype;
	struct vm_map *map;
	struct vcpu *vcpu;
	struct vm_exit *vme;

	vcpu = &vm->vcpu[vcpuid];
	vme = &vcpu->exitinfo;

	ftype = vme->u.paging.fault_type;
	KASSERT(ftype == VM_PROT_READ ||
	    ftype == VM_PROT_WRITE || ftype == VM_PROT_EXECUTE,
	    ("vm_handle_paging: invalid fault_type %d", ftype));

	if (ftype == VM_PROT_READ || ftype == VM_PROT_WRITE) {
		rv = pmap_emulate_accessed_dirty(vmspace_pmap(vm->vmspace),
		    vme->u.paging.gpa, ftype);
		if (rv == 0) {
			VCPU_CTR2(vm, vcpuid, "%s bit emulation for gpa %#lx",
			    ftype == VM_PROT_READ ? "accessed" : "dirty",
			    vme->u.paging.gpa);
			goto done;
		}
	}

	map = &vm->vmspace->vm_map;
	rv = vm_fault(map, vme->u.paging.gpa, ftype, VM_FAULT_NORMAL);

	VCPU_CTR3(vm, vcpuid, "vm_handle_paging rv = %d, gpa = %#lx, "
	    "ftype = %d", rv, vme->u.paging.gpa, ftype);

	if (rv != KERN_SUCCESS)
		return (EFAULT);
done:
	/* restart execution at the faulting instruction */
	vme->inst_length = 0;

	return (0);
}

static int
vm_handle_inst_emul(struct vm *vm, int vcpuid, bool *retu)
{
	struct vie *vie;
	struct vcpu *vcpu;
	struct vm_exit *vme;
	uint64_t gla, gpa;
	struct vm_guest_paging *paging;
	mem_region_read_t mread;
	mem_region_write_t mwrite;
	enum vm_cpu_mode cpu_mode;
	int cs_d, error;

	vcpu = &vm->vcpu[vcpuid];
	vme = &vcpu->exitinfo;

	gla = vme->u.inst_emul.gla;
	gpa = vme->u.inst_emul.gpa;
	cs_d = vme->u.inst_emul.cs_d;
	vie = &vme->u.inst_emul.vie;
	paging = &vme->u.inst_emul.paging;
	cpu_mode = paging->cpu_mode;

	VCPU_CTR1(vm, vcpuid, "inst_emul fault accessing gpa %#lx", gpa);

	vie_init(vie);

	/* Fetch, decode and emulate the faulting instruction */
	error = vmm_fetch_instruction(vm, vcpuid, paging, vme->rip,
	    vme->inst_length, vie);
	if (error == 1)
		return (0);		/* Resume guest to handle page fault */
	else if (error == -1)
		return (EFAULT);
	else if (error != 0)
		panic("%s: vmm_fetch_instruction error %d", __func__, error);

	if (vmm_decode_instruction(vm, vcpuid, gla, cpu_mode, cs_d, vie) != 0)
		return (EFAULT);

	/* return to userland unless this is an in-kernel emulated device */
	if (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE) {
		mread = lapic_mmio_read;
		mwrite = lapic_mmio_write;
	} else if (gpa >= VIOAPIC_BASE && gpa < VIOAPIC_BASE + VIOAPIC_SIZE) {
		mread = vioapic_mmio_read;
		mwrite = vioapic_mmio_write;
	} else if (gpa >= VHPET_BASE && gpa < VHPET_BASE + VHPET_SIZE) {
		mread = vhpet_mmio_read;
		mwrite = vhpet_mmio_write;
	} else {
		*retu = true;
		return (0);
	}

	error = vmm_emulate_instruction(vm, vcpuid, gpa, vie, paging,
	    mread, mwrite, retu);

	return (error);
}

static int
vm_handle_suspend(struct vm *vm, int vcpuid, bool *retu)
{
	int i, done;
	struct vcpu *vcpu;

	done = 0;
	vcpu = &vm->vcpu[vcpuid];

	CPU_SET_ATOMIC(vcpuid, &vm->suspended_cpus);

	/*
	 * Wait until all 'active_cpus' have suspended themselves.
	 *
	 * Since a VM may be suspended at any time, including when one or
	 * more vcpus are doing a rendezvous, we need to call the rendezvous
	 * handler while we are waiting to prevent a deadlock.
	 */
	vcpu_lock(vcpu);
	while (1) {
		if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) {
			VCPU_CTR0(vm, vcpuid, "All vcpus suspended");
			break;
		}

		if (vm->rendezvous_func == NULL) {
			VCPU_CTR0(vm, vcpuid, "Sleeping during suspend");
			vcpu_require_state_locked(vcpu, VCPU_SLEEPING);
			msleep_spin(vcpu, &vcpu->mtx, "vmsusp", hz);
			vcpu_require_state_locked(vcpu, VCPU_FROZEN);
		} else {
			VCPU_CTR0(vm, vcpuid, "Rendezvous during suspend");
			vcpu_unlock(vcpu);
			vm_handle_rendezvous(vm, vcpuid);
			vcpu_lock(vcpu);
		}
	}
	vcpu_unlock(vcpu);

	/*
	 * Wake up the other sleeping vcpus and return to userspace.
	 */
	for (i = 0; i < VM_MAXCPU; i++) {
		if (CPU_ISSET(i, &vm->suspended_cpus)) {
			vcpu_notify_event(vm, i, false);
		}
	}

	*retu = true;
	return (0);
}

int
vm_suspend(struct vm *vm, enum vm_suspend_how how)
{
	int i;

	if (how <= VM_SUSPEND_NONE || how >= VM_SUSPEND_LAST)
		return (EINVAL);

	if (atomic_cmpset_int(&vm->suspend, 0, how) == 0) {
		VM_CTR2(vm, "virtual machine already suspended %d/%d",
		    vm->suspend, how);
		return (EALREADY);
	}

	VM_CTR1(vm, "virtual machine successfully suspended %d", how);

	/*
	 * Notify all active vcpus that they are now suspended.
	 */
	for (i = 0; i < VM_MAXCPU; i++) {
		if (CPU_ISSET(i, &vm->active_cpus))
			vcpu_notify_event(vm, i, false);
	}

	return (0);
}

void
vm_exit_suspended(struct vm *vm, int vcpuid, uint64_t rip)
{
	struct vm_exit *vmexit;

	KASSERT(vm->suspend > VM_SUSPEND_NONE && vm->suspend < VM_SUSPEND_LAST,
	    ("vm_exit_suspended: invalid suspend type %d", vm->suspend));

	vmexit = vm_exitinfo(vm, vcpuid);
	vmexit->rip = rip;
	vmexit->inst_length = 0;
	vmexit->exitcode = VM_EXITCODE_SUSPENDED;
	vmexit->u.suspended.how = vm->suspend;
}

void
vm_exit_rendezvous(struct vm *vm, int vcpuid, uint64_t rip)
{
	struct vm_exit *vmexit;

	KASSERT(vm->rendezvous_func != NULL, ("rendezvous not in progress"));

	vmexit = vm_exitinfo(vm, vcpuid);
	vmexit->rip = rip;
	vmexit->inst_length = 0;
	vmexit->exitcode = VM_EXITCODE_RENDEZVOUS;
	vmm_stat_incr(vm, vcpuid, VMEXIT_RENDEZVOUS, 1);
}

void
vm_exit_astpending(struct vm *vm, int vcpuid, uint64_t rip)
{
	struct vm_exit *vmexit;

	vmexit = vm_exitinfo(vm, vcpuid);
	vmexit->rip = rip;
	vmexit->inst_length = 0;
	vmexit->exitcode = VM_EXITCODE_BOGUS;
	vmm_stat_incr(vm, vcpuid, VMEXIT_ASTPENDING, 1);
}
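
/*
 * Example flow (illustrative): each exit from VMRUN() below is either
 * handled in the kernel and the guest restarted ("goto restart"), or
 * reflected to userland by setting 'retu'; vmmdev_ioctl() drives
 * vm_run() in a loop, one call per VM entry.
 */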

int
vm_run(struct vm *vm, struct vm_run *vmrun)
{
	int error, vcpuid;
	struct vcpu *vcpu;
	struct pcb *pcb;
	uint64_t tscval, rip;
	struct vm_exit *vme;
	bool retu, intr_disabled;
	pmap_t pmap;
	void *rptr, *sptr;

	vcpuid = vmrun->cpuid;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	if (!CPU_ISSET(vcpuid, &vm->active_cpus))
		return (EINVAL);

	if (CPU_ISSET(vcpuid, &vm->suspended_cpus))
		return (EINVAL);

	rptr = &vm->rendezvous_func;
	sptr = &vm->suspend;
	pmap = vmspace_pmap(vm->vmspace);
	vcpu = &vm->vcpu[vcpuid];
	vme = &vcpu->exitinfo;
	rip = vmrun->rip;
restart:
	critical_enter();

	KASSERT(!CPU_ISSET(curcpu, &pmap->pm_active),
	    ("vm_run: absurd pm_active"));

	tscval = rdtsc();

	pcb = PCPU_GET(curpcb);
	set_pcb_flags(pcb, PCB_FULL_IRET);

	restore_guest_fpustate(vcpu);

	vcpu_require_state(vm, vcpuid, VCPU_RUNNING);
	error = VMRUN(vm->cookie, vcpuid, rip, pmap, rptr, sptr);
	vcpu_require_state(vm, vcpuid, VCPU_FROZEN);

	save_guest_fpustate(vcpu);

	vmm_stat_incr(vm, vcpuid, VCPU_TOTAL_RUNTIME, rdtsc() - tscval);

	critical_exit();

	if (error == 0) {
		retu = false;
		switch (vme->exitcode) {
		case VM_EXITCODE_SUSPENDED:
			error = vm_handle_suspend(vm, vcpuid, &retu);
			break;
		case VM_EXITCODE_IOAPIC_EOI:
			vioapic_process_eoi(vm, vcpuid,
			    vme->u.ioapic_eoi.vector);
			break;
		case VM_EXITCODE_RENDEZVOUS:
			vm_handle_rendezvous(vm, vcpuid);
			error = 0;
			break;
		case VM_EXITCODE_HLT:
			intr_disabled = ((vme->u.hlt.rflags & PSL_I) == 0);
			error = vm_handle_hlt(vm, vcpuid, intr_disabled, &retu);
			break;
		case VM_EXITCODE_PAGING:
			error = vm_handle_paging(vm, vcpuid, &retu);
			break;
		case VM_EXITCODE_INST_EMUL:
			error = vm_handle_inst_emul(vm, vcpuid, &retu);
			break;
		case VM_EXITCODE_INOUT:
		case VM_EXITCODE_INOUT_STR:
			error = vm_handle_inout(vm, vcpuid, vme, &retu);
			break;
		case VM_EXITCODE_MONITOR:
		case VM_EXITCODE_MWAIT:
			vm_inject_ud(vm, vcpuid);
			break;
		default:
			retu = true;	/* handled in userland */
			break;
		}
	}

	if (error == 0 && retu == false) {
		rip = vme->rip + vme->inst_length;
		goto restart;
	}

	/* copy the exit information */
	bcopy(vme, &vmrun->vm_exit, sizeof(struct vm_exit));
	return (error);
}

int
vm_exit_intinfo(struct vm *vm, int vcpuid, uint64_t info)
{
	struct vcpu *vcpu;
	int type, vector;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	vcpu = &vm->vcpu[vcpuid];

	if (info & VM_INTINFO_VALID) {
		type = info & VM_INTINFO_TYPE;
		vector = info & 0xff;
		if (type == VM_INTINFO_NMI && vector != IDT_NMI)
			return (EINVAL);
		if (type == VM_INTINFO_HWEXCEPTION && vector >= 32)
			return (EINVAL);
		if (info & VM_INTINFO_RSVD)
			return (EINVAL);
	} else {
		info = 0;
	}
	VCPU_CTR2(vm, vcpuid, "%s: info1(%#lx)", __func__, info);
	vcpu->exitintinfo = info;
	return (0);
}

enum exc_class {
	EXC_BENIGN,
	EXC_CONTRIBUTORY,
	EXC_PAGEFAULT
};

#define	IDT_VE	20	/* Virtualization Exception (Intel specific) */

static enum exc_class
exception_class(uint64_t info)
{
	int type, vector;

	KASSERT(info & VM_INTINFO_VALID, ("intinfo must be valid: %#lx", info));
	type = info & VM_INTINFO_TYPE;
	vector = info & 0xff;

	/* Table 6-4, "Interrupt and Exception Classes", Intel SDM, Vol 3 */
	switch (type) {
	case VM_INTINFO_HWINTR:
	case VM_INTINFO_SWINTR:
	case VM_INTINFO_NMI:
		return (EXC_BENIGN);
	default:
		/*
		 * Hardware exception.
		 *
		 * SVM and VT-x use identical type values to represent NMI,
		 * hardware interrupt and software interrupt.
		 *
		 * SVM uses type '3' for all exceptions. VT-x uses type '3'
		 * for exceptions except #BP and #OF. #BP and #OF use a type
		 * value of '5' or '6'. Therefore we don't check for explicit
		 * values of 'type' to classify 'intinfo' into a hardware
		 * exception.
		 */
		break;
	}

	switch (vector) {
	case IDT_PF:
	case IDT_VE:
		return (EXC_PAGEFAULT);
	case IDT_DE:
	case IDT_TS:
	case IDT_NP:
	case IDT_SS:
	case IDT_GP:
		return (EXC_CONTRIBUTORY);
	default:
		return (EXC_BENIGN);
	}
}

static int
nested_fault(struct vm *vm, int vcpuid, uint64_t info1, uint64_t info2,
    uint64_t *retinfo)
{
	enum exc_class exc1, exc2;
	int type1, vector1;

	KASSERT(info1 & VM_INTINFO_VALID, ("info1 %#lx is not valid", info1));
	KASSERT(info2 & VM_INTINFO_VALID, ("info2 %#lx is not valid", info2));

	/*
	 * If an exception occurs while attempting to call the double-fault
	 * handler the processor enters shutdown mode (aka triple fault).
	 */
	type1 = info1 & VM_INTINFO_TYPE;
	vector1 = info1 & 0xff;
	if (type1 == VM_INTINFO_HWEXCEPTION && vector1 == IDT_DF) {
		VCPU_CTR2(vm, vcpuid, "triple fault: info1(%#lx), info2(%#lx)",
		    info1, info2);
		vm_suspend(vm, VM_SUSPEND_TRIPLEFAULT);
		*retinfo = 0;
		return (0);
	}

	/*
	 * Table 6-5 "Conditions for Generating a Double Fault", Intel SDM, Vol3
	 */
	exc1 = exception_class(info1);
	exc2 = exception_class(info2);
	if ((exc1 == EXC_CONTRIBUTORY && exc2 == EXC_CONTRIBUTORY) ||
	    (exc1 == EXC_PAGEFAULT && exc2 != EXC_BENIGN)) {
		/* Convert nested fault into a double fault. */
		*retinfo = IDT_DF;
		*retinfo |= VM_INTINFO_VALID | VM_INTINFO_HWEXCEPTION;
		*retinfo |= VM_INTINFO_DEL_ERRCODE;
	} else {
		/* Handle exceptions serially */
		*retinfo = info2;
	}
	return (1);
}
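
/*
 * Examples (per Table 6-5 above): a #GP raised while delivering a #NP,
 * or any contributory exception or #PF raised while delivering a #PF,
 * is converted to #DF; a benign exception (e.g. #BP) in either position
 * is delivered serially.
 */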
1568 */ 1569 break; 1570 } 1571 1572 switch (vector) { 1573 case IDT_PF: 1574 case IDT_VE: 1575 return (EXC_PAGEFAULT); 1576 case IDT_DE: 1577 case IDT_TS: 1578 case IDT_NP: 1579 case IDT_SS: 1580 case IDT_GP: 1581 return (EXC_CONTRIBUTORY); 1582 default: 1583 return (EXC_BENIGN); 1584 } 1585} 1586 1587static int 1588nested_fault(struct vm *vm, int vcpuid, uint64_t info1, uint64_t info2, 1589 uint64_t *retinfo) 1590{ 1591 enum exc_class exc1, exc2; 1592 int type1, vector1; 1593 1594 KASSERT(info1 & VM_INTINFO_VALID, ("info1 %#lx is not valid", info1)); 1595 KASSERT(info2 & VM_INTINFO_VALID, ("info2 %#lx is not valid", info2)); 1596 1597 /* 1598 * If an exception occurs while attempting to call the double-fault 1599 * handler the processor enters shutdown mode (aka triple fault). 1600 */ 1601 type1 = info1 & VM_INTINFO_TYPE; 1602 vector1 = info1 & 0xff; 1603 if (type1 == VM_INTINFO_HWEXCEPTION && vector1 == IDT_DF) { 1604 VCPU_CTR2(vm, vcpuid, "triple fault: info1(%#lx), info2(%#lx)", 1605 info1, info2); 1606 vm_suspend(vm, VM_SUSPEND_TRIPLEFAULT); 1607 *retinfo = 0; 1608 return (0); 1609 } 1610 1611 /* 1612 * Table 6-5 "Conditions for Generating a Double Fault", Intel SDM, Vol3 1613 */ 1614 exc1 = exception_class(info1); 1615 exc2 = exception_class(info2); 1616 if ((exc1 == EXC_CONTRIBUTORY && exc2 == EXC_CONTRIBUTORY) || 1617 (exc1 == EXC_PAGEFAULT && exc2 != EXC_BENIGN)) { 1618 /* Convert nested fault into a double fault. */ 1619 *retinfo = IDT_DF; 1620 *retinfo |= VM_INTINFO_VALID | VM_INTINFO_HWEXCEPTION; 1621 *retinfo |= VM_INTINFO_DEL_ERRCODE; 1622 } else { 1623 /* Handle exceptions serially */ 1624 *retinfo = info2; 1625 } 1626 return (1); 1627} 1628 1629static uint64_t 1630vcpu_exception_intinfo(struct vcpu *vcpu) 1631{ 1632 uint64_t info = 0; 1633 1634 if (vcpu->exception_pending) { 1635 info = vcpu->exception.vector & 0xff; 1636 info |= VM_INTINFO_VALID | VM_INTINFO_HWEXCEPTION; 1637 if (vcpu->exception.error_code_valid) { 1638 info |= VM_INTINFO_DEL_ERRCODE; 1639 info |= (uint64_t)vcpu->exception.error_code << 32; 1640 } 1641 } 1642 return (info); 1643} 1644 1645int 1646vm_entry_intinfo(struct vm *vm, int vcpuid, uint64_t *retinfo) 1647{ 1648 struct vcpu *vcpu; 1649 uint64_t info1, info2; 1650 int valid; 1651 1652 KASSERT(vcpuid >= 0 && vcpuid < VM_MAXCPU, ("invalid vcpu %d", vcpuid)); 1653 1654 vcpu = &vm->vcpu[vcpuid]; 1655 1656 info1 = vcpu->exitintinfo; 1657 vcpu->exitintinfo = 0; 1658 1659 info2 = 0; 1660 if (vcpu->exception_pending) { 1661 info2 = vcpu_exception_intinfo(vcpu); 1662 vcpu->exception_pending = 0; 1663 VCPU_CTR2(vm, vcpuid, "Exception %d delivered: %#lx", 1664 vcpu->exception.vector, info2); 1665 } 1666 1667 if ((info1 & VM_INTINFO_VALID) && (info2 & VM_INTINFO_VALID)) { 1668 valid = nested_fault(vm, vcpuid, info1, info2, retinfo); 1669 } else if (info1 & VM_INTINFO_VALID) { 1670 *retinfo = info1; 1671 valid = 1; 1672 } else if (info2 & VM_INTINFO_VALID) { 1673 *retinfo = info2; 1674 valid = 1; 1675 } else { 1676 valid = 0; 1677 } 1678 1679 if (valid) { 1680 VCPU_CTR4(vm, vcpuid, "%s: info1(%#lx), info2(%#lx), " 1681 "retinfo(%#lx)", __func__, info1, info2, *retinfo); 1682 } 1683 1684 return (valid); 1685} 1686 1687int 1688vm_get_intinfo(struct vm *vm, int vcpuid, uint64_t *info1, uint64_t *info2) 1689{ 1690 struct vcpu *vcpu; 1691 1692 if (vcpuid < 0 || vcpuid >= VM_MAXCPU) 1693 return (EINVAL); 1694 1695 vcpu = &vm->vcpu[vcpuid]; 1696 *info1 = vcpu->exitintinfo; 1697 *info2 = vcpu_exception_intinfo(vcpu); 1698 return (0); 1699} 1700 1701int 
1702vm_inject_exception(struct vm *vm, int vcpuid, struct vm_exception *exception) 1703{ 1704 struct vcpu *vcpu; 1705 1706 if (vcpuid < 0 || vcpuid >= VM_MAXCPU) 1707 return (EINVAL); 1708 1709 if (exception->vector < 0 || exception->vector >= 32) 1710 return (EINVAL); 1711 1712 /* 1713 * A double fault exception should never be injected directly into 1714 * the guest. It is a derived exception that results from specific 1715 * combinations of nested faults. 1716 */ 1717 if (exception->vector == IDT_DF) 1718 return (EINVAL); 1719 1720 vcpu = &vm->vcpu[vcpuid]; 1721 1722 if (vcpu->exception_pending) { 1723 VCPU_CTR2(vm, vcpuid, "Unable to inject exception %d due to " 1724 "pending exception %d", exception->vector, 1725 vcpu->exception.vector); 1726 return (EBUSY); 1727 } 1728 1729 vcpu->exception_pending = 1; 1730 vcpu->exception = *exception; 1731 VCPU_CTR1(vm, vcpuid, "Exception %d pending", exception->vector); 1732 return (0); 1733} 1734 1735void 1736vm_inject_fault(void *vmarg, int vcpuid, int vector, int errcode_valid, 1737 int errcode) 1738{ 1739 struct vm_exception exception; 1740 struct vm_exit *vmexit; 1741 struct vm *vm; 1742 int error; 1743 1744 vm = vmarg; 1745 1746 exception.vector = vector; 1747 exception.error_code = errcode; 1748 exception.error_code_valid = errcode_valid; 1749 error = vm_inject_exception(vm, vcpuid, &exception); 1750 KASSERT(error == 0, ("vm_inject_exception error %d", error)); 1751 1752 /* 1753 * A fault-like exception allows the instruction to be restarted 1754 * after the exception handler returns. 1755 * 1756 * By setting the inst_length to 0 we ensure that the instruction 1757 * pointer remains at the faulting instruction. 1758 */ 1759 vmexit = vm_exitinfo(vm, vcpuid); 1760 vmexit->inst_length = 0; 1761} 1762 1763void 1764vm_inject_pf(void *vmarg, int vcpuid, int error_code, uint64_t cr2) 1765{ 1766 struct vm *vm; 1767 int error; 1768 1769 vm = vmarg; 1770 VCPU_CTR2(vm, vcpuid, "Injecting page fault: error_code %#x, cr2 %#lx", 1771 error_code, cr2); 1772 1773 error = vm_set_register(vm, vcpuid, VM_REG_GUEST_CR2, cr2); 1774 KASSERT(error == 0, ("vm_set_register(cr2) error %d", error)); 1775 1776 vm_inject_fault(vm, vcpuid, IDT_PF, 1, error_code); 1777} 1778 1779static VMM_STAT(VCPU_NMI_COUNT, "number of NMIs delivered to vcpu"); 1780 1781int 1782vm_inject_nmi(struct vm *vm, int vcpuid) 1783{ 1784 struct vcpu *vcpu; 1785 1786 if (vcpuid < 0 || vcpuid >= VM_MAXCPU) 1787 return (EINVAL); 1788 1789 vcpu = &vm->vcpu[vcpuid]; 1790 1791 vcpu->nmi_pending = 1; 1792 vcpu_notify_event(vm, vcpuid, false); 1793 return (0); 1794} 1795 1796int 1797vm_nmi_pending(struct vm *vm, int vcpuid) 1798{ 1799 struct vcpu *vcpu; 1800 1801 if (vcpuid < 0 || vcpuid >= VM_MAXCPU) 1802 panic("vm_nmi_pending: invalid vcpuid %d", vcpuid); 1803 1804 vcpu = &vm->vcpu[vcpuid]; 1805 1806 return (vcpu->nmi_pending); 1807} 1808 1809void 1810vm_nmi_clear(struct vm *vm, int vcpuid) 1811{ 1812 struct vcpu *vcpu; 1813 1814 if (vcpuid < 0 || vcpuid >= VM_MAXCPU) 1815 panic("vm_nmi_pending: invalid vcpuid %d", vcpuid); 1816 1817 vcpu = &vm->vcpu[vcpuid]; 1818 1819 if (vcpu->nmi_pending == 0) 1820 panic("vm_nmi_clear: inconsistent nmi_pending state"); 1821 1822 vcpu->nmi_pending = 0; 1823 vmm_stat_incr(vm, vcpuid, VCPU_NMI_COUNT, 1); 1824} 1825 1826static VMM_STAT(VCPU_EXTINT_COUNT, "number of ExtINTs delivered to vcpu"); 1827 1828int 1829vm_inject_extint(struct vm *vm, int vcpuid) 1830{ 1831 struct vcpu *vcpu; 1832 1833 if (vcpuid < 0 || vcpuid >= VM_MAXCPU) 1834 return (EINVAL); 1835 1836 vcpu 
= &vm->vcpu[vcpuid]; 1837 1838 vcpu->extint_pending = 1; 1839 vcpu_notify_event(vm, vcpuid, false); 1840 return (0); 1841} 1842 1843int 1844vm_extint_pending(struct vm *vm, int vcpuid) 1845{ 1846 struct vcpu *vcpu; 1847 1848 if (vcpuid < 0 || vcpuid >= VM_MAXCPU) 1849 panic("vm_extint_pending: invalid vcpuid %d", vcpuid); 1850 1851 vcpu = &vm->vcpu[vcpuid]; 1852 1853 return (vcpu->extint_pending); 1854} 1855 1856void 1857vm_extint_clear(struct vm *vm, int vcpuid) 1858{ 1859 struct vcpu *vcpu; 1860 1861 if (vcpuid < 0 || vcpuid >= VM_MAXCPU) 1862 panic("vm_extint_pending: invalid vcpuid %d", vcpuid); 1863 1864 vcpu = &vm->vcpu[vcpuid]; 1865 1866 if (vcpu->extint_pending == 0) 1867 panic("vm_extint_clear: inconsistent extint_pending state"); 1868 1869 vcpu->extint_pending = 0; 1870 vmm_stat_incr(vm, vcpuid, VCPU_EXTINT_COUNT, 1); 1871} 1872 1873int 1874vm_get_capability(struct vm *vm, int vcpu, int type, int *retval) 1875{ 1876 if (vcpu < 0 || vcpu >= VM_MAXCPU) 1877 return (EINVAL); 1878 1879 if (type < 0 || type >= VM_CAP_MAX) 1880 return (EINVAL); 1881 1882 return (VMGETCAP(vm->cookie, vcpu, type, retval)); 1883} 1884 1885int 1886vm_set_capability(struct vm *vm, int vcpu, int type, int val) 1887{ 1888 if (vcpu < 0 || vcpu >= VM_MAXCPU) 1889 return (EINVAL); 1890 1891 if (type < 0 || type >= VM_CAP_MAX) 1892 return (EINVAL); 1893 1894 return (VMSETCAP(vm->cookie, vcpu, type, val)); 1895} 1896 1897struct vlapic * 1898vm_lapic(struct vm *vm, int cpu) 1899{ 1900 return (vm->vcpu[cpu].vlapic); 1901} 1902 1903struct vioapic * 1904vm_ioapic(struct vm *vm) 1905{ 1906 1907 return (vm->vioapic); 1908} 1909 1910struct vhpet * 1911vm_hpet(struct vm *vm) 1912{ 1913 1914 return (vm->vhpet); 1915} 1916 1917boolean_t 1918vmm_is_pptdev(int bus, int slot, int func) 1919{ 1920 int found, i, n; 1921 int b, s, f; 1922 char *val, *cp, *cp2; 1923 1924 /* 1925 * XXX 1926 * The length of an environment variable is limited to 128 bytes which 1927 * puts an upper limit on the number of passthru devices that may be 1928 * specified using a single environment variable. 1929 * 1930 * Work around this by scanning multiple environment variable 1931 * names instead of a single one - yuck! 
1932 */ 1933 const char *names[] = { "pptdevs", "pptdevs2", "pptdevs3", NULL }; 1934 1935 /* set pptdevs="1/2/3 4/5/6 7/8/9 10/11/12" */ 1936 found = 0; 1937 for (i = 0; names[i] != NULL && !found; i++) { 1938 cp = val = getenv(names[i]); 1939 while (cp != NULL && *cp != '\0') { 1940 if ((cp2 = strchr(cp, ' ')) != NULL) 1941 *cp2 = '\0'; 1942 1943 n = sscanf(cp, "%d/%d/%d", &b, &s, &f); 1944 if (n == 3 && bus == b && slot == s && func == f) { 1945 found = 1; 1946 break; 1947 } 1948 1949 if (cp2 != NULL) 1950 *cp2++ = ' '; 1951 1952 cp = cp2; 1953 } 1954 freeenv(val); 1955 } 1956 return (found); 1957} 1958 1959void * 1960vm_iommu_domain(struct vm *vm) 1961{ 1962 1963 return (vm->iommu); 1964} 1965 1966int 1967vcpu_set_state(struct vm *vm, int vcpuid, enum vcpu_state newstate, 1968 bool from_idle) 1969{ 1970 int error; 1971 struct vcpu *vcpu; 1972 1973 if (vcpuid < 0 || vcpuid >= VM_MAXCPU) 1974 panic("vm_set_run_state: invalid vcpuid %d", vcpuid); 1975 1976 vcpu = &vm->vcpu[vcpuid]; 1977 1978 vcpu_lock(vcpu); 1979 error = vcpu_set_state_locked(vcpu, newstate, from_idle); 1980 vcpu_unlock(vcpu); 1981 1982 return (error); 1983} 1984 1985enum vcpu_state 1986vcpu_get_state(struct vm *vm, int vcpuid, int *hostcpu) 1987{ 1988 struct vcpu *vcpu; 1989 enum vcpu_state state; 1990 1991 if (vcpuid < 0 || vcpuid >= VM_MAXCPU) 1992 panic("vm_get_run_state: invalid vcpuid %d", vcpuid); 1993 1994 vcpu = &vm->vcpu[vcpuid]; 1995 1996 vcpu_lock(vcpu); 1997 state = vcpu->state; 1998 if (hostcpu != NULL) 1999 *hostcpu = vcpu->hostcpu; 2000 vcpu_unlock(vcpu); 2001 2002 return (state); 2003} 2004 2005int 2006vm_activate_cpu(struct vm *vm, int vcpuid) 2007{ 2008 2009 if (vcpuid < 0 || vcpuid >= VM_MAXCPU) 2010 return (EINVAL); 2011 2012 if (CPU_ISSET(vcpuid, &vm->active_cpus)) 2013 return (EBUSY); 2014 2015 VCPU_CTR0(vm, vcpuid, "activated"); 2016 CPU_SET_ATOMIC(vcpuid, &vm->active_cpus); 2017 return (0); 2018} 2019 2020cpuset_t 2021vm_active_cpus(struct vm *vm) 2022{ 2023 2024 return (vm->active_cpus); 2025} 2026 2027cpuset_t 2028vm_suspended_cpus(struct vm *vm) 2029{ 2030 2031 return (vm->suspended_cpus); 2032} 2033 2034void * 2035vcpu_stats(struct vm *vm, int vcpuid) 2036{ 2037 2038 return (vm->vcpu[vcpuid].stats); 2039} 2040 2041int 2042vm_get_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state *state) 2043{ 2044 if (vcpuid < 0 || vcpuid >= VM_MAXCPU) 2045 return (EINVAL); 2046 2047 *state = vm->vcpu[vcpuid].x2apic_state; 2048 2049 return (0); 2050} 2051 2052int 2053vm_set_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state state) 2054{ 2055 if (vcpuid < 0 || vcpuid >= VM_MAXCPU) 2056 return (EINVAL); 2057 2058 if (state >= X2APIC_STATE_LAST) 2059 return (EINVAL); 2060 2061 vm->vcpu[vcpuid].x2apic_state = state; 2062 2063 vlapic_set_x2apic_state(vm, vcpuid, state); 2064 2065 return (0); 2066} 2067 2068/* 2069 * This function is called to ensure that a vcpu "sees" a pending event 2070 * as soon as possible: 2071 * - If the vcpu thread is sleeping then it is woken up. 2072 * - If the vcpu is running on a different host_cpu then an IPI will be directed 2073 * to the host_cpu to cause the vcpu to trap into the hypervisor. 
2074 */ 2075void 2076vcpu_notify_event(struct vm *vm, int vcpuid, bool lapic_intr) 2077{ 2078 int hostcpu; 2079 struct vcpu *vcpu; 2080 2081 vcpu = &vm->vcpu[vcpuid]; 2082 2083 vcpu_lock(vcpu); 2084 hostcpu = vcpu->hostcpu; 2085 if (vcpu->state == VCPU_RUNNING) { 2086 KASSERT(hostcpu != NOCPU, ("vcpu running on invalid hostcpu")); 2087 if (hostcpu != curcpu) { 2088 if (lapic_intr) { 2089 vlapic_post_intr(vcpu->vlapic, hostcpu, 2090 vmm_ipinum); 2091 } else { 2092 ipi_cpu(hostcpu, vmm_ipinum); 2093 } 2094 } else { 2095 /* 2096 * If the 'vcpu' is running on 'curcpu' then it must 2097 * be sending a notification to itself (e.g. SELF_IPI). 2098 * The pending event will be picked up when the vcpu 2099 * transitions back to guest context. 2100 */ 2101 } 2102 } else { 2103 KASSERT(hostcpu == NOCPU, ("vcpu state %d not consistent " 2104 "with hostcpu %d", vcpu->state, hostcpu)); 2105 if (vcpu->state == VCPU_SLEEPING) 2106 wakeup_one(vcpu); 2107 } 2108 vcpu_unlock(vcpu); 2109} 2110 2111struct vmspace * 2112vm_get_vmspace(struct vm *vm) 2113{ 2114 2115 return (vm->vmspace); 2116} 2117 2118int 2119vm_apicid2vcpuid(struct vm *vm, int apicid) 2120{ 2121 /* 2122 * XXX apic id is assumed to be numerically identical to vcpu id 2123 */ 2124 return (apicid); 2125} 2126 2127void 2128vm_smp_rendezvous(struct vm *vm, int vcpuid, cpuset_t dest, 2129 vm_rendezvous_func_t func, void *arg) 2130{ 2131 int i; 2132 2133 /* 2134 * Enforce that this function is called without any locks 2135 */ 2136 WITNESS_WARN(WARN_PANIC, NULL, "vm_smp_rendezvous"); 2137 KASSERT(vcpuid == -1 || (vcpuid >= 0 && vcpuid < VM_MAXCPU), 2138 ("vm_smp_rendezvous: invalid vcpuid %d", vcpuid)); 2139 2140restart: 2141 mtx_lock(&vm->rendezvous_mtx); 2142 if (vm->rendezvous_func != NULL) { 2143 /* 2144 * If a rendezvous is already in progress then we need to 2145 * call the rendezvous handler in case this 'vcpuid' is one 2146 * of the targets of the rendezvous. 2147 */ 2148 RENDEZVOUS_CTR0(vm, vcpuid, "Rendezvous already in progress"); 2149 mtx_unlock(&vm->rendezvous_mtx); 2150 vm_handle_rendezvous(vm, vcpuid); 2151 goto restart; 2152 } 2153 KASSERT(vm->rendezvous_func == NULL, ("vm_smp_rendezvous: previous " 2154 "rendezvous is still in progress")); 2155 2156 RENDEZVOUS_CTR0(vm, vcpuid, "Initiating rendezvous"); 2157 vm->rendezvous_req_cpus = dest; 2158 CPU_ZERO(&vm->rendezvous_done_cpus); 2159 vm->rendezvous_arg = arg; 2160 vm_set_rendezvous_func(vm, func); 2161 mtx_unlock(&vm->rendezvous_mtx); 2162 2163 /* 2164 * Wake up any sleeping vcpus and trigger a VM-exit in any running 2165 * vcpus so they handle the rendezvous as soon as possible. 
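
/*
 * Example (illustrative): running 'func' on every active vcpu and
 * waiting for completion; a vcpuid of -1 indicates a caller that is
 * not itself a vcpu thread:
 *
 *	vm_smp_rendezvous(vm, -1, vm_active_cpus(vm), func, arg);
 */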

struct vatpic *
vm_atpic(struct vm *vm)
{
	return (vm->vatpic);
}

struct vatpit *
vm_atpit(struct vm *vm)
{
	return (vm->vatpit);
}

enum vm_reg_name
vm_segment_name(int seg)
{
	static enum vm_reg_name seg_names[] = {
		VM_REG_GUEST_ES,
		VM_REG_GUEST_CS,
		VM_REG_GUEST_SS,
		VM_REG_GUEST_DS,
		VM_REG_GUEST_FS,
		VM_REG_GUEST_GS
	};

	KASSERT(seg >= 0 && seg < nitems(seg_names),
	    ("%s: invalid segment encoding %d", __func__, seg));
	return (seg_names[seg]);
}

void
vm_copy_teardown(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo,
    int num_copyinfo)
{
	int idx;

	for (idx = 0; idx < num_copyinfo; idx++) {
		if (copyinfo[idx].cookie != NULL)
			vm_gpa_release(copyinfo[idx].cookie);
	}
	bzero(copyinfo, num_copyinfo * sizeof(struct vm_copyinfo));
}

int
vm_copy_setup(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
    uint64_t gla, size_t len, int prot, struct vm_copyinfo *copyinfo,
    int num_copyinfo)
{
	int error, idx, nused;
	size_t n, off, remaining;
	void *hva, *cookie;
	uint64_t gpa;

	bzero(copyinfo, sizeof(struct vm_copyinfo) * num_copyinfo);

	nused = 0;
	remaining = len;
	while (remaining > 0) {
		KASSERT(nused < num_copyinfo, ("insufficient vm_copyinfo"));
		error = vmm_gla2gpa(vm, vcpuid, paging, gla, prot, &gpa);
		if (error)
			return (error);
		off = gpa & PAGE_MASK;
		n = min(remaining, PAGE_SIZE - off);
		copyinfo[nused].gpa = gpa;
		copyinfo[nused].len = n;
		remaining -= n;
		gla += n;
		nused++;
	}

	for (idx = 0; idx < nused; idx++) {
		hva = vm_gpa_hold(vm, copyinfo[idx].gpa, copyinfo[idx].len,
		    prot, &cookie);
		if (hva == NULL)
			break;
		copyinfo[idx].hva = hva;
		copyinfo[idx].cookie = cookie;
	}

	if (idx != nused) {
		vm_copy_teardown(vm, vcpuid, copyinfo, num_copyinfo);
		return (-1);
	} else {
		return (0);
	}
}

void
vm_copyin(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo, void *kaddr,
    size_t len)
{
	char *dst;
	int idx;

	dst = kaddr;
	idx = 0;
	while (len > 0) {
		bcopy(copyinfo[idx].hva, dst, copyinfo[idx].len);
		len -= copyinfo[idx].len;
		dst += copyinfo[idx].len;
		idx++;
	}
}

void
vm_copyout(struct vm *vm, int vcpuid, const void *kaddr,
    struct vm_copyinfo *copyinfo, size_t len)
{
	const char *src;
	int idx;

	src = kaddr;
	idx = 0;
	while (len > 0) {
		bcopy(src, copyinfo[idx].hva, copyinfo[idx].len);
		len -= copyinfo[idx].len;
		src += copyinfo[idx].len;
		idx++;
	}
}
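
/*
 * Example (illustrative sketch): copying a guest buffer that spans at
 * most two pages into a kernel buffer 'buf' of at least 'len' bytes:
 *
 *	struct vm_copyinfo copyinfo[2];
 *	if (vm_copy_setup(vm, vcpuid, paging, gla, len, VM_PROT_READ,
 *	    copyinfo, nitems(copyinfo)) == 0) {
 *		vm_copyin(vm, vcpuid, copyinfo, buf, len);
 *		vm_copy_teardown(vm, vcpuid, copyinfo, nitems(copyinfo));
 *	}
 */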
Since 2298 * these are global stats, only return the values with for vCPU 0 2299 */ 2300VMM_STAT_DECLARE(VMM_MEM_RESIDENT); 2301VMM_STAT_DECLARE(VMM_MEM_WIRED); 2302 2303static void 2304vm_get_rescnt(struct vm *vm, int vcpu, struct vmm_stat_type *stat) 2305{ 2306 2307 if (vcpu == 0) { 2308 vmm_stat_set(vm, vcpu, VMM_MEM_RESIDENT, 2309 PAGE_SIZE * vmspace_resident_count(vm->vmspace)); 2310 } 2311} 2312 2313static void 2314vm_get_wiredcnt(struct vm *vm, int vcpu, struct vmm_stat_type *stat) 2315{ 2316 2317 if (vcpu == 0) { 2318 vmm_stat_set(vm, vcpu, VMM_MEM_WIRED, 2319 PAGE_SIZE * pmap_wired_count(vmspace_pmap(vm->vmspace))); 2320 } 2321} 2322 2323VMM_STAT_FUNC(VMM_MEM_RESIDENT, "Resident memory", vm_get_rescnt); 2324VMM_STAT_FUNC(VMM_MEM_WIRED, "Wired memory", vm_get_wiredcnt); 2325