vmm.c revision 284894
/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/10/sys/amd64/vmm/vmm.c 284894 2015-06-27 22:48:22Z neel $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/amd64/vmm/vmm.c 284894 2015-06-27 22:48:22Z neel $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>

#include <machine/cpu.h>
#include <machine/vm.h>
#include <machine/pcb.h>
#include <machine/smp.h>
#include <x86/psl.h>
#include <x86/apicreg.h>
#include <machine/vmparam.h>

#include <machine/vmm.h>
#include <machine/vmm_dev.h>
#include <machine/vmm_instruction_emul.h>

#include "vmm_ioport.h"
#include "vmm_ktr.h"
#include "vmm_host.h"
#include "vmm_mem.h"
#include "vmm_util.h"
#include "vatpic.h"
#include "vatpit.h"
#include "vhpet.h"
#include "vioapic.h"
#include "vlapic.h"
#include "vpmtmr.h"
#include "vrtc.h"
#include "vmm_ipi.h"
#include "vmm_stat.h"
#include "vmm_lapic.h"

#include "io/ppt.h"
#include "io/iommu.h"

struct vlapic;

/*
 * Initialization:
 * (a) allocated when vcpu is created
 * (i) initialized when vcpu is created and when it is reinitialized
 * (o) initialized the first time the vcpu is created
 * (x) initialized before use
 */
struct vcpu {
	struct mtx 	mtx;		/* (o) protects 'state' and 'hostcpu' */
	enum vcpu_state	state;		/* (o) vcpu state */
	int		hostcpu;	/* (o) vcpu's host cpu */
	struct vlapic	*vlapic;	/* (i) APIC device model */
	enum x2apic_state x2apic_state;	/* (i) APIC mode */
	uint64_t	exitintinfo;	/* (i) events pending at VM exit */
	int		nmi_pending;	/* (i) NMI pending */
	int		extint_pending;	/* (i) INTR pending */
	int		exception_pending; /* (i) exception pending */
	int		exc_vector;	/* (x) exception collateral */
	int		exc_errcode_valid;
	uint32_t	exc_errcode;
	struct savefpu	*guestfpu;	/* (a,i) guest fpu state */
	uint64_t	guest_xcr0;	/* (i) guest %xcr0 register */
	void		*stats;		/* (a,i) statistics */
	struct vm_exit	exitinfo;	/* (x) exit reason and collateral */
	uint64_t	nextrip;	/* (x) next instruction to execute */
};

#define	vcpu_lock_initialized(v) mtx_initialized(&((v)->mtx))
#define	vcpu_lock_init(v)	mtx_init(&((v)->mtx), "vcpu lock", 0, MTX_SPIN)
#define	vcpu_lock(v)		mtx_lock_spin(&((v)->mtx))
#define	vcpu_unlock(v)		mtx_unlock_spin(&((v)->mtx))
#define	vcpu_assert_locked(v)	mtx_assert(&((v)->mtx), MA_OWNED)

struct mem_seg {
	vm_paddr_t	gpa;
	size_t		len;
	boolean_t	wired;
	vm_object_t	object;
};
#define	VM_MAX_MEMORY_SEGMENTS	2

/*
 * Initialization:
 * (o) initialized the first time the VM is created
 * (i) initialized when VM is created and when it is reinitialized
 * (x) initialized before use
 */
struct vm {
	void		*cookie;		/* (i) cpu-specific data */
	void		*iommu;			/* (x) iommu-specific data */
	struct vhpet	*vhpet;			/* (i) virtual HPET */
	struct vioapic	*vioapic;		/* (i) virtual ioapic */
	struct vatpic	*vatpic;		/* (i) virtual atpic */
	struct vatpit	*vatpit;		/* (i) virtual atpit */
	struct vpmtmr	*vpmtmr;		/* (i) virtual ACPI PM timer */
	struct vrtc	*vrtc;			/* (o) virtual RTC */
	volatile cpuset_t active_cpus;		/* (i) active vcpus */
	int		suspend;		/* (i) stop VM execution */
	volatile cpuset_t suspended_cpus;	/* (i) suspended vcpus */
	volatile cpuset_t halted_cpus;		/* (x) cpus in a hard halt */
	cpuset_t	rendezvous_req_cpus;	/* (x) rendezvous requested */
	cpuset_t	rendezvous_done_cpus;	/* (x) rendezvous finished */
	void		*rendezvous_arg;	/* (x) rendezvous func/arg */
	vm_rendezvous_func_t rendezvous_func;
	struct mtx	rendezvous_mtx;		/* (o) rendezvous lock */
	int		num_mem_segs;		/* (o) guest memory segments */
	struct mem_seg	mem_segs[VM_MAX_MEMORY_SEGMENTS];
	struct vmspace	*vmspace;		/* (o) guest's address space */
	char		name[VM_MAX_NAMELEN];	/* (o) virtual machine name */
	struct vcpu	vcpu[VM_MAXCPU];	/* (i) guest vcpus */
};

static int vmm_initialized;

static struct vmm_ops *ops;
#define	VMM_INIT(num)	(ops != NULL ? (*ops->init)(num) : 0)
#define	VMM_CLEANUP()	(ops != NULL ? (*ops->cleanup)() : 0)
#define	VMM_RESUME()	(ops != NULL ? (*ops->resume)() : 0)

#define	VMINIT(vm, pmap) (ops != NULL ? (*ops->vminit)(vm, pmap): NULL)
#define	VMRUN(vmi, vcpu, rip, pmap, rptr, sptr) \
	(ops != NULL ? (*ops->vmrun)(vmi, vcpu, rip, pmap, rptr, sptr) : ENXIO)
#define	VMCLEANUP(vmi)	(ops != NULL ? (*ops->vmcleanup)(vmi) : NULL)
#define	VMSPACE_ALLOC(min, max) \
	(ops != NULL ? (*ops->vmspace_alloc)(min, max) : NULL)
#define	VMSPACE_FREE(vmspace) \
	(ops != NULL ? (*ops->vmspace_free)(vmspace) : ENXIO)
#define	VMGETREG(vmi, vcpu, num, retval)	\
	(ops != NULL ? (*ops->vmgetreg)(vmi, vcpu, num, retval) : ENXIO)
#define	VMSETREG(vmi, vcpu, num, val)		\
	(ops != NULL ? (*ops->vmsetreg)(vmi, vcpu, num, val) : ENXIO)
#define	VMGETDESC(vmi, vcpu, num, desc)		\
	(ops != NULL ? (*ops->vmgetdesc)(vmi, vcpu, num, desc) : ENXIO)
#define	VMSETDESC(vmi, vcpu, num, desc)		\
	(ops != NULL ? (*ops->vmsetdesc)(vmi, vcpu, num, desc) : ENXIO)
#define	VMGETCAP(vmi, vcpu, num, retval)	\
	(ops != NULL ? (*ops->vmgetcap)(vmi, vcpu, num, retval) : ENXIO)
#define	VMSETCAP(vmi, vcpu, num, val)		\
	(ops != NULL ? (*ops->vmsetcap)(vmi, vcpu, num, val) : ENXIO)
#define	VLAPIC_INIT(vmi, vcpu)			\
	(ops != NULL ? (*ops->vlapic_init)(vmi, vcpu) : NULL)
#define	VLAPIC_CLEANUP(vmi, vlapic)		\
	(ops != NULL ? (*ops->vlapic_cleanup)(vmi, vlapic) : NULL)

#define	fpu_start_emulating()	load_cr0(rcr0() | CR0_TS)
#define	fpu_stop_emulating()	clts()

static MALLOC_DEFINE(M_VM, "vm", "vm");

/* statistics */
static VMM_STAT(VCPU_TOTAL_RUNTIME, "vcpu total runtime");

SYSCTL_NODE(_hw, OID_AUTO, vmm, CTLFLAG_RW, NULL, NULL);

/*
 * Halt the guest if all vcpus are executing a HLT instruction with
 * interrupts disabled.
 */
static int halt_detection_enabled = 1;
TUNABLE_INT("hw.vmm.halt_detection", &halt_detection_enabled);
SYSCTL_INT(_hw_vmm, OID_AUTO, halt_detection, CTLFLAG_RDTUN,
    &halt_detection_enabled, 0,
    "Halt VM if all vcpus execute HLT with interrupts disabled");

static int vmm_ipinum;
SYSCTL_INT(_hw_vmm, OID_AUTO, ipinum, CTLFLAG_RD, &vmm_ipinum, 0,
    "IPI vector used for vcpu notifications");

static int trace_guest_exceptions;
SYSCTL_INT(_hw_vmm, OID_AUTO, trace_guest_exceptions, CTLFLAG_RDTUN,
    &trace_guest_exceptions, 0,
    "Trap into hypervisor on all guest exceptions and reflect them back");

static void
vcpu_cleanup(struct vm *vm, int i, bool destroy)
{
	struct vcpu *vcpu = &vm->vcpu[i];

	VLAPIC_CLEANUP(vm->cookie, vcpu->vlapic);
	if (destroy) {
		vmm_stat_free(vcpu->stats);
		fpu_save_area_free(vcpu->guestfpu);
	}
}

static void
vcpu_init(struct vm *vm, int vcpu_id, bool create)
{
	struct vcpu *vcpu;

	KASSERT(vcpu_id >= 0 && vcpu_id < VM_MAXCPU,
	    ("vcpu_init: invalid vcpu %d", vcpu_id));

	vcpu = &vm->vcpu[vcpu_id];

	if (create) {
		KASSERT(!vcpu_lock_initialized(vcpu), ("vcpu %d already "
		    "initialized", vcpu_id));
		vcpu_lock_init(vcpu);
		vcpu->state = VCPU_IDLE;
		vcpu->hostcpu = NOCPU;
		vcpu->guestfpu = fpu_save_area_alloc();
		vcpu->stats = vmm_stat_alloc();
	}

	vcpu->vlapic = VLAPIC_INIT(vm->cookie, vcpu_id);
	vm_set_x2apic_state(vm, vcpu_id, X2APIC_DISABLED);
	vcpu->exitintinfo = 0;
	vcpu->nmi_pending = 0;
	vcpu->extint_pending = 0;
	vcpu->exception_pending = 0;
	vcpu->guest_xcr0 = XFEATURE_ENABLED_X87;
	fpu_save_area_reset(vcpu->guestfpu);
	vmm_stat_init(vcpu->stats);
}

int
vcpu_trace_exceptions(struct vm *vm, int vcpuid)
{

	return (trace_guest_exceptions);
}

struct vm_exit *
vm_exitinfo(struct vm *vm, int cpuid)
{
	struct vcpu *vcpu;

	if (cpuid < 0 || cpuid >= VM_MAXCPU)
		panic("vm_exitinfo: invalid cpuid %d", cpuid);

	vcpu = &vm->vcpu[cpuid];

	return (&vcpu->exitinfo);
}

static void
vmm_resume(void)
{
	VMM_RESUME();
}

static int
vmm_init(void)
{
	int error;

	vmm_host_state_init();

	vmm_ipinum = vmm_ipi_alloc();
	if (vmm_ipinum == 0)
		vmm_ipinum = IPI_AST;

	error = vmm_mem_init();
	if (error)
		return (error);

	if (vmm_is_intel())
		ops = &vmm_ops_intel;
	else if (vmm_is_amd())
		ops = &vmm_ops_amd;
	else
		return (ENXIO);

	vmm_resume_p = vmm_resume;

	return (VMM_INIT(vmm_ipinum));
}

static int
vmm_handler(module_t mod, int what, void *arg)
{
	int error;

	switch (what) {
	case MOD_LOAD:
		vmmdev_init();
		if (ppt_avail_devices() > 0)
			iommu_init();
		error = vmm_init();
		if (error == 0)
			vmm_initialized = 1;
		break;
	case MOD_UNLOAD:
		error = vmmdev_cleanup();
		if (error == 0) {
			vmm_resume_p = NULL;
			iommu_cleanup();
			if (vmm_ipinum != IPI_AST)
				vmm_ipi_free(vmm_ipinum);
			error = VMM_CLEANUP();
			/*
			 * Something bad happened - prevent new
			 * VMs from being created
			 */
			if (error)
				vmm_initialized = 0;
		}
		break;
	default:
		error = 0;
		break;
	}
	return (error);
}

static moduledata_t vmm_kmod = {
	"vmm",
	vmm_handler,
	NULL
};

/*
 * vmm initialization has the following dependencies:
 *
 * - iommu initialization must happen after the pci passthru driver has had
 *   a chance to attach to any passthru devices (after SI_SUB_CONFIGURE).
 *
 * - VT-x initialization requires smp_rendezvous() and therefore must happen
 *   after SMP is fully functional (after SI_SUB_SMP).
 */
DECLARE_MODULE(vmm, vmm_kmod, SI_SUB_SMP + 1, SI_ORDER_ANY);
MODULE_VERSION(vmm, 1);

static void
vm_init(struct vm *vm, bool create)
{
	int i;

	vm->cookie = VMINIT(vm, vmspace_pmap(vm->vmspace));
	vm->iommu = NULL;
	vm->vioapic = vioapic_init(vm);
	vm->vhpet = vhpet_init(vm);
	vm->vatpic = vatpic_init(vm);
	vm->vatpit = vatpit_init(vm);
	vm->vpmtmr = vpmtmr_init(vm);
	if (create)
		vm->vrtc = vrtc_init(vm);

	CPU_ZERO(&vm->active_cpus);

	vm->suspend = 0;
	CPU_ZERO(&vm->suspended_cpus);

	for (i = 0; i < VM_MAXCPU; i++)
		vcpu_init(vm, i, create);
}

int
vm_create(const char *name, struct vm **retvm)
{
	struct vm *vm;
	struct vmspace *vmspace;

	/*
	 * If vmm.ko could not be successfully initialized then don't attempt
	 * to create the virtual machine.
	 */
	if (!vmm_initialized)
		return (ENXIO);

	if (name == NULL || strlen(name) >= VM_MAX_NAMELEN)
		return (EINVAL);

	vmspace = VMSPACE_ALLOC(0, VM_MAXUSER_ADDRESS);
	if (vmspace == NULL)
		return (ENOMEM);

	vm = malloc(sizeof(struct vm), M_VM, M_WAITOK | M_ZERO);
	strcpy(vm->name, name);
	vm->num_mem_segs = 0;
	vm->vmspace = vmspace;
	mtx_init(&vm->rendezvous_mtx, "vm rendezvous lock", 0, MTX_DEF);

	vm_init(vm, true);

	*retvm = vm;
	return (0);
}
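
/*
 * Illustrative sketch (not part of the original source): the vmmdev ioctl
 * layer drives a VM through roughly the lifecycle below.  The guest memory
 * size and VM name are hypothetical and error handling is elided.
 *
 *	struct vm *vm;
 *	struct vm_run vmrun;
 *
 *	error = vm_create("guest0", &vm);		// vmspace + vcpus
 *	error = vm_malloc(vm, 0, 128 * 1024 * 1024);	// guest memory at gpa 0
 *	error = vm_activate_cpu(vm, 0);			// mark vcpu 0 active
 *	vmrun.cpuid = 0;
 *	error = vm_run(vm, &vmrun);	// returns on userland-visible exits
 *	vm_destroy(vm);			// tear everything down
 */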

static void
vm_free_mem_seg(struct vm *vm, struct mem_seg *seg)
{

	if (seg->object != NULL)
		vmm_mem_free(vm->vmspace, seg->gpa, seg->len);

	bzero(seg, sizeof(*seg));
}

static void
vm_cleanup(struct vm *vm, bool destroy)
{
	int i;

	ppt_unassign_all(vm);

	if (vm->iommu != NULL)
		iommu_destroy_domain(vm->iommu);

	if (destroy)
		vrtc_cleanup(vm->vrtc);
	else
		vrtc_reset(vm->vrtc);
	vpmtmr_cleanup(vm->vpmtmr);
	vatpit_cleanup(vm->vatpit);
	vhpet_cleanup(vm->vhpet);
	vatpic_cleanup(vm->vatpic);
	vioapic_cleanup(vm->vioapic);

	for (i = 0; i < VM_MAXCPU; i++)
		vcpu_cleanup(vm, i, destroy);

	VMCLEANUP(vm->cookie);

	if (destroy) {
		for (i = 0; i < vm->num_mem_segs; i++)
			vm_free_mem_seg(vm, &vm->mem_segs[i]);

		vm->num_mem_segs = 0;

		VMSPACE_FREE(vm->vmspace);
		vm->vmspace = NULL;
	}
}

void
vm_destroy(struct vm *vm)
{
	vm_cleanup(vm, true);
	free(vm, M_VM);
}

int
vm_reinit(struct vm *vm)
{
	int error;

	/*
	 * A virtual machine can be reset only if all vcpus are suspended.
	 */
	if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) {
		vm_cleanup(vm, false);
		vm_init(vm, false);
		error = 0;
	} else {
		error = EBUSY;
	}

	return (error);
}

const char *
vm_name(struct vm *vm)
{
	return (vm->name);
}

int
vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
{
	vm_object_t obj;

	if ((obj = vmm_mmio_alloc(vm->vmspace, gpa, len, hpa)) == NULL)
		return (ENOMEM);
	else
		return (0);
}

int
vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len)
{

	vmm_mmio_free(vm->vmspace, gpa, len);
	return (0);
}

boolean_t
vm_mem_allocated(struct vm *vm, vm_paddr_t gpa)
{
	int i;
	vm_paddr_t gpabase, gpalimit;

	for (i = 0; i < vm->num_mem_segs; i++) {
		gpabase = vm->mem_segs[i].gpa;
		gpalimit = gpabase + vm->mem_segs[i].len;
		if (gpa >= gpabase && gpa < gpalimit)
			return (TRUE);		/* 'gpa' is regular memory */
	}

	if (ppt_is_mmio(vm, gpa))
		return (TRUE);			/* 'gpa' is pci passthru mmio */

	return (FALSE);
}

int
vm_malloc(struct vm *vm, vm_paddr_t gpa, size_t len)
{
	int available, allocated;
	struct mem_seg *seg;
	vm_object_t object;
	vm_paddr_t g;

	if ((gpa & PAGE_MASK) || (len & PAGE_MASK) || len == 0)
		return (EINVAL);

	available = allocated = 0;
	g = gpa;
	while (g < gpa + len) {
		if (vm_mem_allocated(vm, g))
			allocated++;
		else
			available++;

		g += PAGE_SIZE;
	}

	/*
	 * If there are some allocated and some available pages in the address
	 * range then it is an error.
	 */
	if (allocated && available)
		return (EINVAL);

	/*
	 * If the entire address range being requested has already been
	 * allocated then there isn't anything more to do.
	 */
	if (allocated && available == 0)
		return (0);

	if (vm->num_mem_segs >= VM_MAX_MEMORY_SEGMENTS)
		return (E2BIG);

	seg = &vm->mem_segs[vm->num_mem_segs];

	if ((object = vmm_mem_alloc(vm->vmspace, gpa, len)) == NULL)
		return (ENOMEM);

	seg->gpa = gpa;
	seg->len = len;
	seg->object = object;
	seg->wired = FALSE;

	vm->num_mem_segs++;

	return (0);
}

static vm_paddr_t
vm_maxmem(struct vm *vm)
{
	int i;
	vm_paddr_t gpa, maxmem;

	maxmem = 0;
	for (i = 0; i < vm->num_mem_segs; i++) {
		gpa = vm->mem_segs[i].gpa + vm->mem_segs[i].len;
		if (gpa > maxmem)
			maxmem = gpa;
	}
	return (maxmem);
}

static void
vm_gpa_unwire(struct vm *vm)
{
	int i, rv;
	struct mem_seg *seg;

	for (i = 0; i < vm->num_mem_segs; i++) {
		seg = &vm->mem_segs[i];
		if (!seg->wired)
			continue;

		rv = vm_map_unwire(&vm->vmspace->vm_map,
		    seg->gpa, seg->gpa + seg->len,
		    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
		KASSERT(rv == KERN_SUCCESS, ("vm(%s) memory segment "
		    "%#lx/%ld could not be unwired: %d",
		    vm_name(vm), seg->gpa, seg->len, rv));

		seg->wired = FALSE;
	}
}

static int
vm_gpa_wire(struct vm *vm)
{
	int i, rv;
	struct mem_seg *seg;

	for (i = 0; i < vm->num_mem_segs; i++) {
		seg = &vm->mem_segs[i];
		if (seg->wired)
			continue;

		/* XXX rlimits? */
		rv = vm_map_wire(&vm->vmspace->vm_map,
		    seg->gpa, seg->gpa + seg->len,
		    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
		if (rv != KERN_SUCCESS)
			break;

		seg->wired = TRUE;
	}

	if (i < vm->num_mem_segs) {
		/*
		 * Undo the wiring before returning an error.
		 */
		vm_gpa_unwire(vm);
		return (EAGAIN);
	}

	return (0);
}

static void
vm_iommu_modify(struct vm *vm, boolean_t map)
{
	int i, sz;
	vm_paddr_t gpa, hpa;
	struct mem_seg *seg;
	void *vp, *cookie, *host_domain;

	sz = PAGE_SIZE;
	host_domain = iommu_host_domain();

	for (i = 0; i < vm->num_mem_segs; i++) {
		seg = &vm->mem_segs[i];
		KASSERT(seg->wired, ("vm(%s) memory segment %#lx/%ld not wired",
		    vm_name(vm), seg->gpa, seg->len));

		gpa = seg->gpa;
		while (gpa < seg->gpa + seg->len) {
			vp = vm_gpa_hold(vm, gpa, PAGE_SIZE, VM_PROT_WRITE,
			    &cookie);
			KASSERT(vp != NULL, ("vm(%s) could not map gpa %#lx",
			    vm_name(vm), gpa));

			vm_gpa_release(cookie);

			hpa = DMAP_TO_PHYS((uintptr_t)vp);
			if (map) {
				iommu_create_mapping(vm->iommu, gpa, hpa, sz);
				iommu_remove_mapping(host_domain, hpa, sz);
			} else {
				iommu_remove_mapping(vm->iommu, gpa, sz);
				iommu_create_mapping(host_domain, hpa, hpa, sz);
			}

			gpa += PAGE_SIZE;
		}
	}

	/*
	 * Invalidate the cached translations associated with the domain
	 * from which pages were removed.
	 */
	if (map)
		iommu_invalidate_tlb(host_domain);
	else
		iommu_invalidate_tlb(vm->iommu);
}

#define	vm_iommu_unmap(vm)	vm_iommu_modify((vm), FALSE)
#define	vm_iommu_map(vm)	vm_iommu_modify((vm), TRUE)

int
vm_unassign_pptdev(struct vm *vm, int bus, int slot, int func)
{
	int error;

	error = ppt_unassign_device(vm, bus, slot, func);
	if (error)
		return (error);

	if (ppt_assigned_devices(vm) == 0) {
		vm_iommu_unmap(vm);
		vm_gpa_unwire(vm);
	}
	return (0);
}

int
vm_assign_pptdev(struct vm *vm, int bus, int slot, int func)
{
	int error;
	vm_paddr_t maxaddr;

	/*
	 * Virtual machines with pci passthru devices get special treatment:
	 * - the guest physical memory is wired
	 * - the iommu is programmed to do the 'gpa' to 'hpa' translation
	 *
	 * We need to do this before the first pci passthru device is attached.
	 */
	if (ppt_assigned_devices(vm) == 0) {
		KASSERT(vm->iommu == NULL,
		    ("vm_assign_pptdev: iommu must be NULL"));
		maxaddr = vm_maxmem(vm);
		vm->iommu = iommu_create_domain(maxaddr);

		error = vm_gpa_wire(vm);
		if (error)
			return (error);

		vm_iommu_map(vm);
	}

	error = ppt_assign_device(vm, bus, slot, func);
	return (error);
}

void *
vm_gpa_hold(struct vm *vm, vm_paddr_t gpa, size_t len, int reqprot,
    void **cookie)
{
	int count, pageoff;
	vm_page_t m;

	pageoff = gpa & PAGE_MASK;
	if (len > PAGE_SIZE - pageoff)
		panic("vm_gpa_hold: invalid gpa/len: 0x%016lx/%lu", gpa, len);

	count = vm_fault_quick_hold_pages(&vm->vmspace->vm_map,
	    trunc_page(gpa), PAGE_SIZE, reqprot, &m, 1);

	if (count == 1) {
		*cookie = m;
		return ((void *)(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)) + pageoff));
	} else {
		*cookie = NULL;
		return (NULL);
	}
}

void
vm_gpa_release(void *cookie)
{
	vm_page_t m = cookie;

	vm_page_lock(m);
	vm_page_unhold(m);
	vm_page_unlock(m);
}

int
vm_gpabase2memseg(struct vm *vm, vm_paddr_t gpabase,
    struct vm_memory_segment *seg)
{
	int i;

	for (i = 0; i < vm->num_mem_segs; i++) {
		if (gpabase == vm->mem_segs[i].gpa) {
			seg->gpa = vm->mem_segs[i].gpa;
			seg->len = vm->mem_segs[i].len;
			seg->wired = vm->mem_segs[i].wired;
			return (0);
		}
	}
	return (-1);
}

int
vm_get_memobj(struct vm *vm, vm_paddr_t gpa, size_t len,
    vm_offset_t *offset, struct vm_object **object)
{
	int i;
	size_t seg_len;
	vm_paddr_t seg_gpa;
	vm_object_t seg_obj;

	for (i = 0; i < vm->num_mem_segs; i++) {
		if ((seg_obj = vm->mem_segs[i].object) == NULL)
			continue;

		seg_gpa = vm->mem_segs[i].gpa;
		seg_len = vm->mem_segs[i].len;

		if (gpa >= seg_gpa && gpa < seg_gpa + seg_len) {
			*offset = gpa - seg_gpa;
			*object = seg_obj;
			vm_object_reference(seg_obj);
			return (0);
		}
	}

	return (EINVAL);
}

int
vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval)
{

	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (reg >= VM_REG_LAST)
		return (EINVAL);

	return (VMGETREG(vm->cookie, vcpu, reg, retval));
}

int
vm_set_register(struct vm *vm, int vcpuid, int reg, uint64_t val)
{
	struct vcpu *vcpu;
	int error;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	if (reg >= VM_REG_LAST)
		return (EINVAL);

	error = VMSETREG(vm->cookie, vcpuid, reg, val);
	if (error || reg != VM_REG_GUEST_RIP)
		return (error);

	/* Set 'nextrip' to match the value of %rip */
	VCPU_CTR1(vm, vcpuid, "Setting nextrip to %#lx", val);
	vcpu = &vm->vcpu[vcpuid];
	vcpu->nextrip = val;
	return (0);
}

static boolean_t
is_descriptor_table(int reg)
{

	switch (reg) {
	case VM_REG_GUEST_IDTR:
	case VM_REG_GUEST_GDTR:
		return (TRUE);
	default:
		return (FALSE);
	}
}

static boolean_t
is_segment_register(int reg)
{

	switch (reg) {
	case VM_REG_GUEST_ES:
	case VM_REG_GUEST_CS:
	case VM_REG_GUEST_SS:
	case VM_REG_GUEST_DS:
	case VM_REG_GUEST_FS:
	case VM_REG_GUEST_GS:
	case VM_REG_GUEST_TR:
	case VM_REG_GUEST_LDTR:
		return (TRUE);
	default:
		return (FALSE);
	}
}

int
vm_get_seg_desc(struct vm *vm, int vcpu, int reg,
    struct seg_desc *desc)
{

	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (!is_segment_register(reg) && !is_descriptor_table(reg))
		return (EINVAL);

	return (VMGETDESC(vm->cookie, vcpu, reg, desc));
}

int
vm_set_seg_desc(struct vm *vm, int vcpu, int reg,
    struct seg_desc *desc)
{
	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (!is_segment_register(reg) && !is_descriptor_table(reg))
		return (EINVAL);

	return (VMSETDESC(vm->cookie, vcpu, reg, desc));
}

static void
restore_guest_fpustate(struct vcpu *vcpu)
{

	/* flush host state to the pcb */
	fpuexit(curthread);

	/* restore guest FPU state */
	fpu_stop_emulating();
	fpurestore(vcpu->guestfpu);

	/* restore guest XCR0 if XSAVE is enabled in the host */
	if (rcr4() & CR4_XSAVE)
		load_xcr(0, vcpu->guest_xcr0);

	/*
	 * The FPU is now "dirty" with the guest's state so turn on emulation
	 * to trap any access to the FPU by the host.
	 */
	fpu_start_emulating();
}

static void
save_guest_fpustate(struct vcpu *vcpu)
{

	if ((rcr0() & CR0_TS) == 0)
		panic("fpu emulation not enabled in host!");

	/* save guest XCR0 and restore host XCR0 */
	if (rcr4() & CR4_XSAVE) {
		vcpu->guest_xcr0 = rxcr(0);
		load_xcr(0, vmm_get_host_xcr0());
	}

	/* save guest FPU state */
	fpu_stop_emulating();
	fpusave(vcpu->guestfpu);
	fpu_start_emulating();
}

static VMM_STAT(VCPU_IDLE_TICKS, "number of ticks vcpu was idle");

static int
vcpu_set_state_locked(struct vcpu *vcpu, enum vcpu_state newstate,
    bool from_idle)
{
	int error;

	vcpu_assert_locked(vcpu);

	/*
	 * State transitions from the vmmdev_ioctl() must always begin from
	 * the VCPU_IDLE state. This guarantees that there is only a single
	 * ioctl() operating on a vcpu at any point.
	 */
	if (from_idle) {
		while (vcpu->state != VCPU_IDLE)
			msleep_spin(&vcpu->state, &vcpu->mtx, "vmstat", hz);
	} else {
		KASSERT(vcpu->state != VCPU_IDLE, ("invalid transition from "
		    "vcpu idle state"));
	}

	if (vcpu->state == VCPU_RUNNING) {
		KASSERT(vcpu->hostcpu == curcpu, ("curcpu %d and hostcpu %d "
		    "mismatch for running vcpu", curcpu, vcpu->hostcpu));
	} else {
		KASSERT(vcpu->hostcpu == NOCPU, ("Invalid hostcpu %d for a "
		    "vcpu that is not running", vcpu->hostcpu));
	}

	/*
	 * The following state transitions are allowed:
	 * IDLE -> FROZEN -> IDLE
	 * FROZEN -> RUNNING -> FROZEN
	 * FROZEN -> SLEEPING -> FROZEN
	 */
	switch (vcpu->state) {
	case VCPU_IDLE:
	case VCPU_RUNNING:
	case VCPU_SLEEPING:
		error = (newstate != VCPU_FROZEN);
		break;
	case VCPU_FROZEN:
		error = (newstate == VCPU_FROZEN);
		break;
	default:
		error = 1;
		break;
	}

	if (error)
		return (EBUSY);

	vcpu->state = newstate;
	if (newstate == VCPU_RUNNING)
		vcpu->hostcpu = curcpu;
	else
		vcpu->hostcpu = NOCPU;

	if (newstate == VCPU_IDLE)
		wakeup(&vcpu->state);

	return (0);
}
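
/*
 * Illustrative sketch (not from the original source): a vmmdev ioctl that
 * needs a stable vcpu freezes it around the operation.  The transition must
 * start and end at VCPU_IDLE, matching the diagram above.
 *
 *	error = vcpu_set_state(vm, vcpuid, VCPU_FROZEN, true);
 *	if (error == 0) {
 *		... operate on the frozen vcpu ...
 *		(void) vcpu_set_state(vm, vcpuid, VCPU_IDLE, false);
 *	}
 */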

static void
vcpu_require_state(struct vm *vm, int vcpuid, enum vcpu_state newstate)
{
	int error;

	if ((error = vcpu_set_state(vm, vcpuid, newstate, false)) != 0)
		panic("Error %d setting state to %d\n", error, newstate);
}

static void
vcpu_require_state_locked(struct vcpu *vcpu, enum vcpu_state newstate)
{
	int error;

	if ((error = vcpu_set_state_locked(vcpu, newstate, false)) != 0)
		panic("Error %d setting state to %d", error, newstate);
}

static void
vm_set_rendezvous_func(struct vm *vm, vm_rendezvous_func_t func)
{

	KASSERT(mtx_owned(&vm->rendezvous_mtx), ("rendezvous_mtx not locked"));

	/*
	 * Update 'rendezvous_func' and execute a write memory barrier to
	 * ensure that it is visible across all host cpus. This is not needed
	 * for correctness but it does ensure that all the vcpus will notice
	 * that the rendezvous is requested immediately.
	 */
	vm->rendezvous_func = func;
	wmb();
}

#define	RENDEZVOUS_CTR0(vm, vcpuid, fmt)				\
	do {								\
		if (vcpuid >= 0)					\
			VCPU_CTR0(vm, vcpuid, fmt);			\
		else							\
			VM_CTR0(vm, fmt);				\
	} while (0)

static void
vm_handle_rendezvous(struct vm *vm, int vcpuid)
{

	KASSERT(vcpuid == -1 || (vcpuid >= 0 && vcpuid < VM_MAXCPU),
	    ("vm_handle_rendezvous: invalid vcpuid %d", vcpuid));

	mtx_lock(&vm->rendezvous_mtx);
	while (vm->rendezvous_func != NULL) {
		/* 'rendezvous_req_cpus' must be a subset of 'active_cpus' */
		CPU_AND(&vm->rendezvous_req_cpus, &vm->active_cpus);

		if (vcpuid != -1 &&
		    CPU_ISSET(vcpuid, &vm->rendezvous_req_cpus) &&
		    !CPU_ISSET(vcpuid, &vm->rendezvous_done_cpus)) {
			VCPU_CTR0(vm, vcpuid, "Calling rendezvous func");
			(*vm->rendezvous_func)(vm, vcpuid, vm->rendezvous_arg);
			CPU_SET(vcpuid, &vm->rendezvous_done_cpus);
		}
		if (CPU_CMP(&vm->rendezvous_req_cpus,
		    &vm->rendezvous_done_cpus) == 0) {
			VCPU_CTR0(vm, vcpuid, "Rendezvous completed");
			vm_set_rendezvous_func(vm, NULL);
			wakeup(&vm->rendezvous_func);
			break;
		}
		RENDEZVOUS_CTR0(vm, vcpuid, "Wait for rendezvous completion");
		mtx_sleep(&vm->rendezvous_func, &vm->rendezvous_mtx, 0,
		    "vmrndv", 0);
	}
	mtx_unlock(&vm->rendezvous_mtx);
}

/*
 * Emulate a guest 'hlt' by sleeping until the vcpu is ready to run.
 */
static int
vm_handle_hlt(struct vm *vm, int vcpuid, bool intr_disabled, bool *retu)
{
	struct vcpu *vcpu;
	const char *wmesg;
	int t, vcpu_halted, vm_halted;

	KASSERT(!CPU_ISSET(vcpuid, &vm->halted_cpus), ("vcpu already halted"));

	vcpu = &vm->vcpu[vcpuid];
	vcpu_halted = 0;
	vm_halted = 0;

	vcpu_lock(vcpu);
	while (1) {
		/*
		 * Do a final check for pending NMI or interrupts before
		 * really putting this thread to sleep. Also check for
		 * software events that would cause this vcpu to wakeup.
		 *
		 * These interrupts/events could have happened after the
		 * vcpu returned from VMRUN() and before it acquired the
		 * vcpu lock above.
		 */
		if (vm->rendezvous_func != NULL || vm->suspend)
			break;
		if (vm_nmi_pending(vm, vcpuid))
			break;
		if (!intr_disabled) {
			if (vm_extint_pending(vm, vcpuid) ||
			    vlapic_pending_intr(vcpu->vlapic, NULL)) {
				break;
			}
		}

		/* Don't go to sleep if the vcpu thread needs to yield */
		if (vcpu_should_yield(vm, vcpuid))
			break;

		/*
		 * Some Linux guests implement "halt" by having all vcpus
		 * execute HLT with interrupts disabled. 'halted_cpus' keeps
		 * track of the vcpus that have entered this state. When all
		 * vcpus enter the halted state the virtual machine is halted.
		 */
		if (intr_disabled) {
			wmesg = "vmhalt";
			VCPU_CTR0(vm, vcpuid, "Halted");
			if (!vcpu_halted && halt_detection_enabled) {
				vcpu_halted = 1;
				CPU_SET_ATOMIC(vcpuid, &vm->halted_cpus);
			}
			if (CPU_CMP(&vm->halted_cpus, &vm->active_cpus) == 0) {
				vm_halted = 1;
				break;
			}
		} else {
			wmesg = "vmidle";
		}

		t = ticks;
		vcpu_require_state_locked(vcpu, VCPU_SLEEPING);
		/*
		 * XXX msleep_spin() cannot be interrupted by signals so
		 * wake up periodically to check pending signals.
		 */
		msleep_spin(vcpu, &vcpu->mtx, wmesg, hz);
		vcpu_require_state_locked(vcpu, VCPU_FROZEN);
		vmm_stat_incr(vm, vcpuid, VCPU_IDLE_TICKS, ticks - t);
	}

	if (vcpu_halted)
		CPU_CLR_ATOMIC(vcpuid, &vm->halted_cpus);

	vcpu_unlock(vcpu);

	if (vm_halted)
		vm_suspend(vm, VM_SUSPEND_HALT);

	return (0);
}

static int
vm_handle_paging(struct vm *vm, int vcpuid, bool *retu)
{
	int rv, ftype;
	struct vm_map *map;
	struct vcpu *vcpu;
	struct vm_exit *vme;

	vcpu = &vm->vcpu[vcpuid];
	vme = &vcpu->exitinfo;

	KASSERT(vme->inst_length == 0, ("%s: invalid inst_length %d",
	    __func__, vme->inst_length));

	ftype = vme->u.paging.fault_type;
	KASSERT(ftype == VM_PROT_READ ||
	    ftype == VM_PROT_WRITE || ftype == VM_PROT_EXECUTE,
	    ("vm_handle_paging: invalid fault_type %d", ftype));

	if (ftype == VM_PROT_READ || ftype == VM_PROT_WRITE) {
		rv = pmap_emulate_accessed_dirty(vmspace_pmap(vm->vmspace),
		    vme->u.paging.gpa, ftype);
		if (rv == 0) {
			VCPU_CTR2(vm, vcpuid, "%s bit emulation for gpa %#lx",
			    ftype == VM_PROT_READ ? "accessed" : "dirty",
			    vme->u.paging.gpa);
			goto done;
		}
	}

	map = &vm->vmspace->vm_map;
	rv = vm_fault(map, vme->u.paging.gpa, ftype, VM_FAULT_NORMAL);

	VCPU_CTR3(vm, vcpuid, "vm_handle_paging rv = %d, gpa = %#lx, "
	    "ftype = %d", rv, vme->u.paging.gpa, ftype);

	if (rv != KERN_SUCCESS)
		return (EFAULT);
done:
	return (0);
}

static int
vm_handle_inst_emul(struct vm *vm, int vcpuid, bool *retu)
{
	struct vie *vie;
	struct vcpu *vcpu;
	struct vm_exit *vme;
	uint64_t gla, gpa;
	struct vm_guest_paging *paging;
	mem_region_read_t mread;
	mem_region_write_t mwrite;
	enum vm_cpu_mode cpu_mode;
	int cs_d, error, length;

	vcpu = &vm->vcpu[vcpuid];
	vme = &vcpu->exitinfo;

	gla = vme->u.inst_emul.gla;
	gpa = vme->u.inst_emul.gpa;
	cs_d = vme->u.inst_emul.cs_d;
	vie = &vme->u.inst_emul.vie;
	paging = &vme->u.inst_emul.paging;
	cpu_mode = paging->cpu_mode;

	VCPU_CTR1(vm, vcpuid, "inst_emul fault accessing gpa %#lx", gpa);

	/* Fetch, decode and emulate the faulting instruction */
	if (vie->num_valid == 0) {
		/*
		 * If the instruction length is not known then assume a
		 * maximum size instruction.
		 */
		length = vme->inst_length ? vme->inst_length : VIE_INST_SIZE;
		error = vmm_fetch_instruction(vm, vcpuid, paging, vme->rip,
		    length, vie);
	} else {
		/*
		 * The instruction bytes have already been copied into 'vie'
		 */
		error = 0;
	}
	if (error == 1)
		return (0);		/* Resume guest to handle page fault */
	else if (error == -1)
		return (EFAULT);
	else if (error != 0)
		panic("%s: vmm_fetch_instruction error %d", __func__, error);

	if (vmm_decode_instruction(vm, vcpuid, gla, cpu_mode, cs_d, vie) != 0)
		return (EFAULT);

	/*
	 * If the instruction length was not specified then update it now
	 * along with 'nextrip'.
	 */
	if (vme->inst_length == 0) {
		vme->inst_length = vie->num_processed;
		vcpu->nextrip += vie->num_processed;
	}

	/* return to userland unless this is an in-kernel emulated device */
	if (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE) {
		mread = lapic_mmio_read;
		mwrite = lapic_mmio_write;
	} else if (gpa >= VIOAPIC_BASE && gpa < VIOAPIC_BASE + VIOAPIC_SIZE) {
		mread = vioapic_mmio_read;
		mwrite = vioapic_mmio_write;
	} else if (gpa >= VHPET_BASE && gpa < VHPET_BASE + VHPET_SIZE) {
		mread = vhpet_mmio_read;
		mwrite = vhpet_mmio_write;
	} else {
		*retu = true;
		return (0);
	}

	error = vmm_emulate_instruction(vm, vcpuid, gpa, vie, paging,
	    mread, mwrite, retu);

	return (error);
}

static int
vm_handle_suspend(struct vm *vm, int vcpuid, bool *retu)
{
	int i, done;
	struct vcpu *vcpu;

	done = 0;
	vcpu = &vm->vcpu[vcpuid];

	CPU_SET_ATOMIC(vcpuid, &vm->suspended_cpus);

	/*
	 * Wait until all 'active_cpus' have suspended themselves.
	 *
	 * Since a VM may be suspended at any time including when one or
	 * more vcpus are doing a rendezvous we need to call the rendezvous
	 * handler while we are waiting to prevent a deadlock.
	 */
	vcpu_lock(vcpu);
	while (1) {
		if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) {
			VCPU_CTR0(vm, vcpuid, "All vcpus suspended");
			break;
		}

		if (vm->rendezvous_func == NULL) {
			VCPU_CTR0(vm, vcpuid, "Sleeping during suspend");
			vcpu_require_state_locked(vcpu, VCPU_SLEEPING);
			msleep_spin(vcpu, &vcpu->mtx, "vmsusp", hz);
			vcpu_require_state_locked(vcpu, VCPU_FROZEN);
		} else {
			VCPU_CTR0(vm, vcpuid, "Rendezvous during suspend");
			vcpu_unlock(vcpu);
			vm_handle_rendezvous(vm, vcpuid);
			vcpu_lock(vcpu);
		}
	}
	vcpu_unlock(vcpu);

	/*
	 * Wakeup the other sleeping vcpus and return to userspace.
	 */
	for (i = 0; i < VM_MAXCPU; i++) {
		if (CPU_ISSET(i, &vm->suspended_cpus)) {
			vcpu_notify_event(vm, i, false);
		}
	}

	*retu = true;
	return (0);
}

int
vm_suspend(struct vm *vm, enum vm_suspend_how how)
{
	int i;

	if (how <= VM_SUSPEND_NONE || how >= VM_SUSPEND_LAST)
		return (EINVAL);

	if (atomic_cmpset_int(&vm->suspend, 0, how) == 0) {
		VM_CTR2(vm, "virtual machine already suspended %d/%d",
		    vm->suspend, how);
		return (EALREADY);
	}

	VM_CTR1(vm, "virtual machine successfully suspended %d", how);

	/*
	 * Notify all active vcpus that they are now suspended.
	 */
	for (i = 0; i < VM_MAXCPU; i++) {
		if (CPU_ISSET(i, &vm->active_cpus))
			vcpu_notify_event(vm, i, false);
	}

	return (0);
}

void
vm_exit_suspended(struct vm *vm, int vcpuid, uint64_t rip)
{
	struct vm_exit *vmexit;

	KASSERT(vm->suspend > VM_SUSPEND_NONE && vm->suspend < VM_SUSPEND_LAST,
	    ("vm_exit_suspended: invalid suspend type %d", vm->suspend));

	vmexit = vm_exitinfo(vm, vcpuid);
	vmexit->rip = rip;
	vmexit->inst_length = 0;
	vmexit->exitcode = VM_EXITCODE_SUSPENDED;
	vmexit->u.suspended.how = vm->suspend;
}

void
vm_exit_rendezvous(struct vm *vm, int vcpuid, uint64_t rip)
{
	struct vm_exit *vmexit;

	KASSERT(vm->rendezvous_func != NULL, ("rendezvous not in progress"));

	vmexit = vm_exitinfo(vm, vcpuid);
	vmexit->rip = rip;
	vmexit->inst_length = 0;
	vmexit->exitcode = VM_EXITCODE_RENDEZVOUS;
	vmm_stat_incr(vm, vcpuid, VMEXIT_RENDEZVOUS, 1);
}

void
vm_exit_astpending(struct vm *vm, int vcpuid, uint64_t rip)
{
	struct vm_exit *vmexit;

	vmexit = vm_exitinfo(vm, vcpuid);
	vmexit->rip = rip;
	vmexit->inst_length = 0;
	vmexit->exitcode = VM_EXITCODE_BOGUS;
	vmm_stat_incr(vm, vcpuid, VMEXIT_ASTPENDING, 1);
}

int
vm_run(struct vm *vm, struct vm_run *vmrun)
{
	int error, vcpuid;
	struct vcpu *vcpu;
	struct pcb *pcb;
	uint64_t tscval;
	struct vm_exit *vme;
	bool retu, intr_disabled;
	pmap_t pmap;
	void *rptr, *sptr;

	vcpuid = vmrun->cpuid;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	if (!CPU_ISSET(vcpuid, &vm->active_cpus))
		return (EINVAL);

	if (CPU_ISSET(vcpuid, &vm->suspended_cpus))
		return (EINVAL);

	rptr = &vm->rendezvous_func;
	sptr = &vm->suspend;
	pmap = vmspace_pmap(vm->vmspace);
	vcpu = &vm->vcpu[vcpuid];
	vme = &vcpu->exitinfo;
restart:
	critical_enter();

	KASSERT(!CPU_ISSET(curcpu, &pmap->pm_active),
	    ("vm_run: absurd pm_active"));

	tscval = rdtsc();

	pcb = PCPU_GET(curpcb);
	set_pcb_flags(pcb, PCB_FULL_IRET);

	restore_guest_fpustate(vcpu);

	vcpu_require_state(vm, vcpuid, VCPU_RUNNING);
	error = VMRUN(vm->cookie, vcpuid, vcpu->nextrip, pmap, rptr, sptr);
	vcpu_require_state(vm, vcpuid, VCPU_FROZEN);

	save_guest_fpustate(vcpu);

	vmm_stat_incr(vm, vcpuid, VCPU_TOTAL_RUNTIME, rdtsc() - tscval);

	critical_exit();

	if (error == 0) {
		retu = false;
		vcpu->nextrip = vme->rip + vme->inst_length;
		switch (vme->exitcode) {
		case VM_EXITCODE_SUSPENDED:
			error = vm_handle_suspend(vm, vcpuid, &retu);
			break;
		case VM_EXITCODE_IOAPIC_EOI:
			vioapic_process_eoi(vm, vcpuid,
			    vme->u.ioapic_eoi.vector);
			break;
		case VM_EXITCODE_RENDEZVOUS:
			vm_handle_rendezvous(vm, vcpuid);
			error = 0;
			break;
		case VM_EXITCODE_HLT:
			intr_disabled = ((vme->u.hlt.rflags & PSL_I) == 0);
			error = vm_handle_hlt(vm, vcpuid, intr_disabled, &retu);
			break;
		case VM_EXITCODE_PAGING:
			error = vm_handle_paging(vm, vcpuid, &retu);
			break;
		case VM_EXITCODE_INST_EMUL:
			error = vm_handle_inst_emul(vm, vcpuid, &retu);
			break;
		case VM_EXITCODE_INOUT:
		case VM_EXITCODE_INOUT_STR:
			error = vm_handle_inout(vm, vcpuid, vme, &retu);
			break;
		case VM_EXITCODE_MONITOR:
		case VM_EXITCODE_MWAIT:
			vm_inject_ud(vm, vcpuid);
			break;
		default:
			retu = true;	/* handled in userland */
			break;
		}
	}

	if (error == 0 && retu == false)
		goto restart;

	/* copy the exit information */
	bcopy(vme, &vmrun->vm_exit, sizeof(struct vm_exit));
	return (error);
}
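
/*
 * Illustrative sketch (not from the original source): the VM_RUN ioctl in
 * vmmdev effectively loops over vm_run() on behalf of userland (e.g. bhyve),
 * which re-enters the kernel until an exit it must handle itself shows up
 * in 'vmrun.vm_exit'.
 *
 *	while (error == 0) {
 *		error = vm_run(vm, &vmrun);	// returns when retu == true
 *		switch (vmrun.vm_exit.exitcode) {
 *		case VM_EXITCODE_INOUT:
 *			... emulate the I/O port access in userland ...
 *			break;
 *		default:
 *			...
 *		}
 *	}
 */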

int
vm_restart_instruction(void *arg, int vcpuid)
{
	struct vm *vm;
	struct vcpu *vcpu;
	enum vcpu_state state;
	uint64_t rip;
	int error;

	vm = arg;
	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	vcpu = &vm->vcpu[vcpuid];
	state = vcpu_get_state(vm, vcpuid, NULL);
	if (state == VCPU_RUNNING) {
		/*
		 * When a vcpu is "running" the next instruction is determined
		 * by adding 'rip' and 'inst_length' in the vcpu's 'exitinfo'.
		 * Thus setting 'inst_length' to zero will cause the current
		 * instruction to be restarted.
		 */
		vcpu->exitinfo.inst_length = 0;
		VCPU_CTR1(vm, vcpuid, "restarting instruction at %#lx by "
		    "setting inst_length to zero", vcpu->exitinfo.rip);
	} else if (state == VCPU_FROZEN) {
		/*
		 * When a vcpu is "frozen" it is outside the critical section
		 * around VMRUN() and 'nextrip' points to the next instruction.
		 * Thus instruction restart is achieved by setting 'nextrip'
		 * to the vcpu's %rip.
		 */
		error = vm_get_register(vm, vcpuid, VM_REG_GUEST_RIP, &rip);
		KASSERT(!error, ("%s: error %d getting rip", __func__, error));
		VCPU_CTR2(vm, vcpuid, "restarting instruction by updating "
		    "nextrip from %#lx to %#lx", vcpu->nextrip, rip);
		vcpu->nextrip = rip;
	} else {
		panic("%s: invalid state %d", __func__, state);
	}
	return (0);
}

int
vm_exit_intinfo(struct vm *vm, int vcpuid, uint64_t info)
{
	struct vcpu *vcpu;
	int type, vector;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	vcpu = &vm->vcpu[vcpuid];

	if (info & VM_INTINFO_VALID) {
		type = info & VM_INTINFO_TYPE;
		vector = info & 0xff;
		if (type == VM_INTINFO_NMI && vector != IDT_NMI)
			return (EINVAL);
		if (type == VM_INTINFO_HWEXCEPTION && vector >= 32)
			return (EINVAL);
		if (info & VM_INTINFO_RSVD)
			return (EINVAL);
	} else {
		info = 0;
	}
	VCPU_CTR2(vm, vcpuid, "%s: info1(%#lx)", __func__, info);
	vcpu->exitintinfo = info;
	return (0);
}

enum exc_class {
	EXC_BENIGN,
	EXC_CONTRIBUTORY,
	EXC_PAGEFAULT
};

#define	IDT_VE	20	/* Virtualization Exception (Intel specific) */

static enum exc_class
exception_class(uint64_t info)
{
	int type, vector;

	KASSERT(info & VM_INTINFO_VALID, ("intinfo must be valid: %#lx", info));
	type = info & VM_INTINFO_TYPE;
	vector = info & 0xff;

	/* Table 6-4, "Interrupt and Exception Classes", Intel SDM, Vol 3 */
	switch (type) {
	case VM_INTINFO_HWINTR:
	case VM_INTINFO_SWINTR:
	case VM_INTINFO_NMI:
		return (EXC_BENIGN);
	default:
		/*
		 * Hardware exception.
		 *
		 * SVM and VT-x use identical type values to represent NMI,
		 * hardware interrupt and software interrupt.
		 *
		 * SVM uses type '3' for all exceptions. VT-x uses type '3'
		 * for exceptions except #BP and #OF. #BP and #OF use a type
		 * value of '5' or '6'. Therefore we don't check for explicit
		 * values of 'type' to classify 'intinfo' into a hardware
		 * exception.
		 */
		break;
	}

	switch (vector) {
	case IDT_PF:
	case IDT_VE:
		return (EXC_PAGEFAULT);
	case IDT_DE:
	case IDT_TS:
	case IDT_NP:
	case IDT_SS:
	case IDT_GP:
		return (EXC_CONTRIBUTORY);
	default:
		return (EXC_BENIGN);
	}
}

static int
nested_fault(struct vm *vm, int vcpuid, uint64_t info1, uint64_t info2,
    uint64_t *retinfo)
{
	enum exc_class exc1, exc2;
	int type1, vector1;

	KASSERT(info1 & VM_INTINFO_VALID, ("info1 %#lx is not valid", info1));
	KASSERT(info2 & VM_INTINFO_VALID, ("info2 %#lx is not valid", info2));

	/*
	 * If an exception occurs while attempting to call the double-fault
	 * handler the processor enters shutdown mode (aka triple fault).
	 */
	type1 = info1 & VM_INTINFO_TYPE;
	vector1 = info1 & 0xff;
	if (type1 == VM_INTINFO_HWEXCEPTION && vector1 == IDT_DF) {
		VCPU_CTR2(vm, vcpuid, "triple fault: info1(%#lx), info2(%#lx)",
		    info1, info2);
		vm_suspend(vm, VM_SUSPEND_TRIPLEFAULT);
		*retinfo = 0;
		return (0);
	}

	/*
	 * Table 6-5 "Conditions for Generating a Double Fault", Intel SDM, Vol3
	 */
	exc1 = exception_class(info1);
	exc2 = exception_class(info2);
	if ((exc1 == EXC_CONTRIBUTORY && exc2 == EXC_CONTRIBUTORY) ||
	    (exc1 == EXC_PAGEFAULT && exc2 != EXC_BENIGN)) {
		/* Convert nested fault into a double fault. */
		*retinfo = IDT_DF;
		*retinfo |= VM_INTINFO_VALID | VM_INTINFO_HWEXCEPTION;
		*retinfo |= VM_INTINFO_DEL_ERRCODE;
	} else {
		/* Handle exceptions serially */
		*retinfo = info2;
	}
	return (1);
}

static uint64_t
vcpu_exception_intinfo(struct vcpu *vcpu)
{
	uint64_t info = 0;

	if (vcpu->exception_pending) {
		info = vcpu->exc_vector & 0xff;
		info |= VM_INTINFO_VALID | VM_INTINFO_HWEXCEPTION;
		if (vcpu->exc_errcode_valid) {
			info |= VM_INTINFO_DEL_ERRCODE;
			info |= (uint64_t)vcpu->exc_errcode << 32;
		}
	}
	return (info);
}

int
vm_entry_intinfo(struct vm *vm, int vcpuid, uint64_t *retinfo)
{
	struct vcpu *vcpu;
	uint64_t info1, info2;
	int valid;

	KASSERT(vcpuid >= 0 && vcpuid < VM_MAXCPU, ("invalid vcpu %d", vcpuid));

	vcpu = &vm->vcpu[vcpuid];

	info1 = vcpu->exitintinfo;
	vcpu->exitintinfo = 0;

	info2 = 0;
	if (vcpu->exception_pending) {
		info2 = vcpu_exception_intinfo(vcpu);
		vcpu->exception_pending = 0;
		VCPU_CTR2(vm, vcpuid, "Exception %d delivered: %#lx",
		    vcpu->exc_vector, info2);
	}

	if ((info1 & VM_INTINFO_VALID) && (info2 & VM_INTINFO_VALID)) {
		valid = nested_fault(vm, vcpuid, info1, info2, retinfo);
	} else if (info1 & VM_INTINFO_VALID) {
		*retinfo = info1;
		valid = 1;
	} else if (info2 & VM_INTINFO_VALID) {
		*retinfo = info2;
		valid = 1;
	} else {
		valid = 0;
	}

	if (valid) {
		VCPU_CTR4(vm, vcpuid, "%s: info1(%#lx), info2(%#lx), "
		    "retinfo(%#lx)", __func__, info1, info2, *retinfo);
	}

	return (valid);
}

int
vm_get_intinfo(struct vm *vm, int vcpuid, uint64_t *info1, uint64_t *info2)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	vcpu = &vm->vcpu[vcpuid];
	*info1 = vcpu->exitintinfo;
	*info2 = vcpu_exception_intinfo(vcpu);
	return (0);
}

int
vm_inject_exception(struct vm *vm, int vcpuid, int vector, int errcode_valid,
    uint32_t errcode, int restart_instruction)
{
	struct vcpu *vcpu;
	int error;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	if (vector < 0 || vector >= 32)
		return (EINVAL);

	/*
	 * A double fault exception should never be injected directly into
	 * the guest. It is a derived exception that results from specific
	 * combinations of nested faults.
	 */
	if (vector == IDT_DF)
		return (EINVAL);

	vcpu = &vm->vcpu[vcpuid];

	if (vcpu->exception_pending) {
		VCPU_CTR2(vm, vcpuid, "Unable to inject exception %d due to "
		    "pending exception %d", vector, vcpu->exc_vector);
		return (EBUSY);
	}

	/*
	 * From section 26.6.1 "Interruptibility State" in Intel SDM:
	 *
	 * Event blocking by "STI" or "MOV SS" is cleared after guest executes
	 * one instruction or incurs an exception.
	 */
	error = vm_set_register(vm, vcpuid, VM_REG_GUEST_INTR_SHADOW, 0);
	KASSERT(error == 0, ("%s: error %d clearing interrupt shadow",
	    __func__, error));

	if (restart_instruction)
		vm_restart_instruction(vm, vcpuid);

	vcpu->exception_pending = 1;
	vcpu->exc_vector = vector;
	vcpu->exc_errcode = errcode;
	vcpu->exc_errcode_valid = errcode_valid;
	VCPU_CTR1(vm, vcpuid, "Exception %d pending", vector);
	return (0);
}

void
vm_inject_fault(void *vmarg, int vcpuid, int vector, int errcode_valid,
    int errcode)
{
	struct vm *vm;
	int error, restart_instruction;

	vm = vmarg;
	restart_instruction = 1;

	error = vm_inject_exception(vm, vcpuid, vector, errcode_valid,
	    errcode, restart_instruction);
	KASSERT(error == 0, ("vm_inject_exception error %d", error));
}

void
vm_inject_pf(void *vmarg, int vcpuid, int error_code, uint64_t cr2)
{
	struct vm *vm;
	int error;

	vm = vmarg;
	VCPU_CTR2(vm, vcpuid, "Injecting page fault: error_code %#x, cr2 %#lx",
	    error_code, cr2);

	error = vm_set_register(vm, vcpuid, VM_REG_GUEST_CR2, cr2);
	KASSERT(error == 0, ("vm_set_register(cr2) error %d", error));

	vm_inject_fault(vm, vcpuid, IDT_PF, 1, error_code);
}

static VMM_STAT(VCPU_NMI_COUNT, "number of NMIs delivered to vcpu");

int
vm_inject_nmi(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	vcpu = &vm->vcpu[vcpuid];

	vcpu->nmi_pending = 1;
	vcpu_notify_event(vm, vcpuid, false);
	return (0);
}

int
vm_nmi_pending(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vm_nmi_pending: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];

	return (vcpu->nmi_pending);
}

void
vm_nmi_clear(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vm_nmi_clear: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];

	if (vcpu->nmi_pending == 0)
		panic("vm_nmi_clear: inconsistent nmi_pending state");

	vcpu->nmi_pending = 0;
	vmm_stat_incr(vm, vcpuid, VCPU_NMI_COUNT, 1);
}

static VMM_STAT(VCPU_EXTINT_COUNT, "number of ExtINTs delivered to vcpu");

int
vm_inject_extint(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	vcpu = &vm->vcpu[vcpuid];

	vcpu->extint_pending = 1;
	vcpu_notify_event(vm, vcpuid, false);
	return (0);
}

int
vm_extint_pending(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vm_extint_pending: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];

	return (vcpu->extint_pending);
}

void
vm_extint_clear(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vm_extint_clear: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];

	if (vcpu->extint_pending == 0)
		panic("vm_extint_clear: inconsistent extint_pending state");

	vcpu->extint_pending = 0;
	vmm_stat_incr(vm, vcpuid, VCPU_EXTINT_COUNT, 1);
}

int
vm_get_capability(struct vm *vm, int vcpu, int type, int *retval)
{
	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (type < 0 || type >= VM_CAP_MAX)
		return (EINVAL);

	return (VMGETCAP(vm->cookie, vcpu, type, retval));
}

int
vm_set_capability(struct vm *vm, int vcpu, int type, int val)
{
	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (type < 0 || type >= VM_CAP_MAX)
		return (EINVAL);

	return (VMSETCAP(vm->cookie, vcpu, type, val));
}

struct vlapic *
vm_lapic(struct vm *vm, int cpu)
{
	return (vm->vcpu[cpu].vlapic);
}

struct vioapic *
vm_ioapic(struct vm *vm)
{

	return (vm->vioapic);
}

struct vhpet *
vm_hpet(struct vm *vm)
{

	return (vm->vhpet);
}

boolean_t
vmm_is_pptdev(int bus, int slot, int func)
{
	int found, i, n;
	int b, s, f;
	char *val, *cp, *cp2;

	/*
	 * XXX
	 * The length of an environment variable is limited to 128 bytes which
	 * puts an upper limit on the number of passthru devices that may be
	 * specified using a single environment variable.
	 *
	 * Work around this by scanning multiple environment variable
	 * names instead of a single one - yuck!
	 */
	const char *names[] = { "pptdevs", "pptdevs2", "pptdevs3", NULL };

	/* set pptdevs="1/2/3 4/5/6 7/8/9 10/11/12" */
	found = 0;
	for (i = 0; names[i] != NULL && !found; i++) {
		cp = val = getenv(names[i]);
		while (cp != NULL && *cp != '\0') {
			if ((cp2 = strchr(cp, ' ')) != NULL)
				*cp2 = '\0';

			n = sscanf(cp, "%d/%d/%d", &b, &s, &f);
			if (n == 3 && bus == b && slot == s && func == f) {
				found = 1;
				break;
			}

			if (cp2 != NULL)
				*cp2++ = ' ';

			cp = cp2;
		}
		freeenv(val);
	}
	return (found);
}

void *
vm_iommu_domain(struct vm *vm)
{

	return (vm->iommu);
}

int
vcpu_set_state(struct vm *vm, int vcpuid, enum vcpu_state newstate,
    bool from_idle)
{
	int error;
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vcpu_set_state: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];

	vcpu_lock(vcpu);
	error = vcpu_set_state_locked(vcpu, newstate, from_idle);
	vcpu_unlock(vcpu);

	return (error);
}

enum vcpu_state
vcpu_get_state(struct vm *vm, int vcpuid, int *hostcpu)
{
	struct vcpu *vcpu;
	enum vcpu_state state;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vcpu_get_state: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];

	vcpu_lock(vcpu);
	state = vcpu->state;
	if (hostcpu != NULL)
		*hostcpu = vcpu->hostcpu;
	vcpu_unlock(vcpu);

	return (state);
}

int
vm_activate_cpu(struct vm *vm, int vcpuid)
{

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	if (CPU_ISSET(vcpuid, &vm->active_cpus))
		return (EBUSY);

	VCPU_CTR0(vm, vcpuid, "activated");
	CPU_SET_ATOMIC(vcpuid, &vm->active_cpus);
	return (0);
}

cpuset_t
vm_active_cpus(struct vm *vm)
{

	return (vm->active_cpus);
}

cpuset_t
vm_suspended_cpus(struct vm *vm)
{

	return (vm->suspended_cpus);
}

void *
vcpu_stats(struct vm *vm, int vcpuid)
{

	return (vm->vcpu[vcpuid].stats);
}

int
vm_get_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state *state)
{
	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	*state = vm->vcpu[vcpuid].x2apic_state;

	return (0);
}

int
vm_set_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state state)
{
	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	if (state >= X2APIC_STATE_LAST)
		return (EINVAL);

	vm->vcpu[vcpuid].x2apic_state = state;

	vlapic_set_x2apic_state(vm, vcpuid, state);

	return (0);
}

/*
 * This function is called to ensure that a vcpu "sees" a pending event
 * as soon as possible:
 * - If the vcpu thread is sleeping then it is woken up.
 * - If the vcpu is running on a different host_cpu then an IPI will be directed
 *   to the host_cpu to cause the vcpu to trap into the hypervisor.
 */
void
vcpu_notify_event(struct vm *vm, int vcpuid, bool lapic_intr)
{
	int hostcpu;
	struct vcpu *vcpu;

	vcpu = &vm->vcpu[vcpuid];

	vcpu_lock(vcpu);
	hostcpu = vcpu->hostcpu;
	if (vcpu->state == VCPU_RUNNING) {
		KASSERT(hostcpu != NOCPU, ("vcpu running on invalid hostcpu"));
		if (hostcpu != curcpu) {
			if (lapic_intr) {
				vlapic_post_intr(vcpu->vlapic, hostcpu,
				    vmm_ipinum);
			} else {
				ipi_cpu(hostcpu, vmm_ipinum);
			}
		} else {
			/*
			 * If the 'vcpu' is running on 'curcpu' then it must
			 * be sending a notification to itself (e.g. SELF_IPI).
			 * The pending event will be picked up when the vcpu
			 * transitions back to guest context.
			 */
		}
	} else {
		KASSERT(hostcpu == NOCPU, ("vcpu state %d not consistent "
		    "with hostcpu %d", vcpu->state, hostcpu));
		if (vcpu->state == VCPU_SLEEPING)
			wakeup_one(vcpu);
	}
	vcpu_unlock(vcpu);
}

struct vmspace *
vm_get_vmspace(struct vm *vm)
{

	return (vm->vmspace);
}

int
vm_apicid2vcpuid(struct vm *vm, int apicid)
{
	/*
	 * XXX apic id is assumed to be numerically identical to vcpu id
	 */
	return (apicid);
}

void
vm_smp_rendezvous(struct vm *vm, int vcpuid, cpuset_t dest,
    vm_rendezvous_func_t func, void *arg)
{
	int i;

	/*
	 * Enforce that this function is called without any locks
	 */
	WITNESS_WARN(WARN_PANIC, NULL, "vm_smp_rendezvous");
	KASSERT(vcpuid == -1 || (vcpuid >= 0 && vcpuid < VM_MAXCPU),
	    ("vm_smp_rendezvous: invalid vcpuid %d", vcpuid));

restart:
	mtx_lock(&vm->rendezvous_mtx);
	if (vm->rendezvous_func != NULL) {
		/*
		 * If a rendezvous is already in progress then we need to
		 * call the rendezvous handler in case this 'vcpuid' is one
		 * of the targets of the rendezvous.
		 */
		RENDEZVOUS_CTR0(vm, vcpuid, "Rendezvous already in progress");
		mtx_unlock(&vm->rendezvous_mtx);
		vm_handle_rendezvous(vm, vcpuid);
		goto restart;
	}
	KASSERT(vm->rendezvous_func == NULL, ("vm_smp_rendezvous: previous "
	    "rendezvous is still in progress"));

	RENDEZVOUS_CTR0(vm, vcpuid, "Initiating rendezvous");
	vm->rendezvous_req_cpus = dest;
	CPU_ZERO(&vm->rendezvous_done_cpus);
	vm->rendezvous_arg = arg;
	vm_set_rendezvous_func(vm, func);
	mtx_unlock(&vm->rendezvous_mtx);

	/*
	 * Wake up any sleeping vcpus and trigger a VM-exit in any running
	 * vcpus so they handle the rendezvous as soon as possible.
	 */
	for (i = 0; i < VM_MAXCPU; i++) {
		if (CPU_ISSET(i, &dest))
			vcpu_notify_event(vm, i, false);
	}

	vm_handle_rendezvous(vm, vcpuid);
}
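
/*
 * Illustrative sketch (not from the original source): a device model that
 * must update per-vcpu state on every active vcpu can use the rendezvous
 * mechanism.  'func' runs on each target vcpu's thread; the callback name
 * below is hypothetical.
 *
 *	static void
 *	example_rendezvous_cb(struct vm *vm, int vcpuid, void *arg)
 *	{
 *		... per-vcpu work, e.g. recalculate pending interrupts ...
 *	}
 *
 *	vm_smp_rendezvous(vm, curvcpuid, vm_active_cpus(vm),
 *	    example_rendezvous_cb, NULL);
 */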

struct vatpic *
vm_atpic(struct vm *vm)
{
	return (vm->vatpic);
}

struct vatpit *
vm_atpit(struct vm *vm)
{
	return (vm->vatpit);
}

struct vpmtmr *
vm_pmtmr(struct vm *vm)
{

	return (vm->vpmtmr);
}

struct vrtc *
vm_rtc(struct vm *vm)
{

	return (vm->vrtc);
}

enum vm_reg_name
vm_segment_name(int seg)
{
	static enum vm_reg_name seg_names[] = {
		VM_REG_GUEST_ES,
		VM_REG_GUEST_CS,
		VM_REG_GUEST_SS,
		VM_REG_GUEST_DS,
		VM_REG_GUEST_FS,
		VM_REG_GUEST_GS
	};

	KASSERT(seg >= 0 && seg < nitems(seg_names),
	    ("%s: invalid segment encoding %d", __func__, seg));
	return (seg_names[seg]);
}

void
vm_copy_teardown(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo,
    int num_copyinfo)
{
	int idx;

	for (idx = 0; idx < num_copyinfo; idx++) {
		if (copyinfo[idx].cookie != NULL)
			vm_gpa_release(copyinfo[idx].cookie);
	}
	bzero(copyinfo, num_copyinfo * sizeof(struct vm_copyinfo));
}

int
vm_copy_setup(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
    uint64_t gla, size_t len, int prot, struct vm_copyinfo *copyinfo,
    int num_copyinfo)
{
	int error, idx, nused;
	size_t n, off, remaining;
	void *hva, *cookie;
	uint64_t gpa;

	bzero(copyinfo, sizeof(struct vm_copyinfo) * num_copyinfo);

	nused = 0;
	remaining = len;
	while (remaining > 0) {
		KASSERT(nused < num_copyinfo, ("insufficient vm_copyinfo"));
		error = vmm_gla2gpa(vm, vcpuid, paging, gla, prot, &gpa);
		if (error)
			return (error);
		off = gpa & PAGE_MASK;
		n = min(remaining, PAGE_SIZE - off);
		copyinfo[nused].gpa = gpa;
		copyinfo[nused].len = n;
		remaining -= n;
		gla += n;
		nused++;
	}

	for (idx = 0; idx < nused; idx++) {
		hva = vm_gpa_hold(vm, copyinfo[idx].gpa, copyinfo[idx].len,
		    prot, &cookie);
		if (hva == NULL)
			break;
		copyinfo[idx].hva = hva;
		copyinfo[idx].cookie = cookie;
	}

	if (idx != nused) {
		vm_copy_teardown(vm, vcpuid, copyinfo, num_copyinfo);
		return (-1);
	} else {
		return (0);
	}
}

void
vm_copyin(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo, void *kaddr,
    size_t len)
{
	char *dst;
	int idx;

	dst = kaddr;
	idx = 0;
	while (len > 0) {
		bcopy(copyinfo[idx].hva, dst, copyinfo[idx].len);
		len -= copyinfo[idx].len;
		dst += copyinfo[idx].len;
		idx++;
	}
}

void
vm_copyout(struct vm *vm, int vcpuid, const void *kaddr,
    struct vm_copyinfo *copyinfo, size_t len)
{
	const char *src;
	int idx;

	src = kaddr;
	idx = 0;
	while (len > 0) {
		bcopy(src, copyinfo[idx].hva, copyinfo[idx].len);
		len -= copyinfo[idx].len;
		src += copyinfo[idx].len;
		idx++;
	}
}

/*
 * Return the amount of in-use and wired memory for the VM. Since
 * these are global stats, only return the values for vCPU 0.
 */
VMM_STAT_DECLARE(VMM_MEM_RESIDENT);
VMM_STAT_DECLARE(VMM_MEM_WIRED);

static void
vm_get_rescnt(struct vm *vm, int vcpu, struct vmm_stat_type *stat)
{

	if (vcpu == 0) {
		vmm_stat_set(vm, vcpu, VMM_MEM_RESIDENT,
		    PAGE_SIZE * vmspace_resident_count(vm->vmspace));
	}
}

static void
vm_get_wiredcnt(struct vm *vm, int vcpu, struct vmm_stat_type *stat)
{

	if (vcpu == 0) {
		vmm_stat_set(vm, vcpu, VMM_MEM_WIRED,
		    PAGE_SIZE * pmap_wired_count(vmspace_pmap(vm->vmspace)));
	}
}

VMM_STAT_FUNC(VMM_MEM_RESIDENT, "Resident memory", vm_get_rescnt);
VMM_STAT_FUNC(VMM_MEM_WIRED, "Wired memory", vm_get_wiredcnt);
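
/*
 * Illustrative sketch (not from the original source): instruction emulation
 * code copies guest-virtual data in two phases, combining the copy helpers
 * above.  The buffer size and variable names are hypothetical.
 *
 *	struct vm_copyinfo copyinfo[2];	// worst case: span straddles a page
 *	uint64_t buf;
 *
 *	error = vm_copy_setup(vm, vcpuid, paging, gla, sizeof(buf),
 *	    VM_PROT_READ, copyinfo, nitems(copyinfo));
 *	if (error == 0) {
 *		vm_copyin(vm, vcpuid, copyinfo, &buf, sizeof(buf));
 *		vm_copy_teardown(vm, vcpuid, copyinfo, nitems(copyinfo));
 *	}
 */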