vmmapi.c revision 270074
/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/10/lib/libvmmapi/vmmapi.c 270074 2014-08-17 01:23:52Z grehan $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/lib/libvmmapi/vmmapi.c 270074 2014-08-17 01:23:52Z grehan $");

#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/_iovec.h>
#include <sys/cpuset.h>

#include <machine/specialreg.h>
#include <machine/param.h>

#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

#include <libutil.h>

#include <machine/vmm.h>
#include <machine/vmm_dev.h>

#include "vmmapi.h"

#define	MB	(1024 * 1024UL)
#define	GB	(1024 * 1024 * 1024UL)

struct vmctx {
	int	fd;
	uint32_t lowmem_limit;
	enum vm_mmap_style vms;
	int	memflags;
	size_t	lowmem;
	char	*lowmem_addr;
	size_t	highmem;
	char	*highmem_addr;
	char	*name;
};

#define	CREATE(x)  sysctlbyname("hw.vmm.create", NULL, NULL, (x), strlen((x)))
#define	DESTROY(x) sysctlbyname("hw.vmm.destroy", NULL, NULL, (x), strlen((x)))

static int
vm_device_open(const char *name)
{
	int fd, len;
	char *vmfile;

	len = strlen("/dev/vmm/") + strlen(name) + 1;
	vmfile = malloc(len);
	assert(vmfile != NULL);
	snprintf(vmfile, len, "/dev/vmm/%s", name);

	/* Open the device file */
	fd = open(vmfile, O_RDWR, 0);

	free(vmfile);
	return (fd);
}

int
vm_create(const char *name)
{

	return (CREATE((char *)name));
}

struct vmctx *
vm_open(const char *name)
{
	struct vmctx *vm;

	vm = malloc(sizeof(struct vmctx) + strlen(name) + 1);
	assert(vm != NULL);

	vm->fd = -1;
	vm->memflags = 0;
	vm->lowmem_limit = 3 * GB;
	vm->name = (char *)(vm + 1);
	strcpy(vm->name, name);

	if ((vm->fd = vm_device_open(vm->name)) < 0)
		goto err;

	return (vm);
err:
	vm_destroy(vm);
	return (NULL);
}

void
vm_destroy(struct vmctx *vm)
{
	assert(vm != NULL);

	if (vm->fd >= 0)
		close(vm->fd);
	DESTROY(vm->name);

	free(vm);
}

int
vm_parse_memsize(const char *optarg, size_t *ret_memsize)
{
	char *endptr;
	size_t optval;
	int error;

	optval = strtoul(optarg, &endptr, 0);
	if (*optarg != '\0' && *endptr == '\0') {
		/*
		 * For the sake of backward compatibility if the memory size
		 * specified on the command line is less than a megabyte then
		 * it is interpreted as being in units of MB.
		 */
		if (optval < MB)
			optval *= MB;
		*ret_memsize = optval;
		error = 0;
	} else
		error = expand_number(optarg, ret_memsize);

	return (error);
}
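
/*
 * Illustrative usage sketch (editor's addition, not part of revision
 * 270074; kept out of the build).  It shows how the backward-compatibility
 * rule above plays out: a bare number below one megabyte is scaled to MB,
 * while suffixed sizes are handled by expand_number(3) and yield the same
 * result.
 */
#if 0
static void
example_parse_memsize(void)
{
	size_t memsize;

	if (vm_parse_memsize("512", &memsize) == 0)
		assert(memsize == 512 * MB);	/* bare "512" means 512MB */
	if (vm_parse_memsize("512M", &memsize) == 0)
		assert(memsize == 512 * MB);	/* expand_number(3) suffix */
}
#endif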
int
vm_get_memory_seg(struct vmctx *ctx, vm_paddr_t gpa, size_t *ret_len,
		  int *wired)
{
	int error;
	struct vm_memory_segment seg;

	bzero(&seg, sizeof(seg));
	seg.gpa = gpa;
	error = ioctl(ctx->fd, VM_GET_MEMORY_SEG, &seg);
	*ret_len = seg.len;
	if (wired != NULL)
		*wired = seg.wired;
	return (error);
}

uint32_t
vm_get_lowmem_limit(struct vmctx *ctx)
{

	return (ctx->lowmem_limit);
}

void
vm_set_lowmem_limit(struct vmctx *ctx, uint32_t limit)
{

	ctx->lowmem_limit = limit;
}

void
vm_set_memflags(struct vmctx *ctx, int flags)
{

	ctx->memflags = flags;
}

static int
setup_memory_segment(struct vmctx *ctx, vm_paddr_t gpa, size_t len, char **addr)
{
	int error, mmap_flags;
	struct vm_memory_segment seg;

	/*
	 * Create and optionally map 'len' bytes of memory at guest
	 * physical address 'gpa'
	 */
	bzero(&seg, sizeof(seg));
	seg.gpa = gpa;
	seg.len = len;
	error = ioctl(ctx->fd, VM_MAP_MEMORY, &seg);
	if (error == 0 && addr != NULL) {
		mmap_flags = MAP_SHARED;
		if ((ctx->memflags & VM_MEM_F_INCORE) == 0)
			mmap_flags |= MAP_NOCORE;
		*addr = mmap(NULL, len, PROT_READ | PROT_WRITE, mmap_flags,
		    ctx->fd, gpa);
	}
	return (error);
}

int
vm_setup_memory(struct vmctx *ctx, size_t memsize, enum vm_mmap_style vms)
{
	char **addr;
	int error;

	/* XXX VM_MMAP_SPARSE not implemented yet */
	assert(vms == VM_MMAP_NONE || vms == VM_MMAP_ALL);
	ctx->vms = vms;

	/*
	 * If 'memsize' cannot fit entirely in the 'lowmem' segment then
	 * create another 'highmem' segment above 4GB for the remainder.
	 */
	if (memsize > ctx->lowmem_limit) {
		ctx->lowmem = ctx->lowmem_limit;
		ctx->highmem = memsize - ctx->lowmem;
	} else {
		ctx->lowmem = memsize;
		ctx->highmem = 0;
	}

	if (ctx->lowmem > 0) {
		addr = (vms == VM_MMAP_ALL) ? &ctx->lowmem_addr : NULL;
		error = setup_memory_segment(ctx, 0, ctx->lowmem, addr);
		if (error)
			return (error);
	}

	if (ctx->highmem > 0) {
		addr = (vms == VM_MMAP_ALL) ? &ctx->highmem_addr : NULL;
		error = setup_memory_segment(ctx, 4*GB, ctx->highmem, addr);
		if (error)
			return (error);
	}

	return (0);
}
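
/*
 * Illustrative usage sketch (editor's addition, not part of revision
 * 270074; kept out of the build).  With the default 3GB lowmem_limit set
 * in vm_open(), an 8GB guest is split into a 3GB segment at gpa 0 and a
 * 5GB segment starting at 4GB; the range in between is left unbacked for
 * the PCI hole.
 */
#if 0
static int
example_setup_memory(struct vmctx *ctx)
{
	int error;

	error = vm_setup_memory(ctx, 8 * GB, VM_MMAP_ALL);
	if (error == 0) {
		assert(vm_get_lowmem_size(ctx) == 3 * GB);
		assert(vm_get_highmem_size(ctx) == 5 * GB);
	}
	return (error);
}
#endif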
void *
vm_map_gpa(struct vmctx *ctx, vm_paddr_t gaddr, size_t len)
{

	/* XXX VM_MMAP_SPARSE not implemented yet */
	assert(ctx->vms == VM_MMAP_ALL);

	if (gaddr < ctx->lowmem && gaddr + len <= ctx->lowmem)
		return ((void *)(ctx->lowmem_addr + gaddr));

	if (gaddr >= 4*GB) {
		gaddr -= 4*GB;
		if (gaddr < ctx->highmem && gaddr + len <= ctx->highmem)
			return ((void *)(ctx->highmem_addr + gaddr));
	}

	return (NULL);
}

size_t
vm_get_lowmem_size(struct vmctx *ctx)
{

	return (ctx->lowmem);
}

size_t
vm_get_highmem_size(struct vmctx *ctx)
{

	return (ctx->highmem);
}

int
vm_set_desc(struct vmctx *ctx, int vcpu, int reg,
	    uint64_t base, uint32_t limit, uint32_t access)
{
	int error;
	struct vm_seg_desc vmsegdesc;

	bzero(&vmsegdesc, sizeof(vmsegdesc));
	vmsegdesc.cpuid = vcpu;
	vmsegdesc.regnum = reg;
	vmsegdesc.desc.base = base;
	vmsegdesc.desc.limit = limit;
	vmsegdesc.desc.access = access;

	error = ioctl(ctx->fd, VM_SET_SEGMENT_DESCRIPTOR, &vmsegdesc);
	return (error);
}

int
vm_get_desc(struct vmctx *ctx, int vcpu, int reg,
	    uint64_t *base, uint32_t *limit, uint32_t *access)
{
	int error;
	struct vm_seg_desc vmsegdesc;

	bzero(&vmsegdesc, sizeof(vmsegdesc));
	vmsegdesc.cpuid = vcpu;
	vmsegdesc.regnum = reg;

	error = ioctl(ctx->fd, VM_GET_SEGMENT_DESCRIPTOR, &vmsegdesc);
	if (error == 0) {
		*base = vmsegdesc.desc.base;
		*limit = vmsegdesc.desc.limit;
		*access = vmsegdesc.desc.access;
	}
	return (error);
}

int
vm_set_register(struct vmctx *ctx, int vcpu, int reg, uint64_t val)
{
	int error;
	struct vm_register vmreg;

	bzero(&vmreg, sizeof(vmreg));
	vmreg.cpuid = vcpu;
	vmreg.regnum = reg;
	vmreg.regval = val;

	error = ioctl(ctx->fd, VM_SET_REGISTER, &vmreg);
	return (error);
}

int
vm_get_register(struct vmctx *ctx, int vcpu, int reg, uint64_t *ret_val)
{
	int error;
	struct vm_register vmreg;

	bzero(&vmreg, sizeof(vmreg));
	vmreg.cpuid = vcpu;
	vmreg.regnum = reg;

	error = ioctl(ctx->fd, VM_GET_REGISTER, &vmreg);
	*ret_val = vmreg.regval;
	return (error);
}

int
vm_run(struct vmctx *ctx, int vcpu, uint64_t rip, struct vm_exit *vmexit)
{
	int error;
	struct vm_run vmrun;

	bzero(&vmrun, sizeof(vmrun));
	vmrun.cpuid = vcpu;
	vmrun.rip = rip;

	error = ioctl(ctx->fd, VM_RUN, &vmrun);
	bcopy(&vmrun.vm_exit, vmexit, sizeof(struct vm_exit));
	return (error);
}

int
vm_suspend(struct vmctx *ctx, enum vm_suspend_how how)
{
	struct vm_suspend vmsuspend;

	bzero(&vmsuspend, sizeof(vmsuspend));
	vmsuspend.how = how;
	return (ioctl(ctx->fd, VM_SUSPEND, &vmsuspend));
}

int
vm_reinit(struct vmctx *ctx)
{

	return (ioctl(ctx->fd, VM_REINIT, 0));
}
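
/*
 * Illustrative usage sketch (editor's addition, not part of revision
 * 270074; kept out of the build).  A caller such as bhyve(8) drives a
 * vcpu by looping on vm_run(), handling each exit and resuming at the
 * rip reported by the previous one.  The vm_exit fields come from
 * <machine/vmm.h>.
 */
#if 0
static void
example_vcpu_loop(struct vmctx *ctx, int vcpu, uint64_t rip)
{
	struct vm_exit vmexit;

	while (vm_run(ctx, vcpu, rip, &vmexit) == 0) {
		if (vmexit.exitcode == VM_EXITCODE_HLT)
			break;			/* guest executed hlt */
		/* ... handle other exit reasons here ... */
		rip = vmexit.rip + vmexit.inst_length;
	}
}
#endif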
static int
vm_inject_exception_real(struct vmctx *ctx, int vcpu, int vector,
    int error_code, int error_code_valid)
{
	struct vm_exception exc;

	bzero(&exc, sizeof(exc));
	exc.cpuid = vcpu;
	exc.vector = vector;
	exc.error_code = error_code;
	exc.error_code_valid = error_code_valid;

	return (ioctl(ctx->fd, VM_INJECT_EXCEPTION, &exc));
}

int
vm_inject_exception(struct vmctx *ctx, int vcpu, int vector)
{

	return (vm_inject_exception_real(ctx, vcpu, vector, 0, 0));
}

int
vm_inject_exception2(struct vmctx *ctx, int vcpu, int vector, int errcode)
{

	return (vm_inject_exception_real(ctx, vcpu, vector, errcode, 1));
}

int
vm_apicid2vcpu(struct vmctx *ctx, int apicid)
{
	/*
	 * The apic id associated with the 'vcpu' has the same numerical value
	 * as the 'vcpu' itself.
	 */
	return (apicid);
}

int
vm_lapic_irq(struct vmctx *ctx, int vcpu, int vector)
{
	struct vm_lapic_irq vmirq;

	bzero(&vmirq, sizeof(vmirq));
	vmirq.cpuid = vcpu;
	vmirq.vector = vector;

	return (ioctl(ctx->fd, VM_LAPIC_IRQ, &vmirq));
}

int
vm_lapic_local_irq(struct vmctx *ctx, int vcpu, int vector)
{
	struct vm_lapic_irq vmirq;

	bzero(&vmirq, sizeof(vmirq));
	vmirq.cpuid = vcpu;
	vmirq.vector = vector;

	return (ioctl(ctx->fd, VM_LAPIC_LOCAL_IRQ, &vmirq));
}

int
vm_lapic_msi(struct vmctx *ctx, uint64_t addr, uint64_t msg)
{
	struct vm_lapic_msi vmmsi;

	bzero(&vmmsi, sizeof(vmmsi));
	vmmsi.addr = addr;
	vmmsi.msg = msg;

	return (ioctl(ctx->fd, VM_LAPIC_MSI, &vmmsi));
}

int
vm_ioapic_assert_irq(struct vmctx *ctx, int irq)
{
	struct vm_ioapic_irq ioapic_irq;

	bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq));
	ioapic_irq.irq = irq;

	return (ioctl(ctx->fd, VM_IOAPIC_ASSERT_IRQ, &ioapic_irq));
}

int
vm_ioapic_deassert_irq(struct vmctx *ctx, int irq)
{
	struct vm_ioapic_irq ioapic_irq;

	bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq));
	ioapic_irq.irq = irq;

	return (ioctl(ctx->fd, VM_IOAPIC_DEASSERT_IRQ, &ioapic_irq));
}

int
vm_ioapic_pulse_irq(struct vmctx *ctx, int irq)
{
	struct vm_ioapic_irq ioapic_irq;

	bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq));
	ioapic_irq.irq = irq;

	return (ioctl(ctx->fd, VM_IOAPIC_PULSE_IRQ, &ioapic_irq));
}

int
vm_ioapic_pincount(struct vmctx *ctx, int *pincount)
{

	return (ioctl(ctx->fd, VM_IOAPIC_PINCOUNT, pincount));
}

int
vm_isa_assert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
{
	struct vm_isa_irq isa_irq;

	bzero(&isa_irq, sizeof(struct vm_isa_irq));
	isa_irq.atpic_irq = atpic_irq;
	isa_irq.ioapic_irq = ioapic_irq;

	return (ioctl(ctx->fd, VM_ISA_ASSERT_IRQ, &isa_irq));
}

int
vm_isa_deassert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
{
	struct vm_isa_irq isa_irq;

	bzero(&isa_irq, sizeof(struct vm_isa_irq));
	isa_irq.atpic_irq = atpic_irq;
	isa_irq.ioapic_irq = ioapic_irq;

	return (ioctl(ctx->fd, VM_ISA_DEASSERT_IRQ, &isa_irq));
}

int
vm_isa_pulse_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
{
	struct vm_isa_irq isa_irq;

	bzero(&isa_irq, sizeof(struct vm_isa_irq));
	isa_irq.atpic_irq = atpic_irq;
	isa_irq.ioapic_irq = ioapic_irq;

	return (ioctl(ctx->fd, VM_ISA_PULSE_IRQ, &isa_irq));
}

int
vm_isa_set_irq_trigger(struct vmctx *ctx, int atpic_irq,
    enum vm_intr_trigger trigger)
{
	struct vm_isa_irq_trigger isa_irq_trigger;

	bzero(&isa_irq_trigger, sizeof(struct vm_isa_irq_trigger));
	isa_irq_trigger.atpic_irq = atpic_irq;
	isa_irq_trigger.trigger = trigger;

	return (ioctl(ctx->fd, VM_ISA_SET_IRQ_TRIGGER, &isa_irq_trigger));
}
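
/*
 * Illustrative usage sketch (editor's addition, not part of revision
 * 270074; kept out of the build).  A device model raising an
 * edge-triggered legacy interrupt pulses the line on both the ATPIC and
 * the I/O APIC pin, so the interrupt is delivered whichever controller
 * the guest has routed.  EDGE_TRIGGER comes from <machine/vmm.h>.
 */
#if 0
static int
example_pulse_legacy_irq(struct vmctx *ctx, int irq)
{
	int error;

	error = vm_isa_set_irq_trigger(ctx, irq, EDGE_TRIGGER);
	if (error == 0)
		error = vm_isa_pulse_irq(ctx, irq, irq);
	return (error);
}
#endif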
int
vm_inject_nmi(struct vmctx *ctx, int vcpu)
{
	struct vm_nmi vmnmi;

	bzero(&vmnmi, sizeof(vmnmi));
	vmnmi.cpuid = vcpu;

	return (ioctl(ctx->fd, VM_INJECT_NMI, &vmnmi));
}

static struct {
	const char	*name;
	int		type;
} capstrmap[] = {
	{ "hlt_exit",		VM_CAP_HALT_EXIT },
	{ "mtrap_exit",		VM_CAP_MTRAP_EXIT },
	{ "pause_exit",		VM_CAP_PAUSE_EXIT },
	{ "unrestricted_guest",	VM_CAP_UNRESTRICTED_GUEST },
	{ "enable_invpcid",	VM_CAP_ENABLE_INVPCID },
	{ 0 }
};

int
vm_capability_name2type(const char *capname)
{
	int i;

	for (i = 0; capstrmap[i].name != NULL && capname != NULL; i++) {
		if (strcmp(capstrmap[i].name, capname) == 0)
			return (capstrmap[i].type);
	}

	return (-1);
}

const char *
vm_capability_type2name(int type)
{
	int i;

	for (i = 0; capstrmap[i].name != NULL; i++) {
		if (capstrmap[i].type == type)
			return (capstrmap[i].name);
	}

	return (NULL);
}

int
vm_get_capability(struct vmctx *ctx, int vcpu, enum vm_cap_type cap,
		  int *retval)
{
	int error;
	struct vm_capability vmcap;

	bzero(&vmcap, sizeof(vmcap));
	vmcap.cpuid = vcpu;
	vmcap.captype = cap;

	error = ioctl(ctx->fd, VM_GET_CAPABILITY, &vmcap);
	*retval = vmcap.capval;
	return (error);
}

int
vm_set_capability(struct vmctx *ctx, int vcpu, enum vm_cap_type cap, int val)
{
	struct vm_capability vmcap;

	bzero(&vmcap, sizeof(vmcap));
	vmcap.cpuid = vcpu;
	vmcap.captype = cap;
	vmcap.capval = val;

	return (ioctl(ctx->fd, VM_SET_CAPABILITY, &vmcap));
}
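
/*
 * Illustrative usage sketch (editor's addition, not part of revision
 * 270074; kept out of the build).  capstrmap lets callers name
 * capabilities on a command line: a string such as "hlt_exit" is mapped
 * to its vm_cap_type and then enabled on a vcpu.
 */
#if 0
static int
example_enable_capability(struct vmctx *ctx, int vcpu, const char *capname)
{
	int type;

	if ((type = vm_capability_name2type(capname)) < 0)
		return (-1);			/* unknown capability name */
	return (vm_set_capability(ctx, vcpu, type, 1));
}
#endif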
int
vm_assign_pptdev(struct vmctx *ctx, int bus, int slot, int func)
{
	struct vm_pptdev pptdev;

	bzero(&pptdev, sizeof(pptdev));
	pptdev.bus = bus;
	pptdev.slot = slot;
	pptdev.func = func;

	return (ioctl(ctx->fd, VM_BIND_PPTDEV, &pptdev));
}

int
vm_unassign_pptdev(struct vmctx *ctx, int bus, int slot, int func)
{
	struct vm_pptdev pptdev;

	bzero(&pptdev, sizeof(pptdev));
	pptdev.bus = bus;
	pptdev.slot = slot;
	pptdev.func = func;

	return (ioctl(ctx->fd, VM_UNBIND_PPTDEV, &pptdev));
}

int
vm_map_pptdev_mmio(struct vmctx *ctx, int bus, int slot, int func,
		   vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
{
	struct vm_pptdev_mmio pptmmio;

	bzero(&pptmmio, sizeof(pptmmio));
	pptmmio.bus = bus;
	pptmmio.slot = slot;
	pptmmio.func = func;
	pptmmio.gpa = gpa;
	pptmmio.len = len;
	pptmmio.hpa = hpa;

	return (ioctl(ctx->fd, VM_MAP_PPTDEV_MMIO, &pptmmio));
}

int
vm_setup_pptdev_msi(struct vmctx *ctx, int vcpu, int bus, int slot, int func,
    uint64_t addr, uint64_t msg, int numvec)
{
	struct vm_pptdev_msi pptmsi;

	bzero(&pptmsi, sizeof(pptmsi));
	pptmsi.vcpu = vcpu;
	pptmsi.bus = bus;
	pptmsi.slot = slot;
	pptmsi.func = func;
	pptmsi.msg = msg;
	pptmsi.addr = addr;
	pptmsi.numvec = numvec;

	return (ioctl(ctx->fd, VM_PPTDEV_MSI, &pptmsi));
}

int
vm_setup_pptdev_msix(struct vmctx *ctx, int vcpu, int bus, int slot, int func,
    int idx, uint64_t addr, uint64_t msg, uint32_t vector_control)
{
	struct vm_pptdev_msix pptmsix;

	bzero(&pptmsix, sizeof(pptmsix));
	pptmsix.vcpu = vcpu;
	pptmsix.bus = bus;
	pptmsix.slot = slot;
	pptmsix.func = func;
	pptmsix.idx = idx;
	pptmsix.msg = msg;
	pptmsix.addr = addr;
	pptmsix.vector_control = vector_control;

	return ioctl(ctx->fd, VM_PPTDEV_MSIX, &pptmsix);
}

uint64_t *
vm_get_stats(struct vmctx *ctx, int vcpu, struct timeval *ret_tv,
	     int *ret_entries)
{
	int error;

	static struct vm_stats vmstats;

	vmstats.cpuid = vcpu;

	error = ioctl(ctx->fd, VM_STATS, &vmstats);
	if (error == 0) {
		if (ret_entries)
			*ret_entries = vmstats.num_entries;
		if (ret_tv)
			*ret_tv = vmstats.tv;
		return (vmstats.statbuf);
	} else
		return (NULL);
}

const char *
vm_get_stat_desc(struct vmctx *ctx, int index)
{
	static struct vm_stat_desc statdesc;

	statdesc.index = index;
	if (ioctl(ctx->fd, VM_STAT_DESC, &statdesc) == 0)
		return (statdesc.desc);
	else
		return (NULL);
}
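
/*
 * Illustrative usage sketch (editor's addition, not part of revision
 * 270074; kept out of the build).  vm_get_stats() returns a counter
 * array whose entries pair up, index for index, with the descriptions
 * returned by vm_get_stat_desc(), so dumping them is a simple walk.
 */
#if 0
static void
example_dump_stats(struct vmctx *ctx, int vcpu)
{
	const uint64_t *stats;
	struct timeval tv;
	int i, num;

	stats = vm_get_stats(ctx, vcpu, &tv, &num);
	if (stats != NULL) {
		for (i = 0; i < num; i++)
			printf("%-40s\t%llu\n", vm_get_stat_desc(ctx, i),
			    (unsigned long long)stats[i]);
	}
}
#endif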
int
vm_get_x2apic_state(struct vmctx *ctx, int vcpu, enum x2apic_state *state)
{
	int error;
	struct vm_x2apic x2apic;

	bzero(&x2apic, sizeof(x2apic));
	x2apic.cpuid = vcpu;

	error = ioctl(ctx->fd, VM_GET_X2APIC_STATE, &x2apic);
	*state = x2apic.state;
	return (error);
}

int
vm_set_x2apic_state(struct vmctx *ctx, int vcpu, enum x2apic_state state)
{
	int error;
	struct vm_x2apic x2apic;

	bzero(&x2apic, sizeof(x2apic));
	x2apic.cpuid = vcpu;
	x2apic.state = state;

	error = ioctl(ctx->fd, VM_SET_X2APIC_STATE, &x2apic);

	return (error);
}

/*
 * From Intel Vol 3a:
 * Table 9-1. IA-32 Processor States Following Power-up, Reset or INIT
 */
int
vcpu_reset(struct vmctx *vmctx, int vcpu)
{
	int error;
	uint64_t rflags, rip, cr0, cr4, zero, desc_base, rdx;
	uint32_t desc_access, desc_limit;
	uint16_t sel;

	zero = 0;

	rflags = 0x2;
	error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RFLAGS, rflags);
	if (error)
		goto done;

	rip = 0xfff0;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RIP, rip)) != 0)
		goto done;

	cr0 = CR0_NE;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR0, cr0)) != 0)
		goto done;

	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR3, zero)) != 0)
		goto done;

	cr4 = 0;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR4, cr4)) != 0)
		goto done;

	/*
	 * CS: present, r/w, accessed, 16-bit, byte granularity, usable
	 */
	desc_base = 0xffff0000;
	desc_limit = 0xffff;
	desc_access = 0x0093;
	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_CS,
			    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	sel = 0xf000;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CS, sel)) != 0)
		goto done;

	/*
	 * SS,DS,ES,FS,GS: present, r/w, accessed, 16-bit, byte granularity
	 */
	desc_base = 0;
	desc_limit = 0xffff;
	desc_access = 0x0093;
	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_SS,
			    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_DS,
			    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_ES,
			    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_FS,
			    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_GS,
			    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	sel = 0;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_SS, sel)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_DS, sel)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_ES, sel)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_FS, sel)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_GS, sel)) != 0)
		goto done;

	/* General purpose registers */
	rdx = 0xf00;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RAX, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RBX, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RCX, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RDX, rdx)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RSI, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RDI, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RBP, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RSP, zero)) != 0)
		goto done;

	/* GDTR, IDTR */
	desc_base = 0;
	desc_limit = 0xffff;
	desc_access = 0;
	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_GDTR,
			    desc_base, desc_limit, desc_access);
	if (error != 0)
		goto done;

	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_IDTR,
			    desc_base, desc_limit, desc_access);
	if (error != 0)
		goto done;

	/* TR */
	desc_base = 0;
	desc_limit = 0xffff;
	desc_access = 0x0000008b;
	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_TR, 0, 0, desc_access);
	if (error)
		goto done;

	sel = 0;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_TR, sel)) != 0)
		goto done;

	/* LDTR */
	desc_base = 0;
	desc_limit = 0xffff;
	desc_access = 0x00000082;
	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_LDTR, desc_base,
			    desc_limit, desc_access);
	if (error)
		goto done;

	sel = 0;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_LDTR, 0)) != 0)
		goto done;

	/* XXX cr2, debug registers */

	error = 0;
done:
	return (error);
}

int
vm_get_gpa_pmap(struct vmctx *ctx, uint64_t gpa, uint64_t *pte, int *num)
{
	int error, i;
	struct vm_gpa_pte gpapte;

	bzero(&gpapte, sizeof(gpapte));
	gpapte.gpa = gpa;

	error = ioctl(ctx->fd, VM_GET_GPA_PMAP, &gpapte);

	if (error == 0) {
		*num = gpapte.ptenum;
		for (i = 0; i < gpapte.ptenum; i++)
			pte[i] = gpapte.pte[i];
	}

	return (error);
}

int
vm_get_hpet_capabilities(struct vmctx *ctx, uint32_t *capabilities)
{
	int error;
	struct vm_hpet_cap cap;

	bzero(&cap, sizeof(struct vm_hpet_cap));
	error = ioctl(ctx->fd, VM_GET_HPET_CAPABILITIES, &cap);
	if (capabilities != NULL)
		*capabilities = cap.capabilities;
	return (error);
}

static int
gla2gpa(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
    uint64_t gla, int prot, int *fault, uint64_t *gpa)
{
	struct vm_gla2gpa gg;
	int error;

	bzero(&gg, sizeof(struct vm_gla2gpa));
	gg.vcpuid = vcpu;
	gg.prot = prot;
	gg.gla = gla;
	gg.paging = *paging;

	error = ioctl(ctx->fd, VM_GLA2GPA, &gg);
	if (error == 0) {
		*fault = gg.fault;
		*gpa = gg.gpa;
	}
	return (error);
}

#ifndef min
#define	min(a,b)	(((a) < (b)) ? (a) : (b))
#endif

int
vm_gla2gpa(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
    uint64_t gla, size_t len, int prot, struct iovec *iov, int iovcnt)
{
	uint64_t gpa;
	int error, fault, i, n, off;

	for (i = 0; i < iovcnt; i++) {
		iov[i].iov_base = 0;
		iov[i].iov_len = 0;
	}

	while (len) {
		assert(iovcnt > 0);
		error = gla2gpa(ctx, vcpu, paging, gla, prot, &fault, &gpa);
		if (error)
			return (-1);
		if (fault)
			return (1);

		off = gpa & PAGE_MASK;
		n = min(len, PAGE_SIZE - off);

		iov->iov_base = (void *)gpa;
		iov->iov_len = n;
		iov++;
		iovcnt--;

		gla += n;
		len -= n;
	}
	return (0);
}

void
vm_copyin(struct vmctx *ctx, int vcpu, struct iovec *iov, void *vp, size_t len)
{
	const char *src;
	char *dst;
	uint64_t gpa;
	size_t n;

	dst = vp;
	while (len) {
		assert(iov->iov_len);
		gpa = (uint64_t)iov->iov_base;
		n = min(len, iov->iov_len);
		src = vm_map_gpa(ctx, gpa, n);
		bcopy(src, dst, n);

		iov++;
		dst += n;
		len -= n;
	}
}

void
vm_copyout(struct vmctx *ctx, int vcpu, const void *vp, struct iovec *iov,
    size_t len)
{
	const char *src;
	char *dst;
	uint64_t gpa;
	size_t n;

	src = vp;
	while (len) {
		assert(iov->iov_len);
		gpa = (uint64_t)iov->iov_base;
		n = min(len, iov->iov_len);
		dst = vm_map_gpa(ctx, gpa, n);
		bcopy(src, dst, n);

		iov++;
		src += n;
		len -= n;
	}
}
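
/*
 * Illustrative usage sketch (editor's addition, not part of revision
 * 270074; kept out of the build).  Reading guest virtual memory is a
 * two-step operation: vm_gla2gpa() scatters the linear range into
 * page-sized iovec entries holding guest physical addresses, and
 * vm_copyin() then walks those entries through vm_map_gpa().
 */
#if 0
static int
example_read_guest_virtual(struct vmctx *ctx, int vcpu,
    struct vm_guest_paging *paging, uint64_t gla, void *buf, size_t len)
{
	struct iovec iov[16];
	int error;

	error = vm_gla2gpa(ctx, vcpu, paging, gla, len, PROT_READ,
	    iov, nitems(iov));
	if (error)
		return (error);		/* -1 on error, 1 on page fault */
	vm_copyin(ctx, vcpu, iov, buf, len);
	return (0);
}
#endif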
static int
vm_get_cpus(struct vmctx *ctx, int which, cpuset_t *cpus)
{
	struct vm_cpuset vm_cpuset;
	int error;

	bzero(&vm_cpuset, sizeof(struct vm_cpuset));
	vm_cpuset.which = which;
	vm_cpuset.cpusetsize = sizeof(cpuset_t);
	vm_cpuset.cpus = cpus;

	error = ioctl(ctx->fd, VM_GET_CPUS, &vm_cpuset);
	return (error);
}

int
vm_active_cpus(struct vmctx *ctx, cpuset_t *cpus)
{

	return (vm_get_cpus(ctx, VM_ACTIVE_CPUS, cpus));
}

int
vm_suspended_cpus(struct vmctx *ctx, cpuset_t *cpus)
{

	return (vm_get_cpus(ctx, VM_SUSPENDED_CPUS, cpus));
}

int
vm_activate_cpu(struct vmctx *ctx, int vcpu)
{
	struct vm_activate_cpu ac;
	int error;

	bzero(&ac, sizeof(struct vm_activate_cpu));
	ac.vcpuid = vcpu;
	error = ioctl(ctx->fd, VM_ACTIVATE_CPU, &ac);
	return (error);
}
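
/*
 * Illustrative usage sketch (editor's addition, not part of revision
 * 270074; kept out of the build).  The cpuset_t filled in by
 * vm_active_cpus() is queried with the CPU_ISSET() macro from
 * <sys/cpuset.h>.
 */
#if 0
static int
example_vcpu_is_active(struct vmctx *ctx, int vcpu)
{
	cpuset_t active;

	if (vm_active_cpus(ctx, &active) != 0)
		return (0);
	return (CPU_ISSET(vcpu, &active));
}
#endif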