/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/10/lib/libvmmapi/vmmapi.c 270159 2014-08-19 01:20:24Z grehan $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/lib/libvmmapi/vmmapi.c 270159 2014-08-19 01:20:24Z grehan $");

#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/_iovec.h>
#include <sys/cpuset.h>

#include <x86/segments.h>
#include <machine/specialreg.h>
#include <machine/param.h>

#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

#include <libutil.h>

#include <machine/vmm.h>
#include <machine/vmm_dev.h>

#include "vmmapi.h"

#define	MB	(1024 * 1024UL)
#define	GB	(1024 * 1024 * 1024UL)

struct vmctx {
	int	fd;
	uint32_t lowmem_limit;
	enum vm_mmap_style vms;
	int	memflags;
	size_t	lowmem;
	char	*lowmem_addr;
	size_t	highmem;
	char	*highmem_addr;
	char	*name;
};

#define	CREATE(x)  sysctlbyname("hw.vmm.create", NULL, NULL, (x), strlen((x)))
#define	DESTROY(x) sysctlbyname("hw.vmm.destroy", NULL, NULL, (x), strlen((x)))

static int
vm_device_open(const char *name)
{
	int fd, len;
	char *vmfile;

	len = strlen("/dev/vmm/") + strlen(name) + 1;
	vmfile = malloc(len);
	assert(vmfile != NULL);
	snprintf(vmfile, len, "/dev/vmm/%s", name);

	/* Open the device file */
	fd = open(vmfile, O_RDWR, 0);

	free(vmfile);
	return (fd);
}

int
vm_create(const char *name)
{

	return (CREATE((char *)name));
}

struct vmctx *
vm_open(const char *name)
{
	struct vmctx *vm;

	vm = malloc(sizeof(struct vmctx) + strlen(name) + 1);
	assert(vm != NULL);

	vm->fd = -1;
	vm->memflags = 0;
	vm->lowmem_limit = 3 * GB;
	vm->name = (char *)(vm + 1);
	strcpy(vm->name, name);

	if ((vm->fd = vm_device_open(vm->name)) < 0)
		goto err;

	return (vm);
err:
	vm_destroy(vm);
	return (NULL);
}

void
vm_destroy(struct vmctx *vm)
{
	assert(vm != NULL);

	if (vm->fd >= 0)
		close(vm->fd);
	DESTROY(vm->name);

	free(vm);
}
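
/*
 * Usage sketch (illustrative only, not part of the library): the typical
 * lifecycle pairs vm_create()/vm_open() with vm_destroy().  The VM name
 * "testvm" below is a hypothetical example.
 *
 *	struct vmctx *ctx;
 *
 *	if (vm_create("testvm") != 0)
 *		err(1, "vm_create");		// requires <err.h>
 *	if ((ctx = vm_open("testvm")) == NULL)
 *		err(1, "vm_open");
 *	// ... configure and run the guest ...
 *	vm_destroy(ctx);	// also destroys the kernel-side VM
 */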

int
vm_parse_memsize(const char *optarg, size_t *ret_memsize)
{
	char *endptr;
	size_t optval;
	int error;

	optval = strtoul(optarg, &endptr, 0);
	if (*optarg != '\0' && *endptr == '\0') {
		/*
		 * For the sake of backward compatibility, if the memory size
		 * specified on the command line is less than a megabyte then
		 * it is interpreted as being in units of MB.
		 */
		if (optval < MB)
			optval *= MB;
		*ret_memsize = optval;
		error = 0;
	} else
		error = expand_number(optarg, ret_memsize);

	return (error);
}
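
/*
 * Examples (illustrative): with the rule above, "256" parses as 256 MB
 * (backward compatible), while "2147483648" parses as exactly 2 GB.
 * Suffixed forms such as "2g" or "512m" fall through to expand_number(3).
 */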

int
vm_get_memory_seg(struct vmctx *ctx, vm_paddr_t gpa, size_t *ret_len,
		  int *wired)
{
	int error;
	struct vm_memory_segment seg;

	bzero(&seg, sizeof(seg));
	seg.gpa = gpa;
	error = ioctl(ctx->fd, VM_GET_MEMORY_SEG, &seg);
	*ret_len = seg.len;
	if (wired != NULL)
		*wired = seg.wired;
	return (error);
}

uint32_t
vm_get_lowmem_limit(struct vmctx *ctx)
{

	return (ctx->lowmem_limit);
}

void
vm_set_lowmem_limit(struct vmctx *ctx, uint32_t limit)
{

	ctx->lowmem_limit = limit;
}

void
vm_set_memflags(struct vmctx *ctx, int flags)
{

	ctx->memflags = flags;
}

static int
setup_memory_segment(struct vmctx *ctx, vm_paddr_t gpa, size_t len, char **addr)
{
	int error, mmap_flags;
	struct vm_memory_segment seg;

	/*
	 * Create and optionally map 'len' bytes of memory at guest
	 * physical address 'gpa'
	 */
	bzero(&seg, sizeof(seg));
	seg.gpa = gpa;
	seg.len = len;
	error = ioctl(ctx->fd, VM_MAP_MEMORY, &seg);
	if (error == 0 && addr != NULL) {
		mmap_flags = MAP_SHARED;
		if ((ctx->memflags & VM_MEM_F_INCORE) == 0)
			mmap_flags |= MAP_NOCORE;
		*addr = mmap(NULL, len, PROT_READ | PROT_WRITE, mmap_flags,
		    ctx->fd, gpa);
		if (*addr == MAP_FAILED)
			error = -1;	/* errno is set by mmap(2) */
	}
	return (error);
}

int
vm_setup_memory(struct vmctx *ctx, size_t memsize, enum vm_mmap_style vms)
{
	char **addr;
	int error;

	/* XXX VM_MMAP_SPARSE not implemented yet */
	assert(vms == VM_MMAP_NONE || vms == VM_MMAP_ALL);
	ctx->vms = vms;

	/*
	 * If 'memsize' cannot fit entirely in the 'lowmem' segment then
	 * create another 'highmem' segment above 4GB for the remainder.
	 */
	if (memsize > ctx->lowmem_limit) {
		ctx->lowmem = ctx->lowmem_limit;
		ctx->highmem = memsize - ctx->lowmem;
	} else {
		ctx->lowmem = memsize;
		ctx->highmem = 0;
	}

	if (ctx->lowmem > 0) {
		addr = (vms == VM_MMAP_ALL) ? &ctx->lowmem_addr : NULL;
		error = setup_memory_segment(ctx, 0, ctx->lowmem, addr);
		if (error)
			return (error);
	}

	if (ctx->highmem > 0) {
		addr = (vms == VM_MMAP_ALL) ? &ctx->highmem_addr : NULL;
		error = setup_memory_segment(ctx, 4*GB, ctx->highmem, addr);
		if (error)
			return (error);
	}

	return (0);
}
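
/*
 * Example (illustrative): with the default 3 GB lowmem limit, a request
 * for 4 GB of guest memory is split into a 3 GB segment at gpa 0 and a
 * 1 GB segment starting at gpa 4 GB, leaving a hole below 4 GB for
 * device memory such as PCI BARs and the local APIC:
 *
 *	if (vm_setup_memory(ctx, 4 * GB, VM_MMAP_ALL) != 0)
 *		err(1, "vm_setup_memory");
 */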

void *
vm_map_gpa(struct vmctx *ctx, vm_paddr_t gaddr, size_t len)
{

	/* XXX VM_MMAP_SPARSE not implemented yet */
	assert(ctx->vms == VM_MMAP_ALL);

	if (gaddr < ctx->lowmem && gaddr + len <= ctx->lowmem)
		return ((void *)(ctx->lowmem_addr + gaddr));

	if (gaddr >= 4*GB) {
		gaddr -= 4*GB;
		if (gaddr < ctx->highmem && gaddr + len <= ctx->highmem)
			return ((void *)(ctx->highmem_addr + gaddr));
	}

	return (NULL);
}
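
/*
 * Example (illustrative): a device model that wants to read a 512-byte
 * block at guest physical address 0x1000 can translate it to a host
 * virtual address and access it directly:
 *
 *	void *hva = vm_map_gpa(ctx, 0x1000, 512);
 *	if (hva != NULL)
 *		memcpy(buf, hva, 512);	// 'buf' is hypothetical
 */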

size_t
vm_get_lowmem_size(struct vmctx *ctx)
{

	return (ctx->lowmem);
}

size_t
vm_get_highmem_size(struct vmctx *ctx)
{

	return (ctx->highmem);
}

int
vm_set_desc(struct vmctx *ctx, int vcpu, int reg,
	    uint64_t base, uint32_t limit, uint32_t access)
{
	int error;
	struct vm_seg_desc vmsegdesc;

	bzero(&vmsegdesc, sizeof(vmsegdesc));
	vmsegdesc.cpuid = vcpu;
	vmsegdesc.regnum = reg;
	vmsegdesc.desc.base = base;
	vmsegdesc.desc.limit = limit;
	vmsegdesc.desc.access = access;

	error = ioctl(ctx->fd, VM_SET_SEGMENT_DESCRIPTOR, &vmsegdesc);
	return (error);
}

int
vm_get_desc(struct vmctx *ctx, int vcpu, int reg,
	    uint64_t *base, uint32_t *limit, uint32_t *access)
{
	int error;
	struct vm_seg_desc vmsegdesc;

	bzero(&vmsegdesc, sizeof(vmsegdesc));
	vmsegdesc.cpuid = vcpu;
	vmsegdesc.regnum = reg;

	error = ioctl(ctx->fd, VM_GET_SEGMENT_DESCRIPTOR, &vmsegdesc);
	if (error == 0) {
		*base = vmsegdesc.desc.base;
		*limit = vmsegdesc.desc.limit;
		*access = vmsegdesc.desc.access;
	}
	return (error);
}

int
vm_get_seg_desc(struct vmctx *ctx, int vcpu, int reg, struct seg_desc *seg_desc)
{
	int error;

	error = vm_get_desc(ctx, vcpu, reg, &seg_desc->base, &seg_desc->limit,
	    &seg_desc->access);
	return (error);
}

int
vm_set_register(struct vmctx *ctx, int vcpu, int reg, uint64_t val)
{
	int error;
	struct vm_register vmreg;

	bzero(&vmreg, sizeof(vmreg));
	vmreg.cpuid = vcpu;
	vmreg.regnum = reg;
	vmreg.regval = val;

	error = ioctl(ctx->fd, VM_SET_REGISTER, &vmreg);
	return (error);
}

int
vm_get_register(struct vmctx *ctx, int vcpu, int reg, uint64_t *ret_val)
{
	int error;
	struct vm_register vmreg;

	bzero(&vmreg, sizeof(vmreg));
	vmreg.cpuid = vcpu;
	vmreg.regnum = reg;

	error = ioctl(ctx->fd, VM_GET_REGISTER, &vmreg);
	*ret_val = vmreg.regval;
	return (error);
}

int
vm_run(struct vmctx *ctx, int vcpu, uint64_t rip, struct vm_exit *vmexit)
{
	int error;
	struct vm_run vmrun;

	bzero(&vmrun, sizeof(vmrun));
	vmrun.cpuid = vcpu;
	vmrun.rip = rip;

	error = ioctl(ctx->fd, VM_RUN, &vmrun);
	bcopy(&vmrun.vm_exit, vmexit, sizeof(struct vm_exit));
	return (error);
}
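
/*
 * Usage sketch (illustrative): a hypervisor's per-vcpu loop alternates
 * vm_run() with exit handling.  The exit-handling details below are
 * hypothetical; see bhyve(8) for a complete example.
 *
 *	uint64_t rip = 0xfff0;		// reset vector offset within CS
 *	struct vm_exit vme;
 *
 *	for (;;) {
 *		if (vm_run(ctx, vcpu, rip, &vme) != 0)
 *			break;
 *		// ... dispatch on vme.exitcode ...
 *		rip = vme.rip + vme.inst_length; // e.g. skip an emulated insn
 *	}
 */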

int
vm_suspend(struct vmctx *ctx, enum vm_suspend_how how)
{
	struct vm_suspend vmsuspend;

	bzero(&vmsuspend, sizeof(vmsuspend));
	vmsuspend.how = how;
	return (ioctl(ctx->fd, VM_SUSPEND, &vmsuspend));
}

int
vm_reinit(struct vmctx *ctx)
{

	return (ioctl(ctx->fd, VM_REINIT, 0));
}

static int
vm_inject_exception_real(struct vmctx *ctx, int vcpu, int vector,
    int error_code, int error_code_valid)
{
	struct vm_exception exc;

	bzero(&exc, sizeof(exc));
	exc.cpuid = vcpu;
	exc.vector = vector;
	exc.error_code = error_code;
	exc.error_code_valid = error_code_valid;

	return (ioctl(ctx->fd, VM_INJECT_EXCEPTION, &exc));
}

int
vm_inject_exception(struct vmctx *ctx, int vcpu, int vector)
{

	return (vm_inject_exception_real(ctx, vcpu, vector, 0, 0));
}

int
vm_inject_exception2(struct vmctx *ctx, int vcpu, int vector, int errcode)
{

	return (vm_inject_exception_real(ctx, vcpu, vector, errcode, 1));
}

int
vm_apicid2vcpu(struct vmctx *ctx, int apicid)
{
	/*
	 * The apic id associated with the 'vcpu' has the same numerical value
	 * as the 'vcpu' itself.
	 */
	return (apicid);
}

int
vm_lapic_irq(struct vmctx *ctx, int vcpu, int vector)
{
	struct vm_lapic_irq vmirq;

	bzero(&vmirq, sizeof(vmirq));
	vmirq.cpuid = vcpu;
	vmirq.vector = vector;

	return (ioctl(ctx->fd, VM_LAPIC_IRQ, &vmirq));
}

int
vm_lapic_local_irq(struct vmctx *ctx, int vcpu, int vector)
{
	struct vm_lapic_irq vmirq;

	bzero(&vmirq, sizeof(vmirq));
	vmirq.cpuid = vcpu;
	vmirq.vector = vector;

	return (ioctl(ctx->fd, VM_LAPIC_LOCAL_IRQ, &vmirq));
}

int
vm_lapic_msi(struct vmctx *ctx, uint64_t addr, uint64_t msg)
{
	struct vm_lapic_msi vmmsi;

	bzero(&vmmsi, sizeof(vmmsi));
	vmmsi.addr = addr;
	vmmsi.msg = msg;

	return (ioctl(ctx->fd, VM_LAPIC_MSI, &vmmsi));
}

int
vm_ioapic_assert_irq(struct vmctx *ctx, int irq)
{
	struct vm_ioapic_irq ioapic_irq;

	bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq));
	ioapic_irq.irq = irq;

	return (ioctl(ctx->fd, VM_IOAPIC_ASSERT_IRQ, &ioapic_irq));
}

int
vm_ioapic_deassert_irq(struct vmctx *ctx, int irq)
{
	struct vm_ioapic_irq ioapic_irq;

	bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq));
	ioapic_irq.irq = irq;

	return (ioctl(ctx->fd, VM_IOAPIC_DEASSERT_IRQ, &ioapic_irq));
}

int
vm_ioapic_pulse_irq(struct vmctx *ctx, int irq)
{
	struct vm_ioapic_irq ioapic_irq;

	bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq));
	ioapic_irq.irq = irq;

	return (ioctl(ctx->fd, VM_IOAPIC_PULSE_IRQ, &ioapic_irq));
}

int
vm_ioapic_pincount(struct vmctx *ctx, int *pincount)
{

	return (ioctl(ctx->fd, VM_IOAPIC_PINCOUNT, pincount));
}

int
vm_isa_assert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
{
	struct vm_isa_irq isa_irq;

	bzero(&isa_irq, sizeof(struct vm_isa_irq));
	isa_irq.atpic_irq = atpic_irq;
	isa_irq.ioapic_irq = ioapic_irq;

	return (ioctl(ctx->fd, VM_ISA_ASSERT_IRQ, &isa_irq));
}

int
vm_isa_deassert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
{
	struct vm_isa_irq isa_irq;

	bzero(&isa_irq, sizeof(struct vm_isa_irq));
	isa_irq.atpic_irq = atpic_irq;
	isa_irq.ioapic_irq = ioapic_irq;

	return (ioctl(ctx->fd, VM_ISA_DEASSERT_IRQ, &isa_irq));
}

int
vm_isa_pulse_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
{
	struct vm_isa_irq isa_irq;

	bzero(&isa_irq, sizeof(struct vm_isa_irq));
	isa_irq.atpic_irq = atpic_irq;
	isa_irq.ioapic_irq = ioapic_irq;

	return (ioctl(ctx->fd, VM_ISA_PULSE_IRQ, &isa_irq));
}

int
vm_isa_set_irq_trigger(struct vmctx *ctx, int atpic_irq,
    enum vm_intr_trigger trigger)
{
	struct vm_isa_irq_trigger isa_irq_trigger;

	bzero(&isa_irq_trigger, sizeof(struct vm_isa_irq_trigger));
	isa_irq_trigger.atpic_irq = atpic_irq;
	isa_irq_trigger.trigger = trigger;

	return (ioctl(ctx->fd, VM_ISA_SET_IRQ_TRIGGER, &isa_irq_trigger));
}

int
vm_inject_nmi(struct vmctx *ctx, int vcpu)
{
	struct vm_nmi vmnmi;

	bzero(&vmnmi, sizeof(vmnmi));
	vmnmi.cpuid = vcpu;

	return (ioctl(ctx->fd, VM_INJECT_NMI, &vmnmi));
}

static struct {
	const char	*name;
	int		type;
} capstrmap[] = {
	{ "hlt_exit",		VM_CAP_HALT_EXIT },
	{ "mtrap_exit",		VM_CAP_MTRAP_EXIT },
	{ "pause_exit",		VM_CAP_PAUSE_EXIT },
	{ "unrestricted_guest",	VM_CAP_UNRESTRICTED_GUEST },
	{ "enable_invpcid",	VM_CAP_ENABLE_INVPCID },
	{ 0 }
};

int
vm_capability_name2type(const char *capname)
{
	int i;

	for (i = 0; capstrmap[i].name != NULL && capname != NULL; i++) {
		if (strcmp(capstrmap[i].name, capname) == 0)
			return (capstrmap[i].type);
	}

	return (-1);
}

const char *
vm_capability_type2name(int type)
{
	int i;

	for (i = 0; capstrmap[i].name != NULL; i++) {
		if (capstrmap[i].type == type)
			return (capstrmap[i].name);
	}

	return (NULL);
}

int
vm_get_capability(struct vmctx *ctx, int vcpu, enum vm_cap_type cap,
		  int *retval)
{
	int error;
	struct vm_capability vmcap;

	bzero(&vmcap, sizeof(vmcap));
	vmcap.cpuid = vcpu;
	vmcap.captype = cap;

	error = ioctl(ctx->fd, VM_GET_CAPABILITY, &vmcap);
	*retval = vmcap.capval;
	return (error);
}

int
vm_set_capability(struct vmctx *ctx, int vcpu, enum vm_cap_type cap, int val)
{
	struct vm_capability vmcap;

	bzero(&vmcap, sizeof(vmcap));
	vmcap.cpuid = vcpu;
	vmcap.captype = cap;
	vmcap.capval = val;

	return (ioctl(ctx->fd, VM_SET_CAPABILITY, &vmcap));
}
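
/*
 * Example (illustrative): capability names map to vm_cap_type values via
 * capstrmap above, so a string taken from the command line can be used
 * to toggle a capability on a vcpu:
 *
 *	int cap = vm_capability_name2type("hlt_exit");
 *
 *	if (cap >= 0 && vm_set_capability(ctx, vcpu, cap, 1) == 0)
 *		;	// HLT in the guest now causes a VM exit
 */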

int
vm_assign_pptdev(struct vmctx *ctx, int bus, int slot, int func)
{
	struct vm_pptdev pptdev;

	bzero(&pptdev, sizeof(pptdev));
	pptdev.bus = bus;
	pptdev.slot = slot;
	pptdev.func = func;

	return (ioctl(ctx->fd, VM_BIND_PPTDEV, &pptdev));
}

int
vm_unassign_pptdev(struct vmctx *ctx, int bus, int slot, int func)
{
	struct vm_pptdev pptdev;

	bzero(&pptdev, sizeof(pptdev));
	pptdev.bus = bus;
	pptdev.slot = slot;
	pptdev.func = func;

	return (ioctl(ctx->fd, VM_UNBIND_PPTDEV, &pptdev));
}

int
vm_map_pptdev_mmio(struct vmctx *ctx, int bus, int slot, int func,
		   vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
{
	struct vm_pptdev_mmio pptmmio;

	bzero(&pptmmio, sizeof(pptmmio));
	pptmmio.bus = bus;
	pptmmio.slot = slot;
	pptmmio.func = func;
	pptmmio.gpa = gpa;
	pptmmio.len = len;
	pptmmio.hpa = hpa;

	return (ioctl(ctx->fd, VM_MAP_PPTDEV_MMIO, &pptmmio));
}

int
vm_setup_pptdev_msi(struct vmctx *ctx, int vcpu, int bus, int slot, int func,
    uint64_t addr, uint64_t msg, int numvec)
{
	struct vm_pptdev_msi pptmsi;

	bzero(&pptmsi, sizeof(pptmsi));
	pptmsi.vcpu = vcpu;
	pptmsi.bus = bus;
	pptmsi.slot = slot;
	pptmsi.func = func;
	pptmsi.msg = msg;
	pptmsi.addr = addr;
	pptmsi.numvec = numvec;

	return (ioctl(ctx->fd, VM_PPTDEV_MSI, &pptmsi));
}

int
vm_setup_pptdev_msix(struct vmctx *ctx, int vcpu, int bus, int slot, int func,
    int idx, uint64_t addr, uint64_t msg, uint32_t vector_control)
{
	struct vm_pptdev_msix pptmsix;

	bzero(&pptmsix, sizeof(pptmsix));
	pptmsix.vcpu = vcpu;
	pptmsix.bus = bus;
	pptmsix.slot = slot;
	pptmsix.func = func;
	pptmsix.idx = idx;
	pptmsix.msg = msg;
	pptmsix.addr = addr;
	pptmsix.vector_control = vector_control;

	return (ioctl(ctx->fd, VM_PPTDEV_MSIX, &pptmsix));
}

uint64_t *
vm_get_stats(struct vmctx *ctx, int vcpu, struct timeval *ret_tv,
	     int *ret_entries)
{
	int error;

	static struct vm_stats vmstats;

	vmstats.cpuid = vcpu;

	error = ioctl(ctx->fd, VM_STATS, &vmstats);
	if (error == 0) {
		if (ret_entries)
			*ret_entries = vmstats.num_entries;
		if (ret_tv)
			*ret_tv = vmstats.tv;
		return (vmstats.statbuf);
	} else
		return (NULL);
}

const char *
vm_get_stat_desc(struct vmctx *ctx, int index)
{
	static struct vm_stat_desc statdesc;

	statdesc.index = index;
	if (ioctl(ctx->fd, VM_STAT_DESC, &statdesc) == 0)
		return (statdesc.desc);
	else
		return (NULL);
}
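
/*
 * Usage sketch (illustrative): dump all statistics for a vcpu by pairing
 * vm_get_stats() with vm_get_stat_desc().  Both routines return pointers
 * into static buffers, so they are not thread-safe.
 *
 *	uint64_t *stats;
 *	int i, num;
 *
 *	if ((stats = vm_get_stats(ctx, vcpu, NULL, &num)) != NULL) {
 *		for (i = 0; i < num; i++)
 *			printf("%s: %lu\n", vm_get_stat_desc(ctx, i),
 *			    stats[i]);
 *	}
 */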

int
vm_get_x2apic_state(struct vmctx *ctx, int vcpu, enum x2apic_state *state)
{
	int error;
	struct vm_x2apic x2apic;

	bzero(&x2apic, sizeof(x2apic));
	x2apic.cpuid = vcpu;

	error = ioctl(ctx->fd, VM_GET_X2APIC_STATE, &x2apic);
	*state = x2apic.state;
	return (error);
}

int
vm_set_x2apic_state(struct vmctx *ctx, int vcpu, enum x2apic_state state)
{
	int error;
	struct vm_x2apic x2apic;

	bzero(&x2apic, sizeof(x2apic));
	x2apic.cpuid = vcpu;
	x2apic.state = state;

	error = ioctl(ctx->fd, VM_SET_X2APIC_STATE, &x2apic);

	return (error);
}

/*
 * From Intel Vol 3a:
 * Table 9-1. IA-32 Processor States Following Power-up, Reset or INIT
 */
int
vcpu_reset(struct vmctx *vmctx, int vcpu)
{
	int error;
	uint64_t rflags, rip, cr0, cr4, zero, desc_base, rdx;
	uint32_t desc_access, desc_limit;
	uint16_t sel;

	zero = 0;

	rflags = 0x2;
	error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RFLAGS, rflags);
	if (error)
		goto done;

	rip = 0xfff0;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RIP, rip)) != 0)
		goto done;

	cr0 = CR0_NE;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR0, cr0)) != 0)
		goto done;

	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR3, zero)) != 0)
		goto done;

	cr4 = 0;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR4, cr4)) != 0)
		goto done;

	/*
	 * CS: present, r/w, accessed, 16-bit, byte granularity, usable
	 */
	desc_base = 0xffff0000;
	desc_limit = 0xffff;
	desc_access = 0x0093;
	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_CS,
			    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	sel = 0xf000;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CS, sel)) != 0)
		goto done;

	/*
	 * SS,DS,ES,FS,GS: present, r/w, accessed, 16-bit, byte granularity
	 */
	desc_base = 0;
	desc_limit = 0xffff;
	desc_access = 0x0093;
	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_SS,
			    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_DS,
			    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_ES,
			    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_FS,
			    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_GS,
			    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	sel = 0;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_SS, sel)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_DS, sel)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_ES, sel)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_FS, sel)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_GS, sel)) != 0)
		goto done;

	/* General purpose registers */
	rdx = 0xf00;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RAX, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RBX, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RCX, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RDX, rdx)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RSI, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RDI, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RBP, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RSP, zero)) != 0)
		goto done;

	/* GDTR, IDTR */
	desc_base = 0;
	desc_limit = 0xffff;
	desc_access = 0;
	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_GDTR,
			    desc_base, desc_limit, desc_access);
	if (error != 0)
		goto done;

	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_IDTR,
			    desc_base, desc_limit, desc_access);
	if (error != 0)
		goto done;

	/* TR */
	desc_base = 0;
	desc_limit = 0xffff;
	desc_access = 0x0000008b;
	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_TR, desc_base,
			    desc_limit, desc_access);
	if (error)
		goto done;

	sel = 0;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_TR, sel)) != 0)
		goto done;

	/* LDTR */
	desc_base = 0;
	desc_limit = 0xffff;
	desc_access = 0x00000082;
	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_LDTR, desc_base,
			    desc_limit, desc_access);
	if (error)
		goto done;

	sel = 0;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_LDTR, sel)) != 0)
		goto done;

	/* XXX cr2, debug registers */

	error = 0;
done:
	return (error);
}

int
vm_get_gpa_pmap(struct vmctx *ctx, uint64_t gpa, uint64_t *pte, int *num)
{
	int error, i;
	struct vm_gpa_pte gpapte;

	bzero(&gpapte, sizeof(gpapte));
	gpapte.gpa = gpa;

	error = ioctl(ctx->fd, VM_GET_GPA_PMAP, &gpapte);

	if (error == 0) {
		*num = gpapte.ptenum;
		for (i = 0; i < gpapte.ptenum; i++)
			pte[i] = gpapte.pte[i];
	}

	return (error);
}

int
vm_get_hpet_capabilities(struct vmctx *ctx, uint32_t *capabilities)
{
	int error;
	struct vm_hpet_cap cap;

	bzero(&cap, sizeof(struct vm_hpet_cap));
	error = ioctl(ctx->fd, VM_GET_HPET_CAPABILITIES, &cap);
	if (capabilities != NULL)
		*capabilities = cap.capabilities;
	return (error);
}

static int
gla2gpa(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
    uint64_t gla, int prot, int *fault, uint64_t *gpa)
{
	struct vm_gla2gpa gg;
	int error;

	bzero(&gg, sizeof(struct vm_gla2gpa));
	gg.vcpuid = vcpu;
	gg.prot = prot;
	gg.gla = gla;
	gg.paging = *paging;

	error = ioctl(ctx->fd, VM_GLA2GPA, &gg);
	if (error == 0) {
		*fault = gg.fault;
		*gpa = gg.gpa;
	}
	return (error);
}

#ifndef min
#define	min(a,b)	(((a) < (b)) ? (a) : (b))
#endif

int
vm_copy_setup(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
    uint64_t gla, size_t len, int prot, struct iovec *iov, int iovcnt)
{
	uint64_t gpa;
	int error, fault, i, n, off;

	for (i = 0; i < iovcnt; i++) {
		iov[i].iov_base = 0;
		iov[i].iov_len = 0;
	}

	while (len) {
		assert(iovcnt > 0);
		error = gla2gpa(ctx, vcpu, paging, gla, prot, &fault, &gpa);
		if (error)
			return (-1);
		if (fault)
			return (1);

		off = gpa & PAGE_MASK;
		n = min(len, PAGE_SIZE - off);

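		/*
		 * Note: iov_base holds the guest physical address at this
		 * point; vm_copyin() and vm_copyout() translate it to a
		 * host address later via vm_map_gpa().
		 */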
		iov->iov_base = (void *)gpa;
		iov->iov_len = n;
		iov++;
		iovcnt--;

		gla += n;
		len -= n;
	}
	return (0);
}
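
/*
 * Usage sketch (illustrative): an instruction emulator that needs to read
 * a guest structure possibly crossing a page boundary first gathers the
 * pages with vm_copy_setup() and then copies through the iovec.  'paging'
 * would be filled in from the current vcpu state; two entries suffice for
 * an object smaller than a page.
 *
 *	struct iovec iov[2];
 *	char buf[64];
 *
 *	if (vm_copy_setup(ctx, vcpu, &paging, gla, sizeof(buf),
 *	    PROT_READ, iov, 2) == 0)
 *		vm_copyin(ctx, vcpu, iov, buf, sizeof(buf));
 */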

void
vm_copyin(struct vmctx *ctx, int vcpu, struct iovec *iov, void *vp, size_t len)
{
	const char *src;
	char *dst;
	uint64_t gpa;
	size_t n;

	dst = vp;
	while (len) {
		assert(iov->iov_len);
		gpa = (uint64_t)iov->iov_base;
		n = min(len, iov->iov_len);
		src = vm_map_gpa(ctx, gpa, n);
		bcopy(src, dst, n);

		iov++;
		dst += n;
		len -= n;
	}
}

void
vm_copyout(struct vmctx *ctx, int vcpu, const void *vp, struct iovec *iov,
    size_t len)
{
	const char *src;
	char *dst;
	uint64_t gpa;
	size_t n;

	src = vp;
	while (len) {
		assert(iov->iov_len);
		gpa = (uint64_t)iov->iov_base;
		n = min(len, iov->iov_len);
		dst = vm_map_gpa(ctx, gpa, n);
		bcopy(src, dst, n);

		iov++;
		src += n;
		len -= n;
	}
}

static int
vm_get_cpus(struct vmctx *ctx, int which, cpuset_t *cpus)
{
	struct vm_cpuset vm_cpuset;
	int error;

	bzero(&vm_cpuset, sizeof(struct vm_cpuset));
	vm_cpuset.which = which;
	vm_cpuset.cpusetsize = sizeof(cpuset_t);
	vm_cpuset.cpus = cpus;

	error = ioctl(ctx->fd, VM_GET_CPUS, &vm_cpuset);
	return (error);
}

int
vm_active_cpus(struct vmctx *ctx, cpuset_t *cpus)
{

	return (vm_get_cpus(ctx, VM_ACTIVE_CPUS, cpus));
}

int
vm_suspended_cpus(struct vmctx *ctx, cpuset_t *cpus)
{

	return (vm_get_cpus(ctx, VM_SUSPENDED_CPUS, cpus));
}

int
vm_activate_cpu(struct vmctx *ctx, int vcpu)
{
	struct vm_activate_cpu ac;
	int error;

	bzero(&ac, sizeof(struct vm_activate_cpu));
	ac.vcpuid = vcpu;
	error = ioctl(ctx->fd, VM_ACTIVATE_CPU, &ac);
	return (error);
}

int
vm_get_intinfo(struct vmctx *ctx, int vcpu, uint64_t *info1, uint64_t *info2)
{
	struct vm_intinfo vmii;
	int error;

	bzero(&vmii, sizeof(struct vm_intinfo));
	vmii.vcpuid = vcpu;
	error = ioctl(ctx->fd, VM_GET_INTINFO, &vmii);
	if (error == 0) {
		*info1 = vmii.info1;
		*info2 = vmii.info2;
	}
	return (error);
}

int
vm_set_intinfo(struct vmctx *ctx, int vcpu, uint64_t info1)
{
	struct vm_intinfo vmii;
	int error;

	bzero(&vmii, sizeof(struct vm_intinfo));
	vmii.vcpuid = vcpu;
	vmii.info1 = info1;
	error = ioctl(ctx->fd, VM_SET_INTINFO, &vmii);
	return (error);
}
1149