vmmapi.c revision 268976
/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/10/lib/libvmmapi/vmmapi.c 268976 2014-07-22 04:39:16Z jhb $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/lib/libvmmapi/vmmapi.c 268976 2014-07-22 04:39:16Z jhb $");

#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/_iovec.h>

#include <machine/specialreg.h>
#include <machine/param.h>

#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

#include <libutil.h>

#include <machine/vmm.h>
#include <machine/vmm_dev.h>

#include "vmmapi.h"

#define	MB	(1024 * 1024UL)
#define	GB	(1024 * 1024 * 1024UL)

struct vmctx {
	int	fd;			/* open descriptor for /dev/vmm/<name> */
	uint32_t lowmem_limit;		/* top of the 'lowmem' segment */
	enum vm_mmap_style vms;
	int	memflags;
	size_t	lowmem;			/* guest memory below lowmem_limit */
	char	*lowmem_addr;		/* host mapping of the lowmem segment */
	size_t	highmem;		/* guest memory above 4GB */
	char	*highmem_addr;		/* host mapping of the highmem segment */
	char	*name;
};

#define	CREATE(x)  sysctlbyname("hw.vmm.create", NULL, NULL, (x), strlen((x)))
#define	DESTROY(x) sysctlbyname("hw.vmm.destroy", NULL, NULL, (x), strlen((x)))

static int
vm_device_open(const char *name)
{
	int fd, len;
	char *vmfile;

	len = strlen("/dev/vmm/") + strlen(name) + 1;
	vmfile = malloc(len);
	assert(vmfile != NULL);
	snprintf(vmfile, len, "/dev/vmm/%s", name);

	/* Open the device file */
	fd = open(vmfile, O_RDWR, 0);

	free(vmfile);
	return (fd);
}

int
vm_create(const char *name)
{

	return (CREATE((char *)name));
}

struct vmctx *
vm_open(const char *name)
{
	struct vmctx *vm;

	vm = malloc(sizeof(struct vmctx) + strlen(name) + 1);
	assert(vm != NULL);

	vm->fd = -1;
	vm->memflags = 0;
	vm->lowmem_limit = 3 * GB;
	vm->name = (char *)(vm + 1);
	strcpy(vm->name, name);

	if ((vm->fd = vm_device_open(vm->name)) < 0)
		goto err;

	return (vm);
err:
	vm_destroy(vm);
	return (NULL);
}

void
vm_destroy(struct vmctx *vm)
{
	assert(vm != NULL);

	if (vm->fd >= 0)
		close(vm->fd);
	DESTROY(vm->name);

	free(vm);
}
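
/*
 * Illustrative sketch (not part of the library): a minimal consumer of the
 * create/open/destroy calls above.  The VM name "testvm" and the 256MB size
 * are arbitrary values chosen for the example; err(3) needs <err.h>.
 *
 *	struct vmctx *ctx;
 *
 *	if (vm_create("testvm") != 0)
 *		err(1, "vm_create");
 *	if ((ctx = vm_open("testvm")) == NULL)
 *		err(1, "vm_open");
 *	if (vm_setup_memory(ctx, 256 * MB, VM_MMAP_ALL) != 0)
 *		err(1, "vm_setup_memory");
 *	...
 *	vm_destroy(ctx);
 */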

int
vm_parse_memsize(const char *optarg, size_t *ret_memsize)
{
	char *endptr;
	size_t optval;
	int error;

	optval = strtoul(optarg, &endptr, 0);
	if (*optarg != '\0' && *endptr == '\0') {
		/*
		 * For the sake of backward compatibility if the memory size
		 * specified on the command line is less than a megabyte then
		 * it is interpreted as being in units of MB.
		 */
		if (optval < MB)
			optval *= MB;
		*ret_memsize = optval;
		error = 0;
	} else
		error = expand_number(optarg, ret_memsize);

	return (error);
}
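
/*
 * Illustrative examples of the parsing rules above (a sketch, not library
 * code):
 *
 *	size_t memsize;
 *
 *	vm_parse_memsize("512", &memsize);	// bare number < 1MB: 512MB
 *	vm_parse_memsize("2g", &memsize);	// suffix form, handled by
 *						// expand_number(3): 2GB
 */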

int
vm_get_memory_seg(struct vmctx *ctx, vm_paddr_t gpa, size_t *ret_len,
		  int *wired)
{
	int error;
	struct vm_memory_segment seg;

	bzero(&seg, sizeof(seg));
	seg.gpa = gpa;
	error = ioctl(ctx->fd, VM_GET_MEMORY_SEG, &seg);
	*ret_len = seg.len;
	if (wired != NULL)
		*wired = seg.wired;
	return (error);
}

uint32_t
vm_get_lowmem_limit(struct vmctx *ctx)
{

	return (ctx->lowmem_limit);
}

void
vm_set_lowmem_limit(struct vmctx *ctx, uint32_t limit)
{

	ctx->lowmem_limit = limit;
}

void
vm_set_memflags(struct vmctx *ctx, int flags)
{

	ctx->memflags = flags;
}

static int
setup_memory_segment(struct vmctx *ctx, vm_paddr_t gpa, size_t len, char **addr)
{
	int error, mmap_flags;
	struct vm_memory_segment seg;

	/*
	 * Create and optionally map 'len' bytes of memory at guest
	 * physical address 'gpa'
	 */
	bzero(&seg, sizeof(seg));
	seg.gpa = gpa;
	seg.len = len;
	error = ioctl(ctx->fd, VM_MAP_MEMORY, &seg);
	if (error == 0 && addr != NULL) {
		mmap_flags = MAP_SHARED;
		if ((ctx->memflags & VM_MEM_F_INCORE) == 0)
			mmap_flags |= MAP_NOCORE;
		*addr = mmap(NULL, len, PROT_READ | PROT_WRITE, mmap_flags,
		    ctx->fd, gpa);
		if (*addr == MAP_FAILED)
			error = -1;
	}
	return (error);
}

int
vm_setup_memory(struct vmctx *ctx, size_t memsize, enum vm_mmap_style vms)
{
	char **addr;
	int error;

	/* XXX VM_MMAP_SPARSE not implemented yet */
	assert(vms == VM_MMAP_NONE || vms == VM_MMAP_ALL);
	ctx->vms = vms;

	/*
	 * If 'memsize' cannot fit entirely in the 'lowmem' segment then
	 * create another 'highmem' segment above 4GB for the remainder.
	 */
	if (memsize > ctx->lowmem_limit) {
		ctx->lowmem = ctx->lowmem_limit;
		ctx->highmem = memsize - ctx->lowmem;
	} else {
		ctx->lowmem = memsize;
		ctx->highmem = 0;
	}

	if (ctx->lowmem > 0) {
		addr = (vms == VM_MMAP_ALL) ? &ctx->lowmem_addr : NULL;
		error = setup_memory_segment(ctx, 0, ctx->lowmem, addr);
		if (error)
			return (error);
	}

	if (ctx->highmem > 0) {
		addr = (vms == VM_MMAP_ALL) ? &ctx->highmem_addr : NULL;
		error = setup_memory_segment(ctx, 4*GB, ctx->highmem, addr);
		if (error)
			return (error);
	}

	return (0);
}
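
/*
 * Worked example of the split above (illustrative only): with the default
 * 3GB lowmem_limit, a request for 8GB of guest memory yields
 *
 *	lowmem  = 3GB, backing guest physical addresses [0, 3GB)
 *	highmem = 5GB, backing guest physical addresses [4GB, 9GB)
 *
 * leaving [3GB, 4GB) unpopulated (the hole typically used for device MMIO).
 */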

void *
vm_map_gpa(struct vmctx *ctx, vm_paddr_t gaddr, size_t len)
{

	/* XXX VM_MMAP_SPARSE not implemented yet */
	assert(ctx->vms == VM_MMAP_ALL);

	if (gaddr < ctx->lowmem && gaddr + len <= ctx->lowmem)
		return ((void *)(ctx->lowmem_addr + gaddr));

	if (gaddr >= 4*GB) {
		gaddr -= 4*GB;
		if (gaddr < ctx->highmem && gaddr + len <= ctx->highmem)
			return ((void *)(ctx->highmem_addr + gaddr));
	}

	return (NULL);
}
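
/*
 * Illustrative translations (a sketch, assuming the 8GB layout from the
 * example above):
 *
 *	vm_map_gpa(ctx, 0x100000, 4096)	  -> lowmem_addr + 0x100000
 *	vm_map_gpa(ctx, 4*GB, 4096)	  -> highmem_addr + 0
 *	vm_map_gpa(ctx, 3*GB + 4096, 64)  -> NULL (falls in the memory hole)
 */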

int
vm_set_desc(struct vmctx *ctx, int vcpu, int reg,
	    uint64_t base, uint32_t limit, uint32_t access)
{
	int error;
	struct vm_seg_desc vmsegdesc;

	bzero(&vmsegdesc, sizeof(vmsegdesc));
	vmsegdesc.cpuid = vcpu;
	vmsegdesc.regnum = reg;
	vmsegdesc.desc.base = base;
	vmsegdesc.desc.limit = limit;
	vmsegdesc.desc.access = access;

	error = ioctl(ctx->fd, VM_SET_SEGMENT_DESCRIPTOR, &vmsegdesc);
	return (error);
}

int
vm_get_desc(struct vmctx *ctx, int vcpu, int reg,
	    uint64_t *base, uint32_t *limit, uint32_t *access)
{
	int error;
	struct vm_seg_desc vmsegdesc;

	bzero(&vmsegdesc, sizeof(vmsegdesc));
	vmsegdesc.cpuid = vcpu;
	vmsegdesc.regnum = reg;

	error = ioctl(ctx->fd, VM_GET_SEGMENT_DESCRIPTOR, &vmsegdesc);
	if (error == 0) {
		*base = vmsegdesc.desc.base;
		*limit = vmsegdesc.desc.limit;
		*access = vmsegdesc.desc.access;
	}
	return (error);
}

int
vm_set_register(struct vmctx *ctx, int vcpu, int reg, uint64_t val)
{
	int error;
	struct vm_register vmreg;

	bzero(&vmreg, sizeof(vmreg));
	vmreg.cpuid = vcpu;
	vmreg.regnum = reg;
	vmreg.regval = val;

	error = ioctl(ctx->fd, VM_SET_REGISTER, &vmreg);
	return (error);
}

int
vm_get_register(struct vmctx *ctx, int vcpu, int reg, uint64_t *ret_val)
{
	int error;
	struct vm_register vmreg;

	bzero(&vmreg, sizeof(vmreg));
	vmreg.cpuid = vcpu;
	vmreg.regnum = reg;

	error = ioctl(ctx->fd, VM_GET_REGISTER, &vmreg);
	*ret_val = vmreg.regval;
	return (error);
}

int
vm_run(struct vmctx *ctx, int vcpu, uint64_t rip, struct vm_exit *vmexit)
{
	int error;
	struct vm_run vmrun;

	bzero(&vmrun, sizeof(vmrun));
	vmrun.cpuid = vcpu;
	vmrun.rip = rip;

	error = ioctl(ctx->fd, VM_RUN, &vmrun);
	bcopy(&vmrun.vm_exit, vmexit, sizeof(struct vm_exit));
	return (error);
}
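
/*
 * Illustrative vcpu loop (a sketch, not library code).  Real callers such
 * as bhyve(8) dispatch on many more exit codes and adjust %rip per exit
 * type; only the simplest case is shown here.
 *
 *	struct vm_exit vmexit;
 *	uint64_t rip = entry_rip;	// hypothetical starting %rip
 *
 *	for (;;) {
 *		if (vm_run(ctx, vcpu, rip, &vmexit) != 0)
 *			break;
 *		if (vmexit.exitcode == VM_EXITCODE_HLT)
 *			break;			// guest executed 'hlt'
 *		// ... handle VM_EXITCODE_INOUT and friends, then resume at
 *		// vmexit.rip plus whatever the handler decides.
 *		rip = vmexit.rip + vmexit.inst_length;
 *	}
 */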

int
vm_suspend(struct vmctx *ctx, enum vm_suspend_how how)
{
	struct vm_suspend vmsuspend;

	bzero(&vmsuspend, sizeof(vmsuspend));
	vmsuspend.how = how;
	return (ioctl(ctx->fd, VM_SUSPEND, &vmsuspend));
}

static int
vm_inject_exception_real(struct vmctx *ctx, int vcpu, int vector,
    int error_code, int error_code_valid)
{
	struct vm_exception exc;

	bzero(&exc, sizeof(exc));
	exc.cpuid = vcpu;
	exc.vector = vector;
	exc.error_code = error_code;
	exc.error_code_valid = error_code_valid;

	return (ioctl(ctx->fd, VM_INJECT_EXCEPTION, &exc));
}

int
vm_inject_exception(struct vmctx *ctx, int vcpu, int vector)
{

	return (vm_inject_exception_real(ctx, vcpu, vector, 0, 0));
}

int
vm_inject_exception2(struct vmctx *ctx, int vcpu, int vector, int errcode)
{

	return (vm_inject_exception_real(ctx, vcpu, vector, errcode, 1));
}
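
/*
 * Illustrative use of the two injection variants (a sketch; the IDT_UD and
 * IDT_GP vector numbers come from <machine/segments.h>):
 *
 *	vm_inject_exception(ctx, vcpu, IDT_UD);	     // #UD: no error code
 *	vm_inject_exception2(ctx, vcpu, IDT_GP, 0); // #GP: error code pushed
 */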

int
vm_apicid2vcpu(struct vmctx *ctx, int apicid)
{
	/*
	 * The apic id associated with the 'vcpu' has the same numerical value
	 * as the 'vcpu' itself.
	 */
	return (apicid);
}

int
vm_lapic_irq(struct vmctx *ctx, int vcpu, int vector)
{
	struct vm_lapic_irq vmirq;

	bzero(&vmirq, sizeof(vmirq));
	vmirq.cpuid = vcpu;
	vmirq.vector = vector;

	return (ioctl(ctx->fd, VM_LAPIC_IRQ, &vmirq));
}

int
vm_lapic_local_irq(struct vmctx *ctx, int vcpu, int vector)
{
	struct vm_lapic_irq vmirq;

	bzero(&vmirq, sizeof(vmirq));
	vmirq.cpuid = vcpu;
	vmirq.vector = vector;

	return (ioctl(ctx->fd, VM_LAPIC_LOCAL_IRQ, &vmirq));
}

int
vm_lapic_msi(struct vmctx *ctx, uint64_t addr, uint64_t msg)
{
	struct vm_lapic_msi vmmsi;

	bzero(&vmmsi, sizeof(vmmsi));
	vmmsi.addr = addr;
	vmmsi.msg = msg;

	return (ioctl(ctx->fd, VM_LAPIC_MSI, &vmmsi));
}
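
/*
 * Illustrative MSI delivery (a sketch): 'addr' and 'msg' follow the x86
 * MSI address/data register format, so an edge-triggered, fixed-delivery
 * interrupt on vector 0x41 aimed at APIC ID 0 looks like
 *
 *	vm_lapic_msi(ctx, 0xfee00000, 0x41);
 */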

int
vm_ioapic_assert_irq(struct vmctx *ctx, int irq)
{
	struct vm_ioapic_irq ioapic_irq;

	bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq));
	ioapic_irq.irq = irq;

	return (ioctl(ctx->fd, VM_IOAPIC_ASSERT_IRQ, &ioapic_irq));
}

int
vm_ioapic_deassert_irq(struct vmctx *ctx, int irq)
{
	struct vm_ioapic_irq ioapic_irq;

	bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq));
	ioapic_irq.irq = irq;

	return (ioctl(ctx->fd, VM_IOAPIC_DEASSERT_IRQ, &ioapic_irq));
}

int
vm_ioapic_pulse_irq(struct vmctx *ctx, int irq)
{
	struct vm_ioapic_irq ioapic_irq;

	bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq));
	ioapic_irq.irq = irq;

	return (ioctl(ctx->fd, VM_IOAPIC_PULSE_IRQ, &ioapic_irq));
}

int
vm_ioapic_pincount(struct vmctx *ctx, int *pincount)
{

	return (ioctl(ctx->fd, VM_IOAPIC_PINCOUNT, pincount));
}

int
vm_isa_assert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
{
	struct vm_isa_irq isa_irq;

	bzero(&isa_irq, sizeof(struct vm_isa_irq));
	isa_irq.atpic_irq = atpic_irq;
	isa_irq.ioapic_irq = ioapic_irq;

	return (ioctl(ctx->fd, VM_ISA_ASSERT_IRQ, &isa_irq));
}

int
vm_isa_deassert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
{
	struct vm_isa_irq isa_irq;

	bzero(&isa_irq, sizeof(struct vm_isa_irq));
	isa_irq.atpic_irq = atpic_irq;
	isa_irq.ioapic_irq = ioapic_irq;

	return (ioctl(ctx->fd, VM_ISA_DEASSERT_IRQ, &isa_irq));
}

int
vm_isa_pulse_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
{
	struct vm_isa_irq isa_irq;

	bzero(&isa_irq, sizeof(struct vm_isa_irq));
	isa_irq.atpic_irq = atpic_irq;
	isa_irq.ioapic_irq = ioapic_irq;

	return (ioctl(ctx->fd, VM_ISA_PULSE_IRQ, &isa_irq));
}

int
vm_isa_set_irq_trigger(struct vmctx *ctx, int atpic_irq,
    enum vm_intr_trigger trigger)
{
	struct vm_isa_irq_trigger isa_irq_trigger;

	bzero(&isa_irq_trigger, sizeof(struct vm_isa_irq_trigger));
	isa_irq_trigger.atpic_irq = atpic_irq;
	isa_irq_trigger.trigger = trigger;

	return (ioctl(ctx->fd, VM_ISA_SET_IRQ_TRIGGER, &isa_irq_trigger));
}

int
vm_inject_nmi(struct vmctx *ctx, int vcpu)
{
	struct vm_nmi vmnmi;

	bzero(&vmnmi, sizeof(vmnmi));
	vmnmi.cpuid = vcpu;

	return (ioctl(ctx->fd, VM_INJECT_NMI, &vmnmi));
}

static struct {
	const char	*name;
	int		type;
} capstrmap[] = {
	{ "hlt_exit",		VM_CAP_HALT_EXIT },
	{ "mtrap_exit",		VM_CAP_MTRAP_EXIT },
	{ "pause_exit",		VM_CAP_PAUSE_EXIT },
	{ "unrestricted_guest",	VM_CAP_UNRESTRICTED_GUEST },
	{ "enable_invpcid",	VM_CAP_ENABLE_INVPCID },
	{ 0 }
};

int
vm_capability_name2type(const char *capname)
{
	int i;

	for (i = 0; capstrmap[i].name != NULL && capname != NULL; i++) {
		if (strcmp(capstrmap[i].name, capname) == 0)
			return (capstrmap[i].type);
	}

	return (-1);
}

const char *
vm_capability_type2name(int type)
{
	int i;

	for (i = 0; capstrmap[i].name != NULL; i++) {
		if (capstrmap[i].type == type)
			return (capstrmap[i].name);
	}

	return (NULL);
}
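
/*
 * Illustrative round trip through the capability name table above
 * (a sketch, not library code):
 *
 *	int cap = vm_capability_name2type("hlt_exit");
 *	assert(cap == VM_CAP_HALT_EXIT);
 *	assert(strcmp(vm_capability_type2name(cap), "hlt_exit") == 0);
 */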

int
vm_get_capability(struct vmctx *ctx, int vcpu, enum vm_cap_type cap,
		  int *retval)
{
	int error;
	struct vm_capability vmcap;

	bzero(&vmcap, sizeof(vmcap));
	vmcap.cpuid = vcpu;
	vmcap.captype = cap;

	error = ioctl(ctx->fd, VM_GET_CAPABILITY, &vmcap);
	*retval = vmcap.capval;
	return (error);
}

int
vm_set_capability(struct vmctx *ctx, int vcpu, enum vm_cap_type cap, int val)
{
	struct vm_capability vmcap;

	bzero(&vmcap, sizeof(vmcap));
	vmcap.cpuid = vcpu;
	vmcap.captype = cap;
	vmcap.capval = val;

	return (ioctl(ctx->fd, VM_SET_CAPABILITY, &vmcap));
}

int
vm_assign_pptdev(struct vmctx *ctx, int bus, int slot, int func)
{
	struct vm_pptdev pptdev;

	bzero(&pptdev, sizeof(pptdev));
	pptdev.bus = bus;
	pptdev.slot = slot;
	pptdev.func = func;

	return (ioctl(ctx->fd, VM_BIND_PPTDEV, &pptdev));
}

int
vm_unassign_pptdev(struct vmctx *ctx, int bus, int slot, int func)
{
	struct vm_pptdev pptdev;

	bzero(&pptdev, sizeof(pptdev));
	pptdev.bus = bus;
	pptdev.slot = slot;
	pptdev.func = func;

	return (ioctl(ctx->fd, VM_UNBIND_PPTDEV, &pptdev));
}

int
vm_map_pptdev_mmio(struct vmctx *ctx, int bus, int slot, int func,
		   vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
{
	struct vm_pptdev_mmio pptmmio;

	bzero(&pptmmio, sizeof(pptmmio));
	pptmmio.bus = bus;
	pptmmio.slot = slot;
	pptmmio.func = func;
	pptmmio.gpa = gpa;
	pptmmio.len = len;
	pptmmio.hpa = hpa;

	return (ioctl(ctx->fd, VM_MAP_PPTDEV_MMIO, &pptmmio));
}

int
vm_setup_pptdev_msi(struct vmctx *ctx, int vcpu, int bus, int slot, int func,
    uint64_t addr, uint64_t msg, int numvec)
{
	struct vm_pptdev_msi pptmsi;

	bzero(&pptmsi, sizeof(pptmsi));
	pptmsi.vcpu = vcpu;
	pptmsi.bus = bus;
	pptmsi.slot = slot;
	pptmsi.func = func;
	pptmsi.msg = msg;
	pptmsi.addr = addr;
	pptmsi.numvec = numvec;

	return (ioctl(ctx->fd, VM_PPTDEV_MSI, &pptmsi));
}

int
vm_setup_pptdev_msix(struct vmctx *ctx, int vcpu, int bus, int slot, int func,
    int idx, uint64_t addr, uint64_t msg, uint32_t vector_control)
{
	struct vm_pptdev_msix pptmsix;

	bzero(&pptmsix, sizeof(pptmsix));
	pptmsix.vcpu = vcpu;
	pptmsix.bus = bus;
	pptmsix.slot = slot;
	pptmsix.func = func;
	pptmsix.idx = idx;
	pptmsix.msg = msg;
	pptmsix.addr = addr;
	pptmsix.vector_control = vector_control;

	return (ioctl(ctx->fd, VM_PPTDEV_MSIX, &pptmsix));
}

uint64_t *
vm_get_stats(struct vmctx *ctx, int vcpu, struct timeval *ret_tv,
	     int *ret_entries)
{
	int error;

	/*
	 * The statistics buffer is static, so the returned pointer is only
	 * valid until the next call and the function is not reentrant.
	 */
	static struct vm_stats vmstats;

	vmstats.cpuid = vcpu;

	error = ioctl(ctx->fd, VM_STATS, &vmstats);
	if (error == 0) {
		if (ret_entries)
			*ret_entries = vmstats.num_entries;
		if (ret_tv)
			*ret_tv = vmstats.tv;
		return (vmstats.statbuf);
	} else
		return (NULL);
}

const char *
vm_get_stat_desc(struct vmctx *ctx, int index)
{
	static struct vm_stat_desc statdesc;

	statdesc.index = index;
	if (ioctl(ctx->fd, VM_STAT_DESC, &statdesc) == 0)
		return (statdesc.desc);
	else
		return (NULL);
}
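
/*
 * Illustrative stats dump pairing the two calls above (a sketch, not
 * library code):
 *
 *	struct timeval tv;
 *	int i, num;
 *	uint64_t *stats;
 *
 *	if ((stats = vm_get_stats(ctx, vcpu, &tv, &num)) != NULL) {
 *		for (i = 0; i < num; i++)
 *			printf("%s: %lu\n", vm_get_stat_desc(ctx, i),
 *			    stats[i]);
 *	}
 */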

int
vm_get_x2apic_state(struct vmctx *ctx, int vcpu, enum x2apic_state *state)
{
	int error;
	struct vm_x2apic x2apic;

	bzero(&x2apic, sizeof(x2apic));
	x2apic.cpuid = vcpu;

	error = ioctl(ctx->fd, VM_GET_X2APIC_STATE, &x2apic);
	*state = x2apic.state;
	return (error);
}

int
vm_set_x2apic_state(struct vmctx *ctx, int vcpu, enum x2apic_state state)
{
	int error;
	struct vm_x2apic x2apic;

	bzero(&x2apic, sizeof(x2apic));
	x2apic.cpuid = vcpu;
	x2apic.state = state;

	error = ioctl(ctx->fd, VM_SET_X2APIC_STATE, &x2apic);

	return (error);
}
752
753/*
754 * From Intel Vol 3a:
755 * Table 9-1. IA-32 Processor States Following Power-up, Reset or INIT
756 */
757int
758vcpu_reset(struct vmctx *vmctx, int vcpu)
759{
760	int error;
761	uint64_t rflags, rip, cr0, cr4, zero, desc_base, rdx;
762	uint32_t desc_access, desc_limit;
763	uint16_t sel;
764
765	zero = 0;
766
767	rflags = 0x2;
768	error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RFLAGS, rflags);
769	if (error)
770		goto done;
771
772	rip = 0xfff0;
773	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RIP, rip)) != 0)
774		goto done;
775
776	cr0 = CR0_NE;
777	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR0, cr0)) != 0)
778		goto done;
779
780	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR3, zero)) != 0)
781		goto done;
782
783	cr4 = 0;
784	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR4, cr4)) != 0)
785		goto done;
786
787	/*
788	 * CS: present, r/w, accessed, 16-bit, byte granularity, usable
789	 */
790	desc_base = 0xffff0000;
791	desc_limit = 0xffff;
792	desc_access = 0x0093;
793	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_CS,
794			    desc_base, desc_limit, desc_access);
795	if (error)
796		goto done;
797
798	sel = 0xf000;
799	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CS, sel)) != 0)
800		goto done;
801
802	/*
803	 * SS,DS,ES,FS,GS: present, r/w, accessed, 16-bit, byte granularity
804	 */
805	desc_base = 0;
806	desc_limit = 0xffff;
807	desc_access = 0x0093;
808	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_SS,
809			    desc_base, desc_limit, desc_access);
810	if (error)
811		goto done;
812
813	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_DS,
814			    desc_base, desc_limit, desc_access);
815	if (error)
816		goto done;
817
818	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_ES,
819			    desc_base, desc_limit, desc_access);
820	if (error)
821		goto done;
822
823	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_FS,
824			    desc_base, desc_limit, desc_access);
825	if (error)
826		goto done;
827
828	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_GS,
829			    desc_base, desc_limit, desc_access);
830	if (error)
831		goto done;
832
833	sel = 0;
834	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_SS, sel)) != 0)
835		goto done;
836	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_DS, sel)) != 0)
837		goto done;
838	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_ES, sel)) != 0)
839		goto done;
840	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_FS, sel)) != 0)
841		goto done;
842	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_GS, sel)) != 0)
843		goto done;
844
845	/* General purpose registers */
846	rdx = 0xf00;
847	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RAX, zero)) != 0)
848		goto done;
849	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RBX, zero)) != 0)
850		goto done;
851	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RCX, zero)) != 0)
852		goto done;
853	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RDX, rdx)) != 0)
854		goto done;
855	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RSI, zero)) != 0)
856		goto done;
857	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RDI, zero)) != 0)
858		goto done;
859	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RBP, zero)) != 0)
860		goto done;
861	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RSP, zero)) != 0)
862		goto done;
863
864	/* GDTR, IDTR */
865	desc_base = 0;
866	desc_limit = 0xffff;
867	desc_access = 0;
868	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_GDTR,
869			    desc_base, desc_limit, desc_access);
870	if (error != 0)
871		goto done;
872
873	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_IDTR,
874			    desc_base, desc_limit, desc_access);
875	if (error != 0)
876		goto done;
877
878	/* TR */
879	desc_base = 0;
880	desc_limit = 0xffff;
881	desc_access = 0x0000008b;
882	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_TR, 0, 0, desc_access);
883	if (error)
884		goto done;
885
886	sel = 0;
887	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_TR, sel)) != 0)
888		goto done;
889
890	/* LDTR */
891	desc_base = 0;
892	desc_limit = 0xffff;
893	desc_access = 0x00000082;
894	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_LDTR, desc_base,
895			    desc_limit, desc_access);
896	if (error)
897		goto done;
898
899	sel = 0;
900	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_LDTR, 0)) != 0)
901		goto done;
902
903	/* XXX cr2, debug registers */
904
905	error = 0;
906done:
907	return (error);
908}
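
/*
 * Worked example of the reset state above: CS.base (0xffff0000) plus
 * %rip (0xfff0) puts the first instruction fetch at 0xfffffff0, the
 * architectural x86 reset vector just below 4GB.
 */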

int
vm_get_gpa_pmap(struct vmctx *ctx, uint64_t gpa, uint64_t *pte, int *num)
{
	int error, i;
	struct vm_gpa_pte gpapte;

	bzero(&gpapte, sizeof(gpapte));
	gpapte.gpa = gpa;

	error = ioctl(ctx->fd, VM_GET_GPA_PMAP, &gpapte);

	if (error == 0) {
		*num = gpapte.ptenum;
		for (i = 0; i < gpapte.ptenum; i++)
			pte[i] = gpapte.pte[i];
	}

	return (error);
}

int
vm_get_hpet_capabilities(struct vmctx *ctx, uint32_t *capabilities)
{
	int error;
	struct vm_hpet_cap cap;

	bzero(&cap, sizeof(struct vm_hpet_cap));
	error = ioctl(ctx->fd, VM_GET_HPET_CAPABILITIES, &cap);
	if (capabilities != NULL)
		*capabilities = cap.capabilities;
	return (error);
}

static int
gla2gpa(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
    uint64_t gla, int prot, int *fault, uint64_t *gpa)
{
	struct vm_gla2gpa gg;
	int error;

	bzero(&gg, sizeof(struct vm_gla2gpa));
	gg.vcpuid = vcpu;
	gg.prot = prot;
	gg.gla = gla;
	gg.paging = *paging;

	error = ioctl(ctx->fd, VM_GLA2GPA, &gg);
	if (error == 0) {
		*fault = gg.fault;
		*gpa = gg.gpa;
	}
	return (error);
}

#ifndef min
#define	min(a,b)	(((a) < (b)) ? (a) : (b))
#endif

int
vm_gla2gpa(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
    uint64_t gla, size_t len, int prot, struct iovec *iov, int iovcnt)
{
	uint64_t gpa;
	int error, fault, i, n, off;

	for (i = 0; i < iovcnt; i++) {
		iov[i].iov_base = 0;
		iov[i].iov_len = 0;
	}

	while (len) {
		assert(iovcnt > 0);
		error = gla2gpa(ctx, vcpu, paging, gla, prot, &fault, &gpa);
		if (error)
			return (-1);
		if (fault)
			return (1);

		/*
		 * Translate at most one guest page per iteration since the
		 * linear range may be discontiguous in the physical space.
		 */
		off = gpa & PAGE_MASK;
		n = min(len, PAGE_SIZE - off);

		/*
		 * Note that 'iov_base' holds a guest physical address here;
		 * vm_copyin/vm_copyout below map it to a host address.
		 */
		iov->iov_base = (void *)gpa;
		iov->iov_len = n;
		iov++;
		iovcnt--;

		gla += n;
		len -= n;
	}
	return (0);
}
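
/*
 * Illustrative copy of a guest buffer that may straddle a page boundary
 * (a sketch; 'paging' describes the vcpu's current paging mode and 'gla'
 * is a hypothetical guest linear address):
 *
 *	struct iovec iov[2];
 *	char buf[64];
 *
 *	if (vm_gla2gpa(ctx, vcpu, &paging, gla, sizeof(buf), PROT_READ,
 *	    iov, 2) == 0)
 *		vm_copyin(ctx, vcpu, iov, buf, sizeof(buf));
 */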

void
vm_copyin(struct vmctx *ctx, int vcpu, struct iovec *iov, void *vp, size_t len)
{
	const char *src;
	char *dst;
	uint64_t gpa;
	size_t n;

	dst = vp;
	while (len) {
		assert(iov->iov_len);
		gpa = (uint64_t)iov->iov_base;
		n = min(len, iov->iov_len);
		src = vm_map_gpa(ctx, gpa, n);
		assert(src != NULL);
		bcopy(src, dst, n);

		iov++;
		dst += n;
		len -= n;
	}
}

void
vm_copyout(struct vmctx *ctx, int vcpu, const void *vp, struct iovec *iov,
    size_t len)
{
	const char *src;
	char *dst;
	uint64_t gpa;
	size_t n;

	src = vp;
	while (len) {
		assert(iov->iov_len);
		gpa = (uint64_t)iov->iov_base;
		n = min(len, iov->iov_len);
		dst = vm_map_gpa(ctx, gpa, n);
		assert(dst != NULL);
		bcopy(src, dst, n);

		iov++;
		src += n;
		len -= n;
	}
}