/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/lib/libvmmapi/vmmapi.c 348201 2019-05-23 21:23:18Z rgrimes $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/lib/libvmmapi/vmmapi.c 348201 2019-05-23 21:23:18Z rgrimes $");

#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/_iovec.h>
#include <sys/cpuset.h>

#include <x86/segments.h>
#include <machine/specialreg.h>

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

#include <libutil.h>

#include <machine/vmm.h>
#include <machine/vmm_dev.h>

#include "vmmapi.h"

#define	MB	(1024 * 1024UL)
#define	GB	(1024 * 1024 * 1024UL)

/*
 * Size of the guard region before and after the virtual address space
 * mapping the guest physical memory. This must be a multiple of the
 * superpage size for performance reasons.
 */
#define	VM_MMAP_GUARD_SIZE	(4 * MB)

#define	PROT_RW		(PROT_READ | PROT_WRITE)
#define	PROT_ALL	(PROT_READ | PROT_WRITE | PROT_EXEC)

struct vmctx {
	int	fd;
	uint32_t lowmem_limit;
	int	memflags;
	size_t	lowmem;
	size_t	highmem;
	char	*baseaddr;
	char	*name;
};

#define	CREATE(x)  sysctlbyname("hw.vmm.create", NULL, NULL, (x), strlen((x)))
#define	DESTROY(x) sysctlbyname("hw.vmm.destroy", NULL, NULL, (x), strlen((x)))

static int
vm_device_open(const char *name)
{
	int fd, len;
	char *vmfile;

	len = strlen("/dev/vmm/") + strlen(name) + 1;
	vmfile = malloc(len);
	assert(vmfile != NULL);
	snprintf(vmfile, len, "/dev/vmm/%s", name);

	/* Open the device file */
	fd = open(vmfile, O_RDWR, 0);

	free(vmfile);
	return (fd);
}

int
vm_create(const char *name)
{

	return (CREATE((char *)name));
}

struct vmctx *
vm_open(const char *name)
{
	struct vmctx *vm;

	vm = malloc(sizeof(struct vmctx) + strlen(name) + 1);
	assert(vm != NULL);

	vm->fd = -1;
	vm->memflags = 0;
	vm->lowmem_limit = 3 * GB;
	vm->name = (char *)(vm + 1);
	strcpy(vm->name, name);

	if ((vm->fd = vm_device_open(vm->name)) < 0)
		goto err;

	return (vm);
err:
	vm_destroy(vm);
	return (NULL);
}
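
/*
 * Illustrative lifecycle (a sketch, not part of the library): the VM is
 * created by name, opened to obtain a context, and torn down with
 * vm_destroy().  The name "testvm" and the error handling are placeholders.
 *
 *	struct vmctx *ctx;
 *
 *	if (vm_create("testvm") != 0)
 *		err(1, "vm_create");
 *	if ((ctx = vm_open("testvm")) == NULL)
 *		errx(1, "vm_open");
 *	...
 *	vm_destroy(ctx);
 */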

void
vm_destroy(struct vmctx *vm)
{
	assert(vm != NULL);

	if (vm->fd >= 0)
		close(vm->fd);
	DESTROY(vm->name);

	free(vm);
}

int
vm_parse_memsize(const char *optarg, size_t *ret_memsize)
{
	char *endptr;
	size_t optval;
	int error;

	optval = strtoul(optarg, &endptr, 0);
	if (*optarg != '\0' && *endptr == '\0') {
		/*
		 * For the sake of backward compatibility, if the memory size
		 * specified on the command line is less than a megabyte then
		 * it is interpreted as being in units of MB.
		 */
		if (optval < MB)
			optval *= MB;
		*ret_memsize = optval;
		error = 0;
	} else
		error = expand_number(optarg, ret_memsize);

	return (error);
}
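
/*
 * Examples of accepted sizes (illustrative): "256" and "0x10" parse as plain
 * numbers below 1MB and are scaled to megabytes; "268435456" is taken as a
 * byte count; suffixed forms such as "256m" or "4g" fall through to
 * expand_number(3).
 */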

uint32_t
vm_get_lowmem_limit(struct vmctx *ctx)
{

	return (ctx->lowmem_limit);
}

void
vm_set_lowmem_limit(struct vmctx *ctx, uint32_t limit)
{

	ctx->lowmem_limit = limit;
}

void
vm_set_memflags(struct vmctx *ctx, int flags)
{

	ctx->memflags = flags;
}

int
vm_get_memflags(struct vmctx *ctx)
{

	return (ctx->memflags);
}

/*
 * Map segment 'segid' starting at 'off' into guest address range [gpa,gpa+len).
 */
int
vm_mmap_memseg(struct vmctx *ctx, vm_paddr_t gpa, int segid, vm_ooffset_t off,
    size_t len, int prot)
{
	struct vm_memmap memmap;
	int error, flags;

	memmap.gpa = gpa;
	memmap.segid = segid;
	memmap.segoff = off;
	memmap.len = len;
	memmap.prot = prot;
	memmap.flags = 0;

	if (ctx->memflags & VM_MEM_F_WIRED)
		memmap.flags |= VM_MEMMAP_F_WIRED;

	/*
	 * If this mapping already exists then don't create it again. This
	 * is the common case for SYSMEM mappings created by bhyveload(8).
	 */
	error = vm_mmap_getnext(ctx, &gpa, &segid, &off, &len, &prot, &flags);
	if (error == 0 && gpa == memmap.gpa) {
		if (segid != memmap.segid || off != memmap.segoff ||
		    prot != memmap.prot || flags != memmap.flags) {
			errno = EEXIST;
			return (-1);
		} else {
			return (0);
		}
	}

	error = ioctl(ctx->fd, VM_MMAP_MEMSEG, &memmap);
	return (error);
}

int
vm_mmap_getnext(struct vmctx *ctx, vm_paddr_t *gpa, int *segid,
    vm_ooffset_t *segoff, size_t *len, int *prot, int *flags)
{
	struct vm_memmap memmap;
	int error;

	bzero(&memmap, sizeof(struct vm_memmap));
	memmap.gpa = *gpa;
	error = ioctl(ctx->fd, VM_MMAP_GETNEXT, &memmap);
	if (error == 0) {
		*gpa = memmap.gpa;
		*segid = memmap.segid;
		*segoff = memmap.segoff;
		*len = memmap.len;
		*prot = memmap.prot;
		*flags = memmap.flags;
	}
	return (error);
}

/*
 * Return 0 if the segments are identical and non-zero otherwise.
 *
 * This is slightly complicated by the fact that only device memory segments
 * are named.
 */
static int
cmpseg(size_t len, const char *str, size_t len2, const char *str2)
{

	if (len == len2) {
		if ((!str && !str2) || (str && str2 && !strcmp(str, str2)))
			return (0);
	}
	return (-1);
}

static int
vm_alloc_memseg(struct vmctx *ctx, int segid, size_t len, const char *name)
{
	struct vm_memseg memseg;
	size_t n;
	int error;

	/*
	 * If the memory segment has already been created then just return.
	 * This is the usual case for the SYSMEM segment created by userspace
	 * loaders like bhyveload(8).
	 */
	error = vm_get_memseg(ctx, segid, &memseg.len, memseg.name,
	    sizeof(memseg.name));
	if (error)
		return (error);

	if (memseg.len != 0) {
		if (cmpseg(len, name, memseg.len, VM_MEMSEG_NAME(&memseg))) {
			errno = EINVAL;
			return (-1);
		} else {
			return (0);
		}
	}

	bzero(&memseg, sizeof(struct vm_memseg));
	memseg.segid = segid;
	memseg.len = len;
	if (name != NULL) {
		n = strlcpy(memseg.name, name, sizeof(memseg.name));
		if (n >= sizeof(memseg.name)) {
			errno = ENAMETOOLONG;
			return (-1);
		}
	}

	error = ioctl(ctx->fd, VM_ALLOC_MEMSEG, &memseg);
	return (error);
}

int
vm_get_memseg(struct vmctx *ctx, int segid, size_t *lenp, char *namebuf,
    size_t bufsize)
{
	struct vm_memseg memseg;
	size_t n;
	int error;

	memseg.segid = segid;
	error = ioctl(ctx->fd, VM_GET_MEMSEG, &memseg);
	if (error == 0) {
		*lenp = memseg.len;
		n = strlcpy(namebuf, memseg.name, bufsize);
		if (n >= bufsize) {
			errno = ENAMETOOLONG;
			error = -1;
		}
	}
	return (error);
}

static int
setup_memory_segment(struct vmctx *ctx, vm_paddr_t gpa, size_t len, char *base)
{
	char *ptr;
	int error, flags;

	/* Map 'len' bytes starting at 'gpa' in the guest address space */
	error = vm_mmap_memseg(ctx, gpa, VM_SYSMEM, gpa, len, PROT_ALL);
	if (error)
		return (error);

	flags = MAP_SHARED | MAP_FIXED;
	if ((ctx->memflags & VM_MEM_F_INCORE) == 0)
		flags |= MAP_NOCORE;

	/* mmap into the process address space on the host */
	ptr = mmap(base + gpa, len, PROT_RW, flags, ctx->fd, gpa);
	if (ptr == MAP_FAILED)
		return (-1);

	return (0);
}

int
vm_setup_memory(struct vmctx *ctx, size_t memsize, enum vm_mmap_style vms)
{
	size_t objsize, len;
	vm_paddr_t gpa;
	char *baseaddr, *ptr;
	int error;

	assert(vms == VM_MMAP_ALL);

	/*
	 * If 'memsize' cannot fit entirely in the 'lowmem' segment then
	 * create another 'highmem' segment above 4GB for the remainder.
	 */
	if (memsize > ctx->lowmem_limit) {
		ctx->lowmem = ctx->lowmem_limit;
		ctx->highmem = memsize - ctx->lowmem_limit;
		objsize = 4*GB + ctx->highmem;
	} else {
		ctx->lowmem = memsize;
		ctx->highmem = 0;
		objsize = ctx->lowmem;
	}

	error = vm_alloc_memseg(ctx, VM_SYSMEM, objsize, NULL);
	if (error)
		return (error);

	/*
	 * Stake out a contiguous region covering the guest physical memory
	 * and the adjoining guard regions.
	 */
	len = VM_MMAP_GUARD_SIZE + objsize + VM_MMAP_GUARD_SIZE;
	ptr = mmap(NULL, len, PROT_NONE, MAP_GUARD | MAP_ALIGNED_SUPER, -1, 0);
	if (ptr == MAP_FAILED)
		return (-1);

	baseaddr = ptr + VM_MMAP_GUARD_SIZE;
	if (ctx->highmem > 0) {
		gpa = 4*GB;
		len = ctx->highmem;
		error = setup_memory_segment(ctx, gpa, len, baseaddr);
		if (error)
			return (error);
	}

	if (ctx->lowmem > 0) {
		gpa = 0;
		len = ctx->lowmem;
		error = setup_memory_segment(ctx, gpa, len, baseaddr);
		if (error)
			return (error);
	}

	ctx->baseaddr = baseaddr;

	return (0);
}
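
/*
 * Host virtual address layout produced above, relative to ctx->baseaddr
 * (a sketch of the code above, not an ABI guarantee):
 *
 *	baseaddr - VM_MMAP_GUARD_SIZE	guard region (PROT_NONE)
 *	baseaddr + 0			lowmem, up to lowmem_limit
 *	baseaddr + 4GB			highmem, if memsize > lowmem_limit
 *	baseaddr + objsize		guard region (PROT_NONE)
 *
 * Typical call (illustrative; error handling elided):
 *
 *	if (vm_setup_memory(ctx, 8 * GB, VM_MMAP_ALL) != 0)
 *		err(1, "vm_setup_memory");
 */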

/*
 * Returns a non-NULL pointer if [gaddr, gaddr+len) is entirely contained in
 * the lowmem or highmem regions.
 *
 * In particular, return NULL if [gaddr, gaddr+len) falls in the guest MMIO
 * region. The instruction emulation code depends on this behavior.
 */
void *
vm_map_gpa(struct vmctx *ctx, vm_paddr_t gaddr, size_t len)
{

	if (ctx->lowmem > 0) {
		if (gaddr < ctx->lowmem && len <= ctx->lowmem &&
		    gaddr + len <= ctx->lowmem)
			return (ctx->baseaddr + gaddr);
	}

	if (ctx->highmem > 0) {
		if (gaddr >= 4*GB) {
			if (gaddr < 4*GB + ctx->highmem &&
			    len <= ctx->highmem &&
			    gaddr + len <= 4*GB + ctx->highmem)
				return (ctx->baseaddr + gaddr);
		}
	}

	return (NULL);
}
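
/*
 * For example, with 2GB of lowmem, vm_map_gpa(ctx, 0x100000, 4096) returns
 * ctx->baseaddr + 0x100000, while any range touching the hole between
 * lowmem and 4GB (e.g. LAPIC or PCI MMIO) returns NULL (illustrative
 * values).
 */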

size_t
vm_get_lowmem_size(struct vmctx *ctx)
{

	return (ctx->lowmem);
}

size_t
vm_get_highmem_size(struct vmctx *ctx)
{

	return (ctx->highmem);
}

void *
vm_create_devmem(struct vmctx *ctx, int segid, const char *name, size_t len)
{
	char pathname[MAXPATHLEN];
	size_t len2;
	char *base, *ptr;
	int fd, error, flags;

	fd = -1;
	ptr = MAP_FAILED;
	if (name == NULL || strlen(name) == 0) {
		errno = EINVAL;
		goto done;
	}

	error = vm_alloc_memseg(ctx, segid, len, name);
	if (error)
		goto done;

	strlcpy(pathname, "/dev/vmm.io/", sizeof(pathname));
	strlcat(pathname, ctx->name, sizeof(pathname));
	strlcat(pathname, ".", sizeof(pathname));
	strlcat(pathname, name, sizeof(pathname));

	fd = open(pathname, O_RDWR);
	if (fd < 0)
		goto done;

	/*
	 * Stake out a contiguous region covering the device memory and the
	 * adjoining guard regions.
	 */
	len2 = VM_MMAP_GUARD_SIZE + len + VM_MMAP_GUARD_SIZE;
	base = mmap(NULL, len2, PROT_NONE, MAP_GUARD | MAP_ALIGNED_SUPER, -1,
	    0);
	if (base == MAP_FAILED)
		goto done;

	flags = MAP_SHARED | MAP_FIXED;
	if ((ctx->memflags & VM_MEM_F_INCORE) == 0)
		flags |= MAP_NOCORE;

	/* mmap the devmem region in the host address space */
	ptr = mmap(base + VM_MMAP_GUARD_SIZE, len, PROT_RW, flags, fd, 0);
done:
	if (fd >= 0)
		close(fd);
	return (ptr);
}
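
/*
 * Illustrative use: a device model can back a framebuffer with a devmem
 * segment and then expose it to the guest via vm_mmap_memseg().  The guest
 * address and size below are placeholders.
 *
 *	void *fb;
 *
 *	fb = vm_create_devmem(ctx, VM_FRAMEBUFFER, "framebuffer", 16 * MB);
 *	if (fb == MAP_FAILED)
 *		err(1, "vm_create_devmem");
 *	if (vm_mmap_memseg(ctx, 0xc0000000, VM_FRAMEBUFFER, 0, 16 * MB,
 *	    PROT_RW) != 0)
 *		err(1, "vm_mmap_memseg");
 */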

int
vm_set_desc(struct vmctx *ctx, int vcpu, int reg,
	    uint64_t base, uint32_t limit, uint32_t access)
{
	int error;
	struct vm_seg_desc vmsegdesc;

	bzero(&vmsegdesc, sizeof(vmsegdesc));
	vmsegdesc.cpuid = vcpu;
	vmsegdesc.regnum = reg;
	vmsegdesc.desc.base = base;
	vmsegdesc.desc.limit = limit;
	vmsegdesc.desc.access = access;

	error = ioctl(ctx->fd, VM_SET_SEGMENT_DESCRIPTOR, &vmsegdesc);
	return (error);
}

int
vm_get_desc(struct vmctx *ctx, int vcpu, int reg,
	    uint64_t *base, uint32_t *limit, uint32_t *access)
{
	int error;
	struct vm_seg_desc vmsegdesc;

	bzero(&vmsegdesc, sizeof(vmsegdesc));
	vmsegdesc.cpuid = vcpu;
	vmsegdesc.regnum = reg;

	error = ioctl(ctx->fd, VM_GET_SEGMENT_DESCRIPTOR, &vmsegdesc);
	if (error == 0) {
		*base = vmsegdesc.desc.base;
		*limit = vmsegdesc.desc.limit;
		*access = vmsegdesc.desc.access;
	}
	return (error);
}

int
vm_get_seg_desc(struct vmctx *ctx, int vcpu, int reg, struct seg_desc *seg_desc)
{
	int error;

	error = vm_get_desc(ctx, vcpu, reg, &seg_desc->base, &seg_desc->limit,
	    &seg_desc->access);
	return (error);
}

int
vm_set_register(struct vmctx *ctx, int vcpu, int reg, uint64_t val)
{
	int error;
	struct vm_register vmreg;

	bzero(&vmreg, sizeof(vmreg));
	vmreg.cpuid = vcpu;
	vmreg.regnum = reg;
	vmreg.regval = val;

	error = ioctl(ctx->fd, VM_SET_REGISTER, &vmreg);
	return (error);
}

int
vm_get_register(struct vmctx *ctx, int vcpu, int reg, uint64_t *ret_val)
{
	int error;
	struct vm_register vmreg;

	bzero(&vmreg, sizeof(vmreg));
	vmreg.cpuid = vcpu;
	vmreg.regnum = reg;

	error = ioctl(ctx->fd, VM_GET_REGISTER, &vmreg);
	*ret_val = vmreg.regval;
	return (error);
}

int
vm_run(struct vmctx *ctx, int vcpu, struct vm_exit *vmexit)
{
	int error;
	struct vm_run vmrun;

	bzero(&vmrun, sizeof(vmrun));
	vmrun.cpuid = vcpu;

	error = ioctl(ctx->fd, VM_RUN, &vmrun);
	bcopy(&vmrun.vm_exit, vmexit, sizeof(struct vm_exit));
	return (error);
}

int
vm_suspend(struct vmctx *ctx, enum vm_suspend_how how)
{
	struct vm_suspend vmsuspend;

	bzero(&vmsuspend, sizeof(vmsuspend));
	vmsuspend.how = how;
	return (ioctl(ctx->fd, VM_SUSPEND, &vmsuspend));
}

int
vm_reinit(struct vmctx *ctx)
{

	return (ioctl(ctx->fd, VM_REINIT, 0));
}

int
vm_inject_exception(struct vmctx *ctx, int vcpu, int vector, int errcode_valid,
    uint32_t errcode, int restart_instruction)
{
	struct vm_exception exc;

	exc.cpuid = vcpu;
	exc.vector = vector;
	exc.error_code = errcode;
	exc.error_code_valid = errcode_valid;
	exc.restart_instruction = restart_instruction;

	return (ioctl(ctx->fd, VM_INJECT_EXCEPTION, &exc));
}

int
vm_apicid2vcpu(struct vmctx *ctx, int apicid)
{
	/*
	 * The apic id associated with the 'vcpu' has the same numerical value
	 * as the 'vcpu' itself.
	 */
	return (apicid);
}

int
vm_lapic_irq(struct vmctx *ctx, int vcpu, int vector)
{
	struct vm_lapic_irq vmirq;

	bzero(&vmirq, sizeof(vmirq));
	vmirq.cpuid = vcpu;
	vmirq.vector = vector;

	return (ioctl(ctx->fd, VM_LAPIC_IRQ, &vmirq));
}

int
vm_lapic_local_irq(struct vmctx *ctx, int vcpu, int vector)
{
	struct vm_lapic_irq vmirq;

	bzero(&vmirq, sizeof(vmirq));
	vmirq.cpuid = vcpu;
	vmirq.vector = vector;

	return (ioctl(ctx->fd, VM_LAPIC_LOCAL_IRQ, &vmirq));
}

int
vm_lapic_msi(struct vmctx *ctx, uint64_t addr, uint64_t msg)
{
	struct vm_lapic_msi vmmsi;

	bzero(&vmmsi, sizeof(vmmsi));
	vmmsi.addr = addr;
	vmmsi.msg = msg;

	return (ioctl(ctx->fd, VM_LAPIC_MSI, &vmmsi));
}

int
vm_ioapic_assert_irq(struct vmctx *ctx, int irq)
{
	struct vm_ioapic_irq ioapic_irq;

	bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq));
	ioapic_irq.irq = irq;

	return (ioctl(ctx->fd, VM_IOAPIC_ASSERT_IRQ, &ioapic_irq));
}

int
vm_ioapic_deassert_irq(struct vmctx *ctx, int irq)
{
	struct vm_ioapic_irq ioapic_irq;

	bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq));
	ioapic_irq.irq = irq;

	return (ioctl(ctx->fd, VM_IOAPIC_DEASSERT_IRQ, &ioapic_irq));
}

int
vm_ioapic_pulse_irq(struct vmctx *ctx, int irq)
{
	struct vm_ioapic_irq ioapic_irq;

	bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq));
	ioapic_irq.irq = irq;

	return (ioctl(ctx->fd, VM_IOAPIC_PULSE_IRQ, &ioapic_irq));
}

int
vm_ioapic_pincount(struct vmctx *ctx, int *pincount)
{

	return (ioctl(ctx->fd, VM_IOAPIC_PINCOUNT, pincount));
}

int
vm_isa_assert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
{
	struct vm_isa_irq isa_irq;

	bzero(&isa_irq, sizeof(struct vm_isa_irq));
	isa_irq.atpic_irq = atpic_irq;
	isa_irq.ioapic_irq = ioapic_irq;

	return (ioctl(ctx->fd, VM_ISA_ASSERT_IRQ, &isa_irq));
}

int
vm_isa_deassert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
{
	struct vm_isa_irq isa_irq;

	bzero(&isa_irq, sizeof(struct vm_isa_irq));
	isa_irq.atpic_irq = atpic_irq;
	isa_irq.ioapic_irq = ioapic_irq;

	return (ioctl(ctx->fd, VM_ISA_DEASSERT_IRQ, &isa_irq));
}

int
vm_isa_pulse_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
{
	struct vm_isa_irq isa_irq;

	bzero(&isa_irq, sizeof(struct vm_isa_irq));
	isa_irq.atpic_irq = atpic_irq;
	isa_irq.ioapic_irq = ioapic_irq;

	return (ioctl(ctx->fd, VM_ISA_PULSE_IRQ, &isa_irq));
}

int
vm_isa_set_irq_trigger(struct vmctx *ctx, int atpic_irq,
    enum vm_intr_trigger trigger)
{
	struct vm_isa_irq_trigger isa_irq_trigger;

	bzero(&isa_irq_trigger, sizeof(struct vm_isa_irq_trigger));
	isa_irq_trigger.atpic_irq = atpic_irq;
	isa_irq_trigger.trigger = trigger;

	return (ioctl(ctx->fd, VM_ISA_SET_IRQ_TRIGGER, &isa_irq_trigger));
}

int
vm_inject_nmi(struct vmctx *ctx, int vcpu)
{
	struct vm_nmi vmnmi;

	bzero(&vmnmi, sizeof(vmnmi));
	vmnmi.cpuid = vcpu;

	return (ioctl(ctx->fd, VM_INJECT_NMI, &vmnmi));
}

static struct {
	const char	*name;
	int		type;
} capstrmap[] = {
	{ "hlt_exit",		VM_CAP_HALT_EXIT },
	{ "mtrap_exit",		VM_CAP_MTRAP_EXIT },
	{ "pause_exit",		VM_CAP_PAUSE_EXIT },
	{ "unrestricted_guest",	VM_CAP_UNRESTRICTED_GUEST },
	{ "enable_invpcid",	VM_CAP_ENABLE_INVPCID },
	{ 0 }
};

int
vm_capability_name2type(const char *capname)
{
	int i;

	for (i = 0; capstrmap[i].name != NULL && capname != NULL; i++) {
		if (strcmp(capstrmap[i].name, capname) == 0)
			return (capstrmap[i].type);
	}

	return (-1);
}

const char *
vm_capability_type2name(int type)
{
	int i;

	for (i = 0; capstrmap[i].name != NULL; i++) {
		if (capstrmap[i].type == type)
			return (capstrmap[i].name);
	}

	return (NULL);
}

int
vm_get_capability(struct vmctx *ctx, int vcpu, enum vm_cap_type cap,
		  int *retval)
{
	int error;
	struct vm_capability vmcap;

	bzero(&vmcap, sizeof(vmcap));
	vmcap.cpuid = vcpu;
	vmcap.captype = cap;

	error = ioctl(ctx->fd, VM_GET_CAPABILITY, &vmcap);
	*retval = vmcap.capval;
	return (error);
}

int
vm_set_capability(struct vmctx *ctx, int vcpu, enum vm_cap_type cap, int val)
{
	struct vm_capability vmcap;

	bzero(&vmcap, sizeof(vmcap));
	vmcap.cpuid = vcpu;
	vmcap.captype = cap;
	vmcap.capval = val;

	return (ioctl(ctx->fd, VM_SET_CAPABILITY, &vmcap));
}
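
/*
 * Illustrative capability toggle: map a user-visible capability name to its
 * type and enable it on a vcpu (error handling elided):
 *
 *	int type;
 *
 *	if ((type = vm_capability_name2type("hlt_exit")) >= 0)
 *		(void) vm_set_capability(ctx, vcpu, type, 1);
 */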

int
vm_assign_pptdev(struct vmctx *ctx, int bus, int slot, int func)
{
	struct vm_pptdev pptdev;

	bzero(&pptdev, sizeof(pptdev));
	pptdev.bus = bus;
	pptdev.slot = slot;
	pptdev.func = func;

	return (ioctl(ctx->fd, VM_BIND_PPTDEV, &pptdev));
}

int
vm_unassign_pptdev(struct vmctx *ctx, int bus, int slot, int func)
{
	struct vm_pptdev pptdev;

	bzero(&pptdev, sizeof(pptdev));
	pptdev.bus = bus;
	pptdev.slot = slot;
	pptdev.func = func;

	return (ioctl(ctx->fd, VM_UNBIND_PPTDEV, &pptdev));
}

int
vm_map_pptdev_mmio(struct vmctx *ctx, int bus, int slot, int func,
		   vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
{
	struct vm_pptdev_mmio pptmmio;

	bzero(&pptmmio, sizeof(pptmmio));
	pptmmio.bus = bus;
	pptmmio.slot = slot;
	pptmmio.func = func;
	pptmmio.gpa = gpa;
	pptmmio.len = len;
	pptmmio.hpa = hpa;

	return (ioctl(ctx->fd, VM_MAP_PPTDEV_MMIO, &pptmmio));
}

int
vm_setup_pptdev_msi(struct vmctx *ctx, int vcpu, int bus, int slot, int func,
    uint64_t addr, uint64_t msg, int numvec)
{
	struct vm_pptdev_msi pptmsi;

	bzero(&pptmsi, sizeof(pptmsi));
	pptmsi.vcpu = vcpu;
	pptmsi.bus = bus;
	pptmsi.slot = slot;
	pptmsi.func = func;
	pptmsi.msg = msg;
	pptmsi.addr = addr;
	pptmsi.numvec = numvec;

	return (ioctl(ctx->fd, VM_PPTDEV_MSI, &pptmsi));
}

int
vm_setup_pptdev_msix(struct vmctx *ctx, int vcpu, int bus, int slot, int func,
    int idx, uint64_t addr, uint64_t msg, uint32_t vector_control)
{
	struct vm_pptdev_msix pptmsix;

	bzero(&pptmsix, sizeof(pptmsix));
	pptmsix.vcpu = vcpu;
	pptmsix.bus = bus;
	pptmsix.slot = slot;
	pptmsix.func = func;
	pptmsix.idx = idx;
	pptmsix.msg = msg;
	pptmsix.addr = addr;
	pptmsix.vector_control = vector_control;

	return (ioctl(ctx->fd, VM_PPTDEV_MSIX, &pptmsix));
}

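/*
 * Note: the return value points into a static buffer that is overwritten by
 * the next call, so the caller must copy or consume the statistics before
 * calling again; this also makes the function unsuitable for concurrent use.
 */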
uint64_t *
vm_get_stats(struct vmctx *ctx, int vcpu, struct timeval *ret_tv,
	     int *ret_entries)
{
	int error;

	static struct vm_stats vmstats;

	vmstats.cpuid = vcpu;

	error = ioctl(ctx->fd, VM_STATS, &vmstats);
	if (error == 0) {
		if (ret_entries)
			*ret_entries = vmstats.num_entries;
		if (ret_tv)
			*ret_tv = vmstats.tv;
		return (vmstats.statbuf);
	} else
		return (NULL);
}

const char *
vm_get_stat_desc(struct vmctx *ctx, int index)
{
	static struct vm_stat_desc statdesc;

	statdesc.index = index;
	if (ioctl(ctx->fd, VM_STAT_DESC, &statdesc) == 0)
		return (statdesc.desc);
	else
		return (NULL);
}

int
vm_get_x2apic_state(struct vmctx *ctx, int vcpu, enum x2apic_state *state)
{
	int error;
	struct vm_x2apic x2apic;

	bzero(&x2apic, sizeof(x2apic));
	x2apic.cpuid = vcpu;

	error = ioctl(ctx->fd, VM_GET_X2APIC_STATE, &x2apic);
	*state = x2apic.state;
	return (error);
}

int
vm_set_x2apic_state(struct vmctx *ctx, int vcpu, enum x2apic_state state)
{
	int error;
	struct vm_x2apic x2apic;

	bzero(&x2apic, sizeof(x2apic));
	x2apic.cpuid = vcpu;
	x2apic.state = state;

	error = ioctl(ctx->fd, VM_SET_X2APIC_STATE, &x2apic);

	return (error);
}

/*
 * From Intel Vol 3a:
 * Table 9-1. IA-32 Processor States Following Power-up, Reset or INIT
 */
int
vcpu_reset(struct vmctx *vmctx, int vcpu)
{
	int error;
	uint64_t rflags, rip, cr0, cr4, zero, desc_base, rdx;
	uint32_t desc_access, desc_limit;
	uint16_t sel;

	zero = 0;

	rflags = 0x2;
	error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RFLAGS, rflags);
	if (error)
		goto done;

	rip = 0xfff0;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RIP, rip)) != 0)
		goto done;

	cr0 = CR0_NE;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR0, cr0)) != 0)
		goto done;

	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR3, zero)) != 0)
		goto done;

	cr4 = 0;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR4, cr4)) != 0)
		goto done;

	/*
	 * CS: present, r/w, accessed, 16-bit, byte granularity, usable
	 */
	desc_base = 0xffff0000;
	desc_limit = 0xffff;
	desc_access = 0x0093;
	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_CS,
			    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	sel = 0xf000;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CS, sel)) != 0)
		goto done;

	/*
	 * SS,DS,ES,FS,GS: present, r/w, accessed, 16-bit, byte granularity
	 */
	desc_base = 0;
	desc_limit = 0xffff;
	desc_access = 0x0093;
	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_SS,
			    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_DS,
			    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_ES,
			    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_FS,
			    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_GS,
			    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	sel = 0;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_SS, sel)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_DS, sel)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_ES, sel)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_FS, sel)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_GS, sel)) != 0)
		goto done;

	/* General purpose registers */
	rdx = 0xf00;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RAX, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RBX, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RCX, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RDX, rdx)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RSI, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RDI, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RBP, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RSP, zero)) != 0)
		goto done;

	/* GDTR, IDTR */
	desc_base = 0;
	desc_limit = 0xffff;
	desc_access = 0;
	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_GDTR,
			    desc_base, desc_limit, desc_access);
	if (error != 0)
		goto done;

	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_IDTR,
			    desc_base, desc_limit, desc_access);
	if (error != 0)
		goto done;

	/* TR */
	desc_base = 0;
	desc_limit = 0xffff;
	desc_access = 0x0000008b;
	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_TR, desc_base,
			    desc_limit, desc_access);
	if (error)
		goto done;

	sel = 0;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_TR, sel)) != 0)
		goto done;

	/* LDTR */
	desc_base = 0;
	desc_limit = 0xffff;
	desc_access = 0x00000082;
	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_LDTR, desc_base,
			    desc_limit, desc_access);
	if (error)
		goto done;
	sel = 0;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_LDTR, sel)) != 0)
		goto done;

	/* XXX cr2, debug registers */

	error = 0;
done:
	return (error);
}

int
vm_get_gpa_pmap(struct vmctx *ctx, uint64_t gpa, uint64_t *pte, int *num)
{
	int error, i;
	struct vm_gpa_pte gpapte;

	bzero(&gpapte, sizeof(gpapte));
	gpapte.gpa = gpa;

	error = ioctl(ctx->fd, VM_GET_GPA_PMAP, &gpapte);

	if (error == 0) {
		*num = gpapte.ptenum;
		for (i = 0; i < gpapte.ptenum; i++)
			pte[i] = gpapte.pte[i];
	}

	return (error);
}

int
vm_get_hpet_capabilities(struct vmctx *ctx, uint32_t *capabilities)
{
	int error;
	struct vm_hpet_cap cap;

	bzero(&cap, sizeof(struct vm_hpet_cap));
	error = ioctl(ctx->fd, VM_GET_HPET_CAPABILITIES, &cap);
	if (capabilities != NULL)
		*capabilities = cap.capabilities;
	return (error);
}

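/*
 * Translate guest linear address 'gla' to a guest physical address.  A zero
 * return with *fault set means the translation itself faulted and *gpa is
 * not valid; callers such as vm_copy_setup() below bail out without an
 * error in that case.  A non-zero return means the request itself failed.
 */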
int
vm_gla2gpa(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
    uint64_t gla, int prot, uint64_t *gpa, int *fault)
{
	struct vm_gla2gpa gg;
	int error;

	bzero(&gg, sizeof(struct vm_gla2gpa));
	gg.vcpuid = vcpu;
	gg.prot = prot;
	gg.gla = gla;
	gg.paging = *paging;

	error = ioctl(ctx->fd, VM_GLA2GPA, &gg);
	if (error == 0) {
		*fault = gg.fault;
		*gpa = gg.gpa;
	}
	return (error);
}

#ifndef min
#define	min(a,b)	(((a) < (b)) ? (a) : (b))
#endif

int
vm_copy_setup(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
    uint64_t gla, size_t len, int prot, struct iovec *iov, int iovcnt,
    int *fault)
{
	void *va;
	uint64_t gpa;
	int error, i, n, off;

	for (i = 0; i < iovcnt; i++) {
		iov[i].iov_base = 0;
		iov[i].iov_len = 0;
	}

	while (len) {
		assert(iovcnt > 0);
		error = vm_gla2gpa(ctx, vcpu, paging, gla, prot, &gpa, fault);
		if (error || *fault)
			return (error);

		off = gpa & PAGE_MASK;
		n = min(len, PAGE_SIZE - off);

		va = vm_map_gpa(ctx, gpa, n);
		if (va == NULL)
			return (EFAULT);

		iov->iov_base = va;
		iov->iov_len = n;
		iov++;
		iovcnt--;

		gla += n;
		len -= n;
	}
	return (0);
}
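
/*
 * Illustrative read of a guest object via the scatter-gather setup above
 * (error handling elided; 'paging' is assumed to describe the vcpu's
 * current paging mode).  Two iovecs suffice for an 8-byte object since it
 * can span at most one page boundary.
 *
 *	struct iovec iov[2];
 *	uint64_t val;
 *	int fault;
 *
 *	if (vm_copy_setup(ctx, vcpu, &paging, gla, sizeof(val), PROT_READ,
 *	    iov, nitems(iov), &fault) == 0 && !fault)
 *		vm_copyin(ctx, vcpu, iov, &val, sizeof(val));
 */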

void
vm_copy_teardown(struct vmctx *ctx, int vcpu, struct iovec *iov, int iovcnt)
{

	return;
}

void
vm_copyin(struct vmctx *ctx, int vcpu, struct iovec *iov, void *vp, size_t len)
{
	const char *src;
	char *dst;
	size_t n;

	dst = vp;
	while (len) {
		assert(iov->iov_len);
		n = min(len, iov->iov_len);
		src = iov->iov_base;
		bcopy(src, dst, n);

		iov++;
		dst += n;
		len -= n;
	}
}

void
vm_copyout(struct vmctx *ctx, int vcpu, const void *vp, struct iovec *iov,
    size_t len)
{
	const char *src;
	char *dst;
	size_t n;

	src = vp;
	while (len) {
		assert(iov->iov_len);
		n = min(len, iov->iov_len);
		dst = iov->iov_base;
		bcopy(src, dst, n);

		iov++;
		src += n;
		len -= n;
	}
}

static int
vm_get_cpus(struct vmctx *ctx, int which, cpuset_t *cpus)
{
	struct vm_cpuset vm_cpuset;
	int error;

	bzero(&vm_cpuset, sizeof(struct vm_cpuset));
	vm_cpuset.which = which;
	vm_cpuset.cpusetsize = sizeof(cpuset_t);
	vm_cpuset.cpus = cpus;

	error = ioctl(ctx->fd, VM_GET_CPUS, &vm_cpuset);
	return (error);
}

int
vm_active_cpus(struct vmctx *ctx, cpuset_t *cpus)
{

	return (vm_get_cpus(ctx, VM_ACTIVE_CPUS, cpus));
}

int
vm_suspended_cpus(struct vmctx *ctx, cpuset_t *cpus)
{

	return (vm_get_cpus(ctx, VM_SUSPENDED_CPUS, cpus));
}

int
vm_activate_cpu(struct vmctx *ctx, int vcpu)
{
	struct vm_activate_cpu ac;
	int error;

	bzero(&ac, sizeof(struct vm_activate_cpu));
	ac.vcpuid = vcpu;
	error = ioctl(ctx->fd, VM_ACTIVATE_CPU, &ac);
	return (error);
}

int
vm_get_intinfo(struct vmctx *ctx, int vcpu, uint64_t *info1, uint64_t *info2)
{
	struct vm_intinfo vmii;
	int error;

	bzero(&vmii, sizeof(struct vm_intinfo));
	vmii.vcpuid = vcpu;
	error = ioctl(ctx->fd, VM_GET_INTINFO, &vmii);
	if (error == 0) {
		*info1 = vmii.info1;
		*info2 = vmii.info2;
	}
	return (error);
}

int
vm_set_intinfo(struct vmctx *ctx, int vcpu, uint64_t info1)
{
	struct vm_intinfo vmii;
	int error;

	bzero(&vmii, sizeof(struct vm_intinfo));
	vmii.vcpuid = vcpu;
	vmii.info1 = info1;
	error = ioctl(ctx->fd, VM_SET_INTINFO, &vmii);
	return (error);
}

int
vm_rtc_write(struct vmctx *ctx, int offset, uint8_t value)
{
	struct vm_rtc_data rtcdata;
	int error;

	bzero(&rtcdata, sizeof(struct vm_rtc_data));
	rtcdata.offset = offset;
	rtcdata.value = value;
	error = ioctl(ctx->fd, VM_RTC_WRITE, &rtcdata);
	return (error);
}

int
vm_rtc_read(struct vmctx *ctx, int offset, uint8_t *retval)
{
	struct vm_rtc_data rtcdata;
	int error;

	bzero(&rtcdata, sizeof(struct vm_rtc_data));
	rtcdata.offset = offset;
	error = ioctl(ctx->fd, VM_RTC_READ, &rtcdata);
	if (error == 0)
		*retval = rtcdata.value;
	return (error);
}

int
vm_rtc_settime(struct vmctx *ctx, time_t secs)
{
	struct vm_rtc_time rtctime;
	int error;

	bzero(&rtctime, sizeof(struct vm_rtc_time));
	rtctime.secs = secs;
	error = ioctl(ctx->fd, VM_RTC_SETTIME, &rtctime);
	return (error);
}

int
vm_rtc_gettime(struct vmctx *ctx, time_t *secs)
{
	struct vm_rtc_time rtctime;
	int error;

	bzero(&rtctime, sizeof(struct vm_rtc_time));
	error = ioctl(ctx->fd, VM_RTC_GETTIME, &rtctime);
	if (error == 0)
		*secs = rtctime.secs;
	return (error);
}

int
vm_restart_instruction(void *arg, int vcpu)
{
	struct vmctx *ctx = arg;

	return (ioctl(ctx->fd, VM_RESTART_INSTRUCTION, &vcpu));
}

int
vm_set_topology(struct vmctx *ctx,
    uint16_t sockets, uint16_t cores, uint16_t threads, uint16_t maxcpus)
{
	struct vm_cpu_topology topology;

	bzero(&topology, sizeof (struct vm_cpu_topology));
	topology.sockets = sockets;
	topology.cores = cores;
	topology.threads = threads;
	topology.maxcpus = maxcpus;
	return (ioctl(ctx->fd, VM_SET_TOPOLOGY, &topology));
}

int
vm_get_topology(struct vmctx *ctx,
    uint16_t *sockets, uint16_t *cores, uint16_t *threads, uint16_t *maxcpus)
{
	struct vm_cpu_topology topology;
	int error;

	bzero(&topology, sizeof (struct vm_cpu_topology));
	error = ioctl(ctx->fd, VM_GET_TOPOLOGY, &topology);
	if (error == 0) {
		*sockets = topology.sockets;
		*cores = topology.cores;
		*threads = topology.threads;
		*maxcpus = topology.maxcpus;
	}
	return (error);
}

int
vm_get_device_fd(struct vmctx *ctx)
{

	return (ctx->fd);
}

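/*
 * Dual-mode helper for Capsicum ioctl rights limiting: with 'len' NULL it
 * returns a malloc'ed copy of the command list that the caller must free;
 * with 'len' non-NULL it stores the number of commands in *len and returns
 * NULL.
 */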
const cap_ioctl_t *
vm_get_ioctls(size_t *len)
{
	cap_ioctl_t *cmds;
	/* keep in sync with machine/vmm_dev.h */
	static const cap_ioctl_t vm_ioctl_cmds[] = { VM_RUN, VM_SUSPEND, VM_REINIT,
	    VM_ALLOC_MEMSEG, VM_GET_MEMSEG, VM_MMAP_MEMSEG,
	    VM_MMAP_GETNEXT, VM_SET_REGISTER, VM_GET_REGISTER,
	    VM_SET_SEGMENT_DESCRIPTOR, VM_GET_SEGMENT_DESCRIPTOR,
	    VM_INJECT_EXCEPTION, VM_LAPIC_IRQ, VM_LAPIC_LOCAL_IRQ,
	    VM_LAPIC_MSI, VM_IOAPIC_ASSERT_IRQ, VM_IOAPIC_DEASSERT_IRQ,
	    VM_IOAPIC_PULSE_IRQ, VM_IOAPIC_PINCOUNT, VM_ISA_ASSERT_IRQ,
	    VM_ISA_DEASSERT_IRQ, VM_ISA_PULSE_IRQ, VM_ISA_SET_IRQ_TRIGGER,
	    VM_SET_CAPABILITY, VM_GET_CAPABILITY, VM_BIND_PPTDEV,
	    VM_UNBIND_PPTDEV, VM_MAP_PPTDEV_MMIO, VM_PPTDEV_MSI,
	    VM_PPTDEV_MSIX, VM_INJECT_NMI, VM_STATS, VM_STAT_DESC,
	    VM_SET_X2APIC_STATE, VM_GET_X2APIC_STATE,
	    VM_GET_HPET_CAPABILITIES, VM_GET_GPA_PMAP, VM_GLA2GPA,
	    VM_ACTIVATE_CPU, VM_GET_CPUS, VM_SET_INTINFO, VM_GET_INTINFO,
	    VM_RTC_WRITE, VM_RTC_READ, VM_RTC_SETTIME, VM_RTC_GETTIME,
	    VM_RESTART_INSTRUCTION, VM_SET_TOPOLOGY, VM_GET_TOPOLOGY };

	if (len == NULL) {
		cmds = malloc(sizeof(vm_ioctl_cmds));
		if (cmds == NULL)
			return (NULL);
		bcopy(vm_ioctl_cmds, cmds, sizeof(vm_ioctl_cmds));
		return (cmds);
	}

	*len = nitems(vm_ioctl_cmds);
	return (NULL);
}