vmm_dev.c revision 270070
/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/10/sys/amd64/vmm/vmm_dev.c 270070 2014-08-17 00:52:07Z grehan $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/amd64/vmm/vmm_dev.c 270070 2014-08-17 00:52:07Z grehan $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/sysctl.h>
#include <sys/libkern.h>
#include <sys/ioccom.h>
#include <sys/mman.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

#include <machine/vmparam.h>
#include <machine/vmm.h>
#include <machine/vmm_instruction_emul.h>
#include <machine/vmm_dev.h>

#include "vmm_lapic.h"
#include "vmm_stat.h"
#include "vmm_mem.h"
#include "io/ppt.h"
#include "io/vatpic.h"
#include "io/vioapic.h"
#include "io/vhpet.h"

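/*
 * Per-VM state behind a /dev/vmm/<name> node: the vm instance cookie, the
 * cdev itself, and a link into the global list protected by vmmdev_mtx.
 */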
struct vmmdev_softc {
	struct vm	*vm;		/* vm instance cookie */
	struct cdev	*cdev;
	SLIST_ENTRY(vmmdev_softc) link;
	int		flags;
};
#define	VSC_LINKED		0x01

static SLIST_HEAD(, vmmdev_softc) head;

static struct mtx vmmdev_mtx;

static MALLOC_DEFINE(M_VMMDEV, "vmmdev", "vmmdev");

SYSCTL_DECL(_hw_vmm);

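/*
 * Find the softc for the VM with the given name by walking the global
 * list; the caller is expected to hold vmmdev_mtx.
 */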
static struct vmmdev_softc *
vmmdev_lookup(const char *name)
{
	struct vmmdev_softc *sc;

#ifdef notyet	/* XXX kernel is not compiled with invariants */
	mtx_assert(&vmmdev_mtx, MA_OWNED);
#endif

	SLIST_FOREACH(sc, &head, link) {
		if (strcmp(name, vm_name(sc->vm)) == 0)
			break;
	}

	return (sc);
}

static struct vmmdev_softc *
vmmdev_lookup2(struct cdev *cdev)
{

	return (cdev->si_drv1);
}

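/*
 * read(2)/write(2) handler: the file offset is interpreted as a guest
 * physical address.  Each iteration wires at most one page with
 * vm_gpa_hold() and copies it with uiomove(); reads from holes in the
 * guest physical memory map return zeroes, writes to them fail.
 */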
static int
vmmdev_rw(struct cdev *cdev, struct uio *uio, int flags)
{
	int error, off, c, prot;
	vm_paddr_t gpa;
	void *hpa, *cookie;
	struct vmmdev_softc *sc;

	static char zerobuf[PAGE_SIZE];

	error = 0;
	sc = vmmdev_lookup2(cdev);
	if (sc == NULL)
		error = ENXIO;

	prot = (uio->uio_rw == UIO_WRITE ? VM_PROT_WRITE : VM_PROT_READ);
	while (uio->uio_resid > 0 && error == 0) {
		gpa = uio->uio_offset;
		off = gpa & PAGE_MASK;
		c = min(uio->uio_resid, PAGE_SIZE - off);

		/*
		 * The VM has a hole in its physical memory map. If we want to
		 * use 'dd' to inspect memory beyond the hole we need to
		 * provide bogus data for memory that lies in the hole.
		 *
		 * Since this device does not support lseek(2), dd(1) will
		 * read(2) blocks of data to simulate the lseek(2).
		 */
		hpa = vm_gpa_hold(sc->vm, gpa, c, prot, &cookie);
		if (hpa == NULL) {
			if (uio->uio_rw == UIO_READ)
				error = uiomove(zerobuf, c, uio);
			else
				error = EFAULT;
		} else {
			error = uiomove(hpa, c, uio);
			vm_gpa_release(cookie);
		}
	}
	return (error);
}

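/*
 * ioctl(2) dispatcher for /dev/vmm/<name>.  Commands that act on a single
 * vcpu freeze that vcpu first; commands that act on the entire VM freeze
 * every vcpu.  Any frozen state is undone before returning.
 */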
static int
vmmdev_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag,
	     struct thread *td)
{
	int error, vcpu, state_changed, size;
	cpuset_t *cpuset;
	struct vmmdev_softc *sc;
	struct vm_memory_segment *seg;
	struct vm_register *vmreg;
	struct vm_seg_desc *vmsegdesc;
	struct vm_run *vmrun;
	struct vm_exception *vmexc;
	struct vm_lapic_irq *vmirq;
	struct vm_lapic_msi *vmmsi;
	struct vm_ioapic_irq *ioapic_irq;
	struct vm_isa_irq *isa_irq;
	struct vm_isa_irq_trigger *isa_irq_trigger;
	struct vm_capability *vmcap;
	struct vm_pptdev *pptdev;
	struct vm_pptdev_mmio *pptmmio;
	struct vm_pptdev_msi *pptmsi;
	struct vm_pptdev_msix *pptmsix;
	struct vm_nmi *vmnmi;
	struct vm_stats *vmstats;
	struct vm_stat_desc *statdesc;
	struct vm_x2apic *x2apic;
	struct vm_gpa_pte *gpapte;
	struct vm_suspend *vmsuspend;
	struct vm_gla2gpa *gg;
	struct vm_activate_cpu *vac;
	struct vm_cpuset *vm_cpuset;

	sc = vmmdev_lookup2(cdev);
	if (sc == NULL)
		return (ENXIO);

	error = 0;
	vcpu = -1;
	state_changed = 0;

	/*
	 * Some VMM ioctls can operate only on vcpus that are not running.
	 */
	switch (cmd) {
	case VM_RUN:
	case VM_GET_REGISTER:
	case VM_SET_REGISTER:
	case VM_GET_SEGMENT_DESCRIPTOR:
	case VM_SET_SEGMENT_DESCRIPTOR:
	case VM_INJECT_EXCEPTION:
	case VM_GET_CAPABILITY:
	case VM_SET_CAPABILITY:
	case VM_PPTDEV_MSI:
	case VM_PPTDEV_MSIX:
	case VM_SET_X2APIC_STATE:
	case VM_GLA2GPA:
	case VM_ACTIVATE_CPU:
		/*
		 * XXX fragile, handle with care
		 * Assumes that the first field of the ioctl data is the vcpu.
		 */
		vcpu = *(int *)data;
		if (vcpu < 0 || vcpu >= VM_MAXCPU) {
			error = EINVAL;
			goto done;
		}

		error = vcpu_set_state(sc->vm, vcpu, VCPU_FROZEN, true);
		if (error)
			goto done;

		state_changed = 1;
		break;

	case VM_MAP_PPTDEV_MMIO:
	case VM_BIND_PPTDEV:
	case VM_UNBIND_PPTDEV:
	case VM_MAP_MEMORY:
		/*
		 * ioctls that operate on the entire virtual machine must
		 * prevent all vcpus from running.
		 */
		error = 0;
		for (vcpu = 0; vcpu < VM_MAXCPU; vcpu++) {
			error = vcpu_set_state(sc->vm, vcpu, VCPU_FROZEN, true);
			if (error)
				break;
		}

		if (error) {
			while (--vcpu >= 0)
				vcpu_set_state(sc->vm, vcpu, VCPU_IDLE, false);
			goto done;
		}

		state_changed = 2;
		break;

	default:
		break;
	}

	switch(cmd) {
	case VM_RUN:
		vmrun = (struct vm_run *)data;
		error = vm_run(sc->vm, vmrun);
		break;
	case VM_SUSPEND:
		vmsuspend = (struct vm_suspend *)data;
		error = vm_suspend(sc->vm, vmsuspend->how);
		break;
	case VM_STAT_DESC: {
		statdesc = (struct vm_stat_desc *)data;
		error = vmm_stat_desc_copy(statdesc->index,
					statdesc->desc, sizeof(statdesc->desc));
		break;
	}
	case VM_STATS: {
		CTASSERT(MAX_VM_STATS >= MAX_VMM_STAT_ELEMS);
		vmstats = (struct vm_stats *)data;
		getmicrotime(&vmstats->tv);
		error = vmm_stat_copy(sc->vm, vmstats->cpuid,
				      &vmstats->num_entries, vmstats->statbuf);
		break;
	}
	case VM_PPTDEV_MSI:
		pptmsi = (struct vm_pptdev_msi *)data;
		error = ppt_setup_msi(sc->vm, pptmsi->vcpu,
				      pptmsi->bus, pptmsi->slot, pptmsi->func,
				      pptmsi->addr, pptmsi->msg,
				      pptmsi->numvec);
		break;
	case VM_PPTDEV_MSIX:
		pptmsix = (struct vm_pptdev_msix *)data;
		error = ppt_setup_msix(sc->vm, pptmsix->vcpu,
				       pptmsix->bus, pptmsix->slot,
				       pptmsix->func, pptmsix->idx,
				       pptmsix->addr, pptmsix->msg,
				       pptmsix->vector_control);
		break;
	case VM_MAP_PPTDEV_MMIO:
		pptmmio = (struct vm_pptdev_mmio *)data;
		error = ppt_map_mmio(sc->vm, pptmmio->bus, pptmmio->slot,
				     pptmmio->func, pptmmio->gpa, pptmmio->len,
				     pptmmio->hpa);
		break;
	case VM_BIND_PPTDEV:
		pptdev = (struct vm_pptdev *)data;
		error = vm_assign_pptdev(sc->vm, pptdev->bus, pptdev->slot,
					 pptdev->func);
		break;
	case VM_UNBIND_PPTDEV:
		pptdev = (struct vm_pptdev *)data;
		error = vm_unassign_pptdev(sc->vm, pptdev->bus, pptdev->slot,
					   pptdev->func);
		break;
	case VM_INJECT_EXCEPTION:
		vmexc = (struct vm_exception *)data;
		error = vm_inject_exception(sc->vm, vmexc->cpuid, vmexc);
		break;
	case VM_INJECT_NMI:
		vmnmi = (struct vm_nmi *)data;
		error = vm_inject_nmi(sc->vm, vmnmi->cpuid);
		break;
	case VM_LAPIC_IRQ:
		vmirq = (struct vm_lapic_irq *)data;
		error = lapic_intr_edge(sc->vm, vmirq->cpuid, vmirq->vector);
		break;
	case VM_LAPIC_LOCAL_IRQ:
		vmirq = (struct vm_lapic_irq *)data;
		error = lapic_set_local_intr(sc->vm, vmirq->cpuid,
		    vmirq->vector);
		break;
	case VM_LAPIC_MSI:
		vmmsi = (struct vm_lapic_msi *)data;
		error = lapic_intr_msi(sc->vm, vmmsi->addr, vmmsi->msg);
		break;
	case VM_IOAPIC_ASSERT_IRQ:
		ioapic_irq = (struct vm_ioapic_irq *)data;
		error = vioapic_assert_irq(sc->vm, ioapic_irq->irq);
		break;
	case VM_IOAPIC_DEASSERT_IRQ:
		ioapic_irq = (struct vm_ioapic_irq *)data;
		error = vioapic_deassert_irq(sc->vm, ioapic_irq->irq);
		break;
	case VM_IOAPIC_PULSE_IRQ:
		ioapic_irq = (struct vm_ioapic_irq *)data;
		error = vioapic_pulse_irq(sc->vm, ioapic_irq->irq);
		break;
	case VM_IOAPIC_PINCOUNT:
		*(int *)data = vioapic_pincount(sc->vm);
		break;
	case VM_ISA_ASSERT_IRQ:
		isa_irq = (struct vm_isa_irq *)data;
		error = vatpic_assert_irq(sc->vm, isa_irq->atpic_irq);
		if (error == 0 && isa_irq->ioapic_irq != -1)
			error = vioapic_assert_irq(sc->vm,
			    isa_irq->ioapic_irq);
		break;
	case VM_ISA_DEASSERT_IRQ:
		isa_irq = (struct vm_isa_irq *)data;
		error = vatpic_deassert_irq(sc->vm, isa_irq->atpic_irq);
		if (error == 0 && isa_irq->ioapic_irq != -1)
			error = vioapic_deassert_irq(sc->vm,
			    isa_irq->ioapic_irq);
		break;
	case VM_ISA_PULSE_IRQ:
		isa_irq = (struct vm_isa_irq *)data;
		error = vatpic_pulse_irq(sc->vm, isa_irq->atpic_irq);
		if (error == 0 && isa_irq->ioapic_irq != -1)
			error = vioapic_pulse_irq(sc->vm, isa_irq->ioapic_irq);
		break;
	case VM_ISA_SET_IRQ_TRIGGER:
		isa_irq_trigger = (struct vm_isa_irq_trigger *)data;
		error = vatpic_set_irq_trigger(sc->vm,
		    isa_irq_trigger->atpic_irq, isa_irq_trigger->trigger);
		break;
	case VM_MAP_MEMORY:
		seg = (struct vm_memory_segment *)data;
		error = vm_malloc(sc->vm, seg->gpa, seg->len);
		break;
	case VM_GET_MEMORY_SEG:
		seg = (struct vm_memory_segment *)data;
		seg->len = 0;
		(void)vm_gpabase2memseg(sc->vm, seg->gpa, seg);
		error = 0;
		break;
	case VM_GET_REGISTER:
		vmreg = (struct vm_register *)data;
		error = vm_get_register(sc->vm, vmreg->cpuid, vmreg->regnum,
					&vmreg->regval);
		break;
	case VM_SET_REGISTER:
		vmreg = (struct vm_register *)data;
		error = vm_set_register(sc->vm, vmreg->cpuid, vmreg->regnum,
					vmreg->regval);
		break;
	case VM_SET_SEGMENT_DESCRIPTOR:
		vmsegdesc = (struct vm_seg_desc *)data;
		error = vm_set_seg_desc(sc->vm, vmsegdesc->cpuid,
					vmsegdesc->regnum,
					&vmsegdesc->desc);
		break;
	case VM_GET_SEGMENT_DESCRIPTOR:
		vmsegdesc = (struct vm_seg_desc *)data;
		error = vm_get_seg_desc(sc->vm, vmsegdesc->cpuid,
					vmsegdesc->regnum,
					&vmsegdesc->desc);
		break;
	case VM_GET_CAPABILITY:
		vmcap = (struct vm_capability *)data;
		error = vm_get_capability(sc->vm, vmcap->cpuid,
					  vmcap->captype,
					  &vmcap->capval);
		break;
	case VM_SET_CAPABILITY:
		vmcap = (struct vm_capability *)data;
		error = vm_set_capability(sc->vm, vmcap->cpuid,
					  vmcap->captype,
					  vmcap->capval);
		break;
	case VM_SET_X2APIC_STATE:
		x2apic = (struct vm_x2apic *)data;
		error = vm_set_x2apic_state(sc->vm,
					    x2apic->cpuid, x2apic->state);
		break;
	case VM_GET_X2APIC_STATE:
		x2apic = (struct vm_x2apic *)data;
		error = vm_get_x2apic_state(sc->vm,
					    x2apic->cpuid, &x2apic->state);
		break;
	case VM_GET_GPA_PMAP:
		gpapte = (struct vm_gpa_pte *)data;
		pmap_get_mapping(vmspace_pmap(vm_get_vmspace(sc->vm)),
				 gpapte->gpa, gpapte->pte, &gpapte->ptenum);
		error = 0;
		break;
	case VM_GET_HPET_CAPABILITIES:
		error = vhpet_getcap((struct vm_hpet_cap *)data);
		break;
	case VM_GLA2GPA: {
		CTASSERT(PROT_READ == VM_PROT_READ);
		CTASSERT(PROT_WRITE == VM_PROT_WRITE);
		CTASSERT(PROT_EXEC == VM_PROT_EXECUTE);
		gg = (struct vm_gla2gpa *)data;
		error = vmm_gla2gpa(sc->vm, gg->vcpuid, &gg->paging, gg->gla,
		    gg->prot, &gg->gpa);
		KASSERT(error == 0 || error == 1 || error == -1,
		    ("%s: vmm_gla2gpa unknown error %d", __func__, error));
		if (error >= 0) {
			/*
			 * error = 0: the translation was successful
			 * error = 1: a fault was injected into the guest
			 */
			gg->fault = error;
			error = 0;
		} else {
			error = EFAULT;
		}
		break;
	}
	case VM_ACTIVATE_CPU:
		vac = (struct vm_activate_cpu *)data;
		error = vm_activate_cpu(sc->vm, vac->vcpuid);
		break;
	case VM_GET_CPUS:
		error = 0;
		vm_cpuset = (struct vm_cpuset *)data;
		size = vm_cpuset->cpusetsize;
		if (size < sizeof(cpuset_t) || size > CPU_MAXSIZE / NBBY) {
			error = ERANGE;
			break;
		}
		cpuset = malloc(size, M_TEMP, M_WAITOK | M_ZERO);
		if (vm_cpuset->which == VM_ACTIVE_CPUS)
			*cpuset = vm_active_cpus(sc->vm);
		else if (vm_cpuset->which == VM_SUSPENDED_CPUS)
			*cpuset = vm_suspended_cpus(sc->vm);
		else
			error = EINVAL;
		if (error == 0)
			error = copyout(cpuset, vm_cpuset->cpus, size);
		free(cpuset, M_TEMP);
		break;
	default:
		error = ENOTTY;
		break;
	}

	if (state_changed == 1) {
		vcpu_set_state(sc->vm, vcpu, VCPU_IDLE, false);
	} else if (state_changed == 2) {
		for (vcpu = 0; vcpu < VM_MAXCPU; vcpu++)
			vcpu_set_state(sc->vm, vcpu, VCPU_IDLE, false);
	}

done:
	/* Make sure that no handler returns a bogus value like ERESTART */
	KASSERT(error >= 0, ("vmmdev_ioctl: invalid error return %d", error));
	return (error);
}

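/*
 * mmap(2) handler: hand back the VM memory object covering the requested
 * offset so guest memory can be mapped into the host process.  Executable
 * mappings are refused.
 */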
static int
vmmdev_mmap_single(struct cdev *cdev, vm_ooffset_t *offset,
		   vm_size_t size, struct vm_object **object, int nprot)
{
	int error;
	struct vmmdev_softc *sc;

	sc = vmmdev_lookup2(cdev);
	if (sc != NULL && (nprot & PROT_EXEC) == 0)
		error = vm_get_memobj(sc->vm, *offset, size, offset, object);
	else
		error = EINVAL;

	return (error);
}

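/*
 * Final teardown of a VM device: destroy the cdev (if still present),
 * destroy the vm instance, unlink the softc from the global list and free
 * it.  Called directly when creation fails partway, or from a taskqueue
 * via destroy_dev_sched_cb() on sysctl-driven destruction.
 */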
static void
vmmdev_destroy(void *arg)
{

	struct vmmdev_softc *sc = arg;

	if (sc->cdev != NULL)
		destroy_dev(sc->cdev);

	if (sc->vm != NULL)
		vm_destroy(sc->vm);

	if ((sc->flags & VSC_LINKED) != 0) {
		mtx_lock(&vmmdev_mtx);
		SLIST_REMOVE(&head, sc, vmmdev_softc, link);
		mtx_unlock(&vmmdev_mtx);
	}

	free(sc, M_VMMDEV);
}

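/*
 * Handler for the hw.vmm.destroy sysctl: writing a VM name schedules the
 * corresponding device node (and the VM behind it) for destruction.
 */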
static int
sysctl_vmm_destroy(SYSCTL_HANDLER_ARGS)
{
	int error;
	char buf[VM_MAX_NAMELEN];
	struct vmmdev_softc *sc;
	struct cdev *cdev;

	strlcpy(buf, "beavis", sizeof(buf));
	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	mtx_lock(&vmmdev_mtx);
	sc = vmmdev_lookup(buf);
	if (sc == NULL || sc->cdev == NULL) {
		mtx_unlock(&vmmdev_mtx);
		return (EINVAL);
	}

	/*
	 * The 'cdev' will be destroyed asynchronously when 'si_threadcount'
	 * goes down to 0 so we should not do it again in the callback.
	 */
	cdev = sc->cdev;
	sc->cdev = NULL;
	mtx_unlock(&vmmdev_mtx);

	/*
	 * Schedule the 'cdev' to be destroyed:
	 *
	 * - any new operations on this 'cdev' will return an error (ENXIO).
	 *
	 * - when the 'si_threadcount' dwindles down to zero the 'cdev' will
	 *   be destroyed and the callback will be invoked in a taskqueue
	 *   context.
	 */
	destroy_dev_sched_cb(cdev, vmmdev_destroy, sc);

	return (0);
}
SYSCTL_PROC(_hw_vmm, OID_AUTO, destroy, CTLTYPE_STRING | CTLFLAG_RW,
	    NULL, 0, sysctl_vmm_destroy, "A", NULL);

static struct cdevsw vmmdevsw = {
	.d_name		= "vmmdev",
	.d_version	= D_VERSION,
	.d_ioctl	= vmmdev_ioctl,
	.d_mmap_single	= vmmdev_mmap_single,
	.d_read		= vmmdev_rw,
	.d_write	= vmmdev_rw,
};

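/*
 * Handler for the hw.vmm.create sysctl: writing a VM name creates the vm
 * instance and its /dev/vmm/<name> node.  Because vmmdev_mtx is dropped
 * around vm_create(), the name is looked up again before linking the new
 * softc so a concurrent create of the same name fails with EEXIST.
 */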
static int
sysctl_vmm_create(SYSCTL_HANDLER_ARGS)
{
	int error;
	struct vm *vm;
	struct cdev *cdev;
	struct vmmdev_softc *sc, *sc2;
	char buf[VM_MAX_NAMELEN];

	strlcpy(buf, "beavis", sizeof(buf));
	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	mtx_lock(&vmmdev_mtx);
	sc = vmmdev_lookup(buf);
	mtx_unlock(&vmmdev_mtx);
	if (sc != NULL)
		return (EEXIST);

	error = vm_create(buf, &vm);
	if (error != 0)
		return (error);

	sc = malloc(sizeof(struct vmmdev_softc), M_VMMDEV, M_WAITOK | M_ZERO);
	sc->vm = vm;

	/*
	 * Lookup the name again just in case somebody sneaked in when we
	 * dropped the lock.
	 */
	mtx_lock(&vmmdev_mtx);
	sc2 = vmmdev_lookup(buf);
	if (sc2 == NULL) {
		SLIST_INSERT_HEAD(&head, sc, link);
		sc->flags |= VSC_LINKED;
	}
	mtx_unlock(&vmmdev_mtx);

	if (sc2 != NULL) {
		vmmdev_destroy(sc);
		return (EEXIST);
	}

	error = make_dev_p(MAKEDEV_CHECKNAME, &cdev, &vmmdevsw, NULL,
			   UID_ROOT, GID_WHEEL, 0600, "vmm/%s", buf);
	if (error != 0) {
		vmmdev_destroy(sc);
		return (error);
	}

	mtx_lock(&vmmdev_mtx);
	sc->cdev = cdev;
	sc->cdev->si_drv1 = sc;
	mtx_unlock(&vmmdev_mtx);

	return (0);
}
SYSCTL_PROC(_hw_vmm, OID_AUTO, create, CTLTYPE_STRING | CTLFLAG_RW,
	    NULL, 0, sysctl_vmm_create, "A", NULL);

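/* Called once at module load time to set up the global device mutex. */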
void
vmmdev_init(void)
{
	mtx_init(&vmmdev_mtx, "vmm device mutex", NULL, MTX_DEF);
}

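/*
 * Called at module unload time: refuse the unload (EBUSY) while any VM
 * devices are still linked into the global list.
 */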
int
vmmdev_cleanup(void)
{
	int error;

	if (SLIST_EMPTY(&head))
		error = 0;
	else
		error = EBUSY;

	return (error);
}