bhyverun.c revision 257396
/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/10/usr.sbin/bhyve/bhyverun.c 257396 2013-10-30 20:42:09Z neel $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/usr.sbin/bhyve/bhyverun.c 257396 2013-10-30 20:42:09Z neel $");

#include <sys/types.h>
#include <sys/mman.h>
#include <sys/time.h>

#include <machine/segments.h>

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <err.h>
#include <libgen.h>
#include <unistd.h>
#include <assert.h>
#include <errno.h>
#include <pthread.h>
#include <pthread_np.h>
#include <sysexits.h>

#include <machine/vmm.h>
#include <vmmapi.h>

#include "bhyverun.h"
#include "acpi.h"
#include "inout.h"
#include "dbgport.h"
#include "legacy_irq.h"
#include "mem.h"
#include "mevent.h"
#include "mptbl.h"
#include "pci_emul.h"
#include "pci_lpc.h"
#include "xmsr.h"
#include "ioapic.h"
#include "spinup_ap.h"
#include "rtc.h"

#define	GUEST_NIO_PORT		0x488	/* guest upcalls via i/o port */

#define	VMEXIT_SWITCH		0	/* force vcpu switch in mux mode */
#define	VMEXIT_CONTINUE		1	/* continue from next instruction */
#define	VMEXIT_RESTART		2	/* restart current instruction */
#define	VMEXIT_ABORT		3	/* abort the vm run loop */
#define	VMEXIT_RESET		4	/* guest machine has reset */

#define	MB		(1024UL * 1024)
#define	GB		(1024UL * MB)

typedef int (*vmexit_handler_t)(struct vmctx *, struct vm_exit *, int *vcpu);

char *vmname;

int guest_ncpus;

static int pincpu = -1;
static int guest_vmexit_on_hlt, guest_vmexit_on_pause, disable_x2apic;
static int virtio_msix = 1;

static int foundcpus;

static int strictio;

static int acpi;

static char *progname;
static const int BSP = 0;

static int cpumask;

static void vm_loop(struct vmctx *ctx, int vcpu, uint64_t rip);

struct vm_exit vmexit[VM_MAXCPU];

struct bhyvestats {
	uint64_t	vmexit_bogus;
	uint64_t	vmexit_bogus_switch;
	uint64_t	vmexit_hlt;
	uint64_t	vmexit_pause;
	uint64_t	vmexit_mtrap;
	uint64_t	vmexit_inst_emul;
	uint64_t	cpu_switch_rotate;
	uint64_t	cpu_switch_direct;
	int		io_reset;
} stats;

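/*
 * Per-vcpu thread state: the host thread, the vm context it operates on
 * and the vcpu number it services.
 */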
struct mt_vmm_info {
	pthread_t	mt_thr;
	struct vmctx	*mt_ctx;
	int		mt_vcpu;
} mt_vmm_info[VM_MAXCPU];

static void
usage(int code)
{

	fprintf(stderr,
		"Usage: %s [-abehAHIPW] [-g <gdb port>] [-s <pci>] [-S <pci>]\n"
		"       %*s [-c vcpus] [-p pincpu] [-m mem] [-l <lpc>] <vm>\n"
		"       -a: local apic is in XAPIC mode (default is X2APIC)\n"
		"       -A: create an ACPI table\n"
		"       -b: enable the bvm console\n"
		"       -g: gdb port\n"
		"       -c: # cpus (default 1)\n"
		"       -p: pin vcpu 'n' to host cpu 'pincpu + n'\n"
		"       -H: vmexit from the guest on hlt\n"
		"       -I: present an ioapic to the guest\n"
		"       -P: vmexit from the guest on pause\n"
		"       -W: force virtio to use single-vector MSI\n"
		"       -e: exit on unhandled I/O access\n"
		"       -h: help\n"
		"       -s: <slot,driver,configinfo> PCI slot config\n"
		"       -S: <slot,driver,configinfo> legacy PCI slot config\n"
		"       -l: LPC device configuration\n"
		"       -m: memory size in MB\n",
		progname, (int)strlen(progname), "");

	exit(code);
}

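/*
 * Convert a guest physical address into a host virtual address that is
 * valid for 'len' bytes.
 */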
void *
paddr_guest2host(struct vmctx *ctx, uintptr_t gaddr, size_t len)
{

	return (vm_map_gpa(ctx, gaddr, len));
}

int
fbsdrun_disable_x2apic(void)
{

	return (disable_x2apic);
}

int
fbsdrun_vmexit_on_pause(void)
{

	return (guest_vmexit_on_pause);
}

int
fbsdrun_vmexit_on_hlt(void)
{

	return (guest_vmexit_on_hlt);
}

int
fbsdrun_virtio_msix(void)
{

	return (virtio_msix);
}

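/*
 * Start routine for a vcpu thread: name the thread after the vm and the
 * vcpu, then enter the vcpu run loop, which does not return.
 */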
static void *
fbsdrun_start_thread(void *param)
{
	char tname[MAXCOMLEN + 1];
	struct mt_vmm_info *mtp;
	int vcpu;

	mtp = param;
	vcpu = mtp->mt_vcpu;

	snprintf(tname, sizeof(tname), "%s vcpu %d", vmname, vcpu);
	pthread_set_name_np(mtp->mt_thr, tname);

	vm_loop(mtp->mt_ctx, vcpu, vmexit[vcpu].rip);

	/* not reached */
	exit(1);
	return (NULL);
}

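/*
 * Mark the vcpu as active and create the thread that will run it,
 * starting execution at 'rip'.
 */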
void
fbsdrun_addcpu(struct vmctx *ctx, int vcpu, uint64_t rip)
{
	int error;

	if (cpumask & (1 << vcpu)) {
		fprintf(stderr, "addcpu: attempting to add existing cpu %d\n",
		    vcpu);
		exit(1);
	}

	cpumask |= 1 << vcpu;
	foundcpus++;

	/*
	 * Set up the vmexit struct to allow execution to start
	 * at the given RIP
	 */
	vmexit[vcpu].rip = rip;
	vmexit[vcpu].inst_length = 0;

	mt_vmm_info[vcpu].mt_ctx = ctx;
	mt_vmm_info[vcpu].mt_vcpu = vcpu;

	error = pthread_create(&mt_vmm_info[vcpu].mt_thr, NULL,
	    fbsdrun_start_thread, &mt_vmm_info[vcpu]);
	assert(error == 0);
}

static int
vmexit_catch_reset(void)
{
	stats.io_reset++;
	return (VMEXIT_RESET);
}

static int
vmexit_catch_inout(void)
{
	return (VMEXIT_ABORT);
}

static int
vmexit_handle_notify(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu,
		     uint32_t eax)
{
#if BHYVE_DEBUG
	/*
	 * put guest-driven debug here
	 */
#endif
	return (VMEXIT_CONTINUE);
}

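/*
 * Handle an in/out exit: intercept the keyboard-controller reset and the
 * guest notification port, otherwise defer to the registered i/o port
 * handlers.
 */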
static int
vmexit_inout(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
{
	int error;
	int bytes, port, in, out;
	uint32_t eax;
	int vcpu;

	vcpu = *pvcpu;

	port = vme->u.inout.port;
	bytes = vme->u.inout.bytes;
	eax = vme->u.inout.eax;
	in = vme->u.inout.in;
	out = !in;

	/* We don't deal with these */
	if (vme->u.inout.string || vme->u.inout.rep)
		return (VMEXIT_ABORT);

	/* Special case of guest reset */
	if (out && port == 0x64 && (uint8_t)eax == 0xFE)
		return (vmexit_catch_reset());

	/* Extra-special case of host notifications */
	if (out && port == GUEST_NIO_PORT)
		return (vmexit_handle_notify(ctx, vme, pvcpu, eax));

	error = emulate_inout(ctx, vcpu, in, port, bytes, &eax, strictio);
	if (error == 0 && in)
		error = vm_set_register(ctx, vcpu, VM_REG_GUEST_RAX, eax);

	if (error == 0)
		return (VMEXIT_CONTINUE);
	else {
		fprintf(stderr, "Unhandled %s%c 0x%04x\n",
			in ? "in" : "out",
			bytes == 1 ? 'b' : (bytes == 2 ? 'w' : 'l'), port);
		return (vmexit_catch_inout());
	}
}

static int
vmexit_rdmsr(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
{
	fprintf(stderr, "vm exit rdmsr 0x%x, cpu %d\n", vme->u.msr.code,
	    *pvcpu);
	return (VMEXIT_ABORT);
}

static int
vmexit_wrmsr(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
{
	int newcpu;
	int retval = VMEXIT_CONTINUE;

	newcpu = emulate_wrmsr(ctx, *pvcpu, vme->u.msr.code, vme->u.msr.wval);

	return (retval);
}

static int
vmexit_spinup_ap(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
{
	int newcpu;
	int retval = VMEXIT_CONTINUE;

	newcpu = spinup_ap(ctx, *pvcpu,
			   vme->u.spinup_ap.vcpu, vme->u.spinup_ap.rip);

	return (retval);
}

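/*
 * Dump the state of an unhandled VMX exit and abort the run loop.
 */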
332vmexit_vmx(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
333{
334
335	fprintf(stderr, "vm exit[%d]\n", *pvcpu);
336	fprintf(stderr, "\treason\t\tVMX\n");
337	fprintf(stderr, "\trip\t\t0x%016lx\n", vmexit->rip);
338	fprintf(stderr, "\tinst_length\t%d\n", vmexit->inst_length);
339	fprintf(stderr, "\terror\t\t%d\n", vmexit->u.vmx.error);
340	fprintf(stderr, "\texit_reason\t%u\n", vmexit->u.vmx.exit_reason);
341	fprintf(stderr, "\tqualification\t0x%016lx\n",
342	    vmexit->u.vmx.exit_qualification);
343
344	return (VMEXIT_ABORT);
345}
346
347static int
348vmexit_bogus(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
349{
350
351	stats.vmexit_bogus++;
352
353	return (VMEXIT_RESTART);
354}
355
356static int
357vmexit_hlt(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
358{
359
360	stats.vmexit_hlt++;
361
362	/*
363	 * Just continue execution with the next instruction. We use
364	 * the HLT VM exit as a way to be friendly with the host
365	 * scheduler.
366	 */
367	return (VMEXIT_CONTINUE);
368}
369
370static int
371vmexit_pause(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
372{
373
374	stats.vmexit_pause++;
375
376	return (VMEXIT_CONTINUE);
377}
378
379static int
380vmexit_mtrap(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
381{
382
383	stats.vmexit_mtrap++;
384
385	return (VMEXIT_RESTART);
386}
387
388static int
389vmexit_inst_emul(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
390{
391	int err;
392	stats.vmexit_inst_emul++;
393
394	err = emulate_mem(ctx, *pvcpu, vmexit->u.inst_emul.gpa,
395			  &vmexit->u.inst_emul.vie);
396
397	if (err) {
398		if (err == EINVAL) {
399			fprintf(stderr,
400			    "Failed to emulate instruction at 0x%lx\n",
401			    vmexit->rip);
402		} else if (err == ESRCH) {
403			fprintf(stderr, "Unhandled memory access to 0x%lx\n",
404			    vmexit->u.inst_emul.gpa);
405		}
406
407		return (VMEXIT_ABORT);
408	}
409
410	return (VMEXIT_CONTINUE);
411}
412
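/*
 * Dispatch table of vm exit handlers, indexed by exit code.  Exits with
 * no entry cause the vcpu loop to terminate.
 */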
static vmexit_handler_t handler[VM_EXITCODE_MAX] = {
	[VM_EXITCODE_INOUT]  = vmexit_inout,
	[VM_EXITCODE_VMX]    = vmexit_vmx,
	[VM_EXITCODE_BOGUS]  = vmexit_bogus,
	[VM_EXITCODE_RDMSR]  = vmexit_rdmsr,
	[VM_EXITCODE_WRMSR]  = vmexit_wrmsr,
	[VM_EXITCODE_MTRAP]  = vmexit_mtrap,
	[VM_EXITCODE_INST_EMUL] = vmexit_inst_emul,
	[VM_EXITCODE_SPINUP_AP] = vmexit_spinup_ap,
};

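/*
 * Per-vcpu run loop: optionally pin the thread to a host cpu, then run
 * the vcpu and dispatch every vm exit to its handler until an error or
 * a handler terminates the process.
 */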
static void
vm_loop(struct vmctx *ctx, int vcpu, uint64_t rip)
{
	cpuset_t mask;
	int error, rc, prevcpu;
	enum vm_exitcode exitcode;

	if (pincpu >= 0) {
		CPU_ZERO(&mask);
		CPU_SET(pincpu + vcpu, &mask);
		error = pthread_setaffinity_np(pthread_self(),
					       sizeof(mask), &mask);
		assert(error == 0);
	}

	while (1) {
		error = vm_run(ctx, vcpu, rip, &vmexit[vcpu]);
		if (error != 0) {
			/*
			 * It is possible that 'vmmctl' or some other process
			 * has transitioned the vcpu to CANNOT_RUN state right
			 * before we tried to transition it to RUNNING.
			 *
			 * This is expected to be temporary so just retry.
			 */
			if (errno == EBUSY)
				continue;
			else
				break;
		}

		prevcpu = vcpu;

		exitcode = vmexit[vcpu].exitcode;
		if (exitcode >= VM_EXITCODE_MAX || handler[exitcode] == NULL) {
			fprintf(stderr, "vm_loop: unexpected exitcode 0x%x\n",
			    exitcode);
			exit(1);
		}

		rc = (*handler[exitcode])(ctx, &vmexit[vcpu], &vcpu);

		switch (rc) {
		case VMEXIT_CONTINUE:
			rip = vmexit[vcpu].rip + vmexit[vcpu].inst_length;
			break;
		case VMEXIT_RESTART:
			rip = vmexit[vcpu].rip;
			break;
		case VMEXIT_RESET:
			exit(0);
		default:
			exit(1);
		}
	}
	fprintf(stderr, "vm_run error %d, errno %d\n", error, errno);
}

static int
num_vcpus_allowed(struct vmctx *ctx)
{
	int tmp, error;

	error = vm_get_capability(ctx, BSP, VM_CAP_UNRESTRICTED_GUEST, &tmp);

	/*
	 * The guest is allowed to spinup more than one processor only if the
	 * UNRESTRICTED_GUEST capability is available.
	 */
	if (error == 0)
		return (VM_MAXCPU);
	else
		return (1);
}

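/*
 * Enable the vm capabilities selected on the command line (exits on HLT
 * and PAUSE), set the x2apic state and turn on INVPCID support.
 */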
void
fbsdrun_set_capabilities(struct vmctx *ctx, int cpu)
{
	int err, tmp;

	if (fbsdrun_vmexit_on_hlt()) {
		err = vm_get_capability(ctx, cpu, VM_CAP_HALT_EXIT, &tmp);
		if (err < 0) {
			fprintf(stderr, "VM exit on HLT not supported\n");
			exit(1);
		}
		vm_set_capability(ctx, cpu, VM_CAP_HALT_EXIT, 1);
		if (cpu == BSP)
			handler[VM_EXITCODE_HLT] = vmexit_hlt;
	}

	if (fbsdrun_vmexit_on_pause()) {
		/*
		 * pause exit support required for this mode
		 */
		err = vm_get_capability(ctx, cpu, VM_CAP_PAUSE_EXIT, &tmp);
		if (err < 0) {
			fprintf(stderr,
			    "SMP mux requested, no pause support\n");
			exit(1);
		}
		vm_set_capability(ctx, cpu, VM_CAP_PAUSE_EXIT, 1);
		if (cpu == BSP)
			handler[VM_EXITCODE_PAUSE] = vmexit_pause;
	}

	if (fbsdrun_disable_x2apic())
		err = vm_set_x2apic_state(ctx, cpu, X2APIC_DISABLED);
	else
		err = vm_set_x2apic_state(ctx, cpu, X2APIC_ENABLED);

	if (err) {
		fprintf(stderr, "Unable to set x2apic state (%d)\n", err);
		exit(1);
	}

	vm_set_capability(ctx, cpu, VM_CAP_ENABLE_INVPCID, 1);
}

int
main(int argc, char *argv[])
{
	int c, error, gdb_port, err, ioapic, bvmcons;
	int max_vcpus;
	struct vmctx *ctx;
	uint64_t rip;
	size_t memsize;

	bvmcons = 0;
	progname = basename(argv[0]);
	gdb_port = 0;
	guest_ncpus = 1;
	ioapic = 0;
	memsize = 256 * MB;

	while ((c = getopt(argc, argv, "abehAHIPWp:g:c:s:S:m:l:")) != -1) {
		switch (c) {
		case 'a':
			disable_x2apic = 1;
			break;
		case 'A':
			acpi = 1;
			break;
		case 'b':
			bvmcons = 1;
			break;
		case 'p':
			pincpu = atoi(optarg);
			break;
		case 'c':
			guest_ncpus = atoi(optarg);
			break;
		case 'g':
			gdb_port = atoi(optarg);
			break;
		case 'l':
			if (lpc_device_parse(optarg) != 0) {
				errx(EX_USAGE, "invalid lpc device "
				    "configuration '%s'", optarg);
			}
			break;
		case 's':
			if (pci_parse_slot(optarg, 0) != 0)
				exit(1);
			else
				break;
		case 'S':
			if (pci_parse_slot(optarg, 1) != 0)
				exit(1);
			else
				break;
		case 'm':
			error = vm_parse_memsize(optarg, &memsize);
			if (error)
				errx(EX_USAGE, "invalid memsize '%s'", optarg);
			break;
		case 'H':
			guest_vmexit_on_hlt = 1;
			break;
		case 'I':
			ioapic = 1;
			break;
		case 'P':
			guest_vmexit_on_pause = 1;
			break;
		case 'e':
			strictio = 1;
			break;
		case 'W':
			virtio_msix = 0;
			break;
		case 'h':
			usage(0);
		default:
			usage(1);
		}
	}
	argc -= optind;
	argv += optind;

	if (argc != 1)
		usage(1);

	vmname = argv[0];

	ctx = vm_open(vmname);
	if (ctx == NULL) {
		perror("vm_open");
		exit(1);
	}

	max_vcpus = num_vcpus_allowed(ctx);
	if (guest_ncpus > max_vcpus) {
		fprintf(stderr, "%d vCPUs requested but only %d available\n",
			guest_ncpus, max_vcpus);
		exit(1);
	}

	fbsdrun_set_capabilities(ctx, BSP);

	err = vm_setup_memory(ctx, memsize, VM_MMAP_ALL);
	if (err) {
		fprintf(stderr, "Unable to setup memory (%d)\n", err);
		exit(1);
	}

	init_mem();
	init_inout();
	legacy_irq_init();

	rtc_init(ctx);

	/*
	 * Exit if a device emulation finds an error in its initialization
	 */
	if (init_pci(ctx) != 0)
		exit(1);

	if (ioapic)
		ioapic_init(0);

	if (gdb_port != 0)
		init_dbgport(gdb_port);

	if (bvmcons)
		init_bvmcons();

	error = vm_get_register(ctx, BSP, VM_REG_GUEST_RIP, &rip);
	assert(error == 0);

	/*
	 * build the guest tables, MP etc.
	 */
	mptable_build(ctx, guest_ncpus, ioapic);

	if (acpi) {
		error = acpi_build(ctx, guest_ncpus, ioapic);
		assert(error == 0);
	}

	/*
	 * Add CPU 0
	 */
	fbsdrun_addcpu(ctx, BSP, rip);

	/*
	 * Head off to the main event dispatch loop
	 */
	mevent_dispatch();

	exit(1);
}