/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/10/sys/amd64/vmm/intel/vmx.c 276349 2014-12-28 21:27:13Z neel $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/amd64/vmm/intel/vmx.c 276349 2014-12-28 21:27:13Z neel $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/smp.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/psl.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>
#include <machine/segments.h>
#include <machine/smp.h>
#include <machine/specialreg.h>
#include <machine/vmparam.h>

#include <machine/vmm.h>
#include <machine/vmm_dev.h>
#include <machine/vmm_instruction_emul.h>
#include "vmm_lapic.h"
#include "vmm_host.h"
#include "vmm_ioport.h"
#include "vmm_ipi.h"
#include "vmm_ktr.h"
#include "vmm_stat.h"
#include "vatpic.h"
#include "vlapic.h"
#include "vlapic_priv.h"

#include "ept.h"
#include "vmx_cpufunc.h"
#include "vmx.h"
#include "vmx_msr.h"
#include "x86.h"
#include "vmx_controls.h"

#define	PINBASED_CTLS_ONE_SETTING					\
	(PINBASED_EXTINT_EXITING	|				\
	 PINBASED_NMI_EXITING		|				\
	 PINBASED_VIRTUAL_NMI)
#define	PINBASED_CTLS_ZERO_SETTING	0

#define PROCBASED_CTLS_WINDOW_SETTING					\
	(PROCBASED_INT_WINDOW_EXITING	|				\
	 PROCBASED_NMI_WINDOW_EXITING)

#define	PROCBASED_CTLS_ONE_SETTING 					\
	(PROCBASED_SECONDARY_CONTROLS	|				\
	 PROCBASED_MWAIT_EXITING	|				\
	 PROCBASED_MONITOR_EXITING	|				\
	 PROCBASED_IO_EXITING		|				\
	 PROCBASED_MSR_BITMAPS		|				\
	 PROCBASED_CTLS_WINDOW_SETTING	|				\
	 PROCBASED_CR8_LOAD_EXITING	|				\
	 PROCBASED_CR8_STORE_EXITING)
#define	PROCBASED_CTLS_ZERO_SETTING	\
	(PROCBASED_CR3_LOAD_EXITING |	\
	PROCBASED_CR3_STORE_EXITING |	\
	PROCBASED_IO_BITMAPS)

#define	PROCBASED_CTLS2_ONE_SETTING	PROCBASED2_ENABLE_EPT
#define	PROCBASED_CTLS2_ZERO_SETTING	0

#define	VM_EXIT_CTLS_ONE_SETTING					\
	(VM_EXIT_HOST_LMA			|			\
	VM_EXIT_SAVE_EFER			|			\
	VM_EXIT_LOAD_EFER			|			\
	VM_EXIT_ACKNOWLEDGE_INTERRUPT		|			\
	VM_EXIT_SAVE_PAT			|			\
	VM_EXIT_LOAD_PAT)

#define	VM_EXIT_CTLS_ZERO_SETTING	VM_EXIT_SAVE_DEBUG_CONTROLS

#define	VM_ENTRY_CTLS_ONE_SETTING	(VM_ENTRY_LOAD_EFER | VM_ENTRY_LOAD_PAT)

#define	VM_ENTRY_CTLS_ZERO_SETTING					\
	(VM_ENTRY_LOAD_DEBUG_CONTROLS		|			\
	VM_ENTRY_INTO_SMM			|			\
	VM_ENTRY_DEACTIVATE_DUAL_MONITOR)

#define	HANDLED		1
#define	UNHANDLED	0

static MALLOC_DEFINE(M_VMX, "vmx", "vmx");
static MALLOC_DEFINE(M_VLAPIC, "vlapic", "vlapic");

SYSCTL_DECL(_hw_vmm);
SYSCTL_NODE(_hw_vmm, OID_AUTO, vmx, CTLFLAG_RW, NULL, NULL);

int vmxon_enabled[MAXCPU];
static char vmxon_region[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE);

static uint32_t pinbased_ctls, procbased_ctls, procbased_ctls2;
static uint32_t exit_ctls, entry_ctls;

static uint64_t cr0_ones_mask, cr0_zeros_mask;
SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_ones_mask, CTLFLAG_RD,
	     &cr0_ones_mask, 0, NULL);
SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_zeros_mask, CTLFLAG_RD,
	     &cr0_zeros_mask, 0, NULL);

static uint64_t cr4_ones_mask, cr4_zeros_mask;
SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_ones_mask, CTLFLAG_RD,
	     &cr4_ones_mask, 0, NULL);
SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_zeros_mask, CTLFLAG_RD,
	     &cr4_zeros_mask, 0, NULL);

static int vmx_initialized;
SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, initialized, CTLFLAG_RD,
	   &vmx_initialized, 0, "Intel VMX initialized");

/*
 * Optional capabilities
 */
static SYSCTL_NODE(_hw_vmm_vmx, OID_AUTO, cap, CTLFLAG_RW, NULL, NULL);

static int cap_halt_exit;
SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, halt_exit, CTLFLAG_RD, &cap_halt_exit, 0,
    "HLT triggers a VM-exit");

static int cap_pause_exit;
SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, pause_exit, CTLFLAG_RD, &cap_pause_exit,
    0, "PAUSE triggers a VM-exit");

static int cap_unrestricted_guest;
SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, unrestricted_guest, CTLFLAG_RD,
    &cap_unrestricted_guest, 0, "Unrestricted guests");

static int cap_monitor_trap;
SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, monitor_trap, CTLFLAG_RD,
    &cap_monitor_trap, 0, "Monitor trap flag");

static int cap_invpcid;
SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, invpcid, CTLFLAG_RD, &cap_invpcid,
    0, "Guests are allowed to use INVPCID");

static int virtual_interrupt_delivery;
SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, virtual_interrupt_delivery, CTLFLAG_RD,
    &virtual_interrupt_delivery, 0, "APICv virtual interrupt delivery support");

static int posted_interrupts;
SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, posted_interrupts, CTLFLAG_RD,
    &posted_interrupts, 0, "APICv posted interrupt support");

static int pirvec;
SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, posted_interrupt_vector, CTLFLAG_RD,
    &pirvec, 0, "APICv posted interrupt vector");

static struct unrhdr *vpid_unr;
static u_int vpid_alloc_failed;
SYSCTL_UINT(_hw_vmm_vmx, OID_AUTO, vpid_alloc_failed, CTLFLAG_RD,
	    &vpid_alloc_failed, 0, NULL);

/*
 * Use the last page below 4GB as the APIC access address. This address is
 * occupied by the boot firmware so it is guaranteed that it will not conflict
 * with a page in system memory.
 */
#define	APIC_ACCESS_ADDRESS	0xFFFFF000

static int vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc);
static int vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval);
static int vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val);
static void vmx_inject_pir(struct vlapic *vlapic);

#ifdef KTR
static const char *
exit_reason_to_str(int reason)
{
	static char reasonbuf[32];

	switch (reason) {
	case EXIT_REASON_EXCEPTION:
		return "exception";
	case EXIT_REASON_EXT_INTR:
		return "extint";
	case EXIT_REASON_TRIPLE_FAULT:
		return "triplefault";
	case EXIT_REASON_INIT:
		return "init";
	case EXIT_REASON_SIPI:
		return "sipi";
	case EXIT_REASON_IO_SMI:
		return "iosmi";
	case EXIT_REASON_SMI:
		return "smi";
	case EXIT_REASON_INTR_WINDOW:
		return "intrwindow";
	case EXIT_REASON_NMI_WINDOW:
		return "nmiwindow";
	case EXIT_REASON_TASK_SWITCH:
		return "taskswitch";
	case EXIT_REASON_CPUID:
		return "cpuid";
	case EXIT_REASON_GETSEC:
		return "getsec";
	case EXIT_REASON_HLT:
		return "hlt";
	case EXIT_REASON_INVD:
		return "invd";
	case EXIT_REASON_INVLPG:
		return "invlpg";
	case EXIT_REASON_RDPMC:
		return "rdpmc";
	case EXIT_REASON_RDTSC:
		return "rdtsc";
	case EXIT_REASON_RSM:
		return "rsm";
	case EXIT_REASON_VMCALL:
		return "vmcall";
	case EXIT_REASON_VMCLEAR:
		return "vmclear";
	case EXIT_REASON_VMLAUNCH:
		return "vmlaunch";
	case EXIT_REASON_VMPTRLD:
		return "vmptrld";
	case EXIT_REASON_VMPTRST:
		return "vmptrst";
	case EXIT_REASON_VMREAD:
		return "vmread";
	case EXIT_REASON_VMRESUME:
		return "vmresume";
	case EXIT_REASON_VMWRITE:
		return "vmwrite";
	case EXIT_REASON_VMXOFF:
		return "vmxoff";
	case EXIT_REASON_VMXON:
		return "vmxon";
	case EXIT_REASON_CR_ACCESS:
		return "craccess";
	case EXIT_REASON_DR_ACCESS:
		return "draccess";
	case EXIT_REASON_INOUT:
		return "inout";
	case EXIT_REASON_RDMSR:
		return "rdmsr";
	case EXIT_REASON_WRMSR:
		return "wrmsr";
	case EXIT_REASON_INVAL_VMCS:
		return "invalvmcs";
	case EXIT_REASON_INVAL_MSR:
		return "invalmsr";
	case EXIT_REASON_MWAIT:
		return "mwait";
	case EXIT_REASON_MTF:
		return "mtf";
	case EXIT_REASON_MONITOR:
		return "monitor";
	case EXIT_REASON_PAUSE:
		return "pause";
	case EXIT_REASON_MCE:
		return "mce";
	case EXIT_REASON_TPR:
		return "tpr";
	case EXIT_REASON_APIC_ACCESS:
		return "apic-access";
	case EXIT_REASON_GDTR_IDTR:
		return "gdtridtr";
	case EXIT_REASON_LDTR_TR:
		return "ldtrtr";
	case EXIT_REASON_EPT_FAULT:
		return "eptfault";
	case EXIT_REASON_EPT_MISCONFIG:
		return "eptmisconfig";
	case EXIT_REASON_INVEPT:
		return "invept";
	case EXIT_REASON_RDTSCP:
		return "rdtscp";
	case EXIT_REASON_VMX_PREEMPT:
		return "vmxpreempt";
	case EXIT_REASON_INVVPID:
		return "invvpid";
	case EXIT_REASON_WBINVD:
		return "wbinvd";
	case EXIT_REASON_XSETBV:
		return "xsetbv";
	case EXIT_REASON_APIC_WRITE:
		return "apic-write";
	default:
		snprintf(reasonbuf, sizeof(reasonbuf), "%d", reason);
		return (reasonbuf);
	}
}
#endif	/* KTR */

static int
vmx_allow_x2apic_msrs(struct vmx *vmx)
{
	int i, error;

	error = 0;

	/*
	 * Allow read-only access to the following x2APIC MSRs from the guest.
	 */
	error += guest_msr_ro(vmx, MSR_APIC_ID);
	error += guest_msr_ro(vmx, MSR_APIC_VERSION);
	error += guest_msr_ro(vmx, MSR_APIC_LDR);
	error += guest_msr_ro(vmx, MSR_APIC_SVR);

	for (i = 0; i < 8; i++)
		error += guest_msr_ro(vmx, MSR_APIC_ISR0 + i);

	for (i = 0; i < 8; i++)
		error += guest_msr_ro(vmx, MSR_APIC_TMR0 + i);

	for (i = 0; i < 8; i++)
		error += guest_msr_ro(vmx, MSR_APIC_IRR0 + i);

	error += guest_msr_ro(vmx, MSR_APIC_ESR);
	error += guest_msr_ro(vmx, MSR_APIC_LVT_TIMER);
	error += guest_msr_ro(vmx, MSR_APIC_LVT_THERMAL);
	error += guest_msr_ro(vmx, MSR_APIC_LVT_PCINT);
	error += guest_msr_ro(vmx, MSR_APIC_LVT_LINT0);
	error += guest_msr_ro(vmx, MSR_APIC_LVT_LINT1);
	error += guest_msr_ro(vmx, MSR_APIC_LVT_ERROR);
	error += guest_msr_ro(vmx, MSR_APIC_ICR_TIMER);
	error += guest_msr_ro(vmx, MSR_APIC_DCR_TIMER);
	error += guest_msr_ro(vmx, MSR_APIC_ICR);

	/*
	 * Allow TPR, EOI and SELF_IPI MSRs to be read and written by the guest.
	 *
	 * These registers get special treatment described in the section
	 * "Virtualizing MSR-Based APIC Accesses".
	 */
	error += guest_msr_rw(vmx, MSR_APIC_TPR);
	error += guest_msr_rw(vmx, MSR_APIC_EOI);
	error += guest_msr_rw(vmx, MSR_APIC_SELF_IPI);

	return (error);
}

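/*
 * Apply the CR0/CR4 bits that VMX operation requires to be fixed to 1 or 0.
 * The masks are derived from MSR_VMX_CR{0,4}_FIXED{0,1} in vmx_init().
 */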
u_long
vmx_fix_cr0(u_long cr0)
{

	return ((cr0 | cr0_ones_mask) & ~cr0_zeros_mask);
}

u_long
vmx_fix_cr4(u_long cr4)
{

	return ((cr4 | cr4_ones_mask) & ~cr4_zeros_mask);
}

static void
vpid_free(int vpid)
{
	if (vpid < 0 || vpid > 0xffff)
		panic("vpid_free: invalid vpid %d", vpid);

	/*
	 * VPIDs [0,VM_MAXCPU] are special and are not allocated from
	 * the unit number allocator.
	 */

	if (vpid > VM_MAXCPU)
		free_unr(vpid_unr, vpid);
}

static void
vpid_alloc(uint16_t *vpid, int num)
{
	int i, x;

	if (num <= 0 || num > VM_MAXCPU)
		panic("invalid number of vpids requested: %d", num);

	/*
	 * If the "enable vpid" execution control is not enabled then the
	 * VPID is required to be 0 for all vcpus.
	 */
	if ((procbased_ctls2 & PROCBASED2_ENABLE_VPID) == 0) {
		for (i = 0; i < num; i++)
			vpid[i] = 0;
		return;
	}

	/*
	 * Allocate a unique VPID for each vcpu from the unit number allocator.
	 */
	for (i = 0; i < num; i++) {
		x = alloc_unr(vpid_unr);
		if (x == -1)
			break;
		else
			vpid[i] = x;
	}

	if (i < num) {
		atomic_add_int(&vpid_alloc_failed, 1);

		/*
		 * If the unit number allocator does not have enough unique
		 * VPIDs then we need to allocate from the [1,VM_MAXCPU] range.
		 *
		 * These VPIDs are not unique across VMs but this does not
		 * affect correctness because the combined mappings are also
		 * tagged with the EP4TA which is unique for each VM.
		 *
		 * It is still sub-optimal because the invvpid will invalidate
		 * combined mappings for a particular VPID across all EP4TAs.
		 */
		while (i-- > 0)
			vpid_free(vpid[i]);

		for (i = 0; i < num; i++)
			vpid[i] = i + 1;
	}
}

static void
vpid_init(void)
{
	/*
	 * VPID 0 is required when the "enable VPID" execution control is
	 * disabled.
	 *
	 * VPIDs [1,VM_MAXCPU] are used as the "overflow namespace" when the
	 * unit number allocator does not have sufficient unique VPIDs to
	 * satisfy the allocation.
	 *
	 * The remaining VPIDs are managed by the unit number allocator.
	 */
	vpid_unr = new_unrhdr(VM_MAXCPU + 1, 0xffff, NULL);
}

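/*
 * Per-cpu teardown executed via smp_rendezvous() from vmx_cleanup(): flush
 * EPT and VPID TLB state, execute VMXOFF if VMXON had been done on this
 * cpu, and clear CR4.VMXE.
 */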
static void
vmx_disable(void *arg __unused)
{
	struct invvpid_desc invvpid_desc = { 0 };
	struct invept_desc invept_desc = { 0 };

	if (vmxon_enabled[curcpu]) {
		/*
		 * See sections 25.3.3.3 and 25.3.3.4 in Intel Vol 3b.
		 *
		 * VMXON or VMXOFF are not required to invalidate any TLB
		 * caching structures, so explicitly invalidate all mappings
		 * here to prevent potential retention of cached information
		 * in the TLB between distinct VMX episodes.
		 */
		invvpid(INVVPID_TYPE_ALL_CONTEXTS, invvpid_desc);
		invept(INVEPT_TYPE_ALL_CONTEXTS, invept_desc);
		vmxoff();
	}
	load_cr4(rcr4() & ~CR4_VMXE);
}

static int
vmx_cleanup(void)
{

	if (pirvec != 0)
		vmm_ipi_free(pirvec);

	if (vpid_unr != NULL) {
		delete_unrhdr(vpid_unr);
		vpid_unr = NULL;
	}

	smp_rendezvous(NULL, vmx_disable, NULL, NULL);

	return (0);
}

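/*
 * Per-cpu setup executed via smp_rendezvous() from vmx_init(): set the lock
 * and VMXON-enable bits in MSR_IA32_FEATURE_CONTROL if the MSR is not
 * already locked, set CR4.VMXE and enter VMX operation with VMXON.
 */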
static void
vmx_enable(void *arg __unused)
{
	int error;
	uint64_t feature_control;

	feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL);
	if ((feature_control & IA32_FEATURE_CONTROL_LOCK) == 0 ||
	    (feature_control & IA32_FEATURE_CONTROL_VMX_EN) == 0) {
		wrmsr(MSR_IA32_FEATURE_CONTROL,
		    feature_control | IA32_FEATURE_CONTROL_VMX_EN |
		    IA32_FEATURE_CONTROL_LOCK);
	}

	load_cr4(rcr4() | CR4_VMXE);

	*(uint32_t *)vmxon_region[curcpu] = vmx_revision();
	error = vmxon(vmxon_region[curcpu]);
	if (error == 0)
		vmxon_enabled[curcpu] = 1;
}

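/*
 * Re-execute VMXON on a cpu where vmx_enable() had previously succeeded,
 * used when VMX operation needs to be re-established (e.g. on resume).
 */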
static void
vmx_restore(void)
{

	if (vmxon_enabled[curcpu])
		vmxon(vmxon_region[curcpu]);
}

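/*
 * Module initialization: verify that the processor supports VMX and the
 * required VM-execution, VM-exit and VM-entry controls, probe the optional
 * capabilities, compute the CR0/CR4 fixed-bit masks, initialize EPT and the
 * VPID allocator, and enable VMX operation on every host cpu.
 */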
static int
vmx_init(int ipinum)
{
	int error, use_tpr_shadow;
	uint64_t basic, fixed0, fixed1, feature_control;
	uint32_t tmp, procbased2_vid_bits;

	/* CPUID.1:ECX[bit 5] must be 1 for processor to support VMX */
	if (!(cpu_feature2 & CPUID2_VMX)) {
		printf("vmx_init: processor does not support VMX operation\n");
		return (ENXIO);
	}

	/*
	 * Verify that MSR_IA32_FEATURE_CONTROL lock and VMXON enable bits
	 * are set (bits 0 and 2 respectively).
	 */
	feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL);
	if ((feature_control & IA32_FEATURE_CONTROL_LOCK) == 1 &&
	    (feature_control & IA32_FEATURE_CONTROL_VMX_EN) == 0) {
		printf("vmx_init: VMX operation disabled by BIOS\n");
		return (ENXIO);
	}

	/*
	 * Verify capabilities MSR_VMX_BASIC:
	 * - bit 54 indicates support for INS/OUTS decoding
	 */
	basic = rdmsr(MSR_VMX_BASIC);
	if ((basic & (1UL << 54)) == 0) {
		printf("vmx_init: processor does not support desired basic "
		    "capabilities\n");
		return (EINVAL);
	}

	/* Check support for primary processor-based VM-execution controls */
	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
			       MSR_VMX_TRUE_PROCBASED_CTLS,
			       PROCBASED_CTLS_ONE_SETTING,
			       PROCBASED_CTLS_ZERO_SETTING, &procbased_ctls);
	if (error) {
		printf("vmx_init: processor does not support desired primary "
		       "processor-based controls\n");
		return (error);
	}

	/* Clear the processor-based ctl bits that are set on demand */
	procbased_ctls &= ~PROCBASED_CTLS_WINDOW_SETTING;

	/* Check support for secondary processor-based VM-execution controls */
	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
			       MSR_VMX_PROCBASED_CTLS2,
			       PROCBASED_CTLS2_ONE_SETTING,
			       PROCBASED_CTLS2_ZERO_SETTING, &procbased_ctls2);
	if (error) {
		printf("vmx_init: processor does not support desired secondary "
		       "processor-based controls\n");
		return (error);
	}

	/* Check support for VPID */
	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2,
			       PROCBASED2_ENABLE_VPID, 0, &tmp);
	if (error == 0)
		procbased_ctls2 |= PROCBASED2_ENABLE_VPID;

	/* Check support for pin-based VM-execution controls */
	error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
			       MSR_VMX_TRUE_PINBASED_CTLS,
			       PINBASED_CTLS_ONE_SETTING,
			       PINBASED_CTLS_ZERO_SETTING, &pinbased_ctls);
	if (error) {
		printf("vmx_init: processor does not support desired "
		       "pin-based controls\n");
		return (error);
	}

	/* Check support for VM-exit controls */
	error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS,
			       VM_EXIT_CTLS_ONE_SETTING,
			       VM_EXIT_CTLS_ZERO_SETTING,
			       &exit_ctls);
	if (error) {
		printf("vmx_init: processor does not support desired "
		    "exit controls\n");
		return (error);
	}

	/* Check support for VM-entry controls */
	error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS, MSR_VMX_TRUE_ENTRY_CTLS,
	    VM_ENTRY_CTLS_ONE_SETTING, VM_ENTRY_CTLS_ZERO_SETTING,
	    &entry_ctls);
	if (error) {
		printf("vmx_init: processor does not support desired "
		    "entry controls\n");
		return (error);
	}

	/*
	 * Check support for optional features by testing them
	 * as individual bits
	 */
	cap_halt_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
					MSR_VMX_TRUE_PROCBASED_CTLS,
					PROCBASED_HLT_EXITING, 0,
					&tmp) == 0);

	cap_monitor_trap = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
					MSR_VMX_PROCBASED_CTLS,
					PROCBASED_MTF, 0,
					&tmp) == 0);

	cap_pause_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
					 MSR_VMX_TRUE_PROCBASED_CTLS,
					 PROCBASED_PAUSE_EXITING, 0,
					 &tmp) == 0);

	cap_unrestricted_guest = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
					MSR_VMX_PROCBASED_CTLS2,
					PROCBASED2_UNRESTRICTED_GUEST, 0,
					&tmp) == 0);

	cap_invpcid = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
	    MSR_VMX_PROCBASED_CTLS2, PROCBASED2_ENABLE_INVPCID, 0,
	    &tmp) == 0);

	/*
	 * Check support for virtual interrupt delivery.
	 */
	procbased2_vid_bits = (PROCBASED2_VIRTUALIZE_APIC_ACCESSES |
	    PROCBASED2_VIRTUALIZE_X2APIC_MODE |
	    PROCBASED2_APIC_REGISTER_VIRTUALIZATION |
	    PROCBASED2_VIRTUAL_INTERRUPT_DELIVERY);

	use_tpr_shadow = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
	    MSR_VMX_TRUE_PROCBASED_CTLS, PROCBASED_USE_TPR_SHADOW, 0,
	    &tmp) == 0);

	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2,
	    procbased2_vid_bits, 0, &tmp);
	if (error == 0 && use_tpr_shadow) {
		virtual_interrupt_delivery = 1;
		TUNABLE_INT_FETCH("hw.vmm.vmx.use_apic_vid",
		    &virtual_interrupt_delivery);
	}

	if (virtual_interrupt_delivery) {
		procbased_ctls |= PROCBASED_USE_TPR_SHADOW;
		procbased_ctls2 |= procbased2_vid_bits;
		procbased_ctls2 &= ~PROCBASED2_VIRTUALIZE_X2APIC_MODE;

		/*
		 * No need to emulate accesses to %CR8 if virtual
		 * interrupt delivery is enabled.
		 */
		procbased_ctls &= ~PROCBASED_CR8_LOAD_EXITING;
		procbased_ctls &= ~PROCBASED_CR8_STORE_EXITING;

		/*
		 * Check for Posted Interrupts only if Virtual Interrupt
		 * Delivery is enabled.
		 */
		error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
		    MSR_VMX_TRUE_PINBASED_CTLS, PINBASED_POSTED_INTERRUPT, 0,
		    &tmp);
		if (error == 0) {
			pirvec = vmm_ipi_alloc();
			if (pirvec == 0) {
				if (bootverbose) {
					printf("vmx_init: unable to allocate "
					    "posted interrupt vector\n");
				}
			} else {
				posted_interrupts = 1;
				TUNABLE_INT_FETCH("hw.vmm.vmx.use_apic_pir",
				    &posted_interrupts);
			}
		}
	}

	if (posted_interrupts)
		pinbased_ctls |= PINBASED_POSTED_INTERRUPT;

	/* Initialize EPT */
	error = ept_init(ipinum);
	if (error) {
		printf("vmx_init: ept initialization failed (%d)\n", error);
		return (error);
	}

	/*
	 * Stash the cr0 and cr4 bits that must be fixed to 0 or 1
	 */
	fixed0 = rdmsr(MSR_VMX_CR0_FIXED0);
	fixed1 = rdmsr(MSR_VMX_CR0_FIXED1);
	cr0_ones_mask = fixed0 & fixed1;
	cr0_zeros_mask = ~fixed0 & ~fixed1;

	/*
	 * CR0_PE and CR0_PG can be set to zero in VMX non-root operation
	 * if unrestricted guest execution is allowed.
	 */
	if (cap_unrestricted_guest)
		cr0_ones_mask &= ~(CR0_PG | CR0_PE);

	/*
	 * Do not allow the guest to set CR0_NW or CR0_CD.
	 */
	cr0_zeros_mask |= (CR0_NW | CR0_CD);

	fixed0 = rdmsr(MSR_VMX_CR4_FIXED0);
	fixed1 = rdmsr(MSR_VMX_CR4_FIXED1);
	cr4_ones_mask = fixed0 & fixed1;
	cr4_zeros_mask = ~fixed0 & ~fixed1;

	vpid_init();

	vmx_msr_init();

	/* enable VMX operation */
	smp_rendezvous(NULL, vmx_enable, NULL, NULL);

	vmx_initialized = 1;

	return (0);
}

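/*
 * Dispatch the handler for a host interrupt vector by looking up its IDT
 * gate and calling it via vmx_call_isr(). This is needed because VM exits
 * are configured with "acknowledge interrupt on exit", so an external
 * interrupt that causes a VM-exit is acknowledged by the processor but not
 * delivered through the IDT.
 */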
static void
vmx_trigger_hostintr(int vector)
{
	uintptr_t func;
	struct gate_descriptor *gd;

	gd = &idt[vector];

	KASSERT(vector >= 32 && vector <= 255, ("vmx_trigger_hostintr: "
	    "invalid vector %d", vector));
	KASSERT(gd->gd_p == 1, ("gate descriptor for vector %d not present",
	    vector));
	KASSERT(gd->gd_type == SDT_SYSIGT, ("gate descriptor for vector %d "
	    "has invalid type %d", vector, gd->gd_type));
	KASSERT(gd->gd_dpl == SEL_KPL, ("gate descriptor for vector %d "
	    "has invalid dpl %d", vector, gd->gd_dpl));
	KASSERT(gd->gd_selector == GSEL(GCODE_SEL, SEL_KPL), ("gate descriptor "
	    "for vector %d has invalid selector %d", vector, gd->gd_selector));
	KASSERT(gd->gd_ist == 0, ("gate descriptor for vector %d has invalid "
	    "IST %d", vector, gd->gd_ist));

	func = ((long)gd->gd_hioffset << 16 | gd->gd_looffset);
	vmx_call_isr(func);
}

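/*
 * Program the CR0/CR4 guest/host mask and read shadow in the VMCS. Guest
 * accesses to bits set in the mask are intercepted and guest reads of those
 * bits return the value from the read shadow.
 */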
static int
vmx_setup_cr_shadow(int which, struct vmcs *vmcs, uint32_t initial)
{
	int error, mask_ident, shadow_ident;
	uint64_t mask_value;

	if (which != 0 && which != 4)
		panic("vmx_setup_cr_shadow: unknown cr%d", which);

	if (which == 0) {
		mask_ident = VMCS_CR0_MASK;
		mask_value = cr0_ones_mask | cr0_zeros_mask;
		shadow_ident = VMCS_CR0_SHADOW;
	} else {
		mask_ident = VMCS_CR4_MASK;
		mask_value = cr4_ones_mask | cr4_zeros_mask;
		shadow_ident = VMCS_CR4_SHADOW;
	}

	error = vmcs_setreg(vmcs, 0, VMCS_IDENT(mask_ident), mask_value);
	if (error)
		return (error);

	error = vmcs_setreg(vmcs, 0, VMCS_IDENT(shadow_ident), initial);
	if (error)
		return (error);

	return (0);
}
#define	vmx_setup_cr0_shadow(vmcs,init)	vmx_setup_cr_shadow(0, (vmcs), (init))
#define	vmx_setup_cr4_shadow(vmcs,init)	vmx_setup_cr_shadow(4, (vmcs), (init))

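/*
 * Per-VM initialization: allocate the page-aligned 'struct vmx', derive the
 * EPTP from the pmap and invalidate any stale EPT mappings, set up the MSR
 * bitmap and VPIDs, and initialize each vcpu's VMCS with the execution
 * controls, MSR state and CR0/CR4 shadows computed in vmx_init().
 */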
static void *
vmx_vminit(struct vm *vm, pmap_t pmap)
{
	uint16_t vpid[VM_MAXCPU];
	int i, error;
	struct vmx *vmx;
	struct vmcs *vmcs;

	vmx = malloc(sizeof(struct vmx), M_VMX, M_WAITOK | M_ZERO);
	if ((uintptr_t)vmx & PAGE_MASK) {
		panic("malloc of struct vmx not aligned on %d byte boundary",
		      PAGE_SIZE);
	}
	vmx->vm = vm;

	vmx->eptp = eptp(vtophys((vm_offset_t)pmap->pm_pml4));

	/*
	 * Clean up EPTP-tagged guest physical and combined mappings
	 *
	 * VMX transitions are not required to invalidate any guest physical
	 * mappings. So, it may be possible for stale guest physical mappings
	 * to be present in the processor TLBs.
	 *
	 * Combined mappings for this EP4TA are also invalidated for all VPIDs.
	 */
	ept_invalidate_mappings(vmx->eptp);

	msr_bitmap_initialize(vmx->msr_bitmap);

	/*
	 * It is safe to allow direct access to MSR_GSBASE and MSR_FSBASE.
	 * The guest FSBASE and GSBASE are saved and restored during
	 * vm-exit and vm-entry respectively. The host FSBASE and GSBASE are
	 * always restored from the vmcs host state area on vm-exit.
	 *
	 * The SYSENTER_CS/ESP/EIP MSRs are identical to FS/GSBASE in
	 * how they are saved/restored so can be directly accessed by the
	 * guest.
	 *
	 * MSR_EFER is saved and restored in the guest VMCS area on a
	 * VM exit and entry respectively. It is also restored from the
	 * host VMCS area on a VM exit.
	 *
	 * MSR_PAT is saved and restored in the guest VMCS area on a VM exit
	 * and entry respectively. It is also restored from the host VMCS
	 * area on a VM exit.
	 *
	 * The TSC MSR is exposed read-only. Writes are disallowed as that
	 * will impact the host TSC.
	 * XXX Writes would be implemented with a wrmsr trap, and
	 * then modifying the TSC offset in the VMCS.
	 */
	if (guest_msr_rw(vmx, MSR_GSBASE) ||
	    guest_msr_rw(vmx, MSR_FSBASE) ||
	    guest_msr_rw(vmx, MSR_SYSENTER_CS_MSR) ||
	    guest_msr_rw(vmx, MSR_SYSENTER_ESP_MSR) ||
	    guest_msr_rw(vmx, MSR_SYSENTER_EIP_MSR) ||
	    guest_msr_rw(vmx, MSR_EFER) ||
	    guest_msr_rw(vmx, MSR_PAT) ||
	    guest_msr_ro(vmx, MSR_TSC))
		panic("vmx_vminit: error setting guest msr access");

	vpid_alloc(vpid, VM_MAXCPU);

	if (virtual_interrupt_delivery) {
		error = vm_map_mmio(vm, DEFAULT_APIC_BASE, PAGE_SIZE,
		    APIC_ACCESS_ADDRESS);
		/* XXX this should really return an error to the caller */
		KASSERT(error == 0, ("vm_map_mmio(apicbase) error %d", error));
	}

	for (i = 0; i < VM_MAXCPU; i++) {
		vmcs = &vmx->vmcs[i];
		vmcs->identifier = vmx_revision();
		error = vmclear(vmcs);
		if (error != 0) {
			panic("vmx_vminit: vmclear error %d on vcpu %d\n",
			      error, i);
		}

		vmx_msr_guest_init(vmx, i);

		error = vmcs_init(vmcs);
		KASSERT(error == 0, ("vmcs_init error %d", error));

		VMPTRLD(vmcs);
		error = 0;
		error += vmwrite(VMCS_HOST_RSP, (u_long)&vmx->ctx[i]);
		error += vmwrite(VMCS_EPTP, vmx->eptp);
		error += vmwrite(VMCS_PIN_BASED_CTLS, pinbased_ctls);
		error += vmwrite(VMCS_PRI_PROC_BASED_CTLS, procbased_ctls);
		error += vmwrite(VMCS_SEC_PROC_BASED_CTLS, procbased_ctls2);
		error += vmwrite(VMCS_EXIT_CTLS, exit_ctls);
		error += vmwrite(VMCS_ENTRY_CTLS, entry_ctls);
		error += vmwrite(VMCS_MSR_BITMAP, vtophys(vmx->msr_bitmap));
		error += vmwrite(VMCS_VPID, vpid[i]);
		if (virtual_interrupt_delivery) {
			error += vmwrite(VMCS_APIC_ACCESS, APIC_ACCESS_ADDRESS);
			error += vmwrite(VMCS_VIRTUAL_APIC,
			    vtophys(&vmx->apic_page[i]));
			error += vmwrite(VMCS_EOI_EXIT0, 0);
			error += vmwrite(VMCS_EOI_EXIT1, 0);
			error += vmwrite(VMCS_EOI_EXIT2, 0);
			error += vmwrite(VMCS_EOI_EXIT3, 0);
		}
		if (posted_interrupts) {
			error += vmwrite(VMCS_PIR_VECTOR, pirvec);
			error += vmwrite(VMCS_PIR_DESC,
			    vtophys(&vmx->pir_desc[i]));
		}
		VMCLEAR(vmcs);
		KASSERT(error == 0, ("vmx_vminit: error customizing the vmcs"));

		vmx->cap[i].set = 0;
		vmx->cap[i].proc_ctls = procbased_ctls;
		vmx->cap[i].proc_ctls2 = procbased_ctls2;

		vmx->state[i].lastcpu = NOCPU;
		vmx->state[i].vpid = vpid[i];

		/*
		 * Set up the CR0/4 shadows, and init the read shadow
		 * to the power-on register value from the Intel Sys Arch.
		 *  CR0 - 0x60000010
		 *  CR4 - 0
		 */
		error = vmx_setup_cr0_shadow(vmcs, 0x60000010);
		if (error != 0)
			panic("vmx_setup_cr0_shadow %d", error);

		error = vmx_setup_cr4_shadow(vmcs, 0);
		if (error != 0)
			panic("vmx_setup_cr4_shadow %d", error);

		vmx->ctx[i].pmap = pmap;
	}

	return (vmx);
}

static int
vmx_handle_cpuid(struct vm *vm, int vcpu, struct vmxctx *vmxctx)
{
	int handled, func;

	func = vmxctx->guest_rax;

	handled = x86_emulate_cpuid(vm, vcpu,
				    (uint32_t*)(&vmxctx->guest_rax),
				    (uint32_t*)(&vmxctx->guest_rbx),
				    (uint32_t*)(&vmxctx->guest_rcx),
				    (uint32_t*)(&vmxctx->guest_rdx));
	return (handled);
}

static __inline void
vmx_run_trace(struct vmx *vmx, int vcpu)
{
#ifdef KTR
	VCPU_CTR1(vmx->vm, vcpu, "Resume execution at %#lx", vmcs_guest_rip());
#endif
}

static __inline void
vmx_exit_trace(struct vmx *vmx, int vcpu, uint64_t rip, uint32_t exit_reason,
	       int handled)
{
#ifdef KTR
	VCPU_CTR3(vmx->vm, vcpu, "%s %s vmexit at 0x%0lx",
		 handled ? "handled" : "unhandled",
		 exit_reason_to_str(exit_reason), rip);
#endif
}

static __inline void
vmx_astpending_trace(struct vmx *vmx, int vcpu, uint64_t rip)
{
#ifdef KTR
	VCPU_CTR1(vmx->vm, vcpu, "astpending vmexit at 0x%0lx", rip);
#endif
}

static VMM_STAT_INTEL(VCPU_INVVPID_SAVED, "Number of vpid invalidations saved");
static VMM_STAT_INTEL(VCPU_INVVPID_DONE, "Number of vpid invalidations done");

/*
 * Invalidate guest mappings identified by its vpid from the TLB.
 */
static __inline void
vmx_invvpid(struct vmx *vmx, int vcpu, pmap_t pmap, int running)
{
	struct vmxstate *vmxstate;
	struct invvpid_desc invvpid_desc;

	vmxstate = &vmx->state[vcpu];
	if (vmxstate->vpid == 0)
		return;

	if (!running) {
		/*
		 * Set the 'lastcpu' to an invalid host cpu.
		 *
		 * This will invalidate TLB entries tagged with the vcpu's
		 * vpid the next time it runs via vmx_set_pcpu_defaults().
		 */
		vmxstate->lastcpu = NOCPU;
		return;
	}

	KASSERT(curthread->td_critnest > 0, ("%s: vcpu %d running outside "
	    "critical section", __func__, vcpu));

	/*
	 * Invalidate all mappings tagged with 'vpid'
	 *
	 * We do this because this vcpu was executing on a different host
	 * cpu when it last ran. We do not track whether it invalidated
	 * mappings associated with its 'vpid' during that run. So we must
	 * assume that the mappings associated with 'vpid' on 'curcpu' are
	 * stale and invalidate them.
	 *
	 * Note that we incur this penalty only when the scheduler chooses to
	 * move the thread associated with this vcpu between host cpus.
	 *
	 * Note also that this will invalidate mappings tagged with 'vpid'
	 * for "all" EP4TAs.
	 */
	if (pmap->pm_eptgen == vmx->eptgen[curcpu]) {
		invvpid_desc._res1 = 0;
		invvpid_desc._res2 = 0;
		invvpid_desc.vpid = vmxstate->vpid;
		invvpid_desc.linear_addr = 0;
		invvpid(INVVPID_TYPE_SINGLE_CONTEXT, invvpid_desc);
		vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_DONE, 1);
	} else {
		/*
		 * The invvpid can be skipped if an invept is going to
		 * be performed before entering the guest. The invept
		 * will invalidate combined mappings tagged with
		 * 'vmx->eptp' for all vpids.
		 */
		vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_SAVED, 1);
	}
}

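/*
 * Called before a VM entry to refresh per-cpu host state in the VMCS when
 * the vcpu has migrated to a different host cpu since it last ran, and to
 * invalidate TLB entries tagged with its VPID.
 */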
static void
vmx_set_pcpu_defaults(struct vmx *vmx, int vcpu, pmap_t pmap)
{
	struct vmxstate *vmxstate;

	vmxstate = &vmx->state[vcpu];
	if (vmxstate->lastcpu == curcpu)
		return;

	vmxstate->lastcpu = curcpu;

	vmm_stat_incr(vmx->vm, vcpu, VCPU_MIGRATIONS, 1);

	vmcs_write(VMCS_HOST_TR_BASE, vmm_get_host_trbase());
	vmcs_write(VMCS_HOST_GDTR_BASE, vmm_get_host_gdtrbase());
	vmcs_write(VMCS_HOST_GS_BASE, vmm_get_host_gsbase());
	vmx_invvpid(vmx, vcpu, pmap, 1);
}

/*
 * We depend on 'procbased_ctls' to have the Interrupt Window Exiting bit set.
 */
CTASSERT((PROCBASED_CTLS_ONE_SETTING & PROCBASED_INT_WINDOW_EXITING) != 0);

static void __inline
vmx_set_int_window_exiting(struct vmx *vmx, int vcpu)
{

	if ((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) == 0) {
		vmx->cap[vcpu].proc_ctls |= PROCBASED_INT_WINDOW_EXITING;
		vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
		VCPU_CTR0(vmx->vm, vcpu, "Enabling interrupt window exiting");
	}
}

static void __inline
vmx_clear_int_window_exiting(struct vmx *vmx, int vcpu)
{

	KASSERT((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0,
	    ("intr_window_exiting not set: %#x", vmx->cap[vcpu].proc_ctls));
	vmx->cap[vcpu].proc_ctls &= ~PROCBASED_INT_WINDOW_EXITING;
	vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
	VCPU_CTR0(vmx->vm, vcpu, "Disabling interrupt window exiting");
}

static void __inline
vmx_set_nmi_window_exiting(struct vmx *vmx, int vcpu)
{

	if ((vmx->cap[vcpu].proc_ctls & PROCBASED_NMI_WINDOW_EXITING) == 0) {
		vmx->cap[vcpu].proc_ctls |= PROCBASED_NMI_WINDOW_EXITING;
		vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
		VCPU_CTR0(vmx->vm, vcpu, "Enabling NMI window exiting");
	}
}

static void __inline
vmx_clear_nmi_window_exiting(struct vmx *vmx, int vcpu)
{

	KASSERT((vmx->cap[vcpu].proc_ctls & PROCBASED_NMI_WINDOW_EXITING) != 0,
	    ("nmi_window_exiting not set %#x", vmx->cap[vcpu].proc_ctls));
	vmx->cap[vcpu].proc_ctls &= ~PROCBASED_NMI_WINDOW_EXITING;
	vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
	VCPU_CTR0(vmx->vm, vcpu, "Disabling NMI window exiting");
}

#define	NMI_BLOCKING	(VMCS_INTERRUPTIBILITY_NMI_BLOCKING |		\
			 VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)
#define	HWINTR_BLOCKING	(VMCS_INTERRUPTIBILITY_STI_BLOCKING |		\
			 VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)

static void
vmx_inject_nmi(struct vmx *vmx, int vcpu)
{
	uint32_t gi, info;

	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
	KASSERT((gi & NMI_BLOCKING) == 0, ("vmx_inject_nmi: invalid guest "
	    "interruptibility-state %#x", gi));

	info = vmcs_read(VMCS_ENTRY_INTR_INFO);
	KASSERT((info & VMCS_INTR_VALID) == 0, ("vmx_inject_nmi: invalid "
	    "VM-entry interruption information %#x", info));

	/*
	 * Inject the virtual NMI. The vector must be the NMI IDT entry
	 * or the VMCS entry check will fail.
	 */
	info = IDT_NMI | VMCS_INTR_T_NMI | VMCS_INTR_VALID;
	vmcs_write(VMCS_ENTRY_INTR_INFO, info);

	VCPU_CTR0(vmx->vm, vcpu, "Injecting vNMI");

	/* Clear the request */
	vm_nmi_clear(vmx->vm, vcpu);
}

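/*
 * Event injection performed before resuming the guest: reinject any event
 * that was pending at the last VM-exit, then try to inject a pending NMI
 * (falling back to NMI-window exiting if injection is blocked), and finally
 * inject a pending ExtINT or local APIC vector, handing APIC vectors to the
 * posted-interrupt logic when virtual interrupt delivery is enabled and
 * using interrupt-window exiting when the guest is not interruptible.
 */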
static void
vmx_inject_interrupts(struct vmx *vmx, int vcpu, struct vlapic *vlapic)
{
	int vector, need_nmi_exiting, extint_pending;
	uint64_t rflags, entryinfo;
	uint32_t gi, info;

	if (vm_entry_intinfo(vmx->vm, vcpu, &entryinfo)) {
		KASSERT((entryinfo & VMCS_INTR_VALID) != 0, ("%s: entry "
		    "intinfo is not valid: %#lx", __func__, entryinfo));

		info = vmcs_read(VMCS_ENTRY_INTR_INFO);
		KASSERT((info & VMCS_INTR_VALID) == 0, ("%s: cannot inject "
		     "pending exception: %#lx/%#x", __func__, entryinfo, info));

		info = entryinfo;
		vector = info & 0xff;
		if (vector == IDT_BP || vector == IDT_OF) {
			/*
			 * VT-x requires #BP and #OF to be injected as software
			 * exceptions.
			 */
			info &= ~VMCS_INTR_T_MASK;
			info |= VMCS_INTR_T_SWEXCEPTION;
		}

		if (info & VMCS_INTR_DEL_ERRCODE)
			vmcs_write(VMCS_ENTRY_EXCEPTION_ERROR, entryinfo >> 32);

		vmcs_write(VMCS_ENTRY_INTR_INFO, info);
	}

	if (vm_nmi_pending(vmx->vm, vcpu)) {
		/*
		 * If there are no conditions blocking NMI injection then
		 * inject it directly here otherwise enable "NMI window
		 * exiting" to inject it as soon as we can.
		 *
		 * We also check for STI_BLOCKING because some implementations
		 * don't allow NMI injection in this case. If we are running
		 * on a processor that doesn't have this restriction it will
		 * immediately exit and the NMI will be injected in the
		 * "NMI window exiting" handler.
		 */
		need_nmi_exiting = 1;
		gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
		if ((gi & (HWINTR_BLOCKING | NMI_BLOCKING)) == 0) {
			info = vmcs_read(VMCS_ENTRY_INTR_INFO);
			if ((info & VMCS_INTR_VALID) == 0) {
				vmx_inject_nmi(vmx, vcpu);
				need_nmi_exiting = 0;
			} else {
				VCPU_CTR1(vmx->vm, vcpu, "Cannot inject NMI "
				    "due to VM-entry intr info %#x", info);
			}
		} else {
			VCPU_CTR1(vmx->vm, vcpu, "Cannot inject NMI due to "
			    "Guest Interruptibility-state %#x", gi);
		}

		if (need_nmi_exiting)
			vmx_set_nmi_window_exiting(vmx, vcpu);
	}

	extint_pending = vm_extint_pending(vmx->vm, vcpu);

	if (!extint_pending && virtual_interrupt_delivery) {
		vmx_inject_pir(vlapic);
		return;
	}

	/*
	 * If interrupt-window exiting is already in effect then don't bother
	 * checking for pending interrupts. This is just an optimization and
	 * not needed for correctness.
	 */
	if ((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0) {
		VCPU_CTR0(vmx->vm, vcpu, "Skip interrupt injection due to "
		    "pending int_window_exiting");
		return;
	}

	if (!extint_pending) {
		/* Ask the local apic for a vector to inject */
		if (!vlapic_pending_intr(vlapic, &vector))
			return;

		/*
		 * From the Intel SDM, Volume 3, Section "Maskable
		 * Hardware Interrupts":
		 * - maskable interrupt vectors [16,255] can be delivered
		 *   through the local APIC.
		 */
		KASSERT(vector >= 16 && vector <= 255,
		    ("invalid vector %d from local APIC", vector));
	} else {
		/* Ask the legacy pic for a vector to inject */
		vatpic_pending_intr(vmx->vm, &vector);

		/*
		 * From the Intel SDM, Volume 3, Section "Maskable
		 * Hardware Interrupts":
		 * - maskable interrupt vectors [0,255] can be delivered
		 *   through the INTR pin.
		 */
		KASSERT(vector >= 0 && vector <= 255,
		    ("invalid vector %d from INTR", vector));
	}

	/* Check RFLAGS.IF and the interruptibility state of the guest */
	rflags = vmcs_read(VMCS_GUEST_RFLAGS);
	if ((rflags & PSL_I) == 0) {
		VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to "
		    "rflags %#lx", vector, rflags);
		goto cantinject;
	}

	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
	if (gi & HWINTR_BLOCKING) {
		VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to "
		    "Guest Interruptibility-state %#x", vector, gi);
		goto cantinject;
	}

	info = vmcs_read(VMCS_ENTRY_INTR_INFO);
	if (info & VMCS_INTR_VALID) {
		/*
		 * This is expected and could happen for multiple reasons:
		 * - A vectoring VM-entry was aborted due to astpending
		 * - A VM-exit happened during event injection.
		 * - An exception was injected above.
		 * - An NMI was injected above or after "NMI window exiting"
		 */
		VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to "
		    "VM-entry intr info %#x", vector, info);
		goto cantinject;
	}

	/* Inject the interrupt */
	info = VMCS_INTR_T_HWINTR | VMCS_INTR_VALID;
	info |= vector;
	vmcs_write(VMCS_ENTRY_INTR_INFO, info);

	if (!extint_pending) {
		/* Update the Local APIC ISR */
		vlapic_intr_accepted(vlapic, vector);
	} else {
		vm_extint_clear(vmx->vm, vcpu);
		vatpic_intr_accepted(vmx->vm, vector);

		/*
		 * After we accepted the current ExtINT the PIC may
		 * have posted another one.  If that is the case, set
		 * the Interrupt Window Exiting execution control so
		 * we can inject that one too.
		 *
		 * Also, interrupt window exiting allows us to inject any
		 * pending APIC vector that was preempted by the ExtINT
		 * as soon as possible. This applies both for the software
		 * emulated vlapic and the hardware assisted virtual APIC.
		 */
		vmx_set_int_window_exiting(vmx, vcpu);
	}

	VCPU_CTR1(vmx->vm, vcpu, "Injecting hwintr at vector %d", vector);

	return;

cantinject:
	/*
	 * Set the Interrupt Window Exiting execution control so we can inject
	 * the interrupt as soon as blocking condition goes away.
	 */
	vmx_set_int_window_exiting(vmx, vcpu);
}

/*
 * If the Virtual NMIs execution control is '1' then the logical processor
 * tracks virtual-NMI blocking in the Guest Interruptibility-state field of
 * the VMCS. An IRET instruction in VMX non-root operation will remove any
 * virtual-NMI blocking.
 *
 * This unblocking occurs even if the IRET causes a fault. In this case the
 * hypervisor needs to restore virtual-NMI blocking before resuming the guest.
 */
static void
vmx_restore_nmi_blocking(struct vmx *vmx, int vcpuid)
{
	uint32_t gi;

	VCPU_CTR0(vmx->vm, vcpuid, "Restore Virtual-NMI blocking");
	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
	gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
	vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
}

static void
vmx_clear_nmi_blocking(struct vmx *vmx, int vcpuid)
{
	uint32_t gi;

	VCPU_CTR0(vmx->vm, vcpuid, "Clear Virtual-NMI blocking");
	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
	gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
	vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
}

static void
vmx_assert_nmi_blocking(struct vmx *vmx, int vcpuid)
{
	uint32_t gi;

	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
	KASSERT(gi & VMCS_INTERRUPTIBILITY_NMI_BLOCKING,
	    ("NMI blocking is not in effect %#x", gi));
}

static int
vmx_emulate_xsetbv(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
{
	struct vmxctx *vmxctx;
	uint64_t xcrval;
	const struct xsave_limits *limits;

	vmxctx = &vmx->ctx[vcpu];
	limits = vmm_get_xsave_limits();

	/*
	 * Note that the processor raises a GP# fault on its own if
	 * xsetbv is executed for CPL != 0, so we do not have to
	 * emulate that fault here.
	 */

	/* Only xcr0 is supported. */
	if (vmxctx->guest_rcx != 0) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/* We only handle xcr0 if both the host and guest have XSAVE enabled. */
	if (!limits->xsave_enabled || !(vmcs_read(VMCS_GUEST_CR4) & CR4_XSAVE)) {
		vm_inject_ud(vmx->vm, vcpu);
		return (HANDLED);
	}

	xcrval = vmxctx->guest_rdx << 32 | (vmxctx->guest_rax & 0xffffffff);
	if ((xcrval & ~limits->xcr0_allowed) != 0) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	if (!(xcrval & XFEATURE_ENABLED_X87)) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/* AVX (YMM_Hi128) requires SSE. */
	if (xcrval & XFEATURE_ENABLED_AVX &&
	    (xcrval & XFEATURE_AVX) != XFEATURE_AVX) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/*
	 * AVX512 requires base AVX (YMM_Hi128) as well as OpMask,
	 * ZMM_Hi256, and Hi16_ZMM.
	 */
	if (xcrval & XFEATURE_AVX512 &&
	    (xcrval & (XFEATURE_AVX512 | XFEATURE_AVX)) !=
	    (XFEATURE_AVX512 | XFEATURE_AVX)) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/*
	 * Intel MPX requires both bound register state flags to be
	 * set.
	 */
	if (((xcrval & XFEATURE_ENABLED_BNDREGS) != 0) !=
	    ((xcrval & XFEATURE_ENABLED_BNDCSR) != 0)) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/*
	 * This runs "inside" vmrun() with the guest's FPU state, so
	 * modifying xcr0 directly modifies the guest's xcr0, not the
	 * host's.
	 */
	load_xcr(0, xcrval);
	return (HANDLED);
}

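/*
 * Read a guest register identified by its encoding in the exit qualification
 * (0 = %rax ... 15 = %r15). %rsp is accessed through the VMCS; all other
 * registers live in the vmxctx.
 */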
static uint64_t
vmx_get_guest_reg(struct vmx *vmx, int vcpu, int ident)
{
	const struct vmxctx *vmxctx;

	vmxctx = &vmx->ctx[vcpu];

	switch (ident) {
	case 0:
		return (vmxctx->guest_rax);
	case 1:
		return (vmxctx->guest_rcx);
	case 2:
		return (vmxctx->guest_rdx);
	case 3:
		return (vmxctx->guest_rbx);
	case 4:
		return (vmcs_read(VMCS_GUEST_RSP));
	case 5:
		return (vmxctx->guest_rbp);
	case 6:
		return (vmxctx->guest_rsi);
	case 7:
		return (vmxctx->guest_rdi);
	case 8:
		return (vmxctx->guest_r8);
	case 9:
		return (vmxctx->guest_r9);
	case 10:
		return (vmxctx->guest_r10);
	case 11:
		return (vmxctx->guest_r11);
	case 12:
		return (vmxctx->guest_r12);
	case 13:
		return (vmxctx->guest_r13);
	case 14:
		return (vmxctx->guest_r14);
	case 15:
		return (vmxctx->guest_r15);
	default:
		panic("invalid vmx register %d", ident);
	}
}

static void
vmx_set_guest_reg(struct vmx *vmx, int vcpu, int ident, uint64_t regval)
{
	struct vmxctx *vmxctx;

	vmxctx = &vmx->ctx[vcpu];

	switch (ident) {
	case 0:
		vmxctx->guest_rax = regval;
		break;
	case 1:
		vmxctx->guest_rcx = regval;
		break;
	case 2:
		vmxctx->guest_rdx = regval;
		break;
	case 3:
		vmxctx->guest_rbx = regval;
		break;
	case 4:
		vmcs_write(VMCS_GUEST_RSP, regval);
		break;
	case 5:
		vmxctx->guest_rbp = regval;
		break;
	case 6:
		vmxctx->guest_rsi = regval;
		break;
	case 7:
		vmxctx->guest_rdi = regval;
		break;
	case 8:
		vmxctx->guest_r8 = regval;
		break;
	case 9:
		vmxctx->guest_r9 = regval;
		break;
	case 10:
		vmxctx->guest_r10 = regval;
		break;
	case 11:
		vmxctx->guest_r11 = regval;
		break;
	case 12:
		vmxctx->guest_r12 = regval;
		break;
	case 13:
		vmxctx->guest_r13 = regval;
		break;
	case 14:
		vmxctx->guest_r14 = regval;
		break;
	case 15:
		vmxctx->guest_r15 = regval;
		break;
	default:
		panic("invalid vmx register %d", ident);
	}
}

static int
vmx_emulate_cr0_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
{
	uint64_t crval, regval;

	/* We only handle mov to %cr0 at this time */
	if ((exitqual & 0xf0) != 0x00)
		return (UNHANDLED);

	regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);

	vmcs_write(VMCS_CR0_SHADOW, regval);

	crval = regval | cr0_ones_mask;
	crval &= ~cr0_zeros_mask;
	vmcs_write(VMCS_GUEST_CR0, crval);

	if (regval & CR0_PG) {
		uint64_t efer, entry_ctls;

		/*
		 * If CR0.PG is 1 and EFER.LME is 1 then EFER.LMA and
		 * the "IA-32e mode guest" bit in VM-entry control must be
		 * equal.
		 */
		efer = vmcs_read(VMCS_GUEST_IA32_EFER);
		if (efer & EFER_LME) {
			efer |= EFER_LMA;
			vmcs_write(VMCS_GUEST_IA32_EFER, efer);
			entry_ctls = vmcs_read(VMCS_ENTRY_CTLS);
			entry_ctls |= VM_ENTRY_GUEST_LMA;
			vmcs_write(VMCS_ENTRY_CTLS, entry_ctls);
		}
	}

	return (HANDLED);
}

static int
vmx_emulate_cr4_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
{
	uint64_t crval, regval;

	/* We only handle mov to %cr4 at this time */
	if ((exitqual & 0xf0) != 0x00)
		return (UNHANDLED);

	regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);

	vmcs_write(VMCS_CR4_SHADOW, regval);

	crval = regval | cr4_ones_mask;
	crval &= ~cr4_zeros_mask;
	vmcs_write(VMCS_GUEST_CR4, crval);

	return (HANDLED);
}

static int
vmx_emulate_cr8_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
{
	struct vlapic *vlapic;
	uint64_t cr8;
	int regnum;

	/* We only handle mov %cr8 to/from a register at this time. */
	if ((exitqual & 0xe0) != 0x00) {
		return (UNHANDLED);
	}

	vlapic = vm_lapic(vmx->vm, vcpu);
	regnum = (exitqual >> 8) & 0xf;
	if (exitqual & 0x10) {
		cr8 = vlapic_get_cr8(vlapic);
		vmx_set_guest_reg(vmx, vcpu, regnum, cr8);
	} else {
		cr8 = vmx_get_guest_reg(vmx, vcpu, regnum);
		vlapic_set_cr8(vlapic, cr8);
	}

	return (HANDLED);
}

/*
 * From section "Guest Register State" in the Intel SDM: CPL = SS.DPL
 */
static int
vmx_cpl(void)
{
	uint32_t ssar;

	ssar = vmcs_read(VMCS_GUEST_SS_ACCESS_RIGHTS);
	return ((ssar >> 5) & 0x3);
}

static enum vm_cpu_mode
vmx_cpu_mode(void)
{
	uint32_t csar;

	if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LMA) {
		csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS);
		if (csar & 0x2000)
			return (CPU_MODE_64BIT);	/* CS.L = 1 */
		else
			return (CPU_MODE_COMPATIBILITY);
	} else if (vmcs_read(VMCS_GUEST_CR0) & CR0_PE) {
		return (CPU_MODE_PROTECTED);
	} else {
		return (CPU_MODE_REAL);
	}
}

static enum vm_paging_mode
vmx_paging_mode(void)
{

	if (!(vmcs_read(VMCS_GUEST_CR0) & CR0_PG))
		return (PAGING_MODE_FLAT);
	if (!(vmcs_read(VMCS_GUEST_CR4) & CR4_PAE))
		return (PAGING_MODE_32);
	if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LME)
		return (PAGING_MODE_64);
	else
		return (PAGING_MODE_PAE);
}

static uint64_t
inout_str_index(struct vmx *vmx, int vcpuid, int in)
{
	uint64_t val;
	int error;
	enum vm_reg_name reg;

	reg = in ? VM_REG_GUEST_RDI : VM_REG_GUEST_RSI;
	error = vmx_getreg(vmx, vcpuid, reg, &val);
	KASSERT(error == 0, ("%s: vmx_getreg error %d", __func__, error));
	return (val);
}

static uint64_t
inout_str_count(struct vmx *vmx, int vcpuid, int rep)
{
	uint64_t val;
	int error;

	if (rep) {
		error = vmx_getreg(vmx, vcpuid, VM_REG_GUEST_RCX, &val);
		KASSERT(!error, ("%s: vmx_getreg error %d", __func__, error));
	} else {
		val = 1;
	}
	return (val);
}

static int
inout_str_addrsize(uint32_t inst_info)
{
	uint32_t size;

	size = (inst_info >> 7) & 0x7;
	switch (size) {
	case 0:
		return (2);	/* 16 bit */
	case 1:
		return (4);	/* 32 bit */
	case 2:
		return (8);	/* 64 bit */
	default:
		panic("%s: invalid size encoding %d", __func__, size);
	}
}

static void
inout_str_seginfo(struct vmx *vmx, int vcpuid, uint32_t inst_info, int in,
    struct vm_inout_str *vis)
{
	int error, s;

	if (in) {
		vis->seg_name = VM_REG_GUEST_ES;
	} else {
		s = (inst_info >> 15) & 0x7;
		vis->seg_name = vm_segment_name(s);
	}

	error = vmx_getdesc(vmx, vcpuid, vis->seg_name, &vis->seg_desc);
	KASSERT(error == 0, ("%s: vmx_getdesc error %d", __func__, error));

	/* XXX modify svm.c to update bit 16 of seg_desc.access (unusable) */
}

static void
vmx_paging_info(struct vm_guest_paging *paging)
{
	paging->cr3 = vmcs_guest_cr3();
	paging->cpl = vmx_cpl();
	paging->cpu_mode = vmx_cpu_mode();
	paging->paging_mode = vmx_paging_mode();
}

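/*
 * Prepare a VM_EXITCODE_INST_EMUL exit: record the guest-physical and
 * guest-linear addresses, capture the guest paging state and note whether
 * %cs is a 32-bit code segment so the instruction can be decoded later.
 */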
static void
vmexit_inst_emul(struct vm_exit *vmexit, uint64_t gpa, uint64_t gla)
{
	struct vm_guest_paging *paging;
	uint32_t csar;

	paging = &vmexit->u.inst_emul.paging;

	vmexit->exitcode = VM_EXITCODE_INST_EMUL;
	vmexit->u.inst_emul.gpa = gpa;
	vmexit->u.inst_emul.gla = gla;
	vmx_paging_info(paging);
	switch (paging->cpu_mode) {
	case CPU_MODE_PROTECTED:
	case CPU_MODE_COMPATIBILITY:
		csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS);
		vmexit->u.inst_emul.cs_d = SEG_DESC_DEF32(csar);
		break;
	default:
		vmexit->u.inst_emul.cs_d = 0;
		break;
	}
}

static int
ept_fault_type(uint64_t ept_qual)
{
	int fault_type;

	if (ept_qual & EPT_VIOLATION_DATA_WRITE)
		fault_type = VM_PROT_WRITE;
	else if (ept_qual & EPT_VIOLATION_INST_FETCH)
		fault_type = VM_PROT_EXECUTE;
	else
		fault_type = VM_PROT_READ;

	return (fault_type);
}

static boolean_t
ept_emulation_fault(uint64_t ept_qual)
{
	int read, write;

	/* EPT fault on an instruction fetch doesn't make sense here */
	if (ept_qual & EPT_VIOLATION_INST_FETCH)
		return (FALSE);

	/* EPT fault must be a read fault or a write fault */
	read = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0;
	write = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0;
	if ((read | write) == 0)
		return (FALSE);

	/*
	 * The EPT violation must have been caused by accessing a
	 * guest-physical address that is a translation of a guest-linear
	 * address.
	 */
	if ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 ||
	    (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) {
		return (FALSE);
	}

	return (TRUE);
}

static __inline int
apic_access_virtualization(struct vmx *vmx, int vcpuid)
{
	uint32_t proc_ctls2;

	proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
	return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) ? 1 : 0);
}

static __inline int
x2apic_virtualization(struct vmx *vmx, int vcpuid)
{
	uint32_t proc_ctls2;

	proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
	return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_X2APIC_MODE) ? 1 : 0);
}

static int
vmx_handle_apic_write(struct vmx *vmx, int vcpuid, struct vlapic *vlapic,
    uint64_t qual)
{
	int error, handled, offset;
	uint32_t *apic_regs, vector;
	bool retu;

	handled = HANDLED;
	offset = APIC_WRITE_OFFSET(qual);

	if (!apic_access_virtualization(vmx, vcpuid)) {
		/*
		 * In general there should not be any APIC write VM-exits
		 * unless APIC-access virtualization is enabled.
		 *
		 * However self-IPI virtualization can legitimately trigger
		 * an APIC-write VM-exit so treat it specially.
		 */
		if (x2apic_virtualization(vmx, vcpuid) &&
		    offset == APIC_OFFSET_SELF_IPI) {
			apic_regs = (uint32_t *)(vlapic->apic_page);
			vector = apic_regs[APIC_OFFSET_SELF_IPI / 4];
			vlapic_self_ipi_handler(vlapic, vector);
			return (HANDLED);
		} else
			return (UNHANDLED);
	}

	switch (offset) {
	case APIC_OFFSET_ID:
		vlapic_id_write_handler(vlapic);
		break;
	case APIC_OFFSET_LDR:
		vlapic_ldr_write_handler(vlapic);
		break;
	case APIC_OFFSET_DFR:
		vlapic_dfr_write_handler(vlapic);
		break;
	case APIC_OFFSET_SVR:
		vlapic_svr_write_handler(vlapic);
		break;
	case APIC_OFFSET_ESR:
		vlapic_esr_write_handler(vlapic);
		break;
	case APIC_OFFSET_ICR_LOW:
		retu = false;
		error = vlapic_icrlo_write_handler(vlapic, &retu);
		if (error != 0 || retu)
			handled = UNHANDLED;
		break;
	case APIC_OFFSET_CMCI_LVT:
	case APIC_OFFSET_TIMER_LVT ... APIC_OFFSET_ERROR_LVT:
		vlapic_lvt_write_handler(vlapic, offset);
		break;
	case APIC_OFFSET_TIMER_ICR:
		vlapic_icrtmr_write_handler(vlapic);
		break;
	case APIC_OFFSET_TIMER_DCR:
		vlapic_dcr_write_handler(vlapic);
		break;
	default:
		handled = UNHANDLED;
		break;
	}
	return (handled);
}

1915static bool
1916apic_access_fault(struct vmx *vmx, int vcpuid, uint64_t gpa)
1917{
1918
1919	if (apic_access_virtualization(vmx, vcpuid) &&
1920	    (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE))
1921		return (true);
1922	else
1923		return (false);
1924}
1925
1926static int
1927vmx_handle_apic_access(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit)
1928{
1929	uint64_t qual;
1930	int access_type, offset, allowed;
1931
1932	if (!apic_access_virtualization(vmx, vcpuid))
1933		return (UNHANDLED);
1934
1935	qual = vmexit->u.vmx.exit_qualification;
1936	access_type = APIC_ACCESS_TYPE(qual);
1937	offset = APIC_ACCESS_OFFSET(qual);
1938
1939	allowed = 0;
1940	if (access_type == 0) {
1941		/*
1942		 * Read data access to the following registers is expected.
1943		 */
1944		switch (offset) {
1945		case APIC_OFFSET_APR:
1946		case APIC_OFFSET_PPR:
1947		case APIC_OFFSET_RRR:
1948		case APIC_OFFSET_CMCI_LVT:
1949		case APIC_OFFSET_TIMER_CCR:
1950			allowed = 1;
1951			break;
1952		default:
1953			break;
1954		}
1955	} else if (access_type == 1) {
1956		/*
1957		 * Write data access to the following registers is expected.
1958		 */
1959		switch (offset) {
1960		case APIC_OFFSET_VER:
1961		case APIC_OFFSET_APR:
1962		case APIC_OFFSET_PPR:
1963		case APIC_OFFSET_RRR:
1964		case APIC_OFFSET_ISR0 ... APIC_OFFSET_ISR7:
1965		case APIC_OFFSET_TMR0 ... APIC_OFFSET_TMR7:
1966		case APIC_OFFSET_IRR0 ... APIC_OFFSET_IRR7:
1967		case APIC_OFFSET_CMCI_LVT:
1968		case APIC_OFFSET_TIMER_CCR:
1969			allowed = 1;
1970			break;
1971		default:
1972			break;
1973		}
1974	}
1975
1976	if (allowed) {
1977		vmexit_inst_emul(vmexit, DEFAULT_APIC_BASE + offset,
1978		    VIE_INVALID_GLA);
1979	}
1980
1981	/*
1982	 * Regardless of whether the APIC-access is allowed this handler
1983	 * always returns UNHANDLED:
1984	 * - if the access is allowed then it is handled by emulating the
1985	 *   instruction that caused the VM-exit (outside the critical section)
1986	 * - if the access is not allowed then it will be converted to an
1987	 *   exitcode of VM_EXITCODE_VMX and will be dealt with in userland.
1988	 */
1989	return (UNHANDLED);
1990}
1991
1992static enum task_switch_reason
1993vmx_task_switch_reason(uint64_t qual)
1994{
1995	int reason;
1996
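	/*
	 * Bits 31:30 of the task-switch exit qualification identify the
	 * source of the switch (CALL, IRET, JMP or a task gate in the
	 * IDT); the mapping below mirrors that encoding.
	 */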
1997	reason = (qual >> 30) & 0x3;
1998	switch (reason) {
1999	case 0:
2000		return (TSR_CALL);
2001	case 1:
2002		return (TSR_IRET);
2003	case 2:
2004		return (TSR_JMP);
2005	case 3:
2006		return (TSR_IDT_GATE);
2007	default:
2008		panic("%s: invalid reason %d", __func__, reason);
2009	}
2010}
2011
2012static int
2013emulate_wrmsr(struct vmx *vmx, int vcpuid, u_int num, uint64_t val, bool *retu)
2014{
2015	int error;
2016
2017	if (lapic_msr(num))
2018		error = lapic_wrmsr(vmx->vm, vcpuid, num, val, retu);
2019	else
2020		error = vmx_wrmsr(vmx, vcpuid, num, val, retu);
2021
2022	return (error);
2023}
2024
2025static int
2026emulate_rdmsr(struct vmx *vmx, int vcpuid, u_int num, bool *retu)
2027{
2028	struct vmxctx *vmxctx;
2029	uint64_t result;
2030	uint32_t eax, edx;
2031	int error;
2032
2033	if (lapic_msr(num))
2034		error = lapic_rdmsr(vmx->vm, vcpuid, num, &result, retu);
2035	else
2036		error = vmx_rdmsr(vmx, vcpuid, num, &result, retu);
2037
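	/*
	 * RDMSR returns its 64-bit result in EDX:EAX, so on success the
	 * low and high halves are propagated into the guest's %rax and
	 * %rdx below.
	 */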
2038	if (error == 0) {
2039		eax = result;
2040		vmxctx = &vmx->ctx[vcpuid];
2041		error = vmxctx_setreg(vmxctx, VM_REG_GUEST_RAX, eax);
2042		KASSERT(error == 0, ("vmxctx_setreg(rax) error %d", error));
2043
2044		edx = result >> 32;
2045		error = vmxctx_setreg(vmxctx, VM_REG_GUEST_RDX, edx);
2046		KASSERT(error == 0, ("vmxctx_setreg(rdx) error %d", error));
2047	}
2048
2049	return (error);
2050}
2051
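/*
 * Process a VM exit while still inside the vmx_run() loop: decode the
 * exit reason and qualification, handle in the kernel whatever can be
 * handled here (HANDLED) and otherwise fill in 'vmexit' so the exit can
 * be completed in userland (UNHANDLED).
 */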
2052static int
2053vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
2054{
2055	int error, handled, in;
2056	struct vmxctx *vmxctx;
2057	struct vlapic *vlapic;
2058	struct vm_inout_str *vis;
2059	struct vm_task_switch *ts;
2060	uint32_t eax, ecx, edx, idtvec_info, idtvec_err, intr_info, inst_info;
2061	uint32_t intr_type, reason;
2062	uint64_t exitintinfo, qual, gpa;
2063	bool retu;
2064
2065	CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_VIRTUAL_NMI) != 0);
2066	CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_NMI_EXITING) != 0);
2067
2068	handled = UNHANDLED;
2069	vmxctx = &vmx->ctx[vcpu];
2070
2071	qual = vmexit->u.vmx.exit_qualification;
2072	reason = vmexit->u.vmx.exit_reason;
2073	vmexit->exitcode = VM_EXITCODE_BOGUS;
2074
2075	vmm_stat_incr(vmx->vm, vcpu, VMEXIT_COUNT, 1);
2076
2077	/*
2078	 * VM exits that can be triggered during event delivery need to
2079	 * be handled specially by re-injecting the event if the IDT
2080	 * vectoring information field's valid bit is set.
2081	 *
2082	 * See "Information for VM Exits During Event Delivery" in Intel SDM
2083	 * for details.
2084	 */
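	/*
	 * Rough layout of the IDT-vectoring information consumed below
	 * (see the SDM for the authoritative definition): bits 7:0 hold
	 * the vector of the event being delivered, bits 10:8 the
	 * interruption type, bit 11 says whether an error code is valid,
	 * bit 12 is undefined (and cleared below) and bit 31 is the
	 * valid bit.  The optional error code is packed into bits 63:32
	 * of 'exitintinfo' before it is handed to vm_exit_intinfo().
	 */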
2085	idtvec_info = vmcs_idt_vectoring_info();
2086	if (idtvec_info & VMCS_IDT_VEC_VALID) {
2087		idtvec_info &= ~(1 << 12); /* clear undefined bit */
2088		exitintinfo = idtvec_info;
2089		if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) {
2090			idtvec_err = vmcs_idt_vectoring_err();
2091			exitintinfo |= (uint64_t)idtvec_err << 32;
2092		}
2093		error = vm_exit_intinfo(vmx->vm, vcpu, exitintinfo);
2094		KASSERT(error == 0, ("%s: vm_set_intinfo error %d",
2095		    __func__, error));
2096
2097		/*
2098		 * If 'virtual NMIs' are being used and the VM-exit
2099		 * happened while injecting an NMI during the previous
2100		 * VM-entry, then clear "blocking by NMI" in the
2101		 * Guest Interruptibility-State so the NMI can be
2102		 * reinjected on the subsequent VM-entry.
2103		 *
2104		 * However, if the NMI was being delivered through a task
2105		 * gate, then the new task must start execution with NMIs
2106		 * blocked so don't clear NMI blocking in this case.
2107		 */
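		/*
		 * ("Blocking by NMI" refers to the corresponding bit in
		 * the guest interruptibility-state field; the helpers
		 * called below clear or assert just that bit.)
		 */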
2108		intr_type = idtvec_info & VMCS_INTR_T_MASK;
2109		if (intr_type == VMCS_INTR_T_NMI) {
2110			if (reason != EXIT_REASON_TASK_SWITCH)
2111				vmx_clear_nmi_blocking(vmx, vcpu);
2112			else
2113				vmx_assert_nmi_blocking(vmx, vcpu);
2114		}
2115
2116		/*
2117		 * Update VM-entry instruction length if the event being
2118		 * delivered was a software interrupt or software exception.
2119		 */
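		/*
		 * (For these event types the processor uses the VM-entry
		 * instruction length to compute the return %rip pushed on
		 * the guest stack, e.g. 1 byte for INT3 or 2 bytes for
		 * INT n, so the length captured at exit time is replayed
		 * here.)
		 */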
2120		if (intr_type == VMCS_INTR_T_SWINTR ||
2121		    intr_type == VMCS_INTR_T_PRIV_SWEXCEPTION ||
2122		    intr_type == VMCS_INTR_T_SWEXCEPTION) {
2123			vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length);
2124		}
2125	}
2126
2127	switch (reason) {
2128	case EXIT_REASON_TASK_SWITCH:
2129		ts = &vmexit->u.task_switch;
2130		ts->tsssel = qual & 0xffff;
2131		ts->reason = vmx_task_switch_reason(qual);
2132		ts->ext = 0;
2133		ts->errcode_valid = 0;
2134		vmx_paging_info(&ts->paging);
2135		/*
2136		 * If the task switch was due to a CALL, JMP, IRET, software
2137		 * interrupt (INT n) or software exception (INT3, INTO),
2138		 * then the saved %rip references the instruction that caused
2139		 * the task switch. The instruction length field in the VMCS
2140		 * is valid in this case.
2141		 *
2142		 * In all other cases (e.g., NMI, hardware exception) the
2143		 * saved %rip is one that would have been saved in the old TSS
2144		 * had the task switch completed normally so the instruction
2145		 * had the task switch completed normally, so the instruction
2146		 * set to 0.
2147		 */
2148		if (ts->reason == TSR_IDT_GATE) {
2149			KASSERT(idtvec_info & VMCS_IDT_VEC_VALID,
2150			    ("invalid idtvec_info %#x for IDT task switch",
2151			    idtvec_info));
2152			intr_type = idtvec_info & VMCS_INTR_T_MASK;
2153			if (intr_type != VMCS_INTR_T_SWINTR &&
2154			    intr_type != VMCS_INTR_T_SWEXCEPTION &&
2155			    intr_type != VMCS_INTR_T_PRIV_SWEXCEPTION) {
2156				/* Task switch triggered by external event */
2157				ts->ext = 1;
2158				vmexit->inst_length = 0;
2159				if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) {
2160					ts->errcode_valid = 1;
2161					ts->errcode = vmcs_idt_vectoring_err();
2162				}
2163			}
2164		}
2165		vmexit->exitcode = VM_EXITCODE_TASK_SWITCH;
2166		VCPU_CTR4(vmx->vm, vcpu, "task switch reason %d, tss 0x%04x, "
2167		    "%s errcode 0x%016lx", ts->reason, ts->tsssel,
2168		    ts->ext ? "external" : "internal",
2169		    ((uint64_t)ts->errcode << 32) | ts->errcode_valid);
2170		break;
2171	case EXIT_REASON_CR_ACCESS:
2172		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CR_ACCESS, 1);
2173		switch (qual & 0xf) {
2174		case 0:
2175			handled = vmx_emulate_cr0_access(vmx, vcpu, qual);
2176			break;
2177		case 4:
2178			handled = vmx_emulate_cr4_access(vmx, vcpu, qual);
2179			break;
2180		case 8:
2181			handled = vmx_emulate_cr8_access(vmx, vcpu, qual);
2182			break;
2183		}
2184		break;
2185	case EXIT_REASON_RDMSR:
2186		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_RDMSR, 1);
2187		retu = false;
2188		ecx = vmxctx->guest_rcx;
2189		VCPU_CTR1(vmx->vm, vcpu, "rdmsr 0x%08x", ecx);
2190		error = emulate_rdmsr(vmx, vcpu, ecx, &retu);
2191		if (error) {
2192			vmexit->exitcode = VM_EXITCODE_RDMSR;
2193			vmexit->u.msr.code = ecx;
2194		} else if (!retu) {
2195			handled = HANDLED;
2196		} else {
2197			/* Return to userspace with a valid exitcode */
2198			KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
2199			    ("emulate_rdmsr retu with bogus exitcode"));
2200		}
2201		break;
2202	case EXIT_REASON_WRMSR:
2203		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_WRMSR, 1);
2204		retu = false;
2205		eax = vmxctx->guest_rax;
2206		ecx = vmxctx->guest_rcx;
2207		edx = vmxctx->guest_rdx;
2208		VCPU_CTR2(vmx->vm, vcpu, "wrmsr 0x%08x value 0x%016lx",
2209		    ecx, (uint64_t)edx << 32 | eax);
2210		error = emulate_wrmsr(vmx, vcpu, ecx,
2211		    (uint64_t)edx << 32 | eax, &retu);
2212		if (error) {
2213			vmexit->exitcode = VM_EXITCODE_WRMSR;
2214			vmexit->u.msr.code = ecx;
2215			vmexit->u.msr.wval = (uint64_t)edx << 32 | eax;
2216		} else if (!retu) {
2217			handled = HANDLED;
2218		} else {
2219			/* Return to userspace with a valid exitcode */
2220			KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
2221			    ("emulate_wrmsr retu with bogus exitcode"));
2222		}
2223		break;
2224	case EXIT_REASON_HLT:
2225		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_HLT, 1);
2226		vmexit->exitcode = VM_EXITCODE_HLT;
2227		vmexit->u.hlt.rflags = vmcs_read(VMCS_GUEST_RFLAGS);
2228		break;
2229	case EXIT_REASON_MTF:
2230		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MTRAP, 1);
2231		vmexit->exitcode = VM_EXITCODE_MTRAP;
2232		break;
2233	case EXIT_REASON_PAUSE:
2234		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_PAUSE, 1);
2235		vmexit->exitcode = VM_EXITCODE_PAUSE;
2236		break;
2237	case EXIT_REASON_INTR_WINDOW:
2238		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INTR_WINDOW, 1);
2239		vmx_clear_int_window_exiting(vmx, vcpu);
2240		return (1);
2241	case EXIT_REASON_EXT_INTR:
2242		/*
2243		 * External interrupts serve only to cause VM exits and allow
2244		 * the host interrupt handler to run.
2245		 *
2246		 * If this external interrupt triggers a virtual interrupt
2247		 * to a VM, then that state will be recorded by the
2248		 * host interrupt handler in the VM's softc. We will inject
2249		 * this virtual interrupt during the subsequent VM enter.
2250		 */
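		/*
		 * Bits 7:0 of the exit interruption information hold the
		 * vector of the external interrupt; that is the value
		 * handed to vmx_trigger_hostintr() below.
		 */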
2251		intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
2252
2253		/*
2254		 * XXX: Ignore this exit if VMCS_INTR_VALID is not set.
2255		 * This appears to be a bug in VMware Fusion?
2256		 */
2257		if (!(intr_info & VMCS_INTR_VALID))
2258			return (1);
2259		KASSERT((intr_info & VMCS_INTR_VALID) != 0 &&
2260		    (intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_HWINTR,
2261		    ("VM exit interruption info invalid: %#x", intr_info));
2262		vmx_trigger_hostintr(intr_info & 0xff);
2263
2264		/*
2265		 * This is special. We want to treat this as a 'handled'
2266		 * VM-exit but not increment the instruction pointer.
2267		 */
2268		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXTINT, 1);
2269		return (1);
2270	case EXIT_REASON_NMI_WINDOW:
2271		/* Exit to allow the pending virtual NMI to be injected */
2272		if (vm_nmi_pending(vmx->vm, vcpu))
2273			vmx_inject_nmi(vmx, vcpu);
2274		vmx_clear_nmi_window_exiting(vmx, vcpu);
2275		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NMI_WINDOW, 1);
2276		return (1);
2277	case EXIT_REASON_INOUT:
2278		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INOUT, 1);
2279		vmexit->exitcode = VM_EXITCODE_INOUT;
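		/*
		 * Decode the I/O exit qualification.  Per the bit tests
		 * below: bits 2:0 give the access size minus one, bit 3
		 * the direction (1 = in), bit 4 flags a string
		 * instruction, bit 5 a REP prefix and bits 31:16 carry
		 * the port number.
		 */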
2280		vmexit->u.inout.bytes = (qual & 0x7) + 1;
2281		vmexit->u.inout.in = in = (qual & 0x8) ? 1 : 0;
2282		vmexit->u.inout.string = (qual & 0x10) ? 1 : 0;
2283		vmexit->u.inout.rep = (qual & 0x20) ? 1 : 0;
2284		vmexit->u.inout.port = (uint16_t)(qual >> 16);
2285		vmexit->u.inout.eax = (uint32_t)(vmxctx->guest_rax);
2286		if (vmexit->u.inout.string) {
2287			inst_info = vmcs_read(VMCS_EXIT_INSTRUCTION_INFO);
2288			vmexit->exitcode = VM_EXITCODE_INOUT_STR;
2289			vis = &vmexit->u.inout_str;
2290			vmx_paging_info(&vis->paging);
2291			vis->rflags = vmcs_read(VMCS_GUEST_RFLAGS);
2292			vis->cr0 = vmcs_read(VMCS_GUEST_CR0);
2293			vis->index = inout_str_index(vmx, vcpu, in);
2294			vis->count = inout_str_count(vmx, vcpu, vis->inout.rep);
2295			vis->addrsize = inout_str_addrsize(inst_info);
2296			inout_str_seginfo(vmx, vcpu, inst_info, in, vis);
2297		}
2298		break;
2299	case EXIT_REASON_CPUID:
2300		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CPUID, 1);
2301		handled = vmx_handle_cpuid(vmx->vm, vcpu, vmxctx);
2302		break;
2303	case EXIT_REASON_EXCEPTION:
2304		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXCEPTION, 1);
2305		intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
2306		KASSERT((intr_info & VMCS_INTR_VALID) != 0,
2307		    ("VM exit interruption info invalid: %#x", intr_info));
2308
2309		/*
2310		 * If Virtual NMIs control is 1 and the VM-exit is due to a
2311		 * fault encountered during the execution of IRET then we must
2312		 * restore the state of "virtual-NMI blocking" before resuming
2313		 * the guest.
2314		 *
2315		 * See "Resuming Guest Software after Handling an Exception".
2316		 * See "Information for VM Exits Due to Vectored Events".
2317		 */
2318		if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 &&
2319		    (intr_info & 0xff) != IDT_DF &&
2320		    (intr_info & EXIT_QUAL_NMIUDTI) != 0)
2321			vmx_restore_nmi_blocking(vmx, vcpu);
2322
2323		/*
2324		 * The NMI has already been handled in vmx_exit_handle_nmi().
2325		 */
2326		if ((intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_NMI)
2327			return (1);
2328		break;
2329	case EXIT_REASON_EPT_FAULT:
2330		/*
2331		 * If 'gpa' lies within the address space allocated to
2332		 * memory then this must be a nested page fault; otherwise
2333		 * it must be an instruction that accesses MMIO space.
2334		 */
2335		gpa = vmcs_gpa();
2336		if (vm_mem_allocated(vmx->vm, gpa) ||
2337		    apic_access_fault(vmx, vcpu, gpa)) {
2338			vmexit->exitcode = VM_EXITCODE_PAGING;
2339			vmexit->u.paging.gpa = gpa;
2340			vmexit->u.paging.fault_type = ept_fault_type(qual);
2341			vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NESTED_FAULT, 1);
2342		} else if (ept_emulation_fault(qual)) {
2343			vmexit_inst_emul(vmexit, gpa, vmcs_gla());
2344			vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INST_EMUL, 1);
2345		}
2346		/*
2347		 * If Virtual NMIs control is 1 and the VM-exit is due to an
2348		 * EPT fault during the execution of IRET then we must restore
2349		 * the state of "virtual-NMI blocking" before resuming.
2350		 *
2351		 * See description of "NMI unblocking due to IRET" in
2352		 * "Exit Qualification for EPT Violations".
2353		 */
2354		if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 &&
2355		    (qual & EXIT_QUAL_NMIUDTI) != 0)
2356			vmx_restore_nmi_blocking(vmx, vcpu);
2357		break;
2358	case EXIT_REASON_VIRTUALIZED_EOI:
2359		vmexit->exitcode = VM_EXITCODE_IOAPIC_EOI;
2360		vmexit->u.ioapic_eoi.vector = qual & 0xFF;
2361		vmexit->inst_length = 0;	/* trap-like */
2362		break;
2363	case EXIT_REASON_APIC_ACCESS:
2364		handled = vmx_handle_apic_access(vmx, vcpu, vmexit);
2365		break;
2366	case EXIT_REASON_APIC_WRITE:
2367		/*
2368		 * APIC-write VM exit is trap-like so the %rip is already
2369		 * pointing to the next instruction.
2370		 */
2371		vmexit->inst_length = 0;
2372		vlapic = vm_lapic(vmx->vm, vcpu);
2373		handled = vmx_handle_apic_write(vmx, vcpu, vlapic, qual);
2374		break;
2375	case EXIT_REASON_XSETBV:
2376		handled = vmx_emulate_xsetbv(vmx, vcpu, vmexit);
2377		break;
2378	case EXIT_REASON_MONITOR:
2379		vmexit->exitcode = VM_EXITCODE_MONITOR;
2380		break;
2381	case EXIT_REASON_MWAIT:
2382		vmexit->exitcode = VM_EXITCODE_MWAIT;
2383		break;
2384	default:
2385		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_UNKNOWN, 1);
2386		break;
2387	}
2388
2389	if (handled) {
2390		/*
2391		 * It is possible that control is returned to userland
2392		 * even though we were able to handle the VM exit in the
2393		 * kernel.
2394		 *
2395		 * In such a case we want to make sure that the userland
2396		 * restarts guest execution at the instruction *after*
2397		 * the one we just processed. Therefore we update the
2398		 * guest rip in the VMCS and in 'vmexit'.
2399		 */
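		/*
		 * (Zeroing inst_length afterwards ensures that userland,
		 * or a later consumer of 'vmexit', does not advance %rip
		 * past the same instruction a second time.)
		 */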
2400		vmexit->rip += vmexit->inst_length;
2401		vmexit->inst_length = 0;
2402		vmcs_write(VMCS_GUEST_RIP, vmexit->rip);
2403	} else {
2404		if (vmexit->exitcode == VM_EXITCODE_BOGUS) {
2405			/*
2406			 * If this VM exit was not claimed by anybody then
2407			 * treat it as a generic VMX exit.
2408			 */
2409			vmexit->exitcode = VM_EXITCODE_VMX;
2410			vmexit->u.vmx.status = VM_SUCCESS;
2411			vmexit->u.vmx.inst_type = 0;
2412			vmexit->u.vmx.inst_error = 0;
2413		} else {
2414			/*
2415			 * The exitcode and collateral have been populated.
2416			 * The VM exit will be processed further in userland.
2417			 */
2418		}
2419	}
2420	return (handled);
2421}
2422
2423static __inline void
2424vmx_exit_inst_error(struct vmxctx *vmxctx, int rc, struct vm_exit *vmexit)
2425{
2426
2427	KASSERT(vmxctx->inst_fail_status != VM_SUCCESS,
2428	    ("vmx_exit_inst_error: invalid inst_fail_status %d",
2429	    vmxctx->inst_fail_status));
2430
2431	vmexit->inst_length = 0;
2432	vmexit->exitcode = VM_EXITCODE_VMX;
2433	vmexit->u.vmx.status = vmxctx->inst_fail_status;
2434	vmexit->u.vmx.inst_error = vmcs_instruction_error();
2435	vmexit->u.vmx.exit_reason = ~0;
2436	vmexit->u.vmx.exit_qualification = ~0;
2437
2438	switch (rc) {
2439	case VMX_VMRESUME_ERROR:
2440	case VMX_VMLAUNCH_ERROR:
2441	case VMX_INVEPT_ERROR:
2442		vmexit->u.vmx.inst_type = rc;
2443		break;
2444	default:
2445		panic("vm_exit_inst_error: vmx_enter_guest returned %d", rc);
2446	}
2447}
2448
2449/*
2450 * If the NMI-exiting VM execution control is set to '1' then an NMI in
2451 * non-root operation causes a VM-exit. NMI blocking is in effect so it is
2452 * sufficient to simply vector to the NMI handler via a software interrupt.
2453 * However, this must be done before maskable interrupts are enabled;
2454 * otherwise the "iret" issued by an interrupt handler will incorrectly
2455 * clear NMI blocking.
2456 */
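/*
 * (Vector 2 is the NMI vector, so the "int $2" below dispatches the
 * host's NMI handler directly while the NMI blocking established by the
 * VM-exit is still in effect.)
 */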
2457static __inline void
2458vmx_exit_handle_nmi(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit)
2459{
2460	uint32_t intr_info;
2461
2462	KASSERT((read_rflags() & PSL_I) == 0, ("interrupts enabled"));
2463
2464	if (vmexit->u.vmx.exit_reason != EXIT_REASON_EXCEPTION)
2465		return;
2466
2467	intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
2468	KASSERT((intr_info & VMCS_INTR_VALID) != 0,
2469	    ("VM exit interruption info invalid: %#x", intr_info));
2470
2471	if ((intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_NMI) {
2472		KASSERT((intr_info & 0xff) == IDT_NMI, ("VM exit due "
2473		    "to NMI has invalid vector: %#x", intr_info));
2474		VCPU_CTR0(vmx->vm, vcpuid, "Vectoring to NMI handler");
2475		__asm __volatile("int $2");
2476	}
2477}
2478
2479static int
2480vmx_run(void *arg, int vcpu, register_t startrip, pmap_t pmap,
2481    void *rendezvous_cookie, void *suspend_cookie)
2482{
2483	int rc, handled, launched;
2484	struct vmx *vmx;
2485	struct vm *vm;
2486	struct vmxctx *vmxctx;
2487	struct vmcs *vmcs;
2488	struct vm_exit *vmexit;
2489	struct vlapic *vlapic;
2490	uint64_t rip;
2491	uint32_t exit_reason;
2492
2493	vmx = arg;
2494	vm = vmx->vm;
2495	vmcs = &vmx->vmcs[vcpu];
2496	vmxctx = &vmx->ctx[vcpu];
2497	vlapic = vm_lapic(vm, vcpu);
2498	vmexit = vm_exitinfo(vm, vcpu);
2499	launched = 0;
2500
2501	KASSERT(vmxctx->pmap == pmap,
2502	    ("pmap %p different than ctx pmap %p", pmap, vmxctx->pmap));
2503
2504	vmx_msr_guest_enter(vmx, vcpu);
2505
2506	VMPTRLD(vmcs);
2507
2508	/*
2509	 * XXX
2510	 * We do this every time because we may setup the virtual machine
2511	 * from a different process than the one that actually runs it.
2512	 *
2513	 * If the life of a virtual machine was spent entirely in the context
2514	 * of a single process we could do this once in vmx_vminit().
2515	 */
2516	vmcs_write(VMCS_HOST_CR3, rcr3());
2517
2518	vmcs_write(VMCS_GUEST_RIP, startrip);
2519	vmx_set_pcpu_defaults(vmx, vcpu, pmap);
2520	do {
2521		handled = UNHANDLED;
2522
2523		/*
2524		 * Interrupts are disabled from this point on until the
2525		 * guest starts executing. This is done for the following
2526		 * reasons:
2527		 *
2528		 * If an AST is asserted on this thread after the check below,
2529		 * then the IPI_AST notification will not be lost, because it
2530		 * will cause a VM exit due to external interrupt as soon as
2531		 * the guest state is loaded.
2532		 *
2533		 * A posted interrupt after 'vmx_inject_interrupts()' will
2534		 * not be "lost" because it will be held pending in the host
2535		 * APIC because interrupts are disabled. The pending interrupt
2536		 * will be recognized as soon as the guest state is loaded.
2537		 *
2538		 * The same reasoning applies to the IPI generated by
2539		 * pmap_invalidate_ept().
2540		 */
2541		disable_intr();
2542		vmx_inject_interrupts(vmx, vcpu, vlapic);
2543
2544		/*
2545		 * Check for vcpu suspension after injecting events because
2546		 * vmx_inject_interrupts() can suspend the vcpu due to a
2547		 * triple fault.
2548		 */
2549		if (vcpu_suspended(suspend_cookie)) {
2550			enable_intr();
2551			vm_exit_suspended(vmx->vm, vcpu, vmcs_guest_rip());
2552			break;
2553		}
2554
2555		if (vcpu_rendezvous_pending(rendezvous_cookie)) {
2556			enable_intr();
2557			vm_exit_rendezvous(vmx->vm, vcpu, vmcs_guest_rip());
2558			break;
2559		}
2560
2561		if (vcpu_should_yield(vm, vcpu)) {
2562			enable_intr();
2563			vm_exit_astpending(vmx->vm, vcpu, vmcs_guest_rip());
2564			vmx_astpending_trace(vmx, vcpu, vmexit->rip);
2565			handled = HANDLED;
2566			break;
2567		}
2568
2569		vmx_run_trace(vmx, vcpu);
2570		rc = vmx_enter_guest(vmxctx, vmx, launched);
2571
2572		/* Collect some information for VM exit processing */
2573		vmexit->rip = rip = vmcs_guest_rip();
2574		vmexit->inst_length = vmexit_instruction_length();
2575		vmexit->u.vmx.exit_reason = exit_reason = vmcs_exit_reason();
2576		vmexit->u.vmx.exit_qualification = vmcs_exit_qualification();
2577
2578		if (rc == VMX_GUEST_VMEXIT) {
2579			vmx_exit_handle_nmi(vmx, vcpu, vmexit);
2580			enable_intr();
2581			handled = vmx_exit_process(vmx, vcpu, vmexit);
2582		} else {
2583			enable_intr();
2584			vmx_exit_inst_error(vmxctx, rc, vmexit);
2585		}
2586		launched = 1;
2587		vmx_exit_trace(vmx, vcpu, rip, exit_reason, handled);
2588	} while (handled);
2589
2590	/*
2591	 * If a VM exit has been handled then the exitcode must be BOGUS.
2592	 * If a VM exit is not handled then the exitcode must not be BOGUS.
2593	 */
2594	if ((handled && vmexit->exitcode != VM_EXITCODE_BOGUS) ||
2595	    (!handled && vmexit->exitcode == VM_EXITCODE_BOGUS)) {
2596		panic("Mismatch between handled (%d) and exitcode (%d)",
2597		      handled, vmexit->exitcode);
2598	}
2599
2600	if (!handled)
2601		vmm_stat_incr(vm, vcpu, VMEXIT_USERSPACE, 1);
2602
2603	VCPU_CTR1(vm, vcpu, "returning from vmx_run: exitcode %d",
2604	    vmexit->exitcode);
2605
2606	VMCLEAR(vmcs);
2607	vmx_msr_guest_exit(vmx, vcpu);
2608
2609	return (0);
2610}
2611
2612static void
2613vmx_vmcleanup(void *arg)
2614{
2615	int i;
2616	struct vmx *vmx = arg;
2617
2618	if (apic_access_virtualization(vmx, 0))
2619		vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE);
2620
2621	for (i = 0; i < VM_MAXCPU; i++)
2622		vpid_free(vmx->state[i].vpid);
2623
2624	free(vmx, M_VMX);
2625
2626	return;
2627}
2628
2629static register_t *
2630vmxctx_regptr(struct vmxctx *vmxctx, int reg)
2631{
2632
2633	switch (reg) {
2634	case VM_REG_GUEST_RAX:
2635		return (&vmxctx->guest_rax);
2636	case VM_REG_GUEST_RBX:
2637		return (&vmxctx->guest_rbx);
2638	case VM_REG_GUEST_RCX:
2639		return (&vmxctx->guest_rcx);
2640	case VM_REG_GUEST_RDX:
2641		return (&vmxctx->guest_rdx);
2642	case VM_REG_GUEST_RSI:
2643		return (&vmxctx->guest_rsi);
2644	case VM_REG_GUEST_RDI:
2645		return (&vmxctx->guest_rdi);
2646	case VM_REG_GUEST_RBP:
2647		return (&vmxctx->guest_rbp);
2648	case VM_REG_GUEST_R8:
2649		return (&vmxctx->guest_r8);
2650	case VM_REG_GUEST_R9:
2651		return (&vmxctx->guest_r9);
2652	case VM_REG_GUEST_R10:
2653		return (&vmxctx->guest_r10);
2654	case VM_REG_GUEST_R11:
2655		return (&vmxctx->guest_r11);
2656	case VM_REG_GUEST_R12:
2657		return (&vmxctx->guest_r12);
2658	case VM_REG_GUEST_R13:
2659		return (&vmxctx->guest_r13);
2660	case VM_REG_GUEST_R14:
2661		return (&vmxctx->guest_r14);
2662	case VM_REG_GUEST_R15:
2663		return (&vmxctx->guest_r15);
2664	case VM_REG_GUEST_CR2:
2665		return (&vmxctx->guest_cr2);
2666	default:
2667		break;
2668	}
2669	return (NULL);
2670}
2671
2672static int
2673vmxctx_getreg(struct vmxctx *vmxctx, int reg, uint64_t *retval)
2674{
2675	register_t *regp;
2676
2677	if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) {
2678		*retval = *regp;
2679		return (0);
2680	} else
2681		return (EINVAL);
2682}
2683
2684static int
2685vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val)
2686{
2687	register_t *regp;
2688
2689	if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) {
2690		*regp = val;
2691		return (0);
2692	} else
2693		return (EINVAL);
2694}
2695
2696static int
2697vmx_get_intr_shadow(struct vmx *vmx, int vcpu, int running, uint64_t *retval)
2698{
2699	uint64_t gi;
2700	int error;
2701
2702	error = vmcs_getreg(&vmx->vmcs[vcpu], running,
2703	    VMCS_IDENT(VMCS_GUEST_INTERRUPTIBILITY), &gi);
2704	*retval = (gi & HWINTR_BLOCKING) ? 1 : 0;
2705	return (error);
2706}
2707
2708static int
2709vmx_modify_intr_shadow(struct vmx *vmx, int vcpu, int running, uint64_t val)
2710{
2711	struct vmcs *vmcs;
2712	uint64_t gi;
2713	int error, ident;
2714
2715	/*
2716	 * Forcing the vcpu into an interrupt shadow is not supported.
2717	 */
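	/*
	 * Clearing the shadow (val == 0) is supported and simply strips
	 * the HWINTR_BLOCKING bits from the guest interruptibility state.
	 */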
2718	if (val) {
2719		error = EINVAL;
2720		goto done;
2721	}
2722
2723	vmcs = &vmx->vmcs[vcpu];
2724	ident = VMCS_IDENT(VMCS_GUEST_INTERRUPTIBILITY);
2725	error = vmcs_getreg(vmcs, running, ident, &gi);
2726	if (error == 0) {
2727		gi &= ~HWINTR_BLOCKING;
2728		error = vmcs_setreg(vmcs, running, ident, gi);
2729	}
2730done:
2731	VCPU_CTR2(vmx->vm, vcpu, "Setting intr_shadow to %#lx %s", val,
2732	    error ? "failed" : "succeeded");
2733	return (error);
2734}
2735
2736static int
2737vmx_shadow_reg(int reg)
2738{
2739	int shreg;
2740
2741	shreg = -1;
2742
2743	switch (reg) {
2744	case VM_REG_GUEST_CR0:
2745		shreg = VMCS_CR0_SHADOW;
2746		break;
2747	case VM_REG_GUEST_CR4:
2748		shreg = VMCS_CR4_SHADOW;
2749		break;
2750	default:
2751		break;
2752	}
2753
2754	return (shreg);
2755}
2756
2757static int
2758vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval)
2759{
2760	int running, hostcpu;
2761	struct vmx *vmx = arg;
2762
2763	running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
2764	if (running && hostcpu != curcpu)
2765		panic("vmx_getreg: %s%d is running", vm_name(vmx->vm), vcpu);
2766
2767	if (reg == VM_REG_GUEST_INTR_SHADOW)
2768		return (vmx_get_intr_shadow(vmx, vcpu, running, retval));
2769
2770	if (vmxctx_getreg(&vmx->ctx[vcpu], reg, retval) == 0)
2771		return (0);
2772
2773	return (vmcs_getreg(&vmx->vmcs[vcpu], running, reg, retval));
2774}
2775
2776static int
2777vmx_setreg(void *arg, int vcpu, int reg, uint64_t val)
2778{
2779	int error, hostcpu, running, shadow;
2780	uint64_t ctls;
2781	pmap_t pmap;
2782	struct vmx *vmx = arg;
2783
2784	running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
2785	if (running && hostcpu != curcpu)
2786		panic("vmx_setreg: %s%d is running", vm_name(vmx->vm), vcpu);
2787
2788	if (reg == VM_REG_GUEST_INTR_SHADOW)
2789		return (vmx_modify_intr_shadow(vmx, vcpu, running, val));
2790
2791	if (vmxctx_setreg(&vmx->ctx[vcpu], reg, val) == 0)
2792		return (0);
2793
2794	error = vmcs_setreg(&vmx->vmcs[vcpu], running, reg, val);
2795
2796	if (error == 0) {
2797		/*
2798		 * If the "load EFER" VM-entry control is 1 then the
2799		 * value of EFER.LMA must be identical to the "IA-32e mode
2800		 * guest" bit in the VM-entry controls.
2801		 */
2802		if ((entry_ctls & VM_ENTRY_LOAD_EFER) != 0 &&
2803		    (reg == VM_REG_GUEST_EFER)) {
2804			vmcs_getreg(&vmx->vmcs[vcpu], running,
2805				    VMCS_IDENT(VMCS_ENTRY_CTLS), &ctls);
2806			if (val & EFER_LMA)
2807				ctls |= VM_ENTRY_GUEST_LMA;
2808			else
2809				ctls &= ~VM_ENTRY_GUEST_LMA;
2810			vmcs_setreg(&vmx->vmcs[vcpu], running,
2811				    VMCS_IDENT(VMCS_ENTRY_CTLS), ctls);
2812		}
2813
2814		shadow = vmx_shadow_reg(reg);
2815		if (shadow > 0) {
2816			/*
2817			 * Store the unmodified value in the shadow
2818			 */
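			/*
			 * (Guest reads of %cr0/%cr4 bits covered by the
			 * guest/host mask are satisfied from the read
			 * shadow, so keeping the caller's unmodified
			 * value there lets the guest observe exactly
			 * what it wrote.)
			 */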
2819			error = vmcs_setreg(&vmx->vmcs[vcpu], running,
2820				    VMCS_IDENT(shadow), val);
2821		}
2822
2823		if (reg == VM_REG_GUEST_CR3) {
2824			/*
2825			 * Invalidate the guest vcpu's TLB mappings to emulate
2826			 * the behavior of updating %cr3.
2827			 *
2828			 * XXX the processor retains global mappings when %cr3
2829			 * is updated but vmx_invvpid() does not.
2830			 */
2831			pmap = vmx->ctx[vcpu].pmap;
2832			vmx_invvpid(vmx, vcpu, pmap, running);
2833		}
2834	}
2835
2836	return (error);
2837}
2838
2839static int
2840vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
2841{
2842	int hostcpu, running;
2843	struct vmx *vmx = arg;
2844
2845	running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
2846	if (running && hostcpu != curcpu)
2847		panic("vmx_getdesc: %s%d is running", vm_name(vmx->vm), vcpu);
2848
2849	return (vmcs_getdesc(&vmx->vmcs[vcpu], running, reg, desc));
2850}
2851
2852static int
2853vmx_setdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
2854{
2855	int hostcpu, running;
2856	struct vmx *vmx = arg;
2857
2858	running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
2859	if (running && hostcpu != curcpu)
2860		panic("vmx_setdesc: %s%d is running", vm_name(vmx->vm), vcpu);
2861
2862	return (vmcs_setdesc(&vmx->vmcs[vcpu], running, reg, desc));
2863}
2864
2865static int
2866vmx_getcap(void *arg, int vcpu, int type, int *retval)
2867{
2868	struct vmx *vmx = arg;
2869	int vcap;
2870	int ret;
2871
2872	ret = ENOENT;
2873
2874	vcap = vmx->cap[vcpu].set;
2875
2876	switch (type) {
2877	case VM_CAP_HALT_EXIT:
2878		if (cap_halt_exit)
2879			ret = 0;
2880		break;
2881	case VM_CAP_PAUSE_EXIT:
2882		if (cap_pause_exit)
2883			ret = 0;
2884		break;
2885	case VM_CAP_MTRAP_EXIT:
2886		if (cap_monitor_trap)
2887			ret = 0;
2888		break;
2889	case VM_CAP_UNRESTRICTED_GUEST:
2890		if (cap_unrestricted_guest)
2891			ret = 0;
2892		break;
2893	case VM_CAP_ENABLE_INVPCID:
2894		if (cap_invpcid)
2895			ret = 0;
2896		break;
2897	default:
2898		break;
2899	}
2900
2901	if (ret == 0)
2902		*retval = (vcap & (1 << type)) ? 1 : 0;
2903
2904	return (ret);
2905}
2906
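/*
 * Toggle an optional capability for a vcpu by flipping the matching bit
 * in the cached primary or secondary processor-based controls and
 * writing the result back to the (briefly loaded) VMCS.  A sketch of
 * the expected use by a caller:
 *
 *	error = vmx_setcap(vmx, vcpu, VM_CAP_HALT_EXIT, 1);
 *
 * which, if the capability is supported, sets PROCBASED_HLT_EXITING for
 * that vcpu and records the setting in vmx->cap[vcpu].set.
 */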
2907static int
2908vmx_setcap(void *arg, int vcpu, int type, int val)
2909{
2910	struct vmx *vmx = arg;
2911	struct vmcs *vmcs = &vmx->vmcs[vcpu];
2912	uint32_t baseval;
2913	uint32_t *pptr;
2914	int error;
2915	int flag;
2916	int reg;
2917	int retval;
2918
2919	retval = ENOENT;
2920	pptr = NULL;
2921
2922	switch (type) {
2923	case VM_CAP_HALT_EXIT:
2924		if (cap_halt_exit) {
2925			retval = 0;
2926			pptr = &vmx->cap[vcpu].proc_ctls;
2927			baseval = *pptr;
2928			flag = PROCBASED_HLT_EXITING;
2929			reg = VMCS_PRI_PROC_BASED_CTLS;
2930		}
2931		break;
2932	case VM_CAP_MTRAP_EXIT:
2933		if (cap_monitor_trap) {
2934			retval = 0;
2935			pptr = &vmx->cap[vcpu].proc_ctls;
2936			baseval = *pptr;
2937			flag = PROCBASED_MTF;
2938			reg = VMCS_PRI_PROC_BASED_CTLS;
2939		}
2940		break;
2941	case VM_CAP_PAUSE_EXIT:
2942		if (cap_pause_exit) {
2943			retval = 0;
2944			pptr = &vmx->cap[vcpu].proc_ctls;
2945			baseval = *pptr;
2946			flag = PROCBASED_PAUSE_EXITING;
2947			reg = VMCS_PRI_PROC_BASED_CTLS;
2948		}
2949		break;
2950	case VM_CAP_UNRESTRICTED_GUEST:
2951		if (cap_unrestricted_guest) {
2952			retval = 0;
2953			pptr = &vmx->cap[vcpu].proc_ctls2;
2954			baseval = *pptr;
2955			flag = PROCBASED2_UNRESTRICTED_GUEST;
2956			reg = VMCS_SEC_PROC_BASED_CTLS;
2957		}
2958		break;
2959	case VM_CAP_ENABLE_INVPCID:
2960		if (cap_invpcid) {
2961			retval = 0;
2962			pptr = &vmx->cap[vcpu].proc_ctls2;
2963			baseval = *pptr;
2964			flag = PROCBASED2_ENABLE_INVPCID;
2965			reg = VMCS_SEC_PROC_BASED_CTLS;
2966		}
2967		break;
2968	default:
2969		break;
2970	}
2971
2972	if (retval == 0) {
2973		if (val) {
2974			baseval |= flag;
2975		} else {
2976			baseval &= ~flag;
2977		}
2978		VMPTRLD(vmcs);
2979		error = vmwrite(reg, baseval);
2980		VMCLEAR(vmcs);
2981
2982		if (error) {
2983			retval = error;
2984		} else {
2985			/*
2986			 * Update the optional stored flags and record
2987			 * the new setting.
2988			 */
2989			if (pptr != NULL) {
2990				*pptr = baseval;
2991			}
2992
2993			if (val) {
2994				vmx->cap[vcpu].set |= (1 << type);
2995			} else {
2996				vmx->cap[vcpu].set &= ~(1 << type);
2997			}
2998		}
2999	}
3000
3001	return (retval);
3002}
3003
3004struct vlapic_vtx {
3005	struct vlapic	vlapic;
3006	struct pir_desc	*pir_desc;
3007	struct vmx	*vmx;
3008};
3009
3010#define	VMX_CTR_PIR(vm, vcpuid, pir_desc, notify, vector, level, msg)	\
3011do {									\
3012	VCPU_CTR2(vm, vcpuid, msg " assert %s-triggered vector %d",	\
3013	    level ? "level" : "edge", vector);				\
3014	VCPU_CTR1(vm, vcpuid, msg " pir0 0x%016lx", pir_desc->pir[0]);	\
3015	VCPU_CTR1(vm, vcpuid, msg " pir1 0x%016lx", pir_desc->pir[1]);	\
3016	VCPU_CTR1(vm, vcpuid, msg " pir2 0x%016lx", pir_desc->pir[2]);	\
3017	VCPU_CTR1(vm, vcpuid, msg " pir3 0x%016lx", pir_desc->pir[3]);	\
3018	VCPU_CTR1(vm, vcpuid, msg " notify: %s", notify ? "yes" : "no");\
3019} while (0)
3020
3021/*
3022 * vlapic->ops handlers that utilize the APICv hardware assist described in
3023 * Chapter 29 of the Intel SDM.
3024 */
3025static int
3026vmx_set_intr_ready(struct vlapic *vlapic, int vector, bool level)
3027{
3028	struct vlapic_vtx *vlapic_vtx;
3029	struct pir_desc *pir_desc;
3030	uint64_t mask;
3031	int idx, notify;
3032
3033	vlapic_vtx = (struct vlapic_vtx *)vlapic;
3034	pir_desc = vlapic_vtx->pir_desc;
3035
3036	/*
3037	 * Keep track of interrupt requests in the PIR descriptor. This is
3038	 * because the virtual APIC page pointed to by the VMCS cannot be
3039	 * modified if the vcpu is running.
3040	 */
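	/*
	 * For example, vector 0x41 (65) lands in pir[1] with mask
	 * 1UL << 1; the 256 possible vectors map onto the four 64-bit
	 * words of the descriptor.
	 */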
3041	idx = vector / 64;
3042	mask = 1UL << (vector % 64);
3043	atomic_set_long(&pir_desc->pir[idx], mask);
3044	notify = atomic_cmpset_long(&pir_desc->pending, 0, 1);
3045
3046	VMX_CTR_PIR(vlapic->vm, vlapic->vcpuid, pir_desc, notify, vector,
3047	    level, "vmx_set_intr_ready");
3048	return (notify);
3049}
3050
3051static int
3052vmx_pending_intr(struct vlapic *vlapic, int *vecptr)
3053{
3054	struct vlapic_vtx *vlapic_vtx;
3055	struct pir_desc *pir_desc;
3056	struct LAPIC *lapic;
3057	uint64_t pending, pirval;
3058	uint32_t ppr, vpr;
3059	int i;
3060
3061	/*
3062	 * This function is only expected to be called from the 'HLT' exit
3063	 * handler which does not care about the vector that is pending.
3064	 */
3065	KASSERT(vecptr == NULL, ("vmx_pending_intr: vecptr must be NULL"));
3066
3067	vlapic_vtx = (struct vlapic_vtx *)vlapic;
3068	pir_desc = vlapic_vtx->pir_desc;
3069
3070	pending = atomic_load_acq_long(&pir_desc->pending);
3071	if (!pending)
3072		return (0);	/* common case */
3073
3074	/*
3075	 * If there is an interrupt pending then it will be recognized only
3076	 * if its priority is greater than the processor priority.
3077	 *
3078	 * Special case: if the processor priority is zero then any pending
3079	 * interrupt will be recognized.
3080	 */
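	/*
	 * For example, a pending vector of 0x51 has priority class 0x50:
	 * it would be recognized with a PPR of 0x40 but held off with a
	 * PPR of 0x60.  The loop below finds the highest-priority bit set
	 * in the PIR and makes exactly that comparison.
	 */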
3081	lapic = vlapic->apic_page;
3082	ppr = lapic->ppr & 0xf0;
3083	if (ppr == 0)
3084		return (1);
3085
3086	VCPU_CTR1(vlapic->vm, vlapic->vcpuid, "HLT with non-zero PPR %d",
3087	    lapic->ppr);
3088
3089	for (i = 3; i >= 0; i--) {
3090		pirval = pir_desc->pir[i];
3091		if (pirval != 0) {
3092			vpr = (i * 64 + flsl(pirval) - 1) & 0xf0;
3093			return (vpr > ppr);
3094		}
3095	}
3096	return (0);
3097}
3098
3099static void
3100vmx_intr_accepted(struct vlapic *vlapic, int vector)
3101{
3102
3103	panic("vmx_intr_accepted: not expected to be called");
3104}
3105
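/*
 * Program the EOI-exit bitmap bit for 'vector': level-triggered vectors
 * get the bit set so that the guest's EOI causes a virtualized-EOI exit
 * (surfaced above as VM_EXITCODE_IOAPIC_EOI), edge-triggered vectors get
 * it cleared.  The 256 bits are spread across the EOI_EXIT VMCS fields,
 * 64 bits apiece, hence the vector % 64 mask below.
 */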
3106static void
3107vmx_set_tmr(struct vlapic *vlapic, int vector, bool level)
3108{
3109	struct vlapic_vtx *vlapic_vtx;
3110	struct vmx *vmx;
3111	struct vmcs *vmcs;
3112	uint64_t mask, val;
3113
3114	KASSERT(vector >= 0 && vector <= 255, ("invalid vector %d", vector));
3115	KASSERT(!vcpu_is_running(vlapic->vm, vlapic->vcpuid, NULL),
3116	    ("vmx_set_tmr: vcpu cannot be running"));
3117
3118	vlapic_vtx = (struct vlapic_vtx *)vlapic;
3119	vmx = vlapic_vtx->vmx;
3120	vmcs = &vmx->vmcs[vlapic->vcpuid];
3121	mask = 1UL << (vector % 64);
3122
3123	VMPTRLD(vmcs);
3124	val = vmcs_read(VMCS_EOI_EXIT(vector));
3125	if (level)
3126		val |= mask;
3127	else
3128		val &= ~mask;
3129	vmcs_write(VMCS_EOI_EXIT(vector), val);
3130	VMCLEAR(vmcs);
3131}
3132
3133static void
3134vmx_enable_x2apic_mode(struct vlapic *vlapic)
3135{
3136	struct vmx *vmx;
3137	struct vmcs *vmcs;
3138	uint32_t proc_ctls2;
3139	int vcpuid, error;
3140
3141	vcpuid = vlapic->vcpuid;
3142	vmx = ((struct vlapic_vtx *)vlapic)->vmx;
3143	vmcs = &vmx->vmcs[vcpuid];
3144
3145	proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
3146	KASSERT((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) != 0,
3147	    ("%s: invalid proc_ctls2 %#x", __func__, proc_ctls2));
3148
3149	proc_ctls2 &= ~PROCBASED2_VIRTUALIZE_APIC_ACCESSES;
3150	proc_ctls2 |= PROCBASED2_VIRTUALIZE_X2APIC_MODE;
3151	vmx->cap[vcpuid].proc_ctls2 = proc_ctls2;
3152
3153	VMPTRLD(vmcs);
3154	vmcs_write(VMCS_SEC_PROC_BASED_CTLS, proc_ctls2);
3155	VMCLEAR(vmcs);
3156
3157	if (vlapic->vcpuid == 0) {
3158		/*
3159		 * The nested page table mappings are shared by all vcpus
3160		 * so unmap the APIC access page just once.
3161		 */
3162		error = vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE);
3163		KASSERT(error == 0, ("%s: vm_unmap_mmio error %d",
3164		    __func__, error));
3165
3166		/*
3167		 * The MSR bitmap is shared by all vcpus so modify it only
3168		 * once in the context of vcpu 0.
3169		 */
3170		error = vmx_allow_x2apic_msrs(vmx);
3171		KASSERT(error == 0, ("%s: vmx_allow_x2apic_msrs error %d",
3172		    __func__, error));
3173	}
3174}
3175
3176static void
3177vmx_post_intr(struct vlapic *vlapic, int hostcpu)
3178{
3179
3180	ipi_cpu(hostcpu, pirvec);
3181}
3182
3183/*
3184 * Transfer the pending interrupts in the PIR descriptor to the IRR
3185 * in the virtual APIC page.
3186 */
3187static void
3188vmx_inject_pir(struct vlapic *vlapic)
3189{
3190	struct vlapic_vtx *vlapic_vtx;
3191	struct pir_desc *pir_desc;
3192	struct LAPIC *lapic;
3193	uint64_t val, pirval;
3194	int rvi, pirbase = -1;
3195	uint16_t intr_status_old, intr_status_new;
3196
3197	vlapic_vtx = (struct vlapic_vtx *)vlapic;
3198	pir_desc = vlapic_vtx->pir_desc;
3199	if (atomic_cmpset_long(&pir_desc->pending, 1, 0) == 0) {
3200		VCPU_CTR0(vlapic->vm, vlapic->vcpuid, "vmx_inject_pir: "
3201		    "no posted interrupt pending");
3202		return;
3203	}
3204
3205	pirval = 0;
3206	pirbase = -1;
3207	lapic = vlapic->apic_page;
3208
3209	val = atomic_readandclear_long(&pir_desc->pir[0]);
3210	if (val != 0) {
3211		lapic->irr0 |= val;
3212		lapic->irr1 |= val >> 32;
3213		pirbase = 0;
3214		pirval = val;
3215	}
3216
3217	val = atomic_readandclear_long(&pir_desc->pir[1]);
3218	if (val != 0) {
3219		lapic->irr2 |= val;
3220		lapic->irr3 |= val >> 32;
3221		pirbase = 64;
3222		pirval = val;
3223	}
3224
3225	val = atomic_readandclear_long(&pir_desc->pir[2]);
3226	if (val != 0) {
3227		lapic->irr4 |= val;
3228		lapic->irr5 |= val >> 32;
3229		pirbase = 128;
3230		pirval = val;
3231	}
3232
3233	val = atomic_readandclear_long(&pir_desc->pir[3]);
3234	if (val != 0) {
3235		lapic->irr6 |= val;
3236		lapic->irr7 |= val >> 32;
3237		pirbase = 192;
3238		pirval = val;
3239	}
3240
3241	VLAPIC_CTR_IRR(vlapic, "vmx_inject_pir");
3242
3243	/*
3244	 * Update RVI so the processor can evaluate pending virtual
3245	 * interrupts on VM-entry.
3246	 *
3247	 * It is possible for pirval to be 0 here, even though the
3248	 * pending bit has been set. The scenario is:
3249	 * CPU-Y is sending a posted interrupt to CPU-X, which
3250	 * is running a guest and processing posted interrupts in h/w.
3251	 * CPU-X will eventually exit and the state seen in s/w is
3252	 * the pending bit set, but no PIR bits set.
3253	 *
3254	 *      CPU-X                      CPU-Y
3255	 *   (vm running)                (host running)
3256	 *   rx posted interrupt
3257	 *   CLEAR pending bit
3258	 *				 SET PIR bit
3259	 *   READ/CLEAR PIR bits
3260	 *				 SET pending bit
3261	 *   (vm exit)
3262	 *   pending bit set, PIR 0
3263	 */
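	/*
	 * (The guest interrupt status is a 16-bit field whose low byte is
	 * RVI and whose high byte is SVI, which is why only the low byte
	 * is replaced below and the write happens only when it raises
	 * RVI.)
	 */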
3264	if (pirval != 0) {
3265		rvi = pirbase + flsl(pirval) - 1;
3266		intr_status_old = vmcs_read(VMCS_GUEST_INTR_STATUS);
3267		intr_status_new = (intr_status_old & 0xFF00) | rvi;
3268		if (intr_status_new > intr_status_old) {
3269			vmcs_write(VMCS_GUEST_INTR_STATUS, intr_status_new);
3270			VCPU_CTR2(vlapic->vm, vlapic->vcpuid, "vmx_inject_pir: "
3271			    "guest_intr_status changed from 0x%04x to 0x%04x",
3272			    intr_status_old, intr_status_new);
3273		}
3274	}
3275}
3276
3277static struct vlapic *
3278vmx_vlapic_init(void *arg, int vcpuid)
3279{
3280	struct vmx *vmx;
3281	struct vlapic *vlapic;
3282	struct vlapic_vtx *vlapic_vtx;
3283
3284	vmx = arg;
3285
3286	vlapic = malloc(sizeof(struct vlapic_vtx), M_VLAPIC, M_WAITOK | M_ZERO);
3287	vlapic->vm = vmx->vm;
3288	vlapic->vcpuid = vcpuid;
3289	vlapic->apic_page = (struct LAPIC *)&vmx->apic_page[vcpuid];
3290
3291	vlapic_vtx = (struct vlapic_vtx *)vlapic;
3292	vlapic_vtx->pir_desc = &vmx->pir_desc[vcpuid];
3293	vlapic_vtx->vmx = vmx;
3294
3295	if (virtual_interrupt_delivery) {
3296		vlapic->ops.set_intr_ready = vmx_set_intr_ready;
3297		vlapic->ops.pending_intr = vmx_pending_intr;
3298		vlapic->ops.intr_accepted = vmx_intr_accepted;
3299		vlapic->ops.set_tmr = vmx_set_tmr;
3300		vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode;
3301	}
3302
3303	if (posted_interrupts)
3304		vlapic->ops.post_intr = vmx_post_intr;
3305
3306	vlapic_init(vlapic);
3307
3308	return (vlapic);
3309}
3310
3311static void
3312vmx_vlapic_cleanup(void *arg, struct vlapic *vlapic)
3313{
3314
3315	vlapic_cleanup(vlapic);
3316	free(vlapic, M_VLAPIC);
3317}
3318
3319struct vmm_ops vmm_ops_intel = {
3320	vmx_init,
3321	vmx_cleanup,
3322	vmx_restore,
3323	vmx_vminit,
3324	vmx_run,
3325	vmx_vmcleanup,
3326	vmx_getreg,
3327	vmx_setreg,
3328	vmx_getdesc,
3329	vmx_setdesc,
3330	vmx_getcap,
3331	vmx_setcap,
3332	ept_vmspace_alloc,
3333	ept_vmspace_free,
3334	vmx_vlapic_init,
3335	vmx_vlapic_cleanup,
3336};
3337