/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/10/sys/amd64/vmm/intel/vmx.c 276403 2014-12-30 08:24:14Z neel $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/amd64/vmm/intel/vmx.c 276403 2014-12-30 08:24:14Z neel $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/smp.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/psl.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>
#include <machine/segments.h>
#include <machine/smp.h>
#include <machine/specialreg.h>
#include <machine/vmparam.h>

#include <machine/vmm.h>
#include <machine/vmm_dev.h>
#include <machine/vmm_instruction_emul.h>
#include "vmm_lapic.h"
#include "vmm_host.h"
#include "vmm_ioport.h"
#include "vmm_ipi.h"
#include "vmm_ktr.h"
#include "vmm_stat.h"
#include "vatpic.h"
#include "vlapic.h"
#include "vlapic_priv.h"

#include "ept.h"
#include "vmx_cpufunc.h"
#include "vmx.h"
#include "vmx_msr.h"
#include "x86.h"
#include "vmx_controls.h"

#define	PINBASED_CTLS_ONE_SETTING					\
	(PINBASED_EXTINT_EXITING	|				\
	 PINBASED_NMI_EXITING		|				\
	 PINBASED_VIRTUAL_NMI)
#define	PINBASED_CTLS_ZERO_SETTING	0

#define PROCBASED_CTLS_WINDOW_SETTING					\
	(PROCBASED_INT_WINDOW_EXITING	|				\
	 PROCBASED_NMI_WINDOW_EXITING)

#define	PROCBASED_CTLS_ONE_SETTING 					\
	(PROCBASED_SECONDARY_CONTROLS	|				\
	 PROCBASED_MWAIT_EXITING	|				\
	 PROCBASED_MONITOR_EXITING	|				\
	 PROCBASED_IO_EXITING		|				\
	 PROCBASED_MSR_BITMAPS		|				\
	 PROCBASED_CTLS_WINDOW_SETTING	|				\
	 PROCBASED_CR8_LOAD_EXITING	|				\
	 PROCBASED_CR8_STORE_EXITING)
#define	PROCBASED_CTLS_ZERO_SETTING	\
	(PROCBASED_CR3_LOAD_EXITING |	\
	PROCBASED_CR3_STORE_EXITING |	\
	PROCBASED_IO_BITMAPS)

#define	PROCBASED_CTLS2_ONE_SETTING	PROCBASED2_ENABLE_EPT
#define	PROCBASED_CTLS2_ZERO_SETTING	0

#define	VM_EXIT_CTLS_ONE_SETTING					\
	(VM_EXIT_HOST_LMA			|			\
	VM_EXIT_SAVE_EFER			|			\
	VM_EXIT_LOAD_EFER			|			\
	VM_EXIT_ACKNOWLEDGE_INTERRUPT		|			\
	VM_EXIT_SAVE_PAT			|			\
	VM_EXIT_LOAD_PAT)

#define	VM_EXIT_CTLS_ZERO_SETTING	VM_EXIT_SAVE_DEBUG_CONTROLS

#define	VM_ENTRY_CTLS_ONE_SETTING	(VM_ENTRY_LOAD_EFER | VM_ENTRY_LOAD_PAT)

#define	VM_ENTRY_CTLS_ZERO_SETTING					\
	(VM_ENTRY_LOAD_DEBUG_CONTROLS		|			\
	VM_ENTRY_INTO_SMM			|			\
	VM_ENTRY_DEACTIVATE_DUAL_MONITOR)

#define	HANDLED		1
#define	UNHANDLED	0

static MALLOC_DEFINE(M_VMX, "vmx", "vmx");
static MALLOC_DEFINE(M_VLAPIC, "vlapic", "vlapic");

SYSCTL_DECL(_hw_vmm);
SYSCTL_NODE(_hw_vmm, OID_AUTO, vmx, CTLFLAG_RW, NULL, NULL);

int vmxon_enabled[MAXCPU];
static char vmxon_region[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE);

static uint32_t pinbased_ctls, procbased_ctls, procbased_ctls2;
static uint32_t exit_ctls, entry_ctls;

static uint64_t cr0_ones_mask, cr0_zeros_mask;
SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_ones_mask, CTLFLAG_RD,
	     &cr0_ones_mask, 0, NULL);
SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_zeros_mask, CTLFLAG_RD,
	     &cr0_zeros_mask, 0, NULL);

static uint64_t cr4_ones_mask, cr4_zeros_mask;
SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_ones_mask, CTLFLAG_RD,
	     &cr4_ones_mask, 0, NULL);
SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_zeros_mask, CTLFLAG_RD,
	     &cr4_zeros_mask, 0, NULL);

static int vmx_initialized;
SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, initialized, CTLFLAG_RD,
	   &vmx_initialized, 0, "Intel VMX initialized");

/*
 * Optional capabilities
 */
static SYSCTL_NODE(_hw_vmm_vmx, OID_AUTO, cap, CTLFLAG_RW, NULL, NULL);

static int cap_halt_exit;
SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, halt_exit, CTLFLAG_RD, &cap_halt_exit, 0,
    "HLT triggers a VM-exit");

static int cap_pause_exit;
SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, pause_exit, CTLFLAG_RD, &cap_pause_exit,
    0, "PAUSE triggers a VM-exit");

static int cap_unrestricted_guest;
SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, unrestricted_guest, CTLFLAG_RD,
    &cap_unrestricted_guest, 0, "Unrestricted guests");

static int cap_monitor_trap;
SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, monitor_trap, CTLFLAG_RD,
    &cap_monitor_trap, 0, "Monitor trap flag");

static int cap_invpcid;
SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, invpcid, CTLFLAG_RD, &cap_invpcid,
    0, "Guests are allowed to use INVPCID");

static int virtual_interrupt_delivery;
SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, virtual_interrupt_delivery, CTLFLAG_RD,
    &virtual_interrupt_delivery, 0, "APICv virtual interrupt delivery support");

static int posted_interrupts;
SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, posted_interrupts, CTLFLAG_RD,
    &posted_interrupts, 0, "APICv posted interrupt support");

static int pirvec;
SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, posted_interrupt_vector, CTLFLAG_RD,
    &pirvec, 0, "APICv posted interrupt vector");

static struct unrhdr *vpid_unr;
static u_int vpid_alloc_failed;
SYSCTL_UINT(_hw_vmm_vmx, OID_AUTO, vpid_alloc_failed, CTLFLAG_RD,
	    &vpid_alloc_failed, 0, NULL);

/*
 * Use the last page below 4GB as the APIC access address. This address is
 * occupied by the boot firmware so it is guaranteed that it will not conflict
 * with a page in system memory.
 */
#define	APIC_ACCESS_ADDRESS	0xFFFFF000

static int vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc);
static int vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval);
static int vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val);
static void vmx_inject_pir(struct vlapic *vlapic);

#ifdef KTR
static const char *
exit_reason_to_str(int reason)
{
	static char reasonbuf[32];

	switch (reason) {
	case EXIT_REASON_EXCEPTION:
		return "exception";
	case EXIT_REASON_EXT_INTR:
		return "extint";
	case EXIT_REASON_TRIPLE_FAULT:
		return "triplefault";
	case EXIT_REASON_INIT:
		return "init";
	case EXIT_REASON_SIPI:
		return "sipi";
	case EXIT_REASON_IO_SMI:
		return "iosmi";
	case EXIT_REASON_SMI:
		return "smi";
	case EXIT_REASON_INTR_WINDOW:
		return "intrwindow";
	case EXIT_REASON_NMI_WINDOW:
		return "nmiwindow";
	case EXIT_REASON_TASK_SWITCH:
		return "taskswitch";
	case EXIT_REASON_CPUID:
		return "cpuid";
	case EXIT_REASON_GETSEC:
		return "getsec";
	case EXIT_REASON_HLT:
		return "hlt";
	case EXIT_REASON_INVD:
		return "invd";
	case EXIT_REASON_INVLPG:
		return "invlpg";
	case EXIT_REASON_RDPMC:
		return "rdpmc";
	case EXIT_REASON_RDTSC:
		return "rdtsc";
	case EXIT_REASON_RSM:
		return "rsm";
	case EXIT_REASON_VMCALL:
		return "vmcall";
	case EXIT_REASON_VMCLEAR:
		return "vmclear";
	case EXIT_REASON_VMLAUNCH:
		return "vmlaunch";
	case EXIT_REASON_VMPTRLD:
		return "vmptrld";
	case EXIT_REASON_VMPTRST:
		return "vmptrst";
	case EXIT_REASON_VMREAD:
		return "vmread";
	case EXIT_REASON_VMRESUME:
		return "vmresume";
	case EXIT_REASON_VMWRITE:
		return "vmwrite";
	case EXIT_REASON_VMXOFF:
		return "vmxoff";
	case EXIT_REASON_VMXON:
		return "vmxon";
	case EXIT_REASON_CR_ACCESS:
		return "craccess";
	case EXIT_REASON_DR_ACCESS:
		return "draccess";
	case EXIT_REASON_INOUT:
		return "inout";
	case EXIT_REASON_RDMSR:
		return "rdmsr";
	case EXIT_REASON_WRMSR:
		return "wrmsr";
	case EXIT_REASON_INVAL_VMCS:
		return "invalvmcs";
	case EXIT_REASON_INVAL_MSR:
		return "invalmsr";
	case EXIT_REASON_MWAIT:
		return "mwait";
	case EXIT_REASON_MTF:
		return "mtf";
	case EXIT_REASON_MONITOR:
		return "monitor";
	case EXIT_REASON_PAUSE:
		return "pause";
	case EXIT_REASON_MCE_DURING_ENTRY:
		return "mce-during-entry";
	case EXIT_REASON_TPR:
		return "tpr";
	case EXIT_REASON_APIC_ACCESS:
		return "apic-access";
	case EXIT_REASON_GDTR_IDTR:
		return "gdtridtr";
	case EXIT_REASON_LDTR_TR:
		return "ldtrtr";
	case EXIT_REASON_EPT_FAULT:
		return "eptfault";
	case EXIT_REASON_EPT_MISCONFIG:
		return "eptmisconfig";
	case EXIT_REASON_INVEPT:
		return "invept";
	case EXIT_REASON_RDTSCP:
		return "rdtscp";
	case EXIT_REASON_VMX_PREEMPT:
		return "vmxpreempt";
	case EXIT_REASON_INVVPID:
		return "invvpid";
	case EXIT_REASON_WBINVD:
		return "wbinvd";
	case EXIT_REASON_XSETBV:
		return "xsetbv";
	case EXIT_REASON_APIC_WRITE:
		return "apic-write";
	default:
		snprintf(reasonbuf, sizeof(reasonbuf), "%d", reason);
		return (reasonbuf);
	}
}
#endif	/* KTR */

static int
vmx_allow_x2apic_msrs(struct vmx *vmx)
{
	int i, error;

	error = 0;

	/*
	 * Allow readonly access to the following x2APIC MSRs from the guest.
	 */
	error += guest_msr_ro(vmx, MSR_APIC_ID);
	error += guest_msr_ro(vmx, MSR_APIC_VERSION);
	error += guest_msr_ro(vmx, MSR_APIC_LDR);
	error += guest_msr_ro(vmx, MSR_APIC_SVR);

	for (i = 0; i < 8; i++)
		error += guest_msr_ro(vmx, MSR_APIC_ISR0 + i);

	for (i = 0; i < 8; i++)
		error += guest_msr_ro(vmx, MSR_APIC_TMR0 + i);

	for (i = 0; i < 8; i++)
		error += guest_msr_ro(vmx, MSR_APIC_IRR0 + i);

	error += guest_msr_ro(vmx, MSR_APIC_ESR);
	error += guest_msr_ro(vmx, MSR_APIC_LVT_TIMER);
	error += guest_msr_ro(vmx, MSR_APIC_LVT_THERMAL);
	error += guest_msr_ro(vmx, MSR_APIC_LVT_PCINT);
	error += guest_msr_ro(vmx, MSR_APIC_LVT_LINT0);
	error += guest_msr_ro(vmx, MSR_APIC_LVT_LINT1);
	error += guest_msr_ro(vmx, MSR_APIC_LVT_ERROR);
	error += guest_msr_ro(vmx, MSR_APIC_ICR_TIMER);
	error += guest_msr_ro(vmx, MSR_APIC_DCR_TIMER);
	error += guest_msr_ro(vmx, MSR_APIC_ICR);

	/*
	 * Allow TPR, EOI and SELF_IPI MSRs to be read and written by the guest.
	 *
	 * These registers get special treatment described in the section
	 * "Virtualizing MSR-Based APIC Accesses".
	 */
	error += guest_msr_rw(vmx, MSR_APIC_TPR);
	error += guest_msr_rw(vmx, MSR_APIC_EOI);
	error += guest_msr_rw(vmx, MSR_APIC_SELF_IPI);

	return (error);
}

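/*
 * vmx_fix_cr0() and vmx_fix_cr4() force the bits that VMX operation requires
 * to be 1 and clear the bits that it requires to be 0, using the masks
 * derived in vmx_init() from the MSR_VMX_CR0/CR4_FIXED0/1 MSRs.
 *
 * Purely illustrative example (the mask values below are hypothetical, not
 * what any particular processor reports):
 *
 *	cr0_ones_mask  = CR0_NE;		force NE to 1
 *	cr0_zeros_mask = CR0_NW | CR0_CD;	force NW and CD to 0
 *	vmx_fix_cr0(CR0_PE | CR0_CD) == (CR0_PE | CR0_NE)
 */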
u_long
vmx_fix_cr0(u_long cr0)
{

	return ((cr0 | cr0_ones_mask) & ~cr0_zeros_mask);
}

u_long
vmx_fix_cr4(u_long cr4)
{

	return ((cr4 | cr4_ones_mask) & ~cr4_zeros_mask);
}

static void
vpid_free(int vpid)
{
	if (vpid < 0 || vpid > 0xffff)
		panic("vpid_free: invalid vpid %d", vpid);

	/*
	 * VPIDs [0,VM_MAXCPU] are special and are not allocated from
	 * the unit number allocator.
	 */

	if (vpid > VM_MAXCPU)
		free_unr(vpid_unr, vpid);
}

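/*
 * Illustrative layout of the VPID number space as managed by the routines
 * below (the exact boundaries depend on VM_MAXCPU):
 *
 *	0			- used when "enable VPID" is unavailable
 *	[1, VM_MAXCPU]		- overflow namespace used by vpid_alloc()
 *				  when the unit number allocator runs dry;
 *				  vpid_free() never returns these to it
 *	[VM_MAXCPU + 1, 0xffff]	- allocated from 'vpid_unr'
 *
 * A typical caller (see vmx_vminit()) does:
 *
 *	uint16_t vpid[VM_MAXCPU];
 *	vpid_alloc(vpid, VM_MAXCPU);
 */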
static void
vpid_alloc(uint16_t *vpid, int num)
{
	int i, x;

	if (num <= 0 || num > VM_MAXCPU)
		panic("invalid number of vpids requested: %d", num);

	/*
	 * If the "enable vpid" execution control is not enabled then the
	 * VPID is required to be 0 for all vcpus.
	 */
	if ((procbased_ctls2 & PROCBASED2_ENABLE_VPID) == 0) {
		for (i = 0; i < num; i++)
			vpid[i] = 0;
		return;
	}

	/*
	 * Allocate a unique VPID for each vcpu from the unit number allocator.
	 */
	for (i = 0; i < num; i++) {
		x = alloc_unr(vpid_unr);
		if (x == -1)
			break;
		else
			vpid[i] = x;
	}

	if (i < num) {
		atomic_add_int(&vpid_alloc_failed, 1);

		/*
		 * If the unit number allocator does not have enough unique
		 * VPIDs then we need to allocate from the [1,VM_MAXCPU] range.
		 *
		 * These VPIDs are not unique across VMs but this does not
		 * affect correctness because the combined mappings are also
		 * tagged with the EP4TA which is unique for each VM.
		 *
		 * It is still sub-optimal because the invvpid will invalidate
		 * combined mappings for a particular VPID across all EP4TAs.
		 */
		while (i-- > 0)
			vpid_free(vpid[i]);

		for (i = 0; i < num; i++)
			vpid[i] = i + 1;
	}
}

static void
vpid_init(void)
{
	/*
	 * VPID 0 is required when the "enable VPID" execution control is
	 * disabled.
	 *
	 * VPIDs [1,VM_MAXCPU] are used as the "overflow namespace" when the
	 * unit number allocator does not have sufficient unique VPIDs to
	 * satisfy the allocation.
	 *
	 * The remaining VPIDs are managed by the unit number allocator.
	 */
	vpid_unr = new_unrhdr(VM_MAXCPU + 1, 0xffff, NULL);
}

static void
vmx_disable(void *arg __unused)
{
	struct invvpid_desc invvpid_desc = { 0 };
	struct invept_desc invept_desc = { 0 };

	if (vmxon_enabled[curcpu]) {
		/*
		 * See sections 25.3.3.3 and 25.3.3.4 in Intel Vol 3b.
		 *
		 * VMXON or VMXOFF are not required to invalidate any TLB
		 * caching structures. This prevents potential retention of
		 * cached information in the TLB between distinct VMX episodes.
		 */
		invvpid(INVVPID_TYPE_ALL_CONTEXTS, invvpid_desc);
		invept(INVEPT_TYPE_ALL_CONTEXTS, invept_desc);
		vmxoff();
	}
	load_cr4(rcr4() & ~CR4_VMXE);
}

static int
vmx_cleanup(void)
{

	if (pirvec != 0)
		vmm_ipi_free(pirvec);

	if (vpid_unr != NULL) {
		delete_unrhdr(vpid_unr);
		vpid_unr = NULL;
	}

	smp_rendezvous(NULL, vmx_disable, NULL, NULL);

	return (0);
}

static void
vmx_enable(void *arg __unused)
{
	int error;
	uint64_t feature_control;

	feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL);
	if ((feature_control & IA32_FEATURE_CONTROL_LOCK) == 0 ||
	    (feature_control & IA32_FEATURE_CONTROL_VMX_EN) == 0) {
		wrmsr(MSR_IA32_FEATURE_CONTROL,
		    feature_control | IA32_FEATURE_CONTROL_VMX_EN |
		    IA32_FEATURE_CONTROL_LOCK);
	}
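	/*
	 * Example of the MSR_IA32_FEATURE_CONTROL states handled above
	 * (bit 0 is the lock bit, bit 2 enables VMXON outside SMX):
	 *
	 *	unlocked, e.g. 0x0   - the wrmsr above leaves it at 0x5
	 *			       (lock | VMX enable)
	 *	locked with VMX on   - left untouched, VMXON is permitted
	 *	locked with VMX off  - rejected earlier by vmx_init(), so
	 *			       this path is never reached
	 */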

	load_cr4(rcr4() | CR4_VMXE);

	*(uint32_t *)vmxon_region[curcpu] = vmx_revision();
	error = vmxon(vmxon_region[curcpu]);
	if (error == 0)
		vmxon_enabled[curcpu] = 1;
}

static void
vmx_restore(void)
{

	if (vmxon_enabled[curcpu])
		vmxon(vmxon_region[curcpu]);
}

static int
vmx_init(int ipinum)
{
	int error, use_tpr_shadow;
	uint64_t basic, fixed0, fixed1, feature_control;
	uint32_t tmp, procbased2_vid_bits;

	/* CPUID.1:ECX[bit 5] must be 1 for processor to support VMX */
	if (!(cpu_feature2 & CPUID2_VMX)) {
		printf("vmx_init: processor does not support VMX operation\n");
		return (ENXIO);
	}

	/*
	 * Verify that MSR_IA32_FEATURE_CONTROL lock and VMXON enable bits
	 * are set (bits 0 and 2 respectively).
	 */
	feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL);
	if ((feature_control & IA32_FEATURE_CONTROL_LOCK) == 1 &&
	    (feature_control & IA32_FEATURE_CONTROL_VMX_EN) == 0) {
		printf("vmx_init: VMX operation disabled by BIOS\n");
		return (ENXIO);
	}

	/*
	 * Verify capabilities MSR_VMX_BASIC:
	 * - bit 54 indicates support for INS/OUTS decoding
	 */
	basic = rdmsr(MSR_VMX_BASIC);
	if ((basic & (1UL << 54)) == 0) {
		printf("vmx_init: processor does not support desired basic "
		    "capabilities\n");
		return (EINVAL);
	}

	/* Check support for primary processor-based VM-execution controls */
	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
			       MSR_VMX_TRUE_PROCBASED_CTLS,
			       PROCBASED_CTLS_ONE_SETTING,
			       PROCBASED_CTLS_ZERO_SETTING, &procbased_ctls);
	if (error) {
		printf("vmx_init: processor does not support desired primary "
		       "processor-based controls\n");
		return (error);
	}

	/* Clear the processor-based ctl bits that are set on demand */
	procbased_ctls &= ~PROCBASED_CTLS_WINDOW_SETTING;

	/* Check support for secondary processor-based VM-execution controls */
	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
			       MSR_VMX_PROCBASED_CTLS2,
			       PROCBASED_CTLS2_ONE_SETTING,
			       PROCBASED_CTLS2_ZERO_SETTING, &procbased_ctls2);
	if (error) {
		printf("vmx_init: processor does not support desired secondary "
		       "processor-based controls\n");
		return (error);
	}

	/* Check support for VPID */
	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2,
			       PROCBASED2_ENABLE_VPID, 0, &tmp);
	if (error == 0)
		procbased_ctls2 |= PROCBASED2_ENABLE_VPID;

	/* Check support for pin-based VM-execution controls */
	error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
			       MSR_VMX_TRUE_PINBASED_CTLS,
			       PINBASED_CTLS_ONE_SETTING,
			       PINBASED_CTLS_ZERO_SETTING, &pinbased_ctls);
	if (error) {
		printf("vmx_init: processor does not support desired "
		       "pin-based controls\n");
		return (error);
	}

	/* Check support for VM-exit controls */
	error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS,
			       VM_EXIT_CTLS_ONE_SETTING,
			       VM_EXIT_CTLS_ZERO_SETTING,
			       &exit_ctls);
	if (error) {
		printf("vmx_init: processor does not support desired "
		    "exit controls\n");
		return (error);
	}

	/* Check support for VM-entry controls */
	error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS, MSR_VMX_TRUE_ENTRY_CTLS,
	    VM_ENTRY_CTLS_ONE_SETTING, VM_ENTRY_CTLS_ZERO_SETTING,
	    &entry_ctls);
	if (error) {
		printf("vmx_init: processor does not support desired "
		    "entry controls\n");
		return (error);
	}

	/*
	 * Check support for optional features by testing them
	 * as individual bits
	 */
	cap_halt_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
					MSR_VMX_TRUE_PROCBASED_CTLS,
					PROCBASED_HLT_EXITING, 0,
					&tmp) == 0);

	cap_monitor_trap = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
					MSR_VMX_PROCBASED_CTLS,
					PROCBASED_MTF, 0,
					&tmp) == 0);

	cap_pause_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
					 MSR_VMX_TRUE_PROCBASED_CTLS,
					 PROCBASED_PAUSE_EXITING, 0,
					 &tmp) == 0);

	cap_unrestricted_guest = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
					MSR_VMX_PROCBASED_CTLS2,
					PROCBASED2_UNRESTRICTED_GUEST, 0,
					&tmp) == 0);

	cap_invpcid = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
	    MSR_VMX_PROCBASED_CTLS2, PROCBASED2_ENABLE_INVPCID, 0,
	    &tmp) == 0);
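	/*
	 * Each optional capability above is probed the same way: ask
	 * vmx_set_ctlreg() to turn on just that control bit, with nothing
	 * forced to zero, and treat a return value of 0 as "supported".
	 * A hypothetical additional capability would follow the same
	 * pattern (shown for illustration only, it is not probed here):
	 *
	 *	cap_foo = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
	 *	    MSR_VMX_TRUE_PROCBASED_CTLS, PROCBASED_FOO_EXITING, 0,
	 *	    &tmp) == 0);
	 */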

	/*
	 * Check support for virtual interrupt delivery.
	 */
	procbased2_vid_bits = (PROCBASED2_VIRTUALIZE_APIC_ACCESSES |
	    PROCBASED2_VIRTUALIZE_X2APIC_MODE |
	    PROCBASED2_APIC_REGISTER_VIRTUALIZATION |
	    PROCBASED2_VIRTUAL_INTERRUPT_DELIVERY);

	use_tpr_shadow = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
	    MSR_VMX_TRUE_PROCBASED_CTLS, PROCBASED_USE_TPR_SHADOW, 0,
	    &tmp) == 0);

	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2,
	    procbased2_vid_bits, 0, &tmp);
	if (error == 0 && use_tpr_shadow) {
		virtual_interrupt_delivery = 1;
		TUNABLE_INT_FETCH("hw.vmm.vmx.use_apic_vid",
		    &virtual_interrupt_delivery);
	}

	if (virtual_interrupt_delivery) {
		procbased_ctls |= PROCBASED_USE_TPR_SHADOW;
		procbased_ctls2 |= procbased2_vid_bits;
		procbased_ctls2 &= ~PROCBASED2_VIRTUALIZE_X2APIC_MODE;

		/*
		 * No need to emulate accesses to %CR8 if virtual
		 * interrupt delivery is enabled.
		 */
		procbased_ctls &= ~PROCBASED_CR8_LOAD_EXITING;
		procbased_ctls &= ~PROCBASED_CR8_STORE_EXITING;

		/*
		 * Check for Posted Interrupts only if Virtual Interrupt
		 * Delivery is enabled.
		 */
		error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
		    MSR_VMX_TRUE_PINBASED_CTLS, PINBASED_POSTED_INTERRUPT, 0,
		    &tmp);
		if (error == 0) {
			pirvec = vmm_ipi_alloc();
			if (pirvec == 0) {
				if (bootverbose) {
					printf("vmx_init: unable to allocate "
					    "posted interrupt vector\n");
				}
			} else {
				posted_interrupts = 1;
				TUNABLE_INT_FETCH("hw.vmm.vmx.use_apic_pir",
				    &posted_interrupts);
			}
		}
	}

	if (posted_interrupts)
		pinbased_ctls |= PINBASED_POSTED_INTERRUPT;

	/* Initialize EPT */
	error = ept_init(ipinum);
	if (error) {
		printf("vmx_init: ept initialization failed (%d)\n", error);
		return (error);
	}

	/*
	 * Stash the cr0 and cr4 bits that must be fixed to 0 or 1
	 */
	fixed0 = rdmsr(MSR_VMX_CR0_FIXED0);
	fixed1 = rdmsr(MSR_VMX_CR0_FIXED1);
	cr0_ones_mask = fixed0 & fixed1;
	cr0_zeros_mask = ~fixed0 & ~fixed1;
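	/*
	 * Per bit, the FIXED0/FIXED1 pair encodes:
	 *
	 *	fixed0	fixed1	meaning		mask it lands in
	 *	  1	  1	must be 1	cr0_ones_mask
	 *	  0	  0	must be 0	cr0_zeros_mask
	 *	  0	  1	don't care	neither
	 *
	 * For example, a hypothetical fixed0 of 0x80000021 with fixed1 of
	 * 0xffffffff would force CR0_PG, CR0_NE and CR0_PE to 1 and leave
	 * all other bits up to the guest, prior to the adjustments below.
	 */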

	/*
	 * CR0_PE and CR0_PG can be set to zero in VMX non-root operation
	 * if unrestricted guest execution is allowed.
	 */
	if (cap_unrestricted_guest)
		cr0_ones_mask &= ~(CR0_PG | CR0_PE);

	/*
	 * Do not allow the guest to set CR0_NW or CR0_CD.
	 */
	cr0_zeros_mask |= (CR0_NW | CR0_CD);

	fixed0 = rdmsr(MSR_VMX_CR4_FIXED0);
	fixed1 = rdmsr(MSR_VMX_CR4_FIXED1);
	cr4_ones_mask = fixed0 & fixed1;
	cr4_zeros_mask = ~fixed0 & ~fixed1;

	vpid_init();

	vmx_msr_init();

	/* enable VMX operation */
	smp_rendezvous(NULL, vmx_enable, NULL, NULL);

	vmx_initialized = 1;

	return (0);
}

static void
vmx_trigger_hostintr(int vector)
{
	uintptr_t func;
	struct gate_descriptor *gd;

	gd = &idt[vector];

	KASSERT(vector >= 32 && vector <= 255, ("vmx_trigger_hostintr: "
	    "invalid vector %d", vector));
	KASSERT(gd->gd_p == 1, ("gate descriptor for vector %d not present",
	    vector));
	KASSERT(gd->gd_type == SDT_SYSIGT, ("gate descriptor for vector %d "
	    "has invalid type %d", vector, gd->gd_type));
	KASSERT(gd->gd_dpl == SEL_KPL, ("gate descriptor for vector %d "
	    "has invalid dpl %d", vector, gd->gd_dpl));
	KASSERT(gd->gd_selector == GSEL(GCODE_SEL, SEL_KPL), ("gate descriptor "
	    "for vector %d has invalid selector %d", vector, gd->gd_selector));
	KASSERT(gd->gd_ist == 0, ("gate descriptor for vector %d has invalid "
	    "IST %d", vector, gd->gd_ist));

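	/*
	 * The 64-bit handler address is split across the gate descriptor:
	 * gd_looffset holds bits 15:0 and gd_hioffset the remaining high
	 * bits.  For example, a handler at 0xffffffff80e01234 is stored as
	 * gd_looffset = 0x1234 and gd_hioffset = 0xffffffff80e0; the
	 * expression below reassembles the original address.
	 */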
	func = ((long)gd->gd_hioffset << 16 | gd->gd_looffset);
	vmx_call_isr(func);
}

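/*
 * Illustration of the CR0/CR4 guest/host masks and read shadows set up
 * below: a guest read of a bit that is set in the mask returns the value
 * from the shadow, and a guest write that would give such a bit a value
 * different from the shadow causes a CR-access VM-exit instead of taking
 * effect directly.  For example, since CR0_CD is always in the mask (via
 * cr0_zeros_mask), a guest attempt to set CR0_CD while the shadow has it
 * clear traps to vmx_emulate_cr0_access() rather than disabling caching
 * directly.
 */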
static int
vmx_setup_cr_shadow(int which, struct vmcs *vmcs, uint32_t initial)
{
	int error, mask_ident, shadow_ident;
	uint64_t mask_value;

	if (which != 0 && which != 4)
		panic("vmx_setup_cr_shadow: unknown cr%d", which);

	if (which == 0) {
		mask_ident = VMCS_CR0_MASK;
		mask_value = cr0_ones_mask | cr0_zeros_mask;
		shadow_ident = VMCS_CR0_SHADOW;
	} else {
		mask_ident = VMCS_CR4_MASK;
		mask_value = cr4_ones_mask | cr4_zeros_mask;
		shadow_ident = VMCS_CR4_SHADOW;
	}

	error = vmcs_setreg(vmcs, 0, VMCS_IDENT(mask_ident), mask_value);
	if (error)
		return (error);

	error = vmcs_setreg(vmcs, 0, VMCS_IDENT(shadow_ident), initial);
	if (error)
		return (error);

	return (0);
}
#define	vmx_setup_cr0_shadow(vmcs,init)	vmx_setup_cr_shadow(0, (vmcs), (init))
#define	vmx_setup_cr4_shadow(vmcs,init)	vmx_setup_cr_shadow(4, (vmcs), (init))

static void *
vmx_vminit(struct vm *vm, pmap_t pmap)
{
	uint16_t vpid[VM_MAXCPU];
	int i, error;
	struct vmx *vmx;
	struct vmcs *vmcs;
	uint32_t exc_bitmap;

	vmx = malloc(sizeof(struct vmx), M_VMX, M_WAITOK | M_ZERO);
	if ((uintptr_t)vmx & PAGE_MASK) {
		panic("malloc of struct vmx not aligned on %d byte boundary",
		      PAGE_SIZE);
	}
	vmx->vm = vm;

	vmx->eptp = eptp(vtophys((vm_offset_t)pmap->pm_pml4));

	/*
	 * Clean up EPTP-tagged guest physical and combined mappings
	 *
	 * VMX transitions are not required to invalidate any guest physical
	 * mappings. So, it may be possible for stale guest physical mappings
	 * to be present in the processor TLBs.
	 *
	 * Combined mappings for this EP4TA are also invalidated for all VPIDs.
	 */
	ept_invalidate_mappings(vmx->eptp);

	msr_bitmap_initialize(vmx->msr_bitmap);

	/*
	 * It is safe to allow direct access to MSR_GSBASE and MSR_FSBASE.
	 * The guest FSBASE and GSBASE are saved and restored during
	 * vm-exit and vm-entry respectively. The host FSBASE and GSBASE are
	 * always restored from the vmcs host state area on vm-exit.
	 *
	 * The SYSENTER_CS/ESP/EIP MSRs are identical to FS/GSBASE in
	 * how they are saved/restored so they can be directly accessed by
	 * the guest.
	 *
	 * MSR_EFER is saved and restored in the guest VMCS area on a
	 * VM exit and entry respectively. It is also restored from the
	 * host VMCS area on a VM exit.
	 *
	 * MSR_PAT is saved and restored in the guest VMCS area on a VM exit
	 * and entry respectively. It is also restored from the host VMCS
	 * area on a VM exit.
	 *
	 * The TSC MSR is exposed read-only. Writes are disallowed as that
	 * will impact the host TSC.
	 * XXX Writes would be implemented with a wrmsr trap, and
	 * then modifying the TSC offset in the VMCS.
	 */
	if (guest_msr_rw(vmx, MSR_GSBASE) ||
	    guest_msr_rw(vmx, MSR_FSBASE) ||
	    guest_msr_rw(vmx, MSR_SYSENTER_CS_MSR) ||
	    guest_msr_rw(vmx, MSR_SYSENTER_ESP_MSR) ||
	    guest_msr_rw(vmx, MSR_SYSENTER_EIP_MSR) ||
	    guest_msr_rw(vmx, MSR_EFER) ||
	    guest_msr_rw(vmx, MSR_PAT) ||
	    guest_msr_ro(vmx, MSR_TSC))
		panic("vmx_vminit: error setting guest msr access");

	vpid_alloc(vpid, VM_MAXCPU);

	if (virtual_interrupt_delivery) {
		error = vm_map_mmio(vm, DEFAULT_APIC_BASE, PAGE_SIZE,
		    APIC_ACCESS_ADDRESS);
		/* XXX this should really return an error to the caller */
		KASSERT(error == 0, ("vm_map_mmio(apicbase) error %d", error));
	}

	for (i = 0; i < VM_MAXCPU; i++) {
		vmcs = &vmx->vmcs[i];
		vmcs->identifier = vmx_revision();
		error = vmclear(vmcs);
		if (error != 0) {
			panic("vmx_vminit: vmclear error %d on vcpu %d\n",
			      error, i);
		}

		vmx_msr_guest_init(vmx, i);

		error = vmcs_init(vmcs);
		KASSERT(error == 0, ("vmcs_init error %d", error));

		VMPTRLD(vmcs);
		error = 0;
		error += vmwrite(VMCS_HOST_RSP, (u_long)&vmx->ctx[i]);
		error += vmwrite(VMCS_EPTP, vmx->eptp);
		error += vmwrite(VMCS_PIN_BASED_CTLS, pinbased_ctls);
		error += vmwrite(VMCS_PRI_PROC_BASED_CTLS, procbased_ctls);
		error += vmwrite(VMCS_SEC_PROC_BASED_CTLS, procbased_ctls2);
		error += vmwrite(VMCS_EXIT_CTLS, exit_ctls);
		error += vmwrite(VMCS_ENTRY_CTLS, entry_ctls);
		error += vmwrite(VMCS_MSR_BITMAP, vtophys(vmx->msr_bitmap));
		error += vmwrite(VMCS_VPID, vpid[i]);

		/* exception bitmap */
		if (vcpu_trace_exceptions(vm, i))
			exc_bitmap = 0xffffffff;
		else
			exc_bitmap = 1 << IDT_MC;
		error += vmwrite(VMCS_EXCEPTION_BITMAP, exc_bitmap);

		if (virtual_interrupt_delivery) {
			error += vmwrite(VMCS_APIC_ACCESS, APIC_ACCESS_ADDRESS);
			error += vmwrite(VMCS_VIRTUAL_APIC,
			    vtophys(&vmx->apic_page[i]));
			error += vmwrite(VMCS_EOI_EXIT0, 0);
			error += vmwrite(VMCS_EOI_EXIT1, 0);
			error += vmwrite(VMCS_EOI_EXIT2, 0);
			error += vmwrite(VMCS_EOI_EXIT3, 0);
		}
		if (posted_interrupts) {
			error += vmwrite(VMCS_PIR_VECTOR, pirvec);
			error += vmwrite(VMCS_PIR_DESC,
			    vtophys(&vmx->pir_desc[i]));
		}
		VMCLEAR(vmcs);
		KASSERT(error == 0, ("vmx_vminit: error customizing the vmcs"));

		vmx->cap[i].set = 0;
		vmx->cap[i].proc_ctls = procbased_ctls;
		vmx->cap[i].proc_ctls2 = procbased_ctls2;

		vmx->state[i].lastcpu = NOCPU;
		vmx->state[i].vpid = vpid[i];

		/*
		 * Set up the CR0/4 shadows, and init the read shadow
		 * to the power-on register value from the Intel Sys Arch.
		 *  CR0 - 0x60000010
		 *  CR4 - 0
		 */
		error = vmx_setup_cr0_shadow(vmcs, 0x60000010);
		if (error != 0)
			panic("vmx_setup_cr0_shadow %d", error);

		error = vmx_setup_cr4_shadow(vmcs, 0);
		if (error != 0)
			panic("vmx_setup_cr4_shadow %d", error);

		vmx->ctx[i].pmap = pmap;
	}

	return (vmx);
}

static int
vmx_handle_cpuid(struct vm *vm, int vcpu, struct vmxctx *vmxctx)
{
	int handled, func;

	func = vmxctx->guest_rax;

	handled = x86_emulate_cpuid(vm, vcpu,
				    (uint32_t*)(&vmxctx->guest_rax),
				    (uint32_t*)(&vmxctx->guest_rbx),
				    (uint32_t*)(&vmxctx->guest_rcx),
				    (uint32_t*)(&vmxctx->guest_rdx));
	return (handled);
}

static __inline void
vmx_run_trace(struct vmx *vmx, int vcpu)
{
#ifdef KTR
	VCPU_CTR1(vmx->vm, vcpu, "Resume execution at %#lx", vmcs_guest_rip());
#endif
}

static __inline void
vmx_exit_trace(struct vmx *vmx, int vcpu, uint64_t rip, uint32_t exit_reason,
	       int handled)
{
#ifdef KTR
	VCPU_CTR3(vmx->vm, vcpu, "%s %s vmexit at 0x%0lx",
		 handled ? "handled" : "unhandled",
		 exit_reason_to_str(exit_reason), rip);
#endif
}

static __inline void
vmx_astpending_trace(struct vmx *vmx, int vcpu, uint64_t rip)
{
#ifdef KTR
	VCPU_CTR1(vmx->vm, vcpu, "astpending vmexit at 0x%0lx", rip);
#endif
}

static VMM_STAT_INTEL(VCPU_INVVPID_SAVED, "Number of vpid invalidations saved");
static VMM_STAT_INTEL(VCPU_INVVPID_DONE, "Number of vpid invalidations done");

/*
 * Invalidate guest mappings identified by its vpid from the TLB.
 */
static __inline void
vmx_invvpid(struct vmx *vmx, int vcpu, pmap_t pmap, int running)
{
	struct vmxstate *vmxstate;
	struct invvpid_desc invvpid_desc;

	vmxstate = &vmx->state[vcpu];
	if (vmxstate->vpid == 0)
		return;

	if (!running) {
		/*
		 * Set the 'lastcpu' to an invalid host cpu.
		 *
		 * This will invalidate TLB entries tagged with the vcpu's
		 * vpid the next time it runs via vmx_set_pcpu_defaults().
		 */
		vmxstate->lastcpu = NOCPU;
		return;
	}

	KASSERT(curthread->td_critnest > 0, ("%s: vcpu %d running outside "
	    "critical section", __func__, vcpu));

	/*
	 * Invalidate all mappings tagged with 'vpid'
	 *
	 * We do this because this vcpu was executing on a different host
	 * cpu when it last ran. We do not track whether it invalidated
	 * mappings associated with its 'vpid' during that run. So we must
	 * assume that the mappings associated with 'vpid' on 'curcpu' are
	 * stale and invalidate them.
	 *
	 * Note that we incur this penalty only when the scheduler chooses to
	 * move the thread associated with this vcpu between host cpus.
	 *
	 * Note also that this will invalidate mappings tagged with 'vpid'
	 * for "all" EP4TAs.
	 */
	if (pmap->pm_eptgen == vmx->eptgen[curcpu]) {
		invvpid_desc._res1 = 0;
		invvpid_desc._res2 = 0;
		invvpid_desc.vpid = vmxstate->vpid;
		invvpid_desc.linear_addr = 0;
		invvpid(INVVPID_TYPE_SINGLE_CONTEXT, invvpid_desc);
		vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_DONE, 1);
	} else {
		/*
		 * The invvpid can be skipped if an invept is going to
		 * be performed before entering the guest. The invept
		 * will invalidate combined mappings tagged with
		 * 'vmx->eptp' for all vpids.
		 */
		vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_SAVED, 1);
	}
}

static void
vmx_set_pcpu_defaults(struct vmx *vmx, int vcpu, pmap_t pmap)
{
	struct vmxstate *vmxstate;

	vmxstate = &vmx->state[vcpu];
	if (vmxstate->lastcpu == curcpu)
		return;

	vmxstate->lastcpu = curcpu;

	vmm_stat_incr(vmx->vm, vcpu, VCPU_MIGRATIONS, 1);

	vmcs_write(VMCS_HOST_TR_BASE, vmm_get_host_trbase());
	vmcs_write(VMCS_HOST_GDTR_BASE, vmm_get_host_gdtrbase());
	vmcs_write(VMCS_HOST_GS_BASE, vmm_get_host_gsbase());
	vmx_invvpid(vmx, vcpu, pmap, 1);
}

/*
 * We depend on 'procbased_ctls' to have the Interrupt Window Exiting bit set.
 */
CTASSERT((PROCBASED_CTLS_ONE_SETTING & PROCBASED_INT_WINDOW_EXITING) != 0);

static void __inline
vmx_set_int_window_exiting(struct vmx *vmx, int vcpu)
{

	if ((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) == 0) {
		vmx->cap[vcpu].proc_ctls |= PROCBASED_INT_WINDOW_EXITING;
		vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
		VCPU_CTR0(vmx->vm, vcpu, "Enabling interrupt window exiting");
	}
}

static void __inline
vmx_clear_int_window_exiting(struct vmx *vmx, int vcpu)
{

	KASSERT((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0,
	    ("intr_window_exiting not set: %#x", vmx->cap[vcpu].proc_ctls));
	vmx->cap[vcpu].proc_ctls &= ~PROCBASED_INT_WINDOW_EXITING;
	vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
	VCPU_CTR0(vmx->vm, vcpu, "Disabling interrupt window exiting");
}

static void __inline
vmx_set_nmi_window_exiting(struct vmx *vmx, int vcpu)
{

	if ((vmx->cap[vcpu].proc_ctls & PROCBASED_NMI_WINDOW_EXITING) == 0) {
		vmx->cap[vcpu].proc_ctls |= PROCBASED_NMI_WINDOW_EXITING;
		vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
		VCPU_CTR0(vmx->vm, vcpu, "Enabling NMI window exiting");
	}
}

static void __inline
vmx_clear_nmi_window_exiting(struct vmx *vmx, int vcpu)
{

	KASSERT((vmx->cap[vcpu].proc_ctls & PROCBASED_NMI_WINDOW_EXITING) != 0,
	    ("nmi_window_exiting not set %#x", vmx->cap[vcpu].proc_ctls));
	vmx->cap[vcpu].proc_ctls &= ~PROCBASED_NMI_WINDOW_EXITING;
	vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
	VCPU_CTR0(vmx->vm, vcpu, "Disabling NMI window exiting");
}

#define	NMI_BLOCKING	(VMCS_INTERRUPTIBILITY_NMI_BLOCKING |		\
			 VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)
#define	HWINTR_BLOCKING	(VMCS_INTERRUPTIBILITY_STI_BLOCKING |		\
			 VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)

static void
vmx_inject_nmi(struct vmx *vmx, int vcpu)
{
	uint32_t gi, info;

	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
	KASSERT((gi & NMI_BLOCKING) == 0, ("vmx_inject_nmi: invalid guest "
	    "interruptibility-state %#x", gi));

	info = vmcs_read(VMCS_ENTRY_INTR_INFO);
	KASSERT((info & VMCS_INTR_VALID) == 0, ("vmx_inject_nmi: invalid "
	    "VM-entry interruption information %#x", info));

	/*
	 * Inject the virtual NMI. The vector must be the NMI IDT entry
	 * or the VMCS entry check will fail.
	 */
	info = IDT_NMI | VMCS_INTR_T_NMI | VMCS_INTR_VALID;
	vmcs_write(VMCS_ENTRY_INTR_INFO, info);

	VCPU_CTR0(vmx->vm, vcpu, "Injecting vNMI");

	/* Clear the request */
	vm_nmi_clear(vmx->vm, vcpu);
}

static void
vmx_inject_interrupts(struct vmx *vmx, int vcpu, struct vlapic *vlapic)
{
	int vector, need_nmi_exiting, extint_pending;
	uint64_t rflags, entryinfo;
	uint32_t gi, info;

	if (vm_entry_intinfo(vmx->vm, vcpu, &entryinfo)) {
		KASSERT((entryinfo & VMCS_INTR_VALID) != 0, ("%s: entry "
		    "intinfo is not valid: %#lx", __func__, entryinfo));

		info = vmcs_read(VMCS_ENTRY_INTR_INFO);
		KASSERT((info & VMCS_INTR_VALID) == 0, ("%s: cannot inject "
		     "pending exception: %#lx/%#x", __func__, entryinfo, info));

		info = entryinfo;
		vector = info & 0xff;
		if (vector == IDT_BP || vector == IDT_OF) {
			/*
			 * VT-x requires #BP and #OF to be injected as software
			 * exceptions.
			 */
			info &= ~VMCS_INTR_T_MASK;
			info |= VMCS_INTR_T_SWEXCEPTION;
		}

		if (info & VMCS_INTR_DEL_ERRCODE)
			vmcs_write(VMCS_ENTRY_EXCEPTION_ERROR, entryinfo >> 32);

		vmcs_write(VMCS_ENTRY_INTR_INFO, info);
	}

	if (vm_nmi_pending(vmx->vm, vcpu)) {
		/*
		 * If there are no conditions blocking NMI injection then
		 * inject it directly here otherwise enable "NMI window
		 * exiting" to inject it as soon as we can.
		 *
		 * We also check for STI_BLOCKING because some implementations
		 * don't allow NMI injection in this case. If we are running
		 * on a processor that doesn't have this restriction it will
		 * immediately exit and the NMI will be injected in the
		 * "NMI window exiting" handler.
		 */
		need_nmi_exiting = 1;
		gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
		if ((gi & (HWINTR_BLOCKING | NMI_BLOCKING)) == 0) {
			info = vmcs_read(VMCS_ENTRY_INTR_INFO);
			if ((info & VMCS_INTR_VALID) == 0) {
				vmx_inject_nmi(vmx, vcpu);
				need_nmi_exiting = 0;
			} else {
				VCPU_CTR1(vmx->vm, vcpu, "Cannot inject NMI "
				    "due to VM-entry intr info %#x", info);
			}
		} else {
			VCPU_CTR1(vmx->vm, vcpu, "Cannot inject NMI due to "
			    "Guest Interruptibility-state %#x", gi);
		}

		if (need_nmi_exiting)
			vmx_set_nmi_window_exiting(vmx, vcpu);
	}

	extint_pending = vm_extint_pending(vmx->vm, vcpu);

	if (!extint_pending && virtual_interrupt_delivery) {
		vmx_inject_pir(vlapic);
		return;
	}

	/*
	 * If interrupt-window exiting is already in effect then don't bother
	 * checking for pending interrupts. This is just an optimization and
	 * not needed for correctness.
	 */
	if ((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0) {
		VCPU_CTR0(vmx->vm, vcpu, "Skip interrupt injection due to "
		    "pending int_window_exiting");
		return;
	}

	if (!extint_pending) {
		/* Ask the local apic for a vector to inject */
		if (!vlapic_pending_intr(vlapic, &vector))
			return;

		/*
		 * From the Intel SDM, Volume 3, Section "Maskable
		 * Hardware Interrupts":
		 * - maskable interrupt vectors [16,255] can be delivered
		 *   through the local APIC.
		 */
		KASSERT(vector >= 16 && vector <= 255,
		    ("invalid vector %d from local APIC", vector));
	} else {
		/* Ask the legacy pic for a vector to inject */
		vatpic_pending_intr(vmx->vm, &vector);

		/*
		 * From the Intel SDM, Volume 3, Section "Maskable
		 * Hardware Interrupts":
		 * - maskable interrupt vectors [0,255] can be delivered
		 *   through the INTR pin.
		 */
		KASSERT(vector >= 0 && vector <= 255,
		    ("invalid vector %d from INTR", vector));
	}

	/* Check RFLAGS.IF and the interruptibility state of the guest */
	rflags = vmcs_read(VMCS_GUEST_RFLAGS);
	if ((rflags & PSL_I) == 0) {
		VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to "
		    "rflags %#lx", vector, rflags);
		goto cantinject;
	}

	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
	if (gi & HWINTR_BLOCKING) {
		VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to "
		    "Guest Interruptibility-state %#x", vector, gi);
		goto cantinject;
	}

	info = vmcs_read(VMCS_ENTRY_INTR_INFO);
	if (info & VMCS_INTR_VALID) {
		/*
		 * This is expected and could happen for multiple reasons:
		 * - A vectoring VM-entry was aborted due to astpending
		 * - A VM-exit happened during event injection.
		 * - An exception was injected above.
		 * - An NMI was injected above or after "NMI window exiting"
		 */
		VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to "
		    "VM-entry intr info %#x", vector, info);
		goto cantinject;
	}

	/* Inject the interrupt */
	info = VMCS_INTR_T_HWINTR | VMCS_INTR_VALID;
	info |= vector;
	vmcs_write(VMCS_ENTRY_INTR_INFO, info);

	if (!extint_pending) {
		/* Update the Local APIC ISR */
		vlapic_intr_accepted(vlapic, vector);
	} else {
		vm_extint_clear(vmx->vm, vcpu);
		vatpic_intr_accepted(vmx->vm, vector);

		/*
		 * After we accepted the current ExtINT the PIC may
		 * have posted another one.  If that is the case, set
		 * the Interrupt Window Exiting execution control so
		 * we can inject that one too.
		 *
		 * Also, interrupt window exiting allows us to inject any
		 * pending APIC vector that was preempted by the ExtINT
		 * as soon as possible. This applies both for the software
		 * emulated vlapic and the hardware assisted virtual APIC.
		 */
		vmx_set_int_window_exiting(vmx, vcpu);
	}

	VCPU_CTR1(vmx->vm, vcpu, "Injecting hwintr at vector %d", vector);

	return;

cantinject:
	/*
	 * Set the Interrupt Window Exiting execution control so we can inject
	 * the interrupt as soon as blocking condition goes away.
	 */
	vmx_set_int_window_exiting(vmx, vcpu);
}

/*
 * If the Virtual NMIs execution control is '1' then the logical processor
 * tracks virtual-NMI blocking in the Guest Interruptibility-state field of
 * the VMCS. An IRET instruction in VMX non-root operation will remove any
 * virtual-NMI blocking.
 *
 * This unblocking occurs even if the IRET causes a fault. In this case the
 * hypervisor needs to restore virtual-NMI blocking before resuming the guest.
 */
static void
vmx_restore_nmi_blocking(struct vmx *vmx, int vcpuid)
{
	uint32_t gi;

	VCPU_CTR0(vmx->vm, vcpuid, "Restore Virtual-NMI blocking");
	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
	gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
	vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
}

static void
vmx_clear_nmi_blocking(struct vmx *vmx, int vcpuid)
{
	uint32_t gi;

	VCPU_CTR0(vmx->vm, vcpuid, "Clear Virtual-NMI blocking");
	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
	gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
	vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
}

static void
vmx_assert_nmi_blocking(struct vmx *vmx, int vcpuid)
{
	uint32_t gi;

	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
	KASSERT(gi & VMCS_INTERRUPTIBILITY_NMI_BLOCKING,
	    ("NMI blocking is not in effect %#x", gi));
}

static int
vmx_emulate_xsetbv(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
{
	struct vmxctx *vmxctx;
	uint64_t xcrval;
	const struct xsave_limits *limits;

	vmxctx = &vmx->ctx[vcpu];
	limits = vmm_get_xsave_limits();

	/*
	 * Note that the processor raises a GP# fault on its own if
	 * xsetbv is executed for CPL != 0, so we do not have to
	 * emulate that fault here.
	 */

	/* Only xcr0 is supported. */
	if (vmxctx->guest_rcx != 0) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/* We only handle xcr0 if both the host and guest have XSAVE enabled. */
	if (!limits->xsave_enabled || !(vmcs_read(VMCS_GUEST_CR4) & CR4_XSAVE)) {
		vm_inject_ud(vmx->vm, vcpu);
		return (HANDLED);
	}

	xcrval = vmxctx->guest_rdx << 32 | (vmxctx->guest_rax & 0xffffffff);
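	/*
	 * Worked example of the checks below using the architectural XCR0
	 * bit assignments (x87 is bit 0, SSE bit 1, AVX bit 2):
	 *
	 *	xcrval = 0x7 (x87|SSE|AVX)	accepted, provided the host's
	 *					xcr0_allowed covers those bits
	 *	xcrval = 0x6 (SSE|AVX)		#GP, x87 must always be set
	 *	xcrval = 0x5 (x87|AVX)		#GP, AVX requires SSE
	 */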
	if ((xcrval & ~limits->xcr0_allowed) != 0) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	if (!(xcrval & XFEATURE_ENABLED_X87)) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/* AVX (YMM_Hi128) requires SSE. */
	if (xcrval & XFEATURE_ENABLED_AVX &&
	    (xcrval & XFEATURE_AVX) != XFEATURE_AVX) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/*
	 * AVX512 requires base AVX (YMM_Hi128) as well as OpMask,
	 * ZMM_Hi256, and Hi16_ZMM.
	 */
	if (xcrval & XFEATURE_AVX512 &&
	    (xcrval & (XFEATURE_AVX512 | XFEATURE_AVX)) !=
	    (XFEATURE_AVX512 | XFEATURE_AVX)) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/*
	 * Intel MPX requires both bound register state flags to be
	 * set.
	 */
	if (((xcrval & XFEATURE_ENABLED_BNDREGS) != 0) !=
	    ((xcrval & XFEATURE_ENABLED_BNDCSR) != 0)) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/*
	 * This runs "inside" vmrun() with the guest's FPU state, so
	 * modifying xcr0 directly modifies the guest's xcr0, not the
	 * host's.
	 */
	load_xcr(0, xcrval);
	return (HANDLED);
}

static uint64_t
vmx_get_guest_reg(struct vmx *vmx, int vcpu, int ident)
{
	const struct vmxctx *vmxctx;

	vmxctx = &vmx->ctx[vcpu];

	switch (ident) {
	case 0:
		return (vmxctx->guest_rax);
	case 1:
		return (vmxctx->guest_rcx);
	case 2:
		return (vmxctx->guest_rdx);
	case 3:
		return (vmxctx->guest_rbx);
	case 4:
		return (vmcs_read(VMCS_GUEST_RSP));
	case 5:
		return (vmxctx->guest_rbp);
	case 6:
		return (vmxctx->guest_rsi);
	case 7:
		return (vmxctx->guest_rdi);
	case 8:
		return (vmxctx->guest_r8);
	case 9:
		return (vmxctx->guest_r9);
	case 10:
		return (vmxctx->guest_r10);
	case 11:
		return (vmxctx->guest_r11);
	case 12:
		return (vmxctx->guest_r12);
	case 13:
		return (vmxctx->guest_r13);
	case 14:
		return (vmxctx->guest_r14);
	case 15:
		return (vmxctx->guest_r15);
	default:
		panic("invalid vmx register %d", ident);
	}
}

static void
vmx_set_guest_reg(struct vmx *vmx, int vcpu, int ident, uint64_t regval)
{
	struct vmxctx *vmxctx;

	vmxctx = &vmx->ctx[vcpu];

	switch (ident) {
	case 0:
		vmxctx->guest_rax = regval;
		break;
	case 1:
		vmxctx->guest_rcx = regval;
		break;
	case 2:
		vmxctx->guest_rdx = regval;
		break;
	case 3:
		vmxctx->guest_rbx = regval;
		break;
	case 4:
		vmcs_write(VMCS_GUEST_RSP, regval);
		break;
	case 5:
		vmxctx->guest_rbp = regval;
		break;
	case 6:
		vmxctx->guest_rsi = regval;
		break;
	case 7:
		vmxctx->guest_rdi = regval;
		break;
	case 8:
		vmxctx->guest_r8 = regval;
		break;
	case 9:
		vmxctx->guest_r9 = regval;
		break;
	case 10:
		vmxctx->guest_r10 = regval;
		break;
	case 11:
		vmxctx->guest_r11 = regval;
		break;
	case 12:
		vmxctx->guest_r12 = regval;
		break;
	case 13:
		vmxctx->guest_r13 = regval;
		break;
	case 14:
		vmxctx->guest_r14 = regval;
		break;
	case 15:
		vmxctx->guest_r15 = regval;
		break;
	default:
		panic("invalid vmx register %d", ident);
	}
}

static int
vmx_emulate_cr0_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
{
	uint64_t crval, regval;

	/* We only handle mov to %cr0 at this time */
	if ((exitqual & 0xf0) != 0x00)
		return (UNHANDLED);

	regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);

	vmcs_write(VMCS_CR0_SHADOW, regval);

	crval = regval | cr0_ones_mask;
	crval &= ~cr0_zeros_mask;
	vmcs_write(VMCS_GUEST_CR0, crval);

	if (regval & CR0_PG) {
		uint64_t efer, entry_ctls;

		/*
		 * If CR0.PG is 1 and EFER.LME is 1 then EFER.LMA and
		 * the "IA-32e mode guest" bit in VM-entry control must be
		 * equal.
		 */
		efer = vmcs_read(VMCS_GUEST_IA32_EFER);
		if (efer & EFER_LME) {
			efer |= EFER_LMA;
			vmcs_write(VMCS_GUEST_IA32_EFER, efer);
			entry_ctls = vmcs_read(VMCS_ENTRY_CTLS);
			entry_ctls |= VM_ENTRY_GUEST_LMA;
			vmcs_write(VMCS_ENTRY_CTLS, entry_ctls);
		}
	}

	return (HANDLED);
}

static int
vmx_emulate_cr4_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
{
	uint64_t crval, regval;

	/* We only handle mov to %cr4 at this time */
	if ((exitqual & 0xf0) != 0x00)
		return (UNHANDLED);

	regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);

	vmcs_write(VMCS_CR4_SHADOW, regval);

	crval = regval | cr4_ones_mask;
	crval &= ~cr4_zeros_mask;
	vmcs_write(VMCS_GUEST_CR4, crval);

	return (HANDLED);
}

static int
vmx_emulate_cr8_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
{
	struct vlapic *vlapic;
	uint64_t cr8;
	int regnum;

	/* We only handle mov %cr8 to/from a register at this time. */
	if ((exitqual & 0xe0) != 0x00) {
		return (UNHANDLED);
	}

	vlapic = vm_lapic(vmx->vm, vcpu);
	regnum = (exitqual >> 8) & 0xf;
	if (exitqual & 0x10) {
		cr8 = vlapic_get_cr8(vlapic);
		vmx_set_guest_reg(vmx, vcpu, regnum, cr8);
	} else {
		cr8 = vmx_get_guest_reg(vmx, vcpu, regnum);
		vlapic_set_cr8(vlapic, cr8);
	}

	return (HANDLED);
}

/*
 * From section "Guest Register State" in the Intel SDM: CPL = SS.DPL
 */
static int
vmx_cpl(void)
{
	uint32_t ssar;

	ssar = vmcs_read(VMCS_GUEST_SS_ACCESS_RIGHTS);
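	/*
	 * DPL is in bits 6:5 of the access rights field.  For example, the
	 * typical kernel stack segment access rights value 0x93 yields
	 * CPL 0, while the user-mode value 0xf3 yields CPL 3.
	 */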
	return ((ssar >> 5) & 0x3);
}

static enum vm_cpu_mode
vmx_cpu_mode(void)
{
	uint32_t csar;

	if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LMA) {
		csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS);
		if (csar & 0x2000)
			return (CPU_MODE_64BIT);	/* CS.L = 1 */
		else
			return (CPU_MODE_COMPATIBILITY);
	} else if (vmcs_read(VMCS_GUEST_CR0) & CR0_PE) {
		return (CPU_MODE_PROTECTED);
	} else {
		return (CPU_MODE_REAL);
	}
}

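/*
 * The guest paging mode is derived from CR0.PG, CR4.PAE and EFER.LME as
 * read from the VMCS:
 *
 *	PG=0			-> PAGING_MODE_FLAT
 *	PG=1, PAE=0		-> PAGING_MODE_32
 *	PG=1, PAE=1, LME=0	-> PAGING_MODE_PAE
 *	PG=1, PAE=1, LME=1	-> PAGING_MODE_64
 */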
static enum vm_paging_mode
vmx_paging_mode(void)
{

	if (!(vmcs_read(VMCS_GUEST_CR0) & CR0_PG))
		return (PAGING_MODE_FLAT);
	if (!(vmcs_read(VMCS_GUEST_CR4) & CR4_PAE))
		return (PAGING_MODE_32);
	if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LME)
		return (PAGING_MODE_64);
	else
		return (PAGING_MODE_PAE);
}

static uint64_t
inout_str_index(struct vmx *vmx, int vcpuid, int in)
{
	uint64_t val;
	int error;
	enum vm_reg_name reg;

	reg = in ? VM_REG_GUEST_RDI : VM_REG_GUEST_RSI;
	error = vmx_getreg(vmx, vcpuid, reg, &val);
	KASSERT(error == 0, ("%s: vmx_getreg error %d", __func__, error));
	return (val);
}

static uint64_t
inout_str_count(struct vmx *vmx, int vcpuid, int rep)
{
	uint64_t val;
	int error;

	if (rep) {
		error = vmx_getreg(vmx, vcpuid, VM_REG_GUEST_RCX, &val);
		KASSERT(!error, ("%s: vmx_getreg error %d", __func__, error));
	} else {
		val = 1;
	}
	return (val);
}

static int
inout_str_addrsize(uint32_t inst_info)
{
	uint32_t size;

	size = (inst_info >> 7) & 0x7;
	switch (size) {
	case 0:
		return (2);	/* 16 bit */
	case 1:
		return (4);	/* 32 bit */
	case 2:
		return (8);	/* 64 bit */
	default:
		panic("%s: invalid size encoding %d", __func__, size);
	}
}

static void
inout_str_seginfo(struct vmx *vmx, int vcpuid, uint32_t inst_info, int in,
    struct vm_inout_str *vis)
{
	int error, s;

	if (in) {
		vis->seg_name = VM_REG_GUEST_ES;
	} else {
		s = (inst_info >> 15) & 0x7;
		vis->seg_name = vm_segment_name(s);
	}

	error = vmx_getdesc(vmx, vcpuid, vis->seg_name, &vis->seg_desc);
	KASSERT(error == 0, ("%s: vmx_getdesc error %d", __func__, error));
}

static void
vmx_paging_info(struct vm_guest_paging *paging)
{
	paging->cr3 = vmcs_guest_cr3();
	paging->cpl = vmx_cpl();
	paging->cpu_mode = vmx_cpu_mode();
	paging->paging_mode = vmx_paging_mode();
}

static void
vmexit_inst_emul(struct vm_exit *vmexit, uint64_t gpa, uint64_t gla)
{
	struct vm_guest_paging *paging;
	uint32_t csar;

	paging = &vmexit->u.inst_emul.paging;

	vmexit->exitcode = VM_EXITCODE_INST_EMUL;
	vmexit->u.inst_emul.gpa = gpa;
	vmexit->u.inst_emul.gla = gla;
	vmx_paging_info(paging);
	switch (paging->cpu_mode) {
	case CPU_MODE_PROTECTED:
	case CPU_MODE_COMPATIBILITY:
		csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS);
		vmexit->u.inst_emul.cs_d = SEG_DESC_DEF32(csar);
		break;
	default:
		vmexit->u.inst_emul.cs_d = 0;
		break;
	}
	vie_init(&vmexit->u.inst_emul.vie, NULL, 0);
}

static int
ept_fault_type(uint64_t ept_qual)
{
	int fault_type;

	if (ept_qual & EPT_VIOLATION_DATA_WRITE)
		fault_type = VM_PROT_WRITE;
	else if (ept_qual & EPT_VIOLATION_INST_FETCH)
		fault_type = VM_PROT_EXECUTE;
	else
		fault_type = VM_PROT_READ;

	return (fault_type);
}

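/*
 * Example readings of the EPT violation exit qualification as interpreted
 * by ept_fault_type() above and ept_emulation_fault() below:
 *
 *	EPT_VIOLATION_DATA_WRITE | EPT_VIOLATION_GLA_VALID |
 *	EPT_VIOLATION_XLAT_VALID
 *		-> emulation fault, fault type VM_PROT_WRITE
 *	EPT_VIOLATION_DATA_READ | EPT_VIOLATION_GLA_VALID |
 *	EPT_VIOLATION_XLAT_VALID
 *		-> emulation fault, fault type VM_PROT_READ
 *	EPT_VIOLATION_INST_FETCH, or either of the *_VALID bits clear
 *		-> not treated as an instruction-emulation fault
 */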
1810ept_emulation_fault(uint64_t ept_qual)
1811{
1812	int read, write;
1813
1814	/* EPT fault on an instruction fetch doesn't make sense here */
1815	if (ept_qual & EPT_VIOLATION_INST_FETCH)
1816		return (FALSE);
1817
1818	/* EPT fault must be a read fault or a write fault */
1819	read = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0;
1820	write = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0;
1821	if ((read | write) == 0)
1822		return (FALSE);
1823
1824	/*
1825	 * The EPT violation must have been caused by accessing a
1826	 * guest-physical address that is a translation of a guest-linear
1827	 * address.
1828	 */
1829	if ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 ||
1830	    (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) {
1831		return (FALSE);
1832	}
1833
1834	return (TRUE);
1835}
1836
1837static __inline int
1838apic_access_virtualization(struct vmx *vmx, int vcpuid)
1839{
1840	uint32_t proc_ctls2;
1841
1842	proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
1843	return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) ? 1 : 0);
1844}
1845
1846static __inline int
1847x2apic_virtualization(struct vmx *vmx, int vcpuid)
1848{
1849	uint32_t proc_ctls2;
1850
1851	proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
1852	return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_X2APIC_MODE) ? 1 : 0);
1853}
1854
1855static int
1856vmx_handle_apic_write(struct vmx *vmx, int vcpuid, struct vlapic *vlapic,
1857    uint64_t qual)
1858{
1859	int error, handled, offset;
1860	uint32_t *apic_regs, vector;
1861	bool retu;
1862
1863	handled = HANDLED;
1864	offset = APIC_WRITE_OFFSET(qual);
1865
1866	if (!apic_access_virtualization(vmx, vcpuid)) {
1867		/*
1868		 * In general there should not be any APIC write VM-exits
1869		 * unless APIC-access virtualization is enabled.
1870		 *
1871		 * However, self-IPI virtualization can legitimately trigger
1872		 * an APIC-write VM-exit, so treat it specially.
1873		 */
1874		if (x2apic_virtualization(vmx, vcpuid) &&
1875		    offset == APIC_OFFSET_SELF_IPI) {
1876			apic_regs = (uint32_t *)(vlapic->apic_page);
1877			vector = apic_regs[APIC_OFFSET_SELF_IPI / 4];
1878			vlapic_self_ipi_handler(vlapic, vector);
1879			return (HANDLED);
1880		} else
1881			return (UNHANDLED);
1882	}
1883
1884	switch (offset) {
1885	case APIC_OFFSET_ID:
1886		vlapic_id_write_handler(vlapic);
1887		break;
1888	case APIC_OFFSET_LDR:
1889		vlapic_ldr_write_handler(vlapic);
1890		break;
1891	case APIC_OFFSET_DFR:
1892		vlapic_dfr_write_handler(vlapic);
1893		break;
1894	case APIC_OFFSET_SVR:
1895		vlapic_svr_write_handler(vlapic);
1896		break;
1897	case APIC_OFFSET_ESR:
1898		vlapic_esr_write_handler(vlapic);
1899		break;
1900	case APIC_OFFSET_ICR_LOW:
1901		retu = false;
1902		error = vlapic_icrlo_write_handler(vlapic, &retu);
1903		if (error != 0 || retu)
1904			handled = UNHANDLED;
1905		break;
1906	case APIC_OFFSET_CMCI_LVT:
1907	case APIC_OFFSET_TIMER_LVT ... APIC_OFFSET_ERROR_LVT:
1908		vlapic_lvt_write_handler(vlapic, offset);
1909		break;
1910	case APIC_OFFSET_TIMER_ICR:
1911		vlapic_icrtmr_write_handler(vlapic);
1912		break;
1913	case APIC_OFFSET_TIMER_DCR:
1914		vlapic_dcr_write_handler(vlapic);
1915		break;
1916	default:
1917		handled = UNHANDLED;
1918		break;
1919	}
1920	return (handled);
1921}
1922
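/*
 * Return true if 'gpa' falls within the page reserved for the local APIC
 * and APIC-access virtualization is enabled for this vcpu.
 */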
1923static bool
1924apic_access_fault(struct vmx *vmx, int vcpuid, uint64_t gpa)
1925{
1926
1927	if (apic_access_virtualization(vmx, vcpuid) &&
1928	    (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE))
1929		return (true);
1930	else
1931		return (false);
1932}
1933
1934static int
1935vmx_handle_apic_access(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit)
1936{
1937	uint64_t qual;
1938	int access_type, offset, allowed;
1939
1940	if (!apic_access_virtualization(vmx, vcpuid))
1941		return (UNHANDLED);
1942
1943	qual = vmexit->u.vmx.exit_qualification;
1944	access_type = APIC_ACCESS_TYPE(qual);
1945	offset = APIC_ACCESS_OFFSET(qual);
1946
1947	allowed = 0;
1948	if (access_type == 0) {
1949		/*
1950		 * Read data access to the following registers is expected.
1951		 */
1952		switch (offset) {
1953		case APIC_OFFSET_APR:
1954		case APIC_OFFSET_PPR:
1955		case APIC_OFFSET_RRR:
1956		case APIC_OFFSET_CMCI_LVT:
1957		case APIC_OFFSET_TIMER_CCR:
1958			allowed = 1;
1959			break;
1960		default:
1961			break;
1962		}
1963	} else if (access_type == 1) {
1964		/*
1965		 * Write data access to the following registers is expected.
1966		 */
1967		switch (offset) {
1968		case APIC_OFFSET_VER:
1969		case APIC_OFFSET_APR:
1970		case APIC_OFFSET_PPR:
1971		case APIC_OFFSET_RRR:
1972		case APIC_OFFSET_ISR0 ... APIC_OFFSET_ISR7:
1973		case APIC_OFFSET_TMR0 ... APIC_OFFSET_TMR7:
1974		case APIC_OFFSET_IRR0 ... APIC_OFFSET_IRR7:
1975		case APIC_OFFSET_CMCI_LVT:
1976		case APIC_OFFSET_TIMER_CCR:
1977			allowed = 1;
1978			break;
1979		default:
1980			break;
1981		}
1982	}
1983
1984	if (allowed) {
1985		vmexit_inst_emul(vmexit, DEFAULT_APIC_BASE + offset,
1986		    VIE_INVALID_GLA);
1987	}
1988
1989	/*
1990	 * Regardless of whether the APIC-access is allowed, this handler
1991	 * always returns UNHANDLED:
1992	 * - if the access is allowed then it is handled by emulating the
1993	 *   instruction that caused the VM-exit (outside the critical section)
1994	 * - if the access is not allowed then it will be converted to an
1995	 *   exitcode of VM_EXITCODE_VMX and will be dealt with in userland.
1996	 */
1997	return (UNHANDLED);
1998}
1999
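/*
 * The source of the task switch is encoded in bits 31:30 of the exit
 * qualification.
 */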
2000static enum task_switch_reason
2001vmx_task_switch_reason(uint64_t qual)
2002{
2003	int reason;
2004
2005	reason = (qual >> 30) & 0x3;
2006	switch (reason) {
2007	case 0:
2008		return (TSR_CALL);
2009	case 1:
2010		return (TSR_IRET);
2011	case 2:
2012		return (TSR_JMP);
2013	case 3:
2014		return (TSR_IDT_GATE);
2015	default:
2016		panic("%s: invalid reason %d", __func__, reason);
2017	}
2018}
2019
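/*
 * MSRs in the local APIC range are emulated by the vlapic code; all other
 * MSR accesses go through the VMX-specific MSR emulation.
 */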
2020static int
2021emulate_wrmsr(struct vmx *vmx, int vcpuid, u_int num, uint64_t val, bool *retu)
2022{
2023	int error;
2024
2025	if (lapic_msr(num))
2026		error = lapic_wrmsr(vmx->vm, vcpuid, num, val, retu);
2027	else
2028		error = vmx_wrmsr(vmx, vcpuid, num, val, retu);
2029
2030	return (error);
2031}
2032
2033static int
2034emulate_rdmsr(struct vmx *vmx, int vcpuid, u_int num, bool *retu)
2035{
2036	struct vmxctx *vmxctx;
2037	uint64_t result;
2038	uint32_t eax, edx;
2039	int error;
2040
2041	if (lapic_msr(num))
2042		error = lapic_rdmsr(vmx->vm, vcpuid, num, &result, retu);
2043	else
2044		error = vmx_rdmsr(vmx, vcpuid, num, &result, retu);
2045
2046	if (error == 0) {
2047		eax = result;
2048		vmxctx = &vmx->ctx[vcpuid];
2049		error = vmxctx_setreg(vmxctx, VM_REG_GUEST_RAX, eax);
2050		KASSERT(error == 0, ("vmxctx_setreg(rax) error %d", error));
2051
2052		edx = result >> 32;
2053		error = vmxctx_setreg(vmxctx, VM_REG_GUEST_RDX, edx);
2054		KASSERT(error == 0, ("vmxctx_setreg(rdx) error %d", error));
2055	}
2056
2057	return (error);
2058}
2059
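/*
 * Process a VM-exit. Returns HANDLED if the exit was disposed of in the
 * kernel and guest execution can resume immediately, or UNHANDLED if the
 * exit described by 'vmexit' must be completed in userland.
 */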
2060static int
2061vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
2062{
2063	int error, handled, in;
2064	struct vmxctx *vmxctx;
2065	struct vlapic *vlapic;
2066	struct vm_inout_str *vis;
2067	struct vm_task_switch *ts;
2068	struct vm_exception vmexc;
2069	uint32_t eax, ecx, edx, idtvec_info, idtvec_err, intr_info, inst_info;
2070	uint32_t intr_type, intr_vec, reason;
2071	uint64_t exitintinfo, qual, gpa;
2072	bool retu;
2073
2074	CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_VIRTUAL_NMI) != 0);
2075	CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_NMI_EXITING) != 0);
2076
2077	handled = UNHANDLED;
2078	vmxctx = &vmx->ctx[vcpu];
2079
2080	qual = vmexit->u.vmx.exit_qualification;
2081	reason = vmexit->u.vmx.exit_reason;
2082	vmexit->exitcode = VM_EXITCODE_BOGUS;
2083
2084	vmm_stat_incr(vmx->vm, vcpu, VMEXIT_COUNT, 1);
2085
2086	/*
2087	 * VM-entry failures during or after loading guest state.
2088	 *
2089	 * These VM-exits are uncommon but must be handled specially
2090	 * as most VM-exit fields are not populated as usual.
2091	 */
2092	if (__predict_false(reason == EXIT_REASON_MCE_DURING_ENTRY)) {
2093		VCPU_CTR0(vmx->vm, vcpu, "Handling MCE during VM-entry");
2094		__asm __volatile("int $18");
2095		return (1);
2096	}
2097
2098	/*
2099	 * VM exits that can be triggered during event delivery need to
2100	 * be handled specially by re-injecting the event if the IDT
2101	 * vectoring information field's valid bit is set.
2102	 *
2103	 * See "Information for VM Exits During Event Delivery" in Intel SDM
2104	 * for details.
2105	 */
2106	idtvec_info = vmcs_idt_vectoring_info();
2107	if (idtvec_info & VMCS_IDT_VEC_VALID) {
2108		idtvec_info &= ~(1 << 12); /* clear undefined bit */
2109		exitintinfo = idtvec_info;
2110		if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) {
2111			idtvec_err = vmcs_idt_vectoring_err();
2112			exitintinfo |= (uint64_t)idtvec_err << 32;
2113		}
2114		error = vm_exit_intinfo(vmx->vm, vcpu, exitintinfo);
2115		KASSERT(error == 0, ("%s: vm_set_intinfo error %d",
2116		    __func__, error));
2117
2118		/*
2119		 * If 'virtual NMIs' are being used and the VM-exit
2120		 * happened while injecting an NMI during the previous
2121		 * VM-entry, then clear "blocking by NMI" in the
2122		 * Guest Interruptibility-State so the NMI can be
2123		 * reinjected on the subsequent VM-entry.
2124		 *
2125		 * However, if the NMI was being delivered through a task
2126		 * gate, then the new task must start execution with NMIs
2127		 * blocked, so don't clear NMI blocking in this case.
2128		 */
2129		intr_type = idtvec_info & VMCS_INTR_T_MASK;
2130		if (intr_type == VMCS_INTR_T_NMI) {
2131			if (reason != EXIT_REASON_TASK_SWITCH)
2132				vmx_clear_nmi_blocking(vmx, vcpu);
2133			else
2134				vmx_assert_nmi_blocking(vmx, vcpu);
2135		}
2136
2137		/*
2138		 * Update VM-entry instruction length if the event being
2139		 * delivered was a software interrupt or software exception.
2140		 */
2141		if (intr_type == VMCS_INTR_T_SWINTR ||
2142		    intr_type == VMCS_INTR_T_PRIV_SWEXCEPTION ||
2143		    intr_type == VMCS_INTR_T_SWEXCEPTION) {
2144			vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length);
2145		}
2146	}
2147
2148	switch (reason) {
2149	case EXIT_REASON_TASK_SWITCH:
2150		ts = &vmexit->u.task_switch;
2151		ts->tsssel = qual & 0xffff;
2152		ts->reason = vmx_task_switch_reason(qual);
2153		ts->ext = 0;
2154		ts->errcode_valid = 0;
2155		vmx_paging_info(&ts->paging);
2156		/*
2157		 * If the task switch was due to a CALL, JMP, IRET, software
2158		 * interrupt (INT n) or software exception (INT3, INTO),
2159		 * then the saved %rip references the instruction that caused
2160		 * the task switch. The instruction length field in the VMCS
2161		 * is valid in this case.
2162		 *
2163		 * In all other cases (e.g., NMI, hardware exception) the
2164		 * saved %rip is one that would have been saved in the old TSS
2165		 * had the task switch completed normally so the instruction
2166		 * length field is not needed in this case and is explicitly
2167		 * set to 0.
2168		 */
2169		if (ts->reason == TSR_IDT_GATE) {
2170			KASSERT(idtvec_info & VMCS_IDT_VEC_VALID,
2171			    ("invalid idtvec_info %#x for IDT task switch",
2172			    idtvec_info));
2173			intr_type = idtvec_info & VMCS_INTR_T_MASK;
2174			if (intr_type != VMCS_INTR_T_SWINTR &&
2175			    intr_type != VMCS_INTR_T_SWEXCEPTION &&
2176			    intr_type != VMCS_INTR_T_PRIV_SWEXCEPTION) {
2177				/* Task switch triggered by external event */
2178				ts->ext = 1;
2179				vmexit->inst_length = 0;
2180				if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) {
2181					ts->errcode_valid = 1;
2182					ts->errcode = vmcs_idt_vectoring_err();
2183				}
2184			}
2185		}
2186		vmexit->exitcode = VM_EXITCODE_TASK_SWITCH;
2187		VCPU_CTR4(vmx->vm, vcpu, "task switch reason %d, tss 0x%04x, "
2188		    "%s errcode 0x%016lx", ts->reason, ts->tsssel,
2189		    ts->ext ? "external" : "internal",
2190		    ((uint64_t)ts->errcode << 32) | ts->errcode_valid);
2191		break;
2192	case EXIT_REASON_CR_ACCESS:
2193		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CR_ACCESS, 1);
2194		switch (qual & 0xf) {
2195		case 0:
2196			handled = vmx_emulate_cr0_access(vmx, vcpu, qual);
2197			break;
2198		case 4:
2199			handled = vmx_emulate_cr4_access(vmx, vcpu, qual);
2200			break;
2201		case 8:
2202			handled = vmx_emulate_cr8_access(vmx, vcpu, qual);
2203			break;
2204		}
2205		break;
2206	case EXIT_REASON_RDMSR:
2207		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_RDMSR, 1);
2208		retu = false;
2209		ecx = vmxctx->guest_rcx;
2210		VCPU_CTR1(vmx->vm, vcpu, "rdmsr 0x%08x", ecx);
2211		error = emulate_rdmsr(vmx, vcpu, ecx, &retu);
2212		if (error) {
2213			vmexit->exitcode = VM_EXITCODE_RDMSR;
2214			vmexit->u.msr.code = ecx;
2215		} else if (!retu) {
2216			handled = HANDLED;
2217		} else {
2218			/* Return to userspace with a valid exitcode */
2219			KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
2220			    ("emulate_rdmsr retu with bogus exitcode"));
2221		}
2222		break;
2223	case EXIT_REASON_WRMSR:
2224		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_WRMSR, 1);
2225		retu = false;
2226		eax = vmxctx->guest_rax;
2227		ecx = vmxctx->guest_rcx;
2228		edx = vmxctx->guest_rdx;
2229		VCPU_CTR2(vmx->vm, vcpu, "wrmsr 0x%08x value 0x%016lx",
2230		    ecx, (uint64_t)edx << 32 | eax);
2231		error = emulate_wrmsr(vmx, vcpu, ecx,
2232		    (uint64_t)edx << 32 | eax, &retu);
2233		if (error) {
2234			vmexit->exitcode = VM_EXITCODE_WRMSR;
2235			vmexit->u.msr.code = ecx;
2236			vmexit->u.msr.wval = (uint64_t)edx << 32 | eax;
2237		} else if (!retu) {
2238			handled = HANDLED;
2239		} else {
2240			/* Return to userspace with a valid exitcode */
2241			KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
2242			    ("emulate_wrmsr retu with bogus exitcode"));
2243		}
2244		break;
2245	case EXIT_REASON_HLT:
2246		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_HLT, 1);
2247		vmexit->exitcode = VM_EXITCODE_HLT;
2248		vmexit->u.hlt.rflags = vmcs_read(VMCS_GUEST_RFLAGS);
2249		break;
2250	case EXIT_REASON_MTF:
2251		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MTRAP, 1);
2252		vmexit->exitcode = VM_EXITCODE_MTRAP;
2253		break;
2254	case EXIT_REASON_PAUSE:
2255		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_PAUSE, 1);
2256		vmexit->exitcode = VM_EXITCODE_PAUSE;
2257		break;
2258	case EXIT_REASON_INTR_WINDOW:
2259		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INTR_WINDOW, 1);
2260		vmx_clear_int_window_exiting(vmx, vcpu);
2261		return (1);
2262	case EXIT_REASON_EXT_INTR:
2263		/*
2264		 * External interrupts serve only to cause VM exits and allow
2265		 * the host interrupt handler to run.
2266		 *
2267		 * If this external interrupt triggers a virtual interrupt
2268		 * to a VM, then that state will be recorded by the
2269		 * host interrupt handler in the VM's softc. We will inject
2270		 * this virtual interrupt during the subsequent VM enter.
2271		 */
2272		intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
2273
2274		/*
2275		 * XXX: Ignore this exit if VMCS_INTR_VALID is not set.
2276		 * This appears to be a bug in VMware Fusion.
2277		 */
2278		if (!(intr_info & VMCS_INTR_VALID))
2279			return (1);
2280		KASSERT((intr_info & VMCS_INTR_VALID) != 0 &&
2281		    (intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_HWINTR,
2282		    ("VM exit interruption info invalid: %#x", intr_info));
2283		vmx_trigger_hostintr(intr_info & 0xff);
2284
2285		/*
2286		 * This is special. We want to treat this as a 'handled'
2287		 * VM-exit but not increment the instruction pointer.
2288		 */
2289		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXTINT, 1);
2290		return (1);
2291	case EXIT_REASON_NMI_WINDOW:
2292		/* Exit to allow the pending virtual NMI to be injected */
2293		if (vm_nmi_pending(vmx->vm, vcpu))
2294			vmx_inject_nmi(vmx, vcpu);
2295		vmx_clear_nmi_window_exiting(vmx, vcpu);
2296		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NMI_WINDOW, 1);
2297		return (1);
2298	case EXIT_REASON_INOUT:
2299		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INOUT, 1);
2300		vmexit->exitcode = VM_EXITCODE_INOUT;
2301		vmexit->u.inout.bytes = (qual & 0x7) + 1;
2302		vmexit->u.inout.in = in = (qual & 0x8) ? 1 : 0;
2303		vmexit->u.inout.string = (qual & 0x10) ? 1 : 0;
2304		vmexit->u.inout.rep = (qual & 0x20) ? 1 : 0;
2305		vmexit->u.inout.port = (uint16_t)(qual >> 16);
2306		vmexit->u.inout.eax = (uint32_t)(vmxctx->guest_rax);
2307		if (vmexit->u.inout.string) {
2308			inst_info = vmcs_read(VMCS_EXIT_INSTRUCTION_INFO);
2309			vmexit->exitcode = VM_EXITCODE_INOUT_STR;
2310			vis = &vmexit->u.inout_str;
2311			vmx_paging_info(&vis->paging);
2312			vis->rflags = vmcs_read(VMCS_GUEST_RFLAGS);
2313			vis->cr0 = vmcs_read(VMCS_GUEST_CR0);
2314			vis->index = inout_str_index(vmx, vcpu, in);
2315			vis->count = inout_str_count(vmx, vcpu, vis->inout.rep);
2316			vis->addrsize = inout_str_addrsize(inst_info);
2317			inout_str_seginfo(vmx, vcpu, inst_info, in, vis);
2318		}
2319		break;
2320	case EXIT_REASON_CPUID:
2321		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CPUID, 1);
2322		handled = vmx_handle_cpuid(vmx->vm, vcpu, vmxctx);
2323		break;
2324	case EXIT_REASON_EXCEPTION:
2325		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXCEPTION, 1);
2326		intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
2327		KASSERT((intr_info & VMCS_INTR_VALID) != 0,
2328		    ("VM exit interruption info invalid: %#x", intr_info));
2329
2330		intr_vec = intr_info & 0xff;
2331		intr_type = intr_info & VMCS_INTR_T_MASK;
2332
2333		/*
2334		 * If Virtual NMIs control is 1 and the VM-exit is due to a
2335		 * fault encountered during the execution of IRET then we must
2336		 * restore the state of "virtual-NMI blocking" before resuming
2337		 * the guest.
2338		 *
2339		 * See "Resuming Guest Software after Handling an Exception".
2340		 * See "Information for VM Exits Due to Vectored Events".
2341		 */
2342		if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 &&
2343		    (intr_vec != IDT_DF) &&
2344		    (intr_info & EXIT_QUAL_NMIUDTI) != 0)
2345			vmx_restore_nmi_blocking(vmx, vcpu);
2346
2347		/*
2348		 * The NMI has already been handled in vmx_exit_handle_nmi().
2349		 */
2350		if (intr_type == VMCS_INTR_T_NMI)
2351			return (1);
2352
2353		/*
2354		 * Call the machine check handler by hand. Also don't reflect
2355		 * the machine check back into the guest.
2356		 */
2357		if (intr_vec == IDT_MC) {
2358			VCPU_CTR0(vmx->vm, vcpu, "Vectoring to MCE handler");
2359			__asm __volatile("int $18");
2360			return (1);
2361		}
2362
2363		if (intr_vec == IDT_PF) {
2364			error = vmxctx_setreg(vmxctx, VM_REG_GUEST_CR2, qual);
2365			KASSERT(error == 0, ("%s: vmxctx_setreg(cr2) error %d",
2366			    __func__, error));
2367		}
2368
2369		/*
2370		 * Software exceptions exhibit trap-like behavior. This in
2371		 * turn requires populating the VM-entry instruction length
2372		 * so that the %rip in the trap frame is past the INT3/INTO
2373		 * instruction.
2374		 */
2375		if (intr_type == VMCS_INTR_T_SWEXCEPTION)
2376			vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length);
2377
2378		/* Reflect all other exceptions back into the guest */
2379		bzero(&vmexc, sizeof(struct vm_exception));
2380		vmexc.vector = intr_vec;
2381		if (intr_info & VMCS_INTR_DEL_ERRCODE) {
2382			vmexc.error_code_valid = 1;
2383			vmexc.error_code = vmcs_read(VMCS_EXIT_INTR_ERRCODE);
2384		}
2385		VCPU_CTR2(vmx->vm, vcpu, "Reflecting exception %d/%#x into "
2386		    "the guest", vmexc.vector, vmexc.error_code);
2387		error = vm_inject_exception(vmx->vm, vcpu, &vmexc);
2388		KASSERT(error == 0, ("%s: vm_inject_exception error %d",
2389		    __func__, error));
2390		return (1);
2391
2392	case EXIT_REASON_EPT_FAULT:
2393		/*
2394		 * If 'gpa' lies within the address space allocated to guest
2395		 * memory then this must be a nested page fault; otherwise it
2396		 * must be an instruction that accesses MMIO space.
2397		 */
2398		gpa = vmcs_gpa();
2399		if (vm_mem_allocated(vmx->vm, gpa) ||
2400		    apic_access_fault(vmx, vcpu, gpa)) {
2401			vmexit->exitcode = VM_EXITCODE_PAGING;
2402			vmexit->u.paging.gpa = gpa;
2403			vmexit->u.paging.fault_type = ept_fault_type(qual);
2404			vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NESTED_FAULT, 1);
2405		} else if (ept_emulation_fault(qual)) {
2406			vmexit_inst_emul(vmexit, gpa, vmcs_gla());
2407			vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INST_EMUL, 1);
2408		}
2409		/*
2410		 * If Virtual NMIs control is 1 and the VM-exit is due to an
2411		 * EPT fault during the execution of IRET then we must restore
2412		 * the state of "virtual-NMI blocking" before resuming.
2413		 *
2414		 * See description of "NMI unblocking due to IRET" in
2415		 * "Exit Qualification for EPT Violations".
2416		 */
2417		if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 &&
2418		    (qual & EXIT_QUAL_NMIUDTI) != 0)
2419			vmx_restore_nmi_blocking(vmx, vcpu);
2420		break;
2421	case EXIT_REASON_VIRTUALIZED_EOI:
2422		vmexit->exitcode = VM_EXITCODE_IOAPIC_EOI;
2423		vmexit->u.ioapic_eoi.vector = qual & 0xFF;
2424		vmexit->inst_length = 0;	/* trap-like */
2425		break;
2426	case EXIT_REASON_APIC_ACCESS:
2427		handled = vmx_handle_apic_access(vmx, vcpu, vmexit);
2428		break;
2429	case EXIT_REASON_APIC_WRITE:
2430		/*
2431		 * APIC-write VM exit is trap-like so the %rip is already
2432		 * pointing to the next instruction.
2433		 */
2434		vmexit->inst_length = 0;
2435		vlapic = vm_lapic(vmx->vm, vcpu);
2436		handled = vmx_handle_apic_write(vmx, vcpu, vlapic, qual);
2437		break;
2438	case EXIT_REASON_XSETBV:
2439		handled = vmx_emulate_xsetbv(vmx, vcpu, vmexit);
2440		break;
2441	case EXIT_REASON_MONITOR:
2442		vmexit->exitcode = VM_EXITCODE_MONITOR;
2443		break;
2444	case EXIT_REASON_MWAIT:
2445		vmexit->exitcode = VM_EXITCODE_MWAIT;
2446		break;
2447	default:
2448		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_UNKNOWN, 1);
2449		break;
2450	}
2451
2452	if (handled) {
2453		/*
2454		 * It is possible that control is returned to userland
2455		 * even though we were able to handle the VM exit in the
2456		 * kernel.
2457		 *
2458		 * In such a case we want to make sure that the userland
2459		 * restarts guest execution at the instruction *after*
2460		 * the one we just processed. Therefore we update the
2461		 * guest rip in the VMCS and in 'vmexit'.
2462		 */
2463		vmexit->rip += vmexit->inst_length;
2464		vmexit->inst_length = 0;
2465		vmcs_write(VMCS_GUEST_RIP, vmexit->rip);
2466	} else {
2467		if (vmexit->exitcode == VM_EXITCODE_BOGUS) {
2468			/*
2469			 * If this VM exit was not claimed by anybody then
2470			 * treat it as a generic VMX exit.
2471			 */
2472			vmexit->exitcode = VM_EXITCODE_VMX;
2473			vmexit->u.vmx.status = VM_SUCCESS;
2474			vmexit->u.vmx.inst_type = 0;
2475			vmexit->u.vmx.inst_error = 0;
2476		} else {
2477			/*
2478			 * The exitcode and collateral have been populated.
2479			 * The VM exit will be processed further in userland.
2480			 */
2481		}
2482	}
2483	return (handled);
2484}
2485
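/*
 * Convert a failed VMLAUNCH, VMRESUME or INVEPT into a VM_EXITCODE_VMX
 * exit that carries the VM-instruction error for reporting to userland.
 */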
2486static __inline void
2487vmx_exit_inst_error(struct vmxctx *vmxctx, int rc, struct vm_exit *vmexit)
2488{
2489
2490	KASSERT(vmxctx->inst_fail_status != VM_SUCCESS,
2491	    ("vmx_exit_inst_error: invalid inst_fail_status %d",
2492	    vmxctx->inst_fail_status));
2493
2494	vmexit->inst_length = 0;
2495	vmexit->exitcode = VM_EXITCODE_VMX;
2496	vmexit->u.vmx.status = vmxctx->inst_fail_status;
2497	vmexit->u.vmx.inst_error = vmcs_instruction_error();
2498	vmexit->u.vmx.exit_reason = ~0;
2499	vmexit->u.vmx.exit_qualification = ~0;
2500
2501	switch (rc) {
2502	case VMX_VMRESUME_ERROR:
2503	case VMX_VMLAUNCH_ERROR:
2504	case VMX_INVEPT_ERROR:
2505		vmexit->u.vmx.inst_type = rc;
2506		break;
2507	default:
2508		panic("vm_exit_inst_error: vmx_enter_guest returned %d", rc);
2509	}
2510}
2511
2512/*
2513 * If the NMI-exiting VM execution control is set to '1' then an NMI in
2514 * non-root operation causes a VM-exit. NMI blocking is in effect, so it is
2515 * sufficient to simply vector to the NMI handler via a software interrupt.
2516 * However, this must be done before maskable interrupts are enabled;
2517 * otherwise the "iret" issued by an interrupt handler will incorrectly
2518 * clear NMI blocking.
2519 */
2520static __inline void
2521vmx_exit_handle_nmi(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit)
2522{
2523	uint32_t intr_info;
2524
2525	KASSERT((read_rflags() & PSL_I) == 0, ("interrupts enabled"));
2526
2527	if (vmexit->u.vmx.exit_reason != EXIT_REASON_EXCEPTION)
2528		return;
2529
2530	intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
2531	KASSERT((intr_info & VMCS_INTR_VALID) != 0,
2532	    ("VM exit interruption info invalid: %#x", intr_info));
2533
2534	if ((intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_NMI) {
2535		KASSERT((intr_info & 0xff) == IDT_NMI, ("VM exit due "
2536		    "to NMI has invalid vector: %#x", intr_info));
2537		VCPU_CTR0(vmx->vm, vcpuid, "Vectoring to NMI handler");
2538		__asm __volatile("int $2");
2539	}
2540}
2541
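/*
 * Run a vcpu: load its VMCS and guest MSR state, then loop injecting
 * pending events, entering the guest and processing VM-exits until an
 * exit must be completed in userland.
 */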
2542static int
2543vmx_run(void *arg, int vcpu, register_t startrip, pmap_t pmap,
2544    void *rendezvous_cookie, void *suspend_cookie)
2545{
2546	int rc, handled, launched;
2547	struct vmx *vmx;
2548	struct vm *vm;
2549	struct vmxctx *vmxctx;
2550	struct vmcs *vmcs;
2551	struct vm_exit *vmexit;
2552	struct vlapic *vlapic;
2553	uint64_t rip;
2554	uint32_t exit_reason;
2555
2556	vmx = arg;
2557	vm = vmx->vm;
2558	vmcs = &vmx->vmcs[vcpu];
2559	vmxctx = &vmx->ctx[vcpu];
2560	vlapic = vm_lapic(vm, vcpu);
2561	vmexit = vm_exitinfo(vm, vcpu);
2562	launched = 0;
2563
2564	KASSERT(vmxctx->pmap == pmap,
2565	    ("pmap %p different than ctx pmap %p", pmap, vmxctx->pmap));
2566
2567	vmx_msr_guest_enter(vmx, vcpu);
2568
2569	VMPTRLD(vmcs);
2570
2571	/*
2572	 * XXX
2573	 * We do this every time because we may set up the virtual machine
2574	 * from a different process than the one that actually runs it.
2575	 *
2576	 * If the life of a virtual machine was spent entirely in the context
2577	 * of a single process we could do this once in vmx_vminit().
2578	 */
2579	vmcs_write(VMCS_HOST_CR3, rcr3());
2580
2581	vmcs_write(VMCS_GUEST_RIP, startrip);
2582	vmx_set_pcpu_defaults(vmx, vcpu, pmap);
2583	do {
2584		handled = UNHANDLED;
2585
2586		/*
2587		 * Interrupts are disabled from this point on until the
2588		 * guest starts executing. This is done for the following
2589		 * reasons:
2590		 *
2591		 * If an AST is asserted on this thread after the check below,
2592		 * then the IPI_AST notification will not be lost, because it
2593		 * will cause a VM exit due to external interrupt as soon as
2594		 * the guest state is loaded.
2595		 *
2596		 * A posted interrupt after 'vmx_inject_interrupts()' will
2597		 * not be "lost" because it will be held pending in the host
2598		 * APIC because interrupts are disabled. The pending interrupt
2599		 * will be recognized as soon as the guest state is loaded.
2600		 *
2601		 * The same reasoning applies to the IPI generated by
2602		 * pmap_invalidate_ept().
2603		 */
2604		disable_intr();
2605		vmx_inject_interrupts(vmx, vcpu, vlapic);
2606
2607		/*
2608		 * Check for vcpu suspension after injecting events because
2609		 * vmx_inject_interrupts() can suspend the vcpu due to a
2610		 * triple fault.
2611		 */
2612		if (vcpu_suspended(suspend_cookie)) {
2613			enable_intr();
2614			vm_exit_suspended(vmx->vm, vcpu, vmcs_guest_rip());
2615			break;
2616		}
2617
2618		if (vcpu_rendezvous_pending(rendezvous_cookie)) {
2619			enable_intr();
2620			vm_exit_rendezvous(vmx->vm, vcpu, vmcs_guest_rip());
2621			break;
2622		}
2623
2624		if (vcpu_should_yield(vm, vcpu)) {
2625			enable_intr();
2626			vm_exit_astpending(vmx->vm, vcpu, vmcs_guest_rip());
2627			vmx_astpending_trace(vmx, vcpu, vmexit->rip);
2628			handled = HANDLED;
2629			break;
2630		}
2631
2632		vmx_run_trace(vmx, vcpu);
2633		rc = vmx_enter_guest(vmxctx, vmx, launched);
2634
2635		/* Collect some information for VM exit processing */
2636		vmexit->rip = rip = vmcs_guest_rip();
2637		vmexit->inst_length = vmexit_instruction_length();
2638		vmexit->u.vmx.exit_reason = exit_reason = vmcs_exit_reason();
2639		vmexit->u.vmx.exit_qualification = vmcs_exit_qualification();
2640
2641		if (rc == VMX_GUEST_VMEXIT) {
2642			vmx_exit_handle_nmi(vmx, vcpu, vmexit);
2643			enable_intr();
2644			handled = vmx_exit_process(vmx, vcpu, vmexit);
2645		} else {
2646			enable_intr();
2647			vmx_exit_inst_error(vmxctx, rc, vmexit);
2648		}
2649		launched = 1;
2650		vmx_exit_trace(vmx, vcpu, rip, exit_reason, handled);
2651	} while (handled);
2652
2653	/*
2654	 * If a VM exit has been handled then the exitcode must be BOGUS.
2655	 * If a VM exit is not handled then the exitcode must not be BOGUS.
2656	 */
2657	if ((handled && vmexit->exitcode != VM_EXITCODE_BOGUS) ||
2658	    (!handled && vmexit->exitcode == VM_EXITCODE_BOGUS)) {
2659		panic("Mismatch between handled (%d) and exitcode (%d)",
2660		      handled, vmexit->exitcode);
2661	}
2662
2663	if (!handled)
2664		vmm_stat_incr(vm, vcpu, VMEXIT_USERSPACE, 1);
2665
2666	VCPU_CTR1(vm, vcpu, "returning from vmx_run: exitcode %d",
2667	    vmexit->exitcode);
2668
2669	VMCLEAR(vmcs);
2670	vmx_msr_guest_exit(vmx, vcpu);
2671
2672	return (0);
2673}
2674
2675static void
2676vmx_vmcleanup(void *arg)
2677{
2678	int i;
2679	struct vmx *vmx = arg;
2680
2681	if (apic_access_virtualization(vmx, 0))
2682		vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE);
2683
2684	for (i = 0; i < VM_MAXCPU; i++)
2685		vpid_free(vmx->state[i].vpid);
2686
2687	free(vmx, M_VMX);
2688
2689	return;
2690}
2691
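/*
 * Return a pointer to the saved copy of the guest register in 'vmxctx',
 * or NULL if the register is maintained in the VMCS instead.
 */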
2692static register_t *
2693vmxctx_regptr(struct vmxctx *vmxctx, int reg)
2694{
2695
2696	switch (reg) {
2697	case VM_REG_GUEST_RAX:
2698		return (&vmxctx->guest_rax);
2699	case VM_REG_GUEST_RBX:
2700		return (&vmxctx->guest_rbx);
2701	case VM_REG_GUEST_RCX:
2702		return (&vmxctx->guest_rcx);
2703	case VM_REG_GUEST_RDX:
2704		return (&vmxctx->guest_rdx);
2705	case VM_REG_GUEST_RSI:
2706		return (&vmxctx->guest_rsi);
2707	case VM_REG_GUEST_RDI:
2708		return (&vmxctx->guest_rdi);
2709	case VM_REG_GUEST_RBP:
2710		return (&vmxctx->guest_rbp);
2711	case VM_REG_GUEST_R8:
2712		return (&vmxctx->guest_r8);
2713	case VM_REG_GUEST_R9:
2714		return (&vmxctx->guest_r9);
2715	case VM_REG_GUEST_R10:
2716		return (&vmxctx->guest_r10);
2717	case VM_REG_GUEST_R11:
2718		return (&vmxctx->guest_r11);
2719	case VM_REG_GUEST_R12:
2720		return (&vmxctx->guest_r12);
2721	case VM_REG_GUEST_R13:
2722		return (&vmxctx->guest_r13);
2723	case VM_REG_GUEST_R14:
2724		return (&vmxctx->guest_r14);
2725	case VM_REG_GUEST_R15:
2726		return (&vmxctx->guest_r15);
2727	case VM_REG_GUEST_CR2:
2728		return (&vmxctx->guest_cr2);
2729	default:
2730		break;
2731	}
2732	return (NULL);
2733}
2734
2735static int
2736vmxctx_getreg(struct vmxctx *vmxctx, int reg, uint64_t *retval)
2737{
2738	register_t *regp;
2739
2740	if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) {
2741		*retval = *regp;
2742		return (0);
2743	} else
2744		return (EINVAL);
2745}
2746
2747static int
2748vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val)
2749{
2750	register_t *regp;
2751
2752	if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) {
2753		*regp = val;
2754		return (0);
2755	} else
2756		return (EINVAL);
2757}
2758
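/*
 * The interrupt shadow is reported via the guest interruptibility-state
 * field of the VMCS.
 */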
2759static int
2760vmx_get_intr_shadow(struct vmx *vmx, int vcpu, int running, uint64_t *retval)
2761{
2762	uint64_t gi;
2763	int error;
2764
2765	error = vmcs_getreg(&vmx->vmcs[vcpu], running,
2766	    VMCS_IDENT(VMCS_GUEST_INTERRUPTIBILITY), &gi);
2767	*retval = (gi & HWINTR_BLOCKING) ? 1 : 0;
2768	return (error);
2769}
2770
2771static int
2772vmx_modify_intr_shadow(struct vmx *vmx, int vcpu, int running, uint64_t val)
2773{
2774	struct vmcs *vmcs;
2775	uint64_t gi;
2776	int error, ident;
2777
2778	/*
2779	 * Forcing the vcpu into an interrupt shadow is not supported.
2780	 */
2781	if (val) {
2782		error = EINVAL;
2783		goto done;
2784	}
2785
2786	vmcs = &vmx->vmcs[vcpu];
2787	ident = VMCS_IDENT(VMCS_GUEST_INTERRUPTIBILITY);
2788	error = vmcs_getreg(vmcs, running, ident, &gi);
2789	if (error == 0) {
2790		gi &= ~HWINTR_BLOCKING;
2791		error = vmcs_setreg(vmcs, running, ident, gi);
2792	}
2793done:
2794	VCPU_CTR2(vmx->vm, vcpu, "Setting intr_shadow to %#lx %s", val,
2795	    error ? "failed" : "succeeded");
2796	return (error);
2797}
2798
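/*
 * Return the VMCS identifier of the shadow register backing 'reg', or -1
 * if the register does not have a shadow (only %cr0 and %cr4 do).
 */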
2799static int
2800vmx_shadow_reg(int reg)
2801{
2802	int shreg;
2803
2804	shreg = -1;
2805
2806	switch (reg) {
2807	case VM_REG_GUEST_CR0:
2808		shreg = VMCS_CR0_SHADOW;
2809		break;
2810	case VM_REG_GUEST_CR4:
2811		shreg = VMCS_CR4_SHADOW;
2812		break;
2813	default:
2814		break;
2815	}
2816
2817	return (shreg);
2818}
2819
2820static int
2821vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval)
2822{
2823	int running, hostcpu;
2824	struct vmx *vmx = arg;
2825
2826	running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
2827	if (running && hostcpu != curcpu)
2828		panic("vmx_getreg: %s%d is running", vm_name(vmx->vm), vcpu);
2829
2830	if (reg == VM_REG_GUEST_INTR_SHADOW)
2831		return (vmx_get_intr_shadow(vmx, vcpu, running, retval));
2832
2833	if (vmxctx_getreg(&vmx->ctx[vcpu], reg, retval) == 0)
2834		return (0);
2835
2836	return (vmcs_getreg(&vmx->vmcs[vcpu], running, reg, retval));
2837}
2838
2839static int
2840vmx_setreg(void *arg, int vcpu, int reg, uint64_t val)
2841{
2842	int error, hostcpu, running, shadow;
2843	uint64_t ctls;
2844	pmap_t pmap;
2845	struct vmx *vmx = arg;
2846
2847	running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
2848	if (running && hostcpu != curcpu)
2849		panic("vmx_setreg: %s%d is running", vm_name(vmx->vm), vcpu);
2850
2851	if (reg == VM_REG_GUEST_INTR_SHADOW)
2852		return (vmx_modify_intr_shadow(vmx, vcpu, running, val));
2853
2854	if (vmxctx_setreg(&vmx->ctx[vcpu], reg, val) == 0)
2855		return (0);
2856
2857	error = vmcs_setreg(&vmx->vmcs[vcpu], running, reg, val);
2858
2859	if (error == 0) {
2860		/*
2861		 * If the "load EFER" VM-entry control is 1 then the
2862		 * value of EFER.LMA must be identical to the "IA-32e mode guest"
2863		 * bit in the VM-entry controls.
2864		 */
2865		if ((entry_ctls & VM_ENTRY_LOAD_EFER) != 0 &&
2866		    (reg == VM_REG_GUEST_EFER)) {
2867			vmcs_getreg(&vmx->vmcs[vcpu], running,
2868				    VMCS_IDENT(VMCS_ENTRY_CTLS), &ctls);
2869			if (val & EFER_LMA)
2870				ctls |= VM_ENTRY_GUEST_LMA;
2871			else
2872				ctls &= ~VM_ENTRY_GUEST_LMA;
2873			vmcs_setreg(&vmx->vmcs[vcpu], running,
2874				    VMCS_IDENT(VMCS_ENTRY_CTLS), ctls);
2875		}
2876
2877		shadow = vmx_shadow_reg(reg);
2878		if (shadow > 0) {
2879			/*
2880			 * Store the unmodified value in the shadow register.
2881			 */
2882			error = vmcs_setreg(&vmx->vmcs[vcpu], running,
2883				    VMCS_IDENT(shadow), val);
2884		}
2885
2886		if (reg == VM_REG_GUEST_CR3) {
2887			/*
2888			 * Invalidate the guest vcpu's TLB mappings to emulate
2889			 * the behavior of updating %cr3.
2890			 *
2891			 * XXX the processor retains global mappings when %cr3
2892			 * is updated but vmx_invvpid() does not.
2893			 */
2894			pmap = vmx->ctx[vcpu].pmap;
2895			vmx_invvpid(vmx, vcpu, pmap, running);
2896		}
2897	}
2898
2899	return (error);
2900}
2901
2902static int
2903vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
2904{
2905	int hostcpu, running;
2906	struct vmx *vmx = arg;
2907
2908	running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
2909	if (running && hostcpu != curcpu)
2910		panic("vmx_getdesc: %s%d is running", vm_name(vmx->vm), vcpu);
2911
2912	return (vmcs_getdesc(&vmx->vmcs[vcpu], running, reg, desc));
2913}
2914
2915static int
2916vmx_setdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
2917{
2918	int hostcpu, running;
2919	struct vmx *vmx = arg;
2920
2921	running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
2922	if (running && hostcpu != curcpu)
2923		panic("vmx_setdesc: %s%d is running", vm_name(vmx->vm), vcpu);
2924
2925	return (vmcs_setdesc(&vmx->vmcs[vcpu], running, reg, desc));
2926}
2927
2928static int
2929vmx_getcap(void *arg, int vcpu, int type, int *retval)
2930{
2931	struct vmx *vmx = arg;
2932	int vcap;
2933	int ret;
2934
2935	ret = ENOENT;
2936
2937	vcap = vmx->cap[vcpu].set;
2938
2939	switch (type) {
2940	case VM_CAP_HALT_EXIT:
2941		if (cap_halt_exit)
2942			ret = 0;
2943		break;
2944	case VM_CAP_PAUSE_EXIT:
2945		if (cap_pause_exit)
2946			ret = 0;
2947		break;
2948	case VM_CAP_MTRAP_EXIT:
2949		if (cap_monitor_trap)
2950			ret = 0;
2951		break;
2952	case VM_CAP_UNRESTRICTED_GUEST:
2953		if (cap_unrestricted_guest)
2954			ret = 0;
2955		break;
2956	case VM_CAP_ENABLE_INVPCID:
2957		if (cap_invpcid)
2958			ret = 0;
2959		break;
2960	default:
2961		break;
2962	}
2963
2964	if (ret == 0)
2965		*retval = (vcap & (1 << type)) ? 1 : 0;
2966
2967	return (ret);
2968}
2969
2970static int
2971vmx_setcap(void *arg, int vcpu, int type, int val)
2972{
2973	struct vmx *vmx = arg;
2974	struct vmcs *vmcs = &vmx->vmcs[vcpu];
2975	uint32_t baseval;
2976	uint32_t *pptr;
2977	int error;
2978	int flag;
2979	int reg;
2980	int retval;
2981
2982	retval = ENOENT;
2983	pptr = NULL;
2984
2985	switch (type) {
2986	case VM_CAP_HALT_EXIT:
2987		if (cap_halt_exit) {
2988			retval = 0;
2989			pptr = &vmx->cap[vcpu].proc_ctls;
2990			baseval = *pptr;
2991			flag = PROCBASED_HLT_EXITING;
2992			reg = VMCS_PRI_PROC_BASED_CTLS;
2993		}
2994		break;
2995	case VM_CAP_MTRAP_EXIT:
2996		if (cap_monitor_trap) {
2997			retval = 0;
2998			pptr = &vmx->cap[vcpu].proc_ctls;
2999			baseval = *pptr;
3000			flag = PROCBASED_MTF;
3001			reg = VMCS_PRI_PROC_BASED_CTLS;
3002		}
3003		break;
3004	case VM_CAP_PAUSE_EXIT:
3005		if (cap_pause_exit) {
3006			retval = 0;
3007			pptr = &vmx->cap[vcpu].proc_ctls;
3008			baseval = *pptr;
3009			flag = PROCBASED_PAUSE_EXITING;
3010			reg = VMCS_PRI_PROC_BASED_CTLS;
3011		}
3012		break;
3013	case VM_CAP_UNRESTRICTED_GUEST:
3014		if (cap_unrestricted_guest) {
3015			retval = 0;
3016			pptr = &vmx->cap[vcpu].proc_ctls2;
3017			baseval = *pptr;
3018			flag = PROCBASED2_UNRESTRICTED_GUEST;
3019			reg = VMCS_SEC_PROC_BASED_CTLS;
3020		}
3021		break;
3022	case VM_CAP_ENABLE_INVPCID:
3023		if (cap_invpcid) {
3024			retval = 0;
3025			pptr = &vmx->cap[vcpu].proc_ctls2;
3026			baseval = *pptr;
3027			flag = PROCBASED2_ENABLE_INVPCID;
3028			reg = VMCS_SEC_PROC_BASED_CTLS;
3029		}
3030		break;
3031	default:
3032		break;
3033	}
3034
3035	if (retval == 0) {
3036		if (val) {
3037			baseval |= flag;
3038		} else {
3039			baseval &= ~flag;
3040		}
3041		VMPTRLD(vmcs);
3042		error = vmwrite(reg, baseval);
3043		VMCLEAR(vmcs);
3044
3045		if (error) {
3046			retval = error;
3047		} else {
3048			/*
3049			 * Update the optional stored flags and record the
3050			 * new setting.
3051			 */
3052			if (pptr != NULL) {
3053				*pptr = baseval;
3054			}
3055
3056			if (val) {
3057				vmx->cap[vcpu].set |= (1 << type);
3058			} else {
3059				vmx->cap[vcpu].set &= ~(1 << type);
3060			}
3061		}
3062	}
3063
3064	return (retval);
3065}
3066
3067struct vlapic_vtx {
3068	struct vlapic	vlapic;
3069	struct pir_desc	*pir_desc;
3070	struct vmx	*vmx;
3071};
3072
3073#define	VMX_CTR_PIR(vm, vcpuid, pir_desc, notify, vector, level, msg)	\
3074do {									\
3075	VCPU_CTR2(vm, vcpuid, msg " assert %s-triggered vector %d",	\
3076	    level ? "level" : "edge", vector);				\
3077	VCPU_CTR1(vm, vcpuid, msg " pir0 0x%016lx", pir_desc->pir[0]);	\
3078	VCPU_CTR1(vm, vcpuid, msg " pir1 0x%016lx", pir_desc->pir[1]);	\
3079	VCPU_CTR1(vm, vcpuid, msg " pir2 0x%016lx", pir_desc->pir[2]);	\
3080	VCPU_CTR1(vm, vcpuid, msg " pir3 0x%016lx", pir_desc->pir[3]);	\
3081	VCPU_CTR1(vm, vcpuid, msg " notify: %s", notify ? "yes" : "no");\
3082} while (0)
3083
3084/*
3085 * vlapic->ops handlers that utilize the APICv hardware assist described in
3086 * Chapter 29 of the Intel SDM.
3087 */
3088static int
3089vmx_set_intr_ready(struct vlapic *vlapic, int vector, bool level)
3090{
3091	struct vlapic_vtx *vlapic_vtx;
3092	struct pir_desc *pir_desc;
3093	uint64_t mask;
3094	int idx, notify;
3095
3096	vlapic_vtx = (struct vlapic_vtx *)vlapic;
3097	pir_desc = vlapic_vtx->pir_desc;
3098
3099	/*
3100	 * Keep track of interrupt requests in the PIR descriptor. This is
3101	 * because the virtual APIC page pointed to by the VMCS cannot be
3102	 * modified if the vcpu is running.
3103	 */
3104	idx = vector / 64;
3105	mask = 1UL << (vector % 64);
3106	atomic_set_long(&pir_desc->pir[idx], mask);
3107	notify = atomic_cmpset_long(&pir_desc->pending, 0, 1);
3108
3109	VMX_CTR_PIR(vlapic->vm, vlapic->vcpuid, pir_desc, notify, vector,
3110	    level, "vmx_set_intr_ready");
3111	return (notify);
3112}
3113
3114static int
3115vmx_pending_intr(struct vlapic *vlapic, int *vecptr)
3116{
3117	struct vlapic_vtx *vlapic_vtx;
3118	struct pir_desc *pir_desc;
3119	struct LAPIC *lapic;
3120	uint64_t pending, pirval;
3121	uint32_t ppr, vpr;
3122	int i;
3123
3124	/*
3125	 * This function is only expected to be called from the 'HLT' exit
3126	 * handler which does not care about the vector that is pending.
3127	 */
3128	KASSERT(vecptr == NULL, ("vmx_pending_intr: vecptr must be NULL"));
3129
3130	vlapic_vtx = (struct vlapic_vtx *)vlapic;
3131	pir_desc = vlapic_vtx->pir_desc;
3132
3133	pending = atomic_load_acq_long(&pir_desc->pending);
3134	if (!pending)
3135		return (0);	/* common case */
3136
3137	/*
3138	 * If there is an interrupt pending then it will be recognized only
3139	 * if its priority is greater than the processor priority.
3140	 *
3141	 * Special case: if the processor priority is zero then any pending
3142	 * interrupt will be recognized.
3143	 */
3144	lapic = vlapic->apic_page;
3145	ppr = lapic->ppr & 0xf0;
3146	if (ppr == 0)
3147		return (1);
3148
3149	VCPU_CTR1(vlapic->vm, vlapic->vcpuid, "HLT with non-zero PPR %d",
3150	    lapic->ppr);
3151
3152	for (i = 3; i >= 0; i--) {
3153		pirval = pir_desc->pir[i];
3154		if (pirval != 0) {
3155			vpr = (i * 64 + flsl(pirval) - 1) & 0xf0;
3156			return (vpr > ppr);
3157		}
3158	}
3159	return (0);
3160}
3161
3162static void
3163vmx_intr_accepted(struct vlapic *vlapic, int vector)
3164{
3165
3166	panic("vmx_intr_accepted: not expected to be called");
3167}
3168
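/*
 * Set or clear the vector's bit in the EOI-exit bitmap of the VMCS.
 * Level-triggered vectors generate an EOI-induced VM-exit so that the
 * EOI can be forwarded to the I/O APIC emulation.
 */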
3169static void
3170vmx_set_tmr(struct vlapic *vlapic, int vector, bool level)
3171{
3172	struct vlapic_vtx *vlapic_vtx;
3173	struct vmx *vmx;
3174	struct vmcs *vmcs;
3175	uint64_t mask, val;
3176
3177	KASSERT(vector >= 0 && vector <= 255, ("invalid vector %d", vector));
3178	KASSERT(!vcpu_is_running(vlapic->vm, vlapic->vcpuid, NULL),
3179	    ("vmx_set_tmr: vcpu cannot be running"));
3180
3181	vlapic_vtx = (struct vlapic_vtx *)vlapic;
3182	vmx = vlapic_vtx->vmx;
3183	vmcs = &vmx->vmcs[vlapic->vcpuid];
3184	mask = 1UL << (vector % 64);
3185
3186	VMPTRLD(vmcs);
3187	val = vmcs_read(VMCS_EOI_EXIT(vector));
3188	if (level)
3189		val |= mask;
3190	else
3191		val &= ~mask;
3192	vmcs_write(VMCS_EOI_EXIT(vector), val);
3193	VMCLEAR(vmcs);
3194}
3195
3196static void
3197vmx_enable_x2apic_mode(struct vlapic *vlapic)
3198{
3199	struct vmx *vmx;
3200	struct vmcs *vmcs;
3201	uint32_t proc_ctls2;
3202	int vcpuid, error;
3203
3204	vcpuid = vlapic->vcpuid;
3205	vmx = ((struct vlapic_vtx *)vlapic)->vmx;
3206	vmcs = &vmx->vmcs[vcpuid];
3207
3208	proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
3209	KASSERT((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) != 0,
3210	    ("%s: invalid proc_ctls2 %#x", __func__, proc_ctls2));
3211
3212	proc_ctls2 &= ~PROCBASED2_VIRTUALIZE_APIC_ACCESSES;
3213	proc_ctls2 |= PROCBASED2_VIRTUALIZE_X2APIC_MODE;
3214	vmx->cap[vcpuid].proc_ctls2 = proc_ctls2;
3215
3216	VMPTRLD(vmcs);
3217	vmcs_write(VMCS_SEC_PROC_BASED_CTLS, proc_ctls2);
3218	VMCLEAR(vmcs);
3219
3220	if (vlapic->vcpuid == 0) {
3221		/*
3222		 * The nested page table mappings are shared by all vcpus
3223		 * so unmap the APIC access page just once.
3224		 */
3225		error = vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE);
3226		KASSERT(error == 0, ("%s: vm_unmap_mmio error %d",
3227		    __func__, error));
3228
3229		/*
3230		 * The MSR bitmap is shared by all vcpus so modify it only
3231		 * once in the context of vcpu 0.
3232		 */
3233		error = vmx_allow_x2apic_msrs(vmx);
3234		KASSERT(error == 0, ("%s: vmx_allow_x2apic_msrs error %d",
3235		    __func__, error));
3236	}
3237}
3238
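/*
 * Send the posted interrupt notification vector to the host cpu on which
 * the vcpu is running.
 */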
3239static void
3240vmx_post_intr(struct vlapic *vlapic, int hostcpu)
3241{
3242
3243	ipi_cpu(hostcpu, pirvec);
3244}
3245
3246/*
3247 * Transfer the pending interrupts in the PIR descriptor to the IRR
3248 * in the virtual APIC page.
3249 */
3250static void
3251vmx_inject_pir(struct vlapic *vlapic)
3252{
3253	struct vlapic_vtx *vlapic_vtx;
3254	struct pir_desc *pir_desc;
3255	struct LAPIC *lapic;
3256	uint64_t val, pirval;
3257	int rvi, pirbase = -1;
3258	uint16_t intr_status_old, intr_status_new;
3259
3260	vlapic_vtx = (struct vlapic_vtx *)vlapic;
3261	pir_desc = vlapic_vtx->pir_desc;
3262	if (atomic_cmpset_long(&pir_desc->pending, 1, 0) == 0) {
3263		VCPU_CTR0(vlapic->vm, vlapic->vcpuid, "vmx_inject_pir: "
3264		    "no posted interrupt pending");
3265		return;
3266	}
3267
3268	pirval = 0;
3269	pirbase = -1;
3270	lapic = vlapic->apic_page;
3271
3272	val = atomic_readandclear_long(&pir_desc->pir[0]);
3273	if (val != 0) {
3274		lapic->irr0 |= val;
3275		lapic->irr1 |= val >> 32;
3276		pirbase = 0;
3277		pirval = val;
3278	}
3279
3280	val = atomic_readandclear_long(&pir_desc->pir[1]);
3281	if (val != 0) {
3282		lapic->irr2 |= val;
3283		lapic->irr3 |= val >> 32;
3284		pirbase = 64;
3285		pirval = val;
3286	}
3287
3288	val = atomic_readandclear_long(&pir_desc->pir[2]);
3289	if (val != 0) {
3290		lapic->irr4 |= val;
3291		lapic->irr5 |= val >> 32;
3292		pirbase = 128;
3293		pirval = val;
3294	}
3295
3296	val = atomic_readandclear_long(&pir_desc->pir[3]);
3297	if (val != 0) {
3298		lapic->irr6 |= val;
3299		lapic->irr7 |= val >> 32;
3300		pirbase = 192;
3301		pirval = val;
3302	}
3303
3304	VLAPIC_CTR_IRR(vlapic, "vmx_inject_pir");
3305
3306	/*
3307	 * Update RVI so the processor can evaluate pending virtual
3308	 * interrupts on VM-entry.
3309	 *
3310	 * It is possible for pirval to be 0 here, even though the
3311	 * pending bit has been set. The scenario is:
3312	 * CPU-Y is sending a posted interrupt to CPU-X, which
3313	 * is running a guest and processing posted interrupts in h/w.
3314	 * CPU-X will eventually exit and the state seen in s/w is
3315	 * the pending bit set, but no PIR bits set.
3316	 *
3317	 *      CPU-X                      CPU-Y
3318	 *   (vm running)                (host running)
3319	 *   rx posted interrupt
3320	 *   CLEAR pending bit
3321	 *				 SET PIR bit
3322	 *   READ/CLEAR PIR bits
3323	 *				 SET pending bit
3324	 *   (vm exit)
3325	 *   pending bit set, PIR 0
3326	 */
3327	if (pirval != 0) {
3328		rvi = pirbase + flsl(pirval) - 1;
3329		intr_status_old = vmcs_read(VMCS_GUEST_INTR_STATUS);
3330		intr_status_new = (intr_status_old & 0xFF00) | rvi;
3331		if (intr_status_new > intr_status_old) {
3332			vmcs_write(VMCS_GUEST_INTR_STATUS, intr_status_new);
3333			VCPU_CTR2(vlapic->vm, vlapic->vcpuid, "vmx_inject_pir: "
3334			    "guest_intr_status changed from 0x%04x to 0x%04x",
3335			    intr_status_old, intr_status_new);
3336		}
3337	}
3338}
3339
3340static struct vlapic *
3341vmx_vlapic_init(void *arg, int vcpuid)
3342{
3343	struct vmx *vmx;
3344	struct vlapic *vlapic;
3345	struct vlapic_vtx *vlapic_vtx;
3346
3347	vmx = arg;
3348
3349	vlapic = malloc(sizeof(struct vlapic_vtx), M_VLAPIC, M_WAITOK | M_ZERO);
3350	vlapic->vm = vmx->vm;
3351	vlapic->vcpuid = vcpuid;
3352	vlapic->apic_page = (struct LAPIC *)&vmx->apic_page[vcpuid];
3353
3354	vlapic_vtx = (struct vlapic_vtx *)vlapic;
3355	vlapic_vtx->pir_desc = &vmx->pir_desc[vcpuid];
3356	vlapic_vtx->vmx = vmx;
3357
3358	if (virtual_interrupt_delivery) {
3359		vlapic->ops.set_intr_ready = vmx_set_intr_ready;
3360		vlapic->ops.pending_intr = vmx_pending_intr;
3361		vlapic->ops.intr_accepted = vmx_intr_accepted;
3362		vlapic->ops.set_tmr = vmx_set_tmr;
3363		vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode;
3364	}
3365
3366	if (posted_interrupts)
3367		vlapic->ops.post_intr = vmx_post_intr;
3368
3369	vlapic_init(vlapic);
3370
3371	return (vlapic);
3372}
3373
3374static void
3375vmx_vlapic_cleanup(void *arg, struct vlapic *vlapic)
3376{
3377
3378	vlapic_cleanup(vlapic);
3379	free(vlapic, M_VLAPIC);
3380}
3381
3382struct vmm_ops vmm_ops_intel = {
3383	vmx_init,
3384	vmx_cleanup,
3385	vmx_restore,
3386	vmx_vminit,
3387	vmx_run,
3388	vmx_vmcleanup,
3389	vmx_getreg,
3390	vmx_setreg,
3391	vmx_getdesc,
3392	vmx_setdesc,
3393	vmx_getcap,
3394	vmx_setcap,
3395	ept_vmspace_alloc,
3396	ept_vmspace_free,
3397	vmx_vlapic_init,
3398	vmx_vlapic_cleanup,
3399};
3400