/*-
 * Copyright (c) 2012 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef	_VMM_INSTRUCTION_EMUL_H_
#define	_VMM_INSTRUCTION_EMUL_H_

#include <sys/mman.h>

/*
 * Callback functions to read and write memory regions.
 *
 * 'rval'/'wval' carry the value read or to be written; 'rsize'/'wsize'
 * give the access size in bytes. 'arg' is an opaque caller-supplied
 * cookie passed through unmodified.
 */
typedef int (*mem_region_read_t)(void *vm, int cpuid, uint64_t gpa,
				 uint64_t *rval, int rsize, void *arg);

typedef int (*mem_region_write_t)(void *vm, int cpuid, uint64_t gpa,
				  uint64_t wval, int wsize, void *arg);

43243640Sneel/*
44243640Sneel * Emulate the decoded 'vie' instruction.
45243640Sneel *
46243640Sneel * The callbacks 'mrr' and 'mrw' emulate reads and writes to the memory region
47243640Sneel * containing 'gpa'. 'mrarg' is an opaque argument that is passed into the
48243640Sneel * callback functions.
49243640Sneel *
50243640Sneel * 'void *vm' should be 'struct vm *' when called from kernel context and
51243640Sneel * 'struct vmctx *' when called from user context.
52243640Sneel * s
53243640Sneel */
54243640Sneelint vmm_emulate_instruction(void *vm, int cpuid, uint64_t gpa, struct vie *vie,
55270159Sgrehan    struct vm_guest_paging *paging, mem_region_read_t mrr,
56270159Sgrehan    mem_region_write_t mrw, void *mrarg);
57243640Sneel
/* Write 'val' (truncated to 'size' bytes) into guest register 'reg'. */
int vie_update_register(void *vm, int vcpuid, enum vm_reg_name reg,
    uint64_t val, int size);

/*
 * Returns 1 if an alignment check exception should be injected and 0 otherwise.
 */
int vie_alignment_check(int cpl, int operand_size, uint64_t cr0,
    uint64_t rflags, uint64_t gla);

/* Returns 1 if the 'gla' is not canonical and 0 otherwise. */
int vie_canonical_check(enum vm_cpu_mode cpu_mode, uint64_t gla);

/* Presumably returns a mask covering an operand of 'size' bytes — confirm. */
uint64_t vie_size2mask(int size);

/*
 * Compute the guest linear address for an access of 'length' bytes at
 * offset 'off' within segment 'seg'/'desc'. Result is stored in '*gla'.
 */
int vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum vm_reg_name seg,
    struct seg_desc *desc, uint64_t off, int length, int addrsize, int prot,
    uint64_t *gla);

#ifdef _KERNEL
/*
 * APIs to fetch and decode the instruction from nested page fault handler.
 *
 * 'vie' must be initialized before calling 'vmm_fetch_instruction()'
 */
int vmm_fetch_instruction(struct vm *vm, int cpuid,
			  struct vm_guest_paging *guest_paging,
			  uint64_t rip, int inst_length, struct vie *vie);

/*
 * Translate the guest linear address 'gla' to a guest physical address.
 *
 * Returns 0 on success and '*gpa' contains the result of the translation.
 * Returns 1 if an exception was injected into the guest.
 * Returns -1 otherwise.
 */
int vmm_gla2gpa(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
    uint64_t gla, int prot, uint64_t *gpa);

/* Prepare 'vie' for a fresh fetch/decode cycle. */
void vie_init(struct vie *vie);

/*
 * Decode the instruction fetched into 'vie' so it can be emulated.
 *
 * 'gla' is the guest linear address provided by the hardware assist
 * that caused the nested page table fault. It is used to verify that
 * the software instruction decoding is in agreement with the hardware.
 *
 * Some hardware assists do not provide the 'gla' to the hypervisor.
 * To skip the 'gla' verification for this or any other reason pass
 * in VIE_INVALID_GLA instead.
 */
#define	VIE_INVALID_GLA		(1UL << 63)	/* a non-canonical address */
int vmm_decode_instruction(struct vm *vm, int cpuid, uint64_t gla,
			   enum vm_cpu_mode cpu_mode, int csd, struct vie *vie);
#endif	/* _KERNEL */

#endif	/* _VMM_INSTRUCTION_EMUL_H_ */