/******************************************************************************
 * vm_event.h
 *
 * Memory event common structures.
 *
 * Copyright (c) 2009 by Citrix Systems, Inc. (Patrick Colp)
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef _XEN_PUBLIC_VM_EVENT_H
#define _XEN_PUBLIC_VM_EVENT_H

#include "xen.h"

#define VM_EVENT_INTERFACE_VERSION 0x00000007

#if defined(__XEN__) || defined(__XEN_TOOLS__)

#include "io/ring.h"

/*
 * Memory event flags
 */

/*
 * VCPU_PAUSED in a request signals that the vCPU triggering the event has
 * been paused.
 * VCPU_PAUSED in a response signals to unpause the vCPU.
 */
#define VM_EVENT_FLAG_VCPU_PAUSED        (1 << 0)
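/*
 * Illustrative sketch (not part of the ABI): a monitor that has consumed
 * a request with VM_EVENT_FLAG_VCPU_PAUSED set typically echoes the flag
 * in its response to unpause the vCPU.  Here req is assumed to be the
 * request just read from the ring and rsp the response to be written back.
 *
 *     vm_event_response_t rsp;
 *
 *     memset(&rsp, 0, sizeof(rsp));
 *     rsp.version = VM_EVENT_INTERFACE_VERSION;
 *     rsp.vcpu_id = req.vcpu_id;
 *     rsp.reason  = req.reason;
 *     rsp.flags   = req.flags & VM_EVENT_FLAG_VCPU_PAUSED;
 */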
/* Flags to aid debugging vm_event */
#define VM_EVENT_FLAG_FOREIGN            (1 << 1)
/*
 * The following flags can be set in response to a mem_access event.
 *
 * Emulate the fault-causing instruction (if set in the event response flags).
 * This will allow the guest to continue execution without lifting the page
 * access restrictions.
 */
#define VM_EVENT_FLAG_EMULATE            (1 << 2)
/*
 * Same as VM_EVENT_FLAG_EMULATE, but with write operations or operations
 * potentially having side effects (like memory mapped or port I/O) disabled.
 */
#define VM_EVENT_FLAG_EMULATE_NOWRITE    (1 << 3)
/*
 * Toggle singlestepping on vm_event response.
 * Requires the vCPU to be paused already (synchronous events only).
 */
#define VM_EVENT_FLAG_TOGGLE_SINGLESTEP  (1 << 4)
/*
 * Data is being sent back to the hypervisor in the event response, to be
 * returned by the read function when emulating an instruction.
 * This flag is only useful when combined with VM_EVENT_FLAG_EMULATE
 * and takes precedence if combined with VM_EVENT_FLAG_EMULATE_NOWRITE
 * (i.e. if both VM_EVENT_FLAG_EMULATE_NOWRITE and
 * VM_EVENT_FLAG_SET_EMUL_READ_DATA are set, only the latter will be honored).
 */
#define VM_EVENT_FLAG_SET_EMUL_READ_DATA (1 << 5)
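/*
 * A minimal sketch, reusing the rsp from the example further up: supply
 * the bytes the emulator should return for the faulting read, then request
 * emulation.  fake_value is an illustrative local, not part of this
 * interface.
 *
 *     uint32_t fake_value = 0;
 *
 *     rsp.flags |= VM_EVENT_FLAG_EMULATE | VM_EVENT_FLAG_SET_EMUL_READ_DATA;
 *     rsp.data.emul.read.size = sizeof(fake_value);
 *     memcpy(rsp.data.emul.read.data, &fake_value, sizeof(fake_value));
 */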
/*
 * Deny completion of the operation that triggered the event.
 * Currently only useful for MSR and control-register write events.
 * Requires the vCPU to be paused already (synchronous events only).
 */
#define VM_EVENT_FLAG_DENY               (1 << 6)
/*
 * This flag can be set in a request or a response.
 *
 * On a request, indicates that the event occurred in the alternate p2m
 * specified by the altp2m_idx request field.
 *
 * On a response, indicates that the vCPU should resume in the alternate p2m
 * specified by the altp2m_idx response field if possible.
 */
#define VM_EVENT_FLAG_ALTERNATE_P2M      (1 << 7)
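/*
 * Illustrative response fragment: ask Xen to resume the vCPU in another
 * altp2m view.  View index 1 is an assumption for the example.
 *
 *     rsp.flags      |= VM_EVENT_FLAG_ALTERNATE_P2M;
 *     rsp.altp2m_idx  = 1;
 */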
/*
 * Set the vCPU registers to the values in the vm_event response.
 * At the moment x86-only, applies to EAX-EDX, ESP, EBP, ESI, EDI, R8-R15,
 * EFLAGS, and EIP.
 * Requires the vCPU to be paused already (synchronous events only).
 */
#define VM_EVENT_FLAG_SET_REGISTERS      (1 << 8)
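/*
 * Illustrative sketch: skip the current instruction by advancing RIP in
 * the response.  insn_len is assumed to come from a disassembler; the
 * register block is copied from the request so unrelated registers are
 * preserved.
 *
 *     rsp.flags        |= VM_EVENT_FLAG_SET_REGISTERS;
 *     rsp.data.regs.x86 = req.data.regs.x86;
 *     rsp.data.regs.x86.rip += insn_len;
 */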
/*
 * Instruction cache is being sent back to the hypervisor in the event response
 * to be used by the emulator. This flag is only useful when combined with
 * VM_EVENT_FLAG_EMULATE and does not take precedence if combined with
 * VM_EVENT_FLAG_EMULATE_NOWRITE or VM_EVENT_FLAG_SET_EMUL_READ_DATA (i.e.
 * if either of those flags is set, only they will be honored).
 */
#define VM_EVENT_FLAG_SET_EMUL_INSN_DATA (1 << 9)
/*
 * Have a one-shot VM_EVENT_REASON_INTERRUPT event sent for the first
 * interrupt pending after resuming the vCPU.
 */
#define VM_EVENT_FLAG_GET_NEXT_INTERRUPT (1 << 10)
/*
 * Execute fast singlestepping on vm_event response.
 * Requires the vCPU to be paused already (synchronous events only).
 *
 * A response must set the p2midx field of fast_singlestep to the altp2m
 * view to which Xen will switch the vCPU on the occurrence of the first
 * singlestep, after which singlestep gets automatically disabled.
 */
#define VM_EVENT_FLAG_FAST_SINGLESTEP    (1 << 11)
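/*
 * Illustrative sketch: after handling an event raised in a restricted
 * altp2m view, let the instruction execute once and have Xen switch the
 * vCPU to view 1 on the first singlestep.  The index is an assumption
 * for the example.
 *
 *     rsp.flags |= VM_EVENT_FLAG_FAST_SINGLESTEP;
 *     rsp.u.fast_singlestep.p2midx = 1;
 */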
/*
 * Set if the event comes from a nested VM and thus npt_base is valid.
 */
#define VM_EVENT_FLAG_NESTED_P2M         (1 << 12)
/*
 * Reset the vmtrace buffer (if vmtrace is enabled).
 */
#define VM_EVENT_FLAG_RESET_VMTRACE      (1 << 13)

/*
 * Reasons for the vm event request
 */

/* Default case */
#define VM_EVENT_REASON_UNKNOWN                 0
/* Memory access violation */
#define VM_EVENT_REASON_MEM_ACCESS              1
/* Memory sharing event */
#define VM_EVENT_REASON_MEM_SHARING             2
/* Memory paging event */
#define VM_EVENT_REASON_MEM_PAGING              3
/* A control register was updated */
#define VM_EVENT_REASON_WRITE_CTRLREG           4
/* An MSR was updated. */
#define VM_EVENT_REASON_MOV_TO_MSR              5
/* Debug operation executed (e.g. int3) */
#define VM_EVENT_REASON_SOFTWARE_BREAKPOINT     6
/* Single-step (e.g. MTF) */
#define VM_EVENT_REASON_SINGLESTEP              7
/* An event has been requested via HVMOP_guest_request_vm_event. */
#define VM_EVENT_REASON_GUEST_REQUEST           8
/* A debug exception was caught */
#define VM_EVENT_REASON_DEBUG_EXCEPTION         9
/* CPUID executed */
#define VM_EVENT_REASON_CPUID                   10
/*
 * Privileged call executed (e.g. SMC).
 * Note: the event may be generated even if the SMC condition check fails on
 *       some CPUs. As this behavior is CPU-specific, users are advised not
 *       to rely on it. These kinds of events will be filtered out in future
 *       versions.
 */
#define VM_EVENT_REASON_PRIVILEGED_CALL         11
/* An interrupt has been delivered. */
#define VM_EVENT_REASON_INTERRUPT               12
/* A descriptor table register was accessed. */
#define VM_EVENT_REASON_DESCRIPTOR_ACCESS       13
/* Current instruction is not implemented by the emulator */
#define VM_EVENT_REASON_EMUL_UNIMPLEMENTED      14

/* Supported values for the vm_event_write_ctrlreg index. */
#define VM_EVENT_X86_CR0    0
#define VM_EVENT_X86_CR3    1
#define VM_EVENT_X86_CR4    2
#define VM_EVENT_X86_XCR0   3
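/*
 * Illustrative sketch: picking CR3 loads out of a write_ctrlreg request.
 * track_address_space() is a hypothetical consumer hook, not part of this
 * interface.
 *
 *     if ( req.reason == VM_EVENT_REASON_WRITE_CTRLREG &&
 *          req.u.write_ctrlreg.index == VM_EVENT_X86_CR3 )
 *         track_address_space(req.u.write_ctrlreg.new_value);
 */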

/* The limit field is right-shifted by 12 bits if .ar.g is set. */
struct vm_event_x86_selector_reg {
    uint32_t limit  :    20;
    uint32_t ar     :    12;
};
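/*
 * A minimal decode sketch, assuming the granularity (g) bit is the top bit
 * of the compressed 12-bit ar field (type, s, dpl, p, avl, l, db, g in
 * ascending bit order):
 *
 *     static inline uint32_t segment_limit(struct vm_event_x86_selector_reg r)
 *     {
 *         return (r.ar & (1u << 11)) ? ((uint32_t)r.limit << 12) | 0xfff
 *                                    : r.limit;
 *     }
 */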

/*
 * Using custom vCPU structs (i.e. not hvm_hw_cpu) for both x86 and ARM
 * so as not to fill the vm_event ring buffer too quickly.
 */
struct vm_event_regs_x86 {
    uint64_t rax;
    uint64_t rcx;
    uint64_t rdx;
    uint64_t rbx;
    uint64_t rsp;
    uint64_t rbp;
    uint64_t rsi;
    uint64_t rdi;
    uint64_t r8;
    uint64_t r9;
    uint64_t r10;
    uint64_t r11;
    uint64_t r12;
    uint64_t r13;
    uint64_t r14;
    uint64_t r15;
    uint64_t rflags;
    uint64_t dr6;
    uint64_t dr7;
    uint64_t rip;
    uint64_t cr0;
    uint64_t cr2;
    uint64_t cr3;
    uint64_t cr4;
    uint64_t sysenter_cs;
    uint64_t sysenter_esp;
    uint64_t sysenter_eip;
    uint64_t msr_efer;
    uint64_t msr_star;
    uint64_t msr_lstar;
    uint64_t gdtr_base;

    /*
     * When VM_EVENT_FLAG_NESTED_P2M is set, this event comes from a nested
     * VM.  npt_base is the guest physical address of the L1 hypervisor's
     * EPT/NPT tables for the nested guest.
     *
     * All bits outside of architectural address ranges are reserved for
     * future metadata.
     */
    uint64_t npt_base;

    /*
     * Current position in the vmtrace buffer, or ~0 if vmtrace is not active.
     *
     * For Intel Processor Trace, it is the upper half of MSR_RTIT_OUTPUT_MASK.
     */
    uint64_t vmtrace_pos;

    uint32_t cs_base;
    uint32_t ss_base;
    uint32_t ds_base;
    uint32_t es_base;
    uint64_t fs_base;
    uint64_t gs_base;
    struct vm_event_x86_selector_reg cs;
    struct vm_event_x86_selector_reg ss;
    struct vm_event_x86_selector_reg ds;
    struct vm_event_x86_selector_reg es;
    struct vm_event_x86_selector_reg fs;
    struct vm_event_x86_selector_reg gs;
    uint64_t shadow_gs;
    uint16_t gdtr_limit;
    uint16_t cs_sel;
    uint16_t ss_sel;
    uint16_t ds_sel;
    uint16_t es_sel;
    uint16_t fs_sel;
    uint16_t gs_sel;
    uint16_t _pad;
};

/*
 * Only the register 'pc' can be set on a vm_event response using the
 * VM_EVENT_FLAG_SET_REGISTERS flag.
 */
struct vm_event_regs_arm {
    uint64_t ttbr0;
    uint64_t ttbr1;
    uint64_t ttbcr;
    uint64_t pc;
    uint64_t cpsr;
};

/*
 * mem_access flag definitions
 *
 * These flags are set only as part of a mem_access vm_event request.
 *
 * R/W/X: Defines the type of violation that has triggered the event.
 *        Multiple types can be set in a single violation!
 * GLA_VALID: Set if the gla field holds the guest VA associated with the
 *            event.
 * FAULT_WITH_GLA: Set if the violation was triggered by accessing gla.
 * FAULT_IN_GPT: Set if the violation was triggered while translating gla.
 */
#define MEM_ACCESS_R                (1 << 0)
#define MEM_ACCESS_W                (1 << 1)
#define MEM_ACCESS_X                (1 << 2)
#define MEM_ACCESS_RWX              (MEM_ACCESS_R | MEM_ACCESS_W | MEM_ACCESS_X)
#define MEM_ACCESS_RW               (MEM_ACCESS_R | MEM_ACCESS_W)
#define MEM_ACCESS_RX               (MEM_ACCESS_R | MEM_ACCESS_X)
#define MEM_ACCESS_WX               (MEM_ACCESS_W | MEM_ACCESS_X)
#define MEM_ACCESS_GLA_VALID        (1 << 3)
#define MEM_ACCESS_FAULT_WITH_GLA   (1 << 4)
#define MEM_ACCESS_FAULT_IN_GPT     (1 << 5)
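/*
 * Illustrative sketch: classifying a mem_access request.  gla is only
 * meaningful when MEM_ACCESS_GLA_VALID is set; a 4K page size is assumed
 * and report_exec_violation() is a hypothetical consumer hook.
 *
 *     const struct vm_event_mem_access *ma = &req.u.mem_access;
 *     uint64_t gpa = (ma->gfn << 12) + ma->offset;
 *
 *     if ( (ma->flags & MEM_ACCESS_X) && (ma->flags & MEM_ACCESS_GLA_VALID) )
 *         report_exec_violation(gpa, ma->gla);
 */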

struct vm_event_mem_access {
    uint64_t gfn;
    uint64_t offset;
    uint64_t gla;   /* if flags has MEM_ACCESS_GLA_VALID set */
    uint32_t flags; /* MEM_ACCESS_* */
    uint32_t _pad;
};

struct vm_event_write_ctrlreg {
    uint32_t index;
    uint32_t _pad;
    uint64_t new_value;
    uint64_t old_value;
};

struct vm_event_singlestep {
    uint64_t gfn;
};

struct vm_event_fast_singlestep {
    uint16_t p2midx;
};

struct vm_event_debug {
    uint64_t gfn;
    uint64_t pending_dbg; /* Behaves like the VT-x PENDING_DBG field. */
    uint32_t insn_length;
    uint8_t type;         /* HVMOP_TRAP_* */
    uint8_t _pad[3];
};

struct vm_event_mov_to_msr {
    uint64_t msr;
    uint64_t new_value;
    uint64_t old_value;
};

#define VM_EVENT_DESC_IDTR           1
#define VM_EVENT_DESC_GDTR           2
#define VM_EVENT_DESC_LDTR           3
#define VM_EVENT_DESC_TR             4

struct vm_event_desc_access {
    union {
        struct {
            uint32_t instr_info;         /* VMX: VMCS Instruction-Information */
            uint32_t _pad1;
            uint64_t exit_qualification; /* VMX: VMCS Exit Qualification */
        } vmx;
    } arch;
    uint8_t descriptor;                  /* VM_EVENT_DESC_* */
    uint8_t is_write;
    uint8_t _pad[6];
};

struct vm_event_cpuid {
    uint32_t insn_length;
    uint32_t leaf;
    uint32_t subleaf;
    uint32_t _pad;
};

struct vm_event_interrupt_x86 {
    uint32_t vector;
    uint32_t type;
    uint32_t error_code;
    uint32_t _pad;
    uint64_t cr2;
};

#define MEM_PAGING_DROP_PAGE       (1 << 0)
#define MEM_PAGING_EVICT_FAIL      (1 << 1)

struct vm_event_paging {
    uint64_t gfn;
    uint32_t p2mt;
    uint32_t flags;
};

struct vm_event_sharing {
    uint64_t gfn;
    uint32_t p2mt;
    uint32_t _pad;
};

struct vm_event_emul_read_data {
    uint32_t size;
    /* The struct is used in a union with vm_event_regs_x86. */
    uint8_t  data[sizeof(struct vm_event_regs_x86) - sizeof(uint32_t)];
};

struct vm_event_emul_insn_data {
    uint8_t data[16]; /* Has to be completely filled */
};

typedef struct vm_event_st {
    uint32_t version;   /* VM_EVENT_INTERFACE_VERSION */
    uint32_t flags;     /* VM_EVENT_FLAG_* */
    uint32_t reason;    /* VM_EVENT_REASON_* */
    uint32_t vcpu_id;
    uint16_t altp2m_idx; /* may be used during request and response */
    uint16_t _pad[3];

    union {
        struct vm_event_paging                mem_paging;
        struct vm_event_sharing               mem_sharing;
        struct vm_event_mem_access            mem_access;
        struct vm_event_write_ctrlreg         write_ctrlreg;
        struct vm_event_mov_to_msr            mov_to_msr;
        struct vm_event_desc_access           desc_access;
        struct vm_event_singlestep            singlestep;
        struct vm_event_fast_singlestep       fast_singlestep;
        struct vm_event_debug                 software_breakpoint;
        struct vm_event_debug                 debug_exception;
        struct vm_event_cpuid                 cpuid;
        union {
            struct vm_event_interrupt_x86     x86;
        } interrupt;
    } u;

    union {
        union {
            struct vm_event_regs_x86 x86;
            struct vm_event_regs_arm arm;
        } regs;

        union {
            struct vm_event_emul_read_data read;
            struct vm_event_emul_insn_data insn;
        } emul;
    } data;
} vm_event_request_t, vm_event_response_t;

DEFINE_RING_TYPES(vm_event, vm_event_request_t, vm_event_response_t);
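/*
 * The request/response pair above travels over a standard Xen shared ring.
 * A rough consumer loop, modelled on the in-tree xen-access example (event
 * channel handling omitted; back_ring is a vm_event_back_ring_t already
 * initialised with BACK_RING_INIT on the shared ring page):
 *
 *     vm_event_request_t  req;
 *     vm_event_response_t rsp;
 *     RING_IDX cons = back_ring.req_cons;
 *
 *     while ( RING_HAS_UNCONSUMED_REQUESTS(&back_ring) )
 *     {
 *         memcpy(&req, RING_GET_REQUEST(&back_ring, cons), sizeof(req));
 *         back_ring.req_cons = ++cons;
 *
 *         ... handle req and fill in rsp ...
 *
 *         memcpy(RING_GET_RESPONSE(&back_ring, back_ring.rsp_prod_pvt),
 *                &rsp, sizeof(rsp));
 *         back_ring.rsp_prod_pvt++;
 *         RING_PUSH_RESPONSES(&back_ring);
 *     }
 */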

#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
#endif /* _XEN_PUBLIC_VM_EVENT_H */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */