/*-
 * Copyright (c) 2012 Sandvine, Inc.
 * Copyright (c) 2012 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: projects/bhyve/sys/amd64/vmm/vmm_instruction_emul.c 243640 2012-11-28 00:02:17Z neel $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: projects/bhyve/sys/amd64/vmm/vmm_instruction_emul.c 243640 2012-11-28 00:02:17Z neel $");

#ifdef _KERNEL
#include <sys/param.h>
#include <sys/pcpu.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/pmap.h>
#include <machine/vmparam.h>
#include <machine/vmm.h>
#else	/* !_KERNEL */
#include <sys/types.h>
#include <sys/errno.h>

#include <machine/vmm.h>

#include <vmmapi.h>
#endif	/* _KERNEL */

/* struct vie_op.op_type */
enum {
	VIE_OP_TYPE_NONE = 0,
	VIE_OP_TYPE_MOV,
	VIE_OP_TYPE_AND,
	VIE_OP_TYPE_LAST
};

/* struct vie_op.op_flags */
#define	VIE_OP_F_IMM		(1 << 0)	/* immediate operand present */
#define	VIE_OP_F_IMM8		(1 << 1)	/* 8-bit immediate operand */

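/*
 * One-byte opcodes understood by the emulator, indexed by the opcode
 * byte itself.
 */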
static const struct vie_op one_byte_opcodes[256] = {
	[0x89] = {
		.op_byte = 0x89,
		.op_type = VIE_OP_TYPE_MOV,
	},
	[0x8B] = {
		.op_byte = 0x8B,
		.op_type = VIE_OP_TYPE_MOV,
	},
	[0xC7] = {
		.op_byte = 0xC7,
		.op_type = VIE_OP_TYPE_MOV,
		.op_flags = VIE_OP_F_IMM,
	},
	[0x23] = {
		.op_byte = 0x23,
		.op_type = VIE_OP_TYPE_AND,
	}
};

/* struct vie.mod */
#define	VIE_MOD_INDIRECT		0
#define	VIE_MOD_INDIRECT_DISP8		1
#define	VIE_MOD_INDIRECT_DISP32		2
#define	VIE_MOD_DIRECT			3

/* struct vie.rm */
#define	VIE_RM_SIB			4
#define	VIE_RM_DISP32			5

#define	GB				(1024 * 1024 * 1024)

static enum vm_reg_name gpr_map[16] = {
	VM_REG_GUEST_RAX,
	VM_REG_GUEST_RCX,
	VM_REG_GUEST_RDX,
	VM_REG_GUEST_RBX,
	VM_REG_GUEST_RSP,
	VM_REG_GUEST_RBP,
	VM_REG_GUEST_RSI,
	VM_REG_GUEST_RDI,
	VM_REG_GUEST_R8,
	VM_REG_GUEST_R9,
	VM_REG_GUEST_R10,
	VM_REG_GUEST_R11,
	VM_REG_GUEST_R12,
	VM_REG_GUEST_R13,
	VM_REG_GUEST_R14,
	VM_REG_GUEST_R15
};

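/* Mask covering the low 'size' bytes of an operand, indexed by size */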
static uint64_t size2mask[] = {
	[1] = 0xff,
	[2] = 0xffff,
	[4] = 0xffffffff,
	[8] = 0xffffffffffffffff,
};

static int
vie_valid_register(enum vm_reg_name reg)
{
#ifdef _KERNEL
	/*
	 * XXX
	 * The operand register in which we store the result of the
	 * read must be a GPR that we can modify even if the vcpu
	 * is "running". All the GPRs qualify except for %rsp.
	 *
	 * This is a limitation of the vm_set_register() API
	 * and can be fixed if necessary.
	 */
	if (reg == VM_REG_GUEST_RSP)
		return (0);
#endif
	return (1);
}

static int
vie_read_register(void *vm, int vcpuid, enum vm_reg_name reg, uint64_t *rval)
{
	int error;

	if (!vie_valid_register(reg))
		return (EINVAL);

	error = vm_get_register(vm, vcpuid, reg, rval);

	return (error);
}

static int
vie_update_register(void *vm, int vcpuid, enum vm_reg_name reg,
		    uint64_t val, int size)
{
	int error;
	uint64_t origval;

	if (!vie_valid_register(reg))
		return (EINVAL);

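	/*
	 * In long mode a 32-bit operation zero-extends its result into
	 * the upper half of the destination register, while 8-bit and
	 * 16-bit operations leave the upper bits untouched; merge the
	 * new value accordingly.
	 */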
	switch (size) {
	case 1:
	case 2:
		error = vie_read_register(vm, vcpuid, reg, &origval);
		if (error)
			return (error);
		val &= size2mask[size];
		val |= origval & ~size2mask[size];
		break;
	case 4:
		val &= 0xffffffffUL;
		break;
	case 8:
		break;
	default:
		return (EINVAL);
	}

	error = vm_set_register(vm, vcpuid, reg, val);
	return (error);
}

/*
 * The following simplifying assumptions are made during emulation:
 *
 * - guest is in 64-bit mode
 *   - default address size is 64 bits
 *   - default operand size is 32 bits
 *
 * - operand size override is not supported
 *
 * - address size override is not supported
 */
static int
emulate_mov(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
	    mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
{
	int error, size;
	enum vm_reg_name reg;
	uint64_t val;

	size = 4;
	error = EINVAL;

	switch (vie->op.op_byte) {
	case 0x89:
		/*
		 * MOV from reg (ModRM:reg) to mem (ModRM:r/m)
		 * 89/r:		mov r/m32, r32
		 * REX.W + 89/r:	mov r/m64, r64
		 */
		if (vie->rex_w)
			size = 8;
		reg = gpr_map[vie->reg];
		error = vie_read_register(vm, vcpuid, reg, &val);
		if (error == 0) {
			val &= size2mask[size];
			error = memwrite(vm, vcpuid, gpa, val, size, arg);
		}
		break;
	case 0x8B:
		/*
		 * MOV from mem (ModRM:r/m) to reg (ModRM:reg)
		 * 8B/r:		mov r32, r/m32
		 * REX.W + 8B/r:	mov r64, r/m64
		 */
		if (vie->rex_w)
			size = 8;
		error = memread(vm, vcpuid, gpa, &val, size, arg);
		if (error == 0) {
			reg = gpr_map[vie->reg];
			error = vie_update_register(vm, vcpuid, reg, val, size);
		}
		break;
	case 0xC7:
		/*
		 * MOV from imm32 to mem (ModRM:r/m)
		 * C7/0:		mov r/m32, imm32
		 * REX.W + C7/0:	mov r/m64, imm32 (sign-extended to 64 bits)
		 */
		val = vie->immediate;		/* already sign-extended */

		if (vie->rex_w)
			size = 8;

		if (size != 8)
			val &= size2mask[size];

		error = memwrite(vm, vcpuid, gpa, val, size, arg);
		break;
	default:
		break;
	}

	return (error);
}

static int
emulate_and(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
	    mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
{
	int error, size;
	enum vm_reg_name reg;
	uint64_t val1, val2;

	size = 4;
	error = EINVAL;

	switch (vie->op.op_byte) {
	case 0x23:
		/*
		 * AND the contents of reg (ModRM:reg) with mem (ModRM:r/m)
		 * and store the result in reg.
		 *
		 * 23/r:		and r32, r/m32
		 * REX.W + 23/r:	and r64, r/m64
		 */
		if (vie->rex_w)
			size = 8;

		/* get the first operand */
		reg = gpr_map[vie->reg];
		error = vie_read_register(vm, vcpuid, reg, &val1);
		if (error)
			break;

		/* get the second operand */
		error = memread(vm, vcpuid, gpa, &val2, size, arg);
		if (error)
			break;

		/* perform the operation and write the result */
		val1 &= val2;
		error = vie_update_register(vm, vcpuid, reg, val1, size);
		break;
	default:
		break;
	}
	return (error);
}

int
vmm_emulate_instruction(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
			mem_region_read_t memread, mem_region_write_t memwrite,
			void *memarg)
{
	int error;

	if (!vie->decoded)
		return (EINVAL);

	switch (vie->op.op_type) {
	case VIE_OP_TYPE_MOV:
		error = emulate_mov(vm, vcpuid, gpa, vie,
				    memread, memwrite, memarg);
		break;
	case VIE_OP_TYPE_AND:
		error = emulate_and(vm, vcpuid, gpa, vie,
				    memread, memwrite, memarg);
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}

#ifdef _KERNEL
static void
vie_init(struct vie *vie)
{

	bzero(vie, sizeof(struct vie));

	vie->base_register = VM_REG_LAST;
	vie->index_register = VM_REG_LAST;
}

static int
gla2gpa(struct vm *vm, uint64_t gla, uint64_t ptpphys,
	uint64_t *gpa, uint64_t *gpaend)
{
	vm_paddr_t hpa;
	int nlevels, ptpshift, ptpindex;
	uint64_t *ptpbase, pte, pgsize;

	/*
	 * XXX assumes 64-bit guest with 4 page walk levels
	 */
	nlevels = 4;
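	/*
	 * Walk PML4 -> PDPT -> PD -> PT, consuming 9 bits of the linear
	 * address at each level. A set PG_PS bit terminates the walk
	 * early at a 1GB or 2MB superpage; anything larger is rejected
	 * below.
	 */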
	while (--nlevels >= 0) {
		/* Zero out the lower 12 bits and the upper 12 bits */
		ptpphys >>= 12; ptpphys <<= 24; ptpphys >>= 12;

		hpa = vm_gpa2hpa(vm, ptpphys, PAGE_SIZE);
		if (hpa == -1)
			goto error;

		ptpbase = (uint64_t *)PHYS_TO_DMAP(hpa);

		ptpshift = PAGE_SHIFT + nlevels * 9;
		ptpindex = (gla >> ptpshift) & 0x1FF;
		pgsize = 1UL << ptpshift;

		pte = ptpbase[ptpindex];

		if ((pte & PG_V) == 0)
			goto error;

		if (pte & PG_PS) {
			if (pgsize > 1 * GB)
				goto error;
			else
				break;
		}

		ptpphys = pte;
	}

	/* Zero out the lower 'ptpshift' bits and the upper 12 bits */
	pte >>= ptpshift; pte <<= (ptpshift + 12); pte >>= 12;
	*gpa = pte | (gla & (pgsize - 1));
	*gpaend = pte + pgsize;
	return (0);

error:
	return (-1);
}

int
vmm_fetch_instruction(struct vm *vm, int cpuid, uint64_t rip, int inst_length,
		      uint64_t cr3, struct vie *vie)
{
	int n, err;
	uint64_t hpa, gpa, gpaend, off;

	/*
	 * XXX cache previously fetched instructions using 'rip' as the tag
	 */

	if (inst_length > VIE_INST_SIZE)
		panic("vmm_fetch_instruction: invalid length %d", inst_length);

	vie_init(vie);

	/* Copy the instruction into 'vie' */
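	/*
	 * The instruction may straddle a page boundary, so translate and
	 * copy at most one page's worth of bytes per iteration.
	 */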
	while (vie->num_valid < inst_length) {
		err = gla2gpa(vm, rip, cr3, &gpa, &gpaend);
		if (err)
			break;

		off = gpa & PAGE_MASK;
		n = min(inst_length - vie->num_valid, PAGE_SIZE - off);

		hpa = vm_gpa2hpa(vm, gpa, n);
		if (hpa == -1)
			break;

		bcopy((void *)PHYS_TO_DMAP(hpa), &vie->inst[vie->num_valid], n);

		rip += n;
		vie->num_valid += n;
	}

	if (vie->num_valid == inst_length)
		return (0);
	else
		return (-1);
}

static int
vie_peek(struct vie *vie, uint8_t *x)
{

	if (vie->num_processed < vie->num_valid) {
		*x = vie->inst[vie->num_processed];
		return (0);
	} else
		return (-1);
}

static void
vie_advance(struct vie *vie)
{

	vie->num_processed++;
}

static int
decode_rex(struct vie *vie)
{
	uint8_t x;

	if (vie_peek(vie, &x))
		return (-1);

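	/*
	 * A REX prefix occupies the byte range 0x40-0x4F and is laid out
	 * as 0100WRXB: W selects a 64-bit operand size while R, X and B
	 * extend the ModRM reg, SIB index and ModRM r/m (or SIB base)
	 * fields respectively.
	 */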
	if (x >= 0x40 && x <= 0x4F) {
		vie->rex_w = x & 0x8 ? 1 : 0;
		vie->rex_r = x & 0x4 ? 1 : 0;
		vie->rex_x = x & 0x2 ? 1 : 0;
		vie->rex_b = x & 0x1 ? 1 : 0;

		vie_advance(vie);
	}

	return (0);
}

static int
decode_opcode(struct vie *vie)
{
	uint8_t x;

	if (vie_peek(vie, &x))
		return (-1);

	vie->op = one_byte_opcodes[x];

	if (vie->op.op_type == VIE_OP_TYPE_NONE)
		return (-1);

	vie_advance(vie);
	return (0);
}

/*
 * XXX assuming 32-bit or 64-bit guest
 */
static int
decode_modrm(struct vie *vie)
{
	uint8_t x;

	if (vie_peek(vie, &x))
		return (-1);

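	/* The ModRM byte is laid out as mod[7:6], reg[5:3], r/m[2:0] */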
	vie->mod = (x >> 6) & 0x3;
	vie->rm =  (x >> 0) & 0x7;
	vie->reg = (x >> 3) & 0x7;

	/*
	 * A direct addressing mode makes no sense in the context of an EPT
	 * fault. There has to be a memory access involved to cause the
	 * EPT fault.
	 */
	if (vie->mod == VIE_MOD_DIRECT)
		return (-1);

	if ((vie->mod == VIE_MOD_INDIRECT && vie->rm == VIE_RM_DISP32) ||
	    (vie->mod != VIE_MOD_DIRECT && vie->rm == VIE_RM_SIB)) {
		/*
		 * Table 2-5: Special Cases of REX Encodings
		 *
		 * mod=0, r/m=5 is used in the compatibility mode to
		 * indicate a disp32 without a base register.
		 *
		 * mod!=3, r/m=4 is used in the compatibility mode to
		 * indicate that the SIB byte is present.
		 *
		 * The 'b' bit in the REX prefix is a don't-care in
		 * this case.
		 */
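		/*
		 * XXX in 64-bit mode the mod=0, r/m=5 encoding actually
		 * selects RIP-relative addressing; it is decoded here as
		 * a bare disp32 without a base register.
		 */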
	} else {
		vie->rm |= (vie->rex_b << 3);
	}

	vie->reg |= (vie->rex_r << 3);

	/* SIB */
	if (vie->mod != VIE_MOD_DIRECT && vie->rm == VIE_RM_SIB)
		goto done;

	vie->base_register = gpr_map[vie->rm];

	switch (vie->mod) {
	case VIE_MOD_INDIRECT_DISP8:
		vie->disp_bytes = 1;
		break;
	case VIE_MOD_INDIRECT_DISP32:
		vie->disp_bytes = 4;
		break;
	case VIE_MOD_INDIRECT:
		if (vie->rm == VIE_RM_DISP32) {
			vie->disp_bytes = 4;
			vie->base_register = VM_REG_LAST;	/* no base */
		}
		break;
	}

	/* Figure out immediate operand size (if any) */
	if (vie->op.op_flags & VIE_OP_F_IMM)
		vie->imm_bytes = 4;
	else if (vie->op.op_flags & VIE_OP_F_IMM8)
		vie->imm_bytes = 1;

done:
	vie_advance(vie);

	return (0);
}

static int
decode_sib(struct vie *vie)
{
	uint8_t x;

	/* Proceed only if SIB byte is present */
	if (vie->mod == VIE_MOD_DIRECT || vie->rm != VIE_RM_SIB)
		return (0);

	if (vie_peek(vie, &x))
		return (-1);

	/* De-construct the SIB byte */
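	/* Layout: scale[7:6], index[5:3], base[2:0] */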
	vie->ss = (x >> 6) & 0x3;
	vie->index = (x >> 3) & 0x7;
	vie->base = (x >> 0) & 0x7;

	/* Apply the REX prefix modifiers */
	vie->index |= vie->rex_x << 3;
	vie->base |= vie->rex_b << 3;

	switch (vie->mod) {
	case VIE_MOD_INDIRECT_DISP8:
		vie->disp_bytes = 1;
		break;
	case VIE_MOD_INDIRECT_DISP32:
		vie->disp_bytes = 4;
		break;
	}

	if (vie->mod == VIE_MOD_INDIRECT &&
	    (vie->base == 5 || vie->base == 13)) {
		/*
		 * Special case: the base register is unused if mod = 0
		 * and base = %rbp or %r13.
		 *
		 * Documented in:
		 * Table 2-3: 32-bit Addressing Forms with the SIB Byte
		 * Table 2-5: Special Cases of REX Encodings
		 */
		vie->disp_bytes = 4;
	} else {
		vie->base_register = gpr_map[vie->base];
	}

	/*
	 * All encodings of 'index' are valid except for %rsp (4).
	 *
	 * Documented in:
	 * Table 2-3: 32-bit Addressing Forms with the SIB Byte
	 * Table 2-5: Special Cases of REX Encodings
	 */
	if (vie->index != 4)
		vie->index_register = gpr_map[vie->index];

	/* 'scale' makes sense only in the context of an index register */
	if (vie->index_register < VM_REG_LAST)
		vie->scale = 1 << vie->ss;

	vie_advance(vie);

	return (0);
}

static int
decode_displacement(struct vie *vie)
{
	int n, i;
	uint8_t x;

	union {
		char	buf[4];
		int8_t	signed8;
		int32_t	signed32;
	} u;

	if ((n = vie->disp_bytes) == 0)
		return (0);

	if (n != 1 && n != 4)
		panic("decode_displacement: invalid disp_bytes %d", n);

	for (i = 0; i < n; i++) {
		if (vie_peek(vie, &x))
			return (-1);

		u.buf[i] = x;
		vie_advance(vie);
	}

	if (n == 1)
		vie->displacement = u.signed8;		/* sign-extended */
	else
		vie->displacement = u.signed32;		/* sign-extended */

	return (0);
}

static int
decode_immediate(struct vie *vie)
{
	int i, n;
	uint8_t x;
	union {
		char	buf[4];
		int8_t	signed8;
		int32_t	signed32;
	} u;

	if ((n = vie->imm_bytes) == 0)
		return (0);

	if (n != 1 && n != 4)
		panic("decode_immediate: invalid imm_bytes %d", n);

	for (i = 0; i < n; i++) {
		if (vie_peek(vie, &x))
			return (-1);

		u.buf[i] = x;
		vie_advance(vie);
	}

	if (n == 1)
		vie->immediate = u.signed8;		/* sign-extended */
	else
		vie->immediate = u.signed32;		/* sign-extended */

	return (0);
}

#define	VERIFY_GLA
/*
 * Verify that the 'guest linear address' provided as collateral of the nested
 * page table fault matches our instruction decoding.
 */
#ifdef VERIFY_GLA
static int
verify_gla(struct vm *vm, int cpuid, uint64_t gla, struct vie *vie)
{
	int error;
	uint64_t base, idx;

	base = 0;
	if (vie->base_register != VM_REG_LAST) {
		error = vm_get_register(vm, cpuid, vie->base_register, &base);
		if (error) {
			printf("verify_gla: error %d getting base reg %d\n",
				error, vie->base_register);
			return (-1);
		}
	}

	idx = 0;
	if (vie->index_register != VM_REG_LAST) {
		error = vm_get_register(vm, cpuid, vie->index_register, &idx);
		if (error) {
			printf("verify_gla: error %d getting index reg %d\n",
				error, vie->index_register);
			return (-1);
		}
	}

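	/* The decoded effective address is base + scale * index + disp */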
	if (base + vie->scale * idx + vie->displacement != gla) {
		printf("verify_gla mismatch: "
		       "base(0x%0lx), scale(%d), index(0x%0lx), "
		       "disp(0x%0lx), gla(0x%0lx)\n",
		       base, vie->scale, idx, vie->displacement, gla);
		return (-1);
	}

	return (0);
}
#endif	/* VERIFY_GLA */

int
vmm_decode_instruction(struct vm *vm, int cpuid, uint64_t gla, struct vie *vie)
{

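	/*
	 * Decode the fields in the order they appear in an instruction:
	 * REX prefix, opcode, ModRM, SIB, displacement and immediate.
	 */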
	if (decode_rex(vie))
		return (-1);

	if (decode_opcode(vie))
		return (-1);

	if (decode_modrm(vie))
		return (-1);

	if (decode_sib(vie))
		return (-1);

	if (decode_displacement(vie))
		return (-1);

	if (decode_immediate(vie))
		return (-1);

#ifdef VERIFY_GLA
	if (verify_gla(vm, cpuid, gla, vie))
		return (-1);
#endif

	vie->decoded = 1;	/* success */

	return (0);
}
#endif	/* _KERNEL */