vmm_instruction_emul.c revision 254964
/*-
 * Copyright (c) 2012 Sandvine, Inc.
 * Copyright (c) 2012 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/amd64/vmm/vmm_instruction_emul.c 254964 2013-08-27 16:49:20Z neel $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/amd64/vmm/vmm_instruction_emul.c 254964 2013-08-27 16:49:20Z neel $");

#ifdef _KERNEL
#include <sys/param.h>
#include <sys/pcpu.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/pmap.h>
#include <machine/vmparam.h>
#include <machine/vmm.h>
#else	/* !_KERNEL */
#include <sys/types.h>
#include <sys/errno.h>

#include <machine/vmm.h>

#include <vmmapi.h>
#endif	/* _KERNEL */

enum cpu_mode {
	CPU_MODE_COMPATIBILITY,		/* IA-32E mode (CS.L = 0) */
	CPU_MODE_64BIT,			/* IA-32E mode (CS.L = 1) */
};

/* struct vie_op.op_type */
enum {
	VIE_OP_TYPE_NONE = 0,
	VIE_OP_TYPE_MOV,
	VIE_OP_TYPE_AND,
	VIE_OP_TYPE_OR,
	VIE_OP_TYPE_LAST
};

/* struct vie_op.op_flags */
#define	VIE_OP_F_IMM		(1 << 0)	/* immediate operand present */
#define	VIE_OP_F_IMM8		(1 << 1)	/* 8-bit immediate operand */

static const struct vie_op one_byte_opcodes[256] = {
	[0x88] = {
		.op_byte = 0x88,
		.op_type = VIE_OP_TYPE_MOV,
	},
	[0x89] = {
		.op_byte = 0x89,
		.op_type = VIE_OP_TYPE_MOV,
	},
	[0x8A] = {
		.op_byte = 0x8A,
		.op_type = VIE_OP_TYPE_MOV,
	},
	[0x8B] = {
		.op_byte = 0x8B,
		.op_type = VIE_OP_TYPE_MOV,
	},
	[0xC7] = {
		.op_byte = 0xC7,
		.op_type = VIE_OP_TYPE_MOV,
		.op_flags = VIE_OP_F_IMM,
	},
	[0x23] = {
		.op_byte = 0x23,
		.op_type = VIE_OP_TYPE_AND,
	},
	[0x81] = {
		/* XXX Group 1 extended opcode - not just AND */
		.op_byte = 0x81,
		.op_type = VIE_OP_TYPE_AND,
		.op_flags = VIE_OP_F_IMM,
	},
	[0x83] = {
		/* XXX Group 1 extended opcode - not just OR */
		.op_byte = 0x83,
		.op_type = VIE_OP_TYPE_OR,
		.op_flags = VIE_OP_F_IMM8,
	},
};

/* struct vie.mod */
#define	VIE_MOD_INDIRECT		0
#define	VIE_MOD_INDIRECT_DISP8		1
#define	VIE_MOD_INDIRECT_DISP32		2
#define	VIE_MOD_DIRECT			3

/* struct vie.rm */
#define	VIE_RM_SIB			4
#define	VIE_RM_DISP32			5

#define	GB				(1024 * 1024 * 1024)

static enum vm_reg_name gpr_map[16] = {
	VM_REG_GUEST_RAX,
	VM_REG_GUEST_RCX,
	VM_REG_GUEST_RDX,
	VM_REG_GUEST_RBX,
	VM_REG_GUEST_RSP,
	VM_REG_GUEST_RBP,
	VM_REG_GUEST_RSI,
	VM_REG_GUEST_RDI,
	VM_REG_GUEST_R8,
	VM_REG_GUEST_R9,
	VM_REG_GUEST_R10,
	VM_REG_GUEST_R11,
	VM_REG_GUEST_R12,
	VM_REG_GUEST_R13,
	VM_REG_GUEST_R14,
	VM_REG_GUEST_R15
};

static uint64_t size2mask[] = {
	[1] = 0xff,
	[2] = 0xffff,
	[4] = 0xffffffff,
	[8] = 0xffffffffffffffff,
};

static int
vie_read_register(void *vm, int vcpuid, enum vm_reg_name reg, uint64_t *rval)
{
	int error;

	error = vm_get_register(vm, vcpuid, reg, rval);

	return (error);
}

static int
vie_read_bytereg(void *vm, int vcpuid, struct vie *vie, uint8_t *rval)
{
	uint64_t val;
	int error, rshift;
	enum vm_reg_name reg;

	rshift = 0;
	reg = gpr_map[vie->reg];

	/*
	 * 64-bit mode imposes limitations on accessing legacy byte registers.
	 *
	 * The legacy high-byte registers cannot be addressed if the REX
	 * prefix is present. In this case the values 4, 5, 6 and 7 of the
	 * 'ModRM:reg' field address %spl, %bpl, %sil and %dil respectively.
	 *
	 * If the REX prefix is not present then the values 4, 5, 6 and 7
	 * of the 'ModRM:reg' field address the legacy high-byte registers,
	 * %ah, %ch, %dh and %bh respectively.
	 */
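	/*
	 * Illustrative encodings (register-direct forms, shown only for
	 * the register-naming rule): "88 e5" is 'mov %ah,%ch', while the
	 * same bytes behind a REX prefix, "40 88 e5", become
	 * 'mov %spl,%bpl'.
	 */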
	if (!vie->rex_present) {
		if (vie->reg & 0x4) {
			/*
			 * Obtain the value of %ah by reading %rax and shifting
			 * right by 8 bits (same for %bh, %ch and %dh).
			 */
			rshift = 8;
			reg = gpr_map[vie->reg & 0x3];
		}
	}

	error = vm_get_register(vm, vcpuid, reg, &val);
	*rval = val >> rshift;
	return (error);
}

static int
vie_update_register(void *vm, int vcpuid, enum vm_reg_name reg,
		    uint64_t val, int size)
{
	int error;
	uint64_t origval;

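	/*
	 * Merge the new value with the old one for 1- and 2-byte writes,
	 * and zero-extend 4-byte writes into the upper half of the
	 * register, mirroring hardware behavior. E.g. (illustrative):
	 * with %rax = 0x1122334455667788, writing 0xaabb with size 2
	 * yields 0x112233445566aabb, but writing it with size 4 yields
	 * 0x000000000000aabb.
	 */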
	switch (size) {
	case 1:
	case 2:
		error = vie_read_register(vm, vcpuid, reg, &origval);
		if (error)
			return (error);
		val &= size2mask[size];
		val |= origval & ~size2mask[size];
		break;
	case 4:
		val &= 0xffffffffUL;
		break;
	case 8:
		break;
	default:
		return (EINVAL);
	}

	error = vm_set_register(vm, vcpuid, reg, val);
	return (error);
}

/*
 * The following simplifying assumptions are made during emulation:
 *
 * - guest is in 64-bit mode
 *   - default address size is 64-bits
 *   - default operand size is 32-bits
 *
 * - operand size override is not supported
 *
 * - address size override is not supported
 */
static int
emulate_mov(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
	    mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
{
	int error, size;
	enum vm_reg_name reg;
	uint8_t byte;
	uint64_t val;

	size = 4;
	error = EINVAL;

	switch (vie->op.op_byte) {
	case 0x88:
		/*
		 * MOV byte from reg (ModRM:reg) to mem (ModRM:r/m)
		 * 88/r:	mov r/m8, r8
		 * REX + 88/r:	mov r/m8, r8 (%ah, %ch, %dh, %bh not available)
		 */
		size = 1;
		error = vie_read_bytereg(vm, vcpuid, vie, &byte);
		if (error == 0)
			error = memwrite(vm, vcpuid, gpa, byte, size, arg);
		break;
	case 0x89:
		/*
		 * MOV from reg (ModRM:reg) to mem (ModRM:r/m)
		 * 89/r:	mov r/m32, r32
		 * REX.W + 89/r:	mov r/m64, r64
		 */
		if (vie->rex_w)
			size = 8;
		reg = gpr_map[vie->reg];
		error = vie_read_register(vm, vcpuid, reg, &val);
		if (error == 0) {
			val &= size2mask[size];
			error = memwrite(vm, vcpuid, gpa, val, size, arg);
		}
		break;
	case 0x8A:
	case 0x8B:
		/*
		 * MOV from mem (ModRM:r/m) to reg (ModRM:reg)
		 * 8A/r:	mov r8, r/m8
		 * REX + 8A/r:	mov r8, r/m8
		 * 8B/r:	mov r32, r/m32
		 * REX.W + 8B/r:	mov r64, r/m64
		 */
		if (vie->op.op_byte == 0x8A)
			size = 1;
		else if (vie->rex_w)
			size = 8;
		error = memread(vm, vcpuid, gpa, &val, size, arg);
		if (error == 0) {
			reg = gpr_map[vie->reg];
			error = vie_update_register(vm, vcpuid, reg, val, size);
		}
		break;
	case 0xC7:
		/*
		 * MOV from imm32 to mem (ModRM:r/m)
		 * C7/0:	mov r/m32, imm32
		 * REX.W + C7/0:	mov r/m64, imm32 (sign-extended to 64-bits)
		 */
		val = vie->immediate;		/* already sign-extended */

		if (vie->rex_w)
			size = 8;

		if (size != 8)
			val &= size2mask[size];

		error = memwrite(vm, vcpuid, gpa, val, size, arg);
		break;
	default:
		break;
	}

	return (error);
}

static int
emulate_and(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
	    mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
{
	int error, size;
	enum vm_reg_name reg;
	uint64_t val1, val2;

	size = 4;
	error = EINVAL;

	switch (vie->op.op_byte) {
	case 0x23:
		/*
		 * AND reg (ModRM:reg) with mem (ModRM:r/m) and store the
		 * result in reg.
		 *
		 * 23/r		and r32, r/m32
		 * REX.W + 23/r	and r64, r/m64
		 */
		if (vie->rex_w)
			size = 8;

		/* get the first operand */
		reg = gpr_map[vie->reg];
		error = vie_read_register(vm, vcpuid, reg, &val1);
		if (error)
			break;

		/* get the second operand */
		error = memread(vm, vcpuid, gpa, &val2, size, arg);
		if (error)
			break;

		/* perform the operation and write the result */
		val1 &= val2;
		error = vie_update_register(vm, vcpuid, reg, val1, size);
		break;
	case 0x81:
		/*
		 * AND mem (ModRM:r/m) with immediate and store the
		 * result in mem.
		 *
		 * 81 /4		and r/m32, imm32
		 * REX.W + 81 /4	and r/m64, imm32 sign-extended to 64
		 *
		 * Currently, only the AND operation of the 0x81 opcode
		 * is implemented (ModRM:reg = b100).
		 */
		if ((vie->reg & 7) != 4)
			break;

		if (vie->rex_w)
			size = 8;

		/* get the first operand */
		error = memread(vm, vcpuid, gpa, &val1, size, arg);
		if (error)
			break;

		/*
		 * perform the operation with the pre-fetched immediate
		 * operand and write the result
		 */
		val1 &= vie->immediate;
		error = memwrite(vm, vcpuid, gpa, val1, size, arg);
		break;
	default:
		break;
	}
	return (error);
}

static int
emulate_or(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
	    mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
{
	int error, size;
	uint64_t val1;

	size = 4;
	error = EINVAL;

	switch (vie->op.op_byte) {
	case 0x83:
		/*
		 * OR mem (ModRM:r/m) with immediate and store the
		 * result in mem.
		 *
		 * 83 /1		OR r/m32, imm8 sign-extended to 32
		 * REX.W + 83 /1	OR r/m64, imm8 sign-extended to 64
		 *
		 * Currently, only the OR operation of the 0x83 opcode
		 * is implemented (ModRM:reg = b001).
		 */
		if ((vie->reg & 7) != 1)
			break;

		if (vie->rex_w)
			size = 8;

		/* get the first operand */
		error = memread(vm, vcpuid, gpa, &val1, size, arg);
		if (error)
			break;

		/*
		 * perform the operation with the pre-fetched immediate
		 * operand and write the result
		 */
		val1 |= vie->immediate;
		error = memwrite(vm, vcpuid, gpa, val1, size, arg);
		break;
	default:
		break;
	}
	return (error);
}

int
vmm_emulate_instruction(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
			mem_region_read_t memread, mem_region_write_t memwrite,
			void *memarg)
{
	int error;

	if (!vie->decoded)
		return (EINVAL);

	switch (vie->op.op_type) {
	case VIE_OP_TYPE_MOV:
		error = emulate_mov(vm, vcpuid, gpa, vie,
				    memread, memwrite, memarg);
		break;
	case VIE_OP_TYPE_AND:
		error = emulate_and(vm, vcpuid, gpa, vie,
				    memread, memwrite, memarg);
		break;
	case VIE_OP_TYPE_OR:
		error = emulate_or(vm, vcpuid, gpa, vie,
				    memread, memwrite, memarg);
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}
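
/*
 * A minimal sketch of how a caller might drive this module on a nested
 * page table fault (schematic only; the real call sites live elsewhere in
 * vmm, and the EFAULT error choice here is illustrative):
 *
 *	struct vie vie;
 *
 *	if (vmm_fetch_instruction(vm, vcpuid, rip, inst_length, cr3, &vie))
 *		return (EFAULT);
 *	if (vmm_decode_instruction(vm, vcpuid, gla, &vie))
 *		return (EFAULT);
 *	error = vmm_emulate_instruction(vm, vcpuid, gpa, &vie,
 *	    memread, memwrite, memarg);
 */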

#ifdef _KERNEL
static void
vie_init(struct vie *vie)
{

	bzero(vie, sizeof(struct vie));

	vie->base_register = VM_REG_LAST;
	vie->index_register = VM_REG_LAST;
}

static int
gla2gpa(struct vm *vm, uint64_t gla, uint64_t ptpphys,
	uint64_t *gpa, uint64_t *gpaend)
{
	vm_paddr_t hpa;
	int nlevels, ptpshift, ptpindex;
	uint64_t *ptpbase, pte, pgsize;

	/*
	 * XXX assumes 64-bit guest with 4 page walk levels
	 */
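	/*
	 * With 4 levels the loop walks page-table shifts 39, 30, 21 and
	 * 12. E.g. (illustrative): a 4KB-mapped gla of 0x7f0000201000 is
	 * looked up as PML4[254], PDP[0], PD[1] and finally PT[1].
	 */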
	nlevels = 4;
	while (--nlevels >= 0) {
		/* Zero out the lower 12 bits and the upper 12 bits */
		ptpphys >>= 12; ptpphys <<= 24; ptpphys >>= 12;

		hpa = vm_gpa2hpa(vm, ptpphys, PAGE_SIZE);
		if (hpa == -1)
			goto error;

		ptpbase = (uint64_t *)PHYS_TO_DMAP(hpa);

		ptpshift = PAGE_SHIFT + nlevels * 9;
		ptpindex = (gla >> ptpshift) & 0x1FF;
		pgsize = 1UL << ptpshift;

		pte = ptpbase[ptpindex];

		if ((pte & PG_V) == 0)
			goto error;

		if (pte & PG_PS) {
			if (pgsize > 1 * GB)
				goto error;
			else
				break;
		}

		ptpphys = pte;
	}

	/* Zero out the lower 'ptpshift' bits and the upper 12 bits */
	pte >>= ptpshift; pte <<= (ptpshift + 12); pte >>= 12;
	*gpa = pte | (gla & (pgsize - 1));
	*gpaend = pte + pgsize;
	return (0);

error:
	return (-1);
}

int
vmm_fetch_instruction(struct vm *vm, int cpuid, uint64_t rip, int inst_length,
		      uint64_t cr3, struct vie *vie)
{
	int n, err;
	uint64_t hpa, gpa, gpaend, off;

	/*
	 * XXX cache previously fetched instructions using 'rip' as the tag
	 */

	if (inst_length > VIE_INST_SIZE)
		panic("vmm_fetch_instruction: invalid length %d", inst_length);

	vie_init(vie);

	/* Copy the instruction into 'vie' */
	while (vie->num_valid < inst_length) {
		err = gla2gpa(vm, rip, cr3, &gpa, &gpaend);
		if (err)
			break;

		off = gpa & PAGE_MASK;
		n = min(inst_length - vie->num_valid, PAGE_SIZE - off);

		hpa = vm_gpa2hpa(vm, gpa, n);
		if (hpa == -1)
			break;

		bcopy((void *)PHYS_TO_DMAP(hpa), &vie->inst[vie->num_valid], n);

		rip += n;
		vie->num_valid += n;
	}

	if (vie->num_valid == inst_length)
		return (0);
	else
		return (-1);
}

static int
vie_peek(struct vie *vie, uint8_t *x)
{

	if (vie->num_processed < vie->num_valid) {
		*x = vie->inst[vie->num_processed];
		return (0);
	} else
		return (-1);
}

static void
vie_advance(struct vie *vie)
{

	vie->num_processed++;
}

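/*
 * A REX prefix is any byte in the range 0x40-0x4F, laid out as 0100WRXB.
 * E.g. (illustrative): 0x48 sets only REX.W (64-bit operand size), while
 * 0x44 sets only REX.R (extending ModRM:reg to select %r8-%r15).
 */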
static int
decode_rex(struct vie *vie)
{
	uint8_t x;

	if (vie_peek(vie, &x))
		return (-1);

	if (x >= 0x40 && x <= 0x4F) {
		vie->rex_present = 1;

		vie->rex_w = x & 0x8 ? 1 : 0;
		vie->rex_r = x & 0x4 ? 1 : 0;
		vie->rex_x = x & 0x2 ? 1 : 0;
		vie->rex_b = x & 0x1 ? 1 : 0;

		vie_advance(vie);
	}

	return (0);
}

static int
decode_opcode(struct vie *vie)
{
	uint8_t x;

	if (vie_peek(vie, &x))
		return (-1);

	vie->op = one_byte_opcodes[x];

	if (vie->op.op_type == VIE_OP_TYPE_NONE)
		return (-1);

	vie_advance(vie);
	return (0);
}

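/*
 * E.g. (illustrative): for "89 51 08" (mov %edx,0x8(%rcx)) the ModRM byte
 * 0x51 decodes as mod=1 (disp8 follows), reg=2 (%rdx) and r/m=1 (%rcx).
 */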
static int
decode_modrm(struct vie *vie)
{
	uint8_t x;
	enum cpu_mode cpu_mode;

	/*
	 * XXX assuming that guest is in IA-32E 64-bit mode
	 */
	cpu_mode = CPU_MODE_64BIT;

	if (vie_peek(vie, &x))
		return (-1);

	vie->mod = (x >> 6) & 0x3;
	vie->rm =  (x >> 0) & 0x7;
	vie->reg = (x >> 3) & 0x7;

	/*
	 * A direct addressing mode makes no sense in the context of an EPT
	 * fault. There has to be a memory access involved to cause the
	 * EPT fault.
	 */
	if (vie->mod == VIE_MOD_DIRECT)
		return (-1);

	if ((vie->mod == VIE_MOD_INDIRECT && vie->rm == VIE_RM_DISP32) ||
	    (vie->mod != VIE_MOD_DIRECT && vie->rm == VIE_RM_SIB)) {
		/*
		 * Table 2-5: Special Cases of REX Encodings
		 *
		 * mod=0, r/m=5 is used in the compatibility mode to
		 * indicate a disp32 without a base register.
		 *
		 * mod!=3, r/m=4 is used in the compatibility mode to
		 * indicate that the SIB byte is present.
		 *
		 * The 'b' bit in the REX prefix is don't care in
		 * this case.
		 */
	} else {
		vie->rm |= (vie->rex_b << 3);
	}

	vie->reg |= (vie->rex_r << 3);

	/* SIB */
	if (vie->mod != VIE_MOD_DIRECT && vie->rm == VIE_RM_SIB)
		goto done;

	vie->base_register = gpr_map[vie->rm];

	switch (vie->mod) {
	case VIE_MOD_INDIRECT_DISP8:
		vie->disp_bytes = 1;
		break;
	case VIE_MOD_INDIRECT_DISP32:
		vie->disp_bytes = 4;
		break;
	case VIE_MOD_INDIRECT:
		if (vie->rm == VIE_RM_DISP32) {
			vie->disp_bytes = 4;
			/*
			 * Table 2-7. RIP-Relative Addressing
			 *
			 * In 64-bit mode mod=00 r/m=101 implies [rip] + disp32
			 * whereas in compatibility mode it just implies disp32.
			 */

			if (cpu_mode == CPU_MODE_64BIT)
				vie->base_register = VM_REG_GUEST_RIP;
			else
				vie->base_register = VM_REG_LAST;
		}
		break;
	}

	/* Figure out immediate operand size (if any) */
	if (vie->op.op_flags & VIE_OP_F_IMM)
		vie->imm_bytes = 4;
	else if (vie->op.op_flags & VIE_OP_F_IMM8)
		vie->imm_bytes = 1;

done:
	vie_advance(vie);

	return (0);
}

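/*
 * E.g. (illustrative): for "89 44 88 08" (mov %eax,0x8(%rax,%rcx,4)) the
 * SIB byte 0x88, which follows ModRM 0x44 (mod=1, r/m=4), decodes as
 * ss=2 (scale 4), index=1 (%rcx) and base=0 (%rax).
 */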
static int
decode_sib(struct vie *vie)
{
	uint8_t x;

	/* Proceed only if SIB byte is present */
	if (vie->mod == VIE_MOD_DIRECT || vie->rm != VIE_RM_SIB)
		return (0);

	if (vie_peek(vie, &x))
		return (-1);

	/* De-construct the SIB byte */
	vie->ss = (x >> 6) & 0x3;
	vie->index = (x >> 3) & 0x7;
	vie->base = (x >> 0) & 0x7;

	/* Apply the REX prefix modifiers */
	vie->index |= vie->rex_x << 3;
	vie->base |= vie->rex_b << 3;

	switch (vie->mod) {
	case VIE_MOD_INDIRECT_DISP8:
		vie->disp_bytes = 1;
		break;
	case VIE_MOD_INDIRECT_DISP32:
		vie->disp_bytes = 4;
		break;
	}

	if (vie->mod == VIE_MOD_INDIRECT &&
	    (vie->base == 5 || vie->base == 13)) {
		/*
		 * Special case when base register is unused if mod = 0
		 * and base = %rbp or %r13.
		 *
		 * Documented in:
		 * Table 2-3: 32-bit Addressing Forms with the SIB Byte
		 * Table 2-5: Special Cases of REX Encodings
		 */
		vie->disp_bytes = 4;
	} else {
		vie->base_register = gpr_map[vie->base];
	}

	/*
	 * All encodings of 'index' are valid except for %rsp (4).
	 *
	 * Documented in:
	 * Table 2-3: 32-bit Addressing Forms with the SIB Byte
	 * Table 2-5: Special Cases of REX Encodings
	 */
	if (vie->index != 4)
		vie->index_register = gpr_map[vie->index];

	/* 'scale' makes sense only in the context of an index register */
	if (vie->index_register < VM_REG_LAST)
		vie->scale = 1 << vie->ss;

	vie_advance(vie);

	return (0);
}

static int
decode_displacement(struct vie *vie)
{
	int n, i;
	uint8_t x;

	union {
		char	buf[4];
		int8_t	signed8;
		int32_t	signed32;
	} u;
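
	/*
	 * Displacement bytes are accumulated below in instruction-stream
	 * order; reading them back through the union relies on the host
	 * being little-endian, which holds on amd64.
	 */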

	if ((n = vie->disp_bytes) == 0)
		return (0);

	if (n != 1 && n != 4)
		panic("decode_displacement: invalid disp_bytes %d", n);

	for (i = 0; i < n; i++) {
		if (vie_peek(vie, &x))
			return (-1);

		u.buf[i] = x;
		vie_advance(vie);
	}

	if (n == 1)
		vie->displacement = u.signed8;		/* sign-extended */
	else
		vie->displacement = u.signed32;		/* sign-extended */

	return (0);
}

static int
decode_immediate(struct vie *vie)
{
	int i, n;
	uint8_t x;
	union {
		char	buf[4];
		int8_t	signed8;
		int32_t	signed32;
	} u;

	if ((n = vie->imm_bytes) == 0)
		return (0);

	if (n != 1 && n != 4)
		panic("decode_immediate: invalid imm_bytes %d", n);

	for (i = 0; i < n; i++) {
		if (vie_peek(vie, &x))
			return (-1);

		u.buf[i] = x;
		vie_advance(vie);
	}

	if (n == 1)
		vie->immediate = u.signed8;		/* sign-extended */
	else
		vie->immediate = u.signed32;		/* sign-extended */

	return (0);
}

/*
 * Verify that all the bytes in the instruction buffer were consumed.
 */
static int
verify_inst_length(struct vie *vie)
{

	if (vie->num_processed == vie->num_valid)
		return (0);
	else
		return (-1);
}

/*
 * Verify that the 'guest linear address' provided as collateral of the nested
 * page table fault matches our instruction decoding.
 */
static int
verify_gla(struct vm *vm, int cpuid, uint64_t gla, struct vie *vie)
{
	int error;
	uint64_t base, idx;

	/* Skip 'gla' verification */
	if (gla == VIE_INVALID_GLA)
		return (0);

	base = 0;
	if (vie->base_register != VM_REG_LAST) {
		error = vm_get_register(vm, cpuid, vie->base_register, &base);
		if (error) {
			printf("verify_gla: error %d getting base reg %d\n",
				error, vie->base_register);
			return (-1);
		}

		/*
		 * RIP-relative addressing starts from the following
		 * instruction
		 */
		if (vie->base_register == VM_REG_GUEST_RIP)
			base += vie->num_valid;
	}

	idx = 0;
	if (vie->index_register != VM_REG_LAST) {
		error = vm_get_register(vm, cpuid, vie->index_register, &idx);
		if (error) {
			printf("verify_gla: error %d getting index reg %d\n",
				error, vie->index_register);
			return (-1);
		}
	}

	if (base + vie->scale * idx + vie->displacement != gla) {
		printf("verify_gla mismatch: "
		       "base(0x%0lx), scale(%d), index(0x%0lx), "
		       "disp(0x%0lx), gla(0x%0lx)\n",
		       base, vie->scale, idx, vie->displacement, gla);
		return (-1);
	}

	return (0);
}

int
vmm_decode_instruction(struct vm *vm, int cpuid, uint64_t gla, struct vie *vie)
{

	if (decode_rex(vie))
		return (-1);

	if (decode_opcode(vie))
		return (-1);

	if (decode_modrm(vie))
		return (-1);

	if (decode_sib(vie))
		return (-1);

	if (decode_displacement(vie))
		return (-1);

	if (decode_immediate(vie))
		return (-1);

	if (verify_inst_length(vie))
		return (-1);

	if (verify_gla(vm, cpuid, gla, vie))
		return (-1);

	vie->decoded = 1;	/* success */

	return (0);
}
#endif	/* _KERNEL */