vmm_instruction_emul.c revision 270159
/*-
 * Copyright (c) 2012 Sandvine, Inc.
 * Copyright (c) 2012 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/10/sys/amd64/vmm/vmm_instruction_emul.c 270159 2014-08-19 01:20:24Z grehan $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/amd64/vmm/vmm_instruction_emul.c 270159 2014-08-19 01:20:24Z grehan $");

#ifdef _KERNEL
#include <sys/param.h>
#include <sys/pcpu.h>
#include <sys/systm.h>
#include <sys/proc.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/vmparam.h>
#include <machine/vmm.h>
#else	/* !_KERNEL */
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/_iovec.h>

#include <machine/vmm.h>

#include <assert.h>
#include <vmmapi.h>
#define	KASSERT(exp,msg)	assert((exp))
#endif	/* _KERNEL */

#include <machine/vmm_instruction_emul.h>
#include <x86/psl.h>
#include <x86/specialreg.h>

/* struct vie_op.op_type */
enum {
	VIE_OP_TYPE_NONE = 0,
	VIE_OP_TYPE_MOV,
	VIE_OP_TYPE_MOVSX,
	VIE_OP_TYPE_MOVZX,
	VIE_OP_TYPE_AND,
	VIE_OP_TYPE_OR,
	VIE_OP_TYPE_TWO_BYTE,
	VIE_OP_TYPE_PUSH,
	VIE_OP_TYPE_CMP,
	VIE_OP_TYPE_LAST
};

/* struct vie_op.op_flags */
#define	VIE_OP_F_IMM		(1 << 0)	/* 16/32-bit immediate operand */
#define	VIE_OP_F_IMM8		(1 << 1)	/* 8-bit immediate operand */
#define	VIE_OP_F_MOFFSET	(1 << 2)	/* 16/32/64-bit immediate moffset */
#define	VIE_OP_F_NO_MODRM	(1 << 3)

static const struct vie_op two_byte_opcodes[256] = {
	[0xB6] = {
		.op_byte = 0xB6,
		.op_type = VIE_OP_TYPE_MOVZX,
	},
	[0xB7] = {
		.op_byte = 0xB7,
		.op_type = VIE_OP_TYPE_MOVZX,
	},
	[0xBE] = {
		.op_byte = 0xBE,
		.op_type = VIE_OP_TYPE_MOVSX,
	},
};

static const struct vie_op one_byte_opcodes[256] = {
	[0x0F] = {
		.op_byte = 0x0F,
		.op_type = VIE_OP_TYPE_TWO_BYTE
	},
	[0x3B] = {
		.op_byte = 0x3B,
		.op_type = VIE_OP_TYPE_CMP,
	},
	[0x88] = {
		.op_byte = 0x88,
		.op_type = VIE_OP_TYPE_MOV,
	},
	[0x89] = {
		.op_byte = 0x89,
		.op_type = VIE_OP_TYPE_MOV,
	},
	[0x8A] = {
		.op_byte = 0x8A,
		.op_type = VIE_OP_TYPE_MOV,
	},
	[0x8B] = {
		.op_byte = 0x8B,
		.op_type = VIE_OP_TYPE_MOV,
	},
	[0xA1] = {
		.op_byte = 0xA1,
		.op_type = VIE_OP_TYPE_MOV,
		.op_flags = VIE_OP_F_MOFFSET | VIE_OP_F_NO_MODRM,
	},
	[0xA3] = {
		.op_byte = 0xA3,
		.op_type = VIE_OP_TYPE_MOV,
		.op_flags = VIE_OP_F_MOFFSET | VIE_OP_F_NO_MODRM,
	},
	[0xC6] = {
		/* XXX Group 11 extended opcode - not just MOV */
		.op_byte = 0xC6,
		.op_type = VIE_OP_TYPE_MOV,
		.op_flags = VIE_OP_F_IMM8,
	},
	[0xC7] = {
		.op_byte = 0xC7,
		.op_type = VIE_OP_TYPE_MOV,
		.op_flags = VIE_OP_F_IMM,
	},
	[0x23] = {
		.op_byte = 0x23,
		.op_type = VIE_OP_TYPE_AND,
	},
	[0x81] = {
		/* XXX Group 1 extended opcode - not just AND */
		.op_byte = 0x81,
		.op_type = VIE_OP_TYPE_AND,
		.op_flags = VIE_OP_F_IMM,
	},
	[0x83] = {
		/* XXX Group 1 extended opcode - not just OR */
		.op_byte = 0x83,
		.op_type = VIE_OP_TYPE_OR,
		.op_flags = VIE_OP_F_IMM8,
	},
	[0xFF] = {
		/* XXX Group 5 extended opcode - not just PUSH */
		.op_byte = 0xFF,
		.op_type = VIE_OP_TYPE_PUSH,
	}
};

/* struct vie.mod */
#define	VIE_MOD_INDIRECT	0
#define	VIE_MOD_INDIRECT_DISP8	1
#define	VIE_MOD_INDIRECT_DISP32	2
#define	VIE_MOD_DIRECT		3

/* struct vie.rm */
#define	VIE_RM_SIB		4
#define	VIE_RM_DISP32		5

#define	GB			(1024 * 1024 * 1024)

static enum vm_reg_name gpr_map[16] = {
	VM_REG_GUEST_RAX,
	VM_REG_GUEST_RCX,
	VM_REG_GUEST_RDX,
	VM_REG_GUEST_RBX,
	VM_REG_GUEST_RSP,
	VM_REG_GUEST_RBP,
	VM_REG_GUEST_RSI,
	VM_REG_GUEST_RDI,
	VM_REG_GUEST_R8,
	VM_REG_GUEST_R9,
	VM_REG_GUEST_R10,
	VM_REG_GUEST_R11,
	VM_REG_GUEST_R12,
	VM_REG_GUEST_R13,
	VM_REG_GUEST_R14,
	VM_REG_GUEST_R15
};

static uint64_t size2mask[] = {
	[1] = 0xff,
	[2] = 0xffff,
	[4] = 0xffffffff,
	[8] = 0xffffffffffffffff,
};

static int
vie_read_register(void *vm, int vcpuid, enum vm_reg_name reg, uint64_t *rval)
{
	int error;

	error = vm_get_register(vm, vcpuid, reg, rval);

	return (error);
}

static void
vie_calc_bytereg(struct vie *vie, enum vm_reg_name *reg, int *lhbr)
{
	*lhbr = 0;
	*reg = gpr_map[vie->reg];

	/*
	 * 64-bit mode imposes limitations on accessing legacy high byte
	 * registers (lhbr).
	 *
	 * The legacy high-byte registers cannot be addressed if the REX
	 * prefix is present. In this case the values 4, 5, 6 and 7 of the
	 * 'ModRM:reg' field address %spl, %bpl, %sil and %dil respectively.
	 *
	 * If the REX prefix is not present then the values 4, 5, 6 and 7
	 * of the 'ModRM:reg' field address the legacy high-byte registers,
	 * %ah, %ch, %dh and %bh respectively.
	 */
	if (!vie->rex_present) {
		if (vie->reg & 0x4) {
			*lhbr = 1;
			*reg = gpr_map[vie->reg & 0x3];
		}
	}
}
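
/*
 * Illustrative example (added; not in the original source): with
 * ModRM:reg = 5 and no REX prefix, vie_calc_bytereg() selects %ch
 * (gpr_map[5 & 0x3] = %rcx, lhbr = 1); with any REX prefix present
 * it selects %bpl instead (gpr_map[5] = %rbp, lhbr = 0).
 */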

static int
vie_read_bytereg(void *vm, int vcpuid, struct vie *vie, uint8_t *rval)
{
	uint64_t val;
	int error, lhbr;
	enum vm_reg_name reg;

	vie_calc_bytereg(vie, &reg, &lhbr);
	error = vm_get_register(vm, vcpuid, reg, &val);

	/*
	 * To obtain the value of a legacy high byte register shift the
	 * base register right by 8 bits (%ah = %rax >> 8).
	 */
	if (lhbr)
		*rval = val >> 8;
	else
		*rval = val;
	return (error);
}

static int
vie_write_bytereg(void *vm, int vcpuid, struct vie *vie, uint8_t byte)
{
	uint64_t origval, val, mask;
	int error, lhbr;
	enum vm_reg_name reg;

	vie_calc_bytereg(vie, &reg, &lhbr);
	error = vm_get_register(vm, vcpuid, reg, &origval);
	if (error == 0) {
		val = byte;
		mask = 0xff;
		if (lhbr) {
			/*
			 * Shift left by 8 to store 'byte' in a legacy high
			 * byte register.
			 */
			val <<= 8;
			mask <<= 8;
		}
		val |= origval & ~mask;
		error = vm_set_register(vm, vcpuid, reg, val);
	}
	return (error);
}

int
vie_update_register(void *vm, int vcpuid, enum vm_reg_name reg,
    uint64_t val, int size)
{
	int error;
	uint64_t origval;

	switch (size) {
	case 1:
	case 2:
		error = vie_read_register(vm, vcpuid, reg, &origval);
		if (error)
			return (error);
		val &= size2mask[size];
		val |= origval & ~size2mask[size];
		break;
	case 4:
		val &= 0xffffffffUL;
		break;
	case 8:
		break;
	default:
		return (EINVAL);
	}

	error = vm_set_register(vm, vcpuid, reg, val);
	return (error);
}

/*
 * Return the status flags that would result from doing (x - y).
 */
static u_long
getcc16(uint16_t x, uint16_t y)
{
	u_long rflags;

	__asm __volatile("sub %1,%2; pushfq; popq %0" :
	    "=r" (rflags) : "m" (y), "r" (x));
	return (rflags);
}

static u_long
getcc32(uint32_t x, uint32_t y)
{
	u_long rflags;

	__asm __volatile("sub %1,%2; pushfq; popq %0" :
	    "=r" (rflags) : "m" (y), "r" (x));
	return (rflags);
}

static u_long
getcc64(uint64_t x, uint64_t y)
{
	u_long rflags;

	__asm __volatile("sub %1,%2; pushfq; popq %0" :
	    "=r" (rflags) : "m" (y), "r" (x));
	return (rflags);
}

static u_long
getcc(int opsize, uint64_t x, uint64_t y)
{
	KASSERT(opsize == 2 || opsize == 4 || opsize == 8,
	    ("getcc: invalid operand size %d", opsize));

	if (opsize == 2)
		return (getcc16(x, y));
	else if (opsize == 4)
		return (getcc32(x, y));
	else
		return (getcc64(x, y));
}
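
/*
 * Worked example (added for clarity): getcc(4, 1, 2) executes the
 * "sub" on the host with x = 1 and y = 2, so the borrow sets PSL_C
 * and the negative result sets PSL_N; comparing equal values would
 * set PSL_Z instead.  Callers consume only the status bits of the
 * returned rflags value.
 */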

static int
emulate_mov(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
{
	int error, size;
	enum vm_reg_name reg;
	uint8_t byte;
	uint64_t val;

	size = vie->opsize;
	error = EINVAL;

	switch (vie->op.op_byte) {
	case 0x88:
		/*
		 * MOV byte from reg (ModRM:reg) to mem (ModRM:r/m)
		 * 88/r:	mov r/m8, r8
		 * REX + 88/r:	mov r/m8, r8 (%ah, %ch, %dh, %bh not available)
		 */
		size = 1;	/* override for byte operation */
		error = vie_read_bytereg(vm, vcpuid, vie, &byte);
		if (error == 0)
			error = memwrite(vm, vcpuid, gpa, byte, size, arg);
		break;
	case 0x89:
		/*
		 * MOV from reg (ModRM:reg) to mem (ModRM:r/m)
		 * 89/r:	mov r/m16, r16
		 * 89/r:	mov r/m32, r32
		 * REX.W + 89/r	mov r/m64, r64
		 */
		reg = gpr_map[vie->reg];
		error = vie_read_register(vm, vcpuid, reg, &val);
		if (error == 0) {
			val &= size2mask[size];
			error = memwrite(vm, vcpuid, gpa, val, size, arg);
		}
		break;
	case 0x8A:
		/*
		 * MOV byte from mem (ModRM:r/m) to reg (ModRM:reg)
		 * 8A/r:	mov r8, r/m8
		 * REX + 8A/r:	mov r8, r/m8
		 */
		size = 1;	/* override for byte operation */
		error = memread(vm, vcpuid, gpa, &val, size, arg);
		if (error == 0)
			error = vie_write_bytereg(vm, vcpuid, vie, val);
		break;
	case 0x8B:
		/*
		 * MOV from mem (ModRM:r/m) to reg (ModRM:reg)
		 * 8B/r:	mov r16, r/m16
		 * 8B/r:	mov r32, r/m32
		 * REX.W 8B/r:	mov r64, r/m64
		 */
		error = memread(vm, vcpuid, gpa, &val, size, arg);
		if (error == 0) {
			reg = gpr_map[vie->reg];
			error = vie_update_register(vm, vcpuid, reg, val, size);
		}
		break;
	case 0xA1:
		/*
		 * MOV from seg:moffset to AX/EAX/RAX
		 * A1:		mov AX, moffs16
		 * A1:		mov EAX, moffs32
		 * REX.W + A1:	mov RAX, moffs64
		 */
		error = memread(vm, vcpuid, gpa, &val, size, arg);
		if (error == 0) {
			reg = VM_REG_GUEST_RAX;
			error = vie_update_register(vm, vcpuid, reg, val, size);
		}
		break;
	case 0xA3:
		/*
		 * MOV from AX/EAX/RAX to seg:moffset
		 * A3:		mov moffs16, AX
		 * A3:		mov moffs32, EAX
		 * REX.W + A3:	mov moffs64, RAX
		 */
		error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RAX, &val);
		if (error == 0) {
			val &= size2mask[size];
			error = memwrite(vm, vcpuid, gpa, val, size, arg);
		}
		break;
	case 0xC6:
		/*
		 * MOV from imm8 to mem (ModRM:r/m)
		 * C6/0		mov r/m8, imm8
		 * REX + C6/0	mov r/m8, imm8
		 */
		size = 1;	/* override for byte operation */
		error = memwrite(vm, vcpuid, gpa, vie->immediate, size, arg);
		break;
	case 0xC7:
		/*
		 * MOV from imm16/imm32 to mem (ModRM:r/m)
		 * C7/0		mov r/m16, imm16
		 * C7/0		mov r/m32, imm32
		 * REX.W + C7/0	mov r/m64, imm32 (sign-extended to 64-bits)
		 */
		val = vie->immediate & size2mask[size];
		error = memwrite(vm, vcpuid, gpa, val, size, arg);
		break;
	default:
		break;
	}

	return (error);
}
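
/*
 * Example (added): a guest store such as "mov %ebx,(mmio address)"
 * exits with opcode 0x89 and ModRM:reg = 3.  emulate_mov() reads
 * %rbx, masks the value to vie->opsize (4) bytes and passes it to
 * the 'memwrite' callback for the faulting guest physical address.
 */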

static int
emulate_movx(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    mem_region_read_t memread, mem_region_write_t memwrite,
    void *arg)
{
	int error, size;
	enum vm_reg_name reg;
	uint64_t val;

	size = vie->opsize;
	error = EINVAL;

	switch (vie->op.op_byte) {
	case 0xB6:
		/*
		 * MOV and zero extend byte from mem (ModRM:r/m) to
		 * reg (ModRM:reg).
		 *
		 * 0F B6/r		movzx r16, r/m8
		 * 0F B6/r		movzx r32, r/m8
		 * REX.W + 0F B6/r	movzx r64, r/m8
		 */

		/* get the first operand */
		error = memread(vm, vcpuid, gpa, &val, 1, arg);
		if (error)
			break;

		/* get the second operand */
		reg = gpr_map[vie->reg];

		/* zero-extend byte */
		val = (uint8_t)val;

		/* write the result */
		error = vie_update_register(vm, vcpuid, reg, val, size);
		break;
	case 0xB7:
		/*
		 * MOV and zero extend word from mem (ModRM:r/m) to
		 * reg (ModRM:reg).
		 *
		 * 0F B7/r		movzx r32, r/m16
		 * REX.W + 0F B7/r	movzx r64, r/m16
		 */
		error = memread(vm, vcpuid, gpa, &val, 2, arg);
		if (error)
			return (error);

		reg = gpr_map[vie->reg];

		/* zero-extend word */
		val = (uint16_t)val;

		error = vie_update_register(vm, vcpuid, reg, val, size);
		break;
	case 0xBE:
		/*
		 * MOV and sign extend byte from mem (ModRM:r/m) to
		 * reg (ModRM:reg).
		 *
		 * 0F BE/r		movsx r16, r/m8
		 * 0F BE/r		movsx r32, r/m8
		 * REX.W + 0F BE/r	movsx r64, r/m8
		 */

		/* get the first operand */
		error = memread(vm, vcpuid, gpa, &val, 1, arg);
		if (error)
			break;

		/* get the second operand */
		reg = gpr_map[vie->reg];

		/* sign extend byte */
		val = (int8_t)val;

		/* write the result */
		error = vie_update_register(vm, vcpuid, reg, val, size);
		break;
	default:
		break;
	}
	return (error);
}

static int
emulate_and(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
{
	int error, size;
	enum vm_reg_name reg;
	uint64_t val1, val2;

	size = vie->opsize;
	error = EINVAL;

	switch (vie->op.op_byte) {
	case 0x23:
		/*
		 * AND reg (ModRM:reg) and mem (ModRM:r/m) and store the
		 * result in reg.
		 *
		 * 23/r		and r16, r/m16
		 * 23/r		and r32, r/m32
		 * REX.W + 23/r	and r64, r/m64
		 */

		/* get the first operand */
		reg = gpr_map[vie->reg];
		error = vie_read_register(vm, vcpuid, reg, &val1);
		if (error)
			break;

		/* get the second operand */
		error = memread(vm, vcpuid, gpa, &val2, size, arg);
		if (error)
			break;

		/* perform the operation and write the result */
		val1 &= val2;
		error = vie_update_register(vm, vcpuid, reg, val1, size);
		break;
	case 0x81:
		/*
		 * AND mem (ModRM:r/m) with immediate and store the
		 * result in mem.
		 *
		 * 81 /4		and r/m16, imm16
		 * 81 /4		and r/m32, imm32
		 * REX.W + 81 /4	and r/m64, imm32 sign-extended to 64
		 *
		 * Currently, only the AND operation of the 0x81 opcode
		 * is implemented (ModRM:reg = b100).
		 */
		if ((vie->reg & 7) != 4)
			break;

		/* get the first operand */
		error = memread(vm, vcpuid, gpa, &val1, size, arg);
		if (error)
			break;

		/*
		 * perform the operation with the pre-fetched immediate
		 * operand and write the result
		 */
		val1 &= vie->immediate;
		error = memwrite(vm, vcpuid, gpa, val1, size, arg);
		break;
	default:
		break;
	}
	return (error);
}
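
/*
 * Encoding note (added): "andl $0xfff,(%rax)" encodes as
 * 81 20 ff 0f 00 00.  ModRM 0x20 yields mod=0, reg=4 and r/m=0, so
 * the (vie->reg & 7) != 4 check above accepts it as the /4 group-1
 * AND form, with the imm32 pre-fetched by decode_immediate().
 */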

static int
emulate_or(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
{
	int error, size;
	uint64_t val1;

	size = vie->opsize;
	error = EINVAL;

	switch (vie->op.op_byte) {
	case 0x83:
		/*
		 * OR mem (ModRM:r/m) with immediate and store the
		 * result in mem.
		 *
		 * 83 /1		OR r/m16, imm8 sign-extended to 16
		 * 83 /1		OR r/m32, imm8 sign-extended to 32
		 * REX.W + 83/1		OR r/m64, imm8 sign-extended to 64
		 *
		 * Currently, only the OR operation of the 0x83 opcode
		 * is implemented (ModRM:reg = b001).
		 */
		if ((vie->reg & 7) != 1)
			break;

		/* get the first operand */
		error = memread(vm, vcpuid, gpa, &val1, size, arg);
		if (error)
			break;

		/*
		 * perform the operation with the pre-fetched immediate
		 * operand and write the result
		 */
		val1 |= vie->immediate;
		error = memwrite(vm, vcpuid, gpa, val1, size, arg);
		break;
	default:
		break;
	}
	return (error);
}

#define	RFLAGS_STATUS_BITS    (PSL_C | PSL_PF | PSL_AF | PSL_Z | PSL_N | PSL_V)

static int
emulate_cmp(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
{
	int error, size;
	uint64_t op1, op2, rflags, rflags2;
	enum vm_reg_name reg;

	size = vie->opsize;
	switch (vie->op.op_byte) {
	case 0x3B:
		/*
		 * 3B/r		CMP r16, r/m16
		 * 3B/r		CMP r32, r/m32
		 * REX.W + 3B/r	CMP r64, r/m64
		 *
		 * Compare first operand (reg) with second operand (r/m) and
		 * set status flags in EFLAGS register. The comparison is
		 * performed by subtracting the second operand from the first
		 * operand and then setting the status flags.
		 */

		/* Get the first operand */
		reg = gpr_map[vie->reg];
		error = vie_read_register(vm, vcpuid, reg, &op1);
		if (error)
			return (error);

		/* Get the second operand */
		error = memread(vm, vcpuid, gpa, &op2, size, arg);
		if (error)
			return (error);

		break;
	default:
		return (EINVAL);
	}
	rflags2 = getcc(size, op1, op2);
	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, &rflags);
	if (error)
		return (error);
	rflags &= ~RFLAGS_STATUS_BITS;
	rflags |= rflags2 & RFLAGS_STATUS_BITS;

	error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, rflags, 8);
	return (error);
}
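
/*
 * Example (added): comparing op1 = 1 with op2 = 2 makes getcc()
 * return a value with PSL_C and PSL_N set.  Only the bits in
 * RFLAGS_STATUS_BITS are merged into the guest %rflags; all other
 * guest flag bits are preserved.
 */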

static int
emulate_push(void *vm, int vcpuid, uint64_t mmio_gpa, struct vie *vie,
    struct vm_guest_paging *paging, mem_region_read_t memread,
    mem_region_write_t memwrite, void *arg)
{
#ifdef _KERNEL
	struct vm_copyinfo copyinfo[2];
#else
	struct iovec copyinfo[2];
#endif
	struct seg_desc ss_desc;
	uint64_t cr0, rflags, rsp, stack_gla, val;
	int error, size, stackaddrsize;

	/*
	 * Table A-6, "Opcode Extensions", Intel SDM, Vol 2.
	 *
	 * PUSH is part of the group 5 extended opcodes and is identified
	 * by ModRM:reg = b110.
	 */
	if ((vie->reg & 7) != 6)
		return (EINVAL);

	size = vie->opsize;
	/*
	 * From "Address-Size Attributes for Stack Accesses", Intel SDM, Vol 1
	 */
	if (paging->cpu_mode == CPU_MODE_REAL) {
		stackaddrsize = 2;
	} else if (paging->cpu_mode == CPU_MODE_64BIT) {
		/*
		 * "Stack Manipulation Instructions in 64-bit Mode", SDM, Vol 3
		 * - Stack pointer size is always 64-bits.
		 * - PUSH/POP of 32-bit values is not possible in 64-bit mode.
		 * - 16-bit PUSH/POP is supported by using the operand size
		 *   override prefix (66H).
		 */
		stackaddrsize = 8;
		size = vie->opsize_override ? 2 : 8;
	} else {
		/*
		 * In protected or compatibility mode the 'B' flag in the
		 * stack-segment descriptor determines the size of the
		 * stack pointer.
		 */
		error = vm_get_seg_desc(vm, vcpuid, VM_REG_GUEST_SS, &ss_desc);
		KASSERT(error == 0, ("%s: error %d getting SS descriptor",
		    __func__, error));
		if (SEG_DESC_DEF32(ss_desc.access))
			stackaddrsize = 4;
		else
			stackaddrsize = 2;
	}

	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_CR0, &cr0);
	KASSERT(error == 0, ("%s: error %d getting cr0", __func__, error));

	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, &rflags);
	KASSERT(error == 0, ("%s: error %d getting rflags", __func__, error));

	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RSP, &rsp);
	KASSERT(error == 0, ("%s: error %d getting rsp", __func__, error));

	rsp -= size;
	if (vie_calculate_gla(paging->cpu_mode, VM_REG_GUEST_SS, &ss_desc,
	    rsp, size, stackaddrsize, PROT_WRITE, &stack_gla)) {
		vm_inject_ss(vm, vcpuid, 0);
		return (0);
	}

	if (vie_canonical_check(paging->cpu_mode, stack_gla)) {
		vm_inject_ss(vm, vcpuid, 0);
		return (0);
	}

	if (vie_alignment_check(paging->cpl, size, cr0, rflags, stack_gla)) {
		vm_inject_ac(vm, vcpuid, 0);
		return (0);
	}

	error = vm_copy_setup(vm, vcpuid, paging, stack_gla, size, PROT_WRITE,
	    copyinfo, nitems(copyinfo));
	if (error == -1) {
		/*
		 * XXX cannot return a negative error value here because it
		 * ends up being the return value of the VM_RUN() ioctl and
		 * is interpreted as a pseudo-error (for e.g. ERESTART).
		 */
		return (EFAULT);
	} else if (error == 1) {
		/* Resume guest execution to handle page fault */
		return (0);
	}

	error = memread(vm, vcpuid, mmio_gpa, &val, size, arg);
	if (error == 0) {
		vm_copyout(vm, vcpuid, &val, copyinfo, size);
		error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RSP, rsp,
		    stackaddrsize);
		KASSERT(error == 0, ("error %d updating rsp", error));
	}
#ifdef _KERNEL
	vm_copy_teardown(vm, vcpuid, copyinfo, nitems(copyinfo));
#endif
	return (error);
}

int
vmm_emulate_instruction(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    struct vm_guest_paging *paging, mem_region_read_t memread,
    mem_region_write_t memwrite, void *memarg)
{
	int error;

	if (!vie->decoded)
		return (EINVAL);

	switch (vie->op.op_type) {
	case VIE_OP_TYPE_PUSH:
		error = emulate_push(vm, vcpuid, gpa, vie, paging, memread,
		    memwrite, memarg);
		break;
	case VIE_OP_TYPE_CMP:
		error = emulate_cmp(vm, vcpuid, gpa, vie,
		    memread, memwrite, memarg);
		break;
	case VIE_OP_TYPE_MOV:
		error = emulate_mov(vm, vcpuid, gpa, vie,
		    memread, memwrite, memarg);
		break;
	case VIE_OP_TYPE_MOVSX:
	case VIE_OP_TYPE_MOVZX:
		error = emulate_movx(vm, vcpuid, gpa, vie,
		    memread, memwrite, memarg);
		break;
	case VIE_OP_TYPE_AND:
		error = emulate_and(vm, vcpuid, gpa, vie,
		    memread, memwrite, memarg);
		break;
	case VIE_OP_TYPE_OR:
		error = emulate_or(vm, vcpuid, gpa, vie,
		    memread, memwrite, memarg);
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}
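
/*
 * Typical usage (sketch added for illustration): after an EPT/MMIO
 * exit the handler decodes the instruction and then calls
 *
 *	error = vmm_emulate_instruction(vm, vcpuid, gpa, vie, paging,
 *	    mmio_read, mmio_write, arg);
 *
 * where 'mmio_read' and 'mmio_write' are hypothetical device-model
 * callbacks of type mem_region_read_t and mem_region_write_t.
 */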

int
vie_alignment_check(int cpl, int size, uint64_t cr0, uint64_t rf, uint64_t gla)
{
	KASSERT(size == 1 || size == 2 || size == 4 || size == 8,
	    ("%s: invalid size %d", __func__, size));
	KASSERT(cpl >= 0 && cpl <= 3, ("%s: invalid cpl %d", __func__, cpl));

	if (cpl != 3 || (cr0 & CR0_AM) == 0 || (rf & PSL_AC) == 0)
		return (0);

	return ((gla & (size - 1)) ? 1 : 0);
}

int
vie_canonical_check(enum vm_cpu_mode cpu_mode, uint64_t gla)
{
	uint64_t mask;

	if (cpu_mode != CPU_MODE_64BIT)
		return (0);

	/*
	 * The value of bit 47 in 'gla' must be replicated in the 16
	 * most significant bits for the address to be canonical.
	 */
	mask = ~((1UL << 48) - 1);
	if (gla & (1UL << 47))
		return ((gla & mask) != mask);
	else
		return ((gla & mask) != 0);
}
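
/*
 * Example (added): 0xffff800000000000 and 0x00007fffffffffff are
 * canonical because bits 63:48 replicate bit 47, so the function
 * returns 0; for 0x0000800000000000 they do not and it returns a
 * non-zero value.
 */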

uint64_t
vie_size2mask(int size)
{
	KASSERT(size == 1 || size == 2 || size == 4 || size == 8,
	    ("vie_size2mask: invalid size %d", size));
	return (size2mask[size]);
}

int
vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum vm_reg_name seg,
    struct seg_desc *desc, uint64_t offset, int length, int addrsize,
    int prot, uint64_t *gla)
{
	uint64_t firstoff, low_limit, high_limit, segbase;
	int glasize, type;

	KASSERT(seg >= VM_REG_GUEST_ES && seg <= VM_REG_GUEST_GS,
	    ("%s: invalid segment %d", __func__, seg));
	KASSERT(length == 1 || length == 2 || length == 4 || length == 8,
	    ("%s: invalid operand size %d", __func__, length));
	KASSERT((prot & ~(PROT_READ | PROT_WRITE)) == 0,
	    ("%s: invalid prot %#x", __func__, prot));

	firstoff = offset;
	if (cpu_mode == CPU_MODE_64BIT) {
		KASSERT(addrsize == 4 || addrsize == 8, ("%s: invalid address "
		    "size %d for cpu_mode %d", __func__, addrsize, cpu_mode));
		glasize = 8;
	} else {
		KASSERT(addrsize == 2 || addrsize == 4, ("%s: invalid address "
		    "size %d for cpu mode %d", __func__, addrsize, cpu_mode));
		glasize = 4;
		/*
		 * If the segment selector is loaded with a NULL selector
		 * then the descriptor is unusable and attempting to use
		 * it results in a #GP(0).
		 */
		if (SEG_DESC_UNUSABLE(desc->access))
			return (-1);

		/*
		 * The processor generates a #NP exception when a segment
		 * register is loaded with a selector that points to a
		 * descriptor that is not present. If this was the case then
		 * it would have been checked before the VM-exit.
		 */
		KASSERT(SEG_DESC_PRESENT(desc->access),
		    ("segment %d not present: %#x", seg, desc->access));

		/*
		 * The descriptor type must indicate a code/data segment.
		 */
		type = SEG_DESC_TYPE(desc->access);
		KASSERT(type >= 16 && type <= 31, ("segment %d has invalid "
		    "descriptor type %#x", seg, type));

		if (prot & PROT_READ) {
			/* #GP on a read access to an exec-only code segment */
			if ((type & 0xA) == 0x8)
				return (-1);
		}

		if (prot & PROT_WRITE) {
			/*
			 * #GP on a write access to a code segment or a
			 * read-only data segment.
			 */
			if (type & 0x8)			/* code segment */
				return (-1);

			if ((type & 0xA) == 0)		/* read-only data seg */
				return (-1);
		}

		/*
		 * 'desc->limit' is fully expanded taking granularity into
		 * account.
		 */
		if ((type & 0xC) == 0x4) {
			/* expand-down data segment */
			low_limit = desc->limit + 1;
			high_limit = SEG_DESC_DEF32(desc->access) ?
			    0xffffffff : 0xffff;
		} else {
			/* code segment or expand-up data segment */
			low_limit = 0;
			high_limit = desc->limit;
		}

		while (length > 0) {
			offset &= vie_size2mask(addrsize);
			if (offset < low_limit || offset > high_limit)
				return (-1);
			offset++;
			length--;
		}
	}

	/*
	 * In 64-bit mode all segments except %fs and %gs have a segment
	 * base address of 0.
	 */
	if (cpu_mode == CPU_MODE_64BIT && seg != VM_REG_GUEST_FS &&
	    seg != VM_REG_GUEST_GS) {
		segbase = 0;
	} else {
		segbase = desc->base;
	}

	/*
	 * Truncate 'firstoff' to the effective address size before adding
	 * it to the segment base.
	 */
	firstoff &= vie_size2mask(addrsize);
	*gla = (segbase + firstoff) & vie_size2mask(glasize);
	return (0);
}

#ifdef _KERNEL
void
vie_init(struct vie *vie)
{

	bzero(vie, sizeof(struct vie));

	vie->base_register = VM_REG_LAST;
	vie->index_register = VM_REG_LAST;
}

static int
pf_error_code(int usermode, int prot, int rsvd, uint64_t pte)
{
	int error_code = 0;

	if (pte & PG_V)
		error_code |= PGEX_P;
	if (prot & VM_PROT_WRITE)
		error_code |= PGEX_W;
	if (usermode)
		error_code |= PGEX_U;
	if (rsvd)
		error_code |= PGEX_RSV;
	if (prot & VM_PROT_EXECUTE)
		error_code |= PGEX_I;

	return (error_code);
}

static void
ptp_release(void **cookie)
{
	if (*cookie != NULL) {
		vm_gpa_release(*cookie);
		*cookie = NULL;
	}
}

static void *
ptp_hold(struct vm *vm, vm_paddr_t ptpphys, size_t len, void **cookie)
{
	void *ptr;

	ptp_release(cookie);
	ptr = vm_gpa_hold(vm, ptpphys, len, VM_PROT_RW, cookie);
	return (ptr);
}
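
/*
 * Example (added): a user-mode write to a present but read-only
 * page produces pf_error_code(1, VM_PROT_WRITE, 0, pte) ==
 * (PGEX_P | PGEX_W | PGEX_U), the same error code a real MMU would
 * push for that fault.
 */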

int
vmm_gla2gpa(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
    uint64_t gla, int prot, uint64_t *gpa)
{
	int nlevels, pfcode, ptpshift, ptpindex, retval, usermode, writable;
	u_int retries;
	uint64_t *ptpbase, ptpphys, pte, pgsize;
	uint32_t *ptpbase32, pte32;
	void *cookie;

	usermode = (paging->cpl == 3 ? 1 : 0);
	writable = prot & VM_PROT_WRITE;
	cookie = NULL;
	retval = 0;
	retries = 0;
restart:
	ptpphys = paging->cr3;		/* root of the page tables */
	ptp_release(&cookie);
	if (retries++ > 0)
		maybe_yield();

	if (vie_canonical_check(paging->cpu_mode, gla)) {
		/*
		 * XXX assuming a non-stack reference otherwise a stack fault
		 * should be generated.
		 */
		vm_inject_gp(vm, vcpuid);
		goto fault;
	}

	if (paging->paging_mode == PAGING_MODE_FLAT) {
		*gpa = gla;
		goto done;
	}

	if (paging->paging_mode == PAGING_MODE_32) {
		nlevels = 2;
		while (--nlevels >= 0) {
			/* Zero out the lower 12 bits. */
			ptpphys &= ~0xfff;

			ptpbase32 = ptp_hold(vm, ptpphys, PAGE_SIZE, &cookie);

			if (ptpbase32 == NULL)
				goto error;

			ptpshift = PAGE_SHIFT + nlevels * 10;
			ptpindex = (gla >> ptpshift) & 0x3FF;
			pgsize = 1UL << ptpshift;

			pte32 = ptpbase32[ptpindex];

			if ((pte32 & PG_V) == 0 ||
			    (usermode && (pte32 & PG_U) == 0) ||
			    (writable && (pte32 & PG_RW) == 0)) {
				pfcode = pf_error_code(usermode, prot, 0,
				    pte32);
				vm_inject_pf(vm, vcpuid, pfcode, gla);
				goto fault;
			}

			/*
			 * Emulate the x86 MMU's management of the accessed
			 * and dirty flags. While the accessed flag is set
			 * at every level of the page table, the dirty flag
			 * is only set at the last level providing the guest
			 * physical address.
			 */
			if ((pte32 & PG_A) == 0) {
				if (atomic_cmpset_32(&ptpbase32[ptpindex],
				    pte32, pte32 | PG_A) == 0) {
					goto restart;
				}
			}

			/* XXX must be ignored if CR4.PSE=0 */
			if (nlevels > 0 && (pte32 & PG_PS) != 0)
				break;

			ptpphys = pte32;
		}

		/* Set the dirty bit in the page table entry if necessary */
		if (writable && (pte32 & PG_M) == 0) {
			if (atomic_cmpset_32(&ptpbase32[ptpindex],
			    pte32, pte32 | PG_M) == 0) {
				goto restart;
			}
		}

		/* Zero out the lower 'ptpshift' bits */
		pte32 >>= ptpshift; pte32 <<= ptpshift;
		*gpa = pte32 | (gla & (pgsize - 1));
		goto done;
	}

	if (paging->paging_mode == PAGING_MODE_PAE) {
		/* Zero out the lower 5 bits and the upper 32 bits */
		ptpphys &= 0xffffffe0UL;

		ptpbase = ptp_hold(vm, ptpphys, sizeof(*ptpbase) * 4, &cookie);
		if (ptpbase == NULL)
			goto error;

		ptpindex = (gla >> 30) & 0x3;

		pte = ptpbase[ptpindex];

		if ((pte & PG_V) == 0) {
			pfcode = pf_error_code(usermode, prot, 0, pte);
			vm_inject_pf(vm, vcpuid, pfcode, gla);
			goto fault;
		}

		ptpphys = pte;

		nlevels = 2;
	} else
		nlevels = 4;
	while (--nlevels >= 0) {
		/* Zero out the lower 12 bits and the upper 12 bits */
		ptpphys >>= 12; ptpphys <<= 24; ptpphys >>= 12;

		ptpbase = ptp_hold(vm, ptpphys, PAGE_SIZE, &cookie);
		if (ptpbase == NULL)
			goto error;

		ptpshift = PAGE_SHIFT + nlevels * 9;
		ptpindex = (gla >> ptpshift) & 0x1FF;
		pgsize = 1UL << ptpshift;

		pte = ptpbase[ptpindex];

		if ((pte & PG_V) == 0 ||
		    (usermode && (pte & PG_U) == 0) ||
		    (writable && (pte & PG_RW) == 0)) {
			pfcode = pf_error_code(usermode, prot, 0, pte);
			vm_inject_pf(vm, vcpuid, pfcode, gla);
			goto fault;
		}

		/* Set the accessed bit in the page table entry */
		if ((pte & PG_A) == 0) {
			if (atomic_cmpset_64(&ptpbase[ptpindex],
			    pte, pte | PG_A) == 0) {
				goto restart;
			}
		}

		if (nlevels > 0 && (pte & PG_PS) != 0) {
			if (pgsize > 1 * GB) {
				pfcode = pf_error_code(usermode, prot, 1, pte);
				vm_inject_pf(vm, vcpuid, pfcode, gla);
				goto fault;
			}
			break;
		}

		ptpphys = pte;
	}

	/* Set the dirty bit in the page table entry if necessary */
	if (writable && (pte & PG_M) == 0) {
		if (atomic_cmpset_64(&ptpbase[ptpindex], pte, pte | PG_M) == 0)
			goto restart;
	}

	/* Zero out the lower 'ptpshift' bits and the upper 12 bits */
	pte >>= ptpshift; pte <<= (ptpshift + 12); pte >>= 12;
	*gpa = pte | (gla & (pgsize - 1));
done:
	ptp_release(&cookie);
	return (retval);
error:
	retval = -1;
	goto done;
fault:
	retval = 1;
	goto done;
}
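
/*
 * Worked example (added): with 4-level paging the loop above uses
 * ptpshift values 39, 30, 21 and 12, indexing the PML4, PDPT, PD
 * and PT with ptpindex = (gla >> ptpshift) & 0x1FF.  A PG_PS entry
 * seen at nlevels == 1 (ptpshift == 21) maps a 2MB page and
 * terminates the walk early.
 */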

int
vmm_fetch_instruction(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
    uint64_t rip, int inst_length, struct vie *vie)
{
	struct vm_copyinfo copyinfo[2];
	int error, prot;

	if (inst_length > VIE_INST_SIZE)
		panic("vmm_fetch_instruction: invalid length %d", inst_length);

	prot = PROT_READ | PROT_EXEC;
	error = vm_copy_setup(vm, vcpuid, paging, rip, inst_length, prot,
	    copyinfo, nitems(copyinfo));
	if (error == 0) {
		vm_copyin(vm, vcpuid, copyinfo, vie->inst, inst_length);
		vm_copy_teardown(vm, vcpuid, copyinfo, nitems(copyinfo));
		vie->num_valid = inst_length;
	}

	return (error);
}

static int
vie_peek(struct vie *vie, uint8_t *x)
{

	if (vie->num_processed < vie->num_valid) {
		*x = vie->inst[vie->num_processed];
		return (0);
	} else
		return (-1);
}

static void
vie_advance(struct vie *vie)
{

	vie->num_processed++;
}

static int
decode_prefixes(struct vie *vie, enum vm_cpu_mode cpu_mode, int cs_d)
{
	uint8_t x;

	while (1) {
		if (vie_peek(vie, &x))
			return (-1);

		if (x == 0x66)
			vie->opsize_override = 1;
		else if (x == 0x67)
			vie->addrsize_override = 1;
		else
			break;

		vie_advance(vie);
	}

	/*
	 * From section 2.2.1, "REX Prefixes", Intel SDM Vol 2:
	 * - Only one REX prefix is allowed per instruction.
	 * - The REX prefix must immediately precede the opcode byte or the
	 *   escape opcode byte.
	 * - If an instruction has a mandatory prefix (0x66, 0xF2 or 0xF3)
	 *   the mandatory prefix must come before the REX prefix.
	 */
	if (cpu_mode == CPU_MODE_64BIT && x >= 0x40 && x <= 0x4F) {
		vie->rex_present = 1;
		vie->rex_w = x & 0x8 ? 1 : 0;
		vie->rex_r = x & 0x4 ? 1 : 0;
		vie->rex_x = x & 0x2 ? 1 : 0;
		vie->rex_b = x & 0x1 ? 1 : 0;
		vie_advance(vie);
	}

	/*
	 * Section "Operand-Size And Address-Size Attributes", Intel SDM, Vol 1
	 */
	if (cpu_mode == CPU_MODE_64BIT) {
		/*
		 * Default address size is 64-bits and default operand size
		 * is 32-bits.
		 */
		vie->addrsize = vie->addrsize_override ? 4 : 8;
		if (vie->rex_w)
			vie->opsize = 8;
		else if (vie->opsize_override)
			vie->opsize = 2;
		else
			vie->opsize = 4;
	} else if (cs_d) {
		/* Default address and operand sizes are 32-bits */
		vie->addrsize = vie->addrsize_override ? 2 : 4;
		vie->opsize = vie->opsize_override ? 2 : 4;
	} else {
		/* Default address and operand sizes are 16-bits */
		vie->addrsize = vie->addrsize_override ? 4 : 2;
		vie->opsize = vie->opsize_override ? 4 : 2;
	}
	return (0);
}

static int
decode_two_byte_opcode(struct vie *vie)
{
	uint8_t x;

	if (vie_peek(vie, &x))
		return (-1);

	vie->op = two_byte_opcodes[x];

	if (vie->op.op_type == VIE_OP_TYPE_NONE)
		return (-1);

	vie_advance(vie);
	return (0);
}

static int
decode_opcode(struct vie *vie)
{
	uint8_t x;

	if (vie_peek(vie, &x))
		return (-1);

	vie->op = one_byte_opcodes[x];

	if (vie->op.op_type == VIE_OP_TYPE_NONE)
		return (-1);

	vie_advance(vie);

	if (vie->op.op_type == VIE_OP_TYPE_TWO_BYTE)
		return (decode_two_byte_opcode(vie));

	return (0);
}
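
/*
 * Decode example (added): for the byte sequence 48 89 08, i.e.
 * "mov %rcx,(%rax)", decode_prefixes() consumes the REX.W prefix
 * (rex_w = 1, so opsize becomes 8) and decode_opcode() matches
 * 0x89 in one_byte_opcodes as VIE_OP_TYPE_MOV.
 */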

static int
decode_modrm(struct vie *vie, enum vm_cpu_mode cpu_mode)
{
	uint8_t x;

	if (cpu_mode == CPU_MODE_REAL)
		return (-1);

	if (vie->op.op_flags & VIE_OP_F_NO_MODRM)
		return (0);

	if (vie_peek(vie, &x))
		return (-1);

	vie->mod = (x >> 6) & 0x3;
	vie->rm =  (x >> 0) & 0x7;
	vie->reg = (x >> 3) & 0x7;

	/*
	 * A direct addressing mode makes no sense in the context of an EPT
	 * fault. There has to be a memory access involved to cause the
	 * EPT fault.
	 */
	if (vie->mod == VIE_MOD_DIRECT)
		return (-1);

	if ((vie->mod == VIE_MOD_INDIRECT && vie->rm == VIE_RM_DISP32) ||
	    (vie->mod != VIE_MOD_DIRECT && vie->rm == VIE_RM_SIB)) {
		/*
		 * Table 2-5: Special Cases of REX Encodings
		 *
		 * mod=0, r/m=5 is used in the compatibility mode to
		 * indicate a disp32 without a base register.
		 *
		 * mod!=3, r/m=4 is used in the compatibility mode to
		 * indicate that the SIB byte is present.
		 *
		 * The 'b' bit in the REX prefix is don't care in
		 * this case.
		 */
	} else {
		vie->rm |= (vie->rex_b << 3);
	}

	vie->reg |= (vie->rex_r << 3);

	/* SIB */
	if (vie->mod != VIE_MOD_DIRECT && vie->rm == VIE_RM_SIB)
		goto done;

	vie->base_register = gpr_map[vie->rm];

	switch (vie->mod) {
	case VIE_MOD_INDIRECT_DISP8:
		vie->disp_bytes = 1;
		break;
	case VIE_MOD_INDIRECT_DISP32:
		vie->disp_bytes = 4;
		break;
	case VIE_MOD_INDIRECT:
		if (vie->rm == VIE_RM_DISP32) {
			vie->disp_bytes = 4;
			/*
			 * Table 2-7. RIP-Relative Addressing
			 *
			 * In 64-bit mode mod=00 r/m=101 implies [rip] + disp32
			 * whereas in compatibility mode it just implies disp32.
			 */

			if (cpu_mode == CPU_MODE_64BIT)
				vie->base_register = VM_REG_GUEST_RIP;
			else
				vie->base_register = VM_REG_LAST;
		}
		break;
	}

done:
	vie_advance(vie);

	return (0);
}
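
/*
 * Example (added): in 64-bit mode the ModRM byte 0x05 (mod=0,
 * r/m=5) selects RIP-relative addressing, e.g. "mov %eax,0x10(%rip)"
 * = 89 05 10 00 00 00, so base_register is set to VM_REG_GUEST_RIP
 * and a 4-byte displacement follows.
 */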

static int
decode_sib(struct vie *vie)
{
	uint8_t x;

	/* Proceed only if SIB byte is present */
	if (vie->mod == VIE_MOD_DIRECT || vie->rm != VIE_RM_SIB)
		return (0);

	if (vie_peek(vie, &x))
		return (-1);

	/* De-construct the SIB byte */
	vie->ss = (x >> 6) & 0x3;
	vie->index = (x >> 3) & 0x7;
	vie->base = (x >> 0) & 0x7;

	/* Apply the REX prefix modifiers */
	vie->index |= vie->rex_x << 3;
	vie->base |= vie->rex_b << 3;

	switch (vie->mod) {
	case VIE_MOD_INDIRECT_DISP8:
		vie->disp_bytes = 1;
		break;
	case VIE_MOD_INDIRECT_DISP32:
		vie->disp_bytes = 4;
		break;
	}

	if (vie->mod == VIE_MOD_INDIRECT &&
	    (vie->base == 5 || vie->base == 13)) {
		/*
		 * Special case when base register is unused if mod = 0
		 * and base = %rbp or %r13.
		 *
		 * Documented in:
		 * Table 2-3: 32-bit Addressing Forms with the SIB Byte
		 * Table 2-5: Special Cases of REX Encodings
		 */
		vie->disp_bytes = 4;
	} else {
		vie->base_register = gpr_map[vie->base];
	}

	/*
	 * All encodings of 'index' are valid except for %rsp (4).
	 *
	 * Documented in:
	 * Table 2-3: 32-bit Addressing Forms with the SIB Byte
	 * Table 2-5: Special Cases of REX Encodings
	 */
	if (vie->index != 4)
		vie->index_register = gpr_map[vie->index];

	/* 'scale' makes sense only in the context of an index register */
	if (vie->index_register < VM_REG_LAST)
		vie->scale = 1 << vie->ss;

	vie_advance(vie);

	return (0);
}

static int
decode_displacement(struct vie *vie)
{
	int n, i;
	uint8_t x;

	union {
		char	buf[4];
		int8_t	signed8;
		int32_t	signed32;
	} u;

	if ((n = vie->disp_bytes) == 0)
		return (0);

	if (n != 1 && n != 4)
		panic("decode_displacement: invalid disp_bytes %d", n);

	for (i = 0; i < n; i++) {
		if (vie_peek(vie, &x))
			return (-1);

		u.buf[i] = x;
		vie_advance(vie);
	}

	if (n == 1)
		vie->displacement = u.signed8;		/* sign-extended */
	else
		vie->displacement = u.signed32;		/* sign-extended */

	return (0);
}

static int
decode_immediate(struct vie *vie)
{
	int i, n;
	uint8_t x;
	union {
		char	buf[4];
		int8_t	signed8;
		int16_t	signed16;
		int32_t	signed32;
	} u;

	/* Figure out immediate operand size (if any) */
	if (vie->op.op_flags & VIE_OP_F_IMM) {
		/*
		 * Section 2.2.1.5 "Immediates", Intel SDM:
		 * In 64-bit mode the typical size of immediate operands
		 * remains 32-bits. When the operand size is 64-bits, the
		 * processor sign-extends all immediates to 64-bits prior
		 * to their use.
		 */
		if (vie->opsize == 4 || vie->opsize == 8)
			vie->imm_bytes = 4;
		else
			vie->imm_bytes = 2;
	} else if (vie->op.op_flags & VIE_OP_F_IMM8) {
		vie->imm_bytes = 1;
	}

	if ((n = vie->imm_bytes) == 0)
		return (0);

	KASSERT(n == 1 || n == 2 || n == 4,
	    ("%s: invalid number of immediate bytes: %d", __func__, n));

	for (i = 0; i < n; i++) {
		if (vie_peek(vie, &x))
			return (-1);

		u.buf[i] = x;
		vie_advance(vie);
	}

	/* sign-extend the immediate value before use */
	if (n == 1)
		vie->immediate = u.signed8;
	else if (n == 2)
		vie->immediate = u.signed16;
	else
		vie->immediate = u.signed32;

	return (0);
}

static int
decode_moffset(struct vie *vie)
{
	int i, n;
	uint8_t x;
	union {
		char	buf[8];
		uint64_t u64;
	} u;

	if ((vie->op.op_flags & VIE_OP_F_MOFFSET) == 0)
		return (0);

	/*
	 * Section 2.2.1.4, "Direct Memory-Offset MOVs", Intel SDM:
	 * The memory offset size follows the address-size of the instruction.
	 */
	n = vie->addrsize;
	KASSERT(n == 2 || n == 4 || n == 8, ("invalid moffset bytes: %d", n));

	u.u64 = 0;
	for (i = 0; i < n; i++) {
		if (vie_peek(vie, &x))
			return (-1);

		u.buf[i] = x;
		vie_advance(vie);
	}
	vie->displacement = u.u64;
	return (0);
}
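
/*
 * Example (added): "movq $-1,(%rax)" encodes as 48 c7 00 ff ff ff ff.
 * decode_immediate() reads the 4-byte imm32 and sign-extends it, so
 * vie->immediate becomes -1 even though the operand size is 8.  For
 * the moffset forms (opcodes 0xA1/0xA3) decode_moffset() instead
 * reads an address-size-wide offset into vie->displacement.
 */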

/*
 * Verify that all the bytes in the instruction buffer were consumed.
 */
static int
verify_inst_length(struct vie *vie)
{

	if (vie->num_processed == vie->num_valid)
		return (0);
	else
		return (-1);
}

/*
 * Verify that the 'guest linear address' provided as collateral of the nested
 * page table fault matches our instruction decoding.
 */
static int
verify_gla(struct vm *vm, int cpuid, uint64_t gla, struct vie *vie)
{
	int error;
	uint64_t base, idx, gla2;

	/* Skip 'gla' verification */
	if (gla == VIE_INVALID_GLA)
		return (0);

	base = 0;
	if (vie->base_register != VM_REG_LAST) {
		error = vm_get_register(vm, cpuid, vie->base_register, &base);
		if (error) {
			printf("verify_gla: error %d getting base reg %d\n",
			    error, vie->base_register);
			return (-1);
		}

		/*
		 * RIP-relative addressing starts from the following
		 * instruction
		 */
		if (vie->base_register == VM_REG_GUEST_RIP)
			base += vie->num_valid;
	}

	idx = 0;
	if (vie->index_register != VM_REG_LAST) {
		error = vm_get_register(vm, cpuid, vie->index_register, &idx);
		if (error) {
			printf("verify_gla: error %d getting index reg %d\n",
			    error, vie->index_register);
			return (-1);
		}
	}

	/* XXX assuming that the base address of the segment is 0 */
	gla2 = base + vie->scale * idx + vie->displacement;
	gla2 &= size2mask[vie->addrsize];
	if (gla != gla2) {
		printf("verify_gla mismatch: "
		    "base(0x%0lx), scale(%d), index(0x%0lx), "
		    "disp(0x%0lx), gla(0x%0lx), gla2(0x%0lx)\n",
		    base, vie->scale, idx, vie->displacement, gla, gla2);
		return (-1);
	}

	return (0);
}

int
vmm_decode_instruction(struct vm *vm, int cpuid, uint64_t gla,
    enum vm_cpu_mode cpu_mode, int cs_d, struct vie *vie)
{

	if (decode_prefixes(vie, cpu_mode, cs_d))
		return (-1);

	if (decode_opcode(vie))
		return (-1);

	if (decode_modrm(vie, cpu_mode))
		return (-1);

	if (decode_sib(vie))
		return (-1);

	if (decode_displacement(vie))
		return (-1);

	if (decode_immediate(vie))
		return (-1);

	if (decode_moffset(vie))
		return (-1);

	if (verify_inst_length(vie))
		return (-1);

	if (verify_gla(vm, cpuid, gla, vie))
		return (-1);

	vie->decoded = 1;	/* success */

	return (0);
}
#endif	/* _KERNEL */