author    tychon <tychon@FreeBSD.org>    2015-04-06 12:22:41 +0000
committer tychon <tychon@FreeBSD.org>    2015-04-06 12:22:41 +0000
commit    9079cdda4647a858c97d8cba261b38c03cfa86ea (patch)
tree      a3d66e082861d9b61ccaf253d942acd345679f12
parent    78d51c7b15fc92383c6c68b46966248bdaf2bf1a (diff)
download  FreeBSD-src-9079cdda4647a858c97d8cba261b38c03cfa86ea.zip
          FreeBSD-src-9079cdda4647a858c97d8cba261b38c03cfa86ea.tar.gz
Enhance the support for Group 1 Extended opcodes:
  * Implement the 0x81 and 0x83 CMP instructions.
  * Implement the 0x83 AND instruction.
  * Implement the 0x81 OR instruction.

Reviewed by:	neel
-rw-r--r--  sys/amd64/vmm/vmm_instruction_emul.c  122
1 file changed, 84 insertions, 38 deletions
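
Note (not part of the commit): for Group 1 extended opcodes the 0x81/0x83 opcode byte only selects the immediate form; the ModRM reg field (the /digit in forms like "81 /4" in the diff's comments, vie->reg & 7 in the code) selects the actual ALU operation. The stand-alone C sketch below illustrates that decode step with illustrative names of its own; it is an example under those assumptions, not code taken from the commit.

/*
 * Minimal sketch (illustrative only, not part of the commit): decode
 * the Group 1 /digit from the ModRM byte.  Bits 5:3 (ModRM:reg)
 * select which ALU operation opcode 0x81/0x83 performs.
 */
#include <stdint.h>
#include <stdio.h>

static const char *
group1_op_name(uint8_t modrm)
{
	switch ((modrm >> 3) & 7) {	/* ModRM:reg, i.e. the /digit */
	case 1:				/* 81 /1, 83 /1 */
		return ("or");
	case 4:				/* 81 /4, 83 /4 */
		return ("and");
	case 7:				/* 81 /7, 83 /7 */
		return ("cmp");
	default:			/* add, adc, sbb, sub, xor: not handled here */
		return ("unhandled");
	}
}

int
main(void)
{
	/* "83 e3 0f" is and $0xf,%ebx: ModRM byte 0xe3 has reg field 4. */
	printf("0x83 /%d -> %s\n", (0xe3 >> 3) & 7, group1_op_name(0xe3));
	return (0);
}

This mirrors the new emulate_group1() added in the diff below, which dispatches on vie->reg & 7 to emulate_or(), emulate_and(), or emulate_cmp().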
diff --git a/sys/amd64/vmm/vmm_instruction_emul.c b/sys/amd64/vmm/vmm_instruction_emul.c
index ca0b144..0b50e92 100644
--- a/sys/amd64/vmm/vmm_instruction_emul.c
+++ b/sys/amd64/vmm/vmm_instruction_emul.c
@@ -71,6 +71,7 @@ enum {
VIE_OP_TYPE_CMP,
VIE_OP_TYPE_POP,
VIE_OP_TYPE_MOVS,
+ VIE_OP_TYPE_GROUP1,
VIE_OP_TYPE_LAST
};
@@ -161,15 +162,15 @@ static const struct vie_op one_byte_opcodes[256] = {
.op_type = VIE_OP_TYPE_AND,
},
[0x81] = {
- /* XXX Group 1 extended opcode - not just AND */
+ /* XXX Group 1 extended opcode */
.op_byte = 0x81,
- .op_type = VIE_OP_TYPE_AND,
+ .op_type = VIE_OP_TYPE_GROUP1,
.op_flags = VIE_OP_F_IMM,
},
[0x83] = {
- /* XXX Group 1 extended opcode - not just OR */
+ /* XXX Group 1 extended opcode */
.op_byte = 0x83,
- .op_type = VIE_OP_TYPE_OR,
+ .op_type = VIE_OP_TYPE_GROUP1,
.op_flags = VIE_OP_F_IMM8,
},
[0x8F] = {
@@ -839,16 +840,18 @@ emulate_and(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
error = vie_update_register(vm, vcpuid, reg, result, size);
break;
case 0x81:
+ case 0x83:
/*
- * AND/OR mem (ModRM:r/m) with immediate and store the
+ * AND mem (ModRM:r/m) with immediate and store the
* result in mem.
*
- * AND: i = 4
- * OR: i = 1
- * 81 /i op r/m16, imm16
- * 81 /i op r/m32, imm32
- * REX.W + 81 /i op r/m64, imm32 sign-extended to 64
+ * 81 /4 and r/m16, imm16
+ * 81 /4 and r/m32, imm32
+ * REX.W + 81 /4 and r/m64, imm32 sign-extended to 64
*
+ * 83 /4 and r/m16, imm8 sign-extended to 16
+ * 83 /4 and r/m32, imm8 sign-extended to 32
+ * REX.W + 83/4 and r/m64, imm8 sign-extended to 64
*/
/* get the first operand */
@@ -857,26 +860,11 @@ emulate_and(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
break;
/*
- * perform the operation with the pre-fetched immediate
- * operand and write the result
- */
- switch (vie->reg & 7) {
- case 0x4:
- /* modrm:reg == b100, AND */
- result = val1 & vie->immediate;
- break;
- case 0x1:
- /* modrm:reg == b001, OR */
- result = val1 | vie->immediate;
- break;
- default:
- error = EINVAL;
- break;
- }
- if (error)
- break;
-
- error = memwrite(vm, vcpuid, gpa, result, size, arg);
+ * perform the operation with the pre-fetched immediate
+ * operand and write the result
+ */
+ result = val1 & vie->immediate;
+ error = memwrite(vm, vcpuid, gpa, result, size, arg);
break;
default:
break;
@@ -913,20 +901,20 @@ emulate_or(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
error = EINVAL;
switch (vie->op.op_byte) {
+ case 0x81:
case 0x83:
/*
* OR mem (ModRM:r/m) with immediate and store the
* result in mem.
*
- * 83 /1 OR r/m16, imm8 sign-extended to 16
- * 83 /1 OR r/m32, imm8 sign-extended to 32
- * REX.W + 83/1 OR r/m64, imm8 sign-extended to 64
+ * 81 /1 or r/m16, imm16
+ * 81 /1 or r/m32, imm32
+ * REX.W + 81 /1 or r/m64, imm32 sign-extended to 64
*
- * Currently, only the OR operation of the 0x83 opcode
- * is implemented (ModRM:reg = b001).
+ * 83 /1 or r/m16, imm8 sign-extended to 16
+ * 83 /1 or r/m32, imm8 sign-extended to 32
+ * REX.W + 83/1 or r/m64, imm8 sign-extended to 64
*/
- if ((vie->reg & 7) != 1)
- break;
/* get the first operand */
error = memread(vm, vcpuid, gpa, &val1, size, arg);
@@ -997,11 +985,37 @@ emulate_cmp(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
if (error)
return (error);
+ rflags2 = getcc(size, op1, op2);
+ break;
+ case 0x81:
+ case 0x83:
+ /*
+ * 81 /7 cmp r/m16, imm16
+ * 81 /7 cmp r/m32, imm32
+ * REX.W + 81 /7 cmp r/m64, imm32 sign-extended to 64
+ *
+ * 83 /7 cmp r/m16, imm8 sign-extended to 16
+ * 83 /7 cmp r/m32, imm8 sign-extended to 32
+ * REX.W + 83 /7 cmp r/m64, imm8 sign-extended to 64
+ *
+ * Compare mem (ModRM:r/m) with immediate and set
+ * status flags according to the results. The
+ * comparison is performed by subtracting the
+ * immediate from the first operand and then setting
+ * the status flags.
+ *
+ */
+
+ /* get the first operand */
+ error = memread(vm, vcpuid, gpa, &op1, size, arg);
+ if (error)
+ return (error);
+
+ rflags2 = getcc(size, op1, vie->immediate);
break;
default:
return (EINVAL);
}
- rflags2 = getcc(size, op1, op2);
error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, &rflags);
if (error)
return (error);
@@ -1220,6 +1234,34 @@ emulate_pop(void *vm, int vcpuid, uint64_t mmio_gpa, struct vie *vie,
return (error);
}
+static int
+emulate_group1(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
+ struct vm_guest_paging *paging, mem_region_read_t memread,
+ mem_region_write_t memwrite, void *memarg)
+{
+ int error;
+
+ switch (vie->reg & 7) {
+ case 0x1: /* OR */
+ error = emulate_or(vm, vcpuid, gpa, vie,
+ memread, memwrite, memarg);
+ break;
+ case 0x4: /* AND */
+ error = emulate_and(vm, vcpuid, gpa, vie,
+ memread, memwrite, memarg);
+ break;
+ case 0x7: /* CMP */
+ error = emulate_cmp(vm, vcpuid, gpa, vie,
+ memread, memwrite, memarg);
+ break;
+ default:
+ error = EINVAL;
+ break;
+ }
+
+ return (error);
+}
+
int
vmm_emulate_instruction(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
struct vm_guest_paging *paging, mem_region_read_t memread,
@@ -1231,6 +1273,10 @@ vmm_emulate_instruction(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
return (EINVAL);
switch (vie->op.op_type) {
+ case VIE_OP_TYPE_GROUP1:
+ error = emulate_group1(vm, vcpuid, gpa, vie, paging, memread,
+ memwrite, memarg);
+ break;
case VIE_OP_TYPE_POP:
error = emulate_pop(vm, vcpuid, gpa, vie, paging, memread,
memwrite, memarg);