summaryrefslogtreecommitdiffstats
path: root/sys/amd64/vmm
diff options
context:
space:
mode:
authortychon <tychon@FreeBSD.org>2015-04-01 00:15:31 +0000
committertychon <tychon@FreeBSD.org>2015-04-01 00:15:31 +0000
commitb3c521e85228b0e74f7983ca41489dfafd7a424d (patch)
treedb6cabf68a19dd36f98777d6764af82f1969b8c0 /sys/amd64/vmm
parentcbabb678cae1bc20311261fcd38ceb4a0cc319a9 (diff)
downloadFreeBSD-src-b3c521e85228b0e74f7983ca41489dfafd7a424d.zip
FreeBSD-src-b3c521e85228b0e74f7983ca41489dfafd7a424d.tar.gz
Fix "MOVS" instruction memory-to-MMIO emulation. Currently, updates to
%rdi, %rsi, etc. are inadvertently bypassed, along with the check to see if the instruction needs to be repeated per the 'rep' prefix. Add "MOVS" instruction support for the 'MMIO to MMIO' case. Reviewed by: neel
Diffstat (limited to 'sys/amd64/vmm')
-rw-r--r--sys/amd64/vmm/vmm.c2
-rw-r--r--sys/amd64/vmm/vmm_dev.c4
-rw-r--r--sys/amd64/vmm/vmm_instruction_emul.c81
3 files changed, 53 insertions, 34 deletions
diff --git a/sys/amd64/vmm/vmm.c b/sys/amd64/vmm/vmm.c
index 42a275b..6bd5bce 100644
--- a/sys/amd64/vmm/vmm.c
+++ b/sys/amd64/vmm/vmm.c
@@ -2332,7 +2332,7 @@ vm_copy_setup(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
remaining = len;
while (remaining > 0) {
KASSERT(nused < num_copyinfo, ("insufficient vm_copyinfo"));
- error = vmm_gla2gpa(vm, vcpuid, paging, gla, prot, &gpa);
+ error = vm_gla2gpa(vm, vcpuid, paging, gla, prot, &gpa);
if (error)
return (error);
off = gpa & PAGE_MASK;
diff --git a/sys/amd64/vmm/vmm_dev.c b/sys/amd64/vmm/vmm_dev.c
index 0293d191..5be99cb 100644
--- a/sys/amd64/vmm/vmm_dev.c
+++ b/sys/amd64/vmm/vmm_dev.c
@@ -440,10 +440,10 @@ vmmdev_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag,
CTASSERT(PROT_WRITE == VM_PROT_WRITE);
CTASSERT(PROT_EXEC == VM_PROT_EXECUTE);
gg = (struct vm_gla2gpa *)data;
- error = vmm_gla2gpa(sc->vm, gg->vcpuid, &gg->paging, gg->gla,
+ error = vm_gla2gpa(sc->vm, gg->vcpuid, &gg->paging, gg->gla,
gg->prot, &gg->gpa);
KASSERT(error == 0 || error == 1 || error == -1,
- ("%s: vmm_gla2gpa unknown error %d", __func__, error));
+ ("%s: vm_gla2gpa unknown error %d", __func__, error));
if (error >= 0) {
/*
* error = 0: the translation was successful
diff --git a/sys/amd64/vmm/vmm_instruction_emul.c b/sys/amd64/vmm/vmm_instruction_emul.c
index 2dfbe8b..ca0b144 100644
--- a/sys/amd64/vmm/vmm_instruction_emul.c
+++ b/sys/amd64/vmm/vmm_instruction_emul.c
@@ -634,7 +634,7 @@ emulate_movs(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
#else
struct iovec copyinfo[2];
#endif
- uint64_t dstaddr, srcaddr, val;
+ uint64_t dstaddr, srcaddr, dstgpa, srcgpa, val;
uint64_t rcx, rdi, rsi, rflags;
int error, opsize, seg, repeat;
@@ -669,7 +669,7 @@ emulate_movs(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
* (1) memory memory n/a
* (2) memory mmio emulated
* (3) mmio memory emulated
- * (4) mmio mmio not emulated
+ * (4) mmio mmio emulated
*
* At this point we don't have sufficient information to distinguish
* between (2), (3) and (4). We use 'vm_copy_setup()' to tease this
@@ -694,7 +694,8 @@ emulate_movs(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
vm_copyin(vm, vcpuid, copyinfo, &val, opsize);
vm_copy_teardown(vm, vcpuid, copyinfo, nitems(copyinfo));
error = memwrite(vm, vcpuid, gpa, val, opsize, arg);
- goto done;
+ if (error)
+ goto done;
} else if (error > 0) {
/*
* Resume guest execution to handle fault.
@@ -705,37 +706,55 @@ emulate_movs(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
* 'vm_copy_setup()' is expected to fail for cases (3) and (4)
* if 'srcaddr' is in the mmio space.
*/
- }
-
- error = get_gla(vm, vcpuid, vie, paging, opsize, vie->addrsize,
- PROT_WRITE, VM_REG_GUEST_ES, VM_REG_GUEST_RDI, &dstaddr);
- if (error)
- goto done;
- error = vm_copy_setup(vm, vcpuid, paging, dstaddr, opsize,
- PROT_WRITE, copyinfo, nitems(copyinfo));
- if (error == 0) {
- /*
- * case (3): read from MMIO and write to system memory.
- *
- * A MMIO read can have side-effects so we commit to it
- * only after vm_copy_setup() is successful. If a page-fault
- * needs to be injected into the guest then it will happen
- * before the MMIO read is attempted.
- */
- error = memread(vm, vcpuid, gpa, &val, opsize, arg);
+ error = get_gla(vm, vcpuid, vie, paging, opsize, vie->addrsize,
+ PROT_WRITE, VM_REG_GUEST_ES, VM_REG_GUEST_RDI, &dstaddr);
if (error)
goto done;
- vm_copyout(vm, vcpuid, &val, copyinfo, opsize);
- vm_copy_teardown(vm, vcpuid, copyinfo, nitems(copyinfo));
- } else if (error > 0) {
- /*
- * Resume guest execution to handle fault.
- */
- goto done;
- } else {
- goto done;
+ error = vm_copy_setup(vm, vcpuid, paging, dstaddr, opsize,
+ PROT_WRITE, copyinfo, nitems(copyinfo));
+ if (error == 0) {
+ /*
+ * case (3): read from MMIO and write to system memory.
+ *
+ * A MMIO read can have side-effects so we
+ * commit to it only after vm_copy_setup() is
+ * successful. If a page-fault needs to be
+ * injected into the guest then it will happen
+ * before the MMIO read is attempted.
+ */
+ error = memread(vm, vcpuid, gpa, &val, opsize, arg);
+ if (error)
+ goto done;
+
+ vm_copyout(vm, vcpuid, &val, copyinfo, opsize);
+ vm_copy_teardown(vm, vcpuid, copyinfo, nitems(copyinfo));
+ } else if (error > 0) {
+ /*
+ * Resume guest execution to handle fault.
+ */
+ goto done;
+ } else {
+ /*
+ * Case (4): read from and write to mmio.
+ */
+ error = vm_gla2gpa(vm, vcpuid, paging, srcaddr,
+ PROT_READ, &srcgpa);
+ if (error)
+ goto done;
+ error = memread(vm, vcpuid, srcgpa, &val, opsize, arg);
+ if (error)
+ goto done;
+
+ error = vm_gla2gpa(vm, vcpuid, paging, dstaddr,
+ PROT_WRITE, &dstgpa);
+ if (error)
+ goto done;
+ error = memwrite(vm, vcpuid, dstgpa, val, opsize, arg);
+ if (error)
+ goto done;
+ }
}
error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RSI, &rsi);
@@ -1465,7 +1484,7 @@ ptp_hold(struct vm *vm, vm_paddr_t ptpphys, size_t len, void **cookie)
}
int
-vmm_gla2gpa(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
+vm_gla2gpa(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
uint64_t gla, int prot, uint64_t *gpa)
{
int nlevels, pfcode, ptpshift, ptpindex, retval, usermode, writable;
OpenPOWER on IntegriCloud