author     tychon <tychon@FreeBSD.org>    2015-04-01 00:15:31 +0000
committer  tychon <tychon@FreeBSD.org>    2015-04-01 00:15:31 +0000
commit     b3c521e85228b0e74f7983ca41489dfafd7a424d (patch)
tree       db6cabf68a19dd36f98777d6764af82f1969b8c0
parent     cbabb678cae1bc20311261fcd38ceb4a0cc319a9 (diff)
download   FreeBSD-src-b3c521e85228b0e74f7983ca41489dfafd7a424d.zip
           FreeBSD-src-b3c521e85228b0e74f7983ca41489dfafd7a424d.tar.gz
Fix "MOVS" instruction memory to MMIO emulation. Currently updates to
%rdi, %rsi, etc are inadvertently bypassed along with the check to see if the instruction needs to be repeated per the 'rep' prefix. Add "MOVS" instruction support for the 'MMIO to MMIO' case. Reviewed by: neel
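A note on the bug: the memory-to-MMIO path jumped straight to 'done' after
memwrite(), skipping the register bookkeeping and the 'rep' repeat check
that every MOVS iteration requires. Below is a minimal userland sketch of
that bookkeeping, not the kernel code itself; the struct and helper names
are hypothetical, and only the PSL_D value mirrors the real direction-flag
definition from <machine/psl.h>.

#include <inttypes.h>
#include <stdio.h>

#define	PSL_D	0x00000400	/* RFLAGS direction flag, as in <machine/psl.h> */

struct regs {
	uint64_t rsi, rdi, rcx, rflags;
};

/* Returns 1 if a 'rep'-prefixed MOVS must execute another iteration. */
static int
movs_step(struct regs *r, int opsize, int repeat)
{
	/* ...the element copy between source and destination goes here... */

	if (r->rflags & PSL_D) {	/* DF=1: addresses move down */
		r->rsi -= opsize;
		r->rdi -= opsize;
	} else {			/* DF=0: addresses move up */
		r->rsi += opsize;
		r->rdi += opsize;
	}
	if (repeat) {
		r->rcx--;
		return (r->rcx != 0);	/* restart while the count is nonzero */
	}
	return (0);
}

int
main(void)
{
	struct regs r = { .rsi = 0x1000, .rdi = 0x2000, .rcx = 4, .rflags = 0 };

	while (movs_step(&r, 8, 1))	/* a 'rep movsq' of 4 quadwords */
		;
	printf("rsi=%#" PRIx64 " rdi=%#" PRIx64 " rcx=%" PRIu64 "\n",
	    r.rsi, r.rdi, r.rcx);
	return (0);
}

With the fix, this bookkeeping also runs for the memory-to-MMIO case, so
%rsi, %rdi and %rcx end up in the state the guest expects.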
-rw-r--r--  lib/libvmmapi/vmmapi.c                     12
-rw-r--r--  lib/libvmmapi/vmmapi.h                      2
-rw-r--r--  sys/amd64/include/vmm_instruction_emul.h    2
-rw-r--r--  sys/amd64/vmm/vmm.c                         2
-rw-r--r--  sys/amd64/vmm/vmm_dev.c                     4
-rw-r--r--  sys/amd64/vmm/vmm_instruction_emul.c       81
6 files changed, 68 insertions, 35 deletions
diff --git a/lib/libvmmapi/vmmapi.c b/lib/libvmmapi/vmmapi.c
index 9828876..0c15845 100644
--- a/lib/libvmmapi/vmmapi.c
+++ b/lib/libvmmapi/vmmapi.c
@@ -979,6 +979,18 @@ gla2gpa(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
 	return (error);
 }
 
+int
+vm_gla2gpa(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
+    uint64_t gla, int prot, uint64_t *gpa)
+{
+	int error, fault;
+
+	error = gla2gpa(ctx, vcpu, paging, gla, prot, &fault, gpa);
+	if (fault)
+		error = fault;
+	return (error);
+}
+
 #ifndef min
 #define	min(a,b)	(((a) < (b)) ? (a) : (b))
 #endif
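As a usage sketch of the new libvmmapi entry point: the snippet below is
hypothetical and assumes an already-opened vmctx; a real caller would fill
'paging' from the vcpu's current CR3/CPL/paging mode rather than zeroing it.

#include <sys/types.h>
#include <sys/mman.h>

#include <machine/vmm.h>

#include <inttypes.h>
#include <stdio.h>
#include <vmmapi.h>

/* Translate a guest linear address on vcpu 0 via the new wrapper. */
static int
translate(struct vmctx *ctx, uint64_t gla)
{
	struct vm_guest_paging paging = { 0 };	/* illustrative only */
	uint64_t gpa;
	int error;

	error = vm_gla2gpa(ctx, 0, &paging, gla, PROT_READ, &gpa);
	if (error == 0)
		printf("gla %#" PRIx64 " -> gpa %#" PRIx64 "\n", gla, gpa);
	return (error);
}

As the wrapper above shows, a positive return is the propagated fault
indication from gla2gpa() rather than an errno-style failure.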
diff --git a/lib/libvmmapi/vmmapi.h b/lib/libvmmapi/vmmapi.h
index 06b2930..d001cd8 100644
--- a/lib/libvmmapi/vmmapi.h
+++ b/lib/libvmmapi/vmmapi.h
@@ -63,6 +63,8 @@ int	vm_get_memory_seg(struct vmctx *ctx, vm_paddr_t gpa, size_t *ret_len,
 int	vm_setup_memory(struct vmctx *ctx, size_t len, enum vm_mmap_style s);
 void	*vm_map_gpa(struct vmctx *ctx, vm_paddr_t gaddr, size_t len);
 int	vm_get_gpa_pmap(struct vmctx *, uint64_t gpa, uint64_t *pte, int *num);
+int	vm_gla2gpa(struct vmctx *, int vcpuid, struct vm_guest_paging *paging,
+	    uint64_t gla, int prot, uint64_t *gpa);
 uint32_t vm_get_lowmem_limit(struct vmctx *ctx);
 void	vm_set_lowmem_limit(struct vmctx *ctx, uint32_t limit);
 void	vm_set_memflags(struct vmctx *ctx, int flags);
diff --git a/sys/amd64/include/vmm_instruction_emul.h b/sys/amd64/include/vmm_instruction_emul.h
index 516cc01..651b3b3 100644
--- a/sys/amd64/include/vmm_instruction_emul.h
+++ b/sys/amd64/include/vmm_instruction_emul.h
@@ -90,7 +90,7 @@ int vmm_fetch_instruction(struct vm *vm, int cpuid,
  * Returns 1 if an exception was injected into the guest.
  * Returns -1 otherwise.
  */
-int vmm_gla2gpa(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
+int vm_gla2gpa(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
     uint64_t gla, int prot, uint64_t *gpa);
 
 void vie_init(struct vie *vie, const char *inst_bytes, int inst_length);
diff --git a/sys/amd64/vmm/vmm.c b/sys/amd64/vmm/vmm.c
index 42a275b..6bd5bce 100644
--- a/sys/amd64/vmm/vmm.c
+++ b/sys/amd64/vmm/vmm.c
@@ -2332,7 +2332,7 @@ vm_copy_setup(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
 	remaining = len;
 	while (remaining > 0) {
 		KASSERT(nused < num_copyinfo, ("insufficient vm_copyinfo"));
-		error = vmm_gla2gpa(vm, vcpuid, paging, gla, prot, &gpa);
+		error = vm_gla2gpa(vm, vcpuid, paging, gla, prot, &gpa);
 		if (error)
 			return (error);
 		off = gpa & PAGE_MASK;
diff --git a/sys/amd64/vmm/vmm_dev.c b/sys/amd64/vmm/vmm_dev.c
index 0293d191..5be99cb 100644
--- a/sys/amd64/vmm/vmm_dev.c
+++ b/sys/amd64/vmm/vmm_dev.c
@@ -440,10 +440,10 @@ vmmdev_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag,
 		CTASSERT(PROT_WRITE == VM_PROT_WRITE);
 		CTASSERT(PROT_EXEC == VM_PROT_EXECUTE);
 		gg = (struct vm_gla2gpa *)data;
-		error = vmm_gla2gpa(sc->vm, gg->vcpuid, &gg->paging, gg->gla,
+		error = vm_gla2gpa(sc->vm, gg->vcpuid, &gg->paging, gg->gla,
 		    gg->prot, &gg->gpa);
 		KASSERT(error == 0 || error == 1 || error == -1,
-		    ("%s: vmm_gla2gpa unknown error %d", __func__, error));
+		    ("%s: vm_gla2gpa unknown error %d", __func__, error));
 		if (error >= 0) {
 			/*
 			 * error = 0: the translation was successful
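The comment is truncated here, but it follows the tri-state convention
documented in vmm_instruction_emul.h: 0 means the translation succeeded,
1 means an exception was injected into the guest, and -1 means the GLA
could not be translated. A self-contained sketch of dispatching on that
result (the stub translator below is hypothetical):

#include <errno.h>
#include <inttypes.h>
#include <stdio.h>

/* Hypothetical stand-in for vm_gla2gpa(): returns 0, 1 or -1. */
static int
stub_gla2gpa(uint64_t gla, uint64_t *gpa)
{
	if (gla == 0)
		return (-1);		/* no translation exists */
	if (gla & 1)
		return (1);		/* fault injected into the guest */
	*gpa = gla + 0x1000;		/* pretend translation */
	return (0);
}

static int
handle(uint64_t gla)
{
	uint64_t gpa;

	switch (stub_gla2gpa(gla, &gpa)) {
	case 0:				/* success: 'gpa' is valid */
		printf("gla %#" PRIx64 " -> gpa %#" PRIx64 "\n", gla, gpa);
		return (0);
	case 1:				/* resume the guest; it takes the fault */
		return (0);
	default:			/* -1: not mapped */
		return (EFAULT);
	}
}

int
main(void)
{
	handle(0x2000);
	handle(0x2001);
	handle(0);
	return (0);
}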
diff --git a/sys/amd64/vmm/vmm_instruction_emul.c b/sys/amd64/vmm/vmm_instruction_emul.c
index 2dfbe8b..ca0b144 100644
--- a/sys/amd64/vmm/vmm_instruction_emul.c
+++ b/sys/amd64/vmm/vmm_instruction_emul.c
@@ -634,7 +634,7 @@ emulate_movs(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
 #else
 	struct iovec copyinfo[2];
 #endif
-	uint64_t dstaddr, srcaddr, val;
+	uint64_t dstaddr, srcaddr, dstgpa, srcgpa, val;
 	uint64_t rcx, rdi, rsi, rflags;
 	int error, opsize, seg, repeat;
@@ -669,7 +669,7 @@ emulate_movs(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
 	 * (1) memory	memory	n/a
 	 * (2) memory	mmio	emulated
 	 * (3) mmio	memory	emulated
-	 * (4) mmio	mmio	not emulated
+	 * (4) mmio	mmio	emulated
 	 *
 	 * At this point we don't have sufficient information to distinguish
 	 * between (2), (3) and (4). We use 'vm_copy_setup()' to tease this
@@ -694,7 +694,8 @@ emulate_movs(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
 		vm_copyin(vm, vcpuid, copyinfo, &val, opsize);
 		vm_copy_teardown(vm, vcpuid, copyinfo, nitems(copyinfo));
 		error = memwrite(vm, vcpuid, gpa, val, opsize, arg);
-		goto done;
+		if (error)
+			goto done;
 	} else if (error > 0) {
 		/*
 		 * Resume guest execution to handle fault.
@@ -705,37 +706,55 @@ emulate_movs(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
 		 * 'vm_copy_setup()' is expected to fail for cases (3) and (4)
 		 * if 'srcaddr' is in the mmio space.
 		 */
-	}
-
-	error = get_gla(vm, vcpuid, vie, paging, opsize, vie->addrsize,
-	    PROT_WRITE, VM_REG_GUEST_ES, VM_REG_GUEST_RDI, &dstaddr);
-	if (error)
-		goto done;
-	error = vm_copy_setup(vm, vcpuid, paging, dstaddr, opsize,
-	    PROT_WRITE, copyinfo, nitems(copyinfo));
-	if (error == 0) {
-		/*
-		 * case (3): read from MMIO and write to system memory.
-		 *
-		 * A MMIO read can have side-effects so we commit to it
-		 * only after vm_copy_setup() is successful. If a page-fault
-		 * needs to be injected into the guest then it will happen
-		 * before the MMIO read is attempted.
-		 */
-		error = memread(vm, vcpuid, gpa, &val, opsize, arg);
+		error = get_gla(vm, vcpuid, vie, paging, opsize, vie->addrsize,
+		    PROT_WRITE, VM_REG_GUEST_ES, VM_REG_GUEST_RDI, &dstaddr);
 		if (error)
 			goto done;
-		vm_copyout(vm, vcpuid, &val, copyinfo, opsize);
-		vm_copy_teardown(vm, vcpuid, copyinfo, nitems(copyinfo));
-	} else if (error > 0) {
-		/*
-		 * Resume guest execution to handle fault.
-		 */
-		goto done;
-	} else {
-		goto done;
+		error = vm_copy_setup(vm, vcpuid, paging, dstaddr, opsize,
+		    PROT_WRITE, copyinfo, nitems(copyinfo));
+		if (error == 0) {
+			/*
+			 * case (3): read from MMIO and write to system memory.
+			 *
+			 * A MMIO read can have side-effects so we
+			 * commit to it only after vm_copy_setup() is
+			 * successful. If a page-fault needs to be
+			 * injected into the guest then it will happen
+			 * before the MMIO read is attempted.
+			 */
+			error = memread(vm, vcpuid, gpa, &val, opsize, arg);
+			if (error)
+				goto done;
+
+			vm_copyout(vm, vcpuid, &val, copyinfo, opsize);
+			vm_copy_teardown(vm, vcpuid, copyinfo, nitems(copyinfo));
+		} else if (error > 0) {
+			/*
+			 * Resume guest execution to handle fault.
+			 */
+			goto done;
+		} else {
+			/*
+			 * Case (4): read from and write to mmio.
+			 */
+			error = vm_gla2gpa(vm, vcpuid, paging, srcaddr,
+			    PROT_READ, &srcgpa);
+			if (error)
+				goto done;
+			error = memread(vm, vcpuid, srcgpa, &val, opsize, arg);
+			if (error)
+				goto done;
+
+			error = vm_gla2gpa(vm, vcpuid, paging, dstaddr,
+			    PROT_WRITE, &dstgpa);
+			if (error)
+				goto done;
+			error = memwrite(vm, vcpuid, dstgpa, val, opsize, arg);
+			if (error)
+				goto done;
+		}
 	}
 
 	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RSI, &rsi);
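For a sense of what the new case (4) covers, here is a hypothetical
guest-side copy whose source and destination both sit in MMIO space, for
example moving pixels between two mapped regions of an emulated frame
buffer; before this change such an instruction could not be emulated:

#include <stddef.h>

/* Copy 'ndwords' 32-bit words between two MMIO mappings (illustrative). */
static inline void
mmio_copy(volatile void *dst, const volatile void *src, size_t ndwords)
{
	__asm__ __volatile__("rep movsl"
	    : "+D" (dst), "+S" (src), "+c" (ndwords)
	    :
	    : "memory");
}

Each element of such a copy still takes the full emulation path (two
vm_gla2gpa() translations plus a memread() and a memwrite() per
iteration), so the result is correctness rather than speed.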
@@ -1465,7 +1484,7 @@ ptp_hold(struct vm *vm, vm_paddr_t ptpphys, size_t len, void **cookie)
 }
 
 int
-vmm_gla2gpa(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
+vm_gla2gpa(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
     uint64_t gla, int prot, uint64_t *gpa)
 {
 	int nlevels, pfcode, ptpshift, ptpindex, retval, usermode, writable;