author		neel <neel@FreeBSD.org>	2014-05-24 23:12:30 +0000
committer	neel <neel@FreeBSD.org>	2014-05-24 23:12:30 +0000
commit		51a05acc08355b84ae16b193ccb7037691f8ba90 (patch)
tree		6d3c6dd2e08177b036c0a323543c552eea67411c
parent		75eadcdacf2b5f0dc80b3adb8f91fc71cb1c62cc (diff)
Add libvmmapi functions vm_copyin() and vm_copyout() to copy into and out
of the guest linear address space. These APIs in turn use a new ioctl
'VM_GLA2GPA' to convert the guest linear address to guest physical. Use the
new copyin/copyout APIs when emulating ins/outs instructions in bhyve(8).
-rw-r--r--	lib/libvmmapi/vmmapi.c		86
-rw-r--r--	lib/libvmmapi/vmmapi.h		5
-rw-r--r--	sys/amd64/include/vmm.h		1
-rw-r--r--	sys/amd64/include/vmm_dev.h	12
-rw-r--r--	sys/amd64/vmm/vmm_dev.c		24
-rw-r--r--	sys/amd64/vmm/vmm_ioport.c	16
-rw-r--r--	usr.sbin/bhyve/inout.c		47
7 files changed, 154 insertions, 37 deletions
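
For context, here is a minimal caller sketch (not part of this commit) of the
new library API; the vcpu id, guest paging state, and guest linear address are
assumed to come from a vm exit, as in the bhyve(8) change further down:

	#include <sys/types.h>
	#include <machine/vmm.h>
	#include <vmmapi.h>

	/*
	 * Hypothetical helper: fetch 'len' bytes from guest linear address
	 * 'gla' into 'buf' using the new vm_copyin() API.
	 */
	static int
	fetch_guest_bytes(struct vmctx *ctx, int vcpu,
	    struct vm_guest_paging *paging, uint64_t gla, void *buf, size_t len)
	{
		int error;

		error = vm_copyin(ctx, vcpu, paging, gla, buf, len);
		if (error == -1)
			return (-1);	/* translation or ioctl error */
		if (error == 1)
			return (1);	/* fault injected; resume the guest */
		return (0);		/* success: 'len' bytes now in 'buf' */
	}

vm_copyout() is the mirror image: it takes a host buffer and a destination
guest linear address and returns the same 0/1/-1 convention.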
diff --git a/lib/libvmmapi/vmmapi.c b/lib/libvmmapi/vmmapi.c
index 5e630f8..45fffcf 100644
--- a/lib/libvmmapi/vmmapi.c
+++ b/lib/libvmmapi/vmmapi.c
@@ -35,6 +35,7 @@ __FBSDID("$FreeBSD$");
#include <sys/mman.h>
#include <machine/specialreg.h>
+#include <machine/param.h>
#include <stdio.h>
#include <stdlib.h>
@@ -937,3 +938,88 @@ vm_get_hpet_capabilities(struct vmctx *ctx, uint32_t *capabilities)
*capabilities = cap.capabilities;
return (error);
}
+
+static int
+vm_gla2gpa(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
+ uint64_t gla, int prot, int *fault, uint64_t *gpa)
+{
+ struct vm_gla2gpa gg;
+ int error;
+
+ bzero(&gg, sizeof(struct vm_gla2gpa));
+ gg.vcpuid = vcpu;
+ gg.prot = prot;
+ gg.gla = gla;
+ gg.paging = *paging;
+
+ error = ioctl(ctx->fd, VM_GLA2GPA, &gg);
+ if (error == 0) {
+ *fault = gg.fault;
+ *gpa = gg.gpa;
+ }
+ return (error);
+}
+
+#ifndef min
+#define min(a,b) (((a) < (b)) ? (a) : (b))
+#endif
+
+int
+vm_copyin(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
+ uint64_t gla, void *vp, size_t len)
+{
+ char *dst;
+ const char *src;
+ uint64_t gpa;
+ int error, fault, n, off;
+
+ dst = vp;
+ while (len) {
+ error = vm_gla2gpa(ctx, vcpu, paging, gla, PROT_READ,
+ &fault, &gpa);
+ if (error)
+ return (-1);
+ if (fault)
+ return (1);
+
+ off = gpa & PAGE_MASK;
+ n = min(len, PAGE_SIZE - off);
+ src = vm_map_gpa(ctx, gpa, n);
+ bcopy(src, dst, n);
+
+ gla += n;
+ dst += n;
+ len -= n;
+ }
+ return (0);
+}
+
+int
+vm_copyout(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
+ const void *vp, uint64_t gla, size_t len)
+{
+ uint64_t gpa;
+ char *dst;
+ const char *src;
+ int error, fault, n, off;
+
+ src = vp;
+ while (len) {
+ error = vm_gla2gpa(ctx, vcpu, paging, gla, PROT_WRITE,
+ &fault, &gpa);
+ if (error)
+ return (-1);
+ if (fault)
+ return (1);
+
+ off = gpa & PAGE_MASK;
+ n = min(len, PAGE_SIZE - off);
+ dst = vm_map_gpa(ctx, gpa, n);
+ bcopy(src, dst, n);
+
+ gla += n;
+ src += n;
+ len -= n;
+ }
+ return (0);
+}
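
The copy loops above split every transfer at guest page boundaries because each
VM_GLA2GPA translation is valid only within a single page; adjacent guest
linear pages may map to discontiguous guest physical pages. A standalone sketch
of the same chunking arithmetic, with a hypothetical address and 4KB pages
assumed:

	#include <stdio.h>
	#include <stdint.h>
	#include <inttypes.h>

	#define	PAGE_SIZE	4096
	#define	PAGE_MASK	(PAGE_SIZE - 1)
	#define	min(a, b)	(((a) < (b)) ? (a) : (b))

	int
	main(void)
	{
		/* Hypothetical gla that is 6 bytes short of a page boundary. */
		uint64_t gla = 0x7ffffffffffa;
		size_t len = 16;

		/*
		 * Same chunking as vm_copyin()/vm_copyout(): never cross a
		 * page.  The page offset of the gla equals that of the gpa,
		 * so the split points are identical.
		 */
		while (len) {
			size_t off = gla & PAGE_MASK;
			size_t n = min(len, PAGE_SIZE - off);

			printf("copy %zu bytes at gla 0x%" PRIx64 "\n", n, gla);
			gla += n;
			len -= n;
		}
		return (0);
	}

For the address above this prints a 6-byte chunk followed by a 10-byte chunk,
which is exactly how vm_copyin() would issue two separate translations.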
diff --git a/lib/libvmmapi/vmmapi.h b/lib/libvmmapi/vmmapi.h
index 88e9947..cad41c8 100644
--- a/lib/libvmmapi/vmmapi.h
+++ b/lib/libvmmapi/vmmapi.h
@@ -109,6 +109,11 @@ int vm_set_x2apic_state(struct vmctx *ctx, int vcpu, enum x2apic_state s);
int vm_get_hpet_capabilities(struct vmctx *ctx, uint32_t *capabilities);
+int vm_copyin(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
+ uint64_t gla_src, void *dst, size_t len);
+int vm_copyout(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
+ const void *src, uint64_t gla_dst, size_t len);
+
/* Reset vcpu register state */
int vcpu_reset(struct vmctx *ctx, int vcpu);
diff --git a/sys/amd64/include/vmm.h b/sys/amd64/include/vmm.h
index 021efaf..28e2808 100644
--- a/sys/amd64/include/vmm.h
+++ b/sys/amd64/include/vmm.h
@@ -427,7 +427,6 @@ struct vm_inout_str {
enum vm_reg_name seg_name;
struct seg_desc seg_desc;
uint64_t gla; /* may be set to VIE_INVALID_GLA */
- uint64_t gpa;
};
struct vm_exit {
diff --git a/sys/amd64/include/vmm_dev.h b/sys/amd64/include/vmm_dev.h
index ecafa9c..f094d51 100644
--- a/sys/amd64/include/vmm_dev.h
+++ b/sys/amd64/include/vmm_dev.h
@@ -168,6 +168,15 @@ struct vm_suspend {
enum vm_suspend_how how;
};
+struct vm_gla2gpa {
+ int vcpuid; /* inputs */
+ int prot; /* PROT_READ or PROT_WRITE */
+ uint64_t gla;
+ struct vm_guest_paging paging;
+ int fault; /* outputs */
+ uint64_t gpa;
+};
+
enum {
/* general routines */
IOCNUM_ABIVERS = 0,
@@ -180,6 +189,7 @@ enum {
IOCNUM_MAP_MEMORY = 10,
IOCNUM_GET_MEMORY_SEG = 11,
IOCNUM_GET_GPA_PMAP = 12,
+ IOCNUM_GLA2GPA = 13,
/* register/state accessors */
IOCNUM_SET_REGISTER = 20,
@@ -289,4 +299,6 @@ enum {
_IOR('v', IOCNUM_GET_HPET_CAPABILITIES, struct vm_hpet_cap)
#define VM_GET_GPA_PMAP \
_IOWR('v', IOCNUM_GET_GPA_PMAP, struct vm_gpa_pte)
+#define VM_GLA2GPA \
+ _IOWR('v', IOCNUM_GLA2GPA, struct vm_gla2gpa)
#endif
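
For illustration only, a hedged sketch of driving the new ioctl directly
against the vmm device node rather than through the libvmmapi wrapper shown
earlier; the device path and surrounding error handling are assumptions, not
part of this commit:

	#include <sys/types.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <machine/vmm.h>
	#include <machine/vmm_dev.h>
	#include <string.h>

	/*
	 * Hypothetical example: translate one guest linear address for vcpu 0.
	 * 'vmfd' is assumed to be an open descriptor for /dev/vmm/<vmname>.
	 */
	static int
	translate_read(int vmfd, uint64_t gla, struct vm_guest_paging *paging,
	    uint64_t *gpa)
	{
		struct vm_gla2gpa gg;

		memset(&gg, 0, sizeof(gg));
		gg.vcpuid = 0;			/* inputs */
		gg.prot = PROT_READ;
		gg.gla = gla;
		gg.paging = *paging;

		if (ioctl(vmfd, VM_GLA2GPA, &gg) != 0)
			return (-1);		/* kernel returned e.g. EFAULT */
		if (gg.fault)
			return (1);		/* a fault was injected into the guest */
		*gpa = gg.gpa;			/* output */
		return (0);
	}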
diff --git a/sys/amd64/vmm/vmm_dev.c b/sys/amd64/vmm/vmm_dev.c
index f1d5795..0561785 100644
--- a/sys/amd64/vmm/vmm_dev.c
+++ b/sys/amd64/vmm/vmm_dev.c
@@ -48,6 +48,7 @@ __FBSDID("$FreeBSD$");
#include <machine/vmparam.h>
#include <machine/vmm.h>
+#include <machine/vmm_instruction_emul.h>
#include <machine/vmm_dev.h>
#include "vmm_lapic.h"
@@ -168,6 +169,7 @@ vmmdev_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag,
struct vm_x2apic *x2apic;
struct vm_gpa_pte *gpapte;
struct vm_suspend *vmsuspend;
+ struct vm_gla2gpa *gg;
sc = vmmdev_lookup2(cdev);
if (sc == NULL)
@@ -192,6 +194,7 @@ vmmdev_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag,
case VM_PPTDEV_MSI:
case VM_PPTDEV_MSIX:
case VM_SET_X2APIC_STATE:
+ case VM_GLA2GPA:
/*
* XXX fragile, handle with care
* Assumes that the first field of the ioctl data is the vcpu.
@@ -415,6 +418,27 @@ vmmdev_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag,
case VM_GET_HPET_CAPABILITIES:
error = vhpet_getcap((struct vm_hpet_cap *)data);
break;
+ case VM_GLA2GPA: {
+ CTASSERT(PROT_READ == VM_PROT_READ);
+ CTASSERT(PROT_WRITE == VM_PROT_WRITE);
+ CTASSERT(PROT_EXEC == VM_PROT_EXECUTE);
+ gg = (struct vm_gla2gpa *)data;
+ error = vmm_gla2gpa(sc->vm, gg->vcpuid, &gg->paging, gg->gla,
+ gg->prot, &gg->gpa);
+ KASSERT(error == 0 || error == 1 || error == -1,
+ ("%s: vmm_gla2gpa unknown error %d", __func__, error));
+ if (error >= 0) {
+ /*
+ * error = 0: the translation was successful
+ * error = 1: a fault was injected into the guest
+ */
+ gg->fault = error;
+ error = 0;
+ } else {
+ error = EFAULT;
+ }
+ break;
+ }
default:
error = ENOTTY;
break;
diff --git a/sys/amd64/vmm/vmm_ioport.c b/sys/amd64/vmm/vmm_ioport.c
index f9fda2d..96f2418 100644
--- a/sys/amd64/vmm/vmm_ioport.c
+++ b/sys/amd64/vmm/vmm_ioport.c
@@ -145,7 +145,7 @@ emulate_inout_str(struct vm *vm, int vcpuid, struct vm_exit *vmexit, bool *retu)
{
struct vm_inout_str *vis;
uint64_t gla, index, segbase;
- int error, in;
+ int in;
vis = &vmexit->u.inout_str;
in = vis->inout.in;
@@ -197,18 +197,8 @@ emulate_inout_str(struct vm *vm, int vcpuid, struct vm_exit *vmexit, bool *retu)
}
vis->gla = gla;
- error = vmm_gla2gpa(vm, vcpuid, &vis->paging, gla,
- in ? VM_PROT_WRITE : VM_PROT_READ, &vis->gpa);
- KASSERT(error == 0 || error == 1 || error == -1,
- ("%s: vmm_gla2gpa unexpected error %d", __func__, error));
- if (error == -1) {
- return (EFAULT);
- } else if (error == 1) {
- return (0); /* Resume guest to handle page fault */
- } else {
- *retu = true;
- return (0); /* Return to userspace to finish emulation */
- }
+ *retu = true;
+ return (0); /* Return to userspace to finish emulation */
}
int
diff --git a/usr.sbin/bhyve/inout.c b/usr.sbin/bhyve/inout.c
index e7cbd98..9df3ab4 100644
--- a/usr.sbin/bhyve/inout.c
+++ b/usr.sbin/bhyve/inout.c
@@ -102,14 +102,12 @@ int
emulate_inout(struct vmctx *ctx, int vcpu, struct vm_exit *vmexit, int strict)
{
int addrsize, bytes, flags, in, port, rep;
- uint64_t gpa, gpaend;
uint32_t val;
inout_func_t handler;
void *arg;
- char *gva;
int error, retval;
enum vm_reg_name idxreg;
- uint64_t index, count;
+ uint64_t gla, index, count;
struct vm_inout_str *vis;
bytes = vmexit->u.inout.bytes;
@@ -149,10 +147,6 @@ emulate_inout(struct vmctx *ctx, int vcpu, struct vm_exit *vmexit, int strict)
/* Count register */
count = vis->count & vie_size2mask(addrsize);
- gpa = vis->gpa;
- gpaend = rounddown(gpa + PAGE_SIZE, PAGE_SIZE);
- gva = paddr_guest2host(ctx, gpa, gpaend - gpa);
-
if (vie_alignment_check(vis->paging.cpl, bytes, vis->cr0,
vis->rflags, vis->gla)) {
error = vm_inject_exception2(ctx, vcpu, IDT_AC, 0);
@@ -160,26 +154,34 @@ emulate_inout(struct vmctx *ctx, int vcpu, struct vm_exit *vmexit, int strict)
return (INOUT_RESTART);
}
- while (count != 0 && gpa < gpaend) {
- /*
- * XXX this may not work for unaligned accesses because
- * the last access on the page may spill over into the
- * adjacent page in the linear address space. This is a
- * problem because we don't have a gla2gpa() mapping of
- * this adjacent page.
- */
- assert(gpaend - gpa >= bytes);
-
+ gla = vis->gla;
+ while (count) {
val = 0;
- if (!in)
- bcopy(gva, &val, bytes);
+ if (!in) {
+ error = vm_copyin(ctx, vcpu, &vis->paging,
+ gla, &val, bytes);
+ assert(error == 0 || error == 1 || error == -1);
+ if (error) {
+ retval = (error == 1) ? INOUT_RESTART :
+ INOUT_ERROR;
+ break;
+ }
+ }
retval = handler(ctx, vcpu, in, port, bytes, &val, arg);
if (retval != 0)
break;
- if (in)
- bcopy(&val, gva, bytes);
+ if (in) {
+ error = vm_copyout(ctx, vcpu, &vis->paging,
+ &val, gla, bytes);
+ assert(error == 0 || error == 1 || error == -1);
+ if (error) {
+ retval = (error == 1) ? INOUT_RESTART :
+ INOUT_ERROR;
+ break;
+ }
+ }
/* Update index */
if (vis->rflags & PSL_D)
@@ -188,8 +190,7 @@ emulate_inout(struct vmctx *ctx, int vcpu, struct vm_exit *vmexit, int strict)
index += bytes;
count--;
- gva += bytes;
- gpa += bytes;
+ gla += bytes;
}
/* Update index register */