Diffstat:
-rw-r--r--  lib/libvmmapi/vmmapi.c          |   7
-rw-r--r--  lib/libvmmapi/vmmapi.h          |   1
-rw-r--r--  sys/amd64/include/vmm.h         |   1
-rw-r--r--  sys/amd64/include/vmm_dev.h     |   3
-rw-r--r--  sys/amd64/vmm/vmm.c             | 204
-rw-r--r--  sys/amd64/vmm/vmm_dev.c         |   4
-rw-r--r--  sys/amd64/vmm/vmm_stat.c        |  16
-rw-r--r--  sys/amd64/vmm/vmm_stat.h        |   5
-rw-r--r--  usr.sbin/bhyveload/bhyveload.c  |  21
9 files changed, 177 insertions(+), 85 deletions(-)
diff --git a/lib/libvmmapi/vmmapi.c b/lib/libvmmapi/vmmapi.c
index 89c7825..5ce3d8e 100644
--- a/lib/libvmmapi/vmmapi.c
+++ b/lib/libvmmapi/vmmapi.c
@@ -367,6 +367,13 @@ vm_suspend(struct vmctx *ctx, enum vm_suspend_how how)
return (ioctl(ctx->fd, VM_SUSPEND, &vmsuspend));
}
+int
+vm_reinit(struct vmctx *ctx)
+{
+
+ return (ioctl(ctx->fd, VM_REINIT, 0));
+}
+
static int
vm_inject_exception_real(struct vmctx *ctx, int vcpu, int vector,
int error_code, int error_code_valid)
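The new vm_reinit() wrapper just issues the VM_REINIT ioctl on the VM's descriptor and returns the ioctl's result, so callers see -1 with errno set on failure (EBUSY if some active vcpu has not been suspended yet). A minimal userland sketch of calling it through libvmmapi, assuming the header order used elsewhere in bhyve's tools; "guest0" is a placeholder name and the VM is assumed to already exist with its vcpus suspended:

#include <sys/param.h>

#include <machine/vmm.h>
#include <vmmapi.h>

#include <err.h>

int
main(void)
{
	struct vmctx *ctx;

	/* "guest0" is a placeholder; the VM must already exist. */
	ctx = vm_open("guest0");
	if (ctx == NULL)
		err(1, "vm_open");

	/*
	 * vm_reinit() returns the ioctl's result directly: 0 on success,
	 * -1 with errno == EBUSY if an active vcpu is not yet suspended.
	 */
	if (vm_reinit(ctx) != 0)
		err(1, "vm_reinit");

	return (0);
}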
diff --git a/lib/libvmmapi/vmmapi.h b/lib/libvmmapi/vmmapi.h
index 0f2e3ae..4cc4290 100644
--- a/lib/libvmmapi/vmmapi.h
+++ b/lib/libvmmapi/vmmapi.h
@@ -69,6 +69,7 @@ int vm_get_register(struct vmctx *ctx, int vcpu, int reg, uint64_t *retval);
int vm_run(struct vmctx *ctx, int vcpu, uint64_t rip,
struct vm_exit *ret_vmexit);
int vm_suspend(struct vmctx *ctx, enum vm_suspend_how how);
+int vm_reinit(struct vmctx *ctx);
int vm_apicid2vcpu(struct vmctx *ctx, int apicid);
int vm_inject_exception(struct vmctx *ctx, int vcpu, int vec);
int vm_inject_exception2(struct vmctx *ctx, int vcpu, int vec, int errcode);
diff --git a/sys/amd64/include/vmm.h b/sys/amd64/include/vmm.h
index 05df325..00e1d96 100644
--- a/sys/amd64/include/vmm.h
+++ b/sys/amd64/include/vmm.h
@@ -105,6 +105,7 @@ extern struct vmm_ops vmm_ops_amd;
int vm_create(const char *name, struct vm **retvm);
void vm_destroy(struct vm *vm);
+int vm_reinit(struct vm *vm);
const char *vm_name(struct vm *vm);
int vm_malloc(struct vm *vm, vm_paddr_t gpa, size_t len);
int vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa);
diff --git a/sys/amd64/include/vmm_dev.h b/sys/amd64/include/vmm_dev.h
index a6568dc..9b3b00d 100644
--- a/sys/amd64/include/vmm_dev.h
+++ b/sys/amd64/include/vmm_dev.h
@@ -196,6 +196,7 @@ enum {
IOCNUM_SET_CAPABILITY = 2,
IOCNUM_GET_CAPABILITY = 3,
IOCNUM_SUSPEND = 4,
+ IOCNUM_REINIT = 5,
/* memory apis */
IOCNUM_MAP_MEMORY = 10,
@@ -251,6 +252,8 @@ enum {
_IOWR('v', IOCNUM_RUN, struct vm_run)
#define VM_SUSPEND \
_IOW('v', IOCNUM_SUSPEND, struct vm_suspend)
+#define VM_REINIT \
+ _IO('v', IOCNUM_REINIT)
#define VM_MAP_MEMORY \
_IOWR('v', IOCNUM_MAP_MEMORY, struct vm_memory_segment)
#define VM_GET_MEMORY_SEG \
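VM_REINIT is defined with _IO(), so unlike VM_SUSPEND it carries no argument structure and the third ioctl argument is ignored. A hedged sketch of driving the ioctl directly on the VM's /dev/vmm node without libvmmapi; the device path is a placeholder and the headers are assumed to be installed as on a stock FreeBSD system:

#include <sys/param.h>
#include <sys/ioctl.h>

#include <machine/vmm.h>
#include <machine/vmm_dev.h>	/* VM_REINIT */

#include <err.h>
#include <fcntl.h>

int
main(void)
{
	int fd;

	/* Placeholder device node for an existing VM. */
	fd = open("/dev/vmm/guest0", O_RDWR);
	if (fd < 0)
		err(1, "open");

	/* VM_REINIT is _IO(): no argument structure is passed. */
	if (ioctl(fd, VM_REINIT, 0) < 0)
		err(1, "VM_REINIT");

	return (0);
}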
diff --git a/sys/amd64/vmm/vmm.c b/sys/amd64/vmm/vmm.c
index e84359d..435ba39 100644
--- a/sys/amd64/vmm/vmm.c
+++ b/sys/amd64/vmm/vmm.c
@@ -84,25 +84,31 @@ __FBSDID("$FreeBSD$");
struct vlapic;
+/*
+ * Initialization:
+ * (a) allocated when vcpu is created
+ * (i) initialized when vcpu is created and when it is reinitialized
+ * (o) initialized the first time the vcpu is created
+ * (x) initialized before use
+ */
struct vcpu {
- int flags;
- enum vcpu_state state;
- struct mtx mtx;
- int hostcpu; /* host cpuid this vcpu last ran on */
- uint64_t guest_msrs[VMM_MSR_NUM];
- struct vlapic *vlapic;
- int vcpuid;
- struct savefpu *guestfpu; /* guest fpu state */
- uint64_t guest_xcr0;
- void *stats;
- struct vm_exit exitinfo;
- enum x2apic_state x2apic_state;
- int nmi_pending;
- int extint_pending;
- struct vm_exception exception;
- int exception_pending;
+ struct mtx mtx; /* (o) protects 'state' and 'hostcpu' */
+ enum vcpu_state state; /* (o) vcpu state */
+ int hostcpu; /* (o) vcpu's host cpu */
+ struct vlapic *vlapic; /* (i) APIC device model */
+ enum x2apic_state x2apic_state; /* (i) APIC mode */
+ int nmi_pending; /* (i) NMI pending */
+ int extint_pending; /* (i) INTR pending */
+ struct vm_exception exception; /* (x) exception collateral */
+ int exception_pending; /* (i) exception pending */
+ struct savefpu *guestfpu; /* (a,i) guest fpu state */
+ uint64_t guest_xcr0; /* (i) guest %xcr0 register */
+ void *stats; /* (a,i) statistics */
+ uint64_t guest_msrs[VMM_MSR_NUM]; /* (i) emulated MSRs */
+ struct vm_exit exitinfo; /* (x) exit reason and collateral */
};
+#define vcpu_lock_initialized(v) mtx_initialized(&((v)->mtx))
#define vcpu_lock_init(v) mtx_init(&((v)->mtx), "vcpu lock", 0, MTX_SPIN)
#define vcpu_lock(v) mtx_lock_spin(&((v)->mtx))
#define vcpu_unlock(v) mtx_unlock_spin(&((v)->mtx))
@@ -116,36 +122,33 @@ struct mem_seg {
};
#define VM_MAX_MEMORY_SEGMENTS 2
+/*
+ * Initialization:
+ * (o) initialized the first time the VM is created
+ * (i) initialized when VM is created and when it is reinitialized
+ * (x) initialized before use
+ */
struct vm {
- void *cookie; /* processor-specific data */
- void *iommu; /* iommu-specific data */
- struct vhpet *vhpet; /* virtual HPET */
- struct vioapic *vioapic; /* virtual ioapic */
- struct vatpic *vatpic; /* virtual atpic */
- struct vatpit *vatpit; /* virtual atpit */
- struct vmspace *vmspace; /* guest's address space */
- struct vcpu vcpu[VM_MAXCPU];
- int num_mem_segs;
- struct mem_seg mem_segs[VM_MAX_MEMORY_SEGMENTS];
- char name[VM_MAX_NAMELEN];
-
- /*
- * Set of active vcpus.
- * An active vcpu is one that has been started implicitly (BSP) or
- * explicitly (AP) by sending it a startup ipi.
- */
- volatile cpuset_t active_cpus;
-
- struct mtx rendezvous_mtx;
- cpuset_t rendezvous_req_cpus;
- cpuset_t rendezvous_done_cpus;
- void *rendezvous_arg;
+ void *cookie; /* (i) cpu-specific data */
+ void *iommu; /* (x) iommu-specific data */
+ struct vhpet *vhpet; /* (i) virtual HPET */
+ struct vioapic *vioapic; /* (i) virtual ioapic */
+ struct vatpic *vatpic; /* (i) virtual atpic */
+ struct vatpit *vatpit; /* (i) virtual atpit */
+ volatile cpuset_t active_cpus; /* (i) active vcpus */
+ int suspend; /* (i) stop VM execution */
+ volatile cpuset_t suspended_cpus; /* (i) suspended vcpus */
+ volatile cpuset_t halted_cpus; /* (x) cpus in a hard halt */
+ cpuset_t rendezvous_req_cpus; /* (x) rendezvous requested */
+ cpuset_t rendezvous_done_cpus; /* (x) rendezvous finished */
+ void *rendezvous_arg; /* (x) rendezvous func/arg */
vm_rendezvous_func_t rendezvous_func;
-
- int suspend;
- volatile cpuset_t suspended_cpus;
-
- volatile cpuset_t halted_cpus;
+ struct mtx rendezvous_mtx; /* (o) rendezvous lock */
+ int num_mem_segs; /* (o) guest memory segments */
+ struct mem_seg mem_segs[VM_MAX_MEMORY_SEGMENTS];
+ struct vmspace *vmspace; /* (o) guest's address space */
+ char name[VM_MAX_NAMELEN]; /* (o) virtual machine name */
+ struct vcpu vcpu[VM_MAXCPU]; /* (i) guest vcpus */
};
static int vmm_initialized;
@@ -206,31 +209,46 @@ SYSCTL_INT(_hw_vmm, OID_AUTO, ipinum, CTLFLAG_RD, &vmm_ipinum, 0,
"IPI vector used for vcpu notifications");
static void
-vcpu_cleanup(struct vm *vm, int i)
+vcpu_cleanup(struct vm *vm, int i, bool destroy)
{
struct vcpu *vcpu = &vm->vcpu[i];
VLAPIC_CLEANUP(vm->cookie, vcpu->vlapic);
- vmm_stat_free(vcpu->stats);
- fpu_save_area_free(vcpu->guestfpu);
+ if (destroy) {
+ vmm_stat_free(vcpu->stats);
+ fpu_save_area_free(vcpu->guestfpu);
+ }
}
static void
-vcpu_init(struct vm *vm, uint32_t vcpu_id)
+vcpu_init(struct vm *vm, int vcpu_id, bool create)
{
struct vcpu *vcpu;
-
+
+ KASSERT(vcpu_id >= 0 && vcpu_id < VM_MAXCPU,
+ ("vcpu_init: invalid vcpu %d", vcpu_id));
+
vcpu = &vm->vcpu[vcpu_id];
- vcpu_lock_init(vcpu);
- vcpu->hostcpu = NOCPU;
- vcpu->vcpuid = vcpu_id;
+ if (create) {
+ KASSERT(!vcpu_lock_initialized(vcpu), ("vcpu %d already "
+ "initialized", vcpu_id));
+ vcpu_lock_init(vcpu);
+ vcpu->state = VCPU_IDLE;
+ vcpu->hostcpu = NOCPU;
+ vcpu->guestfpu = fpu_save_area_alloc();
+ vcpu->stats = vmm_stat_alloc();
+ }
+
vcpu->vlapic = VLAPIC_INIT(vm->cookie, vcpu_id);
vm_set_x2apic_state(vm, vcpu_id, X2APIC_DISABLED);
+ vcpu->nmi_pending = 0;
+ vcpu->extint_pending = 0;
+ vcpu->exception_pending = 0;
vcpu->guest_xcr0 = XFEATURE_ENABLED_X87;
- vcpu->guestfpu = fpu_save_area_alloc();
fpu_save_area_reset(vcpu->guestfpu);
- vcpu->stats = vmm_stat_alloc();
+ vmm_stat_init(vcpu->stats);
+ guest_msrs_init(vm, vcpu_id);
}
struct vm_exit *
@@ -335,10 +353,30 @@ static moduledata_t vmm_kmod = {
DECLARE_MODULE(vmm, vmm_kmod, SI_SUB_SMP + 1, SI_ORDER_ANY);
MODULE_VERSION(vmm, 1);
+static void
+vm_init(struct vm *vm, bool create)
+{
+ int i;
+
+ vm->cookie = VMINIT(vm, vmspace_pmap(vm->vmspace));
+ vm->iommu = NULL;
+ vm->vioapic = vioapic_init(vm);
+ vm->vhpet = vhpet_init(vm);
+ vm->vatpic = vatpic_init(vm);
+ vm->vatpit = vatpit_init(vm);
+
+ CPU_ZERO(&vm->active_cpus);
+
+ vm->suspend = 0;
+ CPU_ZERO(&vm->suspended_cpus);
+
+ for (i = 0; i < VM_MAXCPU; i++)
+ vcpu_init(vm, i, create);
+}
+
int
vm_create(const char *name, struct vm **retvm)
{
- int i;
struct vm *vm;
struct vmspace *vmspace;
@@ -358,18 +396,11 @@ vm_create(const char *name, struct vm **retvm)
vm = malloc(sizeof(struct vm), M_VM, M_WAITOK | M_ZERO);
strcpy(vm->name, name);
+ vm->num_mem_segs = 0;
vm->vmspace = vmspace;
mtx_init(&vm->rendezvous_mtx, "vm rendezvous lock", 0, MTX_DEF);
- vm->cookie = VMINIT(vm, vmspace_pmap(vmspace));
- vm->vioapic = vioapic_init(vm);
- vm->vhpet = vhpet_init(vm);
- vm->vatpic = vatpic_init(vm);
- vm->vatpit = vatpit_init(vm);
- for (i = 0; i < VM_MAXCPU; i++) {
- vcpu_init(vm, i);
- guest_msrs_init(vm, i);
- }
+ vm_init(vm, true);
*retvm = vm;
return (0);
@@ -385,8 +416,8 @@ vm_free_mem_seg(struct vm *vm, struct mem_seg *seg)
bzero(seg, sizeof(*seg));
}
-void
-vm_destroy(struct vm *vm)
+static void
+vm_cleanup(struct vm *vm, bool destroy)
{
int i;
@@ -400,21 +431,48 @@ vm_destroy(struct vm *vm)
vatpic_cleanup(vm->vatpic);
vioapic_cleanup(vm->vioapic);
- for (i = 0; i < vm->num_mem_segs; i++)
- vm_free_mem_seg(vm, &vm->mem_segs[i]);
+ for (i = 0; i < VM_MAXCPU; i++)
+ vcpu_cleanup(vm, i, destroy);
- vm->num_mem_segs = 0;
+ VMCLEANUP(vm->cookie);
- for (i = 0; i < VM_MAXCPU; i++)
- vcpu_cleanup(vm, i);
+ if (destroy) {
+ for (i = 0; i < vm->num_mem_segs; i++)
+ vm_free_mem_seg(vm, &vm->mem_segs[i]);
- VMSPACE_FREE(vm->vmspace);
+ vm->num_mem_segs = 0;
- VMCLEANUP(vm->cookie);
+ VMSPACE_FREE(vm->vmspace);
+ vm->vmspace = NULL;
+ }
+}
+void
+vm_destroy(struct vm *vm)
+{
+ vm_cleanup(vm, true);
free(vm, M_VM);
}
+int
+vm_reinit(struct vm *vm)
+{
+ int error;
+
+ /*
+ * A virtual machine can be reset only if all vcpus are suspended.
+ */
+ if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) {
+ vm_cleanup(vm, false);
+ vm_init(vm, false);
+ error = 0;
+ } else {
+ error = EBUSY;
+ }
+
+ return (error);
+}
+
const char *
vm_name(struct vm *vm)
{
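vm_reinit() succeeds only when every vcpu in active_cpus is also in suspended_cpus, i.e. after each running vcpu has gone through the suspend path and reported VM_EXITCODE_SUSPENDED back to userland; otherwise it returns EBUSY. Below is a sketch of how a single-vcpu caller might order the operations, assuming the VM_SUSPEND_RESET and VM_EXITCODE_SUSPENDED definitions from <machine/vmm.h>; reset_vm() is a hypothetical helper, not part of this change:

#include <sys/param.h>

#include <machine/vmm.h>
#include <vmmapi.h>

/*
 * Hypothetical helper: drive a single active vcpu through a reset.
 * 'rip' is the vcpu's current instruction pointer.
 */
static int
reset_vm(struct vmctx *ctx, int vcpu, uint64_t rip)
{
	struct vm_exit vmexit;
	int error;

	/* Request the suspend; the vcpu notices it on its next entry. */
	error = vm_suspend(ctx, VM_SUSPEND_RESET);
	if (error != 0)
		return (error);

	/*
	 * Keep entering the guest until the vcpu reports the suspend,
	 * at which point the kernel has added it to 'suspended_cpus'.
	 */
	do {
		error = vm_run(ctx, vcpu, rip, &vmexit);
		if (error != 0)
			return (error);
		rip = vmexit.rip;
	} while (vmexit.exitcode != VM_EXITCODE_SUSPENDED);

	/* active_cpus now equals suspended_cpus, so this should not EBUSY. */
	return (vm_reinit(ctx));
}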
diff --git a/sys/amd64/vmm/vmm_dev.c b/sys/amd64/vmm/vmm_dev.c
index 824389f..f3e31a3 100644
--- a/sys/amd64/vmm/vmm_dev.c
+++ b/sys/amd64/vmm/vmm_dev.c
@@ -220,6 +220,7 @@ vmmdev_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag,
case VM_BIND_PPTDEV:
case VM_UNBIND_PPTDEV:
case VM_MAP_MEMORY:
+ case VM_REINIT:
/*
* ioctls that operate on the entire virtual machine must
* prevent all vcpus from running.
@@ -253,6 +254,9 @@ vmmdev_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag,
vmsuspend = (struct vm_suspend *)data;
error = vm_suspend(sc->vm, vmsuspend->how);
break;
+ case VM_REINIT:
+ error = vm_reinit(sc->vm);
+ break;
case VM_STAT_DESC: {
statdesc = (struct vm_stat_desc *)data;
error = vmm_stat_desc_copy(statdesc->index,
diff --git a/sys/amd64/vmm/vmm_stat.c b/sys/amd64/vmm/vmm_stat.c
index e3d6999..ef9f411 100644
--- a/sys/amd64/vmm/vmm_stat.c
+++ b/sys/amd64/vmm/vmm_stat.c
@@ -52,8 +52,10 @@ static struct vmm_stat_type *vsttab[MAX_VMM_STAT_ELEMS];
static MALLOC_DEFINE(M_VMM_STAT, "vmm stat", "vmm stat");
+#define vst_size ((size_t)vst_num_elems * sizeof(uint64_t))
+
void
-vmm_stat_init(void *arg)
+vmm_stat_register(void *arg)
{
struct vmm_stat_type *vst = arg;
@@ -97,11 +99,15 @@ vmm_stat_copy(struct vm *vm, int vcpu, int *num_stats, uint64_t *buf)
void *
vmm_stat_alloc(void)
{
- u_long size;
-
- size = vst_num_elems * sizeof(uint64_t);
- return (malloc(size, M_VMM_STAT, M_ZERO | M_WAITOK));
+ return (malloc(vst_size, M_VMM_STAT, M_WAITOK));
+}
+
+void
+vmm_stat_init(void *vp)
+{
+
+ bzero(vp, vst_size);
}
void
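Splitting vmm_stat_alloc() from the new vmm_stat_init() lets the per-vcpu counter block be allocated once, when the vcpu is created, and merely re-zeroed on every reinit; that is also why the M_ZERO flag was dropped from the malloc() call. A userland-style sketch of the same allocate-once/zero-on-reset pattern — the kernel code uses malloc(9) with M_VMM_STAT and bzero(), and NSTATS here is just a stand-in for the registered element count:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define	NSTATS		64	/* stand-in for vst_num_elems */
#define	STATS_SIZE	((size_t)NSTATS * sizeof(uint64_t))

/* Allocate the counter block once, when the vcpu is created. */
static void *
stats_alloc(void)
{
	return (malloc(STATS_SIZE));
}

/* Reset the counters; called at creation and again on every reinit. */
static void
stats_init(void *vp)
{
	memset(vp, 0, STATS_SIZE);
}

/* Free the block only when the vcpu is destroyed. */
static void
stats_free(void *vp)
{
	free(vp);
}

int
main(void)
{
	void *stats = stats_alloc();

	if (stats == NULL)
		return (1);
	stats_init(stats);	/* at vcpu creation ... */
	stats_init(stats);	/* ... and again on each reinit */
	stats_free(stats);	/* only at vcpu destruction */
	return (0);
}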
diff --git a/sys/amd64/vmm/vmm_stat.h b/sys/amd64/vmm/vmm_stat.h
index 9110c8f..6e98965 100644
--- a/sys/amd64/vmm/vmm_stat.h
+++ b/sys/amd64/vmm/vmm_stat.h
@@ -49,13 +49,13 @@ struct vmm_stat_type {
enum vmm_stat_scope scope;
};
-void vmm_stat_init(void *arg);
+void vmm_stat_register(void *arg);
#define VMM_STAT_DEFINE(type, nelems, desc, scope) \
struct vmm_stat_type type[1] = { \
{ -1, nelems, desc, scope } \
}; \
- SYSINIT(type##_stat, SI_SUB_KLD, SI_ORDER_ANY, vmm_stat_init, type)
+ SYSINIT(type##_stat, SI_SUB_KLD, SI_ORDER_ANY, vmm_stat_register, type)
#define VMM_STAT_DECLARE(type) \
extern struct vmm_stat_type type[1]
@@ -71,6 +71,7 @@ void vmm_stat_init(void *arg);
VMM_STAT_DEFINE(type, nelems, desc, VMM_STAT_SCOPE_ANY)
void *vmm_stat_alloc(void);
+void vmm_stat_init(void *vp);
void vmm_stat_free(void *vp);
/*
diff --git a/usr.sbin/bhyveload/bhyveload.c b/usr.sbin/bhyveload/bhyveload.c
index c1a5432..4442496 100644
--- a/usr.sbin/bhyveload/bhyveload.c
+++ b/usr.sbin/bhyveload/bhyveload.c
@@ -642,7 +642,7 @@ main(int argc, char** argv)
void *h;
void (*func)(struct loader_callbacks *, void *, int, int);
uint64_t mem_size;
- int opt, error;
+ int opt, error, need_reinit;
progname = basename(argv[0]);
@@ -691,11 +691,14 @@ main(int argc, char** argv)
vmname = argv[0];
+ need_reinit = 0;
error = vm_create(vmname);
- if (error != 0 && errno != EEXIST) {
- perror("vm_create");
- exit(1);
-
+ if (error) {
+ if (errno != EEXIST) {
+ perror("vm_create");
+ exit(1);
+ }
+ need_reinit = 1;
}
ctx = vm_open(vmname);
@@ -704,6 +707,14 @@ main(int argc, char** argv)
exit(1);
}
+ if (need_reinit) {
+ error = vm_reinit(ctx);
+ if (error) {
+ perror("vm_reinit");
+ exit(1);
+ }
+ }
+
error = vm_setup_memory(ctx, mem_size, VM_MMAP_ALL);
if (error) {
perror("vm_setup_memory");