author:    grehan <grehan@FreeBSD.org>  2014-08-17 01:23:52 +0000
committer: grehan <grehan@FreeBSD.org>  2014-08-17 01:23:52 +0000
commit:    46d28d66fb3e9d4bfd5383e9b2ffa437115af6e6 (patch)
tree:      3eccc4cc10658396ede1ae036b060d6802d92bbc /sys/amd64
parent:    c00f011a6ecb134c74a4cf17b0cae12586601eb4 (diff)
MFC r267311, r267330, r267811, r267884
Turn on interrupt window exiting unconditionally when an ExtINT is being
injected into the guest.

Add helper functions to populate VM exit information for rendezvous and
astpending exits.

Provide APIs to get the 'lowmem' and 'highmem' sizes directly.

Expose the amount of resident and wired memory from the guest's vmspace.
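(The 'lowmem'/'highmem' accessors live in libvmmapi rather than under
sys/amd64, so they do not appear in the diffstat below. A minimal consumer
sketch, assuming the vm_get_lowmem_size()/vm_get_highmem_size() signatures
exposed through <vmmapi.h>:

#include <sys/types.h>
#include <stdint.h>
#include <stdio.h>
#include <vmmapi.h>

/* Sketch: report the guest memory split for an existing VM. */
static int
report_memsize(const char *vmname)
{
	struct vmctx *ctx;

	ctx = vm_open(vmname);		/* assumed libvmmapi entry point */
	if (ctx == NULL)
		return (-1);
	printf("lowmem:  %ju bytes\n", (uintmax_t)vm_get_lowmem_size(ctx));
	printf("highmem: %ju bytes\n", (uintmax_t)vm_get_highmem_size(ctx));
	return (0);
}
)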
Diffstat (limited to 'sys/amd64')
-rw-r--r--  sys/amd64/include/vmm.h    |  2
-rw-r--r--  sys/amd64/vmm/intel/vmx.c  | 48
-rw-r--r--  sys/amd64/vmm/vmm.c        | 57
-rw-r--r--  sys/amd64/vmm/vmm_stat.c   | 13
-rw-r--r--  sys/amd64/vmm/vmm_stat.h   | 39
5 files changed, 120 insertions(+), 39 deletions(-)
diff --git a/sys/amd64/include/vmm.h b/sys/amd64/include/vmm.h
index 00e1d96..86f696d 100644
--- a/sys/amd64/include/vmm.h
+++ b/sys/amd64/include/vmm.h
@@ -146,6 +146,8 @@ cpuset_t vm_active_cpus(struct vm *vm);
cpuset_t vm_suspended_cpus(struct vm *vm);
struct vm_exit *vm_exitinfo(struct vm *vm, int vcpuid);
void vm_exit_suspended(struct vm *vm, int vcpuid, uint64_t rip);
+void vm_exit_rendezvous(struct vm *vm, int vcpuid, uint64_t rip);
+void vm_exit_astpending(struct vm *vm, int vcpuid, uint64_t rip);
/*
* Rendezvous all vcpus specified in 'dest' and execute 'func(arg)'.
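(These two helpers replace open-coded vm_exit setup in the CPU-specific
backends; the vmx.c hunks below adopt exactly this shape, condensed here as a
sketch with illustrative local names:

/* In a backend's vcpu run loop, before resuming the guest: */
if (vcpu_rendezvous_pending(rendezvous_cookie)) {
	enable_intr();
	vm_exit_rendezvous(vm, vcpuid, guest_rip); /* VM_EXITCODE_RENDEZVOUS */
	break;					   /* left UNHANDLED for the caller */
}
if (curthread->td_flags & (TDF_ASTPENDING | TDF_NEEDRESCHED)) {
	enable_intr();
	vm_exit_astpending(vm, vcpuid, guest_rip); /* VM_EXITCODE_BOGUS */
	handled = HANDLED;			   /* consumed in-kernel */
	break;
}
)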
diff --git a/sys/amd64/vmm/intel/vmx.c b/sys/amd64/vmm/intel/vmx.c
index 7390ccc..2cbb159 100644
--- a/sys/amd64/vmm/intel/vmx.c
+++ b/sys/amd64/vmm/intel/vmx.c
@@ -1327,9 +1327,13 @@ vmx_inject_interrupts(struct vmx *vmx, int vcpu, struct vlapic *vlapic)
* have posted another one. If that is the case, set
* the Interrupt Window Exiting execution control so
* we can inject that one too.
+ *
+ * Also, interrupt window exiting allows us to inject any
+ * pending APIC vector that was preempted by the ExtINT
+ * as soon as possible. This applies both for the software
+ * emulated vlapic and the hardware assisted virtual APIC.
*/
- if (vm_extint_pending(vmx->vm, vcpu))
- vmx_set_int_window_exiting(vmx, vcpu);
+ vmx_set_int_window_exiting(vmx, vcpu);
}
VCPU_CTR1(vmx->vm, vcpu, "Injecting hwintr at vector %d", vector);
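(For context, vmx_set_int_window_exiting() is unchanged by this commit; it
just sets the interrupt-window exiting bit in the primary processor-based
VM-execution controls so the guest exits as soon as it can accept another
interrupt. Roughly, as a hedged reconstruction rather than the verbatim
source:

static void __inline
vmx_set_int_window_exiting(struct vmx *vmx, int vcpu)
{

	if ((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) == 0) {
		vmx->cap[vcpu].proc_ctls |= PROCBASED_INT_WINDOW_EXITING;
		vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
	}
}
)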
@@ -2275,32 +2279,7 @@ vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
return (handled);
}
-static __inline int
-vmx_exit_astpending(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
-{
-
- vmexit->rip = vmcs_guest_rip();
- vmexit->inst_length = 0;
- vmexit->exitcode = VM_EXITCODE_BOGUS;
- vmx_astpending_trace(vmx, vcpu, vmexit->rip);
- vmm_stat_incr(vmx->vm, vcpu, VMEXIT_ASTPENDING, 1);
-
- return (HANDLED);
-}
-
-static __inline int
-vmx_exit_rendezvous(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
-{
-
- vmexit->rip = vmcs_guest_rip();
- vmexit->inst_length = 0;
- vmexit->exitcode = VM_EXITCODE_RENDEZVOUS;
- vmm_stat_incr(vmx->vm, vcpu, VMEXIT_RENDEZVOUS, 1);
-
- return (UNHANDLED);
-}
-
-static __inline int
+static __inline void
vmx_exit_inst_error(struct vmxctx *vmxctx, int rc, struct vm_exit *vmexit)
{
@@ -2324,8 +2303,6 @@ vmx_exit_inst_error(struct vmxctx *vmxctx, int rc, struct vm_exit *vmexit)
default:
panic("vm_exit_inst_error: vmx_enter_guest returned %d", rc);
}
-
- return (UNHANDLED);
}
/*
@@ -2398,6 +2375,8 @@ vmx_run(void *arg, int vcpu, register_t startrip, pmap_t pmap,
vmcs_write(VMCS_GUEST_RIP, startrip);
vmx_set_pcpu_defaults(vmx, vcpu, pmap);
do {
+ handled = UNHANDLED;
+
/*
* Interrupts are disabled from this point on until the
* guest starts executing. This is done for the following
@@ -2420,19 +2399,20 @@ vmx_run(void *arg, int vcpu, register_t startrip, pmap_t pmap,
if (vcpu_suspended(suspend_cookie)) {
enable_intr();
vm_exit_suspended(vmx->vm, vcpu, vmcs_guest_rip());
- handled = UNHANDLED;
break;
}
if (vcpu_rendezvous_pending(rendezvous_cookie)) {
enable_intr();
- handled = vmx_exit_rendezvous(vmx, vcpu, vmexit);
+ vm_exit_rendezvous(vmx->vm, vcpu, vmcs_guest_rip());
break;
}
if (curthread->td_flags & (TDF_ASTPENDING | TDF_NEEDRESCHED)) {
enable_intr();
- handled = vmx_exit_astpending(vmx, vcpu, vmexit);
+ vm_exit_astpending(vmx->vm, vcpu, vmcs_guest_rip());
+ vmx_astpending_trace(vmx, vcpu, vmexit->rip);
+ handled = HANDLED;
break;
}
@@ -2452,7 +2432,7 @@ vmx_run(void *arg, int vcpu, register_t startrip, pmap_t pmap,
handled = vmx_exit_process(vmx, vcpu, vmexit);
} else {
enable_intr();
- handled = vmx_exit_inst_error(vmxctx, rc, vmexit);
+ vmx_exit_inst_error(vmxctx, rc, vmexit);
}
launched = 1;
vmx_exit_trace(vmx, vcpu, rip, exit_reason, handled);
diff --git a/sys/amd64/vmm/vmm.c b/sys/amd64/vmm/vmm.c
index 435ba39..c2a9fd1 100644
--- a/sys/amd64/vmm/vmm.c
+++ b/sys/amd64/vmm/vmm.c
@@ -1331,6 +1331,32 @@ vm_exit_suspended(struct vm *vm, int vcpuid, uint64_t rip)
vmexit->u.suspended.how = vm->suspend;
}
+void
+vm_exit_rendezvous(struct vm *vm, int vcpuid, uint64_t rip)
+{
+ struct vm_exit *vmexit;
+
+ KASSERT(vm->rendezvous_func != NULL, ("rendezvous not in progress"));
+
+ vmexit = vm_exitinfo(vm, vcpuid);
+ vmexit->rip = rip;
+ vmexit->inst_length = 0;
+ vmexit->exitcode = VM_EXITCODE_RENDEZVOUS;
+ vmm_stat_incr(vm, vcpuid, VMEXIT_RENDEZVOUS, 1);
+}
+
+void
+vm_exit_astpending(struct vm *vm, int vcpuid, uint64_t rip)
+{
+ struct vm_exit *vmexit;
+
+ vmexit = vm_exitinfo(vm, vcpuid);
+ vmexit->rip = rip;
+ vmexit->inst_length = 0;
+ vmexit->exitcode = VM_EXITCODE_BOGUS;
+ vmm_stat_incr(vm, vcpuid, VMEXIT_ASTPENDING, 1);
+}
+
int
vm_run(struct vm *vm, struct vm_run *vmrun)
{
@@ -1966,3 +1992,34 @@ vm_segment_name(int seg)
("%s: invalid segment encoding %d", __func__, seg));
return (seg_names[seg]);
}
+
+
+/*
+ * Return the amount of in-use and wired memory for the VM. Since
+ * these are global stats, only return the values for vCPU 0.
+ */
+VMM_STAT_DECLARE(VMM_MEM_RESIDENT);
+VMM_STAT_DECLARE(VMM_MEM_WIRED);
+
+static void
+vm_get_rescnt(struct vm *vm, int vcpu, struct vmm_stat_type *stat)
+{
+
+ if (vcpu == 0) {
+ vmm_stat_set(vm, vcpu, VMM_MEM_RESIDENT,
+ PAGE_SIZE * vmspace_resident_count(vm->vmspace));
+ }
+}
+
+static void
+vm_get_wiredcnt(struct vm *vm, int vcpu, struct vmm_stat_type *stat)
+{
+
+ if (vcpu == 0) {
+ vmm_stat_set(vm, vcpu, VMM_MEM_WIRED,
+ PAGE_SIZE * pmap_wired_count(vmspace_pmap(vm->vmspace)));
+ }
+}
+
+VMM_STAT_FUNC(VMM_MEM_RESIDENT, "Resident memory", vm_get_rescnt);
+VMM_STAT_FUNC(VMM_MEM_WIRED, "Wired memory", vm_get_wiredcnt);
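(Because the new counters are refreshed from vmm_stat_copy() — see the
vmm_stat.c hunk below — any consumer of the stats ioctl sees current values.
A sketch of a userland reader, assuming libvmmapi's
vm_get_stats()/vm_get_stat_desc() interface, which is roughly what
bhyvectl's stats output uses:

#include <sys/time.h>
#include <stdint.h>
#include <stdio.h>
#include <vmmapi.h>

static void
dump_vcpu0_stats(struct vmctx *ctx)
{
	struct timeval tv;
	uint64_t *stats;
	int i, num;

	/* vCPU 0 carries the VM-wide resident/wired counters. */
	stats = vm_get_stats(ctx, 0, &tv, &num);
	if (stats == NULL)
		return;
	for (i = 0; i < num; i++)
		printf("%-48s %ju\n", vm_get_stat_desc(ctx, i),
		    (uintmax_t)stats[i]);
}
)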
diff --git a/sys/amd64/vmm/vmm_stat.c b/sys/amd64/vmm/vmm_stat.c
index ef9f411..9ecf9af 100644
--- a/sys/amd64/vmm/vmm_stat.c
+++ b/sys/amd64/vmm/vmm_stat.c
@@ -83,12 +83,21 @@ vmm_stat_register(void *arg)
int
vmm_stat_copy(struct vm *vm, int vcpu, int *num_stats, uint64_t *buf)
{
- int i;
+ struct vmm_stat_type *vst;
uint64_t *stats;
+ int i;
if (vcpu < 0 || vcpu >= VM_MAXCPU)
return (EINVAL);
-
+
+ /* Let stats functions update their counters */
+ for (i = 0; i < vst_num_types; i++) {
+ vst = vsttab[i];
+ if (vst->func != NULL)
+ (*vst->func)(vm, vcpu, vst);
+ }
+
+ /* Copy over the stats */
stats = vcpu_stats(vm, vcpu);
for (i = 0; i < vst_num_elems; i++)
buf[i] = stats[i];
diff --git a/sys/amd64/vmm/vmm_stat.h b/sys/amd64/vmm/vmm_stat.h
index 6e98965..1640ba3 100644
--- a/sys/amd64/vmm/vmm_stat.h
+++ b/sys/amd64/vmm/vmm_stat.h
@@ -42,21 +42,29 @@ enum vmm_stat_scope {
VMM_STAT_SCOPE_AMD, /* AMD SVM specific statistic */
};
+struct vmm_stat_type;
+typedef void (*vmm_stat_func_t)(struct vm *vm, int vcpu,
+ struct vmm_stat_type *stat);
+
struct vmm_stat_type {
int index; /* position in the stats buffer */
int nelems; /* standalone or array */
const char *desc; /* description of statistic */
+ vmm_stat_func_t func;
enum vmm_stat_scope scope;
};
void vmm_stat_register(void *arg);
-#define VMM_STAT_DEFINE(type, nelems, desc, scope) \
+#define VMM_STAT_FDEFINE(type, nelems, desc, func, scope) \
struct vmm_stat_type type[1] = { \
- { -1, nelems, desc, scope } \
+ { -1, nelems, desc, func, scope } \
}; \
SYSINIT(type##_stat, SI_SUB_KLD, SI_ORDER_ANY, vmm_stat_register, type)
+#define VMM_STAT_DEFINE(type, nelems, desc, scope) \
+ VMM_STAT_FDEFINE(type, nelems, desc, NULL, scope)
+
#define VMM_STAT_DECLARE(type) \
extern struct vmm_stat_type type[1]
@@ -67,6 +75,9 @@ void vmm_stat_register(void *arg);
#define VMM_STAT_AMD(type, desc) \
VMM_STAT_DEFINE(type, 1, desc, VMM_STAT_SCOPE_AMD)
+#define VMM_STAT_FUNC(type, desc, func) \
+ VMM_STAT_FDEFINE(type, 1, desc, func, VMM_STAT_SCOPE_ANY)
+
#define VMM_STAT_ARRAY(type, nelems, desc) \
VMM_STAT_DEFINE(type, nelems, desc, VMM_STAT_SCOPE_ANY)
@@ -93,9 +104,22 @@ vmm_stat_array_incr(struct vm *vm, int vcpu, struct vmm_stat_type *vst,
stats[vst->index + statidx] += x;
#endif
}
-
static void __inline
+vmm_stat_array_set(struct vm *vm, int vcpu, struct vmm_stat_type *vst,
+ int statidx, uint64_t val)
+{
+#ifdef VMM_KEEP_STATS
+ uint64_t *stats;
+
+ stats = vcpu_stats(vm, vcpu);
+
+ if (vst->index >= 0 && statidx < vst->nelems)
+ stats[vst->index + statidx] = val;
+#endif
+}
+
+static void __inline
vmm_stat_incr(struct vm *vm, int vcpu, struct vmm_stat_type *vst, uint64_t x)
{
@@ -104,6 +128,15 @@ vmm_stat_incr(struct vm *vm, int vcpu, struct vmm_stat_type *vst, uint64_t x)
#endif
}
+static void __inline
+vmm_stat_set(struct vm *vm, int vcpu, struct vmm_stat_type *vst, uint64_t val)
+{
+
+#ifdef VMM_KEEP_STATS
+ vmm_stat_array_set(vm, vcpu, vst, 0, val);
+#endif
+}
+
VMM_STAT_DECLARE(VCPU_MIGRATIONS);
VMM_STAT_DECLARE(VMEXIT_COUNT);
VMM_STAT_DECLARE(VMEXIT_EXTINT);
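(As a usage note, defining a new callback-backed statistic follows the same
pattern as the memory counters added to vmm.c above; a hypothetical example,
where the stat name and the value helper are illustrative only:

/* Hypothetical: a VM-wide value recomputed on every stats copyout. */
static void
vm_get_example(struct vm *vm, int vcpu, struct vmm_stat_type *stat)
{

	if (vcpu == 0)	/* global stat: publish via vCPU 0 only */
		vmm_stat_set(vm, vcpu, stat, compute_example_value(vm));
}

VMM_STAT_FUNC(VMM_EXAMPLE, "Example derived stat", vm_get_example);
)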