author     Suresh Warrier <warrier@linux.vnet.ibm.com>  2015-12-17 14:59:07 -0600
committer  Paul Mackerras <paulus@samba.org>            2016-02-29 16:25:06 +1100
commit     b8e6a87c82927ed9ccf0f3ee42946a41cb9d75fe (patch)
tree       76ef8e4179831811a90c2be3b9550cd3cc99b689 /arch/powerpc
parent     79b6c247e9afe35714c1f83cfcecf40a438ca4a4 (diff)
KVM: PPC: Book3S HV: Manage core host state
Update the core host state in kvmppc_host_rm_ops whenever the primary
thread of the core enters the guest or returns from it.

Signed-off-by: Suresh Warrier <warrier@linux.vnet.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
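As a rough illustration of the pattern this patch adds, a per-core in_host
flag that the primary thread clears before guest entry and sets again after
guest exit, here is a stand-alone C sketch of the bookkeeping. The structure
layout, NR_CORES, THREADS_SHIFT and the helper names are simplified
assumptions for illustration only, not the kernel's definitions, and the
sketch omits the memory-barrier considerations discussed in the patch
comments.

#include <stdio.h>

/* Simplified stand-ins for the kernel structures; assumptions for illustration only. */
#define NR_CORES      4
#define THREADS_SHIFT 3           /* assume 8 hardware threads per core, as on POWER8 */

struct rm_core_state {
	int in_host;              /* 1 while the core runs host code, 0 while in the guest */
};

static struct rm_core_state rm_core[NR_CORES];

/* Thread 0 of a core is its primary thread (mimics cpu_thread_in_core(cpu) == 0). */
static int is_primary_thread(int cpu)
{
	return (cpu & ((1 << THREADS_SHIFT) - 1)) == 0;
}

/* Mark the core as having left the host; called before starting guest threads. */
static void clear_host_core(int cpu)
{
	if (!is_primary_thread(cpu))
		return;
	rm_core[cpu >> THREADS_SHIFT].in_host = 0;
}

/* Mark the core as back in the host; called after the guest threads have exited. */
static void set_host_core(int cpu)
{
	if (!is_primary_thread(cpu))
		return;
	rm_core[cpu >> THREADS_SHIFT].in_host = 1;
}

int main(void)
{
	int cpu = 8;              /* primary thread of core 1 under these assumptions */

	rm_core[cpu >> THREADS_SHIFT].in_host = 1;
	clear_host_core(cpu);     /* analogous to the call made before starting the threads */
	printf("while in guest: in_host = %d\n", rm_core[cpu >> THREADS_SHIFT].in_host);
	set_host_core(cpu);       /* analogous to the call made after guest exit */
	printf("back in host:   in_host = %d\n", rm_core[cpu >> THREADS_SHIFT].in_host);
	return 0;
}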
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c | 44
1 file changed, 44 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 8b3332f..542ec97 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -2303,6 +2303,46 @@ static void post_guest_process(struct kvmppc_vcore *vc, bool is_master)
}
/*
+ * Clear the core from the list of active host cores as we are about to
+ * enter the guest. Only do this if it is the primary thread of the
+ * core (not merely of a subcore) that is entering the guest.
+ */
+static inline void kvmppc_clear_host_core(int cpu)
+{
+	int core;
+
+	if (!kvmppc_host_rm_ops_hv || cpu_thread_in_core(cpu))
+		return;
+	/*
+	 * A memory barrier can be omitted here because we do a smp_wmb()
+	 * later in kvmppc_start_thread, and the state only needs to be
+	 * visible to other CPUs once we have entered the guest.
+	 */
+	core = cpu >> threads_shift;
+	kvmppc_host_rm_ops_hv->rm_core[core].rm_state.in_host = 0;
+}
+
+/*
+ * Advertise this core as an active host core since we have exited the
+ * guest. This only needs to be done by the primary thread of the core
+ * that is exiting.
+ */
+static inline void kvmppc_set_host_core(int cpu)
+{
+	int core;
+
+	if (!kvmppc_host_rm_ops_hv || cpu_thread_in_core(cpu))
+		return;
+
+	/*
+	 * A memory barrier can be omitted here because we do a spin_unlock
+	 * immediately after this, which provides the memory barrier.
+	 */
+	core = cpu >> threads_shift;
+	kvmppc_host_rm_ops_hv->rm_core[core].rm_state.in_host = 1;
+}
+
+/*
* Run a set of guest threads on a physical core.
* Called with vc->lock held.
*/
@@ -2414,6 +2454,8 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
}
}
+	kvmppc_clear_host_core(pcpu);
+
/* Start all the threads */
active = 0;
for (sub = 0; sub < core_info.n_subcores; ++sub) {
@@ -2510,6 +2552,8 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
kvmppc_ipi_thread(pcpu + i);
}
+	kvmppc_set_host_core(pcpu);
+
spin_unlock(&vc->lock);
/* make sure updates to secondary vcpu structs are visible now */