author     Alexander Graf <agraf@suse.de>    2012-08-10 13:23:55 +0200
committer  Alexander Graf <agraf@suse.de>    2012-10-05 23:38:43 +0200
commit     9b0cb3c808fef0d75d6f79ab9684246e6879f9c1 (patch)
tree       dc42b6ff5c6e1d9adf853934b51d52c143a14e60
parent     03d25c5bd5c3125055bd36f4813ddb817def19dd (diff)
KVM: PPC: Book3s: PR: Add (dumb) MMU Notifier support
Now that we have very simple MMU Notifier support for e500 in place, also
add the same simple support to book3s. It gets us one step closer to
actual fast support.

Signed-off-by: Alexander Graf <agraf@suse.de>
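The idea behind the "dumb" support is that every MMU notifier callback that
invalidates a host mapping simply drops the whole shadow MMU, so no stale
guest translation can outlive the host-side change. Below is a minimal
sketch of that strategy, condensed from the book3s_pr.c hunk in this patch
(kvm_unmap_hva(), kvm_set_spte_hva() and kvm_flush_remote_tlbs() are the
real KVM symbols; the bodies are simplified for illustration, not part of
the commit):

/*
 * Illustrative sketch only: any host-side invalidation throws away all
 * shadow TLB entries instead of hunting down the single affected entry.
 */
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	/* Slow, but guaranteed to catch the page being unmapped. */
	kvm_flush_remote_tlbs(kvm);
	return 0;
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	/* The page is simply re-mapped on its next guest fault. */
	kvm_unmap_hva(kvm, hva);
}

The cost is a full shadow-MMU flush on every notifier event; that is why
the commit message calls this "one step closer to actual fast support":
fine-grained invalidation can be added later behind the same hooks.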
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h     3
-rw-r--r--  arch/powerpc/kvm/Kconfig                1
-rw-r--r--  arch/powerpc/kvm/book3s_32_mmu_host.c   1
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_host.c   1
-rw-r--r--  arch/powerpc/kvm/book3s_mmu_hpte.c      5
-rw-r--r--  arch/powerpc/kvm/book3s_pr.c           47
6 files changed, 51 insertions(+), 7 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index cea9d3a..4a5ec8f 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -46,8 +46,7 @@
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
#endif
-#if defined(CONFIG_KVM_BOOK3S_64_HV) || defined(CONFIG_KVM_E500V2) || \
- defined(CONFIG_KVM_E500MC)
+#if !defined(CONFIG_KVM_440)
#include <linux/mmu_notifier.h>
#define KVM_ARCH_WANT_MMU_NOTIFIER
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index 40cad8c..71f0cd9 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -36,6 +36,7 @@ config KVM_BOOK3S_64_HANDLER
config KVM_BOOK3S_PR
bool
select KVM_MMIO
+ select MMU_NOTIFIER
config KVM_BOOK3S_32
tristate "KVM support for PowerPC book3s_32 processors"
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c
index 837f13e..9fac010 100644
--- a/arch/powerpc/kvm/book3s_32_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -254,6 +254,7 @@ next_pteg:
kvmppc_mmu_hpte_cache_map(vcpu, pte);
+ kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT);
out:
return r;
}
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index 0688b6b..6b2c80e 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -168,6 +168,7 @@ map_again:
kvmppc_mmu_hpte_cache_map(vcpu, pte);
}
+ kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT);
out:
return r;
diff --git a/arch/powerpc/kvm/book3s_mmu_hpte.c b/arch/powerpc/kvm/book3s_mmu_hpte.c
index 41cb001..2c86b0d 100644
--- a/arch/powerpc/kvm/book3s_mmu_hpte.c
+++ b/arch/powerpc/kvm/book3s_mmu_hpte.c
@@ -114,11 +114,6 @@ static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
hlist_del_init_rcu(&pte->list_vpte);
hlist_del_init_rcu(&pte->list_vpte_long);
- if (pte->pte.may_write)
- kvm_release_pfn_dirty(pte->pfn);
- else
- kvm_release_pfn_clean(pte->pfn);
-
spin_unlock(&vcpu3s->mmu_lock);
vcpu3s->hpte_cache_count--;
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index cae2def..10f8217 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -90,8 +90,55 @@ void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
void kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
+ /* We misuse TLB_FLUSH to indicate that we want to clear
+ all shadow cache entries */
+ if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
+ kvmppc_mmu_pte_flush(vcpu, 0, 0);
}
+/************* MMU Notifiers *************/
+
+int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
+{
+ trace_kvm_unmap_hva(hva);
+
+ /*
+ * Flush all shadow tlb entries everywhere. This is slow, but
+ * we are 100% sure that we catch the page to be unmapped
+ */
+ kvm_flush_remote_tlbs(kvm);
+
+ return 0;
+}
+
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+{
+ /* kvm_unmap_hva flushes everything anyways */
+ kvm_unmap_hva(kvm, start);
+
+ return 0;
+}
+
+int kvm_age_hva(struct kvm *kvm, unsigned long hva)
+{
+ /* XXX could be more clever ;) */
+ return 0;
+}
+
+int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+{
+ /* XXX could be more clever ;) */
+ return 0;
+}
+
+void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+{
+ /* The page will get remapped properly on its next fault */
+ kvm_unmap_hva(kvm, hva);
+}
+
+/*****************************************/
+
static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
{
ulong smsr = vcpu->arch.shared->msr;