author     Paul Mackerras <paulus@samba.org>    2013-09-20 14:52:53 +1000
committer  Alexander Graf <agraf@suse.de>       2013-10-17 14:49:36 +0200
commit     adc0bafe00f4c7e5f052c9f29e75a072e03a19fc (patch)
tree       fa596a8a865cd6a4468bf90abd8654d890d1ee4b
parent     d78bca72961ae816181b386ff6b347419dfcd5cf (diff)
KVM: PPC: Book3S PR: Mark pages accessed, and dirty if being written
The mark_page_dirty() function, despite what its name might suggest, doesn't
actually mark the page as dirty as far as the MM subsystem is concerned. It
merely sets a bit in KVM's map of dirty pages, if userspace has requested
dirty tracking for the relevant memslot. To tell the MM subsystem that the
page is dirty, we have to call kvm_set_pfn_dirty() (or an equivalent such as
SetPageDirty()).

This adds a call to kvm_set_pfn_dirty(), and while we are here, also adds a
call to kvm_set_pfn_accessed() to tell the MM subsystem that the page has
been accessed. Since we are now using the pfn in several places, this adds a
'pfn' variable to store it and changes the places that used
hpaddr >> PAGE_SHIFT to use pfn instead, which is the same thing.

This also changes a use of HPTE_R_PP to PP_RXRX. Both are 3, but PP_RXRX is
more informative as being the read-only page permission bit setting.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
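For context, the pattern the commit message describes can be sketched with the
generic KVM helpers as below. This is an illustration only, not the Book3S PR
fault path itself: kvm_example_map() and its arguments are hypothetical, and
the real code goes through the PPC-specific kvmppc_gfn_to_pfn() wrapper rather
than calling gfn_to_pfn_prot() directly.

#include <linux/kvm_host.h>

/*
 * Hypothetical helper showing the dirty/accessed bookkeeping this commit
 * adds: mark_page_dirty() only sets a bit in KVM's dirty-log bitmap, so the
 * MM subsystem has to be told separately via kvm_set_pfn_dirty(), and
 * kvm_set_pfn_accessed() records that the page was referenced at all.
 */
static int kvm_example_map(struct kvm *kvm, gfn_t gfn, bool iswrite)
{
	unsigned long pfn;
	bool writable;

	/* Translate the guest frame number to a host page frame number. */
	pfn = gfn_to_pfn_prot(kvm, gfn, iswrite, &writable);
	if (is_error_noslot_pfn(pfn))
		return -EINVAL;

	/* The guest has touched the page: tell the MM subsystem. */
	kvm_set_pfn_accessed(pfn);

	if (iswrite && writable) {
		/* Set the bit in KVM's dirty log for userspace ... */
		mark_page_dirty(kvm, gfn);
		/* ... and mark the struct page dirty for the MM subsystem. */
		kvm_set_pfn_dirty(pfn);
	}

	/* ... install the guest->host mapping (e.g. an HPTE) here ... */

	kvm_release_pfn_clean(pfn);
	return 0;
}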
 arch/powerpc/kvm/book3s_64_mmu_host.c | 26 +++++++++++++++-----------
 1 file changed, 15 insertions(+), 11 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index 307e6e8..e2ab8a7 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -96,20 +96,21 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
unsigned long mmu_seq;
struct kvm *kvm = vcpu->kvm;
struct hpte_cache *cpte;
+ unsigned long gfn = orig_pte->raddr >> PAGE_SHIFT;
+ unsigned long pfn;
/* used to check for invalidations in progress */
mmu_seq = kvm->mmu_notifier_seq;
smp_rmb();
/* Get host physical address for gpa */
- hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT,
- iswrite, &writable);
- if (is_error_noslot_pfn(hpaddr)) {
- printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", orig_pte->eaddr);
+ pfn = kvmppc_gfn_to_pfn(vcpu, gfn, iswrite, &writable);
+ if (is_error_noslot_pfn(pfn)) {
+ printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", gfn);
r = -EINVAL;
goto out;
}
- hpaddr <<= PAGE_SHIFT;
+ hpaddr = pfn << PAGE_SHIFT;
/* and write the mapping ea -> hpa into the pt */
vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
@@ -129,15 +130,18 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
vpn = hpt_vpn(orig_pte->eaddr, map->host_vsid, MMU_SEGSIZE_256M);
+ kvm_set_pfn_accessed(pfn);
if (!orig_pte->may_write || !writable)
- rflags |= HPTE_R_PP;
- else
- mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
+ rflags |= PP_RXRX;
+ else {
+ mark_page_dirty(vcpu->kvm, gfn);
+ kvm_set_pfn_dirty(pfn);
+ }
if (!orig_pte->may_execute)
rflags |= HPTE_R_N;
else
- kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT);
+ kvmppc_mmu_flush_icache(pfn);
/*
* Use 64K pages if possible; otherwise, on 64K page kernels,
@@ -191,7 +195,7 @@ map_again:
cpte->slot = hpteg + (ret & 7);
cpte->host_vpn = vpn;
cpte->pte = *orig_pte;
- cpte->pfn = hpaddr >> PAGE_SHIFT;
+ cpte->pfn = pfn;
cpte->pagesize = hpsize;
kvmppc_mmu_hpte_cache_map(vcpu, cpte);
@@ -200,7 +204,7 @@ map_again:
out_unlock:
spin_unlock(&kvm->mmu_lock);
- kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT);
+ kvm_release_pfn_clean(pfn);
if (cpte)
kvmppc_mmu_hpte_cache_free(cpte);