authorXiao Guangrong <guangrong.xiao@linux.intel.com>2016-02-24 17:51:12 +0800
committerPaolo Bonzini <pbonzini@redhat.com>2016-03-03 14:36:22 +0100
commite5691a81e830c12d396b3f219ab999be87a1208f (patch)
treeb916b2799982fa81d5f01dda0f48360263691f61 /arch/x86/kvm
parent3d0c27ad6ee465f174b09ee99fcaf189c57d567a (diff)
KVM: MMU: clear write-flooding on the fast path of tracked page
If the page fault is caused by a write access to a write-tracked page, the real shadow page walk is skipped, so we lose the chance to clear write flooding for the shadow page structure the current vcpu is using. Fix it by locklessly walking the shadow page table outside of mmu-lock and clearing write flooding on the shadow page structures found there. Since the count can now be cleared without holding mmu-lock, change it to an atomic_t.

Signed-off-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
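For context, here is a minimal user-space sketch of the pattern the patch relies on. It is illustrative only: the struct, function names and threshold below stand in for kvm_mmu_page and its write_flooding_count and are not the kernel API. The write-emulation path increments the counter (under mmu-lock in the kernel), while the tracked-page fault path may now reset it with no lock held at all, which is why the plain integer is replaced by an atomic type.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for struct kvm_mmu_page: only the flooding counter. */
struct shadow_page {
	atomic_int write_flooding_count;
};

/* Write-emulation side (holds mmu-lock in the kernel): bump the counter and
 * report flooding once the same shadow page has been written a few times. */
static bool detect_write_flooding(struct shadow_page *sp)
{
	atomic_fetch_add(&sp->write_flooding_count, 1);
	return atomic_load(&sp->write_flooding_count) >= 3;
}

/* Fault side (lockless after this patch): the guest is still using the page
 * table, so reset the counter.  The atomic type is what makes this store
 * safe against a concurrent increment from the write-emulation side. */
static void clear_write_flooding(struct shadow_page *sp)
{
	atomic_store(&sp->write_flooding_count, 0);
}

int main(void)
{
	struct shadow_page sp;

	atomic_init(&sp.write_flooding_count, 0);
	detect_write_flooding(&sp);		/* count = 1 */
	detect_write_flooding(&sp);		/* count = 2 */
	clear_write_flooding(&sp);		/* lockless reset: count = 0 */
	printf("flooding after reset: %d\n", detect_write_flooding(&sp));
	return 0;
}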
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--	arch/x86/kvm/mmu.c	22
-rw-r--r--	arch/x86/kvm/paging_tmpl.h	4
2 files changed, 23 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index dd8e3ca..58c067d 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2063,7 +2063,7 @@ static void mmu_sync_children(struct kvm_vcpu *vcpu,
 static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp)
 {
-	sp->write_flooding_count = 0;
+	atomic_set(&sp->write_flooding_count, 0);
 }
 
 static void clear_sp_write_flooding_count(u64 *spte)
@@ -3406,6 +3406,23 @@ static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
 	return false;
 }
 
+static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
+{
+	struct kvm_shadow_walk_iterator iterator;
+	u64 spte;
+
+	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+		return;
+
+	walk_shadow_page_lockless_begin(vcpu);
+	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
+		clear_sp_write_flooding_count(iterator.sptep);
+		if (!is_shadow_present_pte(spte))
+			break;
+	}
+	walk_shadow_page_lockless_end(vcpu);
+}
+
 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
 				u32 error_code, bool prefault)
 {
@@ -4221,7 +4238,8 @@ static bool detect_write_flooding(struct kvm_mmu_page *sp)
 	if (sp->role.level == PT_PAGE_TABLE_LEVEL)
 		return false;
 
-	return ++sp->write_flooding_count >= 3;
+	atomic_inc(&sp->write_flooding_count);
+	return atomic_read(&sp->write_flooding_count) >= 3;
 }
/*
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 52ae2d9..4174cf2 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -728,8 +728,10 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 		return 0;
 	}
 
-	if (page_fault_handle_page_track(vcpu, error_code, walker.gfn))
+	if (page_fault_handle_page_track(vcpu, error_code, walker.gfn)) {
+		shadow_page_table_clear_flood(vcpu, addr);
 		return 1;
+	}
 
 	vcpu->arch.write_fault_to_shadow_pgtable = false;