| author | Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com> | 2012-06-20 15:57:15 +0800 |
| --- | --- | --- |
| committer | Avi Kivity <avi@redhat.com> | 2012-07-11 16:51:14 +0300 |
| commit | d13bc5b5a1f9eafd59331baa1d1d32e1867f57b5 | |
| tree | 93c7ff7441330358ec74672cc035c41f9cdb4ca8 /arch/x86/kvm | |
| parent | 2f84569f978cd7d54970d893b4c4e68ef24dc1ec | |
KVM: MMU: abstract spte write-protect
Introduce a common function to abstract spte write-protection and clean up the code.
Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm')
| -rw-r--r-- | arch/x86/kvm/mmu.c | 58 |
1 file changed, 31 insertions(+), 27 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 5dd2242..d04d630 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1050,36 +1050,48 @@ static void drop_spte(struct kvm *kvm, u64 *sptep)
 		rmap_remove(kvm, sptep);
 }
 
+/* Return true if the spte is dropped. */
+static bool spte_write_protect(struct kvm *kvm, u64 *sptep, bool *flush)
+{
+	u64 spte = *sptep;
+
+	if (!is_writable_pte(spte))
+		return false;
+
+	rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep);
+
+	*flush |= true;
+	if (is_large_pte(spte)) {
+		WARN_ON(page_header(__pa(sptep))->role.level ==
+			PT_PAGE_TABLE_LEVEL);
+		drop_spte(kvm, sptep);
+		--kvm->stat.lpages;
+		return true;
+	}
+
+	spte = spte & ~PT_WRITABLE_MASK;
+	mmu_spte_update(sptep, spte);
+	return false;
+}
+
 static bool __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp,
 				 int level)
 {
 	u64 *sptep;
 	struct rmap_iterator iter;
-	bool write_protected = false;
+	bool flush = false;
 
 	for (sptep = rmap_get_first(*rmapp, &iter); sptep;) {
 		BUG_ON(!(*sptep & PT_PRESENT_MASK));
-		rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep);
-
-		if (!is_writable_pte(*sptep)) {
-			sptep = rmap_get_next(&iter);
-			continue;
-		}
-
-		if (level == PT_PAGE_TABLE_LEVEL) {
-			mmu_spte_update(sptep, *sptep & ~PT_WRITABLE_MASK);
-			sptep = rmap_get_next(&iter);
-		} else {
-			BUG_ON(!is_large_pte(*sptep));
-			drop_spte(kvm, sptep);
-			--kvm->stat.lpages;
+		if (spte_write_protect(kvm, sptep, &flush)) {
 			sptep = rmap_get_first(*rmapp, &iter);
+			continue;
 		}
 
-		write_protected = true;
+		sptep = rmap_get_next(&iter);
 	}
 
-	return write_protected;
+	return flush;
 }
 
 /**
@@ -3886,6 +3898,7 @@ int kvm_mmu_setup(struct kvm_vcpu *vcpu)
 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
 {
 	struct kvm_mmu_page *sp;
+	bool flush = false;
 
 	list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
 		int i;
@@ -3900,16 +3913,7 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
 			      !is_last_spte(pt[i], sp->role.level))
 				continue;
 
-			if (is_large_pte(pt[i])) {
-				drop_spte(kvm, &pt[i]);
-				--kvm->stat.lpages;
-				continue;
-			}
-
-			/* avoid RMW */
-			if (is_writable_pte(pt[i]))
-				mmu_spte_update(&pt[i],
-						pt[i] & ~PT_WRITABLE_MASK);
+			spte_write_protect(kvm, &pt[i], &flush);
 		}
 	}
 	kvm_flush_remote_tlbs(kvm);
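The subtle point in this patch is the helper's return value: when spte_write_protect() drops a large-page spte via drop_spte(), any rmap iterator the caller holds is invalidated, so __rmap_write_protect() must restart from rmap_get_first(). The toy program below models just that contract; it is a minimal sketch, and every name in it (struct rmap, WRITABLE, LARGE, the array-backed list) is an illustrative stand-in, not the kernel's real code.

```c
#include <stdbool.h>
#include <stdio.h>

#define WRITABLE 0x1ULL
#define LARGE    0x2ULL

/* Illustrative stand-in for an rmap chain of sptes. */
struct rmap {
	unsigned long long sptes[8];
	int count;
};

/* Remove entry i by shifting the tail down (stands in for rmap_remove()). */
static void drop_spte(struct rmap *r, int i)
{
	for (int j = i; j < r->count - 1; j++)
		r->sptes[j] = r->sptes[j + 1];
	r->count--;
}

/*
 * Mirror of the patch's contract: write-protect one entry, accumulate
 * the "TLB flush needed" flag, and return true only if the entry was
 * dropped (which invalidates any iterator the caller holds).
 */
static bool spte_write_protect(struct rmap *r, int i, bool *flush)
{
	if (!(r->sptes[i] & WRITABLE))
		return false;

	*flush = true;
	if (r->sptes[i] & LARGE) {
		drop_spte(r, i);	/* large mappings are dropped outright */
		return true;
	}

	r->sptes[i] &= ~WRITABLE;	/* small mappings just lose the W bit */
	return false;
}

int main(void)
{
	struct rmap r = { { WRITABLE, WRITABLE | LARGE, 0 }, 3 };
	bool flush = false;

	/*
	 * Restart from the head whenever the helper reports a drop,
	 * the same way __rmap_write_protect() re-calls rmap_get_first().
	 */
	for (int i = 0; i < r.count; ) {
		if (spte_write_protect(&r, i, &flush)) {
			i = 0;
			continue;
		}
		i++;
	}

	printf("entries left: %d, flush needed: %s\n",
	       r.count, flush ? "yes" : "no");
	return 0;
}
```

Reporting "dropped" from the helper instead of restarting inside it keeps the iteration policy in the caller, which is what lets kvm_mmu_slot_remove_write_access() reuse the same helper from a plain indexed loop where dropping pt[i] invalidates nothing.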