author    | Avi Kivity <avi@qumranet.com> | 2007-11-21 02:06:21 +0200
committer | Avi Kivity <avi@qumranet.com> | 2008-01-30 17:53:10 +0200
commit    | 79539cec0c3c38d35a1e3e5310d2c562ae6e82b8 (patch)
tree      | 7bcb93c2a3a26a1541a77f108915e43479dc3ebd /drivers/kvm/mmu.c
parent    | 0f74a24c59b814c1c8085251cbea48d339f0c7c6 (diff)
KVM: MMU: Avoid unnecessary remote tlb flushes when guest updates a pte
If all we're doing is increasing permissions on a pte (typical for demand
paging), then there's no need to flush remote tlbs. Worst case, they'll
get a spurious page fault.
Signed-off-by: Avi Kivity <avi@qumranet.com>
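The logic the patch adds is captured by need_remote_flush() in the diff below: a remote flush is only forced when an existing translation loses a permission bit or changes its target frame. The following userspace sketch replays that check outside the kernel; the bit positions, the PT64_BASE_ADDR_MASK value, and the plain present-bit test standing in for is_shadow_present_pte() are illustrative assumptions, not the kernel's definitions.

/*
 * Minimal userspace sketch of the need_remote_flush() check below.
 * ASSUMED: x86-style bit positions and a simple present-bit test in
 * place of the kernel's is_shadow_present_pte().
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PT_PRESENT_MASK     (1ULL << 0)
#define PT_WRITABLE_MASK    (1ULL << 1)
#define PT_USER_MASK        (1ULL << 2)
#define PT64_NX_MASK        (1ULL << 63)
#define PT64_BASE_ADDR_MASK 0x000ffffffffff000ULL /* assumed: bits 12-51 */

#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
                        | PT64_NX_MASK)

static bool need_remote_flush(uint64_t old, uint64_t new)
{
        if (!(old & PT_PRESENT_MASK))   /* nothing was cached: no flush */
                return false;
        if (!(new & PT_PRESENT_MASK))   /* mapping torn down: must flush */
                return true;
        if ((old ^ new) & PT64_BASE_ADDR_MASK)  /* frame changed: flush */
                return true;
        /* Flip NX so that, like the other bits, set means more permissive. */
        old ^= PT64_NX_MASK;
        new ^= PT64_NX_MASK;
        /* Flush only if a permission present in old is absent from new. */
        return (old & ~new & PT64_PERM_MASK) != 0;
}

int main(void)
{
        uint64_t ro = PT_PRESENT_MASK | 0x1000;    /* read-only mapping */
        uint64_t rw = ro | PT_WRITABLE_MASK;       /* same frame, +write */

        printf("ro -> rw: %d\n", need_remote_flush(ro, rw));  /* 0: skip */
        printf("rw -> ro: %d\n", need_remote_flush(rw, ro));  /* 1: flush */
        return 0;
}

Run against a demand-paging transition (read-only to writable) it reports that no remote flush is needed, while the reverse, permission-revoking transition demands one: exactly the asymmetry the commit message describes.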
Diffstat (limited to 'drivers/kvm/mmu.c')
-rw-r--r-- | drivers/kvm/mmu.c | 27
1 file changed, 26 insertions(+), 1 deletion(-)
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 101cd537..281dd5f 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -134,6 +134,8 @@ static int dbg = 1;
 #define PT32_DIR_BASE_ADDR_MASK \
 	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
 
+#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
+			| PT64_NX_MASK)
 
 #define PFERR_PRESENT_MASK (1U << 0)
 #define PFERR_WRITE_MASK (1U << 1)
@@ -1227,7 +1229,6 @@ static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
 		}
 	}
 	set_shadow_pte(spte, shadow_trap_nonpresent_pte);
-	kvm_flush_remote_tlbs(vcpu->kvm);
 }
 
 static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
@@ -1250,6 +1251,27 @@ static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
 				  offset_in_pte);
 }
 
+static bool need_remote_flush(u64 old, u64 new)
+{
+	if (!is_shadow_present_pte(old))
+		return false;
+	if (!is_shadow_present_pte(new))
+		return true;
+	if ((old ^ new) & PT64_BASE_ADDR_MASK)
+		return true;
+	old ^= PT64_NX_MASK;
+	new ^= PT64_NX_MASK;
+	return (old & ~new & PT64_PERM_MASK) != 0;
+}
+
+static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, u64 old, u64 new)
+{
+	if (need_remote_flush(old, new))
+		kvm_flush_remote_tlbs(vcpu->kvm);
+	else
+		kvm_mmu_flush_tlb(vcpu);
+}
+
 static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
 {
 	u64 *spte = vcpu->last_pte_updated;
@@ -1265,6 +1287,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	struct hlist_node *node, *n;
 	struct hlist_head *bucket;
 	unsigned index;
+	u64 entry;
 	u64 *spte;
 	unsigned offset = offset_in_page(gpa);
 	unsigned pte_size;
@@ -1335,9 +1358,11 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 		}
 		spte = &page->spt[page_offset / sizeof(*spte)];
 		while (npte--) {
+			entry = *spte;
 			mmu_pte_write_zap_pte(vcpu, page, spte);
 			mmu_pte_write_new_pte(vcpu, page, spte, new, bytes,
 					      page_offset & (pte_size - 1));
+			mmu_pte_write_flush_tlb(vcpu, entry, *spte);
 			++spte;
 		}
 	}
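One subtlety in need_remote_flush() above is the pair of XORs: NX is the only bit where set means less permissive, so both ptes have their NX bit flipped before the old & ~new permission comparison. A small check of that arithmetic, again assuming bit 63 for NX:

#include <stdint.h>
#include <stdio.h>

#define NX (1ULL << 63) /* assumed NX bit position */

int main(void)
{
        uint64_t old = 0;   /* NX clear: page was executable */
        uint64_t new = NX;  /* NX set: execute permission revoked */

        /* Raw comparison misses the revocation (prints 0)... */
        printf("raw:     %d\n", (int)((old & ~new & NX) != 0));

        /* ...flipping NX first makes the lost permission visible (prints 1). */
        old ^= NX;
        new ^= NX;
        printf("flipped: %d\n", (int)((old & ~new & NX) != 0));
        return 0;
}

Note also that the else branch of mmu_pte_write_flush_tlb() still flushes the local TLB with kvm_mmu_flush_tlb(); only the cross-vcpu flush is skipped, so the current vcpu always observes the updated pte.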