author	Wanpeng Li <wanpeng.li@linux.intel.com>	2015-04-15 10:24:54 +0800
committer	Paolo Bonzini <pbonzini@redhat.com>	2015-04-15 17:01:01 +0200
commit	130005231c9f2090b1b177e2cca9841b562c1784 (patch)
tree	3a08b71fe1577f3848af20f47381ae661d664243	/arch/x86/kvm/mmu.c
parent	95fce4fa0850da8f85ecf8d069ab3fc6c8bc1478 (diff)
kvm: mmu: don't do memslot overflow check
As Andres pointed out:

| I don't understand the value of this check here. Are we looking for a
| broken memslot? Shouldn't this be a BUG_ON? Is this the place to care
| about these things? npages is capped to KVM_MEM_MAX_NR_PAGES, i.e.
| 2^31. A 64 bit overflow would be caused by a gigantic gfn_start which
| would be trouble in many other ways.

This patch drops the memslot overflow check to keep the code simpler.

Reviewed-by: Andres Lagar-Cavilla <andreslc@google.com>
Signed-off-by: Wanpeng Li <wanpeng.li@linux.intel.com>
Message-Id: <1429064694-3072-1-git-send-email-wanpeng.li@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
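To make the quoted argument concrete: a gfn_t is a 64-bit frame number and a slot's npages is bounded by KVM_MEM_MAX_NR_PAGES, so base_gfn + npages - 1 can only wrap (and thus trip the removed gfn_start >= gfn_end check) if base_gfn already sits within roughly 2^31 frames of the top of the 64-bit space, which no sane memslot ever does. Below is a minimal standalone sketch of that bound; it is userspace code, not kernel code, and the type and constant are redefined locally to mirror the kernel definitions of the time rather than included from the headers.

/*
 * Standalone userspace sketch, not kernel code. gfn_t and
 * KVM_MEM_MAX_NR_PAGES are redefined here to mirror the kernel's
 * definitions of this era (kvm_types.h / kvm_host.h).
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)

int main(void)
{
	gfn_t base_gfn = 0x100000;             /* hypothetical slot starting at guest 4 GiB */
	unsigned long npages = KVM_MEM_MAX_NR_PAGES;  /* worst-case slot size */

	gfn_t gfn_start = base_gfn;
	gfn_t gfn_end = base_gfn + npages - 1;

	/* The removed check could only fire on 64-bit wrap-around. */
	assert(gfn_start < gfn_end);
	printf("slot covers gfn %#llx .. %#llx\n",
	       (unsigned long long)gfn_start, (unsigned long long)gfn_end);
	return 0;
}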
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--	arch/x86/kvm/mmu.c	12
1 file changed, 2 insertions(+), 10 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 146f295..07bb221 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4504,19 +4504,12 @@ void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
 	bool flush = false;
 	unsigned long *rmapp;
 	unsigned long last_index, index;
-	gfn_t gfn_start, gfn_end;
 
 	spin_lock(&kvm->mmu_lock);
 
-	gfn_start = memslot->base_gfn;
-	gfn_end = memslot->base_gfn + memslot->npages - 1;
-
-	if (gfn_start >= gfn_end)
-		goto out;
-
 	rmapp = memslot->arch.rmap[0];
-	last_index = gfn_to_index(gfn_end, memslot->base_gfn,
-				PT_PAGE_TABLE_LEVEL);
+	last_index = gfn_to_index(memslot->base_gfn + memslot->npages - 1,
+				memslot->base_gfn, PT_PAGE_TABLE_LEVEL);
 
 	for (index = 0; index <= last_index; ++index, ++rmapp) {
 		if (*rmapp)
@@ -4534,7 +4527,6 @@ void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
 	if (flush)
 		kvm_flush_remote_tlbs(kvm);
 
-out:
 	spin_unlock(&kvm->mmu_lock);
 }
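Why the new call computes the same last_index: gfn_to_index() subtracts the level-shifted base_gfn from the level-shifted gfn, and at PT_PAGE_TABLE_LEVEL (4 KiB pages) that shift is zero, so whether gfn_end is precomputed or the expression is passed inline, the result is simply npages - 1. The sketch below is a userspace model of that arithmetic, with the shift value assumed rather than taken from the kernel headers.

/*
 * Userspace model of gfn_to_index(); the real helper lives in the x86
 * KVM headers. A page shift of 0 models the PT_PAGE_TABLE_LEVEL case.
 */
#include <assert.h>
#include <stdint.h>

typedef uint64_t gfn_t;

static gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, unsigned int shift)
{
	return (gfn >> shift) - (base_gfn >> shift);
}

int main(void)
{
	gfn_t base_gfn = 0x100000;    /* hypothetical slot base */
	unsigned long npages = 512;
	gfn_t gfn_end = base_gfn + npages - 1;

	/* Old form (precomputed gfn_end) and new form (inline expression)
	 * agree, and both equal npages - 1 at the 4 KiB level. */
	assert(gfn_to_index(gfn_end, base_gfn, 0) ==
	       gfn_to_index(base_gfn + npages - 1, base_gfn, 0));
	assert(gfn_to_index(gfn_end, base_gfn, 0) == npages - 1);
	return 0;
}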