| author | Dave Hansen <dave@linux.vnet.ibm.com> | 2010-08-19 18:11:28 -0700 |
|---|---|---|
| committer | Avi Kivity <avi@redhat.com> | 2010-10-24 10:51:18 +0200 |
| commit | 49d5ca26636cb8feb05aff92fc4dba3e494ec683 (patch) | |
| tree | 180d5d2926e1282654c2c9438612aa567d9eb68f /arch/x86/kvm/mmu.c | |
| parent | 39de71ec5397f374aed95e99509372d605e1407c (diff) | |
KVM: replace x86 kvm n_free_mmu_pages with n_used_mmu_pages
Doing this makes the code much more readable; that's borne
out by the fact that this patch removes code. "used" also
happens to be the number we need to return to the slab code
when our shrinker gets called. Keeping this value, as opposed
to "free", makes the next patch simpler.
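To make the shrinker point concrete, here is a minimal sketch (illustrative C only; `struct mmu_counters` and both helpers are invented stand-ins for the kvm_arch fields, not kernel code):

```c
#include <stdio.h>

/* Illustrative stand-ins only; not the real kvm_arch definitions. */
struct mmu_counters {
	unsigned int n_max_mmu_pages;
	unsigned int n_used_mmu_pages;	/* replaces n_free_mmu_pages */
};

/* Old scheme: "used" had to be derived from "free" and clamped,
 * because on a fresh structure the subtraction could go negative. */
static int used_from_free(unsigned int n_max, unsigned int n_free)
{
	int used = (int)n_max - (int)n_free;

	return used > 0 ? used : 0;
}

/* New scheme: the number the slab shrinker wants is the counter itself. */
static unsigned int used_pages(const struct mmu_counters *c)
{
	return c->n_used_mmu_pages;
}

int main(void)
{
	struct mmu_counters c = { .n_max_mmu_pages = 64,
				  .n_used_mmu_pages = 40 };

	printf("derived: %d, direct: %u\n",
	       used_from_free(c.n_max_mmu_pages, 64 - 40 /* n_free */),
	       used_pages(&c));
	return 0;
}
```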
'struct kvm' is kzalloc()'d, and 'struct kvm_arch' is a
structure member (not a pointer) of 'struct kvm'. That means
both start out zeroed. I _think_ they then get initialized
properly by kvm_mmu_change_mmu_pages(), but that only happens
via kvm ioctls.
Another benefit of storing 'used' instead of 'free' is
that the value is consistent from the moment the structure is
allocated: there is never a negative "used" value.
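A minimal sketch of that initialization property (again illustrative; the zero initializer below models a kzalloc()'d 'struct kvm' with its embedded 'struct kvm_arch'):

```c
#include <assert.h>

/* Invented stand-in for the relevant kvm_arch counters. */
struct arch_model {
	unsigned int n_max_mmu_pages;
	unsigned int n_used_mmu_pages;
};

int main(void)
{
	/* kzalloc() semantics: everything starts at zero. */
	struct arch_model arch = { 0 };

	/* With "used" stored, the zeroed state is already meaningful:
	 * zero pages are in use, and allocation simply counts upward. */
	assert(arch.n_used_mmu_pages == 0);
	arch.n_used_mmu_pages++;	/* as in kvm_mmu_alloc_page() */
	arch.n_used_mmu_pages--;	/* as in kvm_mmu_free_page() */

	/* With "free" stored, this same zeroed state would underflow on
	 * the first allocation until kvm_mmu_change_mmu_pages() ran. */
	return 0;
}
```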
Signed-off-by: Dave Hansen <dave@linux.vnet.ibm.com>
Signed-off-by: Tim Pepper <lnxninja@linux.vnet.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/mmu.c')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | arch/x86/kvm/mmu.c | 27 |

1 file changed, 9 insertions(+), 18 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 6979e7d..ff39b85 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -980,7 +980,7 @@ static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 	if (!sp->role.direct)
 		__free_page(virt_to_page(sp->gfns));
 	kmem_cache_free(mmu_page_header_cache, sp);
-	++kvm->arch.n_free_mmu_pages;
+	--kvm->arch.n_used_mmu_pages;
 }
 
 static unsigned kvm_page_table_hashfn(gfn_t gfn)
@@ -1003,7 +1003,7 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
 	bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
 	sp->multimapped = 0;
 	sp->parent_pte = parent_pte;
-	--vcpu->kvm->arch.n_free_mmu_pages;
+	++vcpu->kvm->arch.n_used_mmu_pages;
 	return sp;
 }
 
@@ -1689,41 +1689,32 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 
 /*
  * Changing the number of mmu pages allocated to the vm
- * Note: if kvm_nr_mmu_pages is too small, you will get dead lock
+ * Note: if goal_nr_mmu_pages is too small, you will get dead lock
  */
-void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
+void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
 {
-	int used_pages;
 	LIST_HEAD(invalid_list);
-
-	used_pages = kvm->arch.n_max_mmu_pages - kvm_mmu_available_pages(kvm);
-	used_pages = max(0, used_pages);
-
 	/*
 	 * If we set the number of mmu pages to be smaller be than the
 	 * number of actived pages , we must to free some mmu pages before we
 	 * change the value
 	 */
 
-	if (used_pages > kvm_nr_mmu_pages) {
-		while (used_pages > kvm_nr_mmu_pages &&
+	if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
+		while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages &&
 			!list_empty(&kvm->arch.active_mmu_pages)) {
 			struct kvm_mmu_page *page;
 
 			page = container_of(kvm->arch.active_mmu_pages.prev,
 					    struct kvm_mmu_page, link);
-			used_pages -= kvm_mmu_prepare_zap_page(kvm, page,
+			kvm_mmu_prepare_zap_page(kvm, page,
 							       &invalid_list);
 		}
 		kvm_mmu_commit_zap_page(kvm, &invalid_list);
-		kvm_nr_mmu_pages = used_pages;
-		kvm->arch.n_free_mmu_pages = 0;
+		goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
 	}
-	else
-		kvm->arch.n_free_mmu_pages += kvm_nr_mmu_pages
-					 - kvm->arch.n_max_mmu_pages;
 
-	kvm->arch.n_max_mmu_pages = kvm_nr_mmu_pages;
+	kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;
 }
 
 static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
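For readers skimming the last hunk, a hedged, self-contained model of the new kvm_mmu_change_mmu_pages() shape (`struct vm_model` and `zap_oldest_page()` are invented stand-ins; in the real code, pages are zapped via kvm_mmu_prepare_zap_page()/kvm_mmu_commit_zap_page() and the used count drops when pages are actually freed):

```c
#include <stdio.h>

/* Invented stand-in for the relevant kvm_arch state. */
struct vm_model {
	unsigned int n_used_mmu_pages;
	unsigned int n_max_mmu_pages;
	unsigned int n_active;		/* pages on the active list */
};

/* Models zapping the oldest shadow page: freeing it is what
 * decrements the used count (cf. kvm_mmu_free_page()). */
static void zap_oldest_page(struct vm_model *vm)
{
	vm->n_active--;
	vm->n_used_mmu_pages--;
}

/* Simplified restatement of the new control flow. */
static void change_mmu_pages(struct vm_model *vm, unsigned int goal)
{
	if (vm->n_used_mmu_pages > goal) {
		/* Reclaim until the used count fits or nothing is left. */
		while (vm->n_used_mmu_pages > goal && vm->n_active > 0)
			zap_oldest_page(vm);
		/* Clamp the goal to what reclaim actually achieved. */
		goal = vm->n_used_mmu_pages;
	}
	/* No free-count fixup in either direction; just record the limit. */
	vm->n_max_mmu_pages = goal;
}

int main(void)
{
	struct vm_model vm = { .n_used_mmu_pages = 8, .n_active = 8 };

	change_mmu_pages(&vm, 3);
	printf("used=%u max=%u\n", vm.n_used_mmu_pages, vm.n_max_mmu_pages);
	return 0;
}
```

The design choice visible in both the hunk and the model: when shrinking, the goal is clamped to whatever reclaim achieved, which replaces the old two-sided bookkeeping of n_free_mmu_pages.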