author    Avi Kivity <avi@qumranet.com>  2007-01-05 16:36:46 -0800
committer Linus Torvalds <torvalds@woody.osdl.org>  2007-01-05 23:55:25 -0800
commit    697fe2e24ac49f03a82f6cfe5d77f7a2122ff382 (patch)
tree      26de8b1535ea7af9124e85985b37069e7d1fd604
parent    a436036baf331703b4d2c8e8a45f02c597bf6913 (diff)
[PATCH] KVM: MMU: Implement child shadow unlinking
When removing a page table, we must maintain the parent_pte field of all
child shadow page tables.

Signed-off-by: Avi Kivity <avi@qumranet.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
 drivers/kvm/mmu.c | 42 ++++++++++++++++++++++++++++++++++++++----
 1 file changed, 38 insertions(+), 4 deletions(-)
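The first hunk below tightens mmu_page_remove_parent_pte(): when a parent pointer is removed from a pte_chain, the remaining entries are slid down, stopping at the first NULL, and a chain node left empty is unlinked and freed. Here is a minimal user-space sketch of that compaction logic; the fixed-size chain and names mirror the patch, but the kernel's hlist bookkeeping is reduced to a single heap pointer, so this is an illustration rather than the driver code:

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define NR_PTE_CHAIN_ENTRIES 4	/* same role as in the patch */

struct pte_chain {
	/* In-use prefix of the array; NULL-terminated. */
	unsigned long *parent_ptes[NR_PTE_CHAIN_ENTRIES];
};

/* Remove parent_pte from the chain; returns 1 if the chain became
 * empty and was freed (the patch then also clears page->multimapped
 * and page->parent_pte). */
static int chain_remove(struct pte_chain **chainp, unsigned long *parent_pte)
{
	struct pte_chain *chain = *chainp;
	int i;

	for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
		if (!chain->parent_ptes[i])
			break;
		if (chain->parent_ptes[i] != parent_pte)
			continue;
		/* Compact: slide later entries down, stopping at the
		 * first NULL -- the extra condition the patch adds. */
		while (i + 1 < NR_PTE_CHAIN_ENTRIES
		       && chain->parent_ptes[i + 1]) {
			chain->parent_ptes[i] = chain->parent_ptes[i + 1];
			++i;
		}
		chain->parent_ptes[i] = NULL;
		if (i == 0) {	/* entry 0 cleared: chain is now empty */
			free(chain);
			*chainp = NULL;
			return 1;
		}
		return 0;
	}
	assert(0 && "parent_pte not in chain");	/* kernel would BUG() */
	return 0;
}

int main(void)
{
	unsigned long a, b;
	struct pte_chain *chain = calloc(1, sizeof(*chain));

	chain->parent_ptes[0] = &a;
	chain->parent_ptes[1] = &b;
	printf("freed after removing a: %d\n", chain_remove(&chain, &a)); /* 0 */
	printf("freed after removing b: %d\n", chain_remove(&chain, &b)); /* 1 */
	return 0;
}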
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 1484b72..7e20dbf 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -402,12 +402,21 @@ static void mmu_page_remove_parent_pte(struct kvm_mmu_page *page,
 				break;
 			if (pte_chain->parent_ptes[i] != parent_pte)
 				continue;
-			while (i + 1 < NR_PTE_CHAIN_ENTRIES) {
+			while (i + 1 < NR_PTE_CHAIN_ENTRIES
+			       && pte_chain->parent_ptes[i + 1]) {
 				pte_chain->parent_ptes[i]
 					= pte_chain->parent_ptes[i + 1];
 				++i;
 			}
 			pte_chain->parent_ptes[i] = NULL;
+			if (i == 0) {
+				hlist_del(&pte_chain->link);
+				kfree(pte_chain);
+				if (hlist_empty(&page->parent_ptes)) {
+					page->multimapped = 0;
+					page->parent_pte = NULL;
+				}
+			}
 			return;
 		}
 	BUG();
@@ -481,7 +490,30 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 static void kvm_mmu_page_unlink_children(struct kvm_vcpu *vcpu,
 					  struct kvm_mmu_page *page)
 {
-	BUG();
+	unsigned i;
+	u64 *pt;
+	u64 ent;
+
+	pt = __va(page->page_hpa);
+
+	if (page->role.level == PT_PAGE_TABLE_LEVEL) {
+		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
+			if (pt[i] & PT_PRESENT_MASK)
+				rmap_remove(vcpu->kvm, &pt[i]);
+			pt[i] = 0;
+		}
+		return;
+	}
+
+	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
+		ent = pt[i];
+
+		pt[i] = 0;
+		if (!(ent & PT_PRESENT_MASK))
+			continue;
+		ent &= PT64_BASE_ADDR_MASK;
+		mmu_page_remove_parent_pte(page_header(ent), &pt[i]);
+	}
 }
 
 static void kvm_mmu_put_page(struct kvm_vcpu *vcpu,
@@ -489,8 +521,7 @@ static void kvm_mmu_put_page(struct kvm_vcpu *vcpu,
 			      u64 *parent_pte)
 {
 	mmu_page_remove_parent_pte(page, parent_pte);
-	if (page->role.level > PT_PAGE_TABLE_LEVEL)
-		kvm_mmu_page_unlink_children(vcpu, page);
+	kvm_mmu_page_unlink_children(vcpu, page);
 	hlist_del(&page->hash_link);
 	list_del(&page->link);
 	list_add(&page->link, &vcpu->free_pages);
@@ -511,6 +542,7 @@ static void kvm_mmu_zap_page(struct kvm_vcpu *vcpu,
 					     struct kvm_pte_chain, link);
 			parent_pte = chain->parent_ptes[0];
 		}
+		BUG_ON(!parent_pte);
 		kvm_mmu_put_page(vcpu, page, parent_pte);
 		*parent_pte = 0;
 	}
@@ -530,6 +562,8 @@ static int kvm_mmu_unprotect_page(struct kvm_vcpu *vcpu, gfn_t gfn)
 	bucket = &vcpu->kvm->mmu_page_hash[index];
 	hlist_for_each_entry_safe(page, node, n, bucket, hash_link)
 		if (page->gfn == gfn && !page->role.metaphysical) {
+			pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn,
+				 page->role.word);
 			kvm_mmu_zap_page(vcpu, page);
 			r = 1;
 		}
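For reference, the non-leaf loop in kvm_mmu_page_unlink_children() recovers each child table from its shadow entry by testing PT_PRESENT_MASK and masking with PT64_BASE_ADDR_MASK. Below is a stand-alone sketch of that bit arithmetic, with the mask values written out as the x86 long-mode layout this driver targets defines them; an illustration under those assumptions, not the kernel headers:

#include <stdint.h>
#include <stdio.h>

#define PT_PRESENT_MASK		(1ULL << 0)
#define PAGE_SIZE		4096ULL
#define PT64_BASE_ADDR_MASK	(((1ULL << 52) - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	/* A shadow entry: child table at 0x12345000, present + writable. */
	uint64_t ent = 0x12345000ULL | PT_PRESENT_MASK | (1ULL << 1);

	if (!(ent & PT_PRESENT_MASK))
		return 0;		/* not present: nothing to unlink */
	ent &= PT64_BASE_ADDR_MASK;	/* strip the attribute bits */
	printf("child table at %#llx\n", (unsigned long long)ent); /* 0x12345000 */
	return 0;
}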