author	Avi Kivity <avi@qumranet.com>	2008-08-22 19:28:04 +0300
committer	Avi Kivity <avi@qumranet.com>	2008-10-15 10:15:23 +0200
commit	140754bc80e1cdbf2d14cdb10d900da1f7718e7b (patch)
tree	0b23b396c28da5b01b6693546dca9416a88ded54 /arch/x86/kvm/mmu.c
parent	3d000db5688c8beff6319fb9ff4b98dcac32f798 (diff)
KVM: MMU: Convert direct maps to use the generic shadow walker
Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--	arch/x86/kvm/mmu.c	93
1 file changed, 55 insertions(+), 38 deletions(-)
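
Before the diff itself, a note on the pattern the hunk adopts: the open-coded page-table descent in __direct_map() is replaced by the generic shadow walker, where the caller embeds a struct kvm_shadow_walk inside its own context struct (here direct_shadow_walk), points the .entry hook at a per-level callback, and hands both to walk_shadow(). The stand-alone sketch below illustrates only that callback-plus-container_of shape; the type names, the outer_of() macro, and the fixed four-level loop are invented for the sketch and are not KVM code.

/*
 * Minimal sketch of the "walker + per-level callback" pattern, with
 * hypothetical types standing in for kvm_shadow_walk/direct_shadow_walk.
 * A "walk" here just visits levels 4..1 and lets the embedded callback
 * decide when to stop.
 */
#include <stddef.h>
#include <stdio.h>

struct shadow_walk {				/* stand-in for kvm_shadow_walk */
	int (*entry)(struct shadow_walk *walk, unsigned long addr, int level);
};

struct direct_walk {				/* stand-in for direct_shadow_walk */
	struct shadow_walk walker;		/* embedded, so the callback can recover us */
	int target_level;			/* per-walk state the callback needs */
	int visited;
};

/* offsetof-based recovery of the outer struct, as container_of() does. */
#define outer_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static int direct_entry(struct shadow_walk *w, unsigned long addr, int level)
{
	struct direct_walk *dw = outer_of(w, struct direct_walk, walker);

	dw->visited++;
	printf("level %d, addr 0x%lx\n", level, addr);
	return level == dw->target_level;	/* non-zero stops the walk */
}

/* Generic driver: call the entry hook per level until it returns non-zero. */
static int walk_shadow_sketch(struct shadow_walk *w, unsigned long addr)
{
	int level, r;

	for (level = 4; level >= 1; level--) {
		r = w->entry(w, addr, level);
		if (r)
			return r;
	}
	return 0;
}

int main(void)
{
	struct direct_walk dw = {
		.walker = { .entry = direct_entry },
		.target_level = 1,
	};

	walk_shadow_sketch(&dw.walker, 0x1234000UL);
	printf("visited %d level(s)\n", dw.visited);
	return 0;
}

The return convention mirrors the patch: the callback returns 0 to keep walking and non-zero to stop (1 for "installed the leaf", a negative errno on failure). Results such as pt_write travel back through the embedding struct, which is why the new __direct_map() returns walker.pt_write once walk_shadow() succeeds.
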
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 8b95cf7..a1ca4ff 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1260,49 +1260,66 @@ static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
 {
 }
 
-static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
-			int largepage, gfn_t gfn, pfn_t pfn)
-{
-	hpa_t table_addr = vcpu->arch.mmu.root_hpa;
-	int pt_write = 0;
-	int level = vcpu->arch.mmu.shadow_root_level;
-
-	for (; ; level--) {
-		u32 index = PT64_INDEX(v, level);
-		u64 *table;
+struct direct_shadow_walk {
+	struct kvm_shadow_walk walker;
+	pfn_t pfn;
+	int write;
+	int largepage;
+	int pt_write;
+};
 
-		ASSERT(VALID_PAGE(table_addr));
-		table = __va(table_addr);
+static int direct_map_entry(struct kvm_shadow_walk *_walk,
+			    struct kvm_vcpu *vcpu,
+			    gva_t addr, u64 *sptep, int level)
+{
+	struct direct_shadow_walk *walk =
+		container_of(_walk, struct direct_shadow_walk, walker);
+	struct kvm_mmu_page *sp;
+	gfn_t pseudo_gfn;
+	gfn_t gfn = addr >> PAGE_SHIFT;
+
+	if (level == PT_PAGE_TABLE_LEVEL
+	    || (walk->largepage && level == PT_DIRECTORY_LEVEL)) {
+		mmu_set_spte(vcpu, sptep, ACC_ALL, ACC_ALL,
+			     0, walk->write, 1, &walk->pt_write,
+			     walk->largepage, gfn, walk->pfn, false);
+		return 1;
+	}
 
-		if (level == 1 || (largepage && level == 2)) {
-			mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL,
-				     0, write, 1, &pt_write, largepage,
-				     gfn, pfn, false);
-			return pt_write;
+	if (*sptep == shadow_trap_nonpresent_pte) {
+		pseudo_gfn = (addr & PT64_DIR_BASE_ADDR_MASK) >> PAGE_SHIFT;
+		sp = kvm_mmu_get_page(vcpu, pseudo_gfn, addr, level - 1,
+				      1, ACC_ALL, sptep);
+		if (!sp) {
+			pgprintk("nonpaging_map: ENOMEM\n");
+			kvm_release_pfn_clean(walk->pfn);
+			return -ENOMEM;
 		}
 
-		if (table[index] == shadow_trap_nonpresent_pte) {
-			struct kvm_mmu_page *new_table;
-			gfn_t pseudo_gfn;
-
-			pseudo_gfn = (v & PT64_DIR_BASE_ADDR_MASK)
-				>> PAGE_SHIFT;
-			new_table = kvm_mmu_get_page(vcpu, pseudo_gfn,
-						     v, level - 1,
-						     1, ACC_ALL, &table[index]);
-			if (!new_table) {
-				pgprintk("nonpaging_map: ENOMEM\n");
-				kvm_release_pfn_clean(pfn);
-				return -ENOMEM;
-			}
-
-			set_shadow_pte(&table[index],
-				       __pa(new_table->spt)
-				       | PT_PRESENT_MASK | PT_WRITABLE_MASK
-				       | shadow_user_mask | shadow_x_mask);
-		}
-		table_addr = table[index] & PT64_BASE_ADDR_MASK;
+		set_shadow_pte(sptep,
+			       __pa(sp->spt)
+			       | PT_PRESENT_MASK | PT_WRITABLE_MASK
+			       | shadow_user_mask | shadow_x_mask);
 	}
+	return 0;
+}
+
+static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
+			int largepage, gfn_t gfn, pfn_t pfn)
+{
+	int r;
+	struct direct_shadow_walk walker = {
+		.walker = { .entry = direct_map_entry, },
+		.pfn = pfn,
+		.largepage = largepage,
+		.write = write,
+		.pt_write = 0,
+	};
+
+	r = walk_shadow(&walker.walker, vcpu, (gva_t)gfn << PAGE_SHIFT);
+	if (r < 0)
+		return r;
+	return walker.pt_write;
 }
 
 static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
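
A small readability change rides along with the conversion: the leaf test in direct_map_entry() uses symbolic level names instead of the old raw numbers. Assuming the customary KVM values (PT_PAGE_TABLE_LEVEL as 1, PT_DIRECTORY_LEVEL as 2; an assumption worth confirming against this tree's mmu headers), the old and new conditions are equivalent, as this small stand-alone check illustrates:

#include <assert.h>

/* Assumed constants for illustration only; confirm against the tree. */
#define PT_PAGE_TABLE_LEVEL	1
#define PT_DIRECTORY_LEVEL	2

/* Leaf test as written in the removed __direct_map() loop. */
static int is_leaf_old(int level, int largepage)
{
	return level == 1 || (largepage && level == 2);
}

/* Leaf test as written in the new direct_map_entry() callback. */
static int is_leaf_new(int level, int largepage)
{
	return level == PT_PAGE_TABLE_LEVEL
	       || (largepage && level == PT_DIRECTORY_LEVEL);
}

int main(void)
{
	for (int level = 1; level <= 4; level++)
		for (int largepage = 0; largepage <= 1; largepage++)
			assert(is_leaf_old(level, largepage) ==
			       is_leaf_new(level, largepage));
	return 0;
}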