 arch/x86/kvm/mmu.c | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index df465f6..db0fd55 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2286,7 +2286,11 @@ static int kvm_handle_bad_page(struct kvm *kvm, gfn_t gfn, pfn_t pfn)
 	return 1;
 }
 
-static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
+static bool try_async_pf(struct kvm_vcpu *vcpu, bool no_apf, gfn_t gfn,
+			 gva_t gva, pfn_t *pfn, bool write, bool *writable);
+
+static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn,
+			 bool no_apf)
 {
 	int r;
 	int level;
@@ -2307,7 +2311,9 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
 
 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
 	smp_rmb();
-	pfn = gfn_to_pfn_prot(vcpu->kvm, gfn, write, &map_writable);
+
+	if (try_async_pf(vcpu, no_apf, gfn, v, &pfn, write, &map_writable))
+		return 0;
 
 	/* mmio */
 	if (is_error_pfn(pfn))
@@ -2595,7 +2601,7 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
 	gfn = gva >> PAGE_SHIFT;
 
 	return nonpaging_map(vcpu, gva & PAGE_MASK,
-			     error_code & PFERR_WRITE_MASK, gfn);
+			     error_code & PFERR_WRITE_MASK, gfn, no_apf);
 }
 
 static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
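
The hunks above only forward-declare try_async_pf; its definition is added further down in mmu.c and is not shown here. For orientation, below is a minimal sketch of what such a helper has to do, reconstructed from its call site in nonpaging_map() and from kvm_arch_setup_async_pf() appearing in the final hunk. gfn_to_pfn_async() and can_do_async_pf() are assumed helper names, not confirmed by this diff; gfn_to_pfn_prot() is the call the patch removes from nonpaging_map().

/*
 * Sketch only: inferred from the call site, not the actual body
 * added by this patch. gfn_to_pfn_async() and can_do_async_pf()
 * are assumed helpers.
 */
static bool try_async_pf(struct kvm_vcpu *vcpu, bool no_apf, gfn_t gfn,
			 gva_t gva, pfn_t *pfn, bool write, bool *writable)
{
	bool async;

	/* Non-blocking lookup first; async is set if it would sleep. */
	*pfn = gfn_to_pfn_async(vcpu->kvm, gfn, &async, write, writable);
	if (!async)
		return false;	/* *pfn is valid, fault path continues */

	/* Page is swapped out: queue an async page fault if allowed. */
	if (!no_apf && can_do_async_pf(vcpu) &&
	    kvm_arch_setup_async_pf(vcpu, gva, gfn))
		return true;	/* caller returns 0 without mapping */

	/* Fall back to the synchronous (possibly sleeping) lookup. */
	*pfn = gfn_to_pfn_prot(vcpu->kvm, gfn, write, writable);
	return false;
}

When the helper returns true, nonpaging_map() returns 0 without installing a mapping: the fault is parked as an async page fault and the vcpu is not blocked in a synchronous gfn-to-pfn lookup while the host swaps the page in. The no_apf flag threaded through nonpaging_page_fault() lets callers force the old synchronous behavior.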