author     Alexey Kardashevskiy <aik@ozlabs.ru>    2017-03-24 17:49:22 +1100
committer  Paul Mackerras <paulus@ozlabs.org>      2017-04-20 11:38:14 +1000
commit     96df2267695199b9377bd641c7eb68c393b81b0b (patch)
tree       45bfa5980f9b72b4635b2de84a267b90c49b074d /arch/powerpc
parent     bd9166ffe624000140fc6b606b256df01fc0d060 (diff)
KVM: PPC: Book3S PR: Preserve storage control bits
The PR KVM page fault handler performs eaddr-to-pte translation for a guest;
however, kvmppc_mmu_book3s_64_xlate() does not preserve the WIMG bits
(storage control) in the kvmppc_pte struct. If PR KVM is running as
a second-level guest under HV KVM and PR KVM tries to insert an HPT entry,
the insertion fails in HV KVM if HV KVM already has this mapping.

This patch preserves the WIMG bits between kvmppc_mmu_book3s_64_xlate() and
kvmppc_mmu_map_page().
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
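
As a rough illustration of the flow the commit message describes (this is a
simplified, self-contained sketch, not the kernel code itself; struct fake_pte,
xlate() and map_flags() are hypothetical stand-ins, while HPTE_R_WIMG matches
the kernel's 0x78 mask for the W, I, M and G bits), the WIMG field is captured
from the guest HPTE during translation and then folded back into the host HPTE
flags before insertion:

	/* wimg_sketch.c - minimal model of the WIMG-preservation idea */
	#include <stdio.h>

	#define HPTE_R_WIMG 0x0000000000000078UL  /* W, I, M, G bits in HPTE dword 1 */

	struct fake_pte {
		unsigned long wimg;  /* storage control bits taken from the guest HPTE */
	};

	/* xlate step: copy the WIMG bits out of the guest HPTE's second doubleword */
	static void xlate(struct fake_pte *pte, unsigned long guest_hpte_r)
	{
		pte->wimg = guest_hpte_r & HPTE_R_WIMG;
	}

	/* map step: replace whatever WIMG the host flags had with the guest's bits */
	static unsigned long map_flags(unsigned long rflags, const struct fake_pte *pte)
	{
		return (rflags & ~HPTE_R_WIMG) | pte->wimg;
	}

	int main(void)
	{
		struct fake_pte pte = { 0 };          /* zero-init, as the pr.c hunk does */
		unsigned long guest_r = 0x190UL | 0x28UL;  /* guest HPTE with I and G set */
		unsigned long rflags = 0;

		xlate(&pte, guest_r);
		rflags = map_flags(rflags, &pte);
		printf("host rflags WIMG = 0x%lx\n", rflags & HPTE_R_WIMG);
		return 0;
	}

Without the mask-and-or step, the host entry would be inserted with default
storage control bits, which is the mismatch the commit message refers to.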
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h    | 1
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu.c       | 1
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_host.c  | 2
-rw-r--r--  arch/powerpc/kvm/book3s_pr.c           | 2
4 files changed, 5 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 2acc691..0f3ac09 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -342,6 +342,7 @@ struct kvmppc_pte {
 	bool may_read		: 1;
 	bool may_write		: 1;
 	bool may_execute	: 1;
+	unsigned long wimg;
 	u8 page_size;		/* MMU_PAGE_xxx */
 };
 
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index 7015357..29ebe2f 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -319,6 +319,7 @@ do_second:
 	gpte->may_execute = true;
 	gpte->may_read = false;
 	gpte->may_write = false;
+	gpte->wimg = r & HPTE_R_WIMG;
 
 	switch (pp) {
 	case 0:
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index 4b4e927..145a618 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -145,6 +145,8 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
 	else
 		kvmppc_mmu_flush_icache(pfn);
 
+	rflags = (rflags & ~HPTE_R_WIMG) | orig_pte->wimg;
+
 	/*
 	 * Use 64K pages if possible; otherwise, on 64K page kernels,
 	 * we need to transfer 4 more bits from guest real to host real addr.
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index ce437b9..f026b06 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -537,7 +537,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	int r = RESUME_GUEST;
 	int relocated;
 	int page_found = 0;
-	struct kvmppc_pte pte;
+	struct kvmppc_pte pte = { 0 };
 	bool dr = (kvmppc_get_msr(vcpu) & MSR_DR) ? true : false;
 	bool ir = (kvmppc_get_msr(vcpu) & MSR_IR) ? true : false;
 	u64 vsid;