author     Alexander Graf <agraf@suse.de>    2013-11-29 02:29:00 +0100
committer  Alexander Graf <agraf@suse.de>    2013-12-09 09:41:39 +0100
commit     40fdd8c88c4a5e9b26bfbed2215ac661f24aef07 (patch)
tree       89cbf59c364f0f310b28a0eb6a398eab1d388835
parent     c9dad7f9db4ed42de37d3f0ef2b2c0e10d5b6f92 (diff)
KVM: PPC: Book3S: PR: Make svcpu -> vcpu store preempt savvy
As soon as we get back to our "highmem" handler in virtual address
space we may get preempted. Today the reason we can get preempted is
that we replay interrupts, and all the lazy interrupt logic thinks we
have interrupts enabled.
However, it's not hard to make the code interruptible and that way
we can enable and handle interrupts even earlier.
This fixes random guest crashes that happened with CONFIG_PREEMPT=y
for me.
Signed-off-by: Alexander Graf <agraf@suse.de>
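
[Editorial note] To make the fix concrete, here is a minimal standalone
sketch (userspace C with illustrative names such as sync_from_shadow; not
kernel code) of the pattern the patch introduces: whichever path runs first
copies the shadow state back, and the in_use flag turns any later call into
a no-op. In the kernel, preempt_disable() keeps the check-and-copy atomic
against the preempt notifier; this sketch is single-threaded for clarity.

	#include <stdbool.h>
	#include <stdio.h>

	struct shadow { bool in_use; long gpr0; };
	struct vcpu_state { long gpr0; };

	static struct shadow sh;
	static struct vcpu_state vs;

	static void sync_from_shadow(void)
	{
		if (!sh.in_use)		/* already synced by the other path */
			return;
		vs.gpr0 = sh.gpr0;	/* copy shadow state back */
		sh.in_use = false;	/* make any later call a no-op */
	}

	int main(void)
	{
		sh.in_use = true;	/* set on guest entry (copy_to_svcpu) */
		sh.gpr0 = 42;

		sync_from_shadow();	/* e.g. the preempt notifier fires first */
		sh.gpr0 = -1;		/* shadow may be clobbered afterwards */
		sync_from_shadow();	/* exit path: in_use is false, no-op */

		printf("gpr0 = %ld\n", vs.gpr0);	/* prints 42, not -1 */
		return 0;
	}

Compiled with a plain cc invocation, it prints 42: the value captured by the
first sync, not the later clobber.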
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_asm.h |  1 +
-rw-r--r--  arch/powerpc/kvm/book3s_pr.c              | 22 ++++++++++++++++++++++
2 files changed, 23 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index 0bd9348..412b2f3 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -106,6 +106,7 @@ struct kvmppc_host_state {
 };
 
 struct kvmppc_book3s_shadow_vcpu {
+	bool in_use;
 	ulong gpr[14];
 	u32 cr;
 	u32 xer;
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index fe14ca3..5b9e906 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -66,6 +66,7 @@ static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
 	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
 	memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
 	svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
+	svcpu->in_use = 0;
 	svcpu_put(svcpu);
 #endif
 	vcpu->cpu = smp_processor_id();
@@ -78,6 +79,9 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
 {
 #ifdef CONFIG_PPC_BOOK3S_64
 	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	if (svcpu->in_use) {
+		kvmppc_copy_from_svcpu(vcpu, svcpu);
+	}
 	memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
 	to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
 	svcpu_put(svcpu);
@@ -110,12 +114,26 @@ void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
 	svcpu->ctr = vcpu->arch.ctr;
 	svcpu->lr = vcpu->arch.lr;
 	svcpu->pc = vcpu->arch.pc;
+	svcpu->in_use = true;
 }
 
 /* Copy data touched by real-mode code from shadow vcpu back to vcpu */
 void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
 			    struct kvmppc_book3s_shadow_vcpu *svcpu)
 {
+	/*
+	 * vcpu_put would just call us again because in_use hasn't
+	 * been updated yet.
+	 */
+	preempt_disable();
+
+	/*
+	 * Maybe we were already preempted and synced the svcpu from
+	 * our preempt notifiers. Don't bother touching this svcpu then.
+	 */
+	if (!svcpu->in_use)
+		goto out;
+
 	vcpu->arch.gpr[0] = svcpu->gpr[0];
 	vcpu->arch.gpr[1] = svcpu->gpr[1];
 	vcpu->arch.gpr[2] = svcpu->gpr[2];
@@ -139,6 +157,10 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
 	vcpu->arch.fault_dar = svcpu->fault_dar;
 	vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
 	vcpu->arch.last_inst = svcpu->last_inst;
+	svcpu->in_use = false;
+
+out:
+	preempt_enable();
 }
 
 static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
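
[Editorial note] A second hedged sketch, on the design choice of bracketing
the copy-back with preempt_disable()/preempt_enable(): between testing
in_use and clearing it, the preempt notifier could run, perform the same
copy, and the shadow area could then be reused. In this userspace stand-in
(illustrative names like copy_from_shadow; a pthread mutex substitutes for
disabling preemption), the flag test, copy, and clear form one critical
section, so whichever caller enters first wins and the other sees a no-op.

	#include <pthread.h>
	#include <stdbool.h>

	/* Stand-in for preempt_disable()/preempt_enable(). */
	static pthread_mutex_t no_preempt = PTHREAD_MUTEX_INITIALIZER;

	struct shadow { bool in_use; long pc; };
	struct vcpu_state { long pc; };

	/* Reachable from both the exit path and the "preempt notifier". */
	static void copy_from_shadow(struct vcpu_state *v, struct shadow *s)
	{
		pthread_mutex_lock(&no_preempt);	/* preempt_disable() */
		if (s->in_use) {		/* skip if the other path won */
			v->pc = s->pc;
			s->in_use = false;
		}
		pthread_mutex_unlock(&no_preempt);	/* preempt_enable() */
	}

	int main(void)
	{
		struct shadow s = { .in_use = true, .pc = 0x700 };
		struct vcpu_state v = { 0 };

		copy_from_shadow(&v, &s);	/* notifier path syncs */
		copy_from_shadow(&v, &s);	/* exit path: no-op */
		return v.pc == 0x700 ? 0 : 1;	/* exits 0 on success */
	}

Build with cc -pthread; the single-threaded main only demonstrates the
no-op second call, while the mutex marks where the kernel relies on
disabled preemption for exclusion.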