| field | value | date |
|---|---|---|
| author | Jes Sorensen <jes@sgi.com> | 2009-04-16 10:43:48 +0200 |
| committer | Avi Kivity <avi@redhat.com> | 2009-06-10 11:48:43 +0300 |
| commit | c6b60c6921381130e5288b19f5fdf81152230b37 (patch) | |
| tree | d2d69c726c2acd4ef83dc463adc2379ef896aac4 /arch/ia64 | |
| parent | 463656c0007ddccee78db383eeb9e6eac75ccb7f (diff) | |
KVM: ia64: Don't hold slots_lock in guest mode
Reorder locking to avoid holding the slots_lock when entering
the guest.
Signed-off-by: Jes Sorensen <jes@sgi.com>
Acked-by: Xiantao Zhang <xiantao.zhang@intel.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
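
The effect of the patch is easier to see outside the diff: slots_lock is now taken only around the pre-entry setup and the post-exit handling, and is dropped before the vcpu actually transitions into the guest. Below is a minimal userspace sketch of that ordering, not actual KVM code; the pthread rwlock stands in for the kernel's rw_semaphore, and `enter_guest()`/`run_once()` are hypothetical stand-ins for the trampoline entry and `__vcpu_run()`.

```c
#include <pthread.h>
#include <stdio.h>

/* Stand-in for kvm->slots_lock; in the kernel it is an rw_semaphore. */
static pthread_rwlock_t slots_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Placeholder for the time spent "in the guest": with the reordered
 * locking, this section runs without slots_lock held. */
static void enter_guest(void)
{
	printf("running guest without slots_lock held\n");
}

static int run_once(void)
{
	/* New order: hold the read lock only while preparing the entry
	 * (looking up slots, setting up mappings), then drop it ... */
	pthread_rwlock_rdlock(&slots_lock);
	/* ... pre-entry setup that needs the memslots ... */
	pthread_rwlock_unlock(&slots_lock);

	enter_guest();

	/* ... and re-take it for exit handling, where the slot data is
	 * consulted again (kvm_handle_exit() in the real code). */
	pthread_rwlock_rdlock(&slots_lock);
	/* ... handle the exit ... */
	pthread_rwlock_unlock(&slots_lock);

	return 0;
}

int main(void)
{
	return run_once();
}
```

Keeping the lock out of the guest-mode section means the sleeping slots_lock is never held while interrupts are disabled and the CPU is executing guest code, which is the ordering problem the commit message refers to.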
Diffstat (limited to 'arch/ia64')
-rw-r--r-- | arch/ia64/kvm/kvm-ia64.c | 64
1 file changed, 33 insertions(+), 31 deletions(-)
```diff
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 3bf0a34..f127fb7 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -632,34 +632,22 @@ static int kvm_vcpu_pre_transition(struct kvm_vcpu *vcpu)
 	vti_set_rr6(vcpu->arch.vmm_rr);
 	return kvm_insert_vmm_mapping(vcpu);
 }
+
 static void kvm_vcpu_post_transition(struct kvm_vcpu *vcpu)
 {
 	kvm_purge_vmm_mapping(vcpu);
 	vti_set_rr6(vcpu->arch.host_rr6);
 }
 
-static int vti_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
 	union context *host_ctx, *guest_ctx;
 	int r;
 
-	/*Get host and guest context with guest address space.*/
-	host_ctx = kvm_get_host_context(vcpu);
-	guest_ctx = kvm_get_guest_context(vcpu);
-
-	r = kvm_vcpu_pre_transition(vcpu);
-	if (r < 0)
-		goto out;
-	kvm_vmm_info->tramp_entry(host_ctx, guest_ctx);
-	kvm_vcpu_post_transition(vcpu);
-	r = 0;
-out:
-	return r;
-}
-
-static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
-	int r;
+	/*
+	 * down_read() may sleep and return with interrupts enabled
+	 */
+	down_read(&vcpu->kvm->slots_lock);
 
 again:
 	if (signal_pending(current)) {
@@ -668,23 +656,28 @@ again:
 		goto out;
 	}
 
-	/*
-	 * down_read() may sleep and return with interrupts enabled
-	 */
-	down_read(&vcpu->kvm->slots_lock);
-
 	preempt_disable();
 	local_irq_disable();
 
+	/*Get host and guest context with guest address space.*/
+	host_ctx = kvm_get_host_context(vcpu);
+	guest_ctx = kvm_get_guest_context(vcpu);
+
 	vcpu->guest_mode = 1;
+
+	r = kvm_vcpu_pre_transition(vcpu);
+	if (r < 0)
+		goto vcpu_run_fail;
+
+	up_read(&vcpu->kvm->slots_lock);
 	kvm_guest_enter();
-	r = vti_vcpu_run(vcpu, kvm_run);
-	if (r < 0) {
-		local_irq_enable();
-		preempt_enable();
-		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
-		goto out;
-	}
+
+	/*
+	 * Transition to the guest
+	 */
+	kvm_vmm_info->tramp_entry(host_ctx, guest_ctx);
+
+	kvm_vcpu_post_transition(vcpu);
 
 	vcpu->arch.launched = 1;
 	vcpu->guest_mode = 0;
@@ -698,9 +691,10 @@ again:
 	 */
 	barrier();
 	kvm_guest_exit();
-	up_read(&vcpu->kvm->slots_lock);
 	preempt_enable();
 
+	down_read(&vcpu->kvm->slots_lock);
+
 	r = kvm_handle_exit(kvm_run, vcpu);
 
 	if (r > 0) {
@@ -709,12 +703,20 @@ again:
 	}
 
 out:
+	up_read(&vcpu->kvm->slots_lock);
 	if (r > 0) {
 		kvm_resched(vcpu);
+		down_read(&vcpu->kvm->slots_lock);
 		goto again;
 	}
 
 	return r;
+
+vcpu_run_fail:
+	local_irq_enable();
+	preempt_enable();
+	kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+	goto out;
 }
 
 static void kvm_set_mmio_data(struct kvm_vcpu *vcpu)
```