author     Xiantao Zhang <xiantao.zhang@intel.com>    2008-10-16 15:58:15 +0800
committer  Avi Kivity <avi@redhat.com>                2008-10-28 14:22:14 +0200
commit     decc90162a99b4e51c534ab63f9b6fc5cb0f2596 (patch)
tree       61f590370e93a3f0ed34881c2c7a954ce1c21b57 /arch
parent     5550af4df179e52753d3a43a788a113ad8cd95cd (diff)
KVM: ia64: Fix halt emulation logic
The common halt logic was changed on the x86 side without a matching
update for ia64. This patch brings the ia64 halt emulation back in line
with the common code.

Fixes a regression that caused guests with more than 2 vcpus to hang.
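
For context, this is the common-code contract ia64 must now honour:
kvm_vcpu_block() wakes a halted vcpu only when an arch hook reports
pending work, and it signals a genuine unhalt through KVM_REQ_UNHALT.
Roughly (a simplified sketch of the generic loop of this era, shown for
illustration only; it is not part of this patch and details may differ):

void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
        DEFINE_WAIT(wait);

        for (;;) {
                prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

                /*
                 * The arch hooks decide whether the vcpu can run again.
                 * kvm_cpu_has_pending_timer() is why ia64 needs the new
                 * timer_fired flag: returning 0 unconditionally kept a
                 * halted vcpu asleep even after its hlt timer expired.
                 */
                if (kvm_cpu_has_interrupt(vcpu) ||
                    kvm_cpu_has_pending_timer(vcpu) ||
                    kvm_arch_vcpu_runnable(vcpu)) {
                        set_bit(KVM_REQ_UNHALT, &vcpu->requests);
                        break;
                }
                if (signal_pending(current))
                        break;

                vcpu_put(vcpu);
                schedule();
                vcpu_load(vcpu);
        }

        finish_wait(&vcpu->wq, &wait);
}

After kvm_vcpu_block() returns, the arch side must consume KVM_REQ_UNHALT
and move the vcpu from KVM_MP_STATE_HALTED back to KVM_MP_STATE_RUNNABLE;
a return without the bit set means a pending signal, hence the -EINTR
path in the kvm_emulate_halt() change below.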
Signed-off-by: Xiantao Zhang <xiantao.zhang@intel.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch')
-rw-r--r--  arch/ia64/include/asm/kvm_host.h |  3
-rw-r--r--  arch/ia64/kvm/kvm-ia64.c         | 72
-rw-r--r--  arch/ia64/kvm/kvm_fw.c           |  9
-rw-r--r--  arch/ia64/kvm/process.c          |  2
4 files changed, 46 insertions, 40 deletions
diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
index 04c0b88..c60d324 100644
--- a/arch/ia64/include/asm/kvm_host.h
+++ b/arch/ia64/include/asm/kvm_host.h
@@ -365,7 +365,8 @@ struct kvm_vcpu_arch {
         long itc_offset;
         unsigned long itc_check;
         unsigned long timer_check;
-        unsigned long timer_pending;
+        unsigned int timer_pending;
+        unsigned int timer_fired;
 
         unsigned long vrr[8];
         unsigned long ibr[8];
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 8a2b13f..3caac47 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -385,6 +385,7 @@ static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
         struct kvm *kvm = vcpu->kvm;
         struct call_data call_data;
         int i;
+
         call_data.ptc_g_data = p->u.ptc_g_data;
 
         for (i = 0; i < KVM_MAX_VCPUS; i++) {
@@ -418,33 +419,41 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
         ktime_t kt;
         long itc_diff;
         unsigned long vcpu_now_itc;
-
         unsigned long expires;
         struct hrtimer *p_ht = &vcpu->arch.hlt_timer;
         unsigned long cyc_per_usec = local_cpu_data->cyc_per_usec;
         struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
 
-        vcpu_now_itc = ia64_getreg(_IA64_REG_AR_ITC) + vcpu->arch.itc_offset;
+        if (irqchip_in_kernel(vcpu->kvm)) {
 
-        if (time_after(vcpu_now_itc, vpd->itm)) {
-                vcpu->arch.timer_check = 1;
-                return 1;
-        }
-        itc_diff = vpd->itm - vcpu_now_itc;
-        if (itc_diff < 0)
-                itc_diff = -itc_diff;
+                vcpu_now_itc = ia64_getreg(_IA64_REG_AR_ITC) + vcpu->arch.itc_offset;
 
-        expires = div64_u64(itc_diff, cyc_per_usec);
-        kt = ktime_set(0, 1000 * expires);
-        vcpu->arch.ht_active = 1;
-        hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);
+                if (time_after(vcpu_now_itc, vpd->itm)) {
+                        vcpu->arch.timer_check = 1;
+                        return 1;
+                }
+                itc_diff = vpd->itm - vcpu_now_itc;
+                if (itc_diff < 0)
+                        itc_diff = -itc_diff;
+
+                expires = div64_u64(itc_diff, cyc_per_usec);
+                kt = ktime_set(0, 1000 * expires);
+
+                down_read(&vcpu->kvm->slots_lock);
+                vcpu->arch.ht_active = 1;
+                hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);
 
-        if (irqchip_in_kernel(vcpu->kvm)) {
                 vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
                 kvm_vcpu_block(vcpu);
                 hrtimer_cancel(p_ht);
                 vcpu->arch.ht_active = 0;
+                if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
+                        if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
+                                vcpu->arch.mp_state =
+                                        KVM_MP_STATE_RUNNABLE;
+                up_read(&vcpu->kvm->slots_lock);
+
 
                 if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
                         return -EINTR;
                 return 1;
@@ -484,10 +493,6 @@ static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu,
 static const int kvm_vti_max_exit_handlers =
                 sizeof(kvm_vti_exit_handlers)/sizeof(*kvm_vti_exit_handlers);
 
-static void kvm_prepare_guest_switch(struct kvm_vcpu *vcpu)
-{
-}
-
 static uint32_t kvm_get_exit_reason(struct kvm_vcpu *vcpu)
 {
         struct exit_ctl_data *p_exit_data;
@@ -600,8 +605,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 again:
         preempt_disable();
-
-        kvm_prepare_guest_switch(vcpu);
         local_irq_disable();
 
         if (signal_pending(current)) {
@@ -614,7 +617,7 @@ again:
 
         vcpu->guest_mode = 1;
         kvm_guest_enter();
-
+        down_read(&vcpu->kvm->slots_lock);
         r = vti_vcpu_run(vcpu, kvm_run);
         if (r < 0) {
                 local_irq_enable();
@@ -634,9 +637,8 @@ again:
          * But we need to prevent reordering, hence this barrier():
          */
         barrier();
-
         kvm_guest_exit();
-
+        up_read(&vcpu->kvm->slots_lock);
         preempt_enable();
 
         r = kvm_handle_exit(kvm_run, vcpu);
@@ -673,6 +675,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
         if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
                 kvm_vcpu_block(vcpu);
+                clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
                 vcpu_put(vcpu);
                 return -EAGAIN;
         }
@@ -1125,15 +1128,16 @@ static enum hrtimer_restart hlt_timer_fn(struct hrtimer *data)
         wait_queue_head_t *q;
 
         vcpu  = container_of(data, struct kvm_vcpu, arch.hlt_timer);
+        q = &vcpu->wq;
+
         if (vcpu->arch.mp_state != KVM_MP_STATE_HALTED)
                 goto out;
 
-        q = &vcpu->wq;
-        if (waitqueue_active(q)) {
-                vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+        if (waitqueue_active(q))
                 wake_up_interruptible(q);
-        }
+
 out:
+        vcpu->arch.timer_fired = 1;
         vcpu->arch.timer_check = 1;
         return HRTIMER_NORESTART;
 }
@@ -1702,12 +1706,14 @@ static void vcpu_kick_intr(void *info)
 void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
 {
         int ipi_pcpu = vcpu->cpu;
+        int cpu = get_cpu();
 
         if (waitqueue_active(&vcpu->wq))
                 wake_up_interruptible(&vcpu->wq);
 
-        if (vcpu->guest_mode)
+        if (vcpu->guest_mode && cpu != ipi_pcpu)
                 smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0);
+        put_cpu();
 }
 
 int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig)
@@ -1717,13 +1723,7 @@ int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig)
 
         if (!test_and_set_bit(vec, &vpd->irr[0])) {
                 vcpu->arch.irq_new_pending = 1;
-                if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
-                        kvm_vcpu_kick(vcpu);
-                else if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) {
-                        vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
-                        if (waitqueue_active(&vcpu->wq))
-                                wake_up_interruptible(&vcpu->wq);
-                }
+                kvm_vcpu_kick(vcpu);
                 return 1;
         }
         return 0;
@@ -1793,7 +1793,7 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
 
 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 {
-        return 0;
+        return vcpu->arch.timer_fired;
 }
 
 gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
diff --git a/arch/ia64/kvm/kvm_fw.c b/arch/ia64/kvm/kvm_fw.c
index 0c69d9e..cb7600b 100644
--- a/arch/ia64/kvm/kvm_fw.c
+++ b/arch/ia64/kvm/kvm_fw.c
@@ -286,6 +286,12 @@ static u64 kvm_get_pal_call_index(struct kvm_vcpu *vcpu)
         return index;
 }
 
+static void prepare_for_halt(struct kvm_vcpu *vcpu)
+{
+        vcpu->arch.timer_pending = 1;
+        vcpu->arch.timer_fired = 0;
+}
+
 int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 
@@ -304,11 +310,10 @@ int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
                 break;
         case PAL_HALT_LIGHT:
         {
-                vcpu->arch.timer_pending = 1;
                 INIT_PAL_STATUS_SUCCESS(result);
+                prepare_for_halt(vcpu);
                 if (kvm_highest_pending_irq(vcpu) == -1)
                         ret = kvm_emulate_halt(vcpu);
-
         }
                 break;
 
diff --git a/arch/ia64/kvm/process.c b/arch/ia64/kvm/process.c
index 3417783..8008173 100644
--- a/arch/ia64/kvm/process.c
+++ b/arch/ia64/kvm/process.c
@@ -713,7 +713,7 @@ void leave_hypervisor_tail(void)
                 if (!(VCPU(v, itv) & (1 << 16))) {
                         vcpu_pend_interrupt(v,
                                         VCPU(v, itv) & 0xff);
-                VMX(v, itc_check) = 0;
+                        VMX(v, itc_check) = 0;
                 } else {
                         v->arch.timer_pending = 1;
                 }
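
A note on the kvm_vcpu_kick() change above: get_cpu() disables
preemption, so the cpu number it returns stays valid until the matching
put_cpu(), and the new cpu != ipi_pcpu test stops a vcpu that kicks
itself from sending an IPI to its own processor. The same pattern in
isolation (a minimal sketch with hypothetical names, not code from the
patch):

/* Run a handler on another cpu via IPI, never on our own. */
static void kick_remote(int target, void (*fn)(void *info), void *info)
{
        int cpu = get_cpu();    /* disable preemption; cpu is stable */

        if (cpu != target)
                smp_call_function_single(target, fn, info, 0);
        put_cpu();              /* re-enable preemption */
}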