diff options
author | neel <neel@FreeBSD.org> | 2014-04-29 18:42:56 +0000 |
---|---|---|
committer | neel <neel@FreeBSD.org> | 2014-04-29 18:42:56 +0000 |
commit | 9c850920136486c909a374adff26b7848b1be08c (patch) | |
tree | 0efb359ab486bdbb586f778515b600a59f5205c8 /sys/amd64/vmm/vmm.c | |
parent | 0c45ba8eb04a85e1796404415b8012342884d10e (diff) | |
download | FreeBSD-src-9c850920136486c909a374adff26b7848b1be08c.zip FreeBSD-src-9c850920136486c909a374adff26b7848b1be08c.tar.gz |
Some Linux guests will implement a 'halt' by disabling the APIC and executing
the 'HLT' instruction. This condition was detected by 'vm_handle_hlt()' and
converted into the SPINDOWN_CPU exitcode. The bhyve(8) process would exit
the vcpu thread in response to a SPINDOWN_CPU and when the last vcpu was
spun down it would reset the virtual machine via vm_suspend(VM_SUSPEND_RESET).
This functionality was broken in r263780 in a way that made it impossible
to kill the bhyve(8) process because it would loop forever in
vm_handle_suspend().
Unbreak this by removing the code to spindown vcpus. Thus a 'halt' from
a Linux guest will appear to be hung but this is consistent with the
behavior on bare metal. The guest can be rebooted by using the bhyvectl
options '--force-reset' or '--force-poweroff'.
Reviewed by: grehan@
Diffstat (limited to 'sys/amd64/vmm/vmm.c')
-rw-r--r-- | sys/amd64/vmm/vmm.c | 95 |
1 file changed, 28 insertions, 67 deletions
diff --git a/sys/amd64/vmm/vmm.c b/sys/amd64/vmm/vmm.c index 51a132b..ba49616 100644 --- a/sys/amd64/vmm/vmm.c +++ b/sys/amd64/vmm/vmm.c @@ -191,8 +191,6 @@ static int vmm_ipinum; SYSCTL_INT(_hw_vmm, OID_AUTO, ipinum, CTLFLAG_RD, &vmm_ipinum, 0, "IPI vector used for vcpu notifications"); -static void vm_deactivate_cpu(struct vm *vm, int vcpuid); - static void vcpu_cleanup(struct vm *vm, int i) { @@ -1006,60 +1004,47 @@ vm_handle_rendezvous(struct vm *vm, int vcpuid) static int vm_handle_hlt(struct vm *vm, int vcpuid, bool intr_disabled, bool *retu) { - struct vm_exit *vmexit; struct vcpu *vcpu; - int t, timo, spindown; + const char *wmesg; + int t; vcpu = &vm->vcpu[vcpuid]; - spindown = 0; vcpu_lock(vcpu); + while (1) { + /* + * Do a final check for pending NMI or interrupts before + * really putting this thread to sleep. Also check for + * software events that would cause this vcpu to wakeup. + * + * These interrupts/events could have happened after the + * vcpu returned from VMRUN() and before it acquired the + * vcpu lock above. + */ + if (vm->rendezvous_func != NULL || vm->suspend) + break; + if (vm_nmi_pending(vm, vcpuid)) + break; + if (!intr_disabled) { + if (vm_extint_pending(vm, vcpuid) || + vlapic_pending_intr(vcpu->vlapic, NULL)) { + break; + } + } + + if (vlapic_enabled(vcpu->vlapic)) + wmesg = "vmidle"; + else + wmesg = "vmhalt"; - /* - * Do a final check for pending NMI or interrupts before - * really putting this thread to sleep. - * - * These interrupts could have happened any time after we - * returned from VMRUN() and before we grabbed the vcpu lock. - */ - if (vm->rendezvous_func == NULL && - !vm_nmi_pending(vm, vcpuid) && - (intr_disabled || !vlapic_pending_intr(vcpu->vlapic, NULL))) { t = ticks; vcpu_require_state_locked(vcpu, VCPU_SLEEPING); - if (vlapic_enabled(vcpu->vlapic)) { - /* - * XXX msleep_spin() is not interruptible so use the - * 'timo' to put an upper bound on the sleep time. 
- */ - timo = hz; - msleep_spin(vcpu, &vcpu->mtx, "vmidle", timo); - } else { - /* - * Spindown the vcpu if the APIC is disabled and it - * had entered the halted state, but never spin - * down the BSP. - */ - if (vcpuid != 0) - spindown = 1; - } + msleep_spin(vcpu, &vcpu->mtx, wmesg, 0); vcpu_require_state_locked(vcpu, VCPU_FROZEN); vmm_stat_incr(vm, vcpuid, VCPU_IDLE_TICKS, ticks - t); } vcpu_unlock(vcpu); - /* - * Since 'vm_deactivate_cpu()' grabs a sleep mutex we must call it - * outside the confines of the vcpu spinlock. - */ - if (spindown) { - *retu = true; - vmexit = vm_exitinfo(vm, vcpuid); - vmexit->exitcode = VM_EXITCODE_SPINDOWN_CPU; - vm_deactivate_cpu(vm, vcpuid); - VCPU_CTR0(vm, vcpuid, "spinning down cpu"); - } - return (0); } @@ -1673,30 +1658,6 @@ vm_activate_cpu(struct vm *vm, int vcpuid) CPU_SET_ATOMIC(vcpuid, &vm->active_cpus); } -static void -vm_deactivate_cpu(struct vm *vm, int vcpuid) -{ - - KASSERT(vcpuid >= 0 && vcpuid < VM_MAXCPU, - ("vm_deactivate_cpu: invalid vcpuid %d", vcpuid)); - KASSERT(CPU_ISSET(vcpuid, &vm->active_cpus), - ("vm_deactivate_cpu: vcpuid %d is not active", vcpuid)); - - VCPU_CTR0(vm, vcpuid, "deactivated"); - CPU_CLR_ATOMIC(vcpuid, &vm->active_cpus); - - /* - * If a vcpu rendezvous is in progress then it could be blocked - * on 'vcpuid' - unblock it before disappearing forever. - */ - mtx_lock(&vm->rendezvous_mtx); - if (vm->rendezvous_func != NULL) { - VCPU_CTR0(vm, vcpuid, "unblock rendezvous after deactivation"); - wakeup(&vm->rendezvous_func); - } - mtx_unlock(&vm->rendezvous_mtx); -} - cpuset_t vm_active_cpus(struct vm *vm) { |