author		Christoffer Dall <christoffer.dall@linaro.org>	2014-12-12 21:19:23 +0100
committer	Christoffer Dall <christoffer.dall@linaro.org>	2014-12-15 11:50:42 +0100
commit		05971120fca43e0357789a14b3386bb56eef2201 (patch)
tree		e839f7efda57e1d53fa69c90315df4d0775716bf /virt/kvm/arm
parent		ca7d9c829d419c06e450afa5f785d58198c37caa (diff)
arm/arm64: KVM: Require in-kernel vgic for the arch timers
It is currently possible to run a VM with architected timers support without creating an in-kernel VGIC, which will result in interrupts from the virtual timer going nowhere.

To address this issue, move the architected timers initialization to the time when we run a VCPU for the first time, and then only initialize (and enable) the architected timers if we have a properly created and initialized in-kernel VGIC.

When injecting interrupts from the virtual timer to the vgic, the current setup should ensure that this never calls an on-demand init of the VGIC, which is the only call path that could return an error from kvm_vgic_inject_irq(), so capture the return value and raise a warning if there is an error there.

We also change the kvm_timer_init() function from returning an int to a void function, since it always succeeds.

Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
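The diff shown on this page is limited to virt/kvm/arm, so the caller side of the change (moving the enable decision to the first VCPU run) is not visible here. As a rough, hypothetical sketch of what that first-run path would be expected to look like, assuming the existing irqchip_in_kernel() and vgic_initialized() helpers and the usual has_run_once bookkeeping in the arch code:

/*
 * Illustrative sketch only -- not taken from this diff.  Shows how the
 * first-run path is expected to enable the arch timer only once an
 * in-kernel VGIC has been created and initialized.
 */
static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
{
        struct kvm *kvm = vcpu->kvm;

        /* Only do the one-time setup on the very first KVM_RUN. */
        if (likely(vcpu->arch.has_run_once))
                return 0;

        vcpu->arch.has_run_once = true;

        /*
         * Only enable the arch timer once the virtual timer interrupt
         * has somewhere to go, i.e. a created and initialized
         * in-kernel VGIC.
         */
        if (irqchip_in_kernel(kvm) && vgic_initialized(kvm))
                kvm_timer_enable(kvm);

        return 0;
}

With this split, kvm_timer_init() is reduced to pure per-VM state setup (recording cntvoff), while the decision to actually enable the timer is deferred until the VGIC state is known at first run.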
Diffstat (limited to 'virt/kvm/arm')
-rw-r--r--	virt/kvm/arm/arch_timer.c	30
1 file changed, 22 insertions(+), 8 deletions(-)
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 22fa819..1c0772b 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -61,12 +61,14 @@ static void timer_disarm(struct arch_timer_cpu *timer)
 
 static void kvm_timer_inject_irq(struct kvm_vcpu *vcpu)
 {
+        int ret;
         struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
 
         timer->cntv_ctl |= ARCH_TIMER_CTRL_IT_MASK;
-        kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
-                            timer->irq->irq,
-                            timer->irq->level);
+        ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
+                                  timer->irq->irq,
+                                  timer->irq->level);
+        WARN_ON(ret);
 }
 
 static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
@@ -307,12 +309,24 @@ void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
         timer_disarm(timer);
 }
 
-int kvm_timer_init(struct kvm *kvm)
+void kvm_timer_enable(struct kvm *kvm)
 {
-        if (timecounter && wqueue) {
-                kvm->arch.timer.cntvoff = kvm_phys_timer_read();
+        if (kvm->arch.timer.enabled)
+                return;
+
+        /*
+         * There is a potential race here between VCPUs starting for the first
+         * time, which may be enabling the timer multiple times.  That doesn't
+         * hurt though, because we're just setting a variable to the same
+         * variable that it already was.  The important thing is that all
+         * VCPUs have the enabled variable set, before entering the guest, if
+         * the arch timers are enabled.
+         */
+        if (timecounter && wqueue)
                 kvm->arch.timer.enabled = 1;
-        }
+}
 
-        return 0;
+void kvm_timer_init(struct kvm *kvm)
+{
+        kvm->arch.timer.cntvoff = kvm_phys_timer_read();
 }