author     Christoffer Dall <christoffer.dall@linaro.org>  2016-05-18 16:26:00 +0100
committer  Christoffer Dall <christoffer.dall@linaro.org>  2016-05-20 15:39:41 +0200
commit     41a54482c010d8806cf56e1501bb3b61fac14cf9 (patch)
tree       e8b452eddf3b83137d4a771e687deee289c422d5 /virt
parent     c8eb3f6b9bc31abc0ab3230737fde1639c8b1ea6 (diff)
KVM: arm/arm64: Move timer IRQ map to latest possible time
We are about to modify the VGIC to allocate all data structures dynamically and store mapped IRQ information in a per-IRQ struct, which is indeed allocated dynamically at init time.

Therefore, we cannot record the mapped IRQ info from the timer at timer reset time like it's done now, because VCPU reset happens before timer init.

A possible later time to do this is on the first run of each VCPU; it just requires us to move the enable state to be a per-VCPU state and to do the lookup of the physical IRQ number when we are about to run the VCPU.

Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Andre Przywara <andre.przywara@arm.com>
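For orientation, a rough sketch of the caller side this patch relies on. The call site lives outside 'virt' and is not shown in the diff below; the function name kvm_vcpu_first_run_init(), the has_run_once flag and the VGIC readiness checks are assumptions about how the first-run path is wired up, not part of this patch.

	/*
	 * Hypothetical sketch of the arch-side first-run hook (not in 'virt'):
	 * the VGIC is mapped/initialized first, and only then is the timer
	 * enabled, which now performs the physical IRQ lookup and the
	 * kvm_vgic_map_phys_irq() call that used to live in VCPU reset.
	 */
	static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
	{
		struct kvm *kvm = vcpu->kvm;
		int ret = 0;

		if (likely(vcpu->arch.has_run_once))
			return 0;

		vcpu->arch.has_run_once = true;

		/* Map VGIC hardware resources before the first VCPU run. */
		if (unlikely(!vgic_ready(kvm))) {
			ret = kvm_vgic_map_resources(kvm);
			if (ret)
				return ret;
		}

		/*
		 * Enable the arch timer only once the VGIC is initialized, so
		 * the virtual->physical IRQ mapping can be recorded per VCPU.
		 */
		ret = kvm_timer_enable(vcpu);

		return ret;
	}

The ordering is the point: the VGIC must be fully initialized before kvm_timer_enable() records the mapping, which is why the lookup can no longer happen at VCPU reset time.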
Diffstat (limited to 'virt')
-rw-r--r--  virt/kvm/arm/arch_timer.c   66
-rw-r--r--  virt/kvm/arm/hyp/timer-sr.c  5
2 files changed, 40 insertions(+), 31 deletions(-)
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 3232105..e2d5b6f 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -197,7 +197,7 @@ static int kvm_timer_update_state(struct kvm_vcpu *vcpu)
* because the guest would never see the interrupt. Instead wait
* until we call this function from kvm_timer_flush_hwstate.
*/
- if (!vgic_initialized(vcpu->kvm))
+ if (!vgic_initialized(vcpu->kvm) || !timer->enabled)
return -ENODEV;
if (kvm_timer_should_fire(vcpu) != timer->irq.level)
@@ -333,9 +333,6 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
const struct kvm_irq_level *irq)
{
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
- struct irq_desc *desc;
- struct irq_data *data;
- int phys_irq;
/*
* The vcpu timer irq number cannot be determined in
@@ -354,26 +351,7 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
timer->cntv_ctl = 0;
kvm_timer_update_state(vcpu);
- /*
- * Find the physical IRQ number corresponding to the host_vtimer_irq
- */
- desc = irq_to_desc(host_vtimer_irq);
- if (!desc) {
- kvm_err("%s: no interrupt descriptor\n", __func__);
- return -EINVAL;
- }
-
- data = irq_desc_get_irq_data(desc);
- while (data->parent_data)
- data = data->parent_data;
-
- phys_irq = data->hwirq;
-
- /*
- * Tell the VGIC that the virtual interrupt is tied to a
- * physical interrupt. We do that once per VCPU.
- */
- return kvm_vgic_map_phys_irq(vcpu, irq->irq, phys_irq);
+ return 0;
}
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
@@ -501,10 +479,40 @@ void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
kvm_vgic_unmap_phys_irq(vcpu, timer->irq.irq);
}
-void kvm_timer_enable(struct kvm *kvm)
+int kvm_timer_enable(struct kvm_vcpu *vcpu)
{
- if (kvm->arch.timer.enabled)
- return;
+ struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+ struct irq_desc *desc;
+ struct irq_data *data;
+ int phys_irq;
+ int ret;
+
+ if (timer->enabled)
+ return 0;
+
+ /*
+ * Find the physical IRQ number corresponding to the host_vtimer_irq
+ */
+ desc = irq_to_desc(host_vtimer_irq);
+ if (!desc) {
+ kvm_err("%s: no interrupt descriptor\n", __func__);
+ return -EINVAL;
+ }
+
+ data = irq_desc_get_irq_data(desc);
+ while (data->parent_data)
+ data = data->parent_data;
+
+ phys_irq = data->hwirq;
+
+ /*
+ * Tell the VGIC that the virtual interrupt is tied to a
+ * physical interrupt. We do that once per VCPU.
+ */
+ ret = kvm_vgic_map_phys_irq(vcpu, timer->irq.irq, phys_irq);
+ if (ret)
+ return ret;
+
/*
* There is a potential race here between VCPUs starting for the first
@@ -515,7 +523,9 @@ void kvm_timer_enable(struct kvm *kvm)
* the arch timers are enabled.
*/
if (timecounter && wqueue)
- kvm->arch.timer.enabled = 1;
+ timer->enabled = 1;
+
+ return 0;
}
void kvm_timer_init(struct kvm *kvm)
diff --git a/virt/kvm/arm/hyp/timer-sr.c b/virt/kvm/arm/hyp/timer-sr.c
index ea00d69..798866a 100644
--- a/virt/kvm/arm/hyp/timer-sr.c
+++ b/virt/kvm/arm/hyp/timer-sr.c
@@ -24,11 +24,10 @@
/* vcpu is already in the HYP VA space */
void __hyp_text __timer_save_state(struct kvm_vcpu *vcpu)
{
- struct kvm *kvm = kern_hyp_va(vcpu->kvm);
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
u64 val;
- if (kvm->arch.timer.enabled) {
+ if (timer->enabled) {
timer->cntv_ctl = read_sysreg_el0(cntv_ctl);
timer->cntv_cval = read_sysreg_el0(cntv_cval);
}
@@ -60,7 +59,7 @@ void __hyp_text __timer_restore_state(struct kvm_vcpu *vcpu)
val |= CNTHCTL_EL1PCTEN;
write_sysreg(val, cnthctl_el2);
- if (kvm->arch.timer.enabled) {
+ if (timer->enabled) {
write_sysreg(kvm->arch.timer.cntvoff, cntvoff_el2);
write_sysreg_el0(timer->cntv_cval, cntv_cval);
isb();
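Since the diffstat is limited to 'virt', the data-structure side of "move the enable state to be a per-VCPU state" is not visible above. Below is a minimal sketch of what the timer structs presumably look like after the change; the field names cntv_ctl, cntv_cval, irq, enabled and cntvoff are taken from the accesses in the diff, while the header path (include/kvm/arm_arch_timer.h), the field ordering and the exact types are assumptions.

	/* Sketch (assumption): per-VM state keeps only the counter offset,
	 * which the hyp restore path still reads via kvm->arch.timer.cntvoff. */
	struct arch_timer_kvm {
		u64			cntvoff;
	};

	/* Sketch (assumption): the enable flag becomes per-VCPU state so that
	 * kvm_timer_enable() and the phys IRQ mapping run once per VCPU on its
	 * first run, after the VGIC has been initialized. */
	struct arch_timer_cpu {
		/* Registers: control register, timer value */
		u32			cntv_ctl;	/* saved/restored by hyp code */
		u64			cntv_cval;	/* saved/restored by hyp code */

		/* ... existing per-VCPU fields (work struct, etc.) ... */

		/* Timer IRQ (number and level), as used via timer->irq.irq */
		struct kvm_irq_level	irq;

		/* Is the timer enabled (checked in __timer_save/restore_state) */
		bool			enabled;
	};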