From 75da01e127f7db3b23effa6118336d303e7572a7 Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Thu, 31 Jan 2013 11:25:52 +0000
Subject: ARM: KVM: vgic: force EOIed LRs to the empty state

The VGIC doesn't guarantee that an EOIed LR that has been configured to
generate a maintenance interrupt will appear as empty. While the code
recovers from this situation, it is better to clean the LR and flag it
as empty so it can be quickly recycled.

Signed-off-by: Marc Zyngier
---
 arch/arm/kvm/vgic.c | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

(limited to 'arch/arm/kvm')

diff --git a/arch/arm/kvm/vgic.c b/arch/arm/kvm/vgic.c
index c9a1731..76ea1aa 100644
--- a/arch/arm/kvm/vgic.c
+++ b/arch/arm/kvm/vgic.c
@@ -883,8 +883,7 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
 			  lr, irq, vgic_cpu->vgic_lr[lr]);
 		BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
 		vgic_cpu->vgic_lr[lr] |= GICH_LR_PENDING_BIT;
-
-		goto out;
+		return true;
 	}

 	/* Try to use another LR for this interrupt */
@@ -898,7 +897,6 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
 	vgic_cpu->vgic_irq_lr_map[irq] = lr;
 	set_bit(lr, vgic_cpu->lr_used);

-out:
 	if (!vgic_irq_is_edge(vcpu, irq))
 		vgic_cpu->vgic_lr[lr] |= GICH_LR_EOI;

@@ -1054,6 +1052,13 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
 			} else {
 				vgic_cpu_irq_clear(vcpu, irq);
 			}
+
+			/*
+			 * Despite being EOIed, the LR may not have
+			 * been marked as empty.
+			 */
+			set_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr);
+			vgic_cpu->vgic_lr[lr] &= ~GICH_LR_ACTIVE_BIT;
 		}
 	}

--
cgit v1.1

From 33c83cb3c1d84b76c8270abe5487e77f83a81b22 Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Fri, 1 Feb 2013 18:28:30 +0000
Subject: ARM: KVM: vgic: take distributor lock on sync_hwstate path

Now that the maintenance interrupt handling is actually out of the
handler itself, the code becomes quite racy, as we can get preempted
while we process the state.

Wrapping this code in the distributor lock ensures that we're not
preempted and keeps the sync path relatively race-free.

Signed-off-by: Marc Zyngier
---
 arch/arm/kvm/vgic.c | 24 ++++++------------------
 1 file changed, 6 insertions(+), 18 deletions(-)

(limited to 'arch/arm/kvm')

diff --git a/arch/arm/kvm/vgic.c b/arch/arm/kvm/vgic.c
index 76ea1aa..0e4cfe1 100644
--- a/arch/arm/kvm/vgic.c
+++ b/arch/arm/kvm/vgic.c
@@ -1016,21 +1016,6 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)

 	kvm_debug("MISR = %08x\n", vgic_cpu->vgic_misr);

-	/*
-	 * We do not need to take the distributor lock here, since the only
-	 * action we perform is clearing the irq_active_bit for an EOIed
-	 * level interrupt. There is a potential race with
-	 * the queuing of an interrupt in __kvm_vgic_flush_hwstate(), where we
-	 * check if the interrupt is already active. Two possibilities:
-	 *
-	 * - The queuing is occurring on the same vcpu: cannot happen,
-	 *   as we're already in the context of this vcpu, and
-	 *   executing the handler
-	 * - The interrupt has been migrated to another vcpu, and we
-	 *   ignore this interrupt for this run. Big deal. It is still
-	 *   pending though, and will get considered when this vcpu
-	 *   exits.
-	 */
 	if (vgic_cpu->vgic_misr & GICH_MISR_EOI) {
 		/*
 		 * Some level interrupts have been EOIed. Clear their
@@ -1069,9 +1054,8 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
 }

 /*
- * Sync back the VGIC state after a guest run. We do not really touch
- * the distributor here (the irq_pending_on_cpu bit is safe to set),
- * so there is no need for taking its lock.
+ * Sync back the VGIC state after a guest run. The distributor lock is
+ * needed so we don't get preempted in the middle of the state processing.
  */
 static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 {
@@ -1117,10 +1101,14 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)

 void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 {
+	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
 	if (!irqchip_in_kernel(vcpu->kvm))
 		return;

+	spin_lock(&dist->lock);
 	__kvm_vgic_sync_hwstate(vcpu);
+	spin_unlock(&dist->lock);
 }

 int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
--
cgit v1.1
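A note on the first patch above: keeping vgic_elrsr in sync is what lets the
flush path recycle LRs with a plain bitmap scan. The following is a minimal
sketch of that idea only, not code from this series; find_empty_lr() and its
nr_lr parameter are made up for illustration, and the sketch assumes it lives
inside vgic.c, where struct vgic_cpu and its vgic_elrsr/lr_used fields (used
in the hunks above) are already visible.

static int find_empty_lr(struct vgic_cpu *vgic_cpu, int nr_lr)
{
	int lr;

	for (lr = 0; lr < nr_lr; lr++) {
		/* empty in the hardware view and not owned by a queued IRQ */
		if (test_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr) &&
		    !test_bit(lr, vgic_cpu->lr_used))
			return lr;
	}

	return -1;	/* nothing free: the interrupt stays pending in the distributor */
}

Forcing EOIed LRs into the empty state makes a scan like this succeed more
often, which is the "quickly recycled" benefit the changelog mentions.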
From ca46e10fb239d4646cb19620695473f252a6ffc7 Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Wed, 3 Apr 2013 10:43:13 +0100
Subject: ARM: KVM: fix KVM_CAP_ARM_SET_DEVICE_ADDR reporting

Commit 3401d54696f9 (KVM: ARM: Introduce KVM_ARM_SET_DEVICE_ADDR ioctl)
added support for the KVM_CAP_ARM_SET_DEVICE_ADDR capability, but failed
to add a break in the relevant case statement, so the check falls through
to KVM_CAP_NR_VCPUS and returns the number of online CPUs instead.

Luckily enough, the CONFIG_NR_CPUS=0 patch hasn't been merged yet
(https://lkml.org/lkml/diff/2012/3/31/131/1), so the bug wasn't noticed.

Just give it a break!

Signed-off-by: Marc Zyngier
Signed-off-by: Christoffer Dall
---
 arch/arm/kvm/arm.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch/arm/kvm')

diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 5a93698..c1fe498 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -201,6 +201,7 @@ int kvm_dev_ioctl_check_extension(long ext)
 		break;
 	case KVM_CAP_ARM_SET_DEVICE_ADDR:
 		r = 1;
+		break;
 	case KVM_CAP_NR_VCPUS:
 		r = num_online_cpus();
 		break;
--
cgit v1.1

From 15bbc1b28ff65767922f78c266821cc138b90a47 Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Wed, 17 Apr 2013 12:09:09 -0700
Subject: ARM: KVM: fix unbalanced get_cpu() in access_dcsw

In the very unlikely event that a guest would be foolish enough to *read*
from a write-only cache maintenance register, we return with preemption
left disabled, due to a misplaced get_cpu().

Just move the "is_write" test outside of the critical section.

Signed-off-by: Marc Zyngier
Signed-off-by: Christoffer Dall
Signed-off-by: Linus Torvalds
---
 arch/arm/kvm/coproc.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'arch/arm/kvm')

diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index 4ea9a98..7bed755 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -79,11 +79,11 @@ static bool access_dcsw(struct kvm_vcpu *vcpu,
 	u32 val;
 	int cpu;

-	cpu = get_cpu();
-
 	if (!p->is_write)
 		return read_from_write_only(vcpu, p);

+	cpu = get_cpu();
+
 	cpumask_setall(&vcpu->arch.require_dcache_flush);
 	cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);

--
cgit v1.1
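A note on the last patch: the underlying rule is that any return taken
between get_cpu() and put_cpu() leaves the vcpu thread with preemption
disabled. Below is a minimal sketch of that pattern, not the actual
access_dcsw() implementation; handler_sketch() and its is_write argument
are invented for illustration, while get_cpu(), put_cpu() and pr_debug()
are real kernel APIs.

#include <linux/printk.h>
#include <linux/smp.h>

/*
 * Reject the unsupported case before disabling preemption, so that every
 * get_cpu() is matched by a put_cpu() on all remaining paths.
 */
static bool handler_sketch(bool is_write)
{
	int cpu;

	if (!is_write)		/* early return, preemption still enabled */
		return false;

	cpu = get_cpu();	/* preemption disabled from here... */
	pr_debug("handling write on cpu %d\n", cpu);
	put_cpu();		/* ...until here */

	return true;
}

This is exactly what the patch achieves by moving the is_write check above
the get_cpu() call in access_dcsw().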