Diffstat (limited to 'virt/kvm')
-rw-r--r--  virt/kvm/ioapic.c   | 57
-rw-r--r--  virt/kvm/ioapic.h   |  6
-rw-r--r--  virt/kvm/irq_comm.c | 71
3 files changed, 60 insertions(+), 74 deletions(-)
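
Taken together, the hunks below drop the deliver_bitmask-based ioapic_deliver_entry()/kvm_get_intr_delivery_bitmask() pair and route everything through a single kvm_irq_delivery_to_apic() that takes a struct kvm_lapic_irq describing the interrupt. The struct itself is defined outside virt/kvm, so it does not appear in this diffstat; judging from the fields the patch dereferences, it looks roughly like the sketch below (the u32 widths are an assumption):

/*
 * Rough sketch of struct kvm_lapic_irq as used by the hunks below.
 * The real definition lives outside virt/kvm and is not part of this
 * diff; the field names are the ones the patch actually touches.
 */
struct kvm_lapic_irq {
	u32 vector;		/* interrupt vector */
	u32 delivery_mode;	/* ICR-encoded delivery mode (bits 10:8) */
	u32 dest_mode;		/* 0 = physical, non-zero = logical */
	u32 level;		/* assert (1) / deassert (0) */
	u32 trig_mode;		/* 0 = edge, 1 = level */
	u32 shorthand;		/* ICR destination shorthand, 0 = none */
	u32 dest_id;		/* destination APIC id */
};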
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index 43969bb..1eddae9 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -142,58 +142,33 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
}
}
-int ioapic_deliver_entry(struct kvm *kvm, union kvm_ioapic_redirect_entry *e)
-{
- DECLARE_BITMAP(deliver_bitmask, KVM_MAX_VCPUS);
- int i, r = -1;
-
- kvm_get_intr_delivery_bitmask(kvm, NULL, e->fields.dest_id,
- e->fields.dest_mode,
- e->fields.delivery_mode == IOAPIC_LOWEST_PRIORITY,
- 0, deliver_bitmask);
-
- if (find_first_bit(deliver_bitmask, KVM_MAX_VCPUS) >= KVM_MAX_VCPUS) {
- ioapic_debug("no target on destination\n");
- return r;
- }
-
- while ((i = find_first_bit(deliver_bitmask, KVM_MAX_VCPUS))
- < KVM_MAX_VCPUS) {
- struct kvm_vcpu *vcpu = kvm->vcpus[i];
- __clear_bit(i, deliver_bitmask);
- if (vcpu) {
- if (r < 0)
- r = 0;
- r += kvm_apic_set_irq(vcpu, e->fields.vector,
- e->fields.delivery_mode,
- e->fields.trig_mode);
- } else
- ioapic_debug("null destination vcpu: "
- "mask=%x vector=%x delivery_mode=%x\n",
- e->fields.deliver_bitmask,
- e->fields.vector, e->fields.delivery_mode);
- }
- return r;
-}
-
static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
{
- union kvm_ioapic_redirect_entry entry = ioapic->redirtbl[irq];
+ union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq];
+ struct kvm_lapic_irq irqe;
ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x "
"vector=%x trig_mode=%x\n",
- entry.fields.dest, entry.fields.dest_mode,
- entry.fields.delivery_mode, entry.fields.vector,
- entry.fields.trig_mode);
+ entry->fields.dest, entry->fields.dest_mode,
+ entry->fields.delivery_mode, entry->fields.vector,
+ entry->fields.trig_mode);
+
+ irqe.dest_id = entry->fields.dest_id;
+ irqe.vector = entry->fields.vector;
+ irqe.dest_mode = entry->fields.dest_mode;
+ irqe.trig_mode = entry->fields.trig_mode;
+ irqe.delivery_mode = entry->fields.delivery_mode << 8;
+ irqe.level = 1;
+ irqe.shorthand = 0;
#ifdef CONFIG_X86
/* Always delivery PIT interrupt to vcpu 0 */
if (irq == 0) {
- entry.fields.dest_mode = 0; /* Physical mode. */
- entry.fields.dest_id = ioapic->kvm->vcpus[0]->vcpu_id;
+ irqe.dest_mode = 0; /* Physical mode. */
+ irqe.dest_id = ioapic->kvm->vcpus[0]->vcpu_id;
}
#endif
- return ioapic_deliver_entry(ioapic->kvm, &entry);
+ return kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe);
}
int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level)
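
A detail worth calling out in the ioapic.c hunk above: the redirection-table entry stores delivery mode as a raw 3-bit field, while the shared delivery code compares against the local-APIC ICR encoding, where the same field sits at bits 10:8; the "<< 8" in ioapic_deliver() aligns the two. A minimal sketch, assuming the usual x86 constants from <asm/apicdef.h> and virt/kvm/ioapic.h:

/*
 * Why "irqe.delivery_mode = entry->fields.delivery_mode << 8" works.
 * The constants below are the standard definitions, not added by this patch.
 */
#define IOAPIC_FIXED		0x0	/* redirection entry, bits 10:8 = 000 */
#define IOAPIC_LOWEST_PRIORITY	0x1	/* redirection entry, bits 10:8 = 001 */
#define APIC_DM_FIXED		0x00000	/* ICR, bits 10:8 = 000 */
#define APIC_DM_LOWEST		0x00100	/* ICR, bits 10:8 = 001 */

/*
 * (IOAPIC_LOWEST_PRIORITY << 8) == APIC_DM_LOWEST, and likewise for the
 * other delivery modes, so kvm_is_dm_lowest_prio() in the irq_comm.c hunk
 * below can compare irq->delivery_mode directly against APIC_DM_LOWEST.
 */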
diff --git a/virt/kvm/ioapic.h b/virt/kvm/ioapic.h
index e7bc92d..7080b71 100644
--- a/virt/kvm/ioapic.h
+++ b/virt/kvm/ioapic.h
@@ -71,8 +71,6 @@ void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode);
int kvm_ioapic_init(struct kvm *kvm);
int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level);
void kvm_ioapic_reset(struct kvm_ioapic *ioapic);
-void kvm_get_intr_delivery_bitmask(struct kvm *kvm, struct kvm_lapic *src,
- int dest_id, int dest_mode, bool low_prio, int short_hand,
- unsigned long *deliver_bitmask);
-int ioapic_deliver_entry(struct kvm *kvm, union kvm_ioapic_redirect_entry *e);
+int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
+ struct kvm_lapic_irq *irq);
#endif
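
For orientation, a hypothetical caller of the consolidated helper declared above. The return convention is read off the irq_comm.c hunk below: -1 when no vcpu matched the destination, otherwise the accumulated result of kvm_apic_set_irq(), which appears to be 0 when every matching LAPIC coalesced the interrupt and positive when it was injected. example_deliver() is illustrative only, not part of the patch:

/* Hypothetical caller; the error handling is only a sketch. */
static int example_deliver(struct kvm *kvm, struct kvm_lapic_irq *irq)
{
	int r = kvm_irq_delivery_to_apic(kvm, NULL, irq);

	if (r < 0)
		printk(KERN_DEBUG "kvm: no destination for vector %u\n",
		       irq->vector);

	return r;	/* 0 can be used to report interrupt coalescing */
}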
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index f5e059b..4fa1f60 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -22,6 +22,9 @@
#include <linux/kvm_host.h>
#include <asm/msidef.h>
+#ifdef CONFIG_IA64
+#include <asm/iosapic.h>
+#endif
#include "irq.h"
@@ -43,61 +46,71 @@ static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
return kvm_ioapic_set_irq(kvm->arch.vioapic, e->irqchip.pin, level);
}
-void kvm_get_intr_delivery_bitmask(struct kvm *kvm, struct kvm_lapic *src,
- int dest_id, int dest_mode, bool low_prio, int short_hand,
- unsigned long *deliver_bitmask)
+inline static bool kvm_is_dm_lowest_prio(struct kvm_lapic_irq *irq)
{
- int i, lowest = -1;
- struct kvm_vcpu *vcpu;
+#ifdef CONFIG_IA64
+ return irq->delivery_mode ==
+ (IOSAPIC_LOWEST_PRIORITY << IOSAPIC_DELIVERY_SHIFT);
+#else
+ return irq->delivery_mode == APIC_DM_LOWEST;
+#endif
+}
- if (dest_mode == 0 && dest_id == 0xff && low_prio)
+int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
+ struct kvm_lapic_irq *irq)
+{
+ int i, r = -1;
+ struct kvm_vcpu *vcpu, *lowest = NULL;
+
+ if (irq->dest_mode == 0 && irq->dest_id == 0xff &&
+ kvm_is_dm_lowest_prio(irq))
printk(KERN_INFO "kvm: apic: phys broadcast and lowest prio\n");
- bitmap_zero(deliver_bitmask, KVM_MAX_VCPUS);
for (i = 0; i < KVM_MAX_VCPUS; i++) {
vcpu = kvm->vcpus[i];
if (!vcpu || !kvm_apic_present(vcpu))
continue;
- if (!kvm_apic_match_dest(vcpu, src, short_hand, dest_id,
- dest_mode))
+ if (!kvm_apic_match_dest(vcpu, src, irq->shorthand,
+ irq->dest_id, irq->dest_mode))
continue;
- if (!low_prio) {
- __set_bit(i, deliver_bitmask);
+ if (!kvm_is_dm_lowest_prio(irq)) {
+ if (r < 0)
+ r = 0;
+ r += kvm_apic_set_irq(vcpu, irq);
} else {
- if (lowest < 0)
- lowest = i;
- if (kvm_apic_compare_prio(vcpu, kvm->vcpus[lowest]) < 0)
- lowest = i;
+ if (!lowest)
+ lowest = vcpu;
+ else if (kvm_apic_compare_prio(vcpu, lowest) < 0)
+ lowest = vcpu;
}
}
- if (lowest != -1)
- __set_bit(lowest, deliver_bitmask);
+ if (lowest)
+ r = kvm_apic_set_irq(lowest, irq);
+
+ return r;
}
static int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
struct kvm *kvm, int level)
{
- union kvm_ioapic_redirect_entry entry;
+ struct kvm_lapic_irq irq;
- entry.bits = 0;
- entry.fields.dest_id = (e->msi.address_lo &
+ irq.dest_id = (e->msi.address_lo &
MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
- entry.fields.vector = (e->msi.data &
+ irq.vector = (e->msi.data &
MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
- entry.fields.dest_mode = test_bit(MSI_ADDR_DEST_MODE_SHIFT,
- (unsigned long *)&e->msi.address_lo);
- entry.fields.trig_mode = test_bit(MSI_DATA_TRIGGER_SHIFT,
- (unsigned long *)&e->msi.data);
- entry.fields.delivery_mode = test_bit(
- MSI_DATA_DELIVERY_MODE_SHIFT,
- (unsigned long *)&e->msi.data);
+ irq.dest_mode = (1 << MSI_ADDR_DEST_MODE_SHIFT) & e->msi.address_lo;
+ irq.trig_mode = (1 << MSI_DATA_TRIGGER_SHIFT) & e->msi.data;
+ irq.delivery_mode = e->msi.data & 0x700;
+ irq.level = 1;
+ irq.shorthand = 0;
/* TODO Deal with RH bit of MSI message address */
- return ioapic_deliver_entry(kvm, &entry);
+ return kvm_irq_delivery_to_apic(kvm, NULL, &irq);
}
/* This should be called with the kvm->lock mutex held
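
The kvm_set_msi() hunk above decodes its fields straight out of the MSI address/data pair. For reference, a sketch of the standard xAPIC MSI message layout it is pulling apart (bit positions per the usual x86 definitions; the struct and helper names are made up for the example). Note that kvm_set_msi() keeps delivery mode in its ICR position (data & 0x700) rather than shifting it down, so it can be compared directly against APIC_DM_*:

/* Illustrative only -- not part of the patch. */
struct example_msi_fields {
	u32 dest_id;		/* address_lo bits 19:12 */
	u32 dest_mode;		/* address_lo bit 2: 0 = physical, 1 = logical */
	u32 vector;		/* data bits 7:0 */
	u32 delivery_mode;	/* data bits 10:8: 0 = fixed, 1 = lowest prio */
	u32 trig_mode;		/* data bit 15: 0 = edge, 1 = level */
};

static struct example_msi_fields example_msi_decode(u32 address_lo, u32 data)
{
	struct example_msi_fields f = {
		.dest_id	= (address_lo >> 12) & 0xff,
		.dest_mode	= (address_lo >> 2) & 0x1,
		.vector		= data & 0xff,
		.delivery_mode	= (data >> 8) & 0x7,
		.trig_mode	= (data >> 15) & 0x1,
	};
	return f;
}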