author     Christoffer Dall <christoffer.dall@linaro.org>   2014-09-18 18:15:32 -0700
committer  Christoffer Dall <christoffer.dall@linaro.org>   2014-09-18 18:15:32 -0700
commit     a875dafcf9b6b266c855e1f9b0aa060ef585d38a (patch)
tree       1903cb0a39ac1cade1940ccb559591cddf3660a0 /virt/kvm
parent     0ba09511ddc3ff0b462f37b4fe4b9c4dccc054ec (diff)
parent     f51770ed465e6eb41da7fa16fd92eb67069600cf (diff)
Merge remote-tracking branch 'kvm/next' into queue
Conflicts:
	arch/arm64/include/asm/kvm_host.h
	virt/kvm/arm/vgic.c
Diffstat (limited to 'virt/kvm')
-rw-r--r--  virt/kvm/arm/vgic.c    157
-rw-r--r--  virt/kvm/ioapic.c       46
-rw-r--r--  virt/kvm/ioapic.h        2
-rw-r--r--  virt/kvm/kvm_main.c    101
-rw-r--r--  virt/kvm/vfio.c         22
5 files changed, 195 insertions, 133 deletions
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index efe6eee..eeb23b3 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -1522,83 +1522,6 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
return 0;
}
-static void vgic_init_maintenance_interrupt(void *info)
-{
- enable_percpu_irq(vgic->maint_irq, 0);
-}
-
-static int vgic_cpu_notify(struct notifier_block *self,
- unsigned long action, void *cpu)
-{
- switch (action) {
- case CPU_STARTING:
- case CPU_STARTING_FROZEN:
- vgic_init_maintenance_interrupt(NULL);
- break;
- case CPU_DYING:
- case CPU_DYING_FROZEN:
- disable_percpu_irq(vgic->maint_irq);
- break;
- }
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block vgic_cpu_nb = {
- .notifier_call = vgic_cpu_notify,
-};
-
-static const struct of_device_id vgic_ids[] = {
- { .compatible = "arm,cortex-a15-gic", .data = vgic_v2_probe, },
- { .compatible = "arm,gic-v3", .data = vgic_v3_probe, },
- {},
-};
-
-int kvm_vgic_hyp_init(void)
-{
- const struct of_device_id *matched_id;
- const int (*vgic_probe)(struct device_node *,const struct vgic_ops **,
- const struct vgic_params **);
- struct device_node *vgic_node;
- int ret;
-
- vgic_node = of_find_matching_node_and_match(NULL,
- vgic_ids, &matched_id);
- if (!vgic_node) {
- kvm_err("error: no compatible GIC node found\n");
- return -ENODEV;
- }
-
- vgic_probe = matched_id->data;
- ret = vgic_probe(vgic_node, &vgic_ops, &vgic);
- if (ret)
- return ret;
-
- ret = request_percpu_irq(vgic->maint_irq, vgic_maintenance_handler,
- "vgic", kvm_get_running_vcpus());
- if (ret) {
- kvm_err("Cannot register interrupt %d\n", vgic->maint_irq);
- return ret;
- }
-
- ret = __register_cpu_notifier(&vgic_cpu_nb);
- if (ret) {
- kvm_err("Cannot register vgic CPU notifier\n");
- goto out_free_irq;
- }
-
- /* Callback into for arch code for setup */
- vgic_arch_setup(vgic);
-
- on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
-
- return 0;
-
-out_free_irq:
- free_percpu_irq(vgic->maint_irq, kvm_get_running_vcpus());
- return ret;
-}
-
/**
* kvm_vgic_init - Initialize global VGIC state before running any VCPUs
* @kvm: pointer to the kvm struct
@@ -2062,7 +1985,7 @@ static int vgic_create(struct kvm_device *dev, u32 type)
return kvm_vgic_create(dev->kvm);
}
-struct kvm_device_ops kvm_arm_vgic_v2_ops = {
+static struct kvm_device_ops kvm_arm_vgic_v2_ops = {
.name = "kvm-arm-vgic",
.create = vgic_create,
.destroy = vgic_destroy,
@@ -2070,3 +1993,81 @@ struct kvm_device_ops kvm_arm_vgic_v2_ops = {
.get_attr = vgic_get_attr,
.has_attr = vgic_has_attr,
};
+
+static void vgic_init_maintenance_interrupt(void *info)
+{
+ enable_percpu_irq(vgic->maint_irq, 0);
+}
+
+static int vgic_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *cpu)
+{
+ switch (action) {
+ case CPU_STARTING:
+ case CPU_STARTING_FROZEN:
+ vgic_init_maintenance_interrupt(NULL);
+ break;
+ case CPU_DYING:
+ case CPU_DYING_FROZEN:
+ disable_percpu_irq(vgic->maint_irq);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block vgic_cpu_nb = {
+ .notifier_call = vgic_cpu_notify,
+};
+
+static const struct of_device_id vgic_ids[] = {
+ { .compatible = "arm,cortex-a15-gic", .data = vgic_v2_probe, },
+ { .compatible = "arm,gic-v3", .data = vgic_v3_probe, },
+ {},
+};
+
+int kvm_vgic_hyp_init(void)
+{
+ const struct of_device_id *matched_id;
+ const int (*vgic_probe)(struct device_node *,const struct vgic_ops **,
+ const struct vgic_params **);
+ struct device_node *vgic_node;
+ int ret;
+
+ vgic_node = of_find_matching_node_and_match(NULL,
+ vgic_ids, &matched_id);
+ if (!vgic_node) {
+ kvm_err("error: no compatible GIC node found\n");
+ return -ENODEV;
+ }
+
+ vgic_probe = matched_id->data;
+ ret = vgic_probe(vgic_node, &vgic_ops, &vgic);
+ if (ret)
+ return ret;
+
+ ret = request_percpu_irq(vgic->maint_irq, vgic_maintenance_handler,
+ "vgic", kvm_get_running_vcpus());
+ if (ret) {
+ kvm_err("Cannot register interrupt %d\n", vgic->maint_irq);
+ return ret;
+ }
+
+ ret = __register_cpu_notifier(&vgic_cpu_nb);
+ if (ret) {
+ kvm_err("Cannot register vgic CPU notifier\n");
+ goto out_free_irq;
+ }
+
+ /* Callback into for arch code for setup */
+ vgic_arch_setup(vgic);
+
+ on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
+
+ return kvm_register_device_ops(&kvm_arm_vgic_v2_ops,
+ KVM_DEV_TYPE_ARM_VGIC_V2);
+
+out_free_irq:
+ free_percpu_irq(vgic->maint_irq, kvm_get_running_vcpus());
+ return ret;
+}
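The vgic hunk above moves device-ops registration into kvm_vgic_hyp_init(), which now returns the result of kvm_register_device_ops() for KVM_DEV_TYPE_ARM_VGIC_V2 instead of relying on a hard-wired case in kvm_ioctl_create_device(). For context, a minimal userspace sketch of the ioctl path this serves is shown below; the VM-fd setup is assumed, and only the KVM_CREATE_DEVICE call itself reflects the documented KVM API.

/* Sketch only: create a vgic-v2 device on an existing KVM VM fd. */
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int create_vgic_v2(int vm_fd)
{
	struct kvm_create_device dev = {
		.type  = KVM_DEV_TYPE_ARM_VGIC_V2,
		.flags = 0,	/* or KVM_CREATE_DEVICE_TEST to probe support only */
	};

	/* Fails with ENODEV unless the ops were registered at hyp init time. */
	if (ioctl(vm_fd, KVM_CREATE_DEVICE, &dev) < 0) {
		perror("KVM_CREATE_DEVICE");
		return -1;
	}
	return dev.fd;	/* anonymous fd representing the new device */
}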
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index e8ce34c..0ba4057 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -405,6 +405,26 @@ void kvm_ioapic_clear_all(struct kvm_ioapic *ioapic, int irq_source_id)
spin_unlock(&ioapic->lock);
}
+static void kvm_ioapic_eoi_inject_work(struct work_struct *work)
+{
+ int i;
+ struct kvm_ioapic *ioapic = container_of(work, struct kvm_ioapic,
+ eoi_inject.work);
+ spin_lock(&ioapic->lock);
+ for (i = 0; i < IOAPIC_NUM_PINS; i++) {
+ union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];
+
+ if (ent->fields.trig_mode != IOAPIC_LEVEL_TRIG)
+ continue;
+
+ if (ioapic->irr & (1 << i) && !ent->fields.remote_irr)
+ ioapic_service(ioapic, i, false);
+ }
+ spin_unlock(&ioapic->lock);
+}
+
+#define IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT 10000
+
static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
struct kvm_ioapic *ioapic, int vector, int trigger_mode)
{
@@ -435,8 +455,26 @@ static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG);
ent->fields.remote_irr = 0;
- if (ioapic->irr & (1 << i))
- ioapic_service(ioapic, i, false);
+ if (!ent->fields.mask && (ioapic->irr & (1 << i))) {
+ ++ioapic->irq_eoi[i];
+ if (ioapic->irq_eoi[i] == IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT) {
+ /*
+ * Real hardware does not deliver the interrupt
+ * immediately during eoi broadcast, and this
+ * lets a buggy guest make slow progress
+ * even if it does not correctly handle a
+ * level-triggered interrupt. Emulate this
+ * behavior if we detect an interrupt storm.
+ */
+ schedule_delayed_work(&ioapic->eoi_inject, HZ / 100);
+ ioapic->irq_eoi[i] = 0;
+ trace_kvm_ioapic_delayed_eoi_inj(ent->bits);
+ } else {
+ ioapic_service(ioapic, i, false);
+ }
+ } else {
+ ioapic->irq_eoi[i] = 0;
+ }
}
}
@@ -565,12 +603,14 @@ static void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
{
int i;
+ cancel_delayed_work_sync(&ioapic->eoi_inject);
for (i = 0; i < IOAPIC_NUM_PINS; i++)
ioapic->redirtbl[i].fields.mask = 1;
ioapic->base_address = IOAPIC_DEFAULT_BASE_ADDRESS;
ioapic->ioregsel = 0;
ioapic->irr = 0;
ioapic->id = 0;
+ memset(ioapic->irq_eoi, 0x00, IOAPIC_NUM_PINS);
rtc_irq_eoi_tracking_reset(ioapic);
update_handled_vectors(ioapic);
}
@@ -589,6 +629,7 @@ int kvm_ioapic_init(struct kvm *kvm)
if (!ioapic)
return -ENOMEM;
spin_lock_init(&ioapic->lock);
+ INIT_DELAYED_WORK(&ioapic->eoi_inject, kvm_ioapic_eoi_inject_work);
kvm->arch.vioapic = ioapic;
kvm_ioapic_reset(ioapic);
kvm_iodevice_init(&ioapic->dev, &ioapic_mmio_ops);
@@ -609,6 +650,7 @@ void kvm_ioapic_destroy(struct kvm *kvm)
{
struct kvm_ioapic *ioapic = kvm->arch.vioapic;
+ cancel_delayed_work_sync(&ioapic->eoi_inject);
if (ioapic) {
kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &ioapic->dev);
kvm->arch.vioapic = NULL;
diff --git a/virt/kvm/ioapic.h b/virt/kvm/ioapic.h
index 90d43e9..e23b706 100644
--- a/virt/kvm/ioapic.h
+++ b/virt/kvm/ioapic.h
@@ -59,6 +59,8 @@ struct kvm_ioapic {
spinlock_t lock;
DECLARE_BITMAP(handled_vectors, 256);
struct rtc_status rtc_status;
+ struct delayed_work eoi_inject;
+ u32 irq_eoi[IOAPIC_NUM_PINS];
};
#ifdef DEBUG
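The ioapic hunks above add a per-pin EOI counter (irq_eoi) and a delayed-work item (eoi_inject): once IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT back-to-back EOIs are seen for a still-asserted, unmasked level-triggered pin, re-injection is deferred by roughly 10 ms (HZ / 100) instead of happening synchronously, which lets a guest that never deasserts the line still make slow progress. The standalone sketch below restates that throttling pattern; pin_state, reinject_now and reinject_deferred are illustrative names, not kernel symbols.

/* Illustrative throttling pattern, not the ioapic code itself. */
#define SUCCESSIVE_EOI_MAX	10000

struct pin_state {
	unsigned int storm_count;	/* consecutive EOIs with the line still asserted */
};

static void handle_level_eoi(struct pin_state *pin, int line_still_asserted,
			     void (*reinject_now)(void),
			     void (*reinject_deferred)(void))
{
	if (!line_still_asserted) {
		pin->storm_count = 0;		/* storm over: reset the counter */
		return;
	}

	if (++pin->storm_count == SUCCESSIVE_EOI_MAX) {
		pin->storm_count = 0;
		reinject_deferred();		/* e.g. via schedule_delayed_work(..., HZ / 100) */
	} else {
		reinject_now();			/* common case: service the pin immediately */
	}
}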
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 76c92a7..2782320 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -95,8 +95,6 @@ static int hardware_enable_all(void);
static void hardware_disable_all(void);
static void kvm_io_bus_destroy(struct kvm_io_bus *bus);
-static void update_memslots(struct kvm_memslots *slots,
- struct kvm_memory_slot *new, u64 last_generation);
static void kvm_release_pfn_dirty(pfn_t pfn);
static void mark_page_dirty_in_slot(struct kvm *kvm,
@@ -477,6 +475,13 @@ static struct kvm *kvm_create_vm(unsigned long type)
kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
if (!kvm->memslots)
goto out_err_no_srcu;
+
+ /*
+ * Init kvm generation close to the maximum to easily test the
+ * code of handling generation number wrap-around.
+ */
+ kvm->memslots->generation = -150;
+
kvm_init_memslots_id(kvm);
if (init_srcu_struct(&kvm->srcu))
goto out_err_no_srcu;
@@ -688,8 +693,7 @@ static void sort_memslots(struct kvm_memslots *slots)
}
static void update_memslots(struct kvm_memslots *slots,
- struct kvm_memory_slot *new,
- u64 last_generation)
+ struct kvm_memory_slot *new)
{
if (new) {
int id = new->id;
@@ -700,15 +704,13 @@ static void update_memslots(struct kvm_memslots *slots,
if (new->npages != npages)
sort_memslots(slots);
}
-
- slots->generation = last_generation + 1;
}
static int check_memory_region_flags(struct kvm_userspace_memory_region *mem)
{
u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES;
-#ifdef KVM_CAP_READONLY_MEM
+#ifdef __KVM_HAVE_READONLY_MEM
valid_flags |= KVM_MEM_READONLY;
#endif
@@ -723,10 +725,24 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
{
struct kvm_memslots *old_memslots = kvm->memslots;
- update_memslots(slots, new, kvm->memslots->generation);
+ /*
+ * Set the low bit in the generation, which disables SPTE caching
+ * until the end of synchronize_srcu_expedited.
+ */
+ WARN_ON(old_memslots->generation & 1);
+ slots->generation = old_memslots->generation + 1;
+
+ update_memslots(slots, new);
rcu_assign_pointer(kvm->memslots, slots);
synchronize_srcu_expedited(&kvm->srcu);
+ /*
+ * Increment the new memslot generation a second time. This prevents
+ * vm exits that race with memslot updates from caching a memslot
+ * generation that will (potentially) be valid forever.
+ */
+ slots->generation++;
+
kvm_arch_memslots_updated(kvm);
return old_memslots;
@@ -777,7 +793,6 @@ int __kvm_set_memory_region(struct kvm *kvm,
base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
npages = mem->memory_size >> PAGE_SHIFT;
- r = -EINVAL;
if (npages > KVM_MEM_MAX_NR_PAGES)
goto out;
@@ -791,7 +806,6 @@ int __kvm_set_memory_region(struct kvm *kvm,
new.npages = npages;
new.flags = mem->flags;
- r = -EINVAL;
if (npages) {
if (!old.npages)
change = KVM_MR_CREATE;
@@ -847,7 +861,6 @@ int __kvm_set_memory_region(struct kvm *kvm,
}
if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) {
- r = -ENOMEM;
slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
GFP_KERNEL);
if (!slots)
@@ -1776,8 +1789,7 @@ static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
bool eligible;
eligible = !vcpu->spin_loop.in_spin_loop ||
- (vcpu->spin_loop.in_spin_loop &&
- vcpu->spin_loop.dy_eligible);
+ vcpu->spin_loop.dy_eligible;
if (vcpu->spin_loop.in_spin_loop)
kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible);
@@ -2267,6 +2279,29 @@ struct kvm_device *kvm_device_from_filp(struct file *filp)
return filp->private_data;
}
+static struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = {
+#ifdef CONFIG_KVM_MPIC
+ [KVM_DEV_TYPE_FSL_MPIC_20] = &kvm_mpic_ops,
+ [KVM_DEV_TYPE_FSL_MPIC_42] = &kvm_mpic_ops,
+#endif
+
+#ifdef CONFIG_KVM_XICS
+ [KVM_DEV_TYPE_XICS] = &kvm_xics_ops,
+#endif
+};
+
+int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type)
+{
+ if (type >= ARRAY_SIZE(kvm_device_ops_table))
+ return -ENOSPC;
+
+ if (kvm_device_ops_table[type] != NULL)
+ return -EEXIST;
+
+ kvm_device_ops_table[type] = ops;
+ return 0;
+}
+
static int kvm_ioctl_create_device(struct kvm *kvm,
struct kvm_create_device *cd)
{
@@ -2275,36 +2310,12 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
bool test = cd->flags & KVM_CREATE_DEVICE_TEST;
int ret;
- switch (cd->type) {
-#ifdef CONFIG_KVM_MPIC
- case KVM_DEV_TYPE_FSL_MPIC_20:
- case KVM_DEV_TYPE_FSL_MPIC_42:
- ops = &kvm_mpic_ops;
- break;
-#endif
-#ifdef CONFIG_KVM_XICS
- case KVM_DEV_TYPE_XICS:
- ops = &kvm_xics_ops;
- break;
-#endif
-#ifdef CONFIG_KVM_VFIO
- case KVM_DEV_TYPE_VFIO:
- ops = &kvm_vfio_ops;
- break;
-#endif
-#ifdef CONFIG_KVM_ARM_VGIC
- case KVM_DEV_TYPE_ARM_VGIC_V2:
- ops = &kvm_arm_vgic_v2_ops;
- break;
-#endif
-#ifdef CONFIG_S390
- case KVM_DEV_TYPE_FLIC:
- ops = &kvm_flic_ops;
- break;
-#endif
- default:
+ if (cd->type >= ARRAY_SIZE(kvm_device_ops_table))
+ return -ENODEV;
+
+ ops = kvm_device_ops_table[cd->type];
+ if (ops == NULL)
return -ENODEV;
- }
if (test)
return 0;
@@ -2619,7 +2630,6 @@ static long kvm_dev_ioctl(struct file *filp,
switch (ioctl) {
case KVM_GET_API_VERSION:
- r = -EINVAL;
if (arg)
goto out;
r = KVM_API_VERSION;
@@ -2631,7 +2641,6 @@ static long kvm_dev_ioctl(struct file *filp,
r = kvm_vm_ioctl_check_extension_generic(NULL, arg);
break;
case KVM_GET_VCPU_MMAP_SIZE:
- r = -EINVAL;
if (arg)
goto out;
r = PAGE_SIZE; /* struct kvm_run */
@@ -2676,7 +2685,7 @@ static void hardware_enable_nolock(void *junk)
cpumask_set_cpu(cpu, cpus_hardware_enabled);
- r = kvm_arch_hardware_enable(NULL);
+ r = kvm_arch_hardware_enable();
if (r) {
cpumask_clear_cpu(cpu, cpus_hardware_enabled);
@@ -2701,7 +2710,7 @@ static void hardware_disable_nolock(void *junk)
if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
return;
cpumask_clear_cpu(cpu, cpus_hardware_enabled);
- kvm_arch_hardware_disable(NULL);
+ kvm_arch_hardware_disable();
}
static void hardware_disable(void)
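The kvm_main.c changes above split the generation bump: install_new_memslots() sets the low bit of the generation before the SRCU grace period and increments again afterwards, so an odd value always means a memslot update is in flight and nothing observed during that window should be cached. A hedged sketch of the check a generation-keyed cache could perform is shown below; the gen_cache structure is illustrative, and only the odd/even convention comes from the diff.

#include <stdbool.h>
#include <stdint.h>

/* Illustrative generation-keyed cache check (not kernel code). */
struct gen_cache {
	uint64_t generation;		/* generation the entry was populated under */
	/* ... cached lookup result ... */
};

static bool cache_entry_usable(const struct gen_cache *cache, uint64_t current_gen)
{
	if (current_gen & 1)		/* low bit set: memslots are being updated */
		return false;
	return cache->generation == current_gen;	/* stale if the slots changed */
}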
diff --git a/virt/kvm/vfio.c b/virt/kvm/vfio.c
index ba1a93f..bb11b36 100644
--- a/virt/kvm/vfio.c
+++ b/virt/kvm/vfio.c
@@ -246,6 +246,16 @@ static void kvm_vfio_destroy(struct kvm_device *dev)
kfree(dev); /* alloc by kvm_ioctl_create_device, free by .destroy */
}
+static int kvm_vfio_create(struct kvm_device *dev, u32 type);
+
+static struct kvm_device_ops kvm_vfio_ops = {
+ .name = "kvm-vfio",
+ .create = kvm_vfio_create,
+ .destroy = kvm_vfio_destroy,
+ .set_attr = kvm_vfio_set_attr,
+ .has_attr = kvm_vfio_has_attr,
+};
+
static int kvm_vfio_create(struct kvm_device *dev, u32 type)
{
struct kvm_device *tmp;
@@ -268,10 +278,8 @@ static int kvm_vfio_create(struct kvm_device *dev, u32 type)
return 0;
}
-struct kvm_device_ops kvm_vfio_ops = {
- .name = "kvm-vfio",
- .create = kvm_vfio_create,
- .destroy = kvm_vfio_destroy,
- .set_attr = kvm_vfio_set_attr,
- .has_attr = kvm_vfio_has_attr,
-};
+static int __init kvm_vfio_ops_init(void)
+{
+ return kvm_register_device_ops(&kvm_vfio_ops, KVM_DEV_TYPE_VFIO);
+}
+module_init(kvm_vfio_ops_init);
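With the vfio conversion above, every in-kernel device backend follows the same shape: a static kvm_device_ops table plus an initcall that hands it to kvm_register_device_ops(). A minimal sketch of that pattern for a hypothetical backend follows; kvm_foo_ops, kvm_foo_create and KVM_DEV_TYPE_FOO are placeholders, while kvm_register_device_ops() and its -ENOSPC/-EEXIST return values are taken from the kvm_main.c hunk above.

/* Hypothetical backend, sketched against the API introduced in this diff. */
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>

static int kvm_foo_create(struct kvm_device *dev, u32 type)
{
	/* Allocate per-device state and hang it off dev->private here. */
	return 0;
}

static void kvm_foo_destroy(struct kvm_device *dev)
{
	kfree(dev);	/* allocated by kvm_ioctl_create_device(), freed by .destroy */
}

static struct kvm_device_ops kvm_foo_ops = {
	.name    = "kvm-foo",
	.create  = kvm_foo_create,
	.destroy = kvm_foo_destroy,
};

static int __init kvm_foo_ops_init(void)
{
	/* KVM_DEV_TYPE_FOO stands in for a real uapi device type. */
	return kvm_register_device_ops(&kvm_foo_ops, KVM_DEV_TYPE_FOO);
}
module_init(kvm_foo_ops_init);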