Diffstat (limited to 'virt')
-rw-r--r--  virt/kvm/arm/arm.c               |  5
-rw-r--r--  virt/kvm/arm/mmu.c               | 44
-rw-r--r--  virt/kvm/arm/pmu.c               | 43
-rw-r--r--  virt/kvm/arm/vgic/vgic-debug.c   |  4
-rw-r--r--  virt/kvm/arm/vgic/vgic-init.c    |  3
-rw-r--r--  virt/kvm/arm/vgic/vgic-its.c     | 11
-rw-r--r--  virt/kvm/arm/vgic/vgic-mmio-v2.c | 47
-rw-r--r--  virt/kvm/arm/vgic/vgic-mmio-v3.c |  4
-rw-r--r--  virt/kvm/arm/vgic/vgic.h         | 16
-rw-r--r--  virt/kvm/kvm_main.c              | 95
10 files changed, 127 insertions(+), 145 deletions(-)
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index a39a1e1..b9f68e4 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -416,6 +416,11 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
 		&& !v->arch.power_off && !v->arch.pause);
 }
 
+bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
+{
+	return vcpu_mode_priv(vcpu);
+}
+
 /* Just ensure a guest exit from a particular CPU */
 static void exit_vm_noop(void *info)
 {
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index 0e1fc75..b36945d 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -1454,25 +1454,6 @@ out:
 	kvm_set_pfn_accessed(pfn);
 }
 
-static bool is_abort_sea(unsigned long fault_status)
-{
-	switch (fault_status) {
-	case FSC_SEA:
-	case FSC_SEA_TTW0:
-	case FSC_SEA_TTW1:
-	case FSC_SEA_TTW2:
-	case FSC_SEA_TTW3:
-	case FSC_SECC:
-	case FSC_SECC_TTW0:
-	case FSC_SECC_TTW1:
-	case FSC_SECC_TTW2:
-	case FSC_SECC_TTW3:
-		return true;
-	default:
-		return false;
-	}
-}
-
 /**
  * kvm_handle_guest_abort - handles all 2nd stage aborts
  * @vcpu:	the VCPU pointer
@@ -1498,20 +1479,21 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
 
 	fault_status = kvm_vcpu_trap_get_fault_type(vcpu);
 	fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
+	is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
 
-	/*
-	 * The host kernel will handle the synchronous external abort. There
-	 * is no need to pass the error into the guest.
-	 */
-	if (is_abort_sea(fault_status)) {
+	/* Synchronous External Abort? */
+	if (kvm_vcpu_dabt_isextabt(vcpu)) {
+		/*
+		 * For RAS the host kernel may handle this abort.
+		 * There is no need to pass the error into the guest.
+		 */
 		if (!handle_guest_sea(fault_ipa, kvm_vcpu_get_hsr(vcpu)))
 			return 1;
-	}
 
-	is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
-	if (unlikely(!is_iabt && kvm_vcpu_dabt_isextabt(vcpu))) {
-		kvm_inject_vabt(vcpu);
-		return 1;
+		if (unlikely(!is_iabt)) {
+			kvm_inject_vabt(vcpu);
+			return 1;
+		}
 	}
 
 	trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
@@ -1718,12 +1700,16 @@ static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *
 
 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
 {
+	if (!kvm->arch.pgd)
+		return 0;
 	trace_kvm_age_hva(start, end);
 	return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
 }
 
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
 {
+	if (!kvm->arch.pgd)
+		return 0;
 	trace_kvm_test_age_hva(hva);
 	return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL);
 }
diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c
index fc8a723..8a9c423 100644
--- a/virt/kvm/arm/pmu.c
+++ b/virt/kvm/arm/pmu.c
@@ -203,11 +203,15 @@ static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
 	return reg;
 }
 
-static void kvm_pmu_check_overflow(struct kvm_vcpu *vcpu)
+static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
 {
 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
-	bool overflow = !!kvm_pmu_overflow_status(vcpu);
+	bool overflow;
+
+	if (!kvm_arm_pmu_v3_ready(vcpu))
+		return;
 
+	overflow = !!kvm_pmu_overflow_status(vcpu);
 	if (pmu->irq_level == overflow)
 		return;
 
@@ -215,33 +219,11 @@ static void kvm_pmu_check_overflow(struct kvm_vcpu *vcpu)
 	if (likely(irqchip_in_kernel(vcpu->kvm))) {
 		int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
-					      pmu->irq_num, overflow,
-					      &vcpu->arch.pmu);
+					      pmu->irq_num, overflow, pmu);
 		WARN_ON(ret);
 	}
 }
 
-/**
- * kvm_pmu_overflow_set - set PMU overflow interrupt
- * @vcpu: The vcpu pointer
- * @val: the value guest writes to PMOVSSET register
- */
-void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val)
-{
-	if (val == 0)
-		return;
-
-	vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= val;
-	kvm_pmu_check_overflow(vcpu);
-}
-
-static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
-{
-	if (!kvm_arm_pmu_v3_ready(vcpu))
-		return;
-
-	kvm_pmu_check_overflow(vcpu);
-}
-
 bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
 {
 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
@@ -303,7 +285,7 @@ static inline struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc)
 }
 
 /**
- * When perf event overflows, call kvm_pmu_overflow_set to set overflow status.
+ * When the perf event overflows, set the overflow status and inform the vcpu.
  */
 static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
 				  struct perf_sample_data *data,
@@ -313,7 +295,12 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
 	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
 	int idx = pmc->idx;
 
-	kvm_pmu_overflow_set(vcpu, BIT(idx));
+	vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);
+
+	if (kvm_pmu_overflow_status(vcpu)) {
+		kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
+		kvm_vcpu_kick(vcpu);
+	}
 }
 
 /**
@@ -341,7 +328,7 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
 			reg = lower_32_bits(reg);
 			vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;
 			if (!reg)
-				kvm_pmu_overflow_set(vcpu, BIT(i));
+				vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
 		}
 	}
 }
diff --git a/virt/kvm/arm/vgic/vgic-debug.c b/virt/kvm/arm/vgic/vgic-debug.c
index 7072ab7..10b3817 100644
--- a/virt/kvm/arm/vgic/vgic-debug.c
+++ b/virt/kvm/arm/vgic/vgic-debug.c
@@ -234,7 +234,7 @@ static int vgic_debug_show(struct seq_file *s, void *v)
 	return 0;
 }
 
-static struct seq_operations vgic_debug_seq_ops = {
+static const struct seq_operations vgic_debug_seq_ops = {
 	.start = vgic_debug_start,
 	.next  = vgic_debug_next,
 	.stop  = vgic_debug_stop,
@@ -255,7 +255,7 @@ static int debug_open(struct inode *inode, struct file *file)
 	return ret;
 };
 
-static struct file_operations vgic_debug_fops = {
+static const struct file_operations vgic_debug_fops = {
 	.owner = THIS_MODULE,
 	.open = debug_open,
 	.read = seq_read,
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
index 3a0b899..5801261 100644
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -285,9 +285,6 @@ int vgic_init(struct kvm *kvm)
 	if (ret)
 		goto out;
 
-	if (vgic_has_its(kvm))
-		dist->msis_require_devid = true;
-
 	kvm_for_each_vcpu(i, vcpu, kvm)
 		kvm_vgic_vcpu_enable(vcpu);
 
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index 2dff288..f51c1e1 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -144,7 +144,6 @@ struct its_ite {
 
 	struct vgic_irq *irq;
 	struct its_collection *collection;
-	u32 lpi;
 	u32 event_id;
 };
 
@@ -813,7 +812,7 @@ static void vgic_its_free_collection(struct vgic_its *its, u32 coll_id)
 /* Must be called with its_lock mutex held */
 static struct its_ite *vgic_its_alloc_ite(struct its_device *device,
 					  struct its_collection *collection,
-					  u32 lpi_id, u32 event_id)
+					  u32 event_id)
 {
 	struct its_ite *ite;
 
@@ -823,7 +822,6 @@ static struct its_ite *vgic_its_alloc_ite(struct its_device *device,
 
 	ite->event_id	= event_id;
 	ite->collection	= collection;
-	ite->lpi = lpi_id;
 
 	list_add_tail(&ite->ite_list, &device->itt_head);
 	return ite;
@@ -873,7 +871,7 @@ static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its,
 		new_coll = collection;
 	}
 
-	ite = vgic_its_alloc_ite(device, collection, lpi_nr, event_id);
+	ite = vgic_its_alloc_ite(device, collection, event_id);
 	if (IS_ERR(ite)) {
 		if (new_coll)
 			vgic_its_free_collection(its, coll_id);
@@ -1598,6 +1596,7 @@ static int vgic_its_create(struct kvm_device *dev, u32 type)
 	INIT_LIST_HEAD(&its->device_list);
 	INIT_LIST_HEAD(&its->collection_list);
 
+	dev->kvm->arch.vgic.msis_require_devid = true;
 	dev->kvm->arch.vgic.has_its = true;
 	its->enabled = false;
 	its->dev = dev;
@@ -1847,7 +1846,7 @@ static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
 
 	next_offset = compute_next_eventid_offset(&dev->itt_head, ite);
 	val = ((u64)next_offset << KVM_ITS_ITE_NEXT_SHIFT) |
-	       ((u64)ite->lpi << KVM_ITS_ITE_PINTID_SHIFT) |
+	       ((u64)ite->irq->intid << KVM_ITS_ITE_PINTID_SHIFT) |
 		ite->collection->collection_id;
 	val = cpu_to_le64(val);
 	return kvm_write_guest(kvm, gpa, &val, ite_esz);
@@ -1894,7 +1893,7 @@ static int vgic_its_restore_ite(struct vgic_its *its, u32 event_id,
 	if (!collection)
 		return -EINVAL;
 
-	ite = vgic_its_alloc_ite(dev, collection, lpi_id, event_id);
+	ite = vgic_its_alloc_ite(dev, collection, event_id);
 	if (IS_ERR(ite))
 		return PTR_ERR(ite);
 
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c
index 37522e6..b3d4a10 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v2.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c
@@ -303,6 +303,51 @@ static void vgic_mmio_write_vcpuif(struct kvm_vcpu *vcpu,
 	vgic_set_vmcr(vcpu, &vmcr);
 }
 
+static unsigned long vgic_mmio_read_apr(struct kvm_vcpu *vcpu,
+					gpa_t addr, unsigned int len)
+{
+	int n; /* which APRn is this */
+
+	n = (addr >> 2) & 0x3;
+
+	if (kvm_vgic_global_state.type == VGIC_V2) {
+		/* GICv2 hardware systems support max. 32 groups */
+		if (n != 0)
+			return 0;
+		return vcpu->arch.vgic_cpu.vgic_v2.vgic_apr;
+	} else {
+		struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
+
+		if (n > vgic_v3_max_apr_idx(vcpu))
+			return 0;
+		/* GICv3 only uses ICH_AP1Rn for memory mapped (GICv2) guests */
+		return vgicv3->vgic_ap1r[n];
+	}
+}
+
+static void vgic_mmio_write_apr(struct kvm_vcpu *vcpu,
+				gpa_t addr, unsigned int len,
+				unsigned long val)
+{
+	int n; /* which APRn is this */
+
+	n = (addr >> 2) & 0x3;
+
+	if (kvm_vgic_global_state.type == VGIC_V2) {
+		/* GICv2 hardware systems support max. 32 groups */
+		if (n != 0)
+			return;
+		vcpu->arch.vgic_cpu.vgic_v2.vgic_apr = val;
+	} else {
+		struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
+
+		if (n > vgic_v3_max_apr_idx(vcpu))
+			return;
+		/* GICv3 only uses ICH_AP1Rn for memory mapped (GICv2) guests */
+		vgicv3->vgic_ap1r[n] = val;
+	}
+}
+
 static const struct vgic_register_region vgic_v2_dist_registers[] = {
 	REGISTER_DESC_WITH_LENGTH(GIC_DIST_CTRL,
 		vgic_mmio_read_v2_misc, vgic_mmio_write_v2_misc, 12,
@@ -364,7 +409,7 @@ static const struct vgic_register_region vgic_v2_cpu_registers[] = {
 		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
 		VGIC_ACCESS_32bit),
 	REGISTER_DESC_WITH_LENGTH(GIC_CPU_ACTIVEPRIO,
-		vgic_mmio_read_raz, vgic_mmio_write_wi, 16,
+		vgic_mmio_read_apr, vgic_mmio_write_apr, 16,
 		VGIC_ACCESS_32bit),
 	REGISTER_DESC_WITH_LENGTH(GIC_CPU_IDENT,
 		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c
index 714fa39..408ef06 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v3.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c
@@ -369,7 +369,7 @@ static void vgic_mmio_write_propbase(struct kvm_vcpu *vcpu,
 		return;
 
 	do {
-		old_propbaser = dist->propbaser;
+		old_propbaser = READ_ONCE(dist->propbaser);
 		propbaser = old_propbaser;
 		propbaser = update_64bit_reg(propbaser, addr & 4, len, val);
 		propbaser = vgic_sanitise_propbaser(propbaser);
@@ -397,7 +397,7 @@ static void vgic_mmio_write_pendbase(struct kvm_vcpu *vcpu,
 		return;
 
 	do {
-		old_pendbaser = vgic_cpu->pendbaser;
+		old_pendbaser = READ_ONCE(vgic_cpu->pendbaser);
 		pendbaser = old_pendbaser;
 		pendbaser = update_64bit_reg(pendbaser, addr & 4, len, val);
 		pendbaser = vgic_sanitise_pendbaser(pendbaser);
diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h
index bba7fa2..bf9ceab 100644
--- a/virt/kvm/arm/vgic/vgic.h
+++ b/virt/kvm/arm/vgic/vgic.h
@@ -220,4 +220,20 @@ int vgic_debug_destroy(struct kvm *kvm);
 bool lock_all_vcpus(struct kvm *kvm);
 void unlock_all_vcpus(struct kvm *kvm);
 
+static inline int vgic_v3_max_apr_idx(struct kvm_vcpu *vcpu)
+{
+	struct vgic_cpu *cpu_if = &vcpu->arch.vgic_cpu;
+
+	/*
+	 * num_pri_bits are initialized with HW supported values.
+	 * We can rely safely on num_pri_bits even if VM has not
+	 * restored ICC_CTLR_EL1 before restoring APnR registers.
+	 */
+	switch (cpu_if->num_pri_bits) {
+	case 7: return 3;
+	case 6: return 1;
+	default: return 0;
+	}
+}
+
 #endif
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 82987d4..6ed1c20 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -322,47 +322,6 @@ static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
 	return container_of(mn, struct kvm, mmu_notifier);
 }
 
-static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
-					     struct mm_struct *mm,
-					     unsigned long address)
-{
-	struct kvm *kvm = mmu_notifier_to_kvm(mn);
-	int need_tlb_flush, idx;
-
-	/*
-	 * When ->invalidate_page runs, the linux pte has been zapped
-	 * already but the page is still allocated until
-	 * ->invalidate_page returns. So if we increase the sequence
-	 * here the kvm page fault will notice if the spte can't be
-	 * established because the page is going to be freed. If
-	 * instead the kvm page fault establishes the spte before
-	 * ->invalidate_page runs, kvm_unmap_hva will release it
-	 * before returning.
-	 *
-	 * The sequence increase only need to be seen at spin_unlock
-	 * time, and not at spin_lock time.
-	 *
-	 * Increasing the sequence after the spin_unlock would be
-	 * unsafe because the kvm page fault could then establish the
-	 * pte after kvm_unmap_hva returned, without noticing the page
-	 * is going to be freed.
-	 */
-	idx = srcu_read_lock(&kvm->srcu);
-	spin_lock(&kvm->mmu_lock);
-
-	kvm->mmu_notifier_seq++;
-	need_tlb_flush = kvm_unmap_hva(kvm, address) | kvm->tlbs_dirty;
-	/* we've to flush the tlb before the pages can be freed */
-	if (need_tlb_flush)
-		kvm_flush_remote_tlbs(kvm);
-
-	spin_unlock(&kvm->mmu_lock);
-
-	kvm_arch_mmu_notifier_invalidate_page(kvm, address);
-
-	srcu_read_unlock(&kvm->srcu, idx);
-}
-
 static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
 					struct mm_struct *mm,
 					unsigned long address,
@@ -510,7 +469,6 @@ static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
 }
 
 static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
-	.invalidate_page	= kvm_mmu_notifier_invalidate_page,
 	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
 	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
 	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
@@ -717,10 +675,9 @@ out_err_no_srcu:
 	hardware_disable_all();
 out_err_no_disable:
 	for (i = 0; i < KVM_NR_BUSES; i++)
-		kfree(rcu_access_pointer(kvm->buses[i]));
+		kfree(kvm_get_bus(kvm, i));
 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
-		kvm_free_memslots(kvm,
-			rcu_dereference_protected(kvm->memslots[i], 1));
+		kvm_free_memslots(kvm, __kvm_memslots(kvm, i));
 	kvm_arch_free_vm(kvm);
 	mmdrop(current->mm);
 	return ERR_PTR(r);
@@ -754,9 +711,8 @@ static void kvm_destroy_vm(struct kvm *kvm)
 	spin_unlock(&kvm_lock);
 	kvm_free_irq_routing(kvm);
 	for (i = 0; i < KVM_NR_BUSES; i++) {
-		struct kvm_io_bus *bus;
+		struct kvm_io_bus *bus = kvm_get_bus(kvm, i);
 
-		bus = rcu_dereference_protected(kvm->buses[i], 1);
 		if (bus)
 			kvm_io_bus_destroy(bus);
 		kvm->buses[i] = NULL;
@@ -770,8 +726,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
 	kvm_arch_destroy_vm(kvm);
 	kvm_destroy_devices(kvm);
 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
-		kvm_free_memslots(kvm,
-			rcu_dereference_protected(kvm->memslots[i], 1));
+		kvm_free_memslots(kvm, __kvm_memslots(kvm, i));
 	cleanup_srcu_struct(&kvm->irq_srcu);
 	cleanup_srcu_struct(&kvm->srcu);
 	kvm_arch_free_vm(kvm);
@@ -1654,7 +1609,7 @@ int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
 			    struct page **pages, int nr_pages)
 {
 	unsigned long addr;
-	gfn_t entry;
+	gfn_t entry = 0;
 
 	addr = gfn_to_hva_many(slot, gfn, &entry);
 	if (kvm_is_error_hva(addr))
@@ -1973,6 +1928,7 @@ static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots,
 	 * verify that the entire region is valid here.
 	 */
 	while (start_gfn <= end_gfn) {
+		nr_pages_avail = 0;
 		ghc->memslot = __gfn_to_memslot(slots, start_gfn);
 		ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
 					   &nr_pages_avail);
@@ -2320,7 +2276,7 @@ static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
 #endif
 }
 
-void kvm_vcpu_on_spin(struct kvm_vcpu *me)
+void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
 {
 	struct kvm *kvm = me->kvm;
 	struct kvm_vcpu *vcpu;
@@ -2351,6 +2307,8 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me)
 				continue;
 			if (swait_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu))
 				continue;
+			if (yield_to_kernel_mode && !kvm_arch_vcpu_in_kernel(vcpu))
+				continue;
 			if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
 				continue;
 
@@ -3883,7 +3841,6 @@ static const struct file_operations *stat_fops[] = {
 static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
 {
 	struct kobj_uevent_env *env;
-	char *tmp, *pathbuf = NULL;
 	unsigned long long created, active;
 
 	if (!kvm_dev.this_device || !kvm)
@@ -3907,38 +3864,28 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
 	add_uevent_var(env, "CREATED=%llu", created);
 	add_uevent_var(env, "COUNT=%llu", active);
 
-	if (type == KVM_EVENT_CREATE_VM)
+	if (type == KVM_EVENT_CREATE_VM) {
 		add_uevent_var(env, "EVENT=create");
-	else if (type == KVM_EVENT_DESTROY_VM)
+		kvm->userspace_pid = task_pid_nr(current);
+	} else if (type == KVM_EVENT_DESTROY_VM) {
 		add_uevent_var(env, "EVENT=destroy");
+	}
+	add_uevent_var(env, "PID=%d", kvm->userspace_pid);
 
 	if (kvm->debugfs_dentry) {
-		char p[ITOA_MAX_LEN];
-
-		snprintf(p, sizeof(p), "%s", kvm->debugfs_dentry->d_name.name);
-		tmp = strchrnul(p + 1, '-');
-		*tmp = '\0';
-		add_uevent_var(env, "PID=%s", p);
-		pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
-		if (pathbuf) {
-			/* sizeof counts the final '\0' */
-			int len = sizeof("STATS_PATH=") - 1;
-			const char *pvar = "STATS_PATH=";
-
-			tmp = dentry_path_raw(kvm->debugfs_dentry,
-					      pathbuf + len,
-					      PATH_MAX - len);
-			if (!IS_ERR(tmp)) {
-				memcpy(tmp - len, pvar, len);
-				env->envp[env->envp_idx++] = tmp - len;
-			}
+		char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL);
+
+		if (p) {
+			tmp = dentry_path_raw(kvm->debugfs_dentry, p, PATH_MAX);
+			if (!IS_ERR(tmp))
+				add_uevent_var(env, "STATS_PATH=%s", tmp);
+			kfree(p);
 		}
 	}
 	/* no need for checks, since we are adding at most only 5 keys */
 	env->envp[env->envp_idx++] = NULL;
 	kobject_uevent_env(&kvm_dev.this_device->kobj, KOBJ_CHANGE, env->envp);
 	kfree(env);
-	kfree(pathbuf);
 }
 
 static int kvm_init_debug(void)
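
Note on the APR hunks: the GIC_CPU_ACTIVEPRIO handlers index APRn with n = (addr >> 2) & 0x3 and bound it with vgic_v3_max_apr_idx(). The switch in vgic.h follows from the GICv3 rule that a CPU interface with P priority bits implements 2^(P-5) active-priority registers, so the highest valid index is 2^(P-5) - 1, reproducing the 7->3, 6->1, 5->0 mapping. A minimal user-space check of that arithmetic (max_apr_idx() is an illustrative stand-in, not the kernel helper):

#include <assert.h>

/* highest valid APRn index for a GICv3 with pri_bits priority bits */
static int max_apr_idx(int pri_bits)
{
	return (1 << (pri_bits - 5)) - 1;
}

int main(void)
{
	assert(max_apr_idx(7) == 3);	/* four 32-bit APRs, indices 0..3 */
	assert(max_apr_idx(6) == 1);	/* two APRs */
	assert(max_apr_idx(5) == 0);	/* one APR */
	return 0;
}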
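Note on the PROPBASER/PENDBASER hunks: wrapping the initial load in READ_ONCE() makes the compiler take a single snapshot of the old value instead of possibly re-reading a field that another vCPU can change between the load and the cmpxchg64(), which would let an unsanitised value slip through. A rough user-space sketch of the same retry pattern, with C11 atomics standing in for READ_ONCE()/cmpxchg64(); reg, sanitise_reg() and write_reg() are invented names, and the mask is only a placeholder for vgic_sanitise_propbaser()/update_64bit_reg():

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t reg;	/* models dist->propbaser */

/* placeholder sanitiser: keep only a physical-address field */
static uint64_t sanitise_reg(uint64_t v)
{
	return v & 0x000ffffffffff000ull;
}

static void write_reg(uint64_t val)
{
	/* one snapshot of the old value, like READ_ONCE() */
	uint64_t old = atomic_load_explicit(&reg, memory_order_relaxed);
	uint64_t new;

	do {
		new = sanitise_reg(old | val);
		/* on failure, 'old' is refreshed from the current value,
		 * mirroring the kernel loop's re-read before retrying */
	} while (!atomic_compare_exchange_weak(&reg, &old, new));
}

int main(void)
{
	write_reg(0x1000);
	printf("reg = %#llx\n", (unsigned long long)atomic_load(&reg));
	return 0;
}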
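Note on the directed-yield hunks: kvm_vcpu_on_spin() gains a yield_to_kernel_mode argument that pairs with the new kvm_arch_vcpu_in_kernel() hook (vcpu_mode_priv() on arm). A vCPU spinning in a WFE/pause exit is probably waiting on a lock whose holder was preempted in kernel mode, so user-mode vCPUs are skipped as yield candidates. A toy user-space model of just that filter; struct toy_vcpu and pick_yield_target() are invented, and only the added skip condition mirrors the hunk:

#include <stdbool.h>
#include <stdio.h>

struct toy_vcpu {
	int id;
	bool runnable;		/* has work pending */
	bool in_kernel;		/* preempted in privileged (kernel) mode */
};

static const struct toy_vcpu *pick_yield_target(const struct toy_vcpu *vcpus,
						int n, int me,
						bool yield_to_kernel_mode)
{
	for (int i = 0; i < n; i++) {
		const struct toy_vcpu *v = &vcpus[i];

		if (i == me || !v->runnable)
			continue;
		/* the new filter: skip user-mode vCPUs when asked to */
		if (yield_to_kernel_mode && !v->in_kernel)
			continue;
		return v;
	}
	return NULL;
}

int main(void)
{
	struct toy_vcpu vcpus[] = {
		{ .id = 0, .runnable = true, .in_kernel = false }, /* spinner */
		{ .id = 1, .runnable = true, .in_kernel = false }, /* user mode */
		{ .id = 2, .runnable = true, .in_kernel = true  }, /* likely lock holder */
	};
	const struct toy_vcpu *t = pick_yield_target(vcpus, 3, 0, true);

	printf("yield to vcpu %d\n", t ? t->id : -1);	/* prints 2 */
	return 0;
}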