Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/apei/einj.c         | 224
-rw-r--r--  drivers/acpi/numa.c              |   6
-rw-r--r--  drivers/acpi/processor_core.c    |  26
-rw-r--r--  drivers/acpi/processor_driver.c  |  20
-rw-r--r--  drivers/idle/intel_idle.c        |  96
5 files changed, 276 insertions, 96 deletions
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
index 6e6512e..5b898d4 100644
--- a/drivers/acpi/apei/einj.c
+++ b/drivers/acpi/apei/einj.c
@@ -43,6 +43,42 @@
 #define FIRMWARE_TIMEOUT	(1 * NSEC_PER_MSEC)
 
 /*
+ * ACPI version 5 provides a SET_ERROR_TYPE_WITH_ADDRESS action.
+ */
+static int acpi5;
+
+struct set_error_type_with_address {
+	u32	type;
+	u32	vendor_extension;
+	u32	flags;
+	u32	apicid;
+	u64	memory_address;
+	u64	memory_address_range;
+	u32	pcie_sbdf;
+};
+enum {
+	SETWA_FLAGS_APICID = 1,
+	SETWA_FLAGS_MEM = 2,
+	SETWA_FLAGS_PCIE_SBDF = 4,
+};
+
+/*
+ * Vendor extensions for platform specific operations
+ */
+struct vendor_error_type_extension {
+	u32	length;
+	u32	pcie_sbdf;
+	u16	vendor_id;
+	u16	device_id;
+	u8	rev_id;
+	u8	reserved[3];
+};
+
+static u32 vendor_flags;
+static struct debugfs_blob_wrapper vendor_blob;
+static char vendor_dev[64];
+
+/*
  * Some BIOSes allow parameters to the SET_ERROR_TYPE entries in the
  * EINJ table through an unpublished extension. Use with caution as
  * most will ignore the parameter and make their own choice of address
@@ -103,7 +139,14 @@ static struct apei_exec_ins_type einj_ins_type[] = {
  */
 static DEFINE_MUTEX(einj_mutex);
 
-static struct einj_parameter *einj_param;
+static void *einj_param;
+
+#ifndef readq
+static inline __u64 readq(volatile void __iomem *addr)
+{
+	return ((__u64)readl(addr+4) << 32) + readl(addr);
+}
+#endif
 
 #ifndef writeq
 static inline void writeq(__u64 val, volatile void __iomem *addr)
@@ -158,10 +201,31 @@ static int einj_timedout(u64 *t)
 	return 0;
 }
 
-static u64 einj_get_parameter_address(void)
+static void check_vendor_extension(u64 paddr,
+				   struct set_error_type_with_address *v5param)
+{
+	int	offset = readl(&v5param->vendor_extension);
+	struct	vendor_error_type_extension *v;
+	u32	sbdf;
+
+	if (!offset)
+		return;
+	v = ioremap(paddr + offset, sizeof(*v));
+	if (!v)
+		return;
+	sbdf = readl(&v->pcie_sbdf);
+	sprintf(vendor_dev, "%x:%x:%x.%x vendor_id=%x device_id=%x rev_id=%x\n",
+		sbdf >> 24, (sbdf >> 16) & 0xff,
+		(sbdf >> 11) & 0x1f, (sbdf >> 8) & 0x7,
+		readw(&v->vendor_id), readw(&v->device_id),
+		readb(&v->rev_id));
+	iounmap(v);
+}
+
+static void *einj_get_parameter_address(void)
 {
 	int i;
-	u64 paddr = 0;
+	u64 paddrv4 = 0, paddrv5 = 0;
 	struct acpi_whea_header *entry;
 
 	entry = EINJ_TAB_ENTRY(einj_tab);
@@ -170,12 +234,40 @@ static u64 einj_get_parameter_address(void)
 		    entry->instruction == ACPI_EINJ_WRITE_REGISTER &&
 		    entry->register_region.space_id ==
 		    ACPI_ADR_SPACE_SYSTEM_MEMORY)
-			memcpy(&paddr, &entry->register_region.address,
-			       sizeof(paddr));
+			memcpy(&paddrv4, &entry->register_region.address,
+			       sizeof(paddrv4));
+		if (entry->action == ACPI_EINJ_SET_ERROR_TYPE_WITH_ADDRESS &&
+		    entry->instruction == ACPI_EINJ_WRITE_REGISTER &&
+		    entry->register_region.space_id ==
+		    ACPI_ADR_SPACE_SYSTEM_MEMORY)
+			memcpy(&paddrv5, &entry->register_region.address,
+			       sizeof(paddrv5));
 		entry++;
 	}
+	if (paddrv5) {
+		struct set_error_type_with_address *v5param;
+
+		v5param = ioremap(paddrv5, sizeof(*v5param));
+		if (v5param) {
+			acpi5 = 1;
+			check_vendor_extension(paddrv5, v5param);
+			return v5param;
+		}
+	}
+	if (paddrv4) {
+		struct einj_parameter *v4param;
+
+		v4param = ioremap(paddrv4, sizeof(*v4param));
+		if (!v4param)
+			return 0;
+		if (readq(&v4param->reserved1) || readq(&v4param->reserved2)) {
+			iounmap(v4param);
+			return 0;
+		}
+		return v4param;
+	}
 
-	return paddr;
+	return 0;
 }
 
 /* do sanity check to trigger table */
@@ -340,12 +432,56 @@ static int __einj_error_inject(u32 type, u64 param1, u64 param2)
 	if (rc)
 		return rc;
 	apei_exec_ctx_set_input(&ctx, type);
-	rc = apei_exec_run(&ctx, ACPI_EINJ_SET_ERROR_TYPE);
-	if (rc)
-		return rc;
-	if (einj_param) {
-		writeq(param1, &einj_param->param1);
-		writeq(param2, &einj_param->param2);
+	if (acpi5) {
+		struct set_error_type_with_address *v5param = einj_param;
+
+		writel(type, &v5param->type);
+		if (type & 0x80000000) {
+			switch (vendor_flags) {
+			case SETWA_FLAGS_APICID:
+				writel(param1, &v5param->apicid);
+				break;
+			case SETWA_FLAGS_MEM:
+				writeq(param1, &v5param->memory_address);
+				writeq(param2, &v5param->memory_address_range);
+				break;
+			case SETWA_FLAGS_PCIE_SBDF:
+				writel(param1, &v5param->pcie_sbdf);
+				break;
+			}
+			writel(vendor_flags, &v5param->flags);
+		} else {
+			switch (type) {
+			case ACPI_EINJ_PROCESSOR_CORRECTABLE:
+			case ACPI_EINJ_PROCESSOR_UNCORRECTABLE:
+			case ACPI_EINJ_PROCESSOR_FATAL:
+				writel(param1, &v5param->apicid);
+				writel(SETWA_FLAGS_APICID, &v5param->flags);
+				break;
+			case ACPI_EINJ_MEMORY_CORRECTABLE:
+			case ACPI_EINJ_MEMORY_UNCORRECTABLE:
+			case ACPI_EINJ_MEMORY_FATAL:
+				writeq(param1, &v5param->memory_address);
+				writeq(param2, &v5param->memory_address_range);
+				writel(SETWA_FLAGS_MEM, &v5param->flags);
+				break;
+			case ACPI_EINJ_PCIX_CORRECTABLE:
+			case ACPI_EINJ_PCIX_UNCORRECTABLE:
+			case ACPI_EINJ_PCIX_FATAL:
+				writel(param1, &v5param->pcie_sbdf);
+				writel(SETWA_FLAGS_PCIE_SBDF, &v5param->flags);
+				break;
+			}
+		}
+	} else {
+		rc = apei_exec_run(&ctx, ACPI_EINJ_SET_ERROR_TYPE);
+		if (rc)
+			return rc;
+		if (einj_param) {
+			struct einj_parameter *v4param = einj_param;
+			writeq(param1, &v4param->param1);
+			writeq(param2, &v4param->param2);
+		}
 	}
 	rc = apei_exec_run(&ctx, ACPI_EINJ_EXECUTE_OPERATION);
 	if (rc)
@@ -455,15 +591,25 @@ static int error_type_set(void *data, u64 val)
 {
 	int rc;
 	u32 available_error_type = 0;
+	u32 tval, vendor;
+
+	/*
+	 * Vendor defined types have 0x80000000 bit set, and
+	 * are not enumerated by ACPI_EINJ_GET_ERROR_TYPE
+	 */
+	vendor = val & 0x80000000;
+	tval = val & 0x7fffffff;
 
 	/* Only one error type can be specified */
-	if (val & (val - 1))
-		return -EINVAL;
-	rc = einj_get_available_error_type(&available_error_type);
-	if (rc)
-		return rc;
-	if (!(val & available_error_type))
+	if (tval & (tval - 1))
 		return -EINVAL;
+	if (!vendor) {
+		rc = einj_get_available_error_type(&available_error_type);
+		if (rc)
+			return rc;
+		if (!(val & available_error_type))
+			return -EINVAL;
+	}
 
 	error_type = val;
 	return 0;
@@ -502,7 +648,6 @@ static int einj_check_table(struct acpi_table_einj *einj_tab)
 static int __init einj_init(void)
 {
 	int rc;
-	u64 param_paddr;
 	acpi_status status;
 	struct dentry *fentry;
 	struct apei_exec_context ctx;
@@ -555,23 +700,30 @@ static int __init einj_init(void)
 	rc = apei_exec_pre_map_gars(&ctx);
 	if (rc)
 		goto err_release;
-	if (param_extension) {
-		param_paddr = einj_get_parameter_address();
-		if (param_paddr) {
-			einj_param = ioremap(param_paddr, sizeof(*einj_param));
-			rc = -ENOMEM;
-			if (!einj_param)
-				goto err_unmap;
-			fentry = debugfs_create_x64("param1", S_IRUSR | S_IWUSR,
-					einj_debug_dir, &error_param1);
-			if (!fentry)
-				goto err_unmap;
-			fentry = debugfs_create_x64("param2", S_IRUSR | S_IWUSR,
-					einj_debug_dir, &error_param2);
-			if (!fentry)
-				goto err_unmap;
-		} else
-			pr_warn(EINJ_PFX "Parameter extension is not supported.\n");
+
+	einj_param = einj_get_parameter_address();
+	if ((param_extension || acpi5) && einj_param) {
+		fentry = debugfs_create_x64("param1", S_IRUSR | S_IWUSR,
+					    einj_debug_dir, &error_param1);
+		if (!fentry)
+			goto err_unmap;
+		fentry = debugfs_create_x64("param2", S_IRUSR | S_IWUSR,
+					    einj_debug_dir, &error_param2);
+		if (!fentry)
+			goto err_unmap;
+	}
+
+	if (vendor_dev[0]) {
+		vendor_blob.data = vendor_dev;
+		vendor_blob.size = strlen(vendor_dev);
+		fentry = debugfs_create_blob("vendor", S_IRUSR,
+					     einj_debug_dir, &vendor_blob);
+		if (!fentry)
+			goto err_unmap;
+		fentry = debugfs_create_x32("vendor_flags", S_IRUSR | S_IWUSR,
+					    einj_debug_dir, &vendor_flags);
+		if (!fentry)
+			goto err_unmap;
 	}
 
 	pr_info(EINJ_PFX "Error INJection is initialized.\n");
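
The einj.c changes above expose the ACPI 5.0 vendor extension through debugfs: check_vendor_extension() unpacks a 32-bit PCIe "SBDF" word into segment:bus:device.function for the "vendor" blob. A standalone sketch of that decode, using the same shifts and masks as the patch (illustrative only; the decode_sbdf() name and the sample value are made up):

#include <stdio.h>
#include <stdint.h>

/* Field layout assumed from the patch: seg[31:24] bus[23:16] dev[15:11] fn[10:8]. */
static void decode_sbdf(uint32_t sbdf)
{
	unsigned int seg = sbdf >> 24;
	unsigned int bus = (sbdf >> 16) & 0xff;
	unsigned int dev = (sbdf >> 11) & 0x1f;
	unsigned int fn  = (sbdf >> 8) & 0x7;

	printf("%x:%x:%x.%x\n", seg, bus, dev, fn);
}

int main(void)
{
	decode_sbdf(0x00011018);	/* hypothetical value: prints 0:1:2.0 */
	return 0;
}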
debugfs_create_x64("param2", S_IRUSR | S_IWUSR, + einj_debug_dir, &error_param2); + if (!fentry) + goto err_unmap; + } + + if (vendor_dev[0]) { + vendor_blob.data = vendor_dev; + vendor_blob.size = strlen(vendor_dev); + fentry = debugfs_create_blob("vendor", S_IRUSR, + einj_debug_dir, &vendor_blob); + if (!fentry) + goto err_unmap; + fentry = debugfs_create_x32("vendor_flags", S_IRUSR | S_IWUSR, + einj_debug_dir, &vendor_flags); + if (!fentry) + goto err_unmap; } pr_info(EINJ_PFX "Error INJection is initialized.\n"); diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c index 3b5c318..e56f3be 100644 --- a/drivers/acpi/numa.c +++ b/drivers/acpi/numa.c @@ -45,6 +45,8 @@ static int pxm_to_node_map[MAX_PXM_DOMAINS] static int node_to_pxm_map[MAX_NUMNODES] = { [0 ... MAX_NUMNODES - 1] = PXM_INVAL }; +unsigned char acpi_srat_revision __initdata; + int pxm_to_node(int pxm) { if (pxm < 0) @@ -255,9 +257,13 @@ acpi_parse_memory_affinity(struct acpi_subtable_header * header, static int __init acpi_parse_srat(struct acpi_table_header *table) { + struct acpi_table_srat *srat; if (!table) return -EINVAL; + srat = (struct acpi_table_srat *)table; + acpi_srat_revision = srat->header.revision; + /* Real work done in acpi_table_parse_srat below. */ return 0; diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c index 3a0428e..c850de4 100644 --- a/drivers/acpi/processor_core.c +++ b/drivers/acpi/processor_core.c @@ -173,8 +173,30 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id) apic_id = map_mat_entry(handle, type, acpi_id); if (apic_id == -1) apic_id = map_madt_entry(type, acpi_id); - if (apic_id == -1) - return apic_id; + if (apic_id == -1) { + /* + * On UP processor, there is no _MAT or MADT table. + * So above apic_id is always set to -1. + * + * BIOS may define multiple CPU handles even for UP processor. + * For example, + * + * Scope (_PR) + * { + * Processor (CPU0, 0x00, 0x00000410, 0x06) {} + * Processor (CPU1, 0x01, 0x00000410, 0x06) {} + * Processor (CPU2, 0x02, 0x00000410, 0x06) {} + * Processor (CPU3, 0x03, 0x00000410, 0x06) {} + * } + * + * Ignores apic_id and always return 0 for CPU0's handle. + * Return -1 for other CPU's handle. + */ + if (acpi_id == 0) + return acpi_id; + else + return apic_id; + } #ifdef CONFIG_SMP for_each_possible_cpu(i) { diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c index 9d7bc9f..b9cbd9b 100644 --- a/drivers/acpi/processor_driver.c +++ b/drivers/acpi/processor_driver.c @@ -82,7 +82,7 @@ MODULE_LICENSE("GPL"); static int acpi_processor_add(struct acpi_device *device); static int acpi_processor_remove(struct acpi_device *device, int type); static void acpi_processor_notify(struct acpi_device *device, u32 event); -static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu); +static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr); static int acpi_processor_handle_eject(struct acpi_processor *pr); @@ -324,10 +324,8 @@ static int acpi_processor_get_info(struct acpi_device *device) * they are physically not present. */ if (pr->id == -1) { - if (ACPI_FAILURE - (acpi_processor_hotadd_init(pr->handle, &pr->id))) { + if (ACPI_FAILURE(acpi_processor_hotadd_init(pr))) return -ENODEV; - } } /* * On some boxes several processors use the same processor bus id. 
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 9d7bc9f..b9cbd9b 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -82,7 +82,7 @@ MODULE_LICENSE("GPL");
 static int acpi_processor_add(struct acpi_device *device);
 static int acpi_processor_remove(struct acpi_device *device, int type);
 static void acpi_processor_notify(struct acpi_device *device, u32 event);
-static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu);
+static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr);
 static int acpi_processor_handle_eject(struct acpi_processor *pr);
 
@@ -324,10 +324,8 @@ static int acpi_processor_get_info(struct acpi_device *device)
 	 *  they are physically not present.
 	 */
 	if (pr->id == -1) {
-		if (ACPI_FAILURE
-		    (acpi_processor_hotadd_init(pr->handle, &pr->id))) {
+		if (ACPI_FAILURE(acpi_processor_hotadd_init(pr)))
 			return -ENODEV;
-		}
 	}
 	/*
 	 * On some boxes several processors use the same processor bus id.
@@ -539,6 +537,7 @@ err_thermal_unregister:
 	thermal_cooling_device_unregister(pr->cdev);
 err_power_exit:
 	acpi_processor_power_exit(pr, device);
+	sysfs_remove_link(&device->dev.kobj, "sysdev");
 err_free_cpumask:
 	free_cpumask_var(pr->throttling.shared_cpu_map);
 
@@ -720,18 +719,19 @@ processor_walk_namespace_cb(acpi_handle handle,
 	return (AE_OK);
 }
 
-static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu)
+static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr)
 {
+	acpi_handle handle = pr->handle;
 	if (!is_processor_present(handle)) {
 		return AE_ERROR;
 	}
 
-	if (acpi_map_lsapic(handle, p_cpu))
+	if (acpi_map_lsapic(handle, &pr->id))
 		return AE_ERROR;
 
-	if (arch_register_cpu(*p_cpu)) {
-		acpi_unmap_lsapic(*p_cpu);
+	if (arch_register_cpu(pr->id)) {
+		acpi_unmap_lsapic(pr->id);
 		return AE_ERROR;
 	}
 
@@ -748,7 +748,7 @@ static int acpi_processor_handle_eject(struct acpi_processor *pr)
 	return (0);
 }
 #else
-static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu)
+static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr)
 {
 	return AE_ERROR;
 }
@@ -827,8 +827,6 @@ static void __exit acpi_processor_exit(void)
 
 	acpi_bus_unregister_driver(&acpi_processor_driver);
 
-	cpuidle_unregister_driver(&acpi_idle_driver);
-
 	return;
 }
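
The sysfs_remove_link() call added above slots into acpi_processor_add()'s goto-based unwind ladder, where each error label releases only the resources acquired before the failure, in reverse order. A self-contained sketch of the pattern with hypothetical step/undo helpers (not the driver's actual resources):

#include <stdio.h>

static int  step_a(void) { puts("a acquired"); return 0; }
static int  step_b(void) { puts("b acquired"); return 0; }
static int  step_c(void) { puts("c failed");   return -1; }
static void undo_b(void) { puts("b released"); }
static void undo_a(void) { puts("a released"); }

static int do_setup(void)
{
	int ret;

	ret = step_a();
	if (ret)
		goto out;
	ret = step_b();
	if (ret)
		goto err_undo_a;
	ret = step_c();
	if (ret)
		goto err_undo_b;
	return 0;

err_undo_b:
	undo_b();		/* undo in reverse order of acquisition */
err_undo_a:
	undo_a();
out:
	return ret;
}

int main(void)
{
	return do_setup() ? 1 : 0;
}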
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 5d2f8e1..20bce51 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -197,7 +197,7 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
 		.enter = &intel_idle },
 };
 
-static int get_driver_data(int cstate)
+static long get_driver_data(int cstate)
 {
 	int driver_data;
 	switch (cstate) {
@@ -232,6 +232,7 @@ static int get_driver_data(int cstate)
  * @drv: cpuidle driver
  * @index: index of cpuidle state
  *
+ * Must be called under local_irq_disable().
  */
 static int intel_idle(struct cpuidle_device *dev,
 		struct cpuidle_driver *drv, int index)
@@ -247,8 +248,6 @@ static int intel_idle(struct cpuidle_device *dev,
 	cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1;
 
-	local_irq_disable();
-
 	/*
 	 * leave_mm() to avoid costly and often unnecessary wakeups
 	 * for flushing the user TLB's associated with the active mm.
@@ -348,7 +347,8 @@ static int intel_idle_probe(void)
 	cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates);
 
 	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
-	    !(ecx & CPUID5_ECX_INTERRUPT_BREAK))
+	    !(ecx & CPUID5_ECX_INTERRUPT_BREAK) ||
+	    !mwait_substates)
 		return -ENODEV;
 
 	pr_debug(PREFIX "MWAIT substates: 0x%x\n", mwait_substates);
@@ -394,7 +394,7 @@ static int intel_idle_probe(void)
 	if (boot_cpu_has(X86_FEATURE_ARAT))	/* Always Reliable APIC Timer */
 		lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE;
 	else {
-		smp_call_function(__setup_broadcast_timer, (void *)true, 1);
+		on_each_cpu(__setup_broadcast_timer, (void *)true, 1);
 		register_cpu_notifier(&setup_broadcast_notifier);
 	}
 
@@ -471,71 +471,67 @@ static int intel_idle_cpuidle_driver_init(void)
 	}
 
 	if (auto_demotion_disable_flags)
-		smp_call_function(auto_demotion_disable, NULL, 1);
+		on_each_cpu(auto_demotion_disable, NULL, 1);
 
 	return 0;
 }
 
 /*
- * intel_idle_cpuidle_devices_init()
+ * intel_idle_cpu_init()
  * allocate, initialize, register cpuidle_devices
+ * @cpu: cpu/core to initialize
  */
-static int intel_idle_cpuidle_devices_init(void)
+int intel_idle_cpu_init(int cpu)
 {
-	int i, cstate;
+	int cstate;
 	struct cpuidle_device *dev;
 
-	intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
-	if (intel_idle_cpuidle_devices == NULL)
-		return -ENOMEM;
-
-	for_each_online_cpu(i) {
-		dev = per_cpu_ptr(intel_idle_cpuidle_devices, i);
+	dev = per_cpu_ptr(intel_idle_cpuidle_devices, cpu);
 
-		dev->state_count = 1;
+	dev->state_count = 1;
 
-		for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; ++cstate) {
-			int num_substates;
+	for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; ++cstate) {
+		int num_substates;
 
-			if (cstate > max_cstate) {
-				printk(PREFIX "max_cstate %d reached\n",
-					max_cstate);
-				break;
-			}
+		if (cstate > max_cstate) {
+			printk(PREFIX "max_cstate %d reached\n",
+				max_cstate);
+			break;
+		}
 
-			/* does the state exist in CPUID.MWAIT? */
-			num_substates = (mwait_substates >> ((cstate) * 4))
-						& MWAIT_SUBSTATE_MASK;
-			if (num_substates == 0)
-				continue;
-			/* is the state not enabled? */
-			if (cpuidle_state_table[cstate].enter == NULL) {
-				continue;
-			}
+		/* does the state exist in CPUID.MWAIT? */
+		num_substates = (mwait_substates >> ((cstate) * 4))
+					& MWAIT_SUBSTATE_MASK;
+		if (num_substates == 0)
+			continue;
+		/* is the state not enabled? */
+		if (cpuidle_state_table[cstate].enter == NULL)
+			continue;
 
-			dev->states_usage[dev->state_count].driver_data =
-				(void *)get_driver_data(cstate);
+		dev->states_usage[dev->state_count].driver_data =
+			(void *)get_driver_data(cstate);
 
 		dev->state_count += 1;
 	}
+	dev->cpu = cpu;
 
-		dev->cpu = i;
-		if (cpuidle_register_device(dev)) {
-			pr_debug(PREFIX "cpuidle_register_device %d failed!\n",
-				 i);
-			intel_idle_cpuidle_devices_uninit();
-			return -EIO;
-		}
+	if (cpuidle_register_device(dev)) {
+		pr_debug(PREFIX "cpuidle_register_device %d failed!\n", cpu);
+		intel_idle_cpuidle_devices_uninit();
+		return -EIO;
 	}
 
+	if (auto_demotion_disable_flags)
+		smp_call_function_single(cpu, auto_demotion_disable, NULL, 1);
+
 	return 0;
 }
 
 static int __init intel_idle_init(void)
 {
-	int retval;
+	int retval, i;
 
 	/* Do not load intel_idle at all for now if idle= is passed */
 	if (boot_option_idle_override != IDLE_NO_OVERRIDE)
@@ -553,10 +549,16 @@ static int __init intel_idle_init(void)
 		return retval;
 	}
 
-	retval = intel_idle_cpuidle_devices_init();
-	if (retval) {
-		cpuidle_unregister_driver(&intel_idle_driver);
-		return retval;
+	intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
+	if (intel_idle_cpuidle_devices == NULL)
+		return -ENOMEM;
+
+	for_each_online_cpu(i) {
+		retval = intel_idle_cpu_init(i);
+		if (retval) {
+			cpuidle_unregister_driver(&intel_idle_driver);
+			return retval;
+		}
 	}
 
 	return 0;
@@ -568,7 +570,7 @@ static void __exit intel_idle_exit(void)
 	cpuidle_unregister_driver(&intel_idle_driver);
 
 	if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE) {
-		smp_call_function(__setup_broadcast_timer, (void *)false, 1);
+		on_each_cpu(__setup_broadcast_timer, (void *)false, 1);
 		unregister_cpu_notifier(&setup_broadcast_notifier);
 	}
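
intel_idle_cpu_init() above sizes each CPU's C-state table from CPUID.MWAIT (leaf 5), where EDX carries a 4-bit sub-state count per C-state; a state with a zero count is skipped. A minimal user-space sketch of that decode (the EDX value below is invented for illustration):

#include <stdio.h>
#include <stdint.h>

#define MWAIT_SUBSTATE_MASK	0xf
#define MWAIT_MAX_NUM_CSTATES	8

int main(void)
{
	uint32_t mwait_substates = 0x00001120;	/* hypothetical CPUID.5 EDX */
	int cstate;

	for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; ++cstate) {
		int num_substates =
			(mwait_substates >> (cstate * 4)) & MWAIT_SUBSTATE_MASK;

		if (num_substates)	/* zero means the C-state is absent */
			printf("C%d: %d MWAIT sub-state(s)\n",
			       cstate, num_substates);
	}
	return 0;
}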