Diffstat (limited to 'drivers/acpi')
-rw-r--r--  drivers/acpi/Kconfig                 9
-rw-r--r--  drivers/acpi/Makefile                5
-rw-r--r--  drivers/acpi/acpi_pad.c              2
-rw-r--r--  drivers/acpi/acpica/evxfevnt.c      33
-rw-r--r--  drivers/acpi/acpica/hwacpi.c        20
-rw-r--r--  drivers/acpi/apei/Kconfig           30
-rw-r--r--  drivers/acpi/apei/Makefile           5
-rw-r--r--  drivers/acpi/apei/apei-base.c      593
-rw-r--r--  drivers/acpi/apei/apei-internal.h  114
-rw-r--r--  drivers/acpi/apei/cper.c            84
-rw-r--r--  drivers/acpi/apei/einj.c           548
-rw-r--r--  drivers/acpi/apei/erst.c           855
-rw-r--r--  drivers/acpi/apei/ghes.c           427
-rw-r--r--  drivers/acpi/apei/hest.c           173
-rw-r--r--  drivers/acpi/atomicio.c            360
-rw-r--r--  drivers/acpi/bus.c                   2
-rw-r--r--  drivers/acpi/hed.c                 112
-rw-r--r--  drivers/acpi/hest.c                135
-rw-r--r--  drivers/acpi/pci_root.c             67
-rw-r--r--  drivers/acpi/power_meter.c           2
-rw-r--r--  drivers/acpi/sbshc.c                 2
-rw-r--r--  drivers/acpi/sleep.c               101
-rw-r--r--  drivers/acpi/video_detect.c          2
23 files changed, 3383 insertions, 298 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 93d2c79..7464115 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -360,4 +360,13 @@ config ACPI_SBS
To compile this driver as a module, choose M here:
the modules will be called sbs and sbshc.
+config ACPI_HED
+ tristate "Hardware Error Device"
+ help
+ This driver supports the Hardware Error Device (PNP0C33),
+ which is used to report hardware errors that are notified via
+ SCI, mainly corrected errors.
+
+source "drivers/acpi/apei/Kconfig"
+
endif # ACPI
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index a8d8998..6ee3316 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -19,7 +19,7 @@ obj-y += acpi.o \
# All the builtin files are in the "acpi." module_param namespace.
acpi-y += osl.o utils.o reboot.o
-acpi-y += hest.o
+acpi-y += atomicio.o
# sleep related files
acpi-y += wakeup.o
@@ -59,6 +59,7 @@ obj-$(CONFIG_ACPI_BATTERY) += battery.o
obj-$(CONFIG_ACPI_SBS) += sbshc.o
obj-$(CONFIG_ACPI_SBS) += sbs.o
obj-$(CONFIG_ACPI_POWER_METER) += power_meter.o
+obj-$(CONFIG_ACPI_HED) += hed.o
# processor has its own "processor." module_param namespace
processor-y := processor_driver.o processor_throttling.o
@@ -66,3 +67,5 @@ processor-y += processor_idle.o processor_thermal.o
processor-$(CONFIG_CPU_FREQ) += processor_perflib.o
obj-$(CONFIG_ACPI_PROCESSOR_AGGREGATOR) += acpi_pad.o
+
+obj-$(CONFIG_ACPI_APEI) += apei/
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index 19dacfd..6212213 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -31,7 +31,7 @@
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
-#define ACPI_PROCESSOR_AGGREGATOR_CLASS "processor_aggregator"
+#define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
#define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
#define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
static DEFINE_MUTEX(isolated_cpus_lock);
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index 5ff32c7..bfbe291 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -69,7 +69,7 @@ acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
acpi_status acpi_enable(void)
{
- acpi_status status = AE_OK;
+ acpi_status status;
ACPI_FUNCTION_TRACE(acpi_enable);
@@ -84,21 +84,30 @@ acpi_status acpi_enable(void)
if (acpi_hw_get_mode() == ACPI_SYS_MODE_ACPI) {
ACPI_DEBUG_PRINT((ACPI_DB_INIT,
"System is already in ACPI mode\n"));
- } else {
- /* Transition to ACPI mode */
+ return_ACPI_STATUS(AE_OK);
+ }
- status = acpi_hw_set_mode(ACPI_SYS_MODE_ACPI);
- if (ACPI_FAILURE(status)) {
- ACPI_ERROR((AE_INFO,
- "Could not transition to ACPI mode"));
- return_ACPI_STATUS(status);
- }
+ /* Transition to ACPI mode */
- ACPI_DEBUG_PRINT((ACPI_DB_INIT,
- "Transition to ACPI mode successful\n"));
+ status = acpi_hw_set_mode(ACPI_SYS_MODE_ACPI);
+ if (ACPI_FAILURE(status)) {
+ ACPI_ERROR((AE_INFO,
+ "Could not transition to ACPI mode"));
+ return_ACPI_STATUS(status);
}
- return_ACPI_STATUS(status);
+ /* Sanity check that transition succeeded */
+
+ if (acpi_hw_get_mode() != ACPI_SYS_MODE_ACPI) {
+ ACPI_ERROR((AE_INFO,
+ "Hardware did not enter ACPI mode"));
+ return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE);
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INIT,
+ "Transition to ACPI mode successful\n"));
+
+ return_ACPI_STATUS(AE_OK);
}
ACPI_EXPORT_SYMBOL(acpi_enable)
diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c
index 679a112..b44274a 100644
--- a/drivers/acpi/acpica/hwacpi.c
+++ b/drivers/acpi/acpica/hwacpi.c
@@ -63,7 +63,6 @@ acpi_status acpi_hw_set_mode(u32 mode)
{
acpi_status status;
- u32 retry;
ACPI_FUNCTION_TRACE(hw_set_mode);
@@ -125,24 +124,7 @@ acpi_status acpi_hw_set_mode(u32 mode)
return_ACPI_STATUS(status);
}
- /*
- * Some hardware takes a LONG time to switch modes. Give them 3 sec to
- * do so, but allow faster systems to proceed more quickly.
- */
- retry = 3000;
- while (retry) {
- if (acpi_hw_get_mode() == mode) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Mode %X successfully enabled\n",
- mode));
- return_ACPI_STATUS(AE_OK);
- }
- acpi_os_stall(1000);
- retry--;
- }
-
- ACPI_ERROR((AE_INFO, "Hardware did not change modes"));
- return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE);
+ return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig
new file mode 100644
index 0000000..f8c668f
--- /dev/null
+++ b/drivers/acpi/apei/Kconfig
@@ -0,0 +1,30 @@
+config ACPI_APEI
+ bool "ACPI Platform Error Interface (APEI)"
+ depends on X86
+ help
+ APEI allows errors (for example from the chipset) to be
+ reported to the operating system. This especially improves
+ NMI handling. In addition it supports error serialization and
+ error injection.
+
+config ACPI_APEI_GHES
+ tristate "APEI Generic Hardware Error Source"
+ depends on ACPI_APEI && X86
+ select ACPI_HED
+ help
+ Generic Hardware Error Source provides a way to report
+ platform hardware errors (such as those from the chipset). It
+ works in so-called "Firmware First" mode, that is, hardware
+ errors are reported to the firmware first and then passed on
+ to Linux by the firmware. This way, the firmware can check
+ some non-standard hardware error registers or non-standard
+ hardware links to produce more valuable hardware error
+ information for Linux.
+
+config ACPI_APEI_EINJ
+ tristate "APEI Error INJection (EINJ)"
+ depends on ACPI_APEI && DEBUG_FS
+ help
+ EINJ provides a hardware error injection mechanism. It is
+ mainly used for debugging and testing the other parts of
+ APEI and some other RAS features.
diff --git a/drivers/acpi/apei/Makefile b/drivers/acpi/apei/Makefile
new file mode 100644
index 0000000..b13b03a
--- /dev/null
+++ b/drivers/acpi/apei/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_ACPI_APEI) += apei.o
+obj-$(CONFIG_ACPI_APEI_GHES) += ghes.o
+obj-$(CONFIG_ACPI_APEI_EINJ) += einj.o
+
+apei-y := apei-base.o hest.o cper.o erst.o
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
new file mode 100644
index 0000000..db3946e
--- /dev/null
+++ b/drivers/acpi/apei/apei-base.c
@@ -0,0 +1,593 @@
+/*
+ * apei-base.c - ACPI Platform Error Interface (APEI) supporting
+ * infrastructure
+ *
+ * APEI allows errors (for example from the chipset) to be reported
+ * to the operating system. This especially improves NMI handling.
+ * In addition it supports error serialization and error injection.
+ *
+ * For more information about APEI, please refer to ACPI Specification
+ * version 4.0, chapter 17.
+ *
+ * This file contains common functions used by more than one APEI
+ * table, including the interpreter framework for ERST and EINJ and
+ * resource management for APEI registers.
+ *
+ * Copyright (C) 2009, Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/io.h>
+#include <linux/kref.h>
+#include <linux/rculist.h>
+#include <linux/interrupt.h>
+#include <linux/debugfs.h>
+#include <acpi/atomicio.h>
+
+#include "apei-internal.h"
+
+#define APEI_PFX "APEI: "
+
+/*
+ * APEI ERST (Error Record Serialization Table) and EINJ (Error
+ * INJection) interpreter framework.
+ */
+
+#define APEI_EXEC_PRESERVE_REGISTER 0x1
+
+void apei_exec_ctx_init(struct apei_exec_context *ctx,
+ struct apei_exec_ins_type *ins_table,
+ u32 instructions,
+ struct acpi_whea_header *action_table,
+ u32 entries)
+{
+ ctx->ins_table = ins_table;
+ ctx->instructions = instructions;
+ ctx->action_table = action_table;
+ ctx->entries = entries;
+}
+EXPORT_SYMBOL_GPL(apei_exec_ctx_init);
+
+int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val)
+{
+ int rc;
+
+ rc = acpi_atomic_read(val, &entry->register_region);
+ if (rc)
+ return rc;
+ *val >>= entry->register_region.bit_offset;
+ *val &= entry->mask;
+
+ return 0;
+}
+
+int apei_exec_read_register(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ int rc;
+ u64 val = 0;
+
+ rc = __apei_exec_read_register(entry, &val);
+ if (rc)
+ return rc;
+ ctx->value = val;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(apei_exec_read_register);
+
+int apei_exec_read_register_value(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ int rc;
+
+ rc = apei_exec_read_register(ctx, entry);
+ if (rc)
+ return rc;
+ ctx->value = (ctx->value == entry->value);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(apei_exec_read_register_value);
+
+int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val)
+{
+ int rc;
+
+ val &= entry->mask;
+ val <<= entry->register_region.bit_offset;
+ if (entry->flags & APEI_EXEC_PRESERVE_REGISTER) {
+ u64 valr = 0;
+ rc = acpi_atomic_read(&valr, &entry->register_region);
+ if (rc)
+ return rc;
+ valr &= ~(entry->mask << entry->register_region.bit_offset);
+ val |= valr;
+ }
+ rc = acpi_atomic_write(val, &entry->register_region);
+
+ return rc;
+}
+
+int apei_exec_write_register(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ return __apei_exec_write_register(entry, ctx->value);
+}
+EXPORT_SYMBOL_GPL(apei_exec_write_register);
+
+int apei_exec_write_register_value(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ int rc;
+
+ ctx->value = entry->value;
+ rc = apei_exec_write_register(ctx, entry);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(apei_exec_write_register_value);
+
+int apei_exec_noop(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(apei_exec_noop);
+
+/*
+ * Interpret the specified action. Go through the whole action table
+ * and execute all instructions belonging to the action.
+ */
+int apei_exec_run(struct apei_exec_context *ctx, u8 action)
+{
+ int rc;
+ u32 i, ip;
+ struct acpi_whea_header *entry;
+ apei_exec_ins_func_t run;
+
+ ctx->ip = 0;
+
+ /*
+ * "ip" is the instruction pointer of current instruction,
+ * "ctx->ip" specifies the next instruction to executed,
+ * instruction "run" function may change the "ctx->ip" to
+ * implement "goto" semantics.
+ */
+rewind:
+ ip = 0;
+ for (i = 0; i < ctx->entries; i++) {
+ entry = &ctx->action_table[i];
+ if (entry->action != action)
+ continue;
+ if (ip == ctx->ip) {
+ if (entry->instruction >= ctx->instructions ||
+ !ctx->ins_table[entry->instruction].run) {
+ pr_warning(FW_WARN APEI_PFX
+ "Invalid action table, unknown instruction type: %d\n",
+ entry->instruction);
+ return -EINVAL;
+ }
+ run = ctx->ins_table[entry->instruction].run;
+ rc = run(ctx, entry);
+ if (rc < 0)
+ return rc;
+ else if (rc != APEI_EXEC_SET_IP)
+ ctx->ip++;
+ }
+ ip++;
+ if (ctx->ip < ip)
+ goto rewind;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(apei_exec_run);
+
+typedef int (*apei_exec_entry_func_t)(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry,
+ void *data);
+
+static int apei_exec_for_each_entry(struct apei_exec_context *ctx,
+ apei_exec_entry_func_t func,
+ void *data,
+ int *end)
+{
+ u8 ins;
+ int i, rc;
+ struct acpi_whea_header *entry;
+ struct apei_exec_ins_type *ins_table = ctx->ins_table;
+
+ for (i = 0; i < ctx->entries; i++) {
+ entry = ctx->action_table + i;
+ ins = entry->instruction;
+ if (end)
+ *end = i;
+ if (ins >= ctx->instructions || !ins_table[ins].run) {
+ pr_warning(FW_WARN APEI_PFX
+ "Invalid action table, unknown instruction type: %d\n",
+ ins);
+ return -EINVAL;
+ }
+ rc = func(ctx, entry, data);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+static int pre_map_gar_callback(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry,
+ void *data)
+{
+ u8 ins = entry->instruction;
+
+ if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
+ return acpi_pre_map_gar(&entry->register_region);
+
+ return 0;
+}
+
+/*
+ * Pre-map all GARs in the action table to make it possible to access
+ * them in the NMI handler.
+ */
+int apei_exec_pre_map_gars(struct apei_exec_context *ctx)
+{
+ int rc, end;
+
+ rc = apei_exec_for_each_entry(ctx, pre_map_gar_callback,
+ NULL, &end);
+ if (rc) {
+ struct apei_exec_context ctx_unmap;
+ memcpy(&ctx_unmap, ctx, sizeof(*ctx));
+ ctx_unmap.entries = end;
+ apei_exec_post_unmap_gars(&ctx_unmap);
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(apei_exec_pre_map_gars);
+
+static int post_unmap_gar_callback(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry,
+ void *data)
+{
+ u8 ins = entry->instruction;
+
+ if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
+ acpi_post_unmap_gar(&entry->register_region);
+
+ return 0;
+}
+
+/* Post-unmap all GARs in the action table. */
+int apei_exec_post_unmap_gars(struct apei_exec_context *ctx)
+{
+ return apei_exec_for_each_entry(ctx, post_unmap_gar_callback,
+ NULL, NULL);
+}
+EXPORT_SYMBOL_GPL(apei_exec_post_unmap_gars);
+
+/*
+ * Resource management for GARs in APEI
+ */
+struct apei_res {
+ struct list_head list;
+ unsigned long start;
+ unsigned long end;
+};
+
+/* Collect all resources requested, to avoid conflict */
+struct apei_resources apei_resources_all = {
+ .iomem = LIST_HEAD_INIT(apei_resources_all.iomem),
+ .ioport = LIST_HEAD_INIT(apei_resources_all.ioport),
+};
+
+static int apei_res_add(struct list_head *res_list,
+ unsigned long start, unsigned long size)
+{
+ struct apei_res *res, *resn, *res_ins = NULL;
+ unsigned long end = start + size;
+
+ if (end <= start)
+ return 0;
+repeat:
+ list_for_each_entry_safe(res, resn, res_list, list) {
+ if (res->start > end || res->end < start)
+ continue;
+ else if (end <= res->end && start >= res->start) {
+ kfree(res_ins);
+ return 0;
+ }
+ list_del(&res->list);
+ res->start = start = min(res->start, start);
+ res->end = end = max(res->end, end);
+ kfree(res_ins);
+ res_ins = res;
+ goto repeat;
+ }
+
+ if (res_ins)
+ list_add(&res_ins->list, res_list);
+ else {
+ res_ins = kmalloc(sizeof(*res), GFP_KERNEL);
+ if (!res_ins)
+ return -ENOMEM;
+ res_ins->start = start;
+ res_ins->end = end;
+ list_add(&res_ins->list, res_list);
+ }
+
+ return 0;
+}
+
+static int apei_res_sub(struct list_head *res_list1,
+ struct list_head *res_list2)
+{
+ struct apei_res *res1, *resn1, *res2, *res;
+ res1 = list_entry(res_list1->next, struct apei_res, list);
+ resn1 = list_entry(res1->list.next, struct apei_res, list);
+ while (&res1->list != res_list1) {
+ list_for_each_entry(res2, res_list2, list) {
+ if (res1->start >= res2->end ||
+ res1->end <= res2->start)
+ continue;
+ else if (res1->end <= res2->end &&
+ res1->start >= res2->start) {
+ list_del(&res1->list);
+ kfree(res1);
+ break;
+ } else if (res1->end > res2->end &&
+ res1->start < res2->start) {
+ res = kmalloc(sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+ res->start = res2->end;
+ res->end = res1->end;
+ res1->end = res2->start;
+ list_add(&res->list, &res1->list);
+ resn1 = res;
+ } else {
+ if (res1->start < res2->start)
+ res1->end = res2->start;
+ else
+ res1->start = res2->end;
+ }
+ }
+ res1 = resn1;
+ resn1 = list_entry(resn1->list.next, struct apei_res, list);
+ }
+
+ return 0;
+}
+
+static void apei_res_clean(struct list_head *res_list)
+{
+ struct apei_res *res, *resn;
+
+ list_for_each_entry_safe(res, resn, res_list, list) {
+ list_del(&res->list);
+ kfree(res);
+ }
+}
+
+void apei_resources_fini(struct apei_resources *resources)
+{
+ apei_res_clean(&resources->iomem);
+ apei_res_clean(&resources->ioport);
+}
+EXPORT_SYMBOL_GPL(apei_resources_fini);
+
+static int apei_resources_merge(struct apei_resources *resources1,
+ struct apei_resources *resources2)
+{
+ int rc;
+ struct apei_res *res;
+
+ list_for_each_entry(res, &resources2->iomem, list) {
+ rc = apei_res_add(&resources1->iomem, res->start,
+ res->end - res->start);
+ if (rc)
+ return rc;
+ }
+ list_for_each_entry(res, &resources2->ioport, list) {
+ rc = apei_res_add(&resources1->ioport, res->start,
+ res->end - res->start);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+/*
+ * EINJ has two groups of GARs (EINJ table entries and trigger table
+ * entries), so the resources they share are subtracted from the
+ * trigger table resources before the second request.
+ */
+int apei_resources_sub(struct apei_resources *resources1,
+ struct apei_resources *resources2)
+{
+ int rc;
+
+ rc = apei_res_sub(&resources1->iomem, &resources2->iomem);
+ if (rc)
+ return rc;
+ return apei_res_sub(&resources1->ioport, &resources2->ioport);
+}
+EXPORT_SYMBOL_GPL(apei_resources_sub);
+
+/*
+ * The IO memory/port resource management mechanism is used to check
+ * whether the memory/port areas used by GARs conflict with normal
+ * memory or the IO memory/ports of devices.
+ */
+int apei_resources_request(struct apei_resources *resources,
+ const char *desc)
+{
+ struct apei_res *res, *res_bak;
+ struct resource *r;
+
+ apei_resources_sub(resources, &apei_resources_all);
+
+ list_for_each_entry(res, &resources->iomem, list) {
+ r = request_mem_region(res->start, res->end - res->start,
+ desc);
+ if (!r) {
+ pr_err(APEI_PFX
+ "Can not request iomem region <%016llx-%016llx> for GARs.\n",
+ (unsigned long long)res->start,
+ (unsigned long long)res->end);
+ res_bak = res;
+ goto err_unmap_iomem;
+ }
+ }
+
+ list_for_each_entry(res, &resources->ioport, list) {
+ r = request_region(res->start, res->end - res->start, desc);
+ if (!r) {
+ pr_err(APEI_PFX
+ "Can not request ioport region <%016llx-%016llx> for GARs.\n",
+ (unsigned long long)res->start,
+ (unsigned long long)res->end);
+ res_bak = res;
+ goto err_unmap_ioport;
+ }
+ }
+
+ apei_resources_merge(&apei_resources_all, resources);
+
+ return 0;
+err_unmap_ioport:
+ list_for_each_entry(res, &resources->ioport, list) {
+ if (res == res_bak)
+ break;
+ release_mem_region(res->start, res->end - res->start);
+ }
+ res_bak = NULL;
+err_unmap_iomem:
+ list_for_each_entry(res, &resources->iomem, list) {
+ if (res == res_bak)
+ break;
+ release_region(res->start, res->end - res->start);
+ }
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(apei_resources_request);
+
+void apei_resources_release(struct apei_resources *resources)
+{
+ struct apei_res *res;
+
+ list_for_each_entry(res, &resources->iomem, list)
+ release_mem_region(res->start, res->end - res->start);
+ list_for_each_entry(res, &resources->ioport, list)
+ release_region(res->start, res->end - res->start);
+
+ apei_resources_sub(&apei_resources_all, resources);
+}
+EXPORT_SYMBOL_GPL(apei_resources_release);
+
+static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr)
+{
+ u32 width, space_id;
+
+ width = reg->bit_width;
+ space_id = reg->space_id;
+ /* Handle possible alignment issues */
+ memcpy(paddr, &reg->address, sizeof(*paddr));
+ if (!*paddr) {
+ pr_warning(FW_BUG APEI_PFX
+ "Invalid physical address in GAR [0x%llx/%u/%u]\n",
+ *paddr, width, space_id);
+ return -EINVAL;
+ }
+
+ if ((width != 8) && (width != 16) && (width != 32) && (width != 64)) {
+ pr_warning(FW_BUG APEI_PFX
+ "Invalid bit width in GAR [0x%llx/%u/%u]\n",
+ *paddr, width, space_id);
+ return -EINVAL;
+ }
+
+ if (space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY &&
+ space_id != ACPI_ADR_SPACE_SYSTEM_IO) {
+ pr_warning(FW_BUG APEI_PFX
+ "Invalid address space type in GAR [0x%llx/%u/%u]\n",
+ *paddr, width, space_id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int collect_res_callback(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry,
+ void *data)
+{
+ struct apei_resources *resources = data;
+ struct acpi_generic_address *reg = &entry->register_region;
+ u8 ins = entry->instruction;
+ u64 paddr;
+ int rc;
+
+ if (!(ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER))
+ return 0;
+
+ rc = apei_check_gar(reg, &paddr);
+ if (rc)
+ return rc;
+
+ switch (reg->space_id) {
+ case ACPI_ADR_SPACE_SYSTEM_MEMORY:
+ return apei_res_add(&resources->iomem, paddr,
+ reg->bit_width / 8);
+ case ACPI_ADR_SPACE_SYSTEM_IO:
+ return apei_res_add(&resources->ioport, paddr,
+ reg->bit_width / 8);
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * The same register may be used by multiple instructions in GARs, so
+ * resources are collected before being requested.
+ */
+int apei_exec_collect_resources(struct apei_exec_context *ctx,
+ struct apei_resources *resources)
+{
+ return apei_exec_for_each_entry(ctx, collect_res_callback,
+ resources, NULL);
+}
+EXPORT_SYMBOL_GPL(apei_exec_collect_resources);
+
+struct dentry *apei_get_debugfs_dir(void)
+{
+ static struct dentry *dapei;
+
+ if (!dapei)
+ dapei = debugfs_create_dir("apei", NULL);
+
+ return dapei;
+}
+EXPORT_SYMBOL_GPL(apei_get_debugfs_dir);
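A minimal sketch of how a client drives the interpreter above (illustrative only, not part of the patch; the instruction indices and the action number are hypothetical, while the real users, einj.c and erst.c below, index their tables by the ACPI_EINJ_* and ACPI_ERST_* instruction types):

#include <linux/acpi.h>
#include "apei-internal.h"

/* Hypothetical instruction indices and action number for this sketch */
enum { MY_INS_READ_REGISTER, MY_INS_WRITE_REGISTER_VALUE, MY_INS_NOOP, MY_INS_MAX };
#define MY_ACTION_GET_STATUS 0x01

static struct apei_exec_ins_type my_ins_type[MY_INS_MAX] = {
	[MY_INS_READ_REGISTER] = {
		.flags = APEI_EXEC_INS_ACCESS_REGISTER,
		.run = apei_exec_read_register,
	},
	[MY_INS_WRITE_REGISTER_VALUE] = {
		.flags = APEI_EXEC_INS_ACCESS_REGISTER,
		.run = apei_exec_write_register_value,
	},
	[MY_INS_NOOP] = {
		.flags = 0,
		.run = apei_exec_noop,
	},
};

/* action_table/entries point into the firmware-provided table */
static int my_run_get_status(struct acpi_whea_header *action_table,
			     u32 entries, u64 *status)
{
	struct apei_exec_context ctx;
	int rc;

	apei_exec_ctx_init(&ctx, my_ins_type, MY_INS_MAX, action_table, entries);
	rc = apei_exec_run(&ctx, MY_ACTION_GET_STATUS);
	if (rc)
		return rc;
	*status = apei_exec_ctx_get_output(&ctx);
	return 0;
}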
diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
new file mode 100644
index 0000000..18df1e9
--- /dev/null
+++ b/drivers/acpi/apei/apei-internal.h
@@ -0,0 +1,114 @@
+/*
+ * apei-internal.h - ACPI Platform Error Interface internal
+ * definitions.
+ */
+
+#ifndef APEI_INTERNAL_H
+#define APEI_INTERNAL_H
+
+#include <linux/cper.h>
+
+struct apei_exec_context;
+
+typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry);
+
+#define APEI_EXEC_INS_ACCESS_REGISTER 0x0001
+
+struct apei_exec_ins_type {
+ u32 flags;
+ apei_exec_ins_func_t run;
+};
+
+struct apei_exec_context {
+ u32 ip;
+ u64 value;
+ u64 var1;
+ u64 var2;
+ u64 src_base;
+ u64 dst_base;
+ struct apei_exec_ins_type *ins_table;
+ u32 instructions;
+ struct acpi_whea_header *action_table;
+ u32 entries;
+};
+
+void apei_exec_ctx_init(struct apei_exec_context *ctx,
+ struct apei_exec_ins_type *ins_table,
+ u32 instructions,
+ struct acpi_whea_header *action_table,
+ u32 entries);
+
+static inline void apei_exec_ctx_set_input(struct apei_exec_context *ctx,
+ u64 input)
+{
+ ctx->value = input;
+}
+
+static inline u64 apei_exec_ctx_get_output(struct apei_exec_context *ctx)
+{
+ return ctx->value;
+}
+
+int apei_exec_run(struct apei_exec_context *ctx, u8 action);
+
+/* Common instruction implementation */
+
+/* IP has been set in instruction function */
+#define APEI_EXEC_SET_IP 1
+
+int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val);
+int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val);
+int apei_exec_read_register(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry);
+int apei_exec_read_register_value(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry);
+int apei_exec_write_register(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry);
+int apei_exec_write_register_value(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry);
+int apei_exec_noop(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry);
+int apei_exec_pre_map_gars(struct apei_exec_context *ctx);
+int apei_exec_post_unmap_gars(struct apei_exec_context *ctx);
+
+struct apei_resources {
+ struct list_head iomem;
+ struct list_head ioport;
+};
+
+static inline void apei_resources_init(struct apei_resources *resources)
+{
+ INIT_LIST_HEAD(&resources->iomem);
+ INIT_LIST_HEAD(&resources->ioport);
+}
+
+void apei_resources_fini(struct apei_resources *resources);
+int apei_resources_sub(struct apei_resources *resources1,
+ struct apei_resources *resources2);
+int apei_resources_request(struct apei_resources *resources,
+ const char *desc);
+void apei_resources_release(struct apei_resources *resources);
+int apei_exec_collect_resources(struct apei_exec_context *ctx,
+ struct apei_resources *resources);
+
+struct dentry;
+struct dentry *apei_get_debugfs_dir(void);
+
+#define apei_estatus_for_each_section(estatus, section) \
+ for (section = (struct acpi_hest_generic_data *)(estatus + 1); \
+ (void *)section - (void *)estatus < estatus->data_length; \
+ section = (void *)(section+1) + section->error_data_length)
+
+static inline u32 apei_estatus_len(struct acpi_hest_generic_status *estatus)
+{
+ if (estatus->raw_data_length)
+ return estatus->raw_data_offset + \
+ estatus->raw_data_length;
+ else
+ return sizeof(*estatus) + estatus->data_length;
+}
+
+int apei_estatus_check_header(const struct acpi_hest_generic_status *estatus);
+int apei_estatus_check(const struct acpi_hest_generic_status *estatus);
+#endif
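The apei_estatus_for_each_section() iterator and apei_estatus_len() above are the helpers used to walk a generic error status block (as ghes.c, later in this patch, does). A minimal sketch of that usage (illustrative only, not part of the patch):

#include <linux/acpi.h>
#include "apei-internal.h"

static void my_print_estatus(struct acpi_hest_generic_status *estatus)
{
	struct acpi_hest_generic_data *gdata;

	pr_info("error status block: %u bytes, severity %u\n",
		apei_estatus_len(estatus), estatus->error_severity);

	/* Walk each generic error data entry in the block */
	apei_estatus_for_each_section(estatus, gdata)
		pr_info("  section: severity %u, %u bytes of error data\n",
			gdata->error_severity, gdata->error_data_length);
}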
diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
new file mode 100644
index 0000000..f4cf2fc
--- /dev/null
+++ b/drivers/acpi/apei/cper.c
@@ -0,0 +1,84 @@
+/*
+ * UEFI Common Platform Error Record (CPER) support
+ *
+ * Copyright (C) 2010, Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * CPER is the format used to describe platform hardware errors by
+ * various APEI tables, such as ERST, BERT and HEST.
+ *
+ * For more information about CPER, please refer to Appendix N of UEFI
+ * Specification version 2.3.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/time.h>
+#include <linux/cper.h>
+#include <linux/acpi.h>
+
+/*
+ * CPER record IDs need to be unique even across reboots, because a
+ * record ID is used as an index into ERST storage, and CPER records
+ * from multiple boots may co-exist in ERST.
+ */
+u64 cper_next_record_id(void)
+{
+ static atomic64_t seq;
+
+ if (!atomic64_read(&seq))
+ atomic64_set(&seq, ((u64)get_seconds()) << 32);
+
+ return atomic64_inc_return(&seq);
+}
+EXPORT_SYMBOL_GPL(cper_next_record_id);
+
+int apei_estatus_check_header(const struct acpi_hest_generic_status *estatus)
+{
+ if (estatus->data_length &&
+ estatus->data_length < sizeof(struct acpi_hest_generic_data))
+ return -EINVAL;
+ if (estatus->raw_data_length &&
+ estatus->raw_data_offset < sizeof(*estatus) + estatus->data_length)
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(apei_estatus_check_header);
+
+int apei_estatus_check(const struct acpi_hest_generic_status *estatus)
+{
+ struct acpi_hest_generic_data *gdata;
+ unsigned int data_len, gedata_len;
+ int rc;
+
+ rc = apei_estatus_check_header(estatus);
+ if (rc)
+ return rc;
+ data_len = estatus->data_length;
+ gdata = (struct acpi_hest_generic_data *)(estatus + 1);
+ while (data_len > sizeof(*gdata)) {
+ gedata_len = gdata->error_data_length;
+ if (gedata_len > data_len - sizeof(*gdata))
+ return -EINVAL;
+ data_len -= gedata_len + sizeof(*gdata);
+ }
+ if (data_len)
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(apei_estatus_check);
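cper_next_record_id() supplies the record ID a caller stamps into a CPER record header before persisting it. A minimal sketch of that flow (illustrative only, not part of the patch; it assumes erst_write() from erst.c below is declared in <acpi/apei.h>, and leaves record_length, severity and section setup to the caller):

#include <linux/cper.h>
#include <linux/string.h>
#include <acpi/apei.h>

static int my_persist_record(struct cper_record_header *rcd)
{
	/* Stamp the standard CPER signature and a unique record ID */
	memcpy(rcd->signature, CPER_SIG_RECORD, CPER_SIG_SIZE);
	rcd->revision = CPER_RECORD_REV;
	rcd->signature_end = CPER_SIG_END;
	rcd->record_id = cper_next_record_id();
	/* rcd->record_length, severity and sections are set up by the caller */
	return erst_write(rcd);
}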
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
new file mode 100644
index 0000000..465c885
--- /dev/null
+++ b/drivers/acpi/apei/einj.c
@@ -0,0 +1,548 @@
+/*
+ * APEI Error INJection support
+ *
+ * EINJ provides a hardware error injection mechanism; this is useful
+ * for debugging and testing other APEI and RAS features.
+ *
+ * For more information about EINJ, please refer to ACPI Specification
+ * version 4.0, section 17.5.
+ *
+ * Copyright 2009-2010 Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/nmi.h>
+#include <linux/delay.h>
+#include <acpi/acpi.h>
+
+#include "apei-internal.h"
+
+#define EINJ_PFX "EINJ: "
+
+#define SPIN_UNIT 100 /* 100ns */
+/* Firmware should respond within 1 millisecond */
+#define FIRMWARE_TIMEOUT (1 * NSEC_PER_MSEC)
+
+/*
+ * Some BIOSes allow parameters to the SET_ERROR_TYPE entries in the
+ * EINJ table through an unpublished extension. Use with caution as
+ * most will ignore the parameter and make their own choice of address
+ * for error injection.
+ */
+struct einj_parameter {
+ u64 type;
+ u64 reserved1;
+ u64 reserved2;
+ u64 param1;
+ u64 param2;
+};
+
+#define EINJ_OP_BUSY 0x1
+#define EINJ_STATUS_SUCCESS 0x0
+#define EINJ_STATUS_FAIL 0x1
+#define EINJ_STATUS_INVAL 0x2
+
+#define EINJ_TAB_ENTRY(tab) \
+ ((struct acpi_whea_header *)((char *)(tab) + \
+ sizeof(struct acpi_table_einj)))
+
+static struct acpi_table_einj *einj_tab;
+
+static struct apei_resources einj_resources;
+
+static struct apei_exec_ins_type einj_ins_type[] = {
+ [ACPI_EINJ_READ_REGISTER] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = apei_exec_read_register,
+ },
+ [ACPI_EINJ_READ_REGISTER_VALUE] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = apei_exec_read_register_value,
+ },
+ [ACPI_EINJ_WRITE_REGISTER] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = apei_exec_write_register,
+ },
+ [ACPI_EINJ_WRITE_REGISTER_VALUE] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = apei_exec_write_register_value,
+ },
+ [ACPI_EINJ_NOOP] = {
+ .flags = 0,
+ .run = apei_exec_noop,
+ },
+};
+
+/*
+ * Prevent the EINJ interpreter from running concurrently, because the
+ * corresponding firmware implementation may not work properly when
+ * invoked concurrently.
+ */
+static DEFINE_MUTEX(einj_mutex);
+
+static struct einj_parameter *einj_param;
+
+static void einj_exec_ctx_init(struct apei_exec_context *ctx)
+{
+ apei_exec_ctx_init(ctx, einj_ins_type, ARRAY_SIZE(einj_ins_type),
+ EINJ_TAB_ENTRY(einj_tab), einj_tab->entries);
+}
+
+static int __einj_get_available_error_type(u32 *type)
+{
+ struct apei_exec_context ctx;
+ int rc;
+
+ einj_exec_ctx_init(&ctx);
+ rc = apei_exec_run(&ctx, ACPI_EINJ_GET_ERROR_TYPE);
+ if (rc)
+ return rc;
+ *type = apei_exec_ctx_get_output(&ctx);
+
+ return 0;
+}
+
+/* Get error injection capabilities of the platform */
+static int einj_get_available_error_type(u32 *type)
+{
+ int rc;
+
+ mutex_lock(&einj_mutex);
+ rc = __einj_get_available_error_type(type);
+ mutex_unlock(&einj_mutex);
+
+ return rc;
+}
+
+static int einj_timedout(u64 *t)
+{
+ if ((s64)*t < SPIN_UNIT) {
+ pr_warning(FW_WARN EINJ_PFX
+ "Firmware does not respond in time\n");
+ return 1;
+ }
+ *t -= SPIN_UNIT;
+ ndelay(SPIN_UNIT);
+ touch_nmi_watchdog();
+ return 0;
+}
+
+static u64 einj_get_parameter_address(void)
+{
+ int i;
+ u64 paddr = 0;
+ struct acpi_whea_header *entry;
+
+ entry = EINJ_TAB_ENTRY(einj_tab);
+ for (i = 0; i < einj_tab->entries; i++) {
+ if (entry->action == ACPI_EINJ_SET_ERROR_TYPE &&
+ entry->instruction == ACPI_EINJ_WRITE_REGISTER &&
+ entry->register_region.space_id ==
+ ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ memcpy(&paddr, &entry->register_region.address,
+ sizeof(paddr));
+ entry++;
+ }
+
+ return paddr;
+}
+
+/* Do a sanity check on the trigger table */
+static int einj_check_trigger_header(struct acpi_einj_trigger *trigger_tab)
+{
+ if (trigger_tab->header_size != sizeof(struct acpi_einj_trigger))
+ return -EINVAL;
+ if (trigger_tab->table_size > PAGE_SIZE ||
+ trigger_tab->table_size <= trigger_tab->header_size)
+ return -EINVAL;
+ if (trigger_tab->entry_count !=
+ (trigger_tab->table_size - trigger_tab->header_size) /
+ sizeof(struct acpi_einj_entry))
+ return -EINVAL;
+
+ return 0;
+}
+
+/* Execute instructions in trigger error action table */
+static int __einj_error_trigger(u64 trigger_paddr)
+{
+ struct acpi_einj_trigger *trigger_tab = NULL;
+ struct apei_exec_context trigger_ctx;
+ struct apei_resources trigger_resources;
+ struct acpi_whea_header *trigger_entry;
+ struct resource *r;
+ u32 table_size;
+ int rc = -EIO;
+
+ r = request_mem_region(trigger_paddr, sizeof(*trigger_tab),
+ "APEI EINJ Trigger Table");
+ if (!r) {
+ pr_err(EINJ_PFX
+ "Can not request iomem region <%016llx-%016llx> for Trigger table.\n",
+ (unsigned long long)trigger_paddr,
+ (unsigned long long)trigger_paddr+sizeof(*trigger_tab));
+ goto out;
+ }
+ trigger_tab = ioremap_cache(trigger_paddr, sizeof(*trigger_tab));
+ if (!trigger_tab) {
+ pr_err(EINJ_PFX "Failed to map trigger table!\n");
+ goto out_rel_header;
+ }
+ rc = einj_check_trigger_header(trigger_tab);
+ if (rc) {
+ pr_warning(FW_BUG EINJ_PFX
+ "The trigger error action table is invalid\n");
+ goto out_rel_header;
+ }
+ rc = -EIO;
+ table_size = trigger_tab->table_size;
+ r = request_mem_region(trigger_paddr + sizeof(*trigger_tab),
+ table_size - sizeof(*trigger_tab),
+ "APEI EINJ Trigger Table");
+ if (!r) {
+ pr_err(EINJ_PFX
+"Can not request iomem region <%016llx-%016llx> for Trigger Table Entry.\n",
+ (unsigned long long)trigger_paddr+sizeof(*trigger_tab),
+ (unsigned long long)trigger_paddr + table_size);
+ goto out_rel_header;
+ }
+ iounmap(trigger_tab);
+ trigger_tab = ioremap_cache(trigger_paddr, table_size);
+ if (!trigger_tab) {
+ pr_err(EINJ_PFX "Failed to map trigger table!\n");
+ goto out_rel_entry;
+ }
+ trigger_entry = (struct acpi_whea_header *)
+ ((char *)trigger_tab + sizeof(struct acpi_einj_trigger));
+ apei_resources_init(&trigger_resources);
+ apei_exec_ctx_init(&trigger_ctx, einj_ins_type,
+ ARRAY_SIZE(einj_ins_type),
+ trigger_entry, trigger_tab->entry_count);
+ rc = apei_exec_collect_resources(&trigger_ctx, &trigger_resources);
+ if (rc)
+ goto out_fini;
+ rc = apei_resources_sub(&trigger_resources, &einj_resources);
+ if (rc)
+ goto out_fini;
+ rc = apei_resources_request(&trigger_resources, "APEI EINJ Trigger");
+ if (rc)
+ goto out_fini;
+ rc = apei_exec_pre_map_gars(&trigger_ctx);
+ if (rc)
+ goto out_release;
+
+ rc = apei_exec_run(&trigger_ctx, ACPI_EINJ_TRIGGER_ERROR);
+
+ apei_exec_post_unmap_gars(&trigger_ctx);
+out_release:
+ apei_resources_release(&trigger_resources);
+out_fini:
+ apei_resources_fini(&trigger_resources);
+out_rel_entry:
+ release_mem_region(trigger_paddr + sizeof(*trigger_tab),
+ table_size - sizeof(*trigger_tab));
+out_rel_header:
+ release_mem_region(trigger_paddr, sizeof(*trigger_tab));
+out:
+ if (trigger_tab)
+ iounmap(trigger_tab);
+
+ return rc;
+}
+
+static int __einj_error_inject(u32 type, u64 param1, u64 param2)
+{
+ struct apei_exec_context ctx;
+ u64 val, trigger_paddr, timeout = FIRMWARE_TIMEOUT;
+ int rc;
+
+ einj_exec_ctx_init(&ctx);
+
+ rc = apei_exec_run(&ctx, ACPI_EINJ_BEGIN_OPERATION);
+ if (rc)
+ return rc;
+ apei_exec_ctx_set_input(&ctx, type);
+ rc = apei_exec_run(&ctx, ACPI_EINJ_SET_ERROR_TYPE);
+ if (rc)
+ return rc;
+ if (einj_param) {
+ writeq(param1, &einj_param->param1);
+ writeq(param2, &einj_param->param2);
+ }
+ rc = apei_exec_run(&ctx, ACPI_EINJ_EXECUTE_OPERATION);
+ if (rc)
+ return rc;
+ for (;;) {
+ rc = apei_exec_run(&ctx, ACPI_EINJ_CHECK_BUSY_STATUS);
+ if (rc)
+ return rc;
+ val = apei_exec_ctx_get_output(&ctx);
+ if (!(val & EINJ_OP_BUSY))
+ break;
+ if (einj_timedout(&timeout))
+ return -EIO;
+ }
+ rc = apei_exec_run(&ctx, ACPI_EINJ_GET_COMMAND_STATUS);
+ if (rc)
+ return rc;
+ val = apei_exec_ctx_get_output(&ctx);
+ if (val != EINJ_STATUS_SUCCESS)
+ return -EBUSY;
+
+ rc = apei_exec_run(&ctx, ACPI_EINJ_GET_TRIGGER_TABLE);
+ if (rc)
+ return rc;
+ trigger_paddr = apei_exec_ctx_get_output(&ctx);
+ rc = __einj_error_trigger(trigger_paddr);
+ if (rc)
+ return rc;
+ rc = apei_exec_run(&ctx, ACPI_EINJ_END_OPERATION);
+
+ return rc;
+}
+
+/* Inject the specified hardware error */
+static int einj_error_inject(u32 type, u64 param1, u64 param2)
+{
+ int rc;
+
+ mutex_lock(&einj_mutex);
+ rc = __einj_error_inject(type, param1, param2);
+ mutex_unlock(&einj_mutex);
+
+ return rc;
+}
+
+static u32 error_type;
+static u64 error_param1;
+static u64 error_param2;
+static struct dentry *einj_debug_dir;
+
+static int available_error_type_show(struct seq_file *m, void *v)
+{
+ int rc;
+ u32 available_error_type = 0;
+
+ rc = einj_get_available_error_type(&available_error_type);
+ if (rc)
+ return rc;
+ if (available_error_type & 0x0001)
+ seq_printf(m, "0x00000001\tProcessor Correctable\n");
+ if (available_error_type & 0x0002)
+ seq_printf(m, "0x00000002\tProcessor Uncorrectable non-fatal\n");
+ if (available_error_type & 0x0004)
+ seq_printf(m, "0x00000004\tProcessor Uncorrectable fatal\n");
+ if (available_error_type & 0x0008)
+ seq_printf(m, "0x00000008\tMemory Correctable\n");
+ if (available_error_type & 0x0010)
+ seq_printf(m, "0x00000010\tMemory Uncorrectable non-fatal\n");
+ if (available_error_type & 0x0020)
+ seq_printf(m, "0x00000020\tMemory Uncorrectable fatal\n");
+ if (available_error_type & 0x0040)
+ seq_printf(m, "0x00000040\tPCI Express Correctable\n");
+ if (available_error_type & 0x0080)
+ seq_printf(m, "0x00000080\tPCI Express Uncorrectable non-fatal\n");
+ if (available_error_type & 0x0100)
+ seq_printf(m, "0x00000100\tPCI Express Uncorrectable fatal\n");
+ if (available_error_type & 0x0200)
+ seq_printf(m, "0x00000200\tPlatform Correctable\n");
+ if (available_error_type & 0x0400)
+ seq_printf(m, "0x00000400\tPlatform Uncorrectable non-fatal\n");
+ if (available_error_type & 0x0800)
+ seq_printf(m, "0x00000800\tPlatform Uncorrectable fatal\n");
+
+ return 0;
+}
+
+static int available_error_type_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, available_error_type_show, NULL);
+}
+
+static const struct file_operations available_error_type_fops = {
+ .open = available_error_type_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int error_type_get(void *data, u64 *val)
+{
+ *val = error_type;
+
+ return 0;
+}
+
+static int error_type_set(void *data, u64 val)
+{
+ int rc;
+ u32 available_error_type = 0;
+
+ /* Only one error type can be specified */
+ if (val & (val - 1))
+ return -EINVAL;
+ rc = einj_get_available_error_type(&available_error_type);
+ if (rc)
+ return rc;
+ if (!(val & available_error_type))
+ return -EINVAL;
+ error_type = val;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(error_type_fops, error_type_get,
+ error_type_set, "0x%llx\n");
+
+static int error_inject_set(void *data, u64 val)
+{
+ if (!error_type)
+ return -EINVAL;
+
+ return einj_error_inject(error_type, error_param1, error_param2);
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(error_inject_fops, NULL,
+ error_inject_set, "%llu\n");
+
+static int einj_check_table(struct acpi_table_einj *einj_tab)
+{
+ if (einj_tab->header_length != sizeof(struct acpi_table_einj))
+ return -EINVAL;
+ if (einj_tab->header.length < sizeof(struct acpi_table_einj))
+ return -EINVAL;
+ if (einj_tab->entries !=
+ (einj_tab->header.length - sizeof(struct acpi_table_einj)) /
+ sizeof(struct acpi_einj_entry))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int __init einj_init(void)
+{
+ int rc;
+ u64 param_paddr;
+ acpi_status status;
+ struct dentry *fentry;
+ struct apei_exec_context ctx;
+
+ if (acpi_disabled)
+ return -ENODEV;
+
+ status = acpi_get_table(ACPI_SIG_EINJ, 0,
+ (struct acpi_table_header **)&einj_tab);
+ if (status == AE_NOT_FOUND) {
+ pr_info(EINJ_PFX "Table is not found!\n");
+ return -ENODEV;
+ } else if (ACPI_FAILURE(status)) {
+ const char *msg = acpi_format_exception(status);
+ pr_err(EINJ_PFX "Failed to get table, %s\n", msg);
+ return -EINVAL;
+ }
+
+ rc = einj_check_table(einj_tab);
+ if (rc) {
+ pr_warning(FW_BUG EINJ_PFX "EINJ table is invalid\n");
+ return -EINVAL;
+ }
+
+ rc = -ENOMEM;
+ einj_debug_dir = debugfs_create_dir("einj", apei_get_debugfs_dir());
+ if (!einj_debug_dir)
+ goto err_cleanup;
+ fentry = debugfs_create_file("available_error_type", S_IRUSR,
+ einj_debug_dir, NULL,
+ &available_error_type_fops);
+ if (!fentry)
+ goto err_cleanup;
+ fentry = debugfs_create_file("error_type", S_IRUSR | S_IWUSR,
+ einj_debug_dir, NULL, &error_type_fops);
+ if (!fentry)
+ goto err_cleanup;
+ fentry = debugfs_create_x64("param1", S_IRUSR | S_IWUSR,
+ einj_debug_dir, &error_param1);
+ if (!fentry)
+ goto err_cleanup;
+ fentry = debugfs_create_x64("param2", S_IRUSR | S_IWUSR,
+ einj_debug_dir, &error_param2);
+ if (!fentry)
+ goto err_cleanup;
+ fentry = debugfs_create_file("error_inject", S_IWUSR,
+ einj_debug_dir, NULL, &error_inject_fops);
+ if (!fentry)
+ goto err_cleanup;
+
+ apei_resources_init(&einj_resources);
+ einj_exec_ctx_init(&ctx);
+ rc = apei_exec_collect_resources(&ctx, &einj_resources);
+ if (rc)
+ goto err_fini;
+ rc = apei_resources_request(&einj_resources, "APEI EINJ");
+ if (rc)
+ goto err_fini;
+ rc = apei_exec_pre_map_gars(&ctx);
+ if (rc)
+ goto err_release;
+ param_paddr = einj_get_parameter_address();
+ if (param_paddr) {
+ einj_param = ioremap(param_paddr, sizeof(*einj_param));
+ rc = -ENOMEM;
+ if (!einj_param)
+ goto err_unmap;
+ }
+
+ pr_info(EINJ_PFX "Error INJection is initialized.\n");
+
+ return 0;
+
+err_unmap:
+ apei_exec_post_unmap_gars(&ctx);
+err_release:
+ apei_resources_release(&einj_resources);
+err_fini:
+ apei_resources_fini(&einj_resources);
+err_cleanup:
+ debugfs_remove_recursive(einj_debug_dir);
+
+ return rc;
+}
+
+static void __exit einj_exit(void)
+{
+ struct apei_exec_context ctx;
+
+ if (einj_param)
+ iounmap(einj_param);
+ einj_exec_ctx_init(&ctx);
+ apei_exec_post_unmap_gars(&ctx);
+ apei_resources_release(&einj_resources);
+ apei_resources_fini(&einj_resources);
+ debugfs_remove_recursive(einj_debug_dir);
+}
+
+module_init(einj_init);
+module_exit(einj_exit);
+
+MODULE_AUTHOR("Huang Ying");
+MODULE_DESCRIPTION("APEI Error INJection support");
+MODULE_LICENSE("GPL");
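Once the module is loaded, the debugfs files created above are the whole user interface. A minimal user-space sketch of an injection (illustrative only, not part of the patch; it assumes debugfs is mounted at /sys/kernel/debug and that error type 0x8, Memory Correctable, is listed by available_error_type):

#include <stdio.h>
#include <stdlib.h>

#define EINJ_DIR "/sys/kernel/debug/apei/einj/"

/* Write a small string to one of the EINJ debugfs files */
static void write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		exit(1);
	}
	fprintf(f, "%s\n", val);
	fclose(f);
}

int main(void)
{
	write_str(EINJ_DIR "error_type", "0x8");	/* select Memory Correctable */
	write_str(EINJ_DIR "error_inject", "1");	/* trigger the injection */
	return 0;
}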
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
new file mode 100644
index 0000000..2ebc391
--- /dev/null
+++ b/drivers/acpi/apei/erst.c
@@ -0,0 +1,855 @@
+/*
+ * APEI Error Record Serialization Table support
+ *
+ * ERST is a way provided by APEI to save and retrieve hardware error
+ * information to and from a persistent store.
+ *
+ * For more information about ERST, please refer to ACPI Specification
+ * version 4.0, section 17.4.
+ *
+ * Copyright 2010 Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/acpi.h>
+#include <linux/uaccess.h>
+#include <linux/cper.h>
+#include <linux/nmi.h>
+#include <acpi/apei.h>
+
+#include "apei-internal.h"
+
+#define ERST_PFX "ERST: "
+
+/* ERST command status */
+#define ERST_STATUS_SUCCESS 0x0
+#define ERST_STATUS_NOT_ENOUGH_SPACE 0x1
+#define ERST_STATUS_HARDWARE_NOT_AVAILABLE 0x2
+#define ERST_STATUS_FAILED 0x3
+#define ERST_STATUS_RECORD_STORE_EMPTY 0x4
+#define ERST_STATUS_RECORD_NOT_FOUND 0x5
+
+#define ERST_TAB_ENTRY(tab) \
+ ((struct acpi_whea_header *)((char *)(tab) + \
+ sizeof(struct acpi_table_erst)))
+
+#define SPIN_UNIT 100 /* 100ns */
+/* Firmware should respond within 1 millisecond */
+#define FIRMWARE_TIMEOUT (1 * NSEC_PER_MSEC)
+#define FIRMWARE_MAX_STALL 50 /* 50us */
+
+int erst_disable;
+EXPORT_SYMBOL_GPL(erst_disable);
+
+static struct acpi_table_erst *erst_tab;
+
+/* ERST Error Log Address Range attributes */
+#define ERST_RANGE_RESERVED 0x0001
+#define ERST_RANGE_NVRAM 0x0002
+#define ERST_RANGE_SLOW 0x0004
+
+/*
+ * ERST Error Log Address Range, used as buffer for reading/writing
+ * error records.
+ */
+static struct erst_erange {
+ u64 base;
+ u64 size;
+ void __iomem *vaddr;
+ u32 attr;
+} erst_erange;
+
+/*
+ * Prevent the ERST interpreter from running concurrently, because the
+ * corresponding firmware implementation may not work properly when
+ * invoked concurrently.
+ *
+ * It is also used to provide exclusive access to the ERST Error Log
+ * Address Range.
+ */
+static DEFINE_SPINLOCK(erst_lock);
+
+static inline int erst_errno(int command_status)
+{
+ switch (command_status) {
+ case ERST_STATUS_SUCCESS:
+ return 0;
+ case ERST_STATUS_HARDWARE_NOT_AVAILABLE:
+ return -ENODEV;
+ case ERST_STATUS_NOT_ENOUGH_SPACE:
+ return -ENOSPC;
+ case ERST_STATUS_RECORD_STORE_EMPTY:
+ case ERST_STATUS_RECORD_NOT_FOUND:
+ return -ENOENT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int erst_timedout(u64 *t, u64 spin_unit)
+{
+ if ((s64)*t < spin_unit) {
+ pr_warning(FW_WARN ERST_PFX
+ "Firmware does not respond in time\n");
+ return 1;
+ }
+ *t -= spin_unit;
+ ndelay(spin_unit);
+ touch_nmi_watchdog();
+ return 0;
+}
+
+static int erst_exec_load_var1(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ return __apei_exec_read_register(entry, &ctx->var1);
+}
+
+static int erst_exec_load_var2(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ return __apei_exec_read_register(entry, &ctx->var2);
+}
+
+static int erst_exec_store_var1(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ return __apei_exec_write_register(entry, ctx->var1);
+}
+
+static int erst_exec_add(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ ctx->var1 += ctx->var2;
+ return 0;
+}
+
+static int erst_exec_subtract(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ ctx->var1 -= ctx->var2;
+ return 0;
+}
+
+static int erst_exec_add_value(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ int rc;
+ u64 val;
+
+ rc = __apei_exec_read_register(entry, &val);
+ if (rc)
+ return rc;
+ val += ctx->value;
+ rc = __apei_exec_write_register(entry, val);
+ return rc;
+}
+
+static int erst_exec_subtract_value(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ int rc;
+ u64 val;
+
+ rc = __apei_exec_read_register(entry, &val);
+ if (rc)
+ return rc;
+ val -= ctx->value;
+ rc = __apei_exec_write_register(entry, val);
+ return rc;
+}
+
+static int erst_exec_stall(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ u64 stall_time;
+
+ if (ctx->value > FIRMWARE_MAX_STALL) {
+ if (!in_nmi())
+ pr_warning(FW_WARN ERST_PFX
+ "Too long stall time for stall instruction: %llx.\n",
+ ctx->value);
+ stall_time = FIRMWARE_MAX_STALL;
+ } else
+ stall_time = ctx->value;
+ udelay(stall_time);
+ return 0;
+}
+
+static int erst_exec_stall_while_true(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ int rc;
+ u64 val;
+ u64 timeout = FIRMWARE_TIMEOUT;
+ u64 stall_time;
+
+ if (ctx->var1 > FIRMWARE_MAX_STALL) {
+ if (!in_nmi())
+ pr_warning(FW_WARN ERST_PFX
+ "Too long stall time for stall while true instruction: %llx.\n",
+ ctx->var1);
+ stall_time = FIRMWARE_MAX_STALL;
+ } else
+ stall_time = ctx->var1;
+
+ for (;;) {
+ rc = __apei_exec_read_register(entry, &val);
+ if (rc)
+ return rc;
+ if (val != ctx->value)
+ break;
+ if (erst_timedout(&timeout, stall_time * NSEC_PER_USEC))
+ return -EIO;
+ }
+ return 0;
+}
+
+static int erst_exec_skip_next_instruction_if_true(
+ struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ int rc;
+ u64 val;
+
+ rc = __apei_exec_read_register(entry, &val);
+ if (rc)
+ return rc;
+ if (val == ctx->value) {
+ ctx->ip += 2;
+ return APEI_EXEC_SET_IP;
+ }
+
+ return 0;
+}
+
+static int erst_exec_goto(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ ctx->ip = ctx->value;
+ return APEI_EXEC_SET_IP;
+}
+
+static int erst_exec_set_src_address_base(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ return __apei_exec_read_register(entry, &ctx->src_base);
+}
+
+static int erst_exec_set_dst_address_base(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ return __apei_exec_read_register(entry, &ctx->dst_base);
+}
+
+static int erst_exec_move_data(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ int rc;
+ u64 offset;
+
+ rc = __apei_exec_read_register(entry, &offset);
+ if (rc)
+ return rc;
+ memmove((void *)ctx->dst_base + offset,
+ (void *)ctx->src_base + offset,
+ ctx->var2);
+
+ return 0;
+}
+
+static struct apei_exec_ins_type erst_ins_type[] = {
+ [ACPI_ERST_READ_REGISTER] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = apei_exec_read_register,
+ },
+ [ACPI_ERST_READ_REGISTER_VALUE] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = apei_exec_read_register_value,
+ },
+ [ACPI_ERST_WRITE_REGISTER] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = apei_exec_write_register,
+ },
+ [ACPI_ERST_WRITE_REGISTER_VALUE] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = apei_exec_write_register_value,
+ },
+ [ACPI_ERST_NOOP] = {
+ .flags = 0,
+ .run = apei_exec_noop,
+ },
+ [ACPI_ERST_LOAD_VAR1] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = erst_exec_load_var1,
+ },
+ [ACPI_ERST_LOAD_VAR2] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = erst_exec_load_var2,
+ },
+ [ACPI_ERST_STORE_VAR1] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = erst_exec_store_var1,
+ },
+ [ACPI_ERST_ADD] = {
+ .flags = 0,
+ .run = erst_exec_add,
+ },
+ [ACPI_ERST_SUBTRACT] = {
+ .flags = 0,
+ .run = erst_exec_subtract,
+ },
+ [ACPI_ERST_ADD_VALUE] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = erst_exec_add_value,
+ },
+ [ACPI_ERST_SUBTRACT_VALUE] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = erst_exec_subtract_value,
+ },
+ [ACPI_ERST_STALL] = {
+ .flags = 0,
+ .run = erst_exec_stall,
+ },
+ [ACPI_ERST_STALL_WHILE_TRUE] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = erst_exec_stall_while_true,
+ },
+ [ACPI_ERST_SKIP_NEXT_IF_TRUE] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = erst_exec_skip_next_instruction_if_true,
+ },
+ [ACPI_ERST_GOTO] = {
+ .flags = 0,
+ .run = erst_exec_goto,
+ },
+ [ACPI_ERST_SET_SRC_ADDRESS_BASE] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = erst_exec_set_src_address_base,
+ },
+ [ACPI_ERST_SET_DST_ADDRESS_BASE] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = erst_exec_set_dst_address_base,
+ },
+ [ACPI_ERST_MOVE_DATA] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = erst_exec_move_data,
+ },
+};
+
+static inline void erst_exec_ctx_init(struct apei_exec_context *ctx)
+{
+ apei_exec_ctx_init(ctx, erst_ins_type, ARRAY_SIZE(erst_ins_type),
+ ERST_TAB_ENTRY(erst_tab), erst_tab->entries);
+}
+
+static int erst_get_erange(struct erst_erange *range)
+{
+ struct apei_exec_context ctx;
+ int rc;
+
+ erst_exec_ctx_init(&ctx);
+ rc = apei_exec_run(&ctx, ACPI_ERST_GET_ERROR_RANGE);
+ if (rc)
+ return rc;
+ range->base = apei_exec_ctx_get_output(&ctx);
+ rc = apei_exec_run(&ctx, ACPI_ERST_GET_ERROR_LENGTH);
+ if (rc)
+ return rc;
+ range->size = apei_exec_ctx_get_output(&ctx);
+ rc = apei_exec_run(&ctx, ACPI_ERST_GET_ERROR_ATTRIBUTES);
+ if (rc)
+ return rc;
+ range->attr = apei_exec_ctx_get_output(&ctx);
+
+ return 0;
+}
+
+static ssize_t __erst_get_record_count(void)
+{
+ struct apei_exec_context ctx;
+ int rc;
+
+ erst_exec_ctx_init(&ctx);
+ rc = apei_exec_run(&ctx, ACPI_ERST_GET_RECORD_COUNT);
+ if (rc)
+ return rc;
+ return apei_exec_ctx_get_output(&ctx);
+}
+
+ssize_t erst_get_record_count(void)
+{
+ ssize_t count;
+ unsigned long flags;
+
+ if (erst_disable)
+ return -ENODEV;
+
+ spin_lock_irqsave(&erst_lock, flags);
+ count = __erst_get_record_count();
+ spin_unlock_irqrestore(&erst_lock, flags);
+
+ return count;
+}
+EXPORT_SYMBOL_GPL(erst_get_record_count);
+
+static int __erst_get_next_record_id(u64 *record_id)
+{
+ struct apei_exec_context ctx;
+ int rc;
+
+ erst_exec_ctx_init(&ctx);
+ rc = apei_exec_run(&ctx, ACPI_ERST_GET_RECORD_ID);
+ if (rc)
+ return rc;
+ *record_id = apei_exec_ctx_get_output(&ctx);
+
+ return 0;
+}
+
+/*
+ * Get the record ID of an existing error record on the persistent
+ * storage. If there is no error record on the persistent storage, the
+ * returned record_id is APEI_ERST_INVALID_RECORD_ID.
+ */
+int erst_get_next_record_id(u64 *record_id)
+{
+ int rc;
+ unsigned long flags;
+
+ if (erst_disable)
+ return -ENODEV;
+
+ spin_lock_irqsave(&erst_lock, flags);
+ rc = __erst_get_next_record_id(record_id);
+ spin_unlock_irqrestore(&erst_lock, flags);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(erst_get_next_record_id);
+
+static int __erst_write_to_storage(u64 offset)
+{
+ struct apei_exec_context ctx;
+ u64 timeout = FIRMWARE_TIMEOUT;
+ u64 val;
+ int rc;
+
+ erst_exec_ctx_init(&ctx);
+ rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_WRITE);
+ if (rc)
+ return rc;
+ apei_exec_ctx_set_input(&ctx, offset);
+ rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_OFFSET);
+ if (rc)
+ return rc;
+ rc = apei_exec_run(&ctx, ACPI_ERST_EXECUTE_OPERATION);
+ if (rc)
+ return rc;
+ for (;;) {
+ rc = apei_exec_run(&ctx, ACPI_ERST_CHECK_BUSY_STATUS);
+ if (rc)
+ return rc;
+ val = apei_exec_ctx_get_output(&ctx);
+ if (!val)
+ break;
+ if (erst_timedout(&timeout, SPIN_UNIT))
+ return -EIO;
+ }
+ rc = apei_exec_run(&ctx, ACPI_ERST_GET_COMMAND_STATUS);
+ if (rc)
+ return rc;
+ val = apei_exec_ctx_get_output(&ctx);
+ rc = apei_exec_run(&ctx, ACPI_ERST_END);
+ if (rc)
+ return rc;
+
+ return erst_errno(val);
+}
+
+static int __erst_read_from_storage(u64 record_id, u64 offset)
+{
+ struct apei_exec_context ctx;
+ u64 timeout = FIRMWARE_TIMEOUT;
+ u64 val;
+ int rc;
+
+ erst_exec_ctx_init(&ctx);
+ rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_READ);
+ if (rc)
+ return rc;
+ apei_exec_ctx_set_input(&ctx, offset);
+ rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_OFFSET);
+ if (rc)
+ return rc;
+ apei_exec_ctx_set_input(&ctx, record_id);
+ rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_ID);
+ if (rc)
+ return rc;
+ rc = apei_exec_run(&ctx, ACPI_ERST_EXECUTE_OPERATION);
+ if (rc)
+ return rc;
+ for (;;) {
+ rc = apei_exec_run(&ctx, ACPI_ERST_CHECK_BUSY_STATUS);
+ if (rc)
+ return rc;
+ val = apei_exec_ctx_get_output(&ctx);
+ if (!val)
+ break;
+ if (erst_timedout(&timeout, SPIN_UNIT))
+ return -EIO;
+ }
+ rc = apei_exec_run(&ctx, ACPI_ERST_GET_COMMAND_STATUS);
+ if (rc)
+ return rc;
+ val = apei_exec_ctx_get_output(&ctx);
+ rc = apei_exec_run(&ctx, ACPI_ERST_END);
+ if (rc)
+ return rc;
+
+ return erst_errno(val);
+}
+
+static int __erst_clear_from_storage(u64 record_id)
+{
+ struct apei_exec_context ctx;
+ u64 timeout = FIRMWARE_TIMEOUT;
+ u64 val;
+ int rc;
+
+ erst_exec_ctx_init(&ctx);
+ rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_CLEAR);
+ if (rc)
+ return rc;
+ apei_exec_ctx_set_input(&ctx, record_id);
+ rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_ID);
+ if (rc)
+ return rc;
+ rc = apei_exec_run(&ctx, ACPI_ERST_EXECUTE_OPERATION);
+ if (rc)
+ return rc;
+ for (;;) {
+ rc = apei_exec_run(&ctx, ACPI_ERST_CHECK_BUSY_STATUS);
+ if (rc)
+ return rc;
+ val = apei_exec_ctx_get_output(&ctx);
+ if (!val)
+ break;
+ if (erst_timedout(&timeout, SPIN_UNIT))
+ return -EIO;
+ }
+ rc = apei_exec_run(&ctx, ACPI_ERST_GET_COMMAND_STATUS);
+ if (rc)
+ return rc;
+ val = apei_exec_ctx_get_output(&ctx);
+ rc = apei_exec_run(&ctx, ACPI_ERST_END);
+ if (rc)
+ return rc;
+
+ return erst_errno(val);
+}
+
+/* NVRAM ERST Error Log Address Range is not supported yet */
+static void pr_unimpl_nvram(void)
+{
+ if (printk_ratelimit())
+ pr_warning(ERST_PFX
+ "NVRAM ERST Log Address Range is not implemented yet\n");
+}
+
+static int __erst_write_to_nvram(const struct cper_record_header *record)
+{
+ /* do not print message, because printk is not safe for NMI */
+ return -ENOSYS;
+}
+
+static int __erst_read_to_erange_from_nvram(u64 record_id, u64 *offset)
+{
+ pr_unimpl_nvram();
+ return -ENOSYS;
+}
+
+static int __erst_clear_from_nvram(u64 record_id)
+{
+ pr_unimpl_nvram();
+ return -ENOSYS;
+}
+
+int erst_write(const struct cper_record_header *record)
+{
+ int rc;
+ unsigned long flags;
+ struct cper_record_header *rcd_erange;
+
+ if (erst_disable)
+ return -ENODEV;
+
+ if (memcmp(record->signature, CPER_SIG_RECORD, CPER_SIG_SIZE))
+ return -EINVAL;
+
+ if (erst_erange.attr & ERST_RANGE_NVRAM) {
+ if (!spin_trylock_irqsave(&erst_lock, flags))
+ return -EBUSY;
+ rc = __erst_write_to_nvram(record);
+ spin_unlock_irqrestore(&erst_lock, flags);
+ return rc;
+ }
+
+ if (record->record_length > erst_erange.size)
+ return -EINVAL;
+
+ if (!spin_trylock_irqsave(&erst_lock, flags))
+ return -EBUSY;
+ memcpy(erst_erange.vaddr, record, record->record_length);
+ rcd_erange = erst_erange.vaddr;
+ /* signature for serialization system */
+ memcpy(&rcd_erange->persistence_information, "ER", 2);
+
+ rc = __erst_write_to_storage(0);
+ spin_unlock_irqrestore(&erst_lock, flags);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(erst_write);
+
+static int __erst_read_to_erange(u64 record_id, u64 *offset)
+{
+ int rc;
+
+ if (erst_erange.attr & ERST_RANGE_NVRAM)
+ return __erst_read_to_erange_from_nvram(
+ record_id, offset);
+
+ rc = __erst_read_from_storage(record_id, 0);
+ if (rc)
+ return rc;
+ *offset = 0;
+
+ return 0;
+}
+
+static ssize_t __erst_read(u64 record_id, struct cper_record_header *record,
+ size_t buflen)
+{
+ int rc;
+ u64 offset, len = 0;
+ struct cper_record_header *rcd_tmp;
+
+ rc = __erst_read_to_erange(record_id, &offset);
+ if (rc)
+ return rc;
+ rcd_tmp = erst_erange.vaddr + offset;
+ len = rcd_tmp->record_length;
+ if (len <= buflen)
+ memcpy(record, rcd_tmp, len);
+
+ return len;
+}
+
+/*
+ * If the return value > buflen, the buffer size is not big enough;
+ * if the return value < 0, something went wrong;
+ * otherwise everything is OK and the return value is the record length.
+ */
+ssize_t erst_read(u64 record_id, struct cper_record_header *record,
+ size_t buflen)
+{
+ ssize_t len;
+ unsigned long flags;
+
+ if (erst_disable)
+ return -ENODEV;
+
+ spin_lock_irqsave(&erst_lock, flags);
+ len = __erst_read(record_id, record, buflen);
+ spin_unlock_irqrestore(&erst_lock, flags);
+ return len;
+}
+EXPORT_SYMBOL_GPL(erst_read);
+
+/*
+ * If the return value > buflen, the buffer size is not big enough;
+ * if the return value = 0, there are no more records to read;
+ * if the return value < 0, something went wrong;
+ * otherwise everything is OK and the return value is the record length.
+ */
+ssize_t erst_read_next(struct cper_record_header *record, size_t buflen)
+{
+ int rc;
+ ssize_t len;
+ unsigned long flags;
+ u64 record_id;
+
+ if (erst_disable)
+ return -ENODEV;
+
+ spin_lock_irqsave(&erst_lock, flags);
+ rc = __erst_get_next_record_id(&record_id);
+ if (rc) {
+ spin_unlock_irqrestore(&erst_lock, flags);
+ return rc;
+ }
+ /* no more record */
+ if (record_id == APEI_ERST_INVALID_RECORD_ID) {
+ spin_unlock_irqrestore(&erst_lock, flags);
+ return 0;
+ }
+
+ len = __erst_read(record_id, record, buflen);
+ spin_unlock_irqrestore(&erst_lock, flags);
+
+ return len;
+}
+EXPORT_SYMBOL_GPL(erst_read_next);
+
+int erst_clear(u64 record_id)
+{
+ int rc;
+ unsigned long flags;
+
+ if (erst_disable)
+ return -ENODEV;
+
+ spin_lock_irqsave(&erst_lock, flags);
+ if (erst_erange.attr & ERST_RANGE_NVRAM)
+ rc = __erst_clear_from_nvram(record_id);
+ else
+ rc = __erst_clear_from_storage(record_id);
+ spin_unlock_irqrestore(&erst_lock, flags);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(erst_clear);
+
+static int __init setup_erst_disable(char *str)
+{
+ erst_disable = 1;
+ return 0;
+}
+
+__setup("erst_disable", setup_erst_disable);
+
+static int erst_check_table(struct acpi_table_erst *erst_tab)
+{
+ if (erst_tab->header_length != sizeof(struct acpi_table_erst))
+ return -EINVAL;
+ if (erst_tab->header.length < sizeof(struct acpi_table_erst))
+ return -EINVAL;
+ if (erst_tab->entries !=
+ (erst_tab->header.length - sizeof(struct acpi_table_erst)) /
+ sizeof(struct acpi_erst_entry))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int __init erst_init(void)
+{
+ int rc = 0;
+ acpi_status status;
+ struct apei_exec_context ctx;
+ struct apei_resources erst_resources;
+ struct resource *r;
+
+ if (acpi_disabled)
+ goto err;
+
+ if (erst_disable) {
+ pr_info(ERST_PFX
+ "Error Record Serialization Table (ERST) support is disabled.\n");
+ goto err;
+ }
+
+ status = acpi_get_table(ACPI_SIG_ERST, 0,
+ (struct acpi_table_header **)&erst_tab);
+ if (status == AE_NOT_FOUND) {
+ pr_err(ERST_PFX "Table is not found!\n");
+ goto err;
+ } else if (ACPI_FAILURE(status)) {
+ const char *msg = acpi_format_exception(status);
+ pr_err(ERST_PFX "Failed to get table, %s\n", msg);
+ rc = -EINVAL;
+ goto err;
+ }
+
+ rc = erst_check_table(erst_tab);
+ if (rc) {
+ pr_err(FW_BUG ERST_PFX "ERST table is invalid\n");
+ goto err;
+ }
+
+ apei_resources_init(&erst_resources);
+ erst_exec_ctx_init(&ctx);
+ rc = apei_exec_collect_resources(&ctx, &erst_resources);
+ if (rc)
+ goto err_fini;
+ rc = apei_resources_request(&erst_resources, "APEI ERST");
+ if (rc)
+ goto err_fini;
+ rc = apei_exec_pre_map_gars(&ctx);
+ if (rc)
+ goto err_release;
+ rc = erst_get_erange(&erst_erange);
+ if (rc) {
+ if (rc == -ENODEV)
+ pr_info(ERST_PFX
+ "The corresponding hardware device or firmware implementation "
+ "is not available.\n");
+ else
+ pr_err(ERST_PFX
+ "Failed to get Error Log Address Range.\n");
+ goto err_unmap_reg;
+ }
+
+ r = request_mem_region(erst_erange.base, erst_erange.size, "APEI ERST");
+ if (!r) {
+ pr_err(ERST_PFX
+	       "Cannot request iomem region <0x%16llx-0x%16llx> for ERST.\n",
+ (unsigned long long)erst_erange.base,
+ (unsigned long long)erst_erange.base + erst_erange.size);
+ rc = -EIO;
+ goto err_unmap_reg;
+ }
+ rc = -ENOMEM;
+ erst_erange.vaddr = ioremap_cache(erst_erange.base,
+ erst_erange.size);
+ if (!erst_erange.vaddr)
+ goto err_release_erange;
+
+ pr_info(ERST_PFX
+ "Error Record Serialization Table (ERST) support is initialized.\n");
+
+ return 0;
+
+err_release_erange:
+ release_mem_region(erst_erange.base, erst_erange.size);
+err_unmap_reg:
+ apei_exec_post_unmap_gars(&ctx);
+err_release:
+ apei_resources_release(&erst_resources);
+err_fini:
+ apei_resources_fini(&erst_resources);
+err:
+ erst_disable = 1;
+ return rc;
+}
+
+device_initcall(erst_init);
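For orientation, the following is a minimal, hypothetical sketch of how a kernel consumer might drive the ERST read interface added above; it is not part of this patch. The header locations, the example_* names and the 4 KiB buffer size are assumptions; the loop follows the return-value convention documented above erst_read()/erst_read_next().

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/cper.h>
#include <acpi/apei.h>

#define EXAMPLE_BUF_SIZE 4096	/* assumed upper bound on record size */

static void example_dump_erst(void)
{
	struct cper_record_header *rcd;
	ssize_t len;

	rcd = kmalloc(EXAMPLE_BUF_SIZE, GFP_KERNEL);
	if (!rcd)
		return;

	for (;;) {
		len = erst_read_next(rcd, EXAMPLE_BUF_SIZE);
		if (len == 0)			/* no more records */
			break;
		if (len < 0)			/* read error */
			break;
		if (len > EXAMPLE_BUF_SIZE)	/* buffer too small */
			break;
		pr_info("ERST record of %zd bytes read\n", len);
	}

	kfree(rcd);
}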
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
new file mode 100644
index 0000000..fd0cc01
--- /dev/null
+++ b/drivers/acpi/apei/ghes.c
@@ -0,0 +1,427 @@
+/*
+ * APEI Generic Hardware Error Source support
+ *
+ * Generic Hardware Error Source provides a way to report platform
+ * hardware errors (such as those from the chipset). It works in the
+ * so-called "Firmware First" mode, that is, hardware errors are
+ * reported to firmware first and then reported to Linux by firmware.
+ * This way, firmware can check some non-standard hardware error
+ * registers or non-standard hardware links to produce more hardware
+ * error information for Linux.
+ *
+ * For more information about Generic Hardware Error Source, please
+ * refer to ACPI Specification version 4.0, section 17.3.2.6
+ *
+ * For now, only the SCI notification type and memory errors are
+ * supported. More notification types and hardware error types will
+ * be added later.
+ *
+ * Copyright 2010 Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation;
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/cper.h>
+#include <linux/kdebug.h>
+#include <acpi/apei.h>
+#include <acpi/atomicio.h>
+#include <acpi/hed.h>
+#include <asm/mce.h>
+
+#include "apei-internal.h"
+
+#define GHES_PFX "GHES: "
+
+#define GHES_ESTATUS_MAX_SIZE 65536
+
+/*
+ * One struct ghes is created for each generic hardware error
+ * source.
+ *
+ * It provides the context for the APEI hardware error
+ * timer/IRQ/SCI/NMI handler. The handler for a generic hardware
+ * error source is only triggered after the previous one has
+ * completed, so the handler can use struct ghes without locking.
+ *
+ * estatus: memory buffer for error status block, allocated during
+ * HEST parsing.
+ */
+#define GHES_TO_CLEAR 0x0001
+
+struct ghes {
+ struct acpi_hest_generic *generic;
+ struct acpi_hest_generic_status *estatus;
+ struct list_head list;
+ u64 buffer_paddr;
+ unsigned long flags;
+};
+
+/*
+ * Error source lists, one list for each notification method. The
+ * list members are struct ghes.
+ *
+ * List members are only added during HEST parsing and deleted during
+ * module_exit, both single-threaded, so no lock is needed for that.
+ *
+ * Mutual exclusion is still needed between member addition/deletion
+ * and the timer/IRQ/SCI/NMI handler, which may traverse the list;
+ * RCU is used for that.
+ */
+static LIST_HEAD(ghes_sci);
+
+static struct ghes *ghes_new(struct acpi_hest_generic *generic)
+{
+ struct ghes *ghes;
+ unsigned int error_block_length;
+ int rc;
+
+ ghes = kzalloc(sizeof(*ghes), GFP_KERNEL);
+ if (!ghes)
+ return ERR_PTR(-ENOMEM);
+ ghes->generic = generic;
+ INIT_LIST_HEAD(&ghes->list);
+ rc = acpi_pre_map_gar(&generic->error_status_address);
+ if (rc)
+ goto err_free;
+ error_block_length = generic->error_block_length;
+ if (error_block_length > GHES_ESTATUS_MAX_SIZE) {
+ pr_warning(FW_WARN GHES_PFX
+ "Error status block length is too long: %u for "
+ "generic hardware error source: %d.\n",
+ error_block_length, generic->header.source_id);
+ error_block_length = GHES_ESTATUS_MAX_SIZE;
+ }
+ ghes->estatus = kmalloc(error_block_length, GFP_KERNEL);
+ if (!ghes->estatus) {
+ rc = -ENOMEM;
+ goto err_unmap;
+ }
+
+ return ghes;
+
+err_unmap:
+ acpi_post_unmap_gar(&generic->error_status_address);
+err_free:
+ kfree(ghes);
+ return ERR_PTR(rc);
+}
+
+static void ghes_fini(struct ghes *ghes)
+{
+ kfree(ghes->estatus);
+ acpi_post_unmap_gar(&ghes->generic->error_status_address);
+}
+
+enum {
+ GHES_SER_NO = 0x0,
+ GHES_SER_CORRECTED = 0x1,
+ GHES_SER_RECOVERABLE = 0x2,
+ GHES_SER_PANIC = 0x3,
+};
+
+static inline int ghes_severity(int severity)
+{
+ switch (severity) {
+ case CPER_SER_INFORMATIONAL:
+ return GHES_SER_NO;
+ case CPER_SER_CORRECTED:
+ return GHES_SER_CORRECTED;
+ case CPER_SER_RECOVERABLE:
+ return GHES_SER_RECOVERABLE;
+ case CPER_SER_FATAL:
+ return GHES_SER_PANIC;
+ default:
+		/* Unknown, go panic */
+ return GHES_SER_PANIC;
+ }
+}
+
+/* The SCI handler runs in a work queue, so ioremap can be used here */
+static int ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len,
+ int from_phys)
+{
+ void *vaddr;
+
+ vaddr = ioremap_cache(paddr, len);
+ if (!vaddr)
+ return -ENOMEM;
+ if (from_phys)
+ memcpy(buffer, vaddr, len);
+ else
+ memcpy(vaddr, buffer, len);
+ iounmap(vaddr);
+
+ return 0;
+}
+
+static int ghes_read_estatus(struct ghes *ghes, int silent)
+{
+ struct acpi_hest_generic *g = ghes->generic;
+ u64 buf_paddr;
+ u32 len;
+ int rc;
+
+ rc = acpi_atomic_read(&buf_paddr, &g->error_status_address);
+ if (rc) {
+ if (!silent && printk_ratelimit())
+ pr_warning(FW_WARN GHES_PFX
+"Failed to read error status block address for hardware error source: %d.\n",
+ g->header.source_id);
+ return -EIO;
+ }
+ if (!buf_paddr)
+ return -ENOENT;
+
+ rc = ghes_copy_tofrom_phys(ghes->estatus, buf_paddr,
+ sizeof(*ghes->estatus), 1);
+ if (rc)
+ return rc;
+ if (!ghes->estatus->block_status)
+ return -ENOENT;
+
+ ghes->buffer_paddr = buf_paddr;
+ ghes->flags |= GHES_TO_CLEAR;
+
+ rc = -EIO;
+ len = apei_estatus_len(ghes->estatus);
+ if (len < sizeof(*ghes->estatus))
+ goto err_read_block;
+ if (len > ghes->generic->error_block_length)
+ goto err_read_block;
+ if (apei_estatus_check_header(ghes->estatus))
+ goto err_read_block;
+ rc = ghes_copy_tofrom_phys(ghes->estatus + 1,
+ buf_paddr + sizeof(*ghes->estatus),
+ len - sizeof(*ghes->estatus), 1);
+ if (rc)
+ return rc;
+ if (apei_estatus_check(ghes->estatus))
+ goto err_read_block;
+ rc = 0;
+
+err_read_block:
+ if (rc && !silent)
+ pr_warning(FW_WARN GHES_PFX
+ "Failed to read error status block!\n");
+ return rc;
+}
+
+static void ghes_clear_estatus(struct ghes *ghes)
+{
+ ghes->estatus->block_status = 0;
+ if (!(ghes->flags & GHES_TO_CLEAR))
+ return;
+ ghes_copy_tofrom_phys(ghes->estatus, ghes->buffer_paddr,
+ sizeof(ghes->estatus->block_status), 0);
+ ghes->flags &= ~GHES_TO_CLEAR;
+}
+
+static void ghes_do_proc(struct ghes *ghes)
+{
+ int ser, processed = 0;
+ struct acpi_hest_generic_data *gdata;
+
+ ser = ghes_severity(ghes->estatus->error_severity);
+ apei_estatus_for_each_section(ghes->estatus, gdata) {
+#ifdef CONFIG_X86_MCE
+ if (!uuid_le_cmp(*(uuid_le *)gdata->section_type,
+ CPER_SEC_PLATFORM_MEM)) {
+ apei_mce_report_mem_error(
+ ser == GHES_SER_CORRECTED,
+ (struct cper_sec_mem_err *)(gdata+1));
+ processed = 1;
+ }
+#endif
+ }
+
+ if (!processed && printk_ratelimit())
+ pr_warning(GHES_PFX
+ "Unknown error record from generic hardware error source: %d\n",
+ ghes->generic->header.source_id);
+}
+
+static int ghes_proc(struct ghes *ghes)
+{
+ int rc;
+
+ rc = ghes_read_estatus(ghes, 0);
+ if (rc)
+ goto out;
+ ghes_do_proc(ghes);
+
+out:
+ ghes_clear_estatus(ghes);
+	return rc;
+}
+
+static int ghes_notify_sci(struct notifier_block *this,
+ unsigned long event, void *data)
+{
+ struct ghes *ghes;
+ int ret = NOTIFY_DONE;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(ghes, &ghes_sci, list) {
+ if (!ghes_proc(ghes))
+ ret = NOTIFY_OK;
+ }
+ rcu_read_unlock();
+
+ return ret;
+}
+
+static struct notifier_block ghes_notifier_sci = {
+ .notifier_call = ghes_notify_sci,
+};
+
+static int hest_ghes_parse(struct acpi_hest_header *hest_hdr, void *data)
+{
+ struct acpi_hest_generic *generic;
+ struct ghes *ghes = NULL;
+ int rc = 0;
+
+ if (hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR)
+ return 0;
+
+ generic = (struct acpi_hest_generic *)hest_hdr;
+ if (!generic->enabled)
+ return 0;
+
+ if (generic->error_block_length <
+ sizeof(struct acpi_hest_generic_status)) {
+ pr_warning(FW_BUG GHES_PFX
+"Invalid error block length: %u for generic hardware error source: %d\n",
+ generic->error_block_length,
+ generic->header.source_id);
+ goto err;
+ }
+ if (generic->records_to_preallocate == 0) {
+ pr_warning(FW_BUG GHES_PFX
+"Invalid records to preallocate: %u for generic hardware error source: %d\n",
+ generic->records_to_preallocate,
+ generic->header.source_id);
+ goto err;
+ }
+ ghes = ghes_new(generic);
+ if (IS_ERR(ghes)) {
+ rc = PTR_ERR(ghes);
+ ghes = NULL;
+ goto err;
+ }
+ switch (generic->notify.type) {
+ case ACPI_HEST_NOTIFY_POLLED:
+ pr_warning(GHES_PFX
+"Generic hardware error source: %d notified via POLL is not supported!\n",
+ generic->header.source_id);
+ break;
+ case ACPI_HEST_NOTIFY_EXTERNAL:
+ case ACPI_HEST_NOTIFY_LOCAL:
+ pr_warning(GHES_PFX
+"Generic hardware error source: %d notified via IRQ is not supported!\n",
+ generic->header.source_id);
+ break;
+ case ACPI_HEST_NOTIFY_SCI:
+ if (list_empty(&ghes_sci))
+ register_acpi_hed_notifier(&ghes_notifier_sci);
+ list_add_rcu(&ghes->list, &ghes_sci);
+ break;
+ case ACPI_HEST_NOTIFY_NMI:
+ pr_warning(GHES_PFX
+"Generic hardware error source: %d notified via NMI is not supported!\n",
+ generic->header.source_id);
+ break;
+ default:
+ pr_warning(FW_WARN GHES_PFX
+ "Unknown notification type: %u for generic hardware error source: %d\n",
+ generic->notify.type, generic->header.source_id);
+ break;
+ }
+
+ return 0;
+err:
+ if (ghes)
+ ghes_fini(ghes);
+ return rc;
+}
+
+static void ghes_cleanup(void)
+{
+ struct ghes *ghes, *nghes;
+
+ if (!list_empty(&ghes_sci))
+ unregister_acpi_hed_notifier(&ghes_notifier_sci);
+
+ synchronize_rcu();
+
+ list_for_each_entry_safe(ghes, nghes, &ghes_sci, list) {
+ list_del(&ghes->list);
+ ghes_fini(ghes);
+ kfree(ghes);
+ }
+}
+
+static int __init ghes_init(void)
+{
+ int rc;
+
+ if (acpi_disabled)
+ return -ENODEV;
+
+ if (hest_disable) {
+ pr_info(GHES_PFX "HEST is not enabled!\n");
+ return -EINVAL;
+ }
+
+ rc = apei_hest_parse(hest_ghes_parse, NULL);
+ if (rc) {
+ pr_err(GHES_PFX
+ "Error during parsing HEST generic hardware error sources.\n");
+ goto err_cleanup;
+ }
+
+ if (list_empty(&ghes_sci)) {
+ pr_info(GHES_PFX
+ "No functional generic hardware error sources.\n");
+ rc = -ENODEV;
+ goto err_cleanup;
+ }
+
+ pr_info(GHES_PFX
+ "Generic Hardware Error Source support is initialized.\n");
+
+ return 0;
+err_cleanup:
+ ghes_cleanup();
+ return rc;
+}
+
+static void __exit ghes_exit(void)
+{
+ ghes_cleanup();
+}
+
+module_init(ghes_init);
+module_exit(ghes_exit);
+
+MODULE_AUTHOR("Huang Ying");
+MODULE_DESCRIPTION("APEI Generic Hardware Error Source support");
+MODULE_LICENSE("GPL");
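The locking scheme described in the comments above (list updates only in process context, lock-free traversal in the SCI handler) is the standard RCU list pattern. A minimal, hypothetical sketch of that pattern follows; it is illustrative only, not part of this patch, and the example_* names are invented.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/notifier.h>

struct example_src {
	struct list_head list;
	int source_id;
};

static LIST_HEAD(example_srcs);		/* written only in process context */

/* reader side: traverse the list without taking a lock */
static int example_notify(struct notifier_block *nb,
			  unsigned long event, void *data)
{
	struct example_src *src;

	rcu_read_lock();
	list_for_each_entry_rcu(src, &example_srcs, list)
		pr_info("handling error source %d\n", src->source_id);
	rcu_read_unlock();

	return NOTIFY_OK;
}

/* writer side: add in process context, wait for readers before freeing */
static void example_add(struct example_src *src)
{
	list_add_rcu(&src->list, &example_srcs);
}

static void example_del(struct example_src *src)
{
	list_del_rcu(&src->list);
	synchronize_rcu();
	kfree(src);
}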
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
new file mode 100644
index 0000000..e7f40d3
--- /dev/null
+++ b/drivers/acpi/apei/hest.c
@@ -0,0 +1,173 @@
+/*
+ * APEI Hardware Error Source Table support
+ *
+ * HEST describes error sources in detail and communicates operational
+ * parameters (e.g. severity levels, masking bits, and threshold
+ * values) to Linux as necessary. It also allows the BIOS to report
+ * non-standard error sources to Linux (for example, chipset-specific
+ * error registers).
+ *
+ * For more information about HEST, please refer to ACPI Specification
+ * version 4.0, section 17.3.2.
+ *
+ * Copyright 2009 Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation;
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/kdebug.h>
+#include <linux/highmem.h>
+#include <linux/io.h>
+#include <acpi/apei.h>
+
+#include "apei-internal.h"
+
+#define HEST_PFX "HEST: "
+
+int hest_disable;
+EXPORT_SYMBOL_GPL(hest_disable);
+
+/* HEST table parsing */
+
+static struct acpi_table_hest *hest_tab;
+
+static int hest_void_parse(struct acpi_hest_header *hest_hdr, void *data)
+{
+ return 0;
+}
+
+static int hest_esrc_len_tab[ACPI_HEST_TYPE_RESERVED] = {
+ [ACPI_HEST_TYPE_IA32_CHECK] = -1, /* need further calculation */
+ [ACPI_HEST_TYPE_IA32_CORRECTED_CHECK] = -1,
+ [ACPI_HEST_TYPE_IA32_NMI] = sizeof(struct acpi_hest_ia_nmi),
+ [ACPI_HEST_TYPE_AER_ROOT_PORT] = sizeof(struct acpi_hest_aer_root),
+ [ACPI_HEST_TYPE_AER_ENDPOINT] = sizeof(struct acpi_hest_aer),
+ [ACPI_HEST_TYPE_AER_BRIDGE] = sizeof(struct acpi_hest_aer_bridge),
+ [ACPI_HEST_TYPE_GENERIC_ERROR] = sizeof(struct acpi_hest_generic),
+};
+
+static int hest_esrc_len(struct acpi_hest_header *hest_hdr)
+{
+ u16 hest_type = hest_hdr->type;
+ int len;
+
+ if (hest_type >= ACPI_HEST_TYPE_RESERVED)
+ return 0;
+
+ len = hest_esrc_len_tab[hest_type];
+
+ if (hest_type == ACPI_HEST_TYPE_IA32_CORRECTED_CHECK) {
+ struct acpi_hest_ia_corrected *cmc;
+ cmc = (struct acpi_hest_ia_corrected *)hest_hdr;
+ len = sizeof(*cmc) + cmc->num_hardware_banks *
+ sizeof(struct acpi_hest_ia_error_bank);
+ } else if (hest_type == ACPI_HEST_TYPE_IA32_CHECK) {
+ struct acpi_hest_ia_machine_check *mc;
+ mc = (struct acpi_hest_ia_machine_check *)hest_hdr;
+ len = sizeof(*mc) + mc->num_hardware_banks *
+ sizeof(struct acpi_hest_ia_error_bank);
+ }
+ BUG_ON(len == -1);
+
+ return len;
+}
+
+int apei_hest_parse(apei_hest_func_t func, void *data)
+{
+ struct acpi_hest_header *hest_hdr;
+ int i, rc, len;
+
+ if (hest_disable)
+ return -EINVAL;
+
+ hest_hdr = (struct acpi_hest_header *)(hest_tab + 1);
+ for (i = 0; i < hest_tab->error_source_count; i++) {
+ len = hest_esrc_len(hest_hdr);
+ if (!len) {
+ pr_warning(FW_WARN HEST_PFX
+ "Unknown or unused hardware error source "
+ "type: %d for hardware error source: %d.\n",
+ hest_hdr->type, hest_hdr->source_id);
+ return -EINVAL;
+ }
+ if ((void *)hest_hdr + len >
+ (void *)hest_tab + hest_tab->header.length) {
+ pr_warning(FW_BUG HEST_PFX
+ "Table contents overflow for hardware error source: %d.\n",
+ hest_hdr->source_id);
+ return -EINVAL;
+ }
+
+ rc = func(hest_hdr, data);
+ if (rc)
+ return rc;
+
+ hest_hdr = (void *)hest_hdr + len;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(apei_hest_parse);
+
+static int __init setup_hest_disable(char *str)
+{
+ hest_disable = 1;
+ return 0;
+}
+
+__setup("hest_disable", setup_hest_disable);
+
+static int __init hest_init(void)
+{
+ acpi_status status;
+ int rc = -ENODEV;
+
+ if (acpi_disabled)
+ goto err;
+
+ if (hest_disable) {
+		pr_info(HEST_PFX "HEST table parsing is disabled.\n");
+ goto err;
+ }
+
+ status = acpi_get_table(ACPI_SIG_HEST, 0,
+ (struct acpi_table_header **)&hest_tab);
+ if (status == AE_NOT_FOUND) {
+ pr_info(HEST_PFX "Table is not found!\n");
+ goto err;
+ } else if (ACPI_FAILURE(status)) {
+ const char *msg = acpi_format_exception(status);
+ pr_err(HEST_PFX "Failed to get table, %s\n", msg);
+ rc = -EINVAL;
+ goto err;
+ }
+
+ rc = apei_hest_parse(hest_void_parse, NULL);
+ if (rc)
+ goto err;
+
+ pr_info(HEST_PFX "HEST table parsing is initialized.\n");
+
+ return 0;
+err:
+ hest_disable = 1;
+ return rc;
+}
+
+subsys_initcall(hest_init);
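apei_hest_parse() is the main entry point exported by this file. Below is a minimal, hypothetical sketch of a caller that counts generic error sources; it is not part of this patch, assumes the declarations are visible via <acpi/apei.h>, and the example_* names are invented.

#include <linux/kernel.h>
#include <linux/acpi.h>
#include <acpi/apei.h>

/* callback with the apei_hest_func_t signature used by apei_hest_parse() */
static int example_count_ghes(struct acpi_hest_header *hest_hdr, void *data)
{
	int *count = data;

	if (hest_hdr->type == ACPI_HEST_TYPE_GENERIC_ERROR)
		(*count)++;

	return 0;	/* returning non-zero would abort the walk */
}

static int example_ghes_count(void)
{
	int count = 0, rc;

	rc = apei_hest_parse(example_count_ghes, &count);
	if (rc)
		return rc;

	return count;
}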
diff --git a/drivers/acpi/atomicio.c b/drivers/acpi/atomicio.c
new file mode 100644
index 0000000..814b192
--- /dev/null
+++ b/drivers/acpi/atomicio.c
@@ -0,0 +1,360 @@
+/*
+ * atomicio.c - ACPI IO memory pre-mapping/post-unmapping, then
+ * accessing in atomic context.
+ *
+ * This is used by the NMI handler to access IO memory areas, because
+ * ioremap/iounmap cannot be used in an NMI handler. The IO memory
+ * area is pre-mapped in process context and accessed from the NMI
+ * handler.
+ *
+ * Copyright (C) 2009-2010, Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/io.h>
+#include <linux/kref.h>
+#include <linux/rculist.h>
+#include <linux/interrupt.h>
+#include <acpi/atomicio.h>
+
+#define ACPI_PFX "ACPI: "
+
+static LIST_HEAD(acpi_iomaps);
+/*
+ * Used for mutual exclusion between writers of the acpi_iomaps list;
+ * RCU is used for synchronization between readers and the writer.
+ */
+static DEFINE_SPINLOCK(acpi_iomaps_lock);
+
+struct acpi_iomap {
+ struct list_head list;
+ void __iomem *vaddr;
+ unsigned long size;
+ phys_addr_t paddr;
+ struct kref ref;
+};
+
+/* acpi_iomaps_lock or RCU read lock must be held before calling */
+static struct acpi_iomap *__acpi_find_iomap(phys_addr_t paddr,
+ unsigned long size)
+{
+ struct acpi_iomap *map;
+
+ list_for_each_entry_rcu(map, &acpi_iomaps, list) {
+ if (map->paddr + map->size >= paddr + size &&
+ map->paddr <= paddr)
+ return map;
+ }
+ return NULL;
+}
+
+/*
+ * Atomic "ioremap" used by the NMI handler. If the specified IO
+ * memory area is not pre-mapped, NULL will be returned.
+ *
+ * acpi_iomaps_lock or RCU read lock must be held before calling
+ */
+static void __iomem *__acpi_ioremap_fast(phys_addr_t paddr,
+ unsigned long size)
+{
+ struct acpi_iomap *map;
+
+ map = __acpi_find_iomap(paddr, size);
+ if (map)
+ return map->vaddr + (paddr - map->paddr);
+ else
+ return NULL;
+}
+
+/* acpi_iomaps_lock must be held before calling */
+static void __iomem *__acpi_try_ioremap(phys_addr_t paddr,
+ unsigned long size)
+{
+ struct acpi_iomap *map;
+
+ map = __acpi_find_iomap(paddr, size);
+ if (map) {
+ kref_get(&map->ref);
+ return map->vaddr + (paddr - map->paddr);
+ } else
+ return NULL;
+}
+
+/*
+ * Used to pre-map the specified IO memory area. First check whether
+ * the area is already pre-mapped; if it is, increase the reference
+ * count (in __acpi_try_ioremap) and return. Otherwise, do the real
+ * ioremap and add the mapping to the acpi_iomaps list.
+ */
+static void __iomem *acpi_pre_map(phys_addr_t paddr,
+ unsigned long size)
+{
+ void __iomem *vaddr;
+ struct acpi_iomap *map;
+ unsigned long pg_sz, flags;
+ phys_addr_t pg_off;
+
+ spin_lock_irqsave(&acpi_iomaps_lock, flags);
+ vaddr = __acpi_try_ioremap(paddr, size);
+ spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
+ if (vaddr)
+ return vaddr;
+
+ pg_off = paddr & PAGE_MASK;
+ pg_sz = ((paddr + size + PAGE_SIZE - 1) & PAGE_MASK) - pg_off;
+ vaddr = ioremap(pg_off, pg_sz);
+ if (!vaddr)
+ return NULL;
+ map = kmalloc(sizeof(*map), GFP_KERNEL);
+ if (!map)
+ goto err_unmap;
+ INIT_LIST_HEAD(&map->list);
+ map->paddr = pg_off;
+ map->size = pg_sz;
+ map->vaddr = vaddr;
+ kref_init(&map->ref);
+
+ spin_lock_irqsave(&acpi_iomaps_lock, flags);
+ vaddr = __acpi_try_ioremap(paddr, size);
+ if (vaddr) {
+ spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
+ iounmap(map->vaddr);
+ kfree(map);
+ return vaddr;
+ }
+ list_add_tail_rcu(&map->list, &acpi_iomaps);
+ spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
+
+ return vaddr + (paddr - pg_off);
+err_unmap:
+ iounmap(vaddr);
+ return NULL;
+}
+
+/* acpi_iomaps_lock must be held before calling */
+static void __acpi_kref_del_iomap(struct kref *ref)
+{
+ struct acpi_iomap *map;
+
+ map = container_of(ref, struct acpi_iomap, ref);
+ list_del_rcu(&map->list);
+}
+
+/*
+ * Used to post-unmap the specified IO memory area. The iounmap is
+ * done only when the reference count drops to zero.
+ */
+static void acpi_post_unmap(phys_addr_t paddr, unsigned long size)
+{
+ struct acpi_iomap *map;
+ unsigned long flags;
+ int del;
+
+ spin_lock_irqsave(&acpi_iomaps_lock, flags);
+ map = __acpi_find_iomap(paddr, size);
+ BUG_ON(!map);
+ del = kref_put(&map->ref, __acpi_kref_del_iomap);
+ spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
+
+ if (!del)
+ return;
+
+ synchronize_rcu();
+ iounmap(map->vaddr);
+ kfree(map);
+}
+
+/* In NMI handler, should set silent = 1 */
+static int acpi_check_gar(struct acpi_generic_address *reg,
+ u64 *paddr, int silent)
+{
+ u32 width, space_id;
+
+ width = reg->bit_width;
+ space_id = reg->space_id;
+ /* Handle possible alignment issues */
+ memcpy(paddr, &reg->address, sizeof(*paddr));
+ if (!*paddr) {
+ if (!silent)
+ pr_warning(FW_BUG ACPI_PFX
+ "Invalid physical address in GAR [0x%llx/%u/%u]\n",
+ *paddr, width, space_id);
+ return -EINVAL;
+ }
+
+ if ((width != 8) && (width != 16) && (width != 32) && (width != 64)) {
+ if (!silent)
+ pr_warning(FW_BUG ACPI_PFX
+ "Invalid bit width in GAR [0x%llx/%u/%u]\n",
+ *paddr, width, space_id);
+ return -EINVAL;
+ }
+
+ if (space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY &&
+ space_id != ACPI_ADR_SPACE_SYSTEM_IO) {
+ if (!silent)
+ pr_warning(FW_BUG ACPI_PFX
+ "Invalid address space type in GAR [0x%llx/%u/%u]\n",
+ *paddr, width, space_id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Pre-map, working on GAR */
+int acpi_pre_map_gar(struct acpi_generic_address *reg)
+{
+ u64 paddr;
+ void __iomem *vaddr;
+ int rc;
+
+ if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ return 0;
+
+ rc = acpi_check_gar(reg, &paddr, 0);
+ if (rc)
+ return rc;
+
+ vaddr = acpi_pre_map(paddr, reg->bit_width / 8);
+ if (!vaddr)
+ return -EIO;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(acpi_pre_map_gar);
+
+/* Post-unmap, working on GAR */
+int acpi_post_unmap_gar(struct acpi_generic_address *reg)
+{
+ u64 paddr;
+ int rc;
+
+ if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ return 0;
+
+ rc = acpi_check_gar(reg, &paddr, 0);
+ if (rc)
+ return rc;
+
+ acpi_post_unmap(paddr, reg->bit_width / 8);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(acpi_post_unmap_gar);
+
+/*
+ * Can be used in atomic (including NMI) or process context. The RCU
+ * read lock may only be released after the IO memory area has been
+ * accessed.
+ */
+static int acpi_atomic_read_mem(u64 paddr, u64 *val, u32 width)
+{
+ void __iomem *addr;
+
+ rcu_read_lock();
+ addr = __acpi_ioremap_fast(paddr, width);
+ switch (width) {
+ case 8:
+ *val = readb(addr);
+ break;
+ case 16:
+ *val = readw(addr);
+ break;
+ case 32:
+ *val = readl(addr);
+ break;
+ case 64:
+ *val = readq(addr);
+ break;
+ default:
+ return -EINVAL;
+ }
+ rcu_read_unlock();
+
+ return 0;
+}
+
+static int acpi_atomic_write_mem(u64 paddr, u64 val, u32 width)
+{
+ void __iomem *addr;
+
+ rcu_read_lock();
+ addr = __acpi_ioremap_fast(paddr, width);
+ switch (width) {
+ case 8:
+ writeb(val, addr);
+ break;
+ case 16:
+ writew(val, addr);
+ break;
+ case 32:
+ writel(val, addr);
+ break;
+ case 64:
+ writeq(val, addr);
+ break;
+ default:
+ return -EINVAL;
+ }
+ rcu_read_unlock();
+
+ return 0;
+}
+
+/* GAR accessing in atomic (including NMI) or process context */
+int acpi_atomic_read(u64 *val, struct acpi_generic_address *reg)
+{
+ u64 paddr;
+ int rc;
+
+ rc = acpi_check_gar(reg, &paddr, 1);
+ if (rc)
+ return rc;
+
+ *val = 0;
+ switch (reg->space_id) {
+ case ACPI_ADR_SPACE_SYSTEM_MEMORY:
+ return acpi_atomic_read_mem(paddr, val, reg->bit_width);
+ case ACPI_ADR_SPACE_SYSTEM_IO:
+ return acpi_os_read_port(paddr, (u32 *)val, reg->bit_width);
+ default:
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL_GPL(acpi_atomic_read);
+
+int acpi_atomic_write(u64 val, struct acpi_generic_address *reg)
+{
+ u64 paddr;
+ int rc;
+
+ rc = acpi_check_gar(reg, &paddr, 1);
+ if (rc)
+ return rc;
+
+ switch (reg->space_id) {
+ case ACPI_ADR_SPACE_SYSTEM_MEMORY:
+ return acpi_atomic_write_mem(paddr, val, reg->bit_width);
+ case ACPI_ADR_SPACE_SYSTEM_IO:
+ return acpi_os_write_port(paddr, val, reg->bit_width);
+ default:
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL_GPL(acpi_atomic_write);
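A minimal, hypothetical sketch of the intended usage of this file's interface follows (not part of this patch; the example_* names are invented): a GAR is pre-mapped in process context so that it can later be read from atomic or NMI context without calling ioremap().

#include <linux/kernel.h>
#include <linux/acpi.h>
#include <acpi/atomicio.h>

/* process context: pre-map the register so atomic-time reads need no ioremap */
static int example_setup(struct acpi_generic_address *gar)
{
	return acpi_pre_map_gar(gar);
}

/* may be called from atomic/NMI context once the GAR is pre-mapped */
static int example_read(struct acpi_generic_address *gar, u64 *val)
{
	return acpi_atomic_read(val, gar);
}

/* process context: drop the mapping reference */
static void example_teardown(struct acpi_generic_address *gar)
{
	acpi_post_unmap_gar(gar);
}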
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 37132dc..743576b 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -527,7 +527,7 @@ int acpi_bus_generate_proc_event4(const char *device_class, const char *bus_id,
if (!event_is_open)
return 0;
- event = kmalloc(sizeof(struct acpi_bus_event), GFP_ATOMIC);
+ event = kzalloc(sizeof(struct acpi_bus_event), GFP_ATOMIC);
if (!event)
return -ENOMEM;
diff --git a/drivers/acpi/hed.c b/drivers/acpi/hed.c
new file mode 100644
index 0000000..d0c1967
--- /dev/null
+++ b/drivers/acpi/hed.c
@@ -0,0 +1,112 @@
+/*
+ * ACPI Hardware Error Device (PNP0C33) Driver
+ *
+ * Copyright (C) 2010, Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * ACPI Hardware Error Device is used to report some hardware errors
+ * notified via SCI, mainly the corrected errors.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation;
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+#include <acpi/hed.h>
+
+static struct acpi_device_id acpi_hed_ids[] = {
+ {"PNP0C33", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, acpi_hed_ids);
+
+static acpi_handle hed_handle;
+
+static BLOCKING_NOTIFIER_HEAD(acpi_hed_notify_list);
+
+int register_acpi_hed_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&acpi_hed_notify_list, nb);
+}
+EXPORT_SYMBOL_GPL(register_acpi_hed_notifier);
+
+void unregister_acpi_hed_notifier(struct notifier_block *nb)
+{
+ blocking_notifier_chain_unregister(&acpi_hed_notify_list, nb);
+}
+EXPORT_SYMBOL_GPL(unregister_acpi_hed_notifier);
+
+/*
+ * The SCI used to report a hardware error is forwarded to the
+ * listeners of HED; this is used by HEST Generic Hardware Error
+ * Sources with notification type SCI.
+ */
+static void acpi_hed_notify(struct acpi_device *device, u32 event)
+{
+ blocking_notifier_call_chain(&acpi_hed_notify_list, 0, NULL);
+}
+
+static int __devinit acpi_hed_add(struct acpi_device *device)
+{
+ /* Only one hardware error device */
+ if (hed_handle)
+ return -EINVAL;
+ hed_handle = device->handle;
+ return 0;
+}
+
+static int __devexit acpi_hed_remove(struct acpi_device *device, int type)
+{
+ hed_handle = NULL;
+ return 0;
+}
+
+static struct acpi_driver acpi_hed_driver = {
+ .name = "hardware_error_device",
+ .class = "hardware_error",
+ .ids = acpi_hed_ids,
+ .ops = {
+ .add = acpi_hed_add,
+ .remove = acpi_hed_remove,
+ .notify = acpi_hed_notify,
+ },
+};
+
+static int __init acpi_hed_init(void)
+{
+ if (acpi_disabled)
+ return -ENODEV;
+
+ if (acpi_bus_register_driver(&acpi_hed_driver) < 0)
+ return -ENODEV;
+
+ return 0;
+}
+
+static void __exit acpi_hed_exit(void)
+{
+ acpi_bus_unregister_driver(&acpi_hed_driver);
+}
+
+module_init(acpi_hed_init);
+module_exit(acpi_hed_exit);
+
+ACPI_MODULE_NAME("hed");
+MODULE_AUTHOR("Huang Ying");
+MODULE_DESCRIPTION("ACPI Hardware Error Device Driver");
+MODULE_LICENSE("GPL");
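For reference, a minimal, hypothetical sketch of a HED client follows; it is not part of this patch and the example_* names are invented. ghes.c above is the first real user of this interface.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <acpi/hed.h>

static int example_hed_event(struct notifier_block *nb,
			     unsigned long event, void *data)
{
	/* no event data is passed; clients poll their own error sources */
	pr_info("hardware error SCI received\n");
	return NOTIFY_OK;
}

static struct notifier_block example_hed_nb = {
	.notifier_call = example_hed_event,
};

static int __init example_init(void)
{
	return register_acpi_hed_notifier(&example_hed_nb);
}

static void __exit example_exit(void)
{
	unregister_acpi_hed_notifier(&example_hed_nb);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");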
diff --git a/drivers/acpi/hest.c b/drivers/acpi/hest.c
deleted file mode 100644
index 4bb18c9..0000000
--- a/drivers/acpi/hest.c
+++ /dev/null
@@ -1,135 +0,0 @@
-#include <linux/acpi.h>
-#include <linux/pci.h>
-
-#define PREFIX "ACPI: "
-
-static inline unsigned long parse_acpi_hest_ia_machine_check(struct acpi_hest_ia_machine_check *p)
-{
- return sizeof(*p) +
- (sizeof(struct acpi_hest_ia_error_bank) * p->num_hardware_banks);
-}
-
-static inline unsigned long parse_acpi_hest_ia_corrected(struct acpi_hest_ia_corrected *p)
-{
- return sizeof(*p) +
- (sizeof(struct acpi_hest_ia_error_bank) * p->num_hardware_banks);
-}
-
-static inline unsigned long parse_acpi_hest_ia_nmi(struct acpi_hest_ia_nmi *p)
-{
- return sizeof(*p);
-}
-
-static inline unsigned long parse_acpi_hest_generic(struct acpi_hest_generic *p)
-{
- return sizeof(*p);
-}
-
-static inline unsigned int hest_match_pci(struct acpi_hest_aer_common *p, struct pci_dev *pci)
-{
- return (0 == pci_domain_nr(pci->bus) &&
- p->bus == pci->bus->number &&
- p->device == PCI_SLOT(pci->devfn) &&
- p->function == PCI_FUNC(pci->devfn));
-}
-
-static unsigned long parse_acpi_hest_aer(void *hdr, int type, struct pci_dev *pci, int *firmware_first)
-{
- struct acpi_hest_aer_common *p = hdr + sizeof(struct acpi_hest_header);
- unsigned long rc=0;
- u8 pcie_type = 0;
- u8 bridge = 0;
- switch (type) {
- case ACPI_HEST_TYPE_AER_ROOT_PORT:
- rc = sizeof(struct acpi_hest_aer_root);
- pcie_type = PCI_EXP_TYPE_ROOT_PORT;
- break;
- case ACPI_HEST_TYPE_AER_ENDPOINT:
- rc = sizeof(struct acpi_hest_aer);
- pcie_type = PCI_EXP_TYPE_ENDPOINT;
- break;
- case ACPI_HEST_TYPE_AER_BRIDGE:
- rc = sizeof(struct acpi_hest_aer_bridge);
- if ((pci->class >> 16) == PCI_BASE_CLASS_BRIDGE)
- bridge = 1;
- break;
- }
-
- if (p->flags & ACPI_HEST_GLOBAL) {
- if ((pci->is_pcie && (pci->pcie_type == pcie_type)) || bridge)
- *firmware_first = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
- }
- else
- if (hest_match_pci(p, pci))
- *firmware_first = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
- return rc;
-}
-
-static int acpi_hest_firmware_first(struct acpi_table_header *stdheader, struct pci_dev *pci)
-{
- struct acpi_table_hest *hest = (struct acpi_table_hest *)stdheader;
- void *p = (void *)hest + sizeof(*hest); /* defined by the ACPI 4.0 spec */
- struct acpi_hest_header *hdr = p;
-
- int i;
- int firmware_first = 0;
- static unsigned char printed_unused = 0;
- static unsigned char printed_reserved = 0;
-
- for (i=0, hdr=p; p < (((void *)hest) + hest->header.length) && i < hest->error_source_count; i++) {
- switch (hdr->type) {
- case ACPI_HEST_TYPE_IA32_CHECK:
- p += parse_acpi_hest_ia_machine_check(p);
- break;
- case ACPI_HEST_TYPE_IA32_CORRECTED_CHECK:
- p += parse_acpi_hest_ia_corrected(p);
- break;
- case ACPI_HEST_TYPE_IA32_NMI:
- p += parse_acpi_hest_ia_nmi(p);
- break;
- /* These three should never appear */
- case ACPI_HEST_TYPE_NOT_USED3:
- case ACPI_HEST_TYPE_NOT_USED4:
- case ACPI_HEST_TYPE_NOT_USED5:
- if (!printed_unused) {
- printk(KERN_DEBUG PREFIX
- "HEST Error Source list contains an obsolete type (%d).\n", hdr->type);
- printed_unused = 1;
- }
- break;
- case ACPI_HEST_TYPE_AER_ROOT_PORT:
- case ACPI_HEST_TYPE_AER_ENDPOINT:
- case ACPI_HEST_TYPE_AER_BRIDGE:
- p += parse_acpi_hest_aer(p, hdr->type, pci, &firmware_first);
- break;
- case ACPI_HEST_TYPE_GENERIC_ERROR:
- p += parse_acpi_hest_generic(p);
- break;
- /* These should never appear either */
- case ACPI_HEST_TYPE_RESERVED:
- default:
- if (!printed_reserved) {
- printk(KERN_DEBUG PREFIX
- "HEST Error Source list contains a reserved type (%d).\n", hdr->type);
- printed_reserved = 1;
- }
- break;
- }
- }
- return firmware_first;
-}
-
-int acpi_hest_firmware_first_pci(struct pci_dev *pci)
-{
- acpi_status status = AE_NOT_FOUND;
- struct acpi_table_header *hest = NULL;
- status = acpi_get_table(ACPI_SIG_HEST, 1, &hest);
-
- if (ACPI_SUCCESS(status)) {
- if (acpi_hest_firmware_first(hest, pci)) {
- return 1;
- }
- }
- return 0;
-}
-EXPORT_SYMBOL_GPL(acpi_hest_firmware_first_pci);
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index aefce33..4eac593 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -120,7 +120,8 @@ acpi_handle acpi_get_pci_rootbridge_handle(unsigned int seg, unsigned int bus)
struct acpi_pci_root *root;
list_for_each_entry(root, &acpi_pci_roots, node)
- if ((root->segment == (u16) seg) && (root->bus_nr == (u16) bus))
+ if ((root->segment == (u16) seg) &&
+ (root->secondary.start == (u16) bus))
return root->device->handle;
return NULL;
}
@@ -154,7 +155,7 @@ EXPORT_SYMBOL_GPL(acpi_is_root_bridge);
static acpi_status
get_root_bridge_busnr_callback(struct acpi_resource *resource, void *data)
{
- int *busnr = data;
+ struct resource *res = data;
struct acpi_resource_address64 address;
if (resource->type != ACPI_RESOURCE_TYPE_ADDRESS16 &&
@@ -164,28 +165,27 @@ get_root_bridge_busnr_callback(struct acpi_resource *resource, void *data)
acpi_resource_to_address64(resource, &address);
if ((address.address_length > 0) &&
- (address.resource_type == ACPI_BUS_NUMBER_RANGE))
- *busnr = address.minimum;
+ (address.resource_type == ACPI_BUS_NUMBER_RANGE)) {
+ res->start = address.minimum;
+ res->end = address.minimum + address.address_length - 1;
+ }
return AE_OK;
}
static acpi_status try_get_root_bridge_busnr(acpi_handle handle,
- unsigned long long *bus)
+ struct resource *res)
{
acpi_status status;
- int busnum;
- busnum = -1;
+ res->start = -1;
status =
acpi_walk_resources(handle, METHOD_NAME__CRS,
- get_root_bridge_busnr_callback, &busnum);
+ get_root_bridge_busnr_callback, res);
if (ACPI_FAILURE(status))
return status;
- /* Check if we really get a bus number from _CRS */
- if (busnum == -1)
+ if (res->start == -1)
return AE_ERROR;
- *bus = busnum;
return AE_OK;
}
@@ -429,34 +429,47 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
struct acpi_device *child;
u32 flags, base_flags;
+ root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
+ if (!root)
+ return -ENOMEM;
+
segment = 0;
status = acpi_evaluate_integer(device->handle, METHOD_NAME__SEG, NULL,
&segment);
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
printk(KERN_ERR PREFIX "can't evaluate _SEG\n");
- return -ENODEV;
+ result = -ENODEV;
+ goto end;
}
/* Check _CRS first, then _BBN. If no _BBN, default to zero. */
- bus = 0;
- status = try_get_root_bridge_busnr(device->handle, &bus);
+ root->secondary.flags = IORESOURCE_BUS;
+ status = try_get_root_bridge_busnr(device->handle, &root->secondary);
if (ACPI_FAILURE(status)) {
+ /*
+ * We need both the start and end of the downstream bus range
+ * to interpret _CBA (MMCONFIG base address), so it really is
+ * supposed to be in _CRS. If we don't find it there, all we
+ * can do is assume [_BBN-0xFF] or [0-0xFF].
+ */
+ root->secondary.end = 0xFF;
+ printk(KERN_WARNING FW_BUG PREFIX
+ "no secondary bus range in _CRS\n");
status = acpi_evaluate_integer(device->handle, METHOD_NAME__BBN, NULL, &bus);
- if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
- printk(KERN_ERR PREFIX
- "no bus number in _CRS and can't evaluate _BBN\n");
- return -ENODEV;
+ if (ACPI_SUCCESS(status))
+ root->secondary.start = bus;
+ else if (status == AE_NOT_FOUND)
+ root->secondary.start = 0;
+ else {
+ printk(KERN_ERR PREFIX "can't evaluate _BBN\n");
+ result = -ENODEV;
+ goto end;
}
}
- root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
- if (!root)
- return -ENOMEM;
-
INIT_LIST_HEAD(&root->node);
root->device = device;
root->segment = segment & 0xFFFF;
- root->bus_nr = bus & 0xFF;
strcpy(acpi_device_name(device), ACPI_PCI_ROOT_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS);
device->driver_data = root;
@@ -475,9 +488,9 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
/* TBD: Locking */
list_add_tail(&root->node, &acpi_pci_roots);
- printk(KERN_INFO PREFIX "%s [%s] (%04x:%02x)\n",
+ printk(KERN_INFO PREFIX "%s [%s] (domain %04x %pR)\n",
acpi_device_name(device), acpi_device_bid(device),
- root->segment, root->bus_nr);
+ root->segment, &root->secondary);
/*
* Scan the Root Bridge
@@ -486,11 +499,11 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
* PCI namespace does not get created until this call is made (and
* thus the root bridge's pci_dev does not exist).
*/
- root->bus = pci_acpi_scan_root(device, segment, bus);
+ root->bus = pci_acpi_scan_root(root);
if (!root->bus) {
printk(KERN_ERR PREFIX
"Bus %04x:%02x not present in PCI namespace\n",
- root->segment, root->bus_nr);
+ root->segment, (unsigned int)root->secondary.start);
result = -ENODEV;
goto end;
}
diff --git a/drivers/acpi/power_meter.c b/drivers/acpi/power_meter.c
index e8c32a4..66f6729 100644
--- a/drivers/acpi/power_meter.c
+++ b/drivers/acpi/power_meter.c
@@ -35,7 +35,7 @@
#define ACPI_POWER_METER_NAME "power_meter"
ACPI_MODULE_NAME(ACPI_POWER_METER_NAME);
#define ACPI_POWER_METER_DEVICE_NAME "Power Meter"
-#define ACPI_POWER_METER_CLASS "power_meter_resource"
+#define ACPI_POWER_METER_CLASS "pwr_meter_resource"
#define NUM_SENSORS 17
diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
index 36704b8..f8be23b 100644
--- a/drivers/acpi/sbshc.c
+++ b/drivers/acpi/sbshc.c
@@ -18,7 +18,7 @@
#define PREFIX "ACPI: "
-#define ACPI_SMB_HC_CLASS "smbus_host_controller"
+#define ACPI_SMB_HC_CLASS "smbus_host_ctl"
#define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC"
struct acpi_smb_hc {
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index f74834a..4ab2275 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -80,22 +80,6 @@ static int acpi_sleep_prepare(u32 acpi_state)
#ifdef CONFIG_ACPI_SLEEP
static u32 acpi_target_sleep_state = ACPI_STATE_S0;
-/*
- * According to the ACPI specification the BIOS should make sure that ACPI is
- * enabled and SCI_EN bit is set on wake-up from S1 - S3 sleep states. Still,
- * some BIOSes don't do that and therefore we use acpi_enable() to enable ACPI
- * on such systems during resume. Unfortunately that doesn't help in
- * particularly pathological cases in which SCI_EN has to be set directly on
- * resume, although the specification states very clearly that this flag is
- * owned by the hardware. The set_sci_en_on_resume variable will be set in such
- * cases.
- */
-static bool set_sci_en_on_resume;
-
-void __init acpi_set_sci_en_on_resume(void)
-{
- set_sci_en_on_resume = true;
-}
/*
* ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the
@@ -253,11 +237,8 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
break;
}
- /* If ACPI is not enabled by the BIOS, we need to enable it here. */
- if (set_sci_en_on_resume)
- acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
- else
- acpi_enable();
+ /* This violates the spec but is required for bug compatibility. */
+ acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
/* Reprogram control registers and execute _BFS */
acpi_leave_sleep_state_prep(acpi_state);
@@ -346,12 +327,6 @@ static int __init init_old_suspend_ordering(const struct dmi_system_id *d)
return 0;
}
-static int __init init_set_sci_en_on_resume(const struct dmi_system_id *d)
-{
- set_sci_en_on_resume = true;
- return 0;
-}
-
static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
{
.callback = init_old_suspend_ordering,
@@ -370,22 +345,6 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
},
},
{
- .callback = init_set_sci_en_on_resume,
- .ident = "Apple MacBook 1,1",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "MacBook1,1"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Apple MacMini 1,1",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Macmini1,1"),
- },
- },
- {
.callback = init_old_suspend_ordering,
.ident = "Asus Pundit P1-AH2 (M2N8L motherboard)",
.matches = {
@@ -394,62 +353,6 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
},
},
{
- .callback = init_set_sci_en_on_resume,
- .ident = "Toshiba Satellite L300",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Satellite L300"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Hewlett-Packard HP G7000 Notebook PC",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP G7000 Notebook PC"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Hewlett-Packard HP Pavilion dv3 Notebook PC",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv3 Notebook PC"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Hewlett-Packard Pavilion dv4",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Hewlett-Packard Pavilion dv7",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv7"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Hewlett-Packard Compaq Presario C700 Notebook PC",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Compaq Presario C700 Notebook PC"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Hewlett-Packard Compaq Presario CQ40 Notebook PC",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Compaq Presario CQ40 Notebook PC"),
- },
- },
- {
.callback = init_old_suspend_ordering,
.ident = "Panasonic CF51-2L",
.matches = {
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index fc2f26b..c5fef01 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -250,7 +250,7 @@ static int __init acpi_backlight(char *str)
ACPI_VIDEO_BACKLIGHT_FORCE_VENDOR;
if (!strcmp("video", str))
acpi_video_support |=
- ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VIDEO;
+ ACPI_VIDEO_BACKLIGHT_FORCE_VIDEO;
}
return 1;
}