Diffstat (limited to 'drivers/edac')
-rw-r--r--  drivers/edac/Kconfig            |  170
-rw-r--r--  drivers/edac/Makefile           |   36
-rw-r--r--  drivers/edac/amd76x_edac.c      |  367
-rw-r--r--  drivers/edac/cell_edac.c        |  262
-rw-r--r--  drivers/edac/e752x_edac.c       | 1351
-rw-r--r--  drivers/edac/e7xxx_edac.c       |  577
-rw-r--r--  drivers/edac/edac_core.h        |  850
-rw-r--r--  drivers/edac/edac_device.c      |  725
-rw-r--r--  drivers/edac/edac_device_sysfs.c |  881
-rw-r--r--  drivers/edac/edac_mc.c          |  888
-rw-r--r--  drivers/edac/edac_mc_sysfs.c    |  939
-rw-r--r--  drivers/edac/edac_module.c      |  222
-rw-r--r--  drivers/edac/edac_module.h      |   84
-rw-r--r--  drivers/edac/edac_pci.c         |  497
-rw-r--r--  drivers/edac/edac_pci_sysfs.c   |  767
-rw-r--r--  drivers/edac/edac_stub.c        |   46
-rw-r--r--  drivers/edac/i3000_edac.c       |  554
-rw-r--r--  drivers/edac/i5000_edac.c       | 1573
-rw-r--r--  drivers/edac/i5100_edac.c       |  981
-rw-r--r--  drivers/edac/i82443bxgx_edac.c  |  468
-rw-r--r--  drivers/edac/i82860_edac.c      |  354
-rw-r--r--  drivers/edac/i82875p_edac.c     |  599
-rw-r--r--  drivers/edac/i82975x_edac.c     |  672
-rw-r--r--  drivers/edac/mpc85xx_edac.c     | 1078
-rw-r--r--  drivers/edac/mpc85xx_edac.h     |  162
-rw-r--r--  drivers/edac/mv64x60_edac.c     |  890
-rw-r--r--  drivers/edac/mv64x60_edac.h     |  114
-rw-r--r--  drivers/edac/pasemi_edac.c      |  307
-rw-r--r--  drivers/edac/r82600_edac.c      |  421
-rw-r--r--  drivers/edac/x38_edac.c         |  524
30 files changed, 17359 insertions, 0 deletions
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
new file mode 100644
index 0000000..e0dbd38
--- /dev/null
+++ b/drivers/edac/Kconfig
@@ -0,0 +1,170 @@
+#
+# EDAC Kconfig
+# Copyright (c) 2003 Linux Networx
+# Licensed and distributed under the GPL
+#
+
+menuconfig EDAC
+ bool "EDAC - error detection and reporting (EXPERIMENTAL)"
+ depends on HAS_IOMEM
+ depends on EXPERIMENTAL
+ depends on X86 || PPC
+ help
+ EDAC is designed to report errors in the core system.
+ These are low-level errors reported by the CPU, the
+ supporting chipset or other subsystems:
+ memory errors, cache errors, PCI errors, thermal throttling, etc.
+ If unsure, select 'Y'.
+
+ If this code is reporting problems on your system, please
+ see the EDAC project web pages for more information at:
+
+ <http://bluesmoke.sourceforge.net/>
+
+ and:
+
+ <http://buttersideup.com/edacwiki>
+
+ There is also a mailing list for the EDAC project, which can
+ be found via the sourceforge page.
+
+if EDAC
+
+comment "Reporting subsystems"
+
+config EDAC_DEBUG
+ bool "Debugging"
+ help
+ This turns on debugging information for the entire EDAC
+ sub-system. You can load the module with "debug_level=x";
+ currently there are four debug levels (x=0,1,2,3, from low to high).
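+ For example, "insmod edac_core.ko debug_level=2" (using the
+ parameter named above) enables debug output at levels 0-2.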
+ Usually you should select 'N'.
+
+config EDAC_MM_EDAC
+ tristate "Main Memory EDAC (Error Detection And Correction) reporting"
+ default y
+ help
+ Some systems are able to detect and correct errors in main
+ memory. EDAC can report statistics on memory error
+ detection and correction (EDAC - or commonly referred to ECC
+ errors). EDAC will also try to decode where these errors
+ occurred so that a particular failing memory module can be
+ replaced. If unsure, select 'Y'.
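+ Error counts and control files are exposed through sysfs
+ (typically under /sys/devices/system/edac/mc/ in this version).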
+
+
+config EDAC_AMD76X
+ tristate "AMD 76x (760, 762, 768)"
+ depends on EDAC_MM_EDAC && PCI && X86_32
+ help
+ Support for error detection and correction on the AMD 76x
+ series of chipsets used with the Athlon processor.
+
+config EDAC_E7XXX
+ tristate "Intel e7xxx (e7205, e7500, e7501, e7505)"
+ depends on EDAC_MM_EDAC && PCI && X86_32
+ help
+ Support for error detection and correction on the Intel
+ E7205, E7500, E7501 and E7505 server chipsets.
+
+config EDAC_E752X
+ tristate "Intel e752x (e7520, e7525, e7320) and 3100"
+ depends on EDAC_MM_EDAC && PCI && X86 && HOTPLUG
+ help
+ Support for error detection and correction on the Intel
+ E7520, E7525, E7320 and 3100 server chipsets.
+
+config EDAC_I82443BXGX
+ tristate "Intel 82443BX/GX (440BX/GX)"
+ depends on EDAC_MM_EDAC && PCI && X86_32
+ depends on BROKEN
+ help
+ Support for error detection and correction on the Intel
+ 82443BX/GX memory controllers (440BX/GX chipsets).
+
+config EDAC_I82875P
+ tristate "Intel 82875p (D82875P, E7210)"
+ depends on EDAC_MM_EDAC && PCI && X86_32
+ help
+ Support for error detection and correction on the Intel
+ D82875P and E7210 server chipsets.
+
+config EDAC_I82975X
+ tristate "Intel 82975x (D82975x)"
+ depends on EDAC_MM_EDAC && PCI && X86
+ help
+ Support for error detection and correction on the Intel
+ D82975x server chipsets.
+
+config EDAC_I3000
+ tristate "Intel 3000/3010"
+ depends on EDAC_MM_EDAC && PCI && X86
+ help
+ Support for error detection and correction on the Intel
+ 3000 and 3010 server chipsets.
+
+config EDAC_X38
+ tristate "Intel X38"
+ depends on EDAC_MM_EDAC && PCI && X86
+ help
+ Support for error detection and correction on the Intel
+ X38 server chipsets.
+
+config EDAC_I82860
+ tristate "Intel 82860"
+ depends on EDAC_MM_EDAC && PCI && X86_32
+ help
+ Support for error detection and correction on the Intel
+ 82860 chipset.
+
+config EDAC_R82600
+ tristate "Radisys 82600 embedded chipset"
+ depends on EDAC_MM_EDAC && PCI && X86_32
+ help
+ Support for error detection and correction on the Radisys
+ 82600 embedded chipset.
+
+config EDAC_I5000
+ tristate "Intel Greencreek/Blackford chipset"
+ depends on EDAC_MM_EDAC && X86 && PCI
+ help
+ Support for error detection and correction on the Intel
+ Greencreek/Blackford chipsets.
+
+config EDAC_I5100
+ tristate "Intel San Clemente MCH"
+ depends on EDAC_MM_EDAC && X86 && PCI
+ help
+ Support for error detection and correction on the Intel
+ San Clemente MCH.
+
+config EDAC_MPC85XX
+ tristate "Freescale MPC85xx"
+ depends on EDAC_MM_EDAC && FSL_SOC && MPC85xx
+ help
+ Support for error detection and correction on the Freescale
+ MPC8560, MPC8540 and MPC8548 processors.
+
+config EDAC_MV64X60
+ tristate "Marvell MV64x60"
+ depends on EDAC_MM_EDAC && MV64X60
+ help
+ Support for error detection and correction on the Marvell
+ MV64360 and MV64460 chipsets.
+
+config EDAC_PASEMI
+ tristate "PA Semi PWRficient"
+ depends on EDAC_MM_EDAC && PCI
+ depends on PPC_PASEMI
+ help
+ Support for error detection and correction on PA Semi
+ PWRficient.
+
+config EDAC_CELL
+ tristate "Cell Broadband Engine memory controller"
+ depends on EDAC_MM_EDAC && PPC_CELL_NATIVE
+ help
+ Support for error detection and correction on the
+ Cell Broadband Engine internal memory controller
+ on platforms without a hypervisor.
+
+endif # EDAC
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
new file mode 100644
index 0000000..62c2d9b
--- /dev/null
+++ b/drivers/edac/Makefile
@@ -0,0 +1,36 @@
+#
+# Makefile for the Linux kernel EDAC drivers.
+#
+# Copyright 02 Jul 2003, Linux Networx (http://lnxi.com)
+# This file may be distributed under the terms of the
+# GNU General Public License.
+#
+
+
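+# edac_stub is built in whenever CONFIG_EDAC is enabled (a bool option),
+# so its error-assert hooks are always available even when the EDAC core
+# itself is built as a module.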
+obj-$(CONFIG_EDAC) := edac_stub.o
+obj-$(CONFIG_EDAC_MM_EDAC) += edac_core.o
+
+edac_core-objs := edac_mc.o edac_device.o edac_mc_sysfs.o
+edac_core-objs += edac_module.o edac_device_sysfs.o
+
+ifdef CONFIG_PCI
+edac_core-objs += edac_pci.o edac_pci_sysfs.o
+endif
+
+obj-$(CONFIG_EDAC_AMD76X) += amd76x_edac.o
+obj-$(CONFIG_EDAC_I5000) += i5000_edac.o
+obj-$(CONFIG_EDAC_I5100) += i5100_edac.o
+obj-$(CONFIG_EDAC_E7XXX) += e7xxx_edac.o
+obj-$(CONFIG_EDAC_E752X) += e752x_edac.o
+obj-$(CONFIG_EDAC_I82443BXGX) += i82443bxgx_edac.o
+obj-$(CONFIG_EDAC_I82875P) += i82875p_edac.o
+obj-$(CONFIG_EDAC_I82975X) += i82975x_edac.o
+obj-$(CONFIG_EDAC_I3000) += i3000_edac.o
+obj-$(CONFIG_EDAC_X38) += x38_edac.o
+obj-$(CONFIG_EDAC_I82860) += i82860_edac.o
+obj-$(CONFIG_EDAC_R82600) += r82600_edac.o
+obj-$(CONFIG_EDAC_PASEMI) += pasemi_edac.o
+obj-$(CONFIG_EDAC_MPC85XX) += mpc85xx_edac.o
+obj-$(CONFIG_EDAC_MV64X60) += mv64x60_edac.o
+obj-$(CONFIG_EDAC_CELL) += cell_edac.o
+
diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
new file mode 100644
index 0000000..2b95f1a
--- /dev/null
+++ b/drivers/edac/amd76x_edac.c
@@ -0,0 +1,367 @@
+/*
+ * AMD 76x Memory Controller kernel module
+ * (C) 2003 Linux Networx (http://lnxi.com)
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written by Thayne Harbaugh
+ * Based on work by Dan Hollis <goemon at anime dot net> and others.
+ * http://www.anime.net/~goemon/linux-ecc/
+ *
+ * $Id: edac_amd76x.c,v 1.4.2.5 2005/10/05 00:43:44 dsp_llnl Exp $
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+#include <linux/edac.h>
+#include "edac_core.h"
+
+#define AMD76X_REVISION " Ver: 2.0.2 " __DATE__
+#define EDAC_MOD_STR "amd76x_edac"
+
+#define amd76x_printk(level, fmt, arg...) \
+ edac_printk(level, "amd76x", fmt, ##arg)
+
+#define amd76x_mc_printk(mci, level, fmt, arg...) \
+ edac_mc_chipset_printk(mci, level, "amd76x", fmt, ##arg)
+
+#define AMD76X_NR_CSROWS 8
+#define AMD76X_NR_CHANS 1
+#define AMD76X_NR_DIMMS 4
+
+/* AMD 76x register addresses - device 0 function 0 - PCI bridge */
+
+#define AMD76X_ECC_MODE_STATUS 0x48 /* Mode and status of ECC (32b)
+ *
+ * 31:16 reserved
+ * 15:14 SERR enabled: x1=ue 1x=ce
+ * 13 reserved
+ * 12 diag: disabled, enabled
+ * 11:10 mode: dis, EC, ECC, ECC+scrub
+ * 9:8 status: x1=ue 1x=ce
+ * 7:4 UE cs row
+ * 3:0 CE cs row
+ */
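+/* Example decode (illustrative value): ecc_mode_status = 0x00000a05
+ * has mode bits 11:10 = 10b (ECC), bit 9 set (CE pending) and
+ * bits 3:0 = 5, i.e. a correctable error was logged against csrow 5.
+ */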
+
+#define AMD76X_DRAM_MODE_STATUS 0x58 /* DRAM Mode and status (32b)
+ *
+ * 31:26 clock disable 5 - 0
+ * 25 SDRAM init
+ * 24 reserved
+ * 23 mode register service
+ * 22:21 suspend to RAM
+ * 20 burst refresh enable
+ * 19 refresh disable
+ * 18 reserved
+ * 17:16 cycles-per-refresh
+ * 15:8 reserved
+ * 7:0 x4 mode enable 7 - 0
+ */
+
+#define AMD76X_MEM_BASE_ADDR 0xC0 /* Memory base address (8 x 32b)
+ *
+ * 31:23 chip-select base
+ * 22:16 reserved
+ * 15:7 chip-select mask
+ * 6:3 reserved
+ * 2:1 address mode
+ * 0 chip-select enable
+ */
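+/* Example decode (illustrative value): mba = 0x08000001 -> chip-select
+ * enabled (bit 0), base = 0x08000000 (128MB) and, with the mask bits
+ * clear, mba_mask = 0x007fffff, so amd76x_init_csrows() computes an
+ * 8MB csrow: first_page = 0x8000, nr_pages = 0x800.
+ */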
+
+struct amd76x_error_info {
+ u32 ecc_mode_status;
+};
+
+enum amd76x_chips {
+ AMD761 = 0,
+ AMD762
+};
+
+struct amd76x_dev_info {
+ const char *ctl_name;
+};
+
+static const struct amd76x_dev_info amd76x_devs[] = {
+ [AMD761] = {
+ .ctl_name = "AMD761"},
+ [AMD762] = {
+ .ctl_name = "AMD762"},
+};
+
+static struct edac_pci_ctl_info *amd76x_pci;
+
+/**
+ * amd76x_get_error_info - fetch error information
+ * @mci: Memory controller
+ * @info: Info to fill in
+ *
+ * Fetch and store the AMD76x ECC status. Clear pending status
+ * on the chip so that further errors will be reported
+ */
+static void amd76x_get_error_info(struct mem_ctl_info *mci,
+ struct amd76x_error_info *info)
+{
+ struct pci_dev *pdev;
+
+ pdev = to_pci_dev(mci->dev);
+ pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS,
+ &info->ecc_mode_status);
+
+ if (info->ecc_mode_status & BIT(8))
+ pci_write_bits32(pdev, AMD76X_ECC_MODE_STATUS,
+ (u32) BIT(8), (u32) BIT(8));
+
+ if (info->ecc_mode_status & BIT(9))
+ pci_write_bits32(pdev, AMD76X_ECC_MODE_STATUS,
+ (u32) BIT(9), (u32) BIT(9));
+}
+
+/**
+ * amd76x_process_error_info - Error check
+ * @mci: Memory controller
+ * @info: Previously fetched information from chip
+ * @handle_errors: 1 if we should do recovery
+ *
+ * Process the chip state and decide if an error has occurred.
+ * A return of 1 indicates an error. Also if handle_errors is true
+ * then attempt to handle and clean up after the error
+ */
+static int amd76x_process_error_info(struct mem_ctl_info *mci,
+ struct amd76x_error_info *info,
+ int handle_errors)
+{
+ int error_found;
+ u32 row;
+
+ error_found = 0;
+
+ /*
+ * Check for an uncorrectable error
+ */
+ if (info->ecc_mode_status & BIT(8)) {
+ error_found = 1;
+
+ if (handle_errors) {
+ row = (info->ecc_mode_status >> 4) & 0xf;
+ edac_mc_handle_ue(mci, mci->csrows[row].first_page, 0,
+ row, mci->ctl_name);
+ }
+ }
+
+ /*
+ * Check for a correctable error
+ */
+ if (info->ecc_mode_status & BIT(9)) {
+ error_found = 1;
+
+ if (handle_errors) {
+ row = info->ecc_mode_status & 0xf;
+ edac_mc_handle_ce(mci, mci->csrows[row].first_page, 0,
+ 0, row, 0, mci->ctl_name);
+ }
+ }
+
+ return error_found;
+}
+
+/**
+ * amd76x_check - Poll the controller
+ * @mci: Memory controller
+ *
+ * Called by the poll handlers this function reads the status
+ * from the controller and checks for errors.
+ */
+static void amd76x_check(struct mem_ctl_info *mci)
+{
+ struct amd76x_error_info info;
+ debugf3("%s()\n", __func__);
+ amd76x_get_error_info(mci, &info);
+ amd76x_process_error_info(mci, &info, 1);
+}
+
+static void amd76x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
+ enum edac_type edac_mode)
+{
+ struct csrow_info *csrow;
+ u32 mba, mba_base, mba_mask, dms;
+ int index;
+
+ for (index = 0; index < mci->nr_csrows; index++) {
+ csrow = &mci->csrows[index];
+
+ /* find the DRAM Chip Select Base address and mask */
+ pci_read_config_dword(pdev,
+ AMD76X_MEM_BASE_ADDR + (index * 4), &mba);
+
+ if (!(mba & BIT(0)))
+ continue;
+
+ mba_base = mba & 0xff800000UL;
+ mba_mask = ((mba & 0xff80) << 16) | 0x7fffffUL;
+ pci_read_config_dword(pdev, AMD76X_DRAM_MODE_STATUS, &dms);
+ csrow->first_page = mba_base >> PAGE_SHIFT;
+ csrow->nr_pages = (mba_mask + 1) >> PAGE_SHIFT;
+ csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
+ csrow->page_mask = mba_mask >> PAGE_SHIFT;
+ csrow->grain = csrow->nr_pages << PAGE_SHIFT;
+ csrow->mtype = MEM_RDDR;
+ csrow->dtype = ((dms >> index) & 0x1) ? DEV_X4 : DEV_UNKNOWN;
+ csrow->edac_mode = edac_mode;
+ }
+}
+
+/**
+ * amd76x_probe1 - Perform set up for detected device
+ * @pdev: PCI device detected
+ * @dev_idx: Device type index
+ *
+ * We have found an AMD76x and now need to set up the memory
+ * controller status reporting. We configure and set up the
+ * memory controller reporting and claim the device.
+ */
+static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
+{
+ static const enum edac_type ems_modes[] = {
+ EDAC_NONE,
+ EDAC_EC,
+ EDAC_SECDED,
+ EDAC_SECDED
+ };
+ struct mem_ctl_info *mci = NULL;
+ u32 ems;
+ u32 ems_mode;
+ struct amd76x_error_info discard;
+
+ debugf0("%s()\n", __func__);
+ pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS, &ems);
+ ems_mode = (ems >> 10) & 0x3;
+ mci = edac_mc_alloc(0, AMD76X_NR_CSROWS, AMD76X_NR_CHANS, 0);
+
+ if (mci == NULL) {
+ return -ENOMEM;
+ }
+
+ debugf0("%s(): mci = %p\n", __func__, mci);
+ mci->dev = &pdev->dev;
+ mci->mtype_cap = MEM_FLAG_RDDR;
+ mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
+ mci->edac_cap = ems_mode ?
+ (EDAC_FLAG_EC | EDAC_FLAG_SECDED) : EDAC_FLAG_NONE;
+ mci->mod_name = EDAC_MOD_STR;
+ mci->mod_ver = AMD76X_REVISION;
+ mci->ctl_name = amd76x_devs[dev_idx].ctl_name;
+ mci->dev_name = pci_name(pdev);
+ mci->edac_check = amd76x_check;
+ mci->ctl_page_to_phys = NULL;
+
+ amd76x_init_csrows(mci, pdev, ems_modes[ems_mode]);
+ amd76x_get_error_info(mci, &discard); /* clear counters */
+
+ /* Here we assume that we will never see multiple instances of this
+ * type of memory controller. The ID is therefore hardcoded to 0.
+ */
+ if (edac_mc_add_mc(mci)) {
+ debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
+ goto fail;
+ }
+
+ /* allocating generic PCI control info */
+ amd76x_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
+ if (!amd76x_pci) {
+ printk(KERN_WARNING
+ "%s(): Unable to create PCI control\n",
+ __func__);
+ printk(KERN_WARNING
+ "%s(): PCI error report via EDAC not setup\n",
+ __func__);
+ }
+
+ /* get this far and it's successful */
+ debugf3("%s(): success\n", __func__);
+ return 0;
+
+fail:
+ edac_mc_free(mci);
+ return -ENODEV;
+}
+
+/* returns count (>= 0), or negative on error */
+static int __devinit amd76x_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ debugf0("%s()\n", __func__);
+
+ /* don't need to call pci_enable_device() */
+ return amd76x_probe1(pdev, ent->driver_data);
+}
+
+/**
+ * amd76x_remove_one - driver shutdown
+ * @pdev: PCI device being handed back
+ *
+ * Called when the driver is unloaded. Find the matching mci
+ * structure for the device then delete the mci and free the
+ * resources.
+ */
+static void __devexit amd76x_remove_one(struct pci_dev *pdev)
+{
+ struct mem_ctl_info *mci;
+
+ debugf0("%s()\n", __func__);
+
+ if (amd76x_pci)
+ edac_pci_release_generic_ctl(amd76x_pci);
+
+ if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
+ return;
+
+ edac_mc_free(mci);
+}
+
+static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
+ {
+ PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ AMD762},
+ {
+ PCI_VEND_DEV(AMD, FE_GATE_700E), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ AMD761},
+ {
+ 0,
+ } /* 0 terminated list. */
+};
+
+MODULE_DEVICE_TABLE(pci, amd76x_pci_tbl);
+
+static struct pci_driver amd76x_driver = {
+ .name = EDAC_MOD_STR,
+ .probe = amd76x_init_one,
+ .remove = __devexit_p(amd76x_remove_one),
+ .id_table = amd76x_pci_tbl,
+};
+
+static int __init amd76x_init(void)
+{
+ /* Ensure that the OPSTATE is set correctly for POLL or NMI */
+ opstate_init();
+
+ return pci_register_driver(&amd76x_driver);
+}
+
+static void __exit amd76x_exit(void)
+{
+ pci_unregister_driver(&amd76x_driver);
+}
+
+module_init(amd76x_init);
+module_exit(amd76x_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh");
+MODULE_DESCRIPTION("MC support for AMD 76x memory controllers");
+
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
diff --git a/drivers/edac/cell_edac.c b/drivers/edac/cell_edac.c
new file mode 100644
index 0000000..cd2e3b8
--- /dev/null
+++ b/drivers/edac/cell_edac.c
@@ -0,0 +1,262 @@
+/*
+ * Cell MIC driver for ECC counting
+ *
+ * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
+ * <benh@kernel.crashing.org>
+ *
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ */
+#undef DEBUG
+
+#include <linux/edac.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/stop_machine.h>
+#include <linux/io.h>
+#include <asm/machdep.h>
+#include <asm/cell-regs.h>
+
+#include "edac_core.h"
+
+struct cell_edac_priv
+{
+ struct cbe_mic_tm_regs __iomem *regs;
+ int node;
+ int chanmask;
+#ifdef DEBUG
+ u64 prev_fir;
+#endif
+};
+
+static void cell_edac_count_ce(struct mem_ctl_info *mci, int chan, u64 ar)
+{
+ struct cell_edac_priv *priv = mci->pvt_info;
+ struct csrow_info *csrow = &mci->csrows[0];
+ unsigned long address, pfn, offset, syndrome;
+
+ dev_dbg(mci->dev, "ECC CE err on node %d, channel %d, ar = 0x%016lx\n",
+ priv->node, chan, ar);
+
+ /* Address decoding is likely a bit bogus, needs double-checking */
+ address = (ar & 0xffffffffe0000000ul) >> 29;
+ if (priv->chanmask == 0x3)
+ address = (address << 1) | chan;
+ pfn = address >> PAGE_SHIFT;
+ offset = address & ~PAGE_MASK;
+ syndrome = (ar & 0x000000001fe00000ul) >> 21;
+
+ /* TODO: Decoding of the error address */
+ edac_mc_handle_ce(mci, csrow->first_page + pfn, offset,
+ syndrome, 0, chan, "");
+}
+
+static void cell_edac_count_ue(struct mem_ctl_info *mci, int chan, u64 ar)
+{
+ struct cell_edac_priv *priv = mci->pvt_info;
+ struct csrow_info *csrow = &mci->csrows[0];
+ unsigned long address, pfn, offset;
+
+ dev_dbg(mci->dev, "ECC UE err on node %d, channel %d, ar = 0x%016lx\n",
+ priv->node, chan, ar);
+
+ /* Address decoding is likely a bit bogus, needs double-checking */
+ address = (ar & 0xffffffffe0000000ul) >> 29;
+ if (priv->chanmask == 0x3)
+ address = (address << 1) | chan;
+ pfn = address >> PAGE_SHIFT;
+ offset = address & ~PAGE_MASK;
+
+ /* TODO: Decoding of the error address */
+ edac_mc_handle_ue(mci, csrow->first_page + pfn, offset, 0, "");
+}
+
+static void cell_edac_check(struct mem_ctl_info *mci)
+{
+ struct cell_edac_priv *priv = mci->pvt_info;
+ u64 fir, addreg, clear = 0;
+
+ fir = in_be64(&priv->regs->mic_fir);
+#ifdef DEBUG
+ if (fir != priv->prev_fir) {
+ dev_dbg(mci->dev, "fir change : 0x%016lx\n", fir);
+ priv->prev_fir = fir;
+ }
+#endif
+ if ((priv->chanmask & 0x1) && (fir & CBE_MIC_FIR_ECC_SINGLE_0_ERR)) {
+ addreg = in_be64(&priv->regs->mic_df_ecc_address_0);
+ clear |= CBE_MIC_FIR_ECC_SINGLE_0_RESET;
+ cell_edac_count_ce(mci, 0, addreg);
+ }
+ if ((priv->chanmask & 0x2) && (fir & CBE_MIC_FIR_ECC_SINGLE_1_ERR)) {
+ addreg = in_be64(&priv->regs->mic_df_ecc_address_1);
+ clear |= CBE_MIC_FIR_ECC_SINGLE_1_RESET;
+ cell_edac_count_ce(mci, 1, addreg);
+ }
+ if ((priv->chanmask & 0x1) && (fir & CBE_MIC_FIR_ECC_MULTI_0_ERR)) {
+ addreg = in_be64(&priv->regs->mic_df_ecc_address_0);
+ clear |= CBE_MIC_FIR_ECC_MULTI_0_RESET;
+ cell_edac_count_ue(mci, 0, addreg);
+ }
+ if ((priv->chanmask & 0x2) && (fir & CBE_MIC_FIR_ECC_MULTI_1_ERR)) {
+ addreg = in_be64(&priv->regs->mic_df_ecc_address_1);
+ clear |= CBE_MIC_FIR_ECC_MULTI_1_RESET;
+ cell_edac_count_ue(mci, 1, addreg);
+ }
+
+ /* The procedure for clearing FIR bits is a bit ... weird */
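+ /* Per the masks used below: drop the error/set latches, assert the
+ * reset bits for the errors handled above, write the FIR back, then
+ * read it once to push the store out before continuing.
+ */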
+ if (clear) {
+ fir &= ~(CBE_MIC_FIR_ECC_ERR_MASK | CBE_MIC_FIR_ECC_SET_MASK);
+ fir |= CBE_MIC_FIR_ECC_RESET_MASK;
+ fir &= ~clear;
+ out_be64(&priv->regs->mic_fir, fir);
+ (void)in_be64(&priv->regs->mic_fir);
+
+ mb(); /* sync up */
+#ifdef DEBUG
+ fir = in_be64(&priv->regs->mic_fir);
+ dev_dbg(mci->dev, "fir clear : 0x%016lx\n", fir);
+#endif
+ }
+}
+
+static void __devinit cell_edac_init_csrows(struct mem_ctl_info *mci)
+{
+ struct csrow_info *csrow = &mci->csrows[0];
+ struct cell_edac_priv *priv = mci->pvt_info;
+ struct device_node *np;
+
+ for (np = NULL;
+ (np = of_find_node_by_name(np, "memory")) != NULL;) {
+ struct resource r;
+
+ /* We "know" that the Cell firmware only creates one entry
+ * in the "memory" nodes. If that changes, this code will
+ * need to be adapted.
+ */
+ if (of_address_to_resource(np, 0, &r))
+ continue;
+ if (of_node_to_nid(np) != priv->node)
+ continue;
+ csrow->first_page = r.start >> PAGE_SHIFT;
+ csrow->nr_pages = (r.end - r.start + 1) >> PAGE_SHIFT;
+ csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
+ csrow->mtype = MEM_XDR;
+ csrow->edac_mode = EDAC_SECDED;
+ dev_dbg(mci->dev,
+ "Initialized on node %d, chanmask=0x%x,"
+ " first_page=0x%lx, nr_pages=0x%x\n",
+ priv->node, priv->chanmask,
+ csrow->first_page, csrow->nr_pages);
+ break;
+ }
+}
+
+static int __devinit cell_edac_probe(struct platform_device *pdev)
+{
+ struct cbe_mic_tm_regs __iomem *regs;
+ struct mem_ctl_info *mci;
+ struct cell_edac_priv *priv;
+ u64 reg;
+ int rc, chanmask;
+
+ regs = cbe_get_cpu_mic_tm_regs(cbe_node_to_cpu(pdev->id));
+ if (regs == NULL)
+ return -ENODEV;
+
+ edac_op_state = EDAC_OPSTATE_POLL;
+
+ /* Get channel population */
+ reg = in_be64(&regs->mic_mnt_cfg);
+ dev_dbg(&pdev->dev, "MIC_MNT_CFG = 0x%016lx\n", reg);
+ chanmask = 0;
+ if (reg & CBE_MIC_MNT_CFG_CHAN_0_POP)
+ chanmask |= 0x1;
+ if (reg & CBE_MIC_MNT_CFG_CHAN_1_POP)
+ chanmask |= 0x2;
+ if (chanmask == 0) {
+ dev_warn(&pdev->dev,
+ "Yuck! No channel populated? Aborting!\n");
+ return -ENODEV;
+ }
+ dev_dbg(&pdev->dev, "Initial FIR = 0x%016lx\n",
+ in_be64(&regs->mic_fir));
+
+ /* Allocate & init EDAC MC data structure */
+ mci = edac_mc_alloc(sizeof(struct cell_edac_priv), 1,
+ chanmask == 3 ? 2 : 1, pdev->id);
+ if (mci == NULL)
+ return -ENOMEM;
+ priv = mci->pvt_info;
+ priv->regs = regs;
+ priv->node = pdev->id;
+ priv->chanmask = chanmask;
+ mci->dev = &pdev->dev;
+ mci->mtype_cap = MEM_FLAG_XDR;
+ mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
+ mci->edac_cap = EDAC_FLAG_EC | EDAC_FLAG_SECDED;
+ mci->mod_name = "cell_edac";
+ mci->ctl_name = "MIC";
+ mci->dev_name = pdev->dev.bus_id;
+ mci->edac_check = cell_edac_check;
+ cell_edac_init_csrows(mci);
+
+ /* Register with EDAC core */
+ rc = edac_mc_add_mc(mci);
+ if (rc) {
+ dev_err(&pdev->dev, "failed to register with EDAC core\n");
+ edac_mc_free(mci);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int __devexit cell_edac_remove(struct platform_device *pdev)
+{
+ struct mem_ctl_info *mci = edac_mc_del_mc(&pdev->dev);
+ if (mci)
+ edac_mc_free(mci);
+ return 0;
+}
+
+static struct platform_driver cell_edac_driver = {
+ .driver = {
+ .name = "cbe-mic",
+ .owner = THIS_MODULE,
+ },
+ .probe = cell_edac_probe,
+ .remove = cell_edac_remove,
+};
+
+static int __init cell_edac_init(void)
+{
+ /* Sanity check registers data structure */
+ BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
+ mic_df_ecc_address_0) != 0xf8);
+ BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
+ mic_df_ecc_address_1) != 0x1b8);
+ BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
+ mic_df_config) != 0x218);
+ BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
+ mic_fir) != 0x230);
+ BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
+ mic_mnt_cfg) != 0x210);
+ BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
+ mic_exc) != 0x208);
+
+ return platform_driver_register(&cell_edac_driver);
+}
+
+static void __exit cell_edac_exit(void)
+{
+ platform_driver_unregister(&cell_edac_driver);
+}
+
+module_init(cell_edac_init);
+module_exit(cell_edac_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
+MODULE_DESCRIPTION("ECC counting for Cell MIC");
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
new file mode 100644
index 0000000..facfdb1
--- /dev/null
+++ b/drivers/edac/e752x_edac.c
@@ -0,0 +1,1351 @@
+/*
+ * Intel e752x Memory Controller kernel module
+ * (C) 2004 Linux Networx (http://lnxi.com)
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * See "enum e752x_chips" below for supported chipsets
+ *
+ * Written by Tom Zimmerman
+ *
+ * Contributors:
+ * Thayne Harbaugh at realmsys.com (?)
+ * Wang Zhenyu at intel.com
+ * Dave Jiang at mvista.com
+ *
+ * $Id: edac_e752x.c,v 1.5.2.11 2005/10/05 00:43:44 dsp_llnl Exp $
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+#include <linux/edac.h>
+#include "edac_core.h"
+
+#define E752X_REVISION " Ver: 2.0.2 " __DATE__
+#define EDAC_MOD_STR "e752x_edac"
+
+static int report_non_memory_errors;
+static int force_function_unhide;
+static int sysbus_parity = -1;
+
+static struct edac_pci_ctl_info *e752x_pci;
+
+#define e752x_printk(level, fmt, arg...) \
+ edac_printk(level, "e752x", fmt, ##arg)
+
+#define e752x_mc_printk(mci, level, fmt, arg...) \
+ edac_mc_chipset_printk(mci, level, "e752x", fmt, ##arg)
+
+#ifndef PCI_DEVICE_ID_INTEL_7520_0
+#define PCI_DEVICE_ID_INTEL_7520_0 0x3590
+#endif /* PCI_DEVICE_ID_INTEL_7520_0 */
+
+#ifndef PCI_DEVICE_ID_INTEL_7520_1_ERR
+#define PCI_DEVICE_ID_INTEL_7520_1_ERR 0x3591
+#endif /* PCI_DEVICE_ID_INTEL_7520_1_ERR */
+
+#ifndef PCI_DEVICE_ID_INTEL_7525_0
+#define PCI_DEVICE_ID_INTEL_7525_0 0x359E
+#endif /* PCI_DEVICE_ID_INTEL_7525_0 */
+
+#ifndef PCI_DEVICE_ID_INTEL_7525_1_ERR
+#define PCI_DEVICE_ID_INTEL_7525_1_ERR 0x3593
+#endif /* PCI_DEVICE_ID_INTEL_7525_1_ERR */
+
+#ifndef PCI_DEVICE_ID_INTEL_7320_0
+#define PCI_DEVICE_ID_INTEL_7320_0 0x3592
+#endif /* PCI_DEVICE_ID_INTEL_7320_0 */
+
+#ifndef PCI_DEVICE_ID_INTEL_7320_1_ERR
+#define PCI_DEVICE_ID_INTEL_7320_1_ERR 0x3593
+#endif /* PCI_DEVICE_ID_INTEL_7320_1_ERR */
+
+#ifndef PCI_DEVICE_ID_INTEL_3100_0
+#define PCI_DEVICE_ID_INTEL_3100_0 0x35B0
+#endif /* PCI_DEVICE_ID_INTEL_3100_0 */
+
+#ifndef PCI_DEVICE_ID_INTEL_3100_1_ERR
+#define PCI_DEVICE_ID_INTEL_3100_1_ERR 0x35B1
+#endif /* PCI_DEVICE_ID_INTEL_3100_1_ERR */
+
+#define E752X_NR_CSROWS 8 /* number of csrows */
+
+/* E752X register addresses - device 0 function 0 */
+#define E752X_DRB 0x60 /* DRAM row boundary register (8b) */
+#define E752X_DRA 0x70 /* DRAM row attribute register (8b) */
+ /*
+ * 31:30 Device width row 7
+ * 01=x8 10=x4 11=x8 DDR2
+ * 27:26 Device width row 6
+ * 23:22 Device width row 5
+ * 19:18 Device width row 4
+ * 15:14 Device width row 3
+ * 11:10 Device width row 2
+ * 7:6 Device width row 1
+ * 3:2 Device width row 0
+ */
+#define E752X_DRC 0x7C /* DRAM controller mode reg (32b) */
+ /* FIXME:IS THIS RIGHT? */
+ /*
+ * 22 Number channels 0=1,1=2
+ * 19:18 DRB Granularity 32/64MB
+ */
+#define E752X_DRM 0x80 /* Dimm mapping register */
+#define E752X_DDRCSR 0x9A /* DDR control and status reg (16b) */
+ /*
+ * 14:12 1 single A, 2 single B, 3 dual
+ */
+#define E752X_TOLM 0xC4 /* DRAM top of low memory reg (16b) */
+#define E752X_REMAPBASE 0xC6 /* DRAM remap base address reg (16b) */
+#define E752X_REMAPLIMIT 0xC8 /* DRAM remap limit address reg (16b) */
+#define E752X_REMAPOFFSET 0xCA /* DRAM remap limit offset reg (16b) */
+
+/* E752X register addresses - device 0 function 1 */
+#define E752X_FERR_GLOBAL 0x40 /* Global first error register (32b) */
+#define E752X_NERR_GLOBAL 0x44 /* Global next error register (32b) */
+#define E752X_HI_FERR 0x50 /* Hub interface first error reg (8b) */
+#define E752X_HI_NERR 0x52 /* Hub interface next error reg (8b) */
+#define E752X_HI_ERRMASK 0x54 /* Hub interface error mask reg (8b) */
+#define E752X_HI_SMICMD 0x5A /* Hub interface SMI command reg (8b) */
+#define E752X_SYSBUS_FERR 0x60 /* System bus first error reg (16b) */
+#define E752X_SYSBUS_NERR 0x62 /* System bus next error reg (16b) */
+#define E752X_SYSBUS_ERRMASK 0x64 /* System bus error mask reg (16b) */
+#define E752X_SYSBUS_SMICMD 0x6A /* System bus SMI command reg (16b) */
+#define E752X_BUF_FERR 0x70 /* Memory buffer first error reg (8b) */
+#define E752X_BUF_NERR 0x72 /* Memory buffer next error reg (8b) */
+#define E752X_BUF_ERRMASK 0x74 /* Memory buffer error mask reg (8b) */
+#define E752X_BUF_SMICMD 0x7A /* Memory buffer SMI cmd reg (8b) */
+#define E752X_DRAM_FERR 0x80 /* DRAM first error register (16b) */
+#define E752X_DRAM_NERR 0x82 /* DRAM next error register (16b) */
+#define E752X_DRAM_ERRMASK 0x84 /* DRAM error mask register (8b) */
+#define E752X_DRAM_SMICMD 0x8A /* DRAM SMI command register (8b) */
+#define E752X_DRAM_RETR_ADD 0xAC /* DRAM Retry address register (32b) */
+#define E752X_DRAM_SEC1_ADD 0xA0 /* DRAM first correctable memory */
+ /* error address register (32b) */
+ /*
+ * 31 Reserved
+ * 30:2 CE address (64 byte block 34:6)
+ * 1 Reserved
+ * 0 HiLoCS
+ */
+#define E752X_DRAM_SEC2_ADD 0xC8 /* DRAM second correctable memory */
+ /* error address register (32b) */
+ /*
+ * 31 Reserved
+ * 30:2 CE address (64 byte block 34:6)
+ * 1 Reserved
+ * 0 HiLoCS
+ */
+#define E752X_DRAM_DED_ADD 0xA4 /* DRAM first uncorrectable memory */
+ /* error address register (32b) */
+ /*
+ * 31 Reserved
+ * 30:2 UE address (64 byte block 34:6)
+ * 1 Reserved
+ * 0 HiLoCS
+ */
+#define E752X_DRAM_SCRB_ADD 0xA8 /* DRAM 1st uncorrectable scrub mem */
+ /* error address register (32b) */
+ /*
+ * 31 Reserved
+ * 30:2 UE address (64 byte block 34:6)
+ * 1 Reserved
+ * 0 HiLoCS
+ */
+#define E752X_DRAM_SEC1_SYNDROME 0xC4 /* DRAM first correctable memory */
+ /* error syndrome register (16b) */
+#define E752X_DRAM_SEC2_SYNDROME 0xC6 /* DRAM second correctable memory */
+ /* error syndrome register (16b) */
+#define E752X_DEVPRES1 0xF4 /* Device Present 1 register (8b) */
+
+/* 3100 IMCH specific register addresses - device 0 function 1 */
+#define I3100_NSI_FERR 0x48 /* NSI first error reg (32b) */
+#define I3100_NSI_NERR 0x4C /* NSI next error reg (32b) */
+#define I3100_NSI_SMICMD 0x54 /* NSI SMI command register (32b) */
+#define I3100_NSI_EMASK 0x90 /* NSI error mask register (32b) */
+
+/* ICH5R register addresses - device 30 function 0 */
+#define ICH5R_PCI_STAT 0x06 /* PCI status register (16b) */
+#define ICH5R_PCI_2ND_STAT 0x1E /* PCI status secondary reg (16b) */
+#define ICH5R_PCI_BRIDGE_CTL 0x3E /* PCI bridge control register (16b) */
+
+enum e752x_chips {
+ E7520 = 0,
+ E7525 = 1,
+ E7320 = 2,
+ I3100 = 3
+};
+
+struct e752x_pvt {
+ struct pci_dev *bridge_ck;
+ struct pci_dev *dev_d0f0;
+ struct pci_dev *dev_d0f1;
+ u32 tolm;
+ u32 remapbase;
+ u32 remaplimit;
+ int mc_symmetric;
+ u8 map[8];
+ int map_type;
+ const struct e752x_dev_info *dev_info;
+};
+
+struct e752x_dev_info {
+ u16 err_dev;
+ u16 ctl_dev;
+ const char *ctl_name;
+};
+
+struct e752x_error_info {
+ u32 ferr_global;
+ u32 nerr_global;
+ u32 nsi_ferr; /* 3100 only */
+ u32 nsi_nerr; /* 3100 only */
+ u8 hi_ferr; /* all but 3100 */
+ u8 hi_nerr; /* all but 3100 */
+ u16 sysbus_ferr;
+ u16 sysbus_nerr;
+ u8 buf_ferr;
+ u8 buf_nerr;
+ u16 dram_ferr;
+ u16 dram_nerr;
+ u32 dram_sec1_add;
+ u32 dram_sec2_add;
+ u16 dram_sec1_syndrome;
+ u16 dram_sec2_syndrome;
+ u32 dram_ded_add;
+ u32 dram_scrb_add;
+ u32 dram_retr_add;
+};
+
+static const struct e752x_dev_info e752x_devs[] = {
+ [E7520] = {
+ .err_dev = PCI_DEVICE_ID_INTEL_7520_1_ERR,
+ .ctl_dev = PCI_DEVICE_ID_INTEL_7520_0,
+ .ctl_name = "E7520"},
+ [E7525] = {
+ .err_dev = PCI_DEVICE_ID_INTEL_7525_1_ERR,
+ .ctl_dev = PCI_DEVICE_ID_INTEL_7525_0,
+ .ctl_name = "E7525"},
+ [E7320] = {
+ .err_dev = PCI_DEVICE_ID_INTEL_7320_1_ERR,
+ .ctl_dev = PCI_DEVICE_ID_INTEL_7320_0,
+ .ctl_name = "E7320"},
+ [I3100] = {
+ .err_dev = PCI_DEVICE_ID_INTEL_3100_1_ERR,
+ .ctl_dev = PCI_DEVICE_ID_INTEL_3100_0,
+ .ctl_name = "3100"},
+};
+
+static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci,
+ unsigned long page)
+{
+ u32 remap;
+ struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
+
+ debugf3("%s()\n", __func__);
+
+ if (page < pvt->tolm)
+ return page;
+
+ if ((page >= 0x100000) && (page < pvt->remapbase))
+ return page;
+
+ remap = (page - pvt->tolm) + pvt->remapbase;
+
+ if (remap < pvt->remaplimit)
+ return remap;
+
+ e752x_printk(KERN_ERR, "Invalid page %lx - out of range\n", page);
+ return pvt->tolm - 1;
+}
+
+static void do_process_ce(struct mem_ctl_info *mci, u16 error_one,
+ u32 sec1_add, u16 sec1_syndrome)
+{
+ u32 page;
+ int row;
+ int channel;
+ int i;
+ struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
+
+ debugf3("%s()\n", __func__);
+
+ /* convert the addr to 4k page */
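+ /* SEC1_ADD holds DRAM address bits 34:6 in register bits 30:2, so
+ * the register value is (addr >> 4); shifting right by
+ * (PAGE_SHIFT - 4) yields the 4KB page number, e.g. (illustrative)
+ * sec1_add = 0x00123450 -> addr 0x01234500 -> page 0x1234.
+ */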
+ page = sec1_add >> (PAGE_SHIFT - 4);
+
+ /* FIXME - check for -1 */
+ if (pvt->mc_symmetric) {
+ /* chip select are bits 14 & 13 */
+ row = ((page >> 1) & 3);
+ e752x_printk(KERN_WARNING,
+ "Test row %d Table %d %d %d %d %d %d %d %d\n", row,
+ pvt->map[0], pvt->map[1], pvt->map[2], pvt->map[3],
+ pvt->map[4], pvt->map[5], pvt->map[6],
+ pvt->map[7]);
+
+ /* test for channel remapping */
+ for (i = 0; i < 8; i++) {
+ if (pvt->map[i] == row)
+ break;
+ }
+
+ e752x_printk(KERN_WARNING, "Test computed row %d\n", i);
+
+ if (i < 8)
+ row = i;
+ else
+ e752x_mc_printk(mci, KERN_WARNING,
+ "row %d not found in remap table\n",
+ row);
+ } else
+ row = edac_mc_find_csrow_by_page(mci, page);
+
+ /* 0 = channel A, 1 = channel B */
+ channel = !(error_one & 1);
+
+ /* e752x mc reads 34:6 of the DRAM linear address */
+ edac_mc_handle_ce(mci, page, offset_in_page(sec1_add << 4),
+ sec1_syndrome, row, channel, "e752x CE");
+}
+
+static inline void process_ce(struct mem_ctl_info *mci, u16 error_one,
+ u32 sec1_add, u16 sec1_syndrome, int *error_found,
+ int handle_error)
+{
+ *error_found = 1;
+
+ if (handle_error)
+ do_process_ce(mci, error_one, sec1_add, sec1_syndrome);
+}
+
+static void do_process_ue(struct mem_ctl_info *mci, u16 error_one,
+ u32 ded_add, u32 scrb_add)
+{
+ u32 error_2b, block_page;
+ int row;
+ struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
+
+ debugf3("%s()\n", __func__);
+
+ if (error_one & 0x0202) {
+ error_2b = ded_add;
+
+ /* convert to 4k address */
+ block_page = error_2b >> (PAGE_SHIFT - 4);
+
+ row = pvt->mc_symmetric ?
+ /* chip select are bits 14 & 13 */
+ ((block_page >> 1) & 3) :
+ edac_mc_find_csrow_by_page(mci, block_page);
+
+ /* e752x mc reads 34:6 of the DRAM linear address */
+ edac_mc_handle_ue(mci, block_page,
+ offset_in_page(error_2b << 4),
+ row, "e752x UE from Read");
+ }
+ if (error_one & 0x0404) {
+ error_2b = scrb_add;
+
+ /* convert to 4k address */
+ block_page = error_2b >> (PAGE_SHIFT - 4);
+
+ row = pvt->mc_symmetric ?
+ /* chip select are bits 14 & 13 */
+ ((block_page >> 1) & 3) :
+ edac_mc_find_csrow_by_page(mci, block_page);
+
+ /* e752x mc reads 34:6 of the DRAM linear address */
+ edac_mc_handle_ue(mci, block_page,
+ offset_in_page(error_2b << 4),
+ row, "e752x UE from Scruber");
+ }
+}
+
+static inline void process_ue(struct mem_ctl_info *mci, u16 error_one,
+ u32 ded_add, u32 scrb_add, int *error_found,
+ int handle_error)
+{
+ *error_found = 1;
+
+ if (handle_error)
+ do_process_ue(mci, error_one, ded_add, scrb_add);
+}
+
+static inline void process_ue_no_info_wr(struct mem_ctl_info *mci,
+ int *error_found, int handle_error)
+{
+ *error_found = 1;
+
+ if (!handle_error)
+ return;
+
+ debugf3("%s()\n", __func__);
+ edac_mc_handle_ue_no_info(mci, "e752x UE log memory write");
+}
+
+static void do_process_ded_retry(struct mem_ctl_info *mci, u16 error,
+ u32 retry_add)
+{
+ u32 error_1b, page;
+ int row;
+ struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
+
+ error_1b = retry_add;
+ page = error_1b >> (PAGE_SHIFT - 4); /* convert the addr to 4k page */
+
+ /* chip select are bits 14 & 13 */
+ row = pvt->mc_symmetric ? ((page >> 1) & 3) :
+ edac_mc_find_csrow_by_page(mci, page);
+
+ e752x_mc_printk(mci, KERN_WARNING,
+ "CE page 0x%lx, row %d: Memory read retry\n",
+ (unsigned long)page, row);
+}
+
+static inline void process_ded_retry(struct mem_ctl_info *mci, u16 error,
+ u32 retry_add, int *error_found,
+ int handle_error)
+{
+ *error_found = 1;
+
+ if (handle_error)
+ do_process_ded_retry(mci, error, retry_add);
+}
+
+static inline void process_threshold_ce(struct mem_ctl_info *mci, u16 error,
+ int *error_found, int handle_error)
+{
+ *error_found = 1;
+
+ if (handle_error)
+ e752x_mc_printk(mci, KERN_WARNING, "Memory threshold CE\n");
+}
+
+static char *global_message[11] = {
+ "PCI Express C1",
+ "PCI Express C",
+ "PCI Express B1",
+ "PCI Express B",
+ "PCI Express A1",
+ "PCI Express A",
+ "DMA Controller",
+ "HUB or NS Interface",
+ "System Bus",
+ "DRAM Controller", /* 9th entry */
+ "Internal Buffer"
+};
+
+#define DRAM_ENTRY 9
+
+static char *fatal_message[2] = { "Non-Fatal ", "Fatal " };
+
+static void do_global_error(int fatal, u32 errors)
+{
+ int i;
+
+ for (i = 0; i < 11; i++) {
+ if (errors & (1 << i)) {
+ /* If the error is from DRAM Controller OR
+ * we are to report ALL errors, then
+ * report the error
+ */
+ if ((i == DRAM_ENTRY) || report_non_memory_errors)
+ e752x_printk(KERN_WARNING, "%sError %s\n",
+ fatal_message[fatal],
+ global_message[i]);
+ }
+ }
+}
+
+static inline void global_error(int fatal, u32 errors, int *error_found,
+ int handle_error)
+{
+ *error_found = 1;
+
+ if (handle_error)
+ do_global_error(fatal, errors);
+}
+
+static char *hub_message[7] = {
+ "HI Address or Command Parity", "HI Illegal Access",
+ "HI Internal Parity", "Out of Range Access",
+ "HI Data Parity", "Enhanced Config Access",
+ "Hub Interface Target Abort"
+};
+
+static void do_hub_error(int fatal, u8 errors)
+{
+ int i;
+
+ for (i = 0; i < 7; i++) {
+ if (errors & (1 << i))
+ e752x_printk(KERN_WARNING, "%sError %s\n",
+ fatal_message[fatal], hub_message[i]);
+ }
+}
+
+static inline void hub_error(int fatal, u8 errors, int *error_found,
+ int handle_error)
+{
+ *error_found = 1;
+
+ if (handle_error)
+ do_hub_error(fatal, errors);
+}
+
+#define NSI_FATAL_MASK 0x0c080081
+#define NSI_NON_FATAL_MASK 0x23a0ba64
+#define NSI_ERR_MASK (NSI_FATAL_MASK | NSI_NON_FATAL_MASK)
+
+static char *nsi_message[30] = {
+ "NSI Link Down", /* NSI_FERR/NSI_NERR bit 0, fatal error */
+ "", /* reserved */
+ "NSI Parity Error", /* bit 2, non-fatal */
+ "", /* reserved */
+ "", /* reserved */
+ "Correctable Error Message", /* bit 5, non-fatal */
+ "Non-Fatal Error Message", /* bit 6, non-fatal */
+ "Fatal Error Message", /* bit 7, fatal */
+ "", /* reserved */
+ "Receiver Error", /* bit 9, non-fatal */
+ "", /* reserved */
+ "Bad TLP", /* bit 11, non-fatal */
+ "Bad DLLP", /* bit 12, non-fatal */
+ "REPLAY_NUM Rollover", /* bit 13, non-fatal */
+ "", /* reserved */
+ "Replay Timer Timeout", /* bit 15, non-fatal */
+ "", /* reserved */
+ "", /* reserved */
+ "", /* reserved */
+ "Data Link Protocol Error", /* bit 19, fatal */
+ "", /* reserved */
+ "Poisoned TLP", /* bit 21, non-fatal */
+ "", /* reserved */
+ "Completion Timeout", /* bit 23, non-fatal */
+ "Completer Abort", /* bit 24, non-fatal */
+ "Unexpected Completion", /* bit 25, non-fatal */
+ "Receiver Overflow", /* bit 26, fatal */
+ "Malformed TLP", /* bit 27, fatal */
+ "", /* reserved */
+ "Unsupported Request" /* bit 29, non-fatal */
+};
+
+static void do_nsi_error(int fatal, u32 errors)
+{
+ int i;
+
+ for (i = 0; i < 30; i++) {
+ if (errors & (1 << i))
+ printk(KERN_WARNING "%sError %s\n",
+ fatal_message[fatal], nsi_message[i]);
+ }
+}
+
+static inline void nsi_error(int fatal, u32 errors, int *error_found,
+ int handle_error)
+{
+ *error_found = 1;
+
+ if (handle_error)
+ do_nsi_error(fatal, errors);
+}
+
+static char *membuf_message[4] = {
+ "Internal PMWB to DRAM parity",
+ "Internal PMWB to System Bus Parity",
+ "Internal System Bus or IO to PMWB Parity",
+ "Internal DRAM to PMWB Parity"
+};
+
+static void do_membuf_error(u8 errors)
+{
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ if (errors & (1 << i))
+ e752x_printk(KERN_WARNING, "Non-Fatal Error %s\n",
+ membuf_message[i]);
+ }
+}
+
+static inline void membuf_error(u8 errors, int *error_found, int handle_error)
+{
+ *error_found = 1;
+
+ if (handle_error)
+ do_membuf_error(errors);
+}
+
+static char *sysbus_message[10] = {
+ "Addr or Request Parity",
+ "Data Strobe Glitch",
+ "Addr Strobe Glitch",
+ "Data Parity",
+ "Addr Above TOM",
+ "Non DRAM Lock Error",
+ "MCERR", "BINIT",
+ "Memory Parity",
+ "IO Subsystem Parity"
+};
+
+static void do_sysbus_error(int fatal, u32 errors)
+{
+ int i;
+
+ for (i = 0; i < 10; i++) {
+ if (errors & (1 << i))
+ e752x_printk(KERN_WARNING, "%sError System Bus %s\n",
+ fatal_message[fatal], sysbus_message[i]);
+ }
+}
+
+static inline void sysbus_error(int fatal, u32 errors, int *error_found,
+ int handle_error)
+{
+ *error_found = 1;
+
+ if (handle_error)
+ do_sysbus_error(fatal, errors);
+}
+
+static void e752x_check_hub_interface(struct e752x_error_info *info,
+ int *error_found, int handle_error)
+{
+ u8 stat8;
+
+ //pci_read_config_byte(dev,E752X_HI_FERR,&stat8);
+
+ stat8 = info->hi_ferr;
+
+ if (stat8 & 0x7f) { /* Error, so process */
+ stat8 &= 0x7f;
+
+ if (stat8 & 0x2b)
+ hub_error(1, stat8 & 0x2b, error_found, handle_error);
+
+ if (stat8 & 0x54)
+ hub_error(0, stat8 & 0x54, error_found, handle_error);
+ }
+ //pci_read_config_byte(dev,E752X_HI_NERR,&stat8);
+
+ stat8 = info->hi_nerr;
+
+ if (stat8 & 0x7f) { /* Error, so process */
+ stat8 &= 0x7f;
+
+ if (stat8 & 0x2b)
+ hub_error(1, stat8 & 0x2b, error_found, handle_error);
+
+ if (stat8 & 0x54)
+ hub_error(0, stat8 & 0x54, error_found, handle_error);
+ }
+}
+
+static void e752x_check_ns_interface(struct e752x_error_info *info,
+ int *error_found, int handle_error)
+{
+ u32 stat32;
+
+ stat32 = info->nsi_ferr;
+ if (stat32 & NSI_ERR_MASK) { /* Error, so process */
+ if (stat32 & NSI_FATAL_MASK) /* check for fatal errors */
+ nsi_error(1, stat32 & NSI_FATAL_MASK, error_found,
+ handle_error);
+ if (stat32 & NSI_NON_FATAL_MASK) /* check for non-fatal ones */
+ nsi_error(0, stat32 & NSI_NON_FATAL_MASK, error_found,
+ handle_error);
+ }
+ stat32 = info->nsi_nerr;
+ if (stat32 & NSI_ERR_MASK) {
+ if (stat32 & NSI_FATAL_MASK)
+ nsi_error(1, stat32 & NSI_FATAL_MASK, error_found,
+ handle_error);
+ if (stat32 & NSI_NON_FATAL_MASK)
+ nsi_error(0, stat32 & NSI_NON_FATAL_MASK, error_found,
+ handle_error);
+ }
+}
+
+static void e752x_check_sysbus(struct e752x_error_info *info,
+ int *error_found, int handle_error)
+{
+ u32 stat32, error32;
+
+ //pci_read_config_dword(dev,E752X_SYSBUS_FERR,&stat32);
+ stat32 = info->sysbus_ferr + (info->sysbus_nerr << 16);
+
+ if (stat32 == 0)
+ return; /* no errors */
+
+ error32 = (stat32 >> 16) & 0x3ff;
+ stat32 = stat32 & 0x3ff;
+
+ if (stat32 & 0x087)
+ sysbus_error(1, stat32 & 0x087, error_found, handle_error);
+
+ if (stat32 & 0x378)
+ sysbus_error(0, stat32 & 0x378, error_found, handle_error);
+
+ if (error32 & 0x087)
+ sysbus_error(1, error32 & 0x087, error_found, handle_error);
+
+ if (error32 & 0x378)
+ sysbus_error(0, error32 & 0x378, error_found, handle_error);
+}
+
+static void e752x_check_membuf(struct e752x_error_info *info,
+ int *error_found, int handle_error)
+{
+ u8 stat8;
+
+ stat8 = info->buf_ferr;
+
+ if (stat8 & 0x0f) { /* Error, so process */
+ stat8 &= 0x0f;
+ membuf_error(stat8, error_found, handle_error);
+ }
+
+ stat8 = info->buf_nerr;
+
+ if (stat8 & 0x0f) { /* Error, so process */
+ stat8 &= 0x0f;
+ membuf_error(stat8, error_found, handle_error);
+ }
+}
+
+static void e752x_check_dram(struct mem_ctl_info *mci,
+ struct e752x_error_info *info, int *error_found,
+ int handle_error)
+{
+ u16 error_one, error_next;
+
+ error_one = info->dram_ferr;
+ error_next = info->dram_nerr;
+
+ /* decode and report errors */
+ if (error_one & 0x0101) /* check first error correctable */
+ process_ce(mci, error_one, info->dram_sec1_add,
+ info->dram_sec1_syndrome, error_found, handle_error);
+
+ if (error_next & 0x0101) /* check next error correctable */
+ process_ce(mci, error_next, info->dram_sec2_add,
+ info->dram_sec2_syndrome, error_found, handle_error);
+
+ if (error_one & 0x4040)
+ process_ue_no_info_wr(mci, error_found, handle_error);
+
+ if (error_next & 0x4040)
+ process_ue_no_info_wr(mci, error_found, handle_error);
+
+ if (error_one & 0x2020)
+ process_ded_retry(mci, error_one, info->dram_retr_add,
+ error_found, handle_error);
+
+ if (error_next & 0x2020)
+ process_ded_retry(mci, error_next, info->dram_retr_add,
+ error_found, handle_error);
+
+ if (error_one & 0x0808)
+ process_threshold_ce(mci, error_one, error_found, handle_error);
+
+ if (error_next & 0x0808)
+ process_threshold_ce(mci, error_next, error_found,
+ handle_error);
+
+ if (error_one & 0x0606)
+ process_ue(mci, error_one, info->dram_ded_add,
+ info->dram_scrb_add, error_found, handle_error);
+
+ if (error_next & 0x0606)
+ process_ue(mci, error_next, info->dram_ded_add,
+ info->dram_scrb_add, error_found, handle_error);
+}
+
+static void e752x_get_error_info(struct mem_ctl_info *mci,
+ struct e752x_error_info *info)
+{
+ struct pci_dev *dev;
+ struct e752x_pvt *pvt;
+
+ memset(info, 0, sizeof(*info));
+ pvt = (struct e752x_pvt *)mci->pvt_info;
+ dev = pvt->dev_d0f1;
+ pci_read_config_dword(dev, E752X_FERR_GLOBAL, &info->ferr_global);
+
+ if (info->ferr_global) {
+ if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) {
+ pci_read_config_dword(dev, I3100_NSI_FERR,
+ &info->nsi_ferr);
+ info->hi_ferr = 0;
+ } else {
+ pci_read_config_byte(dev, E752X_HI_FERR,
+ &info->hi_ferr);
+ info->nsi_ferr = 0;
+ }
+ pci_read_config_word(dev, E752X_SYSBUS_FERR,
+ &info->sysbus_ferr);
+ pci_read_config_byte(dev, E752X_BUF_FERR, &info->buf_ferr);
+ pci_read_config_word(dev, E752X_DRAM_FERR, &info->dram_ferr);
+ pci_read_config_dword(dev, E752X_DRAM_SEC1_ADD,
+ &info->dram_sec1_add);
+ pci_read_config_word(dev, E752X_DRAM_SEC1_SYNDROME,
+ &info->dram_sec1_syndrome);
+ pci_read_config_dword(dev, E752X_DRAM_DED_ADD,
+ &info->dram_ded_add);
+ pci_read_config_dword(dev, E752X_DRAM_SCRB_ADD,
+ &info->dram_scrb_add);
+ pci_read_config_dword(dev, E752X_DRAM_RETR_ADD,
+ &info->dram_retr_add);
+
+ /* ignore the reserved bits just in case */
+ if (info->hi_ferr & 0x7f)
+ pci_write_config_byte(dev, E752X_HI_FERR,
+ info->hi_ferr);
+
+ if (info->nsi_ferr & NSI_ERR_MASK)
+ pci_write_config_dword(dev, I3100_NSI_FERR,
+ info->nsi_ferr);
+
+ if (info->sysbus_ferr)
+ pci_write_config_word(dev, E752X_SYSBUS_FERR,
+ info->sysbus_ferr);
+
+ if (info->buf_ferr & 0x0f)
+ pci_write_config_byte(dev, E752X_BUF_FERR,
+ info->buf_ferr);
+
+ if (info->dram_ferr)
+ pci_write_bits16(pvt->bridge_ck, E752X_DRAM_FERR,
+ info->dram_ferr, info->dram_ferr);
+
+ pci_write_config_dword(dev, E752X_FERR_GLOBAL,
+ info->ferr_global);
+ }
+
+ pci_read_config_dword(dev, E752X_NERR_GLOBAL, &info->nerr_global);
+
+ if (info->nerr_global) {
+ if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) {
+ pci_read_config_dword(dev, I3100_NSI_NERR,
+ &info->nsi_nerr);
+ info->hi_nerr = 0;
+ } else {
+ pci_read_config_byte(dev, E752X_HI_NERR,
+ &info->hi_nerr);
+ info->nsi_nerr = 0;
+ }
+ pci_read_config_word(dev, E752X_SYSBUS_NERR,
+ &info->sysbus_nerr);
+ pci_read_config_byte(dev, E752X_BUF_NERR, &info->buf_nerr);
+ pci_read_config_word(dev, E752X_DRAM_NERR, &info->dram_nerr);
+ pci_read_config_dword(dev, E752X_DRAM_SEC2_ADD,
+ &info->dram_sec2_add);
+ pci_read_config_word(dev, E752X_DRAM_SEC2_SYNDROME,
+ &info->dram_sec2_syndrome);
+
+ if (info->hi_nerr & 0x7f)
+ pci_write_config_byte(dev, E752X_HI_NERR,
+ info->hi_nerr);
+
+ if (info->nsi_nerr & NSI_ERR_MASK)
+ pci_write_config_dword(dev, I3100_NSI_NERR,
+ info->nsi_nerr);
+
+ if (info->sysbus_nerr)
+ pci_write_config_word(dev, E752X_SYSBUS_NERR,
+ info->sysbus_nerr);
+
+ if (info->buf_nerr & 0x0f)
+ pci_write_config_byte(dev, E752X_BUF_NERR,
+ info->buf_nerr);
+
+ if (info->dram_nerr)
+ pci_write_bits16(pvt->bridge_ck, E752X_DRAM_NERR,
+ info->dram_nerr, info->dram_nerr);
+
+ pci_write_config_dword(dev, E752X_NERR_GLOBAL,
+ info->nerr_global);
+ }
+}
+
+static int e752x_process_error_info(struct mem_ctl_info *mci,
+ struct e752x_error_info *info,
+ int handle_errors)
+{
+ u32 error32, stat32;
+ int error_found;
+
+ error_found = 0;
+ error32 = (info->ferr_global >> 18) & 0x3ff;
+ stat32 = (info->ferr_global >> 4) & 0x7ff;
+
+ if (error32)
+ global_error(1, error32, &error_found, handle_errors);
+
+ if (stat32)
+ global_error(0, stat32, &error_found, handle_errors);
+
+ error32 = (info->nerr_global >> 18) & 0x3ff;
+ stat32 = (info->nerr_global >> 4) & 0x7ff;
+
+ if (error32)
+ global_error(1, error32, &error_found, handle_errors);
+
+ if (stat32)
+ global_error(0, stat32, &error_found, handle_errors);
+
+ e752x_check_hub_interface(info, &error_found, handle_errors);
+ e752x_check_ns_interface(info, &error_found, handle_errors);
+ e752x_check_sysbus(info, &error_found, handle_errors);
+ e752x_check_membuf(info, &error_found, handle_errors);
+ e752x_check_dram(mci, info, &error_found, handle_errors);
+ return error_found;
+}
+
+static void e752x_check(struct mem_ctl_info *mci)
+{
+ struct e752x_error_info info;
+
+ debugf3("%s()\n", __func__);
+ e752x_get_error_info(mci, &info);
+ e752x_process_error_info(mci, &info, 1);
+}
+
+/* Return 1 if dual channel mode is active. Else return 0. */
+static inline int dual_channel_active(u16 ddrcsr)
+{
+ return (((ddrcsr >> 12) & 3) == 3);
+}
+
+/* Remap csrow index numbers if map_type is "reverse"
+ */
+static inline int remap_csrow_index(struct mem_ctl_info *mci, int index)
+{
+ struct e752x_pvt *pvt = mci->pvt_info;
+
+ if (!pvt->map_type)
+ return (7 - index);
+
+ return (index);
+}
+
+static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
+ u16 ddrcsr)
+{
+ struct csrow_info *csrow;
+ unsigned long last_cumul_size;
+ int index, mem_dev, drc_chan;
+ int drc_drbg; /* DRB granularity: 1=64MB, 2=128MB */
+ int drc_ddim; /* DRAM Data Integrity Mode 0=none, 2=edac */
+ u8 value;
+ u32 dra, drc, cumul_size;
+
+ dra = 0;
+ for (index = 0; index < 4; index++) {
+ u8 dra_reg;
+ pci_read_config_byte(pdev, E752X_DRA + index, &dra_reg);
+ dra |= dra_reg << (index * 8);
+ }
+ pci_read_config_dword(pdev, E752X_DRC, &drc);
+ drc_chan = dual_channel_active(ddrcsr);
+ drc_drbg = drc_chan + 1; /* 128 in dual mode, 64 in single */
+ drc_ddim = (drc >> 20) & 0x3;
+
+ /* The dram row boundary (DRB) reg values are boundary address for
+ * each DRAM row with a granularity of 64 or 128MB (single/dual
+ * channel operation). DRB regs are cumulative; therefore DRB7 will
+ * contain the total memory contained in all eight rows.
+ */
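+ /* Illustrative example: in dual-channel mode (128MB granularity),
+ * cumulative DRB values 2, 4, 4, 8 describe a 256MB row 0, a 256MB
+ * row 1, an unpopulated row 2 and a 512MB row 3.
+ */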
+ for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) {
+ /* mem_dev 0=x8, 1=x4 */
+ mem_dev = (dra >> (index * 4 + 2)) & 0x3;
+ csrow = &mci->csrows[remap_csrow_index(mci, index)];
+
+ mem_dev = (mem_dev == 2);
+ pci_read_config_byte(pdev, E752X_DRB + index, &value);
+ /* convert a 128 or 64 MiB DRB to a page size. */
+ cumul_size = value << (25 + drc_drbg - PAGE_SHIFT);
+ debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
+ cumul_size);
+ if (cumul_size == last_cumul_size)
+ continue; /* not populated */
+
+ csrow->first_page = last_cumul_size;
+ csrow->last_page = cumul_size - 1;
+ csrow->nr_pages = cumul_size - last_cumul_size;
+ last_cumul_size = cumul_size;
+ csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */
+ csrow->mtype = MEM_RDDR; /* only one type supported */
+ csrow->dtype = mem_dev ? DEV_X4 : DEV_X8;
+
+ /*
+ * if single channel or x8 devices then SECDED
+ * if dual channel and x4 then S4ECD4ED
+ */
+ if (drc_ddim) {
+ if (drc_chan && mem_dev) {
+ csrow->edac_mode = EDAC_S4ECD4ED;
+ mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
+ } else {
+ csrow->edac_mode = EDAC_SECDED;
+ mci->edac_cap |= EDAC_FLAG_SECDED;
+ }
+ } else
+ csrow->edac_mode = EDAC_NONE;
+ }
+}
+
+static void e752x_init_mem_map_table(struct pci_dev *pdev,
+ struct e752x_pvt *pvt)
+{
+ int index;
+ u8 value, last, row;
+
+ last = 0;
+ row = 0;
+
+ for (index = 0; index < 8; index += 2) {
+ pci_read_config_byte(pdev, E752X_DRB + index, &value);
+ /* test if there is a dimm in this slot */
+ if (value == last) {
+ /* no dimm in the slot, so flag it as empty */
+ pvt->map[index] = 0xff;
+ pvt->map[index + 1] = 0xff;
+ } else { /* there is a dimm in the slot */
+ pvt->map[index] = row;
+ row++;
+ last = value;
+ /* test the next value to see if the dimm is double
+ * sided
+ */
+ pci_read_config_byte(pdev, E752X_DRB + index + 1,
+ &value);
+
+ /* if the next DRB value matches the last one, the dimm
+ * is single sided, so flag the second side as empty;
+ * otherwise it is double sided and the next row number
+ * is saved
+ */
+ pvt->map[index + 1] = (value == last) ? 0xff : row;
+ row++;
+ last = value;
+ }
+ }
+}
+
+/* Return 0 on success or 1 on failure. */
+static int e752x_get_devs(struct pci_dev *pdev, int dev_idx,
+ struct e752x_pvt *pvt)
+{
+ struct pci_dev *dev;
+
+ pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
+ pvt->dev_info->err_dev, pvt->bridge_ck);
+
+ if (pvt->bridge_ck == NULL)
+ pvt->bridge_ck = pci_scan_single_device(pdev->bus,
+ PCI_DEVFN(0, 1));
+
+ if (pvt->bridge_ck == NULL) {
+ e752x_printk(KERN_ERR, "error reporting device not found:"
+ "vendor %x device 0x%x (broken BIOS?)\n",
+ PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].err_dev);
+ return 1;
+ }
+
+ dev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ e752x_devs[dev_idx].ctl_dev,
+ NULL);
+
+ if (dev == NULL)
+ goto fail;
+
+ pvt->dev_d0f0 = dev;
+ pvt->dev_d0f1 = pci_dev_get(pvt->bridge_ck);
+
+ return 0;
+
+fail:
+ pci_dev_put(pvt->bridge_ck);
+ return 1;
+}
+
+/* Setup system bus parity mask register.
+ * Sysbus parity supported on:
+ * e7320/e7520/e7525 + Xeon
+ * i3100 + Xeon/Celeron
+ * Sysbus parity not supported on:
+ * i3100 + Pentium M/Celeron M/Core Duo/Core2 Duo
+ */
+static void e752x_init_sysbus_parity_mask(struct e752x_pvt *pvt)
+{
+ char *cpu_id = cpu_data(0).x86_model_id;
+ struct pci_dev *dev = pvt->dev_d0f1;
+ int enable = 1;
+
+ /* Allow module parameter override, else see if CPU supports parity */
+ if (sysbus_parity != -1) {
+ enable = sysbus_parity;
+ } else if (cpu_id[0] &&
+ ((strstr(cpu_id, "Pentium") && strstr(cpu_id, " M ")) ||
+ (strstr(cpu_id, "Celeron") && strstr(cpu_id, " M ")) ||
+ (strstr(cpu_id, "Core") && strstr(cpu_id, "Duo")))) {
+ e752x_printk(KERN_INFO, "System Bus Parity not "
+ "supported by CPU, disabling\n");
+ enable = 0;
+ }
+
+ if (enable)
+ pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x0000);
+ else
+ pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x0309);
+}
+
+static void e752x_init_error_reporting_regs(struct e752x_pvt *pvt)
+{
+ struct pci_dev *dev;
+
+ dev = pvt->dev_d0f1;
+ /* Turn off error disable & SMI in case the BIOS turned it on */
+ if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) {
+ pci_write_config_dword(dev, I3100_NSI_EMASK, 0);
+ pci_write_config_dword(dev, I3100_NSI_SMICMD, 0);
+ } else {
+ pci_write_config_byte(dev, E752X_HI_ERRMASK, 0x00);
+ pci_write_config_byte(dev, E752X_HI_SMICMD, 0x00);
+ }
+
+ e752x_init_sysbus_parity_mask(pvt);
+
+ pci_write_config_word(dev, E752X_SYSBUS_SMICMD, 0x00);
+ pci_write_config_byte(dev, E752X_BUF_ERRMASK, 0x00);
+ pci_write_config_byte(dev, E752X_BUF_SMICMD, 0x00);
+ pci_write_config_byte(dev, E752X_DRAM_ERRMASK, 0x00);
+ pci_write_config_byte(dev, E752X_DRAM_SMICMD, 0x00);
+}
+
+static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
+{
+ u16 pci_data;
+ u8 stat8;
+ struct mem_ctl_info *mci;
+ struct e752x_pvt *pvt;
+ u16 ddrcsr;
+ int drc_chan; /* Number of channels 0=1chan,1=2chan */
+ struct e752x_error_info discard;
+
+ debugf0("%s(): mci\n", __func__);
+ debugf0("Starting Probe1\n");
+
+ /* check to see if device 0 function 1 is enabled; if it isn't, we
+ * assume the BIOS has reserved it for a reason and is expecting
+ * exclusive access. We take care not to violate that assumption and
+ * fail the probe. */
+ pci_read_config_byte(pdev, E752X_DEVPRES1, &stat8);
+ if (!force_function_unhide && !(stat8 & (1 << 5))) {
+ printk(KERN_INFO "Contact your BIOS vendor to see if the "
+ "E752x error registers can be safely un-hidden\n");
+ return -ENODEV;
+ }
+ stat8 |= (1 << 5);
+ pci_write_config_byte(pdev, E752X_DEVPRES1, stat8);
+
+ pci_read_config_word(pdev, E752X_DDRCSR, &ddrcsr);
+ /* FIXME: should check >>12 or 0xf, true for all? */
+ /* Dual channel = 1, Single channel = 0 */
+ drc_chan = dual_channel_active(ddrcsr);
+
+ mci = edac_mc_alloc(sizeof(*pvt), E752X_NR_CSROWS, drc_chan + 1, 0);
+
+ if (mci == NULL) {
+ return -ENOMEM;
+ }
+
+ debugf3("%s(): init mci\n", __func__);
+ mci->mtype_cap = MEM_FLAG_RDDR;
+ /* 3100 IMCH supports SECDED only */
+ mci->edac_ctl_cap = (dev_idx == I3100) ? EDAC_FLAG_SECDED :
+ (EDAC_FLAG_NONE | EDAC_FLAG_SECDED | EDAC_FLAG_S4ECD4ED);
+ /* FIXME - what if different memory types are in different csrows? */
+ mci->mod_name = EDAC_MOD_STR;
+ mci->mod_ver = E752X_REVISION;
+ mci->dev = &pdev->dev;
+
+ debugf3("%s(): init pvt\n", __func__);
+ pvt = (struct e752x_pvt *)mci->pvt_info;
+ pvt->dev_info = &e752x_devs[dev_idx];
+ pvt->mc_symmetric = ((ddrcsr & 0x10) != 0);
+
+ if (e752x_get_devs(pdev, dev_idx, pvt)) {
+ edac_mc_free(mci);
+ return -ENODEV;
+ }
+
+ debugf3("%s(): more mci init\n", __func__);
+ mci->ctl_name = pvt->dev_info->ctl_name;
+ mci->dev_name = pci_name(pdev);
+ mci->edac_check = e752x_check;
+ mci->ctl_page_to_phys = ctl_page_to_phys;
+
+ /* set the map type. 1 = normal, 0 = reversed
+ * Must be set before e752x_init_csrows in case csrow mapping
+ * is reversed.
+ */
+ pci_read_config_byte(pdev, E752X_DRM, &stat8);
+ pvt->map_type = ((stat8 & 0x0f) > ((stat8 >> 4) & 0x0f));
+
+ e752x_init_csrows(mci, pdev, ddrcsr);
+ e752x_init_mem_map_table(pdev, pvt);
+
+ if (dev_idx == I3100)
+ mci->edac_cap = EDAC_FLAG_SECDED; /* the only mode supported */
+ else
+ mci->edac_cap |= EDAC_FLAG_NONE;
+ debugf3("%s(): tolm, remapbase, remaplimit\n", __func__);
+
+ /* load the top of low memory, remap base, and remap limit vars */
+ pci_read_config_word(pdev, E752X_TOLM, &pci_data);
+ pvt->tolm = ((u32) pci_data) << 4;
+ pci_read_config_word(pdev, E752X_REMAPBASE, &pci_data);
+ pvt->remapbase = ((u32) pci_data) << 14;
+ pci_read_config_word(pdev, E752X_REMAPLIMIT, &pci_data);
+ pvt->remaplimit = ((u32) pci_data) << 14;
+ e752x_printk(KERN_INFO,
+ "tolm = %x, remapbase = %x, remaplimit = %x\n",
+ pvt->tolm, pvt->remapbase, pvt->remaplimit);
+
+ /* Here we assume that we will never see multiple instances of this
+ * type of memory controller. The ID is therefore hardcoded to 0.
+ */
+ if (edac_mc_add_mc(mci)) {
+ debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
+ goto fail;
+ }
+
+ e752x_init_error_reporting_regs(pvt);
+ e752x_get_error_info(mci, &discard); /* clear other MCH errors */
+
+ /* allocating generic PCI control info */
+ e752x_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
+ if (!e752x_pci) {
+ printk(KERN_WARNING
+ "%s(): Unable to create PCI control\n", __func__);
+ printk(KERN_WARNING
+ "%s(): PCI error report via EDAC not setup\n",
+ __func__);
+ }
+
+ /* get this far and it's successful */
+ debugf3("%s(): success\n", __func__);
+ return 0;
+
+fail:
+ pci_dev_put(pvt->dev_d0f0);
+ pci_dev_put(pvt->dev_d0f1);
+ pci_dev_put(pvt->bridge_ck);
+ edac_mc_free(mci);
+
+ return -ENODEV;
+}
+
+/* returns 0 on success, or negative on error */
+static int __devinit e752x_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ debugf0("%s()\n", __func__);
+
+ /* wake up and enable device */
+ if (pci_enable_device(pdev) < 0)
+ return -EIO;
+
+ return e752x_probe1(pdev, ent->driver_data);
+}
+
+static void __devexit e752x_remove_one(struct pci_dev *pdev)
+{
+ struct mem_ctl_info *mci;
+ struct e752x_pvt *pvt;
+
+ debugf0("%s()\n", __func__);
+
+ if (e752x_pci)
+ edac_pci_release_generic_ctl(e752x_pci);
+
+ if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
+ return;
+
+ pvt = (struct e752x_pvt *)mci->pvt_info;
+ pci_dev_put(pvt->dev_d0f0);
+ pci_dev_put(pvt->dev_d0f1);
+ pci_dev_put(pvt->bridge_ck);
+ edac_mc_free(mci);
+}
+
+static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
+ {
+ PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ E7520},
+ {
+ PCI_VEND_DEV(INTEL, 7525_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ E7525},
+ {
+ PCI_VEND_DEV(INTEL, 7320_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ E7320},
+ {
+ PCI_VEND_DEV(INTEL, 3100_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ I3100},
+ {
+ 0,
+ } /* 0 terminated list. */
+};
+
+MODULE_DEVICE_TABLE(pci, e752x_pci_tbl);
+
+static struct pci_driver e752x_driver = {
+ .name = EDAC_MOD_STR,
+ .probe = e752x_init_one,
+ .remove = __devexit_p(e752x_remove_one),
+ .id_table = e752x_pci_tbl,
+};
+
+static int __init e752x_init(void)
+{
+ int pci_rc;
+
+ debugf3("%s()\n", __func__);
+
+ /* Ensure that the OPSTATE is set correctly for POLL or NMI */
+ opstate_init();
+
+ pci_rc = pci_register_driver(&e752x_driver);
+ return (pci_rc < 0) ? pci_rc : 0;
+}
+
+static void __exit e752x_exit(void)
+{
+ debugf3("%s()\n", __func__);
+ pci_unregister_driver(&e752x_driver);
+}
+
+module_init(e752x_init);
+module_exit(e752x_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Linux Networx (http://lnxi.com) Tom Zimmerman\n");
+MODULE_DESCRIPTION("MC support for Intel e752x/3100 memory controllers");
+
+module_param(force_function_unhide, int, 0444);
+MODULE_PARM_DESC(force_function_unhide, "if BIOS sets Dev0:Fun1 up as hidden:"
+ " 1=force unhide and hope BIOS doesn't fight driver for "
+ "Dev0:Fun1 access");
+
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
+
+module_param(sysbus_parity, int, 0444);
+MODULE_PARM_DESC(sysbus_parity, "0=disable system bus parity checking,"
+ " 1=enable system bus parity checking, default=auto-detect");
+module_param(report_non_memory_errors, int, 0644);
+MODULE_PARM_DESC(report_non_memory_errors, "0=disable non-memory error "
+ "reporting, 1=enable non-memory error reporting");
diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
new file mode 100644
index 0000000..c7d11cc
--- /dev/null
+++ b/drivers/edac/e7xxx_edac.c
@@ -0,0 +1,577 @@
+/*
+ * Intel e7xxx Memory Controller kernel module
+ * (C) 2003 Linux Networx (http://lnxi.com)
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * See "enum e7xxx_chips" below for supported chipsets
+ *
+ * Written by Thayne Harbaugh
+ * Based on work by Dan Hollis <goemon at anime dot net> and others.
+ * http://www.anime.net/~goemon/linux-ecc/
+ *
+ * Contributors:
+ * Eric Biederman (Linux Networx)
+ * Tom Zimmerman (Linux Networx)
+ * Jim Garlick (Lawrence Livermore National Labs)
+ * Dave Peterson (Lawrence Livermore National Labs)
+ * That One Guy (Some other place)
+ * Wang Zhenyu (intel.com)
+ *
+ * $Id: edac_e7xxx.c,v 1.5.2.9 2005/10/05 00:43:44 dsp_llnl Exp $
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+#include <linux/edac.h>
+#include "edac_core.h"
+
+#define E7XXX_REVISION " Ver: 2.0.2 " __DATE__
+#define EDAC_MOD_STR "e7xxx_edac"
+
+#define e7xxx_printk(level, fmt, arg...) \
+ edac_printk(level, "e7xxx", fmt, ##arg)
+
+#define e7xxx_mc_printk(mci, level, fmt, arg...) \
+ edac_mc_chipset_printk(mci, level, "e7xxx", fmt, ##arg)
+
+#ifndef PCI_DEVICE_ID_INTEL_7205_0
+#define PCI_DEVICE_ID_INTEL_7205_0 0x255d
+#endif /* PCI_DEVICE_ID_INTEL_7205_0 */
+
+#ifndef PCI_DEVICE_ID_INTEL_7205_1_ERR
+#define PCI_DEVICE_ID_INTEL_7205_1_ERR 0x2551
+#endif /* PCI_DEVICE_ID_INTEL_7205_1_ERR */
+
+#ifndef PCI_DEVICE_ID_INTEL_7500_0
+#define PCI_DEVICE_ID_INTEL_7500_0 0x2540
+#endif /* PCI_DEVICE_ID_INTEL_7500_0 */
+
+#ifndef PCI_DEVICE_ID_INTEL_7500_1_ERR
+#define PCI_DEVICE_ID_INTEL_7500_1_ERR 0x2541
+#endif /* PCI_DEVICE_ID_INTEL_7500_1_ERR */
+
+#ifndef PCI_DEVICE_ID_INTEL_7501_0
+#define PCI_DEVICE_ID_INTEL_7501_0 0x254c
+#endif /* PCI_DEVICE_ID_INTEL_7501_0 */
+
+#ifndef PCI_DEVICE_ID_INTEL_7501_1_ERR
+#define PCI_DEVICE_ID_INTEL_7501_1_ERR 0x2541
+#endif /* PCI_DEVICE_ID_INTEL_7501_1_ERR */
+
+#ifndef PCI_DEVICE_ID_INTEL_7505_0
+#define PCI_DEVICE_ID_INTEL_7505_0 0x2550
+#endif /* PCI_DEVICE_ID_INTEL_7505_0 */
+
+#ifndef PCI_DEVICE_ID_INTEL_7505_1_ERR
+#define PCI_DEVICE_ID_INTEL_7505_1_ERR 0x2551
+#endif /* PCI_DEVICE_ID_INTEL_7505_1_ERR */
+
+#define E7XXX_NR_CSROWS 8 /* number of csrows */
+#define E7XXX_NR_DIMMS 8 /* FIXME - is this correct? */
+
+/* E7XXX register addresses - device 0 function 0 */
+#define E7XXX_DRB 0x60 /* DRAM row boundary register (8b) */
+#define E7XXX_DRA 0x70 /* DRAM row attribute register (8b) */
+ /*
+ * 31 Device width row 7 0=x8 1=x4
+ * 27 Device width row 6
+ * 23 Device width row 5
+ * 19 Device width row 4
+ * 15 Device width row 3
+ * 11 Device width row 2
+ * 7 Device width row 1
+ * 3 Device width row 0
+ */
+#define E7XXX_DRC 0x7C /* DRAM controller mode reg (32b) */
+ /*
+ * 22 Number channels 0=1,1=2
+ * 19:18 DRB Granularity 32/64MB
+ */
+#define E7XXX_TOLM 0xC4 /* DRAM top of low memory reg (16b) */
+#define E7XXX_REMAPBASE 0xC6 /* DRAM remap base address reg (16b) */
+#define E7XXX_REMAPLIMIT 0xC8 /* DRAM remap limit address reg (16b) */
+
+/* E7XXX register addresses - device 0 function 1 */
+#define E7XXX_DRAM_FERR 0x80 /* DRAM first error register (8b) */
+#define E7XXX_DRAM_NERR 0x82 /* DRAM next error register (8b) */
+#define E7XXX_DRAM_CELOG_ADD 0xA0 /* DRAM first correctable memory */
+ /* error address register (32b) */
+ /*
+ * 31:28 Reserved
+ * 27:6 CE address (4k block 33:12)
+ * 5:0 Reserved
+ */
+#define E7XXX_DRAM_UELOG_ADD 0xB0 /* DRAM first uncorrectable memory */
+ /* error address register (32b) */
+ /*
+ * 31:28 Reserved
+ * 27:6 UE address (4k block 33:12)
+ * 5:0 Reserved
+ */
+#define E7XXX_DRAM_CELOG_SYNDROME 0xD0 /* DRAM first correctable memory */
+ /* error syndrome register (16b) */
+
+enum e7xxx_chips {
+ E7500 = 0,
+ E7501,
+ E7505,
+ E7205,
+};
+
+struct e7xxx_pvt {
+ struct pci_dev *bridge_ck;
+ u32 tolm;
+ u32 remapbase;
+ u32 remaplimit;
+ const struct e7xxx_dev_info *dev_info;
+};
+
+struct e7xxx_dev_info {
+ u16 err_dev;
+ const char *ctl_name;
+};
+
+struct e7xxx_error_info {
+ u8 dram_ferr;
+ u8 dram_nerr;
+ u32 dram_celog_add;
+ u16 dram_celog_syndrome;
+ u32 dram_uelog_add;
+};
+
+static struct edac_pci_ctl_info *e7xxx_pci;
+
+static const struct e7xxx_dev_info e7xxx_devs[] = {
+ [E7500] = {
+ .err_dev = PCI_DEVICE_ID_INTEL_7500_1_ERR,
+ .ctl_name = "E7500"},
+ [E7501] = {
+ .err_dev = PCI_DEVICE_ID_INTEL_7501_1_ERR,
+ .ctl_name = "E7501"},
+ [E7505] = {
+ .err_dev = PCI_DEVICE_ID_INTEL_7505_1_ERR,
+ .ctl_name = "E7505"},
+ [E7205] = {
+ .err_dev = PCI_DEVICE_ID_INTEL_7205_1_ERR,
+ .ctl_name = "E7205"},
+};
+
+/* FIXME - is this valid for both SECDED and S4ECD4ED? */
+static inline int e7xxx_find_channel(u16 syndrome)
+{
+ debugf3("%s()\n", __func__);
+
+ if ((syndrome & 0xff00) == 0)
+ return 0;
+
+ if ((syndrome & 0x00ff) == 0)
+ return 1;
+
+ if ((syndrome & 0xf000) == 0 || (syndrome & 0x0f00) == 0)
+ return 0;
+
+ return 1;
+}
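+
+/* Worked example for e7xxx_find_channel() above (illustrative
+ * values): a syndrome of 0x0040 has a clear upper byte and decodes to
+ * channel 0, while 0x4000 has a clear lower byte and decodes to
+ * channel 1.
+ */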
+
+static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci,
+ unsigned long page)
+{
+ u32 remap;
+ struct e7xxx_pvt *pvt = (struct e7xxx_pvt *)mci->pvt_info;
+
+ debugf3("%s()\n", __func__);
+
+ if ((page < pvt->tolm) ||
+ ((page >= 0x100000) && (page < pvt->remapbase)))
+ return page;
+
+ remap = (page - pvt->tolm) + pvt->remapbase;
+
+ if (remap < pvt->remaplimit)
+ return remap;
+
+ e7xxx_printk(KERN_ERR, "Invalid page %lx - out of range\n", page);
+ return pvt->tolm - 1;
+}
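+
+/* Worked example for ctl_page_to_phys() above (illustrative values):
+ * with tolm = 0xc0000 pages (3 GiB) and remapbase = 0x100000 pages
+ * (4 GiB), controller page 0xd0000 falls in the remapped region and
+ * resolves to (0xd0000 - 0xc0000) + 0x100000 = 0x110000, provided the
+ * result is below remaplimit.
+ */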
+
+static void process_ce(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
+{
+ u32 error_1b, page;
+ u16 syndrome;
+ int row;
+ int channel;
+
+ debugf3("%s()\n", __func__);
+ /* read the error address */
+ error_1b = info->dram_celog_add;
+ /* FIXME - should use PAGE_SHIFT */
+ page = error_1b >> 6; /* convert the address to 4k page */
+ /* read the syndrome */
+ syndrome = info->dram_celog_syndrome;
+ /* FIXME - check for -1 */
+ row = edac_mc_find_csrow_by_page(mci, page);
+ /* convert syndrome to channel */
+ channel = e7xxx_find_channel(syndrome);
+ edac_mc_handle_ce(mci, page, 0, syndrome, row, channel, "e7xxx CE");
+}
+
+static void process_ce_no_info(struct mem_ctl_info *mci)
+{
+ debugf3("%s()\n", __func__);
+ edac_mc_handle_ce_no_info(mci, "e7xxx CE log register overflow");
+}
+
+static void process_ue(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
+{
+ u32 error_2b, block_page;
+ int row;
+
+ debugf3("%s()\n", __func__);
+ /* read the error address */
+ error_2b = info->dram_uelog_add;
+ /* FIXME - should use PAGE_SHIFT */
+ block_page = error_2b >> 6; /* convert to 4k address */
+ row = edac_mc_find_csrow_by_page(mci, block_page);
+ edac_mc_handle_ue(mci, block_page, 0, row, "e7xxx UE");
+}
+
+static void process_ue_no_info(struct mem_ctl_info *mci)
+{
+ debugf3("%s()\n", __func__);
+ edac_mc_handle_ue_no_info(mci, "e7xxx UE log register overflow");
+}
+
+static void e7xxx_get_error_info(struct mem_ctl_info *mci,
+ struct e7xxx_error_info *info)
+{
+ struct e7xxx_pvt *pvt;
+
+ pvt = (struct e7xxx_pvt *)mci->pvt_info;
+ pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_FERR, &info->dram_ferr);
+ pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_NERR, &info->dram_nerr);
+
+ if ((info->dram_ferr & 1) || (info->dram_nerr & 1)) {
+ pci_read_config_dword(pvt->bridge_ck, E7XXX_DRAM_CELOG_ADD,
+ &info->dram_celog_add);
+ pci_read_config_word(pvt->bridge_ck,
+ E7XXX_DRAM_CELOG_SYNDROME,
+ &info->dram_celog_syndrome);
+ }
+
+ if ((info->dram_ferr & 2) || (info->dram_nerr & 2))
+ pci_read_config_dword(pvt->bridge_ck, E7XXX_DRAM_UELOG_ADD,
+ &info->dram_uelog_add);
+
+ if (info->dram_ferr & 3)
+ pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_FERR, 0x03, 0x03);
+
+ if (info->dram_nerr & 3)
+ pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_NERR, 0x03, 0x03);
+}
+
+static int e7xxx_process_error_info(struct mem_ctl_info *mci,
+ struct e7xxx_error_info *info,
+ int handle_errors)
+{
+ int error_found;
+
+ error_found = 0;
+
+ /* decode and report errors */
+ if (info->dram_ferr & 1) { /* check first error correctable */
+ error_found = 1;
+
+ if (handle_errors)
+ process_ce(mci, info);
+ }
+
+ if (info->dram_ferr & 2) { /* check first error uncorrectable */
+ error_found = 1;
+
+ if (handle_errors)
+ process_ue(mci, info);
+ }
+
+ if (info->dram_nerr & 1) { /* check next error correctable */
+ error_found = 1;
+
+ if (handle_errors) {
+ if (info->dram_ferr & 1)
+ process_ce_no_info(mci);
+ else
+ process_ce(mci, info);
+ }
+ }
+
+ if (info->dram_nerr & 2) { /* check next error uncorrectable */
+ error_found = 1;
+
+ if (handle_errors) {
+ if (info->dram_ferr & 2)
+ process_ue_no_info(mci);
+ else
+ process_ue(mci, info);
+ }
+ }
+
+ return error_found;
+}
+
+static void e7xxx_check(struct mem_ctl_info *mci)
+{
+ struct e7xxx_error_info info;
+
+ debugf3("%s()\n", __func__);
+ e7xxx_get_error_info(mci, &info);
+ e7xxx_process_error_info(mci, &info, 1);
+}
+
+/* Return 1 if dual channel mode is active. Else return 0. */
+static inline int dual_channel_active(u32 drc, int dev_idx)
+{
+ return (dev_idx == E7501) ? ((drc >> 22) & 0x1) : 1;
+}
+
+/* Return DRB granularity (0=32MB, 1=64MB). */
+static inline int drb_granularity(u32 drc, int dev_idx)
+{
+ /* only the e7501 has a selectable granularity */
+ return (dev_idx == E7501) ? ((drc >> 18) & 0x3) : 1;
+}
+
+static void e7xxx_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
+ int dev_idx, u32 drc)
+{
+ unsigned long last_cumul_size;
+ int index;
+ u8 value;
+ u32 dra, cumul_size;
+ int drc_chan, drc_drbg, drc_ddim, mem_dev;
+ struct csrow_info *csrow;
+
+ pci_read_config_dword(pdev, E7XXX_DRA, &dra);
+ drc_chan = dual_channel_active(drc, dev_idx);
+ drc_drbg = drb_granularity(drc, dev_idx);
+ drc_ddim = (drc >> 20) & 0x3;
+ last_cumul_size = 0;
+
+ /* The dram row boundary (DRB) reg values are boundary address
+ * for each DRAM row with a granularity of 32 or 64MB (single/dual
+ * channel operation). DRB regs are cumulative; therefore DRB7 will
+ * contain the total memory contained in all eight rows.
+ */
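+ /* Worked example (illustrative): with 64 MiB granularity
+ * (drc_drbg == 1) and 4 KiB pages (PAGE_SHIFT == 12), a DRB value
+ * of 4 yields 4 << (25 + 1 - 12) = 65536 pages, i.e. a cumulative
+ * boundary of 256 MiB.
+ */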
+ for (index = 0; index < mci->nr_csrows; index++) {
+ /* mem_dev 0=x8, 1=x4 */
+ mem_dev = (dra >> (index * 4 + 3)) & 0x1;
+ csrow = &mci->csrows[index];
+
+ pci_read_config_byte(pdev, E7XXX_DRB + index, &value);
+ /* convert the DRB boundary (in 32 or 64 MiB units) to pages */
+ cumul_size = value << (25 + drc_drbg - PAGE_SHIFT);
+ debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
+ cumul_size);
+ if (cumul_size == last_cumul_size)
+ continue; /* not populated */
+
+ csrow->first_page = last_cumul_size;
+ csrow->last_page = cumul_size - 1;
+ csrow->nr_pages = cumul_size - last_cumul_size;
+ last_cumul_size = cumul_size;
+ csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */
+ csrow->mtype = MEM_RDDR; /* only one type supported */
+ csrow->dtype = mem_dev ? DEV_X4 : DEV_X8;
+
+ /*
+ * if single channel or x8 devices then SECDED
+ * if dual channel and x4 then S4ECD4ED
+ */
+ if (drc_ddim) {
+ if (drc_chan && mem_dev) {
+ csrow->edac_mode = EDAC_S4ECD4ED;
+ mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
+ } else {
+ csrow->edac_mode = EDAC_SECDED;
+ mci->edac_cap |= EDAC_FLAG_SECDED;
+ }
+ } else
+ csrow->edac_mode = EDAC_NONE;
+ }
+}
+
+static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
+{
+ u16 pci_data;
+ struct mem_ctl_info *mci = NULL;
+ struct e7xxx_pvt *pvt = NULL;
+ u32 drc;
+ int drc_chan;
+ struct e7xxx_error_info discard;
+
+ debugf0("%s(): mci\n", __func__);
+
+ pci_read_config_dword(pdev, E7XXX_DRC, &drc);
+
+ drc_chan = dual_channel_active(drc, dev_idx);
+ mci = edac_mc_alloc(sizeof(*pvt), E7XXX_NR_CSROWS, drc_chan + 1, 0);
+
+ if (mci == NULL)
+ return -ENOMEM;
+
+ debugf3("%s(): init mci\n", __func__);
+ mci->mtype_cap = MEM_FLAG_RDDR;
+ mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED |
+ EDAC_FLAG_S4ECD4ED;
+ /* FIXME - what if different memory types are in different csrows? */
+ mci->mod_name = EDAC_MOD_STR;
+ mci->mod_ver = E7XXX_REVISION;
+ mci->dev = &pdev->dev;
+ debugf3("%s(): init pvt\n", __func__);
+ pvt = (struct e7xxx_pvt *)mci->pvt_info;
+ pvt->dev_info = &e7xxx_devs[dev_idx];
+ pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
+ pvt->dev_info->err_dev, pvt->bridge_ck);
+
+ if (!pvt->bridge_ck) {
+ e7xxx_printk(KERN_ERR, "error reporting device not found: "
+ "vendor 0x%x device 0x%x (broken BIOS?)\n",
+ PCI_VENDOR_ID_INTEL, e7xxx_devs[dev_idx].err_dev);
+ goto fail0;
+ }
+
+ debugf3("%s(): more mci init\n", __func__);
+ mci->ctl_name = pvt->dev_info->ctl_name;
+ mci->dev_name = pci_name(pdev);
+ mci->edac_check = e7xxx_check;
+ mci->ctl_page_to_phys = ctl_page_to_phys;
+ e7xxx_init_csrows(mci, pdev, dev_idx, drc);
+ mci->edac_cap |= EDAC_FLAG_NONE;
+ debugf3("%s(): tolm, remapbase, remaplimit\n", __func__);
+ /* load the top of low memory, remap base, and remap limit vars */
+ pci_read_config_word(pdev, E7XXX_TOLM, &pci_data);
+ pvt->tolm = ((u32) pci_data) << 4;
+ pci_read_config_word(pdev, E7XXX_REMAPBASE, &pci_data);
+ pvt->remapbase = ((u32) pci_data) << 14;
+ pci_read_config_word(pdev, E7XXX_REMAPLIMIT, &pci_data);
+ pvt->remaplimit = ((u32) pci_data) << 14;
+ e7xxx_printk(KERN_INFO,
+ "tolm = %x, remapbase = %x, remaplimit = %x\n", pvt->tolm,
+ pvt->remapbase, pvt->remaplimit);
+
+ /* clear any pending errors, or initial state bits */
+ e7xxx_get_error_info(mci, &discard);
+
+ /* Here we assume that we will never see multiple instances of this
+ * type of memory controller. The ID is therefore hardcoded to 0.
+ */
+ if (edac_mc_add_mc(mci)) {
+ debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
+ goto fail1;
+ }
+
+ /* allocating generic PCI control info */
+ e7xxx_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
+ if (!e7xxx_pci) {
+ printk(KERN_WARNING
+ "%s(): Unable to create PCI control\n",
+ __func__);
+ printk(KERN_WARNING
+ "%s(): PCI error report via EDAC not setup\n",
+ __func__);
+ }
+
+ /* get this far and it's successful */
+ debugf3("%s(): success\n", __func__);
+ return 0;
+
+fail1:
+ pci_dev_put(pvt->bridge_ck);
+
+fail0:
+ edac_mc_free(mci);
+
+ return -ENODEV;
+}
+
+/* returns 0 on success, or negative on error */
+static int __devinit e7xxx_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ debugf0("%s()\n", __func__);
+
+ /* wake up and enable device */
+ return pci_enable_device(pdev) ?
+ -EIO : e7xxx_probe1(pdev, ent->driver_data);
+}
+
+static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
+{
+ struct mem_ctl_info *mci;
+ struct e7xxx_pvt *pvt;
+
+ debugf0("%s()\n", __func__);
+
+ if (e7xxx_pci)
+ edac_pci_release_generic_ctl(e7xxx_pci);
+
+ if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
+ return;
+
+ pvt = (struct e7xxx_pvt *)mci->pvt_info;
+ pci_dev_put(pvt->bridge_ck);
+ edac_mc_free(mci);
+}
+
+static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
+ {
+ PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ E7205},
+ {
+ PCI_VEND_DEV(INTEL, 7500_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ E7500},
+ {
+ PCI_VEND_DEV(INTEL, 7501_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ E7501},
+ {
+ PCI_VEND_DEV(INTEL, 7505_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ E7505},
+ {
+ 0,
+ } /* 0 terminated list. */
+};
+
+MODULE_DEVICE_TABLE(pci, e7xxx_pci_tbl);
+
+static struct pci_driver e7xxx_driver = {
+ .name = EDAC_MOD_STR,
+ .probe = e7xxx_init_one,
+ .remove = __devexit_p(e7xxx_remove_one),
+ .id_table = e7xxx_pci_tbl,
+};
+
+static int __init e7xxx_init(void)
+{
+ /* Ensure that the OPSTATE is set correctly for POLL or NMI */
+ opstate_init();
+
+ return pci_register_driver(&e7xxx_driver);
+}
+
+static void __exit e7xxx_exit(void)
+{
+ pci_unregister_driver(&e7xxx_driver);
+}
+
+module_init(e7xxx_init);
+module_exit(e7xxx_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh et al\n"
+ "Based on.work by Dan Hollis et al");
+MODULE_DESCRIPTION("MC support for Intel e7xxx memory controllers");
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
new file mode 100644
index 0000000..4b55ec6
--- /dev/null
+++ b/drivers/edac/edac_core.h
@@ -0,0 +1,850 @@
+/*
+ * Defines, structures, APIs for edac_core module
+ *
+ * (C) 2007 Linux Networx (http://lnxi.com)
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written by Thayne Harbaugh
+ * Based on work by Dan Hollis <goemon at anime dot net> and others.
+ * http://www.anime.net/~goemon/linux-ecc/
+ *
+ * NMI handling support added by
+ * Dave Peterson <dsp@llnl.gov> <dave_peterson@pobox.com>
+ *
+ * Refactored for multi-source files:
+ * Doug Thompson <norsk5@xmission.com>
+ *
+ */
+
+#ifndef _EDAC_CORE_H_
+#define _EDAC_CORE_H_
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+#include <linux/pci.h>
+#include <linux/time.h>
+#include <linux/nmi.h>
+#include <linux/rcupdate.h>
+#include <linux/completion.h>
+#include <linux/kobject.h>
+#include <linux/platform_device.h>
+#include <linux/sysdev.h>
+#include <linux/workqueue.h>
+
+#define EDAC_MC_LABEL_LEN 31
+#define EDAC_DEVICE_NAME_LEN 31
+#define EDAC_ATTRIB_VALUE_LEN 15
+#define MC_PROC_NAME_MAX_LEN 7
+
+#if PAGE_SHIFT < 20
+#define PAGES_TO_MiB( pages ) ( ( pages ) >> ( 20 - PAGE_SHIFT ) )
+#else /* PAGE_SHIFT >= 20 */
+#define PAGES_TO_MiB( pages ) ( ( pages ) << ( PAGE_SHIFT - 20 ) )
+#endif
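+
+/* For example (with the common PAGE_SHIFT of 12, i.e. 4 KiB pages):
+ * PAGES_TO_MiB(0x4000) == 0x4000 >> 8 == 64 MiB.
+ */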
+
+#define edac_printk(level, prefix, fmt, arg...) \
+ printk(level "EDAC " prefix ": " fmt, ##arg)
+
+#define edac_mc_printk(mci, level, fmt, arg...) \
+ printk(level "EDAC MC%d: " fmt, mci->mc_idx, ##arg)
+
+#define edac_mc_chipset_printk(mci, level, prefix, fmt, arg...) \
+ printk(level "EDAC " prefix " MC%d: " fmt, mci->mc_idx, ##arg)
+
+/* edac_device printk */
+#define edac_device_printk(ctl, level, fmt, arg...) \
+ printk(level "EDAC DEVICE%d: " fmt, ctl->dev_idx, ##arg)
+
+/* edac_pci printk */
+#define edac_pci_printk(ctl, level, fmt, arg...) \
+ printk(level "EDAC PCI%d: " fmt, ctl->pci_idx, ##arg)
+
+/* prefixes for edac_printk() and edac_mc_printk() */
+#define EDAC_MC "MC"
+#define EDAC_PCI "PCI"
+#define EDAC_DEBUG "DEBUG"
+
+#ifdef CONFIG_EDAC_DEBUG
+extern int edac_debug_level;
+
+#define edac_debug_printk(level, fmt, arg...) \
+ do { \
+ if (level <= edac_debug_level) \
+ edac_printk(KERN_DEBUG, EDAC_DEBUG, fmt, ##arg); \
+ } while(0)
+
+#define debugf0( ... ) edac_debug_printk(0, __VA_ARGS__ )
+#define debugf1( ... ) edac_debug_printk(1, __VA_ARGS__ )
+#define debugf2( ... ) edac_debug_printk(2, __VA_ARGS__ )
+#define debugf3( ... ) edac_debug_printk(3, __VA_ARGS__ )
+#define debugf4( ... ) edac_debug_printk(4, __VA_ARGS__ )
+
+#else /* !CONFIG_EDAC_DEBUG */
+
+#define debugf0( ... )
+#define debugf1( ... )
+#define debugf2( ... )
+#define debugf3( ... )
+#define debugf4( ... )
+
+#endif /* !CONFIG_EDAC_DEBUG */
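+
+/* Typical use (illustrative): debugf1("%s()\n", __func__) emits a
+ * KERN_DEBUG line only when CONFIG_EDAC_DEBUG is enabled and
+ * edac_debug_level >= 1; otherwise it compiles away entirely.
+ */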
+
+#define PCI_VEND_DEV(vend, dev) PCI_VENDOR_ID_ ## vend, \
+ PCI_DEVICE_ID_ ## vend ## _ ## dev
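+
+/* Example expansion: PCI_VEND_DEV(INTEL, 7500_0) becomes
+ * PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_7500_0, i.e. the two
+ * leading initializers of a pci_device_id table entry.
+ */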
+
+#define edac_dev_name(dev) (dev)->dev_name
+
+/* memory devices */
+enum dev_type {
+ DEV_UNKNOWN = 0,
+ DEV_X1,
+ DEV_X2,
+ DEV_X4,
+ DEV_X8,
+ DEV_X16,
+ DEV_X32, /* Do these parts exist? */
+ DEV_X64 /* Do these parts exist? */
+};
+
+#define DEV_FLAG_UNKNOWN BIT(DEV_UNKNOWN)
+#define DEV_FLAG_X1 BIT(DEV_X1)
+#define DEV_FLAG_X2 BIT(DEV_X2)
+#define DEV_FLAG_X4 BIT(DEV_X4)
+#define DEV_FLAG_X8 BIT(DEV_X8)
+#define DEV_FLAG_X16 BIT(DEV_X16)
+#define DEV_FLAG_X32 BIT(DEV_X32)
+#define DEV_FLAG_X64 BIT(DEV_X64)
+
+/* memory types */
+enum mem_type {
+ MEM_EMPTY = 0, /* Empty csrow */
+ MEM_RESERVED, /* Reserved csrow type */
+ MEM_UNKNOWN, /* Unknown csrow type */
+ MEM_FPM, /* Fast page mode */
+ MEM_EDO, /* Extended data out */
+ MEM_BEDO, /* Burst Extended data out */
+ MEM_SDR, /* Single data rate SDRAM */
+ MEM_RDR, /* Registered single data rate SDRAM */
+ MEM_DDR, /* Double data rate SDRAM */
+ MEM_RDDR, /* Registered Double data rate SDRAM */
+ MEM_RMBS, /* Rambus DRAM */
+ MEM_DDR2, /* DDR2 RAM */
+ MEM_FB_DDR2, /* fully buffered DDR2 */
+ MEM_RDDR2, /* Registered DDR2 RAM */
+ MEM_XDR, /* Rambus XDR */
+};
+
+#define MEM_FLAG_EMPTY BIT(MEM_EMPTY)
+#define MEM_FLAG_RESERVED BIT(MEM_RESERVED)
+#define MEM_FLAG_UNKNOWN BIT(MEM_UNKNOWN)
+#define MEM_FLAG_FPM BIT(MEM_FPM)
+#define MEM_FLAG_EDO BIT(MEM_EDO)
+#define MEM_FLAG_BEDO BIT(MEM_BEDO)
+#define MEM_FLAG_SDR BIT(MEM_SDR)
+#define MEM_FLAG_RDR BIT(MEM_RDR)
+#define MEM_FLAG_DDR BIT(MEM_DDR)
+#define MEM_FLAG_RDDR BIT(MEM_RDDR)
+#define MEM_FLAG_RMBS BIT(MEM_RMBS)
+#define MEM_FLAG_DDR2 BIT(MEM_DDR2)
+#define MEM_FLAG_FB_DDR2 BIT(MEM_FB_DDR2)
+#define MEM_FLAG_RDDR2 BIT(MEM_RDDR2)
+#define MEM_FLAG_XDR BIT(MEM_XDR)
+
+/* chipset Error Detection and Correction capabilities and mode */
+enum edac_type {
+ EDAC_UNKNOWN = 0, /* Unknown if ECC is available */
+ EDAC_NONE, /* Doesn't support ECC */
+ EDAC_RESERVED, /* Reserved ECC type */
+ EDAC_PARITY, /* Detects parity errors */
+ EDAC_EC, /* Error Checking - no correction */
+ EDAC_SECDED, /* Single bit error correction, Double detection */
+ EDAC_S2ECD2ED, /* Chipkill x2 devices - do these exist? */
+ EDAC_S4ECD4ED, /* Chipkill x4 devices */
+ EDAC_S8ECD8ED, /* Chipkill x8 devices */
+ EDAC_S16ECD16ED, /* Chipkill x16 devices */
+};
+
+#define EDAC_FLAG_UNKNOWN BIT(EDAC_UNKNOWN)
+#define EDAC_FLAG_NONE BIT(EDAC_NONE)
+#define EDAC_FLAG_PARITY BIT(EDAC_PARITY)
+#define EDAC_FLAG_EC BIT(EDAC_EC)
+#define EDAC_FLAG_SECDED BIT(EDAC_SECDED)
+#define EDAC_FLAG_S2ECD2ED BIT(EDAC_S2ECD2ED)
+#define EDAC_FLAG_S4ECD4ED BIT(EDAC_S4ECD4ED)
+#define EDAC_FLAG_S8ECD8ED BIT(EDAC_S8ECD8ED)
+#define EDAC_FLAG_S16ECD16ED BIT(EDAC_S16ECD16ED)
+
+/* scrubbing capabilities */
+enum scrub_type {
+ SCRUB_UNKNOWN = 0, /* Unknown if scrubber is available */
+ SCRUB_NONE, /* No scrubber */
+ SCRUB_SW_PROG, /* SW progressive (sequential) scrubbing */
+ SCRUB_SW_SRC, /* Software scrub only errors */
+ SCRUB_SW_PROG_SRC, /* Progressive software scrub from an error */
+ SCRUB_SW_TUNABLE, /* Software scrub frequency is tunable */
+ SCRUB_HW_PROG, /* HW progressive (sequential) scrubbing */
+ SCRUB_HW_SRC, /* Hardware scrub only errors */
+ SCRUB_HW_PROG_SRC, /* Progressive hardware scrub from an error */
+ SCRUB_HW_TUNABLE /* Hardware scrub frequency is tunable */
+};
+
+#define SCRUB_FLAG_SW_PROG BIT(SCRUB_SW_PROG)
+#define SCRUB_FLAG_SW_SRC BIT(SCRUB_SW_SRC)
+#define SCRUB_FLAG_SW_PROG_SRC BIT(SCRUB_SW_PROG_SRC)
+#define SCRUB_FLAG_SW_TUN BIT(SCRUB_SW_TUNABLE)
+#define SCRUB_FLAG_HW_PROG BIT(SCRUB_HW_PROG)
+#define SCRUB_FLAG_HW_SRC BIT(SCRUB_HW_SRC)
+#define SCRUB_FLAG_HW_PROG_SRC BIT(SCRUB_HW_PROG_SRC)
+#define SCRUB_FLAG_HW_TUN BIT(SCRUB_HW_TUNABLE)
+
+/* FIXME - should have notify capabilities: NMI, LOG, PROC, etc */
+
+/* EDAC internal operation states */
+#define OP_ALLOC 0x100
+#define OP_RUNNING_POLL 0x201
+#define OP_RUNNING_INTERRUPT 0x202
+#define OP_RUNNING_POLL_INTR 0x203
+#define OP_OFFLINE 0x300
+
+/*
+ * There are several things to be aware of that aren't at all obvious:
+ *
+ *
+ * SOCKETS, SOCKET SETS, BANKS, ROWS, CHIP-SELECT ROWS, CHANNELS, etc..
+ *
+ * These are some of the many terms that are thrown about that don't always
+ * mean what people think they mean (Inconceivable!). In the interest of
+ * creating a common ground for discussion, terms and their definitions
+ * will be established.
+ *
+ * Memory devices: The individual chip on a memory stick. These devices
+ * commonly output 4 or 8 bits each. Grouping several
+ * of these in parallel provides 64 bits, which is common
+ * for a memory stick.
+ *
+ * Memory Stick: A printed circuit board that aggregates multiple
+ * memory devices in parallel. This is the atomic
+ * memory component that is purchasable by Joe consumer
+ * and loaded into a memory socket.
+ *
+ * Socket: A physical connector on the motherboard that accepts
+ * a single memory stick.
+ *
+ * Channel: Set of memory devices on a memory stick that must be
+ * grouped in parallel with one or more additional
+ * channels from other memory sticks. This parallel
+ * grouping of the output from multiple channels is
+ * necessary for the smallest granularity of memory access.
+ * Some memory controllers are capable of single channel -
+ * which means that memory sticks can be loaded
+ * individually. Other memory controllers are only
+ * capable of dual channel - which means that memory
+ * sticks must be loaded as pairs (see "socket set").
+ *
+ * Chip-select row: All of the memory devices that are selected together
+ * for a single, minimum grain of memory access.
+ * This selects all of the parallel memory devices across
+ * all of the parallel channels. Common chip-select rows
+ * for single channel are 64 bits, for dual channel 128
+ * bits.
+ *
+ * Single-Ranked stick: A Single-ranked stick has 1 chip-select row of memory.
+ * Motherboards commonly drive two chip-select pins to
+ * a memory stick. A single-ranked stick will occupy
+ * only one of those rows. The other will be unused.
+ *
+ * Double-Ranked stick: A double-ranked stick has two chip-select rows which
+ * access different sets of memory devices. The two
+ * rows cannot be accessed concurrently.
+ *
+ * Double-sided stick: DEPRECATED TERM, see Double-Ranked stick.
+ * A double-sided stick has two chip-select rows which
+ * access different sets of memory devices. The two
+ * rows cannot be accessed concurrently. "Double-sided"
+ * is irrespective of the memory devices being mounted
+ * on both sides of the memory stick.
+ *
+ * Socket set: All of the memory sticks that are required for
+ * a single memory access or all of the memory sticks
+ * spanned by a chip-select row. A single socket set
+ * has two chip-select rows and if double-sided sticks
+ * are used these will occupy those chip-select rows.
+ *
+ * Bank: This term is avoided because it is unclear when
+ * needing to distinguish between chip-select rows and
+ * socket sets.
+ *
+ * Controller pages:
+ *
+ * Physical pages:
+ *
+ * Virtual pages:
+ *
+ *
+ * STRUCTURE ORGANIZATION AND CHOICES
+ *
+ *
+ *
+ * PS - I enjoyed writing all that about as much as you enjoyed reading it.
+ */
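+
+/* A concrete (purely illustrative) example of the terms above: a dual
+ * channel controller loaded with one socket set of two double-ranked
+ * sticks presents two chip-select rows, each 128 bits wide and
+ * spanning both sticks; each csrow_info below describes one such row.
+ */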
+
+struct channel_info {
+ int chan_idx; /* channel index */
+ u32 ce_count; /* Correctable Errors for this CHANNEL */
+ char label[EDAC_MC_LABEL_LEN + 1]; /* DIMM label on motherboard */
+ struct csrow_info *csrow; /* the parent */
+};
+
+struct csrow_info {
+ unsigned long first_page; /* first page number in dimm */
+ unsigned long last_page; /* last page number in dimm */
+ unsigned long page_mask; /* used for interleaving -
+ * 0UL for non-interleaved
+ */
+ u32 nr_pages; /* number of pages in csrow */
+ u32 grain; /* granularity of reported error in bytes */
+ int csrow_idx; /* the chip-select row */
+ enum dev_type dtype; /* memory device type */
+ u32 ue_count; /* Uncorrectable Errors for this csrow */
+ u32 ce_count; /* Correctable Errors for this csrow */
+ enum mem_type mtype; /* memory csrow type */
+ enum edac_type edac_mode; /* EDAC mode for this csrow */
+ struct mem_ctl_info *mci; /* the parent */
+
+ struct kobject kobj; /* sysfs kobject for this csrow */
+
+ /* channel information for this csrow */
+ u32 nr_channels;
+ struct channel_info *channels;
+};
+
+/* mcidev_sysfs_attribute structure
+ * used for driver sysfs attributes and in mem_ctl_info
+ * sysfs top level entries
+ */
+struct mcidev_sysfs_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct mem_ctl_info *, char *);
+ ssize_t (*store)(struct mem_ctl_info *, const char *, size_t);
+};
+
+/* MEMORY controller information structure
+ */
+struct mem_ctl_info {
+ struct list_head link; /* for global list of mem_ctl_info structs */
+
+ struct module *owner; /* Module owner of this control struct */
+
+ unsigned long mtype_cap; /* memory types supported by mc */
+ unsigned long edac_ctl_cap; /* Mem controller EDAC capabilities */
+ unsigned long edac_cap; /* configuration capabilities - this is
+ * closely related to edac_ctl_cap. The
+ * difference is that the controller may be
+ * capable of s4ecd4ed which would be listed
+ * in edac_ctl_cap, but if channels aren't
+ * capable of s4ecd4ed then the edac_cap would
+ * not have that capability.
+ */
+ unsigned long scrub_cap; /* chipset scrub capabilities */
+ enum scrub_type scrub_mode; /* current scrub mode */
+
+ /* Translates sdram memory scrub rate given in bytes/sec to the
+ internal representation and configures whatever else needs
+ to be configured.
+ */
+ int (*set_sdram_scrub_rate) (struct mem_ctl_info * mci, u32 * bw);
+
+ /* Gets the current sdram memory scrub rate from the internal
+ representation and converts it to the closest matching
+ bandwidth in bytes/sec.
+ */
+ int (*get_sdram_scrub_rate) (struct mem_ctl_info * mci, u32 * bw);
+
+
+ /* pointer to edac checking routine */
+ void (*edac_check) (struct mem_ctl_info * mci);
+
+ /*
+ * Remaps memory pages: controller pages to physical pages.
+ * For most MC's, this will be NULL.
+ */
+ /* FIXME - why not send the phys page to begin with? */
+ unsigned long (*ctl_page_to_phys) (struct mem_ctl_info * mci,
+ unsigned long page);
+ int mc_idx;
+ int nr_csrows;
+ struct csrow_info *csrows;
+ /*
+ * FIXME - what about controllers on other busses? - IDs must be
+ * unique. dev pointer should be sufficiently unique, but
+ * BUS:SLOT.FUNC numbers may not be unique.
+ */
+ struct device *dev;
+ const char *mod_name;
+ const char *mod_ver;
+ const char *ctl_name;
+ const char *dev_name;
+ char proc_name[MC_PROC_NAME_MAX_LEN + 1];
+ void *pvt_info;
+ u32 ue_noinfo_count; /* Uncorrectable Errors w/o info */
+ u32 ce_noinfo_count; /* Correctable Errors w/o info */
+ u32 ue_count; /* Total Uncorrectable Errors for this MC */
+ u32 ce_count; /* Total Correctable Errors for this MC */
+ unsigned long start_time; /* mci load start time (in jiffies) */
+
+ /* this stuff is for safe removal of mc devices from global list while
+ * NMI handlers may be traversing list
+ */
+ struct rcu_head rcu;
+ struct completion complete;
+
+ /* edac sysfs device control */
+ struct kobject edac_mci_kobj;
+
+ /* Additional top controller level attributes, but specified
+ * by the low level driver.
+ *
+ * Set by the low level driver to provide attributes at the
+ * controller level, same level as 'ue_count' and 'ce_count' above.
+ * An array of structures, NULL terminated
+ *
+ * If attributes are desired, then set to array of attributes
+ * If no attributes are desired, leave NULL
+ */
+ struct mcidev_sysfs_attribute *mc_driver_sysfs_attributes;
+
+ /* work struct for this MC */
+ struct delayed_work work;
+
+ /* the internal state of this controller instance */
+ int op_state;
+};
+
+/*
+ * The following are the structures to provide for a generic
+ * or abstract 'edac_device'. This set of structures and the
+ * code that implements the APIs for the same, provide for
+ * registering EDAC type devices which are NOT standard memory.
+ *
+ * CPU caches (L1 and L2)
+ * DMA engines
+ * Core CPU switches
+ * Fabric switch units
+ * PCIe interface controllers
+ * other EDAC/ECC type devices that can be monitored for
+ * errors, etc.
+ *
+ * It allows for a 2-level hierarchy. For example:
+ *
+ * cache could be composed of L1, L2 and L3 levels of cache.
+ * Each CPU core would have its own L1 cache, while sharing
+ * L2 and maybe L3 caches.
+ *
+ * View them arranged via the sysfs presentation:
+ * /sys/devices/system/edac/..
+ *
+ * mc/ <existing memory device directory>
+ * cpu/cpu0/.. <L1 and L2 block directory>
+ * /L1-cache/ce_count
+ * /ue_count
+ * /L2-cache/ce_count
+ * /ue_count
+ * cpu/cpu1/.. <L1 and L2 block directory>
+ * /L1-cache/ce_count
+ * /ue_count
+ * /L2-cache/ce_count
+ * /ue_count
+ * ...
+ *
+ * the L1 and L2 directories would be "edac_device_block's"
+ */
+
+struct edac_device_counter {
+ u32 ue_count;
+ u32 ce_count;
+};
+
+/* forward reference */
+struct edac_device_ctl_info;
+struct edac_device_block;
+
+/* edac_dev_sysfs_attribute structure
+ * used for driver sysfs attributes in mem_ctl_info
+ * for extra controls and attributes:
+ * like high level error Injection controls
+ */
+struct edac_dev_sysfs_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct edac_device_ctl_info *, char *);
+ ssize_t (*store)(struct edac_device_ctl_info *, const char *, size_t);
+};
+
+/* edac_dev_sysfs_block_attribute structure
+ *
+ * used in leaf 'block' nodes for adding controls/attributes
+ *
+ * each block in each instance of the containing control structure
+ * can have an array of the following. The show and store functions
+ * will be filled in with the show/store function in the
+ * low level driver.
+ *
+ * The 'value' field will be the actual value field used for
+ * counting
+ */
+struct edac_dev_sysfs_block_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct kobject *, struct attribute *, char *);
+ ssize_t (*store)(struct kobject *, struct attribute *,
+ const char *, size_t);
+ struct edac_device_block *block;
+
+ unsigned int value;
+};
+
+/* device block control structure */
+struct edac_device_block {
+ struct edac_device_instance *instance; /* Up Pointer */
+ char name[EDAC_DEVICE_NAME_LEN + 1];
+
+ struct edac_device_counter counters; /* basic UE and CE counters */
+
+ int nr_attribs; /* how many attributes */
+
+ /* this block's attributes, could be NULL */
+ struct edac_dev_sysfs_block_attribute *block_attributes;
+
+ /* edac sysfs device control */
+ struct kobject kobj;
+};
+
+/* device instance control structure */
+struct edac_device_instance {
+ struct edac_device_ctl_info *ctl; /* Up pointer */
+ char name[EDAC_DEVICE_NAME_LEN + 4];
+
+ struct edac_device_counter counters; /* instance counters */
+
+ u32 nr_blocks; /* how many blocks */
+ struct edac_device_block *blocks; /* block array */
+
+ /* edac sysfs device control */
+ struct kobject kobj;
+};
+
+
+/*
+ * Abstract edac_device control info structure
+ *
+ */
+struct edac_device_ctl_info {
+ /* for global list of edac_device_ctl_info structs */
+ struct list_head link;
+
+ struct module *owner; /* Module owner of this control struct */
+
+ int dev_idx;
+
+ /* Per instance controls for this edac_device */
+ int log_ue; /* boolean for logging UEs */
+ int log_ce; /* boolean for logging CEs */
+ int panic_on_ue; /* boolean for panicking on a UE */
+ unsigned poll_msec; /* polling interval in milliseconds */
+ unsigned long delay; /* number of jiffies for poll_msec */
+
+ /* Additional top controller level attributes, but specified
+ * by the low level driver.
+ *
+ * Set by the low level driver to provide attributes at the
+ * controller level, same level as 'ue_count' and 'ce_count' above.
+ * An array of structures, NULL terminated
+ *
+ * If attributes are desired, then set to array of attributes
+ * If no attributes are desired, leave NULL
+ */
+ struct edac_dev_sysfs_attribute *sysfs_attributes;
+
+ /* pointer to main 'edac' class in sysfs */
+ struct sysdev_class *edac_class;
+
+ /* the internal state of this controller instance */
+ int op_state;
+ /* work struct for this instance */
+ struct delayed_work work;
+
+ /* pointer to edac polling checking routine:
+ * If NOT NULL: points to polling check routine
+ * If NULL: Then assumes INTERRUPT operation, where
+ * MC driver will receive events
+ */
+ void (*edac_check) (struct edac_device_ctl_info * edac_dev);
+
+ struct device *dev; /* pointer to device structure */
+
+ const char *mod_name; /* module name */
+ const char *ctl_name; /* edac controller name */
+ const char *dev_name; /* pci/platform/etc... name */
+
+ void *pvt_info; /* pointer to 'private driver' info */
+
+ unsigned long start_time; /* edac_device load start time (jiffies) */
+
+ /* these are for safe removal of mc devices from global list while
+ * NMI handlers may be traversing list
+ */
+ struct rcu_head rcu;
+ struct completion removal_complete;
+
+ /* sysfs top name under 'edac' directory
+ * and instance name:
+ * cpu/cpu0/...
+ * cpu/cpu1/...
+ * cpu/cpu2/...
+ * ...
+ */
+ char name[EDAC_DEVICE_NAME_LEN + 1];
+
+ /* Number of instances supported on this control structure
+ * and the array of those instances
+ */
+ u32 nr_instances;
+ struct edac_device_instance *instances;
+
+ /* Event counters for this whole EDAC Device */
+ struct edac_device_counter counters;
+
+ /* edac sysfs device control for the 'name'
+ * device this structure controls
+ */
+ struct kobject kobj;
+};
+
+/* To get from the instance's wq to the beginning of the ctl structure */
+#define to_edac_mem_ctl_work(w) \
+ container_of(w, struct mem_ctl_info, work)
+
+#define to_edac_device_ctl_work(w) \
+ container_of(w, struct edac_device_ctl_info, work)
+
+/*
+ * The alloc() and free() functions for the 'edac_device' control info
+ * structure. A MC driver will allocate one of these for each edac_device
+ * it is going to control/register with the EDAC CORE.
+ */
+extern struct edac_device_ctl_info *edac_device_alloc_ctl_info(
+ unsigned sizeof_private,
+ char *edac_device_name, unsigned nr_instances,
+ char *edac_block_name, unsigned nr_blocks,
+ unsigned offset_value,
+ struct edac_dev_sysfs_block_attribute *block_attributes,
+ unsigned nr_attribs,
+ int device_index);
+
+/* The offset value can be:
+ * -1 indicating no offset value
+ * 0 for zero-based block numbers
+ * 1 for 1-based block number
+ * other for other-based block number
+ */
+#define BLOCK_OFFSET_VALUE_OFF ((unsigned) -1)
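+
+/* For instance (illustrative values): nr_blocks = 2,
+ * edac_block_name = "L" and offset_value = 1 yield block names "L1"
+ * and "L2"; with offset_value = 0 they would be "L0" and "L1".
+ */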
+
+extern void edac_device_free_ctl_info(struct edac_device_ctl_info *ctl_info);
+
+#ifdef CONFIG_PCI
+
+struct edac_pci_counter {
+ atomic_t pe_count;
+ atomic_t npe_count;
+};
+
+/*
+ * Abstract edac_pci control info structure
+ *
+ */
+struct edac_pci_ctl_info {
+ /* for global list of edac_pci_ctl_info structs */
+ struct list_head link;
+
+ int pci_idx;
+
+ struct sysdev_class *edac_class; /* pointer to class */
+
+ /* the internal state of this controller instance */
+ int op_state;
+ /* work struct for this instance */
+ struct delayed_work work;
+
+ /* pointer to edac polling checking routine:
+ * If NOT NULL: points to polling check routine
+ * If NULL: Then assumes INTERRUPT operation, where
+ * MC driver will receive events
+ */
+ void (*edac_check) (struct edac_pci_ctl_info * edac_dev);
+
+ struct device *dev; /* pointer to device structure */
+
+ const char *mod_name; /* module name */
+ const char *ctl_name; /* edac controller name */
+ const char *dev_name; /* pci/platform/etc... name */
+
+ void *pvt_info; /* pointer to 'private driver' info */
+
+ unsigned long start_time; /* edac_pci load start time (jiffies) */
+
+ /* these are for safe removal of devices from global list while
+ * NMI handlers may be traversing list
+ */
+ struct rcu_head rcu;
+ struct completion complete;
+
+ /* sysfs top name under 'edac' directory
+ * and instance name:
+ * cpu/cpu0/...
+ * cpu/cpu1/...
+ * cpu/cpu2/...
+ * ...
+ */
+ char name[EDAC_DEVICE_NAME_LEN + 1];
+
+ /* Event counters for this whole EDAC Device */
+ struct edac_pci_counter counters;
+
+ /* edac sysfs device control for the 'name'
+ * device this structure controls
+ */
+ struct kobject kobj;
+ struct completion kobj_complete;
+};
+
+#define to_edac_pci_ctl_work(w) \
+ container_of(w, struct edac_pci_ctl_info, work)
+
+/* write all or some bits in a byte-register*/
+static inline void pci_write_bits8(struct pci_dev *pdev, int offset, u8 value,
+ u8 mask)
+{
+ if (mask != 0xff) {
+ u8 buf;
+
+ pci_read_config_byte(pdev, offset, &buf);
+ value &= mask;
+ buf &= ~mask;
+ value |= buf;
+ }
+
+ pci_write_config_byte(pdev, offset, value);
+}
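+
+/* Example use of pci_write_bits8() (taken from the e7xxx driver
+ * above): pci_write_bits8(pdev, E7XXX_DRAM_FERR, 0x03, 0x03) writes
+ * 1s to the two low bits of the register while preserving the
+ * remaining bits.
+ */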
+
+/* write all or some bits in a word-register*/
+static inline void pci_write_bits16(struct pci_dev *pdev, int offset,
+ u16 value, u16 mask)
+{
+ if (mask != 0xffff) {
+ u16 buf;
+
+ pci_read_config_word(pdev, offset, &buf);
+ value &= mask;
+ buf &= ~mask;
+ value |= buf;
+ }
+
+ pci_write_config_word(pdev, offset, value);
+}
+
+/* write all or some bits in a dword-register*/
+static inline void pci_write_bits32(struct pci_dev *pdev, int offset,
+ u32 value, u32 mask)
+{
+ if (mask != 0xffffffff) {
+ u32 buf;
+
+ pci_read_config_dword(pdev, offset, &buf);
+ value &= mask;
+ buf &= ~mask;
+ value |= buf;
+ }
+
+ pci_write_config_dword(pdev, offset, value);
+}
+
+#endif /* CONFIG_PCI */
+
+extern struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
+ unsigned nr_chans, int edac_index);
+extern int edac_mc_add_mc(struct mem_ctl_info *mci);
+extern void edac_mc_free(struct mem_ctl_info *mci);
+extern struct mem_ctl_info *edac_mc_find(int idx);
+extern struct mem_ctl_info *edac_mc_del_mc(struct device *dev);
+extern int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci,
+ unsigned long page);
+
+/*
+ * The no info errors are used when error overflows are reported.
+ * There are a limited number of error logging registers that can
+ * be exhausted. When all registers are exhausted and an additional
+ * error occurs then an error overflow register records that an
+ * error occurred and the type of error, but doesn't have any
+ * further information. The ce/ue versions make for cleaner
+ * reporting logic and function interface - reduces conditional
+ * statement clutter and extra function arguments.
+ */
+extern void edac_mc_handle_ce(struct mem_ctl_info *mci,
+ unsigned long page_frame_number,
+ unsigned long offset_in_page,
+ unsigned long syndrome, int row, int channel,
+ const char *msg);
+extern void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci,
+ const char *msg);
+extern void edac_mc_handle_ue(struct mem_ctl_info *mci,
+ unsigned long page_frame_number,
+ unsigned long offset_in_page, int row,
+ const char *msg);
+extern void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci,
+ const char *msg);
+extern void edac_mc_handle_fbd_ue(struct mem_ctl_info *mci, unsigned int csrow,
+ unsigned int channel0, unsigned int channel1,
+ char *msg);
+extern void edac_mc_handle_fbd_ce(struct mem_ctl_info *mci, unsigned int csrow,
+ unsigned int channel, char *msg);
+
+/*
+ * edac_device APIs
+ */
+extern int edac_device_add_device(struct edac_device_ctl_info *edac_dev);
+extern struct edac_device_ctl_info *edac_device_del_device(struct device *dev);
+extern void edac_device_handle_ue(struct edac_device_ctl_info *edac_dev,
+ int inst_nr, int block_nr, const char *msg);
+extern void edac_device_handle_ce(struct edac_device_ctl_info *edac_dev,
+ int inst_nr, int block_nr, const char *msg);
+
+/*
+ * edac_pci APIs
+ */
+extern struct edac_pci_ctl_info *edac_pci_alloc_ctl_info(unsigned int sz_pvt,
+ const char *edac_pci_name);
+
+extern void edac_pci_free_ctl_info(struct edac_pci_ctl_info *pci);
+
+extern void edac_pci_reset_delay_period(struct edac_pci_ctl_info *pci,
+ unsigned long value);
+
+extern int edac_pci_add_device(struct edac_pci_ctl_info *pci, int edac_idx);
+extern struct edac_pci_ctl_info *edac_pci_del_device(struct device *dev);
+
+extern struct edac_pci_ctl_info *edac_pci_create_generic_ctl(
+ struct device *dev,
+ const char *mod_name);
+
+extern void edac_pci_release_generic_ctl(struct edac_pci_ctl_info *pci);
+extern int edac_pci_create_sysfs(struct edac_pci_ctl_info *pci);
+extern void edac_pci_remove_sysfs(struct edac_pci_ctl_info *pci);
+
+/*
+ * edac misc APIs
+ */
+extern char *edac_op_state_to_string(int op_state);
+
+#endif /* _EDAC_CORE_H_ */
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
new file mode 100644
index 0000000..4041e91
--- /dev/null
+++ b/drivers/edac/edac_device.c
@@ -0,0 +1,725 @@
+
+/*
+ * edac_device.c
+ * (C) 2007 www.douglaskthompson.com
+ *
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written by Doug Thompson <norsk5@xmission.com>
+ *
+ * edac_device API implementation
+ * 19 Jan 2007
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/smp.h>
+#include <linux/init.h>
+#include <linux/sysctl.h>
+#include <linux/highmem.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/sysdev.h>
+#include <linux/ctype.h>
+#include <linux/workqueue.h>
+#include <asm/uaccess.h>
+#include <asm/page.h>
+
+#include "edac_core.h"
+#include "edac_module.h"
+
+/* the 'edac_device_list' list may only be manipulated while
+ * holding the 'device_ctls_mutex' lock
+ */
+static DEFINE_MUTEX(device_ctls_mutex);
+static LIST_HEAD(edac_device_list);
+
+#ifdef CONFIG_EDAC_DEBUG
+static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev)
+{
+ debugf3("\tedac_dev = %p dev_idx=%d\n", edac_dev, edac_dev->dev_idx);
+ debugf4("\tedac_dev->edac_check = %p\n", edac_dev->edac_check);
+ debugf3("\tdev = %p\n", edac_dev->dev);
+ debugf3("\tmod_name:ctl_name = %s:%s\n",
+ edac_dev->mod_name, edac_dev->ctl_name);
+ debugf3("\tpvt_info = %p\n\n", edac_dev->pvt_info);
+}
+#endif /* CONFIG_EDAC_DEBUG */
+
+
+/*
+ * edac_device_alloc_ctl_info()
+ * Allocate a new edac device control info structure
+ *
+ * The control structure is allocated in one complete chunk
+ * from the OS. It is in turn sub-allocated to the
+ * various objects that compose the structure.
+ *
+ * The structure has a 'nr_instances' array within itself.
+ * Each instance represents a major component.
+ * Example: L1 cache and L2 cache are 2 instance components.
+ *
+ * Within each instance is an array of 'nr_blocks' blocks.
+ */
+struct edac_device_ctl_info *edac_device_alloc_ctl_info(
+ unsigned sz_private,
+ char *edac_device_name, unsigned nr_instances,
+ char *edac_block_name, unsigned nr_blocks,
+ unsigned offset_value, /* zero, 1, or other based offset */
+ struct edac_dev_sysfs_block_attribute *attrib_spec, unsigned nr_attrib,
+ int device_index)
+{
+ struct edac_device_ctl_info *dev_ctl;
+ struct edac_device_instance *dev_inst, *inst;
+ struct edac_device_block *dev_blk, *blk_p, *blk;
+ struct edac_dev_sysfs_block_attribute *dev_attrib, *attrib_p, *attrib;
+ unsigned total_size;
+ unsigned count;
+ unsigned instance, block, attr;
+ void *pvt;
+ int err;
+
+ debugf4("%s() instances=%d blocks=%d\n",
+ __func__, nr_instances, nr_blocks);
+
+ /* Calculate the size of memory we need to allocate AND
+ * determine the offsets of the various item arrays
+ * (instance,block,attrib) from the start of an allocated structure.
+ * We want the alignment of each item (instance,block,attrib)
+ * to be at least as stringent as what the compiler would
+ * provide if we could simply hardcode everything into a single struct.
+ */
+ dev_ctl = (struct edac_device_ctl_info *)NULL;
+
+ /* Calc the 'end' offset past end of ONE ctl_info structure
+ * which will become the start of the 'instance' array
+ */
+ dev_inst = edac_align_ptr(&dev_ctl[1], sizeof(*dev_inst));
+
+ /* Calc the 'end' offset past the instance array within the ctl_info
+ * which will become the start of the block array
+ */
+ dev_blk = edac_align_ptr(&dev_inst[nr_instances], sizeof(*dev_blk));
+
+ /* Calc the 'end' offset past the dev_blk array
+ * which will become the start of the attrib array, if any.
+ */
+ count = nr_instances * nr_blocks;
+ dev_attrib = edac_align_ptr(&dev_blk[count], sizeof(*dev_attrib));
+
+ /* Check for case of when an attribute array is specified */
+ if (nr_attrib > 0) {
+ /* calc how many nr_attrib we need */
+ count *= nr_attrib;
+
+ /* Calc the 'end' offset past the attributes array */
+ pvt = edac_align_ptr(&dev_attrib[count], sz_private);
+ } else {
+ /* no attribute array specified */
+ pvt = edac_align_ptr(dev_attrib, sz_private);
+ }
+
+ /* 'pvt' now points to where the private data area is.
+ * At this point 'pvt' (like dev_inst,dev_blk and dev_attrib)
+ * is baselined at ZERO
+ */
+ total_size = ((unsigned long)pvt) + sz_private;
+
+ /* Allocate the amount of memory for the set of control structures */
+ dev_ctl = kzalloc(total_size, GFP_KERNEL);
+ if (dev_ctl == NULL)
+ return NULL;
+
+ /* Adjust pointers so they point within the actual memory we
+ * just allocated rather than an imaginary chunk of memory
+ * located at address 0.
+ * 'dev_ctl' points to REAL memory, while the others are
+ * ZERO based and thus need to be adjusted to point within
+ * the allocated memory.
+ */
+ dev_inst = (struct edac_device_instance *)
+ (((char *)dev_ctl) + ((unsigned long)dev_inst));
+ dev_blk = (struct edac_device_block *)
+ (((char *)dev_ctl) + ((unsigned long)dev_blk));
+ dev_attrib = (struct edac_dev_sysfs_block_attribute *)
+ (((char *)dev_ctl) + ((unsigned long)dev_attrib));
+ pvt = sz_private ? (((char *)dev_ctl) + ((unsigned long)pvt)) : NULL;
+
+ /* Begin storing the information into the control info structure */
+ dev_ctl->dev_idx = device_index;
+ dev_ctl->nr_instances = nr_instances;
+ dev_ctl->instances = dev_inst;
+ dev_ctl->pvt_info = pvt;
+
+ /* Default logging of CEs and UEs */
+ dev_ctl->log_ce = 1;
+ dev_ctl->log_ue = 1;
+
+ /* Name of this edac device */
+ snprintf(dev_ctl->name, sizeof(dev_ctl->name), "%s", edac_device_name);
+
+ debugf4("%s() edac_dev=%p next after end=%p\n",
+ __func__, dev_ctl, pvt + sz_private);
+
+ /* Initialize every Instance */
+ for (instance = 0; instance < nr_instances; instance++) {
+ inst = &dev_inst[instance];
+ inst->ctl = dev_ctl;
+ inst->nr_blocks = nr_blocks;
+ blk_p = &dev_blk[instance * nr_blocks];
+ inst->blocks = blk_p;
+
+ /* name of this instance */
+ snprintf(inst->name, sizeof(inst->name),
+ "%s%u", edac_device_name, instance);
+
+ /* Initialize every block in each instance */
+ for (block = 0; block < nr_blocks; block++) {
+ blk = &blk_p[block];
+ blk->instance = inst;
+ snprintf(blk->name, sizeof(blk->name),
+ "%s%d", edac_block_name, block+offset_value);
+
+ debugf4("%s() instance=%d inst_p=%p block=#%d "
+ "block_p=%p name='%s'\n",
+ __func__, instance, inst, block,
+ blk, blk->name);
+
+ /* if there are NO attributes OR no attribute pointer
+ * then continue on to next block iteration
+ */
+ if ((nr_attrib == 0) || (attrib_spec == NULL))
+ continue;
+
+ /* setup the attribute array for this block */
+ blk->nr_attribs = nr_attrib;
+ /* each (instance, block) pair gets its own slice of
+ * nr_attrib attribute entries
+ */
+ attrib_p = &dev_attrib[(instance * nr_blocks + block) * nr_attrib];
+ blk->block_attributes = attrib_p;
+
+ debugf4("%s() THIS BLOCK_ATTRIB=%p\n",
+ __func__, blk->block_attributes);
+
+ /* Initialize every user specified attribute in this
+ * block with the data the caller passed in
+ * Each block gets its own copy of pointers,
+ * and its unique 'value'
+ */
+ for (attr = 0; attr < nr_attrib; attr++) {
+ attrib = &attrib_p[attr];
+
+ /* populate the unique per attrib
+ * with the code pointers and info
+ */
+ attrib->attr = attrib_spec[attr].attr;
+ attrib->show = attrib_spec[attr].show;
+ attrib->store = attrib_spec[attr].store;
+
+ attrib->block = blk; /* up link */
+
+ debugf4("%s() alloc-attrib=%p attrib_name='%s' "
+ "attrib-spec=%p spec-name=%s\n",
+ __func__, attrib, attrib->attr.name,
+ &attrib_spec[attr],
+ attrib_spec[attr].attr.name
+ );
+ }
+ }
+ }
+
+ /* Mark this instance as merely ALLOCATED */
+ dev_ctl->op_state = OP_ALLOC;
+
+ /*
+ * Initialize the 'root' kobj for the edac_device controller
+ */
+ err = edac_device_register_sysfs_main_kobj(dev_ctl);
+ if (err) {
+ kfree(dev_ctl);
+ return NULL;
+ }
+
+ /* at this point, the root kobj is valid, and in order to
+ * 'free' the object, then the function:
+ * edac_device_unregister_sysfs_main_kobj() must be called
+ * which will perform kobj unregistration and the actual free
+ * will occur during the kobject callback operation
+ */
+
+ return dev_ctl;
+}
+EXPORT_SYMBOL_GPL(edac_device_alloc_ctl_info);
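+
+/* Illustrative call (a sketch, assuming the parameter order declared in
+ * edac_core.h; 'struct my_pvt' and the names are placeholders): one
+ * "cpu" device instance containing one "l2" block, no extra attributes:
+ *
+ *   struct edac_device_ctl_info *edac_dev;
+ *
+ *   edac_dev = edac_device_alloc_ctl_info(sizeof(struct my_pvt),
+ *                                         "cpu", 1, "l2", 1, 0,
+ *                                         NULL, 0, 0);
+ *   if (edac_dev == NULL)
+ *       return -ENOMEM;
+ */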
+
+/*
+ * edac_device_free_ctl_info()
+ * frees the memory allocated by the edac_device_alloc_ctl_info()
+ * function
+ */
+void edac_device_free_ctl_info(struct edac_device_ctl_info *ctl_info)
+{
+ edac_device_unregister_sysfs_main_kobj(ctl_info);
+}
+EXPORT_SYMBOL_GPL(edac_device_free_ctl_info);
+
+/*
+ * find_edac_device_by_dev
+ * scans the edac_device list for a specific 'struct device *'
+ *
+ * lock to be held prior to call: device_ctls_mutex
+ *
+ * Return:
+ * pointer to control structure managing 'dev'
+ * NULL if not found on list
+ */
+static struct edac_device_ctl_info *find_edac_device_by_dev(struct device *dev)
+{
+ struct edac_device_ctl_info *edac_dev;
+ struct list_head *item;
+
+ debugf0("%s()\n", __func__);
+
+ list_for_each(item, &edac_device_list) {
+ edac_dev = list_entry(item, struct edac_device_ctl_info, link);
+
+ if (edac_dev->dev == dev)
+ return edac_dev;
+ }
+
+ return NULL;
+}
+
+/*
+ * add_edac_dev_to_global_list
+ * Before calling this function, caller must
+ * assign a unique value to edac_dev->dev_idx.
+ *
+ * lock to be held prior to call: device_ctls_mutex
+ *
+ * Return:
+ * 0 on success
+ * 1 on failure.
+ */
+static int add_edac_dev_to_global_list(struct edac_device_ctl_info *edac_dev)
+{
+ struct list_head *item, *insert_before;
+ struct edac_device_ctl_info *rover;
+
+ insert_before = &edac_device_list;
+
+ /* Determine if already on the list */
+ rover = find_edac_device_by_dev(edac_dev->dev);
+ if (unlikely(rover != NULL))
+ goto fail0;
+
+ /* Insert in ascending order by 'dev_idx', so find position */
+ list_for_each(item, &edac_device_list) {
+ rover = list_entry(item, struct edac_device_ctl_info, link);
+
+ if (rover->dev_idx >= edac_dev->dev_idx) {
+ if (unlikely(rover->dev_idx == edac_dev->dev_idx))
+ goto fail1;
+
+ insert_before = item;
+ break;
+ }
+ }
+
+ list_add_tail_rcu(&edac_dev->link, insert_before);
+ return 0;
+
+fail0:
+ edac_printk(KERN_WARNING, EDAC_MC,
+ "%s (%s) %s %s already assigned %d\n",
+ rover->dev->bus_id, edac_dev_name(rover),
+ rover->mod_name, rover->ctl_name, rover->dev_idx);
+ return 1;
+
+fail1:
+ edac_printk(KERN_WARNING, EDAC_MC,
+ "bug in low-level driver: attempt to assign\n"
+ " duplicate dev_idx %d in %s()\n", rover->dev_idx,
+ __func__);
+ return 1;
+}
+
+/*
+ * complete_edac_device_list_del
+ *
+ * callback function when reference count is zero
+ */
+static void complete_edac_device_list_del(struct rcu_head *head)
+{
+ struct edac_device_ctl_info *edac_dev;
+
+ edac_dev = container_of(head, struct edac_device_ctl_info, rcu);
+ INIT_LIST_HEAD(&edac_dev->link);
+ complete(&edac_dev->removal_complete);
+}
+
+/*
+ * del_edac_device_from_global_list
+ *
+ * remove the RCU, setup for a callback call,
+ * then wait for the callback to occur
+ */
+static void del_edac_device_from_global_list(struct edac_device_ctl_info
+ *edac_device)
+{
+ list_del_rcu(&edac_device->link);
+
+ init_completion(&edac_device->removal_complete);
+ call_rcu(&edac_device->rcu, complete_edac_device_list_del);
+ wait_for_completion(&edac_device->removal_complete);
+}
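+
+/* For illustration only: the reader side this RCU deletion protects is
+ * assumed to walk the list under rcu_read_lock(), roughly:
+ *
+ *   rcu_read_lock();
+ *   list_for_each_entry_rcu(edac_dev, &edac_device_list, link)
+ *       ...use edac_dev...
+ *   rcu_read_unlock();
+ */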
+
+/*
+ * edac_device_workq_function
+ * performs the operation scheduled by a workq request
+ *
+ * this workq is embedded within an edac_device_ctl_info
+ * structure, that needs to be polled for possible error events.
+ *
+ * This operation is to acquire the list mutex lock
+ * (thus preventing insertion or deletion)
+ * and then call the device's poll function IFF this device is
+ * running polled and there is a poll function defined.
+ */
+static void edac_device_workq_function(struct work_struct *work_req)
+{
+ struct delayed_work *d_work = (struct delayed_work *)work_req;
+ struct edac_device_ctl_info *edac_dev = to_edac_device_ctl_work(d_work);
+
+ mutex_lock(&device_ctls_mutex);
+
+ /* If we are being removed, bail out immediately */
+ if (edac_dev->op_state == OP_OFFLINE) {
+ mutex_unlock(&device_ctls_mutex);
+ return;
+ }
+
+ /* Only poll controllers that are running polled and have a check */
+ if ((edac_dev->op_state == OP_RUNNING_POLL) &&
+ (edac_dev->edac_check != NULL)) {
+ edac_dev->edac_check(edac_dev);
+ }
+
+ mutex_unlock(&device_ctls_mutex);
+
+ /* Reschedule the workq for the next time period to start again.
+ * If the period is 1000 msec, round the expiry to the next
+ * whole second so the 1-second pollers fire together instead of
+ * scattering timers across the interval between integral seconds
+ */
+ if (edac_dev->poll_msec == 1000)
+ queue_delayed_work(edac_workqueue, &edac_dev->work,
+ round_jiffies_relative(edac_dev->delay));
+ else
+ queue_delayed_work(edac_workqueue, &edac_dev->work,
+ edac_dev->delay);
+}
+
+/*
+ * edac_device_workq_setup
+ * initialize a workq item for this edac_device instance
+ * passing in the new delay period in msec
+ */
+void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
+ unsigned msec)
+{
+ debugf0("%s()\n", __func__);
+
+ /* take the arg 'msec' and store it in the control structure
+ * to be used in the time period calculation,
+ * then calc the number of jiffies that it represents
+ */
+ edac_dev->poll_msec = msec;
+ edac_dev->delay = msecs_to_jiffies(msec);
+
+ INIT_DELAYED_WORK(&edac_dev->work, edac_device_workq_function);
+
+ /* optimize here for the 1 second case, which will be the normal
+ * value, to fire ON the 1 second time event. This helps reduce
+ * sub-second timer firings, since such pollers are happy
+ * to fire together exactly on the 1 second boundary
+ */
+ if (edac_dev->poll_msec == 1000)
+ queue_delayed_work(edac_workqueue, &edac_dev->work,
+ round_jiffies_relative(edac_dev->delay));
+ else
+ queue_delayed_work(edac_workqueue, &edac_dev->work,
+ edac_dev->delay);
+}
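+
+/* Worked example (for illustration): with HZ=250, a 1000 msec period is
+ * 250 jiffies; round_jiffies_relative(250) adjusts that delta so the
+ * absolute expiry lands on a whole-second boundary, letting all 1-second
+ * pollers fire together instead of waking the CPU at scattered times.
+ */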
+
+/*
+ * edac_device_workq_teardown
+ * stop the workq processing on this edac_dev
+ */
+void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev)
+{
+ int status;
+
+ status = cancel_delayed_work(&edac_dev->work);
+ if (status == 0) {
+ /* workq instance might be running, wait for it */
+ flush_workqueue(edac_workqueue);
+ }
+}
+
+/*
+ * edac_device_reset_delay_period
+ *
+ * need to stop any outstanding workq queued up at this time
+ * because we will be resetting the sleep time.
+ * Then restart the workq on the new delay
+ */
+void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
+ unsigned long value)
+{
+ /* cancel the current workq request, without the mutex lock */
+ edac_device_workq_teardown(edac_dev);
+
+ /* acquire the mutex before doing the workq setup */
+ mutex_lock(&device_ctls_mutex);
+
+ /* restart the workq request, with new delay value */
+ edac_device_workq_setup(edac_dev, value);
+
+ mutex_unlock(&device_ctls_mutex);
+}
+
+/**
+ * edac_device_add_device: Insert the 'edac_dev' structure into the
+ * edac_device global list and create sysfs entries associated with the
+ * edac_device structure.
+ * @edac_dev: pointer to the edac_device structure to be added to the list
+ *
+ * Return:
+ * 0 Success
+ * !0 Failure
+ */
+int edac_device_add_device(struct edac_device_ctl_info *edac_dev)
+{
+ debugf0("%s()\n", __func__);
+
+#ifdef CONFIG_EDAC_DEBUG
+ if (edac_debug_level >= 3)
+ edac_device_dump_device(edac_dev);
+#endif
+ mutex_lock(&device_ctls_mutex);
+
+ if (add_edac_dev_to_global_list(edac_dev))
+ goto fail0;
+
+ /* set load time so that error rate can be tracked */
+ edac_dev->start_time = jiffies;
+
+ /* create this instance's sysfs entries */
+ if (edac_device_create_sysfs(edac_dev)) {
+ edac_device_printk(edac_dev, KERN_WARNING,
+ "failed to create sysfs device\n");
+ goto fail1;
+ }
+
+ /* If there IS a check routine, then we are running POLLED */
+ if (edac_dev->edac_check != NULL) {
+ /* This instance is NOW RUNNING */
+ edac_dev->op_state = OP_RUNNING_POLL;
+
+ /*
+ * enable workq processing on this instance,
+ * default = 1000 msec
+ */
+ edac_device_workq_setup(edac_dev, 1000);
+ } else {
+ edac_dev->op_state = OP_RUNNING_INTERRUPT;
+ }
+
+ /* Report action taken */
+ edac_device_printk(edac_dev, KERN_INFO,
+ "Giving out device to module '%s' controller "
+ "'%s': DEV '%s' (%s)\n",
+ edac_dev->mod_name,
+ edac_dev->ctl_name,
+ edac_dev_name(edac_dev),
+ edac_op_state_to_string(edac_dev->op_state));
+
+ mutex_unlock(&device_ctls_mutex);
+ return 0;
+
+fail1:
+ /* Some error, so remove the entry from the list */
+ del_edac_device_from_global_list(edac_dev);
+
+fail0:
+ mutex_unlock(&device_ctls_mutex);
+ return 1;
+}
+EXPORT_SYMBOL_GPL(edac_device_add_device);
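+
+/* Illustrative probe-time sequence (a sketch, not taken from this file;
+ * 'pdev', "my_edac" and my_check are placeholder names): a low-level
+ * driver fills in identification fields and an optional poll callback,
+ * then hands the structure over:
+ *
+ *   edac_dev->dev = &pdev->dev;
+ *   edac_dev->mod_name = "my_edac";
+ *   edac_dev->ctl_name = "my_ctl";
+ *   edac_dev->edac_check = my_check;  (may be NULL for interrupt mode)
+ *
+ *   if (edac_device_add_device(edac_dev))
+ *       goto fail;
+ */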
+
+/**
+ * edac_device_del_device:
+ * Remove sysfs entries for specified edac_device structure and
+ * then remove edac_device structure from global list
+ *
+ * @dev:
+ * Pointer to 'struct device' representing edac_device
+ * structure to remove.
+ *
+ * Return:
+ * Pointer to removed edac_device structure,
+ * OR NULL if device not found.
+ */
+struct edac_device_ctl_info *edac_device_del_device(struct device *dev)
+{
+ struct edac_device_ctl_info *edac_dev;
+
+ debugf0("%s()\n", __func__);
+
+ mutex_lock(&device_ctls_mutex);
+
+ /* Find the structure on the list, if not there, then leave */
+ edac_dev = find_edac_device_by_dev(dev);
+ if (edac_dev == NULL) {
+ mutex_unlock(&device_ctls_mutex);
+ return NULL;
+ }
+
+ /* mark this instance as OFFLINE */
+ edac_dev->op_state = OP_OFFLINE;
+
+ /* deregister from global list */
+ del_edac_device_from_global_list(edac_dev);
+
+ mutex_unlock(&device_ctls_mutex);
+
+ /* clear workq processing on this instance */
+ edac_device_workq_teardown(edac_dev);
+
+ /* Tear down the sysfs entries for this instance */
+ edac_device_remove_sysfs(edac_dev);
+
+ edac_printk(KERN_INFO, EDAC_MC,
+ "Removed device %d for %s %s: DEV %s\n",
+ edac_dev->dev_idx,
+ edac_dev->mod_name, edac_dev->ctl_name, edac_dev_name(edac_dev));
+
+ return edac_dev;
+}
+EXPORT_SYMBOL_GPL(edac_device_del_device);
+
+static inline int edac_device_get_log_ce(struct edac_device_ctl_info *edac_dev)
+{
+ return edac_dev->log_ce;
+}
+
+static inline int edac_device_get_log_ue(struct edac_device_ctl_info *edac_dev)
+{
+ return edac_dev->log_ue;
+}
+
+static inline int edac_device_get_panic_on_ue(struct edac_device_ctl_info
+ *edac_dev)
+{
+ return edac_dev->panic_on_ue;
+}
+
+/*
+ * edac_device_handle_ce
+ * perform a common output and handling of an 'edac_dev' CE event
+ */
+void edac_device_handle_ce(struct edac_device_ctl_info *edac_dev,
+ int inst_nr, int block_nr, const char *msg)
+{
+ struct edac_device_instance *instance;
+ struct edac_device_block *block = NULL;
+
+ if ((inst_nr >= edac_dev->nr_instances) || (inst_nr < 0)) {
+ edac_device_printk(edac_dev, KERN_ERR,
+ "INTERNAL ERROR: 'instance' out of range "
+ "(%d >= %d)\n", inst_nr,
+ edac_dev->nr_instances);
+ return;
+ }
+
+ instance = edac_dev->instances + inst_nr;
+
+ if ((block_nr >= instance->nr_blocks) || (block_nr < 0)) {
+ edac_device_printk(edac_dev, KERN_ERR,
+ "INTERNAL ERROR: instance %d 'block' "
+ "out of range (%d >= %d)\n",
+ inst_nr, block_nr,
+ instance->nr_blocks);
+ return;
+ }
+
+ if (instance->nr_blocks > 0) {
+ block = instance->blocks + block_nr;
+ block->counters.ce_count++;
+ }
+
+ /* Propagate the count up the 'totals' tree */
+ instance->counters.ce_count++;
+ edac_dev->counters.ce_count++;
+
+ if (edac_device_get_log_ce(edac_dev))
+ edac_device_printk(edac_dev, KERN_WARNING,
+ "CE: %s instance: %s block: %s '%s'\n",
+ edac_dev->ctl_name, instance->name,
+ block ? block->name : "N/A", msg);
+}
+EXPORT_SYMBOL_GPL(edac_device_handle_ce);
+
+/*
+ * edac_device_handle_ue
+ * perform a common output and handling of an 'edac_dev' UE event
+ */
+void edac_device_handle_ue(struct edac_device_ctl_info *edac_dev,
+ int inst_nr, int block_nr, const char *msg)
+{
+ struct edac_device_instance *instance;
+ struct edac_device_block *block = NULL;
+
+ if ((inst_nr >= edac_dev->nr_instances) || (inst_nr < 0)) {
+ edac_device_printk(edac_dev, KERN_ERR,
+ "INTERNAL ERROR: 'instance' out of range "
+ "(%d >= %d)\n", inst_nr,
+ edac_dev->nr_instances);
+ return;
+ }
+
+ instance = edac_dev->instances + inst_nr;
+
+ if ((block_nr >= instance->nr_blocks) || (block_nr < 0)) {
+ edac_device_printk(edac_dev, KERN_ERR,
+ "INTERNAL ERROR: instance %d 'block' "
+ "out of range (%d >= %d)\n",
+ inst_nr, block_nr,
+ instance->nr_blocks);
+ return;
+ }
+
+ if (instance->nr_blocks > 0) {
+ block = instance->blocks + block_nr;
+ block->counters.ue_count++;
+ }
+
+ /* Propagate the count up the 'totals' tree */
+ instance->counters.ue_count++;
+ edac_dev->counters.ue_count++;
+
+ if (edac_device_get_log_ue(edac_dev))
+ edac_device_printk(edac_dev, KERN_EMERG,
+ "UE: %s instance: %s block: %s '%s'\n",
+ edac_dev->ctl_name, instance->name,
+ block ? block->name : "N/A", msg);
+
+ if (edac_device_get_panic_on_ue(edac_dev))
+ panic("EDAC %s: UE instance: %s block %s '%s'\n",
+ edac_dev->ctl_name, instance->name,
+ block ? block->name : "N/A", msg);
+}
+EXPORT_SYMBOL_GPL(edac_device_handle_ue);
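+
+/* Illustrative sketch of a driver's poll callback using the two handlers
+ * above (the instance/block numbers, messages and hw accessors are
+ * hypothetical):
+ *
+ *   static void my_check(struct edac_device_ctl_info *edac_dev)
+ *   {
+ *       if (my_read_ce_status())
+ *           edac_device_handle_ce(edac_dev, 0, 0, "ce hit");
+ *       if (my_read_ue_status())
+ *           edac_device_handle_ue(edac_dev, 0, 0, "ue hit");
+ *   }
+ */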
diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c
new file mode 100644
index 0000000..5376457
--- /dev/null
+++ b/drivers/edac/edac_device_sysfs.c
@@ -0,0 +1,881 @@
+/*
+ * file for managing the edac_device class of devices for EDAC
+ *
+ * (C) 2007 SoftwareBitMaker (http://www.softwarebitmaker.com)
+ *
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written by Doug Thompson <norsk5@xmission.com>
+ *
+ */
+
+#include <linux/ctype.h>
+#include <linux/module.h>
+
+#include "edac_core.h"
+#include "edac_module.h"
+
+#define EDAC_DEVICE_SYMLINK "device"
+
+#define to_edacdev(k) container_of(k, struct edac_device_ctl_info, kobj)
+#define to_edacdev_attr(a) container_of(a, struct edacdev_attribute, attr)
+
+
+/*
+ * Set of edac_device_ctl_info attribute store/show functions
+ */
+
+/* 'log_ue' */
+static ssize_t edac_device_ctl_log_ue_show(struct edac_device_ctl_info
+ *ctl_info, char *data)
+{
+ return sprintf(data, "%u\n", ctl_info->log_ue);
+}
+
+static ssize_t edac_device_ctl_log_ue_store(struct edac_device_ctl_info
+ *ctl_info, const char *data,
+ size_t count)
+{
+ /* if parameter is zero, turn off flag, if non-zero turn on flag */
+ ctl_info->log_ue = (simple_strtoul(data, NULL, 0) != 0);
+
+ return count;
+}
+
+/* 'log_ce' */
+static ssize_t edac_device_ctl_log_ce_show(struct edac_device_ctl_info
+ *ctl_info, char *data)
+{
+ return sprintf(data, "%u\n", ctl_info->log_ce);
+}
+
+static ssize_t edac_device_ctl_log_ce_store(struct edac_device_ctl_info
+ *ctl_info, const char *data,
+ size_t count)
+{
+ /* if parameter is zero, turn off flag, if non-zero turn on flag */
+ ctl_info->log_ce = (simple_strtoul(data, NULL, 0) != 0);
+
+ return count;
+}
+
+/* 'panic_on_ue' */
+static ssize_t edac_device_ctl_panic_on_ue_show(struct edac_device_ctl_info
+ *ctl_info, char *data)
+{
+ return sprintf(data, "%u\n", ctl_info->panic_on_ue);
+}
+
+static ssize_t edac_device_ctl_panic_on_ue_store(struct edac_device_ctl_info
+ *ctl_info, const char *data,
+ size_t count)
+{
+ /* if parameter is zero, turn off flag, if non-zero turn on flag */
+ ctl_info->panic_on_ue = (simple_strtoul(data, NULL, 0) != 0);
+
+ return count;
+}
+
+/* 'poll_msec' show and store functions*/
+static ssize_t edac_device_ctl_poll_msec_show(struct edac_device_ctl_info
+ *ctl_info, char *data)
+{
+ return sprintf(data, "%u\n", ctl_info->poll_msec);
+}
+
+static ssize_t edac_device_ctl_poll_msec_store(struct edac_device_ctl_info
+ *ctl_info, const char *data,
+ size_t count)
+{
+ unsigned long value;
+
+ /* get the value; it is expected to be non-zero, i.e. at least
+ * one millisecond for the delay period between scans.
+ * Then cancel the last outstanding delayed work request
+ * and set a new one.
+ */
+ value = simple_strtoul(data, NULL, 0);
+ edac_device_reset_delay_period(ctl_info, value);
+
+ return count;
+}
+
+/* edac_device_ctl_info specific attribute structure */
+struct ctl_info_attribute {
+ struct attribute attr;
+ ssize_t(*show) (struct edac_device_ctl_info *, char *);
+ ssize_t(*store) (struct edac_device_ctl_info *, const char *, size_t);
+};
+
+#define to_ctl_info(k) container_of(k, struct edac_device_ctl_info, kobj)
+#define to_ctl_info_attr(a) container_of(a, struct ctl_info_attribute, attr)
+
+/* Function to 'show' fields from the edac_dev 'ctl_info' structure */
+static ssize_t edac_dev_ctl_info_show(struct kobject *kobj,
+ struct attribute *attr, char *buffer)
+{
+ struct edac_device_ctl_info *edac_dev = to_ctl_info(kobj);
+ struct ctl_info_attribute *ctl_info_attr = to_ctl_info_attr(attr);
+
+ if (ctl_info_attr->show)
+ return ctl_info_attr->show(edac_dev, buffer);
+ return -EIO;
+}
+
+/* Function to 'store' fields into the edac_dev 'ctl_info' structure */
+static ssize_t edac_dev_ctl_info_store(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buffer, size_t count)
+{
+ struct edac_device_ctl_info *edac_dev = to_ctl_info(kobj);
+ struct ctl_info_attribute *ctl_info_attr = to_ctl_info_attr(attr);
+
+ if (ctl_info_attr->store)
+ return ctl_info_attr->store(edac_dev, buffer, count);
+ return -EIO;
+}
+
+/* edac_dev file operations for an 'ctl_info' */
+static struct sysfs_ops device_ctl_info_ops = {
+ .show = edac_dev_ctl_info_show,
+ .store = edac_dev_ctl_info_store
+};
+
+#define CTL_INFO_ATTR(_name, _mode, _show, _store) \
+static struct ctl_info_attribute attr_ctl_info_##_name = { \
+ .attr = {.name = __stringify(_name), .mode = _mode }, \
+ .show = _show, \
+ .store = _store, \
+};
+
+/* Declare the various ctl_info attributes here and their respective ops */
+CTL_INFO_ATTR(log_ue, S_IRUGO | S_IWUSR,
+ edac_device_ctl_log_ue_show, edac_device_ctl_log_ue_store);
+CTL_INFO_ATTR(log_ce, S_IRUGO | S_IWUSR,
+ edac_device_ctl_log_ce_show, edac_device_ctl_log_ce_store);
+CTL_INFO_ATTR(panic_on_ue, S_IRUGO | S_IWUSR,
+ edac_device_ctl_panic_on_ue_show,
+ edac_device_ctl_panic_on_ue_store);
+CTL_INFO_ATTR(poll_msec, S_IRUGO | S_IWUSR,
+ edac_device_ctl_poll_msec_show, edac_device_ctl_poll_msec_store);
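+
+/* The four attributes above appear as files under this control's kobject,
+ * e.g. /sys/devices/system/edac/<name>/panic_on_ue (path shown for
+ * illustration). Writing a non-zero value turns the three flags on and
+ * zero turns them off; poll_msec instead takes a new period in msec.
+ */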
+
+/* Base Attributes of the EDAC_DEVICE ECC object */
+static struct ctl_info_attribute *device_ctrl_attr[] = {
+ &attr_ctl_info_panic_on_ue,
+ &attr_ctl_info_log_ue,
+ &attr_ctl_info_log_ce,
+ &attr_ctl_info_poll_msec,
+ NULL,
+};
+
+/*
+ * edac_device_ctrl_master_release
+ *
+ * called when the reference count for the 'main' kobj
+ * for a edac_device control struct reaches zero
+ *
+ * Reference count model:
+ * One 'main' kobject for each control structure allocated.
+ * That main kobj is initially set to one AND
+ * the reference count for the EDAC 'core' module is
+ * bumped by one, thus adding a 'keep in memory' dependency.
+ *
+ * Each new internal kobj (in instances and blocks) then
+ * bumps the 'main' kobject.
+ *
+ * When they are released their release functions decrement
+ * the 'main' kobj.
+ *
+ * When the main kobj reaches zero (0) then THIS function
+ * is called which then decrements the EDAC 'core' module.
+ * When the module reference count reaches zero then the
+ * module no longer has dependency on keeping the release
+ * function code in memory and module can be unloaded.
+ *
+ * This will support several control objects as well, each
+ * with its own 'main' kobj.
+ */
+static void edac_device_ctrl_master_release(struct kobject *kobj)
+{
+ struct edac_device_ctl_info *edac_dev = to_edacdev(kobj);
+
+ debugf4("%s() control index=%d\n", __func__, edac_dev->dev_idx);
+
+ /* decrement the EDAC CORE module ref count */
+ module_put(edac_dev->owner);
+
+ /* free the control struct containing the 'main' kobj
+ * passed in to this routine
+ */
+ kfree(edac_dev);
+}
+
+/* ktype for the main (master) kobject */
+static struct kobj_type ktype_device_ctrl = {
+ .release = edac_device_ctrl_master_release,
+ .sysfs_ops = &device_ctl_info_ops,
+ .default_attrs = (struct attribute **)device_ctrl_attr,
+};
+
+/*
+ * edac_device_register_sysfs_main_kobj
+ *
+ * perform the high level setup for the new edac_device instance
+ *
+ * Return: 0 SUCCESS
+ * !0 FAILURE
+ */
+int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev)
+{
+ struct sysdev_class *edac_class;
+ int err;
+
+ debugf1("%s()\n", __func__);
+
+ /* get the /sys/devices/system/edac reference */
+ edac_class = edac_get_edac_class();
+ if (edac_class == NULL) {
+ debugf1("%s() no edac_class error\n", __func__);
+ err = -ENODEV;
+ goto err_out;
+ }
+
+ /* Point to the 'edac_class' this instance 'reports' to */
+ edac_dev->edac_class = edac_class;
+
+ /* Init the device's kobject */
+ memset(&edac_dev->kobj, 0, sizeof(struct kobject));
+
+ /* Record which module 'owns' this control structure
+ * and bump the ref count of the module
+ */
+ edac_dev->owner = THIS_MODULE;
+
+ if (!try_module_get(edac_dev->owner)) {
+ err = -ENODEV;
+ goto err_out;
+ }
+
+ /* register */
+ err = kobject_init_and_add(&edac_dev->kobj, &ktype_device_ctrl,
+ &edac_class->kset.kobj,
+ "%s", edac_dev->name);
+ if (err) {
+ debugf1("%s()Failed to register '.../edac/%s'\n",
+ __func__, edac_dev->name);
+ goto err_kobj_reg;
+ }
+ kobject_uevent(&edac_dev->kobj, KOBJ_ADD);
+
+ /* At this point, to 'free' the control struct,
+ * edac_device_unregister_sysfs_main_kobj() must be used
+ */
+
+ debugf4("%s() Registered '.../edac/%s' kobject\n",
+ __func__, edac_dev->name);
+
+ return 0;
+
+ /* Error exit stack */
+err_kobj_reg:
+ module_put(edac_dev->owner);
+
+err_out:
+ return err;
+}
+
+/*
+ * edac_device_unregister_sysfs_main_kobj:
+ * the '..../edac/<name>' kobject
+ */
+void edac_device_unregister_sysfs_main_kobj(
+ struct edac_device_ctl_info *edac_dev)
+{
+ debugf0("%s()\n", __func__);
+ debugf4("%s() name of kobject is: %s\n",
+ __func__, kobject_name(&edac_dev->kobj));
+
+ /*
+ * Unregister the edac device's kobject and
+ * allow for reference count to reach 0 at which point
+ * the callback will be called to:
+ * a) module_put() this module
+ * b) 'kfree' the memory
+ */
+ kobject_put(&edac_dev->kobj);
+}
+
+/* edac_dev -> instance information */
+
+/*
+ * Set of low-level instance attribute show functions
+ */
+static ssize_t instance_ue_count_show(struct edac_device_instance *instance,
+ char *data)
+{
+ return sprintf(data, "%u\n", instance->counters.ue_count);
+}
+
+static ssize_t instance_ce_count_show(struct edac_device_instance *instance,
+ char *data)
+{
+ return sprintf(data, "%u\n", instance->counters.ce_count);
+}
+
+#define to_instance(k) container_of(k, struct edac_device_instance, kobj)
+#define to_instance_attr(a) container_of(a, struct instance_attribute, attr)
+
+/* DEVICE instance kobject release() function */
+static void edac_device_ctrl_instance_release(struct kobject *kobj)
+{
+ struct edac_device_instance *instance;
+
+ debugf1("%s()\n", __func__);
+
+ /* map from this kobj to the main control struct
+ * and then dec the main kobj count
+ */
+ instance = to_instance(kobj);
+ kobject_put(&instance->ctl->kobj);
+}
+
+/* instance specific attribute structure */
+struct instance_attribute {
+ struct attribute attr;
+ ssize_t(*show) (struct edac_device_instance *, char *);
+ ssize_t(*store) (struct edac_device_instance *, const char *, size_t);
+};
+
+/* Function to 'show' fields from the edac_dev 'instance' structure */
+static ssize_t edac_dev_instance_show(struct kobject *kobj,
+ struct attribute *attr, char *buffer)
+{
+ struct edac_device_instance *instance = to_instance(kobj);
+ struct instance_attribute *instance_attr = to_instance_attr(attr);
+
+ if (instance_attr->show)
+ return instance_attr->show(instance, buffer);
+ return -EIO;
+}
+
+/* Function to 'store' fields into the edac_dev 'instance' structure */
+static ssize_t edac_dev_instance_store(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buffer, size_t count)
+{
+ struct edac_device_instance *instance = to_instance(kobj);
+ struct instance_attribute *instance_attr = to_instance_attr(attr);
+
+ if (instance_attr->store)
+ return instance_attr->store(instance, buffer, count);
+ return -EIO;
+}
+
+/* edac_dev file operations for an 'instance' */
+static struct sysfs_ops device_instance_ops = {
+ .show = edac_dev_instance_show,
+ .store = edac_dev_instance_store
+};
+
+#define INSTANCE_ATTR(_name, _mode, _show, _store) \
+static struct instance_attribute attr_instance_##_name = { \
+ .attr = {.name = __stringify(_name), .mode = _mode }, \
+ .show = _show, \
+ .store = _store, \
+};
+
+/*
+ * Define attributes visible for the edac_device instance object
+ * Each contains a pointer to a show and an optional set
+ * function pointer that does the low level output/input
+ */
+INSTANCE_ATTR(ce_count, S_IRUGO, instance_ce_count_show, NULL);
+INSTANCE_ATTR(ue_count, S_IRUGO, instance_ue_count_show, NULL);
+
+/* list of edac_dev 'instance' attributes */
+static struct instance_attribute *device_instance_attr[] = {
+ &attr_instance_ce_count,
+ &attr_instance_ue_count,
+ NULL,
+};
+
+/* The 'ktype' for each edac_dev 'instance' */
+static struct kobj_type ktype_instance_ctrl = {
+ .release = edac_device_ctrl_instance_release,
+ .sysfs_ops = &device_instance_ops,
+ .default_attrs = (struct attribute **)device_instance_attr,
+};
+
+/* edac_dev -> instance -> block information */
+
+#define to_block(k) container_of(k, struct edac_device_block, kobj)
+#define to_block_attr(a) \
+ container_of(a, struct edac_dev_sysfs_block_attribute, attr)
+
+/*
+ * Set of low-level block attribute show functions
+ */
+static ssize_t block_ue_count_show(struct kobject *kobj,
+ struct attribute *attr, char *data)
+{
+ struct edac_device_block *block = to_block(kobj);
+
+ return sprintf(data, "%u\n", block->counters.ue_count);
+}
+
+static ssize_t block_ce_count_show(struct kobject *kobj,
+ struct attribute *attr, char *data)
+{
+ struct edac_device_block *block = to_block(kobj);
+
+ return sprintf(data, "%u\n", block->counters.ce_count);
+}
+
+/* DEVICE block kobject release() function */
+static void edac_device_ctrl_block_release(struct kobject *kobj)
+{
+ struct edac_device_block *block;
+
+ debugf1("%s()\n", __func__);
+
+ /* get the container of the kobj */
+ block = to_block(kobj);
+
+ /* map from 'block kobj' to 'block->instance->controller->main_kobj'
+ * now 'release' the block kobject
+ */
+ kobject_put(&block->instance->ctl->kobj);
+}
+
+
+/* Function to 'show' fields from the edac_dev 'block' structure */
+static ssize_t edac_dev_block_show(struct kobject *kobj,
+ struct attribute *attr, char *buffer)
+{
+ struct edac_dev_sysfs_block_attribute *block_attr =
+ to_block_attr(attr);
+
+ if (block_attr->show)
+ return block_attr->show(kobj, attr, buffer);
+ return -EIO;
+}
+
+/* Function to 'store' fields into the edac_dev 'block' structure */
+static ssize_t edac_dev_block_store(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buffer, size_t count)
+{
+ struct edac_dev_sysfs_block_attribute *block_attr;
+
+ block_attr = to_block_attr(attr);
+
+ if (block_attr->store)
+ return block_attr->store(kobj, attr, buffer, count);
+ return -EIO;
+}
+
+/* edac_dev file operations for a 'block' */
+static struct sysfs_ops device_block_ops = {
+ .show = edac_dev_block_show,
+ .store = edac_dev_block_store
+};
+
+#define BLOCK_ATTR(_name, _mode, _show, _store) \
+static struct edac_dev_sysfs_block_attribute attr_block_##_name = { \
+ .attr = {.name = __stringify(_name), .mode = _mode }, \
+ .show = _show, \
+ .store = _store, \
+};
+
+BLOCK_ATTR(ce_count, S_IRUGO, block_ce_count_show, NULL);
+BLOCK_ATTR(ue_count, S_IRUGO, block_ue_count_show, NULL);
+
+/* list of edac_dev 'block' attributes */
+static struct edac_dev_sysfs_block_attribute *device_block_attr[] = {
+ &attr_block_ce_count,
+ &attr_block_ue_count,
+ NULL,
+};
+
+/* The 'ktype' for each edac_dev 'block' */
+static struct kobj_type ktype_block_ctrl = {
+ .release = edac_device_ctrl_block_release,
+ .sysfs_ops = &device_block_ops,
+ .default_attrs = (struct attribute **)device_block_attr,
+};
+
+/* block ctor/dtor code */
+
+/*
+ * edac_device_create_block
+ */
+static int edac_device_create_block(struct edac_device_ctl_info *edac_dev,
+ struct edac_device_instance *instance,
+ struct edac_device_block *block)
+{
+ int i;
+ int err;
+ struct edac_dev_sysfs_block_attribute *sysfs_attrib;
+ struct kobject *main_kobj;
+
+ debugf4("%s() Instance '%s' inst_p=%p block '%s' block_p=%p\n",
+ __func__, instance->name, instance, block->name, block);
+ debugf4("%s() block kobj=%p block kobj->parent=%p\n",
+ __func__, &block->kobj, block->kobj.parent);
+
+ /* init this block's kobject */
+ memset(&block->kobj, 0, sizeof(struct kobject));
+
+ /* bump the main kobject's reference count for this controller,
+ * as this block is dependent on the main kobj
+ */
+ main_kobj = kobject_get(&edac_dev->kobj);
+ if (!main_kobj) {
+ err = -ENODEV;
+ goto err_out;
+ }
+
+ /* Add this block's kobject */
+ err = kobject_init_and_add(&block->kobj, &ktype_block_ctrl,
+ &instance->kobj,
+ "%s", block->name);
+ if (err) {
+ debugf1("%s() Failed to register instance '%s'\n",
+ __func__, block->name);
+ kobject_put(main_kobj);
+ err = -ENODEV;
+ goto err_out;
+ }
+
+ /* If there are driver-level block attributes, then add them
+ * to the block kobject
+ */
+ sysfs_attrib = block->block_attributes;
+ if (sysfs_attrib && block->nr_attribs) {
+ for (i = 0; i < block->nr_attribs; i++, sysfs_attrib++) {
+
+ debugf4("%s() creating block attrib='%s' "
+ "attrib->%p to kobj=%p\n",
+ __func__,
+ sysfs_attrib->attr.name,
+ sysfs_attrib, &block->kobj);
+
+ /* Create each block_attribute file */
+ err = sysfs_create_file(&block->kobj,
+ &sysfs_attrib->attr);
+ if (err)
+ goto err_on_attrib;
+ }
+ }
+ kobject_uevent(&block->kobj, KOBJ_ADD);
+
+ return 0;
+
+ /* Error unwind stack */
+err_on_attrib:
+ kobject_put(&block->kobj);
+
+err_out:
+ return err;
+}
+
+/*
+ * edac_device_delete_block(edac_dev,block);
+ */
+static void edac_device_delete_block(struct edac_device_ctl_info *edac_dev,
+ struct edac_device_block *block)
+{
+ struct edac_dev_sysfs_block_attribute *sysfs_attrib;
+ int i;
+
+ /* if this block has 'attributes' then we need to iterate over the list
+ * and 'remove' the attributes on this block
+ */
+ sysfs_attrib = block->block_attributes;
+ if (sysfs_attrib && block->nr_attribs) {
+ for (i = 0; i < block->nr_attribs; i++, sysfs_attrib++) {
+
+ /* remove each block_attrib file */
+ sysfs_remove_file(&block->kobj,
+ (struct attribute *) sysfs_attrib);
+ }
+ }
+
+ /* unregister this block's kobject, SEE:
+ * edac_device_ctrl_block_release() callback operation
+ */
+ kobject_put(&block->kobj);
+}
+
+/* instance ctor/dtor code */
+
+/*
+ * edac_device_create_instance
+ * create just one instance of an edac_device 'instance'
+ */
+static int edac_device_create_instance(struct edac_device_ctl_info *edac_dev,
+ int idx)
+{
+ int i, j;
+ int err;
+ struct edac_device_instance *instance;
+ struct kobject *main_kobj;
+
+ instance = &edac_dev->instances[idx];
+
+ /* Init the instance's kobject */
+ memset(&instance->kobj, 0, sizeof(struct kobject));
+
+ instance->ctl = edac_dev;
+
+ /* bump the main kobject's reference count for this controller,
+ * as this instance is dependent on the main kobj
+ */
+ main_kobj = kobject_get(&edac_dev->kobj);
+ if (!main_kobj) {
+ err = -ENODEV;
+ goto err_out;
+ }
+
+ /* Formally register this instance's kobject under the edac_device */
+ err = kobject_init_and_add(&instance->kobj, &ktype_instance_ctrl,
+ &edac_dev->kobj, "%s", instance->name);
+ if (err != 0) {
+ debugf2("%s() Failed to register instance '%s'\n",
+ __func__, instance->name);
+ kobject_put(main_kobj);
+ goto err_out;
+ }
+
+ debugf4("%s() now register '%d' blocks for instance %d\n",
+ __func__, instance->nr_blocks, idx);
+
+ /* register all blocks of this instance */
+ for (i = 0; i < instance->nr_blocks; i++) {
+ err = edac_device_create_block(edac_dev, instance,
+ &instance->blocks[i]);
+ if (err) {
+ /* If any fail, remove all previous ones */
+ for (j = 0; j < i; j++)
+ edac_device_delete_block(edac_dev,
+ &instance->blocks[j]);
+ goto err_release_instance_kobj;
+ }
+ }
+ kobject_uevent(&instance->kobj, KOBJ_ADD);
+
+ debugf4("%s() Registered instance %d '%s' kobject\n",
+ __func__, idx, instance->name);
+
+ return 0;
+
+ /* error unwind stack */
+err_release_instance_kobj:
+ kobject_put(&instance->kobj);
+
+err_out:
+ return err;
+}
+
+/*
+ * edac_device_delete_instance
+ * remove an edac_device instance
+ */
+static void edac_device_delete_instance(struct edac_device_ctl_info *edac_dev,
+ int idx)
+{
+ struct edac_device_instance *instance;
+ int i;
+
+ instance = &edac_dev->instances[idx];
+
+ /* unregister all blocks in this instance */
+ for (i = 0; i < instance->nr_blocks; i++)
+ edac_device_delete_block(edac_dev, &instance->blocks[i]);
+
+ /* unregister this instance's kobject, SEE:
+ * edac_device_ctrl_instance_release() for callback operation
+ */
+ kobject_put(&instance->kobj);
+}
+
+/*
+ * edac_device_create_instances
+ * create the first level of 'instances' for this device
+ * (i.e. 'cache' might have 'cache0', 'cache1', 'cache2', etc.)
+ */
+static int edac_device_create_instances(struct edac_device_ctl_info *edac_dev)
+{
+ int i, j;
+ int err;
+
+ debugf0("%s()\n", __func__);
+
+ /* iterate over creation of the instances */
+ for (i = 0; i < edac_dev->nr_instances; i++) {
+ err = edac_device_create_instance(edac_dev, i);
+ if (err) {
+ /* unwind previous instances on error */
+ for (j = 0; j < i; j++)
+ edac_device_delete_instance(edac_dev, j);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * edac_device_delete_instances(edac_dev);
+ * unregister all the kobjects of the instances
+ */
+static void edac_device_delete_instances(struct edac_device_ctl_info *edac_dev)
+{
+ int i;
+
+ /* iterate over the instances, deleting each */
+ for (i = 0; i < edac_dev->nr_instances; i++)
+ edac_device_delete_instance(edac_dev, i);
+}
+
+/* edac_dev sysfs ctor/dtor code */
+
+/*
+ * edac_device_add_main_sysfs_attributes
+ * add some attributes to this instance's main kobject
+ */
+static int edac_device_add_main_sysfs_attributes(
+ struct edac_device_ctl_info *edac_dev)
+{
+ struct edac_dev_sysfs_attribute *sysfs_attrib;
+ int err = 0;
+
+ sysfs_attrib = edac_dev->sysfs_attributes;
+ if (sysfs_attrib) {
+ /* iterate over the array and create an attribute for each
+ * entry in the list
+ */
+ while (sysfs_attrib->attr.name != NULL) {
+ err = sysfs_create_file(&edac_dev->kobj,
+ (struct attribute *) sysfs_attrib);
+ if (err)
+ goto err_out;
+
+ sysfs_attrib++;
+ }
+ }
+
+err_out:
+ return err;
+}
+
+/*
+ * edac_device_remove_main_sysfs_attributes
+ * remove any attributes from this instance's main kobject
+ */
+static void edac_device_remove_main_sysfs_attributes(
+ struct edac_device_ctl_info *edac_dev)
+{
+ struct edac_dev_sysfs_attribute *sysfs_attrib;
+
+ /* if there are main attributes defined, remove them: point to
+ * the start of the array and iterate over it,
+ * removing each attribute listed from this device's kobject
+ */
+ sysfs_attrib = edac_dev->sysfs_attributes;
+ if (sysfs_attrib) {
+ while (sysfs_attrib->attr.name != NULL) {
+ sysfs_remove_file(&edac_dev->kobj,
+ (struct attribute *) sysfs_attrib);
+ sysfs_attrib++;
+ }
+ }
+}
+
+/*
+ * edac_device_create_sysfs() Constructor
+ *
+ * accept a created edac_device control structure
+ * and 'export' it to sysfs. The 'main' kobj should already have been
+ * created. 'instance' and 'block' kobjects should be registered
+ * along with any 'block' attributes from the low-level driver. In addition,
+ * the main attributes (if any) are connected to the main kobject of
+ * the control structure.
+ *
+ * Return:
+ * 0 Success
+ * !0 Failure
+ */
+int edac_device_create_sysfs(struct edac_device_ctl_info *edac_dev)
+{
+ int err;
+ struct kobject *edac_kobj = &edac_dev->kobj;
+
+ debugf0("%s() idx=%d\n", __func__, edac_dev->dev_idx);
+
+ /* go create any main attributes the caller wants */
+ err = edac_device_add_main_sysfs_attributes(edac_dev);
+ if (err) {
+ debugf0("%s() failed to add sysfs attribs\n", __func__);
+ goto err_out;
+ }
+
+ /* create a symlink from the edac device
+ * to the platform 'device' being used for this
+ */
+ err = sysfs_create_link(edac_kobj,
+ &edac_dev->dev->kobj, EDAC_DEVICE_SYMLINK);
+ if (err) {
+ debugf0("%s() sysfs_create_link() returned err= %d\n",
+ __func__, err);
+ goto err_remove_main_attribs;
+ }
+
+ /* Create the first level instance directories
+ * In turn, the nested blocks beneath the instances will
+ * be registered as well
+ */
+ err = edac_device_create_instances(edac_dev);
+ if (err) {
+ debugf0("%s() edac_device_create_instances() "
+ "returned err= %d\n", __func__, err);
+ goto err_remove_link;
+ }
+
+ debugf4("%s() create-instances done, idx=%d\n",
+ __func__, edac_dev->dev_idx);
+
+ return 0;
+
+ /* Error unwind stack */
+err_remove_link:
+ /* remove the sym link */
+ sysfs_remove_link(&edac_dev->kobj, EDAC_DEVICE_SYMLINK);
+
+err_remove_main_attribs:
+ edac_device_remove_main_sysfs_attributes(edac_dev);
+
+err_out:
+ return err;
+}
+
+/*
+ * edac_device_remove_sysfs() destructor
+ *
+ * given an edac_device struct, tear down the kobject resources
+ */
+void edac_device_remove_sysfs(struct edac_device_ctl_info *edac_dev)
+{
+ debugf0("%s()\n", __func__);
+
+ /* remove any main attributes for this device */
+ edac_device_remove_main_sysfs_attributes(edac_dev);
+
+ /* remove the device sym link */
+ sysfs_remove_link(&edac_dev->kobj, EDAC_DEVICE_SYMLINK);
+
+ /* walk the instance/block kobject tree, deconstructing it */
+ edac_device_delete_instances(edac_dev);
+}
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
new file mode 100644
index 0000000..d110392
--- /dev/null
+++ b/drivers/edac/edac_mc.c
@@ -0,0 +1,888 @@
+/*
+ * edac_mc kernel module
+ * (C) 2005, 2006 Linux Networx (http://lnxi.com)
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written by Thayne Harbaugh
+ * Based on work by Dan Hollis <goemon at anime dot net> and others.
+ * http://www.anime.net/~goemon/linux-ecc/
+ *
+ * Modified by Dave Peterson and Doug Thompson
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/smp.h>
+#include <linux/init.h>
+#include <linux/sysctl.h>
+#include <linux/highmem.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/sysdev.h>
+#include <linux/ctype.h>
+#include <linux/edac.h>
+#include <asm/uaccess.h>
+#include <asm/page.h>
+#include <asm/edac.h>
+#include "edac_core.h"
+#include "edac_module.h"
+
+/* lock to memory controller's control array */
+static DEFINE_MUTEX(mem_ctls_mutex);
+static LIST_HEAD(mc_devices);
+
+#ifdef CONFIG_EDAC_DEBUG
+
+static void edac_mc_dump_channel(struct channel_info *chan)
+{
+ debugf4("\tchannel = %p\n", chan);
+ debugf4("\tchannel->chan_idx = %d\n", chan->chan_idx);
+ debugf4("\tchannel->ce_count = %d\n", chan->ce_count);
+ debugf4("\tchannel->label = '%s'\n", chan->label);
+ debugf4("\tchannel->csrow = %p\n\n", chan->csrow);
+}
+
+static void edac_mc_dump_csrow(struct csrow_info *csrow)
+{
+ debugf4("\tcsrow = %p\n", csrow);
+ debugf4("\tcsrow->csrow_idx = %d\n", csrow->csrow_idx);
+ debugf4("\tcsrow->first_page = 0x%lx\n", csrow->first_page);
+ debugf4("\tcsrow->last_page = 0x%lx\n", csrow->last_page);
+ debugf4("\tcsrow->page_mask = 0x%lx\n", csrow->page_mask);
+ debugf4("\tcsrow->nr_pages = 0x%x\n", csrow->nr_pages);
+ debugf4("\tcsrow->nr_channels = %d\n", csrow->nr_channels);
+ debugf4("\tcsrow->channels = %p\n", csrow->channels);
+ debugf4("\tcsrow->mci = %p\n\n", csrow->mci);
+}
+
+static void edac_mc_dump_mci(struct mem_ctl_info *mci)
+{
+ debugf3("\tmci = %p\n", mci);
+ debugf3("\tmci->mtype_cap = %lx\n", mci->mtype_cap);
+ debugf3("\tmci->edac_ctl_cap = %lx\n", mci->edac_ctl_cap);
+ debugf3("\tmci->edac_cap = %lx\n", mci->edac_cap);
+ debugf4("\tmci->edac_check = %p\n", mci->edac_check);
+ debugf3("\tmci->nr_csrows = %d, csrows = %p\n",
+ mci->nr_csrows, mci->csrows);
+ debugf3("\tdev = %p\n", mci->dev);
+ debugf3("\tmod_name:ctl_name = %s:%s\n", mci->mod_name, mci->ctl_name);
+ debugf3("\tpvt_info = %p\n\n", mci->pvt_info);
+}
+
+#endif /* CONFIG_EDAC_DEBUG */
+
+/* 'ptr' points to a possibly unaligned item X such that sizeof(X) is 'size'.
+ * Adjust 'ptr' so that its alignment is at least as stringent as what the
+ * compiler would provide for X and return the aligned result.
+ *
+ * If 'size' is a constant, the compiler will optimize this whole function
+ * down to either a no-op or the addition of a constant to the value of 'ptr'.
+ */
+void *edac_align_ptr(void *ptr, unsigned size)
+{
+ unsigned align, r;
+
+ /* Here we assume that the alignment of a "long long" is the most
+ * stringent alignment that the compiler will ever provide by default.
+ * As far as I know, this is a reasonable assumption.
+ */
+ if (size > sizeof(long))
+ align = sizeof(long long);
+ else if (size > sizeof(int))
+ align = sizeof(long);
+ else if (size > sizeof(short))
+ align = sizeof(int);
+ else if (size > sizeof(char))
+ align = sizeof(short);
+ else
+ return (char *)ptr;
+
+ r = size % align;
+
+ if (r == 0)
+ return (char *)ptr;
+
+ return (void *)(((unsigned long)ptr) + align - r);
+}
+
+/**
+ * edac_mc_alloc: Allocate a struct mem_ctl_info structure
+ * @size_pvt: size of private storage needed
+ * @nr_csrows: Number of CSROWS needed for this MC
+ * @nr_chans: Number of channels for the MC
+ *
+ * Everything is kmalloc'ed as one big chunk - more efficient.
+ * It can only be used if all structures have the same lifetime - otherwise
+ * you have to allocate and initialize your own structures.
+ *
+ * Use edac_mc_free() to free mc structures allocated by this function.
+ *
+ * Returns:
+ * NULL allocation failed
+ * struct mem_ctl_info pointer
+ */
+struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
+ unsigned nr_chans, int edac_index)
+{
+ struct mem_ctl_info *mci;
+ struct csrow_info *csi, *csrow;
+ struct channel_info *chi, *chp, *chan;
+ void *pvt;
+ unsigned size;
+ int row, chn;
+ int err;
+
+ /* Figure out the offsets of the various items from the start of an mc
+ * structure. We want the alignment of each item to be at least as
+ * stringent as what the compiler would provide if we could simply
+ * hardcode everything into a single struct.
+ */
+ mci = (struct mem_ctl_info *)0;
+ csi = edac_align_ptr(&mci[1], sizeof(*csi));
+ chi = edac_align_ptr(&csi[nr_csrows], sizeof(*chi));
+ pvt = edac_align_ptr(&chi[nr_chans * nr_csrows], sz_pvt);
+ size = ((unsigned long)pvt) + sz_pvt;
+
+ mci = kzalloc(size, GFP_KERNEL);
+ if (mci == NULL)
+ return NULL;
+
+ /* Adjust pointers so they point within the memory we just allocated
+ * rather than an imaginary chunk of memory located at address 0.
+ */
+ csi = (struct csrow_info *)(((char *)mci) + ((unsigned long)csi));
+ chi = (struct channel_info *)(((char *)mci) + ((unsigned long)chi));
+ pvt = sz_pvt ? (((char *)mci) + ((unsigned long)pvt)) : NULL;
+
+ /* setup index and various internal pointers */
+ mci->mc_idx = edac_index;
+ mci->csrows = csi;
+ mci->pvt_info = pvt;
+ mci->nr_csrows = nr_csrows;
+
+ for (row = 0; row < nr_csrows; row++) {
+ csrow = &csi[row];
+ csrow->csrow_idx = row;
+ csrow->mci = mci;
+ csrow->nr_channels = nr_chans;
+ chp = &chi[row * nr_chans];
+ csrow->channels = chp;
+
+ for (chn = 0; chn < nr_chans; chn++) {
+ chan = &chp[chn];
+ chan->chan_idx = chn;
+ chan->csrow = csrow;
+ }
+ }
+
+ mci->op_state = OP_ALLOC;
+
+ /*
+ * Initialize the 'root' kobj for the edac_mc controller
+ */
+ err = edac_mc_register_sysfs_main_kobj(mci);
+ if (err) {
+ kfree(mci);
+ return NULL;
+ }
+
+ /* at this point, the root kobj is valid, and in order to
+ * 'free' the object, then the function:
+ * edac_mc_unregister_sysfs_main_kobj() must be called
+ * which will perform kobj unregistration and the actual free
+ * will occur during the kobject callback operation
+ */
+ return mci;
+}
+EXPORT_SYMBOL_GPL(edac_mc_alloc);
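+
+/* Illustrative call (a sketch; 'struct my_pvt' and the counts are
+ * placeholders): a four-csrow, two-channel controller at index 0:
+ *
+ *   struct mem_ctl_info *mci;
+ *
+ *   mci = edac_mc_alloc(sizeof(struct my_pvt), 4, 2, 0);
+ *   if (mci == NULL)
+ *       return -ENOMEM;
+ */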
+
+/**
+ * edac_mc_free
+ * 'Free' a previously allocated 'mci' structure
+ * @mci: pointer to a struct mem_ctl_info structure
+ */
+void edac_mc_free(struct mem_ctl_info *mci)
+{
+ edac_mc_unregister_sysfs_main_kobj(mci);
+}
+EXPORT_SYMBOL_GPL(edac_mc_free);
+
+/*
+ * find_mci_by_dev
+ *
+ * scan list of controllers looking for the one that manages
+ * the 'dev' device
+ */
+static struct mem_ctl_info *find_mci_by_dev(struct device *dev)
+{
+ struct mem_ctl_info *mci;
+ struct list_head *item;
+
+ debugf3("%s()\n", __func__);
+
+ list_for_each(item, &mc_devices) {
+ mci = list_entry(item, struct mem_ctl_info, link);
+
+ if (mci->dev == dev)
+ return mci;
+ }
+
+ return NULL;
+}
+
+/*
+ * handler for EDAC to check if NMI type handler has asserted interrupt
+ */
+static int edac_mc_assert_error_check_and_clear(void)
+{
+ int old_state;
+
+ if (edac_op_state == EDAC_OPSTATE_POLL)
+ return 1;
+
+ old_state = edac_err_assert;
+ edac_err_assert = 0;
+
+ return old_state;
+}
+
+/*
+ * edac_mc_workq_function
+ * performs the operation scheduled by a workq request
+ */
+static void edac_mc_workq_function(struct work_struct *work_req)
+{
+ struct delayed_work *d_work = (struct delayed_work *)work_req;
+ struct mem_ctl_info *mci = to_edac_mem_ctl_work(d_work);
+
+ mutex_lock(&mem_ctls_mutex);
+
+ /* if this control struct has moved to the offline state, we are done */
+ if (mci->op_state == OP_OFFLINE) {
+ mutex_unlock(&mem_ctls_mutex);
+ return;
+ }
+
+ /* Only poll controllers that are running polled and have a check */
+ if (edac_mc_assert_error_check_and_clear() && (mci->edac_check != NULL))
+ mci->edac_check(mci);
+
+ mutex_unlock(&mem_ctls_mutex);
+
+ /* Reschedule */
+ queue_delayed_work(edac_workqueue, &mci->work,
+ msecs_to_jiffies(edac_mc_get_poll_msec()));
+}
+
+/*
+ * edac_mc_workq_setup
+ * initialize a workq item for this mci
+ * passing in the new delay period in msec
+ *
+ * locking model:
+ *
+ * called with the mem_ctls_mutex held
+ */
+static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec)
+{
+ debugf0("%s()\n", __func__);
+
+ /* if this instance is not in the POLL state, then simply return */
+ if (mci->op_state != OP_RUNNING_POLL)
+ return;
+
+ INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
+ queue_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec));
+}
+
+/*
+ * edac_mc_workq_teardown
+ * stop the workq processing on this mci
+ *
+ * locking model:
+ *
+ * called WITHOUT lock held
+ */
+static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
+{
+ int status;
+
+ status = cancel_delayed_work(&mci->work);
+ if (status == 0) {
+ debugf0("%s() not canceled, flush the queue\n",
+ __func__);
+
+ /* workq instance might be running, wait for it */
+ flush_workqueue(edac_workqueue);
+ }
+}
+
+/*
+ * edac_mc_reset_delay_period(int value)
+ *
+ * user space has updated our poll period value, need to
+ * reset our workq delays
+ */
+void edac_mc_reset_delay_period(int value)
+{
+ struct mem_ctl_info *mci;
+ struct list_head *item;
+
+ mutex_lock(&mem_ctls_mutex);
+
+ /* scan the list and turn off all workq timers, doing so under lock
+ */
+ list_for_each(item, &mc_devices) {
+ mci = list_entry(item, struct mem_ctl_info, link);
+
+ if (mci->op_state == OP_RUNNING_POLL)
+ cancel_delayed_work(&mci->work);
+ }
+
+ mutex_unlock(&mem_ctls_mutex);
+
+ /* re-walk the list, and reset the poll delay */
+ mutex_lock(&mem_ctls_mutex);
+
+ list_for_each(item, &mc_devices) {
+ mci = list_entry(item, struct mem_ctl_info, link);
+
+ edac_mc_workq_setup(mci, (unsigned long) value);
+ }
+
+ mutex_unlock(&mem_ctls_mutex);
+}
+
+/* Return 0 on success, 1 on failure.
+ * Before calling this function, caller must
+ * assign a unique value to mci->mc_idx.
+ *
+ * locking model:
+ *
+ * called with the mem_ctls_mutex lock held
+ */
+static int add_mc_to_global_list(struct mem_ctl_info *mci)
+{
+ struct list_head *item, *insert_before;
+ struct mem_ctl_info *p;
+
+ insert_before = &mc_devices;
+
+ p = find_mci_by_dev(mci->dev);
+ if (unlikely(p != NULL))
+ goto fail0;
+
+ list_for_each(item, &mc_devices) {
+ p = list_entry(item, struct mem_ctl_info, link);
+
+ if (p->mc_idx >= mci->mc_idx) {
+ if (unlikely(p->mc_idx == mci->mc_idx))
+ goto fail1;
+
+ insert_before = item;
+ break;
+ }
+ }
+
+ list_add_tail_rcu(&mci->link, insert_before);
+ atomic_inc(&edac_handlers);
+ return 0;
+
+fail0:
+ edac_printk(KERN_WARNING, EDAC_MC,
+ "%s (%s) %s %s already assigned %d\n", p->dev->bus_id,
+ edac_dev_name(mci), p->mod_name, p->ctl_name, p->mc_idx);
+ return 1;
+
+fail1:
+ edac_printk(KERN_WARNING, EDAC_MC,
+ "bug in low-level driver: attempt to assign\n"
+ " duplicate mc_idx %d in %s()\n", p->mc_idx, __func__);
+ return 1;
+}
+
+static void complete_mc_list_del(struct rcu_head *head)
+{
+ struct mem_ctl_info *mci;
+
+ mci = container_of(head, struct mem_ctl_info, rcu);
+ INIT_LIST_HEAD(&mci->link);
+ complete(&mci->complete);
+}
+
+static void del_mc_from_global_list(struct mem_ctl_info *mci)
+{
+ atomic_dec(&edac_handlers);
+ list_del_rcu(&mci->link);
+ init_completion(&mci->complete);
+ call_rcu(&mci->rcu, complete_mc_list_del);
+ wait_for_completion(&mci->complete);
+}
+
+/**
+ * edac_mc_find: Search for a mem_ctl_info structure whose index is 'idx'.
+ *
+ * If found, return a pointer to the structure.
+ * Else return NULL.
+ *
+ * Caller must hold mem_ctls_mutex.
+ */
+struct mem_ctl_info *edac_mc_find(int idx)
+{
+ struct list_head *item;
+ struct mem_ctl_info *mci;
+
+ list_for_each(item, &mc_devices) {
+ mci = list_entry(item, struct mem_ctl_info, link);
+
+ if (mci->mc_idx >= idx) {
+ if (mci->mc_idx == idx)
+ return mci;
+
+ break;
+ }
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(edac_mc_find);
+
+/**
+ * edac_mc_add_mc: Insert the 'mci' structure into the mci global list and
+ * create sysfs entries associated with mci structure
+ * @mci: pointer to the mci structure to be added to the list; the caller
+ * must have already assigned a unique numeric identifier to mci->mc_idx
+ *
+ * Return:
+ * 0 Success
+ * !0 Failure
+ */
+
+/* FIXME - should a warning be printed if no error detection? correction? */
+int edac_mc_add_mc(struct mem_ctl_info *mci)
+{
+ debugf0("%s()\n", __func__);
+
+#ifdef CONFIG_EDAC_DEBUG
+ if (edac_debug_level >= 3)
+ edac_mc_dump_mci(mci);
+
+ if (edac_debug_level >= 4) {
+ int i;
+
+ for (i = 0; i < mci->nr_csrows; i++) {
+ int j;
+
+ edac_mc_dump_csrow(&mci->csrows[i]);
+ for (j = 0; j < mci->csrows[i].nr_channels; j++)
+ edac_mc_dump_channel(&mci->csrows[i].
+ channels[j]);
+ }
+ }
+#endif
+ mutex_lock(&mem_ctls_mutex);
+
+ if (add_mc_to_global_list(mci))
+ goto fail0;
+
+ /* set load time so that error rate can be tracked */
+ mci->start_time = jiffies;
+
+ if (edac_create_sysfs_mci_device(mci)) {
+ edac_mc_printk(mci, KERN_WARNING,
+ "failed to create sysfs device\n");
+ goto fail1;
+ }
+
+ /* If there IS a check routine, then we are running POLLED */
+ if (mci->edac_check != NULL) {
+ /* This instance is NOW RUNNING */
+ mci->op_state = OP_RUNNING_POLL;
+
+ edac_mc_workq_setup(mci, edac_mc_get_poll_msec());
+ } else {
+ mci->op_state = OP_RUNNING_INTERRUPT;
+ }
+
+ /* Report action taken */
+ edac_mc_printk(mci, KERN_INFO, "Giving out device to '%s' '%s':"
+ " DEV %s\n", mci->mod_name, mci->ctl_name, edac_dev_name(mci));
+
+ mutex_unlock(&mem_ctls_mutex);
+ return 0;
+
+fail1:
+ del_mc_from_global_list(mci);
+
+fail0:
+ mutex_unlock(&mem_ctls_mutex);
+ return 1;
+}
+EXPORT_SYMBOL_GPL(edac_mc_add_mc);
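+
+/* Illustrative registration sequence (a sketch; the field values, flag
+ * choices and my_mc_check are placeholders), as a PCI-based driver might
+ * perform it at probe time:
+ *
+ *   mci->dev = &pdev->dev;
+ *   mci->mtype_cap = MEM_FLAG_DDR;
+ *   mci->edac_ctl_cap = EDAC_FLAG_SECDED;
+ *   mci->mod_name = "my_mc_edac";
+ *   mci->ctl_name = "my_ctl";
+ *   mci->edac_check = my_mc_check;  (polled operation when non-NULL)
+ *
+ *   if (edac_mc_add_mc(mci))
+ *       goto fail;
+ */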
+
+/**
+ * edac_mc_del_mc: Remove sysfs entries for specified mci structure and
+ * remove mci structure from global list
+ * @dev: Pointer to 'struct device' representing mci structure to remove.
+ *
+ * Return pointer to removed mci structure, or NULL if device not found.
+ */
+struct mem_ctl_info *edac_mc_del_mc(struct device *dev)
+{
+ struct mem_ctl_info *mci;
+
+ debugf0("%s()\n", __func__);
+
+ mutex_lock(&mem_ctls_mutex);
+
+ /* find the requested mci struct in the global list */
+ mci = find_mci_by_dev(dev);
+ if (mci == NULL) {
+ mutex_unlock(&mem_ctls_mutex);
+ return NULL;
+ }
+
+ /* marking MCI offline */
+ mci->op_state = OP_OFFLINE;
+
+ del_mc_from_global_list(mci);
+ mutex_unlock(&mem_ctls_mutex);
+
+ /* flush workq processes and remove sysfs */
+ edac_mc_workq_teardown(mci);
+ edac_remove_sysfs_mci_device(mci);
+
+ edac_printk(KERN_INFO, EDAC_MC,
+ "Removed device %d for %s %s: DEV %s\n", mci->mc_idx,
+ mci->mod_name, mci->ctl_name, edac_dev_name(mci));
+
+ return mci;
+}
+EXPORT_SYMBOL_GPL(edac_mc_del_mc);
+
+static void edac_mc_scrub_block(unsigned long page, unsigned long offset,
+ u32 size)
+{
+ struct page *pg;
+ void *virt_addr;
+ unsigned long flags = 0;
+
+ debugf3("%s()\n", __func__);
+
+ /* ECC error page was not in our memory. Ignore it. */
+ if (!pfn_valid(page))
+ return;
+
+ /* Find the actual page structure then map it and fix */
+ pg = pfn_to_page(page);
+
+ if (PageHighMem(pg))
+ local_irq_save(flags);
+
+ virt_addr = kmap_atomic(pg, KM_BOUNCE_READ);
+
+ /* Perform architecture specific atomic scrub operation */
+ atomic_scrub(virt_addr + offset, size);
+
+ /* Unmap and complete */
+ kunmap_atomic(virt_addr, KM_BOUNCE_READ);
+
+ if (PageHighMem(pg))
+ local_irq_restore(flags);
+}
+
+/* FIXME - should return -1 */
+int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page)
+{
+ struct csrow_info *csrows = mci->csrows;
+ int row, i;
+
+ debugf1("MC%d: %s(): 0x%lx\n", mci->mc_idx, __func__, page);
+ row = -1;
+
+ for (i = 0; i < mci->nr_csrows; i++) {
+ struct csrow_info *csrow = &csrows[i];
+
+ if (csrow->nr_pages == 0)
+ continue;
+
+ debugf3("MC%d: %s(): first(0x%lx) page(0x%lx) last(0x%lx) "
+ "mask(0x%lx)\n", mci->mc_idx, __func__,
+ csrow->first_page, page, csrow->last_page,
+ csrow->page_mask);
+
+ if ((page >= csrow->first_page) &&
+ (page <= csrow->last_page) &&
+ ((page & csrow->page_mask) ==
+ (csrow->first_page & csrow->page_mask))) {
+ row = i;
+ break;
+ }
+ }
+
+ if (row == -1)
+ edac_mc_printk(mci, KERN_ERR,
+ "could not look up page error address %lx\n",
+ (unsigned long)page);
+
+ return row;
+}
+EXPORT_SYMBOL_GPL(edac_mc_find_csrow_by_page);
+
+/* FIXME - settable log (warning/emerg) levels */
+/* FIXME - integrate with evlog: http://evlog.sourceforge.net/ */
+void edac_mc_handle_ce(struct mem_ctl_info *mci,
+ unsigned long page_frame_number,
+ unsigned long offset_in_page, unsigned long syndrome,
+ int row, int channel, const char *msg)
+{
+ unsigned long remapped_page;
+
+ debugf3("MC%d: %s()\n", mci->mc_idx, __func__);
+
+ /* FIXME - maybe make panic on INTERNAL ERROR an option */
+ if (row >= mci->nr_csrows || row < 0) {
+ /* something is wrong */
+ edac_mc_printk(mci, KERN_ERR,
+ "INTERNAL ERROR: row out of range "
+ "(%d >= %d)\n", row, mci->nr_csrows);
+ edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
+ return;
+ }
+
+ if (channel >= mci->csrows[row].nr_channels || channel < 0) {
+ /* something is wrong */
+ edac_mc_printk(mci, KERN_ERR,
+ "INTERNAL ERROR: channel out of range "
+ "(%d >= %d)\n", channel,
+ mci->csrows[row].nr_channels);
+ edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
+ return;
+ }
+
+ if (edac_mc_get_log_ce())
+ /* FIXME - put in DIMM location */
+ edac_mc_printk(mci, KERN_WARNING,
+ "CE page 0x%lx, offset 0x%lx, grain %d, syndrome "
+ "0x%lx, row %d, channel %d, label \"%s\": %s\n",
+ page_frame_number, offset_in_page,
+ mci->csrows[row].grain, syndrome, row, channel,
+ mci->csrows[row].channels[channel].label, msg);
+
+ mci->ce_count++;
+ mci->csrows[row].ce_count++;
+ mci->csrows[row].channels[channel].ce_count++;
+
+ if (mci->scrub_mode & SCRUB_SW_SRC) {
+ /*
+ * Some MC's can remap memory so that it is still available
+ * at a different address when PCI devices map into memory.
+ * MC's that can't do this lose the memory where PCI devices
+ * are mapped. This mapping is MC dependent and so we call
+ * back into the MC driver for it to map the MC page to
+ * a physical (CPU) page which can then be mapped to a virtual
+ * page - which can then be scrubbed.
+ */
+ remapped_page = mci->ctl_page_to_phys ?
+ mci->ctl_page_to_phys(mci, page_frame_number) :
+ page_frame_number;
+
+ edac_mc_scrub_block(remapped_page, offset_in_page,
+ mci->csrows[row].grain);
+ }
+}
+EXPORT_SYMBOL_GPL(edac_mc_handle_ce);
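+
+/*
+ * Usage sketch (illustrative only): a chipset driver's periodic
+ * edac_check() callback would decode its error registers and then
+ * report through this API, e.g.:
+ *
+ *	if (my_decode_error(mci, &info))	(hypothetical helper)
+ *		edac_mc_handle_ce(mci, info.page, info.offset,
+ *				  info.syndrome, info.row, info.chan,
+ *				  "corrected error");
+ */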
+
+void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci, const char *msg)
+{
+ if (edac_mc_get_log_ce())
+ edac_mc_printk(mci, KERN_WARNING,
+ "CE - no information available: %s\n", msg);
+
+ mci->ce_noinfo_count++;
+ mci->ce_count++;
+}
+EXPORT_SYMBOL_GPL(edac_mc_handle_ce_no_info);
+
+void edac_mc_handle_ue(struct mem_ctl_info *mci,
+ unsigned long page_frame_number,
+ unsigned long offset_in_page, int row, const char *msg)
+{
+ int len = EDAC_MC_LABEL_LEN * 4;
+ char labels[len + 1];
+ char *pos = labels;
+ int chan;
+ int chars;
+
+ debugf3("MC%d: %s()\n", mci->mc_idx, __func__);
+
+ /* FIXME - maybe make panic on INTERNAL ERROR an option */
+ if (row >= mci->nr_csrows || row < 0) {
+ /* something is wrong */
+ edac_mc_printk(mci, KERN_ERR,
+ "INTERNAL ERROR: row out of range "
+ "(%d >= %d)\n", row, mci->nr_csrows);
+ edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
+ return;
+ }
+
+ chars = snprintf(pos, len + 1, "%s",
+ mci->csrows[row].channels[0].label);
+ len -= chars;
+ pos += chars;
+
+ for (chan = 1; (chan < mci->csrows[row].nr_channels) && (len > 0);
+ chan++) {
+ chars = snprintf(pos, len + 1, ":%s",
+ mci->csrows[row].channels[chan].label);
+ len -= chars;
+ pos += chars;
+ }
+
+ if (edac_mc_get_log_ue())
+ edac_mc_printk(mci, KERN_EMERG,
+ "UE page 0x%lx, offset 0x%lx, grain %d, row %d, "
+ "labels \"%s\": %s\n", page_frame_number,
+ offset_in_page, mci->csrows[row].grain, row,
+ labels, msg);
+
+ if (edac_mc_get_panic_on_ue())
+ panic("EDAC MC%d: UE page 0x%lx, offset 0x%lx, grain %d, "
+ "row %d, labels \"%s\": %s\n", mci->mc_idx,
+ page_frame_number, offset_in_page,
+ mci->csrows[row].grain, row, labels, msg);
+
+ mci->ue_count++;
+ mci->csrows[row].ue_count++;
+}
+EXPORT_SYMBOL_GPL(edac_mc_handle_ue);
+
+void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci, const char *msg)
+{
+ if (edac_mc_get_panic_on_ue())
+ panic("EDAC MC%d: Uncorrected Error", mci->mc_idx);
+
+ if (edac_mc_get_log_ue())
+ edac_mc_printk(mci, KERN_WARNING,
+ "UE - no information available: %s\n", msg);
+ mci->ue_noinfo_count++;
+ mci->ue_count++;
+}
+EXPORT_SYMBOL_GPL(edac_mc_handle_ue_no_info);
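+
+/*
+ * Note for drivers: when a UE is detected but cannot be resolved to a
+ * csrow (e.g. the address decode failed), fall back to
+ * edac_mc_handle_ue_no_info(), which still honors the panic_on_ue
+ * policy and bumps the noinfo counters, e.g.:
+ *
+ *	edac_mc_handle_ue_no_info(mci, "address decode failed");
+ */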
+
+/*************************************************************
+ * On Fully Buffered DIMM modules, this helper function is
+ * called to process UE events
+ */
+void edac_mc_handle_fbd_ue(struct mem_ctl_info *mci,
+ unsigned int csrow,
+ unsigned int channela,
+ unsigned int channelb, char *msg)
+{
+ int len = EDAC_MC_LABEL_LEN * 4;
+ char labels[len + 1];
+ char *pos = labels;
+ int chars;
+
+ if (csrow >= mci->nr_csrows) {
+ /* something is wrong */
+ edac_mc_printk(mci, KERN_ERR,
+ "INTERNAL ERROR: row out of range (%d >= %d)\n",
+ csrow, mci->nr_csrows);
+ edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
+ return;
+ }
+
+ if (channela >= mci->csrows[csrow].nr_channels) {
+ /* something is wrong */
+ edac_mc_printk(mci, KERN_ERR,
+ "INTERNAL ERROR: channel-a out of range "
+ "(%d >= %d)\n",
+ channela, mci->csrows[csrow].nr_channels);
+ edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
+ return;
+ }
+
+ if (channelb >= mci->csrows[csrow].nr_channels) {
+ /* something is wrong */
+ edac_mc_printk(mci, KERN_ERR,
+ "INTERNAL ERROR: channel-b out of range "
+ "(%d >= %d)\n",
+ channelb, mci->csrows[csrow].nr_channels);
+ edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
+ return;
+ }
+
+ mci->ue_count++;
+ mci->csrows[csrow].ue_count++;
+
+ /* Generate the DIMM labels from the specified channels */
+ chars = snprintf(pos, len + 1, "%s",
+ mci->csrows[csrow].channels[channela].label);
+ len -= chars;
+ pos += chars;
+ chars = snprintf(pos, len + 1, "-%s",
+ mci->csrows[csrow].channels[channelb].label);
+
+ if (edac_mc_get_log_ue())
+ edac_mc_printk(mci, KERN_EMERG,
+ "UE row %d, channel-a= %d channel-b= %d "
+ "labels \"%s\": %s\n", csrow, channela, channelb,
+ labels, msg);
+
+ if (edac_mc_get_panic_on_ue())
+ panic("UE row %d, channel-a= %d channel-b= %d "
+ "labels \"%s\": %s\n", csrow, channela,
+ channelb, labels, msg);
+}
+EXPORT_SYMBOL(edac_mc_handle_fbd_ue);
+
+/*************************************************************
+ * On Fully Buffered DIMM modules, this helper function is
+ * called to process CE events
+ */
+void edac_mc_handle_fbd_ce(struct mem_ctl_info *mci,
+ unsigned int csrow, unsigned int channel, char *msg)
+{
+
+ /* Ensure boundary values */
+ if (csrow >= mci->nr_csrows) {
+ /* something is wrong */
+ edac_mc_printk(mci, KERN_ERR,
+ "INTERNAL ERROR: row out of range (%d >= %d)\n",
+ csrow, mci->nr_csrows);
+ edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
+ return;
+ }
+ if (channel >= mci->csrows[csrow].nr_channels) {
+ /* something is wrong */
+ edac_mc_printk(mci, KERN_ERR,
+ "INTERNAL ERROR: channel out of range (%d >= %d)\n",
+ channel, mci->csrows[csrow].nr_channels);
+ edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
+ return;
+ }
+
+ if (edac_mc_get_log_ce())
+ /* FIXME - put in DIMM location */
+ edac_mc_printk(mci, KERN_WARNING,
+ "CE row %d, channel %d, label \"%s\": %s\n",
+ csrow, channel,
+ mci->csrows[csrow].channels[channel].label, msg);
+
+ mci->ce_count++;
+ mci->csrows[csrow].ce_count++;
+ mci->csrows[csrow].channels[channel].ce_count++;
+}
+EXPORT_SYMBOL(edac_mc_handle_fbd_ce);
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
new file mode 100644
index 0000000..ad218fe
--- /dev/null
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -0,0 +1,939 @@
+/*
+ * edac_mc kernel module
+ * (C) 2005-2007 Linux Networx (http://lnxi.com)
+ *
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written Doug Thompson <norsk5@xmission.com> www.softwarebitmaker.com
+ *
+ */
+
+#include <linux/ctype.h>
+#include <linux/bug.h>
+
+#include "edac_core.h"
+#include "edac_module.h"
+
+
+/* MC EDAC Controls, setable by module parameter, and sysfs */
+static int edac_mc_log_ue = 1;
+static int edac_mc_log_ce = 1;
+static int edac_mc_panic_on_ue;
+static int edac_mc_poll_msec = 1000;
+
+/* Getter functions for above */
+int edac_mc_get_log_ue(void)
+{
+ return edac_mc_log_ue;
+}
+
+int edac_mc_get_log_ce(void)
+{
+ return edac_mc_log_ce;
+}
+
+int edac_mc_get_panic_on_ue(void)
+{
+ return edac_mc_panic_on_ue;
+}
+
+/* this is temporary */
+int edac_mc_get_poll_msec(void)
+{
+ return edac_mc_poll_msec;
+}
+
+static int edac_set_poll_msec(const char *val, struct kernel_param *kp)
+{
+ long l;
+ int ret;
+
+ if (!val)
+ return -EINVAL;
+
+ ret = strict_strtol(val, 0, &l);
+ if (ret == -EINVAL || ((int)l != l))
+ return -EINVAL;
+ *((int *)kp->arg) = l;
+
+ /* notify edac_mc engine to reset the poll period */
+ edac_mc_reset_delay_period(l);
+
+ return 0;
+}
+
+/* Parameter declarations for above */
+module_param(edac_mc_panic_on_ue, int, 0644);
+MODULE_PARM_DESC(edac_mc_panic_on_ue, "Panic on uncorrected error: 0=off 1=on");
+module_param(edac_mc_log_ue, int, 0644);
+MODULE_PARM_DESC(edac_mc_log_ue,
+ "Log uncorrectable error to console: 0=off 1=on");
+module_param(edac_mc_log_ce, int, 0644);
+MODULE_PARM_DESC(edac_mc_log_ce,
+ "Log correctable error to console: 0=off 1=on");
+module_param_call(edac_mc_poll_msec, edac_set_poll_msec, param_get_int,
+ &edac_mc_poll_msec, 0644);
+MODULE_PARM_DESC(edac_mc_poll_msec, "Polling period in milliseconds");
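+
+/*
+ * Illustrative usage of the above controls, assuming the core is
+ * built as edac_core.ko:
+ *
+ *	modprobe edac_core edac_mc_poll_msec=500
+ *	echo 1 > /sys/module/edac_core/parameters/edac_mc_panic_on_ue
+ *
+ * Writes to edac_mc_poll_msec go through edac_set_poll_msec() above,
+ * which also resets the running poll period.
+ */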
+
+/*
+ * various constants for Memory Controllers
+ */
+static const char *mem_types[] = {
+ [MEM_EMPTY] = "Empty",
+ [MEM_RESERVED] = "Reserved",
+ [MEM_UNKNOWN] = "Unknown",
+ [MEM_FPM] = "FPM",
+ [MEM_EDO] = "EDO",
+ [MEM_BEDO] = "BEDO",
+ [MEM_SDR] = "Unbuffered-SDR",
+ [MEM_RDR] = "Registered-SDR",
+ [MEM_DDR] = "Unbuffered-DDR",
+ [MEM_RDDR] = "Registered-DDR",
+ [MEM_RMBS] = "RMBS",
+ [MEM_DDR2] = "Unbuffered-DDR2",
+ [MEM_FB_DDR2] = "FullyBuffered-DDR2",
+ [MEM_RDDR2] = "Registered-DDR2",
+ [MEM_XDR] = "XDR"
+};
+
+static const char *dev_types[] = {
+ [DEV_UNKNOWN] = "Unknown",
+ [DEV_X1] = "x1",
+ [DEV_X2] = "x2",
+ [DEV_X4] = "x4",
+ [DEV_X8] = "x8",
+ [DEV_X16] = "x16",
+ [DEV_X32] = "x32",
+ [DEV_X64] = "x64"
+};
+
+static const char *edac_caps[] = {
+ [EDAC_UNKNOWN] = "Unknown",
+ [EDAC_NONE] = "None",
+ [EDAC_RESERVED] = "Reserved",
+ [EDAC_PARITY] = "PARITY",
+ [EDAC_EC] = "EC",
+ [EDAC_SECDED] = "SECDED",
+ [EDAC_S2ECD2ED] = "S2ECD2ED",
+ [EDAC_S4ECD4ED] = "S4ECD4ED",
+ [EDAC_S8ECD8ED] = "S8ECD8ED",
+ [EDAC_S16ECD16ED] = "S16ECD16ED"
+};
+
+static ssize_t memctrl_int_store(void *ptr, const char *buffer, size_t count)
+{
+ int *value = (int *)ptr;
+
+ if (isdigit(*buffer))
+ *value = simple_strtoul(buffer, NULL, 0);
+
+ return count;
+}
+
+
+/* EDAC sysfs CSROW data structures and methods
+ */
+
+/* Set of more default csrow<id> attribute show/store functions */
+static ssize_t csrow_ue_count_show(struct csrow_info *csrow, char *data,
+ int private)
+{
+ return sprintf(data, "%u\n", csrow->ue_count);
+}
+
+static ssize_t csrow_ce_count_show(struct csrow_info *csrow, char *data,
+ int private)
+{
+ return sprintf(data, "%u\n", csrow->ce_count);
+}
+
+static ssize_t csrow_size_show(struct csrow_info *csrow, char *data,
+ int private)
+{
+ return sprintf(data, "%u\n", PAGES_TO_MiB(csrow->nr_pages));
+}
+
+static ssize_t csrow_mem_type_show(struct csrow_info *csrow, char *data,
+ int private)
+{
+ return sprintf(data, "%s\n", mem_types[csrow->mtype]);
+}
+
+static ssize_t csrow_dev_type_show(struct csrow_info *csrow, char *data,
+ int private)
+{
+ return sprintf(data, "%s\n", dev_types[csrow->dtype]);
+}
+
+static ssize_t csrow_edac_mode_show(struct csrow_info *csrow, char *data,
+ int private)
+{
+ return sprintf(data, "%s\n", edac_caps[csrow->edac_mode]);
+}
+
+/* show/store functions for DIMM Label attributes */
+static ssize_t channel_dimm_label_show(struct csrow_info *csrow,
+ char *data, int channel)
+{
+ /* if field has not been initialized, there is nothing to send */
+ if (!csrow->channels[channel].label[0])
+ return 0;
+
+ return snprintf(data, EDAC_MC_LABEL_LEN, "%s\n",
+ csrow->channels[channel].label);
+}
+
+static ssize_t channel_dimm_label_store(struct csrow_info *csrow,
+ const char *data,
+ size_t count, int channel)
+{
+ ssize_t max_size = 0;
+
+ max_size = min((ssize_t) count, (ssize_t) EDAC_MC_LABEL_LEN - 1);
+ strncpy(csrow->channels[channel].label, data, max_size);
+ csrow->channels[channel].label[max_size] = '\0';
+
+ return max_size;
+}
+
+/* show function for dynamic chX_ce_count attribute */
+static ssize_t channel_ce_count_show(struct csrow_info *csrow,
+ char *data, int channel)
+{
+ return sprintf(data, "%u\n", csrow->channels[channel].ce_count);
+}
+
+/* csrow specific attribute structure */
+struct csrowdev_attribute {
+ struct attribute attr;
+ ssize_t(*show) (struct csrow_info *, char *, int);
+ ssize_t(*store) (struct csrow_info *, const char *, size_t, int);
+ int private;
+};
+
+#define to_csrow(k) container_of(k, struct csrow_info, kobj)
+#define to_csrowdev_attr(a) container_of(a, struct csrowdev_attribute, attr)
+
+/* Set of show/store higher level functions for default csrow attributes */
+static ssize_t csrowdev_show(struct kobject *kobj,
+ struct attribute *attr, char *buffer)
+{
+ struct csrow_info *csrow = to_csrow(kobj);
+ struct csrowdev_attribute *csrowdev_attr = to_csrowdev_attr(attr);
+
+ if (csrowdev_attr->show)
+ return csrowdev_attr->show(csrow,
+ buffer, csrowdev_attr->private);
+ return -EIO;
+}
+
+static ssize_t csrowdev_store(struct kobject *kobj, struct attribute *attr,
+ const char *buffer, size_t count)
+{
+ struct csrow_info *csrow = to_csrow(kobj);
+ struct csrowdev_attribute *csrowdev_attr = to_csrowdev_attr(attr);
+
+ if (csrowdev_attr->store)
+ return csrowdev_attr->store(csrow,
+ buffer,
+ count, csrowdev_attr->private);
+ return -EIO;
+}
+
+static struct sysfs_ops csrowfs_ops = {
+ .show = csrowdev_show,
+ .store = csrowdev_store
+};
+
+#define CSROWDEV_ATTR(_name,_mode,_show,_store,_private) \
+static struct csrowdev_attribute attr_##_name = { \
+ .attr = {.name = __stringify(_name), .mode = _mode }, \
+ .show = _show, \
+ .store = _store, \
+ .private = _private, \
+};
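+
+/*
+ * For reference, CSROWDEV_ATTR(size_mb, S_IRUGO, csrow_size_show, NULL, 0)
+ * expands to a static struct csrowdev_attribute named 'attr_size_mb'
+ * backing a read-only sysfs file named "size_mb" whose reads are
+ * serviced by csrow_size_show().
+ */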
+
+/* default csrow<id> attribute files */
+CSROWDEV_ATTR(size_mb, S_IRUGO, csrow_size_show, NULL, 0);
+CSROWDEV_ATTR(dev_type, S_IRUGO, csrow_dev_type_show, NULL, 0);
+CSROWDEV_ATTR(mem_type, S_IRUGO, csrow_mem_type_show, NULL, 0);
+CSROWDEV_ATTR(edac_mode, S_IRUGO, csrow_edac_mode_show, NULL, 0);
+CSROWDEV_ATTR(ue_count, S_IRUGO, csrow_ue_count_show, NULL, 0);
+CSROWDEV_ATTR(ce_count, S_IRUGO, csrow_ce_count_show, NULL, 0);
+
+/* default attributes of the CSROW<id> object */
+static struct csrowdev_attribute *default_csrow_attr[] = {
+ &attr_dev_type,
+ &attr_mem_type,
+ &attr_edac_mode,
+ &attr_size_mb,
+ &attr_ue_count,
+ &attr_ce_count,
+ NULL,
+};
+
+/* possible dynamic channel DIMM Label attribute files */
+CSROWDEV_ATTR(ch0_dimm_label, S_IRUGO | S_IWUSR,
+ channel_dimm_label_show, channel_dimm_label_store, 0);
+CSROWDEV_ATTR(ch1_dimm_label, S_IRUGO | S_IWUSR,
+ channel_dimm_label_show, channel_dimm_label_store, 1);
+CSROWDEV_ATTR(ch2_dimm_label, S_IRUGO | S_IWUSR,
+ channel_dimm_label_show, channel_dimm_label_store, 2);
+CSROWDEV_ATTR(ch3_dimm_label, S_IRUGO | S_IWUSR,
+ channel_dimm_label_show, channel_dimm_label_store, 3);
+CSROWDEV_ATTR(ch4_dimm_label, S_IRUGO | S_IWUSR,
+ channel_dimm_label_show, channel_dimm_label_store, 4);
+CSROWDEV_ATTR(ch5_dimm_label, S_IRUGO | S_IWUSR,
+ channel_dimm_label_show, channel_dimm_label_store, 5);
+
+/* Total possible dynamic DIMM Label attribute file table */
+static struct csrowdev_attribute *dynamic_csrow_dimm_attr[] = {
+ &attr_ch0_dimm_label,
+ &attr_ch1_dimm_label,
+ &attr_ch2_dimm_label,
+ &attr_ch3_dimm_label,
+ &attr_ch4_dimm_label,
+ &attr_ch5_dimm_label
+};
+
+/* possible dynamic channel ce_count attribute files */
+CSROWDEV_ATTR(ch0_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 0);
+CSROWDEV_ATTR(ch1_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 1);
+CSROWDEV_ATTR(ch2_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 2);
+CSROWDEV_ATTR(ch3_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 3);
+CSROWDEV_ATTR(ch4_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 4);
+CSROWDEV_ATTR(ch5_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 5);
+
+/* Total possible dynamic ce_count attribute file table */
+static struct csrowdev_attribute *dynamic_csrow_ce_count_attr[] = {
+ &attr_ch0_ce_count,
+ &attr_ch1_ce_count,
+ &attr_ch2_ce_count,
+ &attr_ch3_ce_count,
+ &attr_ch4_ce_count,
+ &attr_ch5_ce_count
+};
+
+#define EDAC_NR_CHANNELS 6
+
+/* Create dynamic CHANNEL files, indexed by 'chan', under specified CSROW */
+static int edac_create_channel_files(struct kobject *kobj, int chan)
+{
+ int err = -ENODEV;
+
+ if (chan >= EDAC_NR_CHANNELS)
+ return err;
+
+ /* create the DIMM label attribute file */
+ err = sysfs_create_file(kobj,
+ (struct attribute *)
+ dynamic_csrow_dimm_attr[chan]);
+
+	if (!err) {
+		/* create the CE Count attribute file */
+		err = sysfs_create_file(kobj,
+					(struct attribute *)
+					dynamic_csrow_ce_count_attr[chan]);
+	}
+
+	if (!err)
+		debugf1("%s() dimm label and ce_count files created\n",
+			__func__);
+
+ return err;
+}
+
+/* No memory to release for this kobj */
+static void edac_csrow_instance_release(struct kobject *kobj)
+{
+ struct mem_ctl_info *mci;
+ struct csrow_info *cs;
+
+ debugf1("%s()\n", __func__);
+
+ cs = container_of(kobj, struct csrow_info, kobj);
+ mci = cs->mci;
+
+ kobject_put(&mci->edac_mci_kobj);
+}
+
+/* the kobj_type instance for a CSROW */
+static struct kobj_type ktype_csrow = {
+ .release = edac_csrow_instance_release,
+ .sysfs_ops = &csrowfs_ops,
+ .default_attrs = (struct attribute **)default_csrow_attr,
+};
+
+/* Create a CSROW object under specified edac_mc_device */
+static int edac_create_csrow_object(struct mem_ctl_info *mci,
+ struct csrow_info *csrow, int index)
+{
+ struct kobject *kobj_mci = &mci->edac_mci_kobj;
+ struct kobject *kobj;
+ int chan;
+ int err;
+
+ /* generate ..../edac/mc/mc<id>/csrow<index> */
+ memset(&csrow->kobj, 0, sizeof(csrow->kobj));
+ csrow->mci = mci; /* include container up link */
+
+ /* bump the mci instance's kobject's ref count */
+ kobj = kobject_get(&mci->edac_mci_kobj);
+ if (!kobj) {
+ err = -ENODEV;
+ goto err_out;
+ }
+
+	/* Instantiate the csrow object */
+ err = kobject_init_and_add(&csrow->kobj, &ktype_csrow, kobj_mci,
+ "csrow%d", index);
+ if (err)
+ goto err_release_top_kobj;
+
+ /* At this point, to release a csrow kobj, one must
+ * call the kobject_put and allow that tear down
+ * to work the releasing
+ */
+
+	/* Create the dynamic attribute files on this csrow,
+ * namely, the DIMM labels and the channel ce_count
+ */
+ for (chan = 0; chan < csrow->nr_channels; chan++) {
+ err = edac_create_channel_files(&csrow->kobj, chan);
+ if (err) {
+ /* special case the unregister here */
+ kobject_put(&csrow->kobj);
+ goto err_out;
+ }
+ }
+ kobject_uevent(&csrow->kobj, KOBJ_ADD);
+ return 0;
+
+ /* error unwind stack */
+err_release_top_kobj:
+ kobject_put(&mci->edac_mci_kobj);
+
+err_out:
+ return err;
+}
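+
+/*
+ * The resulting layout for a populated two-channel csrow looks like
+ * (paths relative to /sys/devices/system/edac/mc):
+ *
+ *	mc0/csrow0/size_mb
+ *	mc0/csrow0/ue_count
+ *	mc0/csrow0/ce_count
+ *	mc0/csrow0/ch0_dimm_label
+ *	mc0/csrow0/ch0_ce_count
+ *	mc0/csrow0/ch1_dimm_label
+ *	mc0/csrow0/ch1_ce_count
+ */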
+
+/* default sysfs methods and data structures for the main MCI kobject */
+
+static ssize_t mci_reset_counters_store(struct mem_ctl_info *mci,
+ const char *data, size_t count)
+{
+ int row, chan;
+
+ mci->ue_noinfo_count = 0;
+ mci->ce_noinfo_count = 0;
+ mci->ue_count = 0;
+ mci->ce_count = 0;
+
+ for (row = 0; row < mci->nr_csrows; row++) {
+ struct csrow_info *ri = &mci->csrows[row];
+
+ ri->ue_count = 0;
+ ri->ce_count = 0;
+
+ for (chan = 0; chan < ri->nr_channels; chan++)
+ ri->channels[chan].ce_count = 0;
+ }
+
+ mci->start_time = jiffies;
+ return count;
+}
+
+/* memory scrubbing */
+static ssize_t mci_sdram_scrub_rate_store(struct mem_ctl_info *mci,
+ const char *data, size_t count)
+{
+ u32 bandwidth = -1;
+
+ if (mci->set_sdram_scrub_rate) {
+
+ memctrl_int_store(&bandwidth, data, count);
+
+ if (!(*mci->set_sdram_scrub_rate) (mci, &bandwidth)) {
+ edac_printk(KERN_DEBUG, EDAC_MC,
+ "Scrub rate set successfully, applied: %d\n",
+ bandwidth);
+ } else {
+ /* FIXME: error codes maybe? */
+ edac_printk(KERN_DEBUG, EDAC_MC,
+ "Scrub rate set FAILED, could not apply: %d\n",
+ bandwidth);
+ }
+ } else {
+ /* FIXME: produce "not implemented" ERROR for user-side. */
+ edac_printk(KERN_WARNING, EDAC_MC,
+ "Memory scrubbing 'set'control is not implemented!\n");
+ }
+ return count;
+}
+
+static ssize_t mci_sdram_scrub_rate_show(struct mem_ctl_info *mci, char *data)
+{
+ u32 bandwidth = -1;
+
+ if (mci->get_sdram_scrub_rate) {
+ if (!(*mci->get_sdram_scrub_rate) (mci, &bandwidth)) {
+ edac_printk(KERN_DEBUG, EDAC_MC,
+ "Scrub rate successfully, fetched: %d\n",
+ bandwidth);
+ } else {
+ /* FIXME: error codes maybe? */
+ edac_printk(KERN_DEBUG, EDAC_MC,
+ "Scrub rate fetch FAILED, got: %d\n",
+ bandwidth);
+ }
+ } else {
+ /* FIXME: produce "not implemented" ERROR for user-side. */
+ edac_printk(KERN_WARNING, EDAC_MC,
+ "Memory scrubbing 'get' control is not implemented\n");
+ }
+ return sprintf(data, "%d\n", bandwidth);
+}
+
+/* default attribute files for the MCI object */
+static ssize_t mci_ue_count_show(struct mem_ctl_info *mci, char *data)
+{
+ return sprintf(data, "%d\n", mci->ue_count);
+}
+
+static ssize_t mci_ce_count_show(struct mem_ctl_info *mci, char *data)
+{
+ return sprintf(data, "%d\n", mci->ce_count);
+}
+
+static ssize_t mci_ce_noinfo_show(struct mem_ctl_info *mci, char *data)
+{
+ return sprintf(data, "%d\n", mci->ce_noinfo_count);
+}
+
+static ssize_t mci_ue_noinfo_show(struct mem_ctl_info *mci, char *data)
+{
+ return sprintf(data, "%d\n", mci->ue_noinfo_count);
+}
+
+static ssize_t mci_seconds_show(struct mem_ctl_info *mci, char *data)
+{
+ return sprintf(data, "%ld\n", (jiffies - mci->start_time) / HZ);
+}
+
+static ssize_t mci_ctl_name_show(struct mem_ctl_info *mci, char *data)
+{
+ return sprintf(data, "%s\n", mci->ctl_name);
+}
+
+static ssize_t mci_size_mb_show(struct mem_ctl_info *mci, char *data)
+{
+ int total_pages, csrow_idx;
+
+ for (total_pages = csrow_idx = 0; csrow_idx < mci->nr_csrows;
+ csrow_idx++) {
+ struct csrow_info *csrow = &mci->csrows[csrow_idx];
+
+ if (!csrow->nr_pages)
+ continue;
+
+ total_pages += csrow->nr_pages;
+ }
+
+ return sprintf(data, "%u\n", PAGES_TO_MiB(total_pages));
+}
+
+#define to_mci(k) container_of(k, struct mem_ctl_info, edac_mci_kobj)
+#define to_mcidev_attr(a) container_of(a,struct mcidev_sysfs_attribute,attr)
+
+/* MCI show/store functions for top most object */
+static ssize_t mcidev_show(struct kobject *kobj, struct attribute *attr,
+ char *buffer)
+{
+ struct mem_ctl_info *mem_ctl_info = to_mci(kobj);
+ struct mcidev_sysfs_attribute *mcidev_attr = to_mcidev_attr(attr);
+
+ if (mcidev_attr->show)
+ return mcidev_attr->show(mem_ctl_info, buffer);
+
+ return -EIO;
+}
+
+static ssize_t mcidev_store(struct kobject *kobj, struct attribute *attr,
+ const char *buffer, size_t count)
+{
+ struct mem_ctl_info *mem_ctl_info = to_mci(kobj);
+ struct mcidev_sysfs_attribute *mcidev_attr = to_mcidev_attr(attr);
+
+ if (mcidev_attr->store)
+ return mcidev_attr->store(mem_ctl_info, buffer, count);
+
+ return -EIO;
+}
+
+/* Intermediate show/store table */
+static struct sysfs_ops mci_ops = {
+ .show = mcidev_show,
+ .store = mcidev_store
+};
+
+#define MCIDEV_ATTR(_name,_mode,_show,_store) \
+static struct mcidev_sysfs_attribute mci_attr_##_name = { \
+ .attr = {.name = __stringify(_name), .mode = _mode }, \
+ .show = _show, \
+ .store = _store, \
+};
+
+/* default Control file */
+MCIDEV_ATTR(reset_counters, S_IWUSR, NULL, mci_reset_counters_store);
+
+/* default Attribute files */
+MCIDEV_ATTR(mc_name, S_IRUGO, mci_ctl_name_show, NULL);
+MCIDEV_ATTR(size_mb, S_IRUGO, mci_size_mb_show, NULL);
+MCIDEV_ATTR(seconds_since_reset, S_IRUGO, mci_seconds_show, NULL);
+MCIDEV_ATTR(ue_noinfo_count, S_IRUGO, mci_ue_noinfo_show, NULL);
+MCIDEV_ATTR(ce_noinfo_count, S_IRUGO, mci_ce_noinfo_show, NULL);
+MCIDEV_ATTR(ue_count, S_IRUGO, mci_ue_count_show, NULL);
+MCIDEV_ATTR(ce_count, S_IRUGO, mci_ce_count_show, NULL);
+
+/* memory scrubber attribute file */
+MCIDEV_ATTR(sdram_scrub_rate, S_IRUGO | S_IWUSR, mci_sdram_scrub_rate_show,
+ mci_sdram_scrub_rate_store);
+
+static struct mcidev_sysfs_attribute *mci_attr[] = {
+ &mci_attr_reset_counters,
+ &mci_attr_mc_name,
+ &mci_attr_size_mb,
+ &mci_attr_seconds_since_reset,
+ &mci_attr_ue_noinfo_count,
+ &mci_attr_ce_noinfo_count,
+ &mci_attr_ue_count,
+ &mci_attr_ce_count,
+ &mci_attr_sdram_scrub_rate,
+ NULL
+};
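+
+/*
+ * Together with the csrow objects created below, the table above
+ * yields per-controller files such as mc<id>/ce_count, mc<id>/size_mb
+ * and mc<id>/sdram_scrub_rate under /sys/devices/system/edac/mc.
+ */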
+
+
+/*
+ * Release of a MC controlling instance
+ *
+ * each MC control instance has the following resources upon entry:
+ * a) a ref count on the top memctl kobj
+ * b) a ref count on this module
+ *
+ * this function must decrement those ref counts and then
+ * issue a free on the instance's memory
+ */
+static void edac_mci_control_release(struct kobject *kobj)
+{
+ struct mem_ctl_info *mci;
+
+ mci = to_mci(kobj);
+
+ debugf0("%s() mci instance idx=%d releasing\n", __func__, mci->mc_idx);
+
+ /* decrement the module ref count */
+ module_put(mci->owner);
+
+ /* free the mci instance memory here */
+ kfree(mci);
+}
+
+static struct kobj_type ktype_mci = {
+ .release = edac_mci_control_release,
+ .sysfs_ops = &mci_ops,
+ .default_attrs = (struct attribute **)mci_attr,
+};
+
+/* EDAC memory controller sysfs kset:
+ * /sys/devices/system/edac/mc
+ */
+static struct kset *mc_kset;
+
+/*
+ * edac_mc_register_sysfs_main_kobj
+ *
+ * sets up and registers the main kobject for each mci
+ */
+int edac_mc_register_sysfs_main_kobj(struct mem_ctl_info *mci)
+{
+ struct kobject *kobj_mci;
+ int err;
+
+ debugf1("%s()\n", __func__);
+
+ kobj_mci = &mci->edac_mci_kobj;
+
+ /* Init the mci's kobject */
+ memset(kobj_mci, 0, sizeof(*kobj_mci));
+
+ /* Record which module 'owns' this control structure
+ * and bump the ref count of the module
+ */
+ mci->owner = THIS_MODULE;
+
+ /* bump ref count on this module */
+ if (!try_module_get(mci->owner)) {
+ err = -ENODEV;
+ goto fail_out;
+ }
+
+	/* this instance becomes part of the mc_kset */
+ kobj_mci->kset = mc_kset;
+
+ /* register the mc<id> kobject to the mc_kset */
+ err = kobject_init_and_add(kobj_mci, &ktype_mci, NULL,
+ "mc%d", mci->mc_idx);
+ if (err) {
+ debugf1("%s()Failed to register '.../edac/mc%d'\n",
+ __func__, mci->mc_idx);
+ goto kobj_reg_fail;
+ }
+ kobject_uevent(kobj_mci, KOBJ_ADD);
+
+ /* At this point, to 'free' the control struct,
+ * edac_mc_unregister_sysfs_main_kobj() must be used
+ */
+
+ debugf1("%s() Registered '.../edac/mc%d' kobject\n",
+ __func__, mci->mc_idx);
+
+ return 0;
+
+ /* Error exit stack */
+
+kobj_reg_fail:
+ module_put(mci->owner);
+
+fail_out:
+ return err;
+}
+
+/*
+ * edac_mc_unregister_sysfs_main_kobj
+ *
+ * tears down the main mci kobject from the mc_kset
+ */
+void edac_mc_unregister_sysfs_main_kobj(struct mem_ctl_info *mci)
+{
+ /* delete the kobj from the mc_kset */
+ kobject_put(&mci->edac_mci_kobj);
+}
+
+#define EDAC_DEVICE_SYMLINK "device"
+
+/*
+ * edac_create_mci_instance_attributes
+ * create MC driver specific attributes at the topmost level
+ * directory of this mci instance.
+ */
+static int edac_create_mci_instance_attributes(struct mem_ctl_info *mci)
+{
+ int err;
+ struct mcidev_sysfs_attribute *sysfs_attrib;
+
+ /* point to the start of the array and iterate over it
+ * adding each attribute listed to this mci instance's kobject
+ */
+ sysfs_attrib = mci->mc_driver_sysfs_attributes;
+
+ while (sysfs_attrib && sysfs_attrib->attr.name) {
+		err = sysfs_create_file(&mci->edac_mci_kobj,
+					(struct attribute *)sysfs_attrib);
+		if (err)
+			return err;
+
+ sysfs_attrib++;
+ }
+
+ return 0;
+}
+
+/*
+ * edac_remove_mci_instance_attributes
+ * remove MC driver specific attributes at the topmost level
+ * directory of this mci instance.
+ */
+static void edac_remove_mci_instance_attributes(struct mem_ctl_info *mci)
+{
+ struct mcidev_sysfs_attribute *sysfs_attrib;
+
+ /* point to the start of the array and iterate over it
+	 * removing each attribute listed from this mci instance's kobject
+ */
+ sysfs_attrib = mci->mc_driver_sysfs_attributes;
+
+ /* loop if there are attributes and until we hit a NULL entry */
+ while (sysfs_attrib && sysfs_attrib->attr.name) {
+ sysfs_remove_file(&mci->edac_mci_kobj,
+ (struct attribute *) sysfs_attrib);
+ sysfs_attrib++;
+ }
+}
+
+
+/*
+ * Create a new Memory Controller kobject instance,
+ * mc<id> under the 'mc' directory
+ *
+ * Return:
+ * 0 Success
+ * !0 Failure
+ */
+int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
+{
+ int i;
+ int err;
+ struct csrow_info *csrow;
+ struct kobject *kobj_mci = &mci->edac_mci_kobj;
+
+ debugf0("%s() idx=%d\n", __func__, mci->mc_idx);
+
+ /* create a symlink for the device */
+ err = sysfs_create_link(kobj_mci, &mci->dev->kobj,
+ EDAC_DEVICE_SYMLINK);
+ if (err) {
+ debugf1("%s() failure to create symlink\n", __func__);
+ goto fail0;
+ }
+
+ /* If the low level driver desires some attributes,
+ * then create them now for the driver.
+ */
+ if (mci->mc_driver_sysfs_attributes) {
+ err = edac_create_mci_instance_attributes(mci);
+ if (err) {
+ debugf1("%s() failure to create mci attributes\n",
+ __func__);
+ goto fail0;
+ }
+ }
+
+ /* Make directories for each CSROW object under the mc<id> kobject
+ */
+ for (i = 0; i < mci->nr_csrows; i++) {
+ csrow = &mci->csrows[i];
+
+ /* Only expose populated CSROWs */
+ if (csrow->nr_pages > 0) {
+ err = edac_create_csrow_object(mci, csrow, i);
+ if (err) {
+ debugf1("%s() failure: create csrow %d obj\n",
+ __func__, i);
+ goto fail1;
+ }
+ }
+ }
+
+ return 0;
+
+ /* CSROW error: backout what has already been registered, */
+fail1:
+	for (i--; i >= 0; i--) {
+		if (mci->csrows[i].nr_pages > 0)
+			kobject_put(&mci->csrows[i].kobj);
+	}
+
+ /* remove the mci instance's attributes, if any */
+ edac_remove_mci_instance_attributes(mci);
+
+ /* remove the symlink */
+ sysfs_remove_link(kobj_mci, EDAC_DEVICE_SYMLINK);
+
+fail0:
+ return err;
+}
+
+/*
+ * remove a Memory Controller instance
+ */
+void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
+{
+ int i;
+
+ debugf0("%s()\n", __func__);
+
+ /* remove all csrow kobjects */
+ for (i = 0; i < mci->nr_csrows; i++) {
+ if (mci->csrows[i].nr_pages > 0) {
+ debugf0("%s() unreg csrow-%d\n", __func__, i);
+ kobject_put(&mci->csrows[i].kobj);
+ }
+ }
+
+ debugf0("%s() remove_link\n", __func__);
+
+ /* remove the symlink */
+ sysfs_remove_link(&mci->edac_mci_kobj, EDAC_DEVICE_SYMLINK);
+
+ debugf0("%s() remove_mci_instance\n", __func__);
+
+	/* remove this mci instance's attributes */
+ edac_remove_mci_instance_attributes(mci);
+
+ debugf0("%s() unregister this mci kobj\n", __func__);
+
+ /* unregister this instance's kobject */
+ kobject_put(&mci->edac_mci_kobj);
+}
+
+/*
+ * edac_sysfs_setup_mc_kset(void)
+ *
+ * Initialize the mc_kset for the 'mc' entry
+ * This requires creating the top 'mc' directory with a kset
+ * and its controls/attributes.
+ *
+ * Instance 'mci' kobjects are grouped as children of this 'mc' kset.
+ *
+ * Return: 0 SUCCESS
+ * !0 FAILURE error code
+ */
+int edac_sysfs_setup_mc_kset(void)
+{
+	int err = -EINVAL;
+ struct sysdev_class *edac_class;
+
+ debugf1("%s()\n", __func__);
+
+ /* get the /sys/devices/system/edac class reference */
+ edac_class = edac_get_edac_class();
+ if (edac_class == NULL) {
+ debugf1("%s() no edac_class error=%d\n", __func__, err);
+ goto fail_out;
+ }
+
+ /* Init the MC's kobject */
+ mc_kset = kset_create_and_add("mc", NULL, &edac_class->kset.kobj);
+ if (!mc_kset) {
+ err = -ENOMEM;
+ debugf1("%s() Failed to register '.../edac/mc'\n", __func__);
+ goto fail_out;
+ }
+
+ debugf1("%s() Registered '.../edac/mc' kobject\n", __func__);
+
+ return 0;
+
+
+ /* error unwind stack */
+fail_out:
+ return err;
+}
+
+/*
+ * edac_sysfs_teardown_mc_kset
+ *
+ * deconstruct the mc_kset for memory controllers
+ */
+void edac_sysfs_teardown_mc_kset(void)
+{
+ kset_unregister(mc_kset);
+}
+
diff --git a/drivers/edac/edac_module.c b/drivers/edac/edac_module.c
new file mode 100644
index 0000000..7e1374a
--- /dev/null
+++ b/drivers/edac/edac_module.c
@@ -0,0 +1,222 @@
+/*
+ * edac_module.c
+ *
+ * (C) 2007 www.softwarebitmaker.com
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * Author: Doug Thompson <dougthompson@xmission.com>
+ *
+ */
+#include <linux/edac.h>
+
+#include "edac_core.h"
+#include "edac_module.h"
+
+#define EDAC_VERSION "Ver: 2.1.0 " __DATE__
+
+#ifdef CONFIG_EDAC_DEBUG
+/* Values of 0 to 4 will generate output */
+int edac_debug_level = 2;
+EXPORT_SYMBOL_GPL(edac_debug_level);
+#endif
+
+/* scope is to module level only */
+struct workqueue_struct *edac_workqueue;
+
+/*
+ * sysfs object: /sys/devices/system/edac
+ * need to export to other files in this modules
+ */
+static struct sysdev_class edac_class = {
+ .name = "edac",
+};
+static int edac_class_valid;
+
+/*
+ * edac_op_state_to_string()
+ */
+char *edac_op_state_to_string(int opstate)
+{
+ if (opstate == OP_RUNNING_POLL)
+ return "POLLED";
+ else if (opstate == OP_RUNNING_INTERRUPT)
+ return "INTERRUPT";
+ else if (opstate == OP_RUNNING_POLL_INTR)
+ return "POLL-INTR";
+ else if (opstate == OP_ALLOC)
+ return "ALLOC";
+ else if (opstate == OP_OFFLINE)
+ return "OFFLINE";
+
+ return "UNKNOWN";
+}
+
+/*
+ * edac_get_edac_class()
+ *
+ * return pointer to the edac class of 'edac'
+ */
+struct sysdev_class *edac_get_edac_class(void)
+{
+ struct sysdev_class *classptr = NULL;
+
+ if (edac_class_valid)
+ classptr = &edac_class;
+
+ return classptr;
+}
+
+/*
+ * edac_register_sysfs_edac_name()
+ *
+ * register the 'edac' into /sys/devices/system
+ *
+ * return:
+ * 0 success
+ * !0 error
+ */
+static int edac_register_sysfs_edac_name(void)
+{
+ int err;
+
+ /* create the /sys/devices/system/edac directory */
+ err = sysdev_class_register(&edac_class);
+
+ if (err) {
+ debugf1("%s() error=%d\n", __func__, err);
+ return err;
+ }
+
+ edac_class_valid = 1;
+ return 0;
+}
+
+/*
+ * edac_unregister_sysfs_edac_name()
+ *
+ * unregister the 'edac' from /sys/devices/system
+ */
+static void edac_unregister_sysfs_edac_name(void)
+{
+ /* only if currently registered, then unregister it */
+ if (edac_class_valid)
+ sysdev_class_unregister(&edac_class);
+
+ edac_class_valid = 0;
+}
+
+/*
+ * edac_workqueue_setup
+ * initialize the edac work queue for polling operations
+ */
+static int edac_workqueue_setup(void)
+{
+ edac_workqueue = create_singlethread_workqueue("edac-poller");
+ if (edac_workqueue == NULL)
+ return -ENODEV;
+ else
+ return 0;
+}
+
+/*
+ * edac_workqueue_teardown
+ * teardown the edac workqueue
+ */
+static void edac_workqueue_teardown(void)
+{
+ if (edac_workqueue) {
+ flush_workqueue(edac_workqueue);
+ destroy_workqueue(edac_workqueue);
+ edac_workqueue = NULL;
+ }
+}
+
+/*
+ * edac_init
+ * module initialization entry point
+ */
+static int __init edac_init(void)
+{
+ int err = 0;
+
+ edac_printk(KERN_INFO, EDAC_MC, EDAC_VERSION "\n");
+
+ /*
+ * Harvest and clear any boot/initialization PCI parity errors
+ *
+ * FIXME: This only clears errors logged by devices present at time of
+ * module initialization. We should also do an initial clear
+ * of each newly hotplugged device.
+ */
+ edac_pci_clear_parity_errors();
+
+ /*
+ * perform the registration of the /sys/devices/system/edac class object
+ */
+ if (edac_register_sysfs_edac_name()) {
+ edac_printk(KERN_ERR, EDAC_MC,
+ "Error initializing 'edac' kobject\n");
+ err = -ENODEV;
+ goto error;
+ }
+
+ /*
+ * now set up the mc_kset under the edac class object
+ */
+ err = edac_sysfs_setup_mc_kset();
+ if (err)
+ goto sysfs_setup_fail;
+
+ /* Setup/Initialize the workq for this core */
+ err = edac_workqueue_setup();
+ if (err) {
+ edac_printk(KERN_ERR, EDAC_MC, "init WorkQueue failure\n");
+ goto workq_fail;
+ }
+
+ return 0;
+
+ /* Error teardown stack */
+workq_fail:
+ edac_sysfs_teardown_mc_kset();
+
+sysfs_setup_fail:
+ edac_unregister_sysfs_edac_name();
+
+error:
+ return err;
+}
+
+/*
+ * edac_exit()
+ * module exit/termination function
+ */
+static void __exit edac_exit(void)
+{
+ debugf0("%s()\n", __func__);
+
+ /* tear down the various subsystems */
+ edac_workqueue_teardown();
+ edac_sysfs_teardown_mc_kset();
+ edac_unregister_sysfs_edac_name();
+}
+
+/*
+ * Inform the kernel of our entry and exit points
+ */
+module_init(edac_init);
+module_exit(edac_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Doug Thompson www.softwarebitmaker.com, et al");
+MODULE_DESCRIPTION("Core library routines for EDAC reporting");
+
+/* refer to *_sysfs.c files for parameters that are exported via sysfs */
+
+#ifdef CONFIG_EDAC_DEBUG
+module_param(edac_debug_level, int, 0644);
+MODULE_PARM_DESC(edac_debug_level, "Debug level");
+#endif
diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h
new file mode 100644
index 0000000..233d479
--- /dev/null
+++ b/drivers/edac/edac_module.h
@@ -0,0 +1,84 @@
+
+/*
+ * edac_module.h
+ *
+ * For defining functions/data for within the EDAC_CORE module only
+ *
+ * written by doug thompson <norsk5@xmission.com>
+ */
+
+#ifndef __EDAC_MODULE_H__
+#define __EDAC_MODULE_H__
+
+#include <linux/sysdev.h>
+
+#include "edac_core.h"
+
+/*
+ * INTERNAL EDAC MODULE:
+ * EDAC memory controller sysfs create/remove functions
+ * and setup/teardown functions
+ *
+ * edac_mc objects
+ */
+extern int edac_sysfs_setup_mc_kset(void);
+extern void edac_sysfs_teardown_mc_kset(void);
+extern int edac_mc_register_sysfs_main_kobj(struct mem_ctl_info *mci);
+extern void edac_mc_unregister_sysfs_main_kobj(struct mem_ctl_info *mci);
+extern int edac_create_sysfs_mci_device(struct mem_ctl_info *mci);
+extern void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci);
+extern int edac_get_log_ue(void);
+extern int edac_get_log_ce(void);
+extern int edac_get_panic_on_ue(void);
+extern int edac_mc_get_log_ue(void);
+extern int edac_mc_get_log_ce(void);
+extern int edac_mc_get_panic_on_ue(void);
+extern int edac_get_poll_msec(void);
+extern int edac_mc_get_poll_msec(void);
+
+extern int edac_device_register_sysfs_main_kobj(
+ struct edac_device_ctl_info *edac_dev);
+extern void edac_device_unregister_sysfs_main_kobj(
+ struct edac_device_ctl_info *edac_dev);
+extern int edac_device_create_sysfs(struct edac_device_ctl_info *edac_dev);
+extern void edac_device_remove_sysfs(struct edac_device_ctl_info *edac_dev);
+extern struct sysdev_class *edac_get_edac_class(void);
+
+/* edac core workqueue: single CPU mode */
+extern struct workqueue_struct *edac_workqueue;
+extern void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
+ unsigned msec);
+extern void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev);
+extern void edac_device_reset_delay_period(struct edac_device_ctl_info
+ *edac_dev, unsigned long value);
+extern void edac_mc_reset_delay_period(int value);
+
+extern void *edac_align_ptr(void *ptr, unsigned size);
+
+/*
+ * EDAC PCI functions
+ */
+#ifdef CONFIG_PCI
+extern void edac_pci_do_parity_check(void);
+extern void edac_pci_clear_parity_errors(void);
+extern int edac_sysfs_pci_setup(void);
+extern void edac_sysfs_pci_teardown(void);
+extern int edac_pci_get_check_errors(void);
+extern int edac_pci_get_poll_msec(void);
+extern void edac_pci_remove_sysfs(struct edac_pci_ctl_info *pci);
+extern void edac_pci_handle_pe(struct edac_pci_ctl_info *pci, const char *msg);
+extern void edac_pci_handle_npe(struct edac_pci_ctl_info *pci,
+ const char *msg);
+#else /* CONFIG_PCI */
+/* pre-process these away */
+#define edac_pci_do_parity_check()
+#define edac_pci_clear_parity_errors()
+#define edac_sysfs_pci_setup() (0)
+#define edac_sysfs_pci_teardown()
+#define edac_pci_get_check_errors()
+#define edac_pci_get_poll_msec()
+#define edac_pci_handle_pe()
+#define edac_pci_handle_npe()
+#endif /* CONFIG_PCI */
+
+#endif /* __EDAC_MODULE_H__ */
diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
new file mode 100644
index 0000000..22ec9d5
--- /dev/null
+++ b/drivers/edac/edac_pci.c
@@ -0,0 +1,497 @@
+/*
+ * EDAC PCI component
+ *
+ * Author: Dave Jiang <djiang@mvista.com>
+ *
+ * 2007 (c) MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ *
+ */
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/smp.h>
+#include <linux/init.h>
+#include <linux/sysctl.h>
+#include <linux/highmem.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/sysdev.h>
+#include <linux/ctype.h>
+#include <linux/workqueue.h>
+#include <asm/uaccess.h>
+#include <asm/page.h>
+
+#include "edac_core.h"
+#include "edac_module.h"
+
+static DEFINE_MUTEX(edac_pci_ctls_mutex);
+static LIST_HEAD(edac_pci_list);
+
+/*
+ * edac_pci_alloc_ctl_info
+ *
+ * The alloc() function for the 'edac_pci' control info
+ * structure. The chip driver will allocate one of these for each
+ * edac_pci it is going to control/register with the EDAC CORE.
+ */
+struct edac_pci_ctl_info *edac_pci_alloc_ctl_info(unsigned int sz_pvt,
+ const char *edac_pci_name)
+{
+ struct edac_pci_ctl_info *pci;
+ void *pvt;
+ unsigned int size;
+
+ debugf1("%s()\n", __func__);
+
+ pci = (struct edac_pci_ctl_info *)0;
+ pvt = edac_align_ptr(&pci[1], sz_pvt);
+ size = ((unsigned long)pvt) + sz_pvt;
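+	/* The two statements above compute, from a NULL base, the
+	 * aligned offset of the private area and hence the total size
+	 * needed for one allocation holding the ctl info plus the
+	 * driver's private data.
+	 */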
+
+ /* Alloc the needed control struct memory */
+ pci = kzalloc(size, GFP_KERNEL);
+ if (pci == NULL)
+ return NULL;
+
+	/* Now compute the private space pointer, if any was requested */
+ pvt = sz_pvt ? ((char *)pci) + ((unsigned long)pvt) : NULL;
+
+ pci->pvt_info = pvt;
+ pci->op_state = OP_ALLOC;
+
+ snprintf(pci->name, strlen(edac_pci_name) + 1, "%s", edac_pci_name);
+
+ return pci;
+}
+EXPORT_SYMBOL_GPL(edac_pci_alloc_ctl_info);
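+
+/*
+ * Usage sketch (illustrative): a driver allocates its control info
+ * and private state in one shot, e.g.:
+ *
+ *	struct my_pvt { u32 saved_cfg; };	(hypothetical type)
+ *	pci = edac_pci_alloc_ctl_info(sizeof(struct my_pvt), "my_pci");
+ *	if (pci)
+ *		pvt = pci->pvt_info;
+ */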
+
+/*
+ * edac_pci_free_ctl_info()
+ *
+ * Last action on the pci control structure.
+ *
+ * call the remove sysfs information, which will unregister
+ * this control struct's kobj. When that kobj's ref count
+ * goes to zero, its release function will be call and then
+ * kfree() the memory.
+ */
+void edac_pci_free_ctl_info(struct edac_pci_ctl_info *pci)
+{
+ debugf1("%s()\n", __func__);
+
+ edac_pci_remove_sysfs(pci);
+}
+EXPORT_SYMBOL_GPL(edac_pci_free_ctl_info);
+
+/*
+ * find_edac_pci_by_dev()
+ * scans the edac_pci list for a specific 'struct device *'
+ *
+ * return NULL if not found, or return control struct pointer
+ */
+static struct edac_pci_ctl_info *find_edac_pci_by_dev(struct device *dev)
+{
+ struct edac_pci_ctl_info *pci;
+ struct list_head *item;
+
+ debugf1("%s()\n", __func__);
+
+ list_for_each(item, &edac_pci_list) {
+ pci = list_entry(item, struct edac_pci_ctl_info, link);
+
+ if (pci->dev == dev)
+ return pci;
+ }
+
+ return NULL;
+}
+
+/*
+ * add_edac_pci_to_global_list
+ * Before calling this function, caller must assign a unique value to
+ *	pci->pci_idx.
+ * Return:
+ * 0 on success
+ * 1 on failure
+ */
+static int add_edac_pci_to_global_list(struct edac_pci_ctl_info *pci)
+{
+ struct list_head *item, *insert_before;
+ struct edac_pci_ctl_info *rover;
+
+ debugf1("%s()\n", __func__);
+
+ insert_before = &edac_pci_list;
+
+ /* Determine if already on the list */
+ rover = find_edac_pci_by_dev(pci->dev);
+ if (unlikely(rover != NULL))
+ goto fail0;
+
+ /* Insert in ascending order by 'pci_idx', so find position */
+ list_for_each(item, &edac_pci_list) {
+ rover = list_entry(item, struct edac_pci_ctl_info, link);
+
+ if (rover->pci_idx >= pci->pci_idx) {
+ if (unlikely(rover->pci_idx == pci->pci_idx))
+ goto fail1;
+
+ insert_before = item;
+ break;
+ }
+ }
+
+ list_add_tail_rcu(&pci->link, insert_before);
+ return 0;
+
+fail0:
+ edac_printk(KERN_WARNING, EDAC_PCI,
+ "%s (%s) %s %s already assigned %d\n",
+ rover->dev->bus_id, edac_dev_name(rover),
+ rover->mod_name, rover->ctl_name, rover->pci_idx);
+ return 1;
+
+fail1:
+ edac_printk(KERN_WARNING, EDAC_PCI,
+ "but in low-level driver: attempt to assign\n"
+ "\tduplicate pci_idx %d in %s()\n", rover->pci_idx,
+ __func__);
+ return 1;
+}
+
+/*
+ * complete_edac_pci_list_del
+ *
+ * RCU completion callback to indicate item is deleted
+ */
+static void complete_edac_pci_list_del(struct rcu_head *head)
+{
+ struct edac_pci_ctl_info *pci;
+
+ pci = container_of(head, struct edac_pci_ctl_info, rcu);
+ INIT_LIST_HEAD(&pci->link);
+ complete(&pci->complete);
+}
+
+/*
+ * del_edac_pci_from_global_list
+ *
+ * remove the PCI control struct from the global list
+ */
+static void del_edac_pci_from_global_list(struct edac_pci_ctl_info *pci)
+{
+ list_del_rcu(&pci->link);
+ init_completion(&pci->complete);
+ call_rcu(&pci->rcu, complete_edac_pci_list_del);
+ wait_for_completion(&pci->complete);
+}
+
+#if 0
+/* Older code, but might use in the future */
+
+/*
+ * edac_pci_find()
+ * Search for an edac_pci_ctl_info structure whose index is 'idx'
+ *
+ * If found, return a pointer to the structure
+ * Else return NULL.
+ *
+ * Caller must hold pci_ctls_mutex.
+ */
+struct edac_pci_ctl_info *edac_pci_find(int idx)
+{
+ struct list_head *item;
+ struct edac_pci_ctl_info *pci;
+
+	/* Iterate over list, looking for exact match of ID */
+ list_for_each(item, &edac_pci_list) {
+ pci = list_entry(item, struct edac_pci_ctl_info, link);
+
+ if (pci->pci_idx >= idx) {
+ if (pci->pci_idx == idx)
+ return pci;
+
+ /* not on list, so terminate early */
+ break;
+ }
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(edac_pci_find);
+#endif
+
+/*
+ * edac_pci_workq_function()
+ *
+ * periodic function that performs the operation
+ * scheduled by a workq request, for a given PCI control struct
+ */
+static void edac_pci_workq_function(struct work_struct *work_req)
+{
+ struct delayed_work *d_work = (struct delayed_work *)work_req;
+ struct edac_pci_ctl_info *pci = to_edac_pci_ctl_work(d_work);
+ int msec;
+ unsigned long delay;
+
+ debugf3("%s() checking\n", __func__);
+
+ mutex_lock(&edac_pci_ctls_mutex);
+
+ if (pci->op_state == OP_RUNNING_POLL) {
+ /* we might be in POLL mode, but there may NOT be a poll func
+ */
+ if ((pci->edac_check != NULL) && edac_pci_get_check_errors())
+ pci->edac_check(pci);
+
+ /* if we are on a one second period, then use round */
+ msec = edac_pci_get_poll_msec();
+ if (msec == 1000)
+ delay = round_jiffies_relative(msecs_to_jiffies(msec));
+ else
+ delay = msecs_to_jiffies(msec);
+
+ /* Reschedule only if we are in POLL mode */
+ queue_delayed_work(edac_workqueue, &pci->work, delay);
+ }
+
+ mutex_unlock(&edac_pci_ctls_mutex);
+}
+
+/*
+ * edac_pci_workq_setup()
+ * initialize a workq item for this edac_pci instance
+ * passing in the new delay period in msec
+ *
+ * locking model:
+ * called when 'edac_pci_ctls_mutex' is locked
+ */
+static void edac_pci_workq_setup(struct edac_pci_ctl_info *pci,
+ unsigned int msec)
+{
+ debugf0("%s()\n", __func__);
+
+ INIT_DELAYED_WORK(&pci->work, edac_pci_workq_function);
+ queue_delayed_work(edac_workqueue, &pci->work,
+			msecs_to_jiffies(msec));
+}
+
+/*
+ * edac_pci_workq_teardown()
+ * stop the workq processing on this edac_pci instance
+ */
+static void edac_pci_workq_teardown(struct edac_pci_ctl_info *pci)
+{
+ int status;
+
+ debugf0("%s()\n", __func__);
+
+ status = cancel_delayed_work(&pci->work);
+ if (status == 0)
+ flush_workqueue(edac_workqueue);
+}
+
+/*
+ * edac_pci_reset_delay_period
+ *
+ * called with a new period value for the workq period
+ * a) stop current workq timer
+ * b) restart workq timer with new value
+ */
+void edac_pci_reset_delay_period(struct edac_pci_ctl_info *pci,
+ unsigned long value)
+{
+ debugf0("%s()\n", __func__);
+
+ edac_pci_workq_teardown(pci);
+
+ /* need to lock for the setup */
+ mutex_lock(&edac_pci_ctls_mutex);
+
+ edac_pci_workq_setup(pci, value);
+
+ mutex_unlock(&edac_pci_ctls_mutex);
+}
+EXPORT_SYMBOL_GPL(edac_pci_reset_delay_period);
+
+/*
+ * edac_pci_add_device: Insert the 'edac_pci' structure into the
+ *	edac_pci global list and create sysfs entries associated with
+ *	edac_pci structure.
+ * @pci: pointer to the edac_pci structure to be added to the list
+ * @edac_idx: A unique numeric identifier to be assigned to the
+ * 'edac_pci' structure.
+ *
+ * Return:
+ * 0 Success
+ * !0 Failure
+ */
+int edac_pci_add_device(struct edac_pci_ctl_info *pci, int edac_idx)
+{
+ debugf0("%s()\n", __func__);
+
+ pci->pci_idx = edac_idx;
+ pci->start_time = jiffies;
+
+ mutex_lock(&edac_pci_ctls_mutex);
+
+ if (add_edac_pci_to_global_list(pci))
+ goto fail0;
+
+ if (edac_pci_create_sysfs(pci)) {
+ edac_pci_printk(pci, KERN_WARNING,
+ "failed to create sysfs pci\n");
+ goto fail1;
+ }
+
+ if (pci->edac_check != NULL) {
+ pci->op_state = OP_RUNNING_POLL;
+
+ edac_pci_workq_setup(pci, 1000);
+ } else {
+ pci->op_state = OP_RUNNING_INTERRUPT;
+ }
+
+ edac_pci_printk(pci, KERN_INFO,
+ "Giving out device to module '%s' controller '%s':"
+ " DEV '%s' (%s)\n",
+ pci->mod_name,
+ pci->ctl_name,
+ edac_dev_name(pci), edac_op_state_to_string(pci->op_state));
+
+ mutex_unlock(&edac_pci_ctls_mutex);
+ return 0;
+
+ /* error unwind stack */
+fail1:
+ del_edac_pci_from_global_list(pci);
+fail0:
+ mutex_unlock(&edac_pci_ctls_mutex);
+ return 1;
+}
+EXPORT_SYMBOL_GPL(edac_pci_add_device);
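+
+/*
+ * Illustrative call sequence from a hypothetical driver probe:
+ *
+ *	pci->dev = &pdev->dev;
+ *	pci->mod_name = "my_edac";
+ *	pci->edac_check = my_check;	(poll callback, or NULL for IRQ mode)
+ *	if (edac_pci_add_device(pci, my_idx))
+ *		goto fail;
+ */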
+
+/*
+ * edac_pci_del_device()
+ * Remove sysfs entries for specified edac_pci structure and
+ * then remove edac_pci structure from global list
+ *
+ * @dev:
+ * Pointer to 'struct device' representing edac_pci structure
+ * to remove
+ *
+ * Return:
+ * Pointer to removed edac_pci structure,
+ * or NULL if device not found
+ */
+struct edac_pci_ctl_info *edac_pci_del_device(struct device *dev)
+{
+ struct edac_pci_ctl_info *pci;
+
+ debugf0("%s()\n", __func__);
+
+ mutex_lock(&edac_pci_ctls_mutex);
+
+ /* ensure the control struct is on the global list
+ * if not, then leave
+ */
+ pci = find_edac_pci_by_dev(dev);
+ if (pci == NULL) {
+ mutex_unlock(&edac_pci_ctls_mutex);
+ return NULL;
+ }
+
+ pci->op_state = OP_OFFLINE;
+
+ del_edac_pci_from_global_list(pci);
+
+ mutex_unlock(&edac_pci_ctls_mutex);
+
+ /* stop the workq timer */
+ edac_pci_workq_teardown(pci);
+
+ edac_printk(KERN_INFO, EDAC_PCI,
+ "Removed device %d for %s %s: DEV %s\n",
+ pci->pci_idx, pci->mod_name, pci->ctl_name, edac_dev_name(pci));
+
+ return pci;
+}
+EXPORT_SYMBOL_GPL(edac_pci_del_device);
+
+/*
+ * edac_pci_generic_check
+ *
+ * a Generic parity check API
+ */
+static void edac_pci_generic_check(struct edac_pci_ctl_info *pci)
+{
+ debugf4("%s()\n", __func__);
+ edac_pci_do_parity_check();
+}
+
+/* free running instance index counter */
+static int edac_pci_idx;
+#define EDAC_PCI_GENCTL_NAME "EDAC PCI controller"
+
+struct edac_pci_gen_data {
+ int edac_idx;
+};
+
+/*
+ * edac_pci_create_generic_ctl
+ *
+ * A generic constructor for a PCI parity polling device
+ * Some systems have more than one domain of PCI busses.
+ * For systems with one domain, then this API will
+ * provide for a generic poller.
+ *
+ * This routine calls the edac_pci_alloc_ctl_info() for
+ * the generic device, with default values
+ */
+struct edac_pci_ctl_info *edac_pci_create_generic_ctl(struct device *dev,
+ const char *mod_name)
+{
+ struct edac_pci_ctl_info *pci;
+ struct edac_pci_gen_data *pdata;
+
+ pci = edac_pci_alloc_ctl_info(sizeof(*pdata), EDAC_PCI_GENCTL_NAME);
+ if (!pci)
+ return NULL;
+
+ pdata = pci->pvt_info;
+ pci->dev = dev;
+ dev_set_drvdata(pci->dev, pci);
+ pci->dev_name = pci_name(to_pci_dev(dev));
+
+ pci->mod_name = mod_name;
+ pci->ctl_name = EDAC_PCI_GENCTL_NAME;
+ pci->edac_check = edac_pci_generic_check;
+
+ pdata->edac_idx = edac_pci_idx++;
+
+ if (edac_pci_add_device(pci, pdata->edac_idx) > 0) {
+ debugf3("%s(): failed edac_pci_add_device()\n", __func__);
+ edac_pci_free_ctl_info(pci);
+ return NULL;
+ }
+
+ return pci;
+}
+EXPORT_SYMBOL_GPL(edac_pci_create_generic_ctl);
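+
+/*
+ * Drivers that only need generic parity polling typically pair this
+ * with edac_pci_release_generic_ctl() below, e.g. (sketch):
+ *
+ *	priv->edac_pci = edac_pci_create_generic_ctl(&pdev->dev, "my_edac");
+ *	...
+ *	edac_pci_release_generic_ctl(priv->edac_pci);
+ */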
+
+/*
+ * edac_pci_release_generic_ctl
+ *
+ * The release function of a generic EDAC PCI polling device
+ */
+void edac_pci_release_generic_ctl(struct edac_pci_ctl_info *pci)
+{
+ debugf0("%s() pci mod=%s\n", __func__, pci->mod_name);
+
+ edac_pci_del_device(pci->dev);
+ edac_pci_free_ctl_info(pci);
+}
+EXPORT_SYMBOL_GPL(edac_pci_release_generic_ctl);
diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
new file mode 100644
index 0000000..5c153dc
--- /dev/null
+++ b/drivers/edac/edac_pci_sysfs.c
@@ -0,0 +1,767 @@
+/*
+ * (C) 2005, 2006 Linux Networx (http://lnxi.com)
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written Doug Thompson <norsk5@xmission.com>
+ *
+ */
+#include <linux/module.h>
+#include <linux/sysdev.h>
+#include <linux/ctype.h>
+
+#include "edac_core.h"
+#include "edac_module.h"
+
+/* Turn off this whole feature if PCI is not configured */
+#ifdef CONFIG_PCI
+
+#define EDAC_PCI_SYMLINK "device"
+
+/* data variables exported via sysfs */
+static int check_pci_errors; /* default NO check PCI parity */
+static int edac_pci_panic_on_pe; /* default NO panic on PCI Parity */
+static int edac_pci_log_pe = 1; /* log PCI parity errors */
+static int edac_pci_log_npe = 1;	/* log PCI non-parity errors */
+static int edac_pci_poll_msec = 1000; /* one second workq period */
+
+static atomic_t pci_parity_count = ATOMIC_INIT(0);
+static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
+
+static struct kobject *edac_pci_top_main_kobj;
+static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
+
+/* getter functions for the data variables */
+int edac_pci_get_check_errors(void)
+{
+ return check_pci_errors;
+}
+
+static int edac_pci_get_log_pe(void)
+{
+ return edac_pci_log_pe;
+}
+
+static int edac_pci_get_log_npe(void)
+{
+ return edac_pci_log_npe;
+}
+
+static int edac_pci_get_panic_on_pe(void)
+{
+ return edac_pci_panic_on_pe;
+}
+
+int edac_pci_get_poll_msec(void)
+{
+ return edac_pci_poll_msec;
+}
+
+/**************************** EDAC PCI sysfs instance *******************/
+static ssize_t instance_pe_count_show(struct edac_pci_ctl_info *pci, char *data)
+{
+ return sprintf(data, "%u\n", atomic_read(&pci->counters.pe_count));
+}
+
+static ssize_t instance_npe_count_show(struct edac_pci_ctl_info *pci,
+ char *data)
+{
+ return sprintf(data, "%u\n", atomic_read(&pci->counters.npe_count));
+}
+
+#define to_instance(k) container_of(k, struct edac_pci_ctl_info, kobj)
+#define to_instance_attr(a) container_of(a, struct instance_attribute, attr)
+
+/* DEVICE instance kobject release() function */
+static void edac_pci_instance_release(struct kobject *kobj)
+{
+ struct edac_pci_ctl_info *pci;
+
+ debugf0("%s()\n", __func__);
+
+ /* Form pointer to containing struct, the pci control struct */
+ pci = to_instance(kobj);
+
+ /* decrement reference count on top main kobj */
+ kobject_put(edac_pci_top_main_kobj);
+
+ kfree(pci); /* Free the control struct */
+}
+
+/* instance specific attribute structure */
+struct instance_attribute {
+ struct attribute attr;
+ ssize_t(*show) (struct edac_pci_ctl_info *, char *);
+ ssize_t(*store) (struct edac_pci_ctl_info *, const char *, size_t);
+};
+
+/* Function to 'show' fields from the edac_pci 'instance' structure */
+static ssize_t edac_pci_instance_show(struct kobject *kobj,
+ struct attribute *attr, char *buffer)
+{
+ struct edac_pci_ctl_info *pci = to_instance(kobj);
+ struct instance_attribute *instance_attr = to_instance_attr(attr);
+
+ if (instance_attr->show)
+ return instance_attr->show(pci, buffer);
+ return -EIO;
+}
+
+/* Function to 'store' fields into the edac_pci 'instance' structure */
+static ssize_t edac_pci_instance_store(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buffer, size_t count)
+{
+ struct edac_pci_ctl_info *pci = to_instance(kobj);
+ struct instance_attribute *instance_attr = to_instance_attr(attr);
+
+ if (instance_attr->store)
+ return instance_attr->store(pci, buffer, count);
+ return -EIO;
+}
+
+/* fs_ops table */
+static struct sysfs_ops pci_instance_ops = {
+ .show = edac_pci_instance_show,
+ .store = edac_pci_instance_store
+};
+
+#define INSTANCE_ATTR(_name, _mode, _show, _store) \
+static struct instance_attribute attr_instance_##_name = { \
+ .attr = {.name = __stringify(_name), .mode = _mode }, \
+ .show = _show, \
+ .store = _store, \
+};
+
+INSTANCE_ATTR(pe_count, S_IRUGO, instance_pe_count_show, NULL);
+INSTANCE_ATTR(npe_count, S_IRUGO, instance_npe_count_show, NULL);
+
+/* pci instance attributes */
+static struct instance_attribute *pci_instance_attr[] = {
+ &attr_instance_pe_count,
+ &attr_instance_npe_count,
+ NULL
+};
+
+/* the ktype for a pci instance */
+static struct kobj_type ktype_pci_instance = {
+ .release = edac_pci_instance_release,
+ .sysfs_ops = &pci_instance_ops,
+ .default_attrs = (struct attribute **)pci_instance_attr,
+};
+
+/*
+ * edac_pci_create_instance_kobj
+ *
+ * construct one EDAC PCI instance's kobject for use
+ */
+static int edac_pci_create_instance_kobj(struct edac_pci_ctl_info *pci, int idx)
+{
+ struct kobject *main_kobj;
+ int err;
+
+ debugf0("%s()\n", __func__);
+
+ /* First bump the ref count on the top main kobj, which will
+	 * track the number of PCI instances we have, and thus keep
+	 * the module loaded while any instance exists
+ */
+ main_kobj = kobject_get(edac_pci_top_main_kobj);
+ if (!main_kobj) {
+ err = -ENODEV;
+ goto error_out;
+ }
+
+ /* And now register this new kobject under the main kobj */
+ err = kobject_init_and_add(&pci->kobj, &ktype_pci_instance,
+ edac_pci_top_main_kobj, "pci%d", idx);
+ if (err != 0) {
+ debugf2("%s() failed to register instance pci%d\n",
+ __func__, idx);
+ kobject_put(edac_pci_top_main_kobj);
+ goto error_out;
+ }
+
+ kobject_uevent(&pci->kobj, KOBJ_ADD);
+ debugf1("%s() Register instance 'pci%d' kobject\n", __func__, idx);
+
+ return 0;
+
+	/* Error unwind stack */
+error_out:
+ return err;
+}
+
+/*
+ * edac_pci_unregister_sysfs_instance_kobj
+ *
+ * unregister the kobj for the EDAC PCI instance
+ */
+static void edac_pci_unregister_sysfs_instance_kobj(
+ struct edac_pci_ctl_info *pci)
+{
+ debugf0("%s()\n", __func__);
+
+ /* Unregister the instance kobject and allow its release
+	 * function to drop the main reference count and then
+ * kfree the memory
+ */
+ kobject_put(&pci->kobj);
+}
+
+/***************************** EDAC PCI sysfs root **********************/
+#define to_edacpci(k) container_of(k, struct edac_pci_ctl_info, kobj)
+#define to_edacpci_attr(a) container_of(a, struct edac_pci_attr, attr)
+
+/* simple show/store functions for attributes */
+static ssize_t edac_pci_int_show(void *ptr, char *buffer)
+{
+ int *value = ptr;
+ return sprintf(buffer, "%d\n", *value);
+}
+
+static ssize_t edac_pci_int_store(void *ptr, const char *buffer, size_t count)
+{
+ int *value = ptr;
+
+ if (isdigit(*buffer))
+ *value = simple_strtoul(buffer, NULL, 0);
+
+ return count;
+}
+
+struct edac_pci_dev_attribute {
+ struct attribute attr;
+ void *value;
+ ssize_t(*show) (void *, char *);
+ ssize_t(*store) (void *, const char *, size_t);
+};
+
+/* Set of show/store abstract level functions for PCI Parity object */
+static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
+ char *buffer)
+{
+ struct edac_pci_dev_attribute *edac_pci_dev;
+ edac_pci_dev = (struct edac_pci_dev_attribute *)attr;
+
+ if (edac_pci_dev->show)
+ return edac_pci_dev->show(edac_pci_dev->value, buffer);
+ return -EIO;
+}
+
+static ssize_t edac_pci_dev_store(struct kobject *kobj,
+ struct attribute *attr, const char *buffer,
+ size_t count)
+{
+ struct edac_pci_dev_attribute *edac_pci_dev;
+ edac_pci_dev = (struct edac_pci_dev_attribute *)attr;
+
+ if (edac_pci_dev->store)
+ return edac_pci_dev->store(edac_pci_dev->value, buffer, count);
+ return -EIO;
+}
+
+static struct sysfs_ops edac_pci_sysfs_ops = {
+ .show = edac_pci_dev_show,
+ .store = edac_pci_dev_store
+};
+
+#define EDAC_PCI_ATTR(_name,_mode,_show,_store) \
+static struct edac_pci_dev_attribute edac_pci_attr_##_name = { \
+ .attr = {.name = __stringify(_name), .mode = _mode }, \
+ .value = &_name, \
+ .show = _show, \
+ .store = _store, \
+};
+
+#define EDAC_PCI_STRING_ATTR(_name,_data,_mode,_show,_store) \
+static struct edac_pci_dev_attribute edac_pci_attr_##_name = { \
+ .attr = {.name = __stringify(_name), .mode = _mode }, \
+ .value = _data, \
+ .show = _show, \
+ .store = _store, \
+};
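+
+/* The only difference between the two helpers above: EDAC_PCI_ATTR
+ * binds the control file to the address of a module variable with the
+ * same name (&_name), while EDAC_PCI_STRING_ATTR takes an explicit
+ * data pointer. For example, EDAC_PCI_ATTR(check_pci_errors, ...)
+ * below wires the file to &check_pci_errors.
+ */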
+
+/* PCI Parity control files */
+EDAC_PCI_ATTR(check_pci_errors, S_IRUGO | S_IWUSR, edac_pci_int_show,
+ edac_pci_int_store);
+EDAC_PCI_ATTR(edac_pci_log_pe, S_IRUGO | S_IWUSR, edac_pci_int_show,
+ edac_pci_int_store);
+EDAC_PCI_ATTR(edac_pci_log_npe, S_IRUGO | S_IWUSR, edac_pci_int_show,
+ edac_pci_int_store);
+EDAC_PCI_ATTR(edac_pci_panic_on_pe, S_IRUGO | S_IWUSR, edac_pci_int_show,
+ edac_pci_int_store);
+EDAC_PCI_ATTR(pci_parity_count, S_IRUGO, edac_pci_int_show, NULL);
+EDAC_PCI_ATTR(pci_nonparity_count, S_IRUGO, edac_pci_int_show, NULL);
+
+/* Base Attributes of the memory ECC object */
+static struct edac_pci_dev_attribute *edac_pci_attr[] = {
+ &edac_pci_attr_check_pci_errors,
+ &edac_pci_attr_edac_pci_log_pe,
+ &edac_pci_attr_edac_pci_log_npe,
+ &edac_pci_attr_edac_pci_panic_on_pe,
+ &edac_pci_attr_pci_parity_count,
+ &edac_pci_attr_pci_nonparity_count,
+ NULL,
+};
+
+/*
+ * edac_pci_release_main_kobj
+ *
+ * This release function is called when the reference count to the
+ * passed kobj goes to zero.
+ *
+ * This kobj is the 'main' kobject that EDAC PCI instances
+ * link to, and thus provide for proper nesting counts
+ */
+static void edac_pci_release_main_kobj(struct kobject *kobj)
+{
+ debugf0("%s() here to module_put(THIS_MODULE)\n", __func__);
+
+ kfree(kobj);
+
+ /* last reference to top EDAC PCI kobject has been removed,
+ * NOW release our ref count on the core module
+ */
+ module_put(THIS_MODULE);
+}
+
+/* ktype struct for the EDAC PCI main kobj */
+static struct kobj_type ktype_edac_pci_main_kobj = {
+ .release = edac_pci_release_main_kobj,
+ .sysfs_ops = &edac_pci_sysfs_ops,
+ .default_attrs = (struct attribute **)edac_pci_attr,
+};
+
+/**
+ * edac_pci_main_kobj_setup()
+ *
+ * setup the sysfs for EDAC PCI attributes
+ * assumes edac_class has already been initialized
+ */
+static int edac_pci_main_kobj_setup(void)
+{
+ int err;
+ struct sysdev_class *edac_class;
+
+ debugf0("%s()\n", __func__);
+
+ /* check and count if we have already created the main kobject */
+ if (atomic_inc_return(&edac_pci_sysfs_refcount) != 1)
+ return 0;
+
+ /* First time, so create the main kobject and its
+ * controls and attributes
+ */
+ edac_class = edac_get_edac_class();
+ if (edac_class == NULL) {
+ debugf1("%s() no edac_class\n", __func__);
+ err = -ENODEV;
+ goto decrement_count_fail;
+ }
+
+ /* Bump the reference count on this module to ensure the
+ * module isn't unloaded until we deconstruct the top
+ * level main kobj for EDAC PCI
+ */
+ if (!try_module_get(THIS_MODULE)) {
+ debugf1("%s() try_module_get() failed\n", __func__);
+ err = -ENODEV;
+ goto decrement_count_fail;
+ }
+
+ edac_pci_top_main_kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL);
+ if (!edac_pci_top_main_kobj) {
+ debugf1("Failed to allocate\n");
+ err = -ENOMEM;
+ goto kzalloc_fail;
+ }
+
+ /* Instantiate the pci object */
+ err = kobject_init_and_add(edac_pci_top_main_kobj,
+ &ktype_edac_pci_main_kobj,
+ &edac_class->kset.kobj, "pci");
+ if (err) {
+ debugf1("Failed to register '.../edac/pci'\n");
+ goto kobject_init_and_add_fail;
+ }
+
+ /* At this point, to 'release' the top level kobject
+ * for EDAC PCI, then edac_pci_main_kobj_teardown()
+ * must be used, for resources to be cleaned up properly
+ */
+ kobject_uevent(edac_pci_top_main_kobj, KOBJ_ADD);
+ debugf1("Registered '.../edac/pci' kobject\n");
+
+ return 0;
+
+ /* Error unwind stack */
+kobject_init_and_add_fail:
+ kfree(edac_pci_top_main_kobj);
+
+kzalloc_fail:
+ module_put(THIS_MODULE);
+
+decrement_count_fail:
+ /* if we are on this error exit, there is nothing to tear down */
+ atomic_dec(&edac_pci_sysfs_refcount);
+
+ return err;
+}
+
+/*
+ * edac_pci_main_kobj_teardown()
+ *
+ * if no instances remain (no longer needed), remove the top level EDAC PCI
+ * kobject with its controls and attributes
+ */
+static void edac_pci_main_kobj_teardown(void)
+{
+ debugf0("%s()\n", __func__);
+
+ /* Decrement the count and only if no more controller instances
+ * are connected, perform the unregistration of the top level
+ * main kobj
+ */
+ if (atomic_dec_return(&edac_pci_sysfs_refcount) == 0) {
+ debugf0("%s() called kobject_put on main kobj\n",
+ __func__);
+ kobject_put(edac_pci_top_main_kobj);
+ }
+}
+
+/*
+ * edac_pci_create_sysfs
+ *
+ * Create the controls/attributes for the specified EDAC PCI device
+ */
+int edac_pci_create_sysfs(struct edac_pci_ctl_info *pci)
+{
+ int err;
+ struct kobject *edac_kobj = &pci->kobj;
+
+ debugf0("%s() idx=%d\n", __func__, pci->pci_idx);
+
+ /* create the top main EDAC PCI kobject, IF needed */
+ err = edac_pci_main_kobj_setup();
+ if (err)
+ return err;
+
+ /* Create this instance's kobject under the MAIN kobject */
+ err = edac_pci_create_instance_kobj(pci, pci->pci_idx);
+ if (err)
+ goto unregister_cleanup;
+
+ err = sysfs_create_link(edac_kobj, &pci->dev->kobj, EDAC_PCI_SYMLINK);
+ if (err) {
+ debugf0("%s() sysfs_create_link() returned err= %d\n",
+ __func__, err);
+ goto symlink_fail;
+ }
+
+ return 0;
+
+ /* Error unwind stack */
+symlink_fail:
+ edac_pci_unregister_sysfs_instance_kobj(pci);
+
+unregister_cleanup:
+ edac_pci_main_kobj_teardown();
+
+ return err;
+}
+
+/*
+ * edac_pci_remove_sysfs
+ *
+ * remove the controls and attributes for this EDAC PCI device
+ */
+void edac_pci_remove_sysfs(struct edac_pci_ctl_info *pci)
+{
+ debugf0("%s() index=%d\n", __func__, pci->pci_idx);
+
+ /* Remove the symlink */
+ sysfs_remove_link(&pci->kobj, EDAC_PCI_SYMLINK);
+
+ /* remove this PCI instance's sysfs entries */
+ edac_pci_unregister_sysfs_instance_kobj(pci);
+
+ /* Call the main unregister function, which will determine
+ * if this 'pci' is the last instance.
+ * If it is, the main kobject will be unregistered as a result
+ */
+ debugf0("%s() calling edac_pci_main_kobj_teardown()\n", __func__);
+ edac_pci_main_kobj_teardown();
+}
+
+/************************ PCI error handling *************************/
+static u16 get_pci_parity_status(struct pci_dev *dev, int secondary)
+{
+ int where;
+ u16 status;
+
+ where = secondary ? PCI_SEC_STATUS : PCI_STATUS;
+ pci_read_config_word(dev, where, &status);
+
+ /* If we get back 0xFFFF then we must suspect that the card has been
+ * pulled but the Linux PCI layer has not yet finished cleaning up.
+ * We don't want to report on such devices
+ */
+
+ if (status == 0xFFFF) {
+ u32 sanity;
+
+ pci_read_config_dword(dev, 0, &sanity);
+
+ if (sanity == 0xFFFFFFFF)
+ return 0;
+ }
+
+ status &= PCI_STATUS_DETECTED_PARITY | PCI_STATUS_SIG_SYSTEM_ERROR |
+ PCI_STATUS_PARITY;
+
+ if (status)
+ /* reset only the bits we are interested in */
+ pci_write_config_word(dev, where, status);
+
+ return status;
+}
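+
+/* Worked example with a hypothetical status of 0x8100: masking with
+ * PCI_STATUS_DETECTED_PARITY (0x8000) | PCI_STATUS_SIG_SYSTEM_ERROR
+ * (0x4000) | PCI_STATUS_PARITY (0x100) keeps 0x8100, and writing that
+ * value back clears exactly those two write-one-to-clear bits.
+ */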
+
+
+/* Clear any PCI parity errors logged by this device. */
+static void edac_pci_dev_parity_clear(struct pci_dev *dev)
+{
+ u8 header_type;
+
+ debugf0("%s()\n", __func__);
+
+ get_pci_parity_status(dev, 0);
+
+ /* read the device TYPE, looking for bridges */
+ pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
+
+ if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE)
+ get_pci_parity_status(dev, 1);
+}
+
+/*
+ * PCI Parity polling
+ *
+ * Function to retrieve the current parity status
+ * and decode it
+ *
+ */
+static void edac_pci_dev_parity_test(struct pci_dev *dev)
+{
+ unsigned long flags;
+ u16 status;
+ u8 header_type;
+
+ /* stop any interrupts until we can acquire the status */
+ local_irq_save(flags);
+
+ /* read the STATUS register on this device */
+ status = get_pci_parity_status(dev, 0);
+
+ /* read the device TYPE, looking for bridges */
+ pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
+
+ local_irq_restore(flags);
+
+ debugf4("PCI STATUS= 0x%04x %s\n", status, dev->dev.bus_id);
+
+ /* check the status reg for errors on boards NOT marked as broken
+ * if broken, we cannot trust any of the status bits
+ */
+ if (status && !dev->broken_parity_status) {
+ if (status & (PCI_STATUS_SIG_SYSTEM_ERROR)) {
+ edac_printk(KERN_CRIT, EDAC_PCI,
+ "Signaled System Error on %s\n",
+ pci_name(dev));
+ atomic_inc(&pci_nonparity_count);
+ }
+
+ if (status & (PCI_STATUS_PARITY)) {
+ edac_printk(KERN_CRIT, EDAC_PCI,
+ "Master Data Parity Error on %s\n",
+ pci_name(dev));
+
+ atomic_inc(&pci_parity_count);
+ }
+
+ if (status & (PCI_STATUS_DETECTED_PARITY)) {
+ edac_printk(KERN_CRIT, EDAC_PCI,
+ "Detected Parity Error on %s\n",
+ pci_name(dev));
+
+ atomic_inc(&pci_parity_count);
+ }
+ }
+
+
+ debugf4("PCI HEADER TYPE= 0x%02x %s\n", header_type, dev->dev.bus_id);
+
+ if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
+ /* On bridges, need to examine secondary status register */
+ status = get_pci_parity_status(dev, 1);
+
+ debugf4("PCI SEC_STATUS= 0x%04x %s\n", status, dev->dev.bus_id);
+
+ /* check the secondary status reg for errors,
+ * on NOT broken boards
+ */
+ if (status && !dev->broken_parity_status) {
+ if (status & (PCI_STATUS_SIG_SYSTEM_ERROR)) {
+ edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
+ "Signaled System Error on %s\n",
+ pci_name(dev));
+ atomic_inc(&pci_nonparity_count);
+ }
+
+ if (status & (PCI_STATUS_PARITY)) {
+ edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
+ "Master Data Parity Error on "
+ "%s\n", pci_name(dev));
+
+ atomic_inc(&pci_parity_count);
+ }
+
+ if (status & (PCI_STATUS_DETECTED_PARITY)) {
+ edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
+ "Detected Parity Error on %s\n",
+ pci_name(dev));
+
+ atomic_inc(&pci_parity_count);
+ }
+ }
+ }
+}
+
+/* reduce some complexity in definition of the iterator */
+typedef void (*pci_parity_check_fn_t) (struct pci_dev *dev);
+
+/*
+ * pci_dev parity list iterator
+ * Scan the PCI device list in one pass, looking for SERRs,
+ * Master Parity errors, or Parity errors on primary or secondary devices
+ */
+static inline void edac_pci_dev_parity_iterator(pci_parity_check_fn_t fn)
+{
+ struct pci_dev *dev = NULL;
+
+ /* request for kernel access to the next PCI device, if any,
+ * and while we are looking at it have its reference count
+ * bumped until we are done with it
+ */
+ while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
+ fn(dev);
+ }
+}
+
+/*
+ * edac_pci_do_parity_check
+ *
+ * performs the actual PCI parity check operation
+ */
+void edac_pci_do_parity_check(void)
+{
+ int before_count;
+
+ debugf3("%s()\n", __func__);
+
+ /* if policy has PCI check off, leave now */
+ if (!check_pci_errors)
+ return;
+
+ before_count = atomic_read(&pci_parity_count);
+
+ /* scan all PCI devices looking for a Parity Error on devices and
+ * bridges.
+ * The iterator calls pci_get_device() which might sleep, thus
+ * we cannot disable interrupts in this scan.
+ */
+ edac_pci_dev_parity_iterator(edac_pci_dev_parity_test);
+
+ /* Only if operator has selected panic on PCI Error */
+ if (edac_pci_get_panic_on_pe()) {
+ /* If the count is different 'after' from 'before' */
+ if (before_count != atomic_read(&pci_parity_count))
+ panic("EDAC: PCI Parity Error");
+ }
+}
+
+/*
+ * edac_pci_clear_parity_errors
+ *
+ * function to perform an iteration over the PCI devices
+ * and clear their current status
+ */
+void edac_pci_clear_parity_errors(void)
+{
+ /* Clear any PCI bus parity errors that devices initially have logged
+ * in their registers.
+ */
+ edac_pci_dev_parity_iterator(edac_pci_dev_parity_clear);
+}
+
+/*
+ * edac_pci_handle_pe
+ *
+ * Called to handle a PARITY ERROR event
+ */
+void edac_pci_handle_pe(struct edac_pci_ctl_info *pci, const char *msg)
+{
+
+ /* bump this instance's PE counter; the global PE counter is
+ * incremented by edac_pci_do_parity_check() below */
+ atomic_inc(&pci->counters.pe_count);
+
+ if (edac_pci_get_log_pe())
+ edac_pci_printk(pci, KERN_WARNING,
+ "Parity Error ctl: %s %d: %s\n",
+ pci->ctl_name, pci->pci_idx, msg);
+
+ /*
+ * poke all PCI devices and see which one is the troublemaker
+ * panic() is called if set
+ */
+ edac_pci_do_parity_check();
+}
+EXPORT_SYMBOL_GPL(edac_pci_handle_pe);
+
+
+/*
+ * edac_pci_handle_npe
+ *
+ * Called to handle a NON-PARITY ERROR event
+ */
+void edac_pci_handle_npe(struct edac_pci_ctl_info *pci, const char *msg)
+{
+
+ /* bump this instance's NPE counter; the global NPE counter is
+ * incremented by edac_pci_do_parity_check() below */
+ atomic_inc(&pci->counters.npe_count);
+
+ if (edac_pci_get_log_npe())
+ edac_pci_printk(pci, KERN_WARNING,
+ "Non-Parity Error ctl: %s %d: %s\n",
+ pci->ctl_name, pci->pci_idx, msg);
+
+ /*
+ * poke all PCI devices and see which one is the troublemaker
+ * panic() is called if set
+ */
+ edac_pci_do_parity_check();
+}
+EXPORT_SYMBOL_GPL(edac_pci_handle_npe);
+
+/*
+ * Define the PCI parameters for this module
+ */
+module_param(check_pci_errors, int, 0644);
+MODULE_PARM_DESC(check_pci_errors,
+ "Check for PCI bus parity errors: 0=off 1=on");
+module_param(edac_pci_panic_on_pe, int, 0644);
+MODULE_PARM_DESC(edac_pci_panic_on_pe,
+ "Panic on PCI Bus Parity error: 0=off 1=on");
+
+#endif /* CONFIG_PCI */
diff --git a/drivers/edac/edac_stub.c b/drivers/edac/edac_stub.c
new file mode 100644
index 0000000..20b428a
--- /dev/null
+++ b/drivers/edac/edac_stub.c
@@ -0,0 +1,46 @@
+/*
+ * common EDAC components that must be in kernel
+ *
+ * Author: Dave Jiang <djiang@mvista.com>
+ *
+ * 2007 (c) MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ *
+ */
+#include <linux/module.h>
+#include <linux/edac.h>
+#include <asm/atomic.h>
+#include <asm/edac.h>
+
+int edac_op_state = EDAC_OPSTATE_INVAL;
+EXPORT_SYMBOL_GPL(edac_op_state);
+
+atomic_t edac_handlers = ATOMIC_INIT(0);
+EXPORT_SYMBOL_GPL(edac_handlers);
+
+int edac_err_assert = 0;
+EXPORT_SYMBOL_GPL(edac_err_assert);
+
+/*
+ * called to determine if there is an EDAC driver interested in
+ * knowing an event (such as NMI) occurred
+ */
+int edac_handler_set(void)
+{
+ if (edac_op_state == EDAC_OPSTATE_POLL)
+ return 0;
+
+ return atomic_read(&edac_handlers);
+}
+EXPORT_SYMBOL_GPL(edac_handler_set);
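+
+/* A minimal usage sketch (hypothetical caller): arch-level NMI code
+ * can test whether an EDAC driver wants the event before forwarding:
+ *
+ * if (edac_handler_set())
+ * edac_atomic_assert_error();
+ */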
+
+/*
+ * handler for NMI type of interrupts to assert error
+ */
+void edac_atomic_assert_error(void)
+{
+ edac_err_assert++;
+}
+EXPORT_SYMBOL_GPL(edac_atomic_assert_error);
diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
new file mode 100644
index 0000000..6c9a0f2
--- /dev/null
+++ b/drivers/edac/i3000_edac.c
@@ -0,0 +1,554 @@
+/*
+ * Intel 3000/3010 Memory Controller kernel module
+ * Copyright (C) 2007 Akamai Technologies, Inc.
+ * Shamelessly copied from:
+ * Intel D82875P Memory Controller kernel module
+ * (C) 2003 Linux Networx (http://lnxi.com)
+ *
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+#include <linux/edac.h>
+#include "edac_core.h"
+
+#define I3000_REVISION "1.1"
+
+#define EDAC_MOD_STR "i3000_edac"
+
+#define I3000_RANKS 8
+#define I3000_RANKS_PER_CHANNEL 4
+#define I3000_CHANNELS 2
+
+/* Intel 3000 register addresses - device 0 function 0 - DRAM Controller */
+
+#define I3000_MCHBAR 0x44 /* MCH Memory Mapped Register BAR */
+#define I3000_MCHBAR_MASK 0xffffc000
+#define I3000_MMR_WINDOW_SIZE 16384
+
+#define I3000_EDEAP 0x70 /* Extended DRAM Error Address Pointer (8b)
+ *
+ * 7:1 reserved
+ * 0 bit 32 of address
+ */
+#define I3000_DEAP 0x58 /* DRAM Error Address Pointer (32b)
+ *
+ * 31:7 address
+ * 6:1 reserved
+ * 0 Error channel 0/1
+ */
+#define I3000_DEAP_GRAIN (1 << 7)
+
+/*
+ * Helper functions to decode the DEAP/EDEAP hardware registers.
+ *
+ * The type promotion here is deliberate; we're deriving an
+ * unsigned long pfn and offset from hardware regs which are u8/u32.
+ */
+
+static inline unsigned long deap_pfn(u8 edeap, u32 deap)
+{
+ deap >>= PAGE_SHIFT;
+ deap |= (edeap & 1) << (32 - PAGE_SHIFT);
+ return deap;
+}
+
+static inline unsigned long deap_offset(u32 deap)
+{
+ return deap & ~(I3000_DEAP_GRAIN - 1) & ~PAGE_MASK;
+}
+
+static inline int deap_channel(u32 deap)
+{
+ return deap & 1;
+}
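+
+/* Worked example with hypothetical register values, assuming
+ * PAGE_SHIFT == 12: for edeap = 0x01 and deap = 0x12345679,
+ * deap_channel() returns 1 (bit 0), deap_pfn() returns
+ * (0x12345679 >> 12) | (1 << 20) = 0x112345, and deap_offset()
+ * returns 0x679 & ~0x7f & 0xfff = 0x600 (128-byte grain).
+ */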
+
+#define I3000_DERRSYN 0x5c /* DRAM Error Syndrome (8b)
+ *
+ * 7:0 DRAM ECC Syndrome
+ */
+
+#define I3000_ERRSTS 0xc8 /* Error Status Register (16b)
+ *
+ * 15:12 reserved
+ * 11 MCH Thermal Sensor Event
+ * for SMI/SCI/SERR
+ * 10 reserved
+ * 9 LOCK to non-DRAM Memory Flag (LCKF)
+ * 8 Received Refresh Timeout Flag (RRTOF)
+ * 7:2 reserved
+ * 1 Multi-bit DRAM ECC Error Flag (DMERR)
+ * 0 Single-bit DRAM ECC Error Flag (DSERR)
+ */
+#define I3000_ERRSTS_BITS 0x0b03 /* bits which indicate errors */
+#define I3000_ERRSTS_UE 0x0002
+#define I3000_ERRSTS_CE 0x0001
+
+#define I3000_ERRCMD 0xca /* Error Command (16b)
+ *
+ * 15:12 reserved
+ * 11 SERR on MCH Thermal Sensor Event
+ * (TSESERR)
+ * 10 reserved
+ * 9 SERR on LOCK to non-DRAM Memory
+ * (LCKERR)
+ * 8 SERR on DRAM Refresh Timeout
+ * (DRTOERR)
+ * 7:2 reserved
+ * 1 SERR Multi-Bit DRAM ECC Error
+ * (DMERR)
+ * 0 SERR on Single-Bit ECC Error
+ * (DSERR)
+ */
+
+/* Intel MMIO register space - device 0 function 0 - MMR space */
+
+#define I3000_DRB_SHIFT 25 /* 32MiB grain */
+
+#define I3000_C0DRB 0x100 /* Channel 0 DRAM Rank Boundary (8b x 4)
+ *
+ * 7:0 Channel 0 DRAM Rank Boundary Address
+ */
+#define I3000_C1DRB 0x180 /* Channel 1 DRAM Rank Boundary (8b x 4)
+ *
+ * 7:0 Channel 1 DRAM Rank Boundary Address
+ */
+
+#define I3000_C0DRA 0x108 /* Channel 0 DRAM Rank Attribute (8b x 2)
+ *
+ * 7 reserved
+ * 6:4 DRAM odd Rank Attribute
+ * 3 reserved
+ * 2:0 DRAM even Rank Attribute
+ *
+ * Each attribute defines the page
+ * size of the corresponding rank:
+ * 000: unpopulated
+ * 001: reserved
+ * 010: 4 KB
+ * 011: 8 KB
+ * 100: 16 KB
+ * Others: reserved
+ */
+#define I3000_C1DRA 0x188 /* Channel 1 DRAM Rank Attribute (8b x 2) */
+
+static inline unsigned char odd_rank_attrib(unsigned char dra)
+{
+ return (dra & 0x70) >> 4;
+}
+
+static inline unsigned char even_rank_attrib(unsigned char dra)
+{
+ return dra & 0x07;
+}
+
+#define I3000_C0DRC0 0x120 /* DRAM Controller Mode 0 (32b)
+ *
+ * 31:30 reserved
+ * 29 Initialization Complete (IC)
+ * 28:11 reserved
+ * 10:8 Refresh Mode Select (RMS)
+ * 7 reserved
+ * 6:4 Mode Select (SMS)
+ * 3:2 reserved
+ * 1:0 DRAM Type (DT)
+ */
+
+#define I3000_C0DRC1 0x124 /* DRAM Controller Mode 1 (32b)
+ *
+ * 31 Enhanced Addressing Enable (ENHADE)
+ * 30:0 reserved
+ */
+
+enum i3000p_chips {
+ I3000 = 0,
+};
+
+struct i3000_dev_info {
+ const char *ctl_name;
+};
+
+struct i3000_error_info {
+ u16 errsts;
+ u8 derrsyn;
+ u8 edeap;
+ u32 deap;
+ u16 errsts2;
+};
+
+static const struct i3000_dev_info i3000_devs[] = {
+ [I3000] = {
+ .ctl_name = "i3000"},
+};
+
+static struct pci_dev *mci_pdev;
+static int i3000_registered = 1;
+static struct edac_pci_ctl_info *i3000_pci;
+
+static void i3000_get_error_info(struct mem_ctl_info *mci,
+ struct i3000_error_info *info)
+{
+ struct pci_dev *pdev;
+
+ pdev = to_pci_dev(mci->dev);
+
+ /*
+ * This is a mess because there is no atomic way to read all the
+ * registers at once, and a CE indication can be overwritten by a
+ * UE between reads.
+ */
+ pci_read_config_word(pdev, I3000_ERRSTS, &info->errsts);
+ if (!(info->errsts & I3000_ERRSTS_BITS))
+ return;
+ pci_read_config_byte(pdev, I3000_EDEAP, &info->edeap);
+ pci_read_config_dword(pdev, I3000_DEAP, &info->deap);
+ pci_read_config_byte(pdev, I3000_DERRSYN, &info->derrsyn);
+ pci_read_config_word(pdev, I3000_ERRSTS, &info->errsts2);
+
+ /*
+ * If the error is the same for both reads then the first set
+ * of reads is valid. If there is a change then there is a CE
+ * with no info and the second set of reads is valid and
+ * should be UE info.
+ */
+ if ((info->errsts ^ info->errsts2) & I3000_ERRSTS_BITS) {
+ pci_read_config_byte(pdev, I3000_EDEAP, &info->edeap);
+ pci_read_config_dword(pdev, I3000_DEAP, &info->deap);
+ pci_read_config_byte(pdev, I3000_DERRSYN, &info->derrsyn);
+ }
+
+ /*
+ * Clear any error bits.
+ * (Yes, we really clear bits by writing 1 to them.)
+ */
+ pci_write_bits16(pdev, I3000_ERRSTS, I3000_ERRSTS_BITS,
+ I3000_ERRSTS_BITS);
+}
+
+static int i3000_process_error_info(struct mem_ctl_info *mci,
+ struct i3000_error_info *info,
+ int handle_errors)
+{
+ int row, multi_chan, channel;
+ unsigned long pfn, offset;
+
+ multi_chan = mci->csrows[0].nr_channels - 1;
+
+ if (!(info->errsts & I3000_ERRSTS_BITS))
+ return 0;
+
+ if (!handle_errors)
+ return 1;
+
+ if ((info->errsts ^ info->errsts2) & I3000_ERRSTS_BITS) {
+ edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+ info->errsts = info->errsts2;
+ }
+
+ pfn = deap_pfn(info->edeap, info->deap);
+ offset = deap_offset(info->deap);
+ channel = deap_channel(info->deap);
+
+ row = edac_mc_find_csrow_by_page(mci, pfn);
+
+ if (info->errsts & I3000_ERRSTS_UE)
+ edac_mc_handle_ue(mci, pfn, offset, row, "i3000 UE");
+ else
+ edac_mc_handle_ce(mci, pfn, offset, info->derrsyn, row,
+ multi_chan ? channel : 0, "i3000 CE");
+
+ return 1;
+}
+
+static void i3000_check(struct mem_ctl_info *mci)
+{
+ struct i3000_error_info info;
+
+ debugf1("MC%d: %s()\n", mci->mc_idx, __func__);
+ i3000_get_error_info(mci, &info);
+ i3000_process_error_info(mci, &info, 1);
+}
+
+static int i3000_is_interleaved(const unsigned char *c0dra,
+ const unsigned char *c1dra,
+ const unsigned char *c0drb,
+ const unsigned char *c1drb)
+{
+ int i;
+
+ /*
+ * If the channels aren't populated identically then
+ * we're not interleaved.
+ */
+ for (i = 0; i < I3000_RANKS_PER_CHANNEL / 2; i++)
+ if (odd_rank_attrib(c0dra[i]) != odd_rank_attrib(c1dra[i]) ||
+ even_rank_attrib(c0dra[i]) !=
+ even_rank_attrib(c1dra[i]))
+ return 0;
+
+ /*
+ * If the rank boundaries for the two channels are different
+ * then we're not interleaved.
+ */
+ for (i = 0; i < I3000_RANKS_PER_CHANNEL; i++)
+ if (c0drb[i] != c1drb[i])
+ return 0;
+
+ return 1;
+}
+
+static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
+{
+ int rc;
+ int i;
+ struct mem_ctl_info *mci = NULL;
+ unsigned long last_cumul_size;
+ int interleaved, nr_channels;
+ unsigned char dra[I3000_RANKS / 2], drb[I3000_RANKS];
+ unsigned char *c0dra = dra, *c1dra = &dra[I3000_RANKS_PER_CHANNEL / 2];
+ unsigned char *c0drb = drb, *c1drb = &drb[I3000_RANKS_PER_CHANNEL];
+ unsigned long mchbar;
+ void __iomem *window;
+
+ debugf0("MC: %s()\n", __func__);
+
+ pci_read_config_dword(pdev, I3000_MCHBAR, (u32 *)&mchbar);
+ mchbar &= I3000_MCHBAR_MASK;
+ window = ioremap_nocache(mchbar, I3000_MMR_WINDOW_SIZE);
+ if (!window) {
+ printk(KERN_ERR "i3000: cannot map mmio space at 0x%lx\n",
+ mchbar);
+ return -ENODEV;
+ }
+
+ c0dra[0] = readb(window + I3000_C0DRA + 0); /* ranks 0,1 */
+ c0dra[1] = readb(window + I3000_C0DRA + 1); /* ranks 2,3 */
+ c1dra[0] = readb(window + I3000_C1DRA + 0); /* ranks 0,1 */
+ c1dra[1] = readb(window + I3000_C1DRA + 1); /* ranks 2,3 */
+
+ for (i = 0; i < I3000_RANKS_PER_CHANNEL; i++) {
+ c0drb[i] = readb(window + I3000_C0DRB + i);
+ c1drb[i] = readb(window + I3000_C1DRB + i);
+ }
+
+ iounmap(window);
+
+ /*
+ * Figure out how many channels we have.
+ *
+ * If we have what the datasheet calls "asymmetric channels"
+ * (essentially the same as what was called "virtual single
+ * channel mode" in the i82875) then it's a single channel as
+ * far as EDAC is concerned.
+ */
+ interleaved = i3000_is_interleaved(c0dra, c1dra, c0drb, c1drb);
+ nr_channels = interleaved ? 2 : 1;
+ mci = edac_mc_alloc(0, I3000_RANKS / nr_channels, nr_channels, 0);
+ if (!mci)
+ return -ENOMEM;
+
+ debugf3("MC: %s(): init mci\n", __func__);
+
+ mci->dev = &pdev->dev;
+ mci->mtype_cap = MEM_FLAG_DDR2;
+
+ mci->edac_ctl_cap = EDAC_FLAG_SECDED;
+ mci->edac_cap = EDAC_FLAG_SECDED;
+
+ mci->mod_name = EDAC_MOD_STR;
+ mci->mod_ver = I3000_REVISION;
+ mci->ctl_name = i3000_devs[dev_idx].ctl_name;
+ mci->dev_name = pci_name(pdev);
+ mci->edac_check = i3000_check;
+ mci->ctl_page_to_phys = NULL;
+
+ /*
+ * The dram rank boundary (DRB) reg values are boundary addresses
+ * for each DRAM rank with a granularity of 32MB. DRB regs are
+ * cumulative; the last one will contain the total memory
+ * contained in all ranks.
+ *
+ * If we're in interleaved mode then we're only walking through
+ * the ranks of controller 0, so we double all the values we see.
+ */
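+ /* For example, with hypothetical DRB values 0x02, 0x04, 0x04,
+ * 0x04 in non-interleaved mode: rank 0 ends at 2 * 32MiB = 64MiB,
+ * rank 1 at 4 * 32MiB = 128MiB, and ranks 2 and 3 repeat the same
+ * cumulative boundary, so they are treated as empty below.
+ */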
+ for (last_cumul_size = i = 0; i < mci->nr_csrows; i++) {
+ u8 value;
+ u32 cumul_size;
+ struct csrow_info *csrow = &mci->csrows[i];
+
+ value = drb[i];
+ cumul_size = value << (I3000_DRB_SHIFT - PAGE_SHIFT);
+ if (interleaved)
+ cumul_size <<= 1;
+ debugf3("MC: %s(): (%d) cumul_size 0x%x\n",
+ __func__, i, cumul_size);
+ if (cumul_size == last_cumul_size) {
+ csrow->mtype = MEM_EMPTY;
+ continue;
+ }
+
+ csrow->first_page = last_cumul_size;
+ csrow->last_page = cumul_size - 1;
+ csrow->nr_pages = cumul_size - last_cumul_size;
+ last_cumul_size = cumul_size;
+ csrow->grain = I3000_DEAP_GRAIN;
+ csrow->mtype = MEM_DDR2;
+ csrow->dtype = DEV_UNKNOWN;
+ csrow->edac_mode = EDAC_UNKNOWN;
+ }
+
+ /*
+ * Clear any error bits.
+ * (Yes, we really clear bits by writing 1 to them.)
+ */
+ pci_write_bits16(pdev, I3000_ERRSTS, I3000_ERRSTS_BITS,
+ I3000_ERRSTS_BITS);
+
+ rc = -ENODEV;
+ if (edac_mc_add_mc(mci)) {
+ debugf3("MC: %s(): failed edac_mc_add_mc()\n", __func__);
+ goto fail;
+ }
+
+ /* allocating generic PCI control info */
+ i3000_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
+ if (!i3000_pci) {
+ printk(KERN_WARNING
+ "%s(): Unable to create PCI control\n",
+ __func__);
+ printk(KERN_WARNING
+ "%s(): PCI error report via EDAC not setup\n",
+ __func__);
+ }
+
+ /* get this far and it's successful */
+ debugf3("MC: %s(): success\n", __func__);
+ return 0;
+
+fail:
+ if (mci)
+ edac_mc_free(mci);
+
+ return rc;
+}
+
+/* returns count (>= 0), or negative on error */
+static int __devinit i3000_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int rc;
+
+ debugf0("MC: %s()\n", __func__);
+
+ if (pci_enable_device(pdev) < 0)
+ return -EIO;
+
+ rc = i3000_probe1(pdev, ent->driver_data);
+ if (!mci_pdev)
+ mci_pdev = pci_dev_get(pdev);
+
+ return rc;
+}
+
+static void __devexit i3000_remove_one(struct pci_dev *pdev)
+{
+ struct mem_ctl_info *mci;
+
+ debugf0("%s()\n", __func__);
+
+ if (i3000_pci)
+ edac_pci_release_generic_ctl(i3000_pci);
+
+ mci = edac_mc_del_mc(&pdev->dev);
+ if (!mci)
+ return;
+
+ edac_mc_free(mci);
+}
+
+static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
+ {
+ PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ I3000},
+ {
+ 0,
+ } /* 0 terminated list. */
+};
+
+MODULE_DEVICE_TABLE(pci, i3000_pci_tbl);
+
+static struct pci_driver i3000_driver = {
+ .name = EDAC_MOD_STR,
+ .probe = i3000_init_one,
+ .remove = __devexit_p(i3000_remove_one),
+ .id_table = i3000_pci_tbl,
+};
+
+static int __init i3000_init(void)
+{
+ int pci_rc;
+
+ debugf3("MC: %s()\n", __func__);
+
+ /* Ensure that the OPSTATE is set correctly for POLL or NMI */
+ opstate_init();
+
+ pci_rc = pci_register_driver(&i3000_driver);
+ if (pci_rc < 0)
+ goto fail0;
+
+ if (!mci_pdev) {
+ i3000_registered = 0;
+ mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_3000_HB, NULL);
+ if (!mci_pdev) {
+ debugf0("i3000 pci_get_device fail\n");
+ pci_rc = -ENODEV;
+ goto fail1;
+ }
+
+ pci_rc = i3000_init_one(mci_pdev, i3000_pci_tbl);
+ if (pci_rc < 0) {
+ debugf0("i3000 init fail\n");
+ pci_rc = -ENODEV;
+ goto fail1;
+ }
+ }
+
+ return 0;
+
+fail1:
+ pci_unregister_driver(&i3000_driver);
+
+fail0:
+ if (mci_pdev)
+ pci_dev_put(mci_pdev);
+
+ return pci_rc;
+}
+
+static void __exit i3000_exit(void)
+{
+ debugf3("MC: %s()\n", __func__);
+
+ pci_unregister_driver(&i3000_driver);
+ if (!i3000_registered) {
+ i3000_remove_one(mci_pdev);
+ pci_dev_put(mci_pdev);
+ }
+}
+
+module_init(i3000_init);
+module_exit(i3000_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Akamai Technologies Arthur Ulfeldt/Jason Uhlenkott");
+MODULE_DESCRIPTION("MC support for Intel 3000 memory hub controllers");
+
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
new file mode 100644
index 0000000..d335086
--- /dev/null
+++ b/drivers/edac/i5000_edac.c
@@ -0,0 +1,1573 @@
+/*
+ * Intel 5000(P/V/X) class Memory Controllers kernel module
+ *
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written by Douglas Thompson Linux Networx (http://lnxi.com)
+ * norsk5@xmission.com
+ *
+ * This module is based on the following document:
+ *
+ * Intel 5000X Chipset Memory Controller Hub (MCH) - Datasheet
+ * http://developer.intel.com/design/chipsets/datashts/313070.htm
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+#include <linux/edac.h>
+#include <asm/mmzone.h>
+
+#include "edac_core.h"
+
+/*
+ * Alter this version for the I5000 module when modifications are made
+ */
+#define I5000_REVISION " Ver: 2.0.12 " __DATE__
+#define EDAC_MOD_STR "i5000_edac"
+
+#define i5000_printk(level, fmt, arg...) \
+ edac_printk(level, "i5000", fmt, ##arg)
+
+#define i5000_mc_printk(mci, level, fmt, arg...) \
+ edac_mc_chipset_printk(mci, level, "i5000", fmt, ##arg)
+
+#ifndef PCI_DEVICE_ID_INTEL_FBD_0
+#define PCI_DEVICE_ID_INTEL_FBD_0 0x25F5
+#endif
+#ifndef PCI_DEVICE_ID_INTEL_FBD_1
+#define PCI_DEVICE_ID_INTEL_FBD_1 0x25F6
+#endif
+
+/* Device 16,
+ * Function 0: System Address
+ * Function 1: Memory Branch Map, Control, Errors Register
+ * Function 2: FSB Error Registers
+ *
+ * All 3 functions of Device 16 (0,1,2) share the SAME DID
+ */
+#define PCI_DEVICE_ID_INTEL_I5000_DEV16 0x25F0
+
+/* OFFSETS for Function 0 */
+
+/* OFFSETS for Function 1 */
+#define AMBASE 0x48
+#define MAXCH 0x56
+#define MAXDIMMPERCH 0x57
+#define TOLM 0x6C
+#define REDMEMB 0x7C
+#define RED_ECC_LOCATOR(x) ((x) & 0x3FFFF)
+#define REC_ECC_LOCATOR_EVEN(x) ((x) & 0x001FF)
+#define REC_ECC_LOCATOR_ODD(x) ((x) & 0x3FE00)
+#define MIR0 0x80
+#define MIR1 0x84
+#define MIR2 0x88
+#define AMIR0 0x8C
+#define AMIR1 0x90
+#define AMIR2 0x94
+
+#define FERR_FAT_FBD 0x98
+#define NERR_FAT_FBD 0x9C
+#define EXTRACT_FBDCHAN_INDX(x) (((x)>>28) & 0x3)
+#define FERR_FAT_FBDCHAN 0x30000000
+#define FERR_FAT_M3ERR 0x00000004
+#define FERR_FAT_M2ERR 0x00000002
+#define FERR_FAT_M1ERR 0x00000001
+#define FERR_FAT_MASK (FERR_FAT_M1ERR | \
+ FERR_FAT_M2ERR | \
+ FERR_FAT_M3ERR)
+
+#define FERR_NF_FBD 0xA0
+
+/* Thermal and SPD or FBD errors */
+#define FERR_NF_M28ERR 0x01000000
+#define FERR_NF_M27ERR 0x00800000
+#define FERR_NF_M26ERR 0x00400000
+#define FERR_NF_M25ERR 0x00200000
+#define FERR_NF_M24ERR 0x00100000
+#define FERR_NF_M23ERR 0x00080000
+#define FERR_NF_M22ERR 0x00040000
+#define FERR_NF_M21ERR 0x00020000
+
+/* Correctable errors */
+#define FERR_NF_M20ERR 0x00010000
+#define FERR_NF_M19ERR 0x00008000
+#define FERR_NF_M18ERR 0x00004000
+#define FERR_NF_M17ERR 0x00002000
+
+/* Non-Retry or redundant Retry errors */
+#define FERR_NF_M16ERR 0x00001000
+#define FERR_NF_M15ERR 0x00000800
+#define FERR_NF_M14ERR 0x00000400
+#define FERR_NF_M13ERR 0x00000200
+
+/* Uncorrectable errors */
+#define FERR_NF_M12ERR 0x00000100
+#define FERR_NF_M11ERR 0x00000080
+#define FERR_NF_M10ERR 0x00000040
+#define FERR_NF_M9ERR 0x00000020
+#define FERR_NF_M8ERR 0x00000010
+#define FERR_NF_M7ERR 0x00000008
+#define FERR_NF_M6ERR 0x00000004
+#define FERR_NF_M5ERR 0x00000002
+#define FERR_NF_M4ERR 0x00000001
+
+#define FERR_NF_UNCORRECTABLE (FERR_NF_M12ERR | \
+ FERR_NF_M11ERR | \
+ FERR_NF_M10ERR | \
+ FERR_NF_M9ERR | \
+ FERR_NF_M8ERR | \
+ FERR_NF_M7ERR | \
+ FERR_NF_M6ERR | \
+ FERR_NF_M5ERR | \
+ FERR_NF_M4ERR)
+#define FERR_NF_CORRECTABLE (FERR_NF_M20ERR | \
+ FERR_NF_M19ERR | \
+ FERR_NF_M18ERR | \
+ FERR_NF_M17ERR)
+#define FERR_NF_DIMM_SPARE (FERR_NF_M27ERR | \
+ FERR_NF_M28ERR)
+#define FERR_NF_THERMAL (FERR_NF_M26ERR | \
+ FERR_NF_M25ERR | \
+ FERR_NF_M24ERR | \
+ FERR_NF_M23ERR)
+#define FERR_NF_SPD_PROTOCOL (FERR_NF_M22ERR)
+#define FERR_NF_NORTH_CRC (FERR_NF_M21ERR)
+#define FERR_NF_NON_RETRY (FERR_NF_M13ERR | \
+ FERR_NF_M14ERR | \
+ FERR_NF_M15ERR)
+
+#define NERR_NF_FBD 0xA4
+#define FERR_NF_MASK (FERR_NF_UNCORRECTABLE | \
+ FERR_NF_CORRECTABLE | \
+ FERR_NF_DIMM_SPARE | \
+ FERR_NF_THERMAL | \
+ FERR_NF_SPD_PROTOCOL | \
+ FERR_NF_NORTH_CRC | \
+ FERR_NF_NON_RETRY)
+
+#define EMASK_FBD 0xA8
+#define EMASK_FBD_M28ERR 0x08000000
+#define EMASK_FBD_M27ERR 0x04000000
+#define EMASK_FBD_M26ERR 0x02000000
+#define EMASK_FBD_M25ERR 0x01000000
+#define EMASK_FBD_M24ERR 0x00800000
+#define EMASK_FBD_M23ERR 0x00400000
+#define EMASK_FBD_M22ERR 0x00200000
+#define EMASK_FBD_M21ERR 0x00100000
+#define EMASK_FBD_M20ERR 0x00080000
+#define EMASK_FBD_M19ERR 0x00040000
+#define EMASK_FBD_M18ERR 0x00020000
+#define EMASK_FBD_M17ERR 0x00010000
+
+#define EMASK_FBD_M15ERR 0x00004000
+#define EMASK_FBD_M14ERR 0x00002000
+#define EMASK_FBD_M13ERR 0x00001000
+#define EMASK_FBD_M12ERR 0x00000800
+#define EMASK_FBD_M11ERR 0x00000400
+#define EMASK_FBD_M10ERR 0x00000200
+#define EMASK_FBD_M9ERR 0x00000100
+#define EMASK_FBD_M8ERR 0x00000080
+#define EMASK_FBD_M7ERR 0x00000040
+#define EMASK_FBD_M6ERR 0x00000020
+#define EMASK_FBD_M5ERR 0x00000010
+#define EMASK_FBD_M4ERR 0x00000008
+#define EMASK_FBD_M3ERR 0x00000004
+#define EMASK_FBD_M2ERR 0x00000002
+#define EMASK_FBD_M1ERR 0x00000001
+
+#define ENABLE_EMASK_FBD_FATAL_ERRORS (EMASK_FBD_M1ERR | \
+ EMASK_FBD_M2ERR | \
+ EMASK_FBD_M3ERR)
+
+#define ENABLE_EMASK_FBD_UNCORRECTABLE (EMASK_FBD_M4ERR | \
+ EMASK_FBD_M5ERR | \
+ EMASK_FBD_M6ERR | \
+ EMASK_FBD_M7ERR | \
+ EMASK_FBD_M8ERR | \
+ EMASK_FBD_M9ERR | \
+ EMASK_FBD_M10ERR | \
+ EMASK_FBD_M11ERR | \
+ EMASK_FBD_M12ERR)
+#define ENABLE_EMASK_FBD_CORRECTABLE (EMASK_FBD_M17ERR | \
+ EMASK_FBD_M18ERR | \
+ EMASK_FBD_M19ERR | \
+ EMASK_FBD_M20ERR)
+#define ENABLE_EMASK_FBD_DIMM_SPARE (EMASK_FBD_M27ERR | \
+ EMASK_FBD_M28ERR)
+#define ENABLE_EMASK_FBD_THERMALS (EMASK_FBD_M26ERR | \
+ EMASK_FBD_M25ERR | \
+ EMASK_FBD_M24ERR | \
+ EMASK_FBD_M23ERR)
+#define ENABLE_EMASK_FBD_SPD_PROTOCOL (EMASK_FBD_M22ERR)
+#define ENABLE_EMASK_FBD_NORTH_CRC (EMASK_FBD_M21ERR)
+#define ENABLE_EMASK_FBD_NON_RETRY (EMASK_FBD_M15ERR | \
+ EMASK_FBD_M14ERR | \
+ EMASK_FBD_M13ERR)
+
+#define ENABLE_EMASK_ALL (ENABLE_EMASK_FBD_NON_RETRY | \
+ ENABLE_EMASK_FBD_NORTH_CRC | \
+ ENABLE_EMASK_FBD_SPD_PROTOCOL | \
+ ENABLE_EMASK_FBD_THERMALS | \
+ ENABLE_EMASK_FBD_DIMM_SPARE | \
+ ENABLE_EMASK_FBD_FATAL_ERRORS | \
+ ENABLE_EMASK_FBD_CORRECTABLE | \
+ ENABLE_EMASK_FBD_UNCORRECTABLE)
+
+#define ERR0_FBD 0xAC
+#define ERR1_FBD 0xB0
+#define ERR2_FBD 0xB4
+#define MCERR_FBD 0xB8
+#define NRECMEMA 0xBE
+#define NREC_BANK(x) (((x)>>12) & 0x7)
+#define NREC_RDWR(x) (((x)>>11) & 1)
+#define NREC_RANK(x) (((x)>>8) & 0x7)
+#define NRECMEMB 0xC0
+#define NREC_CAS(x) (((x)>>16) & 0xFFFFFF)
+#define NREC_RAS(x) ((x) & 0x7FFF)
+#define NRECFGLOG 0xC4
+#define NREEECFBDA 0xC8
+#define NREEECFBDB 0xCC
+#define NREEECFBDC 0xD0
+#define NREEECFBDD 0xD4
+#define NREEECFBDE 0xD8
+#define REDMEMA 0xDC
+#define RECMEMA 0xE2
+#define REC_BANK(x) (((x)>>12) & 0x7)
+#define REC_RDWR(x) (((x)>>11) & 1)
+#define REC_RANK(x) (((x)>>8) & 0x7)
+#define RECMEMB 0xE4
+#define REC_CAS(x) (((x)>>16) & 0xFFFFFF)
+#define REC_RAS(x) ((x) & 0x7FFF)
+#define RECFGLOG 0xE8
+#define RECFBDA 0xEC
+#define RECFBDB 0xF0
+#define RECFBDC 0xF4
+#define RECFBDD 0xF8
+#define RECFBDE 0xFC
+
+/* OFFSETS for Function 2 */
+
+/*
+ * Device 21,
+ * Function 0: Memory Map Branch 0
+ *
+ * Device 22,
+ * Function 0: Memory Map Branch 1
+ */
+#define PCI_DEVICE_ID_I5000_BRANCH_0 0x25F5
+#define PCI_DEVICE_ID_I5000_BRANCH_1 0x25F6
+
+#define AMB_PRESENT_0 0x64
+#define AMB_PRESENT_1 0x66
+#define MTR0 0x80
+#define MTR1 0x84
+#define MTR2 0x88
+#define MTR3 0x8C
+
+#define NUM_MTRS 4
+#define CHANNELS_PER_BRANCH (2)
+
+/* Defines to extract the various fields from the
+ * MTRx - Memory Technology Registers
+ */
+#define MTR_DIMMS_PRESENT(mtr) ((mtr) & (0x1 << 8))
+#define MTR_DRAM_WIDTH(mtr) ((((mtr) >> 6) & 0x1) ? 8 : 4)
+#define MTR_DRAM_BANKS(mtr) ((((mtr) >> 5) & 0x1) ? 8 : 4)
+#define MTR_DRAM_BANKS_ADDR_BITS(mtr) ((MTR_DRAM_BANKS(mtr) == 8) ? 3 : 2)
+#define MTR_DIMM_RANK(mtr) (((mtr) >> 4) & 0x1)
+#define MTR_DIMM_RANK_ADDR_BITS(mtr) (MTR_DIMM_RANK(mtr) ? 2 : 1)
+#define MTR_DIMM_ROWS(mtr) (((mtr) >> 2) & 0x3)
+#define MTR_DIMM_ROWS_ADDR_BITS(mtr) (MTR_DIMM_ROWS(mtr) + 13)
+#define MTR_DIMM_COLS(mtr) ((mtr) & 0x3)
+#define MTR_DIMM_COLS_ADDR_BITS(mtr) (MTR_DIMM_COLS(mtr) + 10)
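+
+/* Worked example for a hypothetical MTR value of 0x0145:
+ * MTR_DIMMS_PRESENT is set (bit 8), MTR_DRAM_WIDTH gives x8 (bit 6),
+ * MTR_DRAM_BANKS gives 4 banks (bit 5 clear), MTR_DIMM_RANK gives
+ * single rank (bit 4 clear), MTR_DIMM_ROWS gives 1 -> 14 row address
+ * bits, and MTR_DIMM_COLS gives 1 -> 11 column address bits.
+ */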
+
+#ifdef CONFIG_EDAC_DEBUG
+static char *numrow_toString[] = {
+ "8,192 - 13 rows",
+ "16,384 - 14 rows",
+ "32,768 - 15 rows",
+ "reserved"
+};
+
+static char *numcol_toString[] = {
+ "1,024 - 10 columns",
+ "2,048 - 11 columns",
+ "4,096 - 12 columns",
+ "reserved"
+};
+#endif
+
+/* enables the report of miscellaneous messages as CE errors - default off */
+static int misc_messages;
+
+/* Enumeration of supported devices */
+enum i5000_chips {
+ I5000P = 0,
+ I5000V = 1, /* future */
+ I5000X = 2 /* future */
+};
+
+/* Device name and register DID (Device ID) */
+struct i5000_dev_info {
+ const char *ctl_name; /* name for this device */
+ u16 fsb_mapping_errors; /* DID for the branchmap,control */
+};
+
+/* Table of devices attributes supported by this driver */
+static const struct i5000_dev_info i5000_devs[] = {
+ [I5000P] = {
+ .ctl_name = "I5000",
+ .fsb_mapping_errors = PCI_DEVICE_ID_INTEL_I5000_DEV16,
+ },
+};
+
+struct i5000_dimm_info {
+ int megabytes; /* size, 0 means not present */
+ int dual_rank;
+};
+
+#define MAX_CHANNELS 6 /* max possible channels */
+#define MAX_CSROWS (8*2) /* max possible csrows per channel */
+
+/* driver private data structure */
+struct i5000_pvt {
+ struct pci_dev *system_address; /* 16.0 */
+ struct pci_dev *branchmap_werrors; /* 16.1 */
+ struct pci_dev *fsb_error_regs; /* 16.2 */
+ struct pci_dev *branch_0; /* 21.0 */
+ struct pci_dev *branch_1; /* 22.0 */
+
+ u16 tolm; /* top of low memory */
+ u64 ambase; /* AMB BAR */
+
+ u16 mir0, mir1, mir2;
+
+ u16 b0_mtr[NUM_MTRS]; /* Memory Technology Reg */
+ u16 b0_ambpresent0; /* Branch 0, Channel 0 */
+ u16 b0_ambpresent1; /* Branch 0, Channel 1 */
+
+ u16 b1_mtr[NUM_MTRS]; /* Memory Technology Reg */
+ u16 b1_ambpresent0; /* Branch 1, Channel 0 */
+ u16 b1_ambpresent1; /* Branch 1, Channel 1 */
+
+ /* DIMM information matrix, allocating architecture maximums */
+ struct i5000_dimm_info dimm_info[MAX_CSROWS][MAX_CHANNELS];
+
+ /* Actual values for this controller */
+ int maxch; /* Max channels */
+ int maxdimmperch; /* Max DIMMs per channel */
+};
+
+/* I5000 MCH error information retrieved from Hardware */
+struct i5000_error_info {
+
+ /* These registers are always read from the MC */
+ u32 ferr_fat_fbd; /* First Errors Fatal */
+ u32 nerr_fat_fbd; /* Next Errors Fatal */
+ u32 ferr_nf_fbd; /* First Errors Non-Fatal */
+ u32 nerr_nf_fbd; /* Next Errors Non-Fatal */
+
+ /* These registers are input ONLY if there was a Recoverable Error */
+ u32 redmemb; /* Recoverable Mem Data Error log B */
+ u16 recmema; /* Recoverable Mem Error log A */
+ u32 recmemb; /* Recoverable Mem Error log B */
+
+ /* These registers are input ONLY if there was a
+ * Non-Recoverable Error */
+ u16 nrecmema; /* Non-Recoverable Mem log A */
+ u16 nrecmemb; /* Non-Recoverable Mem log B */
+
+};
+
+static struct edac_pci_ctl_info *i5000_pci;
+
+/*
+ * i5000_get_error_info Retrieve the hardware error information from
+ * the hardware and cache it in the 'info'
+ * structure
+ */
+static void i5000_get_error_info(struct mem_ctl_info *mci,
+ struct i5000_error_info *info)
+{
+ struct i5000_pvt *pvt;
+ u32 value;
+
+ pvt = mci->pvt_info;
+
+ /* read in the 1st FATAL error register */
+ pci_read_config_dword(pvt->branchmap_werrors, FERR_FAT_FBD, &value);
+
+ /* Mask only the bits that the doc says are valid
+ */
+ value &= (FERR_FAT_FBDCHAN | FERR_FAT_MASK);
+
+ /* If there is an error, then read in the */
+ /* NEXT FATAL error register and the Memory Error Log Register A */
+ if (value & FERR_FAT_MASK) {
+ info->ferr_fat_fbd = value;
+
+ /* harvest the various error data we need */
+ pci_read_config_dword(pvt->branchmap_werrors,
+ NERR_FAT_FBD, &info->nerr_fat_fbd);
+ pci_read_config_word(pvt->branchmap_werrors,
+ NRECMEMA, &info->nrecmema);
+ pci_read_config_word(pvt->branchmap_werrors,
+ NRECMEMB, &info->nrecmemb);
+
+ /* Clear the error bits, by writing them back */
+ pci_write_config_dword(pvt->branchmap_werrors,
+ FERR_FAT_FBD, value);
+ } else {
+ info->ferr_fat_fbd = 0;
+ info->nerr_fat_fbd = 0;
+ info->nrecmema = 0;
+ info->nrecmemb = 0;
+ }
+
+ /* read in the 1st NON-FATAL error register */
+ pci_read_config_dword(pvt->branchmap_werrors, FERR_NF_FBD, &value);
+
+ /* If there is an error, then read in the 1st NON-FATAL error
+ * register as well */
+ if (value & FERR_NF_MASK) {
+ info->ferr_nf_fbd = value;
+
+ /* harvest the various error data we need */
+ pci_read_config_dword(pvt->branchmap_werrors,
+ NERR_NF_FBD, &info->nerr_nf_fbd);
+ pci_read_config_word(pvt->branchmap_werrors,
+ RECMEMA, &info->recmema);
+ pci_read_config_dword(pvt->branchmap_werrors,
+ RECMEMB, &info->recmemb);
+ pci_read_config_dword(pvt->branchmap_werrors,
+ REDMEMB, &info->redmemb);
+
+ /* Clear the error bits, by writing them back */
+ pci_write_config_dword(pvt->branchmap_werrors,
+ FERR_NF_FBD, value);
+ } else {
+ info->ferr_nf_fbd = 0;
+ info->nerr_nf_fbd = 0;
+ info->recmema = 0;
+ info->recmemb = 0;
+ info->redmemb = 0;
+ }
+}
+
+/*
+ * i5000_process_fatal_error_info(struct mem_ctl_info *mci,
+ * struct i5000_error_info *info,
+ * int handle_errors);
+ *
+ * handle the Intel FATAL errors, if any
+ */
+static void i5000_process_fatal_error_info(struct mem_ctl_info *mci,
+ struct i5000_error_info *info,
+ int handle_errors)
+{
+ char msg[EDAC_MC_LABEL_LEN + 1 + 160];
+ char *specific = NULL;
+ u32 allErrors;
+ int branch;
+ int channel;
+ int bank;
+ int rank;
+ int rdwr;
+ int ras, cas;
+
+ /* mask off the Error bits that are possible */
+ allErrors = (info->ferr_fat_fbd & FERR_FAT_MASK);
+ if (!allErrors)
+ return; /* if no error, return now */
+
+ branch = EXTRACT_FBDCHAN_INDX(info->ferr_fat_fbd);
+ channel = branch;
+
+ /* Use the NON-Recoverable macros to extract data */
+ bank = NREC_BANK(info->nrecmema);
+ rank = NREC_RANK(info->nrecmema);
+ rdwr = NREC_RDWR(info->nrecmema);
+ ras = NREC_RAS(info->nrecmemb);
+ cas = NREC_CAS(info->nrecmemb);
+
+ debugf0("\t\tCSROW= %d Channels= %d,%d (Branch= %d "
+ "DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
+ rank, channel, channel + 1, branch >> 1, bank,
+ rdwr ? "Write" : "Read", ras, cas);
+
+ /* Only 1 bit will be on */
+ switch (allErrors) {
+ case FERR_FAT_M1ERR:
+ specific = "Alert on non-redundant retry or fast "
+ "reset timeout";
+ break;
+ case FERR_FAT_M2ERR:
+ specific = "Northbound CRC error on non-redundant "
+ "retry";
+ break;
+ case FERR_FAT_M3ERR:
+ {
+ static int done;
+
+ /*
+ * This error is generated to inform that the intelligent
+ * throttling is disabled and the temperature passed the
+ * specified middle point. Since this is something the BIOS
+ * should take care of, we'll warn only once to avoid
+ * worthlessly flooding the log.
+ */
+ if (done)
+ return;
+ done++;
+
+ specific = ">Tmid Thermal event with intelligent "
+ "throttling disabled";
+ }
+ break;
+ }
+
+ /* Form out message */
+ snprintf(msg, sizeof(msg),
+ "(Branch=%d DRAM-Bank=%d RDWR=%s RAS=%d CAS=%d "
+ "FATAL Err=0x%x (%s))",
+ branch >> 1, bank, rdwr ? "Write" : "Read", ras, cas,
+ allErrors, specific);
+
+ /* Call the helper to output message */
+ edac_mc_handle_fbd_ue(mci, rank, channel, channel + 1, msg);
+}
+
+/*
+ * i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
+ * struct i5000_error_info *info,
+ * int handle_errors);
+ *
+ * handle the Intel NON-FATAL errors, if any
+ */
+static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
+ struct i5000_error_info *info,
+ int handle_errors)
+{
+ char msg[EDAC_MC_LABEL_LEN + 1 + 170];
+ char *specific = NULL;
+ u32 allErrors;
+ u32 ue_errors;
+ u32 ce_errors;
+ u32 misc_errors;
+ int branch;
+ int channel;
+ int bank;
+ int rank;
+ int rdwr;
+ int ras, cas;
+
+ /* mask off the Error bits that are possible */
+ allErrors = (info->ferr_nf_fbd & FERR_NF_MASK);
+ if (!allErrors)
+ return; /* if no error, return now */
+
+ /* ONLY ONE of the possible error bits will be set, as per the docs */
+ ue_errors = allErrors & FERR_NF_UNCORRECTABLE;
+ if (ue_errors) {
+ debugf0("\tUncorrected bits= 0x%x\n", ue_errors);
+
+ branch = EXTRACT_FBDCHAN_INDX(info->ferr_nf_fbd);
+ channel = branch;
+ bank = NREC_BANK(info->nrecmema);
+ rank = NREC_RANK(info->nrecmema);
+ rdwr = NREC_RDWR(info->nrecmema);
+ ras = NREC_RAS(info->nrecmemb);
+ cas = NREC_CAS(info->nrecmemb);
+
+ debugf0
+ ("\t\tCSROW= %d Channels= %d,%d (Branch= %d "
+ "DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
+ rank, channel, channel + 1, branch >> 1, bank,
+ rdwr ? "Write" : "Read", ras, cas);
+
+ switch (ue_errors) {
+ case FERR_NF_M12ERR:
+ specific = "Non-Aliased Uncorrectable Patrol Data ECC";
+ break;
+ case FERR_NF_M11ERR:
+ specific = "Non-Aliased Uncorrectable Spare-Copy "
+ "Data ECC";
+ break;
+ case FERR_NF_M10ERR:
+ specific = "Non-Aliased Uncorrectable Mirrored Demand "
+ "Data ECC";
+ break;
+ case FERR_NF_M9ERR:
+ specific = "Non-Aliased Uncorrectable Non-Mirrored "
+ "Demand Data ECC";
+ break;
+ case FERR_NF_M8ERR:
+ specific = "Aliased Uncorrectable Patrol Data ECC";
+ break;
+ case FERR_NF_M7ERR:
+ specific = "Aliased Uncorrectable Spare-Copy Data ECC";
+ break;
+ case FERR_NF_M6ERR:
+ specific = "Aliased Uncorrectable Mirrored Demand "
+ "Data ECC";
+ break;
+ case FERR_NF_M5ERR:
+ specific = "Aliased Uncorrectable Non-Mirrored Demand "
+ "Data ECC";
+ break;
+ case FERR_NF_M4ERR:
+ specific = "Uncorrectable Data ECC on Replay";
+ break;
+ }
+
+ /* Form out message */
+ snprintf(msg, sizeof(msg),
+ "(Branch=%d DRAM-Bank=%d RDWR=%s RAS=%d "
+ "CAS=%d, UE Err=0x%x (%s))",
+ branch >> 1, bank, rdwr ? "Write" : "Read", ras, cas,
+ ue_errors, specific);
+
+ /* Call the helper to output message */
+ edac_mc_handle_fbd_ue(mci, rank, channel, channel + 1, msg);
+ }
+
+ /* Check correctable errors */
+ ce_errors = allErrors & FERR_NF_CORRECTABLE;
+ if (ce_errors) {
+ debugf0("\tCorrected bits= 0x%x\n", ce_errors);
+
+ branch = EXTRACT_FBDCHAN_INDX(info->ferr_nf_fbd);
+
+ channel = 0;
+ if (REC_ECC_LOCATOR_ODD(info->redmemb))
+ channel = 1;
+
+ /* Convert the within-branch channel (0 or 1) to a global
+ * channel number by adding the branch base */
+ channel += branch;
+
+ bank = REC_BANK(info->recmema);
+ rank = REC_RANK(info->recmema);
+ rdwr = REC_RDWR(info->recmema);
+ ras = REC_RAS(info->recmemb);
+ cas = REC_CAS(info->recmemb);
+
+ debugf0("\t\tCSROW= %d Channel= %d (Branch %d "
+ "DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
+ rank, channel, branch >> 1, bank,
+ rdwr ? "Write" : "Read", ras, cas);
+
+ switch (ce_errors) {
+ case FERR_NF_M17ERR:
+ specific = "Correctable Non-Mirrored Demand Data ECC";
+ break;
+ case FERR_NF_M18ERR:
+ specific = "Correctable Mirrored Demand Data ECC";
+ break;
+ case FERR_NF_M19ERR:
+ specific = "Correctable Spare-Copy Data ECC";
+ break;
+ case FERR_NF_M20ERR:
+ specific = "Correctable Patrol Data ECC";
+ break;
+ }
+
+ /* Form out message */
+ snprintf(msg, sizeof(msg),
+ "(Branch=%d DRAM-Bank=%d RDWR=%s RAS=%d "
+ "CAS=%d, CE Err=0x%x (%s))", branch >> 1, bank,
+ rdwr ? "Write" : "Read", ras, cas, ce_errors,
+ specific);
+
+ /* Call the helper to output message */
+ edac_mc_handle_fbd_ce(mci, rank, channel, msg);
+ }
+
+ if (!misc_messages)
+ return;
+
+ misc_errors = allErrors & (FERR_NF_NON_RETRY | FERR_NF_NORTH_CRC |
+ FERR_NF_SPD_PROTOCOL | FERR_NF_DIMM_SPARE);
+ if (misc_errors) {
+ switch (misc_errors) {
+ case FERR_NF_M13ERR:
+ specific = "Non-Retry or Redundant Retry FBD Memory "
+ "Alert or Redundant Fast Reset Timeout";
+ break;
+ case FERR_NF_M14ERR:
+ specific = "Non-Retry or Redundant Retry FBD "
+ "Configuration Alert";
+ break;
+ case FERR_NF_M15ERR:
+ specific = "Non-Retry or Redundant Retry FBD "
+ "Northbound CRC error on read data";
+ break;
+ case FERR_NF_M21ERR:
+ specific = "FBD Northbound CRC error on "
+ "FBD Sync Status";
+ break;
+ case FERR_NF_M22ERR:
+ specific = "SPD protocol error";
+ break;
+ case FERR_NF_M27ERR:
+ specific = "DIMM-spare copy started";
+ break;
+ case FERR_NF_M28ERR:
+ specific = "DIMM-spare copy completed";
+ break;
+ }
+ branch = EXTRACT_FBDCHAN_INDX(info->ferr_nf_fbd);
+
+ /* Form out message */
+ snprintf(msg, sizeof(msg),
+ "(Branch=%d Err=%#x (%s))", branch >> 1,
+ misc_errors, specific);
+
+ /* Call the helper to output message */
+ edac_mc_handle_fbd_ce(mci, 0, 0, msg);
+ }
+}
+
+/*
+ * i5000_process_error_info Process the error info that is
+ * in the 'info' structure, previously retrieved from hardware
+ */
+static void i5000_process_error_info(struct mem_ctl_info *mci,
+ struct i5000_error_info *info,
+ int handle_errors)
+{
+ /* First handle any fatal errors that occurred */
+ i5000_process_fatal_error_info(mci, info, handle_errors);
+
+ /* now handle any non-fatal errors that occurred */
+ i5000_process_nonfatal_error_info(mci, info, handle_errors);
+}
+
+/*
+ * i5000_clear_error Retrieve any error from the hardware
+ * but do NOT process that error.
+ * Used for clearing out previous errors
+ * Called by the Core module.
+ */
+static void i5000_clear_error(struct mem_ctl_info *mci)
+{
+ struct i5000_error_info info;
+
+ i5000_get_error_info(mci, &info);
+}
+
+/*
+ * i5000_check_error Retrieve and process errors reported by the
+ * hardware. Called by the Core module.
+ */
+static void i5000_check_error(struct mem_ctl_info *mci)
+{
+ struct i5000_error_info info;
+ debugf4("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
+ i5000_get_error_info(mci, &info);
+ i5000_process_error_info(mci, &info, 1);
+}
+
+/*
+ * i5000_get_devices Find and perform 'get' operation on the MCH's
+ * device/functions we want to reference for this driver
+ *
+ * Need to 'get' device 16 func 1 and func 2
+ */
+static int i5000_get_devices(struct mem_ctl_info *mci, int dev_idx)
+{
+ struct i5000_pvt *pvt;
+ struct pci_dev *pdev;
+
+ pvt = mci->pvt_info;
+
+ /* Attempt to 'get' the MCH register we want */
+ pdev = NULL;
+ while (1) {
+ pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_I5000_DEV16, pdev);
+
+ /* End of list, leave */
+ if (pdev == NULL) {
+ i5000_printk(KERN_ERR,
+ "'system address,Process Bus' "
+ "device not found:"
+ "vendor 0x%x device 0x%x FUNC 1 "
+ "(broken BIOS?)\n",
+ PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_I5000_DEV16);
+
+ return 1;
+ }
+
+ /* Scan for device 16 func 1 */
+ if (PCI_FUNC(pdev->devfn) == 1)
+ break;
+ }
+
+ pvt->branchmap_werrors = pdev;
+
+ /* Attempt to 'get' the MCH register we want */
+ pdev = NULL;
+ while (1) {
+ pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_I5000_DEV16, pdev);
+
+ if (pdev == NULL) {
+ i5000_printk(KERN_ERR,
+ "MC: 'branchmap,control,errors' "
+ "device not found:"
+ "vendor 0x%x device 0x%x Func 2 "
+ "(broken BIOS?)\n",
+ PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_I5000_DEV16);
+
+ pci_dev_put(pvt->branchmap_werrors);
+ return 1;
+ }
+
+ /* Scan for device 16 func 2 */
+ if (PCI_FUNC(pdev->devfn) == 2)
+ break;
+ }
+
+ pvt->fsb_error_regs = pdev;
+
+ debugf1("System Address, processor bus- PCI Bus ID: %s %x:%x\n",
+ pci_name(pvt->system_address),
+ pvt->system_address->vendor, pvt->system_address->device);
+ debugf1("Branchmap, control and errors - PCI Bus ID: %s %x:%x\n",
+ pci_name(pvt->branchmap_werrors),
+ pvt->branchmap_werrors->vendor, pvt->branchmap_werrors->device);
+ debugf1("FSB Error Regs - PCI Bus ID: %s %x:%x\n",
+ pci_name(pvt->fsb_error_regs),
+ pvt->fsb_error_regs->vendor, pvt->fsb_error_regs->device);
+
+ pdev = NULL;
+ pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_I5000_BRANCH_0, pdev);
+
+ if (pdev == NULL) {
+ i5000_printk(KERN_ERR,
+ "MC: 'BRANCH 0' device not found:"
+ "vendor 0x%x device 0x%x Func 0 (broken BIOS?)\n",
+ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_I5000_BRANCH_0);
+
+ pci_dev_put(pvt->branchmap_werrors);
+ pci_dev_put(pvt->fsb_error_regs);
+ return 1;
+ }
+
+ pvt->branch_0 = pdev;
+
+ /* If this device claims to have more than 2 channels then
+ * fetch Branch 1's information
+ */
+ if (pvt->maxch >= CHANNELS_PER_BRANCH) {
+ pdev = NULL;
+ pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_I5000_BRANCH_1, pdev);
+
+ if (pdev == NULL) {
+ i5000_printk(KERN_ERR,
+ "MC: 'BRANCH 1' device not found:"
+ "vendor 0x%x device 0x%x Func 0 "
+ "(broken BIOS?)\n",
+ PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_I5000_BRANCH_1);
+
+ pci_dev_put(pvt->branchmap_werrors);
+ pci_dev_put(pvt->fsb_error_regs);
+ pci_dev_put(pvt->branch_0);
+ return 1;
+ }
+
+ pvt->branch_1 = pdev;
+ }
+
+ return 0;
+}
+
+/*
+ * i5000_put_devices 'put' all the devices that we have
+ * reserved via 'get'
+ */
+static void i5000_put_devices(struct mem_ctl_info *mci)
+{
+ struct i5000_pvt *pvt;
+
+ pvt = mci->pvt_info;
+
+ pci_dev_put(pvt->branchmap_werrors); /* FUNC 1 */
+ pci_dev_put(pvt->fsb_error_regs); /* FUNC 2 */
+ pci_dev_put(pvt->branch_0); /* DEV 21 */
+
+ /* Only if more than 2 channels do we release the second branch */
+ if (pvt->maxch >= CHANNELS_PER_BRANCH)
+ pci_dev_put(pvt->branch_1); /* DEV 22 */
+}
+
+/*
+ * determine_amb_resent
+ *
+ * the information is contained in NUM_MTRS different registers
+ * determineing which of the NUM_MTRS requires knowing
+ * which channel is in question
+ *
+ * 2 branches, each with 2 channels
+ * b0_ambpresent0 for channel '0'
+ * b0_ambpresent1 for channel '1'
+ * b1_ambpresent0 for channel '2'
+ * b1_ambpresent1 for channel '3'
+ */
+static int determine_amb_present_reg(struct i5000_pvt *pvt, int channel)
+{
+ int amb_present;
+
+ if (channel < CHANNELS_PER_BRANCH) {
+ if (channel & 0x1)
+ amb_present = pvt->b0_ambpresent1;
+ else
+ amb_present = pvt->b0_ambpresent0;
+ } else {
+ if (channel & 0x1)
+ amb_present = pvt->b1_ambpresent1;
+ else
+ amb_present = pvt->b1_ambpresent0;
+ }
+
+ return amb_present;
+}
+
+/*
+ * determine_mtr(pvt, csrow, channel)
+ *
+ * return the proper MTR register as determined by the csrow and channel desired
+ */
+static int determine_mtr(struct i5000_pvt *pvt, int csrow, int channel)
+{
+ int mtr;
+
+ if (channel < CHANNELS_PER_BRANCH)
+ mtr = pvt->b0_mtr[csrow >> 1];
+ else
+ mtr = pvt->b1_mtr[csrow >> 1];
+
+ return mtr;
+}
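+
+/* For example, csrow 5 on channel 3 (hypothetical) selects
+ * pvt->b1_mtr[2]: channel 3 is on branch 1, and csrows pair up
+ * two-per-MTR, so csrow >> 1 == 2.
+ */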
+
+/*
+ * decode_mtr - dump a DIMM slot's MTR fields for debugging
+ */
+static void decode_mtr(int slot_row, u16 mtr)
+{
+ int ans;
+
+ ans = MTR_DIMMS_PRESENT(mtr);
+
+ debugf2("\tMTR%d=0x%x: DIMMs are %s\n", slot_row, mtr,
+ ans ? "Present" : "NOT Present");
+ if (!ans)
+ return;
+
+ debugf2("\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));
+ debugf2("\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr));
+ debugf2("\t\tNUMRANK: %s\n", MTR_DIMM_RANK(mtr) ? "double" : "single");
+ debugf2("\t\tNUMROW: %s\n", numrow_toString[MTR_DIMM_ROWS(mtr)]);
+ debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]);
+}
+
+static void handle_channel(struct i5000_pvt *pvt, int csrow, int channel,
+ struct i5000_dimm_info *dinfo)
+{
+ int mtr;
+ int amb_present_reg;
+ int addrBits;
+
+ mtr = determine_mtr(pvt, csrow, channel);
+ if (MTR_DIMMS_PRESENT(mtr)) {
+ amb_present_reg = determine_amb_present_reg(pvt, channel);
+
+ /* Determine if there is a DIMM present in this DIMM slot */
+ if (amb_present_reg & (1 << (csrow >> 1))) {
+ dinfo->dual_rank = MTR_DIMM_RANK(mtr);
+
+ if (!((dinfo->dual_rank == 0) &&
+ ((csrow & 0x1) == 0x1))) {
+ /* Start with the number of bits for a Bank
+ * on the DRAM */
+ addrBits = MTR_DRAM_BANKS_ADDR_BITS(mtr);
+				/* Add the number of ROW bits */
+ addrBits += MTR_DIMM_ROWS_ADDR_BITS(mtr);
+ /* add the number of COLUMN bits */
+ addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr);
+
+				addrBits += 6;	/* add 64 bits per DIMM */
+				addrBits -= 20;	/* divide by 2^20 */
+				addrBits -= 3;	/* 8 bits per byte */
+
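+				/*
+				 * Worked example (values assumed for
+				 * illustration): 2 bank bits + 14 row bits
+				 * + 10 column bits = 26; + 6 (64-bit data
+				 * width) - 20 (MiB) - 3 (bits -> bytes)
+				 * = 9, i.e. 1 << 9 = 512 MB.
+				 */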
+ dinfo->megabytes = 1 << addrBits;
+ }
+ }
+ }
+}
+
+/*
+ * calculate_dimm_size
+ *
+ * also will output a DIMM matrix map, if debug is enabled, for viewing
+ * how the DIMMs are populated
+ */
+static void calculate_dimm_size(struct i5000_pvt *pvt)
+{
+ struct i5000_dimm_info *dinfo;
+ int csrow, max_csrows;
+ char *p, *mem_buffer;
+ int space, n;
+ int channel;
+
+ /* ================= Generate some debug output ================= */
+ space = PAGE_SIZE;
+ mem_buffer = p = kmalloc(space, GFP_KERNEL);
+ if (p == NULL) {
+ i5000_printk(KERN_ERR, "MC: %s:%s() kmalloc() failed\n",
+ __FILE__, __func__);
+ return;
+ }
+
+ n = snprintf(p, space, "\n");
+ p += n;
+ space -= n;
+
+ /* Scan all the actual CSROWS (which is # of DIMMS * 2)
+ * and calculate the information for each DIMM
+ * Start with the highest csrow first, to display it first
+ * and work toward the 0th csrow
+ */
+ max_csrows = pvt->maxdimmperch * 2;
+ for (csrow = max_csrows - 1; csrow >= 0; csrow--) {
+
+ /* on an odd csrow, first output a 'boundary' marker,
+ * then reset the message buffer */
+ if (csrow & 0x1) {
+ n = snprintf(p, space, "---------------------------"
+ "--------------------------------");
+ p += n;
+ space -= n;
+ debugf2("%s\n", mem_buffer);
+ p = mem_buffer;
+ space = PAGE_SIZE;
+ }
+ n = snprintf(p, space, "csrow %2d ", csrow);
+ p += n;
+ space -= n;
+
+ for (channel = 0; channel < pvt->maxch; channel++) {
+ dinfo = &pvt->dimm_info[csrow][channel];
+ handle_channel(pvt, csrow, channel, dinfo);
+ n = snprintf(p, space, "%4d MB | ", dinfo->megabytes);
+ p += n;
+ space -= n;
+ }
+ n = snprintf(p, space, "\n");
+ p += n;
+ space -= n;
+ }
+
+ /* Output the last bottom 'boundary' marker */
+ n = snprintf(p, space, "---------------------------"
+ "--------------------------------\n");
+ p += n;
+ space -= n;
+
+ /* now output the 'channel' labels */
+ n = snprintf(p, space, " ");
+ p += n;
+ space -= n;
+ for (channel = 0; channel < pvt->maxch; channel++) {
+ n = snprintf(p, space, "channel %d | ", channel);
+ p += n;
+ space -= n;
+ }
+ n = snprintf(p, space, "\n");
+ p += n;
+ space -= n;
+
+ /* output the last message and free buffer */
+ debugf2("%s\n", mem_buffer);
+ kfree(mem_buffer);
+}
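+
+/*
+ * Illustrative (assumed) layout of the matrix printed above, for a
+ * two-branch system with one DIMM per channel in slot 0:
+ *
+ *	-----------------------------------------------------------
+ *	csrow  0   512 MB |  512 MB |  512 MB |  512 MB |
+ *	-----------------------------------------------------------
+ *	           channel 0 | channel 1 | channel 2 | channel 3 |
+ */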
+
+/*
+ * i5000_get_mc_regs read in the necessary registers and
+ * cache locally
+ *
+ * Fills in the private data members
+ */
+static void i5000_get_mc_regs(struct mem_ctl_info *mci)
+{
+ struct i5000_pvt *pvt;
+ u32 actual_tolm;
+ u16 limit;
+ int slot_row;
+ int maxch;
+ int maxdimmperch;
+ int way0, way1;
+
+ pvt = mci->pvt_info;
+
+	pci_read_config_dword(pvt->system_address, AMBASE,
+			(u32 *) &pvt->ambase);
+	pci_read_config_dword(pvt->system_address, AMBASE + sizeof(u32),
+			((u32 *) &pvt->ambase) + 1);
+
+ maxdimmperch = pvt->maxdimmperch;
+ maxch = pvt->maxch;
+
+ debugf2("AMBASE= 0x%lx MAXCH= %d MAX-DIMM-Per-CH= %d\n",
+ (long unsigned int)pvt->ambase, pvt->maxch, pvt->maxdimmperch);
+
+ /* Get the Branch Map regs */
+ pci_read_config_word(pvt->branchmap_werrors, TOLM, &pvt->tolm);
+ pvt->tolm >>= 12;
+ debugf2("\nTOLM (number of 256M regions) =%u (0x%x)\n", pvt->tolm,
+ pvt->tolm);
+
+ actual_tolm = pvt->tolm << 28;
+ debugf2("Actual TOLM byte addr=%u (0x%x)\n", actual_tolm, actual_tolm);
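+	/* e.g. (illustrative) a TOLM field of 0x4 means 4 x 256M regions,
+	 * i.e. top of low memory at the 1 GB (0x40000000) boundary */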
+
+ pci_read_config_word(pvt->branchmap_werrors, MIR0, &pvt->mir0);
+ pci_read_config_word(pvt->branchmap_werrors, MIR1, &pvt->mir1);
+ pci_read_config_word(pvt->branchmap_werrors, MIR2, &pvt->mir2);
+
+	/* Decode the MIR[0-2] regs */
+ limit = (pvt->mir0 >> 4) & 0x0FFF;
+ way0 = pvt->mir0 & 0x1;
+ way1 = pvt->mir0 & 0x2;
+ debugf2("MIR0: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0);
+ limit = (pvt->mir1 >> 4) & 0x0FFF;
+ way0 = pvt->mir1 & 0x1;
+ way1 = pvt->mir1 & 0x2;
+ debugf2("MIR1: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0);
+ limit = (pvt->mir2 >> 4) & 0x0FFF;
+ way0 = pvt->mir2 & 0x1;
+ way1 = pvt->mir2 & 0x2;
+ debugf2("MIR2: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0);
+
+ /* Get the MTR[0-3] regs */
+ for (slot_row = 0; slot_row < NUM_MTRS; slot_row++) {
+ int where = MTR0 + (slot_row * sizeof(u32));
+
+ pci_read_config_word(pvt->branch_0, where,
+ &pvt->b0_mtr[slot_row]);
+
+ debugf2("MTR%d where=0x%x B0 value=0x%x\n", slot_row, where,
+ pvt->b0_mtr[slot_row]);
+
+ if (pvt->maxch >= CHANNELS_PER_BRANCH) {
+ pci_read_config_word(pvt->branch_1, where,
+ &pvt->b1_mtr[slot_row]);
+			debugf2("MTR%d where=0x%x B1 value=0x%x\n", slot_row,
+				where, pvt->b1_mtr[slot_row]);
+ } else {
+ pvt->b1_mtr[slot_row] = 0;
+ }
+ }
+
+ /* Read and dump branch 0's MTRs */
+ debugf2("\nMemory Technology Registers:\n");
+ debugf2(" Branch 0:\n");
+ for (slot_row = 0; slot_row < NUM_MTRS; slot_row++) {
+ decode_mtr(slot_row, pvt->b0_mtr[slot_row]);
+ }
+ pci_read_config_word(pvt->branch_0, AMB_PRESENT_0,
+ &pvt->b0_ambpresent0);
+ debugf2("\t\tAMB-Branch 0-present0 0x%x:\n", pvt->b0_ambpresent0);
+ pci_read_config_word(pvt->branch_0, AMB_PRESENT_1,
+ &pvt->b0_ambpresent1);
+ debugf2("\t\tAMB-Branch 0-present1 0x%x:\n", pvt->b0_ambpresent1);
+
+	/* Only if we have 2 branches (4 channels) */
+ if (pvt->maxch < CHANNELS_PER_BRANCH) {
+ pvt->b1_ambpresent0 = 0;
+ pvt->b1_ambpresent1 = 0;
+ } else {
+ /* Read and dump branch 1's MTRs */
+ debugf2(" Branch 1:\n");
+ for (slot_row = 0; slot_row < NUM_MTRS; slot_row++) {
+ decode_mtr(slot_row, pvt->b1_mtr[slot_row]);
+ }
+ pci_read_config_word(pvt->branch_1, AMB_PRESENT_0,
+ &pvt->b1_ambpresent0);
+ debugf2("\t\tAMB-Branch 1-present0 0x%x:\n",
+ pvt->b1_ambpresent0);
+ pci_read_config_word(pvt->branch_1, AMB_PRESENT_1,
+ &pvt->b1_ambpresent1);
+ debugf2("\t\tAMB-Branch 1-present1 0x%x:\n",
+ pvt->b1_ambpresent1);
+ }
+
+ /* Go and determine the size of each DIMM and place in an
+ * orderly matrix */
+ calculate_dimm_size(pvt);
+}
+
+/*
+ * i5000_init_csrows Initialize the 'csrows' table within
+ * the mci control structure with the
+ * addressing of memory.
+ *
+ * return:
+ * 0 success
+ * 1 no actual memory found on this MC
+ */
+static int i5000_init_csrows(struct mem_ctl_info *mci)
+{
+ struct i5000_pvt *pvt;
+ struct csrow_info *p_csrow;
+ int empty, channel_count;
+ int max_csrows;
+ int mtr;
+ int csrow_megs;
+ int channel;
+ int csrow;
+
+ pvt = mci->pvt_info;
+
+ channel_count = pvt->maxch;
+ max_csrows = pvt->maxdimmperch * 2;
+
+ empty = 1; /* Assume NO memory */
+
+ for (csrow = 0; csrow < max_csrows; csrow++) {
+ p_csrow = &mci->csrows[csrow];
+
+ p_csrow->csrow_idx = csrow;
+
+ /* use branch 0 for the basis */
+ mtr = pvt->b0_mtr[csrow >> 1];
+
+ /* if no DIMMS on this row, continue */
+ if (!MTR_DIMMS_PRESENT(mtr))
+ continue;
+
+ /* FAKE OUT VALUES, FIXME */
+ p_csrow->first_page = 0 + csrow * 20;
+ p_csrow->last_page = 9 + csrow * 20;
+ p_csrow->page_mask = 0xFFF;
+
+ p_csrow->grain = 8;
+
+ csrow_megs = 0;
+ for (channel = 0; channel < pvt->maxch; channel++) {
+ csrow_megs += pvt->dimm_info[csrow][channel].megabytes;
+ }
+
+ p_csrow->nr_pages = csrow_megs << 8;
+
+ /* Assume DDR2 for now */
+ p_csrow->mtype = MEM_FB_DDR2;
+
+ /* ask what device type on this row */
+ if (MTR_DRAM_WIDTH(mtr))
+ p_csrow->dtype = DEV_X8;
+ else
+ p_csrow->dtype = DEV_X4;
+
+ p_csrow->edac_mode = EDAC_S8ECD8ED;
+
+ empty = 0;
+ }
+
+ return empty;
+}
+
+/*
+ * i5000_enable_error_reporting
+ * Turn on the memory reporting features of the hardware
+ */
+static void i5000_enable_error_reporting(struct mem_ctl_info *mci)
+{
+ struct i5000_pvt *pvt;
+ u32 fbd_error_mask;
+
+ pvt = mci->pvt_info;
+
+ /* Read the FBD Error Mask Register */
+ pci_read_config_dword(pvt->branchmap_werrors, EMASK_FBD,
+ &fbd_error_mask);
+
+ /* Enable with a '0' */
+ fbd_error_mask &= ~(ENABLE_EMASK_ALL);
+
+ pci_write_config_dword(pvt->branchmap_werrors, EMASK_FBD,
+ fbd_error_mask);
+}
+
+/*
+ *	i5000_get_dimm_and_channel_counts(pdev, &num_dimms_per_channel, &num_channels)
+ *
+ *	ask the device how many channels are present and how many
+ *	DIMMs per channel it supports
+ */
+static void i5000_get_dimm_and_channel_counts(struct pci_dev *pdev,
+ int *num_dimms_per_channel,
+ int *num_channels)
+{
+ u8 value;
+
+ /* Need to retrieve just how many channels and dimms per channel are
+ * supported on this memory controller
+ */
+ pci_read_config_byte(pdev, MAXDIMMPERCH, &value);
+	*num_dimms_per_channel = (int)value * 2;
+
+ pci_read_config_byte(pdev, MAXCH, &value);
+ *num_channels = (int)value;
+}
+
+/*
+ * i5000_probe1 Probe for ONE instance of device to see if it is
+ * present.
+ * return:
+ * 0 for FOUND a device
+ * < 0 for error code
+ */
+static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
+{
+ struct mem_ctl_info *mci;
+ struct i5000_pvt *pvt;
+ int num_channels;
+ int num_dimms_per_channel;
+ int num_csrows;
+
+ debugf0("MC: " __FILE__ ": %s(), pdev bus %u dev=0x%x fn=0x%x\n",
+ __func__,
+ pdev->bus->number,
+ PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
+
+ /* We only are looking for func 0 of the set */
+ if (PCI_FUNC(pdev->devfn) != 0)
+ return -ENODEV;
+
+ /* Ask the devices for the number of CSROWS and CHANNELS so
+ * that we can calculate the memory resources, etc
+ *
+ * The Chipset will report what it can handle which will be greater
+ * or equal to what the motherboard manufacturer will implement.
+ *
+	 * As we don't have a motherboard identification routine to determine
+	 * the actual number of slots/dimms per channel, we thus utilize the
+	 * resource as specified by the chipset. Thus, we might have
+	 * more DIMMs per channel than are actually on the mobo, but this
+	 * allows the driver to support up to the chipset max, without
+	 * some fancy mobo determination.
+ */
+ i5000_get_dimm_and_channel_counts(pdev, &num_dimms_per_channel,
+ &num_channels);
+ num_csrows = num_dimms_per_channel * 2;
+
+ debugf0("MC: %s(): Number of - Channels= %d DIMMS= %d CSROWS= %d\n",
+ __func__, num_channels, num_dimms_per_channel, num_csrows);
+
+ /* allocate a new MC control structure */
+ mci = edac_mc_alloc(sizeof(*pvt), num_csrows, num_channels, 0);
+
+ if (mci == NULL)
+ return -ENOMEM;
+
+ kobject_get(&mci->edac_mci_kobj);
+ debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci);
+
+ mci->dev = &pdev->dev; /* record ptr to the generic device */
+
+ pvt = mci->pvt_info;
+ pvt->system_address = pdev; /* Record this device in our private */
+ pvt->maxch = num_channels;
+ pvt->maxdimmperch = num_dimms_per_channel;
+
+ /* 'get' the pci devices we want to reserve for our use */
+ if (i5000_get_devices(mci, dev_idx))
+ goto fail0;
+
+ /* Time to get serious */
+ i5000_get_mc_regs(mci); /* retrieve the hardware registers */
+
+ mci->mc_idx = 0;
+ mci->mtype_cap = MEM_FLAG_FB_DDR2;
+ mci->edac_ctl_cap = EDAC_FLAG_NONE;
+ mci->edac_cap = EDAC_FLAG_NONE;
+ mci->mod_name = "i5000_edac.c";
+ mci->mod_ver = I5000_REVISION;
+ mci->ctl_name = i5000_devs[dev_idx].ctl_name;
+ mci->dev_name = pci_name(pdev);
+ mci->ctl_page_to_phys = NULL;
+
+ /* Set the function pointer to an actual operation function */
+ mci->edac_check = i5000_check_error;
+
+ /* initialize the MC control structure 'csrows' table
+ * with the mapping and control information */
+ if (i5000_init_csrows(mci)) {
+ debugf0("MC: Setting mci->edac_cap to EDAC_FLAG_NONE\n"
+ " because i5000_init_csrows() returned nonzero "
+ "value\n");
+ mci->edac_cap = EDAC_FLAG_NONE; /* no csrows found */
+ } else {
+ debugf1("MC: Enable error reporting now\n");
+ i5000_enable_error_reporting(mci);
+ }
+
+ /* add this new MC control structure to EDAC's list of MCs */
+ if (edac_mc_add_mc(mci)) {
+ debugf0("MC: " __FILE__
+ ": %s(): failed edac_mc_add_mc()\n", __func__);
+ /* FIXME: perhaps some code should go here that disables error
+ * reporting if we just enabled it
+ */
+ goto fail1;
+ }
+
+ i5000_clear_error(mci);
+
+ /* allocating generic PCI control info */
+ i5000_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
+ if (!i5000_pci) {
+ printk(KERN_WARNING
+ "%s(): Unable to create PCI control\n",
+ __func__);
+ printk(KERN_WARNING
+ "%s(): PCI error report via EDAC not setup\n",
+ __func__);
+ }
+
+ return 0;
+
+ /* Error exit unwinding stack */
+fail1:
+
+ i5000_put_devices(mci);
+
+fail0:
+ kobject_put(&mci->edac_mci_kobj);
+ edac_mc_free(mci);
+ return -ENODEV;
+}
+
+/*
+ * i5000_init_one constructor for one instance of device
+ *
+ * returns:
+ * negative on error
+ * count (>= 0)
+ */
+static int __devinit i5000_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ int rc;
+
+ debugf0("MC: " __FILE__ ": %s()\n", __func__);
+
+ /* wake up device */
+ rc = pci_enable_device(pdev);
+ if (rc == -EIO)
+ return rc;
+
+ /* now probe and enable the device */
+ return i5000_probe1(pdev, id->driver_data);
+}
+
+/*
+ * i5000_remove_one destructor for one instance of device
+ *
+ */
+static void __devexit i5000_remove_one(struct pci_dev *pdev)
+{
+ struct mem_ctl_info *mci;
+
+ debugf0(__FILE__ ": %s()\n", __func__);
+
+ if (i5000_pci)
+ edac_pci_release_generic_ctl(i5000_pci);
+
+ if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
+ return;
+
+ /* retrieve references to resources, and free those resources */
+ i5000_put_devices(mci);
+ kobject_put(&mci->edac_mci_kobj);
+ edac_mc_free(mci);
+}
+
+/*
+ * pci_device_id table for which devices we are looking for
+ *
+ * The "E500P" device is the first device supported.
+ */
+static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
+ .driver_data = I5000P},
+
+ {0,} /* 0 terminated list. */
+};
+
+MODULE_DEVICE_TABLE(pci, i5000_pci_tbl);
+
+/*
+ * i5000_driver pci_driver structure for this module
+ *
+ */
+static struct pci_driver i5000_driver = {
+ .name = KBUILD_BASENAME,
+ .probe = i5000_init_one,
+ .remove = __devexit_p(i5000_remove_one),
+ .id_table = i5000_pci_tbl,
+};
+
+/*
+ * i5000_init Module entry function
+ * Try to initialize this module for its devices
+ */
+static int __init i5000_init(void)
+{
+ int pci_rc;
+
+ debugf2("MC: " __FILE__ ": %s()\n", __func__);
+
+ /* Ensure that the OPSTATE is set correctly for POLL or NMI */
+ opstate_init();
+
+ pci_rc = pci_register_driver(&i5000_driver);
+
+ return (pci_rc < 0) ? pci_rc : 0;
+}
+
+/*
+ * i5000_exit() Module exit function
+ * Unregister the driver
+ */
+static void __exit i5000_exit(void)
+{
+ debugf2("MC: " __FILE__ ": %s()\n", __func__);
+ pci_unregister_driver(&i5000_driver);
+}
+
+module_init(i5000_init);
+module_exit(i5000_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR
+ ("Linux Networx (http://lnxi.com) Doug Thompson <norsk5@xmission.com>");
+MODULE_DESCRIPTION("MC Driver for Intel I5000 memory controllers - "
+ I5000_REVISION);
+
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
+module_param(misc_messages, int, 0444);
+MODULE_PARM_DESC(misc_messages, "Log miscellaneous non fatal messages");
+
diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
new file mode 100644
index 0000000..22db05a
--- /dev/null
+++ b/drivers/edac/i5100_edac.c
@@ -0,0 +1,981 @@
+/*
+ * Intel 5100 Memory Controllers kernel module
+ *
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * This module is based on the following document:
+ *
+ * Intel 5100X Chipset Memory Controller Hub (MCH) - Datasheet
+ * http://download.intel.com/design/chipsets/datashts/318378.pdf
+ *
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+#include <linux/edac.h>
+#include <linux/delay.h>
+#include <linux/mmzone.h>
+
+#include "edac_core.h"
+
+/* register addresses */
+
+/* device 16, func 1 */
+#define I5100_MC 0x40 /* Memory Control Register */
+#define I5100_MS 0x44 /* Memory Status Register */
+#define I5100_SPDDATA 0x48 /* Serial Presence Detect Status Reg */
+#define I5100_SPDCMD 0x4c /* Serial Presence Detect Command Reg */
+#define I5100_TOLM 0x6c /* Top of Low Memory */
+#define I5100_MIR0 0x80 /* Memory Interleave Range 0 */
+#define I5100_MIR1 0x84 /* Memory Interleave Range 1 */
+#define I5100_AMIR_0 0x8c /* Adjusted Memory Interleave Range 0 */
+#define I5100_AMIR_1 0x90 /* Adjusted Memory Interleave Range 1 */
+#define I5100_FERR_NF_MEM 0xa0 /* MC First Non Fatal Errors */
+#define I5100_FERR_NF_MEM_M16ERR_MASK (1 << 16)
+#define I5100_FERR_NF_MEM_M15ERR_MASK (1 << 15)
+#define I5100_FERR_NF_MEM_M14ERR_MASK (1 << 14)
+#define I5100_FERR_NF_MEM_M12ERR_MASK (1 << 12)
+#define I5100_FERR_NF_MEM_M11ERR_MASK (1 << 11)
+#define I5100_FERR_NF_MEM_M10ERR_MASK (1 << 10)
+#define I5100_FERR_NF_MEM_M6ERR_MASK (1 << 6)
+#define I5100_FERR_NF_MEM_M5ERR_MASK (1 << 5)
+#define I5100_FERR_NF_MEM_M4ERR_MASK (1 << 4)
+#define I5100_FERR_NF_MEM_M1ERR_MASK 1
+#define I5100_FERR_NF_MEM_ANY_MASK \
+ (I5100_FERR_NF_MEM_M16ERR_MASK | \
+ I5100_FERR_NF_MEM_M15ERR_MASK | \
+ I5100_FERR_NF_MEM_M14ERR_MASK | \
+ I5100_FERR_NF_MEM_M12ERR_MASK | \
+ I5100_FERR_NF_MEM_M11ERR_MASK | \
+ I5100_FERR_NF_MEM_M10ERR_MASK | \
+ I5100_FERR_NF_MEM_M6ERR_MASK | \
+ I5100_FERR_NF_MEM_M5ERR_MASK | \
+ I5100_FERR_NF_MEM_M4ERR_MASK | \
+ I5100_FERR_NF_MEM_M1ERR_MASK)
+#define I5100_NERR_NF_MEM 0xa4 /* MC Next Non-Fatal Errors */
+#define I5100_EMASK_MEM 0xa8 /* MC Error Mask Register */
+
+/* device 21 and 22, func 0 */
+#define I5100_MTR_0 0x154 /* Memory Technology Registers 0-3 */
+#define I5100_DMIR 0x15c /* DIMM Interleave Range */
+#define I5100_VALIDLOG 0x18c /* Valid Log Markers */
+#define I5100_NRECMEMA 0x190 /* Non-Recoverable Memory Error Log Reg A */
+#define I5100_NRECMEMB 0x194 /* Non-Recoverable Memory Error Log Reg B */
+#define I5100_REDMEMA 0x198 /* Recoverable Memory Data Error Log Reg A */
+#define I5100_REDMEMB 0x19c /* Recoverable Memory Data Error Log Reg B */
+#define I5100_RECMEMA 0x1a0 /* Recoverable Memory Error Log Reg A */
+#define I5100_RECMEMB 0x1a4 /* Recoverable Memory Error Log Reg B */
+#define I5100_MTR_4 0x1b0 /* Memory Technology Registers 4,5 */
+
+/* bit field accessors */
+
+static inline u32 i5100_mc_errdeten(u32 mc)
+{
+ return mc >> 5 & 1;
+}
+
+static inline u16 i5100_spddata_rdo(u16 a)
+{
+ return a >> 15 & 1;
+}
+
+static inline u16 i5100_spddata_sbe(u16 a)
+{
+ return a >> 13 & 1;
+}
+
+static inline u16 i5100_spddata_busy(u16 a)
+{
+ return a >> 12 & 1;
+}
+
+static inline u16 i5100_spddata_data(u16 a)
+{
+ return a & ((1 << 8) - 1);
+}
+
+static inline u32 i5100_spdcmd_create(u32 dti, u32 ckovrd, u32 sa, u32 ba,
+ u32 data, u32 cmd)
+{
+ return ((dti & ((1 << 4) - 1)) << 28) |
+ ((ckovrd & 1) << 27) |
+ ((sa & ((1 << 3) - 1)) << 24) |
+ ((ba & ((1 << 8) - 1)) << 16) |
+ ((data & ((1 << 8) - 1)) << 8) |
+ (cmd & 1);
+}
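+
+/*
+ * Illustrative example: reading SPD byte 5 of the DIMM at SMBus address
+ * 2 on channel 0 uses i5100_spdcmd_create(0xa, 1, 2, 5, 0, 0) -- DTI
+ * 0xa is the standard SPD EEPROM address type and cmd 0 is a read (see
+ * i5100_read_spd_byte() below).
+ */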
+
+static inline u16 i5100_tolm_tolm(u16 a)
+{
+ return a >> 12 & ((1 << 4) - 1);
+}
+
+static inline u16 i5100_mir_limit(u16 a)
+{
+ return a >> 4 & ((1 << 12) - 1);
+}
+
+static inline u16 i5100_mir_way1(u16 a)
+{
+ return a >> 1 & 1;
+}
+
+static inline u16 i5100_mir_way0(u16 a)
+{
+ return a & 1;
+}
+
+static inline u32 i5100_ferr_nf_mem_chan_indx(u32 a)
+{
+ return a >> 28 & 1;
+}
+
+static inline u32 i5100_ferr_nf_mem_any(u32 a)
+{
+ return a & I5100_FERR_NF_MEM_ANY_MASK;
+}
+
+static inline u32 i5100_nerr_nf_mem_any(u32 a)
+{
+ return i5100_ferr_nf_mem_any(a);
+}
+
+static inline u32 i5100_dmir_limit(u32 a)
+{
+ return a >> 16 & ((1 << 11) - 1);
+}
+
+static inline u32 i5100_dmir_rank(u32 a, u32 i)
+{
+ return a >> (4 * i) & ((1 << 2) - 1);
+}
+
+static inline u16 i5100_mtr_present(u16 a)
+{
+ return a >> 10 & 1;
+}
+
+static inline u16 i5100_mtr_ethrottle(u16 a)
+{
+ return a >> 9 & 1;
+}
+
+static inline u16 i5100_mtr_width(u16 a)
+{
+ return a >> 8 & 1;
+}
+
+static inline u16 i5100_mtr_numbank(u16 a)
+{
+ return a >> 6 & 1;
+}
+
+static inline u16 i5100_mtr_numrow(u16 a)
+{
+ return a >> 2 & ((1 << 2) - 1);
+}
+
+static inline u16 i5100_mtr_numcol(u16 a)
+{
+ return a & ((1 << 2) - 1);
+}
+
+
+static inline u32 i5100_validlog_redmemvalid(u32 a)
+{
+ return a >> 2 & 1;
+}
+
+static inline u32 i5100_validlog_recmemvalid(u32 a)
+{
+ return a >> 1 & 1;
+}
+
+static inline u32 i5100_validlog_nrecmemvalid(u32 a)
+{
+ return a & 1;
+}
+
+static inline u32 i5100_nrecmema_merr(u32 a)
+{
+ return a >> 15 & ((1 << 5) - 1);
+}
+
+static inline u32 i5100_nrecmema_bank(u32 a)
+{
+ return a >> 12 & ((1 << 3) - 1);
+}
+
+static inline u32 i5100_nrecmema_rank(u32 a)
+{
+ return a >> 8 & ((1 << 3) - 1);
+}
+
+static inline u32 i5100_nrecmema_dm_buf_id(u32 a)
+{
+ return a & ((1 << 8) - 1);
+}
+
+static inline u32 i5100_nrecmemb_cas(u32 a)
+{
+ return a >> 16 & ((1 << 13) - 1);
+}
+
+static inline u32 i5100_nrecmemb_ras(u32 a)
+{
+ return a & ((1 << 16) - 1);
+}
+
+static inline u32 i5100_redmemb_ecc_locator(u32 a)
+{
+ return a & ((1 << 18) - 1);
+}
+
+static inline u32 i5100_recmema_merr(u32 a)
+{
+ return i5100_nrecmema_merr(a);
+}
+
+static inline u32 i5100_recmema_bank(u32 a)
+{
+ return i5100_nrecmema_bank(a);
+}
+
+static inline u32 i5100_recmema_rank(u32 a)
+{
+ return i5100_nrecmema_rank(a);
+}
+
+static inline u32 i5100_recmema_dm_buf_id(u32 a)
+{
+ return i5100_nrecmema_dm_buf_id(a);
+}
+
+static inline u32 i5100_recmemb_cas(u32 a)
+{
+ return i5100_nrecmemb_cas(a);
+}
+
+static inline u32 i5100_recmemb_ras(u32 a)
+{
+ return i5100_nrecmemb_ras(a);
+}
+
+/* some generic limits */
+#define I5100_MAX_RANKS_PER_CTLR 6
+#define I5100_MAX_CTLRS 2
+#define I5100_MAX_RANKS_PER_DIMM 4
+#define I5100_DIMM_ADDR_LINES (6 - 3) /* 64 bits / 8 bits per byte */
+#define I5100_MAX_DIMM_SLOTS_PER_CTLR 4
+#define I5100_MAX_RANK_INTERLEAVE 4
+#define I5100_MAX_DMIRS 5
+
+struct i5100_priv {
+ /* ranks on each dimm -- 0 maps to not present -- obtained via SPD */
+ int dimm_numrank[I5100_MAX_CTLRS][I5100_MAX_DIMM_SLOTS_PER_CTLR];
+
+ /*
+ * mainboard chip select map -- maps i5100 chip selects to
+ * DIMM slot chip selects. In the case of only 4 ranks per
+ * controller, the mapping is fairly obvious but not unique.
+	 * We map -1 -> NC and assume both controllers use the same
+ * map...
+ *
+ */
+ int dimm_csmap[I5100_MAX_DIMM_SLOTS_PER_CTLR][I5100_MAX_RANKS_PER_DIMM];
+
+ /* memory interleave range */
+ struct {
+ u64 limit;
+ unsigned way[2];
+ } mir[I5100_MAX_CTLRS];
+
+ /* adjusted memory interleave range register */
+ unsigned amir[I5100_MAX_CTLRS];
+
+ /* dimm interleave range */
+ struct {
+ unsigned rank[I5100_MAX_RANK_INTERLEAVE];
+ u64 limit;
+ } dmir[I5100_MAX_CTLRS][I5100_MAX_DMIRS];
+
+ /* memory technology registers... */
+ struct {
+ unsigned present; /* 0 or 1 */
+ unsigned ethrottle; /* 0 or 1 */
+ unsigned width; /* 4 or 8 bits */
+ unsigned numbank; /* 2 or 3 lines */
+ unsigned numrow; /* 13 .. 16 lines */
+		unsigned numcol;	/* 10 .. 12 lines */
+ } mtr[I5100_MAX_CTLRS][I5100_MAX_RANKS_PER_CTLR];
+
+ u64 tolm; /* top of low memory in bytes */
+ unsigned ranksperctlr; /* number of ranks per controller */
+
+ struct pci_dev *mc; /* device 16 func 1 */
+ struct pci_dev *ch0mm; /* device 21 func 0 */
+ struct pci_dev *ch1mm; /* device 22 func 0 */
+};
+
+/* map a rank/ctlr to a slot number on the mainboard */
+static int i5100_rank_to_slot(const struct mem_ctl_info *mci,
+ int ctlr, int rank)
+{
+ const struct i5100_priv *priv = mci->pvt_info;
+ int i;
+
+ for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CTLR; i++) {
+ int j;
+ const int numrank = priv->dimm_numrank[ctlr][i];
+
+ for (j = 0; j < numrank; j++)
+ if (priv->dimm_csmap[i][j] == rank)
+ return i * 2 + ctlr;
+ }
+
+ return -1;
+}
+
+static const char *i5100_err_msg(unsigned err)
+{
+ static const char *merrs[] = {
+ "unknown", /* 0 */
+ "uncorrectable data ECC on replay", /* 1 */
+ "unknown", /* 2 */
+ "unknown", /* 3 */
+ "aliased uncorrectable demand data ECC", /* 4 */
+ "aliased uncorrectable spare-copy data ECC", /* 5 */
+ "aliased uncorrectable patrol data ECC", /* 6 */
+ "unknown", /* 7 */
+ "unknown", /* 8 */
+ "unknown", /* 9 */
+ "non-aliased uncorrectable demand data ECC", /* 10 */
+ "non-aliased uncorrectable spare-copy data ECC", /* 11 */
+ "non-aliased uncorrectable patrol data ECC", /* 12 */
+ "unknown", /* 13 */
+ "correctable demand data ECC", /* 14 */
+ "correctable spare-copy data ECC", /* 15 */
+ "correctable patrol data ECC", /* 16 */
+ "unknown", /* 17 */
+ "SPD protocol error", /* 18 */
+ "unknown", /* 19 */
+ "spare copy initiated", /* 20 */
+ "spare copy completed", /* 21 */
+ };
+ unsigned i;
+
+ for (i = 0; i < ARRAY_SIZE(merrs); i++)
+ if (1 << i & err)
+ return merrs[i];
+
+ return "none";
+}
+
+/* convert csrow index into a rank (per controller -- 0..5) */
+static int i5100_csrow_to_rank(const struct mem_ctl_info *mci, int csrow)
+{
+ const struct i5100_priv *priv = mci->pvt_info;
+
+ return csrow % priv->ranksperctlr;
+}
+
+/* convert csrow index into a controller (0..1) */
+static int i5100_csrow_to_cntlr(const struct mem_ctl_info *mci, int csrow)
+{
+ const struct i5100_priv *priv = mci->pvt_info;
+
+ return csrow / priv->ranksperctlr;
+}
+
+static unsigned i5100_rank_to_csrow(const struct mem_ctl_info *mci,
+ int ctlr, int rank)
+{
+ const struct i5100_priv *priv = mci->pvt_info;
+
+ return ctlr * priv->ranksperctlr + rank;
+}
+
+static void i5100_handle_ce(struct mem_ctl_info *mci,
+ int ctlr,
+ unsigned bank,
+ unsigned rank,
+ unsigned long syndrome,
+ unsigned cas,
+ unsigned ras,
+ const char *msg)
+{
+ const int csrow = i5100_rank_to_csrow(mci, ctlr, rank);
+
+ printk(KERN_ERR
+ "CE ctlr %d, bank %u, rank %u, syndrome 0x%lx, "
+ "cas %u, ras %u, csrow %u, label \"%s\": %s\n",
+ ctlr, bank, rank, syndrome, cas, ras,
+ csrow, mci->csrows[csrow].channels[0].label, msg);
+
+ mci->ce_count++;
+ mci->csrows[csrow].ce_count++;
+ mci->csrows[csrow].channels[0].ce_count++;
+}
+
+static void i5100_handle_ue(struct mem_ctl_info *mci,
+ int ctlr,
+ unsigned bank,
+ unsigned rank,
+ unsigned long syndrome,
+ unsigned cas,
+ unsigned ras,
+ const char *msg)
+{
+ const int csrow = i5100_rank_to_csrow(mci, ctlr, rank);
+
+ printk(KERN_ERR
+ "UE ctlr %d, bank %u, rank %u, syndrome 0x%lx, "
+ "cas %u, ras %u, csrow %u, label \"%s\": %s\n",
+ ctlr, bank, rank, syndrome, cas, ras,
+ csrow, mci->csrows[csrow].channels[0].label, msg);
+
+ mci->ue_count++;
+ mci->csrows[csrow].ue_count++;
+}
+
+static void i5100_read_log(struct mem_ctl_info *mci, int ctlr,
+ u32 ferr, u32 nerr)
+{
+ struct i5100_priv *priv = mci->pvt_info;
+ struct pci_dev *pdev = (ctlr) ? priv->ch1mm : priv->ch0mm;
+ u32 dw;
+ u32 dw2;
+ unsigned syndrome = 0;
+ unsigned ecc_loc = 0;
+ unsigned merr;
+ unsigned bank;
+ unsigned rank;
+ unsigned cas;
+ unsigned ras;
+
+ pci_read_config_dword(pdev, I5100_VALIDLOG, &dw);
+
+ if (i5100_validlog_redmemvalid(dw)) {
+ pci_read_config_dword(pdev, I5100_REDMEMA, &dw2);
+ syndrome = dw2;
+ pci_read_config_dword(pdev, I5100_REDMEMB, &dw2);
+ ecc_loc = i5100_redmemb_ecc_locator(dw2);
+ }
+
+ if (i5100_validlog_recmemvalid(dw)) {
+ const char *msg;
+
+ pci_read_config_dword(pdev, I5100_RECMEMA, &dw2);
+ merr = i5100_recmema_merr(dw2);
+ bank = i5100_recmema_bank(dw2);
+ rank = i5100_recmema_rank(dw2);
+
+ pci_read_config_dword(pdev, I5100_RECMEMB, &dw2);
+ cas = i5100_recmemb_cas(dw2);
+ ras = i5100_recmemb_ras(dw2);
+
+ /* FIXME: not really sure if this is what merr is...
+ */
+ if (!merr)
+ msg = i5100_err_msg(ferr);
+ else
+ msg = i5100_err_msg(nerr);
+
+ i5100_handle_ce(mci, ctlr, bank, rank, syndrome, cas, ras, msg);
+ }
+
+ if (i5100_validlog_nrecmemvalid(dw)) {
+ const char *msg;
+
+ pci_read_config_dword(pdev, I5100_NRECMEMA, &dw2);
+ merr = i5100_nrecmema_merr(dw2);
+ bank = i5100_nrecmema_bank(dw2);
+ rank = i5100_nrecmema_rank(dw2);
+
+ pci_read_config_dword(pdev, I5100_NRECMEMB, &dw2);
+ cas = i5100_nrecmemb_cas(dw2);
+ ras = i5100_nrecmemb_ras(dw2);
+
+ /* FIXME: not really sure if this is what merr is...
+ */
+ if (!merr)
+ msg = i5100_err_msg(ferr);
+ else
+ msg = i5100_err_msg(nerr);
+
+ i5100_handle_ue(mci, ctlr, bank, rank, syndrome, cas, ras, msg);
+ }
+
+ pci_write_config_dword(pdev, I5100_VALIDLOG, dw);
+}
+
+static void i5100_check_error(struct mem_ctl_info *mci)
+{
+ struct i5100_priv *priv = mci->pvt_info;
+ u32 dw;
+
+
+ pci_read_config_dword(priv->mc, I5100_FERR_NF_MEM, &dw);
+ if (i5100_ferr_nf_mem_any(dw)) {
+ u32 dw2;
+
+ pci_read_config_dword(priv->mc, I5100_NERR_NF_MEM, &dw2);
+ if (dw2)
+ pci_write_config_dword(priv->mc, I5100_NERR_NF_MEM,
+ dw2);
+ pci_write_config_dword(priv->mc, I5100_FERR_NF_MEM, dw);
+
+ i5100_read_log(mci, i5100_ferr_nf_mem_chan_indx(dw),
+ i5100_ferr_nf_mem_any(dw),
+ i5100_nerr_nf_mem_any(dw2));
+ }
+}
+
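+/*
+ * Iterate over all PCI devices matching vendor/device and return the
+ * first one whose PCI function number matches 'func', with its
+ * reference count raised; returns NULL when no such function exists.
+ */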
+static struct pci_dev *pci_get_device_func(unsigned vendor,
+ unsigned device,
+ unsigned func)
+{
+ struct pci_dev *ret = NULL;
+
+ while (1) {
+ ret = pci_get_device(vendor, device, ret);
+
+ if (!ret)
+ break;
+
+ if (PCI_FUNC(ret->devfn) == func)
+ break;
+ }
+
+ return ret;
+}
+
+static unsigned long __devinit i5100_npages(struct mem_ctl_info *mci,
+ int csrow)
+{
+ struct i5100_priv *priv = mci->pvt_info;
+ const unsigned ctlr_rank = i5100_csrow_to_rank(mci, csrow);
+ const unsigned ctlr = i5100_csrow_to_cntlr(mci, csrow);
+ unsigned addr_lines;
+
+ /* dimm present? */
+ if (!priv->mtr[ctlr][ctlr_rank].present)
+ return 0ULL;
+
+ addr_lines =
+ I5100_DIMM_ADDR_LINES +
+ priv->mtr[ctlr][ctlr_rank].numcol +
+ priv->mtr[ctlr][ctlr_rank].numrow +
+ priv->mtr[ctlr][ctlr_rank].numbank;
+
+ return (unsigned long)
+ ((unsigned long long) (1ULL << addr_lines) / PAGE_SIZE);
+}
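+
+/*
+ * Example (assumed geometry): numbank=2, numrow=13 and numcol=10 plus
+ * I5100_DIMM_ADDR_LINES (3) gives 28 address lines, i.e. 2^28 bytes =
+ * 256 MiB for the rank, or 65536 pages with 4 KiB pages.
+ */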
+
+static void __devinit i5100_init_mtr(struct mem_ctl_info *mci)
+{
+ struct i5100_priv *priv = mci->pvt_info;
+ struct pci_dev *mms[2] = { priv->ch0mm, priv->ch1mm };
+ int i;
+
+ for (i = 0; i < I5100_MAX_CTLRS; i++) {
+ int j;
+ struct pci_dev *pdev = mms[i];
+
+ for (j = 0; j < I5100_MAX_RANKS_PER_CTLR; j++) {
+ const unsigned addr =
+ (j < 4) ? I5100_MTR_0 + j * 2 :
+ I5100_MTR_4 + (j - 4) * 2;
+ u16 w;
+
+ pci_read_config_word(pdev, addr, &w);
+
+ priv->mtr[i][j].present = i5100_mtr_present(w);
+ priv->mtr[i][j].ethrottle = i5100_mtr_ethrottle(w);
+ priv->mtr[i][j].width = 4 + 4 * i5100_mtr_width(w);
+ priv->mtr[i][j].numbank = 2 + i5100_mtr_numbank(w);
+ priv->mtr[i][j].numrow = 13 + i5100_mtr_numrow(w);
+ priv->mtr[i][j].numcol = 10 + i5100_mtr_numcol(w);
+ }
+ }
+}
+
+/*
+ * FIXME: make this into a real i2c adapter (so that dimm-decode
+ * will work)?
+ */
+static int i5100_read_spd_byte(const struct mem_ctl_info *mci,
+ u8 ch, u8 slot, u8 addr, u8 *byte)
+{
+ struct i5100_priv *priv = mci->pvt_info;
+ u16 w;
+ unsigned long et;
+
+ pci_read_config_word(priv->mc, I5100_SPDDATA, &w);
+ if (i5100_spddata_busy(w))
+ return -1;
+
+ pci_write_config_dword(priv->mc, I5100_SPDCMD,
+ i5100_spdcmd_create(0xa, 1, ch * 4 + slot, addr,
+ 0, 0));
+
+	/* wait up to 100ms */
+	et = jiffies + HZ / 10;
+	udelay(100);
+	while (1) {
+		pci_read_config_word(priv->mc, I5100_SPDDATA, &w);
+		if (!i5100_spddata_busy(w))
+			break;
+		if (time_after(jiffies, et))
+			return -1;
+		udelay(100);
+	}
+
+ if (!i5100_spddata_rdo(w) || i5100_spddata_sbe(w))
+ return -1;
+
+ *byte = i5100_spddata_data(w);
+
+ return 0;
+}
+
+/*
+ * fill dimm chip select map
+ *
+ * FIXME:
+ * o only valid for 4 ranks per controller
+ *   o not the only way to map chip selects to dimm slots
+ * o investigate if there is some way to obtain this map from the bios
+ */
+static void __devinit i5100_init_dimm_csmap(struct mem_ctl_info *mci)
+{
+ struct i5100_priv *priv = mci->pvt_info;
+ int i;
+
+ WARN_ON(priv->ranksperctlr != 4);
+
+ for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CTLR; i++) {
+ int j;
+
+ for (j = 0; j < I5100_MAX_RANKS_PER_DIMM; j++)
+ priv->dimm_csmap[i][j] = -1; /* default NC */
+ }
+
+ /* only 2 chip selects per slot... */
+ priv->dimm_csmap[0][0] = 0;
+ priv->dimm_csmap[0][1] = 3;
+ priv->dimm_csmap[1][0] = 1;
+ priv->dimm_csmap[1][1] = 2;
+ priv->dimm_csmap[2][0] = 2;
+ priv->dimm_csmap[3][0] = 3;
+}
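+
+/*
+ * With the map above (4 ranks per controller assumed), a dual-rank DIMM
+ * in slot 0 answers on chip selects {0, 3} and one in slot 1 on {1, 2},
+ * while slots 2 and 3 only ever present a single rank.
+ */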
+
+static void __devinit i5100_init_dimm_layout(struct pci_dev *pdev,
+ struct mem_ctl_info *mci)
+{
+ struct i5100_priv *priv = mci->pvt_info;
+ int i;
+
+ for (i = 0; i < I5100_MAX_CTLRS; i++) {
+ int j;
+
+ for (j = 0; j < I5100_MAX_DIMM_SLOTS_PER_CTLR; j++) {
+ u8 rank;
+
+ if (i5100_read_spd_byte(mci, i, j, 5, &rank) < 0)
+ priv->dimm_numrank[i][j] = 0;
+ else
+ priv->dimm_numrank[i][j] = (rank & 3) + 1;
+ }
+ }
+
+ i5100_init_dimm_csmap(mci);
+}
+
+static void __devinit i5100_init_interleaving(struct pci_dev *pdev,
+ struct mem_ctl_info *mci)
+{
+ u16 w;
+ u32 dw;
+ struct i5100_priv *priv = mci->pvt_info;
+ struct pci_dev *mms[2] = { priv->ch0mm, priv->ch1mm };
+ int i;
+
+ pci_read_config_word(pdev, I5100_TOLM, &w);
+ priv->tolm = (u64) i5100_tolm_tolm(w) * 256 * 1024 * 1024;
+
+ pci_read_config_word(pdev, I5100_MIR0, &w);
+ priv->mir[0].limit = (u64) i5100_mir_limit(w) << 28;
+ priv->mir[0].way[1] = i5100_mir_way1(w);
+ priv->mir[0].way[0] = i5100_mir_way0(w);
+
+ pci_read_config_word(pdev, I5100_MIR1, &w);
+ priv->mir[1].limit = (u64) i5100_mir_limit(w) << 28;
+ priv->mir[1].way[1] = i5100_mir_way1(w);
+ priv->mir[1].way[0] = i5100_mir_way0(w);
+
+ pci_read_config_word(pdev, I5100_AMIR_0, &w);
+ priv->amir[0] = w;
+ pci_read_config_word(pdev, I5100_AMIR_1, &w);
+ priv->amir[1] = w;
+
+ for (i = 0; i < I5100_MAX_CTLRS; i++) {
+ int j;
+
+ for (j = 0; j < 5; j++) {
+ int k;
+
+ pci_read_config_dword(mms[i], I5100_DMIR + j * 4, &dw);
+
+ priv->dmir[i][j].limit =
+ (u64) i5100_dmir_limit(dw) << 28;
+ for (k = 0; k < I5100_MAX_RANKS_PER_DIMM; k++)
+ priv->dmir[i][j].rank[k] =
+ i5100_dmir_rank(dw, k);
+ }
+ }
+
+ i5100_init_mtr(mci);
+}
+
+static void __devinit i5100_init_csrows(struct mem_ctl_info *mci)
+{
+ int i;
+ unsigned long total_pages = 0UL;
+ struct i5100_priv *priv = mci->pvt_info;
+
+ for (i = 0; i < mci->nr_csrows; i++) {
+ const unsigned long npages = i5100_npages(mci, i);
+ const unsigned cntlr = i5100_csrow_to_cntlr(mci, i);
+ const unsigned rank = i5100_csrow_to_rank(mci, i);
+
+ if (!npages)
+ continue;
+
+ /*
+ * FIXME: these two are totally bogus -- I don't see how to
+ * map them correctly to this structure...
+ */
+ mci->csrows[i].first_page = total_pages;
+ mci->csrows[i].last_page = total_pages + npages - 1;
+ mci->csrows[i].page_mask = 0UL;
+
+ mci->csrows[i].nr_pages = npages;
+ mci->csrows[i].grain = 32;
+ mci->csrows[i].csrow_idx = i;
+ mci->csrows[i].dtype =
+ (priv->mtr[cntlr][rank].width == 4) ? DEV_X4 : DEV_X8;
+ mci->csrows[i].ue_count = 0;
+ mci->csrows[i].ce_count = 0;
+ mci->csrows[i].mtype = MEM_RDDR2;
+ mci->csrows[i].edac_mode = EDAC_SECDED;
+ mci->csrows[i].mci = mci;
+ mci->csrows[i].nr_channels = 1;
+ mci->csrows[i].channels[0].chan_idx = 0;
+ mci->csrows[i].channels[0].ce_count = 0;
+ mci->csrows[i].channels[0].csrow = mci->csrows + i;
+ snprintf(mci->csrows[i].channels[0].label,
+ sizeof(mci->csrows[i].channels[0].label),
+ "DIMM%u", i5100_rank_to_slot(mci, cntlr, rank));
+
+ total_pages += npages;
+ }
+}
+
+static int __devinit i5100_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ int rc;
+ struct mem_ctl_info *mci;
+ struct i5100_priv *priv;
+ struct pci_dev *ch0mm, *ch1mm;
+ int ret = 0;
+ u32 dw;
+ int ranksperch;
+
+ if (PCI_FUNC(pdev->devfn) != 1)
+ return -ENODEV;
+
+ rc = pci_enable_device(pdev);
+ if (rc < 0) {
+ ret = rc;
+ goto bail;
+ }
+
+ /* ECC enabled? */
+ pci_read_config_dword(pdev, I5100_MC, &dw);
+ if (!i5100_mc_errdeten(dw)) {
+ printk(KERN_INFO "i5100_edac: ECC not enabled.\n");
+ ret = -ENODEV;
+ goto bail_pdev;
+ }
+
+ /* figure out how many ranks, from strapped state of 48GB_Mode input */
+ pci_read_config_dword(pdev, I5100_MS, &dw);
+ ranksperch = !!(dw & (1 << 8)) * 2 + 4;
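+	/* i.e. 6 ranks per controller when the 48GB_Mode strap is set,
+	 * otherwise 4 */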
+
+ if (ranksperch != 4) {
+ /* FIXME: get 6 ranks / controller to work - need hw... */
+ printk(KERN_INFO "i5100_edac: unsupported configuration.\n");
+ ret = -ENODEV;
+ goto bail_pdev;
+ }
+
+ /* enable error reporting... */
+ pci_read_config_dword(pdev, I5100_EMASK_MEM, &dw);
+ dw &= ~I5100_FERR_NF_MEM_ANY_MASK;
+ pci_write_config_dword(pdev, I5100_EMASK_MEM, dw);
+
+ /* device 21, func 0, Channel 0 Memory Map, Error Flag/Mask, etc... */
+ ch0mm = pci_get_device_func(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_5100_21, 0);
+ if (!ch0mm) {
+ ret = -ENODEV;
+ goto bail_pdev;
+ }
+
+ rc = pci_enable_device(ch0mm);
+ if (rc < 0) {
+ ret = rc;
+ goto bail_ch0;
+ }
+
+ /* device 22, func 0, Channel 1 Memory Map, Error Flag/Mask, etc... */
+ ch1mm = pci_get_device_func(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_5100_22, 0);
+ if (!ch1mm) {
+ ret = -ENODEV;
+ goto bail_disable_ch0;
+ }
+
+ rc = pci_enable_device(ch1mm);
+ if (rc < 0) {
+ ret = rc;
+ goto bail_ch1;
+ }
+
+ mci = edac_mc_alloc(sizeof(*priv), ranksperch * 2, 1, 0);
+ if (!mci) {
+ ret = -ENOMEM;
+ goto bail_disable_ch1;
+ }
+
+ mci->dev = &pdev->dev;
+
+ priv = mci->pvt_info;
+ priv->ranksperctlr = ranksperch;
+ priv->mc = pdev;
+ priv->ch0mm = ch0mm;
+ priv->ch1mm = ch1mm;
+
+ i5100_init_dimm_layout(pdev, mci);
+ i5100_init_interleaving(pdev, mci);
+
+ mci->mtype_cap = MEM_FLAG_FB_DDR2;
+ mci->edac_ctl_cap = EDAC_FLAG_SECDED;
+ mci->edac_cap = EDAC_FLAG_SECDED;
+ mci->mod_name = "i5100_edac.c";
+ mci->mod_ver = "not versioned";
+ mci->ctl_name = "i5100";
+ mci->dev_name = pci_name(pdev);
+ mci->ctl_page_to_phys = NULL;
+
+ mci->edac_check = i5100_check_error;
+
+ i5100_init_csrows(mci);
+
+	/* this construction appears in every EDAC driver: clamp
+	 * edac_op_state to a supported mode, defaulting to polling */
+ switch (edac_op_state) {
+ case EDAC_OPSTATE_POLL:
+ case EDAC_OPSTATE_NMI:
+ break;
+ default:
+ edac_op_state = EDAC_OPSTATE_POLL;
+ break;
+ }
+
+ if (edac_mc_add_mc(mci)) {
+ ret = -ENODEV;
+ goto bail_mc;
+ }
+
+ return ret;
+
+bail_mc:
+ edac_mc_free(mci);
+
+bail_disable_ch1:
+ pci_disable_device(ch1mm);
+
+bail_ch1:
+ pci_dev_put(ch1mm);
+
+bail_disable_ch0:
+ pci_disable_device(ch0mm);
+
+bail_ch0:
+ pci_dev_put(ch0mm);
+
+bail_pdev:
+ pci_disable_device(pdev);
+
+bail:
+ return ret;
+}
+
+static void __devexit i5100_remove_one(struct pci_dev *pdev)
+{
+ struct mem_ctl_info *mci;
+ struct i5100_priv *priv;
+
+ mci = edac_mc_del_mc(&pdev->dev);
+
+ if (!mci)
+ return;
+
+ priv = mci->pvt_info;
+ pci_disable_device(pdev);
+ pci_disable_device(priv->ch0mm);
+ pci_disable_device(priv->ch1mm);
+ pci_dev_put(priv->ch0mm);
+ pci_dev_put(priv->ch1mm);
+
+ edac_mc_free(mci);
+}
+
+static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
+ /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, i5100_pci_tbl);
+
+static struct pci_driver i5100_driver = {
+ .name = KBUILD_BASENAME,
+ .probe = i5100_init_one,
+ .remove = __devexit_p(i5100_remove_one),
+ .id_table = i5100_pci_tbl,
+};
+
+static int __init i5100_init(void)
+{
+ int pci_rc;
+
+ pci_rc = pci_register_driver(&i5100_driver);
+
+ return (pci_rc < 0) ? pci_rc : 0;
+}
+
+static void __exit i5100_exit(void)
+{
+ pci_unregister_driver(&i5100_driver);
+}
+
+module_init(i5100_init);
+module_exit(i5100_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR
+ ("Arthur Jones <ajones@riverbed.com>");
+MODULE_DESCRIPTION("MC Driver for Intel I5100 memory controllers");
diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
new file mode 100644
index 0000000..577760a
--- /dev/null
+++ b/drivers/edac/i82443bxgx_edac.c
@@ -0,0 +1,468 @@
+/*
+ * Intel 82443BX/GX (440BX/GX chipset) Memory Controller EDAC kernel
+ * module (C) 2006 Tim Small
+ *
+ * This file may be distributed under the terms of the GNU General
+ * Public License.
+ *
+ * Written by Tim Small <tim@buttersideup.com>, based on work by Linux
+ * Networx, Thayne Harbaugh, Dan Hollis <goemon at anime dot net> and
+ * others.
+ *
+ * 440GX fix by Jason Uhlenkott <juhlenko@akamai.com>.
+ *
+ * Written with reference to 82443BX Host Bridge Datasheet:
+ * http://www.intel.com/design/chipsets/440/documentation.htm
+ * references to this document given in [].
+ *
+ * This module doesn't support the 440LX, but it may be possible to
+ * make it do so (the 440LX's register definitions are different, but
+ * not completely so - I haven't studied them in enough detail to know
+ * how easy this would be).
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+
+#include <linux/slab.h>
+
+#include <linux/edac.h>
+#include "edac_core.h"
+
+#define I82443_REVISION "0.1"
+
+#define EDAC_MOD_STR "i82443bxgx_edac"
+
+/* The 82443BX supports SDRAM, or EDO (EDO for mobile only), "Memory
+ * Size: 8 MB to 512 MB (1GB with Registered DIMMs) with eight memory
+ * rows" "The 82443BX supports multiple-bit error detection and
+ * single-bit error correction when ECC mode is enabled and
+ * single/multi-bit error detection when correction is disabled.
+ * During writes to the DRAM, the 82443BX generates ECC for the data
+ * on a QWord basis. Partial QWord writes require a read-modify-write
+ * cycle when ECC is enabled."
+*/
+
+/* "Additionally, the 82443BX ensures that the data is corrected in
+ * main memory so that accumulation of errors is prevented. Another
+ * error within the same QWord would result in a double-bit error
+ * which is unrecoverable. This is known as hardware scrubbing since
+ * it requires no software intervention to correct the data in memory."
+ */
+
+/* [Also see page 100 (section 4.3), "DRAM Interface"]
+ * [Also see page 112 (section 4.6.1.4), ECC]
+ */
+
+#define I82443BXGX_NR_CSROWS 8
+#define I82443BXGX_NR_CHANS 1
+#define I82443BXGX_NR_DIMMS 4
+
+/* 82443 PCI Device 0 */
+#define I82443BXGX_NBXCFG 0x50 /* 32bit register starting at this PCI
+ * config space offset */
+#define I82443BXGX_NBXCFG_OFFSET_NON_ECCROW 24 /* Array of bits, zero if
+ * row is non-ECC */
+#define I82443BXGX_NBXCFG_OFFSET_DRAM_FREQ 12 /* 2 bits,00=100MHz,10=66 MHz */
+
+#define I82443BXGX_NBXCFG_OFFSET_DRAM_INTEGRITY 7 /* 2 bits: */
+#define I82443BXGX_NBXCFG_INTEGRITY_NONE 0x0 /* 00 = Non-ECC */
+#define I82443BXGX_NBXCFG_INTEGRITY_EC 0x1 /* 01 = EC (only) */
+#define I82443BXGX_NBXCFG_INTEGRITY_ECC 0x2 /* 10 = ECC */
+#define I82443BXGX_NBXCFG_INTEGRITY_SCRUB 0x3 /* 11 = ECC + HW Scrub */
+
+#define I82443BXGX_NBXCFG_OFFSET_ECC_DIAG_ENABLE 6
+
+/* 82443 PCI Device 0 */
+#define I82443BXGX_EAP 0x80 /* 32bit register starting at this PCI
+ * config space offset, Error Address
+ * Pointer Register */
+#define I82443BXGX_EAP_OFFSET_EAP 12 /* High 20 bits of error address */
+#define I82443BXGX_EAP_OFFSET_MBE BIT(1) /* Err at EAP was multi-bit (W1TC) */
+#define I82443BXGX_EAP_OFFSET_SBE BIT(0) /* Err at EAP was single-bit (W1TC) */
+
+#define I82443BXGX_ERRCMD 0x90 /* 8bit register starting at this PCI
+ * config space offset. */
+#define I82443BXGX_ERRCMD_OFFSET_SERR_ON_MBE BIT(1) /* 1 = enable */
+#define I82443BXGX_ERRCMD_OFFSET_SERR_ON_SBE BIT(0) /* 1 = enable */
+
+#define I82443BXGX_ERRSTS 0x91 /* 16bit register starting at this PCI
+ * config space offset. */
+#define I82443BXGX_ERRSTS_OFFSET_MBFRE 5 /* 3 bits - first err row multibit */
+#define I82443BXGX_ERRSTS_OFFSET_MEF BIT(4) /* 1 = MBE occurred */
+#define I82443BXGX_ERRSTS_OFFSET_SBFRE 1 /* 3 bits - first err row singlebit */
+#define I82443BXGX_ERRSTS_OFFSET_SEF BIT(0) /* 1 = SBE occurred */
+
+#define I82443BXGX_DRAMC 0x57 /* 8bit register starting at this PCI
+ * config space offset. */
+#define I82443BXGX_DRAMC_OFFSET_DT 3 /* 2 bits, DRAM Type */
+#define I82443BXGX_DRAMC_DRAM_IS_EDO 0 /* 00 = EDO */
+#define I82443BXGX_DRAMC_DRAM_IS_SDRAM 1 /* 01 = SDRAM */
+#define I82443BXGX_DRAMC_DRAM_IS_RSDRAM 2 /* 10 = Registered SDRAM */
+
+#define I82443BXGX_DRB 0x60 /* 8x 8bit registers starting at this PCI
+ * config space offset. */
+
+/* FIXME - don't poll when ECC disabled? */
+
+struct i82443bxgx_edacmc_error_info {
+ u32 eap;
+};
+
+static struct edac_pci_ctl_info *i82443bxgx_pci;
+
+static struct pci_dev *mci_pdev; /* init dev: in case that AGP code has
+ * already registered driver
+ */
+
+static int i82443bxgx_registered = 1;
+
+static void i82443bxgx_edacmc_get_error_info(struct mem_ctl_info *mci,
+ struct i82443bxgx_edacmc_error_info
+ *info)
+{
+ struct pci_dev *pdev;
+ pdev = to_pci_dev(mci->dev);
+ pci_read_config_dword(pdev, I82443BXGX_EAP, &info->eap);
+ if (info->eap & I82443BXGX_EAP_OFFSET_SBE)
+ /* Clear error to allow next error to be reported [p.61] */
+ pci_write_bits32(pdev, I82443BXGX_EAP,
+ I82443BXGX_EAP_OFFSET_SBE,
+ I82443BXGX_EAP_OFFSET_SBE);
+
+ if (info->eap & I82443BXGX_EAP_OFFSET_MBE)
+ /* Clear error to allow next error to be reported [p.61] */
+ pci_write_bits32(pdev, I82443BXGX_EAP,
+ I82443BXGX_EAP_OFFSET_MBE,
+ I82443BXGX_EAP_OFFSET_MBE);
+}
+
+static int i82443bxgx_edacmc_process_error_info(struct mem_ctl_info *mci,
+ struct
+ i82443bxgx_edacmc_error_info
+ *info, int handle_errors)
+{
+ int error_found = 0;
+ u32 eapaddr, page, pageoffset;
+
+ /* bits 30:12 hold the 4kb block in which the error occurred
+ * [p.61] */
+ eapaddr = (info->eap & 0xfffff000);
+ page = eapaddr >> PAGE_SHIFT;
+ pageoffset = eapaddr - (page << PAGE_SHIFT);
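+	/* e.g. (illustrative) eap = 0x12345678 masks to eapaddr 0x12345000,
+	 * giving page 0x12345 and pageoffset 0 with 4 KiB pages */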
+
+ if (info->eap & I82443BXGX_EAP_OFFSET_SBE) {
+ error_found = 1;
+ if (handle_errors)
+ edac_mc_handle_ce(mci, page, pageoffset,
+ /* 440BX/GX don't make syndrome information
+ * available */
+ 0, edac_mc_find_csrow_by_page(mci, page), 0,
+ mci->ctl_name);
+ }
+
+ if (info->eap & I82443BXGX_EAP_OFFSET_MBE) {
+ error_found = 1;
+ if (handle_errors)
+ edac_mc_handle_ue(mci, page, pageoffset,
+ edac_mc_find_csrow_by_page(mci, page),
+ mci->ctl_name);
+ }
+
+ return error_found;
+}
+
+static void i82443bxgx_edacmc_check(struct mem_ctl_info *mci)
+{
+ struct i82443bxgx_edacmc_error_info info;
+
+ debugf1("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
+ i82443bxgx_edacmc_get_error_info(mci, &info);
+ i82443bxgx_edacmc_process_error_info(mci, &info, 1);
+}
+
+static void i82443bxgx_init_csrows(struct mem_ctl_info *mci,
+ struct pci_dev *pdev,
+ enum edac_type edac_mode,
+ enum mem_type mtype)
+{
+ struct csrow_info *csrow;
+ int index;
+ u8 drbar, dramc;
+ u32 row_base, row_high_limit, row_high_limit_last;
+
+ pci_read_config_byte(pdev, I82443BXGX_DRAMC, &dramc);
+ row_high_limit_last = 0;
+ for (index = 0; index < mci->nr_csrows; index++) {
+ csrow = &mci->csrows[index];
+ pci_read_config_byte(pdev, I82443BXGX_DRB + index, &drbar);
+ debugf1("MC%d: " __FILE__ ": %s() Row=%d DRB = %#0x\n",
+ mci->mc_idx, __func__, index, drbar);
+ row_high_limit = ((u32) drbar << 23);
+ /* find the DRAM Chip Select Base address and mask */
+		debugf1("MC%d: " __FILE__ ": %s() Row=%d, "
+			"Boundary Address=%#0x, Last = %#0x\n",
+ mci->mc_idx, __func__, index, row_high_limit,
+ row_high_limit_last);
+
+ /* 440GX goes to 2GB, represented with a DRB of 0. */
+ if (row_high_limit_last && !row_high_limit)
+ row_high_limit = 1UL << 31;
+
+ /* This row is empty [p.49] */
+ if (row_high_limit == row_high_limit_last)
+ continue;
+ row_base = row_high_limit_last;
+ csrow->first_page = row_base >> PAGE_SHIFT;
+ csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1;
+ csrow->nr_pages = csrow->last_page - csrow->first_page + 1;
+		/* EAP reports in 4 kilobyte granularity [p.61] */
+ csrow->grain = 1 << 12;
+ csrow->mtype = mtype;
+ /* I don't think 440BX can tell you device type? FIXME? */
+ csrow->dtype = DEV_UNKNOWN;
+ /* Mode is global to all rows on 440BX */
+ csrow->edac_mode = edac_mode;
+ row_high_limit_last = row_high_limit;
+ }
+}
+
+static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
+{
+ struct mem_ctl_info *mci;
+ u8 dramc;
+ u32 nbxcfg, ecc_mode;
+ enum mem_type mtype;
+ enum edac_type edac_mode;
+
+ debugf0("MC: " __FILE__ ": %s()\n", __func__);
+
+ /* Something is really hosed if PCI config space reads from
+ * the MC aren't working.
+ */
+ if (pci_read_config_dword(pdev, I82443BXGX_NBXCFG, &nbxcfg))
+ return -EIO;
+
+ mci = edac_mc_alloc(0, I82443BXGX_NR_CSROWS, I82443BXGX_NR_CHANS, 0);
+
+ if (mci == NULL)
+ return -ENOMEM;
+
+ debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci);
+ mci->dev = &pdev->dev;
+ mci->mtype_cap = MEM_FLAG_EDO | MEM_FLAG_SDR | MEM_FLAG_RDR;
+ mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
+ pci_read_config_byte(pdev, I82443BXGX_DRAMC, &dramc);
+ switch ((dramc >> I82443BXGX_DRAMC_OFFSET_DT) & (BIT(0) | BIT(1))) {
+ case I82443BXGX_DRAMC_DRAM_IS_EDO:
+ mtype = MEM_EDO;
+ break;
+ case I82443BXGX_DRAMC_DRAM_IS_SDRAM:
+ mtype = MEM_SDR;
+ break;
+ case I82443BXGX_DRAMC_DRAM_IS_RSDRAM:
+ mtype = MEM_RDR;
+ break;
+ default:
+ debugf0("Unknown/reserved DRAM type value "
+ "in DRAMC register!\n");
+		mtype = MEM_UNKNOWN;
+ }
+
+ if ((mtype == MEM_SDR) || (mtype == MEM_RDR))
+ mci->edac_cap = mci->edac_ctl_cap;
+ else
+ mci->edac_cap = EDAC_FLAG_NONE;
+
+ mci->scrub_cap = SCRUB_FLAG_HW_SRC;
+ pci_read_config_dword(pdev, I82443BXGX_NBXCFG, &nbxcfg);
+ ecc_mode = ((nbxcfg >> I82443BXGX_NBXCFG_OFFSET_DRAM_INTEGRITY) &
+ (BIT(0) | BIT(1)));
+
+ mci->scrub_mode = (ecc_mode == I82443BXGX_NBXCFG_INTEGRITY_SCRUB)
+ ? SCRUB_HW_SRC : SCRUB_NONE;
+
+ switch (ecc_mode) {
+ case I82443BXGX_NBXCFG_INTEGRITY_NONE:
+ edac_mode = EDAC_NONE;
+ break;
+ case I82443BXGX_NBXCFG_INTEGRITY_EC:
+ edac_mode = EDAC_EC;
+ break;
+ case I82443BXGX_NBXCFG_INTEGRITY_ECC:
+ case I82443BXGX_NBXCFG_INTEGRITY_SCRUB:
+ edac_mode = EDAC_SECDED;
+ break;
+ default:
+ debugf0("%s(): Unknown/reserved ECC state "
+ "in NBXCFG register!\n", __func__);
+ edac_mode = EDAC_UNKNOWN;
+ break;
+ }
+
+ i82443bxgx_init_csrows(mci, pdev, edac_mode, mtype);
+
+ /* Many BIOSes don't clear error flags on boot, so do this
+	 * here, or we get "phantom" errors occurring at module-load
+ * time. */
+ pci_write_bits32(pdev, I82443BXGX_EAP,
+ (I82443BXGX_EAP_OFFSET_SBE |
+ I82443BXGX_EAP_OFFSET_MBE),
+ (I82443BXGX_EAP_OFFSET_SBE |
+ I82443BXGX_EAP_OFFSET_MBE));
+
+ mci->mod_name = EDAC_MOD_STR;
+ mci->mod_ver = I82443_REVISION;
+ mci->ctl_name = "I82443BXGX";
+ mci->dev_name = pci_name(pdev);
+ mci->edac_check = i82443bxgx_edacmc_check;
+ mci->ctl_page_to_phys = NULL;
+
+ if (edac_mc_add_mc(mci)) {
+ debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
+ goto fail;
+ }
+
+ /* allocating generic PCI control info */
+ i82443bxgx_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
+ if (!i82443bxgx_pci) {
+ printk(KERN_WARNING
+ "%s(): Unable to create PCI control\n",
+ __func__);
+ printk(KERN_WARNING
+ "%s(): PCI error report via EDAC not setup\n",
+ __func__);
+ }
+
+ debugf3("MC: " __FILE__ ": %s(): success\n", __func__);
+ return 0;
+
+fail:
+ edac_mc_free(mci);
+ return -ENODEV;
+}
+
+EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_probe1);
+
+/* returns count (>= 0), or negative on error */
+static int __devinit i82443bxgx_edacmc_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int rc;
+
+ debugf0("MC: " __FILE__ ": %s()\n", __func__);
+
+ /* don't need to call pci_device_enable() */
+ rc = i82443bxgx_edacmc_probe1(pdev, ent->driver_data);
+
+ if (mci_pdev == NULL)
+ mci_pdev = pci_dev_get(pdev);
+
+ return rc;
+}
+
+static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
+{
+ struct mem_ctl_info *mci;
+
+ debugf0(__FILE__ ": %s()\n", __func__);
+
+ if (i82443bxgx_pci)
+ edac_pci_release_generic_ctl(i82443bxgx_pci);
+
+ if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
+ return;
+
+ edac_mc_free(mci);
+}
+
+EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
+
+static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_2)},
+ {0,} /* 0 terminated list. */
+};
+
+MODULE_DEVICE_TABLE(pci, i82443bxgx_pci_tbl);
+
+static struct pci_driver i82443bxgx_edacmc_driver = {
+ .name = EDAC_MOD_STR,
+ .probe = i82443bxgx_edacmc_init_one,
+ .remove = __devexit_p(i82443bxgx_edacmc_remove_one),
+ .id_table = i82443bxgx_pci_tbl,
+};
+
+static int __init i82443bxgx_edacmc_init(void)
+{
+ int pci_rc;
+ /* Ensure that the OPSTATE is set correctly for POLL or NMI */
+ opstate_init();
+
+ pci_rc = pci_register_driver(&i82443bxgx_edacmc_driver);
+ if (pci_rc < 0)
+ goto fail0;
+
+ if (mci_pdev == NULL) {
+ const struct pci_device_id *id = &i82443bxgx_pci_tbl[0];
+ int i = 0;
+ i82443bxgx_registered = 0;
+
+ while (mci_pdev == NULL && id->vendor != 0) {
+ mci_pdev = pci_get_device(id->vendor,
+ id->device, NULL);
+ i++;
+ id = &i82443bxgx_pci_tbl[i];
+ }
+ if (!mci_pdev) {
+ debugf0("i82443bxgx pci_get_device fail\n");
+ pci_rc = -ENODEV;
+ goto fail1;
+ }
+
+ pci_rc = i82443bxgx_edacmc_init_one(mci_pdev, i82443bxgx_pci_tbl);
+
+ if (pci_rc < 0) {
+ debugf0("i82443bxgx init fail\n");
+ pci_rc = -ENODEV;
+ goto fail1;
+ }
+ }
+
+ return 0;
+
+fail1:
+ pci_unregister_driver(&i82443bxgx_edacmc_driver);
+
+fail0:
+ if (mci_pdev != NULL)
+ pci_dev_put(mci_pdev);
+
+ return pci_rc;
+}
+
+static void __exit i82443bxgx_edacmc_exit(void)
+{
+ pci_unregister_driver(&i82443bxgx_edacmc_driver);
+
+ if (!i82443bxgx_registered)
+ i82443bxgx_edacmc_remove_one(mci_pdev);
+
+ if (mci_pdev)
+ pci_dev_put(mci_pdev);
+}
+
+module_init(i82443bxgx_edacmc_init);
+module_exit(i82443bxgx_edacmc_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Tim Small <tim@buttersideup.com> - WPAD");
+MODULE_DESCRIPTION("EDAC MC support for Intel 82443BX/GX memory controllers");
+
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
new file mode 100644
index 0000000..c0088ba
--- /dev/null
+++ b/drivers/edac/i82860_edac.c
@@ -0,0 +1,354 @@
+/*
+ * Intel 82860 Memory Controller kernel module
+ * (C) 2005 Red Hat (http://www.redhat.com)
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written by Ben Woodard <woodard@redhat.com>
+ * shamelessly copied from and based upon the edac_i82875 driver
+ * by Thayne Harbaugh of Linux Networx. (http://lnxi.com)
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+#include <linux/edac.h>
+#include "edac_core.h"
+
+#define I82860_REVISION " Ver: 2.0.2 " __DATE__
+#define EDAC_MOD_STR "i82860_edac"
+
+#define i82860_printk(level, fmt, arg...) \
+ edac_printk(level, "i82860", fmt, ##arg)
+
+#define i82860_mc_printk(mci, level, fmt, arg...) \
+ edac_mc_chipset_printk(mci, level, "i82860", fmt, ##arg)
+
+#ifndef PCI_DEVICE_ID_INTEL_82860_0
+#define PCI_DEVICE_ID_INTEL_82860_0 0x2531
+#endif /* PCI_DEVICE_ID_INTEL_82860_0 */
+
+#define I82860_MCHCFG 0x50
+#define I82860_GBA 0x60
+#define I82860_GBA_MASK 0x7FF
+#define I82860_GBA_SHIFT 24
+#define I82860_ERRSTS 0xC8
+#define I82860_EAP 0xE4
+#define I82860_DERRCTL_STS 0xE2
+
+enum i82860_chips {
+ I82860 = 0,
+};
+
+struct i82860_dev_info {
+ const char *ctl_name;
+};
+
+struct i82860_error_info {
+ u16 errsts;
+ u32 eap;
+ u16 derrsyn;
+ u16 errsts2;
+};
+
+static const struct i82860_dev_info i82860_devs[] = {
+ [I82860] = {
+ .ctl_name = "i82860"},
+};
+
+static struct pci_dev *mci_pdev; /* init dev: in case that AGP code
+ * has already registered driver
+ */
+static struct edac_pci_ctl_info *i82860_pci;
+
+static void i82860_get_error_info(struct mem_ctl_info *mci,
+ struct i82860_error_info *info)
+{
+ struct pci_dev *pdev;
+
+ pdev = to_pci_dev(mci->dev);
+
+ /*
+ * This is a mess because there is no atomic way to read all the
+ * registers at once and the registers can transition from CE being
+ * overwritten by UE.
+ */
+ pci_read_config_word(pdev, I82860_ERRSTS, &info->errsts);
+ pci_read_config_dword(pdev, I82860_EAP, &info->eap);
+ pci_read_config_word(pdev, I82860_DERRCTL_STS, &info->derrsyn);
+ pci_read_config_word(pdev, I82860_ERRSTS, &info->errsts2);
+
+ pci_write_bits16(pdev, I82860_ERRSTS, 0x0003, 0x0003);
+
+ /*
+	 * If the error is the same for both reads then the first set of reads
+	 * is valid. If there is a change then a UE has overwritten the CE:
+	 * report the CE with no info and take the second set of reads,
+	 * which holds the UE details.
+ */
+ if (!(info->errsts2 & 0x0003))
+ return;
+
+ if ((info->errsts ^ info->errsts2) & 0x0003) {
+ pci_read_config_dword(pdev, I82860_EAP, &info->eap);
+ pci_read_config_word(pdev, I82860_DERRCTL_STS, &info->derrsyn);
+ }
+}
+
+static int i82860_process_error_info(struct mem_ctl_info *mci,
+ struct i82860_error_info *info,
+ int handle_errors)
+{
+ int row;
+
+ if (!(info->errsts2 & 0x0003))
+ return 0;
+
+ if (!handle_errors)
+ return 1;
+
+ if ((info->errsts ^ info->errsts2) & 0x0003) {
+ edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+ info->errsts = info->errsts2;
+ }
+
+ info->eap >>= PAGE_SHIFT;
+ row = edac_mc_find_csrow_by_page(mci, info->eap);
+
+ if (info->errsts & 0x0002)
+ edac_mc_handle_ue(mci, info->eap, 0, row, "i82860 UE");
+ else
+		edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row, 0,
+				"i82860 CE");
+
+ return 1;
+}
+
+static void i82860_check(struct mem_ctl_info *mci)
+{
+ struct i82860_error_info info;
+
+ debugf1("MC%d: %s()\n", mci->mc_idx, __func__);
+ i82860_get_error_info(mci, &info);
+ i82860_process_error_info(mci, &info, 1);
+}
+
+static void i82860_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev)
+{
+ unsigned long last_cumul_size;
+ u16 mchcfg_ddim; /* DRAM Data Integrity Mode 0=none, 2=edac */
+ u16 value;
+ u32 cumul_size;
+ struct csrow_info *csrow;
+ int index;
+
+ pci_read_config_word(pdev, I82860_MCHCFG, &mchcfg_ddim);
+ mchcfg_ddim = mchcfg_ddim & 0x180;
+ last_cumul_size = 0;
+
+	/* The group row boundary (GRA) reg values are boundary addresses
+	 * for each DRAM row with a granularity of 16MB.  GRA regs are
+	 * cumulative; therefore GRA15 will contain the total memory
+	 * contained in all 16 rows.
+	 */
+ for (index = 0; index < mci->nr_csrows; index++) {
+ csrow = &mci->csrows[index];
+ pci_read_config_word(pdev, I82860_GBA + index * 2, &value);
+ cumul_size = (value & I82860_GBA_MASK) <<
+ (I82860_GBA_SHIFT - PAGE_SHIFT);
+ debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
+ cumul_size);
+
+ if (cumul_size == last_cumul_size)
+ continue; /* not populated */
+
+ csrow->first_page = last_cumul_size;
+ csrow->last_page = cumul_size - 1;
+ csrow->nr_pages = cumul_size - last_cumul_size;
+ last_cumul_size = cumul_size;
+		csrow->grain = 1 << 12;	/* I82860_EAP has 4KiB resolution */
+ csrow->mtype = MEM_RMBS;
+ csrow->dtype = DEV_UNKNOWN;
+ csrow->edac_mode = mchcfg_ddim ? EDAC_SECDED : EDAC_NONE;
+ }
+}
+
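+/*
+ * Sketch (example only) mirroring the loop in i82860_init_csrows() above:
+ * one raw GBA word decodes to a cumulative page count.  With
+ * PAGE_SHIFT == 12, a GBA value of 0x010 (16 x 16MiB) yields 65536
+ * 4KiB pages, i.e. 256MiB cumulative.
+ */
+#if 0	/* example only */
+static inline u32 i82860_gba_to_pages(u16 value)
+{
+	return (value & I82860_GBA_MASK) << (I82860_GBA_SHIFT - PAGE_SHIFT);
+}
+#endif
+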
+static int i82860_probe1(struct pci_dev *pdev, int dev_idx)
+{
+ struct mem_ctl_info *mci;
+ struct i82860_error_info discard;
+
+	/* RDRAM has channels but these don't map onto the abstractions that
+	 * edac uses.
+	 * The device groups from the GRA registers seem to map reasonably
+	 * well onto the notion of a chip select row.
+	 * There are 16 GRA registers; since each is associated with a
+	 * channel and maps to physical devices, we use one channel per
+	 * group.
+	 */
+ mci = edac_mc_alloc(0, 16, 1, 0);
+
+ if (!mci)
+ return -ENOMEM;
+
+ debugf3("%s(): init mci\n", __func__);
+ mci->dev = &pdev->dev;
+ mci->mtype_cap = MEM_FLAG_DDR;
+ mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
+	/* I'm not sure about this, but I think that all RDRAM is SECDED */
+ mci->edac_cap = EDAC_FLAG_SECDED;
+ mci->mod_name = EDAC_MOD_STR;
+ mci->mod_ver = I82860_REVISION;
+ mci->ctl_name = i82860_devs[dev_idx].ctl_name;
+ mci->dev_name = pci_name(pdev);
+ mci->edac_check = i82860_check;
+ mci->ctl_page_to_phys = NULL;
+ i82860_init_csrows(mci, pdev);
+ i82860_get_error_info(mci, &discard); /* clear counters */
+
+ /* Here we assume that we will never see multiple instances of this
+ * type of memory controller. The ID is therefore hardcoded to 0.
+ */
+ if (edac_mc_add_mc(mci)) {
+ debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
+ goto fail;
+ }
+
+ /* allocating generic PCI control info */
+ i82860_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
+ if (!i82860_pci) {
+ printk(KERN_WARNING
+ "%s(): Unable to create PCI control\n",
+ __func__);
+ printk(KERN_WARNING
+ "%s(): PCI error report via EDAC not setup\n",
+ __func__);
+ }
+
+ /* get this far and it's successful */
+ debugf3("%s(): success\n", __func__);
+
+ return 0;
+
+fail:
+ edac_mc_free(mci);
+ return -ENODEV;
+}
+
+/* returns count (>= 0), or negative on error */
+static int __devinit i82860_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int rc;
+
+ debugf0("%s()\n", __func__);
+ i82860_printk(KERN_INFO, "i82860 init one\n");
+
+ if (pci_enable_device(pdev) < 0)
+ return -EIO;
+
+ rc = i82860_probe1(pdev, ent->driver_data);
+
+ if (rc == 0)
+ mci_pdev = pci_dev_get(pdev);
+
+ return rc;
+}
+
+static void __devexit i82860_remove_one(struct pci_dev *pdev)
+{
+ struct mem_ctl_info *mci;
+
+ debugf0("%s()\n", __func__);
+
+ if (i82860_pci)
+ edac_pci_release_generic_ctl(i82860_pci);
+
+ if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
+ return;
+
+ edac_mc_free(mci);
+}
+
+static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
+ {
+ PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ I82860},
+ {
+ 0,
+ } /* 0 terminated list. */
+};
+
+MODULE_DEVICE_TABLE(pci, i82860_pci_tbl);
+
+static struct pci_driver i82860_driver = {
+ .name = EDAC_MOD_STR,
+ .probe = i82860_init_one,
+ .remove = __devexit_p(i82860_remove_one),
+ .id_table = i82860_pci_tbl,
+};
+
+static int __init i82860_init(void)
+{
+ int pci_rc;
+
+ debugf3("%s()\n", __func__);
+
+ /* Ensure that the OPSTATE is set correctly for POLL or NMI */
+ opstate_init();
+
+ if ((pci_rc = pci_register_driver(&i82860_driver)) < 0)
+ goto fail0;
+
+ if (!mci_pdev) {
+ mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_82860_0, NULL);
+
+ if (mci_pdev == NULL) {
+ debugf0("860 pci_get_device fail\n");
+ pci_rc = -ENODEV;
+ goto fail1;
+ }
+
+ pci_rc = i82860_init_one(mci_pdev, i82860_pci_tbl);
+
+ if (pci_rc < 0) {
+ debugf0("860 init fail\n");
+ pci_rc = -ENODEV;
+ goto fail1;
+ }
+ }
+
+ return 0;
+
+fail1:
+ pci_unregister_driver(&i82860_driver);
+
+fail0:
+ if (mci_pdev != NULL)
+ pci_dev_put(mci_pdev);
+
+ return pci_rc;
+}
+
+static void __exit i82860_exit(void)
+{
+ debugf3("%s()\n", __func__);
+
+ pci_unregister_driver(&i82860_driver);
+
+ if (mci_pdev != NULL)
+ pci_dev_put(mci_pdev);
+}
+
+module_init(i82860_init);
+module_exit(i82860_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com) "
+ "Ben Woodard <woodard@redhat.com>");
+MODULE_DESCRIPTION("ECC support for Intel 82860 memory hub controllers");
+
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
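+
+/*
+ * Usage sketch (assumes nothing beyond the parameter declared above):
+ *
+ *   modprobe i82860_edac edac_op_state=0	# polled operation
+ *   modprobe i82860_edac edac_op_state=1	# NMI operation
+ *
+ * NMI operation additionally depends on the platform actually routing
+ * ECC errors to NMI.
+ */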
diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
new file mode 100644
index 0000000..ebb037b
--- /dev/null
+++ b/drivers/edac/i82875p_edac.c
@@ -0,0 +1,599 @@
+/*
+ * Intel D82875P Memory Controller kernel module
+ * (C) 2003 Linux Networx (http://lnxi.com)
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written by Thayne Harbaugh
+ * Contributors:
+ * Wang Zhenyu at intel.com
+ *
+ * $Id: edac_i82875p.c,v 1.5.2.11 2005/10/05 00:43:44 dsp_llnl Exp $
+ *
+ * Note: E7210 appears same as D82875P - zhenyu.z.wang at intel.com
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+#include <linux/edac.h>
+#include "edac_core.h"
+
+#define I82875P_REVISION " Ver: 2.0.2 " __DATE__
+#define EDAC_MOD_STR "i82875p_edac"
+
+#define i82875p_printk(level, fmt, arg...) \
+ edac_printk(level, "i82875p", fmt, ##arg)
+
+#define i82875p_mc_printk(mci, level, fmt, arg...) \
+ edac_mc_chipset_printk(mci, level, "i82875p", fmt, ##arg)
+
+#ifndef PCI_DEVICE_ID_INTEL_82875_0
+#define PCI_DEVICE_ID_INTEL_82875_0 0x2578
+#endif /* PCI_DEVICE_ID_INTEL_82875_0 */
+
+#ifndef PCI_DEVICE_ID_INTEL_82875_6
+#define PCI_DEVICE_ID_INTEL_82875_6 0x257e
+#endif /* PCI_DEVICE_ID_INTEL_82875_6 */
+
+/* four csrows in dual channel, eight in single channel */
+#define I82875P_NR_CSROWS(nr_chans) (8/(nr_chans))
+
+/* Intel 82875p register addresses - device 0 function 0 - DRAM Controller */
+#define I82875P_EAP 0x58 /* Error Address Pointer (32b)
+ *
+ * 31:12 block address
+ * 11:0 reserved
+ */
+
+#define I82875P_DERRSYN 0x5c /* DRAM Error Syndrome (8b)
+ *
+ * 7:0 DRAM ECC Syndrome
+ */
+
+#define I82875P_DES 0x5d /* DRAM Error Status (8b)
+ *
+ * 7:1 reserved
+ * 0 Error channel 0/1
+ */
+
+#define I82875P_ERRSTS 0xc8 /* Error Status Register (16b)
+ *
+ * 15:10 reserved
+ * 9 non-DRAM lock error (ndlock)
+ * 8 Sftwr Generated SMI
+ * 7 ECC UE
+ * 6 reserved
+ * 5 MCH detects unimplemented cycle
+ * 4 AGP access outside GA
+ * 3 Invalid AGP access
+ * 2 Invalid GA translation table
+ * 1 Unsupported AGP command
+ * 0 ECC CE
+ */
+
+#define I82875P_ERRCMD 0xca /* Error Command (16b)
+ *
+ * 15:10 reserved
+ * 9 SERR on non-DRAM lock
+ * 8 SERR on ECC UE
+ * 7 SERR on ECC CE
+ * 6 target abort on high exception
+ * 5 detect unimplemented cyc
+ * 4 AGP access outside of GA
+ * 3 SERR on invalid AGP access
+ * 2 invalid translation table
+ * 1 SERR on unsupported AGP command
+ * 0 reserved
+ */
+
+/* Intel 82875p register addresses - device 6 function 0 - DRAM Controller */
+#define I82875P_PCICMD6 0x04 /* PCI Command Register (16b)
+ *
+ * 15:10 reserved
+ * 9 fast back-to-back - ro 0
+ * 8 SERR enable - ro 0
+ * 7 addr/data stepping - ro 0
+ * 6 parity err enable - ro 0
+ * 5 VGA palette snoop - ro 0
+ * 4 mem wr & invalidate - ro 0
+ * 3 special cycle - ro 0
+ * 2 bus master - ro 0
+ * 1 mem access dev6 - 0(dis),1(en)
+ * 0 IO access dev3 - 0(dis),1(en)
+ */
+
+#define I82875P_BAR6 0x10 /* Mem Delays Base ADDR Reg (32b)
+ *
+ * 31:12 mem base addr [31:12]
+ * 11:4 address mask - ro 0
+ * 3 prefetchable - ro 0(non),1(pre)
+ * 2:1 mem type - ro 0
+ * 0 mem space - ro 0
+ */
+
+/* Intel 82875p MMIO register space - device 0 function 0 - MMR space */
+
+#define I82875P_DRB_SHIFT 26 /* 64MiB grain */
+#define I82875P_DRB 0x00 /* DRAM Row Boundary (8b x 8)
+ *
+ * 7 reserved
+ * 6:0 64MiB row boundary addr
+ */
+
+#define I82875P_DRA 0x10 /* DRAM Row Attribute (4b x 8)
+ *
+ * 7 reserved
+ * 6:4 row attr row 1
+ * 3 reserved
+ * 2:0 row attr row 0
+ *
+ * 000 = 4KiB
+ * 001 = 8KiB
+ * 010 = 16KiB
+ * 011 = 32KiB
+ */
+
+#define I82875P_DRC 0x68 /* DRAM Controller Mode (32b)
+ *
+ * 31:30 reserved
+ * 29 init complete
+ * 28:23 reserved
+ * 22:21 nr chan 00=1,01=2
+ * 20 reserved
+ * 19:18 Data Integ Mode 00=none,01=ecc
+ * 17:11 reserved
+ * 10:8 refresh mode
+ * 7 reserved
+ * 6:4 mode select
+ * 3:2 reserved
+ * 1:0 DRAM type 01=DDR
+ */
+
+enum i82875p_chips {
+ I82875P = 0,
+};
+
+struct i82875p_pvt {
+ struct pci_dev *ovrfl_pdev;
+ void __iomem *ovrfl_window;
+};
+
+struct i82875p_dev_info {
+ const char *ctl_name;
+};
+
+struct i82875p_error_info {
+ u16 errsts;
+ u32 eap;
+ u8 des;
+ u8 derrsyn;
+ u16 errsts2;
+};
+
+static const struct i82875p_dev_info i82875p_devs[] = {
+ [I82875P] = {
+ .ctl_name = "i82875p"},
+};
+
+static struct pci_dev *mci_pdev;	/* init dev: in case the AGP code has
+					 * already registered a driver
+					 */
+
+static struct edac_pci_ctl_info *i82875p_pci;
+
+static void i82875p_get_error_info(struct mem_ctl_info *mci,
+ struct i82875p_error_info *info)
+{
+ struct pci_dev *pdev;
+
+ pdev = to_pci_dev(mci->dev);
+
+ /*
+ * This is a mess because there is no atomic way to read all the
+ * registers at once and the registers can transition from CE being
+ * overwritten by UE.
+ */
+ pci_read_config_word(pdev, I82875P_ERRSTS, &info->errsts);
+
+ if (!(info->errsts & 0x0081))
+ return;
+
+ pci_read_config_dword(pdev, I82875P_EAP, &info->eap);
+ pci_read_config_byte(pdev, I82875P_DES, &info->des);
+ pci_read_config_byte(pdev, I82875P_DERRSYN, &info->derrsyn);
+ pci_read_config_word(pdev, I82875P_ERRSTS, &info->errsts2);
+
+	/*
+	 * If the error is the same for both reads then the first set of
+	 * reads is valid.  If there is a change then a CE arrived between
+	 * the two reads (it is logged with no page info) and the second
+	 * set of reads is valid and should be the UE info.
+	 */
+ if ((info->errsts ^ info->errsts2) & 0x0081) {
+ pci_read_config_dword(pdev, I82875P_EAP, &info->eap);
+ pci_read_config_byte(pdev, I82875P_DES, &info->des);
+ pci_read_config_byte(pdev, I82875P_DERRSYN, &info->derrsyn);
+ }
+
+ pci_write_bits16(pdev, I82875P_ERRSTS, 0x0081, 0x0081);
+}
+
+static int i82875p_process_error_info(struct mem_ctl_info *mci,
+ struct i82875p_error_info *info,
+ int handle_errors)
+{
+ int row, multi_chan;
+
+ multi_chan = mci->csrows[0].nr_channels - 1;
+
+ if (!(info->errsts & 0x0081))
+ return 0;
+
+ if (!handle_errors)
+ return 1;
+
+ if ((info->errsts ^ info->errsts2) & 0x0081) {
+ edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+ info->errsts = info->errsts2;
+ }
+
+ info->eap >>= PAGE_SHIFT;
+ row = edac_mc_find_csrow_by_page(mci, info->eap);
+
+ if (info->errsts & 0x0080)
+ edac_mc_handle_ue(mci, info->eap, 0, row, "i82875p UE");
+ else
+ edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row,
+ multi_chan ? (info->des & 0x1) : 0,
+ "i82875p CE");
+
+ return 1;
+}
+
+static void i82875p_check(struct mem_ctl_info *mci)
+{
+ struct i82875p_error_info info;
+
+ debugf1("MC%d: %s()\n", mci->mc_idx, __func__);
+ i82875p_get_error_info(mci, &info);
+ i82875p_process_error_info(mci, &info, 1);
+}
+
+/* Return 0 on success or 1 on failure. */
+static int i82875p_setup_overfl_dev(struct pci_dev *pdev,
+ struct pci_dev **ovrfl_pdev,
+ void __iomem **ovrfl_window)
+{
+ struct pci_dev *dev;
+ void __iomem *window;
+ int err;
+
+ *ovrfl_pdev = NULL;
+ *ovrfl_window = NULL;
+ dev = pci_get_device(PCI_VEND_DEV(INTEL, 82875_6), NULL);
+
+ if (dev == NULL) {
+ /* Intel tells BIOS developers to hide device 6 which
+ * configures the overflow device access containing
+ * the DRBs - this is where we expose device 6.
+ * http://www.x86-secret.com/articles/tweak/pat/patsecrets-2.htm
+ */
+ pci_write_bits8(pdev, 0xf4, 0x2, 0x2);
+ dev = pci_scan_single_device(pdev->bus, PCI_DEVFN(6, 0));
+
+ if (dev == NULL)
+ return 1;
+
+ err = pci_bus_add_device(dev);
+ if (err) {
+ i82875p_printk(KERN_ERR,
+ "%s(): pci_bus_add_device() Failed\n",
+ __func__);
+ }
+ pci_bus_assign_resources(dev->bus);
+ }
+
+ *ovrfl_pdev = dev;
+
+ if (pci_enable_device(dev)) {
+ i82875p_printk(KERN_ERR, "%s(): Failed to enable overflow "
+ "device\n", __func__);
+ return 1;
+ }
+
+ if (pci_request_regions(dev, pci_name(dev))) {
+#ifdef CORRECT_BIOS
+ goto fail0;
+#endif
+ }
+
+ /* cache is irrelevant for PCI bus reads/writes */
+ window = ioremap_nocache(pci_resource_start(dev, 0),
+ pci_resource_len(dev, 0));
+
+ if (window == NULL) {
+ i82875p_printk(KERN_ERR, "%s(): Failed to ioremap bar6\n",
+ __func__);
+ goto fail1;
+ }
+
+ *ovrfl_window = window;
+ return 0;
+
+fail1:
+ pci_release_regions(dev);
+
+#ifdef CORRECT_BIOS
+fail0:
+ pci_disable_device(dev);
+#endif
+ /* NOTE: the ovrfl proc entry and pci_dev are intentionally left */
+ return 1;
+}
+
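+/*
+ * Sketch (example only): reading back the device-6 hide bit that
+ * i82875p_setup_overfl_dev() above flips with pci_write_bits8().  The
+ * offset 0xf4 / bit 0x2 choice is taken from that unhide write; treat
+ * anything beyond that as an assumption.
+ */
+#if 0	/* example only */
+static inline int i82875p_dev6_visible(struct pci_dev *pdev)
+{
+	u8 val;
+
+	pci_read_config_byte(pdev, 0xf4, &val);
+	return val & 0x2;
+}
+#endif
+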
+/* Return 1 if dual channel mode is active. Else return 0. */
+static inline int dual_channel_active(u32 drc)
+{
+ return (drc >> 21) & 0x1;
+}
+
+static void i82875p_init_csrows(struct mem_ctl_info *mci,
+ struct pci_dev *pdev,
+ void __iomem * ovrfl_window, u32 drc)
+{
+ struct csrow_info *csrow;
+ unsigned long last_cumul_size;
+ u8 value;
+ u32 drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */
+ u32 cumul_size;
+ int index;
+
+ drc_ddim = (drc >> 18) & 0x1;
+ last_cumul_size = 0;
+
+ /* The dram row boundary (DRB) reg values are boundary address
+ * for each DRAM row with a granularity of 32 or 64MB (single/dual
+ * channel operation). DRB regs are cumulative; therefore DRB7 will
+ * contain the total memory contained in all eight rows.
+ */
+
+ for (index = 0; index < mci->nr_csrows; index++) {
+ csrow = &mci->csrows[index];
+
+ value = readb(ovrfl_window + I82875P_DRB + index);
+ cumul_size = value << (I82875P_DRB_SHIFT - PAGE_SHIFT);
+ debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
+ cumul_size);
+ if (cumul_size == last_cumul_size)
+ continue; /* not populated */
+
+ csrow->first_page = last_cumul_size;
+ csrow->last_page = cumul_size - 1;
+ csrow->nr_pages = cumul_size - last_cumul_size;
+ last_cumul_size = cumul_size;
+		csrow->grain = 1 << 12;	/* I82875P_EAP has 4KiB resolution */
+ csrow->mtype = MEM_DDR;
+ csrow->dtype = DEV_UNKNOWN;
+ csrow->edac_mode = drc_ddim ? EDAC_SECDED : EDAC_NONE;
+ }
+}
+
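+/*
+ * Sketch (example only) mirroring i82875p_init_csrows() above: one DRB
+ * byte read from the overflow window decodes to a cumulative page count
+ * in 64MiB steps, e.g. a DRB value of 4 -> 256MiB -> 65536 4KiB pages.
+ */
+#if 0	/* example only */
+static inline u32 i82875p_drb_to_pages(void __iomem *ovrfl_window, int index)
+{
+	u8 value = readb(ovrfl_window + I82875P_DRB + index);
+
+	return value << (I82875P_DRB_SHIFT - PAGE_SHIFT);
+}
+#endif
+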
+static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
+{
+ int rc = -ENODEV;
+ struct mem_ctl_info *mci;
+ struct i82875p_pvt *pvt;
+ struct pci_dev *ovrfl_pdev;
+ void __iomem *ovrfl_window;
+ u32 drc;
+ u32 nr_chans;
+ struct i82875p_error_info discard;
+
+ debugf0("%s()\n", __func__);
+
+ ovrfl_pdev = pci_get_device(PCI_VEND_DEV(INTEL, 82875_6), NULL);
+
+ if (i82875p_setup_overfl_dev(pdev, &ovrfl_pdev, &ovrfl_window))
+ return -ENODEV;
+ drc = readl(ovrfl_window + I82875P_DRC);
+ nr_chans = dual_channel_active(drc) + 1;
+ mci = edac_mc_alloc(sizeof(*pvt), I82875P_NR_CSROWS(nr_chans),
+ nr_chans, 0);
+
+ if (!mci) {
+ rc = -ENOMEM;
+ goto fail0;
+ }
+
+ /* Keeps mci available after edac_mc_del_mc() till edac_mc_free() */
+ kobject_get(&mci->edac_mci_kobj);
+
+ debugf3("%s(): init mci\n", __func__);
+ mci->dev = &pdev->dev;
+ mci->mtype_cap = MEM_FLAG_DDR;
+ mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
+ mci->edac_cap = EDAC_FLAG_UNKNOWN;
+ mci->mod_name = EDAC_MOD_STR;
+ mci->mod_ver = I82875P_REVISION;
+ mci->ctl_name = i82875p_devs[dev_idx].ctl_name;
+ mci->dev_name = pci_name(pdev);
+ mci->edac_check = i82875p_check;
+ mci->ctl_page_to_phys = NULL;
+ debugf3("%s(): init pvt\n", __func__);
+ pvt = (struct i82875p_pvt *)mci->pvt_info;
+ pvt->ovrfl_pdev = ovrfl_pdev;
+ pvt->ovrfl_window = ovrfl_window;
+ i82875p_init_csrows(mci, pdev, ovrfl_window, drc);
+ i82875p_get_error_info(mci, &discard); /* clear counters */
+
+ /* Here we assume that we will never see multiple instances of this
+ * type of memory controller. The ID is therefore hardcoded to 0.
+ */
+ if (edac_mc_add_mc(mci)) {
+ debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
+ goto fail1;
+ }
+
+ /* allocating generic PCI control info */
+ i82875p_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
+ if (!i82875p_pci) {
+ printk(KERN_WARNING
+ "%s(): Unable to create PCI control\n",
+ __func__);
+ printk(KERN_WARNING
+ "%s(): PCI error report via EDAC not setup\n",
+ __func__);
+ }
+
+ /* get this far and it's successful */
+ debugf3("%s(): success\n", __func__);
+ return 0;
+
+fail1:
+ kobject_put(&mci->edac_mci_kobj);
+ edac_mc_free(mci);
+
+fail0:
+ iounmap(ovrfl_window);
+ pci_release_regions(ovrfl_pdev);
+
+ pci_disable_device(ovrfl_pdev);
+ /* NOTE: the ovrfl proc entry and pci_dev are intentionally left */
+ return rc;
+}
+
+/* returns count (>= 0), or negative on error */
+static int __devinit i82875p_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int rc;
+
+ debugf0("%s()\n", __func__);
+ i82875p_printk(KERN_INFO, "i82875p init one\n");
+
+ if (pci_enable_device(pdev) < 0)
+ return -EIO;
+
+ rc = i82875p_probe1(pdev, ent->driver_data);
+
+ if (mci_pdev == NULL)
+ mci_pdev = pci_dev_get(pdev);
+
+ return rc;
+}
+
+static void __devexit i82875p_remove_one(struct pci_dev *pdev)
+{
+ struct mem_ctl_info *mci;
+ struct i82875p_pvt *pvt = NULL;
+
+ debugf0("%s()\n", __func__);
+
+ if (i82875p_pci)
+ edac_pci_release_generic_ctl(i82875p_pci);
+
+ if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
+ return;
+
+ pvt = (struct i82875p_pvt *)mci->pvt_info;
+
+ if (pvt->ovrfl_window)
+ iounmap(pvt->ovrfl_window);
+
+ if (pvt->ovrfl_pdev) {
+#ifdef CORRECT_BIOS
+ pci_release_regions(pvt->ovrfl_pdev);
+#endif /*CORRECT_BIOS */
+ pci_disable_device(pvt->ovrfl_pdev);
+ pci_dev_put(pvt->ovrfl_pdev);
+ }
+
+ edac_mc_free(mci);
+}
+
+static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
+ {
+ PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ I82875P},
+ {
+ 0,
+ } /* 0 terminated list. */
+};
+
+MODULE_DEVICE_TABLE(pci, i82875p_pci_tbl);
+
+static struct pci_driver i82875p_driver = {
+ .name = EDAC_MOD_STR,
+ .probe = i82875p_init_one,
+ .remove = __devexit_p(i82875p_remove_one),
+ .id_table = i82875p_pci_tbl,
+};
+
+static int __init i82875p_init(void)
+{
+ int pci_rc;
+
+ debugf3("%s()\n", __func__);
+
+ /* Ensure that the OPSTATE is set correctly for POLL or NMI */
+ opstate_init();
+
+ pci_rc = pci_register_driver(&i82875p_driver);
+
+ if (pci_rc < 0)
+ goto fail0;
+
+ if (mci_pdev == NULL) {
+ mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_82875_0, NULL);
+
+ if (!mci_pdev) {
+ debugf0("875p pci_get_device fail\n");
+ pci_rc = -ENODEV;
+ goto fail1;
+ }
+
+ pci_rc = i82875p_init_one(mci_pdev, i82875p_pci_tbl);
+
+ if (pci_rc < 0) {
+ debugf0("875p init fail\n");
+ pci_rc = -ENODEV;
+ goto fail1;
+ }
+ }
+
+ return 0;
+
+fail1:
+ pci_unregister_driver(&i82875p_driver);
+
+fail0:
+ if (mci_pdev != NULL)
+ pci_dev_put(mci_pdev);
+
+ return pci_rc;
+}
+
+static void __exit i82875p_exit(void)
+{
+ debugf3("%s()\n", __func__);
+
+ i82875p_remove_one(mci_pdev);
+ pci_dev_put(mci_pdev);
+
+ pci_unregister_driver(&i82875p_driver);
+
+}
+
+module_init(i82875p_init);
+module_exit(i82875p_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh");
+MODULE_DESCRIPTION("MC support for Intel 82875 memory hub controllers");
+
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
new file mode 100644
index 0000000..2eed3ea
--- /dev/null
+++ b/drivers/edac/i82975x_edac.c
@@ -0,0 +1,672 @@
+/*
+ * Intel 82975X Memory Controller kernel module
+ * (C) 2007 aCarLab (India) Pvt. Ltd. (http://acarlab.com)
+ * (C) 2007 jetzbroadband (http://jetzbroadband.com)
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written by Arvind R.
+ * Copied from i82875p_edac.c source:
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+#include <linux/edac.h>
+#include "edac_core.h"
+
+#define I82975X_REVISION " Ver: 1.0.0 " __DATE__
+#define EDAC_MOD_STR "i82975x_edac"
+
+#define i82975x_printk(level, fmt, arg...) \
+ edac_printk(level, "i82975x", fmt, ##arg)
+
+#define i82975x_mc_printk(mci, level, fmt, arg...) \
+ edac_mc_chipset_printk(mci, level, "i82975x", fmt, ##arg)
+
+#ifndef PCI_DEVICE_ID_INTEL_82975_0
+#define PCI_DEVICE_ID_INTEL_82975_0 0x277c
+#endif /* PCI_DEVICE_ID_INTEL_82975_0 */
+
+#define I82975X_NR_CSROWS(nr_chans) (8/(nr_chans))
+
+/* Intel 82975X register addresses - device 0 function 0 - DRAM Controller */
+#define I82975X_EAP 0x58 /* Dram Error Address Pointer (32b)
+ *
+ * 31:7 128 byte cache-line address
+ * 6:1 reserved
+ * 0 0: CH0; 1: CH1
+ */
+
+#define I82975X_DERRSYN 0x5c /* Dram Error SYNdrome (8b)
+ *
+ * 7:0 DRAM ECC Syndrome
+ */
+
+#define I82975X_DES 0x5d /* Dram ERRor DeSTination (8b)
+ * 0h: Processor Memory Reads
+ * 1h:7h reserved
+					 * More - see page 65 of the Intel datasheet.
+ */
+
+#define I82975X_ERRSTS 0xc8 /* Error Status Register (16b)
+ *
+ * 15:12 reserved
+ * 11 Thermal Sensor Event
+ * 10 reserved
+ * 9 non-DRAM lock error (ndlock)
+ * 8 Refresh Timeout
+ * 7:2 reserved
+ * 1 ECC UE (multibit DRAM error)
+ * 0 ECC CE (singlebit DRAM error)
+ */
+
+/*
+ * Error Reporting is supported by 3 mechanisms:
+ *  1. DMI SERR generation ( ERRCMD )
+ *  2. SMI DMI generation ( SMICMD )
+ *  3. SCI DMI generation ( SCICMD )
+ * NOTE: Only ONE of the three must be enabled
+ */
+#define I82975X_ERRCMD 0xca /* Error Command (16b)
+ *
+ * 15:12 reserved
+ * 11 Thermal Sensor Event
+ * 10 reserved
+ * 9 non-DRAM lock error (ndlock)
+ * 8 Refresh Timeout
+ * 7:2 reserved
+ * 1 ECC UE (multibit DRAM error)
+ * 0 ECC CE (singlebit DRAM error)
+ */
+
+#define I82975X_SMICMD 0xcc /* Error Command (16b)
+ *
+ * 15:2 reserved
+ * 1 ECC UE (multibit DRAM error)
+ * 0 ECC CE (singlebit DRAM error)
+ */
+
+#define I82975X_SCICMD 0xce /* Error Command (16b)
+ *
+ * 15:2 reserved
+ * 1 ECC UE (multibit DRAM error)
+ * 0 ECC CE (singlebit DRAM error)
+ */
+
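+/*
+ * Sketch (example only): one way to honor the "only ONE of the three"
+ * rule above - route DRAM ECC errors to SERR and explicitly mask the SMI
+ * and SCI paths.  pci_write_bits16() takes (dev, offset, value, mask) as
+ * used elsewhere in this file; the bit positions come from the register
+ * layouts above and should be verified against the datasheet.
+ */
+#if 0	/* example only */
+static void i82975x_route_errors_to_serr(struct pci_dev *pdev)
+{
+	pci_write_bits16(pdev, I82975X_ERRCMD, 0x0003, 0x0003); /* UE|CE */
+	pci_write_bits16(pdev, I82975X_SMICMD, 0x0000, 0x0003); /* mask SMI */
+	pci_write_bits16(pdev, I82975X_SCICMD, 0x0000, 0x0003); /* mask SCI */
+}
+#endif
+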
+#define I82975X_XEAP 0xfc /* Extended Dram Error Address Pointer (8b)
+ *
+ * 7:1 reserved
+ * 0 Bit32 of the Dram Error Address
+ */
+
+#define I82975X_MCHBAR 0x44 /*
+ *
+ * 31:14 Base Addr of 16K memory-mapped
+ * configuration space
+					 * 13:1 reserved
+ * 0 mem-mapped config space enable
+ */
+
+/* NOTE: Following addresses have to indexed using MCHBAR offset (44h, 32b) */
+/* Intel 82975x memory mapped register space */
+
+#define I82975X_DRB_SHIFT 25 /* fixed 32MiB grain */
+
+#define I82975X_DRB 0x100 /* DRAM Row Boundary (8b x 8)
+ *
+ * 7 set to 1 in highest DRB of
+ * channel if 4GB in ch.
+ * 6:2 upper boundary of rank in
+ * 32MB grains
+ * 1:0 set to 0
+ */
+#define I82975X_DRB_CH0R0 0x100
+#define I82975X_DRB_CH0R1 0x101
+#define I82975X_DRB_CH0R2 0x102
+#define I82975X_DRB_CH0R3 0x103
+#define I82975X_DRB_CH1R0 0x180
+#define I82975X_DRB_CH1R1 0x181
+#define I82975X_DRB_CH1R2 0x182
+#define I82975X_DRB_CH1R3 0x183
+
+
+#define I82975X_DRA 0x108 /* DRAM Row Attribute (4b x 8)
+ * defines the PAGE SIZE to be used
+ * for the rank
+ * 7 reserved
+ * 6:4 row attr of odd rank, i.e. 1
+ * 3 reserved
+ * 2:0 row attr of even rank, i.e. 0
+ *
+ * 000 = unpopulated
+ * 001 = reserved
+ * 010 = 4KiB
+ * 011 = 8KiB
+ * 100 = 16KiB
+ * others = reserved
+ */
+#define I82975X_DRA_CH0R01 0x108
+#define I82975X_DRA_CH0R23 0x109
+#define I82975X_DRA_CH1R01 0x188
+#define I82975X_DRA_CH1R23 0x189
+
+
+#define I82975X_BNKARC 0x10e /* Type of device in each rank - Bank Arch (16b)
+ *
+ * 15:8 reserved
+ * 7:6 Rank 3 architecture
+ * 5:4 Rank 2 architecture
+ * 3:2 Rank 1 architecture
+ * 1:0 Rank 0 architecture
+ *
+ * 00 => x16 devices; i.e 4 banks
+ * 01 => x8 devices; i.e 8 banks
+ */
+#define I82975X_C0BNKARC 0x10e
+#define I82975X_C1BNKARC 0x18e
+
+
+
+#define I82975X_DRC 0x120 /* DRAM Controller Mode0 (32b)
+ *
+ * 31:30 reserved
+ * 29 init complete
+ * 28:11 reserved, according to Intel
+					 * 22:21 number of channels
+					 *       00=1, 01=2 on the 82875;
+					 *       on the 82975 (Asus P5W)
+					 *       these seem to be the ECC
+					 *       mode bits
+ * 19:18 Data Integ Mode
+ * 00=none 01=ECC in 82875
+ * 10:8 refresh mode
+ * 7 reserved
+ * 6:4 mode select
+ * 3:2 reserved
+ * 1:0 DRAM type 10=Second Revision
+ * DDR2 SDRAM
+ * 00, 01, 11 reserved
+ */
+#define I82975X_DRC_CH0M0 0x120
+#define I82975X_DRC_CH1M0 0x1A0
+
+
+#define I82975X_DRC_M1 0x124 /* DRAM Controller Mode1 (32b)
+ * 31 0=Standard Address Map
+ * 1=Enhanced Address Map
+ * 30:0 reserved
+ */
+
+#define I82975X_DRC_CH0M1 0x124
+#define I82975X_DRC_CH1M1 0x1A4
+
+enum i82975x_chips {
+ I82975X = 0,
+};
+
+struct i82975x_pvt {
+ void __iomem *mch_window;
+};
+
+struct i82975x_dev_info {
+ const char *ctl_name;
+};
+
+struct i82975x_error_info {
+ u16 errsts;
+ u32 eap;
+ u8 des;
+ u8 derrsyn;
+ u16 errsts2;
+ u8 chan; /* the channel is bit 0 of EAP */
+ u8 xeap; /* extended eap bit */
+};
+
+static const struct i82975x_dev_info i82975x_devs[] = {
+ [I82975X] = {
+ .ctl_name = "i82975x"
+ },
+};
+
+static struct pci_dev *mci_pdev;	/* init dev: in case the AGP code has
+					 * already registered a driver
+					 */
+
+static int i82975x_registered = 1;
+
+static void i82975x_get_error_info(struct mem_ctl_info *mci,
+ struct i82975x_error_info *info)
+{
+ struct pci_dev *pdev;
+
+ pdev = to_pci_dev(mci->dev);
+
+ /*
+ * This is a mess because there is no atomic way to read all the
+ * registers at once and the registers can transition from CE being
+ * overwritten by UE.
+ */
+ pci_read_config_word(pdev, I82975X_ERRSTS, &info->errsts);
+ pci_read_config_dword(pdev, I82975X_EAP, &info->eap);
+ pci_read_config_byte(pdev, I82975X_XEAP, &info->xeap);
+ pci_read_config_byte(pdev, I82975X_DES, &info->des);
+ pci_read_config_byte(pdev, I82975X_DERRSYN, &info->derrsyn);
+ pci_read_config_word(pdev, I82975X_ERRSTS, &info->errsts2);
+
+ pci_write_bits16(pdev, I82975X_ERRSTS, 0x0003, 0x0003);
+
+	/*
+	 * If the error is the same for both reads then the first set of
+	 * reads is valid.  If there is a change then a CE arrived between
+	 * the two reads (it is logged with no page info) and the second
+	 * set of reads is valid and should be the UE info.
+	 */
+ if (!(info->errsts2 & 0x0003))
+ return;
+
+ if ((info->errsts ^ info->errsts2) & 0x0003) {
+ pci_read_config_dword(pdev, I82975X_EAP, &info->eap);
+ pci_read_config_byte(pdev, I82975X_XEAP, &info->xeap);
+ pci_read_config_byte(pdev, I82975X_DES, &info->des);
+ pci_read_config_byte(pdev, I82975X_DERRSYN,
+ &info->derrsyn);
+ }
+}
+
+static int i82975x_process_error_info(struct mem_ctl_info *mci,
+ struct i82975x_error_info *info, int handle_errors)
+{
+ int row, multi_chan, chan;
+
+ multi_chan = mci->csrows[0].nr_channels - 1;
+
+ if (!(info->errsts2 & 0x0003))
+ return 0;
+
+ if (!handle_errors)
+ return 1;
+
+ if ((info->errsts ^ info->errsts2) & 0x0003) {
+ edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+ info->errsts = info->errsts2;
+ }
+
+ chan = info->eap & 1;
+ info->eap >>= 1;
+	if (info->xeap)
+ info->eap |= 0x80000000;
+ info->eap >>= PAGE_SHIFT;
+ row = edac_mc_find_csrow_by_page(mci, info->eap);
+
+ if (info->errsts & 0x0002)
+ edac_mc_handle_ue(mci, info->eap, 0, row, "i82975x UE");
+ else
+ edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row,
+ multi_chan ? chan : 0,
+ "i82975x CE");
+
+ return 1;
+}
+
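+/*
+ * Sketch (example only) mirroring the arithmetic above: rebuilding the
+ * error page frame from EAP/XEAP.  Bit 0 of EAP is the channel; XEAP
+ * bit 0 supplies bit 32 of the DRAM address, which lands in bit 31 once
+ * the channel bit has been shifted out.
+ */
+#if 0	/* example only */
+static inline u32 i82975x_err_page(u32 eap, u8 xeap)
+{
+	eap >>= 1;			/* strip the channel bit */
+	if (xeap & 1)
+		eap |= 0x80000000;	/* address bit 32 */
+	return eap >> PAGE_SHIFT;
+}
+#endif
+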
+static void i82975x_check(struct mem_ctl_info *mci)
+{
+ struct i82975x_error_info info;
+
+ debugf1("MC%d: %s()\n", mci->mc_idx, __func__);
+ i82975x_get_error_info(mci, &info);
+ i82975x_process_error_info(mci, &info, 1);
+}
+
+/* Return 1 if dual channel mode is active. Else return 0. */
+static int dual_channel_active(void __iomem *mch_window)
+{
+ /*
+ * We treat interleaved-symmetric configuration as dual-channel - EAP's
+ * bit-0 giving the channel of the error location.
+ *
+ * All other configurations are treated as single channel - the EAP's
+ * bit-0 will resolve ok in symmetric area of mixed
+ * (symmetric/asymmetric) configurations
+ */
+ u8 drb[4][2];
+ int row;
+ int dualch;
+
+ for (dualch = 1, row = 0; dualch && (row < 4); row++) {
+ drb[row][0] = readb(mch_window + I82975X_DRB + row);
+ drb[row][1] = readb(mch_window + I82975X_DRB + row + 0x80);
+ dualch = dualch && (drb[row][0] == drb[row][1]);
+ }
+ return dualch;
+}
+
+static enum dev_type i82975x_dram_type(void __iomem *mch_window, int rank)
+{
+ /*
+ * ASUS P5W DH either does not program this register or programs
+ * it wrong!
+	 * ECC is possible on i82975x ONLY with DEV_X8, which should mean 'val'
+ * for each rank should be 01b - the LSB of the word should be 0x55;
+ * but it reads 0!
+ */
+ return DEV_X8;
+}
+
+static void i82975x_init_csrows(struct mem_ctl_info *mci,
+ struct pci_dev *pdev, void __iomem *mch_window)
+{
+ struct csrow_info *csrow;
+ unsigned long last_cumul_size;
+ u8 value;
+ u32 cumul_size;
+ int index;
+
+ last_cumul_size = 0;
+
+ /*
+ * 82875 comment:
+ * The dram row boundary (DRB) reg values are boundary address
+ * for each DRAM row with a granularity of 32 or 64MB (single/dual
+ * channel operation). DRB regs are cumulative; therefore DRB7 will
+ * contain the total memory contained in all eight rows.
+ *
+ * FIXME:
+ * EDAC currently works for Dual-channel Interleaved configuration.
+ * Other configurations, which the chip supports, need fixing/testing.
+ *
+ */
+
+ for (index = 0; index < mci->nr_csrows; index++) {
+ csrow = &mci->csrows[index];
+
+ value = readb(mch_window + I82975X_DRB + index +
+ ((index >= 4) ? 0x80 : 0));
+ cumul_size = value;
+ cumul_size <<= (I82975X_DRB_SHIFT - PAGE_SHIFT);
+ debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
+ cumul_size);
+ if (cumul_size == last_cumul_size)
+ continue; /* not populated */
+
+ csrow->first_page = last_cumul_size;
+ csrow->last_page = cumul_size - 1;
+ csrow->nr_pages = cumul_size - last_cumul_size;
+ last_cumul_size = cumul_size;
+ csrow->grain = 1 << 7; /* I82975X_EAP has 128B resolution */
+ csrow->mtype = MEM_DDR; /* i82975x supports only DDR2 */
+ csrow->dtype = i82975x_dram_type(mch_window, index);
+		csrow->edac_mode = EDAC_SECDED; /* the only mode supported */
+ }
+}
+
+/* #define i82975x_DEBUG_IOMEM */
+
+#ifdef i82975x_DEBUG_IOMEM
+static void i82975x_print_dram_timings(void __iomem *mch_window)
+{
+ /*
+ * The register meanings are from Intel specs;
+ * (shows 13-5-5-5 for 800-DDR2)
+ * Asus P5W Bios reports 15-5-4-4
+ * What's your religion?
+ */
+ static const int caslats[4] = { 5, 4, 3, 6 };
+ u32 dtreg[2];
+
+ dtreg[0] = readl(mch_window + 0x114);
+ dtreg[1] = readl(mch_window + 0x194);
+ i82975x_printk(KERN_INFO, "DRAM Timings : Ch0 Ch1\n"
+ " RAS Active Min = %d %d\n"
+ " CAS latency = %d %d\n"
+ " RAS to CAS = %d %d\n"
+ " RAS precharge = %d %d\n",
+ (dtreg[0] >> 19 ) & 0x0f,
+ (dtreg[1] >> 19) & 0x0f,
+ caslats[(dtreg[0] >> 8) & 0x03],
+ caslats[(dtreg[1] >> 8) & 0x03],
+ ((dtreg[0] >> 4) & 0x07) + 2,
+ ((dtreg[1] >> 4) & 0x07) + 2,
+ (dtreg[0] & 0x07) + 2,
+ (dtreg[1] & 0x07) + 2
+ );
+
+}
+#endif
+
+static int i82975x_probe1(struct pci_dev *pdev, int dev_idx)
+{
+ int rc = -ENODEV;
+ struct mem_ctl_info *mci;
+ struct i82975x_pvt *pvt;
+ void __iomem *mch_window;
+ u32 mchbar;
+ u32 drc[2];
+ struct i82975x_error_info discard;
+ int chans;
+#ifdef i82975x_DEBUG_IOMEM
+ u8 c0drb[4];
+ u8 c1drb[4];
+#endif
+
+ debugf0("%s()\n", __func__);
+
+ pci_read_config_dword(pdev, I82975X_MCHBAR, &mchbar);
+ if (!(mchbar & 1)) {
+ debugf3("%s(): failed, MCHBAR disabled!\n", __func__);
+ goto fail0;
+ }
+ mchbar &= 0xffffc000; /* bits 31:14 used for 16K window */
+ mch_window = ioremap_nocache(mchbar, 0x1000);
+
+#ifdef i82975x_DEBUG_IOMEM
+ i82975x_printk(KERN_INFO, "MCHBAR real = %0x, remapped = %p\n",
+ mchbar, mch_window);
+
+ c0drb[0] = readb(mch_window + I82975X_DRB_CH0R0);
+ c0drb[1] = readb(mch_window + I82975X_DRB_CH0R1);
+ c0drb[2] = readb(mch_window + I82975X_DRB_CH0R2);
+ c0drb[3] = readb(mch_window + I82975X_DRB_CH0R3);
+ c1drb[0] = readb(mch_window + I82975X_DRB_CH1R0);
+ c1drb[1] = readb(mch_window + I82975X_DRB_CH1R1);
+ c1drb[2] = readb(mch_window + I82975X_DRB_CH1R2);
+ c1drb[3] = readb(mch_window + I82975X_DRB_CH1R3);
+ i82975x_printk(KERN_INFO, "DRBCH0R0 = 0x%02x\n", c0drb[0]);
+ i82975x_printk(KERN_INFO, "DRBCH0R1 = 0x%02x\n", c0drb[1]);
+ i82975x_printk(KERN_INFO, "DRBCH0R2 = 0x%02x\n", c0drb[2]);
+ i82975x_printk(KERN_INFO, "DRBCH0R3 = 0x%02x\n", c0drb[3]);
+ i82975x_printk(KERN_INFO, "DRBCH1R0 = 0x%02x\n", c1drb[0]);
+ i82975x_printk(KERN_INFO, "DRBCH1R1 = 0x%02x\n", c1drb[1]);
+ i82975x_printk(KERN_INFO, "DRBCH1R2 = 0x%02x\n", c1drb[2]);
+ i82975x_printk(KERN_INFO, "DRBCH1R3 = 0x%02x\n", c1drb[3]);
+#endif
+
+ drc[0] = readl(mch_window + I82975X_DRC_CH0M0);
+ drc[1] = readl(mch_window + I82975X_DRC_CH1M0);
+#ifdef i82975x_DEBUG_IOMEM
+ i82975x_printk(KERN_INFO, "DRC_CH0 = %0x, %s\n", drc[0],
+ ((drc[0] >> 21) & 3) == 1 ?
+ "ECC enabled" : "ECC disabled");
+ i82975x_printk(KERN_INFO, "DRC_CH1 = %0x, %s\n", drc[1],
+ ((drc[1] >> 21) & 3) == 1 ?
+ "ECC enabled" : "ECC disabled");
+
+ i82975x_printk(KERN_INFO, "C0 BNKARC = %0x\n",
+ readw(mch_window + I82975X_C0BNKARC));
+ i82975x_printk(KERN_INFO, "C1 BNKARC = %0x\n",
+ readw(mch_window + I82975X_C1BNKARC));
+ i82975x_print_dram_timings(mch_window);
+ goto fail1;
+#endif
+ if (!(((drc[0] >> 21) & 3) == 1 || ((drc[1] >> 21) & 3) == 1)) {
+ i82975x_printk(KERN_INFO, "ECC disabled on both channels.\n");
+ goto fail1;
+ }
+
+ chans = dual_channel_active(mch_window) + 1;
+
+ /* assuming only one controller, index thus is 0 */
+ mci = edac_mc_alloc(sizeof(*pvt), I82975X_NR_CSROWS(chans),
+ chans, 0);
+ if (!mci) {
+ rc = -ENOMEM;
+ goto fail1;
+ }
+
+ debugf3("%s(): init mci\n", __func__);
+ mci->dev = &pdev->dev;
+ mci->mtype_cap = MEM_FLAG_DDR;
+ mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
+ mci->edac_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
+ mci->mod_name = EDAC_MOD_STR;
+ mci->mod_ver = I82975X_REVISION;
+ mci->ctl_name = i82975x_devs[dev_idx].ctl_name;
+ mci->edac_check = i82975x_check;
+ mci->ctl_page_to_phys = NULL;
+ debugf3("%s(): init pvt\n", __func__);
+ pvt = (struct i82975x_pvt *) mci->pvt_info;
+ pvt->mch_window = mch_window;
+ i82975x_init_csrows(mci, pdev, mch_window);
+ i82975x_get_error_info(mci, &discard); /* clear counters */
+
+ /* finalize this instance of memory controller with edac core */
+ if (edac_mc_add_mc(mci)) {
+ debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
+ goto fail2;
+ }
+
+ /* get this far and it's successful */
+ debugf3("%s(): success\n", __func__);
+ return 0;
+
+fail2:
+ edac_mc_free(mci);
+
+fail1:
+ iounmap(mch_window);
+fail0:
+ return rc;
+}
+
+/* returns count (>= 0), or negative on error */
+static int __devinit i82975x_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int rc;
+
+ debugf0("%s()\n", __func__);
+
+ if (pci_enable_device(pdev) < 0)
+ return -EIO;
+
+ rc = i82975x_probe1(pdev, ent->driver_data);
+
+ if (mci_pdev == NULL)
+ mci_pdev = pci_dev_get(pdev);
+
+ return rc;
+}
+
+static void __devexit i82975x_remove_one(struct pci_dev *pdev)
+{
+ struct mem_ctl_info *mci;
+ struct i82975x_pvt *pvt;
+
+ debugf0("%s()\n", __func__);
+
+ mci = edac_mc_del_mc(&pdev->dev);
+ if (mci == NULL)
+ return;
+
+ pvt = mci->pvt_info;
+ if (pvt->mch_window)
+		iounmap(pvt->mch_window);
+
+ edac_mc_free(mci);
+}
+
+static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
+ {
+ PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ I82975X
+ },
+ {
+ 0,
+ } /* 0 terminated list. */
+};
+
+MODULE_DEVICE_TABLE(pci, i82975x_pci_tbl);
+
+static struct pci_driver i82975x_driver = {
+ .name = EDAC_MOD_STR,
+ .probe = i82975x_init_one,
+ .remove = __devexit_p(i82975x_remove_one),
+ .id_table = i82975x_pci_tbl,
+};
+
+static int __init i82975x_init(void)
+{
+ int pci_rc;
+
+ debugf3("%s()\n", __func__);
+
+ /* Ensure that the OPSTATE is set correctly for POLL or NMI */
+ opstate_init();
+
+ pci_rc = pci_register_driver(&i82975x_driver);
+ if (pci_rc < 0)
+ goto fail0;
+
+ if (mci_pdev == NULL) {
+ mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_82975_0, NULL);
+
+ if (!mci_pdev) {
+ debugf0("i82975x pci_get_device fail\n");
+ pci_rc = -ENODEV;
+ goto fail1;
+ }
+
+ pci_rc = i82975x_init_one(mci_pdev, i82975x_pci_tbl);
+
+ if (pci_rc < 0) {
+ debugf0("i82975x init fail\n");
+ pci_rc = -ENODEV;
+ goto fail1;
+ }
+ }
+
+ return 0;
+
+fail1:
+ pci_unregister_driver(&i82975x_driver);
+
+fail0:
+ if (mci_pdev != NULL)
+ pci_dev_put(mci_pdev);
+
+ return pci_rc;
+}
+
+static void __exit i82975x_exit(void)
+{
+ debugf3("%s()\n", __func__);
+
+ pci_unregister_driver(&i82975x_driver);
+
+ if (!i82975x_registered) {
+ i82975x_remove_one(mci_pdev);
+ pci_dev_put(mci_pdev);
+ }
+}
+
+module_init(i82975x_init);
+module_exit(i82975x_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Arvind R. <arvind@acarlab.com>");
+MODULE_DESCRIPTION("MC support for Intel 82975 memory hub controllers");
+
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
new file mode 100644
index 0000000..0cfcb2d
--- /dev/null
+++ b/drivers/edac/mpc85xx_edac.c
@@ -0,0 +1,1078 @@
+/*
+ * Freescale MPC85xx Memory Controller kernel module
+ *
+ * Author: Dave Jiang <djiang@mvista.com>
+ *
+ * 2006-2007 (c) MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ *
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/ctype.h>
+#include <linux/io.h>
+#include <linux/mod_devicetable.h>
+#include <linux/edac.h>
+#include <linux/smp.h>
+
+#include <linux/of_platform.h>
+#include <linux/of_device.h>
+#include "edac_module.h"
+#include "edac_core.h"
+#include "mpc85xx_edac.h"
+
+static int edac_dev_idx;
+static int edac_pci_idx;
+static int edac_mc_idx;
+
+static u32 orig_ddr_err_disable;
+static u32 orig_ddr_err_sbe;
+
+/*
+ * PCI Err defines
+ */
+#ifdef CONFIG_PCI
+static u32 orig_pci_err_cap_dr;
+static u32 orig_pci_err_en;
+#endif
+
+static u32 orig_l2_err_disable;
+static u32 orig_hid1[2];
+
+/************************ MC SYSFS parts ***********************************/
+
+static ssize_t mpc85xx_mc_inject_data_hi_show(struct mem_ctl_info *mci,
+ char *data)
+{
+ struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
+ return sprintf(data, "0x%08x",
+ in_be32(pdata->mc_vbase +
+ MPC85XX_MC_DATA_ERR_INJECT_HI));
+}
+
+static ssize_t mpc85xx_mc_inject_data_lo_show(struct mem_ctl_info *mci,
+ char *data)
+{
+ struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
+ return sprintf(data, "0x%08x",
+ in_be32(pdata->mc_vbase +
+ MPC85XX_MC_DATA_ERR_INJECT_LO));
+}
+
+static ssize_t mpc85xx_mc_inject_ctrl_show(struct mem_ctl_info *mci, char *data)
+{
+ struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
+ return sprintf(data, "0x%08x",
+ in_be32(pdata->mc_vbase + MPC85XX_MC_ECC_ERR_INJECT));
+}
+
+static ssize_t mpc85xx_mc_inject_data_hi_store(struct mem_ctl_info *mci,
+ const char *data, size_t count)
+{
+ struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
+ if (isdigit(*data)) {
+ out_be32(pdata->mc_vbase + MPC85XX_MC_DATA_ERR_INJECT_HI,
+ simple_strtoul(data, NULL, 0));
+ return count;
+ }
+ return 0;
+}
+
+static ssize_t mpc85xx_mc_inject_data_lo_store(struct mem_ctl_info *mci,
+ const char *data, size_t count)
+{
+ struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
+ if (isdigit(*data)) {
+ out_be32(pdata->mc_vbase + MPC85XX_MC_DATA_ERR_INJECT_LO,
+ simple_strtoul(data, NULL, 0));
+ return count;
+ }
+ return 0;
+}
+
+static ssize_t mpc85xx_mc_inject_ctrl_store(struct mem_ctl_info *mci,
+ const char *data, size_t count)
+{
+ struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
+ if (isdigit(*data)) {
+ out_be32(pdata->mc_vbase + MPC85XX_MC_ECC_ERR_INJECT,
+ simple_strtoul(data, NULL, 0));
+ return count;
+ }
+ return 0;
+}
+
+static struct mcidev_sysfs_attribute mpc85xx_mc_sysfs_attributes[] = {
+ {
+ .attr = {
+ .name = "inject_data_hi",
+ .mode = (S_IRUGO | S_IWUSR)
+ },
+ .show = mpc85xx_mc_inject_data_hi_show,
+ .store = mpc85xx_mc_inject_data_hi_store},
+ {
+ .attr = {
+ .name = "inject_data_lo",
+ .mode = (S_IRUGO | S_IWUSR)
+ },
+ .show = mpc85xx_mc_inject_data_lo_show,
+ .store = mpc85xx_mc_inject_data_lo_store},
+ {
+ .attr = {
+ .name = "inject_ctrl",
+ .mode = (S_IRUGO | S_IWUSR)
+ },
+ .show = mpc85xx_mc_inject_ctrl_show,
+ .store = mpc85xx_mc_inject_ctrl_store},
+
+ /* End of list */
+ {
+ .attr = {.name = NULL}
+ }
+};
+
+static void mpc85xx_set_mc_sysfs_attributes(struct mem_ctl_info *mci)
+{
+ mci->mc_driver_sysfs_attributes = mpc85xx_mc_sysfs_attributes;
+}
+
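+/*
+ * Usage sketch (the sysfs path is an assumption - it depends on the EDAC
+ * core version): with the attributes above registered, error injection
+ * can be driven from user space, e.g.
+ *
+ *   echo 0xffffffff > /sys/devices/system/edac/mc/mc0/inject_data_hi
+ *   echo 0x00000100 > /sys/devices/system/edac/mc/mc0/inject_ctrl
+ *
+ * Writes must start with a digit; anything else is rejected (the store
+ * handlers above return 0).
+ */
+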
+/**************************** PCI Err device ***************************/
+#ifdef CONFIG_PCI
+
+static void mpc85xx_pci_check(struct edac_pci_ctl_info *pci)
+{
+ struct mpc85xx_pci_pdata *pdata = pci->pvt_info;
+ u32 err_detect;
+
+ err_detect = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR);
+
+ /* master aborts can happen during PCI config cycles */
+ if (!(err_detect & ~(PCI_EDE_MULTI_ERR | PCI_EDE_MST_ABRT))) {
+ out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, err_detect);
+ return;
+ }
+
+ printk(KERN_ERR "PCI error(s) detected\n");
+ printk(KERN_ERR "PCI/X ERR_DR register: %#08x\n", err_detect);
+
+ printk(KERN_ERR "PCI/X ERR_ATTRIB register: %#08x\n",
+ in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ATTRIB));
+ printk(KERN_ERR "PCI/X ERR_ADDR register: %#08x\n",
+ in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ADDR));
+ printk(KERN_ERR "PCI/X ERR_EXT_ADDR register: %#08x\n",
+ in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EXT_ADDR));
+ printk(KERN_ERR "PCI/X ERR_DL register: %#08x\n",
+ in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DL));
+ printk(KERN_ERR "PCI/X ERR_DH register: %#08x\n",
+ in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DH));
+
+ /* clear error bits */
+ out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, err_detect);
+
+ if (err_detect & PCI_EDE_PERR_MASK)
+ edac_pci_handle_pe(pci, pci->ctl_name);
+
+ if ((err_detect & ~PCI_EDE_MULTI_ERR) & ~PCI_EDE_PERR_MASK)
+ edac_pci_handle_npe(pci, pci->ctl_name);
+}
+
+static irqreturn_t mpc85xx_pci_isr(int irq, void *dev_id)
+{
+ struct edac_pci_ctl_info *pci = dev_id;
+ struct mpc85xx_pci_pdata *pdata = pci->pvt_info;
+ u32 err_detect;
+
+ err_detect = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR);
+
+ if (!err_detect)
+ return IRQ_NONE;
+
+ mpc85xx_pci_check(pci);
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit mpc85xx_pci_err_probe(struct of_device *op,
+ const struct of_device_id *match)
+{
+ struct edac_pci_ctl_info *pci;
+ struct mpc85xx_pci_pdata *pdata;
+ struct resource r;
+ int res = 0;
+
+ if (!devres_open_group(&op->dev, mpc85xx_pci_err_probe, GFP_KERNEL))
+ return -ENOMEM;
+
+ pci = edac_pci_alloc_ctl_info(sizeof(*pdata), "mpc85xx_pci_err");
+ if (!pci)
+ return -ENOMEM;
+
+ pdata = pci->pvt_info;
+ pdata->name = "mpc85xx_pci_err";
+ pdata->irq = NO_IRQ;
+ dev_set_drvdata(&op->dev, pci);
+ pci->dev = &op->dev;
+ pci->mod_name = EDAC_MOD_STR;
+ pci->ctl_name = pdata->name;
+ pci->dev_name = op->dev.bus_id;
+
+ if (edac_op_state == EDAC_OPSTATE_POLL)
+ pci->edac_check = mpc85xx_pci_check;
+
+ pdata->edac_idx = edac_pci_idx++;
+
+ res = of_address_to_resource(op->node, 0, &r);
+ if (res) {
+ printk(KERN_ERR "%s: Unable to get resource for "
+ "PCI err regs\n", __func__);
+ goto err;
+ }
+
+ /* we only need the error registers */
+ r.start += 0xe00;
+
+ if (!devm_request_mem_region(&op->dev, r.start,
+ r.end - r.start + 1, pdata->name)) {
+ printk(KERN_ERR "%s: Error while requesting mem region\n",
+ __func__);
+ res = -EBUSY;
+ goto err;
+ }
+
+ pdata->pci_vbase = devm_ioremap(&op->dev, r.start,
+ r.end - r.start + 1);
+ if (!pdata->pci_vbase) {
+ printk(KERN_ERR "%s: Unable to setup PCI err regs\n", __func__);
+ res = -ENOMEM;
+ goto err;
+ }
+
+ orig_pci_err_cap_dr =
+ in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR);
+
+ /* PCI master abort is expected during config cycles */
+ out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR, 0x40);
+
+ orig_pci_err_en = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN);
+
+ /* disable master abort reporting */
+ out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, ~0x40);
+
+ /* clear error bits */
+ out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, ~0);
+
+ if (edac_pci_add_device(pci, pdata->edac_idx) > 0) {
+ debugf3("%s(): failed edac_pci_add_device()\n", __func__);
+ goto err;
+ }
+
+ if (edac_op_state == EDAC_OPSTATE_INT) {
+ pdata->irq = irq_of_parse_and_map(op->node, 0);
+ res = devm_request_irq(&op->dev, pdata->irq,
+ mpc85xx_pci_isr, IRQF_DISABLED,
+ "[EDAC] PCI err", pci);
+ if (res < 0) {
+ printk(KERN_ERR
+			       "%s: Unable to request irq %d for "
+ "MPC85xx PCI err\n", __func__, pdata->irq);
+ irq_dispose_mapping(pdata->irq);
+ res = -ENODEV;
+ goto err2;
+ }
+
+ printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for PCI Err\n",
+ pdata->irq);
+ }
+
+ devres_remove_group(&op->dev, mpc85xx_pci_err_probe);
+ debugf3("%s(): success\n", __func__);
+ printk(KERN_INFO EDAC_MOD_STR " PCI err registered\n");
+
+ return 0;
+
+err2:
+ edac_pci_del_device(&op->dev);
+err:
+ edac_pci_free_ctl_info(pci);
+ devres_release_group(&op->dev, mpc85xx_pci_err_probe);
+ return res;
+}
+
+static int mpc85xx_pci_err_remove(struct of_device *op)
+{
+ struct edac_pci_ctl_info *pci = dev_get_drvdata(&op->dev);
+ struct mpc85xx_pci_pdata *pdata = pci->pvt_info;
+
+ debugf0("%s()\n", __func__);
+
+ out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR,
+ orig_pci_err_cap_dr);
+
+ out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, orig_pci_err_en);
+
+ edac_pci_del_device(pci->dev);
+
+ if (edac_op_state == EDAC_OPSTATE_INT)
+ irq_dispose_mapping(pdata->irq);
+
+ edac_pci_free_ctl_info(pci);
+
+ return 0;
+}
+
+static struct of_device_id mpc85xx_pci_err_of_match[] = {
+ {
+ .compatible = "fsl,mpc8540-pcix",
+ },
+ {
+ .compatible = "fsl,mpc8540-pci",
+ },
+ {},
+};
+
+static struct of_platform_driver mpc85xx_pci_err_driver = {
+ .owner = THIS_MODULE,
+ .name = "mpc85xx_pci_err",
+ .match_table = mpc85xx_pci_err_of_match,
+ .probe = mpc85xx_pci_err_probe,
+ .remove = __devexit_p(mpc85xx_pci_err_remove),
+ .driver = {
+ .name = "mpc85xx_pci_err",
+ .owner = THIS_MODULE,
+ },
+};
+
+#endif /* CONFIG_PCI */
+
+/**************************** L2 Err device ***************************/
+
+/************************ L2 SYSFS parts ***********************************/
+
+static ssize_t mpc85xx_l2_inject_data_hi_show(struct edac_device_ctl_info
+ *edac_dev, char *data)
+{
+ struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
+ return sprintf(data, "0x%08x",
+ in_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJHI));
+}
+
+static ssize_t mpc85xx_l2_inject_data_lo_show(struct edac_device_ctl_info
+ *edac_dev, char *data)
+{
+ struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
+ return sprintf(data, "0x%08x",
+ in_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJLO));
+}
+
+static ssize_t mpc85xx_l2_inject_ctrl_show(struct edac_device_ctl_info
+ *edac_dev, char *data)
+{
+ struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
+ return sprintf(data, "0x%08x",
+ in_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJCTL));
+}
+
+static ssize_t mpc85xx_l2_inject_data_hi_store(struct edac_device_ctl_info
+ *edac_dev, const char *data,
+ size_t count)
+{
+ struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
+ if (isdigit(*data)) {
+ out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJHI,
+ simple_strtoul(data, NULL, 0));
+ return count;
+ }
+ return 0;
+}
+
+static ssize_t mpc85xx_l2_inject_data_lo_store(struct edac_device_ctl_info
+ *edac_dev, const char *data,
+ size_t count)
+{
+ struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
+ if (isdigit(*data)) {
+ out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJLO,
+ simple_strtoul(data, NULL, 0));
+ return count;
+ }
+ return 0;
+}
+
+static ssize_t mpc85xx_l2_inject_ctrl_store(struct edac_device_ctl_info
+ *edac_dev, const char *data,
+ size_t count)
+{
+ struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
+ if (isdigit(*data)) {
+ out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJCTL,
+ simple_strtoul(data, NULL, 0));
+ return count;
+ }
+ return 0;
+}
+
+static struct edac_dev_sysfs_attribute mpc85xx_l2_sysfs_attributes[] = {
+ {
+ .attr = {
+ .name = "inject_data_hi",
+ .mode = (S_IRUGO | S_IWUSR)
+ },
+ .show = mpc85xx_l2_inject_data_hi_show,
+ .store = mpc85xx_l2_inject_data_hi_store},
+ {
+ .attr = {
+ .name = "inject_data_lo",
+ .mode = (S_IRUGO | S_IWUSR)
+ },
+ .show = mpc85xx_l2_inject_data_lo_show,
+ .store = mpc85xx_l2_inject_data_lo_store},
+ {
+ .attr = {
+ .name = "inject_ctrl",
+ .mode = (S_IRUGO | S_IWUSR)
+ },
+ .show = mpc85xx_l2_inject_ctrl_show,
+ .store = mpc85xx_l2_inject_ctrl_store},
+
+ /* End of list */
+ {
+ .attr = {.name = NULL}
+ }
+};
+
+static void mpc85xx_set_l2_sysfs_attributes(struct edac_device_ctl_info
+ *edac_dev)
+{
+ edac_dev->sysfs_attributes = mpc85xx_l2_sysfs_attributes;
+}
+
+/***************************** L2 ops ***********************************/
+
+static void mpc85xx_l2_check(struct edac_device_ctl_info *edac_dev)
+{
+ struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
+ u32 err_detect;
+
+ err_detect = in_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET);
+
+ if (!(err_detect & L2_EDE_MASK))
+ return;
+
+ printk(KERN_ERR "ECC Error in CPU L2 cache\n");
+ printk(KERN_ERR "L2 Error Detect Register: 0x%08x\n", err_detect);
+ printk(KERN_ERR "L2 Error Capture Data High Register: 0x%08x\n",
+ in_be32(pdata->l2_vbase + MPC85XX_L2_CAPTDATAHI));
+ printk(KERN_ERR "L2 Error Capture Data Lo Register: 0x%08x\n",
+ in_be32(pdata->l2_vbase + MPC85XX_L2_CAPTDATALO));
+ printk(KERN_ERR "L2 Error Syndrome Register: 0x%08x\n",
+ in_be32(pdata->l2_vbase + MPC85XX_L2_CAPTECC));
+ printk(KERN_ERR "L2 Error Attributes Capture Register: 0x%08x\n",
+ in_be32(pdata->l2_vbase + MPC85XX_L2_ERRATTR));
+ printk(KERN_ERR "L2 Error Address Capture Register: 0x%08x\n",
+ in_be32(pdata->l2_vbase + MPC85XX_L2_ERRADDR));
+
+ /* clear error detect register */
+ out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET, err_detect);
+
+ if (err_detect & L2_EDE_CE_MASK)
+ edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name);
+
+ if (err_detect & L2_EDE_UE_MASK)
+ edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
+}
+
+static irqreturn_t mpc85xx_l2_isr(int irq, void *dev_id)
+{
+ struct edac_device_ctl_info *edac_dev = dev_id;
+ struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
+ u32 err_detect;
+
+ err_detect = in_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET);
+
+ if (!(err_detect & L2_EDE_MASK))
+ return IRQ_NONE;
+
+ mpc85xx_l2_check(edac_dev);
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit mpc85xx_l2_err_probe(struct of_device *op,
+ const struct of_device_id *match)
+{
+ struct edac_device_ctl_info *edac_dev;
+ struct mpc85xx_l2_pdata *pdata;
+ struct resource r;
+ int res;
+
+ if (!devres_open_group(&op->dev, mpc85xx_l2_err_probe, GFP_KERNEL))
+ return -ENOMEM;
+
+ edac_dev = edac_device_alloc_ctl_info(sizeof(*pdata),
+ "cpu", 1, "L", 1, 2, NULL, 0,
+ edac_dev_idx);
+ if (!edac_dev) {
+ devres_release_group(&op->dev, mpc85xx_l2_err_probe);
+ return -ENOMEM;
+ }
+
+ pdata = edac_dev->pvt_info;
+ pdata->name = "mpc85xx_l2_err";
+ pdata->irq = NO_IRQ;
+ edac_dev->dev = &op->dev;
+ dev_set_drvdata(edac_dev->dev, edac_dev);
+ edac_dev->ctl_name = pdata->name;
+ edac_dev->dev_name = pdata->name;
+
+ res = of_address_to_resource(op->node, 0, &r);
+ if (res) {
+ printk(KERN_ERR "%s: Unable to get resource for "
+ "L2 err regs\n", __func__);
+ goto err;
+ }
+
+ /* we only need the error registers */
+ r.start += 0xe00;
+
+ if (!devm_request_mem_region(&op->dev, r.start,
+ r.end - r.start + 1, pdata->name)) {
+ printk(KERN_ERR "%s: Error while requesting mem region\n",
+ __func__);
+ res = -EBUSY;
+ goto err;
+ }
+
+ pdata->l2_vbase = devm_ioremap(&op->dev, r.start, r.end - r.start + 1);
+ if (!pdata->l2_vbase) {
+ printk(KERN_ERR "%s: Unable to setup L2 err regs\n", __func__);
+ res = -ENOMEM;
+ goto err;
+ }
+
+ out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET, ~0);
+
+ orig_l2_err_disable = in_be32(pdata->l2_vbase + MPC85XX_L2_ERRDIS);
+
+ /* clear the err_dis */
+ out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDIS, 0);
+
+ edac_dev->mod_name = EDAC_MOD_STR;
+
+ if (edac_op_state == EDAC_OPSTATE_POLL)
+ edac_dev->edac_check = mpc85xx_l2_check;
+
+ mpc85xx_set_l2_sysfs_attributes(edac_dev);
+
+ pdata->edac_idx = edac_dev_idx++;
+
+ if (edac_device_add_device(edac_dev) > 0) {
+ debugf3("%s(): failed edac_device_add_device()\n", __func__);
+ goto err;
+ }
+
+ if (edac_op_state == EDAC_OPSTATE_INT) {
+ pdata->irq = irq_of_parse_and_map(op->node, 0);
+ res = devm_request_irq(&op->dev, pdata->irq,
+ mpc85xx_l2_isr, IRQF_DISABLED,
+ "[EDAC] L2 err", edac_dev);
+ if (res < 0) {
+ printk(KERN_ERR
+			       "%s: Unable to request irq %d for "
+ "MPC85xx L2 err\n", __func__, pdata->irq);
+ irq_dispose_mapping(pdata->irq);
+ res = -ENODEV;
+ goto err2;
+ }
+
+ printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for L2 Err\n",
+ pdata->irq);
+
+ edac_dev->op_state = OP_RUNNING_INTERRUPT;
+
+ out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINTEN, L2_EIE_MASK);
+ }
+
+ devres_remove_group(&op->dev, mpc85xx_l2_err_probe);
+
+ debugf3("%s(): success\n", __func__);
+ printk(KERN_INFO EDAC_MOD_STR " L2 err registered\n");
+
+ return 0;
+
+err2:
+ edac_device_del_device(&op->dev);
+err:
+ devres_release_group(&op->dev, mpc85xx_l2_err_probe);
+ edac_device_free_ctl_info(edac_dev);
+ return res;
+}
+
+static int mpc85xx_l2_err_remove(struct of_device *op)
+{
+ struct edac_device_ctl_info *edac_dev = dev_get_drvdata(&op->dev);
+ struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
+
+ debugf0("%s()\n", __func__);
+
+ if (edac_op_state == EDAC_OPSTATE_INT) {
+ out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINTEN, 0);
+ irq_dispose_mapping(pdata->irq);
+ }
+
+ out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDIS, orig_l2_err_disable);
+ edac_device_del_device(&op->dev);
+ edac_device_free_ctl_info(edac_dev);
+ return 0;
+}
+
+static struct of_device_id mpc85xx_l2_err_of_match[] = {
+ {
+ .compatible = "fsl,8540-l2-cache-controller",
+ },
+ {
+ .compatible = "fsl,8541-l2-cache-controller",
+ },
+ {
+ .compatible = "fsl,8544-l2-cache-controller",
+ },
+ {
+ .compatible = "fsl,8548-l2-cache-controller",
+ },
+ {
+ .compatible = "fsl,8555-l2-cache-controller",
+ },
+ {
+ .compatible = "fsl,8568-l2-cache-controller",
+ },
+ {
+ .compatible = "fsl,mpc8572-l2-cache-controller",
+ },
+ {},
+};
+
+static struct of_platform_driver mpc85xx_l2_err_driver = {
+ .owner = THIS_MODULE,
+ .name = "mpc85xx_l2_err",
+ .match_table = mpc85xx_l2_err_of_match,
+ .probe = mpc85xx_l2_err_probe,
+ .remove = mpc85xx_l2_err_remove,
+ .driver = {
+ .name = "mpc85xx_l2_err",
+ .owner = THIS_MODULE,
+ },
+};
+
+/**************************** MC Err device ***************************/
+
+static void mpc85xx_mc_check(struct mem_ctl_info *mci)
+{
+ struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
+ struct csrow_info *csrow;
+ u32 err_detect;
+ u32 syndrome;
+ u32 err_addr;
+ u32 pfn;
+ int row_index;
+
+ err_detect = in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT);
+	if (!err_detect)
+		return;
+
+ mpc85xx_mc_printk(mci, KERN_ERR, "Err Detect Register: %#8.8x\n",
+ err_detect);
+
+ /* no more processing if not ECC bit errors */
+ if (!(err_detect & (DDR_EDE_SBE | DDR_EDE_MBE))) {
+ out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, err_detect);
+ return;
+ }
+
+ syndrome = in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_ECC);
+ err_addr = in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_ADDRESS);
+ pfn = err_addr >> PAGE_SHIFT;
+
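+	/* find the csrow whose page range contains the faulting PFN */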
+ for (row_index = 0; row_index < mci->nr_csrows; row_index++) {
+ csrow = &mci->csrows[row_index];
+ if ((pfn >= csrow->first_page) && (pfn <= csrow->last_page))
+ break;
+ }
+
+ mpc85xx_mc_printk(mci, KERN_ERR, "Capture Data High: %#8.8x\n",
+ in_be32(pdata->mc_vbase +
+ MPC85XX_MC_CAPTURE_DATA_HI));
+ mpc85xx_mc_printk(mci, KERN_ERR, "Capture Data Low: %#8.8x\n",
+ in_be32(pdata->mc_vbase +
+ MPC85XX_MC_CAPTURE_DATA_LO));
+ mpc85xx_mc_printk(mci, KERN_ERR, "syndrome: %#8.8x\n", syndrome);
+ mpc85xx_mc_printk(mci, KERN_ERR, "err addr: %#8.8x\n", err_addr);
+ mpc85xx_mc_printk(mci, KERN_ERR, "PFN: %#8.8x\n", pfn);
+
+ /* we are out of range */
+ if (row_index == mci->nr_csrows)
+ mpc85xx_mc_printk(mci, KERN_ERR, "PFN out of range!\n");
+
+ if (err_detect & DDR_EDE_SBE)
+ edac_mc_handle_ce(mci, pfn, err_addr & PAGE_MASK,
+ syndrome, row_index, 0, mci->ctl_name);
+
+ if (err_detect & DDR_EDE_MBE)
+ edac_mc_handle_ue(mci, pfn, err_addr & PAGE_MASK,
+ row_index, mci->ctl_name);
+
+ out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, err_detect);
+}
+
+static irqreturn_t mpc85xx_mc_isr(int irq, void *dev_id)
+{
+ struct mem_ctl_info *mci = dev_id;
+ struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
+ u32 err_detect;
+
+ err_detect = in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT);
+ if (!err_detect)
+ return IRQ_NONE;
+
+ mpc85xx_mc_check(mci);
+
+ return IRQ_HANDLED;
+}
+
+static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
+{
+ struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
+ struct csrow_info *csrow;
+ u32 sdram_ctl;
+ u32 sdtype;
+ enum mem_type mtype;
+ u32 cs_bnds;
+ int index;
+
+ sdram_ctl = in_be32(pdata->mc_vbase + MPC85XX_MC_DDR_SDRAM_CFG);
+
+ sdtype = sdram_ctl & DSC_SDTYPE_MASK;
+ if (sdram_ctl & DSC_RD_EN) {
+ switch (sdtype) {
+ case DSC_SDTYPE_DDR:
+ mtype = MEM_RDDR;
+ break;
+ case DSC_SDTYPE_DDR2:
+ mtype = MEM_RDDR2;
+ break;
+ default:
+ mtype = MEM_UNKNOWN;
+ break;
+ }
+ } else {
+ switch (sdtype) {
+ case DSC_SDTYPE_DDR:
+ mtype = MEM_DDR;
+ break;
+ case DSC_SDTYPE_DDR2:
+ mtype = MEM_DDR2;
+ break;
+ default:
+ mtype = MEM_UNKNOWN;
+ break;
+ }
+ }
+
+ for (index = 0; index < mci->nr_csrows; index++) {
+ u32 start;
+ u32 end;
+
+ csrow = &mci->csrows[index];
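+		/*
+		 * CS_BNDS packs the start and end addresses as address
+		 * bits 31:20 (1 MiB granules); the low bits are filled
+		 * in below so an all-zero entry reads as start == end,
+		 * i.e. an unpopulated row.
+		 */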
+ cs_bnds = in_be32(pdata->mc_vbase + MPC85XX_MC_CS_BNDS_0 +
+ (index * MPC85XX_MC_CS_BNDS_OFS));
+ start = (cs_bnds & 0xfff0000) << 4;
+ end = ((cs_bnds & 0xfff) << 20);
+ if (start)
+ start |= 0xfffff;
+ if (end)
+ end |= 0xfffff;
+
+ if (start == end)
+ continue; /* not populated */
+
+ csrow->first_page = start >> PAGE_SHIFT;
+ csrow->last_page = end >> PAGE_SHIFT;
+ csrow->nr_pages = csrow->last_page + 1 - csrow->first_page;
+ csrow->grain = 8;
+ csrow->mtype = mtype;
+ csrow->dtype = DEV_UNKNOWN;
+ if (sdram_ctl & DSC_X32_EN)
+ csrow->dtype = DEV_X32;
+ csrow->edac_mode = EDAC_SECDED;
+ }
+}
+
+static int __devinit mpc85xx_mc_err_probe(struct of_device *op,
+ const struct of_device_id *match)
+{
+ struct mem_ctl_info *mci;
+ struct mpc85xx_mc_pdata *pdata;
+ struct resource r;
+ u32 sdram_ctl;
+ int res;
+
+ if (!devres_open_group(&op->dev, mpc85xx_mc_err_probe, GFP_KERNEL))
+ return -ENOMEM;
+
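+	/* one controller with up to four chip-select rows and one channel */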
+ mci = edac_mc_alloc(sizeof(*pdata), 4, 1, edac_mc_idx);
+ if (!mci) {
+ devres_release_group(&op->dev, mpc85xx_mc_err_probe);
+ return -ENOMEM;
+ }
+
+ pdata = mci->pvt_info;
+ pdata->name = "mpc85xx_mc_err";
+ pdata->irq = NO_IRQ;
+ mci->dev = &op->dev;
+ pdata->edac_idx = edac_mc_idx++;
+ dev_set_drvdata(mci->dev, mci);
+ mci->ctl_name = pdata->name;
+ mci->dev_name = pdata->name;
+
+ res = of_address_to_resource(op->node, 0, &r);
+ if (res) {
+ printk(KERN_ERR "%s: Unable to get resource for MC err regs\n",
+ __func__);
+ goto err;
+ }
+
+ if (!devm_request_mem_region(&op->dev, r.start,
+ r.end - r.start + 1, pdata->name)) {
+ printk(KERN_ERR "%s: Error while requesting mem region\n",
+ __func__);
+ res = -EBUSY;
+ goto err;
+ }
+
+ pdata->mc_vbase = devm_ioremap(&op->dev, r.start, r.end - r.start + 1);
+ if (!pdata->mc_vbase) {
+ printk(KERN_ERR "%s: Unable to setup MC err regs\n", __func__);
+ res = -ENOMEM;
+ goto err;
+ }
+
+ sdram_ctl = in_be32(pdata->mc_vbase + MPC85XX_MC_DDR_SDRAM_CFG);
+ if (!(sdram_ctl & DSC_ECC_EN)) {
+ /* no ECC */
+ printk(KERN_WARNING "%s: No ECC DIMMs discovered\n", __func__);
+ res = -ENODEV;
+ goto err;
+ }
+
+ debugf3("%s(): init mci\n", __func__);
+ mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_RDDR2 |
+ MEM_FLAG_DDR | MEM_FLAG_DDR2;
+ mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
+ mci->edac_cap = EDAC_FLAG_SECDED;
+ mci->mod_name = EDAC_MOD_STR;
+ mci->mod_ver = MPC85XX_REVISION;
+
+ if (edac_op_state == EDAC_OPSTATE_POLL)
+ mci->edac_check = mpc85xx_mc_check;
+
+ mci->ctl_page_to_phys = NULL;
+
+ mci->scrub_mode = SCRUB_SW_SRC;
+
+ mpc85xx_set_mc_sysfs_attributes(mci);
+
+ mpc85xx_init_csrows(mci);
+
+#ifdef CONFIG_EDAC_DEBUG
+ edac_mc_register_mcidev_debug((struct attribute **)debug_attr);
+#endif
+
+ /* store the original error disable bits */
+ orig_ddr_err_disable =
+ in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE);
+ out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE, 0);
+
+ /* clear all error bits */
+ out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, ~0);
+
+	if (edac_mc_add_mc(mci)) {
+		debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
+		res = -ENODEV;
+		goto err;
+	}
+
+ if (edac_op_state == EDAC_OPSTATE_INT) {
+ out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_INT_EN,
+ DDR_EIE_MBEE | DDR_EIE_SBEE);
+
+ /* store the original error management threshold */
+ orig_ddr_err_sbe = in_be32(pdata->mc_vbase +
+ MPC85XX_MC_ERR_SBE) & 0xff0000;
+
+ /* set threshold to 1 error per interrupt */
+ out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_SBE, 0x10000);
+
+ /* register interrupts */
+ pdata->irq = irq_of_parse_and_map(op->node, 0);
+ res = devm_request_irq(&op->dev, pdata->irq,
+ mpc85xx_mc_isr,
+ IRQF_DISABLED | IRQF_SHARED,
+ "[EDAC] MC err", mci);
+ if (res < 0) {
+ printk(KERN_ERR "%s: Unable to request irq %d for "
+ "MPC85xx DRAM ERR\n", __func__, pdata->irq);
+ irq_dispose_mapping(pdata->irq);
+ res = -ENODEV;
+ goto err2;
+ }
+
+ printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for MC\n",
+ pdata->irq);
+ }
+
+ devres_remove_group(&op->dev, mpc85xx_mc_err_probe);
+ debugf3("%s(): success\n", __func__);
+ printk(KERN_INFO EDAC_MOD_STR " MC err registered\n");
+
+ return 0;
+
+err2:
+ edac_mc_del_mc(&op->dev);
+err:
+ devres_release_group(&op->dev, mpc85xx_mc_err_probe);
+ edac_mc_free(mci);
+ return res;
+}
+
+static int mpc85xx_mc_err_remove(struct of_device *op)
+{
+ struct mem_ctl_info *mci = dev_get_drvdata(&op->dev);
+ struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
+
+ debugf0("%s()\n", __func__);
+
+ if (edac_op_state == EDAC_OPSTATE_INT) {
+ out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_INT_EN, 0);
+ irq_dispose_mapping(pdata->irq);
+ }
+
+ out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE,
+ orig_ddr_err_disable);
+ out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_SBE, orig_ddr_err_sbe);
+
+ edac_mc_del_mc(&op->dev);
+ edac_mc_free(mci);
+ return 0;
+}
+
+static struct of_device_id mpc85xx_mc_err_of_match[] = {
+ {
+ .compatible = "fsl,8540-memory-controller",
+ },
+ {
+ .compatible = "fsl,8541-memory-controller",
+ },
+ {
+ .compatible = "fsl,8544-memory-controller",
+ },
+ {
+ .compatible = "fsl,8548-memory-controller",
+ },
+ {
+ .compatible = "fsl,8555-memory-controller",
+ },
+ {
+ .compatible = "fsl,8568-memory-controller",
+ },
+ {
+ .compatible = "fsl,mpc8572-memory-controller",
+ },
+ {},
+};
+
+static struct of_platform_driver mpc85xx_mc_err_driver = {
+ .owner = THIS_MODULE,
+ .name = "mpc85xx_mc_err",
+ .match_table = mpc85xx_mc_err_of_match,
+ .probe = mpc85xx_mc_err_probe,
+ .remove = mpc85xx_mc_err_remove,
+ .driver = {
+ .name = "mpc85xx_mc_err",
+ .owner = THIS_MODULE,
+ },
+};
+
+
+static void __init mpc85xx_mc_clear_rfxe(void *data)
+{
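+	/* save this CPU's HID1, then clear the RFXE bit (0x20000) */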
+ orig_hid1[smp_processor_id()] = mfspr(SPRN_HID1);
+ mtspr(SPRN_HID1, (orig_hid1[smp_processor_id()] & ~0x20000));
+}
+
+
+static int __init mpc85xx_mc_init(void)
+{
+ int res = 0;
+
+ printk(KERN_INFO "Freescale(R) MPC85xx EDAC driver, "
+ "(C) 2006 Montavista Software\n");
+
+ /* make sure error reporting method is sane */
+ switch (edac_op_state) {
+ case EDAC_OPSTATE_POLL:
+ case EDAC_OPSTATE_INT:
+ break;
+ default:
+ edac_op_state = EDAC_OPSTATE_INT;
+ break;
+ }
+
+ res = of_register_platform_driver(&mpc85xx_mc_err_driver);
+ if (res)
+		printk(KERN_WARNING EDAC_MOD_STR " MC fails to register\n");
+
+ res = of_register_platform_driver(&mpc85xx_l2_err_driver);
+ if (res)
+		printk(KERN_WARNING EDAC_MOD_STR " L2 fails to register\n");
+
+#ifdef CONFIG_PCI
+ res = of_register_platform_driver(&mpc85xx_pci_err_driver);
+ if (res)
+		printk(KERN_WARNING EDAC_MOD_STR " PCI fails to register\n");
+#endif
+
+	/*
+	 * clear HID1[RFXE] to disable the machine check exception
+	 * so the EDAC interrupt handler can catch the error instead
+	 */
+ if (edac_op_state == EDAC_OPSTATE_INT)
+ on_each_cpu(mpc85xx_mc_clear_rfxe, NULL, 0);
+
+ return 0;
+}
+
+module_init(mpc85xx_mc_init);
+
+static void __exit mpc85xx_mc_restore_hid1(void *data)
+{
+ mtspr(SPRN_HID1, orig_hid1[smp_processor_id()]);
+}
+
+static void __exit mpc85xx_mc_exit(void)
+{
+ on_each_cpu(mpc85xx_mc_restore_hid1, NULL, 0);
+#ifdef CONFIG_PCI
+ of_unregister_platform_driver(&mpc85xx_pci_err_driver);
+#endif
+ of_unregister_platform_driver(&mpc85xx_l2_err_driver);
+ of_unregister_platform_driver(&mpc85xx_mc_err_driver);
+}
+
+module_exit(mpc85xx_mc_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Montavista Software, Inc.");
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state,
+ "EDAC Error Reporting state: 0=Poll, 2=Interrupt");
diff --git a/drivers/edac/mpc85xx_edac.h b/drivers/edac/mpc85xx_edac.h
new file mode 100644
index 0000000..135b353
--- /dev/null
+++ b/drivers/edac/mpc85xx_edac.h
@@ -0,0 +1,162 @@
+/*
+ * Freescale MPC85xx Memory Controller kernel module
+ * Author: Dave Jiang <djiang@mvista.com>
+ *
+ * 2006-2007 (c) MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ *
+ */
+#ifndef _MPC85XX_EDAC_H_
+#define _MPC85XX_EDAC_H_
+
+#define MPC85XX_REVISION " Ver: 2.0.0 " __DATE__
+#define EDAC_MOD_STR "MPC85xx_edac"
+
+#define mpc85xx_printk(level, fmt, arg...) \
+ edac_printk(level, "MPC85xx", fmt, ##arg)
+
+#define mpc85xx_mc_printk(mci, level, fmt, arg...) \
+ edac_mc_chipset_printk(mci, level, "MPC85xx", fmt, ##arg)
+
+/*
+ * DRAM error defines
+ */
+
+/* DDR_SDRAM_CFG */
+#define MPC85XX_MC_DDR_SDRAM_CFG 0x0110
+#define MPC85XX_MC_CS_BNDS_0 0x0000
+#define MPC85XX_MC_CS_BNDS_1 0x0008
+#define MPC85XX_MC_CS_BNDS_2 0x0010
+#define MPC85XX_MC_CS_BNDS_3 0x0018
+#define MPC85XX_MC_CS_BNDS_OFS 0x0008
+
+#define MPC85XX_MC_DATA_ERR_INJECT_HI 0x0e00
+#define MPC85XX_MC_DATA_ERR_INJECT_LO 0x0e04
+#define MPC85XX_MC_ECC_ERR_INJECT 0x0e08
+#define MPC85XX_MC_CAPTURE_DATA_HI 0x0e20
+#define MPC85XX_MC_CAPTURE_DATA_LO 0x0e24
+#define MPC85XX_MC_CAPTURE_ECC 0x0e28
+#define MPC85XX_MC_ERR_DETECT 0x0e40
+#define MPC85XX_MC_ERR_DISABLE 0x0e44
+#define MPC85XX_MC_ERR_INT_EN 0x0e48
+#define MPC85XX_MC_CAPTURE_ATRIBUTES 0x0e4c
+#define MPC85XX_MC_CAPTURE_ADDRESS 0x0e50
+#define MPC85XX_MC_ERR_SBE 0x0e58
+
+#define DSC_MEM_EN 0x80000000
+#define DSC_ECC_EN 0x20000000
+#define DSC_RD_EN 0x10000000
+
+#define DSC_SDTYPE_MASK 0x07000000
+
+#define DSC_SDTYPE_DDR 0x02000000
+#define DSC_SDTYPE_DDR2 0x03000000
+#define DSC_X32_EN 0x00000020
+
+/* Err_Int_En */
+#define DDR_EIE_MSEE 0x1 /* memory select */
+#define DDR_EIE_SBEE 0x4 /* single-bit ECC error */
+#define DDR_EIE_MBEE 0x8 /* multi-bit ECC error */
+
+/* Err_Detect */
+#define DDR_EDE_MSE 0x1 /* memory select */
+#define DDR_EDE_SBE 0x4 /* single-bit ECC error */
+#define DDR_EDE_MBE 0x8 /* multi-bit ECC error */
+#define DDR_EDE_MME 0x80000000 /* multiple memory errors */
+
+/* Err_Disable */
+#define DDR_EDI_MSED 0x1 /* memory select disable */
+#define DDR_EDI_SBED 0x4 /* single-bit ECC error disable */
+#define DDR_EDI_MBED 0x8 /* multi-bit ECC error disable */
+
+/*
+ * L2 Err defines
+ */
+#define MPC85XX_L2_ERRINJHI 0x0000
+#define MPC85XX_L2_ERRINJLO 0x0004
+#define MPC85XX_L2_ERRINJCTL 0x0008
+#define MPC85XX_L2_CAPTDATAHI 0x0020
+#define MPC85XX_L2_CAPTDATALO 0x0024
+#define MPC85XX_L2_CAPTECC 0x0028
+#define MPC85XX_L2_ERRDET 0x0040
+#define MPC85XX_L2_ERRDIS 0x0044
+#define MPC85XX_L2_ERRINTEN 0x0048
+#define MPC85XX_L2_ERRATTR 0x004c
+#define MPC85XX_L2_ERRADDR 0x0050
+#define MPC85XX_L2_ERRCTL 0x0058
+
+/* Error Interrupt Enable */
+#define L2_EIE_L2CFGINTEN 0x1
+#define L2_EIE_SBECCINTEN 0x4
+#define L2_EIE_MBECCINTEN 0x8
+#define L2_EIE_TPARINTEN 0x10
+#define L2_EIE_MASK (L2_EIE_L2CFGINTEN | L2_EIE_SBECCINTEN | \
+ L2_EIE_MBECCINTEN | L2_EIE_TPARINTEN)
+
+/* Error Detect */
+#define L2_EDE_L2CFGERR 0x1
+#define L2_EDE_SBECCERR 0x4
+#define L2_EDE_MBECCERR 0x8
+#define L2_EDE_TPARERR 0x10
+#define L2_EDE_MULL2ERR 0x80000000
+
+#define L2_EDE_CE_MASK L2_EDE_SBECCERR
+#define L2_EDE_UE_MASK (L2_EDE_L2CFGERR | L2_EDE_MBECCERR | \
+ L2_EDE_TPARERR)
+#define L2_EDE_MASK (L2_EDE_L2CFGERR | L2_EDE_SBECCERR | \
+ L2_EDE_MBECCERR | L2_EDE_TPARERR | L2_EDE_MULL2ERR)
+
+/*
+ * PCI Err defines
+ */
+#define PCI_EDE_TOE 0x00000001
+#define PCI_EDE_SCM 0x00000002
+#define PCI_EDE_IRMSV 0x00000004
+#define PCI_EDE_ORMSV 0x00000008
+#define PCI_EDE_OWMSV 0x00000010
+#define PCI_EDE_TGT_ABRT 0x00000020
+#define PCI_EDE_MST_ABRT 0x00000040
+#define PCI_EDE_TGT_PERR 0x00000080
+#define PCI_EDE_MST_PERR 0x00000100
+#define PCI_EDE_RCVD_SERR 0x00000200
+#define PCI_EDE_ADDR_PERR 0x00000400
+#define PCI_EDE_MULTI_ERR 0x80000000
+
+#define PCI_EDE_PERR_MASK (PCI_EDE_TGT_PERR | PCI_EDE_MST_PERR | \
+ PCI_EDE_ADDR_PERR)
+
+#define MPC85XX_PCI_ERR_DR 0x0000
+#define MPC85XX_PCI_ERR_CAP_DR 0x0004
+#define MPC85XX_PCI_ERR_EN 0x0008
+#define MPC85XX_PCI_ERR_ATTRIB 0x000c
+#define MPC85XX_PCI_ERR_ADDR 0x0010
+#define MPC85XX_PCI_ERR_EXT_ADDR 0x0014
+#define MPC85XX_PCI_ERR_DL 0x0018
+#define MPC85XX_PCI_ERR_DH 0x001c
+#define MPC85XX_PCI_GAS_TIMR 0x0020
+#define MPC85XX_PCI_PCIX_TIMR 0x0024
+
+struct mpc85xx_mc_pdata {
+ char *name;
+ int edac_idx;
+ void __iomem *mc_vbase;
+ int irq;
+};
+
+struct mpc85xx_l2_pdata {
+ char *name;
+ int edac_idx;
+ void __iomem *l2_vbase;
+ int irq;
+};
+
+struct mpc85xx_pci_pdata {
+ char *name;
+ int edac_idx;
+ void __iomem *pci_vbase;
+ int irq;
+};
+
+#endif
diff --git a/drivers/edac/mv64x60_edac.c b/drivers/edac/mv64x60_edac.c
new file mode 100644
index 0000000..083ce8d
--- /dev/null
+++ b/drivers/edac/mv64x60_edac.c
@@ -0,0 +1,890 @@
+/*
+ * Marvell MV64x60 Memory Controller kernel module for PPC platforms
+ *
+ * Author: Dave Jiang <djiang@mvista.com>
+ *
+ * 2006-2007 (c) MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/edac.h>
+
+#include "edac_core.h"
+#include "edac_module.h"
+#include "mv64x60_edac.h"
+
+static const char *mv64x60_ctl_name = "MV64x60";
+static int edac_dev_idx;
+static int edac_pci_idx;
+static int edac_mc_idx;
+
+/*********************** PCI err device **********************************/
+#ifdef CONFIG_PCI
+static void mv64x60_pci_check(struct edac_pci_ctl_info *pci)
+{
+ struct mv64x60_pci_pdata *pdata = pci->pvt_info;
+ u32 cause;
+
+ cause = in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE);
+ if (!cause)
+ return;
+
+ printk(KERN_ERR "Error in PCI %d Interface\n", pdata->pci_hose);
+ printk(KERN_ERR "Cause register: 0x%08x\n", cause);
+ printk(KERN_ERR "Address Low: 0x%08x\n",
+ in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_ADDR_LO));
+ printk(KERN_ERR "Address High: 0x%08x\n",
+ in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_ADDR_HI));
+ printk(KERN_ERR "Attribute: 0x%08x\n",
+ in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_ATTR));
+ printk(KERN_ERR "Command: 0x%08x\n",
+ in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CMD));
+ out_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE, ~cause);
+
+	if (cause & MV64X60_PCI_PE_MASK)
+		edac_pci_handle_pe(pci, pci->ctl_name);
+	else
+		edac_pci_handle_npe(pci, pci->ctl_name);
+}
+
+static irqreturn_t mv64x60_pci_isr(int irq, void *dev_id)
+{
+ struct edac_pci_ctl_info *pci = dev_id;
+ struct mv64x60_pci_pdata *pdata = pci->pvt_info;
+ u32 val;
+
+ val = in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE);
+ if (!val)
+ return IRQ_NONE;
+
+ mv64x60_pci_check(pci);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Bit 0 of MV64x60_PCIx_ERR_MASK does not exist on the 64360 and because of
+ * errata FEr-#11 and FEr-#16 for the 64460, it should be 0 on that chip as
+ * well. IOW, don't set bit 0.
+ */
+
+/* Erratum FEr PCI-#16: clear bit 0 of PCI SERRn Mask reg. */
+static int __init mv64x60_pci_fixup(struct platform_device *pdev)
+{
+ struct resource *r;
+ void __iomem *pci_serr;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!r) {
+ printk(KERN_ERR "%s: Unable to get resource for "
+ "PCI err regs\n", __func__);
+ return -ENOENT;
+ }
+
+ pci_serr = ioremap(r->start, r->end - r->start + 1);
+ if (!pci_serr)
+ return -ENOMEM;
+
+ out_le32(pci_serr, in_le32(pci_serr) & ~0x1);
+ iounmap(pci_serr);
+
+ return 0;
+}
+
+static int __devinit mv64x60_pci_err_probe(struct platform_device *pdev)
+{
+ struct edac_pci_ctl_info *pci;
+ struct mv64x60_pci_pdata *pdata;
+ struct resource *r;
+ int res = 0;
+
+ if (!devres_open_group(&pdev->dev, mv64x60_pci_err_probe, GFP_KERNEL))
+ return -ENOMEM;
+
+	pci = edac_pci_alloc_ctl_info(sizeof(*pdata), "mv64x60_pci_err");
+	if (!pci) {
+		devres_release_group(&pdev->dev, mv64x60_pci_err_probe);
+		return -ENOMEM;
+	}
+
+ pdata = pci->pvt_info;
+
+ pdata->pci_hose = pdev->id;
+	pdata->name = "mv64x60_pci_err";
+ pdata->irq = NO_IRQ;
+ platform_set_drvdata(pdev, pci);
+ pci->dev = &pdev->dev;
+ pci->dev_name = pdev->dev.bus_id;
+ pci->mod_name = EDAC_MOD_STR;
+ pci->ctl_name = pdata->name;
+
+ if (edac_op_state == EDAC_OPSTATE_POLL)
+ pci->edac_check = mv64x60_pci_check;
+
+ pdata->edac_idx = edac_pci_idx++;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ printk(KERN_ERR "%s: Unable to get resource for "
+ "PCI err regs\n", __func__);
+ res = -ENOENT;
+ goto err;
+ }
+
+ if (!devm_request_mem_region(&pdev->dev,
+ r->start,
+ r->end - r->start + 1,
+ pdata->name)) {
+ printk(KERN_ERR "%s: Error while requesting mem region\n",
+ __func__);
+ res = -EBUSY;
+ goto err;
+ }
+
+ pdata->pci_vbase = devm_ioremap(&pdev->dev,
+ r->start,
+ r->end - r->start + 1);
+ if (!pdata->pci_vbase) {
+ printk(KERN_ERR "%s: Unable to setup PCI err regs\n", __func__);
+ res = -ENOMEM;
+ goto err;
+ }
+
+ res = mv64x60_pci_fixup(pdev);
+ if (res < 0) {
+ printk(KERN_ERR "%s: PCI fixup failed\n", __func__);
+ goto err;
+ }
+
+ out_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE, 0);
+ out_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_MASK, 0);
+ out_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_MASK,
+ MV64X60_PCIx_ERR_MASK_VAL);
+
+	if (edac_pci_add_device(pci, pdata->edac_idx) > 0) {
+		debugf3("%s(): failed edac_pci_add_device()\n", __func__);
+		res = -ENODEV;
+		goto err;
+	}
+
+ if (edac_op_state == EDAC_OPSTATE_INT) {
+ pdata->irq = platform_get_irq(pdev, 0);
+ res = devm_request_irq(&pdev->dev,
+ pdata->irq,
+ mv64x60_pci_isr,
+ IRQF_DISABLED,
+ "[EDAC] PCI err",
+ pci);
+ if (res < 0) {
+ printk(KERN_ERR "%s: Unable to request irq %d for "
+ "MV64x60 PCI ERR\n", __func__, pdata->irq);
+ res = -ENODEV;
+ goto err2;
+ }
+ printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for PCI Err\n",
+ pdata->irq);
+ }
+
+ devres_remove_group(&pdev->dev, mv64x60_pci_err_probe);
+
+ /* get this far and it's successful */
+ debugf3("%s(): success\n", __func__);
+
+ return 0;
+
+err2:
+ edac_pci_del_device(&pdev->dev);
+err:
+ edac_pci_free_ctl_info(pci);
+ devres_release_group(&pdev->dev, mv64x60_pci_err_probe);
+ return res;
+}
+
+static int mv64x60_pci_err_remove(struct platform_device *pdev)
+{
+ struct edac_pci_ctl_info *pci = platform_get_drvdata(pdev);
+
+ debugf0("%s()\n", __func__);
+
+ edac_pci_del_device(&pdev->dev);
+
+ edac_pci_free_ctl_info(pci);
+
+ return 0;
+}
+
+static struct platform_driver mv64x60_pci_err_driver = {
+ .probe = mv64x60_pci_err_probe,
+ .remove = __devexit_p(mv64x60_pci_err_remove),
+ .driver = {
+ .name = "mv64x60_pci_err",
+ }
+};
+
+#endif /* CONFIG_PCI */
+
+/*********************** SRAM err device **********************************/
+static void mv64x60_sram_check(struct edac_device_ctl_info *edac_dev)
+{
+ struct mv64x60_sram_pdata *pdata = edac_dev->pvt_info;
+ u32 cause;
+
+ cause = in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE);
+ if (!cause)
+ return;
+
+ printk(KERN_ERR "Error in internal SRAM\n");
+ printk(KERN_ERR "Cause register: 0x%08x\n", cause);
+ printk(KERN_ERR "Address Low: 0x%08x\n",
+ in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_ADDR_LO));
+ printk(KERN_ERR "Address High: 0x%08x\n",
+ in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_ADDR_HI));
+ printk(KERN_ERR "Data Low: 0x%08x\n",
+ in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_DATA_LO));
+ printk(KERN_ERR "Data High: 0x%08x\n",
+ in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_DATA_HI));
+ printk(KERN_ERR "Parity: 0x%08x\n",
+ in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_PARITY));
+ out_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE, 0);
+
+ edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
+}
+
+static irqreturn_t mv64x60_sram_isr(int irq, void *dev_id)
+{
+ struct edac_device_ctl_info *edac_dev = dev_id;
+ struct mv64x60_sram_pdata *pdata = edac_dev->pvt_info;
+ u32 cause;
+
+ cause = in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE);
+ if (!cause)
+ return IRQ_NONE;
+
+ mv64x60_sram_check(edac_dev);
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit mv64x60_sram_err_probe(struct platform_device *pdev)
+{
+ struct edac_device_ctl_info *edac_dev;
+ struct mv64x60_sram_pdata *pdata;
+ struct resource *r;
+ int res = 0;
+
+ if (!devres_open_group(&pdev->dev, mv64x60_sram_err_probe, GFP_KERNEL))
+ return -ENOMEM;
+
+ edac_dev = edac_device_alloc_ctl_info(sizeof(*pdata),
+ "sram", 1, NULL, 0, 0, NULL, 0,
+ edac_dev_idx);
+ if (!edac_dev) {
+ devres_release_group(&pdev->dev, mv64x60_sram_err_probe);
+ return -ENOMEM;
+ }
+
+ pdata = edac_dev->pvt_info;
+ pdata->name = "mv64x60_sram_err";
+ pdata->irq = NO_IRQ;
+ edac_dev->dev = &pdev->dev;
+ platform_set_drvdata(pdev, edac_dev);
+ edac_dev->dev_name = pdev->dev.bus_id;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ printk(KERN_ERR "%s: Unable to get resource for "
+ "SRAM err regs\n", __func__);
+ res = -ENOENT;
+ goto err;
+ }
+
+ if (!devm_request_mem_region(&pdev->dev,
+ r->start,
+ r->end - r->start + 1,
+ pdata->name)) {
+		printk(KERN_ERR "%s: Error while requesting mem region\n",
+ __func__);
+ res = -EBUSY;
+ goto err;
+ }
+
+ pdata->sram_vbase = devm_ioremap(&pdev->dev,
+ r->start,
+ r->end - r->start + 1);
+ if (!pdata->sram_vbase) {
+ printk(KERN_ERR "%s: Unable to setup SRAM err regs\n",
+ __func__);
+ res = -ENOMEM;
+ goto err;
+ }
+
+ /* setup SRAM err registers */
+ out_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE, 0);
+
+ edac_dev->mod_name = EDAC_MOD_STR;
+ edac_dev->ctl_name = pdata->name;
+
+ if (edac_op_state == EDAC_OPSTATE_POLL)
+ edac_dev->edac_check = mv64x60_sram_check;
+
+ pdata->edac_idx = edac_dev_idx++;
+
+	if (edac_device_add_device(edac_dev) > 0) {
+		debugf3("%s(): failed edac_device_add_device()\n", __func__);
+		res = -ENODEV;
+		goto err;
+	}
+
+ if (edac_op_state == EDAC_OPSTATE_INT) {
+ pdata->irq = platform_get_irq(pdev, 0);
+ res = devm_request_irq(&pdev->dev,
+ pdata->irq,
+ mv64x60_sram_isr,
+ IRQF_DISABLED,
+ "[EDAC] SRAM err",
+ edac_dev);
+ if (res < 0) {
+ printk(KERN_ERR
+ "%s: Unable to request irq %d for "
+ "MV64x60 SRAM ERR\n", __func__, pdata->irq);
+ res = -ENODEV;
+ goto err2;
+ }
+
+ printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for SRAM Err\n",
+ pdata->irq);
+ }
+
+ devres_remove_group(&pdev->dev, mv64x60_sram_err_probe);
+
+ /* get this far and it's successful */
+ debugf3("%s(): success\n", __func__);
+
+ return 0;
+
+err2:
+ edac_device_del_device(&pdev->dev);
+err:
+ devres_release_group(&pdev->dev, mv64x60_sram_err_probe);
+ edac_device_free_ctl_info(edac_dev);
+ return res;
+}
+
+static int mv64x60_sram_err_remove(struct platform_device *pdev)
+{
+ struct edac_device_ctl_info *edac_dev = platform_get_drvdata(pdev);
+
+ debugf0("%s()\n", __func__);
+
+ edac_device_del_device(&pdev->dev);
+ edac_device_free_ctl_info(edac_dev);
+
+ return 0;
+}
+
+static struct platform_driver mv64x60_sram_err_driver = {
+ .probe = mv64x60_sram_err_probe,
+ .remove = mv64x60_sram_err_remove,
+ .driver = {
+ .name = "mv64x60_sram_err",
+ }
+};
+
+/*********************** CPU err device **********************************/
+static void mv64x60_cpu_check(struct edac_device_ctl_info *edac_dev)
+{
+ struct mv64x60_cpu_pdata *pdata = edac_dev->pvt_info;
+ u32 cause;
+
+ cause = in_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE) &
+ MV64x60_CPU_CAUSE_MASK;
+ if (!cause)
+ return;
+
+ printk(KERN_ERR "Error on CPU interface\n");
+ printk(KERN_ERR "Cause register: 0x%08x\n", cause);
+ printk(KERN_ERR "Address Low: 0x%08x\n",
+ in_le32(pdata->cpu_vbase[0] + MV64x60_CPU_ERR_ADDR_LO));
+ printk(KERN_ERR "Address High: 0x%08x\n",
+ in_le32(pdata->cpu_vbase[0] + MV64x60_CPU_ERR_ADDR_HI));
+ printk(KERN_ERR "Data Low: 0x%08x\n",
+ in_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_DATA_LO));
+ printk(KERN_ERR "Data High: 0x%08x\n",
+ in_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_DATA_HI));
+ printk(KERN_ERR "Parity: 0x%08x\n",
+ in_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_PARITY));
+ out_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE, 0);
+
+ edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
+}
+
+static irqreturn_t mv64x60_cpu_isr(int irq, void *dev_id)
+{
+ struct edac_device_ctl_info *edac_dev = dev_id;
+ struct mv64x60_cpu_pdata *pdata = edac_dev->pvt_info;
+ u32 cause;
+
+ cause = in_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE) &
+ MV64x60_CPU_CAUSE_MASK;
+ if (!cause)
+ return IRQ_NONE;
+
+ mv64x60_cpu_check(edac_dev);
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit mv64x60_cpu_err_probe(struct platform_device *pdev)
+{
+ struct edac_device_ctl_info *edac_dev;
+ struct resource *r;
+ struct mv64x60_cpu_pdata *pdata;
+ int res = 0;
+
+ if (!devres_open_group(&pdev->dev, mv64x60_cpu_err_probe, GFP_KERNEL))
+ return -ENOMEM;
+
+ edac_dev = edac_device_alloc_ctl_info(sizeof(*pdata),
+ "cpu", 1, NULL, 0, 0, NULL, 0,
+ edac_dev_idx);
+ if (!edac_dev) {
+ devres_release_group(&pdev->dev, mv64x60_cpu_err_probe);
+ return -ENOMEM;
+ }
+
+ pdata = edac_dev->pvt_info;
+ pdata->name = "mv64x60_cpu_err";
+ pdata->irq = NO_IRQ;
+ edac_dev->dev = &pdev->dev;
+ platform_set_drvdata(pdev, edac_dev);
+ edac_dev->dev_name = pdev->dev.bus_id;
+
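+	/*
+	 * The CPU error registers live in two separate blocks (address
+	 * capture vs. data/parity/cause), hence the two mem resources.
+	 */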
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ printk(KERN_ERR "%s: Unable to get resource for "
+ "CPU err regs\n", __func__);
+ res = -ENOENT;
+ goto err;
+ }
+
+ if (!devm_request_mem_region(&pdev->dev,
+ r->start,
+ r->end - r->start + 1,
+ pdata->name)) {
+ printk(KERN_ERR "%s: Error while requesting mem region\n",
+ __func__);
+ res = -EBUSY;
+ goto err;
+ }
+
+ pdata->cpu_vbase[0] = devm_ioremap(&pdev->dev,
+ r->start,
+ r->end - r->start + 1);
+ if (!pdata->cpu_vbase[0]) {
+ printk(KERN_ERR "%s: Unable to setup CPU err regs\n", __func__);
+ res = -ENOMEM;
+ goto err;
+ }
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!r) {
+ printk(KERN_ERR "%s: Unable to get resource for "
+ "CPU err regs\n", __func__);
+ res = -ENOENT;
+ goto err;
+ }
+
+ if (!devm_request_mem_region(&pdev->dev,
+ r->start,
+ r->end - r->start + 1,
+ pdata->name)) {
+ printk(KERN_ERR "%s: Error while requesting mem region\n",
+ __func__);
+ res = -EBUSY;
+ goto err;
+ }
+
+ pdata->cpu_vbase[1] = devm_ioremap(&pdev->dev,
+ r->start,
+ r->end - r->start + 1);
+ if (!pdata->cpu_vbase[1]) {
+ printk(KERN_ERR "%s: Unable to setup CPU err regs\n", __func__);
+ res = -ENOMEM;
+ goto err;
+ }
+
+ /* setup CPU err registers */
+ out_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE, 0);
+ out_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_MASK, 0);
+ out_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_MASK, 0x000000ff);
+
+ edac_dev->mod_name = EDAC_MOD_STR;
+ edac_dev->ctl_name = pdata->name;
+ if (edac_op_state == EDAC_OPSTATE_POLL)
+ edac_dev->edac_check = mv64x60_cpu_check;
+
+ pdata->edac_idx = edac_dev_idx++;
+
+	if (edac_device_add_device(edac_dev) > 0) {
+		debugf3("%s(): failed edac_device_add_device()\n", __func__);
+		res = -ENODEV;
+		goto err;
+	}
+
+ if (edac_op_state == EDAC_OPSTATE_INT) {
+ pdata->irq = platform_get_irq(pdev, 0);
+ res = devm_request_irq(&pdev->dev,
+ pdata->irq,
+ mv64x60_cpu_isr,
+ IRQF_DISABLED,
+ "[EDAC] CPU err",
+ edac_dev);
+ if (res < 0) {
+ printk(KERN_ERR
+ "%s: Unable to request irq %d for MV64x60 "
+ "CPU ERR\n", __func__, pdata->irq);
+ res = -ENODEV;
+ goto err2;
+ }
+
+ printk(KERN_INFO EDAC_MOD_STR
+ " acquired irq %d for CPU Err\n", pdata->irq);
+ }
+
+ devres_remove_group(&pdev->dev, mv64x60_cpu_err_probe);
+
+ /* get this far and it's successful */
+ debugf3("%s(): success\n", __func__);
+
+ return 0;
+
+err2:
+ edac_device_del_device(&pdev->dev);
+err:
+ devres_release_group(&pdev->dev, mv64x60_cpu_err_probe);
+ edac_device_free_ctl_info(edac_dev);
+ return res;
+}
+
+static int mv64x60_cpu_err_remove(struct platform_device *pdev)
+{
+ struct edac_device_ctl_info *edac_dev = platform_get_drvdata(pdev);
+
+ debugf0("%s()\n", __func__);
+
+ edac_device_del_device(&pdev->dev);
+ edac_device_free_ctl_info(edac_dev);
+ return 0;
+}
+
+static struct platform_driver mv64x60_cpu_err_driver = {
+ .probe = mv64x60_cpu_err_probe,
+ .remove = mv64x60_cpu_err_remove,
+ .driver = {
+ .name = "mv64x60_cpu_err",
+ }
+};
+
+/*********************** DRAM err device **********************************/
+
+static void mv64x60_mc_check(struct mem_ctl_info *mci)
+{
+ struct mv64x60_mc_pdata *pdata = mci->pvt_info;
+ u32 reg;
+ u32 err_addr;
+ u32 sdram_ecc;
+ u32 comp_ecc;
+ u32 syndrome;
+
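+	/* the latched address doubles as the error flag; its low two
+	 * bits encode the error type rather than address bits */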
+ reg = in_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR);
+ if (!reg)
+ return;
+
+ err_addr = reg & ~0x3;
+ sdram_ecc = in_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_RCVD);
+ comp_ecc = in_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CALC);
+ syndrome = sdram_ecc ^ comp_ecc;
+
+ /* first bit clear in ECC Err Reg, 1 bit error, correctable by HW */
+ if (!(reg & 0x1))
+ edac_mc_handle_ce(mci, err_addr >> PAGE_SHIFT,
+ err_addr & PAGE_MASK, syndrome, 0, 0,
+ mci->ctl_name);
+ else /* 2 bit error, UE */
+ edac_mc_handle_ue(mci, err_addr >> PAGE_SHIFT,
+ err_addr & PAGE_MASK, 0, mci->ctl_name);
+
+ /* clear the error */
+ out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR, 0);
+}
+
+static irqreturn_t mv64x60_mc_isr(int irq, void *dev_id)
+{
+ struct mem_ctl_info *mci = dev_id;
+ struct mv64x60_mc_pdata *pdata = mci->pvt_info;
+ u32 reg;
+
+ reg = in_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR);
+ if (!reg)
+ return IRQ_NONE;
+
+ /* writing 0's to the ECC err addr in check function clears irq */
+ mv64x60_mc_check(mci);
+
+ return IRQ_HANDLED;
+}
+
+static void get_total_mem(struct mv64x60_mc_pdata *pdata)
+{
+ struct device_node *np = NULL;
+ const unsigned int *reg;
+
+ np = of_find_node_by_type(NULL, "memory");
+ if (!np)
+ return;
+
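+	/* assumes a simple memory node with reg = <base size>, so
+	 * cell 1 holds the size */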
+	reg = of_get_property(np, "reg", NULL);
+	if (reg)
+		pdata->total_mem = reg[1];
+
+	of_node_put(np);
+}
+
+static void mv64x60_init_csrows(struct mem_ctl_info *mci,
+ struct mv64x60_mc_pdata *pdata)
+{
+ struct csrow_info *csrow;
+ u32 devtype;
+ u32 ctl;
+
+ get_total_mem(pdata);
+
+ ctl = in_le32(pdata->mc_vbase + MV64X60_SDRAM_CONFIG);
+
+ csrow = &mci->csrows[0];
+ csrow->first_page = 0;
+ csrow->nr_pages = pdata->total_mem >> PAGE_SHIFT;
+ csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
+ csrow->grain = 8;
+
+ csrow->mtype = (ctl & MV64X60_SDRAM_REGISTERED) ? MEM_RDDR : MEM_DDR;
+
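+	/* SDRAM_CONFIG bits 21:20 encode the DRAM device width */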
+ devtype = (ctl >> 20) & 0x3;
+ switch (devtype) {
+ case 0x0:
+ csrow->dtype = DEV_X32;
+ break;
+ case 0x2: /* could be X8 too, but no way to tell */
+ csrow->dtype = DEV_X16;
+ break;
+ case 0x3:
+ csrow->dtype = DEV_X4;
+ break;
+ default:
+ csrow->dtype = DEV_UNKNOWN;
+ break;
+ }
+
+ csrow->edac_mode = EDAC_SECDED;
+}
+
+static int __devinit mv64x60_mc_err_probe(struct platform_device *pdev)
+{
+ struct mem_ctl_info *mci;
+ struct mv64x60_mc_pdata *pdata;
+ struct resource *r;
+ u32 ctl;
+ int res = 0;
+
+ if (!devres_open_group(&pdev->dev, mv64x60_mc_err_probe, GFP_KERNEL))
+ return -ENOMEM;
+
+ mci = edac_mc_alloc(sizeof(struct mv64x60_mc_pdata), 1, 1, edac_mc_idx);
+ if (!mci) {
+		printk(KERN_ERR "%s: No memory for MC err\n", __func__);
+ devres_release_group(&pdev->dev, mv64x60_mc_err_probe);
+ return -ENOMEM;
+ }
+
+ pdata = mci->pvt_info;
+ mci->dev = &pdev->dev;
+ platform_set_drvdata(pdev, mci);
+ pdata->name = "mv64x60_mc_err";
+ pdata->irq = NO_IRQ;
+ mci->dev_name = pdev->dev.bus_id;
+ pdata->edac_idx = edac_mc_idx++;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ printk(KERN_ERR "%s: Unable to get resource for "
+ "MC err regs\n", __func__);
+ res = -ENOENT;
+ goto err;
+ }
+
+ if (!devm_request_mem_region(&pdev->dev,
+ r->start,
+ r->end - r->start + 1,
+ pdata->name)) {
+ printk(KERN_ERR "%s: Error while requesting mem region\n",
+ __func__);
+ res = -EBUSY;
+ goto err;
+ }
+
+ pdata->mc_vbase = devm_ioremap(&pdev->dev,
+ r->start,
+ r->end - r->start + 1);
+ if (!pdata->mc_vbase) {
+ printk(KERN_ERR "%s: Unable to setup MC err regs\n", __func__);
+ res = -ENOMEM;
+ goto err;
+ }
+
+ ctl = in_le32(pdata->mc_vbase + MV64X60_SDRAM_CONFIG);
+ if (!(ctl & MV64X60_SDRAM_ECC)) {
+ /* Non-ECC RAM? */
+ printk(KERN_WARNING "%s: No ECC DIMMs discovered\n", __func__);
+ res = -ENODEV;
+		goto err;
+ }
+
+ debugf3("%s(): init mci\n", __func__);
+ mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR;
+ mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
+ mci->edac_cap = EDAC_FLAG_SECDED;
+ mci->mod_name = EDAC_MOD_STR;
+ mci->mod_ver = MV64x60_REVISION;
+ mci->ctl_name = mv64x60_ctl_name;
+
+ if (edac_op_state == EDAC_OPSTATE_POLL)
+ mci->edac_check = mv64x60_mc_check;
+
+ mci->ctl_page_to_phys = NULL;
+
+ mci->scrub_mode = SCRUB_SW_SRC;
+
+ mv64x60_init_csrows(mci, pdata);
+
+ /* setup MC registers */
+ out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR, 0);
+ ctl = in_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CNTL);
+ ctl = (ctl & 0xff00ffff) | 0x10000;
+ out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CNTL, ctl);
+
+	if (edac_mc_add_mc(mci)) {
+		debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
+		res = -ENODEV;
+		goto err;
+	}
+
+ if (edac_op_state == EDAC_OPSTATE_INT) {
+ /* acquire interrupt that reports errors */
+ pdata->irq = platform_get_irq(pdev, 0);
+ res = devm_request_irq(&pdev->dev,
+ pdata->irq,
+ mv64x60_mc_isr,
+ IRQF_DISABLED,
+ "[EDAC] MC err",
+ mci);
+ if (res < 0) {
+ printk(KERN_ERR "%s: Unable to request irq %d for "
+ "MV64x60 DRAM ERR\n", __func__, pdata->irq);
+ res = -ENODEV;
+ goto err2;
+ }
+
+ printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for MC Err\n",
+ pdata->irq);
+ }
+
+	devres_remove_group(&pdev->dev, mv64x60_mc_err_probe);
+
+	/* get this far and it's successful */
+	debugf3("%s(): success\n", __func__);
+
+	return 0;
+
+err2:
+ edac_mc_del_mc(&pdev->dev);
+err:
+ devres_release_group(&pdev->dev, mv64x60_mc_err_probe);
+ edac_mc_free(mci);
+ return res;
+}
+
+static int mv64x60_mc_err_remove(struct platform_device *pdev)
+{
+ struct mem_ctl_info *mci = platform_get_drvdata(pdev);
+
+ debugf0("%s()\n", __func__);
+
+ edac_mc_del_mc(&pdev->dev);
+ edac_mc_free(mci);
+ return 0;
+}
+
+static struct platform_driver mv64x60_mc_err_driver = {
+ .probe = mv64x60_mc_err_probe,
+ .remove = mv64x60_mc_err_remove,
+ .driver = {
+ .name = "mv64x60_mc_err",
+ }
+};
+
+static int __init mv64x60_edac_init(void)
+{
+ int ret = 0;
+
+ printk(KERN_INFO "Marvell MV64x60 EDAC driver " MV64x60_REVISION "\n");
+ printk(KERN_INFO "\t(C) 2006-2007 MontaVista Software\n");
+ /* make sure error reporting method is sane */
+ switch (edac_op_state) {
+ case EDAC_OPSTATE_POLL:
+ case EDAC_OPSTATE_INT:
+ break;
+ default:
+ edac_op_state = EDAC_OPSTATE_INT;
+ break;
+ }
+
+ ret = platform_driver_register(&mv64x60_mc_err_driver);
+ if (ret)
+		printk(KERN_WARNING EDAC_MOD_STR " MC err failed to register\n");
+
+ ret = platform_driver_register(&mv64x60_cpu_err_driver);
+ if (ret)
+ printk(KERN_WARNING EDAC_MOD_STR
+		       " CPU err failed to register\n");
+
+ ret = platform_driver_register(&mv64x60_sram_err_driver);
+ if (ret)
+ printk(KERN_WARNING EDAC_MOD_STR
+		       " SRAM err failed to register\n");
+
+#ifdef CONFIG_PCI
+ ret = platform_driver_register(&mv64x60_pci_err_driver);
+ if (ret)
+ printk(KERN_WARNING EDAC_MOD_STR
+		       " PCI err failed to register\n");
+#endif
+
+ return ret;
+}
+module_init(mv64x60_edac_init);
+
+static void __exit mv64x60_edac_exit(void)
+{
+#ifdef CONFIG_PCI
+ platform_driver_unregister(&mv64x60_pci_err_driver);
+#endif
+ platform_driver_unregister(&mv64x60_sram_err_driver);
+ platform_driver_unregister(&mv64x60_cpu_err_driver);
+ platform_driver_unregister(&mv64x60_mc_err_driver);
+}
+module_exit(mv64x60_edac_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Montavista Software, Inc.");
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state,
+ "EDAC Error Reporting state: 0=Poll, 2=Interrupt");
diff --git a/drivers/edac/mv64x60_edac.h b/drivers/edac/mv64x60_edac.h
new file mode 100644
index 0000000..e042e2d
--- /dev/null
+++ b/drivers/edac/mv64x60_edac.h
@@ -0,0 +1,114 @@
+/*
+ * EDAC defs for Marvell MV64x60 bridge chip
+ *
+ * Author: Dave Jiang <djiang@mvista.com>
+ *
+ * 2007 (c) MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ *
+ */
+#ifndef _MV64X60_EDAC_H_
+#define _MV64X60_EDAC_H_
+
+#define MV64x60_REVISION " Ver: 2.0.0 " __DATE__
+#define EDAC_MOD_STR "MV64x60_edac"
+
+#define mv64x60_printk(level, fmt, arg...) \
+ edac_printk(level, "MV64x60", fmt, ##arg)
+
+#define mv64x60_mc_printk(mci, level, fmt, arg...) \
+ edac_mc_chipset_printk(mci, level, "MV64x60", fmt, ##arg)
+
+/* CPU Error Report Registers */
+#define MV64x60_CPU_ERR_ADDR_LO 0x00 /* 0x0070 */
+#define MV64x60_CPU_ERR_ADDR_HI 0x08 /* 0x0078 */
+#define MV64x60_CPU_ERR_DATA_LO 0x00 /* 0x0128 */
+#define MV64x60_CPU_ERR_DATA_HI 0x08 /* 0x0130 */
+#define MV64x60_CPU_ERR_PARITY 0x10 /* 0x0138 */
+#define MV64x60_CPU_ERR_CAUSE 0x18 /* 0x0140 */
+#define MV64x60_CPU_ERR_MASK 0x20 /* 0x0148 */
+
+#define MV64x60_CPU_CAUSE_MASK 0x07ffffff
+
+/* SRAM Error Report Registers */
+#define MV64X60_SRAM_ERR_CAUSE 0x08 /* 0x0388 */
+#define MV64X60_SRAM_ERR_ADDR_LO 0x10 /* 0x0390 */
+#define MV64X60_SRAM_ERR_ADDR_HI 0x78 /* 0x03f8 */
+#define MV64X60_SRAM_ERR_DATA_LO 0x18 /* 0x0398 */
+#define MV64X60_SRAM_ERR_DATA_HI 0x20 /* 0x03a0 */
+#define MV64X60_SRAM_ERR_PARITY 0x28 /* 0x03a8 */
+
+/* SDRAM Controller Registers */
+#define MV64X60_SDRAM_CONFIG 0x00 /* 0x1400 */
+#define MV64X60_SDRAM_ERR_DATA_HI 0x40 /* 0x1440 */
+#define MV64X60_SDRAM_ERR_DATA_LO 0x44 /* 0x1444 */
+#define MV64X60_SDRAM_ERR_ECC_RCVD 0x48 /* 0x1448 */
+#define MV64X60_SDRAM_ERR_ECC_CALC 0x4c /* 0x144c */
+#define MV64X60_SDRAM_ERR_ADDR 0x50 /* 0x1450 */
+#define MV64X60_SDRAM_ERR_ECC_CNTL 0x54 /* 0x1454 */
+#define MV64X60_SDRAM_ERR_ECC_ERR_CNT 0x58 /* 0x1458 */
+
+#define MV64X60_SDRAM_REGISTERED 0x20000
+#define MV64X60_SDRAM_ECC 0x40000
+
+#ifdef CONFIG_PCI
+/*
+ * Bit 0 of MV64x60_PCIx_ERR_MASK does not exist on the 64360 and because of
+ * errata FEr-#11 and FEr-#16 for the 64460, it should be 0 on that chip as
+ * well. IOW, don't set bit 0.
+ */
+#define MV64X60_PCIx_ERR_MASK_VAL 0x00a50c24
+
+/* Register offsets from PCIx error address low register */
+#define MV64X60_PCI_ERROR_ADDR_LO 0x00
+#define MV64X60_PCI_ERROR_ADDR_HI 0x04
+#define MV64X60_PCI_ERROR_ATTR 0x08
+#define MV64X60_PCI_ERROR_CMD 0x10
+#define MV64X60_PCI_ERROR_CAUSE 0x18
+#define MV64X60_PCI_ERROR_MASK 0x1c
+
+#define MV64X60_PCI_ERR_SWrPerr 0x0002
+#define MV64X60_PCI_ERR_SRdPerr 0x0004
+#define MV64X60_PCI_ERR_MWrPerr 0x0020
+#define MV64X60_PCI_ERR_MRdPerr 0x0040
+
+#define MV64X60_PCI_PE_MASK (MV64X60_PCI_ERR_SWrPerr | \
+ MV64X60_PCI_ERR_SRdPerr | \
+ MV64X60_PCI_ERR_MWrPerr | \
+ MV64X60_PCI_ERR_MRdPerr)
+
+struct mv64x60_pci_pdata {
+ int pci_hose;
+ void __iomem *pci_vbase;
+ char *name;
+ int irq;
+ int edac_idx;
+};
+
+#endif /* CONFIG_PCI */
+
+struct mv64x60_mc_pdata {
+ void __iomem *mc_vbase;
+ int total_mem;
+ char *name;
+ int irq;
+ int edac_idx;
+};
+
+struct mv64x60_cpu_pdata {
+ void __iomem *cpu_vbase[2];
+ char *name;
+ int irq;
+ int edac_idx;
+};
+
+struct mv64x60_sram_pdata {
+ void __iomem *sram_vbase;
+ char *name;
+ int irq;
+ int edac_idx;
+};
+
+#endif
diff --git a/drivers/edac/pasemi_edac.c b/drivers/edac/pasemi_edac.c
new file mode 100644
index 0000000..8e6b91b
--- /dev/null
+++ b/drivers/edac/pasemi_edac.c
@@ -0,0 +1,307 @@
+/*
+ * Copyright (C) 2006-2007 PA Semi, Inc
+ *
+ * Author: Egor Martovetsky <egor@pasemi.com>
+ * Maintained by: Olof Johansson <olof@lixom.net>
+ *
+ * Driver for the PWRficient onchip memory controllers
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+#include <linux/edac.h>
+#include "edac_core.h"
+
+#define MODULE_NAME "pasemi_edac"
+
+#define MCCFG_MCEN 0x300
+#define MCCFG_MCEN_MMC_EN 0x00000001
+#define MCCFG_ERRCOR 0x388
+#define MCCFG_ERRCOR_RNK_FAIL_DET_EN 0x00000100
+#define MCCFG_ERRCOR_ECC_GEN_EN 0x00000010
+#define MCCFG_ERRCOR_ECC_CRR_EN 0x00000001
+#define MCCFG_SCRUB 0x384
+#define MCCFG_SCRUB_RGLR_SCRB_EN 0x00000001
+#define MCDEBUG_ERRCTL1 0x728
+#define MCDEBUG_ERRCTL1_RFL_LOG_EN 0x00080000
+#define MCDEBUG_ERRCTL1_MBE_LOG_EN 0x00040000
+#define MCDEBUG_ERRCTL1_SBE_LOG_EN 0x00020000
+#define MCDEBUG_ERRSTA 0x730
+#define MCDEBUG_ERRSTA_RFL_STATUS 0x00000004
+#define MCDEBUG_ERRSTA_MBE_STATUS 0x00000002
+#define MCDEBUG_ERRSTA_SBE_STATUS 0x00000001
+#define MCDEBUG_ERRCNT1 0x734
+#define MCDEBUG_ERRCNT1_SBE_CNT_OVRFLO 0x00000080
+#define MCDEBUG_ERRLOG1A 0x738
+#define MCDEBUG_ERRLOG1A_MERR_TYPE_M 0x30000000
+#define MCDEBUG_ERRLOG1A_MERR_TYPE_NONE 0x00000000
+#define MCDEBUG_ERRLOG1A_MERR_TYPE_SBE 0x10000000
+#define MCDEBUG_ERRLOG1A_MERR_TYPE_MBE 0x20000000
+#define MCDEBUG_ERRLOG1A_MERR_TYPE_RFL 0x30000000
+#define MCDEBUG_ERRLOG1A_MERR_BA_M 0x00700000
+#define MCDEBUG_ERRLOG1A_MERR_BA_S 20
+#define MCDEBUG_ERRLOG1A_MERR_CS_M 0x00070000
+#define MCDEBUG_ERRLOG1A_MERR_CS_S 16
+#define MCDEBUG_ERRLOG1A_SYNDROME_M 0x0000ffff
+#define MCDRAM_RANKCFG 0x114
+#define MCDRAM_RANKCFG_EN 0x00000001
+#define MCDRAM_RANKCFG_TYPE_SIZE_M 0x000001c0
+#define MCDRAM_RANKCFG_TYPE_SIZE_S 6
+
+#define PASEMI_EDAC_NR_CSROWS 8
+#define PASEMI_EDAC_NR_CHANS 1
+#define PASEMI_EDAC_ERROR_GRAIN 64
+
+static int last_page_in_mmc;
+static int system_mmc_id;
+
+
+static u32 pasemi_edac_get_error_info(struct mem_ctl_info *mci)
+{
+ struct pci_dev *pdev = to_pci_dev(mci->dev);
+ u32 tmp;
+
+ pci_read_config_dword(pdev, MCDEBUG_ERRSTA,
+ &tmp);
+
+ tmp &= (MCDEBUG_ERRSTA_RFL_STATUS | MCDEBUG_ERRSTA_MBE_STATUS
+ | MCDEBUG_ERRSTA_SBE_STATUS);
+
+ if (tmp) {
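+		/* the overflow flag appears to be write-one-to-clear;
+		 * reset the SBE counter before acking status */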
+ if (tmp & MCDEBUG_ERRSTA_SBE_STATUS)
+ pci_write_config_dword(pdev, MCDEBUG_ERRCNT1,
+ MCDEBUG_ERRCNT1_SBE_CNT_OVRFLO);
+ pci_write_config_dword(pdev, MCDEBUG_ERRSTA, tmp);
+ }
+
+ return tmp;
+}
+
+static void pasemi_edac_process_error_info(struct mem_ctl_info *mci, u32 errsta)
+{
+ struct pci_dev *pdev = to_pci_dev(mci->dev);
+ u32 errlog1a;
+ u32 cs;
+
+ if (!errsta)
+ return;
+
+ pci_read_config_dword(pdev, MCDEBUG_ERRLOG1A, &errlog1a);
+
+ cs = (errlog1a & MCDEBUG_ERRLOG1A_MERR_CS_M) >>
+ MCDEBUG_ERRLOG1A_MERR_CS_S;
+
+ /* uncorrectable/multi-bit errors */
+ if (errsta & (MCDEBUG_ERRSTA_MBE_STATUS |
+ MCDEBUG_ERRSTA_RFL_STATUS)) {
+ edac_mc_handle_ue(mci, mci->csrows[cs].first_page, 0,
+ cs, mci->ctl_name);
+ }
+
+ /* correctable/single-bit errors */
+ if (errsta & MCDEBUG_ERRSTA_SBE_STATUS) {
+ edac_mc_handle_ce(mci, mci->csrows[cs].first_page, 0,
+ 0, cs, 0, mci->ctl_name);
+ }
+}
+
+static void pasemi_edac_check(struct mem_ctl_info *mci)
+{
+ u32 errsta;
+
+ errsta = pasemi_edac_get_error_info(mci);
+ if (errsta)
+ pasemi_edac_process_error_info(mci, errsta);
+}
+
+static int pasemi_edac_init_csrows(struct mem_ctl_info *mci,
+ struct pci_dev *pdev,
+ enum edac_type edac_mode)
+{
+ struct csrow_info *csrow;
+ u32 rankcfg;
+ int index;
+
+ for (index = 0; index < mci->nr_csrows; index++) {
+ csrow = &mci->csrows[index];
+
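+		/* per-rank config registers are laid out at a 12-byte stride */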
+ pci_read_config_dword(pdev,
+ MCDRAM_RANKCFG + (index * 12),
+ &rankcfg);
+
+ if (!(rankcfg & MCDRAM_RANKCFG_EN))
+ continue;
+
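+		/* TYPE_SIZE encodes the rank size; convert from MiB to pages */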
+ switch ((rankcfg & MCDRAM_RANKCFG_TYPE_SIZE_M) >>
+ MCDRAM_RANKCFG_TYPE_SIZE_S) {
+ case 0:
+ csrow->nr_pages = 128 << (20 - PAGE_SHIFT);
+ break;
+ case 1:
+ csrow->nr_pages = 256 << (20 - PAGE_SHIFT);
+ break;
+ case 2:
+ case 3:
+ csrow->nr_pages = 512 << (20 - PAGE_SHIFT);
+ break;
+ case 4:
+ csrow->nr_pages = 1024 << (20 - PAGE_SHIFT);
+ break;
+ case 5:
+ csrow->nr_pages = 2048 << (20 - PAGE_SHIFT);
+ break;
+ default:
+ edac_mc_printk(mci, KERN_ERR,
+ "Unrecognized Rank Config. rankcfg=%u\n",
+ rankcfg);
+ return -EINVAL;
+ }
+
+ csrow->first_page = last_page_in_mmc;
+ csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
+ last_page_in_mmc += csrow->nr_pages;
+ csrow->page_mask = 0;
+ csrow->grain = PASEMI_EDAC_ERROR_GRAIN;
+ csrow->mtype = MEM_DDR;
+ csrow->dtype = DEV_UNKNOWN;
+ csrow->edac_mode = edac_mode;
+ }
+ return 0;
+}
+
+static int __devinit pasemi_edac_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct mem_ctl_info *mci = NULL;
+ u32 errctl1, errcor, scrub, mcen;
+
+ pci_read_config_dword(pdev, MCCFG_MCEN, &mcen);
+ if (!(mcen & MCCFG_MCEN_MMC_EN))
+ return -ENODEV;
+
+ /*
+ * We should think about enabling other error detection later on
+ */
+
+ pci_read_config_dword(pdev, MCDEBUG_ERRCTL1, &errctl1);
+ errctl1 |= MCDEBUG_ERRCTL1_SBE_LOG_EN |
+ MCDEBUG_ERRCTL1_MBE_LOG_EN |
+ MCDEBUG_ERRCTL1_RFL_LOG_EN;
+ pci_write_config_dword(pdev, MCDEBUG_ERRCTL1, errctl1);
+
+ mci = edac_mc_alloc(0, PASEMI_EDAC_NR_CSROWS, PASEMI_EDAC_NR_CHANS,
+ system_mmc_id++);
+
+ if (mci == NULL)
+ return -ENOMEM;
+
+ pci_read_config_dword(pdev, MCCFG_ERRCOR, &errcor);
+ errcor |= MCCFG_ERRCOR_RNK_FAIL_DET_EN |
+ MCCFG_ERRCOR_ECC_GEN_EN |
+ MCCFG_ERRCOR_ECC_CRR_EN;
+
+ mci->dev = &pdev->dev;
+ mci->mtype_cap = MEM_FLAG_DDR | MEM_FLAG_RDDR;
+ mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
+ mci->edac_cap = (errcor & MCCFG_ERRCOR_ECC_GEN_EN) ?
+ ((errcor & MCCFG_ERRCOR_ECC_CRR_EN) ?
+ (EDAC_FLAG_EC | EDAC_FLAG_SECDED) : EDAC_FLAG_EC) :
+ EDAC_FLAG_NONE;
+ mci->mod_name = MODULE_NAME;
+ mci->dev_name = pci_name(pdev);
+ mci->ctl_name = "pasemi,pwrficient-mc";
+ mci->edac_check = pasemi_edac_check;
+ mci->ctl_page_to_phys = NULL;
+ pci_read_config_dword(pdev, MCCFG_SCRUB, &scrub);
+ mci->scrub_cap = SCRUB_FLAG_HW_PROG | SCRUB_FLAG_HW_SRC;
+ mci->scrub_mode =
+ ((errcor & MCCFG_ERRCOR_ECC_CRR_EN) ? SCRUB_FLAG_HW_SRC : 0) |
+ ((scrub & MCCFG_SCRUB_RGLR_SCRB_EN) ? SCRUB_FLAG_HW_PROG : 0);
+
+ if (pasemi_edac_init_csrows(mci, pdev,
+ (mci->edac_cap & EDAC_FLAG_SECDED) ?
+ EDAC_SECDED :
+ ((mci->edac_cap & EDAC_FLAG_EC) ?
+ EDAC_EC : EDAC_NONE)))
+ goto fail;
+
+ /*
+ * Clear status
+ */
+ pasemi_edac_get_error_info(mci);
+
+ if (edac_mc_add_mc(mci))
+ goto fail;
+
+ /* get this far and it's successful */
+ return 0;
+
+fail:
+ edac_mc_free(mci);
+ return -ENODEV;
+}
+
+static void __devexit pasemi_edac_remove(struct pci_dev *pdev)
+{
+ struct mem_ctl_info *mci = edac_mc_del_mc(&pdev->dev);
+
+ if (!mci)
+ return;
+
+ edac_mc_free(mci);
+}
+
+
+static const struct pci_device_id pasemi_edac_pci_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa00a) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(pci, pasemi_edac_pci_tbl);
+
+static struct pci_driver pasemi_edac_driver = {
+ .name = MODULE_NAME,
+ .probe = pasemi_edac_probe,
+ .remove = __devexit_p(pasemi_edac_remove),
+ .id_table = pasemi_edac_pci_tbl,
+};
+
+static int __init pasemi_edac_init(void)
+{
+ /* Ensure that the OPSTATE is set correctly for POLL or NMI */
+ opstate_init();
+
+ return pci_register_driver(&pasemi_edac_driver);
+}
+
+static void __exit pasemi_edac_exit(void)
+{
+ pci_unregister_driver(&pasemi_edac_driver);
+}
+
+module_init(pasemi_edac_init);
+module_exit(pasemi_edac_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>");
+MODULE_DESCRIPTION("MC support for PA Semi PWRficient memory controller");
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
+
diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
new file mode 100644
index 0000000..9900675
--- /dev/null
+++ b/drivers/edac/r82600_edac.c
@@ -0,0 +1,421 @@
+/*
+ * Radisys 82600 Embedded chipset Memory Controller kernel module
+ * (C) 2005 EADS Astrium
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written by Tim Small <tim@buttersideup.com>, based on work by Thayne
+ * Harbaugh, Dan Hollis <goemon at anime dot net> and others.
+ *
+ * $Id: edac_r82600.c,v 1.1.2.6 2005/10/05 00:43:44 dsp_llnl Exp $
+ *
+ * Written with reference to 82600 High Integration Dual PCI System
+ * Controller Data Book:
+ * www.radisys.com/files/support_downloads/007-01277-0002.82600DataBook.pdf
+ * references to this document given in []
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+#include <linux/edac.h>
+#include "edac_core.h"
+
+#define R82600_REVISION " Ver: 2.0.2 " __DATE__
+#define EDAC_MOD_STR "r82600_edac"
+
+#define r82600_printk(level, fmt, arg...) \
+ edac_printk(level, "r82600", fmt, ##arg)
+
+#define r82600_mc_printk(mci, level, fmt, arg...) \
+ edac_mc_chipset_printk(mci, level, "r82600", fmt, ##arg)
+
+/* Radisys say "The 82600 integrates a main memory SDRAM controller that
+ * supports up to four banks of memory. The four banks can support a mix of
+ * sizes of 64 bit wide (72 bits with ECC) Synchronous DRAM (SDRAM) DIMMs,
+ * each of which can be any size from 16MB to 512MB. Both registered (control
+ * signals buffered) and unbuffered DIMM types are supported. Mixing of
+ * registered and unbuffered DIMMs as well as mixing of ECC and non-ECC DIMMs
+ * is not allowed. The 82600 SDRAM interface operates at the same frequency as
+ * the CPU bus, 66MHz, 100MHz or 133MHz."
+ */
+
+#define R82600_NR_CSROWS 4
+#define R82600_NR_CHANS 1
+#define R82600_NR_DIMMS 4
+
+#define R82600_BRIDGE_ID 0x8200
+
+/* Radisys 82600 register addresses - device 0 function 0 - PCI bridge */
+#define R82600_DRAMC 0x57 /* Various SDRAM related control bits
+ * all bits are R/W
+ *
+ * 7 SDRAM ISA Hole Enable
+ * 6 Flash Page Mode Enable
+ * 5 ECC Enable: 1=ECC 0=noECC
+ * 4 DRAM DIMM Type: 1=
+ * 3 BIOS Alias Disable
+ * 2 SDRAM BIOS Flash Write Enable
+ * 1:0 SDRAM Refresh Rate: 00=Disabled
+ * 01=7.8usec (256Mbit SDRAMs)
+ * 10=15.6us 11=125usec
+ */
+
+#define R82600_SDRAMC 0x76 /* "SDRAM Control Register"
+ * More SDRAM related control bits
+ * all bits are R/W
+ *
+ * 15:8 Reserved.
+ *
+ * 7:5 Special SDRAM Mode Select
+ *
+ * 4 Force ECC
+ *
+ * 1=Drive ECC bits to 0 during
+ * write cycles (i.e. ECC test mode)
+ *
+ * 0=Normal ECC functioning
+ *
+ * 3 Enhanced Paging Enable
+ *
+ * 2 CAS# Latency 0=3clks 1=2clks
+ *
+ * 1 RAS# to CAS# Delay 0=3 1=2
+ *
+ * 0 RAS# Precharge 0=3 1=2
+ */
+
+#define R82600_EAP 0x80 /* ECC Error Address Pointer Register
+ *
+ * 31 Disable Hardware Scrubbing (RW)
+ * 0=Scrub on corrected read
+ * 1=Don't scrub on corrected read
+ *
+ * 30:12 Error Address Pointer (RO)
+ * Upper 19 bits of error address
+ *
+ * 11:4 Syndrome Bits (RO)
+ *
+ * 3 BSERR# on multibit error (RW)
+ * 1=enable 0=disable
+ *
+					 * 2    NMI on Single Bit Error (RW)
+					 *       1=NMI triggered by SBE n.b. other
+					 *         prerequisites
+ * 0=NMI not triggered
+ *
+ * 1 MBE (R/WC)
+ * read 1=MBE at EAP (see above)
+ * read 0=no MBE, or SBE occurred first
+ * write 1=Clear MBE status (must also
+ * clear SBE)
+ * write 0=NOP
+ *
+					 * 0    SBE (R/WC)
+ * read 1=SBE at EAP (see above)
+ * read 0=no SBE, or MBE occurred first
+ * write 1=Clear SBE status (must also
+ * clear MBE)
+ * write 0=NOP
+ */
+
+#define R82600_DRBA	0x60	/* + 0x60..0x63 SDRAM Row Boundary Address
+ * Registers
+ *
+ * 7:0 Address lines 30:24 - upper limit of
+ * each row [p57]
+ */
+
+struct r82600_error_info {
+ u32 eapr;
+};
+
+static unsigned int disable_hardware_scrub;
+
+static struct edac_pci_ctl_info *r82600_pci;
+
+static void r82600_get_error_info(struct mem_ctl_info *mci,
+ struct r82600_error_info *info)
+{
+ struct pci_dev *pdev;
+
+ pdev = to_pci_dev(mci->dev);
+ pci_read_config_dword(pdev, R82600_EAP, &info->eapr);
+
+ if (info->eapr & BIT(0))
+ /* Clear error to allow next error to be reported [p.62] */
+ pci_write_bits32(pdev, R82600_EAP,
+				 ((u32) BIT(0) | (u32) BIT(1)),
+				 ((u32) BIT(0) | (u32) BIT(1)));
+
+ if (info->eapr & BIT(1))
+ /* Clear error to allow next error to be reported [p.62] */
+ pci_write_bits32(pdev, R82600_EAP,
+				 ((u32) BIT(0) | (u32) BIT(1)),
+				 ((u32) BIT(0) | (u32) BIT(1)));
+}
+
+static int r82600_process_error_info(struct mem_ctl_info *mci,
+ struct r82600_error_info *info,
+ int handle_errors)
+{
+ int error_found;
+ u32 eapaddr, page;
+ u32 syndrome;
+
+ error_found = 0;
+
+	/* bits 30:12 store the upper 19 bits of the 32 bit error address */
+	eapaddr = ((info->eapr >> 12) & 0x7FFFF) << 13;
+ /* Syndrome in bits 11:4 [p.62] */
+ syndrome = (info->eapr >> 4) & 0xFF;
+
+ /* the R82600 reports at less than page *
+ * granularity (upper 19 bits only) */
+ page = eapaddr >> PAGE_SHIFT;
+
+ if (info->eapr & BIT(0)) { /* CE? */
+ error_found = 1;
+
+ if (handle_errors)
+ edac_mc_handle_ce(mci, page, 0, /* not avail */
+ syndrome,
+ edac_mc_find_csrow_by_page(mci, page),
+ 0, mci->ctl_name);
+ }
+
+ if (info->eapr & BIT(1)) { /* UE? */
+ error_found = 1;
+
+ if (handle_errors)
+ /* 82600 doesn't give enough info */
+ edac_mc_handle_ue(mci, page, 0,
+ edac_mc_find_csrow_by_page(mci, page),
+ mci->ctl_name);
+ }
+
+ return error_found;
+}
+
+static void r82600_check(struct mem_ctl_info *mci)
+{
+ struct r82600_error_info info;
+
+ debugf1("MC%d: %s()\n", mci->mc_idx, __func__);
+ r82600_get_error_info(mci, &info);
+ r82600_process_error_info(mci, &info, 1);
+}
+
+static inline int ecc_enabled(u8 dramcr)
+{
+ return dramcr & BIT(5);
+}
+
+static void r82600_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
+ u8 dramcr)
+{
+ struct csrow_info *csrow;
+ int index;
+	u8 drbar;		/* SDRAM Row Boundary Address Register */
+ u32 row_high_limit, row_high_limit_last;
+ u32 reg_sdram, ecc_on, row_base;
+
+ ecc_on = ecc_enabled(dramcr);
+ reg_sdram = dramcr & BIT(4);
+ row_high_limit_last = 0;
+
+ for (index = 0; index < mci->nr_csrows; index++) {
+ csrow = &mci->csrows[index];
+
+ /* find the DRAM Chip Select Base address and mask */
+ pci_read_config_byte(pdev, R82600_DRBA + index, &drbar);
+
+ debugf1("%s() Row=%d DRBA = %#0x\n", __func__, index, drbar);
+
+ row_high_limit = ((u32) drbar << 24);
+/* row_high_limit = ((u32)drbar << 24) | 0xffffffUL; */
+
+		debugf1("%s() Row=%d, Boundary Address=%#0x, Last = %#0x\n",
+ __func__, index, row_high_limit, row_high_limit_last);
+
+ /* Empty row [p.57] */
+ if (row_high_limit == row_high_limit_last)
+ continue;
+
+ row_base = row_high_limit_last;
+
+ csrow->first_page = row_base >> PAGE_SHIFT;
+ csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1;
+ csrow->nr_pages = csrow->last_page - csrow->first_page + 1;
+ /* Error address is top 19 bits - so granularity is *
+ * 14 bits */
+ csrow->grain = 1 << 14;
+ csrow->mtype = reg_sdram ? MEM_RDDR : MEM_DDR;
+ /* FIXME - check that this is unknowable with this chipset */
+ csrow->dtype = DEV_UNKNOWN;
+
+ /* Mode is global on 82600 */
+ csrow->edac_mode = ecc_on ? EDAC_SECDED : EDAC_NONE;
+ row_high_limit_last = row_high_limit;
+ }
+}
+
+static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
+{
+ struct mem_ctl_info *mci;
+ u8 dramcr;
+ u32 eapr;
+ u32 scrub_disabled;
+ u32 sdram_refresh_rate;
+ struct r82600_error_info discard;
+
+ debugf0("%s()\n", __func__);
+ pci_read_config_byte(pdev, R82600_DRAMC, &dramcr);
+ pci_read_config_dword(pdev, R82600_EAP, &eapr);
+ scrub_disabled = eapr & BIT(31);
+ sdram_refresh_rate = dramcr & (BIT(0) | BIT(1));
+ debugf2("%s(): sdram refresh rate = %#0x\n", __func__,
+ sdram_refresh_rate);
+ debugf2("%s(): DRAMC register = %#0x\n", __func__, dramcr);
+ mci = edac_mc_alloc(0, R82600_NR_CSROWS, R82600_NR_CHANS, 0);
+
+ if (mci == NULL)
+ return -ENOMEM;
+
+ debugf0("%s(): mci = %p\n", __func__, mci);
+ mci->dev = &pdev->dev;
+ mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR;
+ mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
+	/* FIXME: work out whether the chip leads have been used for COM2
+	 * instead on this board [MA6?].
+	 *
+	 * On the R82600 the pins for memory bits 72:65 - i.e. the EC
+	 * bits - are shared with the pins for COM2 (!), so if COM2 is
+	 * enabled we assume COM2 is wired up, and thus no EDAC is
+	 * possible.
+	 */
+ mci->edac_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
+
+ if (ecc_enabled(dramcr)) {
+ if (scrub_disabled)
+ debugf3("%s(): mci = %p - Scrubbing disabled! EAP: "
+ "%#0x\n", __func__, mci, eapr);
+ } else
+ mci->edac_cap = EDAC_FLAG_NONE;
+
+ mci->mod_name = EDAC_MOD_STR;
+ mci->mod_ver = R82600_REVISION;
+ mci->ctl_name = "R82600";
+ mci->dev_name = pci_name(pdev);
+ mci->edac_check = r82600_check;
+ mci->ctl_page_to_phys = NULL;
+ r82600_init_csrows(mci, pdev, dramcr);
+ r82600_get_error_info(mci, &discard); /* clear counters */
+
+ /* Here we assume that we will never see multiple instances of this
+ * type of memory controller. The ID is therefore hardcoded to 0.
+ */
+ if (edac_mc_add_mc(mci)) {
+ debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
+ goto fail;
+ }
+
+ /* get this far and it's successful */
+
+ if (disable_hardware_scrub) {
+ debugf3("%s(): Disabling Hardware Scrub (scrub on error)\n",
+ __func__);
+ pci_write_bits32(pdev, R82600_EAP, BIT(31), BIT(31));
+ }
+
+ /* allocating generic PCI control info */
+ r82600_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
+ if (!r82600_pci) {
+ printk(KERN_WARNING
+ "%s(): Unable to create PCI control\n",
+ __func__);
+ printk(KERN_WARNING
+ "%s(): PCI error report via EDAC not setup\n",
+ __func__);
+ }
+
+ debugf3("%s(): success\n", __func__);
+ return 0;
+
+fail:
+ edac_mc_free(mci);
+ return -ENODEV;
+}
+
+/* returns count (>= 0), or negative on error */
+static int __devinit r82600_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ debugf0("%s()\n", __func__);
+
+	/* don't need to call pci_enable_device() */
+ return r82600_probe1(pdev, ent->driver_data);
+}
+
+static void __devexit r82600_remove_one(struct pci_dev *pdev)
+{
+ struct mem_ctl_info *mci;
+
+ debugf0("%s()\n", __func__);
+
+ if (r82600_pci)
+ edac_pci_release_generic_ctl(r82600_pci);
+
+ if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
+ return;
+
+ edac_mc_free(mci);
+}
+
+static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
+ },
+ {
+ 0,
+ } /* 0 terminated list. */
+};
+
+MODULE_DEVICE_TABLE(pci, r82600_pci_tbl);
+
+static struct pci_driver r82600_driver = {
+ .name = EDAC_MOD_STR,
+ .probe = r82600_init_one,
+ .remove = __devexit_p(r82600_remove_one),
+ .id_table = r82600_pci_tbl,
+};
+
+static int __init r82600_init(void)
+{
+ /* Ensure that the OPSTATE is set correctly for POLL or NMI */
+ opstate_init();
+
+ return pci_register_driver(&r82600_driver);
+}
+
+static void __exit r82600_exit(void)
+{
+ pci_unregister_driver(&r82600_driver);
+}
+
+module_init(r82600_init);
+module_exit(r82600_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Tim Small <tim@buttersideup.com> - WPAD Ltd. "
+ "on behalf of EADS Astrium");
+MODULE_DESCRIPTION("MC support for Radisys 82600 memory controllers");
+
+module_param(disable_hardware_scrub, bool, 0644);
+MODULE_PARM_DESC(disable_hardware_scrub,
+ "If set, disable the chipset's automatic scrub for CEs");
+
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
new file mode 100644
index 0000000..2406c2c
--- /dev/null
+++ b/drivers/edac/x38_edac.c
@@ -0,0 +1,524 @@
+/*
+ * Intel X38 Memory Controller kernel module
+ * Copyright (C) 2008 Cluster Computing, Inc.
+ *
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * This file is based on i3200_edac.c
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+#include <linux/edac.h>
+#include "edac_core.h"
+
+#define X38_REVISION "1.1"
+
+#define EDAC_MOD_STR "x38_edac"
+
+#define PCI_DEVICE_ID_INTEL_X38_HB 0x29e0
+
+#define X38_RANKS 8
+#define X38_RANKS_PER_CHANNEL 4
+#define X38_CHANNELS 2
+
+/* Intel X38 register addresses - device 0 function 0 - DRAM Controller */
+
+#define X38_MCHBAR_LOW 0x48 /* MCH Memory Mapped Register BAR */
+#define X38_MCHBAR_HIGH 0x4b
+#define X38_MCHBAR_MASK 0xfffffc000ULL /* bits 35:14 */
+#define X38_MMR_WINDOW_SIZE 16384
+
+#define X38_TOM 0xa0 /* Top of Memory (16b)
+ *
+ * 15:10 reserved
+ * 9:0 total populated physical memory
+ */
+#define X38_TOM_MASK 0x3ff /* bits 9:0 */
+#define X38_TOM_SHIFT 26 /* 64MiB grain */
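+
+/*
+ * Example: a TOM value of 0x040 means 0x40 << 26 = 4GiB of populated
+ * physical memory.
+ */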
+
+#define X38_ERRSTS 0xc8 /* Error Status Register (16b)
+ *
+ * 15 reserved
+ * 14 Isochronous TBWRR Run Behind FIFO Full
+ * (ITCV)
+ * 13 Isochronous TBWRR Run Behind FIFO Put
+ * (ITSTV)
+ * 12 reserved
+ * 11 MCH Thermal Sensor Event
+ * for SMI/SCI/SERR (GTSE)
+ * 10 reserved
+ * 9 LOCK to non-DRAM Memory Flag (LCKF)
+ * 8 reserved
+ * 7 DRAM Throttle Flag (DTF)
+ * 6:2 reserved
+ * 1 Multi-bit DRAM ECC Error Flag (DMERR)
+ * 0 Single-bit DRAM ECC Error Flag (DSERR)
+ */
+#define X38_ERRSTS_UE 0x0002
+#define X38_ERRSTS_CE 0x0001
+#define X38_ERRSTS_BITS (X38_ERRSTS_UE | X38_ERRSTS_CE)
+
+
+/* Intel MMIO register space - device 0 function 0 - MMR space */
+
+#define X38_C0DRB 0x200 /* Channel 0 DRAM Rank Boundary (16b x 4)
+ *
+ * 15:10 reserved
+ * 9:0 Channel 0 DRAM Rank Boundary Address
+ */
+#define X38_C1DRB 0x600 /* Channel 1 DRAM Rank Boundary (16b x 4) */
+#define X38_DRB_MASK 0x3ff /* bits 9:0 */
+#define X38_DRB_SHIFT 26 /* 64MiB grain */
+
+#define X38_C0ECCERRLOG 0x280 /* Channel 0 ECC Error Log (64b)
+ *
+ * 63:48 Error Column Address (ERRCOL)
+ * 47:32 Error Row Address (ERRROW)
+ * 31:29 Error Bank Address (ERRBANK)
+ * 28:27 Error Rank Address (ERRRANK)
+ * 26:24 reserved
+ * 23:16 Error Syndrome (ERRSYND)
+					 * 15:2  reserved
+ * 1 Multiple Bit Error Status (MERRSTS)
+ * 0 Correctable Error Status (CERRSTS)
+ */
+#define X38_C1ECCERRLOG 0x680 /* Channel 1 ECC Error Log (64b) */
+#define X38_ECCERRLOG_CE 0x1
+#define X38_ECCERRLOG_UE 0x2
+#define X38_ECCERRLOG_RANK_BITS 0x18000000
+#define X38_ECCERRLOG_SYNDROME_BITS 0xff0000
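+
+/*
+ * Example: an ECCERRLOG value of 0x08550001 decodes to rank 1 within
+ * the channel (bits 28:27), syndrome 0x55 (bits 23:16) and a
+ * correctable error (CERRSTS set).
+ */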
+
+#define X38_CAPID0 0xe0 /* see P.94 of spec for details */
+
+static int x38_channel_num;
+
+static int how_many_channel(struct pci_dev *pdev)
+{
+ unsigned char capid0_8b; /* 8th byte of CAPID0 */
+
+ pci_read_config_byte(pdev, X38_CAPID0 + 8, &capid0_8b);
+ if (capid0_8b & 0x20) { /* check DCD: Dual Channel Disable */
+ debugf0("In single channel mode.\n");
+ x38_channel_num = 1;
+ } else {
+ debugf0("In dual channel mode.\n");
+ x38_channel_num = 2;
+ }
+
+ return x38_channel_num;
+}
+
+static unsigned long eccerrlog_syndrome(u64 log)
+{
+ return (log & X38_ECCERRLOG_SYNDROME_BITS) >> 16;
+}
+
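+/*
+ * Ranks are numbered four per channel, so e.g. channel 1 / rank 2
+ * maps to csrow 6.
+ */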
+static int eccerrlog_row(int channel, u64 log)
+{
+ return ((log & X38_ECCERRLOG_RANK_BITS) >> 27) |
+ (channel * X38_RANKS_PER_CHANNEL);
+}
+
+enum x38_chips {
+ X38 = 0,
+};
+
+struct x38_dev_info {
+ const char *ctl_name;
+};
+
+struct x38_error_info {
+ u16 errsts;
+ u16 errsts2;
+ u64 eccerrlog[X38_CHANNELS];
+};
+
+static const struct x38_dev_info x38_devs[] = {
+ [X38] = {
+ .ctl_name = "x38"},
+};
+
+static struct pci_dev *mci_pdev;
+static int x38_registered = 1;
+
+
+static void x38_clear_error_info(struct mem_ctl_info *mci)
+{
+ struct pci_dev *pdev;
+
+ pdev = to_pci_dev(mci->dev);
+
+ /*
+ * Clear any error bits.
+ * (Yes, we really clear bits by writing 1 to them.)
+ */
+ pci_write_bits16(pdev, X38_ERRSTS, X38_ERRSTS_BITS,
+ X38_ERRSTS_BITS);
+}
+
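+/*
+ * The ECC error logs are 64 bits wide but the MMIO window is read 32
+ * bits at a time, so the two halves are combined here; note that the
+ * combined read is not atomic.
+ */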
+static u64 x38_readq(const void __iomem *addr)
+{
+ return readl(addr) | (((u64)readl(addr + 4)) << 32);
+}
+
+static void x38_get_and_clear_error_info(struct mem_ctl_info *mci,
+ struct x38_error_info *info)
+{
+ struct pci_dev *pdev;
+ void __iomem *window = mci->pvt_info;
+
+ pdev = to_pci_dev(mci->dev);
+
+	/*
+	 * This is a mess because there is no atomic way to read all the
+	 * registers at once, and a latched CE can be overwritten by a
+	 * later UE between reads.
+	 */
+ pci_read_config_word(pdev, X38_ERRSTS, &info->errsts);
+ if (!(info->errsts & X38_ERRSTS_BITS))
+ return;
+
+ info->eccerrlog[0] = x38_readq(window + X38_C0ECCERRLOG);
+ if (x38_channel_num == 2)
+ info->eccerrlog[1] = x38_readq(window + X38_C1ECCERRLOG);
+
+ pci_read_config_word(pdev, X38_ERRSTS, &info->errsts2);
+
+	/*
+	 * If ERRSTS is unchanged across the two reads, the first set of
+	 * log reads is valid.  If it changed, a CE arrived with no
+	 * usable info, and the second set of reads is the valid one -
+	 * it should hold the UE info.
+	 */
+ if ((info->errsts ^ info->errsts2) & X38_ERRSTS_BITS) {
+ info->eccerrlog[0] = x38_readq(window + X38_C0ECCERRLOG);
+ if (x38_channel_num == 2)
+ info->eccerrlog[1] =
+ x38_readq(window + X38_C1ECCERRLOG);
+ }
+
+ x38_clear_error_info(mci);
+}
+
+static void x38_process_error_info(struct mem_ctl_info *mci,
+ struct x38_error_info *info)
+{
+ int channel;
+ u64 log;
+
+ if (!(info->errsts & X38_ERRSTS_BITS))
+ return;
+
+ if ((info->errsts ^ info->errsts2) & X38_ERRSTS_BITS) {
+ edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+ info->errsts = info->errsts2;
+ }
+
+ for (channel = 0; channel < x38_channel_num; channel++) {
+ log = info->eccerrlog[channel];
+ if (log & X38_ECCERRLOG_UE) {
+ edac_mc_handle_ue(mci, 0, 0,
+ eccerrlog_row(channel, log), "x38 UE");
+ } else if (log & X38_ECCERRLOG_CE) {
+ edac_mc_handle_ce(mci, 0, 0,
+ eccerrlog_syndrome(log),
+ eccerrlog_row(channel, log), 0, "x38 CE");
+ }
+ }
+}
+
+static void x38_check(struct mem_ctl_info *mci)
+{
+ struct x38_error_info info;
+
+ debugf1("MC%d: %s()\n", mci->mc_idx, __func__);
+ x38_get_and_clear_error_info(mci, &info);
+ x38_process_error_info(mci, &info);
+}
+
+
+static void __iomem *x38_map_mchbar(struct pci_dev *pdev)
+{
+ union {
+ u64 mchbar;
+ struct {
+ u32 mchbar_low;
+ u32 mchbar_high;
+ };
+ } u;
+ void __iomem *window;
+
+ pci_read_config_dword(pdev, X38_MCHBAR_LOW, &u.mchbar_low);
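+	/* bit 0 is (presumably) the MCHBAR enable; set it before the
+	 * window is used */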
+ pci_write_config_dword(pdev, X38_MCHBAR_LOW, u.mchbar_low | 0x1);
+ pci_read_config_dword(pdev, X38_MCHBAR_HIGH, &u.mchbar_high);
+ u.mchbar &= X38_MCHBAR_MASK;
+
+ if (u.mchbar != (resource_size_t)u.mchbar) {
+ printk(KERN_ERR
+ "x38: mmio space beyond accessible range (0x%llx)\n",
+ (unsigned long long)u.mchbar);
+ return NULL;
+ }
+
+ window = ioremap_nocache(u.mchbar, X38_MMR_WINDOW_SIZE);
+ if (!window)
+ printk(KERN_ERR "x38: cannot map mmio space at 0x%llx\n",
+ (unsigned long long)u.mchbar);
+
+ return window;
+}
+
+
+static void x38_get_drbs(void __iomem *window,
+ u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL])
+{
+ int i;
+
+ for (i = 0; i < X38_RANKS_PER_CHANNEL; i++) {
+ drbs[0][i] = readw(window + X38_C0DRB + 2*i) & X38_DRB_MASK;
+ drbs[1][i] = readw(window + X38_C1DRB + 2*i) & X38_DRB_MASK;
+ }
+}
+
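+/*
+ * When the channels are stacked, channel 1 is mapped above channel 0,
+ * so the last rank boundary of channel 1 coincides with the top of
+ * populated memory; drb_to_nr_pages() compensates for that offset.
+ */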
+static bool x38_is_stacked(struct pci_dev *pdev,
+ u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL])
+{
+ u16 tom;
+
+ pci_read_config_word(pdev, X38_TOM, &tom);
+ tom &= X38_TOM_MASK;
+
+ return drbs[X38_CHANNELS - 1][X38_RANKS_PER_CHANNEL - 1] == tom;
+}
+
+static unsigned long drb_to_nr_pages(
+ u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL],
+ bool stacked, int channel, int rank)
+{
+ int n;
+
+ n = drbs[channel][rank];
+ if (rank > 0)
+ n -= drbs[channel][rank - 1];
+	if (stacked && (channel == 1) &&
+	    drbs[channel][rank] == drbs[channel][X38_RANKS_PER_CHANNEL - 1])
+		n -= drbs[0][X38_RANKS_PER_CHANNEL - 1];
+
+ n <<= (X38_DRB_SHIFT - PAGE_SHIFT);
+ return n;
+}
+
+static int x38_probe1(struct pci_dev *pdev, int dev_idx)
+{
+ int rc;
+ int i;
+ struct mem_ctl_info *mci = NULL;
+ unsigned long last_page;
+ u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL];
+ bool stacked;
+ void __iomem *window;
+
+ debugf0("MC: %s()\n", __func__);
+
+ window = x38_map_mchbar(pdev);
+ if (!window)
+ return -ENODEV;
+
+ x38_get_drbs(window, drbs);
+
+ how_many_channel(pdev);
+
+ /* FIXME: unconventional pvt_info usage */
+ mci = edac_mc_alloc(0, X38_RANKS, x38_channel_num, 0);
+ if (!mci)
+ return -ENOMEM;
+
+ debugf3("MC: %s(): init mci\n", __func__);
+
+ mci->dev = &pdev->dev;
+ mci->mtype_cap = MEM_FLAG_DDR2;
+
+ mci->edac_ctl_cap = EDAC_FLAG_SECDED;
+ mci->edac_cap = EDAC_FLAG_SECDED;
+
+ mci->mod_name = EDAC_MOD_STR;
+ mci->mod_ver = X38_REVISION;
+ mci->ctl_name = x38_devs[dev_idx].ctl_name;
+ mci->dev_name = pci_name(pdev);
+ mci->edac_check = x38_check;
+ mci->ctl_page_to_phys = NULL;
+ mci->pvt_info = window;
+
+ stacked = x38_is_stacked(pdev, drbs);
+
+ /*
+ * The dram rank boundary (DRB) reg values are boundary addresses
+ * for each DRAM rank with a granularity of 64MB. DRB regs are
+ * cumulative; the last one will contain the total memory
+ * contained in all ranks.
+ */
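+	/*
+	 * Example: C0DRB values of { 0x4, 0x8, 0x8, 0x8 } give rank 0
+	 * 4 * 64MiB = 256MiB, rank 1 the next 256MiB, and leave ranks
+	 * 2 and 3 empty because they repeat the previous boundary.
+	 */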
+ last_page = -1UL;
+ for (i = 0; i < mci->nr_csrows; i++) {
+ unsigned long nr_pages;
+ struct csrow_info *csrow = &mci->csrows[i];
+
+ nr_pages = drb_to_nr_pages(drbs, stacked,
+ i / X38_RANKS_PER_CHANNEL,
+ i % X38_RANKS_PER_CHANNEL);
+
+ if (nr_pages == 0) {
+ csrow->mtype = MEM_EMPTY;
+ continue;
+ }
+
+ csrow->first_page = last_page + 1;
+ last_page += nr_pages;
+ csrow->last_page = last_page;
+ csrow->nr_pages = nr_pages;
+
+ csrow->grain = nr_pages << PAGE_SHIFT;
+ csrow->mtype = MEM_DDR2;
+ csrow->dtype = DEV_UNKNOWN;
+ csrow->edac_mode = EDAC_UNKNOWN;
+ }
+
+ x38_clear_error_info(mci);
+
+ rc = -ENODEV;
+ if (edac_mc_add_mc(mci)) {
+ debugf3("MC: %s(): failed edac_mc_add_mc()\n", __func__);
+ goto fail;
+ }
+
+ /* get this far and it's successful */
+ debugf3("MC: %s(): success\n", __func__);
+ return 0;
+
+fail:
+ iounmap(window);
+ if (mci)
+ edac_mc_free(mci);
+
+ return rc;
+}
+
+static int __devinit x38_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int rc;
+
+ debugf0("MC: %s()\n", __func__);
+
+ if (pci_enable_device(pdev) < 0)
+ return -EIO;
+
+ rc = x38_probe1(pdev, ent->driver_data);
+ if (!mci_pdev)
+ mci_pdev = pci_dev_get(pdev);
+
+ return rc;
+}
+
+static void __devexit x38_remove_one(struct pci_dev *pdev)
+{
+ struct mem_ctl_info *mci;
+
+ debugf0("%s()\n", __func__);
+
+ mci = edac_mc_del_mc(&pdev->dev);
+ if (!mci)
+ return;
+
+ iounmap(mci->pvt_info);
+
+ edac_mc_free(mci);
+}
+
+static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
+ {
+ PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ X38},
+ {
+ 0,
+ } /* 0 terminated list. */
+};
+
+MODULE_DEVICE_TABLE(pci, x38_pci_tbl);
+
+static struct pci_driver x38_driver = {
+ .name = EDAC_MOD_STR,
+ .probe = x38_init_one,
+ .remove = __devexit_p(x38_remove_one),
+ .id_table = x38_pci_tbl,
+};
+
+static int __init x38_init(void)
+{
+ int pci_rc;
+
+ debugf3("MC: %s()\n", __func__);
+
+ /* Ensure that the OPSTATE is set correctly for POLL or NMI */
+ opstate_init();
+
+ pci_rc = pci_register_driver(&x38_driver);
+ if (pci_rc < 0)
+ goto fail0;
+
+ if (!mci_pdev) {
+ x38_registered = 0;
+ mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_X38_HB, NULL);
+ if (!mci_pdev) {
+ debugf0("x38 pci_get_device fail\n");
+ pci_rc = -ENODEV;
+ goto fail1;
+ }
+
+ pci_rc = x38_init_one(mci_pdev, x38_pci_tbl);
+ if (pci_rc < 0) {
+ debugf0("x38 init fail\n");
+ pci_rc = -ENODEV;
+ goto fail1;
+ }
+ }
+
+ return 0;
+
+fail1:
+ pci_unregister_driver(&x38_driver);
+
+fail0:
+ if (mci_pdev)
+ pci_dev_put(mci_pdev);
+
+ return pci_rc;
+}
+
+static void __exit x38_exit(void)
+{
+ debugf3("MC: %s()\n", __func__);
+
+ pci_unregister_driver(&x38_driver);
+ if (!x38_registered) {
+ x38_remove_one(mci_pdev);
+ pci_dev_put(mci_pdev);
+ }
+}
+
+module_init(x38_init);
+module_exit(x38_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Cluster Computing, Inc. Hitoshi Mitake");
+MODULE_DESCRIPTION("MC support for Intel X38 memory hub controllers");
+
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");