author | Mauro Carvalho Chehab <mchehab@infradead.org> | 2009-01-06 14:43:00 -0800
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-01-06 15:59:30 -0800
commit | 920c8df6ac678fdb8c49a6ce2e47a98e62757d77 (patch)
tree | 3c9b3699310332798b4d928cf2fac09b28df9235 /drivers/edac
parent | 29d6cf26a74b8575a6416b7ad4d369a455f8d009 (diff)
edac: driver for i5400 MCH (Seaburg)
EDAC driver for i5400 MCH (Seaburg)
This driver adds support for the Intel i5400 MCH chipset.
Signed-off-by: Mauro Carvalho Chehab <mchehab@redhat.com>
Signed-off-by: Ben Woodard <woodard@redhat.com>
Cc: Doug Thompson <norsk5@yahoo.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'drivers/edac')
-rw-r--r-- | drivers/edac/Kconfig | 7
-rw-r--r-- | drivers/edac/Makefile | 1
-rw-r--r-- | drivers/edac/i5400_edac.c | 1471
3 files changed, 1479 insertions, 0 deletions
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig index e2667a8..eee47fd 100644 --- a/drivers/edac/Kconfig +++ b/drivers/edac/Kconfig @@ -109,6 +109,13 @@ config EDAC_X38 Support for error detection and correction on the Intel X38 server chipsets. +config EDAC_I5400 + tristate "Intel 5400 (Seaburg) chipsets" + depends on EDAC_MM_EDAC && PCI && X86 + help + Support for error detection and correction the Intel + i5400 MCH chipset (Seaburg). + config EDAC_I82860 tristate "Intel 82860" depends on EDAC_MM_EDAC && PCI && X86_32 diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile index 62c2d9b..b751969 100644 --- a/drivers/edac/Makefile +++ b/drivers/edac/Makefile @@ -20,6 +20,7 @@ endif obj-$(CONFIG_EDAC_AMD76X) += amd76x_edac.o obj-$(CONFIG_EDAC_I5000) += i5000_edac.o obj-$(CONFIG_EDAC_I5100) += i5100_edac.o +obj-$(CONFIG_EDAC_I5400) += i5400_edac.o obj-$(CONFIG_EDAC_E7XXX) += e7xxx_edac.o obj-$(CONFIG_EDAC_E752X) += e752x_edac.o obj-$(CONFIG_EDAC_I82443BXGX) += i82443bxgx_edac.o diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c new file mode 100644 index 0000000..8ec3eca --- /dev/null +++ b/drivers/edac/i5400_edac.c @@ -0,0 +1,1471 @@ +/* + * Intel 5400 class Memory Controllers kernel module + * + * This file may be distributed under the terms of the + * GNU General Public License. + * + * Copyright (c) 2008 by: + * Ben Woodard <woodard@redhat.com> + * Mauro Carvalho Chehab <mchehab@redhat.com> + * + * Red Hat Inc. http://www.redhat.com + * + * Forked and adapted from the i5000_edac driver which was + * written by Douglas Thompson Linux Networx <norsk5@xmission.com> + * + * This module is based on the following document: + * + * Intel 5400 Chipset Memory Controller Hub (MCH) - Datasheet + * http://developer.intel.com/design/chipsets/datashts/313070.htm + * + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/pci_ids.h> +#include <linux/slab.h> +#include <linux/edac.h> +#include <linux/mmzone.h> + +#include "edac_core.h" + +/* + * Alter this version for the I5400 module when modifications are made + */ +#define I5400_REVISION " Ver: 1.0.0 " __DATE__ + +#define EDAC_MOD_STR "i5400_edac" + +#define i5400_printk(level, fmt, arg...) \ + edac_printk(level, "i5400", fmt, ##arg) + +#define i5400_mc_printk(mci, level, fmt, arg...) 
\ + edac_mc_chipset_printk(mci, level, "i5400", fmt, ##arg) + +/* Limits for i5400 */ +#define NUM_MTRS_PER_BRANCH 4 +#define CHANNELS_PER_BRANCH 2 +#define MAX_CHANNELS 4 +#define MAX_DIMMS (MAX_CHANNELS * 4) /* Up to 4 DIMM's per channel */ +#define MAX_CSROWS (MAX_DIMMS * 2) /* max possible csrows per channel */ + +/* Device 16, + * Function 0: System Address + * Function 1: Memory Branch Map, Control, Errors Register + * Function 2: FSB Error Registers + * + * All 3 functions of Device 16 (0,1,2) share the SAME DID + */ +#ifndef PCI_DEVICE_ID_INTEL_5400_ERR +#define PCI_DEVICE_ID_INTEL_5400_ERR 0x4030 /* Device 16 (0,1,2) */ +#define PCI_DEVICE_ID_INTEL_5400_FBD0 0x4035 /* Device 21 (0,1) */ +#define PCI_DEVICE_ID_INTEL_5400_FBD1 0x4036 /* Device 21 (0,1) */ +#endif + + /* OFFSETS for Function 0 */ +#define AMBASE 0x48 /* AMB Mem Mapped Reg Region Base */ +#define MAXCH 0x56 /* Max Channel Number */ +#define MAXDIMMPERCH 0x57 /* Max DIMM PER Channel Number */ + + /* OFFSETS for Function 1 */ +#define TOLM 0x6C +#define REDMEMB 0x7C +#define REC_ECC_LOCATOR_ODD(x) ((x) & 0x3fe00) /* bits [17:9] indicate ODD, [8:0] indicate EVEN */ +#define MIR0 0x80 +#define MIR1 0x84 +#define AMIR0 0x8c +#define AMIR1 0x90 + + /* Fatal error registers */ +#define FERR_FAT_FBD 0x98 /* also called as FERR_FAT_FB_DIMM at datasheet */ +#define FERR_FAT_FBDCHAN (3<<28) /* channel index where the highest-order error occurred */ + +#define NERR_FAT_FBD 0x9c +#define FERR_NF_FBD 0xa0 /* also called as FERR_NFAT_FB_DIMM at datasheet */ + + /* Non-fatal error register */ +#define NERR_NF_FBD 0xa4 + + /* Enable error mask */ +#define EMASK_FBD 0xa8 + +#define ERR0_FBD 0xac +#define ERR1_FBD 0xb0 +#define ERR2_FBD 0xb4 +#define MCERR_FBD 0xb8 + + /* No OFFSETS for Device 16 Function 2 */ + +/* + * Device 21, + * Function 0: Memory Map Branch 0 + * + * Device 22, + * Function 0: Memory Map Branch 1 + */ + + /* OFFSETS for Function 0 */ +#define AMBPRESENT_0 0x64 +#define AMBPRESENT_1 0x66 +#define MTR0 0x80 +#define MTR1 0x82 +#define MTR2 0x84 +#define MTR3 0x86 + + /* OFFSETS for Function 1 */ +#define NRECFGLOG 0x74 +#define RECFGLOG 0x78 +#define NRECMEMA 0xbe +#define NRECMEMB 0xc0 +#define NRECFB_DIMMA 0xc4 +#define NRECFB_DIMMB 0xc8 +#define NRECFB_DIMMC 0xcc +#define NRECFB_DIMMD 0xd0 +#define NRECFB_DIMME 0xd4 +#define NRECFB_DIMMF 0xd8 +#define REDMEMA 0xdC +#define RECMEMA 0xf0 +#define RECMEMB 0xf4 +#define RECFB_DIMMA 0xf8 +#define RECFB_DIMMB 0xec +#define RECFB_DIMMC 0xf0 +#define RECFB_DIMMD 0xf4 +#define RECFB_DIMME 0xf8 +#define RECFB_DIMMF 0xfC + +/* + * Error indicator bits and masks + * Error masks are according with Table 5-17 of i5400 datasheet + */ + +enum error_mask { + EMASK_M1 = 1<<0, /* Memory Write error on non-redundant retry */ + EMASK_M2 = 1<<1, /* Memory or FB-DIMM configuration CRC read error */ + EMASK_M3 = 1<<2, /* Reserved */ + EMASK_M4 = 1<<3, /* Uncorrectable Data ECC on Replay */ + EMASK_M5 = 1<<4, /* Aliased Uncorrectable Non-Mirrored Demand Data ECC */ + EMASK_M6 = 1<<5, /* Unsupported on i5400 */ + EMASK_M7 = 1<<6, /* Aliased Uncorrectable Resilver- or Spare-Copy Data ECC */ + EMASK_M8 = 1<<7, /* Aliased Uncorrectable Patrol Data ECC */ + EMASK_M9 = 1<<8, /* Non-Aliased Uncorrectable Non-Mirrored Demand Data ECC */ + EMASK_M10 = 1<<9, /* Unsupported on i5400 */ + EMASK_M11 = 1<<10, /* Non-Aliased Uncorrectable Resilver- or Spare-Copy Data ECC */ + EMASK_M12 = 1<<11, /* Non-Aliased Uncorrectable Patrol Data ECC */ + EMASK_M13 = 1<<12, /* Memory Write error on first attempt 
*/ + EMASK_M14 = 1<<13, /* FB-DIMM Configuration Write error on first attempt */ + EMASK_M15 = 1<<14, /* Memory or FB-DIMM configuration CRC read error */ + EMASK_M16 = 1<<15, /* Channel Failed-Over Occurred */ + EMASK_M17 = 1<<16, /* Correctable Non-Mirrored Demand Data ECC */ + EMASK_M18 = 1<<17, /* Unsupported on i5400 */ + EMASK_M19 = 1<<18, /* Correctable Resilver- or Spare-Copy Data ECC */ + EMASK_M20 = 1<<19, /* Correctable Patrol Data ECC */ + EMASK_M21 = 1<<20, /* FB-DIMM Northbound parity error on FB-DIMM Sync Status */ + EMASK_M22 = 1<<21, /* SPD protocol Error */ + EMASK_M23 = 1<<22, /* Non-Redundant Fast Reset Timeout */ + EMASK_M24 = 1<<23, /* Refresh error */ + EMASK_M25 = 1<<24, /* Memory Write error on redundant retry */ + EMASK_M26 = 1<<25, /* Redundant Fast Reset Timeout */ + EMASK_M27 = 1<<26, /* Correctable Counter Threshold Exceeded */ + EMASK_M28 = 1<<27, /* DIMM-Spare Copy Completed */ + EMASK_M29 = 1<<28, /* DIMM-Isolation Completed */ +}; + +/* + * Names to translate bit error into something useful + */ +char *error_name[] = { + [0] = "Memory Write error on non-redundant retry", + [1] = "Memory or FB-DIMM configuration CRC read error", + /* Reserved */ + [3] = "Uncorrectable Data ECC on Replay", + [4] = "Aliased Uncorrectable Non-Mirrored Demand Data ECC", + /* Unsupported on i5400 */ + [6] = "Aliased Uncorrectable Resilver- or Spare-Copy Data ECC", + [7] = "Aliased Uncorrectable Patrol Data ECC", + [8] = "Non-Aliased Uncorrectable Non-Mirrored Demand Data ECC", + /* Unsupported */ + [10] = "Non-Aliased Uncorrectable Resilver- or Spare-Copy Data ECC", + [11] = "Non-Aliased Uncorrectable Patrol Data ECC", + [12] = "Memory Write error on first attempt", + [13] = "FB-DIMM Configuration Write error on first attempt", + [14] = "Memory or FB-DIMM configuration CRC read error", + [15] = "Channel Failed-Over Occurred", + [16] = "Correctable Non-Mirrored Demand Data ECC", + /* Unsupported */ + [18] = "Correctable Resilver- or Spare-Copy Data ECC", + [19] = "Correctable Patrol Data ECC", + [20] = "FB-DIMM Northbound parity error on FB-DIMM Sync Status", + [21] = "SPD protocol Error", + [22] = "Non-Redundant Fast Reset Timeout", + [23] = "Refresh error", + [24] = "Memory Write error on redundant retry", + [25] = "Redundant Fast Reset Timeout", + [26] = "Correctable Counter Threshold Exceeded", + [27] = "DIMM-Spare Copy Completed", + [28] = "DIMM-Isolation Completed", +}; + +/* Fatal errors */ +#define ERROR_FAT_MASK (EMASK_M1 | \ + EMASK_M2 | \ + EMASK_M23) + +/* Correctable errors */ +#define ERROR_NF_CORRECTABLE (EMASK_M27 | \ + EMASK_M20 | \ + EMASK_M19 | \ + EMASK_M18 | \ + EMASK_M17 | \ + EMASK_M16) +#define ERROR_NF_DIMM_SPARE (EMASK_M29 | \ + EMASK_M28) +#define ERROR_NF_SPD_PROTOCOL (EMASK_M22) +#define ERROR_NF_NORTH_CRC (EMASK_M21) + +/* Recoverable errors */ +#define ERROR_NF_RECOVERABLE (EMASK_M26 | \ + EMASK_M25 | \ + EMASK_M24 | \ + EMASK_M15 | \ + EMASK_M14 | \ + EMASK_M13 | \ + EMASK_M12 | \ + EMASK_M11 | \ + EMASK_M9 | \ + EMASK_M8 | \ + EMASK_M7 | \ + EMASK_M5) + +/* uncorrectable errors */ +#define ERROR_NF_UNCORRECTABLE (EMASK_M4) + +/* mask to all non-fatal errors */ +#define ERROR_NF_MASK (ERROR_NF_CORRECTABLE | \ + ERROR_NF_UNCORRECTABLE | \ + ERROR_NF_RECOVERABLE | \ + ERROR_NF_DIMM_SPARE | \ + ERROR_NF_SPD_PROTOCOL | \ + ERROR_NF_NORTH_CRC) + +/* + * Define error masks for the several registers + */ + +/* Enable all fatal and non fatal errors */ +#define ENABLE_EMASK_ALL (ERROR_FAT_MASK | ERROR_NF_MASK) + +/* mask for fatal error registers */ 
+#define FERR_FAT_MASK ERROR_FAT_MASK + +/* masks for non-fatal error register */ +#define TO_NF_MASK(a) (((a) & EMASK_M29) | ((a) >> 3)) +#define FROM_NF_FERR(a) (((a) & EMASK_M29) | (((a) << 3) & ((1 << 30)-1))) + +#define FERR_NF_MASK TO_NF_MASK(ERROR_NF_MASK) +#define FERR_NF_CORRECTABLE TO_NF_MASK(ERROR_NF_CORRECTABLE) +#define FERR_NF_DIMM_SPARE TO_NF_MASK(ERROR_NF_DIMM_SPARE) +#define FERR_NF_SPD_PROTOCOL TO_NF_MASK(ERROR_NF_SPD_PROTOCOL) +#define FERR_NF_NORTH_CRC TO_NF_MASK(ERROR_NF_NORTH_CRC) +#define FERR_NF_RECOVERABLE TO_NF_MASK(ERROR_NF_RECOVERABLE) +#define FERR_NF_UNCORRECTABLE TO_NF_MASK(ERROR_NF_UNCORRECTABLE) + +/* Defines to extract the vaious fields from the + * MTRx - Memory Technology Registers + */ +#define MTR_DIMMS_PRESENT(mtr) ((mtr) & (1 << 10)) +#define MTR_DIMMS_ETHROTTLE(mtr) ((mtr) & (1 << 9)) +#define MTR_DRAM_WIDTH(mtr) (((mtr) & (1<< 8)) ? 8 : 4) +#define MTR_DRAM_BANKS(mtr) (((mtr) & (1<< 6)) ? 8 : 4) +#define MTR_DRAM_BANKS_ADDR_BITS(mtr) ((MTR_DRAM_BANKS(mtr) == 8) ? 3 : 2) +#define MTR_DIMM_RANK(mtr) (((mtr) >> 5) & 0x1) +#define MTR_DIMM_RANK_ADDR_BITS(mtr) (MTR_DIMM_RANK(mtr) ? 2 : 1) +#define MTR_DIMM_ROWS(mtr) (((mtr) >> 2) & 0x3) +#define MTR_DIMM_ROWS_ADDR_BITS(mtr) (MTR_DIMM_ROWS(mtr) + 13) +#define MTR_DIMM_COLS(mtr) ((mtr) & 0x3) +#define MTR_DIMM_COLS_ADDR_BITS(mtr) (MTR_DIMM_COLS(mtr) + 10) + +/* This applies to FERR_NF_FB-DIMM as well as FERR_FAT_FB-DIMM */ +static inline int extract_fbdchan_indx(u32 x) +{ + return (x>>28) & 0x3; +} + +#ifdef CONFIG_EDAC_DEBUG +/* MTR NUMROW */ +static char *numrow_toString[] = { + "8,192 - 13 rows", + "16,384 - 14 rows", + "32,768 - 15 rows", + "65,536 - 16 rows" +}; + +/* MTR NUMCOL */ +static char *numcol_toString[] = { + "1,024 - 10 columns", + "2,048 - 11 columns", + "4,096 - 12 columns", + "reserved" +}; +#endif + +/* Device name and register DID (Device ID) */ +struct i5400_dev_info { + const char *ctl_name; /* name for this device */ + u16 fsb_mapping_errors; /* DID for the branchmap,control */ +}; + +/* Table of devices attributes supported by this driver */ +static const struct i5400_dev_info i5400_devs[] = { + { + .ctl_name = "I5400", + .fsb_mapping_errors = PCI_DEVICE_ID_INTEL_5400_ERR, + }, +}; + +struct i5400_dimm_info { + int megabytes; /* size, 0 means not present */ + int dual_rank; +}; + +/* driver private data structure */ +struct i5400_pvt { + struct pci_dev *system_address; /* 16.0 */ + struct pci_dev *branchmap_werrors; /* 16.1 */ + struct pci_dev *fsb_error_regs; /* 16.2 */ + struct pci_dev *branch_0; /* 21.0 */ + struct pci_dev *branch_1; /* 22.0 */ + + u16 tolm; /* top of low memory */ + u64 ambase; /* AMB BAR */ + + u16 mir0, mir1; + + u16 b0_mtr[NUM_MTRS_PER_BRANCH]; /* Memory Technlogy Reg */ + u16 b0_ambpresent0; /* Branch 0, Channel 0 */ + u16 b0_ambpresent1; /* Brnach 0, Channel 1 */ + + u16 b1_mtr[NUM_MTRS_PER_BRANCH]; /* Memory Technlogy Reg */ + u16 b1_ambpresent0; /* Branch 1, Channel 8 */ + u16 b1_ambpresent1; /* Branch 1, Channel 1 */ + + /* DIMM information matrix, allocating architecture maximums */ + struct i5400_dimm_info dimm_info[MAX_CSROWS][MAX_CHANNELS]; + + /* Actual values for this controller */ + int maxch; /* Max channels */ + int maxdimmperch; /* Max DIMMs per channel */ +}; + +/* I5400 MCH error information retrieved from Hardware */ +struct i5400_error_info { + /* These registers are always read from the MC */ + u32 ferr_fat_fbd; /* First Errors Fatal */ + u32 nerr_fat_fbd; /* Next Errors Fatal */ + u32 ferr_nf_fbd; /* First Errors Non-Fatal */ + u32 
nerr_nf_fbd; /* Next Errors Non-Fatal */ + + /* These registers are input ONLY if there was a Recoverable Error */ + u32 redmemb; /* Recoverable Mem Data Error log B */ + u16 recmema; /* Recoverable Mem Error log A */ + u32 recmemb; /* Recoverable Mem Error log B */ + + /* These registers are input ONLY if there was a Non-Recoverable Error */ + u16 nrecmema; /* Non-Recoverable Mem log A */ + u16 nrecmemb; /* Non-Recoverable Mem log B */ + +}; + +/* note that nrec_rdwr changed from NRECMEMA to NRECMEMB between the 5000 and + 5400 better to use an inline function than a macro in this case */ +static inline int nrec_bank(struct i5400_error_info *info) +{ + return ((info->nrecmema) >> 12) & 0x7; +} +static inline int nrec_rank(struct i5400_error_info *info) +{ + return ((info->nrecmema) >> 8) & 0xf; +} +static inline int nrec_buf_id(struct i5400_error_info *info) +{ + return ((info->nrecmema)) & 0xff; +} +static inline int nrec_rdwr(struct i5400_error_info *info) +{ + return (info->nrecmemb) >> 31; +} +/* This applies to both NREC and REC string so it can be used with nrec_rdwr + and rec_rdwr */ +static inline const char *rdwr_str(int rdwr) +{ + return rdwr ? "Write" : "Read"; +} +static inline int nrec_cas(struct i5400_error_info *info) +{ + return ((info->nrecmemb) >> 16) & 0x1fff; +} +static inline int nrec_ras(struct i5400_error_info *info) +{ + return (info->nrecmemb) & 0xffff; +} +static inline int rec_bank(struct i5400_error_info *info) +{ + return ((info->recmema) >> 12) & 0x7; +} +static inline int rec_rank(struct i5400_error_info *info) +{ + return ((info->recmema) >> 8) & 0xf; +} +static inline int rec_rdwr(struct i5400_error_info *info) +{ + return (info->recmemb) >> 31; +} +static inline int rec_cas(struct i5400_error_info *info) +{ + return ((info->recmemb) >> 16) & 0x1fff; +} +static inline int rec_ras(struct i5400_error_info *info) +{ + return (info->recmemb) & 0xffff; +} + +static struct edac_pci_ctl_info *i5400_pci; + +/* + * i5400_get_error_info Retrieve the hardware error information from + * the hardware and cache it in the 'info' + * structure + */ +static void i5400_get_error_info(struct mem_ctl_info *mci, + struct i5400_error_info *info) +{ + struct i5400_pvt *pvt; + u32 value; + + pvt = mci->pvt_info; + + /* read in the 1st FATAL error register */ + pci_read_config_dword(pvt->branchmap_werrors, FERR_FAT_FBD, &value); + + /* Mask only the bits that the doc says are valid + */ + value &= (FERR_FAT_FBDCHAN | FERR_FAT_MASK); + + /* If there is an error, then read in the + NEXT FATAL error register and the Memory Error Log Register A + */ + if (value & FERR_FAT_MASK) { + info->ferr_fat_fbd = value; + + /* harvest the various error data we need */ + pci_read_config_dword(pvt->branchmap_werrors, + NERR_FAT_FBD, &info->nerr_fat_fbd); + pci_read_config_word(pvt->branchmap_werrors, + NRECMEMA, &info->nrecmema); + pci_read_config_word(pvt->branchmap_werrors, + NRECMEMB, &info->nrecmemb); + + /* Clear the error bits, by writing them back */ + pci_write_config_dword(pvt->branchmap_werrors, + FERR_FAT_FBD, value); + } else { + info->ferr_fat_fbd = 0; + info->nerr_fat_fbd = 0; + info->nrecmema = 0; + info->nrecmemb = 0; + } + + /* read in the 1st NON-FATAL error register */ + pci_read_config_dword(pvt->branchmap_werrors, FERR_NF_FBD, &value); + + /* If there is an error, then read in the 1st NON-FATAL error + * register as well */ + if (value & FERR_NF_MASK) { + info->ferr_nf_fbd = value; + + /* harvest the various error data we need */ + pci_read_config_dword(pvt->branchmap_werrors, 
+ NERR_NF_FBD, &info->nerr_nf_fbd); + pci_read_config_word(pvt->branchmap_werrors, + RECMEMA, &info->recmema); + pci_read_config_dword(pvt->branchmap_werrors, + RECMEMB, &info->recmemb); + pci_read_config_dword(pvt->branchmap_werrors, + REDMEMB, &info->redmemb); + + /* Clear the error bits, by writing them back */ + pci_write_config_dword(pvt->branchmap_werrors, + FERR_NF_FBD, value); + } else { + info->ferr_nf_fbd = 0; + info->nerr_nf_fbd = 0; + info->recmema = 0; + info->recmemb = 0; + info->redmemb = 0; + } +} + +/* + * i5400_proccess_non_recoverable_info(struct mem_ctl_info *mci, + * struct i5400_error_info *info, + * int handle_errors); + * + * handle the Intel FATAL and unrecoverable errors, if any + */ +static void i5400_proccess_non_recoverable_info(struct mem_ctl_info *mci, + struct i5400_error_info *info, + unsigned long allErrors) +{ + char msg[EDAC_MC_LABEL_LEN + 1 + 90 + 80]; + int branch; + int channel; + int bank; + int buf_id; + int rank; + int rdwr; + int ras, cas; + int errnum; + char *type = NULL; + + if (!allErrors) + return; /* if no error, return now */ + + if (allErrors & ERROR_FAT_MASK) + type = "FATAL"; + else if (allErrors & FERR_NF_UNCORRECTABLE) + type = "NON-FATAL uncorrected"; + else + type = "NON-FATAL recoverable"; + + /* ONLY ONE of the possible error bits will be set, as per the docs */ + + branch = extract_fbdchan_indx(info->ferr_fat_fbd); + channel = branch; + + /* Use the NON-Recoverable macros to extract data */ + bank = nrec_bank(info); + rank = nrec_rank(info); + buf_id = nrec_buf_id(info); + rdwr = nrec_rdwr(info); + ras = nrec_ras(info); + cas = nrec_cas(info); + + debugf0("\t\tCSROW= %d Channels= %d,%d (Branch= %d " + "DRAM Bank= %d Buffer ID = %d rdwr= %s ras= %d cas= %d)\n", + rank, channel, channel + 1, branch >> 1, bank, + buf_id, rdwr_str(rdwr), ras, cas); + + /* Only 1 bit will be on */ + errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name)); + + /* Form out message */ + snprintf(msg, sizeof(msg), + "%s (Branch=%d DRAM-Bank=%d Buffer ID = %d RDWR=%s RAS=%d CAS=%d " + "%s Err=0x%lx (%s))", + type, branch >> 1, bank, buf_id, rdwr_str(rdwr), ras, cas, type, + allErrors, error_name[errnum]); + + /* Call the helper to output message */ + edac_mc_handle_fbd_ue(mci, rank, channel, channel + 1, msg); +} + +/* + * i5400_process_fatal_error_info(struct mem_ctl_info *mci, + * struct i5400_error_info *info, + * int handle_errors); + * + * handle the Intel NON-FATAL errors, if any + */ +static void i5400_process_nonfatal_error_info(struct mem_ctl_info *mci, + struct i5400_error_info *info) +{ + char msg[EDAC_MC_LABEL_LEN + 1 + 90 + 80]; + unsigned long allErrors; + int branch; + int channel; + int bank; + int rank; + int rdwr; + int ras, cas; + int errnum; + + /* mask off the Error bits that are possible */ + allErrors = FROM_NF_FERR(info->ferr_nf_fbd & FERR_NF_MASK); + if (!allErrors) + return; /* if no error, return now */ + + /* ONLY ONE of the possible error bits will be set, as per the docs */ + + if (allErrors & (ERROR_NF_UNCORRECTABLE | ERROR_NF_RECOVERABLE)) { + i5400_proccess_non_recoverable_info(mci, info, allErrors); + return; + } + + /* Correctable errors */ + if (allErrors & ERROR_NF_CORRECTABLE) { + debugf0("\tCorrected bits= 0x%lx\n", allErrors); + + branch = extract_fbdchan_indx(info->ferr_nf_fbd); + + channel = 0; + if (REC_ECC_LOCATOR_ODD(info->redmemb)) + channel = 1; + + /* Convert channel to be based from zero, instead of + * from branch base of 0 */ + channel += branch; + + bank = rec_bank(info); + rank = rec_rank(info); + 
rdwr = rec_rdwr(info); + ras = rec_ras(info); + cas = rec_cas(info); + + /* Only 1 bit will be on */ + errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name)); + + debugf0("\t\tCSROW= %d Channel= %d (Branch %d " + "DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n", + rank, channel, branch >> 1, bank, + rdwr_str(rdwr), ras, cas); + + /* Form out message */ + snprintf(msg, sizeof(msg), + "Corrected error (Branch=%d DRAM-Bank=%d RDWR=%s RAS=%d " + "CAS=%d, CE Err=0x%lx (%s))", branch >> 1, bank, + rdwr_str(rdwr), ras, cas, allErrors, + error_name[errnum]); + + /* Call the helper to output message */ + edac_mc_handle_fbd_ce(mci, rank, channel, msg); + + return; + } + + /* Miscelaneous errors */ + errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name)); + + branch = extract_fbdchan_indx(info->ferr_nf_fbd); + + i5400_mc_printk(mci, KERN_EMERG, + "Non-Fatal misc error (Branch=%d Err=%#lx (%s))", + branch >> 1, allErrors, error_name[errnum]); +} + +/* + * i5400_process_error_info Process the error info that is + * in the 'info' structure, previously retrieved from hardware + */ +static void i5400_process_error_info(struct mem_ctl_info *mci, + struct i5400_error_info *info) +{ u32 allErrors; + + /* First handle any fatal errors that occurred */ + allErrors = (info->ferr_fat_fbd & FERR_FAT_MASK); + i5400_proccess_non_recoverable_info(mci, info, allErrors); + + /* now handle any non-fatal errors that occurred */ + i5400_process_nonfatal_error_info(mci, info); +} + +/* + * i5400_clear_error Retrieve any error from the hardware + * but do NOT process that error. + * Used for 'clearing' out of previous errors + * Called by the Core module. + */ +static void i5400_clear_error(struct mem_ctl_info *mci) +{ + struct i5400_error_info info; + + i5400_get_error_info(mci, &info); +} + +/* + * i5400_check_error Retrieve and process errors reported by the + * hardware. Called by the Core module. 
+ */ +static void i5400_check_error(struct mem_ctl_info *mci) +{ + struct i5400_error_info info; + debugf4("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__); + i5400_get_error_info(mci, &info); + i5400_process_error_info(mci, &info); +} + +/* + * i5400_put_devices 'put' all the devices that we have + * reserved via 'get' + */ +static void i5400_put_devices(struct mem_ctl_info *mci) +{ + struct i5400_pvt *pvt; + + pvt = mci->pvt_info; + + /* Decrement usage count for devices */ + if (pvt->branch_1) + pci_dev_put(pvt->branch_1); + + if (pvt->branch_0) + pci_dev_put(pvt->branch_0); + + if (pvt->fsb_error_regs) + pci_dev_put(pvt->fsb_error_regs); + + if (pvt->branchmap_werrors) + pci_dev_put(pvt->branchmap_werrors); +} + +/* + * i5400_get_devices Find and perform 'get' operation on the MCH's + * device/functions we want to reference for this driver + * + * Need to 'get' device 16 func 1 and func 2 + */ +static int i5400_get_devices(struct mem_ctl_info *mci, int dev_idx) +{ + struct i5400_pvt *pvt; + struct pci_dev *pdev; + + pvt = mci->pvt_info; + pvt->branchmap_werrors = NULL; + pvt->fsb_error_regs = NULL; + pvt->branch_0 = NULL; + pvt->branch_1 = NULL; + + /* Attempt to 'get' the MCH register we want */ + pdev = NULL; + while (!pvt->branchmap_werrors || !pvt->fsb_error_regs) { + pdev = pci_get_device(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_5400_ERR, pdev); + if (!pdev) { + /* End of list, leave */ + i5400_printk(KERN_ERR, + "'system address,Process Bus' " + "device not found:" + "vendor 0x%x device 0x%x ERR funcs " + "(broken BIOS?)\n", + PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_5400_ERR); + goto error; + } + + /* Store device 16 funcs 1 and 2 */ + switch (PCI_FUNC(pdev->devfn)) { + case 1: + pvt->branchmap_werrors = pdev; + break; + case 2: + pvt->fsb_error_regs = pdev; + break; + } + } + + debugf1("System Address, processor bus- PCI Bus ID: %s %x:%x\n", + pci_name(pvt->system_address), + pvt->system_address->vendor, pvt->system_address->device); + debugf1("Branchmap, control and errors - PCI Bus ID: %s %x:%x\n", + pci_name(pvt->branchmap_werrors), + pvt->branchmap_werrors->vendor, pvt->branchmap_werrors->device); + debugf1("FSB Error Regs - PCI Bus ID: %s %x:%x\n", + pci_name(pvt->fsb_error_regs), + pvt->fsb_error_regs->vendor, pvt->fsb_error_regs->device); + + pvt->branch_0 = pci_get_device(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_5400_FBD0, NULL); + if (!pvt->branch_0) { + i5400_printk(KERN_ERR, + "MC: 'BRANCH 0' device not found:" + "vendor 0x%x device 0x%x Func 0 (broken BIOS?)\n", + PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_FBD0); + goto error; + } + + /* If this device claims to have more than 2 channels then + * fetch Branch 1's information + */ + if (pvt->maxch < CHANNELS_PER_BRANCH) + return 0; + + pvt->branch_1 = pci_get_device(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_5400_FBD1, NULL); + if (!pvt->branch_1) { + i5400_printk(KERN_ERR, + "MC: 'BRANCH 1' device not found:" + "vendor 0x%x device 0x%x Func 0 " + "(broken BIOS?)\n", + PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_5400_FBD1); + goto error; + } + + return 0; + +error: + i5400_put_devices(mci); + return -ENODEV; +} + +/* + * determine_amb_present + * + * the information is contained in NUM_MTRS_PER_BRANCH different registers + * determining which of the NUM_MTRS_PER_BRANCH requires knowing + * which channel is in question + * + * 2 branches, each with 2 channels + * b0_ambpresent0 for channel '0' + * b0_ambpresent1 for channel '1' + * b1_ambpresent0 for channel '2' + * b1_ambpresent1 for channel '3' + */ 
+static int determine_amb_present_reg(struct i5400_pvt *pvt, int channel) +{ + int amb_present; + + if (channel < CHANNELS_PER_BRANCH) { + if (channel & 0x1) + amb_present = pvt->b0_ambpresent1; + else + amb_present = pvt->b0_ambpresent0; + } else { + if (channel & 0x1) + amb_present = pvt->b1_ambpresent1; + else + amb_present = pvt->b1_ambpresent0; + } + + return amb_present; +} + +/* + * determine_mtr(pvt, csrow, channel) + * + * return the proper MTR register as determine by the csrow and channel desired + */ +static int determine_mtr(struct i5400_pvt *pvt, int csrow, int channel) +{ + int mtr; + int n; + + /* There is one MTR for each slot pair of FB-DIMMs, + Each slot may have one or two ranks (2 csrows), + Each slot pair may be at branch 0 or branch 1. + So, csrow should be divided by eight + */ + n = csrow >> 3; + + if (n >= NUM_MTRS_PER_BRANCH) { + debugf0("ERROR: trying to access an invalid csrow: %d\n", csrow); + return 0; + } + + if (channel < CHANNELS_PER_BRANCH) + mtr = pvt->b0_mtr[n]; + else + mtr = pvt->b1_mtr[n]; + + return mtr; +} + +/* + */ +static void decode_mtr(int slot_row, u16 mtr) +{ + int ans; + + ans = MTR_DIMMS_PRESENT(mtr); + + debugf2("\tMTR%d=0x%x: DIMMs are %s\n", slot_row, mtr, + ans ? "Present" : "NOT Present"); + if (!ans) + return; + + debugf2("\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr)); + + debugf2("\t\tELECTRICAL THROTTLING is %s\n", + MTR_DIMMS_ETHROTTLE(mtr) ? "enabled": "disabled"); + + debugf2("\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr)); + debugf2("\t\tNUMRANK: %s\n", MTR_DIMM_RANK(mtr) ? "double" : "single"); + debugf2("\t\tNUMROW: %s\n", numrow_toString[MTR_DIMM_ROWS(mtr)]); + debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]); +} + +static void handle_channel(struct i5400_pvt *pvt, int csrow, int channel, + struct i5400_dimm_info *dinfo) +{ + int mtr; + int amb_present_reg; + int addrBits; + + mtr = determine_mtr(pvt, csrow, channel); + if (MTR_DIMMS_PRESENT(mtr)) { + amb_present_reg = determine_amb_present_reg(pvt, channel); + + /* Determine if there is a DIMM present in this DIMM slot */ + if (amb_present_reg & (1 << (csrow >> 1))) { + dinfo->dual_rank = MTR_DIMM_RANK(mtr); + + if (!((dinfo->dual_rank == 0) && + ((csrow & 0x1) == 0x1))) { + /* Start with the number of bits for a Bank + * on the DRAM */ + addrBits = MTR_DRAM_BANKS_ADDR_BITS(mtr); + /* Add thenumber of ROW bits */ + addrBits += MTR_DIMM_ROWS_ADDR_BITS(mtr); + /* add the number of COLUMN bits */ + addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr); + + addrBits += 6; /* add 64 bits per DIMM */ + addrBits -= 20; /* divide by 2^^20 */ + addrBits -= 3; /* 8 bits per bytes */ + + dinfo->megabytes = 1 << addrBits; + } + } + } +} + +/* + * calculate_dimm_size + * + * also will output a DIMM matrix map, if debug is enabled, for viewing + * how the DIMMs are populated + */ +static void calculate_dimm_size(struct i5400_pvt *pvt) +{ + struct i5400_dimm_info *dinfo; + int csrow, max_csrows; + char *p, *mem_buffer; + int space, n; + int channel; + + /* ================= Generate some debug output ================= */ + space = PAGE_SIZE; + mem_buffer = p = kmalloc(space, GFP_KERNEL); + if (p == NULL) { + i5400_printk(KERN_ERR, "MC: %s:%s() kmalloc() failed\n", + __FILE__, __func__); + return; + } + + /* Scan all the actual CSROWS (which is # of DIMMS * 2) + * and calculate the information for each DIMM + * Start with the highest csrow first, to display it first + * and work toward the 0th csrow + */ + max_csrows = pvt->maxdimmperch * 2; + for (csrow = max_csrows - 1; csrow >= 0; 
csrow--) { + + /* on an odd csrow, first output a 'boundary' marker, + * then reset the message buffer */ + if (csrow & 0x1) { + n = snprintf(p, space, "---------------------------" + "--------------------------------"); + p += n; + space -= n; + debugf2("%s\n", mem_buffer); + p = mem_buffer; + space = PAGE_SIZE; + } + n = snprintf(p, space, "csrow %2d ", csrow); + p += n; + space -= n; + + for (channel = 0; channel < pvt->maxch; channel++) { + dinfo = &pvt->dimm_info[csrow][channel]; + handle_channel(pvt, csrow, channel, dinfo); + n = snprintf(p, space, "%4d MB | ", dinfo->megabytes); + p += n; + space -= n; + } + debugf2("%s\n", mem_buffer); + p = mem_buffer; + space = PAGE_SIZE; + } + + /* Output the last bottom 'boundary' marker */ + n = snprintf(p, space, "---------------------------" + "--------------------------------"); + p += n; + space -= n; + debugf2("%s\n", mem_buffer); + p = mem_buffer; + space = PAGE_SIZE; + + /* now output the 'channel' labels */ + n = snprintf(p, space, " "); + p += n; + space -= n; + for (channel = 0; channel < pvt->maxch; channel++) { + n = snprintf(p, space, "channel %d | ", channel); + p += n; + space -= n; + } + + /* output the last message and free buffer */ + debugf2("%s\n", mem_buffer); + kfree(mem_buffer); +} + +/* + * i5400_get_mc_regs read in the necessary registers and + * cache locally + * + * Fills in the private data members + */ +static void i5400_get_mc_regs(struct mem_ctl_info *mci) +{ + struct i5400_pvt *pvt; + u32 actual_tolm; + u16 limit; + int slot_row; + int maxch; + int maxdimmperch; + int way0, way1; + + pvt = mci->pvt_info; + + pci_read_config_dword(pvt->system_address, AMBASE, + (u32 *) &pvt->ambase); + pci_read_config_dword(pvt->system_address, AMBASE + sizeof(u32), + ((u32 *) &pvt->ambase) + sizeof(u32)); + + maxdimmperch = pvt->maxdimmperch; + maxch = pvt->maxch; + + debugf2("AMBASE= 0x%lx MAXCH= %d MAX-DIMM-Per-CH= %d\n", + (long unsigned int)pvt->ambase, pvt->maxch, pvt->maxdimmperch); + + /* Get the Branch Map regs */ + pci_read_config_word(pvt->branchmap_werrors, TOLM, &pvt->tolm); + pvt->tolm >>= 12; + debugf2("\nTOLM (number of 256M regions) =%u (0x%x)\n", pvt->tolm, + pvt->tolm); + + actual_tolm = (u32) ((1000l * pvt->tolm) >> (30 - 28)); + debugf2("Actual TOLM byte addr=%u.%03u GB (0x%x)\n", + actual_tolm/1000, actual_tolm % 1000, pvt->tolm << 28); + + pci_read_config_word(pvt->branchmap_werrors, MIR0, &pvt->mir0); + pci_read_config_word(pvt->branchmap_werrors, MIR1, &pvt->mir1); + + /* Get the MIR[0-1] regs */ + limit = (pvt->mir0 >> 4) & 0x0fff; + way0 = pvt->mir0 & 0x1; + way1 = pvt->mir0 & 0x2; + debugf2("MIR0: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0); + limit = (pvt->mir1 >> 4) & 0xfff; + way0 = pvt->mir1 & 0x1; + way1 = pvt->mir1 & 0x2; + debugf2("MIR1: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0); + + /* Get the set of MTR[0-3] regs by each branch */ + for (slot_row = 0; slot_row < NUM_MTRS_PER_BRANCH; slot_row++) { + int where = MTR0 + (slot_row * sizeof(u32)); + + /* Branch 0 set of MTR registers */ + pci_read_config_word(pvt->branch_0, where, + &pvt->b0_mtr[slot_row]); + + debugf2("MTR%d where=0x%x B0 value=0x%x\n", slot_row, where, + pvt->b0_mtr[slot_row]); + + if (pvt->maxch < CHANNELS_PER_BRANCH) { + pvt->b1_mtr[slot_row] = 0; + continue; + } + + /* Branch 1 set of MTR registers */ + pci_read_config_word(pvt->branch_1, where, + &pvt->b1_mtr[slot_row]); + debugf2("MTR%d where=0x%x B1 value=0x%x\n", slot_row, where, + pvt->b1_mtr[slot_row]); + } + + /* Read and dump branch 0's MTRs */ + 
debugf2("\nMemory Technology Registers:\n"); + debugf2(" Branch 0:\n"); + for (slot_row = 0; slot_row < NUM_MTRS_PER_BRANCH; slot_row++) + decode_mtr(slot_row, pvt->b0_mtr[slot_row]); + + pci_read_config_word(pvt->branch_0, AMBPRESENT_0, + &pvt->b0_ambpresent0); + debugf2("\t\tAMB-Branch 0-present0 0x%x:\n", pvt->b0_ambpresent0); + pci_read_config_word(pvt->branch_0, AMBPRESENT_1, + &pvt->b0_ambpresent1); + debugf2("\t\tAMB-Branch 0-present1 0x%x:\n", pvt->b0_ambpresent1); + + /* Only if we have 2 branchs (4 channels) */ + if (pvt->maxch < CHANNELS_PER_BRANCH) { + pvt->b1_ambpresent0 = 0; + pvt->b1_ambpresent1 = 0; + } else { + /* Read and dump branch 1's MTRs */ + debugf2(" Branch 1:\n"); + for (slot_row = 0; slot_row < NUM_MTRS_PER_BRANCH; slot_row++) + decode_mtr(slot_row, pvt->b1_mtr[slot_row]); + + pci_read_config_word(pvt->branch_1, AMBPRESENT_0, + &pvt->b1_ambpresent0); + debugf2("\t\tAMB-Branch 1-present0 0x%x:\n", + pvt->b1_ambpresent0); + pci_read_config_word(pvt->branch_1, AMBPRESENT_1, + &pvt->b1_ambpresent1); + debugf2("\t\tAMB-Branch 1-present1 0x%x:\n", + pvt->b1_ambpresent1); + } + + /* Go and determine the size of each DIMM and place in an + * orderly matrix */ + calculate_dimm_size(pvt); +} + +/* + * i5400_init_csrows Initialize the 'csrows' table within + * the mci control structure with the + * addressing of memory. + * + * return: + * 0 success + * 1 no actual memory found on this MC + */ +static int i5400_init_csrows(struct mem_ctl_info *mci) +{ + struct i5400_pvt *pvt; + struct csrow_info *p_csrow; + int empty, channel_count; + int max_csrows; + int mtr; + int csrow_megs; + int channel; + int csrow; + + pvt = mci->pvt_info; + + channel_count = pvt->maxch; + max_csrows = pvt->maxdimmperch * 2; + + empty = 1; /* Assume NO memory */ + + for (csrow = 0; csrow < max_csrows; csrow++) { + p_csrow = &mci->csrows[csrow]; + + p_csrow->csrow_idx = csrow; + + /* use branch 0 for the basis */ + mtr = determine_mtr(pvt, csrow, 0); + + /* if no DIMMS on this row, continue */ + if (!MTR_DIMMS_PRESENT(mtr)) + continue; + + /* FAKE OUT VALUES, FIXME */ + p_csrow->first_page = 0 + csrow * 20; + p_csrow->last_page = 9 + csrow * 20; + p_csrow->page_mask = 0xFFF; + + p_csrow->grain = 8; + + csrow_megs = 0; + for (channel = 0; channel < pvt->maxch; channel++) + csrow_megs += pvt->dimm_info[csrow][channel].megabytes; + + p_csrow->nr_pages = csrow_megs << 8; + + /* Assume DDR2 for now */ + p_csrow->mtype = MEM_FB_DDR2; + + /* ask what device type on this row */ + if (MTR_DRAM_WIDTH(mtr)) + p_csrow->dtype = DEV_X8; + else + p_csrow->dtype = DEV_X4; + + p_csrow->edac_mode = EDAC_S8ECD8ED; + + empty = 0; + } + + return empty; +} + +/* + * i5400_enable_error_reporting + * Turn on the memory reporting features of the hardware + */ +static void i5400_enable_error_reporting(struct mem_ctl_info *mci) +{ + struct i5400_pvt *pvt; + u32 fbd_error_mask; + + pvt = mci->pvt_info; + + /* Read the FBD Error Mask Register */ + pci_read_config_dword(pvt->branchmap_werrors, EMASK_FBD, + &fbd_error_mask); + + /* Enable with a '0' */ + fbd_error_mask &= ~(ENABLE_EMASK_ALL); + + pci_write_config_dword(pvt->branchmap_werrors, EMASK_FBD, + fbd_error_mask); +} + +/* + * i5400_get_dimm_and_channel_counts(pdev, &num_csrows, &num_channels) + * + * ask the device how many channels are present and how many CSROWS + * as well + */ +static void i5400_get_dimm_and_channel_counts(struct pci_dev *pdev, + int *num_dimms_per_channel, + int *num_channels) +{ + u8 value; + + /* Need to retrieve just how many channels and dimms 
per channel are + * supported on this memory controller + */ + pci_read_config_byte(pdev, MAXDIMMPERCH, &value); + *num_dimms_per_channel = (int)value * 2; + + pci_read_config_byte(pdev, MAXCH, &value); + *num_channels = (int)value; +} + +/* + * i5400_probe1 Probe for ONE instance of device to see if it is + * present. + * return: + * 0 for FOUND a device + * < 0 for error code + */ +static int i5400_probe1(struct pci_dev *pdev, int dev_idx) +{ + struct mem_ctl_info *mci; + struct i5400_pvt *pvt; + int num_channels; + int num_dimms_per_channel; + int num_csrows; + + debugf0("MC: " __FILE__ ": %s(), pdev bus %u dev=0x%x fn=0x%x\n", + __func__, + pdev->bus->number, + PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); + + /* We only are looking for func 0 of the set */ + if (PCI_FUNC(pdev->devfn) != 0) + return -ENODEV; + + /* Ask the devices for the number of CSROWS and CHANNELS so + * that we can calculate the memory resources, etc + * + * The Chipset will report what it can handle which will be greater + * or equal to what the motherboard manufacturer will implement. + * + * As we don't have a motherboard identification routine to determine + * actual number of slots/dimms per channel, we thus utilize the + * resource as specified by the chipset. Thus, we might have + * have more DIMMs per channel than actually on the mobo, but this + * allows the driver to support upto the chipset max, without + * some fancy mobo determination. + */ + i5400_get_dimm_and_channel_counts(pdev, &num_dimms_per_channel, + &num_channels); + num_csrows = num_dimms_per_channel * 2; + + debugf0("MC: %s(): Number of - Channels= %d DIMMS= %d CSROWS= %d\n", + __func__, num_channels, num_dimms_per_channel, num_csrows); + + /* allocate a new MC control structure */ + mci = edac_mc_alloc(sizeof(*pvt), num_csrows, num_channels, 0); + + if (mci == NULL) + return -ENOMEM; + + debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci); + + mci->dev = &pdev->dev; /* record ptr to the generic device */ + + pvt = mci->pvt_info; + pvt->system_address = pdev; /* Record this device in our private */ + pvt->maxch = num_channels; + pvt->maxdimmperch = num_dimms_per_channel; + + /* 'get' the pci devices we want to reserve for our use */ + if (i5400_get_devices(mci, dev_idx)) + goto fail0; + + /* Time to get serious */ + i5400_get_mc_regs(mci); /* retrieve the hardware registers */ + + mci->mc_idx = 0; + mci->mtype_cap = MEM_FLAG_FB_DDR2; + mci->edac_ctl_cap = EDAC_FLAG_NONE; + mci->edac_cap = EDAC_FLAG_NONE; + mci->mod_name = "i5400_edac.c"; + mci->mod_ver = I5400_REVISION; + mci->ctl_name = i5400_devs[dev_idx].ctl_name; + mci->dev_name = pci_name(pdev); + mci->ctl_page_to_phys = NULL; + + /* Set the function pointer to an actual operation function */ + mci->edac_check = i5400_check_error; + + /* initialize the MC control structure 'csrows' table + * with the mapping and control information */ + if (i5400_init_csrows(mci)) { + debugf0("MC: Setting mci->edac_cap to EDAC_FLAG_NONE\n" + " because i5400_init_csrows() returned nonzero " + "value\n"); + mci->edac_cap = EDAC_FLAG_NONE; /* no csrows found */ + } else { + debugf1("MC: Enable error reporting now\n"); + i5400_enable_error_reporting(mci); + } + + /* add this new MC control structure to EDAC's list of MCs */ + if (edac_mc_add_mc(mci)) { + debugf0("MC: " __FILE__ + ": %s(): failed edac_mc_add_mc()\n", __func__); + /* FIXME: perhaps some code should go here that disables error + * reporting if we just enabled it + */ + goto fail1; + } + + i5400_clear_error(mci); + + /* allocating 
generic PCI control info */ + i5400_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR); + if (!i5400_pci) { + printk(KERN_WARNING + "%s(): Unable to create PCI control\n", + __func__); + printk(KERN_WARNING + "%s(): PCI error report via EDAC not setup\n", + __func__); + } + + return 0; + + /* Error exit unwinding stack */ +fail1: + + i5400_put_devices(mci); + +fail0: + edac_mc_free(mci); + return -ENODEV; +} + +/* + * i5400_init_one constructor for one instance of device + * + * returns: + * negative on error + * count (>= 0) + */ +static int __devinit i5400_init_one(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + int rc; + + debugf0("MC: " __FILE__ ": %s()\n", __func__); + + /* wake up device */ + rc = pci_enable_device(pdev); + if (rc == -EIO) + return rc; + + /* now probe and enable the device */ + return i5400_probe1(pdev, id->driver_data); +} + +/* + * i5400_remove_one destructor for one instance of device + * + */ +static void __devexit i5400_remove_one(struct pci_dev *pdev) +{ + struct mem_ctl_info *mci; + + debugf0(__FILE__ ": %s()\n", __func__); + + if (i5400_pci) + edac_pci_release_generic_ctl(i5400_pci); + + mci = edac_mc_del_mc(&pdev->dev); + if (!mci) + return; + + /* retrieve references to resources, and free those resources */ + i5400_put_devices(mci); + + edac_mc_free(mci); +} + +/* + * pci_device_id table for which devices we are looking for + * + * The "E500P" device is the first device supported. + */ +static const struct pci_device_id i5400_pci_tbl[] __devinitdata = { + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)}, + {0,} /* 0 terminated list. */ +}; + +MODULE_DEVICE_TABLE(pci, i5400_pci_tbl); + +/* + * i5400_driver pci_driver structure for this module + * + */ +static struct pci_driver i5400_driver = { + .name = KBUILD_BASENAME, + .probe = i5400_init_one, + .remove = __devexit_p(i5400_remove_one), + .id_table = i5400_pci_tbl, +}; + +/* + * i5400_init Module entry function + * Try to initialize this module for its devices + */ +static int __init i5400_init(void) +{ + int pci_rc; + + debugf2("MC: " __FILE__ ": %s()\n", __func__); + + /* Ensure that the OPSTATE is set correctly for POLL or NMI */ + opstate_init(); + + pci_rc = pci_register_driver(&i5400_driver); + + return (pci_rc < 0) ? pci_rc : 0; +} + +/* + * i5400_exit() Module exit function + * Unregister the driver + */ +static void __exit i5400_exit(void) +{ + debugf2("MC: " __FILE__ ": %s()\n", __func__); + pci_unregister_driver(&i5400_driver); +} + +module_init(i5400_init); +module_exit(i5400_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Ben Woodard <woodard@redhat.com> Red Hat Inc. (http://www.redhat.com)"); +MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com> Red Hat Inc. (http://www.redhat.com)"); +MODULE_DESCRIPTION("MC Driver for Intel I5400 memory controllers - " I5400_REVISION); + +module_param(edac_op_state, int, 0444); +MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); |
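
For reference, a minimal sketch of how this driver would be enabled on a kernel tree with this patch applied. The config fragment below is an assumption about a typical setup, not part of the commit; the option names come from the Kconfig entry and Makefile rule added above:

    # .config fragment (assumes the EDAC core options already exist in the tree)
    CONFIG_EDAC=y
    CONFIG_EDAC_MM_EDAC=m
    CONFIG_EDAC_I5400=m

The resulting i5400_edac module can then be loaded with modprobe, optionally passing edac_op_state=0 (poll) or edac_op_state=1 (NMI) at load time, as documented by the module_param/MODULE_PARM_DESC lines at the end of the new file.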