Diffstat (limited to 'drivers/edac')
-rw-r--r--  drivers/edac/Kconfig            |   7
-rw-r--r--  drivers/edac/Makefile           |   1
-rw-r--r--  drivers/edac/cell_edac.c        |   5
-rw-r--r--  drivers/edac/i5000_edac.c       | 200
-rw-r--r--  drivers/edac/i82443bxgx_edac.c  |  63
-rw-r--r--  drivers/edac/mpc85xx_edac.c     |  33
-rw-r--r--  drivers/edac/x38_edac.c         | 524
7 files changed, 760 insertions, 73 deletions
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 5a11e3cbcae..e0dbd388757 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -102,6 +102,13 @@ config EDAC_I3000
Support for error detection and correction on the Intel
3000 and 3010 server chipsets.
+config EDAC_X38
+ tristate "Intel X38"
+ depends on EDAC_MM_EDAC && PCI && X86
+ help
+ Support for error detection and correction on the Intel
+ X38 server chipsets.
+
config EDAC_I82860
tristate "Intel 82860"
depends on EDAC_MM_EDAC && PCI && X86_32
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index e5e9104b552..62c2d9bad8d 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_EDAC_I82443BXGX) += i82443bxgx_edac.o
obj-$(CONFIG_EDAC_I82875P) += i82875p_edac.o
obj-$(CONFIG_EDAC_I82975X) += i82975x_edac.o
obj-$(CONFIG_EDAC_I3000) += i3000_edac.o
+obj-$(CONFIG_EDAC_X38) += x38_edac.o
obj-$(CONFIG_EDAC_I82860) += i82860_edac.o
obj-$(CONFIG_EDAC_R82600) += r82600_edac.o
obj-$(CONFIG_EDAC_PASEMI) += pasemi_edac.o
diff --git a/drivers/edac/cell_edac.c b/drivers/edac/cell_edac.c
index 0e024fe2d8c..cd2e3b8087e 100644
--- a/drivers/edac/cell_edac.c
+++ b/drivers/edac/cell_edac.c
@@ -9,6 +9,7 @@
*/
#undef DEBUG
+#include <linux/edac.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
@@ -142,7 +143,7 @@ static void __devinit cell_edac_init_csrows(struct mem_ctl_info *mci)
csrow->nr_pages = (r.end - r.start + 1) >> PAGE_SHIFT;
csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
csrow->mtype = MEM_XDR;
- csrow->edac_mode = EDAC_FLAG_EC | EDAC_FLAG_SECDED;
+ csrow->edac_mode = EDAC_SECDED;
dev_dbg(mci->dev,
"Initialized on node %d, chanmask=0x%x,"
" first_page=0x%lx, nr_pages=0x%x\n",
@@ -164,6 +165,8 @@ static int __devinit cell_edac_probe(struct platform_device *pdev)
if (regs == NULL)
return -ENODEV;
+ edac_op_state = EDAC_OPSTATE_POLL;
+
/* Get channel population */
reg = in_be64(&regs->mic_mnt_cfg);
dev_dbg(&pdev->dev, "MIC_MNT_CFG = 0x%016lx\n", reg);
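[Editor's note] The one-line edac_mode fix above turns on a subtle distinction in the EDAC core: EDAC_FLAG_EC and EDAC_FLAG_SECDED are capability bitmasks meant for the controller-wide fields, while csrow->edac_mode expects a single enum edac_type value. A minimal sketch of the intended usage, assuming it sits in drivers/edac so it can see edac_core.h:

#include <linux/edac.h>
#include "edac_core.h"

/*
 * Sketch only: the mci-wide capability fields take EDAC_FLAG_* bitmasks,
 * while csrow->edac_mode takes one enum edac_type value such as
 * EDAC_SECDED -- mixing the two up is what the cell_edac hunk corrects.
 */
static void example_fill_ecc_fields(struct mem_ctl_info *mci,
				    struct csrow_info *csrow)
{
	mci->edac_ctl_cap = EDAC_FLAG_EC | EDAC_FLAG_SECDED;	/* what the hardware can do */
	mci->edac_cap     = EDAC_FLAG_SECDED;			/* what is currently enabled */
	csrow->edac_mode  = EDAC_SECDED;			/* mode of this particular csrow */
}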
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index 4a16b5b61cf..d335086f4a2 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -119,6 +119,7 @@
#define FERR_NF_UNCORRECTABLE (FERR_NF_M12ERR | \
FERR_NF_M11ERR | \
FERR_NF_M10ERR | \
+ FERR_NF_M9ERR | \
FERR_NF_M8ERR | \
FERR_NF_M7ERR | \
FERR_NF_M6ERR | \
@@ -301,6 +302,9 @@ static char *numcol_toString[] = {
};
#endif
+/* enables the report of miscellaneous messages as CE errors - default off */
+static int misc_messages;
+
/* Enumeration of supported devices */
enum i5000_chips {
I5000P = 0,
@@ -466,7 +470,8 @@ static void i5000_process_fatal_error_info(struct mem_ctl_info *mci,
struct i5000_error_info *info,
int handle_errors)
{
- char msg[EDAC_MC_LABEL_LEN + 1 + 90];
+ char msg[EDAC_MC_LABEL_LEN + 1 + 160];
+ char *specific = NULL;
u32 allErrors;
int branch;
int channel;
@@ -480,11 +485,6 @@ static void i5000_process_fatal_error_info(struct mem_ctl_info *mci,
if (!allErrors)
return; /* if no error, return now */
- /* ONLY ONE of the possible error bits will be set, as per the docs */
- i5000_mc_printk(mci, KERN_ERR,
- "FATAL ERRORS Found!!! 1st FATAL Err Reg= 0x%x\n",
- allErrors);
-
branch = EXTRACT_FBDCHAN_INDX(info->ferr_fat_fbd);
channel = branch;
@@ -501,28 +501,42 @@ static void i5000_process_fatal_error_info(struct mem_ctl_info *mci,
rdwr ? "Write" : "Read", ras, cas);
/* Only 1 bit will be on */
- if (allErrors & FERR_FAT_M1ERR) {
- i5000_mc_printk(mci, KERN_ERR,
- "Alert on non-redundant retry or fast "
- "reset timeout\n");
-
- } else if (allErrors & FERR_FAT_M2ERR) {
- i5000_mc_printk(mci, KERN_ERR,
- "Northbound CRC error on non-redundant "
- "retry\n");
-
- } else if (allErrors & FERR_FAT_M3ERR) {
- i5000_mc_printk(mci, KERN_ERR,
- ">Tmid Thermal event with intelligent "
- "throttling disabled\n");
+ switch (allErrors) {
+ case FERR_FAT_M1ERR:
+ specific = "Alert on non-redundant retry or fast "
+ "reset timeout";
+ break;
+ case FERR_FAT_M2ERR:
+ specific = "Northbound CRC error on non-redundant "
+ "retry";
+ break;
+ case FERR_FAT_M3ERR:
+ {
+ static int done;
+
+ /*
+ * This error is generated to inform that the intelligent
+ * throttling is disabled and the temperature passed the
+ * specified middle point. Since this is something the BIOS
+ * should take care of, we'll warn only once to avoid
+ * worthlessly flooding the log.
+ */
+ if (done)
+ return;
+ done++;
+
+ specific = ">Tmid Thermal event with intelligent "
+ "throttling disabled";
+ }
+ break;
}
/* Form out message */
snprintf(msg, sizeof(msg),
"(Branch=%d DRAM-Bank=%d RDWR=%s RAS=%d CAS=%d "
- "FATAL Err=0x%x)",
+ "FATAL Err=0x%x (%s))",
branch >> 1, bank, rdwr ? "Write" : "Read", ras, cas,
- allErrors);
+ allErrors, specific);
/* Call the helper to output message */
edac_mc_handle_fbd_ue(mci, rank, channel, channel + 1, msg);
@@ -539,7 +553,8 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
struct i5000_error_info *info,
int handle_errors)
{
- char msg[EDAC_MC_LABEL_LEN + 1 + 90];
+ char msg[EDAC_MC_LABEL_LEN + 1 + 170];
+ char *specific = NULL;
u32 allErrors;
u32 ue_errors;
u32 ce_errors;
@@ -557,10 +572,6 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
return; /* if no error, return now */
/* ONLY ONE of the possible error bits will be set, as per the docs */
- i5000_mc_printk(mci, KERN_WARNING,
- "NON-FATAL ERRORS Found!!! 1st NON-FATAL Err "
- "Reg= 0x%x\n", allErrors);
-
ue_errors = allErrors & FERR_NF_UNCORRECTABLE;
if (ue_errors) {
debugf0("\tUncorrected bits= 0x%x\n", ue_errors);
@@ -579,12 +590,47 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
rank, channel, channel + 1, branch >> 1, bank,
rdwr ? "Write" : "Read", ras, cas);
+ switch (ue_errors) {
+ case FERR_NF_M12ERR:
+ specific = "Non-Aliased Uncorrectable Patrol Data ECC";
+ break;
+ case FERR_NF_M11ERR:
+ specific = "Non-Aliased Uncorrectable Spare-Copy "
+ "Data ECC";
+ break;
+ case FERR_NF_M10ERR:
+ specific = "Non-Aliased Uncorrectable Mirrored Demand "
+ "Data ECC";
+ break;
+ case FERR_NF_M9ERR:
+ specific = "Non-Aliased Uncorrectable Non-Mirrored "
+ "Demand Data ECC";
+ break;
+ case FERR_NF_M8ERR:
+ specific = "Aliased Uncorrectable Patrol Data ECC";
+ break;
+ case FERR_NF_M7ERR:
+ specific = "Aliased Uncorrectable Spare-Copy Data ECC";
+ break;
+ case FERR_NF_M6ERR:
+ specific = "Aliased Uncorrectable Mirrored Demand "
+ "Data ECC";
+ break;
+ case FERR_NF_M5ERR:
+ specific = "Aliased Uncorrectable Non-Mirrored Demand "
+ "Data ECC";
+ break;
+ case FERR_NF_M4ERR:
+ specific = "Uncorrectable Data ECC on Replay";
+ break;
+ }
+
/* Form out message */
snprintf(msg, sizeof(msg),
"(Branch=%d DRAM-Bank=%d RDWR=%s RAS=%d "
- "CAS=%d, UE Err=0x%x)",
+ "CAS=%d, UE Err=0x%x (%s))",
branch >> 1, bank, rdwr ? "Write" : "Read", ras, cas,
- ue_errors);
+ ue_errors, specific);
/* Call the helper to output message */
edac_mc_handle_fbd_ue(mci, rank, channel, channel + 1, msg);
@@ -616,51 +662,74 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
rank, channel, branch >> 1, bank,
rdwr ? "Write" : "Read", ras, cas);
+ switch (ce_errors) {
+ case FERR_NF_M17ERR:
+ specific = "Correctable Non-Mirrored Demand Data ECC";
+ break;
+ case FERR_NF_M18ERR:
+ specific = "Correctable Mirrored Demand Data ECC";
+ break;
+ case FERR_NF_M19ERR:
+ specific = "Correctable Spare-Copy Data ECC";
+ break;
+ case FERR_NF_M20ERR:
+ specific = "Correctable Patrol Data ECC";
+ break;
+ }
+
/* Form out message */
snprintf(msg, sizeof(msg),
"(Branch=%d DRAM-Bank=%d RDWR=%s RAS=%d "
- "CAS=%d, CE Err=0x%x)", branch >> 1, bank,
- rdwr ? "Write" : "Read", ras, cas, ce_errors);
+ "CAS=%d, CE Err=0x%x (%s))", branch >> 1, bank,
+ rdwr ? "Write" : "Read", ras, cas, ce_errors,
+ specific);
/* Call the helper to output message */
edac_mc_handle_fbd_ce(mci, rank, channel, msg);
}
- /* See if any of the thermal errors have fired */
- misc_errors = allErrors & FERR_NF_THERMAL;
- if (misc_errors) {
- i5000_printk(KERN_WARNING, "\tTHERMAL Error, bits= 0x%x\n",
- misc_errors);
- }
-
- /* See if any of the thermal errors have fired */
- misc_errors = allErrors & FERR_NF_NON_RETRY;
- if (misc_errors) {
- i5000_printk(KERN_WARNING, "\tNON-Retry Errors, bits= 0x%x\n",
- misc_errors);
- }
+ if (!misc_messages)
+ return;
- /* See if any of the thermal errors have fired */
- misc_errors = allErrors & FERR_NF_NORTH_CRC;
+ misc_errors = allErrors & (FERR_NF_NON_RETRY | FERR_NF_NORTH_CRC |
+ FERR_NF_SPD_PROTOCOL | FERR_NF_DIMM_SPARE);
if (misc_errors) {
- i5000_printk(KERN_WARNING,
- "\tNORTHBOUND CRC Error, bits= 0x%x\n",
- misc_errors);
- }
+ switch (misc_errors) {
+ case FERR_NF_M13ERR:
+ specific = "Non-Retry or Redundant Retry FBD Memory "
+ "Alert or Redundant Fast Reset Timeout";
+ break;
+ case FERR_NF_M14ERR:
+ specific = "Non-Retry or Redundant Retry FBD "
+ "Configuration Alert";
+ break;
+ case FERR_NF_M15ERR:
+ specific = "Non-Retry or Redundant Retry FBD "
+ "Northbound CRC error on read data";
+ break;
+ case FERR_NF_M21ERR:
+ specific = "FBD Northbound CRC error on "
+ "FBD Sync Status";
+ break;
+ case FERR_NF_M22ERR:
+ specific = "SPD protocol error";
+ break;
+ case FERR_NF_M27ERR:
+ specific = "DIMM-spare copy started";
+ break;
+ case FERR_NF_M28ERR:
+ specific = "DIMM-spare copy completed";
+ break;
+ }
+ branch = EXTRACT_FBDCHAN_INDX(info->ferr_nf_fbd);
- /* See if any of the thermal errors have fired */
- misc_errors = allErrors & FERR_NF_SPD_PROTOCOL;
- if (misc_errors) {
- i5000_printk(KERN_WARNING,
- "\tSPD Protocol Error, bits= 0x%x\n",
- misc_errors);
- }
+ /* Form out message */
+ snprintf(msg, sizeof(msg),
+ "(Branch=%d Err=%#x (%s))", branch >> 1,
+ misc_errors, specific);
- /* See if any of the thermal errors have fired */
- misc_errors = allErrors & FERR_NF_DIMM_SPARE;
- if (misc_errors) {
- i5000_printk(KERN_WARNING, "\tDIMM-Spare Error, bits= 0x%x\n",
- misc_errors);
+ /* Call the helper to output message */
+ edac_mc_handle_fbd_ce(mci, 0, 0, msg);
}
}
@@ -1312,6 +1381,7 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
if (mci == NULL)
return -ENOMEM;
+ kobject_get(&mci->edac_mci_kobj);
debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci);
mci->dev = &pdev->dev; /* record ptr to the generic device */
@@ -1384,6 +1454,7 @@ fail1:
i5000_put_devices(mci);
fail0:
+ kobject_put(&mci->edac_mci_kobj);
edac_mc_free(mci);
return -ENODEV;
}
@@ -1429,7 +1500,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
/* retrieve references to resources, and free those resources */
i5000_put_devices(mci);
-
+ kobject_put(&mci->edac_mci_kobj);
edac_mc_free(mci);
}
@@ -1497,3 +1568,6 @@ MODULE_DESCRIPTION("MC Driver for Intel I5000 memory controllers - "
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
+module_param(misc_messages, int, 0444);
+MODULE_PARM_DESC(misc_messages, "Log miscellaneous non fatal messages");
+
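[Editor's note] The >Tmid hunk above deliberately reports the thermal event only once per boot, since disabled intelligent throttling is a BIOS configuration problem rather than a memory error. A sketch of that warn-once idiom in isolation; it assumes the driver's own i5000_mc_printk() helper, and later kernels grew printk_once() for the same job:

/* Sketch of the one-shot warning pattern used for the >Tmid event. */
static void i5000_warn_tmid_once(struct mem_ctl_info *mci)
{
	static bool warned;	/* one flag for the lifetime of the module */

	if (warned)
		return;
	warned = true;

	i5000_mc_printk(mci, KERN_ERR,
			">Tmid Thermal event with intelligent throttling disabled\n");
}

The new misc_messages parameter stays off by default; loading the module with misc_messages=1 (e.g. modprobe i5000_edac misc_messages=1) makes events such as SPD protocol errors and DIMM-spare copies show up through the corrected-error path, as the hunks above implement.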
diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
index c5305e3ee43..577760a82a0 100644
--- a/drivers/edac/i82443bxgx_edac.c
+++ b/drivers/edac/i82443bxgx_edac.c
@@ -114,6 +114,12 @@ struct i82443bxgx_edacmc_error_info {
static struct edac_pci_ctl_info *i82443bxgx_pci;
+static struct pci_dev *mci_pdev; /* init dev: in case that AGP code has
+ * already registered driver
+ */
+
+static int i82443bxgx_registered = 1;
+
static void i82443bxgx_edacmc_get_error_info(struct mem_ctl_info *mci,
struct i82443bxgx_edacmc_error_info
*info)
@@ -345,10 +351,17 @@ EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_probe1);
static int __devinit i82443bxgx_edacmc_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
+ int rc;
+
debugf0("MC: " __FILE__ ": %s()\n", __func__);
/* don't need to call pci_device_enable() */
- return i82443bxgx_edacmc_probe1(pdev, ent->driver_data);
+ rc = i82443bxgx_edacmc_probe1(pdev, ent->driver_data);
+
+ if (mci_pdev == NULL)
+ mci_pdev = pci_dev_get(pdev);
+
+ return rc;
}
static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
@@ -387,15 +400,61 @@ static struct pci_driver i82443bxgx_edacmc_driver = {
static int __init i82443bxgx_edacmc_init(void)
{
+ int pci_rc;
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
- return pci_register_driver(&i82443bxgx_edacmc_driver);
+ pci_rc = pci_register_driver(&i82443bxgx_edacmc_driver);
+ if (pci_rc < 0)
+ goto fail0;
+
+ if (mci_pdev == NULL) {
+ const struct pci_device_id *id = &i82443bxgx_pci_tbl[0];
+ int i = 0;
+ i82443bxgx_registered = 0;
+
+ while (mci_pdev == NULL && id->vendor != 0) {
+ mci_pdev = pci_get_device(id->vendor,
+ id->device, NULL);
+ i++;
+ id = &i82443bxgx_pci_tbl[i];
+ }
+ if (!mci_pdev) {
+ debugf0("i82443bxgx pci_get_device fail\n");
+ pci_rc = -ENODEV;
+ goto fail1;
+ }
+
+ pci_rc = i82443bxgx_edacmc_init_one(mci_pdev, i82443bxgx_pci_tbl);
+
+ if (pci_rc < 0) {
+ debugf0("i82443bxgx init fail\n");
+ pci_rc = -ENODEV;
+ goto fail1;
+ }
+ }
+
+ return 0;
+
+fail1:
+ pci_unregister_driver(&i82443bxgx_edacmc_driver);
+
+fail0:
+ if (mci_pdev != NULL)
+ pci_dev_put(mci_pdev);
+
+ return pci_rc;
}
static void __exit i82443bxgx_edacmc_exit(void)
{
pci_unregister_driver(&i82443bxgx_edacmc_driver);
+
+ if (!i82443bxgx_registered)
+ i82443bxgx_edacmc_remove_one(mci_pdev);
+
+ if (mci_pdev)
+ pci_dev_put(mci_pdev);
}
module_init(i82443bxgx_edacmc_init);
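[Editor's note] The mci_pdev/i82443bxgx_registered machinery above exists because the 82443BX/GX host bridge is usually already bound to another driver (typically intel-agp), so the EDAC probe never runs through the PCI core. A distilled sketch of the same fallback, using a single device ID for brevity where the patch walks all of i82443bxgx_pci_tbl:

/*
 * Sketch: probe the host bridge by hand if the PCI core never bound us.
 * PCI_DEVICE_ID_INTEL_82443BX_0 is used here purely as an illustration.
 */
static int __init i82443bxgx_force_probe(void)
{
	if (mci_pdev)
		return 0;		/* the normal .probe path already ran */

	mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
				  PCI_DEVICE_ID_INTEL_82443BX_0, NULL);
	if (!mci_pdev)
		return -ENODEV;

	i82443bxgx_registered = 0;	/* module exit must do the remove itself */
	return i82443bxgx_edacmc_init_one(mci_pdev, i82443bxgx_pci_tbl);
}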
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index 2265d9ca153..0cfcb2d075a 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -17,6 +17,7 @@
#include <linux/io.h>
#include <linux/mod_devicetable.h>
#include <linux/edac.h>
+#include <linux/smp.h>
#include <linux/of_platform.h>
#include <linux/of_device.h>
@@ -40,7 +41,7 @@ static u32 orig_pci_err_en;
#endif
static u32 orig_l2_err_disable;
-static u32 orig_hid1;
+static u32 orig_hid1[2];
/************************ MC SYSFS parts ***********************************/
@@ -647,6 +648,9 @@ static struct of_device_id mpc85xx_l2_err_of_match[] = {
{
.compatible = "fsl,8568-l2-cache-controller",
},
+ {
+ .compatible = "fsl,mpc8572-l2-cache-controller",
+ },
{},
};
@@ -912,7 +916,8 @@ static int __devinit mpc85xx_mc_err_probe(struct of_device *op,
/* register interrupts */
pdata->irq = irq_of_parse_and_map(op->node, 0);
res = devm_request_irq(&op->dev, pdata->irq,
- mpc85xx_mc_isr, IRQF_DISABLED,
+ mpc85xx_mc_isr,
+ IRQF_DISABLED | IRQF_SHARED,
"[EDAC] MC err", mci);
if (res < 0) {
printk(KERN_ERR "%s: Unable to request irq %d for "
@@ -980,6 +985,9 @@ static struct of_device_id mpc85xx_mc_err_of_match[] = {
{
.compatible = "fsl,8568-memory-controller",
},
+ {
+ .compatible = "fsl,mpc8572-memory-controller",
+ },
{},
};
@@ -995,6 +1003,14 @@ static struct of_platform_driver mpc85xx_mc_err_driver = {
},
};
+
+static void __init mpc85xx_mc_clear_rfxe(void *data)
+{
+ orig_hid1[smp_processor_id()] = mfspr(SPRN_HID1);
+ mtspr(SPRN_HID1, (orig_hid1[smp_processor_id()] & ~0x20000));
+}
+
+
static int __init mpc85xx_mc_init(void)
{
int res = 0;
@@ -1030,19 +1046,22 @@ static int __init mpc85xx_mc_init(void)
* need to clear HID1[RFXE] to disable machine check int
* so we can catch it
*/
- if (edac_op_state == EDAC_OPSTATE_INT) {
- orig_hid1 = mfspr(SPRN_HID1);
- mtspr(SPRN_HID1, (orig_hid1 & ~0x20000));
- }
+ if (edac_op_state == EDAC_OPSTATE_INT)
+ on_each_cpu(mpc85xx_mc_clear_rfxe, NULL, 0);
return 0;
}
module_init(mpc85xx_mc_init);
+static void __exit mpc85xx_mc_restore_hid1(void *data)
+{
+ mtspr(SPRN_HID1, orig_hid1[smp_processor_id()]);
+}
+
static void __exit mpc85xx_mc_exit(void)
{
- mtspr(SPRN_HID1, orig_hid1);
+ on_each_cpu(mpc85xx_mc_restore_hid1, NULL, 0);
#ifdef CONFIG_PCI
of_unregister_platform_driver(&mpc85xx_pci_err_driver);
#endif
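[Editor's note] The mpc85xx hunks turn orig_hid1 into a per-core array and switch to on_each_cpu() because the mpc8572 parts added to the match tables are dual-core: RFXE must be cleared in HID1 on every core, or a faulting core still escalates the error to a machine check before the EDAC interrupt handler can log it. A sketch with the magic number given a name (HID1_RFXE is our label, not a kernel constant):

#define HID1_RFXE	0x20000		/* label is ours; the patch uses the bare mask */

/* Runs on each core via on_each_cpu(example_clear_rfxe, NULL, 0). */
static void example_clear_rfxe(void *data)
{
	int cpu = smp_processor_id();

	orig_hid1[cpu] = mfspr(SPRN_HID1);
	mtspr(SPRN_HID1, orig_hid1[cpu] & ~HID1_RFXE);
}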
diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
new file mode 100644
index 00000000000..2406c2ce284
--- /dev/null
+++ b/drivers/edac/x38_edac.c
@@ -0,0 +1,524 @@
+/*
+ * Intel X38 Memory Controller kernel module
+ * Copyright (C) 2008 Cluster Computing, Inc.
+ *
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * This file is based on i3200_edac.c
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+#include <linux/edac.h>
+#include "edac_core.h"
+
+#define X38_REVISION "1.1"
+
+#define EDAC_MOD_STR "x38_edac"
+
+#define PCI_DEVICE_ID_INTEL_X38_HB 0x29e0
+
+#define X38_RANKS 8
+#define X38_RANKS_PER_CHANNEL 4
+#define X38_CHANNELS 2
+
+/* Intel X38 register addresses - device 0 function 0 - DRAM Controller */
+
+#define X38_MCHBAR_LOW 0x48 /* MCH Memory Mapped Register BAR */
+#define X38_MCHBAR_HIGH 0x4b
+#define X38_MCHBAR_MASK 0xfffffc000ULL /* bits 35:14 */
+#define X38_MMR_WINDOW_SIZE 16384
+
+#define X38_TOM 0xa0 /* Top of Memory (16b)
+ *
+ * 15:10 reserved
+ * 9:0 total populated physical memory
+ */
+#define X38_TOM_MASK 0x3ff /* bits 9:0 */
+#define X38_TOM_SHIFT 26 /* 64MiB grain */
+
+#define X38_ERRSTS 0xc8 /* Error Status Register (16b)
+ *
+ * 15 reserved
+ * 14 Isochronous TBWRR Run Behind FIFO Full
+ * (ITCV)
+ * 13 Isochronous TBWRR Run Behind FIFO Put
+ * (ITSTV)
+ * 12 reserved
+ * 11 MCH Thermal Sensor Event
+ * for SMI/SCI/SERR (GTSE)
+ * 10 reserved
+ * 9 LOCK to non-DRAM Memory Flag (LCKF)
+ * 8 reserved
+ * 7 DRAM Throttle Flag (DTF)
+ * 6:2 reserved
+ * 1 Multi-bit DRAM ECC Error Flag (DMERR)
+ * 0 Single-bit DRAM ECC Error Flag (DSERR)
+ */
+#define X38_ERRSTS_UE 0x0002
+#define X38_ERRSTS_CE 0x0001
+#define X38_ERRSTS_BITS (X38_ERRSTS_UE | X38_ERRSTS_CE)
+
+
+/* Intel MMIO register space - device 0 function 0 - MMR space */
+
+#define X38_C0DRB 0x200 /* Channel 0 DRAM Rank Boundary (16b x 4)
+ *
+ * 15:10 reserved
+ * 9:0 Channel 0 DRAM Rank Boundary Address
+ */
+#define X38_C1DRB 0x600 /* Channel 1 DRAM Rank Boundary (16b x 4) */
+#define X38_DRB_MASK 0x3ff /* bits 9:0 */
+#define X38_DRB_SHIFT 26 /* 64MiB grain */
+
+#define X38_C0ECCERRLOG 0x280 /* Channel 0 ECC Error Log (64b)
+ *
+ * 63:48 Error Column Address (ERRCOL)
+ * 47:32 Error Row Address (ERRROW)
+ * 31:29 Error Bank Address (ERRBANK)
+ * 28:27 Error Rank Address (ERRRANK)
+ * 26:24 reserved
+ * 23:16 Error Syndrome (ERRSYND)
+ * 15: 2 reserved
+ * 1 Multiple Bit Error Status (MERRSTS)
+ * 0 Correctable Error Status (CERRSTS)
+ */
+#define X38_C1ECCERRLOG 0x680 /* Channel 1 ECC Error Log (64b) */
+#define X38_ECCERRLOG_CE 0x1
+#define X38_ECCERRLOG_UE 0x2
+#define X38_ECCERRLOG_RANK_BITS 0x18000000
+#define X38_ECCERRLOG_SYNDROME_BITS 0xff0000
+
+#define X38_CAPID0 0xe0 /* see P.94 of spec for details */
+
+static int x38_channel_num;
+
+static int how_many_channel(struct pci_dev *pdev)
+{
+ unsigned char capid0_8b; /* 8th byte of CAPID0 */
+
+ pci_read_config_byte(pdev, X38_CAPID0 + 8, &capid0_8b);
+ if (capid0_8b & 0x20) { /* check DCD: Dual Channel Disable */
+ debugf0("In single channel mode.\n");
+ x38_channel_num = 1;
+ } else {
+ debugf0("In dual channel mode.\n");
+ x38_channel_num = 2;
+ }
+
+ return x38_channel_num;
+}
+
+static unsigned long eccerrlog_syndrome(u64 log)
+{
+ return (log & X38_ECCERRLOG_SYNDROME_BITS) >> 16;
+}
+
+static int eccerrlog_row(int channel, u64 log)
+{
+ return ((log & X38_ECCERRLOG_RANK_BITS) >> 27) |
+ (channel * X38_RANKS_PER_CHANNEL);
+}
+
+enum x38_chips {
+ X38 = 0,
+};
+
+struct x38_dev_info {
+ const char *ctl_name;
+};
+
+struct x38_error_info {
+ u16 errsts;
+ u16 errsts2;
+ u64 eccerrlog[X38_CHANNELS];
+};
+
+static const struct x38_dev_info x38_devs[] = {
+ [X38] = {
+ .ctl_name = "x38"},
+};
+
+static struct pci_dev *mci_pdev;
+static int x38_registered = 1;
+
+
+static void x38_clear_error_info(struct mem_ctl_info *mci)
+{
+ struct pci_dev *pdev;
+
+ pdev = to_pci_dev(mci->dev);
+
+ /*
+ * Clear any error bits.
+ * (Yes, we really clear bits by writing 1 to them.)
+ */
+ pci_write_bits16(pdev, X38_ERRSTS, X38_ERRSTS_BITS,
+ X38_ERRSTS_BITS);
+}
+
+static u64 x38_readq(const void __iomem *addr)
+{
+ return readl(addr) | (((u64)readl(addr + 4)) << 32);
+}
+
+static void x38_get_and_clear_error_info(struct mem_ctl_info *mci,
+ struct x38_error_info *info)
+{
+ struct pci_dev *pdev;
+ void __iomem *window = mci->pvt_info;
+
+ pdev = to_pci_dev(mci->dev);
+
+ /*
+ * This is a mess because there is no atomic way to read all the
+ * registers at once and the registers can transition from CE being
+ * overwritten by UE.
+ */
+ pci_read_config_word(pdev, X38_ERRSTS, &info->errsts);
+ if (!(info->errsts & X38_ERRSTS_BITS))
+ return;
+
+ info->eccerrlog[0] = x38_readq(window + X38_C0ECCERRLOG);
+ if (x38_channel_num == 2)
+ info->eccerrlog[1] = x38_readq(window + X38_C1ECCERRLOG);
+
+ pci_read_config_word(pdev, X38_ERRSTS, &info->errsts2);
+
+ /*
+ * If the error is the same for both reads then the first set
+ * of reads is valid. If there is a change then there is a CE
+ * with no info and the second set of reads is valid and
+ * should be UE info.
+ */
+ if ((info->errsts ^ info->errsts2) & X38_ERRSTS_BITS) {
+ info->eccerrlog[0] = x38_readq(window + X38_C0ECCERRLOG);
+ if (x38_channel_num == 2)
+ info->eccerrlog[1] =
+ x38_readq(window + X38_C1ECCERRLOG);
+ }
+
+ x38_clear_error_info(mci);
+}
+
+static void x38_process_error_info(struct mem_ctl_info *mci,
+ struct x38_error_info *info)
+{
+ int channel;
+ u64 log;
+
+ if (!(info->errsts & X38_ERRSTS_BITS))
+ return;
+
+ if ((info->errsts ^ info->errsts2) & X38_ERRSTS_BITS) {
+ edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+ info->errsts = info->errsts2;
+ }
+
+ for (channel = 0; channel < x38_channel_num; channel++) {
+ log = info->eccerrlog[channel];
+ if (log & X38_ECCERRLOG_UE) {
+ edac_mc_handle_ue(mci, 0, 0,
+ eccerrlog_row(channel, log), "x38 UE");
+ } else if (log & X38_ECCERRLOG_CE) {
+ edac_mc_handle_ce(mci, 0, 0,
+ eccerrlog_syndrome(log),
+ eccerrlog_row(channel, log), 0, "x38 CE");
+ }
+ }
+}
+
+static void x38_check(struct mem_ctl_info *mci)
+{
+ struct x38_error_info info;
+
+ debugf1("MC%d: %s()\n", mci->mc_idx, __func__);
+ x38_get_and_clear_error_info(mci, &info);
+ x38_process_error_info(mci, &info);
+}
+
+
+void __iomem *x38_map_mchbar(struct pci_dev *pdev)
+{
+ union {
+ u64 mchbar;
+ struct {
+ u32 mchbar_low;
+ u32 mchbar_high;
+ };
+ } u;
+ void __iomem *window;
+
+ pci_read_config_dword(pdev, X38_MCHBAR_LOW, &u.mchbar_low);
+ pci_write_config_dword(pdev, X38_MCHBAR_LOW, u.mchbar_low | 0x1);
+ pci_read_config_dword(pdev, X38_MCHBAR_HIGH, &u.mchbar_high);
+ u.mchbar &= X38_MCHBAR_MASK;
+
+ if (u.mchbar != (resource_size_t)u.mchbar) {
+ printk(KERN_ERR
+ "x38: mmio space beyond accessible range (0x%llx)\n",
+ (unsigned long long)u.mchbar);
+ return NULL;
+ }
+
+ window = ioremap_nocache(u.mchbar, X38_MMR_WINDOW_SIZE);
+ if (!window)
+ printk(KERN_ERR "x38: cannot map mmio space at 0x%llx\n",
+ (unsigned long long)u.mchbar);
+
+ return window;
+}
+
+
+static void x38_get_drbs(void __iomem *window,
+ u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL])
+{
+ int i;
+
+ for (i = 0; i < X38_RANKS_PER_CHANNEL; i++) {
+ drbs[0][i] = readw(window + X38_C0DRB + 2*i) & X38_DRB_MASK;
+ drbs[1][i] = readw(window + X38_C1DRB + 2*i) & X38_DRB_MASK;
+ }
+}
+
+static bool x38_is_stacked(struct pci_dev *pdev,
+ u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL])
+{
+ u16 tom;
+
+ pci_read_config_word(pdev, X38_TOM, &tom);
+ tom &= X38_TOM_MASK;
+
+ return drbs[X38_CHANNELS - 1][X38_RANKS_PER_CHANNEL - 1] == tom;
+}
+
+static unsigned long drb_to_nr_pages(
+ u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL],
+ bool stacked, int channel, int rank)
+{
+ int n;
+
+ n = drbs[channel][rank];
+ if (rank > 0)
+ n -= drbs[channel][rank - 1];
+ if (stacked && (channel == 1) && drbs[channel][rank] ==
+ drbs[channel][X38_RANKS_PER_CHANNEL - 1]) {
+ n -= drbs[0][X38_RANKS_PER_CHANNEL - 1];
+ }
+
+ n <<= (X38_DRB_SHIFT - PAGE_SHIFT);
+ return n;
+}
+
+static int x38_probe1(struct pci_dev *pdev, int dev_idx)
+{
+ int rc;
+ int i;
+ struct mem_ctl_info *mci = NULL;
+ unsigned long last_page;
+ u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL];
+ bool stacked;
+ void __iomem *window;
+
+ debugf0("MC: %s()\n", __func__);
+
+ window = x38_map_mchbar(pdev);
+ if (!window)
+ return -ENODEV;
+
+ x38_get_drbs(window, drbs);
+
+ how_many_channel(pdev);
+
+ /* FIXME: unconventional pvt_info usage */
+ mci = edac_mc_alloc(0, X38_RANKS, x38_channel_num, 0);
+ if (!mci)
+ return -ENOMEM;
+
+ debugf3("MC: %s(): init mci\n", __func__);
+
+ mci->dev = &pdev->dev;
+ mci->mtype_cap = MEM_FLAG_DDR2;
+
+ mci->edac_ctl_cap = EDAC_FLAG_SECDED;
+ mci->edac_cap = EDAC_FLAG_SECDED;
+
+ mci->mod_name = EDAC_MOD_STR;
+ mci->mod_ver = X38_REVISION;
+ mci->ctl_name = x38_devs[dev_idx].ctl_name;
+ mci->dev_name = pci_name(pdev);
+ mci->edac_check = x38_check;
+ mci->ctl_page_to_phys = NULL;
+ mci->pvt_info = window;
+
+ stacked = x38_is_stacked(pdev, drbs);
+
+ /*
+ * The dram rank boundary (DRB) reg values are boundary addresses
+ * for each DRAM rank with a granularity of 64MB. DRB regs are
+ * cumulative; the last one will contain the total memory
+ * contained in all ranks.
+ */
+ last_page = -1UL;
+ for (i = 0; i < mci->nr_csrows; i++) {
+ unsigned long nr_pages;
+ struct csrow_info *csrow = &mci->csrows[i];
+
+ nr_pages = drb_to_nr_pages(drbs, stacked,
+ i / X38_RANKS_PER_CHANNEL,
+ i % X38_RANKS_PER_CHANNEL);
+
+ if (nr_pages == 0) {
+ csrow->mtype = MEM_EMPTY;
+ continue;
+ }
+
+ csrow->first_page = last_page + 1;
+ last_page += nr_pages;
+ csrow->last_page = last_page;
+ csrow->nr_pages = nr_pages;
+
+ csrow->grain = nr_pages << PAGE_SHIFT;
+ csrow->mtype = MEM_DDR2;
+ csrow->dtype = DEV_UNKNOWN;
+ csrow->edac_mode = EDAC_UNKNOWN;
+ }
+
+ x38_clear_error_info(mci);
+
+ rc = -ENODEV;
+ if (edac_mc_add_mc(mci)) {
+ debugf3("MC: %s(): failed edac_mc_add_mc()\n", __func__);
+ goto fail;
+ }
+
+ /* get this far and it's successful */
+ debugf3("MC: %s(): success\n", __func__);
+ return 0;
+
+fail:
+ iounmap(window);
+ if (mci)
+ edac_mc_free(mci);
+
+ return rc;
+}
+
+static int __devinit x38_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int rc;
+
+ debugf0("MC: %s()\n", __func__);
+
+ if (pci_enable_device(pdev) < 0)
+ return -EIO;
+
+ rc = x38_probe1(pdev, ent->driver_data);
+ if (!mci_pdev)
+ mci_pdev = pci_dev_get(pdev);
+
+ return rc;
+}
+
+static void __devexit x38_remove_one(struct pci_dev *pdev)
+{
+ struct mem_ctl_info *mci;
+
+ debugf0("%s()\n", __func__);
+
+ mci = edac_mc_del_mc(&pdev->dev);
+ if (!mci)
+ return;
+
+ iounmap(mci->pvt_info);
+
+ edac_mc_free(mci);
+}
+
+static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
+ {
+ PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ X38},
+ {
+ 0,
+ } /* 0 terminated list. */
+};
+
+MODULE_DEVICE_TABLE(pci, x38_pci_tbl);
+
+static struct pci_driver x38_driver = {
+ .name = EDAC_MOD_STR,
+ .probe = x38_init_one,
+ .remove = __devexit_p(x38_remove_one),
+ .id_table = x38_pci_tbl,
+};
+
+static int __init x38_init(void)
+{
+ int pci_rc;
+
+ debugf3("MC: %s()\n", __func__);
+
+ /* Ensure that the OPSTATE is set correctly for POLL or NMI */
+ opstate_init();
+
+ pci_rc = pci_register_driver(&x38_driver);
+ if (pci_rc < 0)
+ goto fail0;
+
+ if (!mci_pdev) {
+ x38_registered = 0;
+ mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_X38_HB, NULL);
+ if (!mci_pdev) {
+ debugf0("x38 pci_get_device fail\n");
+ pci_rc = -ENODEV;
+ goto fail1;
+ }
+
+ pci_rc = x38_init_one(mci_pdev, x38_pci_tbl);
+ if (pci_rc < 0) {
+ debugf0("x38 init fail\n");
+ pci_rc = -ENODEV;
+ goto fail1;
+ }
+ }
+
+ return 0;
+
+fail1:
+ pci_unregister_driver(&x38_driver);
+
+fail0:
+ if (mci_pdev)
+ pci_dev_put(mci_pdev);
+
+ return pci_rc;
+}
+
+static void __exit x38_exit(void)
+{
+ debugf3("MC: %s()\n", __func__);
+
+ pci_unregister_driver(&x38_driver);
+ if (!x38_registered) {
+ x38_remove_one(mci_pdev);
+ pci_dev_put(mci_pdev);
+ }
+}
+
+module_init(x38_init);
+module_exit(x38_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Cluster Computing, Inc. Hitoshi Mitake");
+MODULE_DESCRIPTION("MC support for Intel X38 memory hub controllers");
+
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
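[Editor's note] Most of the new x38_edac.c is register decode; the piece worth double-checking is the DRB arithmetic: the rank-boundary registers count 64 MiB units (X38_DRB_SHIFT == 26), so a rank's size in pages is its DRB delta shifted left by 26 - PAGE_SHIFT. A throwaway user-space check of that conversion, with invented register values and 4 KiB pages assumed:

#include <stdio.h>

int main(void)
{
	unsigned int drb_prev = 0x010;	/* previous rank ends at 16 x 64 MiB */
	unsigned int drb_this = 0x020;	/* this rank ends at 32 x 64 MiB */
	unsigned long nr_pages =
		(unsigned long)(drb_this - drb_prev) << (26 - 12);

	/* 16 units of 64 MiB -> 1 GiB -> 262144 pages of 4 KiB. */
	printf("%lu pages (%lu MiB)\n", nr_pages, nr_pages >> 8);
	return 0;
}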